Libav
vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of Libav.
8  *
9  * Libav is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * Libav is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with Libav; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "h264chroma.h"
35 #include "vc1.h"
36 #include "vc1data.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
39 #include "unary.h"
40 #include "mathops.h"
41 
42 #undef NDEBUG
43 #include <assert.h>
44 
45 #define MB_INTRA_VLC_BITS 9
46 #define DC_VLC_BITS 9
47 
48 
49 // offset tables for interlaced picture MVDATA decoding
50 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
51 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
52 
53 /***********************************************************************/
64 enum Imode {
72 }; //imode defines
74 
76 {
77  MpegEncContext *s = &v->s;
79  if (v->field_mode && !(v->second_field ^ v->tff)) {
80  s->dest[0] += s->current_picture_ptr->f.linesize[0];
81  s->dest[1] += s->current_picture_ptr->f.linesize[1];
82  s->dest[2] += s->current_picture_ptr->f.linesize[2];
83  }
84 }
85  //Bitplane group
87 
89 {
90  MpegEncContext *s = &v->s;
91  int topleft_mb_pos, top_mb_pos;
92  int stride_y, fieldtx = 0;
93  int v_dist;
94 
95  /* The put pixels loop is always one MB row behind the decoding loop,
96  * because we can only put pixels when overlap filtering is done, and
97  * for filtering of the bottom edge of a MB, we need the next MB row
98  * present as well.
99  * Within the row, the put pixels loop is also one MB col behind the
100  * decoding loop. The reason for this is again, because for filtering
101  * of the right MB edge, we need the next MB present. */
102  if (!s->first_slice_line) {
103  if (s->mb_x) {
104  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
105  if (v->fcm == ILACE_FRAME)
106  fieldtx = v->fieldtx_plane[topleft_mb_pos];
107  stride_y = s->linesize << fieldtx;
108  v_dist = (16 - fieldtx) >> (fieldtx == 0);
110  s->dest[0] - 16 * s->linesize - 16,
111  stride_y);
113  s->dest[0] - 16 * s->linesize - 8,
114  stride_y);
116  s->dest[0] - v_dist * s->linesize - 16,
117  stride_y);
119  s->dest[0] - v_dist * s->linesize - 8,
120  stride_y);
122  s->dest[1] - 8 * s->uvlinesize - 8,
123  s->uvlinesize);
125  s->dest[2] - 8 * s->uvlinesize - 8,
126  s->uvlinesize);
127  }
128  if (s->mb_x == s->mb_width - 1) {
129  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
130  if (v->fcm == ILACE_FRAME)
131  fieldtx = v->fieldtx_plane[top_mb_pos];
132  stride_y = s->linesize << fieldtx;
133  v_dist = fieldtx ? 15 : 8;
135  s->dest[0] - 16 * s->linesize,
136  stride_y);
138  s->dest[0] - 16 * s->linesize + 8,
139  stride_y);
141  s->dest[0] - v_dist * s->linesize,
142  stride_y);
144  s->dest[0] - v_dist * s->linesize + 8,
145  stride_y);
147  s->dest[1] - 8 * s->uvlinesize,
148  s->uvlinesize);
150  s->dest[2] - 8 * s->uvlinesize,
151  s->uvlinesize);
152  }
153  }
154 
155 #define inc_blk_idx(idx) do { \
156  idx++; \
157  if (idx >= v->n_allocated_blks) \
158  idx = 0; \
159  } while (0)
160 
165 }
166 
167 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
168 {
169  MpegEncContext *s = &v->s;
170  int j;
171  if (!s->first_slice_line) {
172  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
173  if (s->mb_x)
174  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
175  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
176  for (j = 0; j < 2; j++) {
177  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
178  if (s->mb_x)
179  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
180  }
181  }
182  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
183 
184  if (s->mb_y == s->end_mb_y - 1) {
185  if (s->mb_x) {
186  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
187  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
188  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
189  }
190  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
191  }
192 }
193 
195 {
196  MpegEncContext *s = &v->s;
197  int j;
198 
199  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
200  * means it runs two rows/cols behind the decoding loop. */
201  if (!s->first_slice_line) {
202  if (s->mb_x) {
203  if (s->mb_y >= s->start_mb_y + 2) {
204  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
205 
206  if (s->mb_x >= 2)
207  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
208  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
209  for (j = 0; j < 2; j++) {
210  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
211  if (s->mb_x >= 2) {
212  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
213  }
214  }
215  }
216  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
217  }
218 
219  if (s->mb_x == s->mb_width - 1) {
220  if (s->mb_y >= s->start_mb_y + 2) {
221  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
222 
223  if (s->mb_x)
224  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
225  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
226  for (j = 0; j < 2; j++) {
227  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
228  if (s->mb_x >= 2) {
229  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
230  }
231  }
232  }
233  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
234  }
235 
236  if (s->mb_y == s->end_mb_y) {
237  if (s->mb_x) {
238  if (s->mb_x >= 2)
239  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
240  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
241  if (s->mb_x >= 2) {
242  for (j = 0; j < 2; j++) {
243  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
244  }
245  }
246  }
247 
248  if (s->mb_x == s->mb_width - 1) {
249  if (s->mb_x)
250  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
251  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
252  if (s->mb_x) {
253  for (j = 0; j < 2; j++) {
254  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
255  }
256  }
257  }
258  }
259  }
260 }
261 
263 {
264  MpegEncContext *s = &v->s;
265  int mb_pos;
266 
267  if (v->condover == CONDOVER_NONE)
268  return;
269 
270  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
271 
272  /* Within a MB, the horizontal overlap always runs before the vertical.
273  * To accomplish that, we run the H on left and internal borders of the
274  * currently decoded MB. Then, we wait for the next overlap iteration
275  * to do H overlap on the right edge of this MB, before moving over and
276  * running the V overlap. Therefore, the V overlap makes us trail by one
277  * MB col and the H overlap filter makes us trail by one MB row. This
278  * is reflected in the time at which we run the put_pixels loop. */
279  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
280  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
281  v->over_flags_plane[mb_pos - 1])) {
283  v->block[v->cur_blk_idx][0]);
285  v->block[v->cur_blk_idx][2]);
286  if (!(s->flags & CODEC_FLAG_GRAY)) {
288  v->block[v->cur_blk_idx][4]);
290  v->block[v->cur_blk_idx][5]);
291  }
292  }
294  v->block[v->cur_blk_idx][1]);
296  v->block[v->cur_blk_idx][3]);
297 
298  if (s->mb_x == s->mb_width - 1) {
299  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
300  v->over_flags_plane[mb_pos - s->mb_stride])) {
302  v->block[v->cur_blk_idx][0]);
304  v->block[v->cur_blk_idx][1]);
305  if (!(s->flags & CODEC_FLAG_GRAY)) {
307  v->block[v->cur_blk_idx][4]);
309  v->block[v->cur_blk_idx][5]);
310  }
311  }
313  v->block[v->cur_blk_idx][2]);
315  v->block[v->cur_blk_idx][3]);
316  }
317  }
318  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
319  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
320  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
322  v->block[v->left_blk_idx][0]);
324  v->block[v->left_blk_idx][1]);
325  if (!(s->flags & CODEC_FLAG_GRAY)) {
327  v->block[v->left_blk_idx][4]);
329  v->block[v->left_blk_idx][5]);
330  }
331  }
333  v->block[v->left_blk_idx][2]);
335  v->block[v->left_blk_idx][3]);
336  }
337 }
338 
/**
 * Motion-compensate one whole macroblock from a single MV (1MV mode):
 * 16x16 luma plus the chroma MV derived from the luma MV.
 *
 * @param v   VC-1 decoder context
 * @param dir 0 = forward reference (last/current field), 1 = backward (next picture)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int i;
    uint8_t (*luty)[256], (*lutuv)[256];
    int use_ic;

    /* bail out when no reference picture is available */
    if ((!v->field_mode ||
        (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
        return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        for (i = 0; i < 4; i++) {
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
        }
    }

    /* derive the chroma MV from the luma MV */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->field_mode &&
        v->cur_field_type != v->ref_field_type[dir]) {
        /* referencing the opposite field: bias the vertical component */
        my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }

    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    /* select reference planes, IC lookup tables and IC flag */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f.data[0];
            srcU = s->current_picture.f.data[1];
            srcV = s->current_picture.f.data[2];
            luty = v->curr_luty;
            lutuv = v->curr_lutuv;
            use_ic = v->curr_use_ic;
        } else {
            srcY = s->last_picture.f.data[0];
            srcU = s->last_picture.f.data[1];
            srcV = s->last_picture.f.data[2];
            luty = v->last_luty;
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f.data[0];
        srcU = s->next_picture.f.data[1];
        srcV = s->next_picture.f.data[2];
        luty = v->next_luty;
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcY || !srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    /* clamp the source position to the (profile-dependent) allowed range */
    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode && v->ref_field_type[dir]) {
        /* bottom-field reference: start one line down */
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* slow path: edge emulation and/or per-pixel scaling needed */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        /* NOTE(review): the line opening this call was lost in extraction;
         * from the parallel chroma calls below it appears to be
         * "s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY," — restore it
         * from the upstream file before building. */
        s->linesize, s->linesize,
        17 + s->mspel * 2, 17 + s->mspel * 2,
        src_x - s->mspel, src_y - s->mspel,
        s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* f selects the LUT of the field the source line belongs to */
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if (s->mspel) {
        /* quarter-pel luma MC via the VC-1 mspel filters, 4 8x8 quadrants */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
547 
/**
 * Median of four integers: the average (truncating integer division) of the
 * two middle values.
 */
static inline int median4(int a, int b, int c, int d)
{
    int lo1 = a, hi1 = b, lo2 = c, hi2 = d, t;

    /* order each pair so lo <= hi */
    if (lo1 > hi1) {
        t = lo1; lo1 = hi1; hi1 = t;
    }
    if (lo2 > hi2) {
        t = lo2; lo2 = hi2; hi2 = t;
    }
    /* the two middle values are the smaller max and the larger min */
    return ((hi1 < hi2 ? hi1 : hi2) + (lo1 > lo2 ? lo1 : lo2)) / 2;
}
558 
/**
 * Motion-compensate one 8x8 luma sub-block of a 4-MV macroblock.
 *
 * @param v   VC-1 decoder context
 * @param n   sub-block index, 0..3 (0=top-left, 1=top-right, 2=bottom-left,
 *            3=bottom-right)
 * @param dir 0 = forward reference, 1 = backward (next picture)
 * @param avg nonzero to average into the destination instead of storing
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
{
    MpegEncContext *s = &v->s;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*luty)[256];
    int use_ic;

    /* bail out when no reference picture is available */
    if ((!v->field_mode ||
        (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
        return;

    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];

    /* select reference plane, IC lookup table and IC flag */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f.data[0];
            luty = v->curr_luty;
            use_ic = v->curr_use_ic;
        } else {
            srcY = s->last_picture.f.data[0];
            luty = v->last_luty;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f.data[0];
        luty = v->next_luty;
        use_ic = v->next_use_ic;
    }

    if (!srcY) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[dir])
            /* referencing the opposite field: bias the vertical component */
            my = my - 2 + 4 * v->cur_field_type;
    }

    /* after the last sub-block of a field-mode P MB, store the MV of the
     * dominant field (median/mid/average of the matching blocks) for later
     * prediction */
    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int same_count = 0, opp_count = 0, k;
        int chosen_mv[2][4][2], f;
        int tx, ty;
        for (k = 0; k < 4; k++) {
            f = v->mv_f[0][s->block_index[k] + v->blocks_off];
            chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
            chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
            opp_count += f;
            same_count += 1 - f;
        }
        f = opp_count > same_count;
        switch (f ? opp_count : same_count) {
        case 4:
            tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
                         chosen_mv[f][2][0], chosen_mv[f][3][0]);
            ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
                         chosen_mv[f][2][1], chosen_mv[f][3][1]);
            break;
        case 3:
            tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
            ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
            break;
        case 2:
            tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
            ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
            break;
        }
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    }

    if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
        int qx, qy;
        int width = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
        }
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y * 8) + (my >> 3);

        /* pull the MV back so the reference block lies inside the picture */
        if (qx < -17)
            mx -= 4 * (qx + 17);
        else if (qx > width)
            mx -= 4 * (qx - width);
        if (qy < -18)
            my -= 8 * (qy + 18);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);
    }

    /* destination offset of this sub-block inside the MB */
    if ((v->fcm == ILACE_FRAME) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
    else
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;

    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
    if (!fieldmv)
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
    else
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);

    /* clamp the source position to the (profile-dependent) allowed range */
    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
    } else {
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        if (v->fcm == ILACE_FRAME) {
            if (src_y & 1)
                src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            else
                src_y = av_clip(src_y, -18, s->avctx->coded_height);
        } else {
            src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
        }
    }

    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        /* bottom-field reference: start one line down */
        srcY += s->current_picture_ptr->f.linesize[0];

    if (fieldmv && !(src_y & 1))
        v_edge_pos--;
    if (fieldmv && (src_y & 1) && src_y < 4)
        src_y--;
    /* slow path: edge emulation and/or per-pixel scaling needed */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 13 || v_edge_pos < 23
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        /* NOTE(review): the line opening this call was lost in extraction;
         * by analogy with vc1_mc_1mv it appears to be
         * "s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY," — restore it
         * from the upstream file before building. */
        s->linesize, s->linesize,
        9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
        src_x - s->mspel, src_y - (s->mspel << fieldmv),
        s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize << fieldmv;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                /* f selects the LUT of the field the source line belongs to */
                int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize << fieldmv;
            }
        }
        srcY += s->mspel * (1 + (s->linesize << fieldmv));
    }

    if (s->mspel) {
        /* quarter-pel luma MC via the VC-1 mspel filters */
        dxy = ((my & 3) << 2) | (mx & 3);
        if (avg)
            v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
        else
            v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}
749 
750 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
751 {
752  int idx, i;
753  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
754 
755  idx = ((a[3] != flag) << 3)
756  | ((a[2] != flag) << 2)
757  | ((a[1] != flag) << 1)
758  | (a[0] != flag);
759  if (!idx) {
760  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
761  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
762  return 4;
763  } else if (count[idx] == 1) {
764  switch (idx) {
765  case 0x1:
766  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
767  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
768  return 3;
769  case 0x2:
770  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
771  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
772  return 3;
773  case 0x4:
774  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
775  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
776  return 3;
777  case 0x8:
778  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
779  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
780  return 3;
781  }
782  } else if (count[idx] == 2) {
783  int t1 = 0, t2 = 0;
784  for (i = 0; i < 3; i++)
785  if (!a[i]) {
786  t1 = i;
787  break;
788  }
789  for (i = t1 + 1; i < 4; i++)
790  if (!a[i]) {
791  t2 = i;
792  break;
793  }
794  *tx = (mvx[t1] + mvx[t2]) / 2;
795  *ty = (mvy[t1] + mvy[t2]) / 2;
796  return 2;
797  } else {
798  return 0;
799  }
800  return -1;
801 }
802 
/**
 * Motion-compensate both chroma planes of a 4-MV macroblock: derive a single
 * chroma MV from the four luma MVs, then do 8x8 chroma MC.
 *
 * @param v   VC-1 decoder context
 * @param dir 0 = forward reference, 1 = backward (next picture)
 */
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int k, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4], mv_f[4];
    int valid_count;
    int chroma_ref_type = v->cur_field_type;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*lutuv)[256];
    int use_ic;

    if (!v->field_mode && !v->s.last_picture.f.data[0])
        return;
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    /* gather the four luma MVs plus per-block intra / field flags */
    for (k = 0; k < 4; k++) {
        mvx[k] = s->mv[dir][k][0];
        mvy[k] = s->mv[dir][k][1];
        intra[k] = v->mb_type[0][s->block_index[k]];
        if (v->field_mode)
            mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
    }

    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || (v->field_mode && !v->numref)) {
        valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
        chroma_ref_type = v->reffield;
        if (!valid_count) {
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        }
    } else {
        /* field mode with two references: use the dominant field's MVs */
        int dominant = 0;
        if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
            dominant = 1;
        valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
        if (dominant)
            chroma_ref_type = !v->cur_field_type;
    }
    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
        return;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;

    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->fastuvmc) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    /* clamp the source position to the (profile-dependent) allowed range */
    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    /* select reference planes, IC lookup table and IC flag */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
            srcU = s->current_picture.f.data[1];
            srcV = s->current_picture.f.data[2];
            lutuv = v->curr_lutuv;
            use_ic = v->curr_use_ic;
        } else {
            srcU = s->last_picture.f.data[1];
            srcV = s->last_picture.f.data[2];
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcU = s->next_picture.f.data[1];
        srcV = s->next_picture.f.data[2];
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode) {
        if (chroma_ref_type) {
            /* bottom-field reference: start one line down */
            srcU += s->current_picture_ptr->f.linesize[1];
            srcV += s->current_picture_ptr->f.linesize[2];
        }
    }

    /* slow path: edge emulation and/or per-pixel scaling needed */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 18 || v_edge_pos < 18
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
        /* NOTE(review): the line opening this call was lost in extraction;
         * by analogy with the srcV call just below it appears to be
         * "s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU," — restore it
         * from the upstream file before building. */
        s->uvlinesize, s->uvlinesize,
        8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
        s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                /* f selects the LUT of the field the source line belongs to */
                int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
    }

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
974 
977 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
978 {
979  MpegEncContext *s = &v->s;
980  H264ChromaContext *h264chroma = &v->h264chroma;
981  uint8_t *srcU, *srcV;
982  int uvsrc_x, uvsrc_y;
983  int uvmx_field[4], uvmy_field[4];
984  int i, off, tx, ty;
985  int fieldmv = v->blk_mv_type[s->block_index[0]];
986  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
987  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
988  int v_edge_pos = s->v_edge_pos >> 1;
989  int use_ic;
990  uint8_t (*lutuv)[256];
991 
992  if (s->flags & CODEC_FLAG_GRAY)
993  return;
994 
995  for (i = 0; i < 4; i++) {
996  int d = i < 2 ? dir: dir2;
997  tx = s->mv[d][i][0];
998  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
999  ty = s->mv[d][i][1];
1000  if (fieldmv)
1001  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1002  else
1003  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1004  }
1005 
1006  for (i = 0; i < 4; i++) {
1007  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1008  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1009  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1010  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1011  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1012  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1013  if (i < 2 ? dir : dir2) {
1014  srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1015  srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1016  lutuv = v->next_lutuv;
1017  use_ic = v->next_use_ic;
1018  } else {
1019  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
1020  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
1021  lutuv = v->last_lutuv;
1022  use_ic = v->last_use_ic;
1023  }
1024  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1025  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1026 
1027  if (fieldmv && !(uvsrc_y & 1))
1028  v_edge_pos--;
1029  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1030  uvsrc_y--;
1031  if (use_ic
1032  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1033  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1034  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1036  s->uvlinesize, s->uvlinesize,
1037  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1038  s->h_edge_pos >> 1, v_edge_pos);
1039  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1040  s->uvlinesize, s->uvlinesize,
1041  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1042  s->h_edge_pos >> 1, v_edge_pos);
1043  srcU = s->edge_emu_buffer;
1044  srcV = s->edge_emu_buffer + 16;
1045 
1046  /* if we deal with intensity compensation we need to scale source blocks */
1047  if (use_ic) {
1048  int i, j;
1049  uint8_t *src, *src2;
1050 
1051  src = srcU;
1052  src2 = srcV;
1053  for (j = 0; j < 5; j++) {
1054  int f = (uvsrc_y + (j << fieldmv)) & 1;
1055  for (i = 0; i < 5; i++) {
1056  src[i] = lutuv[f][src[i]];
1057  src2[i] = lutuv[f][src2[i]];
1058  }
1059  src += s->uvlinesize << fieldmv;
1060  src2 += s->uvlinesize << fieldmv;
1061  }
1062  }
1063  }
1064  if (avg) {
1065  if (!v->rnd) {
1066  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1067  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1068  } else {
1069  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1070  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1071  }
1072  } else {
1073  if (!v->rnd) {
1074  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1076  } else {
1077  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1079  }
1080  }
1081  }
1082 }
1083 
1084 /***********************************************************************/
/**
 * Read the macroblock-level quantizer scale (MQUANT) per the VOPDQUANT
 * rules. Reads bits from the local GetBitContext *gb and writes the result
 * into the caller-declared locals `mquant` (and scratch `mqdiff`).
 * For DQPROFILE_ALL_MBS the value is coded per MB (bilevel flag or
 * escape-coded diff); for the edge profiles, MBs on the selected picture
 * edges are forced to use ALTPQUANT. Out-of-range results are clamped to 1
 * with an error log.
 */
#define GET_MQUANT() \
 if (v->dquantfrm) { \
 int edges = 0; \
 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
 if (v->dqbilevel) { \
 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
 } else { \
 mqdiff = get_bits(gb, 3); \
 if (mqdiff != 7) \
 mquant = v->pq + mqdiff; \
 else \
 mquant = get_bits(gb, 5); \
 } \
 } \
 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
 edges = 1 << v->dqsbedge; \
 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
 edges = (3 << v->dqsbedge) % 15; \
 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
 edges = 15; \
 if ((edges&1) && !s->mb_x) \
 mquant = v->altpq; \
 if ((edges&2) && s->first_slice_line) \
 mquant = v->altpq; \
 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
 mquant = v->altpq; \
 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
 mquant = v->altpq; \
 if (!mquant || mquant > 31) { \
 av_log(v->s.avctx, AV_LOG_ERROR, \
 "Overriding invalid mquant %d\n", mquant); \
 mquant = 1; \
 } \
 }
1129 
/**
 * Decode one differential motion vector (MVDATA) for progressive P frames.
 * Stores the decoded MV difference in _dmv_x/_dmv_y, sets the caller's
 * `mb_has_coeffs` (VLC indices > 36 carry a coded-coefficients flag) and
 * s->mb_intra (index 36 signals an intra MB; index 35 is the raw escape).
 * Requires caller-declared locals: index, index1, val, sign, and the
 * size_table/offset_table lookup arrays.
 */
#define GET_MVDATA(_dmv_x, _dmv_y) \
 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
 VC1_MV_DIFF_VLC_BITS, 2); \
 if (index > 36) { \
 mb_has_coeffs = 1; \
 index -= 37; \
 } else \
 mb_has_coeffs = 0; \
 s->mb_intra = 0; \
 if (!index) { \
 _dmv_x = _dmv_y = 0; \
 } else if (index == 35) { \
 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
 } else if (index == 36) { \
 _dmv_x = 0; \
 _dmv_y = 0; \
 s->mb_intra = 1; \
 } else { \
 index1 = index % 6; \
 if (!s->quarter_sample && index1 == 5) val = 1; \
 else val = 0; \
 if (size_table[index1] - val > 0) \
 val = get_bits(gb, size_table[index1] - val); \
 else val = 0; \
 sign = 0 - (val&1); \
 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
 \
 index1 = index / 6; \
 if (!s->quarter_sample && index1 == 5) val = 1; \
 else val = 0; \
 if (size_table[index1] - val > 0) \
 val = get_bits(gb, size_table[index1] - val); \
 else val = 0; \
 sign = 0 - (val & 1); \
 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
 }
1174 
1176  int *dmv_y, int *pred_flag)
1177 {
1178  int index, index1;
1179  int extend_x = 0, extend_y = 0;
1180  GetBitContext *gb = &v->s.gb;
1181  int bits, esc;
1182  int val, sign;
1183  const int* offs_tab;
1184 
1185  if (v->numref) {
1186  bits = VC1_2REF_MVDATA_VLC_BITS;
1187  esc = 125;
1188  } else {
1189  bits = VC1_1REF_MVDATA_VLC_BITS;
1190  esc = 71;
1191  }
1192  switch (v->dmvrange) {
1193  case 1:
1194  extend_x = 1;
1195  break;
1196  case 2:
1197  extend_y = 1;
1198  break;
1199  case 3:
1200  extend_x = extend_y = 1;
1201  break;
1202  }
1203  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1204  if (index == esc) {
1205  *dmv_x = get_bits(gb, v->k_x);
1206  *dmv_y = get_bits(gb, v->k_y);
1207  if (v->numref) {
1208  if (pred_flag) {
1209  *pred_flag = *dmv_y & 1;
1210  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1211  } else {
1212  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1213  }
1214  }
1215  }
1216  else {
1217  if (extend_x)
1218  offs_tab = offset_table2;
1219  else
1220  offs_tab = offset_table1;
1221  index1 = (index + 1) % 9;
1222  if (index1 != 0) {
1223  val = get_bits(gb, index1 + extend_x);
1224  sign = 0 -(val & 1);
1225  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1226  } else
1227  *dmv_x = 0;
1228  if (extend_y)
1229  offs_tab = offset_table2;
1230  else
1231  offs_tab = offset_table1;
1232  index1 = (index + 1) / 9;
1233  if (index1 > v->numref) {
1234  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1235  sign = 0 - (val & 1);
1236  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1237  } else
1238  *dmv_y = 0;
1239  if (v->numref && pred_flag)
1240  *pred_flag = index1 & 1;
1241  }
1242 }
1243 
1244 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1245 {
1246  int scaledvalue, refdist;
1247  int scalesame1, scalesame2;
1248  int scalezone1_x, zone1offset_x;
1249  int table_index = dir ^ v->second_field;
1250 
1251  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1252  refdist = v->refdist;
1253  else
1254  refdist = dir ? v->brfd : v->frfd;
1255  if (refdist > 3)
1256  refdist = 3;
1257  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1258  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1259  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1260  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1261 
1262  if (FFABS(n) > 255)
1263  scaledvalue = n;
1264  else {
1265  if (FFABS(n) < scalezone1_x)
1266  scaledvalue = (n * scalesame1) >> 8;
1267  else {
1268  if (n < 0)
1269  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1270  else
1271  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1272  }
1273  }
1274  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1275 }
1276 
1277 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1278 {
1279  int scaledvalue, refdist;
1280  int scalesame1, scalesame2;
1281  int scalezone1_y, zone1offset_y;
1282  int table_index = dir ^ v->second_field;
1283 
1284  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1285  refdist = v->refdist;
1286  else
1287  refdist = dir ? v->brfd : v->frfd;
1288  if (refdist > 3)
1289  refdist = 3;
1290  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1291  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1292  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1293  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1294 
1295  if (FFABS(n) > 63)
1296  scaledvalue = n;
1297  else {
1298  if (FFABS(n) < scalezone1_y)
1299  scaledvalue = (n * scalesame1) >> 8;
1300  else {
1301  if (n < 0)
1302  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1303  else
1304  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1305  }
1306  }
1307 
1308  if (v->cur_field_type && !v->ref_field_type[dir])
1309  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1310  else
1311  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1312 }
1313 
1314 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1315 {
1316  int scalezone1_x, zone1offset_x;
1317  int scaleopp1, scaleopp2, brfd;
1318  int scaledvalue;
1319 
1320  brfd = FFMIN(v->brfd, 3);
1321  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1322  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1323  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1324  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1325 
1326  if (FFABS(n) > 255)
1327  scaledvalue = n;
1328  else {
1329  if (FFABS(n) < scalezone1_x)
1330  scaledvalue = (n * scaleopp1) >> 8;
1331  else {
1332  if (n < 0)
1333  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1334  else
1335  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1336  }
1337  }
1338  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1339 }
1340 
1341 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1342 {
1343  int scalezone1_y, zone1offset_y;
1344  int scaleopp1, scaleopp2, brfd;
1345  int scaledvalue;
1346 
1347  brfd = FFMIN(v->brfd, 3);
1348  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1349  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1350  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1351  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1352 
1353  if (FFABS(n) > 63)
1354  scaledvalue = n;
1355  else {
1356  if (FFABS(n) < scalezone1_y)
1357  scaledvalue = (n * scaleopp1) >> 8;
1358  else {
1359  if (n < 0)
1360  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1361  else
1362  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1363  }
1364  }
1365  if (v->cur_field_type && !v->ref_field_type[dir]) {
1366  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1367  } else {
1368  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1369  }
1370 }
1371 
1372 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1373  int dim, int dir)
1374 {
1375  int brfd, scalesame;
1376  int hpel = 1 - v->s.quarter_sample;
1377 
1378  n >>= hpel;
1379  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1380  if (dim)
1381  n = scaleforsame_y(v, i, n, dir) << hpel;
1382  else
1383  n = scaleforsame_x(v, n, dir) << hpel;
1384  return n;
1385  }
1386  brfd = FFMIN(v->brfd, 3);
1387  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1388 
1389  n = (n * scalesame >> 8) << hpel;
1390  return n;
1391 }
1392 
1393 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1394  int dim, int dir)
1395 {
1396  int refdist, scaleopp;
1397  int hpel = 1 - v->s.quarter_sample;
1398 
1399  n >>= hpel;
1400  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1401  if (dim)
1402  n = scaleforopp_y(v, n, dir) << hpel;
1403  else
1404  n = scaleforopp_x(v, n) << hpel;
1405  return n;
1406  }
1407  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1408  refdist = FFMIN(v->refdist, 3);
1409  else
1410  refdist = dir ? v->brfd : v->frfd;
1411  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1412 
1413  n = (n * scaleopp >> 8) << hpel;
1414  return n;
1415 }
1416 
1419 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1420  int mv1, int r_x, int r_y, uint8_t* is_intra,
1421  int pred_flag, int dir)
1422 {
1423  MpegEncContext *s = &v->s;
1424  int xy, wrap, off = 0;
1425  int16_t *A, *B, *C;
1426  int px, py;
1427  int sum;
1428  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1429  int opposite, a_f, b_f, c_f;
1430  int16_t field_predA[2];
1431  int16_t field_predB[2];
1432  int16_t field_predC[2];
1433  int a_valid, b_valid, c_valid;
1434  int hybridmv_thresh, y_bias = 0;
1435 
1436  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1438  mixedmv_pic = 1;
1439  else
1440  mixedmv_pic = 0;
1441  /* scale MV difference to be quad-pel */
1442  dmv_x <<= 1 - s->quarter_sample;
1443  dmv_y <<= 1 - s->quarter_sample;
1444 
1445  wrap = s->b8_stride;
1446  xy = s->block_index[n];
1447 
1448  if (s->mb_intra) {
1449  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1450  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1451  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1452  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1453  if (mv1) { /* duplicate motion data for 1-MV block */
1454  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1455  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1456  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1457  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1458  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1459  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1460  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1461  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1462  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1463  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1464  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1465  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1466  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1467  }
1468  return;
1469  }
1470 
1471  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1472  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1473  if (mv1) {
1474  if (v->field_mode && mixedmv_pic)
1475  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1476  else
1477  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1478  } else {
1479  //in 4-MV mode different blocks have different B predictor position
1480  switch (n) {
1481  case 0:
1482  off = (s->mb_x > 0) ? -1 : 1;
1483  break;
1484  case 1:
1485  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1486  break;
1487  case 2:
1488  off = 1;
1489  break;
1490  case 3:
1491  off = -1;
1492  }
1493  }
1494  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1495 
1496  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1497  b_valid = a_valid && (s->mb_width > 1);
1498  c_valid = s->mb_x || (n == 1 || n == 3);
1499  if (v->field_mode) {
1500  a_valid = a_valid && !is_intra[xy - wrap];
1501  b_valid = b_valid && !is_intra[xy - wrap + off];
1502  c_valid = c_valid && !is_intra[xy - 1];
1503  }
1504 
1505  if (a_valid) {
1506  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1507  num_oppfield += a_f;
1508  num_samefield += 1 - a_f;
1509  field_predA[0] = A[0];
1510  field_predA[1] = A[1];
1511  } else {
1512  field_predA[0] = field_predA[1] = 0;
1513  a_f = 0;
1514  }
1515  if (b_valid) {
1516  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1517  num_oppfield += b_f;
1518  num_samefield += 1 - b_f;
1519  field_predB[0] = B[0];
1520  field_predB[1] = B[1];
1521  } else {
1522  field_predB[0] = field_predB[1] = 0;
1523  b_f = 0;
1524  }
1525  if (c_valid) {
1526  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1527  num_oppfield += c_f;
1528  num_samefield += 1 - c_f;
1529  field_predC[0] = C[0];
1530  field_predC[1] = C[1];
1531  } else {
1532  field_predC[0] = field_predC[1] = 0;
1533  c_f = 0;
1534  }
1535 
1536  if (v->field_mode) {
1537  if (!v->numref)
1538  // REFFIELD determines if the last field or the second-last field is
1539  // to be used as reference
1540  opposite = 1 - v->reffield;
1541  else {
1542  if (num_samefield <= num_oppfield)
1543  opposite = 1 - pred_flag;
1544  else
1545  opposite = pred_flag;
1546  }
1547  } else
1548  opposite = 0;
1549  if (opposite) {
1550  if (a_valid && !a_f) {
1551  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1552  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1553  }
1554  if (b_valid && !b_f) {
1555  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1556  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1557  }
1558  if (c_valid && !c_f) {
1559  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1560  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1561  }
1562  v->mv_f[dir][xy + v->blocks_off] = 1;
1563  v->ref_field_type[dir] = !v->cur_field_type;
1564  } else {
1565  if (a_valid && a_f) {
1566  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1567  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1568  }
1569  if (b_valid && b_f) {
1570  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1571  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1572  }
1573  if (c_valid && c_f) {
1574  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1575  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1576  }
1577  v->mv_f[dir][xy + v->blocks_off] = 0;
1578  v->ref_field_type[dir] = v->cur_field_type;
1579  }
1580 
1581  if (a_valid) {
1582  px = field_predA[0];
1583  py = field_predA[1];
1584  } else if (c_valid) {
1585  px = field_predC[0];
1586  py = field_predC[1];
1587  } else if (b_valid) {
1588  px = field_predB[0];
1589  py = field_predB[1];
1590  } else {
1591  px = 0;
1592  py = 0;
1593  }
1594 
1595  if (num_samefield + num_oppfield > 1) {
1596  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1597  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1598  }
1599 
1600  /* Pullback MV as specified in 8.3.5.3.4 */
1601  if (!v->field_mode) {
1602  int qx, qy, X, Y;
1603  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1604  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1605  X = (s->mb_width << 6) - 4;
1606  Y = (s->mb_height << 6) - 4;
1607  if (mv1) {
1608  if (qx + px < -60) px = -60 - qx;
1609  if (qy + py < -60) py = -60 - qy;
1610  } else {
1611  if (qx + px < -28) px = -28 - qx;
1612  if (qy + py < -28) py = -28 - qy;
1613  }
1614  if (qx + px > X) px = X - qx;
1615  if (qy + py > Y) py = Y - qy;
1616  }
1617 
1618  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1619  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1620  hybridmv_thresh = 32;
1621  if (a_valid && c_valid) {
1622  if (is_intra[xy - wrap])
1623  sum = FFABS(px) + FFABS(py);
1624  else
1625  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1626  if (sum > hybridmv_thresh) {
1627  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1628  px = field_predA[0];
1629  py = field_predA[1];
1630  } else {
1631  px = field_predC[0];
1632  py = field_predC[1];
1633  }
1634  } else {
1635  if (is_intra[xy - 1])
1636  sum = FFABS(px) + FFABS(py);
1637  else
1638  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1639  if (sum > hybridmv_thresh) {
1640  if (get_bits1(&s->gb)) {
1641  px = field_predA[0];
1642  py = field_predA[1];
1643  } else {
1644  px = field_predC[0];
1645  py = field_predC[1];
1646  }
1647  }
1648  }
1649  }
1650  }
1651 
1652  if (v->field_mode && v->numref)
1653  r_y >>= 1;
1654  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1655  y_bias = 1;
1656  /* store MV using signed modulus of MV range defined in 4.11 */
1657  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1658  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1659  if (mv1) { /* duplicate motion data for 1-MV block */
1660  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1661  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1662  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1663  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1664  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1665  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1666  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1667  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1668  }
1669 }
1670 
/** Predict motion vector for interlaced frame picture block n from
 *  neighbors A (left), B (above) and C (above-right, or above-left at the
 *  picture's right edge). Field/frame MV neighbors are averaged where the
 *  block types differ; the chosen predictor plus the decoded differential
 *  is stored in s->mv and the current picture's motion_val arrays.
 *  mvn: 1 = 1-MV MB (duplicate to all four blocks), 2 = 2-field-MV block;
 *  r_x/r_y are the MV range for the signed-modulus wrap.
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra MB: clear all stored MVs for both directions */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0] = 0;
            s->current_picture.motion_val[1][xy + 1][1] = 0;
            s->current_picture.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.motion_val[1][xy + wrap][1] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset to the other field's block of the left neighbor */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj = n | 2;
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    /* candidate has field MV, current is frame MV: average the two fields */
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj = 2;
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        /* candidate has field MV, current is frame MV: average the two fields */
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    if (s->mb_x == s->mb_width - 1) {
                        /* right edge: use the above-left MB instead of above-right */
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj = 3;
                            pos_c = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB predict from the top blocks of the same MB */
        pos_b = s->block_index[1];
        b_valid = 1;
        B[0] = s->current_picture.motion_val[dir][pos_b][0];
        B[1] = s->current_picture.motion_val[dir][pos_b][1];
        pos_c = s->block_index[0];
        c_valid = 1;
        C[0] = s->current_picture.motion_val[dir][pos_c][0];
        C[1] = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median (or single/zero fallback) */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: classify each predictor by field polarity
         * (bit 2 of the vertical component) and prefer the majority set */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                /* NOTE(review): if none of these branches match, px/py stay
                 * uninitialized — confirm against upstream whether this path
                 * is actually reachable */
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                }
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
1896 
/* Motion compensation from the NEXT (backward) reference picture for
 * B-frame interpolated/direct macroblocks.  vc1_mc_1mv() has already
 * written the forward prediction into s->dest[], so every copy below uses
 * an averaging (avg_*) primitive to blend in the backward prediction.
 * Reads no bits; the backward MV comes from s->mv[1][0]. */
1899 static void vc1_interp_mc(VC1Context *v)
1900 {
1901  MpegEncContext *s = &v->s;
1902  H264ChromaContext *h264chroma = &v->h264chroma;
1903  uint8_t *srcY, *srcU, *srcV;
1904  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1905  int off, off_uv;
1906  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1907  int use_ic = v->next_use_ic;
1908 
1909  if (!v->field_mode && !v->s.next_picture.f.data[0])
1910  return;
1911 
 /* quarter-pel luma MV; chroma MV is derived by halving with the
  * VC-1 "round 3/4 up" rule */
1912  mx = s->mv[1][0][0];
1913  my = s->mv[1][0][1];
1914  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1915  uvmy = (my + ((my & 3) == 3)) >> 1;
1916  if (v->field_mode) {
1917  if (v->cur_field_type != v->ref_field_type[1])
1918  my = my - 2 + 4 * v->cur_field_type;
1919  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1920  }
1921  if (v->fastuvmc) {
1922  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1923  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1924  }
1925  srcY = s->next_picture.f.data[0];
1926  srcU = s->next_picture.f.data[1];
1927  srcV = s->next_picture.f.data[2];
1928 
1929  src_x = s->mb_x * 16 + (mx >> 2);
1930  src_y = s->mb_y * 16 + (my >> 2);
1931  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1932  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1933 
1934  if (v->profile != PROFILE_ADVANCED) {
1935  src_x = av_clip( src_x, -16, s->mb_width * 16);
1936  src_y = av_clip( src_y, -16, s->mb_height * 16);
1937  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1938  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1939  } else {
1940  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1941  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1942  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1943  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1944  }
1945 
1946  srcY += src_y * s->linesize + src_x;
1947  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1948  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1949 
 /* second field of a field-mode picture: step down one line into the
  * interleaved reference */
1950  if (v->field_mode && v->ref_field_type[1]) {
1951  srcY += s->current_picture_ptr->f.linesize[0];
1952  srcU += s->current_picture_ptr->f.linesize[1];
1953  srcV += s->current_picture_ptr->f.linesize[2];
1954  }
1955 
1956  /* for grayscale we should not try to read from unknown area */
1957  if (s->flags & CODEC_FLAG_GRAY) {
1958  srcU = s->edge_emu_buffer + 18 * s->linesize;
1959  srcV = s->edge_emu_buffer + 18 * s->linesize;
1960  }
1961 
 /* slow path: source block overlaps the picture edge, range reduction
  * or intensity compensation must be applied -> copy through the edge
  * emulation buffer and post-process there */
1962  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1963  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1964  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1965  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1966 
1967  srcY -= s->mspel * (1 + s->linesize);
 /* NOTE(review): the call head "s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  * srcY," (original line 1968) appears to have been lost in extraction --
  * the following four lines are its argument list; confirm against
  * upstream vc1dec.c */
1969  s->linesize, s->linesize,
1970  17 + s->mspel * 2, 17 + s->mspel * 2,
1971  src_x - s->mspel, src_y - s->mspel,
1972  s->h_edge_pos, v_edge_pos);
1973  srcY = s->edge_emu_buffer;
1974  s->vdsp.emulated_edge_mc(uvbuf, srcU,
1975  s->uvlinesize, s->uvlinesize,
1976  8 + 1, 8 + 1,
1977  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1978  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1979  s->uvlinesize, s->uvlinesize,
1980  8 + 1, 8 + 1,
1981  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1982  srcU = uvbuf;
1983  srcV = uvbuf + 16;
1984  /* if we deal with range reduction we need to scale source blocks */
1985  if (v->rangeredfrm) {
1986  int i, j;
1987  uint8_t *src, *src2;
1988 
1989  src = srcY;
1990  for (j = 0; j < 17 + s->mspel * 2; j++) {
1991  for (i = 0; i < 17 + s->mspel * 2; i++)
1992  src[i] = ((src[i] - 128) >> 1) + 128;
1993  src += s->linesize;
1994  }
1995  src = srcU;
1996  src2 = srcV;
1997  for (j = 0; j < 9; j++) {
1998  for (i = 0; i < 9; i++) {
1999  src[i] = ((src[i] - 128) >> 1) + 128;
2000  src2[i] = ((src2[i] - 128) >> 1) + 128;
2001  }
2002  src += s->uvlinesize;
2003  src2 += s->uvlinesize;
2004  }
2005  }
2006 
 /* intensity compensation: remap samples through per-field LUTs built
  * from the next picture's IC parameters */
2007  if (use_ic) {
2008  uint8_t (*luty )[256] = v->next_luty;
2009  uint8_t (*lutuv)[256] = v->next_lutuv;
2010  int i, j;
2011  uint8_t *src, *src2;
2012 
2013  src = srcY;
2014  for (j = 0; j < 17 + s->mspel * 2; j++) {
2015  int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2016  for (i = 0; i < 17 + s->mspel * 2; i++)
2017  src[i] = luty[f][src[i]];
2018  src += s->linesize;
2019  }
2020  src = srcU;
2021  src2 = srcV;
2022  for (j = 0; j < 9; j++) {
2023  int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2024  for (i = 0; i < 9; i++) {
2025  src[i] = lutuv[f][src[i]];
2026  src2[i] = lutuv[f][src2[i]];
2027  }
2028  src += s->uvlinesize;
2029  src2 += s->uvlinesize;
2030  }
2031  }
2032  srcY += s->mspel * (1 + s->linesize);
2033  }
2034 
2035  off = 0;
2036  off_uv = 0;
2037 
 /* luma: quarter-pel path uses VC-1 mspel filters on four 8x8 quadrants,
  * otherwise plain half-pel averaging on the whole 16x16 */
2038  if (s->mspel) {
2039  dxy = ((my & 3) << 2) | (mx & 3);
2040  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2041  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2042  srcY += s->linesize * 8;
2043  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2044  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2045  } else { // hpel mc
2046  dxy = (my & 2) | ((mx & 2) >> 1);
2047 
2048  if (!v->rnd)
2049  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2050  else
2051  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2052  }
2053 
2054  if (s->flags & CODEC_FLAG_GRAY) return;
2055  /* Chroma MC always uses qpel blilinear */
2056  uvmx = (uvmx & 3) << 1;
2057  uvmy = (uvmy & 3) << 1;
2058  if (!v->rnd) {
2059  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2060  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2061  } else {
2062  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2063  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2064  }
2065 }
2066 
2067 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2068 {
2069  int n = bfrac;
2070 
2071 #if B_FRACTION_DEN==256
2072  if (inv)
2073  n -= 256;
2074  if (!qs)
2075  return 2 * ((value * n + 255) >> 9);
2076  return (value * n + 128) >> 8;
2077 #else
2078  if (inv)
2079  n -= B_FRACTION_DEN;
2080  if (!qs)
2081  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2082  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2083 #endif
2084 }
2085 
2088 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2089  int direct, int mode)
2090 {
2091  if (direct) {
2092  vc1_mc_1mv(v, 0);
2093  vc1_interp_mc(v);
2094  return;
2095  }
2096  if (mode == BMV_TYPE_INTERPOLATED) {
2097  vc1_mc_1mv(v, 0);
2098  vc1_interp_mc(v);
2099  return;
2100  }
2101 
2102  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2103 }
2104 
/* Predict and store both B-frame motion vectors (forward in s->mv[0][0],
 * backward in s->mv[1][0]) for a 1-MV macroblock.  Direct MBs derive both
 * MVs by scaling the co-located MV of the next picture; explicit MBs use
 * median prediction from neighbours A/B/C plus the decoded differentials
 * dmv_x/dmv_y.  May read hybrid-prediction bits from the bitstream (but
 * see the disabled blocks below). */
2105 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2106  int direct, int mvtype)
2107 {
2108  MpegEncContext *s = &v->s;
2109  int xy, wrap, off = 0;
2110  int16_t *A, *B, *C;
2111  int px, py;
2112  int sum;
2113  int r_x, r_y;
2114  const uint8_t *is_intra = v->mb_type[0];
2115 
2116  r_x = v->range_x;
2117  r_y = v->range_y;
2118  /* scale MV difference to be quad-pel */
2119  dmv_x[0] <<= 1 - s->quarter_sample;
2120  dmv_y[0] <<= 1 - s->quarter_sample;
2121  dmv_x[1] <<= 1 - s->quarter_sample;
2122  dmv_y[1] <<= 1 - s->quarter_sample;
2123 
2124  wrap = s->b8_stride;
2125  xy = s->block_index[0];
2126 
 /* intra MB: both MVs are zero, nothing else to predict */
2127  if (s->mb_intra) {
2128  s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2129  s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2130  s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2131  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
2132  return;
2133  }
 /* scale the co-located MV of the next picture by BFRACTION to get the
  * direct-mode forward/backward candidates */
2134  if (!v->field_mode) {
2135  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2136  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2137  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2138  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2139 
2140  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2141  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2142  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2143  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2144  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2145  }
2146  if (direct) {
2147  s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2148  s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2149  s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2150  s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2151  return;
2152  }
2153 
 /* ---- forward MV prediction (C = left, A = top, B = top-right) ---- */
2154  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2155  C = s->current_picture.motion_val[0][xy - 2];
2156  A = s->current_picture.motion_val[0][xy - wrap * 2];
2157  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2158  B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2159 
2160  if (!s->mb_x) C[0] = C[1] = 0;
2161  if (!s->first_slice_line) { // predictor A is not out of bounds
2162  if (s->mb_width == 1) {
2163  px = A[0];
2164  py = A[1];
2165  } else {
2166  px = mid_pred(A[0], B[0], C[0]);
2167  py = mid_pred(A[1], B[1], C[1]);
2168  }
2169  } else if (s->mb_x) { // predictor C is not out of bounds
2170  px = C[0];
2171  py = C[1];
2172  } else {
2173  px = py = 0;
2174  }
2175  /* Pullback MV as specified in 8.3.5.3.4 */
2176  {
2177  int qx, qy, X, Y;
2178  if (v->profile < PROFILE_ADVANCED) {
2179  qx = (s->mb_x << 5);
2180  qy = (s->mb_y << 5);
2181  X = (s->mb_width << 5) - 4;
2182  Y = (s->mb_height << 5) - 4;
2183  if (qx + px < -28) px = -28 - qx;
2184  if (qy + py < -28) py = -28 - qy;
2185  if (qx + px > X) px = X - qx;
2186  if (qy + py > Y) py = Y - qy;
2187  } else {
2188  qx = (s->mb_x << 6);
2189  qy = (s->mb_y << 6);
2190  X = (s->mb_width << 6) - 4;
2191  Y = (s->mb_height << 6) - 4;
2192  if (qx + px < -60) px = -60 - qx;
2193  if (qy + py < -60) py = -60 - qy;
2194  if (qx + px > X) px = X - qx;
2195  if (qy + py > Y) py = Y - qy;
2196  }
2197  }
2198  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
 /* NOTE: dead code -- the leading "0 &&" disables hybrid prediction
  * (and its get_bits1() reads) for B-frames; kept intentionally */
2199  if (0 && !s->first_slice_line && s->mb_x) {
2200  if (is_intra[xy - wrap])
2201  sum = FFABS(px) + FFABS(py);
2202  else
2203  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2204  if (sum > 32) {
2205  if (get_bits1(&s->gb)) {
2206  px = A[0];
2207  py = A[1];
2208  } else {
2209  px = C[0];
2210  py = C[1];
2211  }
2212  } else {
2213  if (is_intra[xy - 2])
2214  sum = FFABS(px) + FFABS(py);
2215  else
2216  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2217  if (sum > 32) {
2218  if (get_bits1(&s->gb)) {
2219  px = A[0];
2220  py = A[1];
2221  } else {
2222  px = C[0];
2223  py = C[1];
2224  }
2225  }
2226  }
2227  }
2228  /* store MV using signed modulus of MV range defined in 4.11 */
2229  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2230  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2231  }
 /* ---- backward MV prediction (mirror of the forward case) ---- */
2232  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2233  C = s->current_picture.motion_val[1][xy - 2];
2234  A = s->current_picture.motion_val[1][xy - wrap * 2];
2235  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2236  B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2237 
2238  if (!s->mb_x)
2239  C[0] = C[1] = 0;
2240  if (!s->first_slice_line) { // predictor A is not out of bounds
2241  if (s->mb_width == 1) {
2242  px = A[0];
2243  py = A[1];
2244  } else {
2245  px = mid_pred(A[0], B[0], C[0]);
2246  py = mid_pred(A[1], B[1], C[1]);
2247  }
2248  } else if (s->mb_x) { // predictor C is not out of bounds
2249  px = C[0];
2250  py = C[1];
2251  } else {
2252  px = py = 0;
2253  }
2254  /* Pullback MV as specified in 8.3.5.3.4 */
2255  {
2256  int qx, qy, X, Y;
2257  if (v->profile < PROFILE_ADVANCED) {
2258  qx = (s->mb_x << 5);
2259  qy = (s->mb_y << 5);
2260  X = (s->mb_width << 5) - 4;
2261  Y = (s->mb_height << 5) - 4;
2262  if (qx + px < -28) px = -28 - qx;
2263  if (qy + py < -28) py = -28 - qy;
2264  if (qx + px > X) px = X - qx;
2265  if (qy + py > Y) py = Y - qy;
2266  } else {
2267  qx = (s->mb_x << 6);
2268  qy = (s->mb_y << 6);
2269  X = (s->mb_width << 6) - 4;
2270  Y = (s->mb_height << 6) - 4;
2271  if (qx + px < -60) px = -60 - qx;
2272  if (qy + py < -60) py = -60 - qy;
2273  if (qx + px > X) px = X - qx;
2274  if (qy + py > Y) py = Y - qy;
2275  }
2276  }
2277  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
 /* NOTE: dead code -- disabled the same way as the forward branch */
2278  if (0 && !s->first_slice_line && s->mb_x) {
2279  if (is_intra[xy - wrap])
2280  sum = FFABS(px) + FFABS(py);
2281  else
2282  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2283  if (sum > 32) {
2284  if (get_bits1(&s->gb)) {
2285  px = A[0];
2286  py = A[1];
2287  } else {
2288  px = C[0];
2289  py = C[1];
2290  }
2291  } else {
2292  if (is_intra[xy - 2])
2293  sum = FFABS(px) + FFABS(py);
2294  else
2295  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2296  if (sum > 32) {
2297  if (get_bits1(&s->gb)) {
2298  px = A[0];
2299  py = A[1];
2300  } else {
2301  px = C[0];
2302  py = C[1];
2303  }
2304  }
2305  }
2306  }
2307  /* store MV using signed modulus of MV range defined in 4.11 */
2308 
2309  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2310  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2311  }
2312  s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2313  s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2314  s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2315  s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2316 }
2317 
/* Predict B-frame MVs for interlaced-field pictures.  Direct mode scales
 * the co-located MV of the next picture and picks the reference field by
 * majority vote of the co-located field flags; other modes delegate to
 * vc1_pred_mv() in bitstream order (forward components first). */
2318 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2319 {
2320  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2321  MpegEncContext *s = &v->s;
2322  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2323 
2324  if (v->bmvtype == BMV_TYPE_DIRECT) {
2325  int total_opp, k, f;
2326  if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2327  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2328  v->bfraction, 0, s->quarter_sample);
2329  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2330  v->bfraction, 0, s->quarter_sample);
2331  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2332  v->bfraction, 1, s->quarter_sample);
2333  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2334  v->bfraction, 1, s->quarter_sample);
2335 
 /* majority vote over the four co-located 8x8 field flags decides
  * whether the opposite field is referenced */
2336  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2337  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2338  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2339  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2340  f = (total_opp > 2) ? 1 : 0;
2341  } else {
 /* co-located MB was intra: zero MVs, same-field reference */
2342  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2343  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2344  f = 0;
2345  }
2346  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2347  for (k = 0; k < 4; k++) {
2348  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2349  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2350  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2351  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2352  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2353  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2354  }
2355  return;
2356  }
2357  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2358  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2359  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2360  return;
2361  }
 /* single-direction modes: predict the coded direction, then (for the
  * last block or 1-MV MBs) fill in the opposite direction with a zero
  * differential so both MV sets stay consistent */
2362  if (dir) { // backward
2363  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2364  if (n == 3 || mv1) {
2365  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2366  }
2367  } else { // forward
2368  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2369  if (n == 3 || mv1) {
2370  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2371  }
2372  }
2373 }
2374 
2384 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2385  int16_t **dc_val_ptr, int *dir_ptr)
2386 {
2387  int a, b, c, wrap, pred, scale;
2388  int16_t *dc_val;
2389  static const uint16_t dcpred[32] = {
2390  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2391  114, 102, 93, 85, 79, 73, 68, 64,
2392  60, 57, 54, 51, 49, 47, 45, 43,
2393  41, 39, 38, 37, 35, 34, 33
2394  };
2395 
2396  /* find prediction - wmv3_dc_scale always used here in fact */
2397  if (n < 4) scale = s->y_dc_scale;
2398  else scale = s->c_dc_scale;
2399 
2400  wrap = s->block_wrap[n];
2401  dc_val = s->dc_val[0] + s->block_index[n];
2402 
2403  /* B A
2404  * C X
2405  */
2406  c = dc_val[ - 1];
2407  b = dc_val[ - 1 - wrap];
2408  a = dc_val[ - wrap];
2409 
2410  if (pq < 9 || !overlap) {
2411  /* Set outer values */
2412  if (s->first_slice_line && (n != 2 && n != 3))
2413  b = a = dcpred[scale];
2414  if (s->mb_x == 0 && (n != 1 && n != 3))
2415  b = c = dcpred[scale];
2416  } else {
2417  /* Set outer values */
2418  if (s->first_slice_line && (n != 2 && n != 3))
2419  b = a = 0;
2420  if (s->mb_x == 0 && (n != 1 && n != 3))
2421  b = c = 0;
2422  }
2423 
2424  if (abs(a - b) <= abs(b - c)) {
2425  pred = c;
2426  *dir_ptr = 1; // left
2427  } else {
2428  pred = a;
2429  *dir_ptr = 0; // top
2430  }
2431 
2432  /* update predictor */
2433  *dc_val_ptr = &dc_val[0];
2434  return pred;
2435 }
2436 
2437 
2449 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2450  int a_avail, int c_avail,
2451  int16_t **dc_val_ptr, int *dir_ptr)
2452 {
2453  int a, b, c, wrap, pred;
2454  int16_t *dc_val;
2455  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2456  int q1, q2 = 0;
2457  int dqscale_index;
2458 
2459  wrap = s->block_wrap[n];
2460  dc_val = s->dc_val[0] + s->block_index[n];
2461 
2462  /* B A
2463  * C X
2464  */
2465  c = dc_val[ - 1];
2466  b = dc_val[ - 1 - wrap];
2467  a = dc_val[ - wrap];
2468  /* scale predictors if needed */
2469  q1 = s->current_picture.qscale_table[mb_pos];
2470  dqscale_index = s->y_dc_scale_table[q1] - 1;
2471  if (dqscale_index < 0)
2472  return 0;
2473  if (c_avail && (n != 1 && n != 3)) {
2474  q2 = s->current_picture.qscale_table[mb_pos - 1];
2475  if (q2 && q2 != q1)
2476  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2477  }
2478  if (a_avail && (n != 2 && n != 3)) {
2479  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2480  if (q2 && q2 != q1)
2481  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2482  }
2483  if (a_avail && c_avail && (n != 3)) {
2484  int off = mb_pos;
2485  if (n != 1)
2486  off--;
2487  if (n != 2)
2488  off -= s->mb_stride;
2489  q2 = s->current_picture.qscale_table[off];
2490  if (q2 && q2 != q1)
2491  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2492  }
2493 
2494  if (a_avail && c_avail) {
2495  if (abs(a - b) <= abs(b - c)) {
2496  pred = c;
2497  *dir_ptr = 1; // left
2498  } else {
2499  pred = a;
2500  *dir_ptr = 0; // top
2501  }
2502  } else if (a_avail) {
2503  pred = a;
2504  *dir_ptr = 0; // top
2505  } else if (c_avail) {
2506  pred = c;
2507  *dir_ptr = 1; // left
2508  } else {
2509  pred = 0;
2510  *dir_ptr = 1; // left
2511  }
2512 
2513  /* update predictor */
2514  *dc_val_ptr = &dc_val[0];
2515  return pred;
2516 }
2517  // Block group
2519 
2526 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2527  uint8_t **coded_block_ptr)
2528 {
2529  int xy, wrap, pred, a, b, c;
2530 
2531  xy = s->block_index[n];
2532  wrap = s->b8_stride;
2533 
2534  /* B C
2535  * A X
2536  */
2537  a = s->coded_block[xy - 1 ];
2538  b = s->coded_block[xy - 1 - wrap];
2539  c = s->coded_block[xy - wrap];
2540 
2541  if (b == c) {
2542  pred = a;
2543  } else {
2544  pred = c;
2545  }
2546 
2547  /* store value */
2548  *coded_block_ptr = &s->coded_block[xy];
2549 
2550  return pred;
2551 }
2552 
/* Decode one (run, level, last) AC coefficient triple from the bitstream.
 * Normal indices come straight from the per-codingset VLC tables; the
 * final table index is an escape that selects one of three extension
 * modes (delta-level, delta-run, or fully explicit fixed-length coding).
 * Outputs: *last = last-coefficient flag, *skip = zero run, *value =
 * signed level. */
2562 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2563  int *value, int codingset)
2564 {
2565  GetBitContext *gb = &v->s.gb;
2566  int index, escape, run = 0, level = 0, lst = 0;
2567 
2568  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2569  if (index != ff_vc1_ac_sizes[codingset] - 1) {
 /* plain coded coefficient; forcing lst on bit-exhaustion terminates
  * the caller's loop on truncated input */
2570  run = vc1_index_decode_table[codingset][index][0];
2571  level = vc1_index_decode_table[codingset][index][1];
2572  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2573  if (get_bits1(gb))
2574  level = -level;
2575  } else {
 /* escape: 0 = add delta to level, 1 = add delta to run,
  * 2 = explicit fixed-length run/level */
2576  escape = decode210(gb);
2577  if (escape != 2) {
2578  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2579  run = vc1_index_decode_table[codingset][index][0];
2580  level = vc1_index_decode_table[codingset][index][1];
2581  lst = index >= vc1_last_decode_table[codingset];
2582  if (escape == 0) {
2583  if (lst)
2584  level += vc1_last_delta_level_table[codingset][run];
2585  else
2586  level += vc1_delta_level_table[codingset][run];
2587  } else {
2588  if (lst)
2589  run += vc1_last_delta_run_table[codingset][level] + 1;
2590  else
2591  run += vc1_delta_run_table[codingset][level] + 1;
2592  }
2593  if (get_bits1(gb))
2594  level = -level;
2595  } else {
2596  int sign;
2597  lst = get_bits1(gb);
 /* field widths for explicit coding are transmitted once per
  * picture (lazily, on first use) */
2598  if (v->s.esc3_level_length == 0) {
2599  if (v->pq < 8 || v->dquantfrm) { // table 59
2600  v->s.esc3_level_length = get_bits(gb, 3);
2601  if (!v->s.esc3_level_length)
2602  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2603  } else { // table 60
2604  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2605  }
2606  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2607  }
2608  run = get_bits(gb, v->s.esc3_run_length);
2609  sign = get_bits1(gb);
2610  level = get_bits(gb, v->s.esc3_level_length);
2611  if (sign)
2612  level = -level;
2613  }
2614  }
2615 
2616  *last = lst;
2617  *skip = run;
2618  *value = level;
2619 }
2620 
/* Decode one intra block of a simple/main-profile I frame: DC differential
 * (VLC + escape), DC prediction, then the AC run/level loop with optional
 * AC prediction from the left or top neighbour.
 * Returns 0 on success, -1 on an illegal DC VLC. */
2628 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2629  int coded, int codingset)
2630 {
2631  GetBitContext *gb = &v->s.gb;
2632  MpegEncContext *s = &v->s;
2633  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2634  int i;
2635  int16_t *dc_val;
2636  int16_t *ac_val, *ac_val2;
2637  int dcdiff;
2638 
2639  /* Get DC differential */
 /* NOTE(review): the get_vlc2() reads of the luma/chroma DC differential
  * (original lines 2641/2643) appear to have been lost in extraction --
  * confirm against upstream vc1dec.c */
2640  if (n < 4) {
2642  } else {
2644  }
2645  if (dcdiff < 0) {
2646  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2647  return -1;
2648  }
 /* expand the escape / refine the differential according to pq */
2649  if (dcdiff) {
2650  if (dcdiff == 119 /* ESC index value */) {
2651  /* TODO: Optimize */
2652  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2653  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2654  else dcdiff = get_bits(gb, 8);
2655  } else {
2656  if (v->pq == 1)
2657  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2658  else if (v->pq == 2)
2659  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2660  }
2661  if (get_bits1(gb))
2662  dcdiff = -dcdiff;
2663  }
2664 
2665  /* Prediction */
2666  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2667  *dc_val = dcdiff;
2668 
2669  /* Store the quantized DC coeff, used for prediction */
2670  if (n < 4) {
2671  block[0] = dcdiff * s->y_dc_scale;
2672  } else {
2673  block[0] = dcdiff * s->c_dc_scale;
2674  }
2675  /* Skip ? */
2676  if (!coded) {
2677  goto not_coded;
2678  }
2679 
2680  // AC Decoding
2681  i = 1;
2682 
2683  {
2684  int last = 0, skip, value;
2685  const uint8_t *zz_table;
2686  int scale;
2687  int k;
2688 
2689  scale = v->pq * 2 + v->halfpq;
2690 
 /* zigzag pattern depends on the AC prediction direction */
2691  if (v->s.ac_pred) {
2692  if (!dc_pred_dir)
2693  zz_table = v->zz_8x8[2];
2694  else
2695  zz_table = v->zz_8x8[3];
2696  } else
2697  zz_table = v->zz_8x8[1];
2698 
2699  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2700  ac_val2 = ac_val;
2701  if (dc_pred_dir) // left
2702  ac_val -= 16;
2703  else // top
2704  ac_val -= 16 * s->block_wrap[n];
2705 
 /* run/level loop until the "last" flag */
2706  while (!last) {
2707  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2708  i += skip;
2709  if (i > 63)
2710  break;
2711  block[zz_table[i++]] = value;
2712  }
2713 
2714  /* apply AC prediction if needed */
2715  if (s->ac_pred) {
2716  if (dc_pred_dir) { // left
2717  for (k = 1; k < 8; k++)
2718  block[k << v->left_blk_sh] += ac_val[k];
2719  } else { // top
2720  for (k = 1; k < 8; k++)
2721  block[k << v->top_blk_sh] += ac_val[k + 8];
2722  }
2723  }
2724  /* save AC coeffs for further prediction */
2725  for (k = 1; k < 8; k++) {
2726  ac_val2[k] = block[k << v->left_blk_sh];
2727  ac_val2[k + 8] = block[k << v->top_blk_sh];
2728  }
2729 
2730  /* scale AC coeffs */
2731  for (k = 1; k < 64; k++)
2732  if (block[k]) {
2733  block[k] *= scale;
2734  if (!v->pquantizer)
2735  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2736  }
2737 
2738  if (s->ac_pred) i = 63;
2739  }
2740 
 /* uncoded block: AC values are entirely predicted (or zero) */
2741 not_coded:
2742  if (!coded) {
2743  int k, scale;
2744  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2745  ac_val2 = ac_val;
2746 
2747  i = 0;
2748  scale = v->pq * 2 + v->halfpq;
2749  memset(ac_val2, 0, 16 * 2);
2750  if (dc_pred_dir) { // left
2751  ac_val -= 16;
2752  if (s->ac_pred)
2753  memcpy(ac_val2, ac_val, 8 * 2);
2754  } else { // top
2755  ac_val -= 16 * s->block_wrap[n];
2756  if (s->ac_pred)
2757  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2758  }
2759 
2760  /* apply AC prediction if needed */
2761  if (s->ac_pred) {
2762  if (dc_pred_dir) { //left
2763  for (k = 1; k < 8; k++) {
2764  block[k << v->left_blk_sh] = ac_val[k] * scale;
2765  if (!v->pquantizer && block[k << v->left_blk_sh])
2766  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2767  }
2768  } else { // top
2769  for (k = 1; k < 8; k++) {
2770  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2771  if (!v->pquantizer && block[k << v->top_blk_sh])
2772  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2773  }
2774  }
2775  i = 63;
2776  }
2777  }
2778  s->block_last_index[n] = i;
2779 
2780  return 0;
2781 }
2782 
/* Decode one intra block of an advanced-profile I frame.  Like
 * vc1_decode_i_block() but with per-MB quantizer (mquant), neighbour
 * availability flags, interlaced zigzag tables, and AC predictors that
 * are rescaled when the neighbour MB used a different quantizer.
 * Returns 0 on success, -1 on an illegal DC VLC, or AVERROR_INVALIDDATA
 * on a degenerate quantizer during AC rescaling. */
2791 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2792  int coded, int codingset, int mquant)
2793 {
2794  GetBitContext *gb = &v->s.gb;
2795  MpegEncContext *s = &v->s;
2796  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2797  int i;
2798  int16_t *dc_val;
2799  int16_t *ac_val, *ac_val2;
2800  int dcdiff;
2801  int a_avail = v->a_avail, c_avail = v->c_avail;
2802  int use_pred = s->ac_pred;
2803  int scale;
2804  int q1, q2 = 0;
2805  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2806 
2807  /* Get DC differential */
 /* NOTE(review): the get_vlc2() reads of the luma/chroma DC differential
  * (original lines 2809/2811) appear to have been lost in extraction --
  * confirm against upstream vc1dec.c */
2808  if (n < 4) {
2810  } else {
2812  }
2813  if (dcdiff < 0) {
2814  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2815  return -1;
2816  }
 /* expand the escape / refine the differential according to mquant */
2817  if (dcdiff) {
2818  if (dcdiff == 119 /* ESC index value */) {
2819  /* TODO: Optimize */
2820  if (mquant == 1) dcdiff = get_bits(gb, 10);
2821  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2822  else dcdiff = get_bits(gb, 8);
2823  } else {
2824  if (mquant == 1)
2825  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2826  else if (mquant == 2)
2827  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2828  }
2829  if (get_bits1(gb))
2830  dcdiff = -dcdiff;
2831  }
2832 
2833  /* Prediction */
2834  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2835  *dc_val = dcdiff;
2836 
2837  /* Store the quantized DC coeff, used for prediction */
2838  if (n < 4) {
2839  block[0] = dcdiff * s->y_dc_scale;
2840  } else {
2841  block[0] = dcdiff * s->c_dc_scale;
2842  }
2843 
2844  //AC Decoding
2845  i = 1;
2846 
2847  /* check if AC is needed at all */
2848  if (!a_avail && !c_avail)
2849  use_pred = 0;
2850  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2851  ac_val2 = ac_val;
2852 
2853  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2854 
2855  if (dc_pred_dir) // left
2856  ac_val -= 16;
2857  else // top
2858  ac_val -= 16 * s->block_wrap[n];
2859 
 /* find the quantizer the AC predictor block was coded with: the left
  * MB for left prediction, the top MB for top prediction; blocks whose
  * predictor lives in the same MB use q1 directly */
2860  q1 = s->current_picture.qscale_table[mb_pos];
2861  if ( dc_pred_dir && c_avail && mb_pos)
2862  q2 = s->current_picture.qscale_table[mb_pos - 1];
2863  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2864  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2865  if ( dc_pred_dir && n == 1)
2866  q2 = q1;
2867  if (!dc_pred_dir && n == 2)
2868  q2 = q1;
2869  if (n == 3)
2870  q2 = q1;
2871 
2872  if (coded) {
2873  int last = 0, skip, value;
2874  const uint8_t *zz_table;
2875  int k;
2876 
 /* zigzag selection: interlaced frames have their own table */
2877  if (v->s.ac_pred) {
2878  if (!use_pred && v->fcm == ILACE_FRAME) {
2879  zz_table = v->zzi_8x8;
2880  } else {
2881  if (!dc_pred_dir) // top
2882  zz_table = v->zz_8x8[2];
2883  else // left
2884  zz_table = v->zz_8x8[3];
2885  }
2886  } else {
2887  if (v->fcm != ILACE_FRAME)
2888  zz_table = v->zz_8x8[1];
2889  else
2890  zz_table = v->zzi_8x8;
2891  }
2892 
 /* run/level loop until the "last" flag */
2893  while (!last) {
2894  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2895  i += skip;
2896  if (i > 63)
2897  break;
2898  block[zz_table[i++]] = value;
2899  }
2900 
2901  /* apply AC prediction if needed */
2902  if (use_pred) {
2903  /* scale predictors if needed*/
2904  if (q2 && q1 != q2) {
2905  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2906  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2907 
2908  if (q1 < 1)
2909  return AVERROR_INVALIDDATA;
2910  if (dc_pred_dir) { // left
2911  for (k = 1; k < 8; k++)
2912  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2913  } else { // top
2914  for (k = 1; k < 8; k++)
2915  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2916  }
2917  } else {
2918  if (dc_pred_dir) { //left
2919  for (k = 1; k < 8; k++)
2920  block[k << v->left_blk_sh] += ac_val[k];
2921  } else { //top
2922  for (k = 1; k < 8; k++)
2923  block[k << v->top_blk_sh] += ac_val[k + 8];
2924  }
2925  }
2926  }
2927  /* save AC coeffs for further prediction */
2928  for (k = 1; k < 8; k++) {
2929  ac_val2[k ] = block[k << v->left_blk_sh];
2930  ac_val2[k + 8] = block[k << v->top_blk_sh];
2931  }
2932 
2933  /* scale AC coeffs */
2934  for (k = 1; k < 64; k++)
2935  if (block[k]) {
2936  block[k] *= scale;
2937  if (!v->pquantizer)
2938  block[k] += (block[k] < 0) ? -mquant : mquant;
2939  }
2940 
2941  if (use_pred) i = 63;
2942  } else { // no AC coeffs
2943  int k;
2944 
 /* uncoded block: copy (and possibly rescale) the predictor row or
  * column, everything else stays zero */
2945  memset(ac_val2, 0, 16 * 2);
2946  if (dc_pred_dir) { // left
2947  if (use_pred) {
2948  memcpy(ac_val2, ac_val, 8 * 2);
2949  if (q2 && q1 != q2) {
2950  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2951  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2952  if (q1 < 1)
2953  return AVERROR_INVALIDDATA;
2954  for (k = 1; k < 8; k++)
2955  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2956  }
2957  }
2958  } else { // top
2959  if (use_pred) {
2960  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2961  if (q2 && q1 != q2) {
2962  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2963  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2964  if (q1 < 1)
2965  return AVERROR_INVALIDDATA;
2966  for (k = 1; k < 8; k++)
2967  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2968  }
2969  }
2970  }
2971 
2972  /* apply AC prediction if needed */
2973  if (use_pred) {
2974  if (dc_pred_dir) { // left
2975  for (k = 1; k < 8; k++) {
2976  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2977  if (!v->pquantizer && block[k << v->left_blk_sh])
2978  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2979  }
2980  } else { // top
2981  for (k = 1; k < 8; k++) {
2982  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2983  if (!v->pquantizer && block[k << v->top_blk_sh])
2984  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2985  }
2986  }
2987  i = 63;
2988  }
2989  }
2990  s->block_last_index[n] = i;
2991 
2992  return 0;
2993 }
2994 
/**
 * Decode an intra-coded block inside a P/B macroblock.
 * Unlike the plain I-frame path, this handles AC prediction across
 * neighbouring macroblocks that may use a different quantizer (q1/q2
 * rescaling via ff_vc1_dqscale).
 *
 * @param v         VC-1 decoder context
 * @param block     destination for the 64 dequantized coefficients
 * @param n         block index within the MB (0-3 luma, 4-5 chroma)
 * @param coded     nonzero if AC coefficients are present in the bitstream
 * @param mquant    macroblock quantizer (clamped to [0,31] below)
 * @param codingset AC coding set to use for the coefficient VLCs
 * @return 0 on success, -1 or AVERROR_INVALIDDATA on bitstream error
 */
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
                                  int coded, int mquant, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;

    s->dsp.clear_block(block);

    /* XXX: Guard against dumb values of mquant */
    mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[mquant];
    s->c_dc_scale = s->c_dc_scale_table[mquant];

    /* Get DC differential */
    if (n < 4) {
        /* NOTE(review): luma DC VLC read elided in this excerpt —
         * presumably dcdiff = get_vlc2(..., DC_VLC_BITS, ...); confirm
         * against upstream vc1dec.c */
    } else {
        /* NOTE(review): chroma DC VLC read elided in this excerpt */
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        /* 119 is the escape: the true differential follows as a
         * fixed-length field whose width depends on mquant */
        if (dcdiff == 119 /* ESC index value */) {
            /* TODO: Optimize */
            if (mquant == 1) dcdiff = get_bits(gb, 10);
            else if (mquant == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* refine the VLC index with extra bits at low quantizers */
            if (mquant == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (mquant == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */

    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all and adjust direction if needed */
    if (!a_avail) dc_pred_dir = 1;
    if (!c_avail) dc_pred_dir = 0;
    if (!a_avail && !c_avail) use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    scale = mquant * 2 + v->halfpq;

    /* point ac_val at the neighbour we predict from */
    if (dc_pred_dir) //left
        ac_val -= 16;
    else //top
        ac_val -= 16 * s->block_wrap[n];

    /* q1: this MB's quantizer; q2: the predictor MB's quantizer */
    q1 = s->current_picture.qscale_table[mb_pos];
    if (dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
    /* blocks 1-3 predict from inside the same MB, so q2 == q1 */
    if ( dc_pred_dir && n == 1)
        q2 = q1;
    if (!dc_pred_dir && n == 2)
        q2 = q1;
    if (n == 3) q2 = q1;

    if (coded) {
        int last = 0, skip, value;
        int k;

        /* run-level decode until the "last" flag, choosing the zigzag
         * pattern by frame coding mode (and AC pred direction for
         * interlaced frames) */
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            if (v->fcm == PROGRESSIVE)
                block[v->zz_8x8[0][i++]] = value;
            else {
                if (use_pred && (v->fcm == ILACE_FRAME)) {
                    if (!dc_pred_dir) // top
                        block[v->zz_8x8[2][i++]] = value;
                    else // left
                        block[v->zz_8x8[3][i++]] = value;
                } else {
                    block[v->zzi_8x8[i++]] = value;
                }
            }
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            /* scale predictors if needed*/
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;

                if (q1 < 1)
                    return AVERROR_INVALIDDATA;
                /* rescale neighbour's coeffs to our quantizer:
                 * coeff * q2 / q1, done as fixed-point via ff_vc1_dqscale */
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { //top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                } else { // top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k     ] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if (use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            if (use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else { // top
            if (use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
3210 
/**
 * Decode one inter-coded block of a P macroblock and add the residual
 * to the motion-compensated prediction in @p dst.
 * The block may be transformed as one 8x8, two 8x4, two 4x8 or four 4x4
 * sub-blocks, selected by @p ttmb / the per-block TTBLK syntax.
 *
 * @param v           VC-1 decoder context
 * @param block       scratch buffer for the residual coefficients
 * @param n           block index within the MB (0-3 luma, 4-5 chroma)
 * @param mquant      macroblock quantizer
 * @param ttmb        MB-level transform type, or -1 to read TTBLK here
 * @param first_block nonzero for the first coded block of the MB
 * @param dst         destination pixels (prediction to be updated)
 * @param linesize    stride of @p dst
 * @param skip_block  if set, decode the coefficients but skip the IDCT/add
 * @param ttmb_out    optional: receives this block's transform type,
 *                    shifted into the per-MB ttblk bitfield
 * @return coded-subblock pattern (4-bit mask) for the loop filter
 */
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
                              int mquant, int ttmb, int first_block,
                              uint8_t *dst, int linesize, int skip_block,
                              int *ttmb_out)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int subblkpat = 0;
    int scale, off, idx, last, skip, value;
    int ttblk = ttmb & 7;
    int pat = 0;

    s->dsp.clear_block(block);

    if (ttmb == -1) {
        /* NOTE(review): per-block TTBLK VLC read elided in this excerpt —
         * presumably ttblk = ff_vc1_ttblk_to_tt[...][get_vlc2(...)];
         * confirm against upstream vc1dec.c */
    }
    if (ttblk == TT_4X4) {
        subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
    }
    if ((ttblk != TT_8X8 && ttblk != TT_4X4)
        && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
            || (!v->res_rtm_flag && !first_block))) {
        subblkpat = decode012(gb);
        if (subblkpat)
            subblkpat ^= 3; // swap decoded pattern bits
        if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
            ttblk = TT_8X4;
        if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
            ttblk = TT_4X8;
    }
    scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);

    // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
    if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
        subblkpat = 2 - (ttblk == TT_8X4_TOP);
        ttblk = TT_8X4;
    }
    if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
        subblkpat = 2 - (ttblk == TT_4X8_LEFT);
        ttblk = TT_4X8;
    }
    switch (ttblk) {
    case TT_8X8:
        pat = 0xF;
        i = 0;
        last = 0;
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
            i += skip;
            if (i > 63)
                break;
            if (!v->fcm)
                idx = v->zz_8x8[0][i++];
            else
                idx = v->zzi_8x8[i++];
            block[idx] = value * scale;
            if (!v->pquantizer)
                block[idx] += (block[idx] < 0) ? -mquant : mquant;
        }
        if (!skip_block) {
            /* DC-only blocks take the cheap DC-only inverse transform */
            if (i == 1)
                v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
            else {
                v->vc1dsp.vc1_inv_trans_8x8(block);
                s->dsp.add_pixels_clamped(block, dst, linesize);
            }
        }
        break;
    case TT_4X4:
        pat = ~subblkpat & 0xF;
        for (j = 0; j < 4; j++) {
            last = subblkpat & (1 << (3 - j));
            i = 0;
            off = (j & 1) * 4 + (j & 2) * 16;
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 15)
                    break;
                if (!v->fcm)
                    /* NOTE(review): progressive 4x4 zigzag line elided in
                     * this excerpt — presumably
                     * idx = ff_vc1_simple_progressive_4x4_zz[i++]; */
                else
                    idx = ff_vc1_adv_interlaced_4x4_zz[i++];
                block[idx + off] = value * scale;
                if (!v->pquantizer)
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
            }
        }
        break;
    case TT_8X4:
        pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i = 0;
            off = j * 32;
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 31)
                    break;
                if (!v->fcm)
                    idx = v->zz_8x4[i++] + off;
                else
                    idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
                block[idx] = value * scale;
                if (!v->pquantizer)
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
            }
        }
        break;
    case TT_4X8:
        pat = ~(subblkpat * 5) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i = 0;
            off = j * 4;
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 31)
                    break;
                if (!v->fcm)
                    idx = v->zz_4x8[i++] + off;
                else
                    idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
                block[idx] = value * scale;
                if (!v->pquantizer)
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
            }
        }
        break;
    }
    if (ttmb_out)
        *ttmb_out |= ttblk << (n * 4);
    return pat;
}
// Macroblock group

/* MV differential decoding tables: for VLC index i, the differential is
 * read as size_table[i] extra bits and biased by offset_table[i].
 * NOTE(review): presumably consumed by the GET_MVDATA() macro used
 * below — confirm against vc1.h / upstream vc1dec.c */
static const int size_table  [6] = { 0, 2, 3, 4, 5, 8 };
static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3374 
3376 {
3377  MpegEncContext *s = &v->s;
3378  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3379  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3380  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3381  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3382  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3383  uint8_t *dst;
3384 
3385  if (block_num > 3) {
3386  dst = s->dest[block_num - 3];
3387  } else {
3388  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3389  }
3390  if (s->mb_y != s->end_mb_y || block_num < 2) {
3391  int16_t (*mv)[2];
3392  int mv_stride;
3393 
3394  if (block_num > 3) {
3395  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3396  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3397  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3398  mv_stride = s->mb_stride;
3399  } else {
3400  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3401  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3402  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3403  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3404  mv_stride = s->b8_stride;
3405  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3406  }
3407 
3408  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3409  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3410  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3411  } else {
3412  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3413  if (idx == 3) {
3414  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3415  } else if (idx) {
3416  if (idx == 1)
3417  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3418  else
3419  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3420  }
3421  }
3422  }
3423 
3424  dst -= 4 * linesize;
3425  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3426  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3427  idx = (block_cbp | (block_cbp >> 2)) & 3;
3428  if (idx == 3) {
3429  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3430  } else if (idx) {
3431  if (idx == 1)
3432  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3433  else
3434  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3435  }
3436  }
3437 }
3438 
3440 {
3441  MpegEncContext *s = &v->s;
3442  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3443  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3444  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3445  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3446  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3447  uint8_t *dst;
3448 
3449  if (block_num > 3) {
3450  dst = s->dest[block_num - 3] - 8 * linesize;
3451  } else {
3452  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3453  }
3454 
3455  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3456  int16_t (*mv)[2];
3457 
3458  if (block_num > 3) {
3459  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3460  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3461  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3462  } else {
3463  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3464  : (mb_cbp >> ((block_num + 1) * 4));
3465  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3466  : (mb_is_intra >> ((block_num + 1) * 4));
3467  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3468  }
3469  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3470  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3471  } else {
3472  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3473  if (idx == 5) {
3474  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3475  } else if (idx) {
3476  if (idx == 1)
3477  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3478  else
3479  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3480  }
3481  }
3482  }
3483 
3484  dst -= 4;
3485  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3486  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3487  idx = (block_cbp | (block_cbp >> 1)) & 5;
3488  if (idx == 5) {
3489  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3490  } else if (idx) {
3491  if (idx == 1)
3492  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3493  else
3494  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3495  }
3496  }
3497 }
3498 
3500 {
3501  MpegEncContext *s = &v->s;
3502  int i;
3503 
3504  for (i = 0; i < 6; i++) {
3506  }
3507 
3508  /* V always precedes H, therefore we run H one MB before V;
3509  * at the end of a row, we catch up to complete the row */
3510  if (s->mb_x) {
3511  for (i = 0; i < 6; i++) {
3513  }
3514  if (s->mb_x == s->mb_width - 1) {
3515  s->mb_x++;
3517  for (i = 0; i < 6; i++) {
3519  }
3520  }
3521  }
3522 }
3523 
3527 {
3528  MpegEncContext *s = &v->s;
3529  GetBitContext *gb = &s->gb;
3530  int i, j;
3531  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3532  int cbp; /* cbp decoding stuff */
3533  int mqdiff, mquant; /* MB quantization */
3534  int ttmb = v->ttfrm; /* MB Transform type */
3535 
3536  int mb_has_coeffs = 1; /* last_flag */
3537  int dmv_x, dmv_y; /* Differential MV components */
3538  int index, index1; /* LUT indexes */
3539  int val, sign; /* temp values */
3540  int first_block = 1;
3541  int dst_idx, off;
3542  int skipped, fourmv;
3543  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3544 
3545  mquant = v->pq; /* lossy initialization */
3546 
3547  if (v->mv_type_is_raw)
3548  fourmv = get_bits1(gb);
3549  else
3550  fourmv = v->mv_type_mb_plane[mb_pos];
3551  if (v->skip_is_raw)
3552  skipped = get_bits1(gb);
3553  else
3554  skipped = v->s.mbskip_table[mb_pos];
3555 
3556  if (!fourmv) { /* 1MV mode */
3557  if (!skipped) {
3558  GET_MVDATA(dmv_x, dmv_y);
3559 
3560  if (s->mb_intra) {
3561  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3562  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3563  }
3565  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3566 
3567  /* FIXME Set DC val for inter block ? */
3568  if (s->mb_intra && !mb_has_coeffs) {
3569  GET_MQUANT();
3570  s->ac_pred = get_bits1(gb);
3571  cbp = 0;
3572  } else if (mb_has_coeffs) {
3573  if (s->mb_intra)
3574  s->ac_pred = get_bits1(gb);
3575  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3576  GET_MQUANT();
3577  } else {
3578  mquant = v->pq;
3579  cbp = 0;
3580  }
3581  s->current_picture.qscale_table[mb_pos] = mquant;
3582 
3583  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3584  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3585  VC1_TTMB_VLC_BITS, 2);
3586  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3587  dst_idx = 0;
3588  for (i = 0; i < 6; i++) {
3589  s->dc_val[0][s->block_index[i]] = 0;
3590  dst_idx += i >> 2;
3591  val = ((cbp >> (5 - i)) & 1);
3592  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3593  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3594  if (s->mb_intra) {
3595  /* check if prediction blocks A and C are available */
3596  v->a_avail = v->c_avail = 0;
3597  if (i == 2 || i == 3 || !s->first_slice_line)
3598  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3599  if (i == 1 || i == 3 || s->mb_x)
3600  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3601 
3602  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3603  (i & 4) ? v->codingset2 : v->codingset);
3604  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3605  continue;
3606  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3607  if (v->rangeredfrm)
3608  for (j = 0; j < 64; j++)
3609  s->block[i][j] <<= 1;
3610  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3611  if (v->pq >= 9 && v->overlap) {
3612  if (v->c_avail)
3613  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3614  if (v->a_avail)
3615  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3616  }
3617  block_cbp |= 0xF << (i << 2);
3618  block_intra |= 1 << i;
3619  } else if (val) {
3620  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3621  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3622  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3623  block_cbp |= pat << (i << 2);
3624  if (!v->ttmbf && ttmb < 8)
3625  ttmb = -1;
3626  first_block = 0;
3627  }
3628  }
3629  } else { // skipped
3630  s->mb_intra = 0;
3631  for (i = 0; i < 6; i++) {
3632  v->mb_type[0][s->block_index[i]] = 0;
3633  s->dc_val[0][s->block_index[i]] = 0;
3634  }
3635  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3636  s->current_picture.qscale_table[mb_pos] = 0;
3637  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3638  vc1_mc_1mv(v, 0);
3639  }
3640  } else { // 4MV mode
3641  if (!skipped /* unskipped MB */) {
3642  int intra_count = 0, coded_inter = 0;
3643  int is_intra[6], is_coded[6];
3644  /* Get CBPCY */
3645  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3646  for (i = 0; i < 6; i++) {
3647  val = ((cbp >> (5 - i)) & 1);
3648  s->dc_val[0][s->block_index[i]] = 0;
3649  s->mb_intra = 0;
3650  if (i < 4) {
3651  dmv_x = dmv_y = 0;
3652  s->mb_intra = 0;
3653  mb_has_coeffs = 0;
3654  if (val) {
3655  GET_MVDATA(dmv_x, dmv_y);
3656  }
3657  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3658  if (!s->mb_intra)
3659  vc1_mc_4mv_luma(v, i, 0, 0);
3660  intra_count += s->mb_intra;
3661  is_intra[i] = s->mb_intra;
3662  is_coded[i] = mb_has_coeffs;
3663  }
3664  if (i & 4) {
3665  is_intra[i] = (intra_count >= 3);
3666  is_coded[i] = val;
3667  }
3668  if (i == 4)
3669  vc1_mc_4mv_chroma(v, 0);
3670  v->mb_type[0][s->block_index[i]] = is_intra[i];
3671  if (!coded_inter)
3672  coded_inter = !is_intra[i] & is_coded[i];
3673  }
3674  // if there are no coded blocks then don't do anything more
3675  dst_idx = 0;
3676  if (!intra_count && !coded_inter)
3677  goto end;
3678  GET_MQUANT();
3679  s->current_picture.qscale_table[mb_pos] = mquant;
3680  /* test if block is intra and has pred */
3681  {
3682  int intrapred = 0;
3683  for (i = 0; i < 6; i++)
3684  if (is_intra[i]) {
3685  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3686  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3687  intrapred = 1;
3688  break;
3689  }
3690  }
3691  if (intrapred)
3692  s->ac_pred = get_bits1(gb);
3693  else
3694  s->ac_pred = 0;
3695  }
3696  if (!v->ttmbf && coded_inter)
3698  for (i = 0; i < 6; i++) {
3699  dst_idx += i >> 2;
3700  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3701  s->mb_intra = is_intra[i];
3702  if (is_intra[i]) {
3703  /* check if prediction blocks A and C are available */
3704  v->a_avail = v->c_avail = 0;
3705  if (i == 2 || i == 3 || !s->first_slice_line)
3706  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3707  if (i == 1 || i == 3 || s->mb_x)
3708  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3709 
3710  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3711  (i & 4) ? v->codingset2 : v->codingset);
3712  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3713  continue;
3714  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3715  if (v->rangeredfrm)
3716  for (j = 0; j < 64; j++)
3717  s->block[i][j] <<= 1;
3718  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3719  (i & 4) ? s->uvlinesize : s->linesize);
3720  if (v->pq >= 9 && v->overlap) {
3721  if (v->c_avail)
3722  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3723  if (v->a_avail)
3724  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3725  }
3726  block_cbp |= 0xF << (i << 2);
3727  block_intra |= 1 << i;
3728  } else if (is_coded[i]) {
3729  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3730  first_block, s->dest[dst_idx] + off,
3731  (i & 4) ? s->uvlinesize : s->linesize,
3732  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3733  &block_tt);
3734  block_cbp |= pat << (i << 2);
3735  if (!v->ttmbf && ttmb < 8)
3736  ttmb = -1;
3737  first_block = 0;
3738  }
3739  }
3740  } else { // skipped MB
3741  s->mb_intra = 0;
3742  s->current_picture.qscale_table[mb_pos] = 0;
3743  for (i = 0; i < 6; i++) {
3744  v->mb_type[0][s->block_index[i]] = 0;
3745  s->dc_val[0][s->block_index[i]] = 0;
3746  }
3747  for (i = 0; i < 4; i++) {
3748  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3749  vc1_mc_4mv_luma(v, i, 0, 0);
3750  }
3751  vc1_mc_4mv_chroma(v, 0);
3752  s->current_picture.qscale_table[mb_pos] = 0;
3753  }
3754  }
3755 end:
3756  v->cbp[s->mb_x] = block_cbp;
3757  v->ttblk[s->mb_x] = block_tt;
3758  v->is_intra[s->mb_x] = block_intra;
3759 
3760  return 0;
3761 }
3762 
3763 /* Decode one macroblock in an interlaced frame p picture */
3764 
3766 {
3767  MpegEncContext *s = &v->s;
3768  GetBitContext *gb = &s->gb;
3769  int i;
3770  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3771  int cbp = 0; /* cbp decoding stuff */
3772  int mqdiff, mquant; /* MB quantization */
3773  int ttmb = v->ttfrm; /* MB Transform type */
3774 
3775  int mb_has_coeffs = 1; /* last_flag */
3776  int dmv_x, dmv_y; /* Differential MV components */
3777  int val; /* temp value */
3778  int first_block = 1;
3779  int dst_idx, off;
3780  int skipped, fourmv = 0, twomv = 0;
3781  int block_cbp = 0, pat, block_tt = 0;
3782  int idx_mbmode = 0, mvbp;
3783  int stride_y, fieldtx;
3784 
3785  mquant = v->pq; /* Loosy initialization */
3786 
3787  if (v->skip_is_raw)
3788  skipped = get_bits1(gb);
3789  else
3790  skipped = v->s.mbskip_table[mb_pos];
3791  if (!skipped) {
3792  if (v->fourmvswitch)
3793  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3794  else
3795  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3796  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3797  /* store the motion vector type in a flag (useful later) */
3798  case MV_PMODE_INTFR_4MV:
3799  fourmv = 1;
3800  v->blk_mv_type[s->block_index[0]] = 0;
3801  v->blk_mv_type[s->block_index[1]] = 0;
3802  v->blk_mv_type[s->block_index[2]] = 0;
3803  v->blk_mv_type[s->block_index[3]] = 0;
3804  break;
3806  fourmv = 1;
3807  v->blk_mv_type[s->block_index[0]] = 1;
3808  v->blk_mv_type[s->block_index[1]] = 1;
3809  v->blk_mv_type[s->block_index[2]] = 1;
3810  v->blk_mv_type[s->block_index[3]] = 1;
3811  break;
3813  twomv = 1;
3814  v->blk_mv_type[s->block_index[0]] = 1;
3815  v->blk_mv_type[s->block_index[1]] = 1;
3816  v->blk_mv_type[s->block_index[2]] = 1;
3817  v->blk_mv_type[s->block_index[3]] = 1;
3818  break;
3819  case MV_PMODE_INTFR_1MV:
3820  v->blk_mv_type[s->block_index[0]] = 0;
3821  v->blk_mv_type[s->block_index[1]] = 0;
3822  v->blk_mv_type[s->block_index[2]] = 0;
3823  v->blk_mv_type[s->block_index[3]] = 0;
3824  break;
3825  }
3826  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3827  for (i = 0; i < 4; i++) {
3828  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3829  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3830  }
3831  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3832  s->mb_intra = v->is_intra[s->mb_x] = 1;
3833  for (i = 0; i < 6; i++)
3834  v->mb_type[0][s->block_index[i]] = 1;
3835  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3836  mb_has_coeffs = get_bits1(gb);
3837  if (mb_has_coeffs)
3838  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3839  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3840  GET_MQUANT();
3841  s->current_picture.qscale_table[mb_pos] = mquant;
3842  /* Set DC scale - y and c use the same (not sure if necessary here) */
3843  s->y_dc_scale = s->y_dc_scale_table[mquant];
3844  s->c_dc_scale = s->c_dc_scale_table[mquant];
3845  dst_idx = 0;
3846  for (i = 0; i < 6; i++) {
3847  s->dc_val[0][s->block_index[i]] = 0;
3848  dst_idx += i >> 2;
3849  val = ((cbp >> (5 - i)) & 1);
3850  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3851  v->a_avail = v->c_avail = 0;
3852  if (i == 2 || i == 3 || !s->first_slice_line)
3853  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3854  if (i == 1 || i == 3 || s->mb_x)
3855  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3856 
3857  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3858  (i & 4) ? v->codingset2 : v->codingset);
3859  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3860  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3861  if (i < 4) {
3862  stride_y = s->linesize << fieldtx;
3863  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3864  } else {
3865  stride_y = s->uvlinesize;
3866  off = 0;
3867  }
3868  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3869  //TODO: loop filter
3870  }
3871 
3872  } else { // inter MB
3873  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3874  if (mb_has_coeffs)
3875  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3876  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3878  } else {
3879  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3880  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3882  }
3883  }
3884  s->mb_intra = v->is_intra[s->mb_x] = 0;
3885  for (i = 0; i < 6; i++)
3886  v->mb_type[0][s->block_index[i]] = 0;
3887  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3888  /* for all motion vector read MVDATA and motion compensate each block */
3889  dst_idx = 0;
3890  if (fourmv) {
3891  mvbp = v->fourmvbp;
3892  for (i = 0; i < 6; i++) {
3893  if (i < 4) {
3894  dmv_x = dmv_y = 0;
3895  val = ((mvbp >> (3 - i)) & 1);
3896  if (val) {
3897  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3898  }
3899  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3900  vc1_mc_4mv_luma(v, i, 0, 0);
3901  } else if (i == 4) {
3902  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3903  }
3904  }
3905  } else if (twomv) {
3906  mvbp = v->twomvbp;
3907  dmv_x = dmv_y = 0;
3908  if (mvbp & 2) {
3909  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3910  }
3911  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3912  vc1_mc_4mv_luma(v, 0, 0, 0);
3913  vc1_mc_4mv_luma(v, 1, 0, 0);
3914  dmv_x = dmv_y = 0;
3915  if (mvbp & 1) {
3916  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3917  }
3918  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3919  vc1_mc_4mv_luma(v, 2, 0, 0);
3920  vc1_mc_4mv_luma(v, 3, 0, 0);
3921  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3922  } else {
3923  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3924  dmv_x = dmv_y = 0;
3925  if (mvbp) {
3926  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3927  }
3928  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3929  vc1_mc_1mv(v, 0);
3930  }
3931  if (cbp)
3932  GET_MQUANT(); // p. 227
3933  s->current_picture.qscale_table[mb_pos] = mquant;
3934  if (!v->ttmbf && cbp)
3936  for (i = 0; i < 6; i++) {
3937  s->dc_val[0][s->block_index[i]] = 0;
3938  dst_idx += i >> 2;
3939  val = ((cbp >> (5 - i)) & 1);
3940  if (!fieldtx)
3941  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3942  else
3943  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3944  if (val) {
3945  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3946  first_block, s->dest[dst_idx] + off,
3947  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3948  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3949  block_cbp |= pat << (i << 2);
3950  if (!v->ttmbf && ttmb < 8)
3951  ttmb = -1;
3952  first_block = 0;
3953  }
3954  }
3955  }
3956  } else { // skipped
3957  s->mb_intra = v->is_intra[s->mb_x] = 0;
3958  for (i = 0; i < 6; i++) {
3959  v->mb_type[0][s->block_index[i]] = 0;
3960  s->dc_val[0][s->block_index[i]] = 0;
3961  }
3962  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3963  s->current_picture.qscale_table[mb_pos] = 0;
3964  v->blk_mv_type[s->block_index[0]] = 0;
3965  v->blk_mv_type[s->block_index[1]] = 0;
3966  v->blk_mv_type[s->block_index[2]] = 0;
3967  v->blk_mv_type[s->block_index[3]] = 0;
3968  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3969  vc1_mc_1mv(v, 0);
3970  }
3971  if (s->mb_x == s->mb_width - 1)
3972  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3973  return 0;
3974 }
3975 
/* NOTE(review): the line above this brace (original line 3976), which carried
 * the function signature, is missing from this extraction. By its position in
 * vc1dec.c this body appears to decode one macroblock of an interlaced FIELD
 * P-picture (presumably vc1_decode_p_mb_intfi) -- confirm against upstream.
 * Only comments were added below; every code line is byte-identical. */
3977 {
3978  MpegEncContext *s = &v->s;
3979  GetBitContext *gb = &s->gb;
3980  int i;
3981  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3982  int cbp = 0; /* cbp decoding stuff */
3983  int mqdiff, mquant; /* MB quantization */
3984  int ttmb = v->ttfrm; /* MB Transform type */
3985 
3986  int mb_has_coeffs = 1; /* last_flag */
3987  int dmv_x, dmv_y; /* Differential MV components */
3988  int val; /* temp values */
3989  int first_block = 1;
3990  int dst_idx, off;
3991  int pred_flag;
3992  int block_cbp = 0, pat, block_tt = 0;
3993  int idx_mbmode = 0;
3994 
3995  mquant = v->pq; /* Loosy initialization */
3996 
/* idx_mbmode selects the MB mode: <= 1 is intra, 2..5 is 1-MV, else 4-MV. */
3997  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3998  if (idx_mbmode <= 1) { // intra MB
3999  s->mb_intra = v->is_intra[s->mb_x] = 1;
4000  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4001  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4002  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4003  GET_MQUANT();
4004  s->current_picture.qscale_table[mb_pos] = mquant;
4005  /* Set DC scale - y and c use the same (not sure if necessary here) */
4006  s->y_dc_scale = s->y_dc_scale_table[mquant];
4007  s->c_dc_scale = s->c_dc_scale_table[mquant];
4008  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4009  mb_has_coeffs = idx_mbmode & 1;
4010  if (mb_has_coeffs)
4011  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4012  dst_idx = 0;
4013  for (i = 0; i < 6; i++) {
/* Clear the DC predictor and mark the block intra for neighbour prediction. */
4014  s->dc_val[0][s->block_index[i]] = 0;
4015  v->mb_type[0][s->block_index[i]] = 1;
4016  dst_idx += i >> 2;
4017  val = ((cbp >> (5 - i)) & 1);
/* Availability of prediction candidates A (above) and C (left). */
4018  v->a_avail = v->c_avail = 0;
4019  if (i == 2 || i == 3 || !s->first_slice_line)
4020  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4021  if (i == 1 || i == 3 || s->mb_x)
4022  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4023 
4024  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4025  (i & 4) ? v->codingset2 : v->codingset);
4026  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4027  continue;
4028  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4029  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4030  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4031  // TODO: loop filter
4032  }
4033  } else {
4034  s->mb_intra = v->is_intra[s->mb_x] = 0;
4035  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4036  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4037  if (idx_mbmode <= 5) { // 1-MV
4038  dmv_x = dmv_y = pred_flag = 0;
4039  if (idx_mbmode & 1) {
4040  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4041  }
4042  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4043  vc1_mc_1mv(v, 0);
4044  mb_has_coeffs = !(idx_mbmode & 2);
4045  } else { // 4-MV
/* NOTE(review): original line 4046 is missing from this extraction; by the
 * parallel 4-MV path below (use of v->fourmvbp) it was presumably the
 * bitstream read that fills v->fourmvbp -- confirm against upstream. */
4047  for (i = 0; i < 6; i++) {
4048  if (i < 4) {
4049  dmv_x = dmv_y = pred_flag = 0;
4050  val = ((v->fourmvbp >> (3 - i)) & 1);
4051  if (val) {
4052  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4053  }
4054  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4055  vc1_mc_4mv_luma(v, i, 0, 0);
4056  } else if (i == 4)
4057  vc1_mc_4mv_chroma(v, 0);
4058  }
4059  mb_has_coeffs = idx_mbmode & 1;
4060  }
4061  if (mb_has_coeffs)
4062  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4063  if (cbp) {
4064  GET_MQUANT();
4065  }
4066  s->current_picture.qscale_table[mb_pos] = mquant;
4067  if (!v->ttmbf && cbp) {
/* NOTE(review): original line 4068 is missing from this extraction; the empty
 * braces are an artifact. It presumably read the per-MB transform type into
 * ttmb (cf. the visible "ttmb < 8" handling below) -- confirm upstream. */
4069  }
4070  dst_idx = 0;
4071  for (i = 0; i < 6; i++) {
4072  s->dc_val[0][s->block_index[i]] = 0;
4073  dst_idx += i >> 2;
4074  val = ((cbp >> (5 - i)) & 1);
4075  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4076  if (val) {
4077  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4078  first_block, s->dest[dst_idx] + off,
4079  (i & 4) ? s->uvlinesize : s->linesize,
4080  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4081  &block_tt);
4082  block_cbp |= pat << (i << 2);
4083  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4084  first_block = 0;
4085  }
4086  }
4087  }
/* At the right edge of the row, save the per-row intra flags for the next row. */
4088  if (s->mb_x == s->mb_width - 1)
4089  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4090  return 0;
4091 }
4092 
/* NOTE(review): the signature line (original line 4095) is missing from this
 * extraction. By position and content (bfraction scaling, BMV_TYPE_* handling,
 * rangeredfrm doubling) this body appears to decode one macroblock of a
 * progressive B-picture (presumably vc1_decode_b_mb) -- confirm upstream.
 * Only comments were added below; every code line is byte-identical. */
4096 {
4097  MpegEncContext *s = &v->s;
4098  GetBitContext *gb = &s->gb;
4099  int i, j;
4100  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4101  int cbp = 0; /* cbp decoding stuff */
4102  int mqdiff, mquant; /* MB quantization */
4103  int ttmb = v->ttfrm; /* MB Transform type */
4104  int mb_has_coeffs = 0; /* last_flag */
4105  int index, index1; /* LUT indexes */
4106  int val, sign; /* temp values */
4107  int first_block = 1;
4108  int dst_idx, off;
4109  int skipped, direct;
4110  int dmv_x[2], dmv_y[2];
4111  int bmvtype = BMV_TYPE_BACKWARD;
4112 
4113  mquant = v->pq; /* lossy initialization */
4114  s->mb_intra = 0;
4115 
/* DIRECT and SKIP flags come either raw from the bitstream or from the
 * decoded bitplanes, depending on how the picture header coded them. */
4116  if (v->dmb_is_raw)
4117  direct = get_bits1(gb);
4118  else
4119  direct = v->direct_mb_plane[mb_pos];
4120  if (v->skip_is_raw)
4121  skipped = get_bits1(gb);
4122  else
4123  skipped = v->s.mbskip_table[mb_pos];
4124 
4125  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4126  for (i = 0; i < 6; i++) {
4127  v->mb_type[0][s->block_index[i]] = 0;
4128  s->dc_val[0][s->block_index[i]] = 0;
4129  }
4130  s->current_picture.qscale_table[mb_pos] = 0;
4131 
4132  if (!direct) {
4133  if (!skipped) {
4134  GET_MVDATA(dmv_x[0], dmv_y[0]);
4135  dmv_x[1] = dmv_x[0];
4136  dmv_y[1] = dmv_y[0];
4137  }
4138  if (skipped || !s->mb_intra) {
/* decode012 maps to fwd/bwd (order depends on BFRACTION) or interpolated. */
4139  bmvtype = decode012(gb);
4140  switch (bmvtype) {
4141  case 0:
4142  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4143  break;
4144  case 1:
4145  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4146  break;
4147  case 2:
4148  bmvtype = BMV_TYPE_INTERPOLATED;
4149  dmv_x[0] = dmv_y[0] = 0;
4150  }
4151  }
4152  }
4153  for (i = 0; i < 6; i++)
4154  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4155 
4156  if (skipped) {
4157  if (direct)
4158  bmvtype = BMV_TYPE_INTERPOLATED;
4159  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4160  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4161  return;
4162  }
4163  if (direct) {
4164  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4165  GET_MQUANT();
4166  s->mb_intra = 0;
4167  s->current_picture.qscale_table[mb_pos] = mquant;
4168  if (!v->ttmbf)
/* NOTE(review): original line 4169 is missing from this extraction; it
 * presumably read the per-MB transform type into ttmb -- confirm upstream. */
4170  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4171  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4172  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4173  } else {
4174  if (!mb_has_coeffs && !s->mb_intra) {
4175  /* no coded blocks - effectively skipped */
4176  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4177  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4178  return;
4179  }
4180  if (s->mb_intra && !mb_has_coeffs) {
4181  GET_MQUANT();
4182  s->current_picture.qscale_table[mb_pos] = mquant;
4183  s->ac_pred = get_bits1(gb);
4184  cbp = 0;
4185  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4186  } else {
4187  if (bmvtype == BMV_TYPE_INTERPOLATED) {
/* Second MV of the interpolated pair. */
4188  GET_MVDATA(dmv_x[0], dmv_y[0]);
4189  if (!mb_has_coeffs) {
4190  /* interpolated skipped block */
4191  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4192  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4193  return;
4194  }
4195  }
4196  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4197  if (!s->mb_intra) {
4198  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4199  }
4200  if (s->mb_intra)
4201  s->ac_pred = get_bits1(gb);
4202  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4203  GET_MQUANT();
4204  s->current_picture.qscale_table[mb_pos] = mquant;
4205  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
/* NOTE(review): original line 4206 is missing from this extraction; it
 * presumably read the per-MB transform type into ttmb -- confirm upstream. */
4207  }
4208  }
4209  dst_idx = 0;
4210  for (i = 0; i < 6; i++) {
4211  s->dc_val[0][s->block_index[i]] = 0;
4212  dst_idx += i >> 2;
4213  val = ((cbp >> (5 - i)) & 1);
4214  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4215  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4216  if (s->mb_intra) {
4217  /* check if prediction blocks A and C are available */
4218  v->a_avail = v->c_avail = 0;
4219  if (i == 2 || i == 3 || !s->first_slice_line)
4220  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4221  if (i == 1 || i == 3 || s->mb_x)
4222  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4223 
4224  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4225  (i & 4) ? v->codingset2 : v->codingset);
4226  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4227  continue;
4228  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* Range reduction: samples are stored halved, double before output. */
4229  if (v->rangeredfrm)
4230  for (j = 0; j < 64; j++)
4231  s->block[i][j] <<= 1;
4232  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4233  } else if (val) {
4234  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4235  first_block, s->dest[dst_idx] + off,
4236  (i & 4) ? s->uvlinesize : s->linesize,
4237  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4238  if (!v->ttmbf && ttmb < 8)
4239  ttmb = -1;
4240  first_block = 0;
4241  }
4242  }
4243 }
4244 
/* NOTE(review): the signature line (original line 4247) is missing from this
 * extraction. By position and content (VC1_IF_MBMODE_VLC_BITS, forward_mb_plane,
 * BMV_TYPE_DIRECT) this body appears to decode one macroblock of an interlaced
 * FIELD B-picture (presumably vc1_decode_b_mb_intfi) -- confirm upstream.
 * Only comments were added below; every code line is byte-identical. */
4248 {
4249  MpegEncContext *s = &v->s;
4250  GetBitContext *gb = &s->gb;
4251  int i, j;
4252  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4253  int cbp = 0; /* cbp decoding stuff */
4254  int mqdiff, mquant; /* MB quantization */
4255  int ttmb = v->ttfrm; /* MB Transform type */
4256  int mb_has_coeffs = 0; /* last_flag */
4257  int val; /* temp value */
4258  int first_block = 1;
4259  int dst_idx, off;
4260  int fwd;
4261  int dmv_x[2], dmv_y[2], pred_flag[2];
4262  int bmvtype = BMV_TYPE_BACKWARD;
4263  int idx_mbmode, interpmvp;
4264 
4265  mquant = v->pq; /* Loosy initialization */
4266  s->mb_intra = 0;
4267 
4268  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4269  if (idx_mbmode <= 1) { // intra MB
4270  s->mb_intra = v->is_intra[s->mb_x] = 1;
4271  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4272  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4273  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4274  GET_MQUANT();
4275  s->current_picture.qscale_table[mb_pos] = mquant;
4276  /* Set DC scale - y and c use the same (not sure if necessary here) */
4277  s->y_dc_scale = s->y_dc_scale_table[mquant];
4278  s->c_dc_scale = s->c_dc_scale_table[mquant];
4279  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4280  mb_has_coeffs = idx_mbmode & 1;
4281  if (mb_has_coeffs)
4282  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4283  dst_idx = 0;
4284  for (i = 0; i < 6; i++) {
4285  s->dc_val[0][s->block_index[i]] = 0;
4286  dst_idx += i >> 2;
4287  val = ((cbp >> (5 - i)) & 1);
4288  v->mb_type[0][s->block_index[i]] = s->mb_intra;
/* Availability of prediction candidates A (above) and C (left). */
4289  v->a_avail = v->c_avail = 0;
4290  if (i == 2 || i == 3 || !s->first_slice_line)
4291  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4292  if (i == 1 || i == 3 || s->mb_x)
4293  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4294 
4295  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4296  (i & 4) ? v->codingset2 : v->codingset);
4297  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4298  continue;
4299  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4300  if (v->rangeredfrm)
4301  for (j = 0; j < 64; j++)
4302  s->block[i][j] <<= 1;
4303  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4304  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4305  // TODO: yet to perform loop filter
4306  }
4307  } else {
4308  s->mb_intra = v->is_intra[s->mb_x] = 0;
4309  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4310  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
/* FORWARDMB flag: raw from bitstream or from the decoded bitplane. */
4311  if (v->fmb_is_raw)
4312  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4313  else
4314  fwd = v->forward_mb_plane[mb_pos];
4315  if (idx_mbmode <= 5) { // 1-MV
4316  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4317  pred_flag[0] = pred_flag[1] = 0;
4318  if (fwd)
4319  bmvtype = BMV_TYPE_FORWARD;
4320  else {
4321  bmvtype = decode012(gb);
4322  switch (bmvtype) {
4323  case 0:
4324  bmvtype = BMV_TYPE_BACKWARD;
4325  break;
4326  case 1:
4327  bmvtype = BMV_TYPE_DIRECT;
4328  break;
4329  case 2:
4330  bmvtype = BMV_TYPE_INTERPOLATED;
4331  interpmvp = get_bits1(gb);
4332  }
4333  }
/* NOTE(review): interpmvp is only assigned in the INTERPOLATED case above but
 * is tested below whenever bmvtype == BMV_TYPE_INTERPOLATED; with fwd set it
 * stays uninitialized on that path in this extraction -- verify upstream. */
4334  v->bmvtype = bmvtype;
4335  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4336  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4337  }
4338  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4339  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4340  }
4341  if (bmvtype == BMV_TYPE_DIRECT) {
4342  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4343  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4344  }
4345  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4346  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4347  mb_has_coeffs = !(idx_mbmode & 2);
4348  } else { // 4-MV
4349  if (fwd)
4350  bmvtype = BMV_TYPE_FORWARD;
4351  v->bmvtype = bmvtype;
/* NOTE(review): original line 4352 is missing from this extraction; by the
 * use of v->fourmvbp just below it presumably read the 4-MV block pattern
 * from the bitstream -- confirm upstream. */
4353  for (i = 0; i < 6; i++) {
4354  if (i < 4) {
4355  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4356  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4357  val = ((v->fourmvbp >> (3 - i)) & 1);
4358  if (val) {
4359  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4360  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4361  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4362  }
4363  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4364  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4365  } else if (i == 4)
4366  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4367  }
4368  mb_has_coeffs = idx_mbmode & 1;
4369  }
4370  if (mb_has_coeffs)
4371  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4372  if (cbp) {
4373  GET_MQUANT();
4374  }
4375  s->current_picture.qscale_table[mb_pos] = mquant;
4376  if (!v->ttmbf && cbp) {
/* NOTE(review): original line 4377 is missing from this extraction; the empty
 * braces are an artifact. It presumably read the per-MB transform type into
 * ttmb -- confirm upstream. */
4378  }
4379  dst_idx = 0;
4380  for (i = 0; i < 6; i++) {
4381  s->dc_val[0][s->block_index[i]] = 0;
4382  dst_idx += i >> 2;
4383  val = ((cbp >> (5 - i)) & 1);
4384  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4385  if (val) {
4386  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4387  first_block, s->dest[dst_idx] + off,
4388  (i & 4) ? s->uvlinesize : s->linesize,
4389  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4390  if (!v->ttmbf && ttmb < 8)
4391  ttmb = -1;
4392  first_block = 0;
4393  }
4394  }
4395  }
4396 }
4397 
/* NOTE(review): the signature line (original line 4400) is missing from this
 * extraction. By position and content (INTFR mbmode tables, fieldtx, 2-MV
 * field mode, scale_mv on next_picture MVs) this body appears to decode one
 * macroblock of an interlaced FRAME B-picture (presumably
 * vc1_decode_b_mb_intfr) -- confirm upstream.
 * Only comments were added below; every code line is byte-identical. */
4401 {
4402  MpegEncContext *s = &v->s;
4403  GetBitContext *gb = &s->gb;
4404  int i, j;
4405  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4406  int cbp = 0; /* cbp decoding stuff */
4407  int mqdiff, mquant; /* MB quantization */
4408  int ttmb = v->ttfrm; /* MB Transform type */
4409  int mvsw = 0; /* motion vector switch */
4410  int mb_has_coeffs = 1; /* last_flag */
4411  int dmv_x, dmv_y; /* Differential MV components */
4412  int val; /* temp value */
4413  int first_block = 1;
4414  int dst_idx, off;
4415  int skipped, direct, twomv = 0;
4416  int block_cbp = 0, pat, block_tt = 0;
4417  int idx_mbmode = 0, mvbp;
4418  int stride_y, fieldtx;
4419  int bmvtype = BMV_TYPE_BACKWARD;
4420  int dir, dir2;
4421 
4422  mquant = v->pq; /* Lossy initialization */
4423  s->mb_intra = 0;
4424  if (v->skip_is_raw)
4425  skipped = get_bits1(gb);
4426  else
4427  skipped = v->s.mbskip_table[mb_pos];
4428 
4429  if (!skipped) {
4430  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
/* blk_mv_type = 1 marks the luma blocks as using field (2-MV) motion. */
4431  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4432  twomv = 1;
4433  v->blk_mv_type[s->block_index[0]] = 1;
4434  v->blk_mv_type[s->block_index[1]] = 1;
4435  v->blk_mv_type[s->block_index[2]] = 1;
4436  v->blk_mv_type[s->block_index[3]] = 1;
4437  } else {
4438  v->blk_mv_type[s->block_index[0]] = 0;
4439  v->blk_mv_type[s->block_index[1]] = 0;
4440  v->blk_mv_type[s->block_index[2]] = 0;
4441  v->blk_mv_type[s->block_index[3]] = 0;
4442  }
4443  }
4444 
4445  if (v->dmb_is_raw)
4446  direct = get_bits1(gb);
4447  else
4448  direct = v->direct_mb_plane[mb_pos];
4449 
/* Direct mode: derive fwd/bwd MVs by scaling the co-located MVs of the
 * next (anchor) picture with BFRACTION. */
4450  if (direct) {
4451  s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4452  s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4453  s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4454  s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4455 
4456  if (twomv) {
4457  s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4458  s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4459  s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4460  s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4461 
4462  for (i = 1; i < 4; i += 2) {
4463  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4464  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4465  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4466  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4467  }
4468  } else {
4469  for (i = 1; i < 4; i++) {
4470  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4471  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4472  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4473  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4474  }
4475  }
4476  }
4477 
4478  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4479  for (i = 0; i < 4; i++) {
4480  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4481  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4482  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4483  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4484  }
4485  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4486  s->mb_intra = v->is_intra[s->mb_x] = 1;
4487  for (i = 0; i < 6; i++)
4488  v->mb_type[0][s->block_index[i]] = 1;
4489  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4490  mb_has_coeffs = get_bits1(gb);
4491  if (mb_has_coeffs)
4492  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4493  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4494  GET_MQUANT();
4495  s->current_picture.qscale_table[mb_pos] = mquant;
4496  /* Set DC scale - y and c use the same (not sure if necessary here) */
4497  s->y_dc_scale = s->y_dc_scale_table[mquant];
4498  s->c_dc_scale = s->c_dc_scale_table[mquant];
4499  dst_idx = 0;
4500  for (i = 0; i < 6; i++) {
4501  s->dc_val[0][s->block_index[i]] = 0;
4502  dst_idx += i >> 2;
4503  val = ((cbp >> (5 - i)) & 1);
4504  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4505  v->a_avail = v->c_avail = 0;
4506  if (i == 2 || i == 3 || !s->first_slice_line)
4507  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4508  if (i == 1 || i == 3 || s->mb_x)
4509  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4510 
4511  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4512  (i & 4) ? v->codingset2 : v->codingset);
4513  if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4514  continue;
4515  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
/* fieldtx interleaves the two fields of the luma blocks: doubled stride
 * and a different per-block offset. */
4516  if (i < 4) {
4517  stride_y = s->linesize << fieldtx;
4518  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4519  } else {
4520  stride_y = s->uvlinesize;
4521  off = 0;
4522  }
4523  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4524  }
4525  } else {
4526  s->mb_intra = v->is_intra[s->mb_x] = 0;
4527  if (!direct) {
4528  if (skipped || !s->mb_intra) {
4529  bmvtype = decode012(gb);
4530  switch (bmvtype) {
4531  case 0:
4532  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4533  break;
4534  case 1:
4535  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4536  break;
4537  case 2:
4538  bmvtype = BMV_TYPE_INTERPOLATED;
4539  }
4540  }
4541 
4542  if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4543  mvsw = get_bits1(gb);
4544  }
4545 
4546  if (!skipped) { // inter MB
4547  mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4548  if (mb_has_coeffs)
4549  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4550  if (!direct) {
/* NOTE(review): the single '&' / '|' in the two conditions below are most
 * likely '&&' / '||' in the original file, mangled by this extraction (both
 * operands are 0/1 here, so the truth value is the same either way).
 * Original lines 4552 and 4554 -- presumably the bitstream reads filling
 * v->fourmvbp and v->twomvbp used below -- are missing, leaving these
 * blocks empty. Confirm against upstream before touching this code. */
4551  if (bmvtype == BMV_TYPE_INTERPOLATED & twomv) {
4553  } else if (bmvtype == BMV_TYPE_INTERPOLATED | twomv) {
4555  }
4556  }
4557 
4558  for (i = 0; i < 6; i++)
4559  v->mb_type[0][s->block_index[i]] = 0;
4560  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4561  /* for all motion vector read MVDATA and motion compensate each block */
4562  dst_idx = 0;
4563  if (direct) {
4564  if (twomv) {
4565  for (i = 0; i < 4; i++) {
4566  vc1_mc_4mv_luma(v, i, 0, 0);
4567  vc1_mc_4mv_luma(v, i, 1, 1);
4568  }
4569  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4570  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4571  } else {
4572  vc1_mc_1mv(v, 0);
4573  vc1_interp_mc(v);
4574  }
4575  } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4576  mvbp = v->fourmvbp;
4577  for (i = 0; i < 4; i++) {
/* Alternate direction per field pair: i=0,2 forward, i=1,3 backward. */
4578  dir = i==1 || i==3;
4579  dmv_x = dmv_y = 0;
4580  val = ((mvbp >> (3 - i)) & 1);
4581  if (val)
4582  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4583  j = i > 1 ? 2 : 0;
4584  vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4585  vc1_mc_4mv_luma(v, j, dir, dir);
4586  vc1_mc_4mv_luma(v, j+1, dir, dir);
4587  }
4588 
4589  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4590  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4591  } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4592  mvbp = v->twomvbp;
4593  dmv_x = dmv_y = 0;
4594  if (mvbp & 2)
4595  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4596 
4597  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4598  vc1_mc_1mv(v, 0);
4599 
4600  dmv_x = dmv_y = 0;
4601  if (mvbp & 1)
4602  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4603 
4604  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4605  vc1_interp_mc(v);
4606  } else if (twomv) {
/* mvsw swaps the reference direction between the two fields. */
4607  dir = bmvtype == BMV_TYPE_BACKWARD;
4608  dir2 = dir;
4609  if (mvsw)
4610  dir2 = !dir;
4611  mvbp = v->twomvbp;
4612  dmv_x = dmv_y = 0;
4613  if (mvbp & 2)
4614  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4615  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4616 
4617  dmv_x = dmv_y = 0;
4618  if (mvbp & 1)
4619  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4620  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4621 
4622  if (mvsw) {
4623  for (i = 0; i < 2; i++) {
4624  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4625  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4626  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4627  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4628  }
4629  } else {
4630  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4631  vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4632  }
4633 
4634  vc1_mc_4mv_luma(v, 0, dir, 0);
4635  vc1_mc_4mv_luma(v, 1, dir, 0);
4636  vc1_mc_4mv_luma(v, 2, dir2, 0);
4637  vc1_mc_4mv_luma(v, 3, dir2, 0);
4638  vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4639  } else {
4640  dir = bmvtype == BMV_TYPE_BACKWARD;
4641 
4642  mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4643  dmv_x = dmv_y = 0;
4644  if (mvbp)
4645  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4646 
4647  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4648  v->blk_mv_type[s->block_index[0]] = 1;
4649  v->blk_mv_type[s->block_index[1]] = 1;
4650  v->blk_mv_type[s->block_index[2]] = 1;
4651  v->blk_mv_type[s->block_index[3]] = 1;
4652  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4653  for (i = 0; i < 2; i++) {
4654  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4655  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4656  }
4657  vc1_mc_1mv(v, dir);
4658  }
4659 
4660  if (cbp)
4661  GET_MQUANT(); // p. 227
4662  s->current_picture.qscale_table[mb_pos] = mquant;
4663  if (!v->ttmbf && cbp)
/* NOTE(review): original line 4664 is missing from this extraction; it
 * presumably read the per-MB transform type into ttmb (the statement that
 * this 'if' governs) -- confirm upstream. */
4665  for (i = 0; i < 6; i++) {
4666  s->dc_val[0][s->block_index[i]] = 0;
4667  dst_idx += i >> 2;
4668  val = ((cbp >> (5 - i)) & 1);
4669  if (!fieldtx)
4670  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4671  else
4672  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4673  if (val) {
4674  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4675  first_block, s->dest[dst_idx] + off,
4676  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4677  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4678  block_cbp |= pat << (i << 2);
4679  if (!v->ttmbf && ttmb < 8)
4680  ttmb = -1;
4681  first_block = 0;
4682  }
4683  }
4684 
4685  } else { // skipped
4686  dir = 0;
4687  for (i = 0; i < 6; i++) {
4688  v->mb_type[0][s->block_index[i]] = 0;
4689  s->dc_val[0][s->block_index[i]] = 0;
4690  }
4691  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4692  s->current_picture.qscale_table[mb_pos] = 0;
4693  v->blk_mv_type[s->block_index[0]] = 0;
4694  v->blk_mv_type[s->block_index[1]] = 0;
4695  v->blk_mv_type[s->block_index[2]] = 0;
4696  v->blk_mv_type[s->block_index[3]] = 0;
4697 
4698  if (!direct) {
4699  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4700  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4701  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4702  } else {
4703  dir = bmvtype == BMV_TYPE_BACKWARD;
4704  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
/* NOTE(review): the inner "if (mvsw)" is redundant -- this branch is only
 * reached when mvsw is already true. Left untouched (harmless). */
4705  if (mvsw) {
4706  int dir2 = dir;
4707  if (mvsw)
4708  dir2 = !dir;
4709  for (i = 0; i < 2; i++) {
4710  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4711  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4712  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4713  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4714  }
4715  } else {
4716  v->blk_mv_type[s->block_index[0]] = 1;
4717  v->blk_mv_type[s->block_index[1]] = 1;
4718  v->blk_mv_type[s->block_index[2]] = 1;
4719  v->blk_mv_type[s->block_index[3]] = 1;
4720  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4721  for (i = 0; i < 2; i++) {
4722  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4723  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4724  }
4725  }
4726  }
4727  }
4728 
4729  vc1_mc_1mv(v, dir);
4730  if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4731  vc1_interp_mc(v);
4732  }
4733  }
4734  }
4735  if (s->mb_x == s->mb_width - 1)
4736  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4737  v->cbp[s->mb_x] = block_cbp;
4738  v->ttblk[s->mb_x] = block_tt;
4739  return 0;
4740 }
4741 
/* NOTE(review): the signature line (orig. 4744) was lost in the doxygen
 * extraction this chunk came from; from its structure this is the
 * simple/main-profile intra-frame decoder — presumably
 * static void vc1_decode_i_blocks(VC1Context *v). Confirm against the
 * original vc1dec.c. Decodes every macroblock of an I-frame: per-block
 * VLC decode, inverse 8x8 transform, pixel output, optional overlap
 * smoothing and loop filtering, with bit-budget overrun detection. */
4745 {
4746  int k, j;
4747  MpegEncContext *s = &v->s;
4748  int cbp, val;
4749  uint8_t *coded_val;
4750  int mb_pos;
4751 
4752  /* select codingmode used for VLC tables selection */
 /* NOTE(review): the extraction dropped the body line of each "case"
  * below (orig. 4755/4758/4761 and 4767/4770/4773) — in the original
  * these assign v->codingset / v->codingset2 from the AC table index. */
4753  switch (v->y_ac_table_index) {
4754  case 0:
4756  break;
4757  case 1:
4759  break;
4760  case 2:
4762  break;
4763  }
4764 
4765  switch (v->c_ac_table_index) {
4766  case 0:
4768  break;
4769  case 1:
4771  break;
4772  case 2:
4774  break;
4775  }
4776 
4777  /* Set DC scale - y and c use the same */
4778  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4779  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4780 
4781  //do frame decode
4782  s->mb_x = s->mb_y = 0;
4783  s->mb_intra = 1;
4784  s->first_slice_line = 1;
4785  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4786  s->mb_x = 0;
4787  init_block_index(v);
4788  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
 /* dst[0..3] = four 8x8 luma quadrants, dst[4..5] = chroma planes */
4789  uint8_t *dst[6];
4791  dst[0] = s->dest[0];
4792  dst[1] = dst[0] + 8;
4793  dst[2] = s->dest[0] + s->linesize * 8;
4794  dst[3] = dst[2] + 8;
4795  dst[4] = s->dest[1];
4796  dst[5] = s->dest[2];
4797  s->dsp.clear_blocks(s->block[0]);
4798  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4799  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4800  s->current_picture.qscale_table[mb_pos] = v->pq;
4801  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4802  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4803 
4804  // do actual MB decoding and displaying
 /* NOTE(review): orig. 4805 (the VLC read initializing cbp) was dropped
  * by the extraction; cbp is read here before the per-block loop. */
4806  v->s.ac_pred = get_bits1(&v->s.gb);
4807 
4808  for (k = 0; k < 6; k++) {
4809  val = ((cbp >> (5 - k)) & 1);
4810 
 /* luma blocks use predictive coding of the coded flag */
4811  if (k < 4) {
4812  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4813  val = val ^ pred;
4814  *coded_val = val;
4815  }
4816  cbp |= val << (5 - k);
4817 
4818  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4819 
 /* skip chroma reconstruction in gray-only mode */
4820  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4821  continue;
4822  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4823  if (v->pq >= 9 && v->overlap) {
4824  if (v->rangeredfrm)
4825  for (j = 0; j < 64; j++)
4826  s->block[k][j] <<= 1;
4827  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4828  } else {
4829  if (v->rangeredfrm)
4830  for (j = 0; j < 64; j++)
4831  s->block[k][j] = (s->block[k][j] - 64) << 1;
4832  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4833  }
4834  }
4835 
 /* overlap smoothing across block edges, only at high quantizers */
4836  if (v->pq >= 9 && v->overlap) {
4837  if (s->mb_x) {
4838  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4839  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4840  if (!(s->flags & CODEC_FLAG_GRAY)) {
4841  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4842  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4843  }
4844  }
4845  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4846  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4847  if (!s->first_slice_line) {
4848  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4849  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4850  if (!(s->flags & CODEC_FLAG_GRAY)) {
4851  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4852  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4853  }
4854  }
4855  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4856  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4857  }
4858  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4859 
 /* bail out (and mark the slice damaged) if we read past the budget */
4860  if (get_bits_count(&s->gb) > v->bits) {
4861  ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4862  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4863  get_bits_count(&s->gb), v->bits);
4864  return;
4865  }
4866  }
4867  if (!v->s.loop_filter)
4868  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4869  else if (s->mb_y)
4870  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4871 
4872  s->first_slice_line = 0;
4873  }
4874  if (v->s.loop_filter)
4875  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4876 
4877  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4878  * profile, these only differ are when decoding MSS2 rectangles. */
4879  ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4880 }
4881 
/* NOTE(review): signature line (orig. 4884) lost in extraction; this is
 * the advanced-profile intra-frame decoder — presumably
 * static void vc1_decode_i_blocks_adv(VC1Context *v). Supports slices
 * (start_mb_y/end_mb_y), per-MB quantizer (GET_MQUANT), raw bitplanes
 * for FIELDTX/ACPRED/OVERFLAGS, and deferred block output via the
 * v->block[] ring indexed by cur_blk_idx. Confirm against vc1dec.c. */
4885 {
4886  int k;
4887  MpegEncContext *s = &v->s;
4888  int cbp, val;
4889  uint8_t *coded_val;
4890  int mb_pos;
4891  int mquant = v->pq;
4892  int mqdiff;
4893  GetBitContext *gb = &s->gb;
4894 
4895  /* select codingmode used for VLC tables selection */
 /* NOTE(review): the codingset assignment inside each "case" below was
  * dropped by the extraction (orig. 4898/4901/4904, 4910/4913/4916). */
4896  switch (v->y_ac_table_index) {
4897  case 0:
4899  break;
4900  case 1:
4902  break;
4903  case 2:
4905  break;
4906  }
4907 
4908  switch (v->c_ac_table_index) {
4909  case 0:
4911  break;
4912  case 1:
4914  break;
4915  case 2:
4917  break;
4918  }
4919 
4920  // do frame decode
4921  s->mb_x = s->mb_y = 0;
4922  s->mb_intra = 1;
4923  s->first_slice_line = 1;
4924  s->mb_y = s->start_mb_y;
 /* for a non-first slice, clear the coded-block predictors of the row
  * above so prediction does not leak across the slice boundary */
4925  if (s->start_mb_y) {
4926  s->mb_x = 0;
4927  init_block_index(v);
4928  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4929  (1 + s->b8_stride) * sizeof(*s->coded_block));
4930  }
4931  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4932  s->mb_x = 0;
4933  init_block_index(v);
4934  for (;s->mb_x < s->mb_width; s->mb_x++) {
4935  int16_t (*block)[64] = v->block[v->cur_blk_idx];
4937  s->dsp.clear_blocks(block[0]);
4938  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4939  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4940  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4941  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4942 
4943  // do actual MB decoding and displaying
4944  if (v->fieldtx_is_raw)
4945  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
 /* NOTE(review): orig. 4946 (the cbp VLC read) was dropped here. */
4947  if ( v->acpred_is_raw)
4948  v->s.ac_pred = get_bits1(&v->s.gb);
4949  else
4950  v->s.ac_pred = v->acpred_plane[mb_pos];
4951 
4952  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4953  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4954 
 /* macro: reads MQDIFF and updates mquant for this MB */
4955  GET_MQUANT();
4956 
4957  s->current_picture.qscale_table[mb_pos] = mquant;
4958  /* Set DC scale - y and c use the same */
4959  s->y_dc_scale = s->y_dc_scale_table[mquant];
4960  s->c_dc_scale = s->c_dc_scale_table[mquant];
4961 
4962  for (k = 0; k < 6; k++) {
4963  val = ((cbp >> (5 - k)) & 1);
4964 
4965  if (k < 4) {
4966  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4967  val = val ^ pred;
4968  *coded_val = val;
4969  }
4970  cbp |= val << (5 - k);
4971 
 /* above/left availability for DC/AC prediction */
4972  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4973  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4974 
4975  vc1_decode_i_block_adv(v, block[k], k, val,
4976  (k < 4) ? v->codingset : v->codingset2, mquant);
4977 
4978  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4979  continue;
 /* NOTE(review): orig. 4980 (inverse transform call) dropped here. */
4981  }
4982 
 /* NOTE(review): orig. 4983-4985 (deferred smooth/output of the
  * previous MB and block-index rotation) dropped by the extraction. */
4986 
4987  if (get_bits_count(&s->gb) > v->bits) {
4988  // TODO: may need modification to handle slice coding
4989  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4990  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4991  get_bits_count(&s->gb), v->bits);
4992  return;
4993  }
4994  }
4995  if (!v->s.loop_filter)
4996  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4997  else if (s->mb_y)
4998  ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4999  s->first_slice_line = 0;
5000  }
5001 
5002  /* raw bottom MB row */
5003  s->mb_x = 0;
5004  init_block_index(v);
5005 
 /* NOTE(review): the loop body lines (orig. 5007-5010) that flush the
  * last deferred MB row were dropped by the extraction. */
5006  for (;s->mb_x < s->mb_width; s->mb_x++) {
5009  if (v->s.loop_filter)
5011  }
5012  if (v->s.loop_filter)
5013  ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5014  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5015  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5016 }
5017 
/* NOTE(review): signature line (orig. 5018) lost in extraction; this is
 * the P-frame decoder — presumably static void vc1_decode_p_blocks(
 * VC1Context *v). Dispatches per-MB to field/frame-interlaced or
 * progressive P-MB decoders and maintains the one-row history buffers
 * (cbp/ttblk/is_intra/luma_mv) used for in-loop filtering. */
5019 {
5020  MpegEncContext *s = &v->s;
5021  int apply_loop_filter;
5022 
5023  /* select codingmode used for VLC tables selection */
 /* NOTE(review): both switches test c_ac_table_index (so does upstream
  * vc1dec.c — the first selects codingset, the second codingset2); the
  * assignment lines inside each "case" were dropped by the extraction. */
5024  switch (v->c_ac_table_index) {
5025  case 0:
5027  break;
5028  case 1:
5030  break;
5031  case 2:
5033  break;
5034  }
5035 
5036  switch (v->c_ac_table_index) {
5037  case 0:
5039  break;
5040  case 1:
5042  break;
5043  case 2:
5045  break;
5046  }
5047 
 /* in-loop deblocking only for progressive frames, unless skipped */
5048  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5049  v->fcm == PROGRESSIVE;
5050  s->first_slice_line = 1;
5051  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5052  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5053  s->mb_x = 0;
5054  init_block_index(v);
5055  for (; s->mb_x < s->mb_width; s->mb_x++) {
 /* NOTE(review): the dispatch-target call lines (orig. 5056, 5059,
  * 5061, 5064 — block-index update, the two interlaced P-MB decoders
  * and the loop-filter call) were dropped by the extraction. */
5057 
5058  if (v->fcm == ILACE_FIELD)
5060  else if (v->fcm == ILACE_FRAME)
5062  else vc1_decode_p_mb(v);
5063  if (s->mb_y != s->start_mb_y && apply_loop_filter)
5065  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5066  // TODO: may need modification to handle slice coding
5067  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5068  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5069  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5070  return;
5071  }
5072  }
 /* slide the per-row history down one row for the next MB row */
5073  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5074  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5075  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5076  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5077  if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5078  s->first_slice_line = 0;
5079  }
5080  if (apply_loop_filter) {
5081  s->mb_x = 0;
5082  init_block_index(v);
 /* NOTE(review): final-row filter call lines (orig. 5084-5085)
  * were dropped by the extraction. */
5083  for (; s->mb_x < s->mb_width; s->mb_x++) {
5086  }
5087  }
5088  if (s->end_mb_y >= s->start_mb_y)
5089  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5090  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5091  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5092 }
5093 
/* NOTE(review): signature line (orig. 5094) lost in extraction; this is
 * the B-frame decoder — presumably static void vc1_decode_b_blocks(
 * VC1Context *v). Same structure as the P-frame loop but without the
 * per-row history buffers. */
5095 {
5096  MpegEncContext *s = &v->s;
5097 
5098  /* select codingmode used for VLC tables selection */
 /* NOTE(review): codingset/codingset2 assignments inside each "case"
  * were dropped by the extraction (orig. 5101/5104/5107 etc.). */
5099  switch (v->c_ac_table_index) {
5100  case 0:
5102  break;
5103  case 1:
5105  break;
5106  case 2:
5108  break;
5109  }
5110 
5111  switch (v->c_ac_table_index) {
5112  case 0:
5114  break;
5115  case 1:
5117  break;
5118  case 2:
5120  break;
5121  }
5122 
5123  s->first_slice_line = 1;
5124  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5125  s->mb_x = 0;
5126  init_block_index(v);
5127  for (; s->mb_x < s->mb_width; s->mb_x++) {
 /* NOTE(review): orig. 5128 (block-index update) and the interlaced
  * B-MB decoder calls (orig. 5131, 5133) were dropped here. */
5129 
5130  if (v->fcm == ILACE_FIELD)
5132  else if (v->fcm == ILACE_FRAME)
5134  else
5135  vc1_decode_b_mb(v);
5136  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5137  // TODO: may need modification to handle slice coding
5138  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5139  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5140  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5141  return;
5142  }
5143  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
5144  }
5145  if (!v->s.loop_filter)
5146  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5147  else if (s->mb_y)
5148  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5149  s->first_slice_line = 0;
5150  }
5151  if (v->s.loop_filter)
5152  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5153  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5154  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5155 }
5156 
/* NOTE(review): signature line (orig. 5157) lost in extraction; this is
 * the skipped-P-frame handler — presumably static void
 * vc1_decode_skip_blocks(VC1Context *v). Copies the previous picture
 * row by row into the current destination (a skipped frame repeats the
 * reference) and marks the whole area as successfully decoded. */
5158 {
5159  MpegEncContext *s = &v->s;
5160 
 /* nothing to copy without a reference picture */
5161  if (!v->s.last_picture.f.data[0])
5162  return;
5163 
5164  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5165  s->first_slice_line = 1;
5166  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5167  s->mb_x = 0;
5168  init_block_index(v);
 /* NOTE(review): orig. 5169 (block-index update call) dropped here. */
 /* 16 luma lines and 8 lines of each chroma plane per MB row */
5170  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5171  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5172  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5173  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5174  s->first_slice_line = 0;
5175  }
 /* NOTE(review): orig. 5176 (one trailing statement) dropped here. */
5177 }
5178 
/* NOTE(review): signature line (orig. 5179) lost in extraction; this is
 * the top-level per-picture dispatcher — presumably
 * void ff_vc1_decode_blocks(VC1Context *v). Routes to the I/P/B(/BI)
 * decoders by picture type, or to the IntraX8 path for X8-coded frames.
 * The call lines inside each branch (orig. 5193/5195/5199/5201/5206/
 * 5208/5210) were dropped by the extraction. */
5180 {
5181 
5182  v->s.esc3_level_length = 0;
5183  if (v->x8_type) {
5184  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
5185  } else {
 /* reset the deferred-output block ring used by the _adv decoders */
5186  v->cur_blk_idx = 0;
5187  v->left_blk_idx = -1;
5188  v->topleft_blk_idx = 1;
5189  v->top_blk_idx = 2;
5190  switch (v->s.pict_type) {
5191  case AV_PICTURE_TYPE_I:
5192  if (v->profile == PROFILE_ADVANCED)
5194  else
5196  break;
5197  case AV_PICTURE_TYPE_P:
5198  if (v->p_frame_skipped)
5200  else
5202  break;
5203  case AV_PICTURE_TYPE_B:
5204  if (v->bi_type) {
5205  if (v->profile == PROFILE_ADVANCED)
5207  else
5209  } else
5211  break;
5212  }
5213  }
5214 }
5215 
5216 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5217 
/* Per-frame sprite (WMV3IMAGE/VC1IMAGE) parameters parsed from the
 * bitstream by vc1_parse_sprites() and consumed by vc1_draw_sprites(). */
5218 typedef struct {
 /* Transform coefficients for up to two sprites, 16.16 fixed point:
  * [0]/[4] horizontal/vertical scale, [1]/[3] rotation terms,
  * [2]/[5] x/y offset, [6] opacity (see vc1_sprite_parse_transform). */
5230  int coefs[2][7];
5231 
 /* decoded effect id and the trailing one-bit effect flag */
5232  int effect_type, effect_flag;
 /* number of parameters actually stored in effect_params1/2 */
5233  int effect_pcount1, effect_pcount2;
 /* effect parameters, 16.16 fixed point */
5234  int effect_params1[15], effect_params2[10];
5235 } SpriteData;
5236 
5237 static inline int get_fp_val(GetBitContext* gb)
5238 {
5239  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5240 }
5241 
5242 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5243 {
5244  c[1] = c[3] = 0;
5245 
5246  switch (get_bits(gb, 2)) {
5247  case 0:
5248  c[0] = 1 << 16;
5249  c[2] = get_fp_val(gb);
5250  c[4] = 1 << 16;
5251  break;
5252  case 1:
5253  c[0] = c[4] = get_fp_val(gb);
5254  c[2] = get_fp_val(gb);
5255  break;
5256  case 2:
5257  c[0] = get_fp_val(gb);
5258  c[2] = get_fp_val(gb);
5259  c[4] = get_fp_val(gb);
5260  break;
5261  case 3:
5262  c[0] = get_fp_val(gb);
5263  c[1] = get_fp_val(gb);
5264  c[2] = get_fp_val(gb);
5265  c[3] = get_fp_val(gb);
5266  c[4] = get_fp_val(gb);
5267  break;
5268  }
5269  c[5] = get_fp_val(gb);
5270  if (get_bits1(gb))
5271  c[6] = get_fp_val(gb);
5272  else
5273  c[6] = 1 << 16;
5274 }
5275 
/* Parse the sprite section of a WMV3IMAGE/VC1IMAGE frame into *sd:
 * one or two transform coefficient sets, optional effect parameters,
 * and the trailing effect flag.  Mostly debug logging; left byte-for-
 * byte intact because the bit-read order is the contract. */
5276 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5277 {
5278  AVCodecContext *avctx = v->s.avctx;
5279  int sprite, i;
5280 
5281  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5282  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
 /* rotation (coefs [1]/[3]) is not implemented by the compositor */
5283  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5284  avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5285  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5286  for (i = 0; i < 7; i++)
5287  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5288  sd->coefs[sprite][i] / (1<<16),
5289  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5290  av_log(avctx, AV_LOG_DEBUG, "\n");
5291  }
5292 
5293  skip_bits(gb, 2);
 /* intentional assignment-in-condition: a zero effect type means
  * "no effect section follows" */
5294  if (sd->effect_type = get_bits_long(gb, 30)) {
5295  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
 /* counts 7 and 14 pack the parameters as one or two transforms */
5296  case 7:
5297  vc1_sprite_parse_transform(gb, sd->effect_params1);
5298  break;
5299  case 14:
5300  vc1_sprite_parse_transform(gb, sd->effect_params1);
5301  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5302  break;
5303  default:
5304  for (i = 0; i < sd->effect_pcount1; i++)
5305  sd->effect_params1[i] = get_fp_val(gb);
5306  }
5307  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5308  // effect 13 is simple alpha blending and matches the opacity above
5309  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5310  for (i = 0; i < sd->effect_pcount1; i++)
5311  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5312  sd->effect_params1[i] / (1 << 16),
5313  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5314  av_log(avctx, AV_LOG_DEBUG, "\n");
5315  }
5316 
5317  sd->effect_pcount2 = get_bits(gb, 16);
 /* effect_params2 holds at most 10 entries — reject larger counts */
5318  if (sd->effect_pcount2 > 10) {
5319  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5320  return;
5321  } else if (sd->effect_pcount2) {
5322  i = -1;
5323  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5324  while (++i < sd->effect_pcount2) {
5325  sd->effect_params2[i] = get_fp_val(gb);
5326  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5327  sd->effect_params2[i] / (1 << 16),
5328  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5329  }
5330  av_log(avctx, AV_LOG_DEBUG, "\n");
5331  }
5332  }
 /* intentional assignment-in-condition, as above */
5333  if (sd->effect_flag = get_bits1(gb))
5334  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5335 
 /* sanity-check bitstream consumption (WMV3IMAGE carries 64 extra bits) */
5336  if (get_bits_count(gb) >= gb->size_in_bits +
5337  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
5338  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5339  if (get_bits_count(gb) < gb->size_in_bits - 8)
5340  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5341 }
5342 
/* Composite one or two sprites into v->sprite_output_frame using the
 * transforms in *sd.  Per plane and per output row, each sprite row is
 * horizontally resampled (with a two-row cache in v->sr_rows), then
 * blended vertically (and across sprites, weighted by alpha) by the
 * vc1dsp sprite_* kernels.  All positions are 16.16 fixed point.
 * Left byte-identical: the cache/state interplay is order-sensitive. */
5343 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5344 {
5345  int i, plane, row, sprite;
 /* sr_cache[sprite][j] = source line currently held in sr_rows[sprite][j],
  * -1 = empty */
5346  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5347  uint8_t* src_h[2][2];
5348  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5349  int ysub[2];
5350  MpegEncContext *s = &v->s;
5351 
 /* clamp offsets/advances so sampling stays inside the sprite
  * (note: "v->sprite_width-1 << 16" parses as (width-1)<<16) */
5352  for (i = 0; i < 2; i++) {
5353  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5354  xadv[i] = sd->coefs[i][0];
5355  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5356  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5357 
5358  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5359  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5360  }
5361  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5362 
5363  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5364  int width = v->output_width>>!!plane;
5365 
5366  for (row = 0; row < v->output_height>>!!plane; row++) {
5367  uint8_t *dst = v->sprite_output_frame->data[plane] +
5368  v->sprite_output_frame->linesize[plane] * row;
5369 
5370  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
 /* sprite 0 comes from the current picture, sprite 1 from the last */
5371  uint8_t *iplane = s->current_picture.f.data[plane];
5372  int iline = s->current_picture.f.linesize[plane];
5373  int ycoord = yoff[sprite] + yadv[sprite] * row;
5374  int yline = ycoord >> 16;
5375  int next_line;
5376  ysub[sprite] = ycoord & 0xFFFF;
5377  if (sprite) {
5378  iplane = s->last_picture.f.data[plane];
5379  iline = s->last_picture.f.linesize[plane];
5380  }
5381  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
 /* fast path: integer x offset and unity x scale need no resampling */
5382  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5383  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5384  if (ysub[sprite])
5385  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
5386  } else {
5387  if (sr_cache[sprite][0] != yline) {
 /* reuse the other cached row by swapping if it matches */
5388  if (sr_cache[sprite][1] == yline) {
5389  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5390  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5391  } else {
5392  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5393  sr_cache[sprite][0] = yline;
5394  }
5395  }
5396  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5397  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5398  iplane + next_line, xoff[sprite],
5399  xadv[sprite], width);
5400  sr_cache[sprite][1] = yline + 1;
5401  }
5402  src_h[sprite][0] = v->sr_rows[sprite][0];
5403  src_h[sprite][1] = v->sr_rows[sprite][1];
5404  }
5405  }
5406 
 /* vertical (and inter-sprite) blending: pick the kernel matching
  * which of the two sprites need sub-pixel vertical interpolation */
5407  if (!v->two_sprites) {
5408  if (ysub[0]) {
5409  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5410  } else {
5411  memcpy(dst, src_h[0][0], width);
5412  }
5413  } else {
5414  if (ysub[0] && ysub[1]) {
5415  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5416  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5417  } else if (ysub[0]) {
5418  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5419  src_h[1][0], alpha, width);
5420  } else if (ysub[1]) {
5421  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5422  src_h[0][0], (1<<16)-1-alpha, width);
5423  } else {
5424  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5425  }
5426  }
5427  }
5428 
 /* halve offsets once after the luma plane for 4:2:0 chroma */
5429  if (!plane) {
5430  for (i = 0; i < 2; i++) {
5431  xoff[i] >>= 1;
5432  yoff[i] >>= 1;
5433  }
5434  }
5435 
5436  }
5437 }
5438 
5439 
/* Parse sprite parameters from gb and render the composited output
 * into v->sprite_output_frame.  Returns 0 on success, -1 on error. */
5440 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5441 {
5442  MpegEncContext *s = &v->s;
5443  AVCodecContext *avctx = s->avctx;
5444  SpriteData sd;
5445 
5446  vc1_parse_sprites(v, gb, &sd);
5447 
5448  if (!s->current_picture.f.data[0]) {
5449  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5450  return -1;
5451  }
5452 
 /* degrade gracefully to single-sprite mode if the second reference
  * is unavailable */
5453  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5454  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5455  v->two_sprites = 0;
5456  }
5457 
 /* NOTE(review): orig. 5458 was dropped by the extraction — presumably
  * it releases the previous sprite_output_frame contents before the
  * ff_get_buffer() below; confirm against the original vc1dec.c. */
5459  if (ff_get_buffer(avctx, v->sprite_output_frame, 0) < 0) {
5460  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5461  return -1;
5462  }
5463 
5464  vc1_draw_sprites(v, &sd);
5465 
5466  return 0;
5467 }
5468 
5469 static void vc1_sprite_flush(AVCodecContext *avctx)
5470 {
5471  VC1Context *v = avctx->priv_data;
5472  MpegEncContext *s = &v->s;
5473  AVFrame *f = &s->current_picture.f;
5474  int plane, i;
5475 
5476  /* Windows Media Image codecs have a convergence interval of two keyframes.
5477  Since we can't enforce it, clear to black the missing sprite. This is
5478  wrong but it looks better than doing nothing. */
5479 
5480  if (f->data[0])
5481  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5482  for (i = 0; i < v->sprite_height>>!!plane; i++)
5483  memset(f->data[plane] + i * f->linesize[plane],
5484  plane ? 128 : 0, f->linesize[plane]);
5485 }
5486 
5487 #endif
5488 
/* NOTE(review): signature line (orig. 5489) lost in extraction; this is
 * the table allocator — presumably int ff_vc1_decode_init_alloc_tables(
 * VC1Context *v).  Allocates MB bitplanes, per-row CBP/TT/intra/MV
 * history buffers, MB/block type arrays and field-MV arrays; returns
 * AVERROR(ENOMEM) on failure after freeing what it checked. */
5490 {
5491  MpegEncContext *s = &v->s;
5492  int i;
 /* round up to an even MB height for field pictures */
5493  int mb_height = FFALIGN(s->mb_height, 2);
5494 
5495  /* Allocate mb bitplanes */
5496  v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5497  v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5498  v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5499  v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5500  v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5501  v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
5502 
5503  v->n_allocated_blks = s->mb_width + 2;
5504  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
 /* each *_base holds two MB rows; the working pointer starts one
  * stride in so row -1 is addressable for prediction */
5505  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5506  v->cbp = v->cbp_base + s->mb_stride;
5507  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5508  v->ttblk = v->ttblk_base + s->mb_stride;
5509  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5510  v->is_intra = v->is_intra_base + s->mb_stride;
5511  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5512  v->luma_mv = v->luma_mv_base + s->mb_stride;
5513 
5514  /* allocate block type info in that way so it could be used with s->block_index[] */
5515  v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5516  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5517  v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5518  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5519 
5520  /* allocate memory to store block level MV info */
5521  v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5522  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5523  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5524  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5525  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5526  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5527  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5528  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5529 
5530  /* Init coded blocks info */
5531  if (v->profile == PROFILE_ADVANCED) {
5532 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5533 // return -1;
5534 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5535 // return -1;
5536  }
5537 
5538  ff_intrax8_common_init(&v->x8,s);
5539 
 /* NOTE(review): orig. 5540 (the "if" guarding sprite row allocation —
  * presumably a WMV3IMAGE/VC1IMAGE codec-id check) was dropped here. */
5541  for (i = 0; i < 4; i++)
5542  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5543  }
5544 
5545  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5546  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5547  !v->mb_type_base) {
 /* NOTE(review): orig. 5548-5549 and 5551 (additional av_freep calls in
  * this cleanup sequence) were dropped by the extraction. */
5550  av_freep(&v->acpred_plane);
5552  av_freep(&v->block);
5553  av_freep(&v->cbp_base);
5554  av_freep(&v->ttblk_base);
5555  av_freep(&v->is_intra_base);
5556  av_freep(&v->luma_mv_base);
5557  av_freep(&v->mb_type_base);
5558  return AVERROR(ENOMEM);
5559  }
5560 
5561  return 0;
5562 }
5563 
/* NOTE(review): signature line (orig. 5564) lost in extraction —
 * presumably static void vc1_decode_reset/init_transposed_scantables(
 * VC1Context *v).  Builds transposed copies of the WMV1 zigzag scan
 * tables (row/column swapped within the 8x8 index) and sets the block
 * shift values used by the advanced-profile decoder. */
5565 {
5566  int i;
5567  for (i = 0; i < 64; i++) {
 /* swap the 3-bit row and column fields of a 0..63 scan index */
5568 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5569  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5570  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5571  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5572  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
 /* NOTE(review): orig. 5573 (presumably an #undef or a fifth table
  * line) was dropped by the extraction. */
5574  }
5575  v->left_blk_sh = 0;
5576  v->top_blk_sh = 3;
5577 }
5578 
/* NOTE(review): signature line (orig. 5583) lost in extraction; this is
 * the codec init callback — presumably static av_cold int
 * vc1_decode_init(AVCodecContext *avctx).  Parses the extradata
 * (WMV3 raw sequence header, or marker-delimited VC-1 sequence header +
 * entry point), sets pixel format/profile/level and the MB dimensions,
 * and configures sprite geometry for the IMAGE variants. */
5584 {
5585  VC1Context *v = avctx->priv_data;
5586  MpegEncContext *s = &v->s;
5587  GetBitContext gb;
5588 
5589  /* save the container output size for WMImage */
5590  v->output_width = avctx->width;
5591  v->output_height = avctx->height;
5592 
5593  if (!avctx->extradata_size || !avctx->extradata)
5594  return -1;
5595  if (!(avctx->flags & CODEC_FLAG_GRAY))
5596  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5597  else
5598  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5599  avctx->hwaccel = ff_find_hwaccel(avctx);
5600  v->s.avctx = avctx;
5601 
5602  if (ff_vc1_init_common(v) < 0)
5603  return -1;
 /* NOTE(review): orig. 5604 (an additional DSP init call) dropped. */
5605  ff_vc1dsp_init(&v->vc1dsp);
5606 
5607  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5608  int count = 0;
5609 
5610  // looks like WMV3 has a sequence header stored in the extradata
5611  // advanced sequence header may be before the first frame
5612  // the last byte of the extradata is a version number, 1 for the
5613  // samples we can decode
5614 
5615  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5616 
5617  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5618  return -1;
5619 
5620  count = avctx->extradata_size*8 - get_bits_count(&gb);
5621  if (count > 0) {
5622  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5623  count, get_bits(&gb, count));
5624  } else if (count < 0) {
5625  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5626  }
5627  } else { // VC1/WVC1/WVP2
5628  const uint8_t *start = avctx->extradata;
5629  uint8_t *end = avctx->extradata + avctx->extradata_size;
5630  const uint8_t *next;
5631  int size, buf2_size;
5632  uint8_t *buf2 = NULL;
5633  int seq_initialized = 0, ep_initialized = 0;
5634 
5635  if (avctx->extradata_size < 16) {
5636  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5637  return -1;
5638  }
5639 
 /* NOTE(review): orig. 5640 (allocation of buf2 — presumably
  * av_mallocz(extradata_size + padding)) was dropped here; buf2 is
  * clearly written through vc1_unescape_buffer() below. */
5641  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5642  next = start;
 /* walk the marker-delimited chunks, decoding SEQHDR and ENTRYPOINT */
5643  for (; next < end; start = next) {
5644  next = find_next_marker(start + 4, end);
5645  size = next - start - 4;
5646  if (size <= 0)
5647  continue;
5648  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5649  init_get_bits(&gb, buf2, buf2_size * 8);
5650  switch (AV_RB32(start)) {
5651  case VC1_CODE_SEQHDR:
5652  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5653  av_free(buf2);
5654  return -1;
5655  }
5656  seq_initialized = 1;
5657  break;
5658  case VC1_CODE_ENTRYPOINT:
5659  if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5660  av_free(buf2);
5661  return -1;
5662  }
5663  ep_initialized = 1;
5664  break;
5665  }
5666  }
5667  av_free(buf2);
5668  if (!seq_initialized || !ep_initialized) {
5669  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5670  return -1;
5671  }
5672  v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5673  }
5674 
 /* NOTE(review): orig. 5675 (allocation of v->sprite_output_frame —
  * presumably av_frame_alloc()) was dropped here. */
5676  if (!v->sprite_output_frame)
5677  return AVERROR(ENOMEM);
5678 
5679  avctx->profile = v->profile;
5680  if (v->profile == PROFILE_ADVANCED)
5681  avctx->level = v->level;
5682 
5683  avctx->has_b_frames = !!avctx->max_b_frames;
5684 
5685  s->mb_width = (avctx->coded_width + 15) >> 4;
5686  s->mb_height = (avctx->coded_height + 15) >> 4;
5687 
 /* NOTE(review): orig. 5689 (the call installing the transposed scan
  * tables for advanced profile / fasttx) was dropped here. */
5688  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5690  } else {
5691  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5692  v->left_blk_sh = 3;
5693  v->top_blk_sh = 0;
5694  }
5695 
5696  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
 /* the coded size is the sprite size; the display size comes from
  * the container values saved at the top of this function */
5697  v->sprite_width = avctx->coded_width;
5698  v->sprite_height = avctx->coded_height;
5699 
5700  avctx->coded_width = avctx->width = v->output_width;
5701  avctx->coded_height = avctx->height = v->output_height;
5702 
5703  // prevent 16.16 overflows
5704  if (v->sprite_width > 1 << 14 ||
5705  v->sprite_height > 1 << 14 ||
5706  v->output_width > 1 << 14 ||
5707  v->output_height > 1 << 14) return -1;
5708  }
5709  return 0;
5710 }
5711 
/* NOTE(review): signature line (orig. 5715) lost in extraction; this is
 * the codec close callback — presumably av_cold int ff_vc1_decode_end(
 * AVCodecContext *avctx).  Frees every table allocated by init /
 * alloc_tables; several av_freep lines (orig. 5720, 5727-5729, 5732,
 * 5734, 5742) were dropped by the extraction — confirm the complete
 * list against the original vc1dec.c. */
5716 {
5717  VC1Context *v = avctx->priv_data;
5718  int i;
5719 
5721 
 /* sr_rows is a [2][2] array of sprite scratch rows */
5722  for (i = 0; i < 4; i++)
5723  av_freep(&v->sr_rows[i >> 1][i & 1]);
5724  av_freep(&v->hrd_rate);
5725  av_freep(&v->hrd_buffer);
5726  ff_MPV_common_end(&v->s);
5730  av_freep(&v->fieldtx_plane);
5731  av_freep(&v->acpred_plane);
5733  av_freep(&v->mb_type_base);
5735  av_freep(&v->mv_f_base);
5736  av_freep(&v->mv_f_next_base);
5737  av_freep(&v->block);
5738  av_freep(&v->cbp_base);
5739  av_freep(&v->ttblk_base);
5740  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5741  av_freep(&v->luma_mv_base);
5743  return 0;
5744 }
5745 
5746 
5750 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5751  int *got_frame, AVPacket *avpkt)
5752 {
5753  const uint8_t *buf = avpkt->data;
5754  int buf_size = avpkt->size, n_slices = 0, i, ret;
5755  VC1Context *v = avctx->priv_data;
5756  MpegEncContext *s = &v->s;
5757  AVFrame *pict = data;
5758  uint8_t *buf2 = NULL;
5759  const uint8_t *buf_start = buf;
5760  int mb_height, n_slices1;
5761  struct {
5762  uint8_t *buf;
5763  GetBitContext gb;
5764  int mby_start;
5765  } *slices = NULL, *tmp;
5766 
5767  /* no supplementary picture */
5768  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5769  /* special case for last picture */
5770  if (s->low_delay == 0 && s->next_picture_ptr) {
5771  if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5772  return ret;
5773  s->next_picture_ptr = NULL;
5774 
5775  *got_frame = 1;
5776  }
5777 
5778  return 0;
5779  }
5780 
5781  //for advanced profile we may need to parse and unescape data
5782  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5783  int buf_size2 = 0;
5784  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5785 
5786  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5787  const uint8_t *start, *end, *next;
5788  int size;
5789 
5790  next = buf;
5791  for (start = buf, end = buf + buf_size; next < end; start = next) {
5792  next = find_next_marker(start + 4, end);
5793  size = next - start - 4;
5794  if (size <= 0) continue;
5795  switch (AV_RB32(start)) {
5796  case VC1_CODE_FRAME:
5797  if (avctx->hwaccel)
5798  buf_start = start;
5799  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5800  break;
5801  case VC1_CODE_FIELD: {
5802  int buf_size3;
5803  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5804  if (!tmp)
5805  goto err;
5806  slices = tmp;
5807  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5808  if (!slices[n_slices].buf)
5809  goto err;
5810  buf_size3 = vc1_unescape_buffer(start + 4, size,
5811  slices[n_slices].buf);
5812  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5813  buf_size3 << 3);
5814  /* assuming that the field marker is at the exact middle,
5815  hope it's correct */
5816  slices[n_slices].mby_start = s->mb_height >> 1;
5817  n_slices1 = n_slices - 1; // index of the last slice of the first field
5818  n_slices++;
5819  break;
5820  }
5821  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5822  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5823  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5824  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5825  break;
5826  case VC1_CODE_SLICE: {
5827  int buf_size3;
5828  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5829  if (!tmp)
5830  goto err;
5831  slices = tmp;
5832  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5833  if (!slices[n_slices].buf)
5834  goto err;
5835  buf_size3 = vc1_unescape_buffer(start + 4, size,
5836  slices[n_slices].buf);
5837  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5838  buf_size3 << 3);
5839  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5840  n_slices++;
5841  break;
5842  }
5843  }
5844  }
5845  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5846  const uint8_t *divider;
5847  int buf_size3;
5848 
5849  divider = find_next_marker(buf, buf + buf_size);
5850  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5851  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5852  goto err;
5853  } else { // found field marker, unescape second field
5854  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5855  if (!tmp)
5856  goto err;
5857  slices = tmp;
5858  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5859  if (!slices[n_slices].buf)
5860  goto err;
5861  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5862  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5863  buf_size3 << 3);
5864  slices[n_slices].mby_start = s->mb_height >> 1;
5865  n_slices1 = n_slices - 1;
5866  n_slices++;
5867  }
5868  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5869  } else {
5870  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5871  }
5872  init_get_bits(&s->gb, buf2, buf_size2*8);
5873  } else
5874  init_get_bits(&s->gb, buf, buf_size*8);
5875 
5876  if (v->res_sprite) {
5877  v->new_sprite = !get_bits1(&s->gb);
5878  v->two_sprites = get_bits1(&s->gb);
5879  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5880  we're using the sprite compositor. These are intentionally kept separate
5881  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5882  the vc1 one for WVP2 */
5883  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5884  if (v->new_sprite) {
5885  // switch AVCodecContext parameters to those of the sprites
5886  avctx->width = avctx->coded_width = v->sprite_width;
5887  avctx->height = avctx->coded_height = v->sprite_height;
5888  } else {
5889  goto image;
5890  }
5891  }
5892  }
5893 
5894  if (s->context_initialized &&
5895  (s->width != avctx->coded_width ||
5896  s->height != avctx->coded_height)) {
5897  ff_vc1_decode_end(avctx);
5898  }
5899 
5900  if (!s->context_initialized) {
5901  if (ff_msmpeg4_decode_init(avctx) < 0)
5902  goto err;
5903  if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5904  ff_MPV_common_end(s);
5905  goto err;
5906  }
5907 
5908  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5909 
5910  if (v->profile == PROFILE_ADVANCED) {
5911  s->h_edge_pos = avctx->coded_width;
5912  s->v_edge_pos = avctx->coded_height;
5913  }
5914  }
5915 
5916  // do parse frame header
5917  v->pic_header_flag = 0;
5918  v->first_pic_header_flag = 1;
5919  if (v->profile < PROFILE_ADVANCED) {
5920  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5921  goto err;
5922  }
5923  } else {
5924  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5925  goto err;
5926  }
5927  }
5928  v->first_pic_header_flag = 0;
5929 
5930  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5931  && s->pict_type != AV_PICTURE_TYPE_I) {
5932  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5933  goto err;
5934  }
5935 
5936  // for skipping the frame
5939 
5940  /* skip B-frames if we don't have reference frames */
5941  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5942  goto err;
5943  }
5944  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5945  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5946  avctx->skip_frame >= AVDISCARD_ALL) {
5947  goto end;
5948  }
5949 
5950  if (s->next_p_frame_damaged) {
5951  if (s->pict_type == AV_PICTURE_TYPE_B)
5952  goto end;
5953  else
5954  s->next_p_frame_damaged = 0;
5955  }
5956 
5957  if (ff_MPV_frame_start(s, avctx) < 0) {
5958  goto err;
5959  }
5960 
5961  // process pulldown flags
5963  // Pulldown flags are only valid when 'broadcast' has been set.
5964  // So ticks_per_frame will be 2
5965  if (v->rff) {
5966  // repeat field
5968  } else if (v->rptfrm) {
5969  // repeat frames
5970  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5971  }
5972 
5975 
5976  if (avctx->hwaccel) {
5977  if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5978  goto err;
5979  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5980  goto err;
5981  if (avctx->hwaccel->end_frame(avctx) < 0)
5982  goto err;
5983  } else {
5984  int header_ret = 0;
5985 
5987 
5988  v->bits = buf_size * 8;
5989  v->end_mb_x = s->mb_width;
5990  if (v->field_mode) {
5991  s->current_picture.f.linesize[0] <<= 1;
5992  s->current_picture.f.linesize[1] <<= 1;
5993  s->current_picture.f.linesize[2] <<= 1;
5994  s->linesize <<= 1;
5995  s->uvlinesize <<= 1;
5996  }
5997  mb_height = s->mb_height >> v->field_mode;
5998 
5999  if (!mb_height) {
6000  av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n");
6001  goto err;
6002  }
6003 
6004  for (i = 0; i <= n_slices; i++) {
6005  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6006  if (v->field_mode <= 0) {
6007  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6008  "picture boundary (%d >= %d)\n", i,
6009  slices[i - 1].mby_start, mb_height);
6010  continue;
6011  }
6012  v->second_field = 1;
6013  v->blocks_off = s->mb_width * s->mb_height << 1;
6014  v->mb_off = s->mb_stride * s->mb_height >> 1;
6015  } else {
6016  v->second_field = 0;
6017  v->blocks_off = 0;
6018  v->mb_off = 0;
6019  }
6020  if (i) {
6021  v->pic_header_flag = 0;
6022  if (v->field_mode && i == n_slices1 + 2) {
6023  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6024  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6025  if (avctx->err_recognition & AV_EF_EXPLODE)
6026  goto err;
6027  continue;
6028  }
6029  } else if (get_bits1(&s->gb)) {
6030  v->pic_header_flag = 1;
6031  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6032  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6033  if (avctx->err_recognition & AV_EF_EXPLODE)
6034  goto err;
6035  continue;
6036  }
6037  }
6038  }
6039  if (header_ret < 0)
6040  continue;
6041  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6042  if (!v->field_mode || v->second_field)
6043  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6044  else
6045  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6047  if (i != n_slices)
6048  s->gb = slices[i].gb;
6049  }
6050  if (v->field_mode) {
6051  v->second_field = 0;
6052  s->current_picture.f.linesize[0] >>= 1;
6053  s->current_picture.f.linesize[1] >>= 1;
6054  s->current_picture.f.linesize[2] >>= 1;
6055  s->linesize >>= 1;
6056  s->uvlinesize >>= 1;
6058  FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6059  FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6060  }
6061  }
6062  av_dlog(s->avctx, "Consumed %i/%i bits\n",
6063  get_bits_count(&s->gb), s->gb.size_in_bits);
6064 // if (get_bits_count(&s->gb) > buf_size * 8)
6065 // return -1;
6066  if (!v->field_mode)
6067  ff_er_frame_end(&s->er);
6068  }
6069 
6070  ff_MPV_frame_end(s);
6071 
6072  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6073 image:
6074  avctx->width = avctx->coded_width = v->output_width;
6075  avctx->height = avctx->coded_height = v->output_height;
6076  if (avctx->skip_frame >= AVDISCARD_NONREF)
6077  goto end;
6078 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6079  if (vc1_decode_sprites(v, &s->gb))
6080  goto err;
6081 #endif
6082  if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
6083  goto err;
6084  *got_frame = 1;
6085  } else {
6086  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6087  if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6088  goto err;
6090  *got_frame = 1;
6091  } else if (s->last_picture_ptr != NULL) {
6092  if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6093  goto err;
6095  *got_frame = 1;
6096  }
6097  }
6098 
6099 end:
6100  av_free(buf2);
6101  for (i = 0; i < n_slices; i++)
6102  av_free(slices[i].buf);
6103  av_free(slices);
6104  return buf_size;
6105 
6106 err:
6107  av_free(buf2);
6108  for (i = 0; i < n_slices; i++)
6109  av_free(slices[i].buf);
6110  av_free(slices);
6111  return -1;
6112 }
6113 
6114 
6115 static const AVProfile profiles[] = {
6116  { FF_PROFILE_VC1_SIMPLE, "Simple" },
6117  { FF_PROFILE_VC1_MAIN, "Main" },
6118  { FF_PROFILE_VC1_COMPLEX, "Complex" },
6119  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6120  { FF_PROFILE_UNKNOWN },
6121 };
6122 
6124 #if CONFIG_DXVA2
6126 #endif
6127 #if CONFIG_VAAPI
6129 #endif
6130 #if CONFIG_VDPAU
6132 #endif
6135 };
6136 
6138  .name = "vc1",
6139  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6140  .type = AVMEDIA_TYPE_VIDEO,
6141  .id = AV_CODEC_ID_VC1,
6142  .priv_data_size = sizeof(VC1Context),
6143  .init = vc1_decode_init,
6146  .flush = ff_mpeg_flush,
6147  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6148  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6149  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6150 };
6151 
#if CONFIG_WMV3_DECODER
/* Registration entry for the WMV3 (Windows Media Video 9) decoder;
 * shares init/decode/close with the VC-1 decoder above.
 * NOTE(review): the .close and .decode initializer lines were lost in
 * extraction; restored from upstream libav vc1dec.c — verify. */
AVCodec ff_wmv3_decoder = {
    .name           = "wmv3",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .flush          = ff_mpeg_flush,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
6168 
#if CONFIG_WMV3IMAGE_DECODER
/* Registration entry for WMVP (Windows Media Video 9 Image): the sprite
 * compositor variant, software output only (ff_pixfmt_list_420, no hwaccel)
 * and no frame delay, hence the sprite-specific flush callback.
 * NOTE(review): the .close and .decode initializer lines were lost in
 * extraction; restored from upstream libav vc1dec.c — verify. */
AVCodec ff_wmv3image_decoder = {
    .name           = "wmv3image",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .pix_fmts       = ff_pixfmt_list_420
};
#endif
6184 
#if CONFIG_VC1IMAGE_DECODER
/* Registration entry for WVP2 (Windows Media Video 9 Image v2): like
 * wmv3image, a sprite compositor with software-only output and the
 * sprite-specific flush callback.
 * NOTE(review): the .close and .decode initializer lines were lost in
 * extraction; restored from upstream libav vc1dec.c — verify. */
AVCodec ff_vc1image_decoder = {
    .name           = "vc1image",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .pix_fmts       = ff_pixfmt_list_420
};
#endif
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
Definition: vc1dec.c:262
in the bitstream is reported as 00b
Definition: vc1.h:173
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
Definition: vc1data.c:34
op_pixels_func avg_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:60
#define VC1_TTBLK_VLC_BITS
Definition: vc1data.c:126
void(* vc1_h_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:46
const struct AVCodec * codec
Definition: avcodec.h:1063
int topleft_blk_idx
Definition: vc1.h:393
#define MB_TYPE_SKIP
Definition: avcodec.h:813
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:62
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2628
discard all frames except keyframes
Definition: avcodec.h:545
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2372
#define VC1_IF_MBMODE_VLC_BITS
Definition: vc1data.c:145
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2903
BI type.
Definition: avutil.h:259
int p_frame_skipped
Definition: vc1.h:388
Imode
Imode types.
Definition: vc1.c:54
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
Definition: vc1dec.c:3213
void(* put_signed_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:129
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:54
The VC1 Context.
Definition: vc1.h:182
int size
int esc3_level_length
Definition: mpegvideo.h:624
This structure describes decoded (raw) audio or video data.
Definition: frame.h:107
VLC ff_vc1_ttblk_vlc[3]
Definition: vc1data.c:127
#define VC1_ICBPCY_VLC_BITS
Definition: vc1data.c:120
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
Definition: vc1dec.c:3526
int k_x
Number of bits for MVs (depends on MV range)
Definition: vc1.h:243
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:38
int reffield
if numref = 0 (1 reference) then reffield decides which
Definition: vc1.h:366
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:339
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:374
uint8_t * mv_f_base
Definition: vc1.h:358
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:39
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1227
int mv_type_is_raw
mv type mb plane is not coded
Definition: vc1.h:297
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define B
Definition: dsputil.c:1836
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
Definition: vc1dec.c:1372
uint8_t dmvrange
Frame decoding info for interlaced picture.
Definition: vc1.h:343
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:240
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:129
#define ER_MB_END
#define AC_VLC_BITS
Definition: intrax8.c:36
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1341
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
Definition: vc1acdata.h:34
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:340
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1302
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:380
static const int vc1_last_decode_table[AC_MODES]
Definition: vc1acdata.h:30
int tt_index
Index for Transform Type tables (to decode TTMB)
Definition: vc1.h:293
static void vc1_decode_p_blocks(VC1Context *v)
Definition: vc1dec.c:5018
static void vc1_put_signed_blocks_clamped(VC1Context *v)
Definition: vc1dec.c:88
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:304
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
Definition: vc1dec.c:1137
#define VC1_2REF_MVDATA_VLC_BITS
Definition: vc1data.c:140
void ff_er_frame_end(ERContext *s)
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V) ...
Definition: vc1dec.c:977
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:70
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:287
int next_use_ic
Definition: vc1.h:305
int size
Definition: avcodec.h:974
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
Definition: vc1.h:310
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:127
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
Definition: msmpeg4dec.c:281
void ff_print_debug_info(MpegEncContext *s, Picture *p)
Print debugging info for the given picture.
Definition: mpegvideo.c:1864
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1247
int frfd
Definition: vc1.h:375
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
Definition: vc1.h:247
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
Definition: vc1dec.c:3003
static void vc1_decode_b_blocks(VC1Context *v)
Definition: vc1dec.c:5094
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:39
mpegvideo header.
int top_blk_idx
Definition: vc1.h:393
IntraX8Context x8
Definition: vc1.h:184
VLC * imv_vlc
Definition: vc1.h:349
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t * mb_type_base
Definition: vc1.h:272
discard all
Definition: avcodec.h:546
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
Definition: vc1.h:358
int sprite_height
Definition: vc1.h:384
uint8_t run
Definition: svq3.c:142
int last_use_ic
Definition: vc1.h:305
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:230
int end_mb_x
Horizontal macroblock limit (used only by mss2)
Definition: vc1.h:401
Definition: vf_drawbox.c:37
int profile
profile
Definition: avcodec.h:2596
AVCodec.
Definition: avcodec.h:2755
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:51
void ff_vc1_decode_blocks(VC1Context *v)
Definition: vc1dec.c:5179
int block_wrap[6]
Definition: mpegvideo.h:486
#define FFALIGN(x, a)
Definition: common.h:62
uint8_t rff
Definition: vc1.h:319
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
Definition: vc1dec.c:2318
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
Definition: vc1dec.c:2526
enum AVDiscard skip_frame
Definition: avcodec.h:2701
int bits
Definition: vc1.h:188
int range_x
Definition: vc1.h:245
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:122
static void vc1_apply_p_loop_filter(VC1Context *v)
Definition: vc1dec.c:3499
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
Definition: vc1data.c:1121
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2417
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2365
int esc3_run_length
Definition: mpegvideo.h:625
int refdist
distance of the current picture from reference
Definition: vc1.h:363
uint8_t * acpred_plane
AC prediction flags bitplane.
Definition: vc1.h:329
VC-1 tables.
int bi_type
Definition: vc1.h:389
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:269
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define MB_TYPE_INTRA
Definition: mpegvideo.h:157
static const AVProfile profiles[]
Definition: vc1dec.c:6115
uint8_t bits
Definition: crc.c:216
uint8_t
static int vc1_decode_b_mb_intfr(VC1Context *v)
Decode one B-frame MB (in interlaced frame B picture)
Definition: vc1dec.c:4400
#define av_cold
Definition: attributes.h:66
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:43
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:49
int first_pic_header_flag
Definition: vc1.h:376
uint16_t * hrd_rate
Definition: vc1.h:334
av_cold int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
Definition: vc1.c:1567
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
Definition: vc1dsp.h:72
void(* vc1_inv_trans_8x8)(int16_t *b)
Definition: vc1dsp.h:37
#define DC_VLC_BITS
Definition: vc1dec.c:46
int left_blk_idx
Definition: vc1.h:393
#define AV_RB32
Definition: intreadwrite.h:130
int interlace
Progressive/interlaced (RPTFTM syntax element)
Definition: vc1.h:210
int y_ac_table_index
Luma index from AC2FRM element.
Definition: vc1.h:263
#define b
Definition: input.c:52
int second_field
Definition: vc1.h:362
#define ER_MB_ERROR
int n_allocated_blks
Definition: vc1.h:393
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:251
int c_ac_table_index
AC coding set indexes.
Definition: vc1.h:262
const int ff_vc1_ac_sizes[AC_MODES]
Definition: vc1data.c:1133
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:174
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:42
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1162
int ttfrm
Transform type info present at frame level.
Definition: vc1.h:265
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:366
int codingset2
index of current table set from 11.8 to use for chroma block decoding
Definition: vc1.h:269
int16_t bfraction
Relative position % anchors=> how to scale MVs.
Definition: vc1.h:280
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:711
int16_t((* luma_mv)[2]
Definition: vc1.h:396
void(* add_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:130
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
Definition: vc1.h:227
const char data[16]
Definition: mxf.c:66
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
Definition: vc1dec.c:194
MSMPEG4 data tables.
uint8_t * data
Definition: avcodec.h:973
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:194
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:52
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
Definition: vc1dec.c:1244
uint8_t * forward_mb_plane
bitplane for "forward" MBs
Definition: vc1.h:296
uint8_t last_luty[2][256]
Definition: vc1.h:301
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:300
enum AVPixelFormat ff_pixfmt_list_420[]
Definition: mpegvideo.c:107
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
Definition: vc1.c:294
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
Definition: vc1dec.c:2088
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1843
int fieldtx_is_raw
Definition: vc1.h:355
uint8_t * over_flags_plane
Overflags bitplane.
Definition: vc1.h:331
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
Definition: vc1dec.c:4095
uint8_t fourmvbp
Definition: vc1.h:353
const int8_t ff_vc1_adv_interlaced_4x8_zz[32]
Definition: vc1data.c:1065
int range_y
MV range.
Definition: vc1.h:245
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:832
uint8_t last_lutuv[2][256]
lookup tables used for intensity compensation
Definition: vc1.h:301
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: dsputil.h:184
uint8_t ttmbf
Transform type flag.
Definition: vc1.h:266
Definition: vc1.h:143
int k_y
Number of bits for MVs (depends on MV range)
Definition: vc1.h:244
#define transpose(x)
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:555
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:373
uint8_t twomvbp
Definition: vc1.h:352
int dmb_is_raw
direct mb plane is raw
Definition: vc1.h:298
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
Definition: vc1dec.c:2384
int16_t(* block)[6][64]
Definition: vc1.h:392
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:41
#define VC1_CBPCY_P_VLC_BITS
Definition: vc1data.c:118
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:123
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1332
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:186
int overlap
overlapped transforms in use
Definition: vc1.h:234
in the bitstream is reported as 11b
Definition: vc1.h:175
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:740
const int8_t ff_vc1_simple_progressive_4x4_zz[16]
Definition: vc1data.c:1022
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:40
#define AVERROR(e)
Definition: error.h:43
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: dsputil.h:185
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:55
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
Definition: vc1dec.c:342
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:142
ERContext er
Definition: mpegvideo.h:719
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
Definition: vc1dec.c:5583
#define GET_MQUANT()
Get macroblock-level quantizer scale.
Definition: vc1dec.c:1095
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:144
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:387
uint8_t * mv_f_next_base
Definition: vc1.h:359
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1142
VLC * mbmode_vlc
Definition: vc1.h:348
#define wrap(func)
Definition: neontest.h:62
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:148
const char * name
Name of the codec implementation.
Definition: avcodec.h:2762
#define IS_MARKER(state, i, buf, buf_size)
Definition: dca_parser.c:37
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:586
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:591
GetBitContext gb
Definition: mpegvideo.h:632
#define FFMAX(a, b)
Definition: common.h:55
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:53
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
Definition: vc1dec.c:1277
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra, int dir)
Predict and set motion vector for interlaced frame picture MBs.
Definition: vc1dec.c:1673
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
Definition: vc1.h:249
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:200
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:44
int off
Definition: dsputil_bfin.c:29
int a_avail
Definition: vc1.h:271
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
Definition: vc1.h:357
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
Definition: vc1dec.c:2562
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2433
const int8_t ff_vc1_adv_interlaced_4x4_zz[16]
Definition: vc1data.c:1076
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:838
#define B_FRACTION_DEN
Definition: vc1data.h:99
VLC ff_vc1_ttmb_vlc[3]
Definition: vc1data.c:115
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:509
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
Definition: vc1dec.c:1393
int cur_field_type
0: top, 1: bottom
Definition: vc1.h:370
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
Definition: msmpeg4data.c:1825
VLC * twomvbp_vlc
Definition: vc1.h:350
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
Definition: vc1.h:250
AVCodec ff_vc1_decoder
Definition: vc1dec.c:6137
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2776
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
Definition: vc1dec.c:1314
void ff_mpeg_er_frame_start(MpegEncContext *s)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:168
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
Definition: vc1dec.c:5715
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2390
int x8_type
Definition: vc1.h:390
#define FFMIN(a, b)
Definition: common.h:57
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
Definition: vc1dec.c:5564
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:548
uint8_t * blk_mv_type_base
Definition: vc1.h:357
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
Definition: intrax8.c:694
int field_mode
1 for interlaced field pictures
Definition: vc1.h:360
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
Definition: intrax8.c:712
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
Definition: vc1dec.c:2791
int width
picture width / height.
Definition: avcodec.h:1217
int8_t zzi_8x8[64]
Definition: vc1.h:356
#define VC1_SUBBLKPAT_VLC_BITS
Definition: vc1data.c:128
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:382
void(* clear_blocks)(int16_t *blocks)
Definition: dsputil.h:143
uint8_t mv_mode
Frame decoding info for all profiles.
Definition: vc1.h:241
#define FF_PROFILE_VC1_MAIN
Definition: avcodec.h:2642
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:107
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:370
int fourmvswitch
Definition: vc1.h:344
int mb_off
Definition: vc1.h:372
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2597
static void vc1_decode_skip_blocks(VC1Context *v)
Definition: vc1dec.c:5157
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3375
int size_in_bits
Definition: get_bits.h:56
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
Definition: vc1dec.c:5489
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:623
static const int offset_table[6]
Definition: vc1dec.c:3373
static int median4(int a, int b, int c, int d)
Definition: vc1dec.c:548
#define FFABS(a)
Definition: common.h:52
int level
level
Definition: avcodec.h:2679
static int vc1_decode_p_mb_intfr(VC1Context *v)
Definition: vc1dec.c:3765
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:522
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:314
MotionEstContext me
Definition: mpegvideo.h:457
#define AV_EF_EXPLODE
Definition: avcodec.h:2401
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
Definition: vc1dec.c:1175
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
Definition: vc1dec.c:3439
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:28
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
Definition: vc1data.c:1097
#define FF_PROFILE_VC1_SIMPLE
Definition: avcodec.h:2641
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1628
uint32_t * cbp
Definition: vc1.h:394
int left_blk_sh
Definition: vc1.h:248
int16_t(* luma_mv_base)[2]
Definition: vc1.h:396
uint8_t * fieldtx_plane
Definition: vc1.h:354
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:485
int * ttblk_base
Definition: vc1.h:267
VLC * cbpcy_vlc
CBPCY VLC table.
Definition: vc1.h:292
static int decode210(GetBitContext *gb)
Definition: get_bits.h:547
Definition: vf_drawbox.c:37
if(ac->has_optimized_func)
static const float pred[4]
Definition: siprdata.h:259
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
Definition: vc1.h:385
static const int8_t mv[256][2]
Definition: 4xm.c:72
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:64
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
Definition: vc1dec.c:167
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
Definition: vc1dec.c:1899
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:620
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1283
static const int offset_table1[9]
Definition: vc1dec.c:50
NULL
Definition: eval.c:55
static int width
Definition: utils.c:156
#define AV_LOG_INFO
Standard information.
Definition: log.h:134
int res_sprite
Simple/Main Profile sequence header.
Definition: vc1.h:192
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:50
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
Definition: vc1.h:248
Libavcodec external API header.
void(* put_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
Definition: dsputil.h:128
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:306
enum AVCodecID codec_id
Definition: avcodec.h:1065
int c_avail
Definition: vc1.h:271
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:125
const int8_t ff_vc1_adv_interlaced_8x8_zz[64]
Definition: vc1data.c:1047
static const uint8_t vc1_delta_run_table[AC_MODES][57]
Definition: vc1acdata.h:295
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
Do motion compensation for 4-MV macroblock - luminance block.
Definition: vc1dec.c:561
uint32_t * cbp_base
Definition: vc1.h:394
main external API structure.
Definition: avcodec.h:1054
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:489
uint8_t * is_intra
Definition: vc1.h:395
static int vc1_decode_p_mb_intfi(VC1Context *v)
Definition: vc1dec.c:3976
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:378
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:268
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
Definition: vc1dec.c:4247
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:575
static void init_block_index(VC1Context *v)
Definition: vc1dec.c:75
int curr_use_ic
Definition: vc1.h:305
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
Definition: vc1dsp.h:69
int extradata_size
Definition: avcodec.h:1163
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
Definition: vc1data.c:53
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:271
int sprite_width
Definition: vc1.h:384
int fmb_is_raw
forward mb plane is raw
Definition: vc1.h:299
uint8_t * is_intra_base
Definition: vc1.h:395
int coded_height
Definition: avcodec.h:1227
Definition: vc1.h:139
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:263
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1294
#define MB_INTRA_VLC_BITS
Definition: vc1dec.c:45
int index
Definition: gxfenc.c:72
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
Definition: vc1dec.c:750
op_pixels_func put_no_rnd_pixels_tab[2][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:375
int context_initialized
Definition: mpegvideo.h:295
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
Definition: vc1data.c:124
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value prediction dir: left=0, top=1.
Definition: vc1dec.c:2449
#define MB_TYPE_16x16
Definition: avcodec.h:805
#define mid_pred
Definition: mathops.h:94
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:307
int dim
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:411
int skip_is_raw
skip mb plane is not coded
Definition: vc1.h:300
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
Definition: intrax8.c:727
#define FF_PROFILE_VC1_COMPLEX
Definition: avcodec.h:2643
uint8_t next_lutuv[2][256]
lookup tables used for intensity compensation
Definition: vc1.h:303
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:304
int ref_field_type[2]
forward and backward reference field type (top or bottom)
Definition: vc1.h:371
uint8_t * direct_mb_plane
bitplane for "direct" MBs
Definition: vc1.h:295
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
Definition: vc1acdata.h:339
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:138
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:399
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
Definition: vc1dec.c:5750
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
Definition: vc1.h:294
int numref
number of past field pictures used as reference
Definition: vc1.h:364
const int32_t ff_vc1_dqscale[63]
Definition: vc1data.c:1085
int blocks_off
Definition: vc1.h:372
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
Definition: vc1dsp.h:63
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:273
static const uint16_t scale[4]
uint8_t tff
Definition: vc1.h:319
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:113
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:375
uint8_t level
Definition: svq3.c:143
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:252
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:451
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:302
MpegEncContext s
Definition: vc1.h:183
int height
Definition: gxfenc.c:72
in the bitstream is reported as 10b
Definition: vc1.h:174
MpegEncContext.
Definition: mpegvideo.h:264
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:369
int8_t * qscale_table
Definition: mpegvideo.h:104
struct AVCodecContext * avctx
Definition: mpegvideo.h:266
int cur_blk_idx
Definition: vc1.h:393
uint8_t pq
Definition: vc1.h:246
static const int offset_table2[9]
Definition: vc1dec.c:51
discard all non reference
Definition: avcodec.h:543
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
Definition: vc1dec.c:4744
int pqindex
raw pqindex used in coding set selection
Definition: vc1.h:270
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
Definition: vc1acdata.h:246
#define VC1_1REF_MVDATA_VLC_BITS
Definition: vc1data.c:138
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:65
Y , 8bpp.
Definition: pixfmt.h:73
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:301
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
Definition: vc1dec.c:2067
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:670
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:117
static enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[]
Definition: vc1dec.c:6123
#define VC1_TTMB_VLC_BITS
Definition: vc1data.c:114
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:33
uint8_t * dest[3]
Definition: mpegvideo.h:487
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1792
static const int size_table[6]
Definition: vc1dec.c:3372
int output_width
Definition: vc1.h:384
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Definition: vc1.h:316
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:348
uint8_t dquantfrm
pquant parameters
Definition: vc1.h:253
uint8_t next_luty[2][256]
Definition: vc1.h:303
void(* clear_block)(int16_t *block)
Definition: dsputil.h:142
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:368
Bi-dir predicted.
Definition: avutil.h:255
AVProfile.
Definition: avcodec.h:2743
AVHWAccel * ff_find_hwaccel(AVCodecContext *avctx)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
Definition: utils.c:2177
int res_fasttx
reserved, always 1
Definition: vc1.h:196
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2687
int pic_header_flag
Definition: vc1.h:377
int * ttblk
Transform type at the block level.
Definition: vc1.h:267
static av_cold int init(AVCodecParserContext *s)
Definition: h264_parser.c:498
VLC ff_vc1_ac_coeff_table[8]
Definition: vc1data.c:143
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
Definition: vc1dsp.h:47
uint8_t condover
Definition: vc1.h:333
void * priv_data
Definition: avcodec.h:1090
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Definition: vc1.c:525
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
Definition: vc1data.c:130
#define FF_PROFILE_VC1_ADVANCED
Definition: avcodec.h:2644
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
Definition: vc1.h:291
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
Definition: h264chroma.h:27
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
Definition: vc1dec.c:1419
int rnd
rounding control
Definition: vc1.h:306
VideoDSPContext vdsp
Definition: mpegvideo.h:413
Definition: vc1.h:142
AVFrame * sprite_output_frame
Definition: vc1.h:383
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1436
int acpred_is_raw
Definition: vc1.h:330
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
Definition: vc1dsp.h:68
const int8_t ff_vc1_adv_interlaced_8x4_zz[32]
Definition: vc1data.c:1058
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:677
op_pixels_func avg_no_rnd_pixels_tab[4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:92
uint8_t rptfrm
Definition: vc1.h:319
uint8_t(* curr_luty)[256]
Definition: vc1.h:304
static int decode012(GetBitContext *gb)
Definition: get_bits.h:537
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:66
int bmvtype
Definition: vc1.h:374
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:354
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:163
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
Definition: vc1dec.c:805
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
Definition: vc1dec.c:4884
H264ChromaContext h264chroma
Definition: vc1.h:185
int overflg_is_raw
Definition: vc1.h:332
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
Definition: vc1.h:424
struct AVFrame f
Definition: mpegvideo.h:100
Definition: vc1.h:136
int level
Advanced Profile.
Definition: vc1.h:206
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:283
int brfd
reference frame distance (forward or backward)
Definition: vc1.h:375
uint32_t * mb_type
Definition: mpegvideo.h:110
VLC ff_msmp4_mb_i_vlc
Definition: msmpeg4data.c:38
#define av_always_inline
Definition: attributes.h:40
uint8_t mv_mode2
Secondary MV coding mode (B frames)
Definition: vc1.h:242
int new_sprite
Frame decoding info for sprite modes.
Definition: vc1.h:381
uint8_t * mv_f_next[2]
Definition: vc1.h:359
#define FFSWAP(type, a, b)
Definition: common.h:60
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right)
Definition: vc1dsp.h:48
int two_sprites
Definition: vc1.h:382
int codingset
index of current table set from 11.8 to use for luma block decoding
Definition: vc1.h:268
uint8_t * mb_type[3]
Definition: vc1.h:272
uint16_t * hrd_buffer
Definition: vc1.h:334
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2916
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2927
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
Definition: vc1data.c:132
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:950
void(* vc1_v_overlap)(uint8_t *src, int stride)
Definition: vc1dsp.h:45
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
Definition: vc1dsp.c:867
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:52
VLC * fourmvbp_vlc
Definition: vc1.h:351
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
int dc_table_index
Definition: mpegvideo.h:617
VLC ff_msmp4_dc_luma_vlc[2]
Definition: msmpeg4data.c:39
VLC ff_vc1_subblkpat_vlc[3]
Definition: vc1data.c:129
#define inc_blk_idx(idx)
uint8_t halfpq
Uniform quant over image and qp+.5.
Definition: vc1.h:281
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
Definition: vc1dec.c:2105
static const uint8_t vc1_delta_level_table[AC_MODES][31]
Definition: vc1acdata.h:203
VC1DSPContext vc1dsp
Definition: vc1.h:186
Predicted.
Definition: avutil.h:254
uint8_t((* curr_lutuv)[256]
Definition: vc1.h:304
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
Definition: vc1.h:410
int output_height
Definition: vc1.h:384
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, int16_t *block)
Definition: vc1dsp.h:43
VLC ff_msmp4_dc_chroma_vlc[2]
Definition: msmpeg4data.c:40
op_pixels_func put_vc1_mspel_pixels_tab[16]
Definition: vc1dsp.h:59
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
Definition: vc1dsp.h:67
HpelDSPContext hdsp
Definition: mpegvideo.h:412
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
Definition: vc1dsp.h:54
static int16_t block[64]
Definition: dct-test.c:170