path: libavcodec/vp9prob.c
/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dec.h"

/*
 * Merge a stored probability with the empirical probability observed while
 * decoding the previous frame, i.e. compute
 * (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8, where the
 * update factor is scaled down when fewer than max_count symbols were seen.
 */
static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
                                        int max_count, int update_factor)
{
    unsigned ct = ct0 + ct1, p2, p1;

    if (!ct)
        return;

    // fewer observed symbols -> weaker update
    update_factor = FASTDIV(update_factor * FFMIN(ct, max_count), max_count);
    p1 = *p;
    // empirical probability of the 0-branch, rounded, clipped to [1, 255]
    p2 = ((((int64_t) ct0) << 8) + (ct >> 1)) / ct;
    p2 = av_clip(p2, 1, 255);

    // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
    *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
}
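
/*
 * A worked example of adapt_prob() with illustrative numbers (not taken
 * from the source): let ct0 = 20, ct1 = 10, max_count = 24 and
 * update_factor = 128. ct = 30 is clamped to 24, so the factor keeps its
 * full value of 128. The empirical probability of the 0-branch is
 * p2 = (20 * 256 + 15) / 30 = 171, and the prior p1 = 128 moves halfway
 * towards it: 128 + (((171 - 128) * 128 + 128) >> 8) = 150.
 */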

void ff_vp9_adapt_probs(VP9Context *s)
{
    int i, j, k, l, m;
    ProbContext *p = &s->prob_ctx[s->s.h.framectxid].p;
    // coefficient probs adapt faster (128) on the first inter frame after a
    // keyframe; otherwise, and on key/intra-only frames, the factor is 112
    int uf = (s->s.h.keyframe || s->s.h.intraonly || !s->last_keyframe) ? 112 : 128;

    // coefficients
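    // Each (tx size, plane, intra/inter, band, context) node keeps three
    // "model" probabilities: pp[0] is P(more coefficients follow), adapted
    // from the EOB counts, pp[1] is P(coefficient is zero), and pp[2]
    // separates magnitude one from larger values. All three use a
    // saturation count of 24 and the frame-type dependent factor uf,
    // unlike the mode probs below (20, 128).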
    for (i = 0; i < 4; i++)
        for (j = 0; j < 2; j++)
            for (k = 0; k < 2; k++)
                for (l = 0; l < 6; l++)
                    for (m = 0; m < 6; m++) {
                        uint8_t *pp = s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m];
                        unsigned *e = s->counts.eob[i][j][k][l][m];
                        unsigned *c = s->counts.coef[i][j][k][l][m];

                        if (l == 0 && m >= 3) // dc only has 3 pt
                            break;

                        adapt_prob(&pp[0], e[0], e[1], 24, uf);
                        adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
                        adapt_prob(&pp[2], c[1], c[2], 24, uf);
                    }

    if (s->s.h.keyframe || s->s.h.intraonly) {
        // for key/intra-only frames, store the current skip and tx size
        // probabilities as-is and skip the inter-only adaptation below
        memcpy(p->skip,  s->prob.p.skip,  sizeof(p->skip));
        memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
        memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
        memcpy(p->tx8p,  s->prob.p.tx8p,  sizeof(p->tx8p));
        return;
    }

    // skip flag
    for (i = 0; i < 3; i++)
        adapt_prob(&p->skip[i], s->counts.skip[i][0],
                   s->counts.skip[i][1], 20, 128);

    // intra/inter flag
    for (i = 0; i < 4; i++)
        adapt_prob(&p->intra[i], s->counts.intra[i][0],
                   s->counts.intra[i][1], 20, 128);

    // comppred flag
    if (s->s.h.comppredmode == PRED_SWITCHABLE) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp[i], s->counts.comp[i][0],
                       s->counts.comp[i][1], 20, 128);
    }

    // reference frames
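    // Each set is only adapted when the header allows the corresponding
    // prediction type: comp_ref probs unless every block uses a single
    // reference, single_ref probs unless every block is compound-predicted.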
    if (s->s.h.comppredmode != PRED_SINGLEREF) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
                       s->counts.comp_ref[i][1], 20, 128);
    }

    if (s->s.h.comppredmode != PRED_COMPREF) {
        for (i = 0; i < 5; i++) {
            uint8_t *pp = p->single_ref[i];
            unsigned (*c)[2] = s->counts.single_ref[i];

            adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
            adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
        }
    }

    // block partitioning
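    // 3 tree probs per (block level, context) pair: pp[0] splits off
    // PARTITION_NONE, pp[1] PARTITION_H, and pp[2] decides PARTITION_V
    // vs. PARTITION_SPLIT, matching the order of the c[0]..c[3] counts.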
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++) {
            uint8_t *pp = p->partition[i][j];
            unsigned *c = s->counts.partition[i][j];

            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }

    // tx size
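    // Tx size selection is a chain of binary decisions: a single prob where
    // at most 8x8 is possible (4x4 vs. 8x8), two probs for up to 16x16, and
    // three for up to 32x32.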
    if (s->s.h.txfmmode == TX_SWITCHABLE) {
        for (i = 0; i < 2; i++) {
            unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];

            adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0],
                       s->counts.tx8p[i][1], 20, 128);
            adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
            adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
            adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
        }
    }

    // interpolation filter
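    // Two tree probs select among the three switchable filters
    // (regular, smooth, sharp) in each of the four contexts.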
    if (s->s.h.filtermode == FILTER_SWITCHABLE) {
        for (i = 0; i < 4; i++) {
            uint8_t *pp = p->filter[i];
            unsigned *c = s->counts.filter[i];

            adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
            adapt_prob(&pp[1], c[1], c[2], 20, 128);
        }
    }

    // inter modes
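    // The counts are stored in enum order (NEARESTMV, NEARMV, ZEROMV,
    // NEWMV), but the tree splits off ZEROMV (c[2]) first, then NEARESTMV
    // (c[0]), then NEARMV vs. NEWMV, hence the shuffled indices below.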
    for (i = 0; i < 7; i++) {
        uint8_t *pp = p->mv_mode[i];
        unsigned *c = s->counts.mv_mode[i];

        adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
        adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
        adapt_prob(&pp[2], c[1], c[3], 20, 128);
    }

    // mv joints
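    // MV joint tree: pp[0] splits off "both components zero", pp[1] then
    // "horizontal nonzero, vertical zero", and pp[2] separates "vertical
    // only" from "both nonzero".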
    {
        uint8_t *pp = p->mv_joint;
        unsigned *c = s->counts.mv_joint;

        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);
    }

    // mv components
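    // Each component (row and column) adapts, in turn: the sign bit, the
    // 10-node magnitude class tree, the class0 flag, the integer magnitude
    // bits, the fractional-pel trees and, when coded, the high-precision
    // bits.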
    for (i = 0; i < 2; i++) {
        uint8_t *pp;
        unsigned *c, (*c2)[2], sum;

        adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
                   s->counts.mv_comp[i].sign[1], 20, 128);

        // magnitude class tree; "sum" tracks the counts still below the
        // node being adapted
        pp = p->mv_comp[i].classes;
        c = s->counts.mv_comp[i].classes;
        sum = c[1] + c[2] + c[3] + c[4] + c[5] +
              c[6] + c[7] + c[8] + c[9] + c[10];
        adapt_prob(&pp[0], c[0], sum, 20, 128);
        sum -= c[1];
        adapt_prob(&pp[1], c[1], sum, 20, 128);
        sum -= c[2] + c[3];
        adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
        adapt_prob(&pp[3], c[2], c[3], 20, 128);
        sum -= c[4] + c[5];
        adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
        adapt_prob(&pp[5], c[4], c[5], 20, 128);
        sum -= c[6];
        adapt_prob(&pp[6], c[6], sum, 20, 128);
        adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
        adapt_prob(&pp[8], c[7], c[8], 20, 128);
        adapt_prob(&pp[9], c[9], c[10], 20, 128);

        adapt_prob(&p->mv_comp[i].class0, s->counts.mv_comp[i].class0[0],
                   s->counts.mv_comp[i].class0[1], 20, 128);
        pp = p->mv_comp[i].bits;
        c2 = s->counts.mv_comp[i].bits;
        for (j = 0; j < 10; j++)
            adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);

        // fractional-pel probs, separately for class 0 and the other classes
        for (j = 0; j < 2; j++) {
            pp = p->mv_comp[i].class0_fp[j];
            c = s->counts.mv_comp[i].class0_fp[j];
            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }
        pp = p->mv_comp[i].fp;
        c = s->counts.mv_comp[i].fp;
        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);

        // 1/8-pel bits are only coded (and thus only adapted) when
        // high-precision MVs are enabled
        if (s->s.h.highprecisionmvs) {
            adapt_prob(&p->mv_comp[i].class0_hp,
                       s->counts.mv_comp[i].class0_hp[0],
                       s->counts.mv_comp[i].class0_hp[1], 20, 128);
            adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
                       s->counts.mv_comp[i].hp[1], 20, 128);
        }
    }

    // y intra modes
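    // The 9 node probs walk the intra mode tree: DC, TM and V split off
    // first (note that sum leaves out c[2] == c[DC_PRED] from the start),
    // then the {H, D135, D117} group, then the remaining diagonal modes.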
    for (i = 0; i < 4; i++) {
        uint8_t *pp = p->y_mode[i];
        unsigned *c = s->counts.y_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
                   20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }

    // uv intra modes
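    // Same tree as the luma modes above, but with one context per coded
    // luma mode (10 contexts) rather than per block-size group.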
    for (i = 0; i < 10; i++) {
        uint8_t *pp = p->uv_mode[i];
        unsigned *c = s->counts.uv_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
                   20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }
}