Line data Source code
1 : /*====================================================================================
2 : EVS Codec 3GPP TS26.452 Aug 12, 2021. Version 16.3.0
3 : ====================================================================================*/
4 :
5 : #include <stdint.h>
6 : #include "options.h" /* Compilation switches */
7 : #include "cnst.h" /* Common constants */
8 : #include "prot_fx.h" /* Function prototypes */
9 : #include "rom_com.h" /* Static table prototypes */
10 : #include "prot_fx_enc.h"
11 :
12 :
13 : /*---------------------------------------------------------------------*
14 : * Local function prototype
15 : *---------------------------------------------------------------------*/
16 :
17 : static void find_cn_fx( const Word16 xn[], const Word16 Ap[], const Word16 *p_Aq, Word16 cn[] );
18 :
19 :
20 : /*-----------------------------------------------------------------*
21 : * Transform domain contribution encoding
22 : *-----------------------------------------------------------------*/
23 :
24 : #define Q_MINUS 4
25 3370 : void transf_cdbk_enc_fx(
26 : Encoder_State *st_fx, /* i/o: encoder state structure */
27 : const Word16 harm_flag_acelp, /* i : harmonic flag for higher rates ACELP Q0*/
28 : const Word16 i_subfr, /* i : subframe index Q0*/
29 : Word16 cn[], /* i/o: target vector in residual domain Q_new*/
30 : Word16 exc[], /* i/o: pointer to excitation signal frame Q_new*/
31 : const Word16 *p_Aq, /* i : 12k8 Lp coefficient Q12*/
32 : const Word16 Ap[], /* i : weighted LP filter coefficients Q12*/
33 : const Word16 h1[], /* i : weighted filter input response Q15*/
34 : Word16 xn[], /* i/o: target vector Q_new + shift -1*/
35 : Word16 xn2[], /* i/o: target vector for innovation search Q_new + shift -1*/
36 : Word16 y1[], /* i/o: zero-memory filtered adaptive excitation Q_new + shift -1*/
37 : const Word16 y2[], /* i : zero-memory filtered innovative excitation Q9*/
38 : const Word16 Es_pred, /* i : predicted scaled innovation energy Q8*/
39 : Word16 *gain_pit, /* i/o: adaptive excitation gain Q14*/
40 : const Word32 gain_code, /* i : innovative excitation gain Q16*/
41 : Word16 g_corr[], /* o : ACELP correlation values Q15*/
42 : const Word16 clip_gain, /* i : adaptive gain clipping flag Q0*/
43 : Word16 *gain_preQ, /* o : prequantizer excitation gain Q2*/
44 : Word16 code_preQ[], /* o : prequantizer excitation Q_AVQ_OUT_DEC*/
45 : Word16 *unbits, /* o : number of AVQ unused bits Q0*/
46 : const Word16 Q_new, /* i : Current frame scaling */
47 : const Word16 shift /* i : shifting applied to y1, xn,... */
48 : )
49 : {
50 : Word16 i, index, nBits, Nsv, Es_pred_loc;
51 : Word16 x_in[L_SUBFR], x_tran[L_SUBFR], gcode16, stmp;
52 : Word16 e_corr, m_corr, e_ener, m_ener, m_den, e_den;
53 : Word16 x_norm[L_SUBFR + L_SUBFR / WIDTH_BAND];
54 : Word32 L_corr, L_ener, Ltmp, Ltmp1;
55 : Word16 nq[L_SUBFR / WIDTH_BAND];
56 : Word32 out32[L_SUBFR];
57 : Word16 Qdct;
58 : Word16 avq_bit_sFlag;
59 : Word16 trgtSvPos;
60 : #ifdef BASOP_NOGLOB_DECLARE_LOCAL
61 3370 : Flag Overflow = 0;
62 3370 : move32();
63 : #endif
64 :
65 3370 : avq_bit_sFlag = 0;
66 3370 : move16();
67 3370 : if ( st_fx->element_mode > EVS_MONO )
68 : {
69 0 : avq_bit_sFlag = 1;
70 0 : move16();
71 : }
72 :
73 : /*--------------------------------------------------------------*
74 : * Set bit-allocation
75 : *--------------------------------------------------------------*/
76 :
77 3370 : Nsv = 8;
78 3370 : move16();
79 3370 : nBits = st_fx->acelp_cfg.AVQ_cdk_bits[shr( i_subfr, 6 )]; /* Q0 */
80 3370 : move16();
81 :
82 : /* increase # of AVQ allocated bits by unused bits from the previous subframe */
83 3370 : nBits = add( nBits, *unbits );
84 :
85 : /*--------------------------------------------------------------*
86 : * Compute/Update target
87 : * For inactive frame, find target in residual domain
88 : * Deemphasis
89 : *--------------------------------------------------------------*/
90 :
91 3370 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
92 : {
93 0 : gcode16 = round_fx_sat( L_shl_sat( gain_code, Q_new ) );
94 0 : FOR( i = 0; i < L_SUBFR; i++ )
95 : {
96 : /*x_tran[i] = xn[i] - *gain_pit * y1[i] - gain_code * y2[i];*/
97 0 : Ltmp = L_mult( gcode16, y2[i] );
98 0 : Ltmp = L_shl( Ltmp, add( 5, shift ) );
99 0 : Ltmp = L_negate( Ltmp );
100 0 : Ltmp = L_mac( Ltmp, xn[i], 16384 );
101 0 : Ltmp = L_msu( Ltmp, y1[i], *gain_pit ); /* Q_new + 14 + shift */
102 0 : Ltmp = L_shl_sat( Ltmp, sub( 1, shift ) ); /* Q_new + 15 */
103 0 : x_tran[i] = round_fx_sat( Ltmp ); /*Q_new-1 */
104 0 : move16();
105 : }
106 0 : find_cn_fx( x_tran, Ap, p_Aq, x_in );
107 : }
108 : ELSE
109 : {
110 3370 : updt_tar_fx( cn, x_in, &exc[i_subfr], *gain_pit, L_SUBFR );
111 : }
112 3370 : Deemph2( x_in, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_deemp_preQ_fx ) );
113 :
114 : /*--------------------------------------------------------------*
115 : * DCT-II
116 : *--------------------------------------------------------------*/
117 :
118 3370 : test();
119 3370 : test();
120 3370 : test();
121 3370 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
122 : {
123 0 : Copy_Scale_sig( x_in, x_tran, L_SUBFR, -Q_MINUS + 1 ); /*Q_new-1 -> Q_new-4*/
124 : /*Copy( x_in, x_tran, L_SUBFR );*/
125 0 : Qdct = sub( Q_new, Q_MINUS );
126 : }
127 : ELSE
128 : {
129 3370 : Qdct = 0;
130 3370 : move16();
131 3370 : edct2_fx( L_SUBFR, -1, x_in, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
132 3370 : Qdct = negate( Qdct );
133 3370 : Copy_Scale_sig_32_16( out32, x_tran, L_SUBFR, sub( Qdct, Q_MINUS - 1 ) ); /* Output in Q_new-4 */
134 3370 : Qdct = sub( Q_new, Q_MINUS );
135 : }
136 :
137 : /*--------------------------------------------------------------*
138 : * Split algebraic vector quantizer based on RE8 lattice
139 : *--------------------------------------------------------------*/
140 :
141 3370 : AVQ_cod_fx( x_tran, x_norm, nBits, Nsv, 0 );
142 :
143 : /*--------------------------------------------------------------*
144 : * Find prequantizer excitation gain
145 : * Quantize the gain
146 : *--------------------------------------------------------------*/
147 :
148 3370 : L_corr = L_deposit_l( 0 );
149 3370 : L_ener = L_deposit_l( 0 );
150 219050 : FOR( i = 0; i < Nsv * 8; i++ )
151 : {
152 : /*fcorr += fx_tran[i]*(float)ix_norm[i];*/
153 : /*fener += (float)ix_norm[i]*(float)ix_norm[i];*/
154 215680 : stmp = shl_sat( x_norm[i], Q_AVQ_OUT );
155 215680 : L_corr = L_mac_sat( L_corr, x_tran[i], stmp );
156 215680 : L_ener = L_mac_sat( L_ener, stmp, stmp );
157 : }
158 3370 : L_ener = L_max( L_ener, 1 );
159 :
160 : /* No negative gains allowed in the quantizer*/
161 3370 : L_corr = L_max( L_corr, 0 );
162 :
163 3370 : e_corr = norm_l( L_corr );
164 3370 : m_corr = extract_h( L_shl( L_corr, e_corr ) );
165 3370 : e_corr = sub( 30, add( e_corr, sub( Qdct, Q_AVQ_OUT ) ) );
166 3370 : e_ener = norm_l( L_ener );
167 3370 : m_ener = extract_h( L_shl( L_ener, e_ener ) );
168 3370 : e_ener = sub( 30, e_ener );
169 :
170 3370 : IF( GT_16( m_corr, m_ener ) )
171 : {
172 2319 : m_corr = shr( m_corr, 1 );
173 2319 : e_corr = add( e_corr, 1 );
174 : }
175 3370 : m_corr = div_s( m_corr, m_ener ); /* e_corr - e_ener */
176 3370 : e_corr = sub( e_corr, e_ener );
177 3370 : Ltmp = L_shl_sat( m_corr, s_min( add( e_corr, 1 ), 31 ) ); /* Lgain in Q16 */
178 3370 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
179 : {
180 0 : Ltmp1 = L_max( gain_code, 1 );
181 0 : e_den = norm_l( Ltmp1 );
182 0 : m_den = extract_h( L_shl_sat( Ltmp1, e_den ) );
183 : /* ensure m_corr < m_den */
184 0 : test();
185 0 : IF( m_corr > 0 && m_den > 0 )
186 : {
187 0 : m_corr = div_s( 16384, m_den );
188 0 : e_corr = sub( 14 + 4, e_den );
189 0 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /*Q12*/
190 0 : stmp = round_fx_sat( L_shl_sat( Ltmp, 16 ) ); /* Q12 */
191 : }
192 : ELSE
193 : {
194 0 : stmp = 0;
195 0 : move16();
196 : }
197 0 : IF( GT_32( st_fx->core_brate, 56000 ) )
198 : {
199 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_64k_Q12, G_AVQ_DELTA_INACT_64k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
200 : }
201 0 : ELSE IF( GT_32( st_fx->core_brate, 42000 ) )
202 : {
203 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_48k_Q12, G_AVQ_DELTA_INACT_48k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
204 : }
205 : ELSE
206 : {
207 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_Q12, G_AVQ_DELTA_INACT_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
208 : }
209 0 : Ltmp = Mult_32_16( gain_code, stmp ); /* Q16 * Q12 - 15 -> Q13*/
210 0 : Ltmp = L_shl_sat( Ltmp, 5 ); /* Q13 -> Q18*/
211 0 : *gain_preQ = round_fx_sat( Ltmp ); /* Q2*/
212 : }
213 : ELSE
214 : {
215 3370 : IF( Es_pred < 0 )
216 : {
217 35 : Es_pred_loc = shr( negate( Es_pred ), 2 ); /* Q8 */
218 : }
219 : ELSE
220 : {
221 3335 : Es_pred_loc = Es_pred; /* Q8 */
222 3335 : move16();
223 : }
224 :
225 3370 : e_den = norm_s( Es_pred_loc );
226 3370 : m_den = shl( Es_pred_loc, e_den );
227 : /* ensure m_corr < m_den */
228 3370 : test();
229 3370 : IF( m_corr > 0 && m_den > 0 )
230 : {
231 3369 : m_corr = div_s( 16384, m_den ); /* 14 - 8 - e_den */
232 3369 : e_corr = sub( 14 - 8, e_den );
233 3369 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /* Q16 */
234 : }
235 : ELSE
236 : {
237 1 : Ltmp = L_deposit_l( 0 );
238 : }
239 3370 : test();
240 3370 : IF( LE_32( st_fx->core_brate, 42000 ) && GT_32( st_fx->core_brate, ACELP_24k40 ) )
241 : {
242 0 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_32kbps_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
243 : }
244 : ELSE
245 : {
246 3370 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
247 : }
248 3370 : Ltmp = L_mult( stmp, Es_pred_loc ); /* Q0*Q8 -> Q9*/
249 3370 : Ltmp = L_shl( Ltmp, add( e_den, 9 ) ); /* Q18*/
250 3370 : *gain_preQ = round_fx( Ltmp ); /* Q2*/
251 : }
252 3370 : push_indice( st_fx->hBstr, IND_AVQ_GAIN, index, G_AVQ_BITS );
253 :
254 : /*--------------------------------------------------------------*
255 : * Encode and multiplex subvectors into bit-stream
256 : *--------------------------------------------------------------*/
257 :
258 3370 : trgtSvPos = Nsv - 1;
259 3370 : move16();
260 3370 : test();
261 3370 : test();
262 3370 : test();
263 3370 : test();
264 3370 : test();
265 3370 : IF( avq_bit_sFlag && GT_16( nBits, 85 ) && !harm_flag_acelp && ( EQ_16( st_fx->coder_type, GENERIC ) || EQ_16( st_fx->coder_type, TRANSITION ) || EQ_16( st_fx->coder_type, INACTIVE ) ) )
266 : {
267 0 : trgtSvPos = 2;
268 0 : avq_bit_sFlag = 2;
269 0 : move16();
270 0 : move16();
271 : }
272 :
273 3370 : AVQ_encmux_fx( st_fx->hBstr, -1, x_norm, &nBits, Nsv, nq, avq_bit_sFlag, trgtSvPos );
274 :
275 : /* save # of AVQ unused bits for next subframe */
276 3370 : *unbits = nBits; /* Q0 */
277 3370 : move16();
278 :
279 : /* at the last subframe, write AVQ unused bits */
280 3370 : test();
281 3370 : test();
282 3370 : IF( EQ_16( i_subfr, 4 * L_SUBFR ) && NE_16( st_fx->extl, SWB_BWE_HIGHRATE ) && NE_16( st_fx->extl, FB_BWE_HIGHRATE ) )
283 : {
284 0 : WHILE( *unbits > 0 )
285 : {
286 0 : i = s_min( *unbits, 16 );
287 0 : push_indice( st_fx->hBstr, IND_UNUSED, 0, i );
288 0 : *unbits -= i;
289 : }
290 : }
291 :
292 : /*--------------------------------------------------------------*
293 : * DCT transform
294 : *--------------------------------------------------------------*/
295 :
296 219050 : FOR( i = 0; i < Nsv * WIDTH_BAND; i++ )
297 : {
298 215680 : x_tran[i] = shl_sat( x_norm[i], Q_AVQ_OUT_DEC );
299 215680 : move16();
300 : }
301 :
302 3370 : test();
303 3370 : test();
304 3370 : test();
305 3370 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
306 : {
307 0 : Copy( x_tran, code_preQ, L_SUBFR ); /* Q_AVQ_OUT_DEC */
308 : }
309 : ELSE
310 : {
311 3370 : Qdct = 0;
312 3370 : move16();
313 3370 : edct2_fx( L_SUBFR, 1, x_tran, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
314 : /*qdct = sub(Q_AVQ_OUT_DEC,qdct+Q_AVQ_OUT_DEC);*/
315 3370 : Qdct = negate( Qdct );
316 3370 : Copy_Scale_sig_32_16( out32, code_preQ, L_SUBFR, Qdct ); /* Output in Q_AVQ_OUT_DEC */
317 : /*qdct = Q_AVQ_OUT_DEC;*/
318 : }
319 :
320 : /*--------------------------------------------------------------*
321 : * Preemphasis
322 : *--------------------------------------------------------------*/
323 :
324 : /* in extreme cases at subframe boundaries, lower the preemphasis memory to avoid a saturation */
325 3370 : test();
326 3370 : if ( ( nq[7] != 0 ) && ( GT_16( sub( st_fx->last_nq_preQ, nq[0] ), 7 ) ) )
327 : {
328 : /* *mem_preemp /= 16; */
329 0 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 4 );
330 0 : move16();
331 : }
332 3370 : st_fx->last_nq_preQ = nq[7];
333 3370 : move16();
334 :
335 3370 : PREEMPH_FX( code_preQ, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_preemp_preQ_fx ) );
336 :
337 : /*--------------------------------------------------------------*
338 : * For inactive segments
339 : * - Zero-memory filtered pre-filter excitation
340 : * - Update of targets and gain_pit
341 : * For inactive segments
342 : * - Update xn[L_subfr-1] for updating the memory of the weighting filter
343 : *--------------------------------------------------------------*/
344 :
345 3370 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
346 : {
347 : /*ftemp = fcode_preQ[0] *fh1[L_SUBFR-1];*/
348 0 : Ltmp = L_mult( code_preQ[0], h1[L_SUBFR - 1] ); /*1+14+shift + Q_AVQ_OUT */
349 0 : FOR( i = 1; i < L_SUBFR; i++ )
350 : {
351 : /*ftemp += fcode_preQ[i] * fh1[L_SUBFR-1-i];*/
352 0 : Ltmp = L_mac( Ltmp, code_preQ[i], h1[L_SUBFR - 1 - i] );
353 : }
354 : /*fxn[L_SUBFR-1] -= *fgain_preQ * ftemp;*/
355 0 : Ltmp = L_shr( Mult_32_16( Ltmp, *gain_preQ ), sub( add( Q_AVQ_OUT_DEC, 2 ), Q_new ) ); /* (2 + 1 + 14 +shift+Q_AVQ_OUT)-(Q_AVQ_OUT+2-Q_new) = 15 + Q_new + shift */
356 0 : xn[L_SUBFR - 1] = round_fx( L_sub( L_mult( xn[L_SUBFR - 1], 32767 ), Ltmp ) ); /* -> Q_new + shift -1 */
357 : }
358 : ELSE
359 : {
360 3370 : conv_fx( code_preQ, h1, x_tran, L_SUBFR );
361 3370 : updt_tar_HR_fx( cn, cn, code_preQ, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
362 :
363 3370 : updt_tar_HR_fx( xn, xn, x_tran, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
364 3370 : *gain_pit = corr_xy1_fx( xn, y1, g_corr, L_SUBFR, 0, &Overflow ); /* Q14 */
365 : /* clip gain if necessary to avoid problems at decoder */
366 3370 : test();
367 3370 : if ( EQ_16( clip_gain, 1 ) && GT_16( *gain_pit, 15565 ) )
368 : {
369 37 : *gain_pit = 15565; /* 0.95 in Q14 */
370 37 : move16();
371 : }
372 3370 : updt_tar_fx( xn, xn2, y1, *gain_pit, L_SUBFR );
373 : }
374 :
375 3370 : st_fx->use_acelp_preq = 1;
376 3370 : move16();
377 :
378 3370 : return;
379 : }
380 :
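/*---------------------------------------------------------------------*
 * Illustrative sketch (assumption, not part of the codec build):
 * floating-point form of the inactive-frame target computed in the
 * loop above, taken from the float expression quoted in its comment
 * ( x_tran[i] = xn[i] - gain_pit*y1[i] - gain_code*y2[i] ).
 * The helper name target_update_ref() is hypothetical.
 *---------------------------------------------------------------------*/
static void target_update_ref( const float xn[], const float y1[], const float y2[],
                               float gain_pit, float gain_code, float x_tran[], int n )
{
    int i;

    for ( i = 0; i < n; i++ )
    {
        /* remove the scaled adaptive and innovative contributions from the target */
        x_tran[i] = xn[i] - gain_pit * y1[i] - gain_code * y2[i];
    }
}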
381 :
382 70640 : void transf_cdbk_enc_ivas_fx(
383 : Encoder_State *st_fx, /* i/o: encoder state structure */
384 : const Word16 harm_flag_acelp, /* i : harmonic flag for higher rates ACELP Q0*/
385 : const Word16 i_subfr, /* i : subframe index Q0*/
386 : Word16 cn[], /* i/o: target vector in residual domain Q_new*/
387 : Word16 exc[], /* i/o: pointer to excitation signal frame Q_new*/
388 : const Word16 *p_Aq, /* i : 12k8 Lp coefficient Q12*/
389 : const Word16 Ap[], /* i : weighted LP filter coefficients Q12*/
390 : const Word16 h1[], /* i : weighted filter input response Q15*/
391 : Word16 xn[], /* i/o: target vector Q_new + shift -1*/
392 : Word16 xn2[], /* i/o: target vector for innovation search Q_new + shift -1*/
393 : Word16 y1[], /* i/o: zero-memory filtered adaptive excitation Q_new + shift -1*/
394 : const Word16 y2[], /* i : zero-memory filtered innovative excitation Q9*/
395 : const Word16 Es_pred, /* i : predicted scaled innovation energy Q8*/
396 : Word16 *gain_pit, /* i/o: adaptive excitation gain Q14*/
397 : const Word32 gain_code, /* i : innovative excitation gain Q16*/
398 : Word16 g_corr[], /* o : ACELP correlation values Q15*/
399 : const Word16 clip_gain, /* i : adaptive gain clipping flag Q0*/
400 : Word16 *gain_preQ, /* o : prequantizer excitation gain Q2*/
401 : Word16 code_preQ[], /* o : prequantizer excitation Q_AVQ_OUT_DEC*/
402 : Word16 *unbits, /* o : number of AVQ unused bits Q0*/
403 : const Word16 Q_new, /* i : Current frame scaling */
404 : const Word16 shift /* i : shifting applied to y1, xn,... */
405 : )
406 : {
407 : Word16 i, index, nBits, Nsv, Es_pred_loc;
408 : Word16 x_in[L_SUBFR], x_tran[L_SUBFR], gcode16, stmp;
409 : Word16 e_corr, m_corr, e_ener, m_ener, m_den, e_den;
410 : Word16 x_norm[L_SUBFR + L_SUBFR / WIDTH_BAND];
411 : Word32 L_corr, L_ener, Ltmp, Ltmp1;
412 : Word16 nq[L_SUBFR / WIDTH_BAND];
413 : Word32 out32[L_SUBFR];
414 : Word16 Qdct;
415 : Word16 avq_bit_sFlag;
416 : Word16 trgtSvPos;
417 : #ifdef BASOP_NOGLOB_DECLARE_LOCAL
418 70640 : Flag Overflow = 0;
419 70640 : move32();
420 : #endif
421 :
422 70640 : avq_bit_sFlag = 0;
423 70640 : move16();
424 70640 : if ( st_fx->element_mode > EVS_MONO )
425 : {
426 70640 : avq_bit_sFlag = 1;
427 70640 : move16();
428 : }
429 :
430 : /*--------------------------------------------------------------*
431 : * Set bit-allocation
432 : *--------------------------------------------------------------*/
433 :
434 70640 : Nsv = 8;
435 70640 : move16();
436 70640 : nBits = st_fx->acelp_cfg.AVQ_cdk_bits[i_subfr >> 6]; /* Q0 */
437 70640 : move16();
438 :
439 : /* increase # of AVQ allocated bits by unused bits from the previous subframe */
440 70640 : nBits = add( nBits, *unbits );
441 :
442 : /*--------------------------------------------------------------*
443 : * Compute/Update target
444 : * For inactive frame, find target in residual domain
445 : * Deemphasis
446 : *--------------------------------------------------------------*/
447 :
448 70640 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
449 : {
450 13030 : gcode16 = round_fx_sat( L_shl_sat( gain_code, Q_new ) );
451 846950 : FOR( i = 0; i < L_SUBFR; i++ )
452 : {
453 : /*x_tran[i] = xn[i] - *gain_pit * y1[i] - gain_code * y2[i];*/
454 833920 : Ltmp = L_mult( gcode16, y2[i] );
455 833920 : Ltmp = L_shl( Ltmp, add( 5, shift ) );
456 833920 : Ltmp = L_negate( Ltmp );
457 833920 : Ltmp = L_mac( Ltmp, xn[i], 16384 );
458 833920 : Ltmp = L_msu( Ltmp, y1[i], *gain_pit ); /* Q_new + 14 + shift */
459 833920 : Ltmp = L_shl_sat( Ltmp, sub( 1, shift ) ); /* Q_new + 15 */
460 833920 : x_tran[i] = round_fx_sat( Ltmp ); /*Q_new-1 */
461 : }
462 13030 : find_cn_fx( x_tran, Ap, p_Aq, x_in );
463 : }
464 : ELSE
465 : {
466 57610 : updt_tar_fx( cn, x_in, &exc[i_subfr], *gain_pit, L_SUBFR );
467 : }
468 70640 : Deemph2( x_in, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_deemp_preQ_fx ) );
469 :
470 : /*--------------------------------------------------------------*
471 : * DCT-II
472 : *--------------------------------------------------------------*/
473 :
474 70640 : test();
475 70640 : test();
476 70640 : test();
477 70640 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
478 : {
479 41240 : Copy_Scale_sig( x_in, x_tran, L_SUBFR, -Q_MINUS + 1 ); /*Q_new-1 -> Q_new-4*/
480 : /*Copy( x_in, x_tran, L_SUBFR );*/
481 41240 : Qdct = sub( Q_new, Q_MINUS );
482 : }
483 : ELSE
484 : {
485 29400 : Qdct = 0;
486 29400 : move16();
487 29400 : edct2_fx( L_SUBFR, -1, x_in, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
488 29400 : Qdct = negate( Qdct );
489 29400 : Copy_Scale_sig_32_16( out32, x_tran, L_SUBFR, sub( Qdct, Q_MINUS - 1 ) ); /* Output in Q_new-4 */
490 29400 : Qdct = sub( Q_new, Q_MINUS );
491 : }
492 :
493 : /*--------------------------------------------------------------*
494 : * Split algebraic vector quantizer based on RE8 lattice
495 : *--------------------------------------------------------------*/
496 :
497 70640 : AVQ_cod_fx( x_tran, x_norm, nBits, Nsv, 0 );
498 :
499 : /*--------------------------------------------------------------*
500 : * Find prequantizer excitation gain
501 : * Quantize the gain
502 : *--------------------------------------------------------------*/
503 :
504 70640 : L_corr = L_deposit_l( 0 );
505 70640 : L_ener = L_deposit_l( 0 );
506 4591600 : FOR( i = 0; i < Nsv * 8; i++ )
507 : {
508 : /*fcorr += fx_tran[i]*(float)ix_norm[i];*/
509 : /*fener += (float)ix_norm[i]*(float)ix_norm[i];*/
510 4520960 : stmp = shl_sat( x_norm[i], Q_AVQ_OUT );
511 4520960 : L_corr = L_mac_sat( L_corr, x_tran[i], stmp );
512 4520960 : L_ener = L_mac_sat( L_ener, stmp, stmp );
513 : }
514 70640 : L_ener = L_max( L_ener, 1 );
515 :
516 : /* No negative gains allowed in the quantizer*/
517 70640 : L_corr = L_max( L_corr, 0 );
518 :
519 70640 : e_corr = norm_l( L_corr );
520 70640 : m_corr = extract_h( L_shl( L_corr, e_corr ) );
521 70640 : e_corr = sub( 30, add( e_corr, sub( Qdct, Q_AVQ_OUT ) ) );
522 70640 : e_ener = norm_l( L_ener );
523 70640 : m_ener = extract_h( L_shl( L_ener, e_ener ) ); /* 30 - e_ener */
524 70640 : e_ener = sub( 30, e_ener );
525 :
526 70640 : IF( GT_16( m_corr, m_ener ) )
527 : {
528 40661 : m_corr = shr( m_corr, 1 ); /* e_corr + 1 */
529 40661 : e_corr = add( e_corr, 1 );
530 : }
531 70640 : m_corr = div_s( m_corr, m_ener ); /* e_corr - e_ener */
532 70640 : e_corr = sub( e_corr, e_ener );
533 70640 : Ltmp = L_shl_sat( m_corr, s_min( add( e_corr, 1 ), 31 ) ); /* Lgain in Q16 */
534 70640 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
535 : {
536 13030 : Ltmp1 = L_max( gain_code, 1 );
537 13030 : e_den = norm_l( Ltmp1 );
538 13030 : m_den = extract_h( L_shl_sat( Ltmp1, e_den ) );
539 : /* ensure m_corr < m_den */
540 13030 : test();
541 13030 : IF( m_corr > 0 && m_den > 0 )
542 : {
543 13030 : m_corr = div_s( 16384, m_den );
544 13030 : e_corr = sub( 14 + 4, e_den );
545 13030 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /*Q12*/
546 13030 : stmp = round_fx_sat( L_shl_sat( Ltmp, 16 ) ); /* Q12 */
547 : }
548 : ELSE
549 : {
550 0 : stmp = 0;
551 0 : move16();
552 : }
553 13030 : IF( GT_32( st_fx->core_brate, 56000 ) )
554 : {
555 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_64k_Q12, G_AVQ_DELTA_INACT_64k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
556 : }
557 13030 : ELSE IF( GT_32( st_fx->core_brate, 42000 ) )
558 : {
559 1570 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_48k_Q12, G_AVQ_DELTA_INACT_48k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
560 : }
561 : ELSE
562 : {
563 11460 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_Q12, G_AVQ_DELTA_INACT_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
564 : }
565 13030 : Ltmp = Mult_32_16( gain_code, stmp ); /* Q16 * Q12 - 15 -> Q13*/
566 13030 : Ltmp = L_shl_sat( Ltmp, 5 ); /* Q13 -> Q18*/
567 13030 : *gain_preQ = round_fx_sat( Ltmp ); /* Q2*/
568 13030 : move16();
569 : }
570 : ELSE
571 : {
572 57610 : IF( Es_pred < 0 )
573 : {
574 160 : Es_pred_loc = shr( negate( Es_pred ), 2 );
575 : }
576 : ELSE
577 : {
578 57450 : Es_pred_loc = Es_pred;
579 57450 : move16();
580 : }
581 :
582 57610 : e_den = norm_s( Es_pred_loc );
583 57610 : m_den = shl( Es_pred_loc, e_den );
584 : /* ensure m_corr < m_den */
585 57610 : test();
586 57610 : IF( m_corr > 0 && m_den > 0 )
587 : {
588 56815 : m_corr = div_s( 16384, m_den );
589 56815 : e_corr = sub( 14 - 8, e_den );
590 56815 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /* Q16 */
591 : }
592 : ELSE
593 : {
594 795 : Ltmp = L_deposit_l( 0 );
595 : }
596 57610 : test();
597 57610 : IF( LE_32( st_fx->core_brate, 42000 ) && GT_32( st_fx->core_brate, ACELP_24k40 ) )
598 : {
599 51960 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_32kbps_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
600 : }
601 : ELSE
602 : {
603 5650 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
604 : }
605 57610 : Ltmp = L_mult( stmp, Es_pred_loc ); /* Q0*Q8 -> Q9*/
606 57610 : Ltmp = L_shl( Ltmp, add( e_den, 9 ) ); /* Q18*/
607 57610 : *gain_preQ = round_fx( Ltmp ); /* Q2*/
608 57610 : move16();
609 : }
610 70640 : push_indice( st_fx->hBstr, IND_AVQ_GAIN, index, G_AVQ_BITS );
611 :
612 : /*--------------------------------------------------------------*
613 : * Encode and multiplex subvectors into bit-stream
614 : *--------------------------------------------------------------*/
615 :
616 70640 : trgtSvPos = sub( Nsv, 1 );
617 70640 : move16();
618 70640 : test();
619 70640 : test();
620 70640 : test();
621 70640 : test();
622 70640 : test();
623 70640 : IF( avq_bit_sFlag && GT_16( nBits, 85 ) && !harm_flag_acelp && ( EQ_16( st_fx->coder_type, GENERIC ) || EQ_16( st_fx->coder_type, TRANSITION ) || EQ_16( st_fx->coder_type, INACTIVE ) ) )
624 : {
625 19950 : trgtSvPos = 2;
626 19950 : avq_bit_sFlag = 2;
627 19950 : move16();
628 19950 : move16();
629 : }
630 :
631 70640 : AVQ_encmux_fx( st_fx->hBstr, -1, x_norm, &nBits, Nsv, nq, avq_bit_sFlag, trgtSvPos );
632 :
633 : /* save # of AVQ unused bits for next subframe */
634 70640 : *unbits = nBits;
635 70640 : move16();
636 :
637 : /* at the last subframe, write AVQ unused bits */
638 70640 : test();
639 70640 : test();
640 70640 : IF( EQ_16( i_subfr, 4 * L_SUBFR ) && NE_16( st_fx->extl, SWB_BWE_HIGHRATE ) && NE_16( st_fx->extl, FB_BWE_HIGHRATE ) )
641 : {
642 26568 : WHILE( *unbits > 0 )
643 : {
644 12440 : i = s_min( *unbits, 16 );
645 12440 : push_indice( st_fx->hBstr, IND_UNUSED, 0, i );
646 12440 : *unbits = sub( *unbits, i );
647 12440 : move16();
648 : }
649 : }
650 :
651 : /*--------------------------------------------------------------*
652 : * DCT transform
653 : *--------------------------------------------------------------*/
654 :
655 4591600 : FOR( i = 0; i < Nsv * WIDTH_BAND; i++ )
656 : {
657 4520960 : x_tran[i] = shl_sat( x_norm[i], Q_AVQ_OUT_DEC );
658 4520960 : move16();
659 : }
660 :
661 70640 : test();
662 70640 : test();
663 70640 : test();
664 70640 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
665 : {
666 41240 : Copy( x_tran, code_preQ, L_SUBFR ); /* Q_AVQ_OUT_DEC */
667 : }
668 : ELSE
669 : {
670 29400 : Qdct = 0;
671 29400 : move16();
672 29400 : edct2_fx( L_SUBFR, 1, x_tran, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
673 : /*qdct = sub(Q_AVQ_OUT_DEC,qdct+Q_AVQ_OUT_DEC);*/
674 29400 : Qdct = negate( Qdct );
675 29400 : Copy_Scale_sig_32_16( out32, code_preQ, L_SUBFR, Qdct ); /* Output in Q_AVQ_OUT_DEC */
676 : /*qdct = Q_AVQ_OUT_DEC;*/
677 : }
678 :
679 : /*--------------------------------------------------------------*
680 : * Preemphasis
681 : *--------------------------------------------------------------*/
682 :
683 : /* in extreme cases at subframe boundaries, lower the preemphasis memory to avoid a saturation */
684 70640 : test();
685 70640 : if ( ( nq[7] != 0 ) && ( GT_16( sub( st_fx->last_nq_preQ, nq[0] ), 7 ) ) )
686 : {
687 : /* *mem_preemp /= 16; */
688 1 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 4 );
689 1 : move16();
690 : }
691 70640 : st_fx->last_nq_preQ = nq[7];
692 70640 : move16();
693 :
694 : /* TD pre-quantizer: in extreme cases at subframe boundaries, lower the preemphasis memory to avoid a saturation */
695 70640 : test();
696 70640 : test();
697 70640 : test();
698 70640 : test();
699 70640 : test();
700 70640 : IF( GT_16( st_fx->element_mode, EVS_MONO ) && NE_16( st_fx->coder_type, INACTIVE ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && !harm_flag_acelp && code_preQ[0] != 0 )
701 : {
702 18547 : IF( GT_16( abs_s( st_fx->last_code_preq ), shl_sat( abs_s( code_preQ[0] ), 4 ) ) )
703 : {
704 0 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 4 );
705 0 : move16();
706 : }
707 18547 : ELSE IF( GT_16( abs_s( st_fx->last_code_preq ), shl_sat( abs_s( code_preQ[0] ), 3 ) ) )
708 : {
709 0 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 3 );
710 0 : move16();
711 : }
712 : }
713 :
714 70640 : st_fx->last_code_preq = shr( code_preQ[L_SUBFR - 1], 9 ); // Q0
715 70640 : move16();
716 :
717 70640 : PREEMPH_FX( code_preQ, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_preemp_preQ_fx ) );
718 :
719 : /*--------------------------------------------------------------*
720 : * For inactive segments
721 : * - Zero-memory filtered pre-filter excitation
722 : * - Update of targets and gain_pit
723 : * For inactive segments
724 : * - Update xn[L_subfr-1] for updating the memory of the weighting filter
725 : *--------------------------------------------------------------*/
726 :
727 70640 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
728 : {
729 : /*ftemp = fcode_preQ[0] *fh1[L_SUBFR-1];*/
730 13030 : Ltmp = L_mult( code_preQ[0], h1[L_SUBFR - 1] ); /*1+14+shift + Q_AVQ_OUT */
731 833920 : FOR( i = 1; i < L_SUBFR; i++ )
732 : {
733 : /*ftemp += fcode_preQ[i] * fh1[L_SUBFR-1-i];*/
734 820890 : Ltmp = L_mac( Ltmp, code_preQ[i], h1[L_SUBFR - 1 - i] );
735 : }
736 : /*fxn[L_SUBFR-1] -= *fgain_preQ * ftemp;*/
737 13030 : Ltmp = L_shr( Mult_32_16( Ltmp, *gain_preQ ), sub( add( Q_AVQ_OUT_DEC, 2 ), Q_new ) ); /* (2 + 1 + 14 +shift+Q_AVQ_OUT)-(Q_AVQ_OUT+2-Q_new) = 15 + Q_new + shift */
738 13030 : xn[L_SUBFR - 1] = round_fx( L_sub( L_mult( xn[L_SUBFR - 1], 32767 ), Ltmp ) ); /* -> Q_new + shift -1 */
739 : }
740 : ELSE
741 : {
742 57610 : conv_fx( code_preQ, h1, x_tran, L_SUBFR );
743 57610 : updt_tar_HR_fx( cn, cn, code_preQ, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
744 :
745 57610 : updt_tar_HR_fx( xn, xn, x_tran, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
746 57610 : *gain_pit = corr_xy1_fx( xn, y1, g_corr, L_SUBFR, 0, &Overflow ); /* Q14 */
747 57610 : move16();
748 : /* clip gain if necessary to avoid problems at decoder */
749 57610 : test();
750 57610 : if ( EQ_16( clip_gain, 1 ) && GT_16( *gain_pit, 15565 ) )
751 : {
752 78 : *gain_pit = 15565; /* 0.95 in Q14 */
753 78 : move16();
754 : }
755 57610 : updt_tar_fx( xn, xn2, y1, *gain_pit, L_SUBFR );
756 : }
757 :
758 70640 : st_fx->use_acelp_preq = 1;
759 70640 : move16();
760 :
761 70640 : return;
762 : }
763 :
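/*---------------------------------------------------------------------*
 * Illustrative sketch (assumption, not part of the codec build):
 * floating-point form of the prequantizer gain estimate that the
 * fixed-point correlation/energy loops above implement with
 * L_mac_sat() and div_s(). The helper name avq_gain_ref() is
 * hypothetical.
 *---------------------------------------------------------------------*/
static float avq_gain_ref( const float x_tran[], const int x_norm[], int n )
{
    float corr = 0.0f, ener = 0.0f;
    int i;

    for ( i = 0; i < n; i++ )
    {
        corr += x_tran[i] * (float) x_norm[i];          /* <target, quantized vector> */
        ener += (float) x_norm[i] * (float) x_norm[i];  /* energy of quantized vector */
    }

    if ( corr < 0.0f )
    {
        corr = 0.0f;    /* no negative gains allowed in the quantizer */
    }
    if ( ener < 1.0f )
    {
        ener = 1.0f;    /* avoid division by zero */
    }

    return corr / ener; /* unquantized prequantizer gain */
}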
764 :
765 : /*-------------------------------------------------------------------*
766 : * Find target in residual domain - cn[]
767 : *-------------------------------------------------------------------*/
768 :
769 13030 : static void find_cn_fx(
770 : const Word16 xn[], /* i : target signal Qx*/
771 : const Word16 Ap[], /* i : weighted LP filter coefficients Q12*/
772 : const Word16 *p_Aq, /* i : 12k8 LP coefficients Q12*/
773 : Word16 cn[] /* o : target signal in residual domain Qx*/
774 : )
775 : {
776 : Word16 tmp, tmp_fl[L_SUBFR + M];
777 :
778 13030 : set16_fx( tmp_fl, 0, M );
779 13030 : Copy( xn, tmp_fl + M, L_SUBFR ); /* Qx */
780 13030 : tmp = 0;
781 13030 : move16();
782 13030 : PREEMPH_FX( tmp_fl + M, PREEMPH_FAC_16k, L_SUBFR, &tmp );
783 13030 : syn_filt_s_lc_fx( 0, Ap, tmp_fl + M, tmp_fl + M, L_SUBFR );
784 13030 : Residu3_lc_fx( p_Aq, M, tmp_fl + M, cn, L_SUBFR, 1 );
785 :
786 13030 : return;
787 : }
788 :
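/*---------------------------------------------------------------------*
 * Illustrative sketch (assumption, not part of the codec build):
 * floating-point form of find_cn_fx(). The target is preemphasized,
 * passed through the weighted synthesis filter 1/A_w(z) (Ap) with
 * zero memory, then re-analysed with the quantized LP filter A_q(z)
 * (p_Aq) to obtain the residual-domain target. The helper name and
 * the assumption Ap[0] == Aq[0] == 1.0 are illustrative only.
 *---------------------------------------------------------------------*/
static void find_cn_ref( const float xn[], const float Ap[], const float Aq[],
                         float cn[], int l_subfr, int m, float preemph_fac )
{
    float buf[64 + 16]; /* assumes l_subfr <= 64 and m <= 16 */
    float mem = 0.0f;
    int i, j;

    for ( i = 0; i < m; i++ )
    {
        buf[i] = 0.0f; /* zero LP filter memory */
    }

    /* preemphasis of the target: y[n] = x[n] - fac * x[n-1] */
    for ( i = 0; i < l_subfr; i++ )
    {
        buf[m + i] = xn[i] - preemph_fac * mem;
        mem = xn[i];
    }

    /* zero-memory synthesis filtering through 1/A_w(z), in place */
    for ( i = 0; i < l_subfr; i++ )
    {
        for ( j = 1; j <= m; j++ )
        {
            buf[m + i] -= Ap[j] * buf[m + i - j];
        }
    }

    /* LP analysis with A_q(z) to get the residual-domain target */
    for ( i = 0; i < l_subfr; i++ )
    {
        cn[i] = buf[m + i];
        for ( j = 1; j <= m; j++ )
        {
            cn[i] += Aq[j] * buf[m + i - j];
        }
    }
}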
789 :
790 : /*-----------------------------------------------------------------*
791 : * Transform domain contribution encoding
792 : *-----------------------------------------------------------------*/
793 :
794 : /* o: quantization index Q0*/
795 114078 : Word16 gain_quant_fx(
796 : Word32 *gain, /* i : gain to quantize Q16*/
797 : Word16 *gain16, /* o : quantized gain expg*/
798 : const Word16 c_min, /* i : log10 of lower limit in Q14*/
799 : const Word16 c_max, /* i : log10 of upper limit in Q13*/
800 : const Word16 bits, /* i : number of bits to quantize Q0*/
801 : Word16 *expg /* o : output exponent of gain16 */
802 : )
803 : {
804 : Word16 index, levels;
805 : Word16 c_gain;
806 : Word16 e_tmp, f_tmp, exp;
807 : Word16 tmp, tmp1, tmp2, frac;
808 : Word32 L_tmp;
809 :
810 114078 : levels = shl( 1, bits );
811 : /* Prevent the gain from being smaller than 0.0003. */
812 : /* This avoids an overflow when the gain is very small: */
813 : /* log10 would give a large negative value in Q13 that overflows */
814 : /* in the code below (the resulting value of 'index' is not affected). */
815 : /* tmp2 = msu_r(L_deposit_h(c_gain),c_min,16384) */
816 114078 : L_tmp = L_max( *gain, 20 );
817 :
818 : /*c_min = (float)log10(min);*/
819 : /*c_mult = (float) ((levels-1)/(log10(max)-c_min));*/
820 :
821 : /*tmp = c_mult * ((float)log10(*gain) - c_min);
822 : = ((levels-1)/(log10(max)-log10(min)))*((float)log10(*gain) - log10(min));*/
823 :
824 114078 : e_tmp = norm_l( L_tmp );
825 114078 : f_tmp = Log2_norm_lc( L_shl( L_tmp, e_tmp ) );
826 114078 : e_tmp = sub( 30 - 16, e_tmp ); /*Q(min)=16*/
827 114078 : L_tmp = Mpy_32_16( e_tmp, f_tmp, 9864 ); /* Q16 */ /*log10(2) in Q15*/
828 114078 : c_gain = round_fx( L_shl( L_tmp, 13 ) ); /* Q13 */
829 :
830 : /*tmp1 = sub(c_max,c_min); Q14*/
831 : /*tmp2 = sub(c_gain,c_min); Q14*/
832 :
833 114078 : tmp1 = msu_r_sat( L_deposit_h( c_max /*in Q13 already*/ ), c_min, 16384 ); /*Q13*/
834 114078 : tmp2 = msu_r_sat( L_deposit_h( c_gain /*in Q13 already*/ ), c_min, 16384 ); /*Q13*/
835 114078 : IF( tmp1 != 0 )
836 : {
837 114078 : exp = norm_s( tmp1 );
838 114078 : frac = div_s( shl( 1, sub( 14, exp ) ), tmp1 ); /*Q(15-exp)*/
839 114078 : L_tmp = L_mult( tmp2, frac ); /*Q(30-exp)*/
840 114078 : L_tmp = Mult_32_16( L_tmp, sub( levels, 1 ) ); /*Q(15-exp)*/
841 114078 : index = extract_l( L_shr( L_add( L_tmp, shr( 1 << 14, exp ) ), sub( 15, exp ) ) ); /* Q0 */
842 : }
843 : ELSE
844 : {
845 0 : L_tmp = L_mult( tmp2, sub( levels, 1 ) ); /*Q15*/
846 0 : index = extract_l( L_shr( L_add( L_tmp, 1 << 14 ), 15 ) ); /* Q0 */
847 : }
848 :
849 114078 : index = s_max( index, 0 );
850 114078 : index = s_min( index, sub( levels, 1 ) );
851 :
852 : /**gain = (float)pow( 10.0, (((float)index)/c_mult) + c_min );
853 : y = index/c_mult + c_min;
854 : = (index/(levels-1))*(log10(max) - log10(min)) + log10(min);
855 : = z*log10(max) + (1-z)*log10(min)
856 : z = (index/(levels-1))*/
857 114078 : tmp = div_s( index, sub( levels, 1 ) ); /*Q15*/
858 114078 : L_tmp = L_mult( tmp, c_max ); /*Q29*/
859 114078 : L_tmp = L_mac0( L_tmp, sub( 32767, tmp ), c_min ); /*Q29*/
860 :
861 114078 : L_tmp = Mult_32_16( L_tmp, 27213 ); /*Q27, 3.321928 in Q13*/
862 114078 : L_tmp = L_shr( L_tmp, 11 ); /*Q27->Q16*/
863 :
864 114078 : frac = L_Extract_lc( L_tmp, expg ); /* Extract exponent of gcode0 */
865 :
866 114078 : *gain16 = extract_l( Pow2( 14, frac ) ); /* Put 14 as exponent so that */
867 : /* output of Pow2() will be: */
868 : /* 16384 < Pow2() <= 32767 */
869 114078 : *expg = sub( *expg, 14 );
870 114078 : move16();
871 :
872 114078 : return ( index );
873 : }
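/*---------------------------------------------------------------------*
 * Illustrative sketch (assumption, not part of the codec build):
 * floating-point form of the log-domain uniform gain quantizer
 * implemented above in fixed point, following the float expressions
 * quoted in the comments of gain_quant_fx(). The helper name
 * gain_quant_ref() is hypothetical; log10()/pow() come from <math.h>.
 *---------------------------------------------------------------------*/
#include <math.h>

static int gain_quant_ref( float *gain, float min_g, float max_g, int bits )
{
    int   levels = 1 << bits;
    float c_min  = (float) log10( min_g );
    float c_mult = (float) ( levels - 1 ) / ( (float) log10( max_g ) - c_min );
    int   index;

    if ( *gain < 0.0003f )
    {
        *gain = 0.0003f; /* prevent log10() from returning a very large negative value */
    }

    /* uniform quantization of log10(gain) between log10(min) and log10(max) */
    index = (int) ( c_mult * ( (float) log10( *gain ) - c_min ) + 0.5f );
    if ( index < 0 )
    {
        index = 0;
    }
    if ( index > levels - 1 )
    {
        index = levels - 1;
    }

    *gain = (float) pow( 10.0, (float) index / c_mult + c_min ); /* dequantized gain */

    return index;
}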