Line data Source code
1 : /*====================================================================================
2 : EVS Codec 3GPP TS26.452 Aug 12, 2021. Version 16.3.0
3 : ====================================================================================*/
4 :
5 : #include <stdint.h>
6 : #include "options.h" /* Compilation switches */
7 : #include "cnst.h" /* Common constants */
8 : #include "prot_fx.h" /* Function prototypes */
9 : #include "rom_com.h" /* Static table prototypes */
10 : #include "prot_fx_enc.h"
11 :
12 :
13 : /*---------------------------------------------------------------------*
14 : * Local function prototype
15 : *---------------------------------------------------------------------*/
16 :
17 : static void find_cn_fx( const Word16 xn[], const Word16 Ap[], const Word16 *p_Aq, Word16 cn[] );
18 :
19 :
20 : /*-----------------------------------------------------------------*
21 : * Transform domain contribution encoding
22 : *-----------------------------------------------------------------*/
23 :
24 : #define Q_MINUS 4
25 3370 : void transf_cdbk_enc_fx(
26 : Encoder_State *st_fx, /* i/o: encoder state structure */
27 : const Word16 harm_flag_acelp, /* i : harmonic flag for higher rates ACELP Q0*/
28 : const Word16 i_subfr, /* i : subframe index Q0*/
29 : Word16 cn[], /* i/o: target vector in residual domain Q_new*/
30 : Word16 exc[], /* i/o: pointer to excitation signal frame Q_new*/
31 : const Word16 *p_Aq, /* i : 12k8 Lp coefficient Q12*/
32 : const Word16 Ap[], /* i : weighted LP filter coefficients Q12*/
33 : const Word16 h1[], /* i : weighted filter input response Q15*/
34 : Word16 xn[], /* i/o: target vector Q_new + shift -1*/
35 : Word16 xn2[], /* i/o: target vector for innovation search Q_new + shift -1*/
36 : Word16 y1[], /* i/o: zero-memory filtered adaptive excitation Q_new + shift -1*/
37 : const Word16 y2[], /* i : zero-memory filtered innovative excitation Q9*/
38 : const Word16 Es_pred, /* i : predicted scaled innovation energy Q8*/
39 : Word16 *gain_pit, /* i/o: adaptive excitation gain Q14*/
40 : const Word32 gain_code, /* i : innovative excitation gain Q16*/
41 : Word16 g_corr[], /* o : ACELP correlation values Q15*/
42 : const Word16 clip_gain, /* i : adaptive gain clipping flag Q0*/
43 : Word16 *gain_preQ, /* o : prequantizer excitation gain Q2*/
44 : Word16 code_preQ[], /* o : prequantizer excitation Q_AVQ_OUT_DEC*/
45 : Word16 *unbits, /* o : number of AVQ unused bits Q0*/
46 : const Word16 Q_new, /* i : Current frame scaling */
47 : const Word16 shift /* i : shifting applied to y1, xn,... */
48 : )
49 : {
50 : Word16 i, index, nBits, Nsv, Es_pred_loc;
51 : Word16 x_in[L_SUBFR], x_tran[L_SUBFR], gcode16, stmp;
52 : Word16 e_corr, m_corr, e_ener, m_ener, m_den, e_den;
53 : Word16 x_norm[L_SUBFR + L_SUBFR / WIDTH_BAND];
54 : Word32 L_corr, L_ener, Ltmp, Ltmp1;
55 : Word16 nq[L_SUBFR / WIDTH_BAND];
56 : Word32 out32[L_SUBFR];
57 : Word16 Qdct;
58 : Word16 avq_bit_sFlag;
59 : Word16 trgtSvPos;
60 3370 : Flag Overflow = 0;
61 3370 : move32();
62 :
63 3370 : avq_bit_sFlag = 0;
64 3370 : move16();
65 3370 : if ( st_fx->element_mode > EVS_MONO )
66 : {
67 0 : avq_bit_sFlag = 1;
68 0 : move16();
69 : }
70 :
71 : /*--------------------------------------------------------------*
72 : * Set bit-allocation
73 : *--------------------------------------------------------------*/
74 :
75 3370 : Nsv = 8;
76 3370 : move16();
77 3370 : nBits = st_fx->acelp_cfg.AVQ_cdk_bits[shr( i_subfr, 6 )]; /* Q0 */
78 3370 : move16();
79 :
80 : /* increase # of AVQ allocated bits by unused bits from the previous subframe */
81 3370 : nBits = add( nBits, *unbits );
82 :
83 : /*--------------------------------------------------------------*
84 : * Compute/Update target
85 : * For inactive frames, find the target in the residual domain
86 : * Deemphasis
87 : *--------------------------------------------------------------*/
88 :
89 3370 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
90 : {
91 0 : gcode16 = round_fx_sat( L_shl_sat( gain_code, Q_new ) );
92 0 : FOR( i = 0; i < L_SUBFR; i++ )
93 : {
94 : /*x_tran[i] = xn[i] - *gain_pit * y1[i] - gain_code * y2[i];*/
95 0 : Ltmp = L_mult( gcode16, y2[i] );
96 0 : Ltmp = L_shl( Ltmp, add( 5, shift ) );
97 0 : Ltmp = L_negate( Ltmp );
98 0 : Ltmp = L_mac( Ltmp, xn[i], 16384 );
99 0 : Ltmp = L_msu( Ltmp, y1[i], *gain_pit ); /* Q_new + 14 + shift */
100 0 : Ltmp = L_shl_sat( Ltmp, sub( 1, shift ) ); /* Q_new + 15 */
101 0 : x_tran[i] = round_fx_sat( Ltmp ); /*Q_new-1 */
102 0 : move16();
103 : }
104 0 : find_cn_fx( x_tran, Ap, p_Aq, x_in );
105 : }
106 : ELSE
107 : {
108 3370 : updt_tar_fx( cn, x_in, &exc[i_subfr], *gain_pit, L_SUBFR );
109 : }
110 3370 : Deemph2( x_in, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_deemp_preQ_fx ) );
111 :
112 : /*--------------------------------------------------------------*
113 : * DCT-II
114 : *--------------------------------------------------------------*/
115 :
116 3370 : test();
117 3370 : test();
118 3370 : test();
119 3370 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
120 : {
121 0 : Copy_Scale_sig( x_in, x_tran, L_SUBFR, -Q_MINUS + 1 ); /*Q_new-1 -> Q_new-4*/
122 : /*Copy( x_in, x_tran, L_SUBFR );*/
123 0 : Qdct = sub( Q_new, Q_MINUS );
124 : }
125 : ELSE
126 : {
127 3370 : Qdct = 0;
128 3370 : move16();
129 3370 : edct2_fx( L_SUBFR, -1, x_in, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
130 3370 : Qdct = negate( Qdct );
131 3370 : Copy_Scale_sig_32_16( out32, x_tran, L_SUBFR, sub( Qdct, Q_MINUS - 1 ) ); /* Output in Q_new-4 */
132 3370 : Qdct = sub( Q_new, Q_MINUS );
133 : }
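/* Note on the branch above: for active frames at core bitrates within
   [MIN_BRATE_AVQ_EXC, MAX_BRATE_AVQ_EXC_TD] and without the harmonic flag,
   the de-emphasized target is fed to the AVQ directly in the time domain
   (only rescaled to Q_new-4); otherwise its 64-point DCT-II is quantized.
   The same condition later selects between the time-domain copy and the
   inverse DCT when code_preQ[] is built. */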
134 :
135 : /*--------------------------------------------------------------*
136 : * Split algebraic vector quantizer based on RE8 lattice
137 : *--------------------------------------------------------------*/
138 :
139 3370 : AVQ_cod_fx( x_tran, x_norm, nBits, Nsv, 0 );
140 :
141 : /*--------------------------------------------------------------*
142 : * Find prequantizer excitation gain
143 : * Quantize the gain
144 : *--------------------------------------------------------------*/
145 :
146 3370 : L_corr = L_deposit_l( 0 );
147 3370 : L_ener = L_deposit_l( 0 );
148 219050 : FOR( i = 0; i < Nsv * 8; i++ )
149 : {
150 : /*fcorr += fx_tran[i]*(float)ix_norm[i];*/
151 : /*fener += (float)ix_norm[i]*(float)ix_norm[i];*/
152 215680 : stmp = shl_sat( x_norm[i], Q_AVQ_OUT );
153 215680 : L_corr = L_mac_sat( L_corr, x_tran[i], stmp );
154 215680 : L_ener = L_mac_sat( L_ener, stmp, stmp );
155 : }
156 3370 : L_ener = L_max( L_ener, 1 );
157 :
158 : /* No negative gains allowed in the quantizer*/
159 3370 : L_corr = L_max( L_corr, 0 );
160 :
161 3370 : e_corr = norm_l( L_corr );
162 3370 : m_corr = extract_h( L_shl( L_corr, e_corr ) );
163 3370 : e_corr = sub( 30, add( e_corr, sub( Qdct, Q_AVQ_OUT ) ) );
164 3370 : e_ener = norm_l( L_ener );
165 3370 : m_ener = extract_h( L_shl( L_ener, e_ener ) );
166 3370 : e_ener = sub( 30, e_ener );
167 :
168 3370 : IF( GT_16( m_corr, m_ener ) )
169 : {
170 2319 : m_corr = shr( m_corr, 1 );
171 2319 : e_corr = add( e_corr, 1 );
172 : }
173 3370 : m_corr = div_s( m_corr, m_ener ); /* e_corr - e_ener */
174 3370 : e_corr = sub( e_corr, e_ener );
175 3370 : Ltmp = L_shl_sat( m_corr, s_min( add( e_corr, 1 ), 31 ) ); /* Lgain in Q16 */
176 3370 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
177 : {
178 0 : Ltmp1 = L_max( gain_code, 1 );
179 0 : e_den = norm_l( Ltmp1 );
180 0 : m_den = extract_h( L_shl_sat( Ltmp1, e_den ) );
181 : /* ensure m_corr < m_den */
182 0 : test();
183 0 : IF( m_corr > 0 && m_den > 0 )
184 : {
185 0 : m_corr = div_s( 16384, m_den );
186 0 : e_corr = sub( 14 + 4, e_den );
187 0 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /*Q12*/
188 0 : stmp = round_fx_sat( L_shl_sat( Ltmp, 16 ) ); /* Q12 */
189 : }
190 : ELSE
191 : {
192 0 : stmp = 0;
193 0 : move16();
194 : }
195 0 : IF( GT_32( st_fx->core_brate, 56000 ) )
196 : {
197 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_64k_Q12, G_AVQ_DELTA_INACT_64k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
198 : }
199 0 : ELSE IF( GT_32( st_fx->core_brate, 42000 ) )
200 : {
201 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_48k_Q12, G_AVQ_DELTA_INACT_48k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
202 : }
203 : ELSE
204 : {
205 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_Q12, G_AVQ_DELTA_INACT_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
206 : }
207 0 : Ltmp = Mult_32_16( gain_code, stmp ); /* Q16 * Q12 - 15 -> Q13*/
208 0 : Ltmp = L_shl_sat( Ltmp, 5 ); /* Q13 -> Q18*/
209 0 : *gain_preQ = round_fx_sat( Ltmp ); /* Q2*/
210 : }
211 : ELSE
212 : {
213 3370 : IF( Es_pred < 0 )
214 : {
215 35 : Es_pred_loc = shr( negate( Es_pred ), 2 ); /* Q8 */
216 : }
217 : ELSE
218 : {
219 3335 : Es_pred_loc = Es_pred; /* Q8 */
220 3335 : move16();
221 : }
222 :
223 3370 : e_den = norm_s( Es_pred_loc );
224 3370 : m_den = shl( Es_pred_loc, e_den );
225 : /* ensure m_corr < m_den */
226 3370 : test();
227 3370 : IF( m_corr > 0 && m_den > 0 )
228 : {
229 3369 : m_corr = div_s( 16384, m_den ); /* 14 - 8 - e_den */
230 3369 : e_corr = sub( 14 - 8, e_den );
231 3369 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /* Q16 */
232 : }
233 : ELSE
234 : {
235 1 : Ltmp = L_deposit_l( 0 );
236 : }
237 3370 : test();
238 3370 : IF( LE_32( st_fx->core_brate, 42000 ) && GT_32( st_fx->core_brate, ACELP_24k40 ) )
239 : {
240 0 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_32kbps_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
241 : }
242 : ELSE
243 : {
244 3370 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
245 : }
246 3370 : Ltmp = L_mult( stmp, Es_pred_loc ); /* Q0*Q8 -> Q9*/
247 3370 : Ltmp = L_shl( Ltmp, add( e_den, 9 ) ); /* Q18*/
248 3370 : *gain_preQ = round_fx( Ltmp ); /* Q2*/
249 : }
250 3370 : push_indice( st_fx->hBstr, IND_AVQ_GAIN, index, G_AVQ_BITS );
251 :
252 : /*--------------------------------------------------------------*
253 : * Encode and multiplex subvectors into bit-stream
254 : *--------------------------------------------------------------*/
255 :
256 3370 : trgtSvPos = Nsv - 1;
257 3370 : move16();
258 3370 : test();
259 3370 : test();
260 3370 : test();
261 3370 : test();
262 3370 : test();
263 3370 : IF( avq_bit_sFlag && GT_16( nBits, 85 ) && !harm_flag_acelp && ( EQ_16( st_fx->coder_type, GENERIC ) || EQ_16( st_fx->coder_type, TRANSITION ) || EQ_16( st_fx->coder_type, INACTIVE ) ) )
264 : {
265 0 : trgtSvPos = 2;
266 0 : avq_bit_sFlag = 2;
267 0 : move16();
268 0 : move16();
269 : }
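/* Note: when avq_bit_sFlag is set (element_mode above EVS_MONO), more than
   85 AVQ bits are available, the harmonic flag is off and the coder type is
   GENERIC, TRANSITION or INACTIVE, the target subvector position moves from
   the last subvector (Nsv - 1) to index 2 and avq_bit_sFlag is promoted to 2
   before AVQ_encmux_fx() is called. */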
270 :
271 3370 : AVQ_encmux_fx( st_fx->hBstr, -1, x_norm, &nBits, Nsv, nq, avq_bit_sFlag, trgtSvPos );
272 :
273 : /* save # of AVQ unused bits for next subframe */
274 3370 : *unbits = nBits; /* Q0 */
275 3370 : move16();
276 :
277 : /* at the last subframe, write AVQ unused bits */
278 3370 : test();
279 3370 : test();
280 3370 : IF( EQ_16( i_subfr, 4 * L_SUBFR ) && NE_16( st_fx->extl, SWB_BWE_HIGHRATE ) && NE_16( st_fx->extl, FB_BWE_HIGHRATE ) )
281 : {
282 0 : WHILE( *unbits > 0 )
283 : {
284 0 : i = s_min( *unbits, 16 );
285 0 : push_indice( st_fx->hBstr, IND_UNUSED, 0, i );
286 0 : *unbits -= i;
287 : }
288 : }
289 :
290 : /*--------------------------------------------------------------*
291 : * DCT transform
292 : *--------------------------------------------------------------*/
293 :
294 219050 : FOR( i = 0; i < Nsv * WIDTH_BAND; i++ )
295 : {
296 215680 : x_tran[i] = shl_sat( x_norm[i], Q_AVQ_OUT_DEC );
297 215680 : move16();
298 : }
299 :
300 3370 : test();
301 3370 : test();
302 3370 : test();
303 3370 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
304 : {
305 0 : Copy( x_tran, code_preQ, L_SUBFR ); /* Q_AVQ_OUT_DEC */
306 : }
307 : ELSE
308 : {
309 3370 : Qdct = 0;
310 3370 : move16();
311 3370 : edct2_fx( L_SUBFR, 1, x_tran, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
312 : /*qdct = sub(Q_AVQ_OUT_DEC,qdct+Q_AVQ_OUT_DEC);*/
313 3370 : Qdct = negate( Qdct );
314 3370 : Copy_Scale_sig_32_16( out32, code_preQ, L_SUBFR, Qdct ); /* Output in Q_AVQ_OUT_DEC */
315 : /*qdct = Q_AVQ_OUT_DEC;*/
316 : }
317 :
318 : /*--------------------------------------------------------------*
319 : * Preemphasise
320 : *--------------------------------------------------------------*/
321 :
322 : /* in extreme cases at subframe boundaries, lower the preemphasis memory to avoid a saturation */
323 3370 : test();
324 3370 : if ( ( nq[7] != 0 ) && ( GT_16( sub( st_fx->last_nq_preQ, nq[0] ), 7 ) ) )
325 : {
326 : /* *mem_preemp /= 16; */
327 0 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 4 );
328 0 : move16();
329 : }
330 3370 : st_fx->last_nq_preQ = nq[7];
331 3370 : move16();
332 :
333 3370 : PREEMPH_FX( code_preQ, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_preemp_preQ_fx ) );
334 :
335 : /*--------------------------------------------------------------*
336 : * For active segments
337 : * - Zero-memory filtered pre-filter excitation
338 : * - Update of targets and gain_pit
339 : * For inactive segments
340 : * - Update xn[L_subfr-1] for updating the memory of the weighting filter
341 : *--------------------------------------------------------------*/
342 :
343 3370 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
344 : {
345 : /*ftemp = fcode_preQ[0] *fh1[L_SUBFR-1];*/
346 0 : Ltmp = L_mult( code_preQ[0], h1[L_SUBFR - 1] ); /*1+14+shift + Q_AVQ_OUT */
347 0 : FOR( i = 1; i < L_SUBFR; i++ )
348 : {
349 : /*ftemp += fcode_preQ[i] * fh1[L_SUBFR-1-i];*/
350 0 : Ltmp = L_mac( Ltmp, code_preQ[i], h1[L_SUBFR - 1 - i] );
351 : }
352 : /*fxn[L_SUBFR-1] -= *fgain_preQ * ftemp;*/
353 0 : Ltmp = L_shr( Mult_32_16( Ltmp, *gain_preQ ), sub( add( Q_AVQ_OUT_DEC, 2 ), Q_new ) ); /* (2 + 1 + 14 +shift+Q_AVQ_OUT)-(Q_AVQ_OUT+2-Q_new) = 15 + Q_new + shift */
354 0 : xn[L_SUBFR - 1] = round_fx( L_sub( L_mult( xn[L_SUBFR - 1], 32767 ), Ltmp ) ); /* -> Q_new + shift -1 */
355 : }
356 : ELSE
357 : {
358 3370 : conv_fx( code_preQ, h1, x_tran, L_SUBFR );
359 3370 : updt_tar_HR_fx( cn, cn, code_preQ, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
360 :
361 3370 : updt_tar_HR_fx( xn, xn, x_tran, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
362 3370 : *gain_pit = corr_xy1_fx( xn, y1, g_corr, L_SUBFR, 0, &Overflow ); /* Q14 */
363 : /* clip gain if necessary to avoid problems at decoder */
364 3370 : test();
365 3370 : if ( EQ_16( clip_gain, 1 ) && GT_16( *gain_pit, 15565 ) )
366 : {
367 37 : *gain_pit = 15565; /* 0.95 in Q14 */
368 37 : move16();
369 : }
370 3370 : updt_tar_fx( xn, xn2, y1, *gain_pit, L_SUBFR );
371 : }
372 :
373 3370 : st_fx->use_acelp_preq = 1;
374 3370 : move16();
375 :
376 3370 : return;
377 : }
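
For orientation, a minimal floating-point sketch of the prequantizer gain search used in transf_cdbk_enc_fx() above (and in the identical loop of transf_cdbk_enc_ivas_fx() below). The helper name preq_gain_ref and the norm_fac parameter (Es_pred_loc for active frames, gain_code for inactive frames) are illustrative and not part of the 3GPP source, which computes the same ratio with saturating 16/32-bit basic operators before quantizing it.

#include <stddef.h>

/* Normalized correlation between the (possibly DCT-domain) target x_tran[]
 * and the AVQ output x_norm[], divided by the predicted energy term.
 * This is the unquantized value that the code above feeds to usquant_fx()
 * or gain_quant_fx(). */
static float preq_gain_ref( const float x_tran[], const float x_norm[],
                            size_t n, float norm_fac )
{
    float corr = 0.0f, ener = 0.0f;
    size_t i;

    for ( i = 0; i < n; i++ )
    {
        corr += x_tran[i] * x_norm[i];
        ener += x_norm[i] * x_norm[i];
    }
    if ( corr < 0.0f )   /* no negative gains allowed in the quantizer */
    {
        corr = 0.0f;
    }
    if ( ener < 1e-6f )  /* guard against an all-zero AVQ output */
    {
        ener = 1e-6f;
    }
    return ( corr / ener ) / norm_fac;
}
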
378 :
379 :
380 95320 : void transf_cdbk_enc_ivas_fx(
381 : Encoder_State *st_fx, /* i/o: encoder state structure */
382 : const Word16 harm_flag_acelp, /* i : harmonic flag for higher rates ACELP Q0*/
383 : const Word16 i_subfr, /* i : subframe index Q0*/
384 : Word16 cn[], /* i/o: target vector in residual domain Q_new*/
385 : Word16 exc[], /* i/o: pointer to excitation signal frame Q_new*/
386 : const Word16 *p_Aq, /* i : 12k8 Lp coefficient Q12*/
387 : const Word16 Ap[], /* i : weighted LP filter coefficients Q12*/
388 : const Word16 h1[], /* i : weighted filter input response Q15*/
389 : Word16 xn[], /* i/o: target vector Q_new + shift -1*/
390 : Word16 xn2[], /* i/o: target vector for innovation search Q_new + shift -1*/
391 : Word16 y1[], /* i/o: zero-memory filtered adaptive excitation Q_new + shift -1*/
392 : const Word16 y2[], /* i : zero-memory filtered innovative excitation Q9*/
393 : const Word16 Es_pred, /* i : predicted scaled innovation energy Q8*/
394 : Word16 *gain_pit, /* i/o: adaptive excitation gain Q14*/
395 : const Word32 gain_code, /* i : innovative excitation gain Q16*/
396 : Word16 g_corr[], /* o : ACELP correlation values Q15*/
397 : const Word16 clip_gain, /* i : adaptive gain clipping flag Q0*/
398 : Word16 *gain_preQ, /* o : prequantizer excitation gain Q2*/
399 : Word16 code_preQ[], /* o : prequantizer excitation Q_AVQ_OUT_DEC*/
400 : Word16 *unbits, /* o : number of AVQ unused bits Q0*/
401 : const Word16 Q_new, /* i : Current frame scaling */
402 : const Word16 shift /* i : shifting applied to y1, xn,... */
403 : )
404 : {
405 : Word16 i, index, nBits, Nsv, Es_pred_loc;
406 : Word16 x_in[L_SUBFR], x_tran[L_SUBFR], gcode16, stmp;
407 : Word16 e_corr, m_corr, e_ener, m_ener, m_den, e_den;
408 : Word16 x_norm[L_SUBFR + L_SUBFR / WIDTH_BAND];
409 : Word32 L_corr, L_ener, Ltmp, Ltmp1;
410 : Word16 nq[L_SUBFR / WIDTH_BAND];
411 : Word32 out32[L_SUBFR];
412 : Word16 Qdct;
413 : Word16 avq_bit_sFlag;
414 : Word16 trgtSvPos;
415 95320 : Flag Overflow = 0;
416 95320 : move32();
417 :
418 95320 : avq_bit_sFlag = 0;
419 95320 : move16();
420 95320 : if ( st_fx->element_mode > EVS_MONO )
421 : {
422 95320 : avq_bit_sFlag = 1;
423 95320 : move16();
424 : }
425 :
426 : /*--------------------------------------------------------------*
427 : * Set bit-allocation
428 : *--------------------------------------------------------------*/
429 :
430 95320 : Nsv = 8;
431 95320 : move16();
432 95320 : nBits = st_fx->acelp_cfg.AVQ_cdk_bits[i_subfr >> 6]; /* Q0 */
433 95320 : move16();
434 :
435 : /* increase # of AVQ allocated bits by unused bits from the previous subframe */
436 95320 : nBits = add( nBits, *unbits );
437 :
438 : /*--------------------------------------------------------------*
439 : * Compute/Update target
440 : * For inactive frames, find the target in the residual domain
441 : * Deemphasis
442 : *--------------------------------------------------------------*/
443 :
444 95320 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
445 : {
446 12845 : gcode16 = round_fx_sat( L_shl_sat( gain_code, Q_new ) );
447 834925 : FOR( i = 0; i < L_SUBFR; i++ )
448 : {
449 : /*x_tran[i] = xn[i] - *gain_pit * y1[i] - gain_code * y2[i];*/
450 822080 : Ltmp = L_mult( gcode16, y2[i] );
451 822080 : Ltmp = L_shl( Ltmp, add( 5, shift ) );
452 822080 : Ltmp = L_negate( Ltmp );
453 822080 : Ltmp = L_mac( Ltmp, xn[i], 16384 );
454 822080 : Ltmp = L_msu( Ltmp, y1[i], *gain_pit ); /* Q_new + 14 + shift */
455 822080 : Ltmp = L_shl_sat( Ltmp, sub( 1, shift ) ); /* Q_new + 15 */
456 822080 : x_tran[i] = round_fx_sat( Ltmp ); /*Q_new-1 */
457 : }
458 12845 : find_cn_fx( x_tran, Ap, p_Aq, x_in );
459 : }
460 : ELSE
461 : {
462 82475 : updt_tar_fx( cn, x_in, &exc[i_subfr], *gain_pit, L_SUBFR );
463 : }
464 95320 : Deemph2( x_in, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_deemp_preQ_fx ) );
465 :
466 : /*--------------------------------------------------------------*
467 : * DCT-II
468 : *--------------------------------------------------------------*/
469 :
470 95320 : test();
471 95320 : test();
472 95320 : test();
473 95320 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
474 : {
475 55705 : Copy_Scale_sig( x_in, x_tran, L_SUBFR, -Q_MINUS + 1 ); /*Q_new-1 -> Q_new-4*/
476 : /*Copy( x_in, x_tran, L_SUBFR );*/
477 55705 : Qdct = sub( Q_new, Q_MINUS );
478 : }
479 : ELSE
480 : {
481 39615 : Qdct = 0;
482 39615 : move16();
483 39615 : edct2_fx( L_SUBFR, -1, x_in, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
484 39615 : Qdct = negate( Qdct );
485 39615 : Copy_Scale_sig_32_16( out32, x_tran, L_SUBFR, sub( Qdct, Q_MINUS - 1 ) ); /* Output in Q_new-4 */
486 39615 : Qdct = sub( Q_new, Q_MINUS );
487 : }
488 :
489 : /*--------------------------------------------------------------*
490 : * Split algebraic vector quantizer based on RE8 lattice
491 : *--------------------------------------------------------------*/
492 :
493 95320 : AVQ_cod_fx( x_tran, x_norm, nBits, Nsv, 0 );
494 :
495 : /*--------------------------------------------------------------*
496 : * Find prequantizer excitation gain
497 : * Quantize the gain
498 : *--------------------------------------------------------------*/
499 :
500 95320 : L_corr = L_deposit_l( 0 );
501 95320 : L_ener = L_deposit_l( 0 );
502 6195800 : FOR( i = 0; i < Nsv * 8; i++ )
503 : {
504 : /*fcorr += fx_tran[i]*(float)ix_norm[i];*/
505 : /*fener += (float)ix_norm[i]*(float)ix_norm[i];*/
506 6100480 : stmp = shl_sat( x_norm[i], Q_AVQ_OUT );
507 6100480 : L_corr = L_mac_sat( L_corr, x_tran[i], stmp );
508 6100480 : L_ener = L_mac_sat( L_ener, stmp, stmp );
509 : }
510 95320 : L_ener = L_max( L_ener, 1 );
511 :
512 : /* No negative gains allowed in the quantizer*/
513 95320 : L_corr = L_max( L_corr, 0 );
514 :
515 95320 : e_corr = norm_l( L_corr );
516 95320 : m_corr = extract_h( L_shl( L_corr, e_corr ) );
517 95320 : e_corr = sub( 30, add( e_corr, sub( Qdct, Q_AVQ_OUT ) ) );
518 95320 : e_ener = norm_l( L_ener );
519 95320 : m_ener = extract_h( L_shl( L_ener, e_ener ) ); /* 30 - e_ener */
520 95320 : e_ener = sub( 30, e_ener );
521 :
522 95320 : IF( GT_16( m_corr, m_ener ) )
523 : {
524 53335 : m_corr = shr( m_corr, 1 ); /* e_corr + 1 */
525 53335 : e_corr = add( e_corr, 1 );
526 : }
527 95320 : m_corr = div_s( m_corr, m_ener ); /* e_corr - e_ener */
528 95320 : e_corr = sub( e_corr, e_ener );
529 95320 : Ltmp = L_shl_sat( m_corr, s_min( add( e_corr, 1 ), 31 ) ); /* Lgain in Q16 */
530 95320 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
531 : {
532 12845 : Ltmp1 = L_max( gain_code, 1 );
533 12845 : e_den = norm_l( Ltmp1 );
534 12845 : m_den = extract_h( L_shl_sat( Ltmp1, e_den ) );
535 : /* ensure m_corr < m_den */
536 12845 : test();
537 12845 : IF( m_corr > 0 && m_den > 0 )
538 : {
539 12843 : m_corr = div_s( 16384, m_den );
540 12843 : e_corr = sub( 14 + 4, e_den );
541 12843 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /*Q12*/
542 12843 : stmp = round_fx_sat( L_shl_sat( Ltmp, 16 ) ); /* Q12 */
543 : }
544 : ELSE
545 : {
546 2 : stmp = 0;
547 2 : move16();
548 : }
549 12845 : IF( GT_32( st_fx->core_brate, 56000 ) )
550 : {
551 0 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_64k_Q12, G_AVQ_DELTA_INACT_64k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
552 : }
553 12845 : ELSE IF( GT_32( st_fx->core_brate, 42000 ) )
554 : {
555 1150 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_48k_Q12, G_AVQ_DELTA_INACT_48k_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
556 : }
557 : ELSE
558 : {
559 11695 : index = usquant_fx( stmp, &stmp, G_AVQ_MIN_INACT_Q12, G_AVQ_DELTA_INACT_Q12 >> 1, ( 1 << G_AVQ_BITS ) ); /* Q0 */
560 : }
561 12845 : Ltmp = Mult_32_16( gain_code, stmp ); /* Q16 * Q12 - 15 -> Q13*/
562 12845 : Ltmp = L_shl_sat( Ltmp, 5 ); /* Q13 -> Q18*/
563 12845 : *gain_preQ = round_fx_sat( Ltmp ); /* Q2*/
564 12845 : move16();
565 : }
566 : ELSE
567 : {
568 82475 : IF( Es_pred < 0 )
569 : {
570 455 : Es_pred_loc = shr( negate( Es_pred ), 2 );
571 : }
572 : ELSE
573 : {
574 82020 : Es_pred_loc = Es_pred;
575 82020 : move16();
576 : }
577 :
578 82475 : e_den = norm_s( Es_pred_loc );
579 82475 : m_den = shl( Es_pred_loc, e_den );
580 : /* ensure m_corr < m_den */
581 82475 : test();
582 82475 : IF( m_corr > 0 && m_den > 0 )
583 : {
584 80129 : m_corr = div_s( 16384, m_den );
585 80129 : e_corr = sub( 14 - 8, e_den );
586 80129 : Ltmp = L_shr( Mult_32_16( Ltmp, m_corr ), e_corr ); /* Q16 */
587 : }
588 : ELSE
589 : {
590 2346 : Ltmp = L_deposit_l( 0 );
591 : }
592 82475 : test();
593 82475 : IF( LE_32( st_fx->core_brate, 42000 ) && GT_32( st_fx->core_brate, ACELP_24k40 ) )
594 : {
595 75680 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_32kbps_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
596 : }
597 : ELSE
598 : {
599 6795 : index = gain_quant_fx( &Ltmp, &stmp, LG10_G_AVQ_MIN_Q14, LG10_G_AVQ_MAX_Q13, G_AVQ_BITS, &e_den ); /* Q0 */
600 : }
601 82475 : Ltmp = L_mult( stmp, Es_pred_loc ); /* Q0*Q8 -> Q9*/
602 82475 : Ltmp = L_shl( Ltmp, add( e_den, 9 ) ); /* Q18*/
603 82475 : *gain_preQ = round_fx( Ltmp ); /* Q2*/
604 82475 : move16();
605 : }
606 95320 : push_indice( st_fx->hBstr, IND_AVQ_GAIN, index, G_AVQ_BITS );
607 :
608 : /*--------------------------------------------------------------*
609 : * Encode and multiplex subvectors into bit-stream
610 : *--------------------------------------------------------------*/
611 :
612 95320 : trgtSvPos = sub( Nsv, 1 );
613 95320 : move16();
614 95320 : test();
615 95320 : test();
616 95320 : test();
617 95320 : test();
618 95320 : test();
619 95320 : IF( avq_bit_sFlag && GT_16( nBits, 85 ) && !harm_flag_acelp && ( EQ_16( st_fx->coder_type, GENERIC ) || EQ_16( st_fx->coder_type, TRANSITION ) || EQ_16( st_fx->coder_type, INACTIVE ) ) )
620 : {
621 27528 : trgtSvPos = 2;
622 27528 : avq_bit_sFlag = 2;
623 27528 : move16();
624 27528 : move16();
625 : }
626 :
627 95320 : AVQ_encmux_fx( st_fx->hBstr, -1, x_norm, &nBits, Nsv, nq, avq_bit_sFlag, trgtSvPos );
628 :
629 : /* save # of AVQ unused bits for next subframe */
630 95320 : *unbits = nBits;
631 95320 : move16();
632 :
633 : /* at the last subframe, write AVQ unused bits */
634 95320 : test();
635 95320 : test();
636 95320 : IF( EQ_16( i_subfr, 4 * L_SUBFR ) && NE_16( st_fx->extl, SWB_BWE_HIGHRATE ) && NE_16( st_fx->extl, FB_BWE_HIGHRATE ) )
637 : {
638 36587 : WHILE( *unbits > 0 )
639 : {
640 17523 : i = s_min( *unbits, 16 );
641 17523 : push_indice( st_fx->hBstr, IND_UNUSED, 0, i );
642 17523 : *unbits = sub( *unbits, i );
643 17523 : move16();
644 : }
645 : }
646 :
647 : /*--------------------------------------------------------------*
648 : * DCT transform
649 : *--------------------------------------------------------------*/
650 :
651 6195800 : FOR( i = 0; i < Nsv * WIDTH_BAND; i++ )
652 : {
653 6100480 : x_tran[i] = shl_sat( x_norm[i], Q_AVQ_OUT_DEC );
654 6100480 : move16();
655 : }
656 :
657 95320 : test();
658 95320 : test();
659 95320 : test();
660 95320 : IF( NE_16( st_fx->coder_type, INACTIVE ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && !harm_flag_acelp )
661 : {
662 55705 : Copy( x_tran, code_preQ, L_SUBFR ); /* Q_AVQ_OUT_DEC */
663 : }
664 : ELSE
665 : {
666 39615 : Qdct = 0;
667 39615 : move16();
668 39615 : edct2_fx( L_SUBFR, 1, x_tran, out32, &Qdct, ip_edct2_64, w_edct2_64_fx );
669 : /*qdct = sub(Q_AVQ_OUT_DEC,qdct+Q_AVQ_OUT_DEC);*/
670 39615 : Qdct = negate( Qdct );
671 39615 : Copy_Scale_sig_32_16( out32, code_preQ, L_SUBFR, Qdct ); /* Output in Q_AVQ_OUT_DEC */
672 : /*qdct = Q_AVQ_OUT_DEC;*/
673 : }
674 :
675 : /*--------------------------------------------------------------*
676 : * Preemphasise
677 : *--------------------------------------------------------------*/
678 :
679 : /* in extreme cases at subframe boundaries, lower the preemphasis memory to avoid a saturation */
680 95320 : test();
681 95320 : if ( ( nq[7] != 0 ) && ( GT_16( sub( st_fx->last_nq_preQ, nq[0] ), 7 ) ) )
682 : {
683 : /* *mem_preemp /= 16; */
684 22 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 4 );
685 22 : move16();
686 : }
687 95320 : st_fx->last_nq_preQ = nq[7];
688 95320 : move16();
689 :
690 : /* TD pre-quantizer: in extreme cases at subframe boundaries, lower the preemphasis memory to avoid a saturation */
691 95320 : test();
692 95320 : test();
693 95320 : test();
694 95320 : test();
695 95320 : test();
696 95320 : IF( GT_16( st_fx->element_mode, EVS_MONO ) && NE_16( st_fx->coder_type, INACTIVE ) && GE_32( st_fx->core_brate, MIN_BRATE_AVQ_EXC ) && LE_32( st_fx->core_brate, MAX_BRATE_AVQ_EXC_TD ) && !harm_flag_acelp && code_preQ[0] != 0 )
697 : {
698 24926 : IF( GT_16( abs_s( st_fx->last_code_preq ), shl_sat( abs_s( code_preQ[0] ), 4 ) ) )
699 : {
700 0 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 4 );
701 0 : move16();
702 : }
703 24926 : ELSE IF( GT_16( abs_s( st_fx->last_code_preq ), shl_sat( abs_s( code_preQ[0] ), 3 ) ) )
704 : {
705 0 : st_fx->mem_preemp_preQ_fx = shr( st_fx->mem_preemp_preQ_fx, 3 );
706 0 : move16();
707 : }
708 : }
709 :
710 95320 : st_fx->last_code_preq = shr( code_preQ[L_SUBFR - 1], 9 ); // Q0
711 95320 : move16();
712 :
713 95320 : PREEMPH_FX( code_preQ, FAC_PRE_AVQ_FX, L_SUBFR, &( st_fx->mem_preemp_preQ_fx ) );
714 :
715 : /*--------------------------------------------------------------*
716 : * For active segments
717 : * - Zero-memory filtered pre-filter excitation
718 : * - Update of targets and gain_pit
719 : * For inactive segments
720 : * - Update xn[L_subfr-1] for updating the memory of the weighting filter
721 : *--------------------------------------------------------------*/
722 :
723 95320 : IF( EQ_16( st_fx->coder_type, INACTIVE ) )
724 : {
725 : /*ftemp = fcode_preQ[0] *fh1[L_SUBFR-1];*/
726 12845 : Ltmp = L_mult( code_preQ[0], h1[L_SUBFR - 1] ); /*1+14+shift + Q_AVQ_OUT */
727 822080 : FOR( i = 1; i < L_SUBFR; i++ )
728 : {
729 : /*ftemp += fcode_preQ[i] * fh1[L_SUBFR-1-i];*/
730 809235 : Ltmp = L_mac( Ltmp, code_preQ[i], h1[L_SUBFR - 1 - i] );
731 : }
732 : /*fxn[L_SUBFR-1] -= *fgain_preQ * ftemp;*/
733 12845 : Ltmp = L_shr( Mult_32_16( Ltmp, *gain_preQ ), sub( add( Q_AVQ_OUT_DEC, 2 ), Q_new ) ); /* (2 + 1 + 14 +shift+Q_AVQ_OUT)-(Q_AVQ_OUT+2-Q_new) = 15 + Q_new + shift */
734 12845 : xn[L_SUBFR - 1] = round_fx( L_sub( L_mult( xn[L_SUBFR - 1], 32767 ), Ltmp ) ); /* -> Q_new + shift -1 */
735 : }
736 : ELSE
737 : {
738 82475 : conv_fx( code_preQ, h1, x_tran, L_SUBFR );
739 82475 : updt_tar_HR_fx( cn, cn, code_preQ, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
740 :
741 82475 : updt_tar_HR_fx( xn, xn, x_tran, *gain_preQ, sub( Q_new, add( -15 + 2, Q_AVQ_OUT_DEC ) ), L_SUBFR );
742 82475 : *gain_pit = corr_xy1_fx( xn, y1, g_corr, L_SUBFR, 0, &Overflow ); /* Q14 */
743 82475 : move16();
744 : /* clip gain if necessary to avoid problems at decoder */
745 82475 : test();
746 82475 : if ( EQ_16( clip_gain, 1 ) && GT_16( *gain_pit, 15565 ) )
747 : {
748 1925 : *gain_pit = 15565; /* 0.95 in Q14 */
749 1925 : move16();
750 : }
751 82475 : updt_tar_fx( xn, xn2, y1, *gain_pit, L_SUBFR );
752 : }
753 :
754 95320 : st_fx->use_acelp_preq = 1;
755 95320 : move16();
756 :
757 95320 : return;
758 : }
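
Both encoder variants bracket the prequantizer with a de-emphasis of its input (Deemph2) and a pre-emphasis of its output (PREEMPH_FX), using the same FAC_PRE_AVQ_FX factor. The following is a floating-point sketch of that matched filter pair, with fac standing in for the fixed-point constant; the helper names are illustrative, not the library routines.

/* de-emphasis: y[n] = x[n] + fac * y[n-1]; *mem holds y[-1] across subframes */
static void deemph_ref( float x[], int n, float fac, float *mem )
{
    int i;
    for ( i = 0; i < n; i++ )
    {
        x[i] += fac * ( *mem );
        *mem = x[i];
    }
}

/* pre-emphasis: y[n] = x[n] - fac * x[n-1]; *mem holds x[-1] across subframes */
static void preemph_ref( float x[], int n, float fac, float *mem )
{
    int i;
    float prev = *mem, cur;
    for ( i = 0; i < n; i++ )
    {
        cur  = x[i];
        x[i] = cur - fac * prev;
        prev = cur;
    }
    *mem = prev;
}
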
759 :
760 :
761 : /*-------------------------------------------------------------------*
762 : * Find target in residual domain - cn[]
763 : *-------------------------------------------------------------------*/
764 :
765 12845 : static void find_cn_fx(
766 : const Word16 xn[], /* i : target signal Qx*/
767 : const Word16 Ap[], /* i : weighted LP filter coefficients Q12*/
768 : const Word16 *p_Aq, /* i : 12k8 LP coefficients Q12*/
769 : Word16 cn[] /* o : target signal in residual domain Qx*/
770 : )
771 : {
772 : Word16 tmp, tmp_fl[L_SUBFR + M];
773 :
774 12845 : set16_fx( tmp_fl, 0, M );
775 12845 : Copy( xn, tmp_fl + M, L_SUBFR ); /* Qx */
776 12845 : tmp = 0;
777 12845 : move16();
778 12845 : PREEMPH_FX( tmp_fl + M, PREEMPH_FAC_16k, L_SUBFR, &tmp );
779 12845 : syn_filt_s_lc_fx( 0, Ap, tmp_fl + M, tmp_fl + M, L_SUBFR );
780 12845 : Residu3_lc_fx( p_Aq, M, tmp_fl + M, cn, L_SUBFR, 1 );
781 :
782 12845 : return;
783 : }
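
A floating-point sketch of this mapping, assuming LP coefficient arrays with a leading 1.0 at index 0 (the Q12 arrays above use 4096 for 1.0) and zero filter states, as in the routine; find_cn_ref and the local constants are illustrative names only.

/* cn = Aq(z) * { 1/Ap(z) } * preemph( xn ): bring the weighted-domain target
 * back to the residual (excitation) domain with zero filter states. */
#define M_ORDER     16   /* LP order, as M in the listing              */
#define L_SUBFR_REF 64   /* subframe length, as L_SUBFR in the listing */

static void find_cn_ref( const float xn[], const float Ap[], const float Aq[],
                         float preemph_fac, float cn[] )
{
    float buf[M_ORDER + L_SUBFR_REF] = { 0.0f };  /* M zeros of history + subframe */
    float mem = 0.0f;
    int i, j;

    /* pre-emphasis of the target (PREEMPH_FX with a zero memory) */
    for ( i = 0; i < L_SUBFR_REF; i++ )
    {
        buf[M_ORDER + i] = xn[i] - preemph_fac * mem;
        mem = xn[i];
    }

    /* zero-state synthesis filtering through 1/Ap(z); Ap[0] = 1 is skipped */
    for ( i = 0; i < L_SUBFR_REF; i++ )
    {
        for ( j = 1; j <= M_ORDER; j++ )
        {
            buf[M_ORDER + i] -= Ap[j] * buf[M_ORDER + i - j];
        }
    }

    /* residual computation with Aq(z); Aq[0] = 1 */
    for ( i = 0; i < L_SUBFR_REF; i++ )
    {
        cn[i] = buf[M_ORDER + i];
        for ( j = 1; j <= M_ORDER; j++ )
        {
            cn[i] += Aq[j] * buf[M_ORDER + i - j];
        }
    }
}
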
784 :
785 :
786 : /*-----------------------------------------------------------------*
787 : * Transform domain contribution encoding
788 : *-----------------------------------------------------------------*/
789 :
790 : /* o: quantization index Q0*/
791 157601 : Word16 gain_quant_fx(
792 : Word32 *gain, /* i : gain to quantize Q16*/
793 : Word16 *gain16, /* o : quantized gain expg*/
794 : const Word16 c_min, /* i : log10 of lower limit in Q14*/
795 : const Word16 c_max, /* i : log10 of upper limit in Q13*/
796 : const Word16 bits, /* i : number of bits to quantize Q0*/
797 : Word16 *expg /* o : output exponent of gain16 */
798 : )
799 : {
800 : Word16 index, levels;
801 : Word16 c_gain;
802 : Word16 e_tmp, f_tmp, exp;
803 : Word16 tmp, tmp1, tmp2, frac;
804 : Word32 L_tmp;
805 :
806 157601 : levels = shl( 1, bits );
807 : /* Prevent the gain from being smaller than 0.0003. */
808 : /* This avoids an overflow when the gain is very small: */
809 : /* log10 would give a large negative value in Q13 that overflows */
810 : /* in this code (the resulting value of 'index' is not affected). */
811 : /* tmp2 = msu_r(L_deposit_h(c_gain),c_min,16384) */
812 157601 : L_tmp = L_max( *gain, 20 );
813 :
814 : /*c_min = (float)log10(min);*/
815 : /*c_mult = (float) ((levels-1)/(log10(max)-c_min));*/
816 :
817 : /*tmp = c_mult * ((float)log10(*gain) - c_min);
818 : = ((levels-1)/(log10(max)-log10(min)))*((float)log10(*gain) - log10(min));*/
819 :
820 157601 : e_tmp = norm_l( L_tmp );
821 157601 : f_tmp = Log2_norm_lc( L_shl( L_tmp, e_tmp ) );
822 157601 : e_tmp = sub( 30 - 16, e_tmp ); /*Q(min)=16*/
823 157601 : L_tmp = Mpy_32_16( e_tmp, f_tmp, 9864 ); /* Q16 */ /*log10(2) in Q15*/
824 157601 : c_gain = round_fx( L_shl( L_tmp, 13 ) ); /* Q13 */
825 :
826 : /*tmp1 = sub(c_max,c_min); Q14*/
827 : /*tmp2 = sub(c_gain,c_min); Q14*/
828 :
829 157601 : tmp1 = msu_r_sat( L_deposit_h( c_max /*in Q13 already*/ ), c_min, 16384 ); /*Q13*/
830 157601 : tmp2 = msu_r_sat( L_deposit_h( c_gain /*in Q13 already*/ ), c_min, 16384 ); /*Q13*/
831 157601 : IF( tmp1 != 0 )
832 : {
833 157601 : exp = norm_s( tmp1 );
834 157601 : frac = div_s( shl( 1, sub( 14, exp ) ), tmp1 ); /*Q(15-exp)*/
835 157601 : L_tmp = L_mult( tmp2, frac ); /*Q(30-exp)*/
836 157601 : L_tmp = Mult_32_16( L_tmp, sub( levels, 1 ) ); /*Q(15-exp)*/
837 157601 : index = extract_l( L_shr( L_add( L_tmp, shr( 1 << 14, exp ) ), sub( 15, exp ) ) ); /* Q0 */
838 : }
839 : ELSE
840 : {
841 0 : L_tmp = L_mult( tmp2, sub( levels, 1 ) ); /*Q15*/
842 0 : index = extract_l( L_shr( L_add( L_tmp, 1 << 14 ), 15 ) ); /* Q0 */
843 : }
844 :
845 157601 : index = s_max( index, 0 );
846 157601 : index = s_min( index, sub( levels, 1 ) );
847 :
848 : /**gain = (float)pow( 10.0, (((float)index)/c_mult) + c_min );
849 : y = index/c_mult + c_min;
850 : = (index/(levels-1))*(log10(max) - log10(min)) + log10(min);
851 : = z*log10(max) + (1-z)*log10(min)
852 : z = (index/(levels-1))*/
853 157601 : tmp = div_s( index, sub( levels, 1 ) ); /*Q15*/
854 157601 : L_tmp = L_mult( tmp, c_max ); /*Q29*/
855 157601 : L_tmp = L_mac0( L_tmp, sub( 32767, tmp ), c_min ); /*Q29*/
856 :
857 157601 : L_tmp = Mult_32_16( L_tmp, 27213 ); /*Q27, 3.321928 in Q13*/
858 157601 : L_tmp = L_shr( L_tmp, 11 ); /*Q27->Q16*/
859 :
860 157601 : frac = L_Extract_lc( L_tmp, expg ); /* Extract exponent of gcode0 */
861 :
862 157601 : *gain16 = extract_l( Pow2( 14, frac ) ); /* Put 14 as exponent so that */
863 : /* output of Pow2() will be: */
864 : /* 16384 < Pow2() <= 32767 */
865 157601 : *expg = sub( *expg, 14 );
866 157601 : move16();
867 :
868 157601 : return ( index );
869 : }
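
A floating-point reference of this logarithmic gain quantizer, assuming c_min = log10(min) and c_max = log10(max) are already given in the log domain (as the Q14/Q13 constants passed in above are). The fixed-point routine realizes 10^x as Pow2( x * log2(10) ) and returns a mantissa/exponent pair rather than a float; gain_quant_ref is an illustrative name, not part of the 3GPP source.

#include <math.h>

static int gain_quant_ref( float *gain, float c_min, float c_max, int bits )
{
    const int levels = 1 << bits;
    float z;
    int index;

    /* keep the gain away from zero so that log10() stays bounded
       (the fixed-point code clamps at roughly 0.0003 for the same reason) */
    if ( *gain < 3e-4f )
    {
        *gain = 3e-4f;
    }

    /* uniform quantization of log10(gain) over [c_min, c_max] */
    index = (int) floorf( ( levels - 1 ) * ( log10f( *gain ) - c_min )
                          / ( c_max - c_min ) + 0.5f );
    if ( index < 0 )          index = 0;
    if ( index > levels - 1 ) index = levels - 1;

    /* reconstruction: 10^( z*c_max + (1-z)*c_min ) with z = index/(levels-1) */
    z = (float) index / (float) ( levels - 1 );
    *gain = powf( 10.0f, z * c_max + ( 1.0f - z ) * c_min );

    return index;
}
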