Renaming the SKP_ prefix to silk_
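The renamed identifiers in the hunks below are SILK's fixed-point helper macros and utility wrappers; only the prefix changes, not the behavior. As a reading aid, here is a minimal, illustrative C sketch of what a few of the most frequently renamed helpers compute. This is not part of the patch: the real definitions live in the SILK headers and may differ in casting, saturation, and overflow details.

```c
/* Illustrative sketch only: approximate semantics of a few of the renamed
 * fixed-point helpers. The authoritative definitions are the SILK macros;
 * names here carry an ex_ prefix to make clear they are examples. */
#include <stdint.h>

static inline int32_t ex_silk_SMULBB(int32_t a, int32_t b) {
    /* 16x16 -> 32 multiply of the bottom halves of a and b */
    return (int32_t)(int16_t)a * (int32_t)(int16_t)b;
}

static inline int32_t ex_silk_SMLABB(int32_t acc, int32_t a, int32_t b) {
    /* multiply-accumulate variant of SMULBB */
    return acc + ex_silk_SMULBB(a, b);
}

static inline int32_t ex_silk_RSHIFT_ROUND(int32_t a, int32_t shift) {
    /* arithmetic right shift with rounding to nearest */
    return shift == 1 ? (a >> 1) + (a & 1) : ((a >> (shift - 1)) + 1) >> 1;
}

static inline int16_t ex_silk_SAT16(int32_t a) {
    /* saturate to the signed 16-bit range */
    return a > INT16_MAX ? INT16_MAX
         : a < INT16_MIN ? INT16_MIN
         : (int16_t)a;
}
```
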
diff --git a/silk/fixed/silk_LTP_analysis_filter_FIX.c b/silk/fixed/silk_LTP_analysis_filter_FIX.c
index 1d65b8c..7809a32 100644
--- a/silk/fixed/silk_LTP_analysis_filter_FIX.c
+++ b/silk/fixed/silk_LTP_analysis_filter_FIX.c
@@ -62,17 +62,17 @@
             LTP_res_ptr[ i ] = x_ptr[ i ];
 
             /* Long-term prediction */
-            LTP_est = SKP_SMULBB( x_lag_ptr[ LTP_ORDER / 2 ], Btmp_Q14[ 0 ] );
+            LTP_est = silk_SMULBB( x_lag_ptr[ LTP_ORDER / 2 ], Btmp_Q14[ 0 ] );
             for( j = 1; j < LTP_ORDER; j++ ) {
-                LTP_est = SKP_SMLABB_ovflw( LTP_est, x_lag_ptr[ LTP_ORDER / 2 - j ], Btmp_Q14[ j ] );
+                LTP_est = silk_SMLABB_ovflw( LTP_est, x_lag_ptr[ LTP_ORDER / 2 - j ], Btmp_Q14[ j ] );
             }
-            LTP_est = SKP_RSHIFT_ROUND( LTP_est, 14 ); /* round and -> Q0*/
+            LTP_est = silk_RSHIFT_ROUND( LTP_est, 14 ); /* round and -> Q0*/
 
             /* Subtract long-term prediction */
-            LTP_res_ptr[ i ] = ( opus_int16 )SKP_SAT16( ( opus_int32 )x_ptr[ i ] - LTP_est );
+            LTP_res_ptr[ i ] = ( opus_int16 )silk_SAT16( ( opus_int32 )x_ptr[ i ] - LTP_est );
 
             /* Scale residual */
-            LTP_res_ptr[ i ] = SKP_SMULWB( invGains_Q16[ k ], LTP_res_ptr[ i ] );
+            LTP_res_ptr[ i ] = silk_SMULWB( invGains_Q16[ k ], LTP_res_ptr[ i ] );
 
             x_lag_ptr++;
         }
diff --git a/silk/fixed/silk_LTP_scale_ctrl_FIX.c b/silk/fixed/silk_LTP_scale_ctrl_FIX.c
index b6b427f..abd5356 100644
--- a/silk/fixed/silk_LTP_scale_ctrl_FIX.c
+++ b/silk/fixed/silk_LTP_scale_ctrl_FIX.c
@@ -39,15 +39,15 @@
     opus_int round_loss;
 
     /* 1st order high-pass filter */
-    psEnc->HPLTPredCodGain_Q7 = SKP_max_int( psEncCtrl->LTPredCodGain_Q7 - SKP_RSHIFT( psEnc->prevLTPredCodGain_Q7, 1 ), 0 )
-        + SKP_RSHIFT( psEnc->HPLTPredCodGain_Q7, 1 );
+    psEnc->HPLTPredCodGain_Q7 = silk_max_int( psEncCtrl->LTPredCodGain_Q7 - silk_RSHIFT( psEnc->prevLTPredCodGain_Q7, 1 ), 0 )
+        + silk_RSHIFT( psEnc->HPLTPredCodGain_Q7, 1 );
     psEnc->prevLTPredCodGain_Q7 = psEncCtrl->LTPredCodGain_Q7;
 
     /* Only scale if first frame in packet */
     if( psEnc->sCmn.nFramesEncoded == 0 ) {
         round_loss = psEnc->sCmn.PacketLoss_perc + psEnc->sCmn.nFramesPerPacket - 1;
-        psEnc->sCmn.indices.LTP_scaleIndex = (opus_int8)SKP_LIMIT(
-            SKP_SMULWB( SKP_SMULBB( round_loss, psEnc->HPLTPredCodGain_Q7 ), SILK_FIX_CONST( 0.1, 9 ) ), 0, 2 );
+        psEnc->sCmn.indices.LTP_scaleIndex = (opus_int8)silk_LIMIT(
+            silk_SMULWB( silk_SMULBB( round_loss, psEnc->HPLTPredCodGain_Q7 ), SILK_FIX_CONST( 0.1, 9 ) ), 0, 2 );
     } else {
         /* Default is minimum scaling */
         psEnc->sCmn.indices.LTP_scaleIndex = 0;
diff --git a/silk/fixed/silk_corrMatrix_FIX.c b/silk/fixed/silk_corrMatrix_FIX.c
index 20655c3..4475b9c 100644
--- a/silk/fixed/silk_corrMatrix_FIX.c
+++ b/silk/fixed/silk_corrMatrix_FIX.c
@@ -57,13 +57,13 @@
         for( lag = 0; lag < order; lag++ ) {
             inner_prod = 0;
             for( i = 0; i < L; i++ ) {
-                inner_prod += SKP_RSHIFT32( SKP_SMULBB( ptr1[ i ], ptr2[i] ), rshifts );
+                inner_prod += silk_RSHIFT32( silk_SMULBB( ptr1[ i ], ptr2[i] ), rshifts );
             }
             Xt[ lag ] = inner_prod; /* X[:,lag]'*t */
             ptr1--; /* Go to next column of X */
         }
     } else {
-        SKP_assert( rshifts == 0 );
+        silk_assert( rshifts == 0 );
         for( lag = 0; lag < order; lag++ ) {
             Xt[ lag ] = silk_inner_prod_aligned( ptr1, ptr2, L ); /* X[:,lag]'*t */
             ptr1--; /* Go to next column of X */
@@ -88,19 +88,19 @@
     /* Calculate energy to find shift used to fit in 32 bits */
     silk_sum_sqr_shift( &energy, &rshifts_local, x, L + order - 1 );
     /* Add shifts to get the desired head room */
-    head_room_rshifts = SKP_max( head_room - silk_CLZ32( energy ), 0 );
+    head_room_rshifts = silk_max( head_room - silk_CLZ32( energy ), 0 );
 
-    energy = SKP_RSHIFT32( energy, head_room_rshifts );
+    energy = silk_RSHIFT32( energy, head_room_rshifts );
     rshifts_local += head_room_rshifts;
 
     /* Calculate energy of first column (0) of X: X[:,0]'*X[:,0] */
     /* Remove contribution of first order - 1 samples */
     for( i = 0; i < order - 1; i++ ) {
-        energy -= SKP_RSHIFT32( SKP_SMULBB( x[ i ], x[ i ] ), rshifts_local );
+        energy -= silk_RSHIFT32( silk_SMULBB( x[ i ], x[ i ] ), rshifts_local );
     }
     if( rshifts_local < *rshifts ) {
         /* Adjust energy */
-        energy = SKP_RSHIFT32( energy, *rshifts - rshifts_local );
+        energy = silk_RSHIFT32( energy, *rshifts - rshifts_local );
         rshifts_local = *rshifts;
     }
 
@@ -109,8 +109,8 @@
     matrix_ptr( XX, 0, 0, order ) = energy;
     ptr1 = &x[ order - 1 ]; /* First sample of column 0 of X */
     for( j = 1; j < order; j++ ) {
-        energy = SKP_SUB32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ L - j ], ptr1[ L - j ] ), rshifts_local ) );
-        energy = SKP_ADD32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ -j ], ptr1[ -j ] ), rshifts_local ) );
+        energy = silk_SUB32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ L - j ], ptr1[ L - j ] ), rshifts_local ) );
+        energy = silk_ADD32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ -j ], ptr1[ -j ] ), rshifts_local ) );
         matrix_ptr( XX, j, j, order ) = energy;
     }
 
@@ -122,14 +122,14 @@
             /* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
             energy = 0;
             for( i = 0; i < L; i++ ) {
-                energy += SKP_RSHIFT32( SKP_SMULBB( ptr1[ i ], ptr2[i] ), rshifts_local );
+                energy += silk_RSHIFT32( silk_SMULBB( ptr1[ i ], ptr2[i] ), rshifts_local );
             }
             /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
             matrix_ptr( XX, lag, 0, order ) = energy;
             matrix_ptr( XX, 0, lag, order ) = energy;
             for( j = 1; j < ( order - lag ); j++ ) {
-                energy = SKP_SUB32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ), rshifts_local ) );
-                energy = SKP_ADD32( energy, SKP_RSHIFT32( SKP_SMULBB( ptr1[ -j ], ptr2[ -j ] ), rshifts_local ) );
+                energy = silk_SUB32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ), rshifts_local ) );
+                energy = silk_ADD32( energy, silk_RSHIFT32( silk_SMULBB( ptr1[ -j ], ptr2[ -j ] ), rshifts_local ) );
                 matrix_ptr( XX, lag + j, j, order ) = energy;
                 matrix_ptr( XX, j, lag + j, order ) = energy;
             }
@@ -143,8 +143,8 @@
             matrix_ptr( XX, 0, lag, order ) = energy;
             /* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
             for( j = 1; j < ( order - lag ); j++ ) {
-                energy = SKP_SUB32( energy, SKP_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ) );
-                energy = SKP_SMLABB( energy, ptr1[ -j ], ptr2[ -j ] );
+                energy = silk_SUB32( energy, silk_SMULBB( ptr1[ L - j ], ptr2[ L - j ] ) );
+                energy = silk_SMLABB( energy, ptr1[ -j ], ptr2[ -j ] );
                 matrix_ptr( XX, lag + j, j, order ) = energy;
                 matrix_ptr( XX, j, lag + j, order ) = energy;
             }
diff --git a/silk/fixed/silk_encode_frame_FIX.c b/silk/fixed/silk_encode_frame_FIX.c
index 079577b..ad52207 100644
--- a/silk/fixed/silk_encode_frame_FIX.c
+++ b/silk/fixed/silk_encode_frame_FIX.c
@@ -96,7 +96,7 @@
     /*******************************************/
     /* Copy new frame to front of input buffer */
     /*******************************************/
-    SKP_memcpy( x_frame + LA_SHAPE_MS * psEnc->sCmn.fs_kHz, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.frame_length * sizeof( opus_int16 ) );
+    silk_memcpy( x_frame + LA_SHAPE_MS * psEnc->sCmn.fs_kHz, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.frame_length * sizeof( opus_int16 ) );
 
     /*****************************************/
     /* Find pitch lags, initial LPC analysis */
@@ -156,7 +156,7 @@
 TOC(NSQ)
 
     /* Update input buffer */
-    SKP_memmove( psEnc->x_buf, &psEnc->x_buf[ psEnc->sCmn.frame_length ],
+    silk_memmove( psEnc->x_buf, &psEnc->x_buf[ psEnc->sCmn.frame_length ],
         ( psEnc->sCmn.ltp_mem_length + LA_SHAPE_MS * psEnc->sCmn.fs_kHz ) * sizeof( opus_int16 ) );
 
     /* Parameters needed for next frame */
@@ -191,7 +191,7 @@
     psEnc->sCmn.first_frame_after_reset = 0;
     if( ++psEnc->sCmn.nFramesEncoded >= psEnc->sCmn.nFramesPerPacket ) {
         /* Payload size */
-        *pnBytesOut = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        *pnBytesOut = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 
         /* Reset the number of frames in payload buffer */
         psEnc->sCmn.nFramesEncoded = 0;
@@ -203,40 +203,40 @@
 
 #ifdef SAVE_ALL_INTERNAL_DATA
     {
-        SKP_float tmp[ MAX_NB_SUBFR * LTP_ORDER ];
+        silk_float tmp[ MAX_NB_SUBFR * LTP_ORDER ];
         int i;
         DEBUG_STORE_DATA( xf.dat,                   x_frame + LA_SHAPE_MS * psEnc->sCmn.fs_kHz, psEnc->sCmn.frame_length * sizeof( opus_int16 ) );
         DEBUG_STORE_DATA( xfw.dat,                  xfw,                            psEnc->sCmn.frame_length    * sizeof( opus_int16 ) );
         DEBUG_STORE_DATA( pitchL.dat,               sEncCtrl.pitchL,                psEnc->sCmn.nb_subfr        * sizeof( opus_int ) );
         for( i = 0; i < psEnc->sCmn.nb_subfr * LTP_ORDER; i++ ) {
-            tmp[ i ] = (SKP_float)sEncCtrl.LTPCoef_Q14[ i ] / 16384.0f;
+            tmp[ i ] = (silk_float)sEncCtrl.LTPCoef_Q14[ i ] / 16384.0f;
         }
-        DEBUG_STORE_DATA( pitchG_quantized.dat,     tmp,                            psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( SKP_float ) );
+        DEBUG_STORE_DATA( pitchG_quantized.dat,     tmp,                            psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( silk_float ) );
         for( i = 0; i <psEnc->sCmn.predictLPCOrder; i++ ) {
-            tmp[ i ] = (SKP_float)sEncCtrl.PredCoef_Q12[ 1 ][ i ] / 4096.0f;
+            tmp[ i ] = (silk_float)sEncCtrl.PredCoef_Q12[ 1 ][ i ] / 4096.0f;
         }
-        DEBUG_STORE_DATA( PredCoef.dat,             tmp,                            psEnc->sCmn.predictLPCOrder * sizeof( SKP_float ) );
+        DEBUG_STORE_DATA( PredCoef.dat,             tmp,                            psEnc->sCmn.predictLPCOrder * sizeof( silk_float ) );
 
-        tmp[ 0 ] = (SKP_float)sEncCtrl.LTPredCodGain_Q7 / 128.0f;
-        DEBUG_STORE_DATA( LTPredCodGain.dat,        tmp,                            sizeof( SKP_float ) );
-        tmp[ 0 ] = (SKP_float)psEnc->LTPCorr_Q15 / 32768.0f;
-        DEBUG_STORE_DATA( LTPcorr.dat,              tmp,                            sizeof( SKP_float ) );
-        tmp[ 0 ] = (SKP_float)psEnc->sCmn.input_tilt_Q15 / 32768.0f;
-        DEBUG_STORE_DATA( tilt.dat,                 tmp,                            sizeof( SKP_float ) );
+        tmp[ 0 ] = (silk_float)sEncCtrl.LTPredCodGain_Q7 / 128.0f;
+        DEBUG_STORE_DATA( LTPredCodGain.dat,        tmp,                            sizeof( silk_float ) );
+        tmp[ 0 ] = (silk_float)psEnc->LTPCorr_Q15 / 32768.0f;
+        DEBUG_STORE_DATA( LTPcorr.dat,              tmp,                            sizeof( silk_float ) );
+        tmp[ 0 ] = (silk_float)psEnc->sCmn.input_tilt_Q15 / 32768.0f;
+        DEBUG_STORE_DATA( tilt.dat,                 tmp,                            sizeof( silk_float ) );
         for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
-            tmp[ i ] = (SKP_float)sEncCtrl.Gains_Q16[ i ] / 65536.0f;
+            tmp[ i ] = (silk_float)sEncCtrl.Gains_Q16[ i ] / 65536.0f;
         }
-        DEBUG_STORE_DATA( gains.dat,                tmp,                            psEnc->sCmn.nb_subfr * sizeof( SKP_float ) );
+        DEBUG_STORE_DATA( gains.dat,                tmp,                            psEnc->sCmn.nb_subfr * sizeof( silk_float ) );
         DEBUG_STORE_DATA( gains_indices.dat,        &psEnc->sCmn.indices.GainsIndices, psEnc->sCmn.nb_subfr * sizeof( opus_int ) );
-        tmp[ 0 ] = (SKP_float)sEncCtrl.current_SNR_dB_Q7 / 128.0f;
-        DEBUG_STORE_DATA( current_SNR_db.dat,       tmp,                            sizeof( SKP_float ) );
+        tmp[ 0 ] = (silk_float)sEncCtrl.current_SNR_dB_Q7 / 128.0f;
+        DEBUG_STORE_DATA( current_SNR_db.dat,       tmp,                            sizeof( silk_float ) );
         DEBUG_STORE_DATA( quantOffsetType.dat,      &psEnc->sCmn.indices.quantOffsetType, sizeof( opus_int ) );
-        tmp[ 0 ] = (SKP_float)psEnc->sCmn.speech_activity_Q8 / 256.0f;
-        DEBUG_STORE_DATA( speech_activity.dat,      tmp,                            sizeof( SKP_float ) );
+        tmp[ 0 ] = (silk_float)psEnc->sCmn.speech_activity_Q8 / 256.0f;
+        DEBUG_STORE_DATA( speech_activity.dat,      tmp,                            sizeof( silk_float ) );
         for( i = 0; i < VAD_N_BANDS; i++ ) {
-            tmp[ i ] = (SKP_float)psEnc->sCmn.input_quality_bands_Q15[ i ] / 32768.0f;
+            tmp[ i ] = (silk_float)psEnc->sCmn.input_quality_bands_Q15[ i ] / 32768.0f;
         }
-        DEBUG_STORE_DATA( input_quality_bands.dat,  tmp,                       VAD_N_BANDS * sizeof( SKP_float ) );
+        DEBUG_STORE_DATA( input_quality_bands.dat,  tmp,                       VAD_N_BANDS * sizeof( silk_float ) );
         DEBUG_STORE_DATA( signalType.dat,           &psEnc->sCmn.indices.signalType,         sizeof( opus_int8) );
         DEBUG_STORE_DATA( lag_index.dat,            &psEnc->sCmn.indices.lagIndex,           sizeof( opus_int16 ) );
         DEBUG_STORE_DATA( contour_index.dat,        &psEnc->sCmn.indices.contourIndex,       sizeof( opus_int8 ) );
@@ -264,11 +264,11 @@
         psEnc->sCmn.LBRR_flags[ psEnc->sCmn.nFramesEncoded ] = 1;
 
         /* Copy noise shaping quantizer state and quantization indices from regular encoding */
-        SKP_memcpy( &sNSQ_LBRR, &psEnc->sCmn.sNSQ, sizeof( silk_nsq_state ) );
-        SKP_memcpy( psIndices_LBRR, &psEnc->sCmn.indices, sizeof( SideInfoIndices ) );
+        silk_memcpy( &sNSQ_LBRR, &psEnc->sCmn.sNSQ, sizeof( silk_nsq_state ) );
+        silk_memcpy( psIndices_LBRR, &psEnc->sCmn.indices, sizeof( SideInfoIndices ) );
 
         /* Save original gains */
-        SKP_memcpy( TempGains_Q16, psEncCtrl->Gains_Q16, psEnc->sCmn.nb_subfr * sizeof( opus_int32 ) );
+        silk_memcpy( TempGains_Q16, psEncCtrl->Gains_Q16, psEnc->sCmn.nb_subfr * sizeof( opus_int32 ) );
 
         if( psEnc->sCmn.nFramesEncoded == 0 || psEnc->sCmn.LBRR_flags[ psEnc->sCmn.nFramesEncoded - 1 ] == 0 ) {
             /* First frame in packet or previous frame not LBRR coded */
@@ -276,7 +276,7 @@
 
             /* Increase Gains to get target LBRR rate */
             psIndices_LBRR->GainsIndices[ 0 ] = psIndices_LBRR->GainsIndices[ 0 ] + psEnc->sCmn.LBRR_GainIncreases;
-            psIndices_LBRR->GainsIndices[ 0 ] = SKP_min_int( psIndices_LBRR->GainsIndices[ 0 ], N_LEVELS_QGAIN - 1 );
+            psIndices_LBRR->GainsIndices[ 0 ] = silk_min_int( psIndices_LBRR->GainsIndices[ 0 ], N_LEVELS_QGAIN - 1 );
         }
 
         /* Decode to get gains in sync with decoder         */
@@ -300,6 +300,6 @@
         }
 
         /* Restore original gains */
-        SKP_memcpy( psEncCtrl->Gains_Q16, TempGains_Q16, psEnc->sCmn.nb_subfr * sizeof( opus_int32 ) );
+        silk_memcpy( psEncCtrl->Gains_Q16, TempGains_Q16, psEnc->sCmn.nb_subfr * sizeof( opus_int32 ) );
     }
 }
diff --git a/silk/fixed/silk_find_LPC_FIX.c b/silk/fixed/silk_find_LPC_FIX.c
index 44d702d..a1bda7a 100644
--- a/silk/fixed/silk_find_LPC_FIX.c
+++ b/silk/fixed/silk_find_LPC_FIX.c
@@ -83,11 +83,11 @@
         shift = res_tmp_nrg_Q - res_nrg_Q;
         if( shift >= 0 ) {
             if( shift < 32 ) {
-                res_nrg = res_nrg - SKP_RSHIFT( res_tmp_nrg, shift );
+                res_nrg = res_nrg - silk_RSHIFT( res_tmp_nrg, shift );
             }
         } else {
-            SKP_assert( shift > -32 );
-            res_nrg   = SKP_RSHIFT( res_nrg, -shift ) - res_tmp_nrg;
+            silk_assert( shift > -32 );
+            res_nrg   = silk_RSHIFT( res_nrg, -shift ) - res_tmp_nrg;
             res_nrg_Q = res_tmp_nrg_Q;
         }
 
@@ -95,7 +95,7 @@
         silk_A2NLSF( NLSF_Q15, a_tmp_Q16, LPC_order );
 
         /* Search over interpolation indices to find the one with lowest residual energy */
-        res_nrg_2nd = SKP_int32_MAX;
+        res_nrg_2nd = silk_int32_MAX;
         for( k = 3; k >= 0; k-- ) {
             /* Interpolate NLSFs for first half */
             silk_interpolate( NLSF0_Q15, prev_NLSFq_Q15, NLSF_Q15, k, LPC_order );
@@ -112,36 +112,36 @@
             /* Add subframe energies from first half frame */
             shift = rshift0 - rshift1;
             if( shift >= 0 ) {
-                res_nrg1         = SKP_RSHIFT( res_nrg1, shift );
+                res_nrg1         = silk_RSHIFT( res_nrg1, shift );
                 res_nrg_interp_Q = -rshift0;
             } else {
-                res_nrg0         = SKP_RSHIFT( res_nrg0, -shift );
+                res_nrg0         = silk_RSHIFT( res_nrg0, -shift );
                 res_nrg_interp_Q = -rshift1;
             }
-            res_nrg_interp = SKP_ADD32( res_nrg0, res_nrg1 );
+            res_nrg_interp = silk_ADD32( res_nrg0, res_nrg1 );
 
             /* Compare with first half energy without NLSF interpolation, or best interpolated value so far */
             shift = res_nrg_interp_Q - res_nrg_Q;
             if( shift >= 0 ) {
-                if( SKP_RSHIFT( res_nrg_interp, shift ) < res_nrg ) {
-                    isInterpLower = SKP_TRUE;
+                if( silk_RSHIFT( res_nrg_interp, shift ) < res_nrg ) {
+                    isInterpLower = silk_TRUE;
                 } else {
-                    isInterpLower = SKP_FALSE;
+                    isInterpLower = silk_FALSE;
                 }
             } else {
                 if( -shift < 32 ) {
-                    if( res_nrg_interp < SKP_RSHIFT( res_nrg, -shift ) ) {
-                        isInterpLower = SKP_TRUE;
+                    if( res_nrg_interp < silk_RSHIFT( res_nrg, -shift ) ) {
+                        isInterpLower = silk_TRUE;
                     } else {
-                        isInterpLower = SKP_FALSE;
+                        isInterpLower = silk_FALSE;
                     }
                 } else {
-                    isInterpLower = SKP_FALSE;
+                    isInterpLower = silk_FALSE;
                 }
             }
 
             /* Determine whether current interpolated NLSFs are best so far */
-            if( isInterpLower == SKP_TRUE ) {
+            if( isInterpLower == silk_TRUE ) {
                 /* Interpolation has lower residual energy */
                 res_nrg   = res_nrg_interp;
                 res_nrg_Q = res_nrg_interp_Q;
@@ -157,5 +157,5 @@
         silk_A2NLSF( NLSF_Q15, a_Q16, LPC_order );
     }
 
-    SKP_assert( *interpIndex == 4 || ( useInterpNLSFs && !firstFrameAfterReset && nb_subfr == MAX_NB_SUBFR ) );
+    silk_assert( *interpIndex == 4 || ( useInterpNLSFs && !firstFrameAfterReset && nb_subfr == MAX_NB_SUBFR ) );
 }
diff --git a/silk/fixed/silk_find_LTP_FIX.c b/silk/fixed/silk_find_LTP_FIX.c
index 53d0765..eaeaba1 100644
--- a/silk/fixed/silk_find_LTP_FIX.c
+++ b/silk/fixed/silk_find_LTP_FIX.c
@@ -80,7 +80,7 @@
         /* Assure headroom */
         LZs = silk_CLZ32( rr[k] );
         if( LZs < LTP_CORRS_HEAD_ROOM ) {
-            rr[ k ] = SKP_RSHIFT_ROUND( rr[ k ], LTP_CORRS_HEAD_ROOM - LZs );
+            rr[ k ] = silk_RSHIFT_ROUND( rr[ k ], LTP_CORRS_HEAD_ROOM - LZs );
             rr_shifts += ( LTP_CORRS_HEAD_ROOM - LZs );
         }
         corr_rshifts[ k ] = rr_shifts;
@@ -89,14 +89,14 @@
         /* The correlation vector always has lower max abs value than rr and/or RR so head room is assured */
         silk_corrVector_FIX( lag_ptr, r_ptr, subfr_length, LTP_ORDER, Rr, corr_rshifts[ k ] );  /* Rr_fix_ptr   in Q( -corr_rshifts[ k ] ) */
         if( corr_rshifts[ k ] > rr_shifts ) {
-            rr[ k ] = SKP_RSHIFT( rr[ k ], corr_rshifts[ k ] - rr_shifts ); /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
+            rr[ k ] = silk_RSHIFT( rr[ k ], corr_rshifts[ k ] - rr_shifts ); /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
         }
-        SKP_assert( rr[ k ] >= 0 );
+        silk_assert( rr[ k ] >= 0 );
 
         regu = 1;
-        regu = SKP_SMLAWB( regu, rr[ k ], SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
-        regu = SKP_SMLAWB( regu, matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
-        regu = SKP_SMLAWB( regu, matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
+        regu = silk_SMLAWB( regu, rr[ k ], SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
+        regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
+        regu = silk_SMLAWB( regu, matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER ), SILK_FIX_CONST( LTP_DAMPING/3, 16 ) );
         silk_regularize_correlations_FIX( WLTP_ptr, &rr[k], regu, LTP_ORDER );
 
         silk_solve_LDL_FIX( WLTP_ptr, LTP_ORDER, Rr, b_Q16 ); /* WLTP_fix_ptr and Rr_fix_ptr both in Q(-corr_rshifts[k]) */
@@ -108,29 +108,29 @@
         nrg[ k ] = silk_residual_energy16_covar_FIX( b_Q14_ptr, WLTP_ptr, Rr, rr[ k ], LTP_ORDER, 14 ); /* nrg_fix in Q( -corr_rshifts[ k ] ) */
 
         /* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */
-        extra_shifts = SKP_min_int( corr_rshifts[ k ], LTP_CORRS_HEAD_ROOM );
-        denom32 = SKP_LSHIFT_SAT32( SKP_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 + extra_shifts ) + /* Q( -corr_rshifts[ k ] + extra_shifts ) */
-            SKP_RSHIFT( SKP_SMULWB( subfr_length, 655 ), corr_rshifts[ k ] - extra_shifts );    /* Q( -corr_rshifts[ k ] + extra_shifts ) */
-        denom32 = SKP_max( denom32, 1 );
-        SKP_assert( ((opus_int64)Wght_Q15[ k ] << 16 ) < SKP_int32_MAX );                        /* Wght always < 0.5 in Q0 */
-        temp32 = SKP_DIV32( SKP_LSHIFT( ( opus_int32 )Wght_Q15[ k ], 16 ), denom32 );            /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */
-        temp32 = SKP_RSHIFT( temp32, 31 + corr_rshifts[ k ] - extra_shifts - 26 );              /* Q26 */
+        extra_shifts = silk_min_int( corr_rshifts[ k ], LTP_CORRS_HEAD_ROOM );
+        denom32 = silk_LSHIFT_SAT32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 + extra_shifts ) + /* Q( -corr_rshifts[ k ] + extra_shifts ) */
+            silk_RSHIFT( silk_SMULWB( subfr_length, 655 ), corr_rshifts[ k ] - extra_shifts );    /* Q( -corr_rshifts[ k ] + extra_shifts ) */
+        denom32 = silk_max( denom32, 1 );
+        silk_assert( ((opus_int64)Wght_Q15[ k ] << 16 ) < silk_int32_MAX );                        /* Wght always < 0.5 in Q0 */
+        temp32 = silk_DIV32( silk_LSHIFT( ( opus_int32 )Wght_Q15[ k ], 16 ), denom32 );            /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */
+        temp32 = silk_RSHIFT( temp32, 31 + corr_rshifts[ k ] - extra_shifts - 26 );              /* Q26 */
 
         /* Limit temp such that the below scaling never wraps around */
         WLTP_max = 0;
         for( i = 0; i < LTP_ORDER * LTP_ORDER; i++ ) {
-            WLTP_max = SKP_max( WLTP_ptr[ i ], WLTP_max );
+            WLTP_max = silk_max( WLTP_ptr[ i ], WLTP_max );
         }
         lshift = silk_CLZ32( WLTP_max ) - 1 - 3; /* keep 3 bits free for vq_nearest_neighbor_fix */
-        SKP_assert( 26 - 18 + lshift >= 0 );
+        silk_assert( 26 - 18 + lshift >= 0 );
         if( 26 - 18 + lshift < 31 ) {
-            temp32 = SKP_min_32( temp32, SKP_LSHIFT( ( opus_int32 )1, 26 - 18 + lshift ) );
+            temp32 = silk_min_32( temp32, silk_LSHIFT( ( opus_int32 )1, 26 - 18 + lshift ) );
         }
 
         silk_scale_vector32_Q26_lshift_18( WLTP_ptr, temp32, LTP_ORDER * LTP_ORDER ); /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */
 
         w[ k ] = matrix_ptr( WLTP_ptr, LTP_ORDER/2, LTP_ORDER/2, LTP_ORDER ); /* w in Q( 18 - corr_rshifts[ k ] ) */
-        SKP_assert( w[k] >= 0 );
+        silk_assert( w[k] >= 0 );
 
         r_ptr     += subfr_length;
         b_Q14_ptr += LTP_ORDER;
@@ -139,24 +139,24 @@
 
     maxRshifts = 0;
     for( k = 0; k < nb_subfr; k++ ) {
-        maxRshifts = SKP_max_int( corr_rshifts[ k ], maxRshifts );
+        maxRshifts = silk_max_int( corr_rshifts[ k ], maxRshifts );
     }
 
     /* Compute LTP coding gain */
     if( LTPredCodGain_Q7 != NULL ) {
         LPC_LTP_res_nrg = 0;
         LPC_res_nrg     = 0;
-        SKP_assert( LTP_CORRS_HEAD_ROOM >= 2 ); /* Check that no overflow will happen when adding */
+        silk_assert( LTP_CORRS_HEAD_ROOM >= 2 ); /* Check that no overflow will happen when adding */
         for( k = 0; k < nb_subfr; k++ ) {
-            LPC_res_nrg     = SKP_ADD32( LPC_res_nrg,     SKP_RSHIFT( SKP_ADD32( SKP_SMULWB(  rr[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /*  Q( -maxRshifts ) */
-            LPC_LTP_res_nrg = SKP_ADD32( LPC_LTP_res_nrg, SKP_RSHIFT( SKP_ADD32( SKP_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /*  Q( -maxRshifts ) */
+            LPC_res_nrg     = silk_ADD32( LPC_res_nrg,     silk_RSHIFT( silk_ADD32( silk_SMULWB(  rr[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /*  Q( -maxRshifts ) */
+            LPC_LTP_res_nrg = silk_ADD32( LPC_LTP_res_nrg, silk_RSHIFT( silk_ADD32( silk_SMULWB( nrg[ k ], Wght_Q15[ k ] ), 1 ), 1 + ( maxRshifts - corr_rshifts[ k ] ) ) ); /*  Q( -maxRshifts ) */
         }
-        LPC_LTP_res_nrg = SKP_max( LPC_LTP_res_nrg, 1 ); /* avoid division by zero */
+        LPC_LTP_res_nrg = silk_max( LPC_LTP_res_nrg, 1 ); /* avoid division by zero */
 
         div_Q16 = silk_DIV32_varQ( LPC_res_nrg, LPC_LTP_res_nrg, 16 );
-        *LTPredCodGain_Q7 = ( opus_int )SKP_SMULBB( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) );
+        *LTPredCodGain_Q7 = ( opus_int )silk_SMULBB( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) );
 
-        SKP_assert( *LTPredCodGain_Q7 == ( opus_int )SKP_SAT16( SKP_MUL( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) ) ) );
+        silk_assert( *LTPredCodGain_Q7 == ( opus_int )silk_SAT16( silk_MUL( 3, silk_lin2log( div_Q16 ) - ( 16 << 7 ) ) ) );
     }
 
     /* smoothing */
@@ -176,30 +176,30 @@
     max_abs_d_Q14 = 0;
     max_w_bits    = 0;
     for( k = 0; k < nb_subfr; k++ ) {
-        max_abs_d_Q14 = SKP_max_32( max_abs_d_Q14, SKP_abs( d_Q14[ k ] ) );
+        max_abs_d_Q14 = silk_max_32( max_abs_d_Q14, silk_abs( d_Q14[ k ] ) );
         /* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */
         /* Find bits needed in Q( 18 - maxRshifts ) */
-        max_w_bits = SKP_max_32( max_w_bits, 32 - silk_CLZ32( w[ k ] ) + corr_rshifts[ k ] - maxRshifts );
+        max_w_bits = silk_max_32( max_w_bits, 32 - silk_CLZ32( w[ k ] ) + corr_rshifts[ k ] - maxRshifts );
     }
 
-    /* max_abs_d_Q14 = (5 << 15); worst case, i.e. LTP_ORDER * -SKP_int16_MIN */
-    SKP_assert( max_abs_d_Q14 <= ( 5 << 15 ) );
+    /* max_abs_d_Q14 = (5 << 15); worst case, i.e. LTP_ORDER * -silk_int16_MIN */
+    silk_assert( max_abs_d_Q14 <= ( 5 << 15 ) );
 
     /* How many bits is needed for w*d' in Q( 18 - maxRshifts ) in the worst case, of all d_Q14's being equal to max_abs_d_Q14 */
     extra_shifts = max_w_bits + 32 - silk_CLZ32( max_abs_d_Q14 ) - 14;
 
     /* Subtract what we got available; bits in output var plus maxRshifts */
     extra_shifts -= ( 32 - 1 - 2 + maxRshifts ); /* Keep sign bit free as well as 2 bits for accumulation */
-    extra_shifts = SKP_max_int( extra_shifts, 0 );
+    extra_shifts = silk_max_int( extra_shifts, 0 );
 
     maxRshifts_wxtra = maxRshifts + extra_shifts;
 
-    temp32 = SKP_RSHIFT( 262, maxRshifts + extra_shifts ) + 1; /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
+    temp32 = silk_RSHIFT( 262, maxRshifts + extra_shifts ) + 1; /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
     wd = 0;
     for( k = 0; k < nb_subfr; k++ ) {
         /* w has at least 2 bits of headroom so no overflow should happen */
-        temp32 = SKP_ADD32( temp32,                     SKP_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ) );                    /* Q( 18 - maxRshifts_wxtra ) */
-        wd     = SKP_ADD32( wd, SKP_LSHIFT( SKP_SMULWW( SKP_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ), d_Q14[ k ] ), 2 ) ); /* Q( 18 - maxRshifts_wxtra ) */
+        temp32 = silk_ADD32( temp32,                     silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ) );                    /* Q( 18 - maxRshifts_wxtra ) */
+        wd     = silk_ADD32( wd, silk_LSHIFT( silk_SMULWW( silk_RSHIFT( w[ k ], maxRshifts_wxtra - corr_rshifts[ k ] ), d_Q14[ k ] ), 2 ) ); /* Q( 18 - maxRshifts_wxtra ) */
     }
     m_Q12 = silk_DIV32_varQ( wd, temp32, 12 );
 
@@ -207,25 +207,25 @@
     for( k = 0; k < nb_subfr; k++ ) {
         /* w_fix[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */
         if( 2 - corr_rshifts[k] > 0 ) {
-            temp32 = SKP_RSHIFT( w[ k ], 2 - corr_rshifts[ k ] );
+            temp32 = silk_RSHIFT( w[ k ], 2 - corr_rshifts[ k ] );
         } else {
-            temp32 = SKP_LSHIFT_SAT32( w[ k ], corr_rshifts[ k ] - 2 );
+            temp32 = silk_LSHIFT_SAT32( w[ k ], corr_rshifts[ k ] - 2 );
         }
 
-        g_Q26 = SKP_MUL(
-            SKP_DIV32(
+        g_Q26 = silk_MUL(
+            silk_DIV32(
                 SILK_FIX_CONST( LTP_SMOOTHING, 26 ),
-                SKP_RSHIFT( SILK_FIX_CONST( LTP_SMOOTHING, 26 ), 10 ) + temp32 ),                                       /* Q10 */
-            SKP_LSHIFT_SAT32( SKP_SUB_SAT32( ( opus_int32 )m_Q12, SKP_RSHIFT( d_Q14[ k ], 2 ) ), 4 ) );  /* Q16 */
+                silk_RSHIFT( SILK_FIX_CONST( LTP_SMOOTHING, 26 ), 10 ) + temp32 ),                                       /* Q10 */
+            silk_LSHIFT_SAT32( silk_SUB_SAT32( ( opus_int32 )m_Q12, silk_RSHIFT( d_Q14[ k ], 2 ) ), 4 ) );  /* Q16 */
 
         temp32 = 0;
         for( i = 0; i < LTP_ORDER; i++ ) {
-            delta_b_Q14[ i ] = SKP_max_16( b_Q14_ptr[ i ], 1638 );  /* 1638_Q14 = 0.1_Q0 */
+            delta_b_Q14[ i ] = silk_max_16( b_Q14_ptr[ i ], 1638 );  /* 1638_Q14 = 0.1_Q0 */
             temp32 += delta_b_Q14[ i ];                          /* Q14 */
         }
-        temp32 = SKP_DIV32( g_Q26, temp32 ); /* Q14->Q12 */
+        temp32 = silk_DIV32( g_Q26, temp32 ); /* Q14->Q12 */
         for( i = 0; i < LTP_ORDER; i++ ) {
-            b_Q14_ptr[ i ] = SKP_LIMIT_32( ( opus_int32 )b_Q14_ptr[ i ] + SKP_SMULWB( SKP_LSHIFT_SAT32( temp32, 4 ), delta_b_Q14[ i ] ), -16000, 28000 );
+            b_Q14_ptr[ i ] = silk_LIMIT_32( ( opus_int32 )b_Q14_ptr[ i ] + silk_SMULWB( silk_LSHIFT_SAT32( temp32, 4 ), delta_b_Q14[ i ] ), -16000, 28000 );
         }
         b_Q14_ptr += LTP_ORDER;
     }
@@ -240,6 +240,6 @@
     opus_int i;
 
     for( i = 0; i < LTP_ORDER; i++ ) {
-        LTP_coefs_Q14[ i ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( LTP_coefs_Q16[ i ], 2 ) );
+        LTP_coefs_Q14[ i ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND( LTP_coefs_Q16[ i ], 2 ) );
     }
 }
diff --git a/silk/fixed/silk_find_pitch_lags_FIX.c b/silk/fixed/silk_find_pitch_lags_FIX.c
index 69a7fcb..ba69668 100644
--- a/silk/fixed/silk_find_pitch_lags_FIX.c
+++ b/silk/fixed/silk_find_pitch_lags_FIX.c
@@ -55,7 +55,7 @@
     buf_len = psEnc->sCmn.la_pitch + psEnc->sCmn.frame_length + psEnc->sCmn.ltp_mem_length;
 
     /* Safty check */
-    SKP_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length );
+    silk_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length );
 
     x_buf = x - psEnc->sCmn.ltp_mem_length;
 
@@ -73,31 +73,31 @@
     /* Middle un - windowed samples */
     Wsig_ptr  += psEnc->sCmn.la_pitch;
     x_buf_ptr += psEnc->sCmn.la_pitch;
-    SKP_memcpy( Wsig_ptr, x_buf_ptr, ( psEnc->sCmn.pitch_LPC_win_length - SKP_LSHIFT( psEnc->sCmn.la_pitch, 1 ) ) * sizeof( opus_int16 ) );
+    silk_memcpy( Wsig_ptr, x_buf_ptr, ( psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 ) ) * sizeof( opus_int16 ) );
 
     /* Last LA_LTP samples */
-    Wsig_ptr  += psEnc->sCmn.pitch_LPC_win_length - SKP_LSHIFT( psEnc->sCmn.la_pitch, 1 );
-    x_buf_ptr += psEnc->sCmn.pitch_LPC_win_length - SKP_LSHIFT( psEnc->sCmn.la_pitch, 1 );
+    Wsig_ptr  += psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 );
+    x_buf_ptr += psEnc->sCmn.pitch_LPC_win_length - silk_LSHIFT( psEnc->sCmn.la_pitch, 1 );
     silk_apply_sine_window( Wsig_ptr, x_buf_ptr, 2, psEnc->sCmn.la_pitch );
 
     /* Calculate autocorrelation sequence */
     silk_autocorr( auto_corr, &scale, Wsig, psEnc->sCmn.pitch_LPC_win_length, psEnc->sCmn.pitchEstimationLPCOrder + 1 );
 
     /* Add white noise, as fraction of energy */
-    auto_corr[ 0 ] = SKP_SMLAWB( auto_corr[ 0 ], auto_corr[ 0 ], SILK_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) ) + 1;
+    auto_corr[ 0 ] = silk_SMLAWB( auto_corr[ 0 ], auto_corr[ 0 ], SILK_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) ) + 1;
 
     /* Calculate the reflection coefficients using schur */
     res_nrg = silk_schur( rc_Q15, auto_corr, psEnc->sCmn.pitchEstimationLPCOrder );
 
     /* Prediction gain */
-    psEncCtrl->predGain_Q16 = silk_DIV32_varQ( auto_corr[ 0 ], SKP_max_int( res_nrg, 1 ), 16 );
+    psEncCtrl->predGain_Q16 = silk_DIV32_varQ( auto_corr[ 0 ], silk_max_int( res_nrg, 1 ), 16 );
 
     /* Convert reflection coefficients to prediction coefficients */
     silk_k2a( A_Q24, rc_Q15, psEnc->sCmn.pitchEstimationLPCOrder );
 
     /* Convert From 32 bit Q24 to 16 bit Q12 coefs */
     for( i = 0; i < psEnc->sCmn.pitchEstimationLPCOrder; i++ ) {
-        A_Q12[ i ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT( A_Q24[ i ], 12 ) );
+        A_Q12[ i ] = ( opus_int16 )silk_SAT16( silk_RSHIFT( A_Q24[ i ], 12 ) );
     }
 
     /* Do BWE */
@@ -111,11 +111,11 @@
     if( psEnc->sCmn.indices.signalType != TYPE_NO_VOICE_ACTIVITY && psEnc->sCmn.first_frame_after_reset == 0 ) {
         /* Threshold for pitch estimator */
         thrhld_Q15 = SILK_FIX_CONST( 0.6, 15 );
-        thrhld_Q15 = SKP_SMLABB( thrhld_Q15, SILK_FIX_CONST( -0.004, 15 ), psEnc->sCmn.pitchEstimationLPCOrder );
-        thrhld_Q15 = SKP_SMLABB( thrhld_Q15, SILK_FIX_CONST( -0.1,   7  ), psEnc->sCmn.speech_activity_Q8 );
-        thrhld_Q15 = SKP_SMLABB( thrhld_Q15, SILK_FIX_CONST( -0.15,  15 ), SKP_RSHIFT( psEnc->sCmn.prevSignalType, 1 ) );
-        thrhld_Q15 = SKP_SMLAWB( thrhld_Q15, SILK_FIX_CONST( -0.1,   16 ), psEnc->sCmn.input_tilt_Q15 );
-        thrhld_Q15 = SKP_SAT16(  thrhld_Q15 );
+        thrhld_Q15 = silk_SMLABB( thrhld_Q15, SILK_FIX_CONST( -0.004, 15 ), psEnc->sCmn.pitchEstimationLPCOrder );
+        thrhld_Q15 = silk_SMLABB( thrhld_Q15, SILK_FIX_CONST( -0.1,   7  ), psEnc->sCmn.speech_activity_Q8 );
+        thrhld_Q15 = silk_SMLABB( thrhld_Q15, SILK_FIX_CONST( -0.15,  15 ), silk_RSHIFT( psEnc->sCmn.prevSignalType, 1 ) );
+        thrhld_Q15 = silk_SMLAWB( thrhld_Q15, SILK_FIX_CONST( -0.1,   16 ), psEnc->sCmn.input_tilt_Q15 );
+        thrhld_Q15 = silk_SAT16(  thrhld_Q15 );
 
         /*****************************************/
         /* Call pitch estimator                  */
@@ -129,7 +129,7 @@
             psEnc->sCmn.indices.signalType = TYPE_UNVOICED;
         }
     } else {
-        SKP_memset( psEncCtrl->pitchL, 0, sizeof( psEncCtrl->pitchL ) );
+        silk_memset( psEncCtrl->pitchL, 0, sizeof( psEncCtrl->pitchL ) );
         psEnc->sCmn.indices.lagIndex = 0;
         psEnc->sCmn.indices.contourIndex = 0;
         psEnc->LTPCorr_Q15 = 0;
diff --git a/silk/fixed/silk_find_pred_coefs_FIX.c b/silk/fixed/silk_find_pred_coefs_FIX.c
index c5d7d57..f7ad87f 100644
--- a/silk/fixed/silk_find_pred_coefs_FIX.c
+++ b/silk/fixed/silk_find_pred_coefs_FIX.c
@@ -48,33 +48,33 @@
     opus_int         LTP_corrs_rshift[ MAX_NB_SUBFR ];
 
     /* weighting for weighted least squares */
-    min_gain_Q16 = SKP_int32_MAX >> 6;
+    min_gain_Q16 = silk_int32_MAX >> 6;
     for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
-        min_gain_Q16 = SKP_min( min_gain_Q16, psEncCtrl->Gains_Q16[ i ] );
+        min_gain_Q16 = silk_min( min_gain_Q16, psEncCtrl->Gains_Q16[ i ] );
     }
     for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
         /* Divide to Q16 */
-        SKP_assert( psEncCtrl->Gains_Q16[ i ] > 0 );
+        silk_assert( psEncCtrl->Gains_Q16[ i ] > 0 );
         /* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */
         invGains_Q16[ i ] = silk_DIV32_varQ( min_gain_Q16, psEncCtrl->Gains_Q16[ i ], 16 - 2 );
 
         /* Ensure Wght_Q15 a minimum value 1 */
-        invGains_Q16[ i ] = SKP_max( invGains_Q16[ i ], 363 );
+        invGains_Q16[ i ] = silk_max( invGains_Q16[ i ], 363 );
 
         /* Square the inverted gains */
-        SKP_assert( invGains_Q16[ i ] == SKP_SAT16( invGains_Q16[ i ] ) );
-        tmp = SKP_SMULWB( invGains_Q16[ i ], invGains_Q16[ i ] );
-        Wght_Q15[ i ] = SKP_RSHIFT( tmp, 1 );
+        silk_assert( invGains_Q16[ i ] == silk_SAT16( invGains_Q16[ i ] ) );
+        tmp = silk_SMULWB( invGains_Q16[ i ], invGains_Q16[ i ] );
+        Wght_Q15[ i ] = silk_RSHIFT( tmp, 1 );
 
         /* Invert the inverted and normalized gains */
-        local_gains[ i ] = SKP_DIV32( ( 1 << 16 ), invGains_Q16[ i ] );
+        local_gains[ i ] = silk_DIV32( ( 1 << 16 ), invGains_Q16[ i ] );
     }
 
     if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
         /**********/
         /* VOICED */
         /**********/
-        SKP_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 );
+        silk_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 );
 
         /* LTP analysis */
         silk_find_LTP_FIX( psEncCtrl->LTPCoef_Q14, WLTP, &psEncCtrl->LTPredCodGain_Q7,
@@ -106,7 +106,7 @@
             x_ptr     += psEnc->sCmn.subfr_length;
         }
 
-        SKP_memset( psEncCtrl->LTPCoef_Q14, 0, psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( opus_int16 ) );
+        silk_memset( psEncCtrl->LTPCoef_Q14, 0, psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( opus_int16 ) );
         psEncCtrl->LTPredCodGain_Q7 = 0;
     }
 
@@ -127,5 +127,5 @@
         psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr, psEnc->sCmn.predictLPCOrder );
 
     /* Copy to prediction struct for use in next frame for fluctuation reduction */
-    SKP_memcpy( psEnc->sCmn.prev_NLSFq_Q15, NLSF_Q15, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) );
+    silk_memcpy( psEnc->sCmn.prev_NLSFq_Q15, NLSF_Q15, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) );
 }
diff --git a/silk/fixed/silk_noise_shape_analysis_FIX.c b/silk/fixed/silk_noise_shape_analysis_FIX.c
index ea26832..c4789e6 100644
--- a/silk/fixed/silk_noise_shape_analysis_FIX.c
+++ b/silk/fixed/silk_noise_shape_analysis_FIX.c
@@ -45,9 +45,9 @@
     lambda_Q16 = -lambda_Q16;
     gain_Q24 = coefs_Q24[ order - 1 ];
     for( i = order - 2; i >= 0; i-- ) {
-        gain_Q24 = SKP_SMLAWB( coefs_Q24[ i ], gain_Q24, lambda_Q16 );
+        gain_Q24 = silk_SMLAWB( coefs_Q24[ i ], gain_Q24, lambda_Q16 );
     }
-    gain_Q24  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), gain_Q24, -lambda_Q16 );
+    gain_Q24  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), gain_Q24, -lambda_Q16 );
     return silk_INVERSE32_varQ( gain_Q24, 40 );
 }
 
@@ -67,25 +67,25 @@
     /* Convert to monic coefficients */
     lambda_Q16 = -lambda_Q16;
     for( i = order - 1; i > 0; i-- ) {
-        coefs_syn_Q24[ i - 1 ] = SKP_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
-        coefs_ana_Q24[ i - 1 ] = SKP_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
+        coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
+        coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
     }
     lambda_Q16 = -lambda_Q16;
-    nom_Q16  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -lambda_Q16,        lambda_Q16 );
-    den_Q24  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 );
+    nom_Q16  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -lambda_Q16,        lambda_Q16 );
+    den_Q24  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 );
     gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
-    den_Q24  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 );
+    den_Q24  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 );
     gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
     for( i = 0; i < order; i++ ) {
-        coefs_syn_Q24[ i ] = SKP_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
-        coefs_ana_Q24[ i ] = SKP_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
+        coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
+        coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
     }
 
     for( iter = 0; iter < 10; iter++ ) {
         /* Find maximum absolute value */
         maxabs_Q24 = -1;
         for( i = 0; i < order; i++ ) {
-            tmp = SKP_max( SKP_abs_int32( coefs_syn_Q24[ i ] ), SKP_abs_int32( coefs_ana_Q24[ i ] ) );
+            tmp = silk_max( silk_abs_int32( coefs_syn_Q24[ i ] ), silk_abs_int32( coefs_ana_Q24[ i ] ) );
             if( tmp > maxabs_Q24 ) {
                 maxabs_Q24 = tmp;
                 ind = i;
@@ -98,41 +98,41 @@
 
         /* Convert back to true warped coefficients */
         for( i = 1; i < order; i++ ) {
-            coefs_syn_Q24[ i - 1 ] = SKP_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
-            coefs_ana_Q24[ i - 1 ] = SKP_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
+            coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
+            coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
         }
         gain_syn_Q16 = silk_INVERSE32_varQ( gain_syn_Q16, 32 );
         gain_ana_Q16 = silk_INVERSE32_varQ( gain_ana_Q16, 32 );
         for( i = 0; i < order; i++ ) {
-            coefs_syn_Q24[ i ] = SKP_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
-            coefs_ana_Q24[ i ] = SKP_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
+            coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
+            coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
         }
 
         /* Apply bandwidth expansion */
         chirp_Q16 = SILK_FIX_CONST( 0.99, 16 ) - silk_DIV32_varQ(
-            SKP_SMULWB( maxabs_Q24 - limit_Q24, SKP_SMLABB( SILK_FIX_CONST( 0.8, 10 ), SILK_FIX_CONST( 0.1, 10 ), iter ) ),
-            SKP_MUL( maxabs_Q24, ind + 1 ), 22 );
+            silk_SMULWB( maxabs_Q24 - limit_Q24, silk_SMLABB( SILK_FIX_CONST( 0.8, 10 ), SILK_FIX_CONST( 0.1, 10 ), iter ) ),
+            silk_MUL( maxabs_Q24, ind + 1 ), 22 );
         silk_bwexpander_32( coefs_syn_Q24, order, chirp_Q16 );
         silk_bwexpander_32( coefs_ana_Q24, order, chirp_Q16 );
 
         /* Convert to monic warped coefficients */
         lambda_Q16 = -lambda_Q16;
         for( i = order - 1; i > 0; i-- ) {
-            coefs_syn_Q24[ i - 1 ] = SKP_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
-            coefs_ana_Q24[ i - 1 ] = SKP_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
+            coefs_syn_Q24[ i - 1 ] = silk_SMLAWB( coefs_syn_Q24[ i - 1 ], coefs_syn_Q24[ i ], lambda_Q16 );
+            coefs_ana_Q24[ i - 1 ] = silk_SMLAWB( coefs_ana_Q24[ i - 1 ], coefs_ana_Q24[ i ], lambda_Q16 );
         }
         lambda_Q16 = -lambda_Q16;
-        nom_Q16  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -lambda_Q16,        lambda_Q16 );
-        den_Q24  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 );
+        nom_Q16  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 16 ), -lambda_Q16,        lambda_Q16 );
+        den_Q24  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_syn_Q24[ 0 ], lambda_Q16 );
         gain_syn_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
-        den_Q24  = SKP_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 );
+        den_Q24  = silk_SMLAWB( SILK_FIX_CONST( 1.0, 24 ), coefs_ana_Q24[ 0 ], lambda_Q16 );
         gain_ana_Q16 = silk_DIV32_varQ( nom_Q16, den_Q24, 24 );
         for( i = 0; i < order; i++ ) {
-            coefs_syn_Q24[ i ] = SKP_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
-            coefs_ana_Q24[ i ] = SKP_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
+            coefs_syn_Q24[ i ] = silk_SMULWW( gain_syn_Q16, coefs_syn_Q24[ i ] );
+            coefs_ana_Q24[ i ] = silk_SMULWW( gain_ana_Q16, coefs_ana_Q24[ i ] );
         }
     }
-    SKP_assert( 0 );
+    silk_assert( 0 );
 }
 
 /**************************************************************/
@@ -167,29 +167,29 @@
     SNR_adj_dB_Q7 = psEnc->sCmn.SNR_dB_Q7;
 
     /* Input quality is the average of the quality in the lowest two VAD bands */
-    psEncCtrl->input_quality_Q14 = ( opus_int )SKP_RSHIFT( ( opus_int32 )psEnc->sCmn.input_quality_bands_Q15[ 0 ]
+    psEncCtrl->input_quality_Q14 = ( opus_int )silk_RSHIFT( ( opus_int32 )psEnc->sCmn.input_quality_bands_Q15[ 0 ]
         + psEnc->sCmn.input_quality_bands_Q15[ 1 ], 2 );
 
     /* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */
-    psEncCtrl->coding_quality_Q14 = SKP_RSHIFT( silk_sigm_Q15( SKP_RSHIFT_ROUND( SNR_adj_dB_Q7 -
+    psEncCtrl->coding_quality_Q14 = silk_RSHIFT( silk_sigm_Q15( silk_RSHIFT_ROUND( SNR_adj_dB_Q7 -
         SILK_FIX_CONST( 18.0, 7 ), 4 ) ), 1 );
 
     /* Reduce coding SNR during low speech activity */
     if( psEnc->sCmn.useCBR == 0 ) {
         b_Q8 = SILK_FIX_CONST( 1.0, 8 ) - psEnc->sCmn.speech_activity_Q8;
-        b_Q8 = SKP_SMULWB( SKP_LSHIFT( b_Q8, 8 ), b_Q8 );
-        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7,
-            SKP_SMULBB( SILK_FIX_CONST( -BG_SNR_DECR_dB, 7 ) >> ( 4 + 1 ), b_Q8 ),                                       /* Q11*/
-            SKP_SMULWB( SILK_FIX_CONST( 1.0, 14 ) + psEncCtrl->input_quality_Q14, psEncCtrl->coding_quality_Q14 ) );     /* Q12*/
+        b_Q8 = silk_SMULWB( silk_LSHIFT( b_Q8, 8 ), b_Q8 );
+        SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7,
+            silk_SMULBB( SILK_FIX_CONST( -BG_SNR_DECR_dB, 7 ) >> ( 4 + 1 ), b_Q8 ),                                       /* Q11*/
+            silk_SMULWB( SILK_FIX_CONST( 1.0, 14 ) + psEncCtrl->input_quality_Q14, psEncCtrl->coding_quality_Q14 ) );     /* Q12*/
     }
 
     if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
         /* Reduce gains for periodic signals */
-        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( HARM_SNR_INCR_dB, 8 ), psEnc->LTPCorr_Q15 );
+        SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( HARM_SNR_INCR_dB, 8 ), psEnc->LTPCorr_Q15 );
     } else {
         /* For unvoiced signals and low-quality input, adjust the quality slower than SNR_dB setting */
-        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7,
-            SKP_SMLAWB( SILK_FIX_CONST( 6.0, 9 ), -SILK_FIX_CONST( 0.4, 18 ), psEnc->sCmn.SNR_dB_Q7 ),
+        SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7,
+            silk_SMLAWB( SILK_FIX_CONST( 6.0, 9 ), -SILK_FIX_CONST( 0.4, 18 ), psEnc->sCmn.SNR_dB_Q7 ),
             SILK_FIX_CONST( 1.0, 14 ) - psEncCtrl->input_quality_Q14 );
     }
 
@@ -203,23 +203,23 @@
         psEncCtrl->sparseness_Q8 = 0;
     } else {
         /* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
-        nSamples = SKP_LSHIFT( psEnc->sCmn.fs_kHz, 1 );
+        nSamples = silk_LSHIFT( psEnc->sCmn.fs_kHz, 1 );
         energy_variation_Q7 = 0;
         log_energy_prev_Q7  = 0;
         pitch_res_ptr = pitch_res;
-        for( k = 0; k < SKP_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) {
+        for( k = 0; k < silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) {
             silk_sum_sqr_shift( &nrg, &scale, pitch_res_ptr, nSamples );
-            nrg += SKP_RSHIFT( nSamples, scale );           /* Q(-scale)*/
+            nrg += silk_RSHIFT( nSamples, scale );           /* Q(-scale)*/
 
             log_energy_Q7 = silk_lin2log( nrg );
             if( k > 0 ) {
-                energy_variation_Q7 += SKP_abs( log_energy_Q7 - log_energy_prev_Q7 );
+                energy_variation_Q7 += silk_abs( log_energy_Q7 - log_energy_prev_Q7 );
             }
             log_energy_prev_Q7 = log_energy_Q7;
             pitch_res_ptr += nSamples;
         }
 
-        psEncCtrl->sparseness_Q8 = SKP_RSHIFT( silk_sigm_Q15( SKP_SMULWB( energy_variation_Q7 -
+        psEncCtrl->sparseness_Q8 = silk_RSHIFT( silk_sigm_Q15( silk_SMULWB( energy_variation_Q7 -
             SILK_FIX_CONST( 5.0, 7 ), SILK_FIX_CONST( 0.1, 16 ) ) ), 7 );
 
         /* Set quantization offset depending on sparseness measure */
@@ -230,26 +230,26 @@
         }
 
         /* Increase coding SNR for sparse signals */
-        SNR_adj_dB_Q7 = SKP_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( SPARSE_SNR_INCR_dB, 15 ), psEncCtrl->sparseness_Q8 - SILK_FIX_CONST( 0.5, 8 ) );
+        SNR_adj_dB_Q7 = silk_SMLAWB( SNR_adj_dB_Q7, SILK_FIX_CONST( SPARSE_SNR_INCR_dB, 15 ), psEncCtrl->sparseness_Q8 - SILK_FIX_CONST( 0.5, 8 ) );
     }
 
     /*******************************/
     /* Control bandwidth expansion */
     /*******************************/
     /* More BWE for signals with high prediction gain */
-    strength_Q16 = SKP_SMULWB( psEncCtrl->predGain_Q16, SILK_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) );
+    strength_Q16 = silk_SMULWB( psEncCtrl->predGain_Q16, SILK_FIX_CONST( FIND_PITCH_WHITE_NOISE_FRACTION, 16 ) );
     BWExp1_Q16 = BWExp2_Q16 = silk_DIV32_varQ( SILK_FIX_CONST( BANDWIDTH_EXPANSION, 16 ),
-        SKP_SMLAWW( SILK_FIX_CONST( 1.0, 16 ), strength_Q16, strength_Q16 ), 16 );
-    delta_Q16  = SKP_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - SKP_SMULBB( 3, psEncCtrl->coding_quality_Q14 ),
+        silk_SMLAWW( SILK_FIX_CONST( 1.0, 16 ), strength_Q16, strength_Q16 ), 16 );
+    delta_Q16  = silk_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - silk_SMULBB( 3, psEncCtrl->coding_quality_Q14 ),
         SILK_FIX_CONST( LOW_RATE_BANDWIDTH_EXPANSION_DELTA, 16 ) );
-    BWExp1_Q16 = SKP_SUB32( BWExp1_Q16, delta_Q16 );
-    BWExp2_Q16 = SKP_ADD32( BWExp2_Q16, delta_Q16 );
+    BWExp1_Q16 = silk_SUB32( BWExp1_Q16, delta_Q16 );
+    BWExp2_Q16 = silk_ADD32( BWExp2_Q16, delta_Q16 );
     /* BWExp1 will be applied after BWExp2, so make it relative */
-    BWExp1_Q16 = SKP_DIV32_16( SKP_LSHIFT( BWExp1_Q16, 14 ), SKP_RSHIFT( BWExp2_Q16, 2 ) );
+    BWExp1_Q16 = silk_DIV32_16( silk_LSHIFT( BWExp1_Q16, 14 ), silk_RSHIFT( BWExp2_Q16, 2 ) );
 
     if( psEnc->sCmn.warping_Q16 > 0 ) {
         /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */
-        warping_Q16 = SKP_SMLAWB( psEnc->sCmn.warping_Q16, psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( 0.01, 18 ) );
+        warping_Q16 = silk_SMLAWB( psEnc->sCmn.warping_Q16, psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( 0.01, 18 ) );
     } else {
         warping_Q16 = 0;
     }
@@ -261,11 +261,11 @@
         /* Apply window: sine slope followed by flat part followed by cosine slope */
         opus_int shift, slope_part, flat_part;
         flat_part = psEnc->sCmn.fs_kHz * 3;
-        slope_part = SKP_RSHIFT( psEnc->sCmn.shapeWinLength - flat_part, 1 );
+        slope_part = silk_RSHIFT( psEnc->sCmn.shapeWinLength - flat_part, 1 );
 
         silk_apply_sine_window( x_windowed, x_ptr, 1, slope_part );
         shift = slope_part;
-        SKP_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(opus_int16) );
+        silk_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(opus_int16) );
         shift += flat_part;
         silk_apply_sine_window( x_windowed + shift, x_ptr + shift, 2, slope_part );
 
@@ -281,19 +281,19 @@
         }
 
         /* Add white noise, as a fraction of energy */
-        auto_corr[0] = SKP_ADD32( auto_corr[0], SKP_max_32( SKP_SMULWB( SKP_RSHIFT( auto_corr[ 0 ], 4 ),
+        auto_corr[0] = silk_ADD32( auto_corr[0], silk_max_32( silk_SMULWB( silk_RSHIFT( auto_corr[ 0 ], 4 ),
             SILK_FIX_CONST( SHAPE_WHITE_NOISE_FRACTION, 20 ) ), 1 ) );
 
         /* Calculate the reflection coefficients using schur */
         nrg = silk_schur64( refl_coef_Q16, auto_corr, psEnc->sCmn.shapingLPCOrder );
-        SKP_assert( nrg >= 0 );
+        silk_assert( nrg >= 0 );
 
         /* Convert reflection coefficients to prediction coefficients */
         silk_k2a_Q16( AR2_Q24, refl_coef_Q16, psEnc->sCmn.shapingLPCOrder );
 
         Qnrg = -scale;          /* range: -12...30*/
-        SKP_assert( Qnrg >= -12 );
-        SKP_assert( Qnrg <=  30 );
+        silk_assert( Qnrg >= -12 );
+        silk_assert( Qnrg <=  30 );
 
         /* Make sure that Qnrg is an even number */
         if( Qnrg & 1 ) {
@@ -307,15 +307,15 @@
         sqrt_nrg[ k ] = tmp32;
         Qnrg_vec[ k ] = Qnrg;
 
-        psEncCtrl->Gains_Q16[ k ] = SKP_LSHIFT_SAT32( tmp32, 16 - Qnrg );
+        psEncCtrl->Gains_Q16[ k ] = silk_LSHIFT_SAT32( tmp32, 16 - Qnrg );
 
         if( psEnc->sCmn.warping_Q16 > 0 ) {
             /* Adjust gain for warping */
             gain_mult_Q16 = warped_gain( AR2_Q24, warping_Q16, psEnc->sCmn.shapingLPCOrder );
-            SKP_assert( psEncCtrl->Gains_Q16[ k ] >= 0 );
-            psEncCtrl->Gains_Q16[ k ] = SKP_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 );
+            silk_assert( psEncCtrl->Gains_Q16[ k ] >= 0 );
+            psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 );
             if( psEncCtrl->Gains_Q16[ k ] < 0 ) {
-                psEncCtrl->Gains_Q16[ k ] = SKP_int32_MAX;
+                psEncCtrl->Gains_Q16[ k ] = silk_int32_MAX;
             }
         }
 
@@ -323,10 +323,10 @@
         silk_bwexpander_32( AR2_Q24, psEnc->sCmn.shapingLPCOrder, BWExp2_Q16 );
 
         /* Compute noise shaping filter coefficients */
-        SKP_memcpy( AR1_Q24, AR2_Q24, psEnc->sCmn.shapingLPCOrder * sizeof( opus_int32 ) );
+        silk_memcpy( AR1_Q24, AR2_Q24, psEnc->sCmn.shapingLPCOrder * sizeof( opus_int32 ) );
 
         /* Bandwidth expansion for analysis filter shaping */
-        SKP_assert( BWExp1_Q16 <= SILK_FIX_CONST( 1.0, 16 ) );
+        silk_assert( BWExp1_Q16 <= SILK_FIX_CONST( 1.0, 16 ) );
         silk_bwexpander_32( AR1_Q24, psEnc->sCmn.shapingLPCOrder, BWExp1_Q16 );
 
         /* Ratio of prediction gains, in energy domain */
@@ -334,7 +334,7 @@
         silk_LPC_inverse_pred_gain_Q24( &nrg,         AR1_Q24, psEnc->sCmn.shapingLPCOrder );
 
         /*psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;*/
-        pre_nrg_Q30 = SKP_LSHIFT32( SKP_SMULWB( pre_nrg_Q30, SILK_FIX_CONST( 0.7, 15 ) ), 1 );
+        pre_nrg_Q30 = silk_LSHIFT32( silk_SMULWB( pre_nrg_Q30, SILK_FIX_CONST( 0.7, 15 ) ), 1 );
         psEncCtrl->GainsPre_Q14[ k ] = ( opus_int ) SILK_FIX_CONST( 0.3, 14 ) + silk_DIV32_varQ( pre_nrg_Q30, nrg, 14 );
 
         /* Convert to monic warped prediction coefficients and limit absolute values */
@@ -342,8 +342,8 @@
 
         /* Convert from Q24 to Q13 and store in int16 */
         for( i = 0; i < psEnc->sCmn.shapingLPCOrder; i++ ) {
-            psEncCtrl->AR1_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( AR1_Q24[ i ], 11 ) );
-            psEncCtrl->AR2_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( AR2_Q24[ i ], 11 ) );
+            psEncCtrl->AR1_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR1_Q24[ i ], 11 ) );
+            psEncCtrl->AR2_Q13[ k * MAX_SHAPE_LPC_ORDER + i ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( AR2_Q24[ i ], 11 ) );
         }
     }
 
@@ -351,47 +351,47 @@
     /* Gain tweaking */
     /*****************/
     /* Increase gains during low speech activity and put lower limit on gains */
-    gain_mult_Q16 = silk_log2lin( -SKP_SMLAWB( -SILK_FIX_CONST( 16.0, 7 ), SNR_adj_dB_Q7, SILK_FIX_CONST( 0.16, 16 ) ) );
-    gain_add_Q16  = silk_log2lin(  SKP_SMLAWB(  SILK_FIX_CONST( 16.0, 7 ), SILK_FIX_CONST( MIN_QGAIN_DB, 7 ), SILK_FIX_CONST( 0.16, 16 ) ) );
-    SKP_assert( gain_mult_Q16 > 0 );
+    gain_mult_Q16 = silk_log2lin( -silk_SMLAWB( -SILK_FIX_CONST( 16.0, 7 ), SNR_adj_dB_Q7, SILK_FIX_CONST( 0.16, 16 ) ) );
+    gain_add_Q16  = silk_log2lin(  silk_SMLAWB(  SILK_FIX_CONST( 16.0, 7 ), SILK_FIX_CONST( MIN_QGAIN_DB, 7 ), SILK_FIX_CONST( 0.16, 16 ) ) );
+    silk_assert( gain_mult_Q16 > 0 );
     for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
-        psEncCtrl->Gains_Q16[ k ] = SKP_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 );
-        SKP_assert( psEncCtrl->Gains_Q16[ k ] >= 0 );
-        psEncCtrl->Gains_Q16[ k ] = SKP_ADD_POS_SAT32( psEncCtrl->Gains_Q16[ k ], gain_add_Q16 );
+        psEncCtrl->Gains_Q16[ k ] = silk_SMULWW( psEncCtrl->Gains_Q16[ k ], gain_mult_Q16 );
+        silk_assert( psEncCtrl->Gains_Q16[ k ] >= 0 );
+        psEncCtrl->Gains_Q16[ k ] = silk_ADD_POS_SAT32( psEncCtrl->Gains_Q16[ k ], gain_add_Q16 );
     }
 
-    gain_mult_Q16 = SILK_FIX_CONST( 1.0, 16 ) + SKP_RSHIFT_ROUND( SKP_MLA( SILK_FIX_CONST( INPUT_TILT, 26 ),
+    gain_mult_Q16 = SILK_FIX_CONST( 1.0, 16 ) + silk_RSHIFT_ROUND( silk_MLA( SILK_FIX_CONST( INPUT_TILT, 26 ),
         psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( HIGH_RATE_INPUT_TILT, 12 ) ), 10 );
     for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
-        psEncCtrl->GainsPre_Q14[ k ] = SKP_SMULWB( gain_mult_Q16, psEncCtrl->GainsPre_Q14[ k ] );
+        psEncCtrl->GainsPre_Q14[ k ] = silk_SMULWB( gain_mult_Q16, psEncCtrl->GainsPre_Q14[ k ] );
     }
 
     /************************************************/
     /* Control low-frequency shaping and noise tilt */
     /************************************************/
     /* Less low frequency shaping for noisy inputs */
-    strength_Q16 = SKP_MUL( SILK_FIX_CONST( LOW_FREQ_SHAPING, 4 ), SKP_SMLAWB( SILK_FIX_CONST( 1.0, 12 ),
+    strength_Q16 = silk_MUL( SILK_FIX_CONST( LOW_FREQ_SHAPING, 4 ), silk_SMLAWB( SILK_FIX_CONST( 1.0, 12 ),
         SILK_FIX_CONST( LOW_QUALITY_LOW_FREQ_SHAPING_DECR, 13 ), psEnc->sCmn.input_quality_bands_Q15[ 0 ] - SILK_FIX_CONST( 1.0, 15 ) ) );
-    strength_Q16 = SKP_RSHIFT( SKP_MUL( strength_Q16, psEnc->sCmn.speech_activity_Q8 ), 8 );
+    strength_Q16 = silk_RSHIFT( silk_MUL( strength_Q16, psEnc->sCmn.speech_activity_Q8 ), 8 );
     if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
         /* Reduce low frequencies quantization noise for periodic signals, depending on pitch lag */
         /*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/
-        opus_int fs_kHz_inv = SKP_DIV32_16( SILK_FIX_CONST( 0.2, 14 ), psEnc->sCmn.fs_kHz );
+        opus_int fs_kHz_inv = silk_DIV32_16( SILK_FIX_CONST( 0.2, 14 ), psEnc->sCmn.fs_kHz );
         for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
-            b_Q14 = fs_kHz_inv + SKP_DIV32_16( SILK_FIX_CONST( 3.0, 14 ), psEncCtrl->pitchL[ k ] );
+            b_Q14 = fs_kHz_inv + silk_DIV32_16( SILK_FIX_CONST( 3.0, 14 ), psEncCtrl->pitchL[ k ] );
             /* Pack two coefficients in one int32 */
-            psEncCtrl->LF_shp_Q14[ k ]  = SKP_LSHIFT( SILK_FIX_CONST( 1.0, 14 ) - b_Q14 - SKP_SMULWB( strength_Q16, b_Q14 ), 16 );
+            psEncCtrl->LF_shp_Q14[ k ]  = silk_LSHIFT( SILK_FIX_CONST( 1.0, 14 ) - b_Q14 - silk_SMULWB( strength_Q16, b_Q14 ), 16 );
             psEncCtrl->LF_shp_Q14[ k ] |= (opus_uint16)( b_Q14 - SILK_FIX_CONST( 1.0, 14 ) );
         }
-        SKP_assert( SILK_FIX_CONST( HARM_HP_NOISE_COEF, 24 ) < SILK_FIX_CONST( 0.5, 24 ) ); /* Guarantees that second argument to SMULWB() is within range of an opus_int16*/
+        silk_assert( SILK_FIX_CONST( HARM_HP_NOISE_COEF, 24 ) < SILK_FIX_CONST( 0.5, 24 ) ); /* Guarantees that second argument to SMULWB() is within range of an opus_int16*/
         Tilt_Q16 = - SILK_FIX_CONST( HP_NOISE_COEF, 16 ) -
-            SKP_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - SILK_FIX_CONST( HP_NOISE_COEF, 16 ),
-                SKP_SMULWB( SILK_FIX_CONST( HARM_HP_NOISE_COEF, 24 ), psEnc->sCmn.speech_activity_Q8 ) );
+            silk_SMULWB( SILK_FIX_CONST( 1.0, 16 ) - SILK_FIX_CONST( HP_NOISE_COEF, 16 ),
+                silk_SMULWB( SILK_FIX_CONST( HARM_HP_NOISE_COEF, 24 ), psEnc->sCmn.speech_activity_Q8 ) );
     } else {
-        b_Q14 = SKP_DIV32_16( 21299, psEnc->sCmn.fs_kHz ); /* 1.3_Q0 = 21299_Q14*/
+        b_Q14 = silk_DIV32_16( 21299, psEnc->sCmn.fs_kHz ); /* 1.3_Q0 = 21299_Q14*/
         /* Pack two coefficients in one int32 */
-        psEncCtrl->LF_shp_Q14[ 0 ]  = SKP_LSHIFT( SILK_FIX_CONST( 1.0, 14 ) - b_Q14 -
-            SKP_SMULWB( strength_Q16, SKP_SMULWB( SILK_FIX_CONST( 0.6, 16 ), b_Q14 ) ), 16 );
+        psEncCtrl->LF_shp_Q14[ 0 ]  = silk_LSHIFT( SILK_FIX_CONST( 1.0, 14 ) - b_Q14 -
+            silk_SMULWB( strength_Q16, silk_SMULWB( SILK_FIX_CONST( 0.6, 16 ), b_Q14 ) ), 16 );
         psEncCtrl->LF_shp_Q14[ 0 ] |= (opus_uint16)( b_Q14 - SILK_FIX_CONST( 1.0, 14 ) );
         for( k = 1; k < psEnc->sCmn.nb_subfr; k++ ) {
             psEncCtrl->LF_shp_Q14[ k ] = psEncCtrl->LF_shp_Q14[ 0 ];
@@ -403,22 +403,22 @@
     /* HARMONIC SHAPING CONTROL */
     /****************************/
     /* Control boosting of harmonic frequencies */
-    HarmBoost_Q16 = SKP_SMULWB( SKP_SMULWB( SILK_FIX_CONST( 1.0, 17 ) - SKP_LSHIFT( psEncCtrl->coding_quality_Q14, 3 ),
+    HarmBoost_Q16 = silk_SMULWB( silk_SMULWB( SILK_FIX_CONST( 1.0, 17 ) - silk_LSHIFT( psEncCtrl->coding_quality_Q14, 3 ),
         psEnc->LTPCorr_Q15 ), SILK_FIX_CONST( LOW_RATE_HARMONIC_BOOST, 16 ) );
 
     /* More harmonic boost for noisy input signals */
-    HarmBoost_Q16 = SKP_SMLAWB( HarmBoost_Q16,
-        SILK_FIX_CONST( 1.0, 16 ) - SKP_LSHIFT( psEncCtrl->input_quality_Q14, 2 ), SILK_FIX_CONST( LOW_INPUT_QUALITY_HARMONIC_BOOST, 16 ) );
+    HarmBoost_Q16 = silk_SMLAWB( HarmBoost_Q16,
+        SILK_FIX_CONST( 1.0, 16 ) - silk_LSHIFT( psEncCtrl->input_quality_Q14, 2 ), SILK_FIX_CONST( LOW_INPUT_QUALITY_HARMONIC_BOOST, 16 ) );
 
     if( USE_HARM_SHAPING && psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
         /* More harmonic noise shaping for high bitrates or noisy input */
-        HarmShapeGain_Q16 = SKP_SMLAWB( SILK_FIX_CONST( HARMONIC_SHAPING, 16 ),
-                SILK_FIX_CONST( 1.0, 16 ) - SKP_SMULWB( SILK_FIX_CONST( 1.0, 18 ) - SKP_LSHIFT( psEncCtrl->coding_quality_Q14, 4 ),
+        HarmShapeGain_Q16 = silk_SMLAWB( SILK_FIX_CONST( HARMONIC_SHAPING, 16 ),
+                SILK_FIX_CONST( 1.0, 16 ) - silk_SMULWB( SILK_FIX_CONST( 1.0, 18 ) - silk_LSHIFT( psEncCtrl->coding_quality_Q14, 4 ),
                 psEncCtrl->input_quality_Q14 ), SILK_FIX_CONST( HIGH_RATE_OR_LOW_QUALITY_HARMONIC_SHAPING, 16 ) );
 
         /* Less harmonic noise shaping for less periodic signals */
-        HarmShapeGain_Q16 = SKP_SMULWB( SKP_LSHIFT( HarmShapeGain_Q16, 1 ),
-            silk_SQRT_APPROX( SKP_LSHIFT( psEnc->LTPCorr_Q15, 15 ) ) );
+        HarmShapeGain_Q16 = silk_SMULWB( silk_LSHIFT( HarmShapeGain_Q16, 1 ),
+            silk_SQRT_APPROX( silk_LSHIFT( psEnc->LTPCorr_Q15, 15 ) ) );
     } else {
         HarmShapeGain_Q16 = 0;
     }
@@ -428,14 +428,14 @@
     /*************************/
     for( k = 0; k < MAX_NB_SUBFR; k++ ) {
         psShapeSt->HarmBoost_smth_Q16 =
-            SKP_SMLAWB( psShapeSt->HarmBoost_smth_Q16,     HarmBoost_Q16     - psShapeSt->HarmBoost_smth_Q16,     SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) );
+            silk_SMLAWB( psShapeSt->HarmBoost_smth_Q16,     HarmBoost_Q16     - psShapeSt->HarmBoost_smth_Q16,     SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) );
         psShapeSt->HarmShapeGain_smth_Q16 =
-            SKP_SMLAWB( psShapeSt->HarmShapeGain_smth_Q16, HarmShapeGain_Q16 - psShapeSt->HarmShapeGain_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) );
+            silk_SMLAWB( psShapeSt->HarmShapeGain_smth_Q16, HarmShapeGain_Q16 - psShapeSt->HarmShapeGain_smth_Q16, SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) );
         psShapeSt->Tilt_smth_Q16 =
-            SKP_SMLAWB( psShapeSt->Tilt_smth_Q16,          Tilt_Q16          - psShapeSt->Tilt_smth_Q16,          SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) );
+            silk_SMLAWB( psShapeSt->Tilt_smth_Q16,          Tilt_Q16          - psShapeSt->Tilt_smth_Q16,          SILK_FIX_CONST( SUBFR_SMTH_COEF, 16 ) );
 
-        psEncCtrl->HarmBoost_Q14[ k ]     = ( opus_int )SKP_RSHIFT_ROUND( psShapeSt->HarmBoost_smth_Q16,     2 );
-        psEncCtrl->HarmShapeGain_Q14[ k ] = ( opus_int )SKP_RSHIFT_ROUND( psShapeSt->HarmShapeGain_smth_Q16, 2 );
-        psEncCtrl->Tilt_Q14[ k ]          = ( opus_int )SKP_RSHIFT_ROUND( psShapeSt->Tilt_smth_Q16,          2 );
+        psEncCtrl->HarmBoost_Q14[ k ]     = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->HarmBoost_smth_Q16,     2 );
+        psEncCtrl->HarmShapeGain_Q14[ k ] = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->HarmShapeGain_smth_Q16, 2 );
+        psEncCtrl->Tilt_Q14[ k ]          = ( opus_int )silk_RSHIFT_ROUND( psShapeSt->Tilt_smth_Q16,          2 );
     }
 }
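
Aside (illustrative only, not part of the patch): the rename is purely mechanical, so every silk_* macro keeps the semantics of its SKP_* predecessor. As a reading aid for the remaining hunks, here is a rough portable-C sketch of the most frequently used fixed-point helpers; the authoritative definitions live in the SILK headers, so treat these ref_* functions as approximations that ignore platform-specific variants and some rounding corner cases.

    /* Approximate reference semantics only -- not the actual SILK definitions */
    #include <stdint.h>

    static int32_t ref_SMULBB(int32_t a, int32_t b)              /* 16 x 16 -> 32 multiply (low halves)  */
        { return (int32_t)(int16_t)a * (int16_t)b; }
    static int32_t ref_SMULWB(int32_t a, int32_t b)              /* (a * low16(b)) >> 16                 */
        { return (int32_t)(((int64_t)a * (int16_t)b) >> 16); }
    static int32_t ref_SMLAWB(int32_t acc, int32_t a, int32_t b) /* acc + ref_SMULWB(a, b)               */
        { return acc + ref_SMULWB(a, b); }
    static int32_t ref_SMULWW(int32_t a, int32_t b)              /* (a * b) >> 16, via 64-bit product    */
        { return (int32_t)(((int64_t)a * b) >> 16); }
    static int32_t ref_RSHIFT_ROUND(int32_t a, int32_t shift)    /* right shift with rounding, shift > 1 */
        { return ((a >> (shift - 1)) + 1) >> 1; }
    static int16_t ref_SAT16(int32_t a)                          /* clamp to the int16 range             */
        { return (int16_t)(a > 32767 ? 32767 : (a < -32768 ? -32768 : a)); }
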
diff --git a/silk/fixed/silk_prefilter_FIX.c b/silk/fixed/silk_prefilter_FIX.c
index d9b16a3..f4c1bb2 100644
--- a/silk/fixed/silk_prefilter_FIX.c
+++ b/silk/fixed/silk_prefilter_FIX.c
@@ -58,30 +58,30 @@
     opus_int32   acc_Q11, tmp1, tmp2;
 
     /* Order must be even */
-    SKP_assert( ( order & 1 ) == 0 );
+    silk_assert( ( order & 1 ) == 0 );
 
     for( n = 0; n < length; n++ ) {
         /* Output of lowpass section */
-        tmp2 = SKP_SMLAWB( state[ 0 ], state[ 1 ], lambda_Q16 );
-        state[ 0 ] = SKP_LSHIFT( input[ n ], 14 );
+        tmp2 = silk_SMLAWB( state[ 0 ], state[ 1 ], lambda_Q16 );
+        state[ 0 ] = silk_LSHIFT( input[ n ], 14 );
         /* Output of allpass section */
-        tmp1 = SKP_SMLAWB( state[ 1 ], state[ 2 ] - tmp2, lambda_Q16 );
+        tmp1 = silk_SMLAWB( state[ 1 ], state[ 2 ] - tmp2, lambda_Q16 );
         state[ 1 ] = tmp2;
-        acc_Q11 = SKP_SMULWB( tmp2, coef_Q13[ 0 ] );
+        acc_Q11 = silk_SMULWB( tmp2, coef_Q13[ 0 ] );
         /* Loop over allpass sections */
         for( i = 2; i < order; i += 2 ) {
             /* Output of allpass section */
-            tmp2 = SKP_SMLAWB( state[ i ], state[ i + 1 ] - tmp1, lambda_Q16 );
+            tmp2 = silk_SMLAWB( state[ i ], state[ i + 1 ] - tmp1, lambda_Q16 );
             state[ i ] = tmp1;
-            acc_Q11 = SKP_SMLAWB( acc_Q11, tmp1, coef_Q13[ i - 1 ] );
+            acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ i - 1 ] );
             /* Output of allpass section */
-            tmp1 = SKP_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 );
+            tmp1 = silk_SMLAWB( state[ i + 1 ], state[ i + 2 ] - tmp2, lambda_Q16 );
             state[ i + 1 ] = tmp2;
-            acc_Q11 = SKP_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] );
+            acc_Q11 = silk_SMLAWB( acc_Q11, tmp2, coef_Q13[ i ] );
         }
         state[ order ] = tmp1;
-        acc_Q11 = SKP_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] );
-        res[ n ] = ( opus_int16 )SKP_SAT16( ( opus_int32 )input[ n ] - SKP_RSHIFT_ROUND( acc_Q11, 11 ) );
+        acc_Q11 = silk_SMLAWB( acc_Q11, tmp1, coef_Q13[ order - 1 ] );
+        res[ n ] = ( opus_int16 )silk_SAT16( ( opus_int32 )input[ n ] - silk_RSHIFT_ROUND( acc_Q11, 11 ) );
     }
 }
 
@@ -115,10 +115,10 @@
         }
 
         /* Noise shape parameters */
-        HarmShapeGain_Q12 = SKP_SMULWB( psEncCtrl->HarmShapeGain_Q14[ k ], 16384 - psEncCtrl->HarmBoost_Q14[ k ] );
-        SKP_assert( HarmShapeGain_Q12 >= 0 );
-        HarmShapeFIRPacked_Q12  =                          SKP_RSHIFT( HarmShapeGain_Q12, 2 );
-        HarmShapeFIRPacked_Q12 |= SKP_LSHIFT( ( opus_int32 )SKP_RSHIFT( HarmShapeGain_Q12, 1 ), 16 );
+        HarmShapeGain_Q12 = silk_SMULWB( psEncCtrl->HarmShapeGain_Q14[ k ], 16384 - psEncCtrl->HarmBoost_Q14[ k ] );
+        silk_assert( HarmShapeGain_Q12 >= 0 );
+        HarmShapeFIRPacked_Q12  =                          silk_RSHIFT( HarmShapeGain_Q12, 2 );
+        HarmShapeFIRPacked_Q12 |= silk_LSHIFT( ( opus_int32 )silk_RSHIFT( HarmShapeGain_Q12, 1 ), 16 );
         Tilt_Q14    = psEncCtrl->Tilt_Q14[   k ];
         LF_shp_Q14  = psEncCtrl->LF_shp_Q14[ k ];
         AR1_shp_Q13 = &psEncCtrl->AR1_Q13[   k * MAX_SHAPE_LPC_ORDER ];
@@ -128,16 +128,16 @@
             psEnc->sCmn.warping_Q16, psEnc->sCmn.subfr_length, psEnc->sCmn.shapingLPCOrder );
 
         /* reduce (mainly) low frequencies during harmonic emphasis */
-        B_Q12[ 0 ] = SKP_RSHIFT_ROUND( psEncCtrl->GainsPre_Q14[ k ], 2 );
-        tmp_32 = SKP_SMLABB( SILK_FIX_CONST( INPUT_TILT, 26 ), psEncCtrl->HarmBoost_Q14[ k ], HarmShapeGain_Q12 );   /* Q26 */
-        tmp_32 = SKP_SMLABB( tmp_32, psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( HIGH_RATE_INPUT_TILT, 12 ) );    /* Q26 */
-        tmp_32 = SKP_SMULWB( tmp_32, -psEncCtrl->GainsPre_Q14[ k ] );                                               /* Q24 */
-        tmp_32 = SKP_RSHIFT_ROUND( tmp_32, 12 );                                                                    /* Q12 */
-        B_Q12[ 1 ]= SKP_SAT16( tmp_32 );
+        B_Q12[ 0 ] = silk_RSHIFT_ROUND( psEncCtrl->GainsPre_Q14[ k ], 2 );
+        tmp_32 = silk_SMLABB( SILK_FIX_CONST( INPUT_TILT, 26 ), psEncCtrl->HarmBoost_Q14[ k ], HarmShapeGain_Q12 );   /* Q26 */
+        tmp_32 = silk_SMLABB( tmp_32, psEncCtrl->coding_quality_Q14, SILK_FIX_CONST( HIGH_RATE_INPUT_TILT, 12 ) );    /* Q26 */
+        tmp_32 = silk_SMULWB( tmp_32, -psEncCtrl->GainsPre_Q14[ k ] );                                               /* Q24 */
+        tmp_32 = silk_RSHIFT_ROUND( tmp_32, 12 );                                                                    /* Q12 */
+        B_Q12[ 1 ]= silk_SAT16( tmp_32 );
 
-        x_filt_Q12[ 0 ] = SKP_SMLABB( SKP_SMULBB( st_res[ 0 ], B_Q12[ 0 ] ), P->sHarmHP, B_Q12[ 1 ] );
+        x_filt_Q12[ 0 ] = silk_SMLABB( silk_SMULBB( st_res[ 0 ], B_Q12[ 0 ] ), P->sHarmHP, B_Q12[ 1 ] );
         for( j = 1; j < psEnc->sCmn.subfr_length; j++ ) {
-            x_filt_Q12[ j ] = SKP_SMLABB( SKP_SMULBB( st_res[ j ], B_Q12[ 0 ] ), st_res[ j - 1 ], B_Q12[ 1 ] );
+            x_filt_Q12[ j ] = silk_SMLABB( silk_SMULBB( st_res[ j ], B_Q12[ 0 ] ), st_res[ j - 1 ], B_Q12[ 1 ] );
         }
         P->sHarmHP = st_res[ psEnc->sCmn.subfr_length - 1 ];
 
@@ -177,25 +177,25 @@
     for( i = 0; i < length; i++ ) {
         if( lag > 0 ) {
             /* unrolled loop */
-            SKP_assert( HARM_SHAPE_FIR_TAPS == 3 );
+            silk_assert( HARM_SHAPE_FIR_TAPS == 3 );
             idx = lag + LTP_shp_buf_idx;
-            n_LTP_Q12 = SKP_SMULBB(            LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 );
-            n_LTP_Q12 = SKP_SMLABT( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2    ) & LTP_MASK ], HarmShapeFIRPacked_Q12 );
-            n_LTP_Q12 = SKP_SMLABB( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 + 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 );
+            n_LTP_Q12 = silk_SMULBB(            LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 );
+            n_LTP_Q12 = silk_SMLABT( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2    ) & LTP_MASK ], HarmShapeFIRPacked_Q12 );
+            n_LTP_Q12 = silk_SMLABB( n_LTP_Q12, LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 + 1) & LTP_MASK ], HarmShapeFIRPacked_Q12 );
         } else {
             n_LTP_Q12 = 0;
         }
 
-        n_Tilt_Q10 = SKP_SMULWB( sLF_AR_shp_Q12, Tilt_Q14 );
-        n_LF_Q10   = SKP_SMLAWB( SKP_SMULWT( sLF_AR_shp_Q12, LF_shp_Q14 ), sLF_MA_shp_Q12, LF_shp_Q14 );
+        n_Tilt_Q10 = silk_SMULWB( sLF_AR_shp_Q12, Tilt_Q14 );
+        n_LF_Q10   = silk_SMLAWB( silk_SMULWT( sLF_AR_shp_Q12, LF_shp_Q14 ), sLF_MA_shp_Q12, LF_shp_Q14 );
 
-        sLF_AR_shp_Q12 = SKP_SUB32( st_res_Q12[ i ], SKP_LSHIFT( n_Tilt_Q10, 2 ) );
-        sLF_MA_shp_Q12 = SKP_SUB32( sLF_AR_shp_Q12,  SKP_LSHIFT( n_LF_Q10,   2 ) );
+        sLF_AR_shp_Q12 = silk_SUB32( st_res_Q12[ i ], silk_LSHIFT( n_Tilt_Q10, 2 ) );
+        sLF_MA_shp_Q12 = silk_SUB32( sLF_AR_shp_Q12,  silk_LSHIFT( n_LF_Q10,   2 ) );
 
         LTP_shp_buf_idx = ( LTP_shp_buf_idx - 1 ) & LTP_MASK;
-        LTP_shp_buf[ LTP_shp_buf_idx ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( sLF_MA_shp_Q12, 12 ) );
+        LTP_shp_buf[ LTP_shp_buf_idx ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND( sLF_MA_shp_Q12, 12 ) );
 
-        xw[i] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SUB32( sLF_MA_shp_Q12, n_LTP_Q12 ), 12 ) );
+        xw[i] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND( silk_SUB32( sLF_MA_shp_Q12, n_LTP_Q12 ), 12 ) );
     }
 
     /* Copy temp variable back to state */
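
Aside (illustrative only, not part of the patch): HarmShapeFIRPacked_Q12 above packs the Q12 harmonic-shaping gain divided by four into the low 16 bits and divided by two into the high 16 bits, so the SMULBB / SMLABT / SMLABB triple around the pitch lag implements a 3-tap FIR with taps of roughly [0.25, 0.5, 0.25] times HarmShapeGain. A minimal float sketch of that filter, with a hypothetical name and the circular-buffer masking left out:

    /* Hypothetical float equivalent of the packed 3-tap harmonic shaping FIR */
    static float harm_shape_fir3(float x_m1, float x_0, float x_p1, float harm_gain)
    {
        /* SMULBB and SMLABB read the packed low half (gain / 4), SMLABT the high half (gain / 2) */
        return harm_gain * (0.25f * x_m1 + 0.50f * x_0 + 0.25f * x_p1);
    }
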
diff --git a/silk/fixed/silk_process_gains_FIX.c b/silk/fixed/silk_process_gains_FIX.c
index 049d53f..405faa5 100644
--- a/silk/fixed/silk_process_gains_FIX.c
+++ b/silk/fixed/silk_process_gains_FIX.c
@@ -44,44 +44,44 @@
 
     /* Gain reduction when LTP coding gain is high */
     if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
-        /*s = -0.5f * SKP_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) ); */
-        s_Q16 = -silk_sigm_Q15( SKP_RSHIFT_ROUND( psEncCtrl->LTPredCodGain_Q7 - SILK_FIX_CONST( 12.0, 7 ), 4 ) );
+        /*s = -0.5f * silk_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) ); */
+        s_Q16 = -silk_sigm_Q15( silk_RSHIFT_ROUND( psEncCtrl->LTPredCodGain_Q7 - SILK_FIX_CONST( 12.0, 7 ), 4 ) );
         for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
-            psEncCtrl->Gains_Q16[ k ] = SKP_SMLAWB( psEncCtrl->Gains_Q16[ k ], psEncCtrl->Gains_Q16[ k ], s_Q16 );
+            psEncCtrl->Gains_Q16[ k ] = silk_SMLAWB( psEncCtrl->Gains_Q16[ k ], psEncCtrl->Gains_Q16[ k ], s_Q16 );
         }
     }
 
     /* Limit the quantized signal */
     /* InvMaxSqrVal = pow( 2.0f, 0.33f * ( 21.0f - SNR_dB ) ) / subfr_length; */
-    InvMaxSqrVal_Q16 = SKP_DIV32_16( silk_log2lin(
-        SKP_SMULWB( SILK_FIX_CONST( 21 + 16 / 0.33, 7 ) - psEnc->sCmn.SNR_dB_Q7, SILK_FIX_CONST( 0.33, 16 ) ) ), psEnc->sCmn.subfr_length );
+    InvMaxSqrVal_Q16 = silk_DIV32_16( silk_log2lin(
+        silk_SMULWB( SILK_FIX_CONST( 21 + 16 / 0.33, 7 ) - psEnc->sCmn.SNR_dB_Q7, SILK_FIX_CONST( 0.33, 16 ) ) ), psEnc->sCmn.subfr_length );
 
     for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
         /* Soft limit on ratio residual energy and squared gains */
         ResNrg     = psEncCtrl->ResNrg[ k ];
-        ResNrgPart = SKP_SMULWW( ResNrg, InvMaxSqrVal_Q16 );
+        ResNrgPart = silk_SMULWW( ResNrg, InvMaxSqrVal_Q16 );
         if( psEncCtrl->ResNrgQ[ k ] > 0 ) {
-            ResNrgPart = SKP_RSHIFT_ROUND( ResNrgPart, psEncCtrl->ResNrgQ[ k ] );
+            ResNrgPart = silk_RSHIFT_ROUND( ResNrgPart, psEncCtrl->ResNrgQ[ k ] );
         } else {
-            if( ResNrgPart >= SKP_RSHIFT( SKP_int32_MAX, -psEncCtrl->ResNrgQ[ k ] ) ) {
-                ResNrgPart = SKP_int32_MAX;
+            if( ResNrgPart >= silk_RSHIFT( silk_int32_MAX, -psEncCtrl->ResNrgQ[ k ] ) ) {
+                ResNrgPart = silk_int32_MAX;
             } else {
-                ResNrgPart = SKP_LSHIFT( ResNrgPart, -psEncCtrl->ResNrgQ[ k ] );
+                ResNrgPart = silk_LSHIFT( ResNrgPart, -psEncCtrl->ResNrgQ[ k ] );
             }
         }
         gain = psEncCtrl->Gains_Q16[ k ];
-        gain_squared = SKP_ADD_SAT32( ResNrgPart, SKP_SMMUL( gain, gain ) );
-        if( gain_squared < SKP_int16_MAX ) {
+        gain_squared = silk_ADD_SAT32( ResNrgPart, silk_SMMUL( gain, gain ) );
+        if( gain_squared < silk_int16_MAX ) {
             /* recalculate with higher precision */
-            gain_squared = SKP_SMLAWW( SKP_LSHIFT( ResNrgPart, 16 ), gain, gain );
-            SKP_assert( gain_squared > 0 );
+            gain_squared = silk_SMLAWW( silk_LSHIFT( ResNrgPart, 16 ), gain, gain );
+            silk_assert( gain_squared > 0 );
             gain = silk_SQRT_APPROX( gain_squared );                    /* Q8   */
-            gain = SKP_min( gain, SKP_int32_MAX >> 8 );
-            psEncCtrl->Gains_Q16[ k ] = SKP_LSHIFT_SAT32( gain, 8 );        /* Q16  */
+            gain = silk_min( gain, silk_int32_MAX >> 8 );
+            psEncCtrl->Gains_Q16[ k ] = silk_LSHIFT_SAT32( gain, 8 );        /* Q16  */
         } else {
             gain = silk_SQRT_APPROX( gain_squared );                    /* Q0   */
-            gain = SKP_min( gain, SKP_int32_MAX >> 16 );
-            psEncCtrl->Gains_Q16[ k ] = SKP_LSHIFT_SAT32( gain, 16 );       /* Q16  */
+            gain = silk_min( gain, silk_int32_MAX >> 16 );
+            psEncCtrl->Gains_Q16[ k ] = silk_LSHIFT_SAT32( gain, 16 );       /* Q16  */
         }
     }
 
@@ -91,7 +91,7 @@
 
     /* Set quantizer offset for voiced signals. Larger offset when LTP coding gain is low or tilt is high (ie low-pass) */
     if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
-        if( psEncCtrl->LTPredCodGain_Q7 + SKP_RSHIFT( psEnc->sCmn.input_tilt_Q15, 8 ) > SILK_FIX_CONST( 1.0, 7 ) ) {
+        if( psEncCtrl->LTPredCodGain_Q7 + silk_RSHIFT( psEnc->sCmn.input_tilt_Q15, 8 ) > SILK_FIX_CONST( 1.0, 7 ) ) {
             psEnc->sCmn.indices.quantOffsetType = 0;
         } else {
             psEnc->sCmn.indices.quantOffsetType = 1;
@@ -101,12 +101,12 @@
     /* Quantizer boundary adjustment */
     quant_offset_Q10 = silk_Quantization_Offsets_Q10[ psEnc->sCmn.indices.signalType >> 1 ][ psEnc->sCmn.indices.quantOffsetType ];
     psEncCtrl->Lambda_Q10 = SILK_FIX_CONST( LAMBDA_OFFSET, 10 )
-                          + SKP_SMULBB( SILK_FIX_CONST( LAMBDA_DELAYED_DECISIONS, 10 ), psEnc->sCmn.nStatesDelayedDecision )
-                          + SKP_SMULWB( SILK_FIX_CONST( LAMBDA_SPEECH_ACT,        18 ), psEnc->sCmn.speech_activity_Q8     )
-                          + SKP_SMULWB( SILK_FIX_CONST( LAMBDA_INPUT_QUALITY,     12 ), psEncCtrl->input_quality_Q14       )
-                          + SKP_SMULWB( SILK_FIX_CONST( LAMBDA_CODING_QUALITY,    12 ), psEncCtrl->coding_quality_Q14      )
-                          + SKP_SMULWB( SILK_FIX_CONST( LAMBDA_QUANT_OFFSET,      16 ), quant_offset_Q10                   );
+                          + silk_SMULBB( SILK_FIX_CONST( LAMBDA_DELAYED_DECISIONS, 10 ), psEnc->sCmn.nStatesDelayedDecision )
+                          + silk_SMULWB( SILK_FIX_CONST( LAMBDA_SPEECH_ACT,        18 ), psEnc->sCmn.speech_activity_Q8     )
+                          + silk_SMULWB( SILK_FIX_CONST( LAMBDA_INPUT_QUALITY,     12 ), psEncCtrl->input_quality_Q14       )
+                          + silk_SMULWB( SILK_FIX_CONST( LAMBDA_CODING_QUALITY,    12 ), psEncCtrl->coding_quality_Q14      )
+                          + silk_SMULWB( SILK_FIX_CONST( LAMBDA_QUANT_OFFSET,      16 ), quant_offset_Q10                   );
 
-    SKP_assert( psEncCtrl->Lambda_Q10 > 0 );
-    SKP_assert( psEncCtrl->Lambda_Q10 < SILK_FIX_CONST( 2, 10 ) );
+    silk_assert( psEncCtrl->Lambda_Q10 > 0 );
+    silk_assert( psEncCtrl->Lambda_Q10 < SILK_FIX_CONST( 2, 10 ) );
 }
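
Aside (illustrative only, not part of the patch): the _Qn suffixes in these files mean "scaled by 2^n", and silk_SMULWB(a, b) is roughly (a * low16(b)) >> 16, so a Qm constant times a Qn operand lands in Q(m + n - 16). That is why, for example, the Q18 LAMBDA_SPEECH_ACT term multiplied by speech_activity_Q8 contributes in Q10, matching Lambda_Q10 above. A tiny self-contained check of that bookkeeping, using a placeholder constant of 0.2 rather than the real LAMBDA_SPEECH_ACT value:

    #include <stdio.h>
    #include <stdint.h>

    #define FIX_CONST(x, n)  ((int32_t)((x) * (double)(1LL << (n)) + 0.5))   /* like SILK_FIX_CONST   */
    static int32_t ref_SMULWB(int32_t a, int32_t b)                          /* approx. silk_SMULWB   */
        { return (int32_t)(((int64_t)a * (int16_t)b) >> 16); }

    int main(void)
    {
        int32_t act_Q8   = FIX_CONST(0.75, 8);                      /* speech activity 0.75 in Q8     */
        int32_t term_Q10 = ref_SMULWB(FIX_CONST(0.2, 18), act_Q8);  /* Q18 * Q8 >> 16  ->  Q10        */
        printf("%.3f\n", term_Q10 / 1024.0);                        /* prints ~0.149, i.e. 0.2 * 0.75 */
        return 0;
    }
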
diff --git a/silk/fixed/silk_regularize_correlations_FIX.c b/silk/fixed/silk_regularize_correlations_FIX.c
index 040936b..926c812 100644
--- a/silk/fixed/silk_regularize_correlations_FIX.c
+++ b/silk/fixed/silk_regularize_correlations_FIX.c
@@ -41,7 +41,7 @@
 {
     opus_int i;
     for( i = 0; i < D; i++ ) {
-        matrix_ptr( &XX[ 0 ], i, i, D ) = SKP_ADD32( matrix_ptr( &XX[ 0 ], i, i, D ), noise );
+        matrix_ptr( &XX[ 0 ], i, i, D ) = silk_ADD32( matrix_ptr( &XX[ 0 ], i, i, D ), noise );
     }
     xx[ 0 ] += noise;
 }
diff --git a/silk/fixed/silk_residual_energy16_FIX.c b/silk/fixed/silk_residual_energy16_FIX.c
index fe3b2f1..08a3b0f 100644
--- a/silk/fixed/silk_residual_energy16_FIX.c
+++ b/silk/fixed/silk_residual_energy16_FIX.c
@@ -47,35 +47,35 @@
     const opus_int32 *pRow;
 
     /* Safety checks */
-    SKP_assert( D >=  0 );
-    SKP_assert( D <= 16 );
-    SKP_assert( cQ >  0 );
-    SKP_assert( cQ < 16 );
+    silk_assert( D >=  0 );
+    silk_assert( D <= 16 );
+    silk_assert( cQ >  0 );
+    silk_assert( cQ < 16 );
 
     lshifts = 16 - cQ;
     Qxtra = lshifts;
 
     c_max = 0;
     for( i = 0; i < D; i++ ) {
-        c_max = SKP_max_32( c_max, SKP_abs( ( opus_int32 )c[ i ] ) );
+        c_max = silk_max_32( c_max, silk_abs( ( opus_int32 )c[ i ] ) );
     }
-    Qxtra = SKP_min_int( Qxtra, silk_CLZ32( c_max ) - 17 );
+    Qxtra = silk_min_int( Qxtra, silk_CLZ32( c_max ) - 17 );
 
-    w_max = SKP_max_32( wXX[ 0 ], wXX[ D * D - 1 ] );
-    Qxtra = SKP_min_int( Qxtra, silk_CLZ32( SKP_MUL( D, SKP_RSHIFT( SKP_SMULWB( w_max, c_max ), 4 ) ) ) - 5 );
-    Qxtra = SKP_max_int( Qxtra, 0 );
+    w_max = silk_max_32( wXX[ 0 ], wXX[ D * D - 1 ] );
+    Qxtra = silk_min_int( Qxtra, silk_CLZ32( silk_MUL( D, silk_RSHIFT( silk_SMULWB( w_max, c_max ), 4 ) ) ) - 5 );
+    Qxtra = silk_max_int( Qxtra, 0 );
     for( i = 0; i < D; i++ ) {
-        cn[ i ] = SKP_LSHIFT( ( opus_int )c[ i ], Qxtra );
-        SKP_assert( SKP_abs(cn[i]) <= ( SKP_int16_MAX + 1 ) ); /* Check that SKP_SMLAWB can be used */
+        cn[ i ] = silk_LSHIFT( ( opus_int )c[ i ], Qxtra );
+        silk_assert( silk_abs(cn[i]) <= ( silk_int16_MAX + 1 ) ); /* Check that silk_SMLAWB can be used */
     }
     lshifts -= Qxtra;
 
     /* Compute wxx - 2 * wXx * c */
     tmp = 0;
     for( i = 0; i < D; i++ ) {
-        tmp = SKP_SMLAWB( tmp, wXx[ i ], cn[ i ] );
+        tmp = silk_SMLAWB( tmp, wXx[ i ], cn[ i ] );
     }
-    nrg = SKP_RSHIFT( wxx, 1 + lshifts ) - tmp;                         /* Q: -lshifts - 1 */
+    nrg = silk_RSHIFT( wxx, 1 + lshifts ) - tmp;                         /* Q: -lshifts - 1 */
 
     /* Add c' * wXX * c, assuming wXX is symmetric */
     tmp2 = 0;
@@ -83,20 +83,20 @@
         tmp = 0;
         pRow = &wXX[ i * D ];
         for( j = i + 1; j < D; j++ ) {
-            tmp = SKP_SMLAWB( tmp, pRow[ j ], cn[ j ] );
+            tmp = silk_SMLAWB( tmp, pRow[ j ], cn[ j ] );
         }
-        tmp  = SKP_SMLAWB( tmp,  SKP_RSHIFT( pRow[ i ], 1 ), cn[ i ] );
-        tmp2 = SKP_SMLAWB( tmp2, tmp,                        cn[ i ] );
+        tmp  = silk_SMLAWB( tmp,  silk_RSHIFT( pRow[ i ], 1 ), cn[ i ] );
+        tmp2 = silk_SMLAWB( tmp2, tmp,                        cn[ i ] );
     }
-    nrg = SKP_ADD_LSHIFT32( nrg, tmp2, lshifts );                       /* Q: -lshifts - 1 */
+    nrg = silk_ADD_LSHIFT32( nrg, tmp2, lshifts );                       /* Q: -lshifts - 1 */
 
     /* Keep one bit free always, because we add them for LSF interpolation */
     if( nrg < 1 ) {
         nrg = 1;
-    } else if( nrg > SKP_RSHIFT( SKP_int32_MAX, lshifts + 2 ) ) {
-        nrg = SKP_int32_MAX >> 1;
+    } else if( nrg > silk_RSHIFT( silk_int32_MAX, lshifts + 2 ) ) {
+        nrg = silk_int32_MAX >> 1;
     } else {
-        nrg = SKP_LSHIFT( nrg, lshifts + 1 );                           /* Q0 */
+        nrg = silk_LSHIFT( nrg, lshifts + 1 );                           /* Q0 */
     }
     return nrg;
 
diff --git a/silk/fixed/silk_residual_energy_FIX.c b/silk/fixed/silk_residual_energy_FIX.c
index a1e5ebd..eed19ed 100644
--- a/silk/fixed/silk_residual_energy_FIX.c
+++ b/silk/fixed/silk_residual_energy_FIX.c
@@ -79,13 +79,13 @@
         lz1 = silk_CLZ32( nrgs[  i ] ) - 1;
         lz2 = silk_CLZ32( gains[ i ] ) - 1;
 
-        tmp32 = SKP_LSHIFT32( gains[ i ], lz2 );
+        tmp32 = silk_LSHIFT32( gains[ i ], lz2 );
 
         /* Find squared gains */
-        tmp32 = SKP_SMMUL( tmp32, tmp32 ); /* Q( 2 * lz2 - 32 )*/
+        tmp32 = silk_SMMUL( tmp32, tmp32 ); /* Q( 2 * lz2 - 32 )*/
 
         /* Scale energies */
-        nrgs[ i ] = SKP_SMMUL( tmp32, SKP_LSHIFT32( nrgs[ i ], lz1 ) ); /* Q( nrgsQ[ i ] + lz1 + 2 * lz2 - 32 - 32 )*/
+        nrgs[ i ] = silk_SMMUL( tmp32, silk_LSHIFT32( nrgs[ i ], lz1 ) ); /* Q( nrgsQ[ i ] + lz1 + 2 * lz2 - 32 - 32 )*/
         nrgsQ[ i ] += lz1 + 2 * lz2 - 32 - 32;
     }
 }
diff --git a/silk/fixed/silk_solve_LS_FIX.c b/silk/fixed/silk_solve_LS_FIX.c
index 3742370..40abd14 100644
--- a/silk/fixed/silk_solve_LS_FIX.c
+++ b/silk/fixed/silk_solve_LS_FIX.c
@@ -83,7 +83,7 @@
     opus_int32 Y[      MAX_MATRIX_SIZE ];
     inv_D_t   inv_D[  MAX_MATRIX_SIZE ];
 
-    SKP_assert( M <= MAX_MATRIX_SIZE );
+    silk_assert( M <= MAX_MATRIX_SIZE );
 
     /***************************************************
     Factorize A by LDL such that A = L*D*L',
@@ -122,26 +122,26 @@
     opus_int32 v_Q0[ MAX_MATRIX_SIZE ], D_Q0[ MAX_MATRIX_SIZE ];
     opus_int32 one_div_diag_Q36, one_div_diag_Q40, one_div_diag_Q48;
 
-    SKP_assert( M <= MAX_MATRIX_SIZE );
+    silk_assert( M <= MAX_MATRIX_SIZE );
 
     status = 1;
-    diag_min_value = SKP_max_32( SKP_SMMUL( SKP_ADD_SAT32( A[ 0 ], A[ SKP_SMULBB( M, M ) - 1 ] ), SILK_FIX_CONST( FIND_LTP_COND_FAC, 31 ) ), 1 << 9 );
+    diag_min_value = silk_max_32( silk_SMMUL( silk_ADD_SAT32( A[ 0 ], A[ silk_SMULBB( M, M ) - 1 ] ), SILK_FIX_CONST( FIND_LTP_COND_FAC, 31 ) ), 1 << 9 );
     for( loop_count = 0; loop_count < M && status == 1; loop_count++ ) {
         status = 0;
         for( j = 0; j < M; j++ ) {
             ptr1 = matrix_adr( L_Q16, j, 0, M );
             tmp_32 = 0;
             for( i = 0; i < j; i++ ) {
-                v_Q0[ i ] = SKP_SMULWW(         D_Q0[ i ], ptr1[ i ] ); /* Q0 */
-                tmp_32    = SKP_SMLAWW( tmp_32, v_Q0[ i ], ptr1[ i ] ); /* Q0 */
+                v_Q0[ i ] = silk_SMULWW(         D_Q0[ i ], ptr1[ i ] ); /* Q0 */
+                tmp_32    = silk_SMLAWW( tmp_32, v_Q0[ i ], ptr1[ i ] ); /* Q0 */
             }
-            tmp_32 = SKP_SUB32( matrix_ptr( A, j, j, M ), tmp_32 );
+            tmp_32 = silk_SUB32( matrix_ptr( A, j, j, M ), tmp_32 );
 
             if( tmp_32 < diag_min_value ) {
-                tmp_32 = SKP_SUB32( SKP_SMULBB( loop_count + 1, diag_min_value ), tmp_32 );
+                tmp_32 = silk_SUB32( silk_SMULBB( loop_count + 1, diag_min_value ), tmp_32 );
                 /* Matrix not positive semi-definite, or ill conditioned */
                 for( i = 0; i < M; i++ ) {
-                    matrix_ptr( A, i, i, M ) = SKP_ADD32( matrix_ptr( A, i, i, M ), tmp_32 );
+                    matrix_ptr( A, i, i, M ) = silk_ADD32( matrix_ptr( A, i, i, M ), tmp_32 );
                 }
                 status = 1;
                 break;
@@ -150,9 +150,9 @@
 
             /* two-step division */
             one_div_diag_Q36 = silk_INVERSE32_varQ( tmp_32, 36 );                    /* Q36 */
-            one_div_diag_Q40 = SKP_LSHIFT( one_div_diag_Q36, 4 );                   /* Q40 */
-            err = SKP_SUB32( 1 << 24, SKP_SMULWW( tmp_32, one_div_diag_Q40 ) );     /* Q24 */
-            one_div_diag_Q48 = SKP_SMULWW( err, one_div_diag_Q40 );                 /* Q48 */
+            one_div_diag_Q40 = silk_LSHIFT( one_div_diag_Q36, 4 );                   /* Q40 */
+            err = silk_SUB32( 1 << 24, silk_SMULWW( tmp_32, one_div_diag_Q40 ) );     /* Q24 */
+            one_div_diag_Q48 = silk_SMULWW( err, one_div_diag_Q40 );                 /* Q48 */
 
             /* Save 1/Ds */
             inv_D[ j ].Q36_part = one_div_diag_Q36;
@@ -164,13 +164,13 @@
             for( i = j + 1; i < M; i++ ) {
                 tmp_32 = 0;
                 for( k = 0; k < j; k++ ) {
-                    tmp_32 = SKP_SMLAWW( tmp_32, v_Q0[ k ], ptr2[ k ] ); /* Q0 */
+                    tmp_32 = silk_SMLAWW( tmp_32, v_Q0[ k ], ptr2[ k ] ); /* Q0 */
                 }
-                tmp_32 = SKP_SUB32( ptr1[ i ], tmp_32 ); /* always < max(Correlation) */
+                tmp_32 = silk_SUB32( ptr1[ i ], tmp_32 ); /* always < max(Correlation) */
 
                 /* tmp_32 / D_Q0[j] : Divide to Q16 */
-                matrix_ptr( L_Q16, i, j, M ) = SKP_ADD32( SKP_SMMUL( tmp_32, one_div_diag_Q48 ),
-                    SKP_RSHIFT( SKP_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) );
+                matrix_ptr( L_Q16, i, j, M ) = silk_ADD32( silk_SMMUL( tmp_32, one_div_diag_Q48 ),
+                    silk_RSHIFT( silk_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) );
 
                 /* go to next column */
                 ptr2 += M;
@@ -178,7 +178,7 @@
         }
     }
 
-    SKP_assert( status == 0 );
+    silk_assert( status == 0 );
 }
 
 static inline void silk_LS_divide_Q16_FIX(
@@ -196,7 +196,7 @@
         one_div_diag_Q48 = inv_D[ i ].Q48_part;
 
         tmp_32 = T[ i ];
-        T[ i ] = SKP_ADD32( SKP_SMMUL( tmp_32, one_div_diag_Q48 ), SKP_RSHIFT( SKP_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) );
+        T[ i ] = silk_ADD32( silk_SMMUL( tmp_32, one_div_diag_Q48 ), silk_RSHIFT( silk_SMULWW( tmp_32, one_div_diag_Q36 ), 4 ) );
     }
 }
 
@@ -216,9 +216,9 @@
         ptr32 = matrix_adr( L_Q16, i, 0, M );
         tmp_32 = 0;
         for( j = 0; j < i; j++ ) {
-            tmp_32 = SKP_SMLAWW( tmp_32, ptr32[ j ], x_Q16[ j ] );
+            tmp_32 = silk_SMLAWW( tmp_32, ptr32[ j ], x_Q16[ j ] );
         }
-        x_Q16[ i ] = SKP_SUB32( b[ i ], tmp_32 );
+        x_Q16[ i ] = silk_SUB32( b[ i ], tmp_32 );
     }
 }
 
@@ -238,8 +238,8 @@
         ptr32 = matrix_adr( L_Q16, 0, i, M );
         tmp_32 = 0;
         for( j = M - 1; j > i; j-- ) {
-            tmp_32 = SKP_SMLAWW( tmp_32, ptr32[ SKP_SMULBB( j, M ) ], x_Q16[ j ] );
+            tmp_32 = silk_SMLAWW( tmp_32, ptr32[ silk_SMULBB( j, M ) ], x_Q16[ j ] );
         }
-        x_Q16[ i ] = SKP_SUB32( b[ i ], tmp_32 );
+        x_Q16[ i ] = silk_SUB32( b[ i ], tmp_32 );
     }
 }
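
Aside (illustrative only, not part of the patch): the "two-step division" above represents 1/diag as a coarse Q36 part plus a Q48 correction, so a Q16 quotient can be assembled from two multiplies instead of a 64-bit divide: SMMUL contributes tmp * inv_Q48 / 2^32 and SMULWW >> 4 contributes tmp * inv_Q36 / 2^20, which together approximate (tmp << 16) / diag. A small stand-alone demo of that identity, with hypothetical helper names and a plain 64-bit division standing in for silk_INVERSE32_varQ():

    #include <stdio.h>
    #include <stdint.h>

    static int32_t ref_SMULWW(int32_t a, int32_t b) { return (int32_t)(((int64_t)a * b) >> 16); }
    static int32_t ref_SMMUL (int32_t a, int32_t b) { return (int32_t)(((int64_t)a * b) >> 32); }

    int main(void)
    {
        int32_t D      = 123456789;   /* diagonal element, Q0 */
        int32_t tmp_32 = 987654;      /* numerator, Q0        */

        int32_t inv_Q36 = (int32_t)((((int64_t)1) << 36) / D);   /* stand-in for silk_INVERSE32_varQ( D, 36 ) */
        int32_t inv_Q40 = inv_Q36 << 4;
        int32_t err_Q24 = (1 << 24) - ref_SMULWW(D, inv_Q40);    /* residual of D * (1/D) in Q24              */
        int32_t inv_Q48 = ref_SMULWW(err_Q24, inv_Q40);          /* correction term                           */

        /* Two-step division, same shape as in the LDL factorization above */
        int32_t q_Q16   = ref_SMMUL(tmp_32, inv_Q48) + (ref_SMULWW(tmp_32, inv_Q36) >> 4);
        int32_t ref_Q16 = (int32_t)((((int64_t)tmp_32) << 16) / D);   /* exact Q16 quotient                   */

        printf("two-step: %d   exact: %d\n", q_Q16, ref_Q16);    /* differ by at most a couple of LSBs       */
        return 0;
    }
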
diff --git a/silk/fixed/silk_structs_FIX.h b/silk/fixed/silk_structs_FIX.h
index 6cd74f1..4d5cfc3 100644
--- a/silk/fixed/silk_structs_FIX.h
+++ b/silk/fixed/silk_structs_FIX.h
@@ -70,7 +70,7 @@
     silk_prefilter_state_FIX    sPrefilt;                   /* Prefilter State                                                  */
 
     /* Buffer for find pitch and noise shape analysis */
-    SKP_DWORD_ALIGN opus_int16   x_buf[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];/* Buffer for find pitch and noise shape analysis  */
+    silk_DWORD_ALIGN opus_int16   x_buf[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];/* Buffer for find pitch and noise shape analysis  */
     opus_int                     LTPCorr_Q15;                /* Normalized correlation from pitch lag estimator                  */
 
     /* Parameters For LTP scaling Control */
@@ -84,15 +84,15 @@
 typedef struct {
     /* Prediction and coding parameters */
     opus_int32                   Gains_Q16[ MAX_NB_SUBFR ];
-    SKP_DWORD_ALIGN opus_int16   PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ];
+    silk_DWORD_ALIGN opus_int16   PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ];
     opus_int16                   LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ];
     opus_int                     LTP_scale_Q14;
     opus_int                     pitchL[ MAX_NB_SUBFR ];
 
     /* Noise shaping parameters */
     /* Testing */
-    SKP_DWORD_ALIGN opus_int16 AR1_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
-    SKP_DWORD_ALIGN opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
+    silk_DWORD_ALIGN opus_int16 AR1_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
+    silk_DWORD_ALIGN opus_int16 AR2_Q13[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
     opus_int32   LF_shp_Q14[        MAX_NB_SUBFR ];          /* Packs two int16 coefficients per int32 value             */
     opus_int     GainsPre_Q14[      MAX_NB_SUBFR ];
     opus_int     HarmBoost_Q14[     MAX_NB_SUBFR ];
diff --git a/silk/fixed/silk_warped_autocorrelation_FIX.c b/silk/fixed/silk_warped_autocorrelation_FIX.c
index 2c80cfe..0eadda4 100644
--- a/silk/fixed/silk_warped_autocorrelation_FIX.c
+++ b/silk/fixed/silk_warped_autocorrelation_FIX.c
@@ -50,39 +50,39 @@
     opus_int64 corr_QC[  MAX_SHAPE_LPC_ORDER + 1 ] = { 0 };
 
     /* Order must be even */
-    SKP_assert( ( order & 1 ) == 0 );
-    SKP_assert( 2 * QS - QC >= 0 );
+    silk_assert( ( order & 1 ) == 0 );
+    silk_assert( 2 * QS - QC >= 0 );
 
     /* Loop over samples */
     for( n = 0; n < length; n++ ) {
-        tmp1_QS = SKP_LSHIFT32( ( opus_int32 )input[ n ], QS );
+        tmp1_QS = silk_LSHIFT32( ( opus_int32 )input[ n ], QS );
         /* Loop over allpass sections */
         for( i = 0; i < order; i += 2 ) {
             /* Output of allpass section */
-            tmp2_QS = SKP_SMLAWB( state_QS[ i ], state_QS[ i + 1 ] - tmp1_QS, warping_Q16 );
+            tmp2_QS = silk_SMLAWB( state_QS[ i ], state_QS[ i + 1 ] - tmp1_QS, warping_Q16 );
             state_QS[ i ]  = tmp1_QS;
-            corr_QC[  i ] += SKP_RSHIFT64( SKP_SMULL( tmp1_QS, state_QS[ 0 ] ), 2 * QS - QC );
+            corr_QC[  i ] += silk_RSHIFT64( silk_SMULL( tmp1_QS, state_QS[ 0 ] ), 2 * QS - QC );
             /* Output of allpass section */
-            tmp1_QS = SKP_SMLAWB( state_QS[ i + 1 ], state_QS[ i + 2 ] - tmp2_QS, warping_Q16 );
+            tmp1_QS = silk_SMLAWB( state_QS[ i + 1 ], state_QS[ i + 2 ] - tmp2_QS, warping_Q16 );
             state_QS[ i + 1 ]  = tmp2_QS;
-            corr_QC[  i + 1 ] += SKP_RSHIFT64( SKP_SMULL( tmp2_QS, state_QS[ 0 ] ), 2 * QS - QC );
+            corr_QC[  i + 1 ] += silk_RSHIFT64( silk_SMULL( tmp2_QS, state_QS[ 0 ] ), 2 * QS - QC );
         }
         state_QS[ order ] = tmp1_QS;
-        corr_QC[  order ] += SKP_RSHIFT64( SKP_SMULL( tmp1_QS, state_QS[ 0 ] ), 2 * QS - QC );
+        corr_QC[  order ] += silk_RSHIFT64( silk_SMULL( tmp1_QS, state_QS[ 0 ] ), 2 * QS - QC );
     }
 
     lsh = silk_CLZ64( corr_QC[ 0 ] ) - 35;
-    lsh = SKP_LIMIT( lsh, -12 - QC, 30 - QC );
+    lsh = silk_LIMIT( lsh, -12 - QC, 30 - QC );
     *scale = -( QC + lsh );
-    SKP_assert( *scale >= -30 && *scale <= 12 );
+    silk_assert( *scale >= -30 && *scale <= 12 );
     if( lsh >= 0 ) {
         for( i = 0; i < order + 1; i++ ) {
-            corr[ i ] = ( opus_int32 )SKP_CHECK_FIT32( SKP_LSHIFT64( corr_QC[ i ], lsh ) );
+            corr[ i ] = ( opus_int32 )silk_CHECK_FIT32( silk_LSHIFT64( corr_QC[ i ], lsh ) );
         }
     } else {
         for( i = 0; i < order + 1; i++ ) {
-            corr[ i ] = ( opus_int32 )SKP_CHECK_FIT32( SKP_RSHIFT64( corr_QC[ i ], -lsh ) );
+            corr[ i ] = ( opus_int32 )silk_CHECK_FIT32( silk_RSHIFT64( corr_QC[ i ], -lsh ) );
         }
     }
-    SKP_assert( corr_QC[ 0 ] >= 0 ); /* If breaking, decrease QC*/
+    silk_assert( corr_QC[ 0 ] >= 0 ); /* If breaking, decrease QC*/
 }
diff --git a/silk/float/silk_LPC_analysis_filter_FLP.c b/silk/float/silk_LPC_analysis_filter_FLP.c
index c221812..1cfd1d7 100644
--- a/silk/float/silk_LPC_analysis_filter_FLP.c
+++ b/silk/float/silk_LPC_analysis_filter_FLP.c
@@ -41,15 +41,15 @@
 
 /* 16th order LPC analysis filter, does not write first 16 samples */
 void silk_LPC_analysis_filter16_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length              /* I    Length of input signal                  */
 )
 {
     opus_int   ix;
-    SKP_float LPC_pred;
-    const SKP_float *s_ptr;
+    silk_float LPC_pred;
+    const silk_float *s_ptr;
 
     for ( ix = 16; ix < length; ix++) {
         s_ptr = &s[ix - 1];
@@ -79,15 +79,15 @@
 
 /* 14th order LPC analysis filter, does not write first 14 samples */
 void silk_LPC_analysis_filter14_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length              /* I    Length of input signal                  */
 )
 {
     opus_int   ix;
-    SKP_float LPC_pred;
-    const SKP_float *s_ptr;
+    silk_float LPC_pred;
+    const silk_float *s_ptr;
 
     for ( ix = 14; ix < length; ix++) {
         s_ptr = &s[ix - 1];
@@ -115,15 +115,15 @@
 
 /* 12th order LPC analysis filter, does not write first 12 samples */
 void silk_LPC_analysis_filter12_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length              /* I    Length of input signal                  */
 )
 {
     opus_int   ix;
-    SKP_float LPC_pred;
-    const SKP_float *s_ptr;
+    silk_float LPC_pred;
+    const silk_float *s_ptr;
 
     for ( ix = 12; ix < length; ix++) {
         s_ptr = &s[ix - 1];
@@ -149,15 +149,15 @@
 
 /* 10th order LPC analysis filter, does not write first 10 samples */
 void silk_LPC_analysis_filter10_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length              /* I    Length of input signal                  */
 )
 {
     opus_int   ix;
-    SKP_float LPC_pred;
-    const SKP_float *s_ptr;
+    silk_float LPC_pred;
+    const silk_float *s_ptr;
 
     for ( ix = 10; ix < length; ix++) {
         s_ptr = &s[ix - 1];
@@ -181,15 +181,15 @@
 
 /* 8th order LPC analysis filter, does not write first 8 samples */
 void silk_LPC_analysis_filter8_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length              /* I    Length of input signal                  */
 )
 {
     opus_int   ix;
-    SKP_float LPC_pred;
-    const SKP_float *s_ptr;
+    silk_float LPC_pred;
+    const silk_float *s_ptr;
 
     for ( ix = 8; ix < length; ix++) {
         s_ptr = &s[ix - 1];
@@ -211,15 +211,15 @@
 
 /* 6th order LPC analysis filter, does not write first 6 samples */
 void silk_LPC_analysis_filter6_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length              /* I    Length of input signal                  */
 )
 {
     opus_int   ix;
-    SKP_float LPC_pred;
-    const SKP_float *s_ptr;
+    silk_float LPC_pred;
+    const silk_float *s_ptr;
 
     for ( ix = 6; ix < length; ix++) {
         s_ptr = &s[ix - 1];
@@ -245,14 +245,14 @@
 /************************************************/
 
 void silk_LPC_analysis_filter_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length,             /* I    Length of input signal                  */
     const opus_int                   Order               /* I    LPC order                               */
 )
 {
-    SKP_assert( Order <= length );
+    silk_assert( Order <= length );
 
     switch( Order ) {
         case 6:
@@ -280,11 +280,11 @@
         break;
 
         default:
-            SKP_assert( 0 );
+            silk_assert( 0 );
         break;
     }
 
     /* Set first Order output samples to zero */
-    SKP_memset( r_LPC, 0, Order * sizeof( SKP_float ) );
+    silk_memset( r_LPC, 0, Order * sizeof( silk_float ) );
 }
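
Aside (illustrative only, not part of the patch): each of the unrolled order-specific filters above computes the same short-term prediction residual for a fixed order; a generic (slower) equivalent, shown only to make that structure explicit, would look like this hypothetical sketch:

    #include <string.h>

    static void lpc_analysis_filter_generic(
        float *r_LPC, const float *PredCoef, const float *s, int length, int Order)
    {
        int ix, j;
        for (ix = Order; ix < length; ix++) {
            float LPC_pred = 0.0f;
            for (j = 0; j < Order; j++) {
                LPC_pred += PredCoef[j] * s[ix - 1 - j];   /* short-term prediction          */
            }
            r_LPC[ix] = s[ix] - LPC_pred;                  /* prediction residual            */
        }
        memset(r_LPC, 0, Order * sizeof(float));           /* first Order samples set to zero */
    }
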
 
diff --git a/silk/float/silk_LPC_inv_pred_gain_FLP.c b/silk/float/silk_LPC_inv_pred_gain_FLP.c
index bbff53a..29c0c0d 100644
--- a/silk/float/silk_LPC_inv_pred_gain_FLP.c
+++ b/silk/float/silk_LPC_inv_pred_gain_FLP.c
@@ -38,18 +38,18 @@
 /* test if LPC coefficients are stable (all poles within unit circle)   */
 /* this code is based on silk_a2k_FLP()                               */
 opus_int silk_LPC_inverse_pred_gain_FLP(   /* O:   returns 1 if unstable, otherwise 0      */
-    SKP_float       *invGain,               /* O:   inverse prediction gain, energy domain  */
-    const SKP_float *A,                     /* I:   prediction coefficients [order]         */
+    silk_float       *invGain,               /* O:   inverse prediction gain, energy domain  */
+    const silk_float *A,                     /* I:   prediction coefficients [order]         */
     opus_int32       order                   /* I:   prediction order                        */
 )
 {
     opus_int   k, n;
     double    rc, rc_mult1, rc_mult2;
-    SKP_float Atmp[ 2 ][ SILK_MAX_ORDER_LPC ];
-    SKP_float *Aold, *Anew;
+    silk_float Atmp[ 2 ][ SILK_MAX_ORDER_LPC ];
+    silk_float *Aold, *Anew;
 
     Anew = Atmp[ order & 1 ];
-    SKP_memcpy( Anew, A, order * sizeof(SKP_float) );
+    silk_memcpy( Anew, A, order * sizeof(silk_float) );
 
     *invGain = 1.0f;
     for( k = order - 1; k > 0; k-- ) {
@@ -59,12 +59,12 @@
         }
         rc_mult1 = 1.0f - rc * rc;
         rc_mult2 = 1.0f / rc_mult1;
-        *invGain *= (SKP_float)rc_mult1;
+        *invGain *= (silk_float)rc_mult1;
         /* swap pointers */
         Aold = Anew;
         Anew = Atmp[ k & 1 ];
         for( n = 0; n < k; n++ ) {
-            Anew[ n ] = (SKP_float)( ( Aold[ n ] - Aold[ k - n - 1 ] * rc ) * rc_mult2 );
+            Anew[ n ] = (silk_float)( ( Aold[ n ] - Aold[ k - n - 1 ] * rc ) * rc_mult2 );
         }
     }
     rc = -Anew[ 0 ];
@@ -72,6 +72,6 @@
         return 1;
     }
     rc_mult1 = 1.0f - rc * rc;
-    *invGain *= (SKP_float)rc_mult1;
+    *invGain *= (silk_float)rc_mult1;
     return 0;
 }
diff --git a/silk/float/silk_LTP_analysis_filter_FLP.c b/silk/float/silk_LTP_analysis_filter_FLP.c
index a02d09d..4b54c0f 100644
--- a/silk/float/silk_LTP_analysis_filter_FLP.c
+++ b/silk/float/silk_LTP_analysis_filter_FLP.c
@@ -32,20 +32,20 @@
 #include "silk_main_FLP.h"
 
 void silk_LTP_analysis_filter_FLP(
-          SKP_float         *LTP_res,                       /* O    LTP res MAX_NB_SUBFR*(pre_lgth+subfr_lngth) */
-    const SKP_float         *x,                             /* I    Input signal, with preceeding samples       */
-    const SKP_float         B[ LTP_ORDER * MAX_NB_SUBFR ],  /* I    LTP coefficients for each subframe          */
+          silk_float         *LTP_res,                       /* O    LTP res MAX_NB_SUBFR*(pre_lgth+subfr_lngth) */
+    const silk_float         *x,                             /* I    Input signal, with preceeding samples       */
+    const silk_float         B[ LTP_ORDER * MAX_NB_SUBFR ],  /* I    LTP coefficients for each subframe          */
     const opus_int           pitchL[   MAX_NB_SUBFR ],       /* I    Pitch lags                                  */
-    const SKP_float         invGains[ MAX_NB_SUBFR ],       /* I    Inverse quantization gains                  */
+    const silk_float         invGains[ MAX_NB_SUBFR ],       /* I    Inverse quantization gains                  */
     const opus_int           subfr_length,                   /* I    Length of each subframe                     */
     const opus_int           nb_subfr,                       /* I    number of subframes                         */
     const opus_int           pre_length                      /* I    Preceeding samples for each subframe        */
 )
 {
-    const SKP_float *x_ptr, *x_lag_ptr;
-    SKP_float   Btmp[ LTP_ORDER ];
-    SKP_float   *LTP_res_ptr;
-    SKP_float   inv_gain;
+    const silk_float *x_ptr, *x_lag_ptr;
+    silk_float   Btmp[ LTP_ORDER ];
+    silk_float   *LTP_res_ptr;
+    silk_float   inv_gain;
     opus_int     k, i, j;
 
     x_ptr = x;
diff --git a/silk/float/silk_LTP_scale_ctrl_FLP.c b/silk/float/silk_LTP_scale_ctrl_FLP.c
index 7ff2325..bd4405d 100644
--- a/silk/float/silk_LTP_scale_ctrl_FLP.c
+++ b/silk/float/silk_LTP_scale_ctrl_FLP.c
@@ -40,17 +40,17 @@
 
     /* 1st order high-pass filter */
     /*g_HP(n) = g(n) - 0.5 * g(n-1) + 0.5 * g_HP(n-1);*/
-    psEnc->HPLTPredCodGain = SKP_max_float( psEncCtrl->LTPredCodGain - 0.5f * psEnc->prevLTPredCodGain, 0.0f )
+    psEnc->HPLTPredCodGain = silk_max_float( psEncCtrl->LTPredCodGain - 0.5f * psEnc->prevLTPredCodGain, 0.0f )
                             + 0.5f * psEnc->HPLTPredCodGain;
     psEnc->prevLTPredCodGain = psEncCtrl->LTPredCodGain;
 
     /* Only scale if first frame in packet */
     if( psEnc->sCmn.nFramesEncoded == 0 ) {
         round_loss = psEnc->sCmn.PacketLoss_perc + psEnc->sCmn.nFramesPerPacket;
-        psEnc->sCmn.indices.LTP_scaleIndex = (opus_int8)SKP_LIMIT( round_loss * psEnc->HPLTPredCodGain * 0.1f, 0.0f, 2.0f );
+        psEnc->sCmn.indices.LTP_scaleIndex = (opus_int8)silk_LIMIT( round_loss * psEnc->HPLTPredCodGain * 0.1f, 0.0f, 2.0f );
     } else {
         /* Default is minimum scaling */
         psEnc->sCmn.indices.LTP_scaleIndex = 0;
     }
-    psEncCtrl->LTP_scale = (SKP_float)silk_LTPScales_table_Q14[ psEnc->sCmn.indices.LTP_scaleIndex ] / 16384.0f;
+    psEncCtrl->LTP_scale = (silk_float)silk_LTPScales_table_Q14[ psEnc->sCmn.indices.LTP_scaleIndex ] / 16384.0f;
 }
diff --git a/silk/float/silk_SigProc_FLP.h b/silk/float/silk_SigProc_FLP.h
index 05e620c..b101924 100644
--- a/silk/float/silk_SigProc_FLP.h
+++ b/silk/float/silk_SigProc_FLP.h
@@ -42,43 +42,43 @@
 
 /* Chirp (bw expand) LP AR filter */
 void silk_bwexpander_FLP(
-    SKP_float *ar,                     /* io   AR filter to be expanded (without leading 1)    */
+    silk_float *ar,                     /* io   AR filter to be expanded (without leading 1)    */
     const opus_int d,                   /* i    length of ar                                       */
-    const SKP_float chirp              /* i    chirp factor (typically in range (0..1) )          */
+    const silk_float chirp              /* i    chirp factor (typically in range (0..1) )          */
 );
 
 /* compute inverse of LPC prediction gain, and                            */
 /* test if LPC coefficients are stable (all poles within unit circle)    */
 /* this code is based on silk_FLP_a2k()                                    */
 opus_int silk_LPC_inverse_pred_gain_FLP( /* O:   returns 1 if unstable, otherwise 0    */
-    SKP_float            *invGain,      /* O:   inverse prediction gain, energy domain      */
-    const SKP_float      *A,            /* I:   prediction coefficients [order]           */
+    silk_float            *invGain,      /* O:   inverse prediction gain, energy domain      */
+    const silk_float      *A,            /* I:   prediction coefficients [order]           */
     opus_int32            order          /* I:   prediction order                          */
 );
 
-SKP_float silk_schur_FLP(               /* O    returns residual energy                     */
-    SKP_float       refl_coef[],        /* O    reflection coefficients (length order)      */
-    const SKP_float auto_corr[],        /* I    autocorrelation sequence (length order+1)   */
+silk_float silk_schur_FLP(               /* O    returns residual energy                     */
+    silk_float       refl_coef[],        /* O    reflection coefficients (length order)      */
+    const silk_float auto_corr[],        /* I    autocorrelation sequence (length order+1)   */
     opus_int         order               /* I    order                                       */
 );
 
 void silk_k2a_FLP(
-    SKP_float           *A,             /* O:    prediction coefficients [order]           */
-    const SKP_float     *rc,            /* I:    reflection coefficients [order]           */
+    silk_float           *A,             /* O:    prediction coefficients [order]           */
+    const silk_float     *rc,            /* I:    reflection coefficients [order]           */
     opus_int32           order           /* I:    prediction order                          */
 );
 
 /* Solve the normal equations using the Levinson-Durbin recursion */
-SKP_float silk_levinsondurbin_FLP(        /* O    prediction error energy                        */
-    SKP_float        A[],                /* O    prediction coefficients    [order]                */
-    const SKP_float corr[],                /* I    input auto-correlations [order + 1]            */
+silk_float silk_levinsondurbin_FLP(        /* O    prediction error energy                        */
+    silk_float        A[],                /* O    prediction coefficients    [order]                */
+    const silk_float corr[],                /* I    input auto-correlations [order + 1]            */
     const opus_int    order                /* I    prediction order                             */
 );
 
 /* compute autocorrelation */
 void silk_autocorrelation_FLP(
-    SKP_float *results,                 /* o    result (length correlationCount)            */
-    const SKP_float *inputData,         /* i    input data to correlate                     */
+    silk_float *results,                 /* o    result (length correlationCount)            */
+    const silk_float *inputData,         /* i    input data to correlate                     */
     opus_int inputDataSize,              /* i    length of input                             */
     opus_int correlationCount            /* i    number of correlation taps to compute       */
 );
@@ -89,14 +89,14 @@
 #define SigProc_PE_MAX_COMPLEX        2
 
 opus_int silk_pitch_analysis_core_FLP(   /* O voicing estimate: 0 voiced, 1 unvoiced                         */
-    const SKP_float *signal,            /* I signal of length PE_FRAME_LENGTH_MS*Fs_kHz                     */
+    const silk_float *signal,            /* I signal of length PE_FRAME_LENGTH_MS*Fs_kHz                     */
     opus_int         *pitch_out,         /* O 4 pitch lag values                                             */
     opus_int16       *lagIndex,          /* O lag Index                                                      */
     opus_int8        *contourIndex,      /* O pitch contour Index                                            */
-    SKP_float       *LTPCorr,           /* I/O normalized correlation; input: value from previous frame     */
+    silk_float       *LTPCorr,           /* I/O normalized correlation; input: value from previous frame     */
     opus_int         prevLag,            /* I last lag of previous frame; set to zero is unvoiced            */
-    const SKP_float search_thres1,      /* I first stage threshold for lag candidates 0 - 1                 */
-    const SKP_float search_thres2,      /* I final threshold for lag candidates 0 - 1                       */
+    const silk_float search_thres1,      /* I first stage threshold for lag candidates 0 - 1                 */
+    const silk_float search_thres2,      /* I final threshold for lag candidates 0 - 1                       */
     const opus_int   Fs_kHz,             /* I sample frequency (kHz)                                         */
     const opus_int   complexity,         /* I Complexity setting, 0-2, where 2 is highest                    */
     const opus_int   nb_subfr            /* I    number of 5 ms subframes                                    */
@@ -105,47 +105,47 @@
 #define PI               (3.1415926536f)
 
 void silk_insertion_sort_decreasing_FLP(
-    SKP_float            *a,            /* I/O:  Unsorted / Sorted vector                */
+    silk_float            *a,            /* I/O:  Unsorted / Sorted vector                */
     opus_int              *idx,          /* O:    Index vector for the sorted elements    */
     const opus_int        L,             /* I:    Vector length                           */
     const opus_int        K              /* I:    Number of correctly sorted positions    */
 );
 
 /* Compute reflection coefficients from input signal */
-SKP_float silk_burg_modified_FLP(           /* O    returns residual energy                                         */
-    SKP_float           A[],                /* O    prediction coefficients (length order)                          */
-    const SKP_float     x[],                /* I    input signal, length: nb_subfr*(D+L_sub)                        */
+silk_float silk_burg_modified_FLP(           /* O    returns residual energy                                         */
+    silk_float           A[],                /* O    prediction coefficients (length order)                          */
+    const silk_float     x[],                /* I    input signal, length: nb_subfr*(D+L_sub)                        */
     const opus_int       subfr_length,       /* I    input signal subframe length (including D preceding samples)    */
     const opus_int       nb_subfr,           /* I    number of subframes stacked in x                                */
-    const SKP_float     WhiteNoiseFrac,     /* I    fraction added to zero-lag autocorrelation                      */
+    const silk_float     WhiteNoiseFrac,     /* I    fraction added to zero-lag autocorrelation                      */
     const opus_int       D                   /* I    order                                                           */
 );
 
 /* multiply a vector by a constant */
 void silk_scale_vector_FLP(
-    SKP_float           *data1,
-    SKP_float           gain,
+    silk_float           *data1,
+    silk_float           gain,
     opus_int             dataSize
 );
 
 /* copy and multiply a vector by a constant */
 void silk_scale_copy_vector_FLP(
-    SKP_float           *data_out,
-    const SKP_float     *data_in,
-    SKP_float           gain,
+    silk_float           *data_out,
+    const silk_float     *data_in,
+    silk_float           gain,
     opus_int             dataSize
 );
 
-/* inner product of two SKP_float arrays, with result as double */
+/* inner product of two silk_float arrays, with result as double */
 double silk_inner_product_FLP(
-    const SKP_float     *data1,
-    const SKP_float     *data2,
+    const silk_float     *data1,
+    const silk_float     *data2,
     opus_int             dataSize
 );
 
-/* sum of squares of a SKP_float array, with result as double */
+/* sum of squares of a silk_float array, with result as double */
 double silk_energy_FLP(
-    const SKP_float     *data,
+    const silk_float     *data,
     opus_int             dataSize
 );
 
@@ -153,21 +153,21 @@
 /*                                MACROS                                */
 /********************************************************************/
 
-#define SKP_min_float(a, b)            (((a) < (b)) ? (a) :  (b))
-#define SKP_max_float(a, b)            (((a) > (b)) ? (a) :  (b))
-#define SKP_abs_float(a)            ((SKP_float)fabs(a))
+#define silk_min_float(a, b)            (((a) < (b)) ? (a) :  (b))
+#define silk_max_float(a, b)            (((a) > (b)) ? (a) :  (b))
+#define silk_abs_float(a)            ((silk_float)fabs(a))
 
-#define SKP_LIMIT_float( a, limit1, limit2)    ((limit1) > (limit2) ? ((a) > (limit1) ? (limit1) : ((a) < (limit2) ? (limit2) : (a))) \
+#define silk_LIMIT_float( a, limit1, limit2)    ((limit1) > (limit2) ? ((a) > (limit1) ? (limit1) : ((a) < (limit2) ? (limit2) : (a))) \
                                                                  : ((a) > (limit2) ? (limit2) : ((a) < (limit1) ? (limit1) : (a))))
 
 /* sigmoid function */
-static inline SKP_float SKP_sigmoid(SKP_float x)
+static inline silk_float silk_sigmoid(silk_float x)
 {
-    return (SKP_float)(1.0 / (1.0 + exp(-x)));
+    return (silk_float)(1.0 / (1.0 + exp(-x)));
 }
 
 /* floating-point to integer conversion (rounding) */
-static inline opus_int32 SKP_float2int(double x)
+static inline opus_int32 silk_float2int(double x)
 {
 #ifdef _WIN32
     double t = x + 6755399441055744.0;
@@ -178,9 +178,9 @@
 }
 
 /* floating-point to integer conversion (rounding) */
-static inline void SKP_float2short_array(
+static inline void silk_float2short_array(
     opus_int16       *out,
-    const SKP_float *in,
+    const silk_float *in,
     opus_int32       length
 )
 {
@@ -188,29 +188,29 @@
     for (k = length-1; k >= 0; k--) {
 #ifdef _WIN32
         double t = in[k] + 6755399441055744.0;
-        out[k] = (opus_int16)SKP_SAT16(*(( opus_int32 * )( &t )));
+        out[k] = (opus_int16)silk_SAT16(*(( opus_int32 * )( &t )));
 #else
         double x = in[k];
-        out[k] = (opus_int16)SKP_SAT16( ( x > 0 ) ? x + 0.5 : x - 0.5 );
+        out[k] = (opus_int16)silk_SAT16( ( x > 0 ) ? x + 0.5 : x - 0.5 );
 #endif
     }
 }
 
 /* integer to floating-point conversion */
-static inline void SKP_short2float_array(
-    SKP_float       *out,
+static inline void silk_short2float_array(
+    silk_float       *out,
     const opus_int16 *in,
     opus_int32       length
 )
 {
     opus_int32 k;
     for (k = length-1; k >= 0; k--) {
-        out[k] = (SKP_float)in[k];
+        out[k] = (silk_float)in[k];
     }
 }
 
 /* using log2() helps the fixed-point conversion */
-static inline SKP_float silk_log2( double x ) { return ( SKP_float )( 3.32192809488736 * log10( x ) ); }
+static inline silk_float silk_log2( double x ) { return ( silk_float )( 3.32192809488736 * log10( x ) ); }
 
 #ifdef  __cplusplus
 }
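
The renamed silk_float2int() and silk_float2short_array() helpers rely on the classic "add 2^52 + 2^51" rounding trick on _WIN32: adding 6755399441055744.0 to a double leaves the value, rounded to the nearest integer, in the low 32 bits of the double's bit pattern, which the code then reads back through a pointer cast. A minimal standalone sketch of that trick follows; it assumes a little-endian machine running in the default round-to-nearest mode, and float2int_trick(), the union, and main() are illustrative stand-ins rather than code from this patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for the _WIN32 branch of silk_float2int():
     * after adding 2^52 + 2^51, the low 32 bits of the double hold
     * round-to-nearest(x), provided |x| < 2^31 (little endian). */
    static int32_t float2int_trick( double x )
    {
        union { double d; int32_t i32[ 2 ]; } u;
        u.d = x + 6755399441055744.0;          /* 2^52 + 2^51 */
        return u.i32[ 0 ];
    }

    int main( void )
    {
        /* 2.5 rounds to 2 (ties-to-even), whereas the portable branch
         * ( x > 0 ? x + 0.5 : x - 0.5 ) would give 3. */
        printf( "%d %d %d\n", float2int_trick( 2.3 ),
                              float2int_trick( -2.7 ),
                              float2int_trick( 2.5 ) );
        return 0;
    }
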
diff --git a/silk/float/silk_apply_sine_window_FLP.c b/silk/float/silk_apply_sine_window_FLP.c
index def9933..9a82d9c 100644
--- a/silk/float/silk_apply_sine_window_FLP.c
+++ b/silk/float/silk_apply_sine_window_FLP.c
@@ -36,19 +36,19 @@
 /*  1 -> sine window from 0 to pi/2                                                                     */
 /*  2 -> sine window from pi/2 to pi                                                                    */
 void silk_apply_sine_window_FLP(
-          SKP_float                 px_win[],           /* O    Pointer to windowed signal              */
-    const SKP_float                 px[],               /* I    Pointer to input signal                 */
+          silk_float                 px_win[],           /* O    Pointer to windowed signal              */
+    const silk_float                 px[],               /* I    Pointer to input signal                 */
     const opus_int                   win_type,           /* I    Selects a window type                   */
     const opus_int                   length              /* I    Window length, multiple of 4            */
 )
 {
     opus_int   k;
-    SKP_float freq, c, S0, S1;
+    silk_float freq, c, S0, S1;
 
-    SKP_assert( win_type == 1 || win_type == 2 );
+    silk_assert( win_type == 1 || win_type == 2 );
 
     /* Length must be multiple of 4 */
-    SKP_assert( ( length & 3 ) == 0 );
+    silk_assert( ( length & 3 ) == 0 );
 
     freq = PI / ( length + 1 );
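
Only the setup survives in this hunk (the asserts and freq = PI / ( length + 1 )); the per-sample windowing in silk_apply_sine_window_FLP() is produced by a recursive sine oscillator (the S0, S1, c variables above) rather than a sinf() call per sample. As a rough reference for what win_type == 1, "sine window from 0 to pi/2", computes, here is a direct non-recursive sketch; the argument spacing is an assumption for illustration and is not claimed to be bit-exact with the patched code.

    #include <math.h>

    #define PI_F 3.1415926536f

    /* Quarter-period sine fade-in applied to length samples (win_type == 1). */
    static void sine_window_type1_sketch( float px_win[], const float px[], int length )
    {
        int k;
        for( k = 0; k < length; k++ ) {
            px_win[ k ] = px[ k ] * sinf( ( 0.5f * PI_F * ( k + 1 ) ) / ( length + 1 ) );
        }
    }
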
 
diff --git a/silk/float/silk_autocorrelation_FLP.c b/silk/float/silk_autocorrelation_FLP.c
index 540512c..5c6662a 100644
--- a/silk/float/silk_autocorrelation_FLP.c
+++ b/silk/float/silk_autocorrelation_FLP.c
@@ -34,8 +34,8 @@
 
 /* compute autocorrelation */
 void silk_autocorrelation_FLP(
-    SKP_float       *results,           /* O    result (length correlationCount)            */
-    const SKP_float *inputData,         /* I    input data to correlate                     */
+    silk_float       *results,           /* O    result (length correlationCount)            */
+    const silk_float *inputData,         /* I    input data to correlate                     */
     opus_int         inputDataSize,      /* I    length of input                             */
     opus_int         correlationCount    /* I    number of correlation taps to compute       */
 )
@@ -47,6 +47,6 @@
     }
 
     for( i = 0; i < correlationCount; i++ ) {
-        results[ i ] =  (SKP_float)silk_inner_product_FLP( inputData, inputData + i, inputDataSize - i );
+        results[ i ] =  (silk_float)silk_inner_product_FLP( inputData, inputData + i, inputDataSize - i );
     }
 }
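
Each autocorrelation tap above is simply the inner product of the input with a lag-shifted copy of itself over the overlapping samples. A self-contained reference of the same computation, with a plain double accumulator standing in for silk_inner_product_FLP() (autocorr_ref and main are illustrative names, not from the source):

    #include <stdio.h>

    /* results[ i ] = sum_{ n = 0 .. N - 1 - i }  x[ n ] * x[ n + i ] */
    static void autocorr_ref( float *results, const float *x, int N, int correlationCount )
    {
        int i, n;
        for( i = 0; i < correlationCount; i++ ) {
            double acc = 0.0;
            for( n = 0; n < N - i; n++ ) {
                acc += (double)x[ n ] * (double)x[ n + i ];
            }
            results[ i ] = (float)acc;
        }
    }

    int main( void )
    {
        float x[ 4 ] = { 1.0f, 2.0f, 3.0f, 4.0f };
        float r[ 3 ];
        autocorr_ref( r, x, 4, 3 );
        printf( "%g %g %g\n", r[ 0 ], r[ 1 ], r[ 2 ] );   /* 30 20 11 */
        return 0;
    }
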
diff --git a/silk/float/silk_burg_modified_FLP.c b/silk/float/silk_burg_modified_FLP.c
index bf9278d..24806ae 100644
--- a/silk/float/silk_burg_modified_FLP.c
+++ b/silk/float/silk_burg_modified_FLP.c
@@ -35,35 +35,35 @@
 #define MAX_NB_SUBFR                4
 
 /* Compute reflection coefficients from input signal */
-SKP_float silk_burg_modified_FLP(   /* O    returns residual energy                                         */
-    SKP_float       A[],                /* O    prediction coefficients (length order)                          */
-    const SKP_float x[],                /* I    input signal, length: nb_subfr*(D+L_sub)                        */
+silk_float silk_burg_modified_FLP(   /* O    returns residual energy                                         */
+    silk_float       A[],                /* O    prediction coefficients (length order)                          */
+    const silk_float x[],                /* I    input signal, length: nb_subfr*(D+L_sub)                        */
     const opus_int   subfr_length,       /* I    input signal subframe length (including D preceding samples)    */
     const opus_int   nb_subfr,           /* I    number of subframes stacked in x                                */
-    const SKP_float WhiteNoiseFrac,     /* I    fraction added to zero-lag autocorrelation                      */
+    const silk_float WhiteNoiseFrac,     /* I    fraction added to zero-lag autocorrelation                      */
     const opus_int   D                   /* I    order                                                           */
 )
 {
     opus_int         k, n, s;
     double          C0, num, nrg_f, nrg_b, rc, Atmp, tmp1, tmp2;
-    const SKP_float *x_ptr;
+    const silk_float *x_ptr;
     double          C_first_row[ SILK_MAX_ORDER_LPC ], C_last_row[ SILK_MAX_ORDER_LPC ];
     double          CAf[ SILK_MAX_ORDER_LPC + 1 ], CAb[ SILK_MAX_ORDER_LPC + 1 ];
     double          Af[ SILK_MAX_ORDER_LPC ];
 
-    SKP_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );
-    SKP_assert( nb_subfr <= MAX_NB_SUBFR );
+    silk_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );
+    silk_assert( nb_subfr <= MAX_NB_SUBFR );
 
     /* Compute autocorrelations, added over subframes */
     C0 = silk_energy_FLP( x, nb_subfr * subfr_length );
-    SKP_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( double ) );
+    silk_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( double ) );
     for( s = 0; s < nb_subfr; s++ ) {
         x_ptr = x + s * subfr_length;
         for( n = 1; n < D + 1; n++ ) {
             C_first_row[ n - 1 ] += silk_inner_product_FLP( x_ptr, x_ptr + n, subfr_length - n );
         }
     }
-    SKP_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( double ) );
+    silk_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( double ) );
 
     /* Initialize */
     CAb[ 0 ] = CAf[ 0 ] = C0 + WhiteNoiseFrac * C0 + 1e-9f;
@@ -109,12 +109,12 @@
             nrg_b += CAb[ k + 1 ] * Atmp;
             nrg_f += CAf[ k + 1 ] * Atmp;
         }
-        SKP_assert( nrg_f > 0.0 );
-        SKP_assert( nrg_b > 0.0 );
+        silk_assert( nrg_f > 0.0 );
+        silk_assert( nrg_b > 0.0 );
 
         /* Calculate the next order reflection (parcor) coefficient */
         rc = -2.0 * num / ( nrg_f + nrg_b );
-        SKP_assert( rc > -1.0 && rc < 1.0 );
+        silk_assert( rc > -1.0 && rc < 1.0 );
 
         /* Update the AR coefficients */
         for( k = 0; k < (n + 1) >> 1; k++ ) {
@@ -140,9 +140,9 @@
         Atmp = Af[ k ];
         nrg_f += CAf[ k + 1 ] * Atmp;
         tmp1  += Atmp * Atmp;
-        A[ k ] = (SKP_float)(-Atmp);
+        A[ k ] = (silk_float)(-Atmp);
     }
     nrg_f -= WhiteNoiseFrac * C0 * tmp1;
 
-    return (SKP_float)nrg_f;
+    return (silk_float)nrg_f;
 }
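
A note on the renamed assert silk_assert( rc > -1.0 && rc < 1.0 ): with rc = -2 * num / ( nrg_f + nrg_b ), the classical Burg argument gives |num| <= sqrt( nrg_f * nrg_b ) by Cauchy-Schwarz and sqrt( nrg_f * nrg_b ) <= ( nrg_f + nrg_b ) / 2 by AM-GM, so |rc| <= 1 in exact arithmetic and the resulting predictor stays minimum-phase. The "modified" variant here works on accumulated correlations plus a white-noise floor, so the strict inequality in the assert is essentially a guard against degenerate inputs and rounding.
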
diff --git a/silk/float/silk_bwexpander_FLP.c b/silk/float/silk_bwexpander_FLP.c
index ddae86a..4a0088a 100644
--- a/silk/float/silk_bwexpander_FLP.c
+++ b/silk/float/silk_bwexpander_FLP.c
@@ -34,13 +34,13 @@
 
 /* Chirp (bw expand) LP AR filter */
 void silk_bwexpander_FLP(
-    SKP_float           *ar,        /* I/O  AR filter to be expanded (without leading 1)    */
+    silk_float           *ar,        /* I/O  AR filter to be expanded (without leading 1)    */
     const opus_int       d,          /* I    length of ar                                    */
-    const SKP_float     chirp       /* I    chirp factor (typically in range (0..1) )       */
+    const silk_float     chirp       /* I    chirp factor (typically in range (0..1) )       */
 )
 {
     opus_int   i;
-    SKP_float cfac = chirp;
+    silk_float cfac = chirp;
 
     for( i = 0; i < d - 1; i++ ) {
         ar[ i ] *=  cfac;
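
The loop is cut short by the hunk (it stops at d - 1, so the last tap is presumably handled after it), but together with the function comment it implies the usual bandwidth expansion: ar[ i ] *= chirp^( i + 1 ), i.e. the AR polynomial is evaluated at z / chirp, which pulls its roots toward the origin and widens the filter's resonances. A self-contained sketch of the same operation (bwexpand_sketch is an illustrative name):

    /* ar[ i ] *= chirp^( i + 1 )  for i = 0 .. d - 1 */
    static void bwexpand_sketch( float *ar, int d, float chirp )
    {
        float cfac = chirp;
        int   i;
        for( i = 0; i < d; i++ ) {
            ar[ i ] *= cfac;
            cfac    *= chirp;
        }
    }
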
diff --git a/silk/float/silk_corrMatrix_FLP.c b/silk/float/silk_corrMatrix_FLP.c
index 1d92da8..d66107a 100644
--- a/silk/float/silk_corrMatrix_FLP.c
+++ b/silk/float/silk_corrMatrix_FLP.c
@@ -37,56 +37,56 @@
 
 /* Calculates correlation vector X'*t */
 void silk_corrVector_FLP(
-    const SKP_float                 *x,                 /* I    x vector [L+order-1] used to create X   */
-    const SKP_float                 *t,                 /* I    Target vector [L]                       */
+    const silk_float                 *x,                 /* I    x vector [L+order-1] used to create X   */
+    const silk_float                 *t,                 /* I    Target vector [L]                       */
     const opus_int                   L,                  /* I    Length of vectors                       */
     const opus_int                   Order,              /* I    Max lag for correlation                 */
-          SKP_float                 *Xt                 /* O    X'*t correlation vector [order]         */
+          silk_float                 *Xt                 /* O    X'*t correlation vector [order]         */
 )
 {
     opus_int lag;
-    const SKP_float *ptr1;
+    const silk_float *ptr1;
 
     ptr1 = &x[ Order - 1 ];                     /* Points to first sample of column 0 of X: X[:,0] */
     for( lag = 0; lag < Order; lag++ ) {
         /* Calculate X[:,lag]'*t */
-        Xt[ lag ] = (SKP_float)silk_inner_product_FLP( ptr1, t, L );
+        Xt[ lag ] = (silk_float)silk_inner_product_FLP( ptr1, t, L );
         ptr1--;                                 /* Next column of X */
     }
 }
 
 /* Calculates correlation matrix X'*X */
 void silk_corrMatrix_FLP(
-    const SKP_float                 *x,                 /* I    x vector [ L+order-1 ] used to create X */
+    const silk_float                 *x,                 /* I    x vector [ L+order-1 ] used to create X */
     const opus_int                   L,                  /* I    Length of vectors                       */
     const opus_int                   Order,              /* I    Max lag for correlation                 */
-          SKP_float                 *XX                 /* O    X'*X correlation matrix [order x order] */
+          silk_float                 *XX                 /* O    X'*X correlation matrix [order x order] */
 )
 {
     opus_int j, lag;
     double  energy;
-    const SKP_float *ptr1, *ptr2;
+    const silk_float *ptr1, *ptr2;
 
     ptr1 = &x[ Order - 1 ];                     /* First sample of column 0 of X */
     energy = silk_energy_FLP( ptr1, L );  /* X[:,0]'*X[:,0] */
-    matrix_ptr( XX, 0, 0, Order ) = ( SKP_float )energy;
+    matrix_ptr( XX, 0, 0, Order ) = ( silk_float )energy;
     for( j = 1; j < Order; j++ ) {
         /* Calculate X[:,j]'*X[:,j] */
         energy += ptr1[ -j ] * ptr1[ -j ] - ptr1[ L - j ] * ptr1[ L - j ];
-        matrix_ptr( XX, j, j, Order ) = ( SKP_float )energy;
+        matrix_ptr( XX, j, j, Order ) = ( silk_float )energy;
     }
 
     ptr2 = &x[ Order - 2 ];                     /* First sample of column 1 of X */
     for( lag = 1; lag < Order; lag++ ) {
         /* Calculate X[:,0]'*X[:,lag] */
         energy = silk_inner_product_FLP( ptr1, ptr2, L );
-        matrix_ptr( XX, lag, 0, Order ) = ( SKP_float )energy;
-        matrix_ptr( XX, 0, lag, Order ) = ( SKP_float )energy;
+        matrix_ptr( XX, lag, 0, Order ) = ( silk_float )energy;
+        matrix_ptr( XX, 0, lag, Order ) = ( silk_float )energy;
         /* Calculate X[:,j]'*X[:,j + lag] */
         for( j = 1; j < ( Order - lag ); j++ ) {
             energy += ptr1[ -j ] * ptr2[ -j ] - ptr1[ L - j ] * ptr2[ L - j ];
-            matrix_ptr( XX, lag + j, j, Order ) = ( SKP_float )energy;
-            matrix_ptr( XX, j, lag + j, Order ) = ( SKP_float )energy;
+            matrix_ptr( XX, lag + j, j, Order ) = ( silk_float )energy;
+            matrix_ptr( XX, j, lag + j, Order ) = ( silk_float )energy;
         }
         ptr2--;                                 /* Next column of X */
     }
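
The incremental updates above avoid recomputing a full inner product for every matrix entry: along each diagonal the energy of the previous entry is reused and only the sample entering and the sample leaving the window are added and subtracted. Below is a naive reference that the optimized routine must agree with; corrmatrix_naive is an illustrative name, column j of X starts at x[ Order - 1 - j ], and the row-major indexing is an assumption based on the matrix_ptr() accesses.

    /* XX[ i ][ j ] = X[ :, i ]' * X[ :, j ],  column j of X = x[ Order - 1 - j .. ] */
    static void corrmatrix_naive( float *XX, const float *x, int L, int Order )
    {
        int i, j, n;
        for( i = 0; i < Order; i++ ) {
            for( j = 0; j < Order; j++ ) {
                double acc = 0.0;
                for( n = 0; n < L; n++ ) {
                    acc += (double)x[ Order - 1 - i + n ] * (double)x[ Order - 1 - j + n ];
                }
                XX[ i * Order + j ] = (float)acc;
            }
        }
    }
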
diff --git a/silk/float/silk_encode_frame_FLP.c b/silk/float/silk_encode_frame_FLP.c
index 2fc701d..fce6160 100644
--- a/silk/float/silk_encode_frame_FLP.c
+++ b/silk/float/silk_encode_frame_FLP.c
@@ -43,9 +43,9 @@
 {
     silk_encoder_control_FLP sEncCtrl;
     opus_int     i, ret = 0;
-    SKP_float   *x_frame, *res_pitch_frame;
-    SKP_float   xfw[ MAX_FRAME_LENGTH ];
-    SKP_float   res_pitch[ 2 * MAX_FRAME_LENGTH + LA_PITCH_MAX ];
+    silk_float   *x_frame, *res_pitch_frame;
+    silk_float   xfw[ MAX_FRAME_LENGTH ];
+    silk_float   res_pitch[ 2 * MAX_FRAME_LENGTH + LA_PITCH_MAX ];
 
 TIC(ENCODE_FRAME)
 
@@ -96,7 +96,7 @@
     /*******************************************/
     /* Copy new frame to front of input buffer */
     /*******************************************/
-    SKP_short2float_array( x_frame + LA_SHAPE_MS * psEnc->sCmn.fs_kHz, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.frame_length );
+    silk_short2float_array( x_frame + LA_SHAPE_MS * psEnc->sCmn.fs_kHz, psEnc->sCmn.inputBuf + 1, psEnc->sCmn.frame_length );
 
     /* Add tiny signal to avoid high CPU load from denormalized floating point numbers */
     for( i = 0; i < 8; i++ ) {
@@ -153,8 +153,8 @@
 TOC(NSQ)
 
     /* Update input buffer */
-    SKP_memmove( psEnc->x_buf, &psEnc->x_buf[ psEnc->sCmn.frame_length ],
-        ( psEnc->sCmn.ltp_mem_length + LA_SHAPE_MS * psEnc->sCmn.fs_kHz ) * sizeof( SKP_float ) );
+    silk_memmove( psEnc->x_buf, &psEnc->x_buf[ psEnc->sCmn.frame_length ],
+        ( psEnc->sCmn.ltp_mem_length + LA_SHAPE_MS * psEnc->sCmn.fs_kHz ) * sizeof( silk_float ) );
 
     /* Parameters needed for next frame */
     psEnc->sCmn.prevLag        = sEncCtrl.pitchL[ psEnc->sCmn.nb_subfr - 1 ];
@@ -188,7 +188,7 @@
     psEnc->sCmn.first_frame_after_reset = 0;
     if( ++psEnc->sCmn.nFramesEncoded >= psEnc->sCmn.nFramesPerPacket ) {
         /* Payload size */
-        *pnBytesOut = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        *pnBytesOut = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 
         /* Reset the number of frames in payload buffer */
         psEnc->sCmn.nFramesEncoded = 0;
@@ -200,11 +200,11 @@
 
 #ifdef SAVE_ALL_INTERNAL_DATA
     /*DEBUG_STORE_DATA( xf.dat,                   pIn_HP_LP,                           psEnc->sCmn.frame_length * sizeof( opus_int16 ) );*/
-    /*DEBUG_STORE_DATA( xfw.dat,                  xfw,                                 psEnc->sCmn.frame_length * sizeof( SKP_float ) );*/
+    /*DEBUG_STORE_DATA( xfw.dat,                  xfw,                                 psEnc->sCmn.frame_length * sizeof( silk_float ) );*/
     DEBUG_STORE_DATA( pitchL.dat,               sEncCtrl.pitchL,                                 MAX_NB_SUBFR * sizeof( opus_int   ) );
-    DEBUG_STORE_DATA( pitchG_quantized.dat,     sEncCtrl.LTPCoef,            psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( SKP_float ) );
-    DEBUG_STORE_DATA( LTPcorr.dat,              &psEnc->LTPCorr,                                                sizeof( SKP_float ) );
-    DEBUG_STORE_DATA( gains.dat,                sEncCtrl.Gains,                          psEnc->sCmn.nb_subfr * sizeof( SKP_float ) );
+    DEBUG_STORE_DATA( pitchG_quantized.dat,     sEncCtrl.LTPCoef,            psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( silk_float ) );
+    DEBUG_STORE_DATA( LTPcorr.dat,              &psEnc->LTPCorr,                                                sizeof( silk_float ) );
+    DEBUG_STORE_DATA( gains.dat,                sEncCtrl.Gains,                          psEnc->sCmn.nb_subfr * sizeof( silk_float ) );
     DEBUG_STORE_DATA( gains_indices.dat,        &psEnc->sCmn.indices.GainsIndices,       psEnc->sCmn.nb_subfr * sizeof( opus_int8  ) );
     DEBUG_STORE_DATA( quantOffsetType.dat,      &psEnc->sCmn.indices.quantOffsetType,                           sizeof( opus_int8  ) );
     DEBUG_STORE_DATA( speech_activity_q8.dat,   &psEnc->sCmn.speech_activity_Q8,                                sizeof( opus_int   ) );
@@ -212,9 +212,9 @@
     DEBUG_STORE_DATA( lag_index.dat,            &psEnc->sCmn.indices.lagIndex,                                  sizeof( opus_int16 ) );
     DEBUG_STORE_DATA( contour_index.dat,        &psEnc->sCmn.indices.contourIndex,                              sizeof( opus_int8  ) );
     DEBUG_STORE_DATA( per_index.dat,            &psEnc->sCmn.indices.PERIndex,                                  sizeof( opus_int8  ) );
-    DEBUG_STORE_DATA( PredCoef.dat,             &sEncCtrl.PredCoef[ 1 ],          psEnc->sCmn.predictLPCOrder * sizeof( SKP_float ) );
+    DEBUG_STORE_DATA( PredCoef.dat,             &sEncCtrl.PredCoef[ 1 ],          psEnc->sCmn.predictLPCOrder * sizeof( silk_float ) );
     DEBUG_STORE_DATA( ltp_scale_idx.dat,        &psEnc->sCmn.indices.LTP_scaleIndex,                            sizeof( opus_int8   ) );
-    /*DEBUG_STORE_DATA( xq.dat,                   psEnc->sCmn.sNSQ.xqBuf,                psEnc->sCmn.frame_length * sizeof( SKP_float ) );*/
+    /*DEBUG_STORE_DATA( xq.dat,                   psEnc->sCmn.sNSQ.xqBuf,                psEnc->sCmn.frame_length * sizeof( silk_float ) );*/
 #endif
     return ret;
 }
@@ -223,12 +223,12 @@
 void silk_LBRR_encode_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-    const SKP_float                 xfw[]               /* I    Input signal                            */
+    const silk_float                 xfw[]               /* I    Input signal                            */
 )
 {
     opus_int     k;
     opus_int32   Gains_Q16[ MAX_NB_SUBFR ];
-    SKP_float   TempGains[ MAX_NB_SUBFR ];
+    silk_float   TempGains[ MAX_NB_SUBFR ];
     SideInfoIndices *psIndices_LBRR = &psEnc->sCmn.indices_LBRR[ psEnc->sCmn.nFramesEncoded ];
     silk_nsq_state sNSQ_LBRR;
 
@@ -239,11 +239,11 @@
         psEnc->sCmn.LBRR_flags[ psEnc->sCmn.nFramesEncoded ] = 1;
 
         /* Copy noise shaping quantizer state and quantization indices from regular encoding */
-        SKP_memcpy( &sNSQ_LBRR, &psEnc->sCmn.sNSQ, sizeof( silk_nsq_state ) );
-        SKP_memcpy( psIndices_LBRR, &psEnc->sCmn.indices, sizeof( SideInfoIndices ) );
+        silk_memcpy( &sNSQ_LBRR, &psEnc->sCmn.sNSQ, sizeof( silk_nsq_state ) );
+        silk_memcpy( psIndices_LBRR, &psEnc->sCmn.indices, sizeof( SideInfoIndices ) );
 
         /* Save original gains */
-        SKP_memcpy( TempGains, psEncCtrl->Gains, psEnc->sCmn.nb_subfr * sizeof( SKP_float ) );
+        silk_memcpy( TempGains, psEncCtrl->Gains, psEnc->sCmn.nb_subfr * sizeof( silk_float ) );
 
         if( psEnc->sCmn.nFramesEncoded == 0 || psEnc->sCmn.LBRR_flags[ psEnc->sCmn.nFramesEncoded - 1 ] == 0 ) {
             /* First frame in packet or previous frame not LBRR coded */
@@ -251,7 +251,7 @@
 
             /* Increase Gains to get target LBRR rate */
             psIndices_LBRR->GainsIndices[ 0 ] += psEnc->sCmn.LBRR_GainIncreases;
-            psIndices_LBRR->GainsIndices[ 0 ] = SKP_min_int( psIndices_LBRR->GainsIndices[ 0 ], N_LEVELS_QGAIN - 1 );
+            psIndices_LBRR->GainsIndices[ 0 ] = silk_min_int( psIndices_LBRR->GainsIndices[ 0 ], N_LEVELS_QGAIN - 1 );
         }
 
         /* Decode to get gains in sync with decoder */
@@ -270,6 +270,6 @@
             psEnc->sCmn.pulses_LBRR[ psEnc->sCmn.nFramesEncoded ], xfw );
 
         /* Restore original gains */
-        SKP_memcpy( psEncCtrl->Gains, TempGains, psEnc->sCmn.nb_subfr * sizeof( SKP_float ) );
+        silk_memcpy( psEncCtrl->Gains, TempGains, psEnc->sCmn.nb_subfr * sizeof( silk_float ) );
     }
 }
diff --git a/silk/float/silk_energy_FLP.c b/silk/float/silk_energy_FLP.c
index 2995f31..bb6c780 100644
--- a/silk/float/silk_energy_FLP.c
+++ b/silk/float/silk_energy_FLP.c
@@ -31,9 +31,9 @@
 
 #include "silk_SigProc_FLP.h"
 
-/* sum of squares of a SKP_float array, with result as double */
+/* sum of squares of a silk_float array, with result as double */
 double silk_energy_FLP(
-    const SKP_float     *data,
+    const silk_float     *data,
     opus_int             dataSize
 )
 {
@@ -55,6 +55,6 @@
         result += data[ i ] * data[ i ];
     }
 
-    SKP_assert( result >= 0.0 );
+    silk_assert( result >= 0.0 );
     return result;
 }
diff --git a/silk/float/silk_find_LPC_FLP.c b/silk/float/silk_find_LPC_FLP.c
index 463b889..f98e950 100644
--- a/silk/float/silk_find_LPC_FLP.c
+++ b/silk/float/silk_find_LPC_FLP.c
@@ -39,19 +39,19 @@
     const opus_int                   useInterpNLSFs,         /* I    Flag                                    */
     const opus_int                   firstFrameAfterReset,   /* I    Flag                                    */
     const opus_int                   LPC_order,              /* I    LPC order                               */
-    const SKP_float                 x[],                    /* I    Input signal                            */
+    const silk_float                 x[],                    /* I    Input signal                            */
     const opus_int                   subfr_length,           /* I    Subframe length incl preceding samples  */
     const opus_int                   nb_subfr                /* I:   Number of subframes                     */
 )
 {
     opus_int     k;
-    SKP_float   a[ MAX_LPC_ORDER ];
+    silk_float   a[ MAX_LPC_ORDER ];
 
     /* Used only for NLSF interpolation */
     double      res_nrg, res_nrg_2nd, res_nrg_interp;
     opus_int16   NLSF0_Q15[ MAX_LPC_ORDER ];
-    SKP_float   a_tmp[ MAX_LPC_ORDER ];
-    SKP_float   LPC_res[ ( MAX_FRAME_LENGTH + MAX_NB_SUBFR * MAX_LPC_ORDER ) / 2 ];
+    silk_float   a_tmp[ MAX_LPC_ORDER ];
+    silk_float   LPC_res[ ( MAX_FRAME_LENGTH + MAX_NB_SUBFR * MAX_LPC_ORDER ) / 2 ];
 
     /* Default: No interpolation */
     *interpIndex = 4;
@@ -77,7 +77,7 @@
         silk_A2NLSF_FLP( NLSF_Q15, a_tmp, LPC_order );
 
         /* Search over interpolation indices to find the one with lowest residual energy */
-        res_nrg_2nd = SKP_float_MAX;
+        res_nrg_2nd = silk_float_MAX;
         for( k = 3; k >= 0; k-- ) {
             /* Interpolate NLSFs for first half */
             silk_interpolate( NLSF0_Q15, prev_NLSFq_Q15, NLSF_Q15, k, LPC_order );
@@ -109,5 +109,5 @@
         silk_A2NLSF_FLP( NLSF_Q15, a, LPC_order );
     }
 
-    SKP_assert( *interpIndex == 4 || ( useInterpNLSFs && !firstFrameAfterReset && nb_subfr == MAX_NB_SUBFR ) );
+    silk_assert( *interpIndex == 4 || ( useInterpNLSFs && !firstFrameAfterReset && nb_subfr == MAX_NB_SUBFR ) );
 }
diff --git a/silk/float/silk_find_LTP_FLP.c b/silk/float/silk_find_LTP_FLP.c
index deb3361..faf1b98 100644
--- a/silk/float/silk_find_LTP_FLP.c
+++ b/silk/float/silk_find_LTP_FLP.c
@@ -33,24 +33,24 @@
 #include "silk_tuning_parameters.h"
 
 void silk_find_LTP_FLP(
-          SKP_float b[ MAX_NB_SUBFR * LTP_ORDER ],          /* O    LTP coefs                               */
-          SKP_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O    Weight for LTP quantization       */
-          SKP_float *LTPredCodGain,                         /* O    LTP coding gain                         */
-    const SKP_float r_lpc[],                                /* I    LPC residual                            */
+          silk_float b[ MAX_NB_SUBFR * LTP_ORDER ],          /* O    LTP coefs                               */
+          silk_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O    Weight for LTP quantization       */
+          silk_float *LTPredCodGain,                         /* O    LTP coding gain                         */
+    const silk_float r_lpc[],                                /* I    LPC residual                            */
     const opus_int   lag[  MAX_NB_SUBFR ],                   /* I    LTP lags                                */
-    const SKP_float Wght[ MAX_NB_SUBFR ],                   /* I    Weights                                 */
+    const silk_float Wght[ MAX_NB_SUBFR ],                   /* I    Weights                                 */
     const opus_int   subfr_length,                           /* I    Subframe length                         */
     const opus_int   nb_subfr,                               /* I    number of subframes                     */
     const opus_int   mem_offset                              /* I    Number of samples in LTP memory         */
 )
 {
     opus_int   i, k;
-    SKP_float *b_ptr, temp, *WLTP_ptr;
-    SKP_float LPC_res_nrg, LPC_LTP_res_nrg;
-    SKP_float d[ MAX_NB_SUBFR ], m, g, delta_b[ LTP_ORDER ];
-    SKP_float w[ MAX_NB_SUBFR ], nrg[ MAX_NB_SUBFR ], regu;
-    SKP_float Rr[ LTP_ORDER ], rr[ MAX_NB_SUBFR ];
-    const SKP_float *r_ptr, *lag_ptr;
+    silk_float *b_ptr, temp, *WLTP_ptr;
+    silk_float LPC_res_nrg, LPC_LTP_res_nrg;
+    silk_float d[ MAX_NB_SUBFR ], m, g, delta_b[ LTP_ORDER ];
+    silk_float w[ MAX_NB_SUBFR ], nrg[ MAX_NB_SUBFR ], regu;
+    silk_float Rr[ LTP_ORDER ], rr[ MAX_NB_SUBFR ];
+    const silk_float *r_ptr, *lag_ptr;
 
     b_ptr    = b;
     WLTP_ptr = WLTP;
@@ -61,7 +61,7 @@
         silk_corrMatrix_FLP( lag_ptr, subfr_length, LTP_ORDER, WLTP_ptr );
         silk_corrVector_FLP( lag_ptr, r_ptr, subfr_length, LTP_ORDER, Rr );
 
-        rr[ k ] = ( SKP_float )silk_energy_FLP( r_ptr, subfr_length );
+        rr[ k ] = ( silk_float )silk_energy_FLP( r_ptr, subfr_length );
         regu = 1.0f + rr[ k ] +
             matrix_ptr( WLTP_ptr, 0, 0, LTP_ORDER ) +
             matrix_ptr( WLTP_ptr, LTP_ORDER-1, LTP_ORDER-1, LTP_ORDER );
@@ -90,7 +90,7 @@
             LPC_LTP_res_nrg += nrg[ k ] * Wght[ k ];
         }
 
-        SKP_assert( LPC_LTP_res_nrg > 0 );
+        silk_assert( LPC_LTP_res_nrg > 0 );
         *LTPredCodGain = 3.0f * silk_log2( LPC_res_nrg / LPC_LTP_res_nrg );
     }
 
@@ -120,7 +120,7 @@
         g = LTP_SMOOTHING / ( LTP_SMOOTHING + w[ k ] ) * ( m - d[ k ] );
         temp = 0;
         for( i = 0; i < LTP_ORDER; i++ ) {
-            delta_b[ i ] = SKP_max_float( b_ptr[ i ], 0.1f );
+            delta_b[ i ] = silk_max_float( b_ptr[ i ], 0.1f );
             temp += delta_b[ i ];
         }
         temp = g / temp;
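
The coding-gain line above, *LTPredCodGain = 3.0f * silk_log2( LPC_res_nrg / LPC_LTP_res_nrg ), expresses the energy ratio approximately in dB, since 10 * log10( r ) = ( 10 / log2( 10 ) ) * log2( r ) which is about 3.01 * log2( r ). For example, if long-term prediction cuts the weighted residual energy by a factor of 4, the gain evaluates to 3 * log2( 4 ) = 6, i.e. roughly 6 dB.
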
diff --git a/silk/float/silk_find_pitch_lags_FLP.c b/silk/float/silk_find_pitch_lags_FLP.c
index 382bcd5..05629ea 100644
--- a/silk/float/silk_find_pitch_lags_FLP.c
+++ b/silk/float/silk_find_pitch_lags_FLP.c
@@ -36,18 +36,18 @@
 void silk_find_pitch_lags_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-          SKP_float                 res[],              /* O    Residual                                */
-    const SKP_float                 x[]                 /* I    Speech signal                           */
+          silk_float                 res[],              /* O    Residual                                */
+    const silk_float                 x[]                 /* I    Speech signal                           */
 )
 {
     opus_int   buf_len;
-    SKP_float thrhld, res_nrg;
-    const SKP_float *x_buf_ptr, *x_buf;
-    SKP_float auto_corr[ MAX_FIND_PITCH_LPC_ORDER + 1 ];
-    SKP_float A[         MAX_FIND_PITCH_LPC_ORDER ];
-    SKP_float refl_coef[ MAX_FIND_PITCH_LPC_ORDER ];
-    SKP_float Wsig[      FIND_PITCH_LPC_WIN_MAX ];
-    SKP_float *Wsig_ptr;
+    silk_float thrhld, res_nrg;
+    const silk_float *x_buf_ptr, *x_buf;
+    silk_float auto_corr[ MAX_FIND_PITCH_LPC_ORDER + 1 ];
+    silk_float A[         MAX_FIND_PITCH_LPC_ORDER ];
+    silk_float refl_coef[ MAX_FIND_PITCH_LPC_ORDER ];
+    silk_float Wsig[      FIND_PITCH_LPC_WIN_MAX ];
+    silk_float *Wsig_ptr;
 
     /******************************************/
     /* Setup buffer lengths etc based on Fs   */
@@ -55,7 +55,7 @@
     buf_len = psEnc->sCmn.la_pitch + psEnc->sCmn.frame_length + psEnc->sCmn.ltp_mem_length;
 
     /* Safety check */
-    SKP_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length );
+    silk_assert( buf_len >= psEnc->sCmn.pitch_LPC_win_length );
 
     x_buf = x - psEnc->sCmn.ltp_mem_length;
 
@@ -73,7 +73,7 @@
     /* Middle non-windowed samples */
     Wsig_ptr  += psEnc->sCmn.la_pitch;
     x_buf_ptr += psEnc->sCmn.la_pitch;
-    SKP_memcpy( Wsig_ptr, x_buf_ptr, ( psEnc->sCmn.pitch_LPC_win_length - ( psEnc->sCmn.la_pitch << 1 ) ) * sizeof( SKP_float ) );
+    silk_memcpy( Wsig_ptr, x_buf_ptr, ( psEnc->sCmn.pitch_LPC_win_length - ( psEnc->sCmn.la_pitch << 1 ) ) * sizeof( silk_float ) );
 
     /* Last LA_LTP samples */
     Wsig_ptr  += psEnc->sCmn.pitch_LPC_win_length - ( psEnc->sCmn.la_pitch << 1 );
@@ -90,7 +90,7 @@
     res_nrg = silk_schur_FLP( refl_coef, auto_corr, psEnc->sCmn.pitchEstimationLPCOrder );
 
     /* Prediction gain */
-    psEncCtrl->predGain = auto_corr[ 0 ] / SKP_max_float( res_nrg, 1.0f );
+    psEncCtrl->predGain = auto_corr[ 0 ] / silk_max_float( res_nrg, 1.0f );
 
     /* Convert reflection coefficients to prediction coefficients */
     silk_k2a_FLP( A, refl_coef, psEnc->sCmn.pitchEstimationLPCOrder );
@@ -123,7 +123,7 @@
             psEnc->sCmn.indices.signalType = TYPE_UNVOICED;
         }
     } else {
-        SKP_memset( psEncCtrl->pitchL, 0, sizeof( psEncCtrl->pitchL ) );
+        silk_memset( psEncCtrl->pitchL, 0, sizeof( psEncCtrl->pitchL ) );
         psEnc->sCmn.indices.lagIndex = 0;
         psEnc->sCmn.indices.contourIndex = 0;
         psEnc->LTPCorr = 0;
diff --git a/silk/float/silk_find_pred_coefs_FLP.c b/silk/float/silk_find_pred_coefs_FLP.c
index 5803615..9ad3377 100644
--- a/silk/float/silk_find_pred_coefs_FLP.c
+++ b/silk/float/silk_find_pred_coefs_FLP.c
@@ -35,20 +35,20 @@
 void silk_find_pred_coefs_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-    const SKP_float                 res_pitch[],        /* I    Residual from pitch analysis            */
-    const SKP_float                 x[]                 /* I    Speech signal                           */
+    const silk_float                 res_pitch[],        /* I    Residual from pitch analysis            */
+    const silk_float                 x[]                 /* I    Speech signal                           */
 )
 {
     opus_int         i;
-    SKP_float       WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ];
-    SKP_float       invGains[ MAX_NB_SUBFR ], Wght[ MAX_NB_SUBFR ];
+    silk_float       WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ];
+    silk_float       invGains[ MAX_NB_SUBFR ], Wght[ MAX_NB_SUBFR ];
     opus_int16       NLSF_Q15[ MAX_LPC_ORDER ];
-    const SKP_float *x_ptr;
-    SKP_float       *x_pre_ptr, LPC_in_pre[ MAX_NB_SUBFR * MAX_LPC_ORDER + MAX_FRAME_LENGTH ];
+    const silk_float *x_ptr;
+    silk_float       *x_pre_ptr, LPC_in_pre[ MAX_NB_SUBFR * MAX_LPC_ORDER + MAX_FRAME_LENGTH ];
 
     /* Weighting for weighted least squares */
     for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
-        SKP_assert( psEncCtrl->Gains[ i ] > 0.0f );
+        silk_assert( psEncCtrl->Gains[ i ] > 0.0f );
         invGains[ i ] = 1.0f / psEncCtrl->Gains[ i ];
         Wght[ i ]     = invGains[ i ] * invGains[ i ];
     }
@@ -57,7 +57,7 @@
         /**********/
         /* VOICED */
         /**********/
-        SKP_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 );
+        silk_assert( psEnc->sCmn.ltp_mem_length - psEnc->sCmn.predictLPCOrder >= psEncCtrl->pitchL[ 0 ] + LTP_ORDER / 2 );
 
         /* LTP analysis */
         silk_find_LTP_FLP( psEncCtrl->LTPCoef, WLTP, &psEncCtrl->LTPredCodGain, res_pitch,
@@ -93,7 +93,7 @@
             x_ptr     += psEnc->sCmn.subfr_length;
         }
 
-        SKP_memset( psEncCtrl->LTPCoef, 0, psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( SKP_float ) );
+        silk_memset( psEncCtrl->LTPCoef, 0, psEnc->sCmn.nb_subfr * LTP_ORDER * sizeof( silk_float ) );
         psEncCtrl->LTPredCodGain = 0.0f;
     }
 
@@ -112,6 +112,6 @@
         psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr, psEnc->sCmn.predictLPCOrder );
 
     /* Copy to prediction struct for use in next frame for fluctuation reduction */
-    SKP_memcpy( psEnc->sCmn.prev_NLSFq_Q15, NLSF_Q15, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) );
+    silk_memcpy( psEnc->sCmn.prev_NLSFq_Q15, NLSF_Q15, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) );
 }
 
diff --git a/silk/float/silk_inner_product_FLP.c b/silk/float/silk_inner_product_FLP.c
index 963cc77..18444e1 100644
--- a/silk/float/silk_inner_product_FLP.c
+++ b/silk/float/silk_inner_product_FLP.c
@@ -31,10 +31,10 @@
 
 #include "silk_SigProc_FLP.h"
 
-/* inner product of two SKP_float arrays, with result as double     */
+/* inner product of two silk_float arrays, with result as double     */
 double silk_inner_product_FLP(      /* O    result              */
-    const SKP_float     *data1,         /* I    vector 1            */
-    const SKP_float     *data2,         /* I    vector 2            */
+    const silk_float     *data1,         /* I    vector 1            */
+    const silk_float     *data2,         /* I    vector 2            */
     opus_int             dataSize        /* I    length of vectors   */
 )
 {
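
Both silk_inner_product_FLP() and silk_energy_FLP() return double even though their inputs are silk_float: accumulating the products in a float would lose low-order bits over frame-length sums and make the correlation-based routines above less reliable. Ignoring any loop unrolling the real code may do, the pattern is simply this (inner_product_ref is an illustrative name):

    /* Accumulate float products in a double to avoid precision loss. */
    static double inner_product_ref( const float *data1, const float *data2, int dataSize )
    {
        double result = 0.0;
        int    i;
        for( i = 0; i < dataSize; i++ ) {
            result += (double)data1[ i ] * (double)data2[ i ];
        }
        return result;
    }
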
diff --git a/silk/float/silk_k2a_FLP.c b/silk/float/silk_k2a_FLP.c
index 318b8d6..dd2081b 100644
--- a/silk/float/silk_k2a_FLP.c
+++ b/silk/float/silk_k2a_FLP.c
@@ -33,13 +33,13 @@
 
 /* step up function, converts reflection coefficients to prediction coefficients */
 void silk_k2a_FLP(
-    SKP_float       *A,                 /* O:   prediction coefficients [order]             */
-    const SKP_float *rc,                /* I:   reflection coefficients [order]             */
+    silk_float       *A,                 /* O:   prediction coefficients [order]             */
+    const silk_float *rc,                /* I:   reflection coefficients [order]             */
     opus_int32       order               /* I:   prediction order                            */
 )
 {
     opus_int   k, n;
-    SKP_float Atmp[ SILK_MAX_ORDER_LPC ];
+    silk_float Atmp[ SILK_MAX_ORDER_LPC ];
 
     for( k = 0; k < order; k++ ){
         for( n = 0; n < k; n++ ){
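
The body of the step-up recursion is cut off by this hunk, but the conversion it performs is the standard one: each reflection coefficient rc[ k ] extends the order-k predictor to order k + 1 using the previous coefficients in reverse order. A hedged sketch is below; k2a_sketch and the array bound are illustrative, and the sign attached to rc[ k ] depends on the convention of the matching silk_schur_FLP(), which is not visible here.

    /* Step-up ("k2a") recursion: reflection coefficients -> prediction coefficients. */
    static void k2a_sketch( float *A, const float *rc, int order )
    {
        float Atmp[ 32 ];                      /* generous stand-in for SILK_MAX_ORDER_LPC */
        int   k, n;
        for( k = 0; k < order; k++ ) {
            for( n = 0; n < k; n++ ) {
                Atmp[ n ] = A[ n ];
            }
            for( n = 0; n < k; n++ ) {
                A[ n ] += Atmp[ k - n - 1 ] * rc[ k ];
            }
            A[ k ] = -rc[ k ];
        }
    }
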
diff --git a/silk/float/silk_levinsondurbin_FLP.c b/silk/float/silk_levinsondurbin_FLP.c
index 113204b..05933bb 100644
--- a/silk/float/silk_levinsondurbin_FLP.c
+++ b/silk/float/silk_levinsondurbin_FLP.c
@@ -32,21 +32,21 @@
 #include "silk_SigProc_FLP.h"
 
 /* Solve the normal equations using the Levinson-Durbin recursion */
-SKP_float silk_levinsondurbin_FLP(    /* O    prediction error energy                     */
-    SKP_float       A[],                /* O    prediction coefficients [order]             */
-    const SKP_float corr[],             /* I    input auto-correlations [order + 1]         */
+silk_float silk_levinsondurbin_FLP(    /* O    prediction error energy                     */
+    silk_float       A[],                /* O    prediction coefficients [order]             */
+    const silk_float corr[],             /* I    input auto-correlations [order + 1]         */
     const opus_int   order               /* I    prediction order                            */
 )
 {
     opus_int   i, mHalf, m;
-    SKP_float min_nrg, nrg, t, km, Atmp1, Atmp2;
+    silk_float min_nrg, nrg, t, km, Atmp1, Atmp2;
 
     min_nrg = 1e-12f * corr[ 0 ] + 1e-9f;
     nrg = corr[ 0 ];
-    nrg = SKP_max_float(min_nrg, nrg);
+    nrg = silk_max_float(min_nrg, nrg);
     A[ 0 ] = corr[ 1 ] / nrg;
     nrg -= A[ 0 ] * corr[ 1 ];
-    nrg = SKP_max_float(min_nrg, nrg);
+    nrg = silk_max_float(min_nrg, nrg);
 
     for( m = 1; m < order; m++ )
     {
@@ -60,7 +60,7 @@
 
         /* residual energy */
         nrg -= km * t;
-        nrg = SKP_max_float(min_nrg, nrg);
+        nrg = silk_max_float(min_nrg, nrg);
 
         mHalf = m >> 1;
         for( i = 0; i < mHalf; i++ ) {
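
The visible part of this hunk covers the zeroth-order start ( A[ 0 ] = corr[ 1 ] / nrg ) and the energy floor min_nrg; the remainder, cut off here, follows the textbook Levinson-Durbin recursion, and judging by the mHalf split and the Atmp1/Atmp2 temporaries it updates the coefficients in place. The sketch below reproduces the recursion with a temporary array instead, under the same conventions (positive predictor taps, corr[ 0 .. order ], residual energy returned); levinson_sketch and the array bound are illustrative, not code from the patch.

    static float levinson_sketch( float A[], const float corr[], int order )
    {
        float Atmp[ 32 ];                      /* illustrative bound on the order */
        float min_nrg, nrg, t, km;
        int   i, m;

        min_nrg = 1e-12f * corr[ 0 ] + 1e-9f;
        nrg     = corr[ 0 ] > min_nrg ? corr[ 0 ] : min_nrg;

        A[ 0 ] = corr[ 1 ] / nrg;              /* order-1 solution */
        nrg   -= A[ 0 ] * corr[ 1 ];
        if( nrg < min_nrg ) nrg = min_nrg;

        for( m = 1; m < order; m++ ) {
            /* part of corr[ m + 1 ] not explained by the current predictor */
            t = corr[ m + 1 ];
            for( i = 0; i < m; i++ ) {
                t -= A[ i ] * corr[ m - i ];
            }
            km = t / nrg;                      /* reflection coefficient */

            nrg -= km * t;                     /* equals nrg * ( 1 - km * km ) */
            if( nrg < min_nrg ) nrg = min_nrg;

            /* extend the predictor by one order */
            for( i = 0; i < m; i++ ) {
                Atmp[ i ] = A[ i ] - km * A[ m - 1 - i ];
            }
            for( i = 0; i < m; i++ ) {
                A[ i ] = Atmp[ i ];
            }
            A[ m ] = km;
        }
        return nrg;
    }
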
diff --git a/silk/float/silk_main_FLP.h b/silk/float/silk_main_FLP.h
index ddd5a61..da65861 100644
--- a/silk/float/silk_main_FLP.h
+++ b/silk/float/silk_main_FLP.h
@@ -65,7 +65,7 @@
 void silk_LBRR_encode_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-    const SKP_float                 xfw[]               /* I    Input signal                            */
+    const silk_float                 xfw[]               /* I    Input signal                            */
 );
 
 /* Initializes the Silk encoder state */
@@ -89,8 +89,8 @@
 void silk_prefilter_FLP(
     silk_encoder_state_FLP              *psEnc,         /* I/O  Encoder state FLP                       */
     const silk_encoder_control_FLP      *psEncCtrl,     /* I    Encoder control FLP                     */
-          SKP_float                     xw[],           /* O    Weighted signal                         */
-    const SKP_float                     x[]             /* I    Speech signal                           */
+          silk_float                     xw[],           /* O    Weighted signal                         */
+    const silk_float                     x[]             /* I    Speech signal                           */
 );
 
 /**************************/
@@ -100,15 +100,15 @@
 void silk_noise_shape_analysis_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-    const SKP_float                 *pitch_res,         /* I    LPC residual from pitch analysis        */
-    const SKP_float                 *x                  /* I    Input signal [frame_length + la_shape]  */
+    const silk_float                 *pitch_res,         /* I    LPC residual from pitch analysis        */
+    const silk_float                 *x                  /* I    Input signal [frame_length + la_shape]  */
 );
 
 /* Autocorrelations for a warped frequency axis */
 void silk_warped_autocorrelation_FLP(
-          SKP_float                 *corr,              /* O    Result [order + 1]                      */
-    const SKP_float                 *input,             /* I    Input data to correlate                 */
-    const SKP_float                 warping,            /* I    Warping coefficient                     */
+          silk_float                 *corr,              /* O    Result [order + 1]                      */
+    const silk_float                 *input,             /* I    Input data to correlate                 */
+    const silk_float                 warping,            /* I    Warping coefficient                     */
     const opus_int                   length,             /* I    Length of input                         */
     const opus_int                   order               /* I    Correlation order (even)                */
 );
@@ -126,16 +126,16 @@
 void silk_find_pitch_lags_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-          SKP_float                 res[],              /* O    Residual                                */
-    const SKP_float                 x[]                 /* I    Speech signal                           */
+          silk_float                 res[],              /* O    Residual                                */
+    const silk_float                 x[]                 /* I    Speech signal                           */
 );
 
 /* Find LPC and LTP coefficients */
 void silk_find_pred_coefs_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-    const SKP_float                 res_pitch[],        /* I    Residual from pitch analysis            */
-    const SKP_float                 x[]                 /* I    Speech signal                           */
+    const silk_float                 res_pitch[],        /* I    Residual from pitch analysis            */
+    const silk_float                 x[]                 /* I    Speech signal                           */
 );
 
 /* LPC analysis */
@@ -146,30 +146,30 @@
     const opus_int                   useInterpNLSFs,         /* I    Flag                                    */
     const opus_int                   firstFrameAfterReset,   /* I    Flag                                    */
     const opus_int                   LPC_order,              /* I    LPC order                               */
-    const SKP_float                 x[],                    /* I    Input signal                            */
+    const silk_float                 x[],                    /* I    Input signal                            */
     const opus_int                   subfr_length,           /* I    Subframe length incl preceding samples  */
     const opus_int                   nb_subfr                /* I:   Number of subframes                     */
 );
 
 /* LTP analysis */
 void silk_find_LTP_FLP(
-          SKP_float b[ MAX_NB_SUBFR * LTP_ORDER ],          /* O    LTP coefs                               */
-          SKP_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O    Weight for LTP quantization       */
-          SKP_float *LTPredCodGain,                         /* O    LTP coding gain                         */
-    const SKP_float r_lpc[],                                /* I    LPC residual                            */
+          silk_float b[ MAX_NB_SUBFR * LTP_ORDER ],          /* O    LTP coefs                               */
+          silk_float WLTP[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O    Weight for LTP quantization       */
+          silk_float *LTPredCodGain,                         /* O    LTP coding gain                         */
+    const silk_float r_lpc[],                                /* I    LPC residual                            */
     const opus_int   lag[  MAX_NB_SUBFR ],                   /* I    LTP lags                                */
-    const SKP_float Wght[ MAX_NB_SUBFR ],                   /* I    Weights                                 */
+    const silk_float Wght[ MAX_NB_SUBFR ],                   /* I    Weights                                 */
     const opus_int   subfr_length,                           /* I    Subframe length                         */
     const opus_int   nb_subfr,                               /* I    number of subframes                     */
     const opus_int   mem_offset                              /* I    Number of samples in LTP memory         */
 );
 
 void silk_LTP_analysis_filter_FLP(
-          SKP_float         *LTP_res,                   /* O    LTP res MAX_NB_SUBFR*(pre_lgth+subfr_lngth) */
-    const SKP_float         *x,                         /* I    Input signal, with preceeding samples       */
-    const SKP_float         B[ LTP_ORDER * MAX_NB_SUBFR ],  /* I    LTP coefficients for each subframe      */
+          silk_float         *LTP_res,                   /* O    LTP res MAX_NB_SUBFR*(pre_lgth+subfr_lngth) */
+    const silk_float         *x,                         /* I    Input signal, with preceeding samples       */
+    const silk_float         B[ LTP_ORDER * MAX_NB_SUBFR ],  /* I    LTP coefficients for each subframe      */
     const opus_int           pitchL[   MAX_NB_SUBFR ],   /* I    Pitch lags                                  */
-    const SKP_float         invGains[ MAX_NB_SUBFR ],   /* I    Inverse quantization gains                  */
+    const silk_float         invGains[ MAX_NB_SUBFR ],   /* I    Inverse quantization gains                  */
     const opus_int           subfr_length,               /* I    Length of each subframe                     */
     const opus_int           nb_subfr,                   /* I    number of subframes                         */
     const opus_int           pre_length                  /* I    Preceding samples for each subframe         */
@@ -178,10 +178,10 @@
 /* Calculates residual energies of input subframes where all subframes have LPC_order   */
 /* of preceding samples                                                                 */
 void silk_residual_energy_FLP(
-          SKP_float             nrgs[ MAX_NB_SUBFR ],   /* O    Residual energy per subframe            */
-    const SKP_float             x[],                    /* I    Input signal                            */
-          SKP_float             a[ 2 ][ MAX_LPC_ORDER ],/* I    AR coefs for each frame half            */
-    const SKP_float             gains[],                /* I    Quantization gains                      */
+          silk_float             nrgs[ MAX_NB_SUBFR ],   /* O    Residual energy per subframe            */
+    const silk_float             x[],                    /* I    Input signal                            */
+          silk_float             a[ 2 ][ MAX_LPC_ORDER ],/* I    AR coefs for each frame half            */
+    const silk_float             gains[],                /* I    Quantization gains                      */
     const opus_int               subfr_length,           /* I    Subframe length                         */
     const opus_int               nb_subfr,               /* I    number of subframes                     */
     const opus_int               LPC_order               /* I    LPC order                               */
@@ -189,19 +189,19 @@
 
 /* 16th order LPC analysis filter */
 void silk_LPC_analysis_filter_FLP(
-          SKP_float                 r_LPC[],            /* O    LPC residual signal                     */
-    const SKP_float                 PredCoef[],         /* I    LPC coefficients                        */
-    const SKP_float                 s[],                /* I    Input signal                            */
+          silk_float                 r_LPC[],            /* O    LPC residual signal                     */
+    const silk_float                 PredCoef[],         /* I    LPC coefficients                        */
+    const silk_float                 s[],                /* I    Input signal                            */
     const opus_int                   length,             /* I    Length of input signal                  */
     const opus_int                   Order               /* I    LPC order                               */
 );
 
 /* LTP tap quantizer */
 void silk_quant_LTP_gains_FLP(
-          SKP_float B[ MAX_NB_SUBFR * LTP_ORDER ],              /* I/O  (Un-)quantized LTP gains                */
+          silk_float B[ MAX_NB_SUBFR * LTP_ORDER ],              /* I/O  (Un-)quantized LTP gains                */
           opus_int8  cbk_index[ MAX_NB_SUBFR ],                  /* O    Codebook index                          */
           opus_int8  *periodicity_index,                         /* O    Periodicity index                       */
-    const SKP_float W[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ],  /* I    Error weights                           */
+    const silk_float W[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ],  /* I    Error weights                           */
     const opus_int   mu_Q10,                                     /* I    Mu value (R/D tradeoff)                 */
     const opus_int   lowComplexity,                              /* I    Flag for low complexity                 */
     const opus_int   nb_subfr                                    /* I    number of subframes                     */
@@ -213,29 +213,29 @@
 /* Limit, stabilize, and quantize NLSFs */
 void silk_process_NLSFs_FLP(
     silk_encoder_state              *psEncC,                            /* I/O  Encoder state                               */
-    SKP_float                       PredCoef[ 2 ][ MAX_LPC_ORDER ],     /* O    Prediction coefficients                     */
+    silk_float                       PredCoef[ 2 ][ MAX_LPC_ORDER ],     /* O    Prediction coefficients                     */
     opus_int16                       NLSF_Q15[      MAX_LPC_ORDER ],     /* I/O  Normalized LSFs (quant out) (0 - (2^15-1))  */
     const opus_int16                 prev_NLSF_Q15[ MAX_LPC_ORDER ]      /* I    Previous Normalized LSFs (0 - (2^15-1))     */
 );
 
 /* Residual energy: nrg = wxx - 2 * wXx * c + c' * wXX * c */
-SKP_float silk_residual_energy_covar_FLP(           /* O    Weighted residual energy                */
-    const SKP_float                 *c,                 /* I    Filter coefficients                     */
-          SKP_float                 *wXX,               /* I/O  Weighted correlation matrix, reg. out   */
-    const SKP_float                 *wXx,               /* I    Weighted correlation vector             */
-    const SKP_float                 wxx,                /* I    Weighted correlation value              */
+silk_float silk_residual_energy_covar_FLP(           /* O    Weighted residual energy                */
+    const silk_float                 *c,                 /* I    Filter coefficients                     */
+          silk_float                 *wXX,               /* I/O  Weighted correlation matrix, reg. out   */
+    const silk_float                 *wXx,               /* I    Weighted correlation vector             */
+    const silk_float                 wxx,                /* I    Weighted correlation value              */
     const opus_int                   D                   /* I    Dimension                               */
 );
 
 /* Entropy constrained MATRIX-weighted VQ, for a single input data vector */
 void silk_VQ_WMat_EC_FLP(
           opus_int                   *ind,               /* O    Index of best codebook vector           */
-          SKP_float                 *rate_dist,         /* O    Best weighted quant. error + mu * rate  */
-    const SKP_float                 *in,                /* I    Input vector to be quantized            */
-    const SKP_float                 *W,                 /* I    Weighting matrix                        */
+          silk_float                 *rate_dist,         /* O    Best weighted quant. error + mu * rate  */
+    const silk_float                 *in,                /* I    Input vector to be quantized            */
+    const silk_float                 *W,                 /* I    Weighting matrix                        */
     const opus_int16                 *cb,                /* I    Codebook                                */
     const opus_int16                 *cl_Q6,             /* I    Code length for each codebook vector    */
-    const SKP_float                 mu,                 /* I    Tradeoff between WSSE and rate          */
+    const silk_float                 mu,                 /* I    Tradeoff between WSSE and rate          */
     const opus_int                   L                   /* I    Number of vectors in codebook           */
 );
 
@@ -250,35 +250,35 @@
 /******************/
 /* Calculates correlation matrix X'*X */
 void silk_corrMatrix_FLP(
-    const SKP_float                 *x,                 /* I    x vector [ L+order-1 ] used to create X */
+    const silk_float                 *x,                 /* I    x vector [ L+order-1 ] used to create X */
     const opus_int                   L,                  /* I    Length of vectors                       */
     const opus_int                   Order,              /* I    Max lag for correlation                 */
-          SKP_float                 *XX                 /* O    X'*X correlation matrix [order x order] */
+          silk_float                 *XX                 /* O    X'*X correlation matrix [order x order] */
 );
 
 /* Calculates correlation vector X'*t */
 void silk_corrVector_FLP(
-    const SKP_float                 *x,                 /* I    x vector [L+order-1] used to create X   */
-    const SKP_float                 *t,                 /* I    Target vector [L]                       */
+    const silk_float                 *x,                 /* I    x vector [L+order-1] used to create X   */
+    const silk_float                 *t,                 /* I    Target vector [L]                       */
     const opus_int                   L,                  /* I    Length of vectors                       */
     const opus_int                   Order,              /* I    Max lag for correlation                 */
-          SKP_float                 *Xt                 /* O    X'*t correlation vector [order]         */
+          silk_float                 *Xt                 /* O    X'*t correlation vector [order]         */
 );
 
 /* Add noise to matrix diagonal */
 void silk_regularize_correlations_FLP(
-          SKP_float                 *XX,                /* I/O  Correlation matrices                    */
-          SKP_float                 *xx,                /* I/O  Correlation values                      */
-    const SKP_float                 noise,              /* I    Noise energy to add                     */
+          silk_float                 *XX,                /* I/O  Correlation matrices                    */
+          silk_float                 *xx,                /* I/O  Correlation values                      */
+    const silk_float                 noise,              /* I    Noise energy to add                     */
     const opus_int                   D                   /* I    Dimension of XX                         */
 );
 
 /* Function to solve linear equation Ax = b, where A is an MxM symmetric matrix */
 void silk_solve_LDL_FLP(
-          SKP_float                 *A,                 /* I/O  Symmetric square matrix, out: reg.      */
+          silk_float                 *A,                 /* I/O  Symmetric square matrix, out: reg.      */
     const opus_int                   M,                  /* I    Size of matrix                          */
-    const SKP_float                 *b,                 /* I    Pointer to b vector                     */
-          SKP_float                 *x                  /* O    Pointer to x solution vector            */
+    const silk_float                 *b,                 /* I    Pointer to b vector                     */
+          silk_float                 *x                  /* O    Pointer to x solution vector            */
 );
 
 /* Apply sine window to signal vector.                                                                  */
@@ -286,8 +286,8 @@
 /*  1 -> sine window from 0 to pi/2                                                                     */
 /*  2 -> sine window from pi/2 to pi                                                                    */
 void silk_apply_sine_window_FLP(
-          SKP_float                 px_win[],           /* O    Pointer to windowed signal              */
-    const SKP_float                 px[],               /* I    Pointer to input signal                 */
+          silk_float                 px_win[],           /* O    Pointer to windowed signal              */
+    const silk_float                 px[],               /* I    Pointer to input signal                 */
     const opus_int                   win_type,           /* I    Selects a window type                   */
     const opus_int                   length              /* I    Window length, multiple of 4            */
 );
@@ -297,13 +297,13 @@
 /* Convert AR filter coefficients to NLSF parameters */
 void silk_A2NLSF_FLP(
           opus_int16                 *NLSF_Q15,          /* O    NLSF vector      [ LPC_order ]          */
-    const SKP_float                 *pAR,               /* I    LPC coefficients [ LPC_order ]          */
+    const silk_float                 *pAR,               /* I    LPC coefficients [ LPC_order ]          */
     const opus_int                   LPC_order           /* I    LPC order                               */
 );
 
 /* Convert NLSF parameters to AR prediction filter coefficients */
 void silk_NLSF2A_FLP(
-          SKP_float                 *pAR,               /* O    LPC coefficients [ LPC_order ]          */
+          silk_float                 *pAR,               /* O    LPC coefficients [ LPC_order ]          */
     const opus_int16                 *NLSF_Q15,          /* I    NLSF vector      [ LPC_order ]          */
     const opus_int                   LPC_order           /* I    LPC order                               */
 );
@@ -317,7 +317,7 @@
     SideInfoIndices                 *psIndices,     /* I/O  Quantization indices                        */
     silk_nsq_state                  *psNSQ,         /* I/O  Noise Shaping Quantization state            */
           opus_int8                  pulses[],       /* O    Quantized pulse signal                      */
-    const SKP_float                 x[]             /* I    Prefiltered input signal                    */
+    const silk_float                 x[]             /* I    Prefiltered input signal                    */
 );
 
 #ifdef __cplusplus
diff --git a/silk/float/silk_noise_shape_analysis_FLP.c b/silk/float/silk_noise_shape_analysis_FLP.c
index 3005a0d..ac4c887 100644
--- a/silk/float/silk_noise_shape_analysis_FLP.c
+++ b/silk/float/silk_noise_shape_analysis_FLP.c
@@ -34,33 +34,33 @@
 
 /* Compute gain to make warped filter coefficients have a zero mean log frequency response on a     */
 /* non-warped frequency scale. (So that it can be implemented with a minimum-phase monic filter.)   */
-static inline SKP_float warped_gain(
-    const SKP_float     *coefs,
-    SKP_float           lambda,
+static inline silk_float warped_gain(
+    const silk_float     *coefs,
+    silk_float           lambda,
     opus_int             order
 ) {
     opus_int   i;
-    SKP_float gain;
+    silk_float gain;
 
     lambda = -lambda;
     gain = coefs[ order - 1 ];
     for( i = order - 2; i >= 0; i-- ) {
         gain = lambda * gain + coefs[ i ];
     }
-    return (SKP_float)( 1.0f / ( 1.0f - lambda * gain ) );
+    return (silk_float)( 1.0f / ( 1.0f - lambda * gain ) );
 }
 
 /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum     */
 /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */
 static inline void warped_true2monic_coefs(
-    SKP_float           *coefs_syn,
-    SKP_float           *coefs_ana,
-    SKP_float           lambda,
-    SKP_float           limit,
+    silk_float           *coefs_syn,
+    silk_float           *coefs_ana,
+    silk_float           lambda,
+    silk_float           limit,
     opus_int             order
 ) {
     opus_int   i, iter, ind = 0;
-    SKP_float tmp, maxabs, chirp, gain_syn, gain_ana;
+    silk_float tmp, maxabs, chirp, gain_syn, gain_ana;
 
     /* Convert to monic coefficients */
     for( i = order - 1; i > 0; i-- ) {
@@ -79,7 +79,7 @@
         /* Find maximum absolute value */
         maxabs = -1.0f;
         for( i = 0; i < order; i++ ) {
-            tmp = SKP_max( SKP_abs_float( coefs_syn[ i ] ), SKP_abs_float( coefs_ana[ i ] ) );
+            tmp = silk_max( silk_abs_float( coefs_syn[ i ] ), silk_abs_float( coefs_ana[ i ] ) );
             if( tmp > maxabs ) {
                 maxabs = tmp;
                 ind = i;
@@ -119,25 +119,25 @@
             coefs_ana[ i ] *= gain_ana;
         }
     }
-    SKP_assert( 0 );
+    silk_assert( 0 );
 }
 
 /* Compute noise shaping coefficients and initial gain values */
 void silk_noise_shape_analysis_FLP(
     silk_encoder_state_FLP          *psEnc,             /* I/O  Encoder state FLP                       */
     silk_encoder_control_FLP        *psEncCtrl,         /* I/O  Encoder control FLP                     */
-    const SKP_float                 *pitch_res,         /* I    LPC residual from pitch analysis        */
-    const SKP_float                 *x                  /* I    Input signal [frame_length + la_shape]  */
+    const silk_float                 *pitch_res,         /* I    LPC residual from pitch analysis        */
+    const silk_float                 *x                  /* I    Input signal [frame_length + la_shape]  */
 )
 {
     silk_shape_state_FLP *psShapeSt = &psEnc->sShape;
     opus_int     k, nSamples;
-    SKP_float   SNR_adj_dB, HarmBoost, HarmShapeGain, Tilt;
-    SKP_float   nrg, pre_nrg, log_energy, log_energy_prev, energy_variation;
-    SKP_float   delta, BWExp1, BWExp2, gain_mult, gain_add, strength, b, warping;
-    SKP_float   x_windowed[ SHAPE_LPC_WIN_MAX ];
-    SKP_float   auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ];
-    const SKP_float *x_ptr, *pitch_res_ptr;
+    silk_float   SNR_adj_dB, HarmBoost, HarmShapeGain, Tilt;
+    silk_float   nrg, pre_nrg, log_energy, log_energy_prev, energy_variation;
+    silk_float   delta, BWExp1, BWExp2, gain_mult, gain_add, strength, b, warping;
+    silk_float   x_windowed[ SHAPE_LPC_WIN_MAX ];
+    silk_float   auto_corr[ MAX_SHAPE_LPC_ORDER + 1 ];
+    const silk_float *x_ptr, *pitch_res_ptr;
 
     /* Point to start of first LPC analysis block */
     x_ptr = x - psEnc->sCmn.la_shape;
@@ -151,7 +151,7 @@
     psEncCtrl->input_quality = 0.5f * ( psEnc->sCmn.input_quality_bands_Q15[ 0 ] + psEnc->sCmn.input_quality_bands_Q15[ 1 ] ) * ( 1.0f / 32768.0f );
 
     /* Coding quality level, between 0.0 and 1.0 */
-    psEncCtrl->coding_quality = SKP_sigmoid( 0.25f * ( SNR_adj_dB - 18.0f ) );
+    psEncCtrl->coding_quality = silk_sigmoid( 0.25f * ( SNR_adj_dB - 18.0f ) );
 
     if( psEnc->sCmn.useCBR == 0 ) {
         /* Reduce coding SNR during low speech activity */
@@ -181,16 +181,16 @@
         energy_variation = 0.0f;
         log_energy_prev  = 0.0f;
         pitch_res_ptr = pitch_res;
-        for( k = 0; k < SKP_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) {
-            nrg = ( SKP_float )nSamples + ( SKP_float )silk_energy_FLP( pitch_res_ptr, nSamples );
+        for( k = 0; k < silk_SMULBB( SUB_FRAME_LENGTH_MS, psEnc->sCmn.nb_subfr ) / 2; k++ ) {
+            nrg = ( silk_float )nSamples + ( silk_float )silk_energy_FLP( pitch_res_ptr, nSamples );
             log_energy = silk_log2( nrg );
             if( k > 0 ) {
-                energy_variation += SKP_abs_float( log_energy - log_energy_prev );
+                energy_variation += silk_abs_float( log_energy - log_energy_prev );
             }
             log_energy_prev = log_energy;
             pitch_res_ptr += nSamples;
         }
-        psEncCtrl->sparseness = SKP_sigmoid( 0.4f * ( energy_variation - 5.0f ) );
+        psEncCtrl->sparseness = silk_sigmoid( 0.4f * ( energy_variation - 5.0f ) );
 
         /* Set quantization offset depending on sparseness measure */
         if( psEncCtrl->sparseness > SPARSENESS_THRESHOLD_QNT_OFFSET ) {
@@ -217,7 +217,7 @@
 
     if( psEnc->sCmn.warping_Q16 > 0 ) {
         /* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */
-        warping = (SKP_float)psEnc->sCmn.warping_Q16 / 65536.0f + 0.01f * psEncCtrl->coding_quality;
+        warping = (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f + 0.01f * psEncCtrl->coding_quality;
     } else {
         warping = 0.0f;
     }
@@ -233,7 +233,7 @@
 
         silk_apply_sine_window_FLP( x_windowed, x_ptr, 1, slope_part );
         shift = slope_part;
-        SKP_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(SKP_float) );
+        silk_memcpy( x_windowed + shift, x_ptr + shift, flat_part * sizeof(silk_float) );
         shift += flat_part;
         silk_apply_sine_window_FLP( x_windowed + shift, x_ptr + shift, 2, slope_part );
 
@@ -254,7 +254,7 @@
 
         /* Convert correlations to prediction coefficients, and compute residual energy */
         nrg = silk_levinsondurbin_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], auto_corr, psEnc->sCmn.shapingLPCOrder );
-        psEncCtrl->Gains[ k ] = ( SKP_float )sqrt( nrg );
+        psEncCtrl->Gains[ k ] = ( silk_float )sqrt( nrg );
 
         if( psEnc->sCmn.warping_Q16 > 0 ) {
             /* Adjust gain for warping */
@@ -265,10 +265,10 @@
         silk_bwexpander_FLP( &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp2 );
 
         /* Compute noise shaping filter coefficients */
-        SKP_memcpy(
+        silk_memcpy(
             &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ],
             &psEncCtrl->AR2[ k * MAX_SHAPE_LPC_ORDER ],
-            psEnc->sCmn.shapingLPCOrder * sizeof( SKP_float ) );
+            psEnc->sCmn.shapingLPCOrder * sizeof( silk_float ) );
 
         /* Bandwidth expansion for analysis filter shaping */
         silk_bwexpander_FLP( &psEncCtrl->AR1[ k * MAX_SHAPE_LPC_ORDER ], psEnc->sCmn.shapingLPCOrder, BWExp1 );
@@ -287,8 +287,8 @@
     /* Gain tweaking */
     /*****************/
     /* Increase gains during low speech activity */
-    gain_mult = (SKP_float)pow( 2.0f, -0.16f * SNR_adj_dB );
-    gain_add  = (SKP_float)pow( 2.0f,  0.16f * MIN_QGAIN_DB );
+    gain_mult = (silk_float)pow( 2.0f, -0.16f * SNR_adj_dB );
+    gain_add  = (silk_float)pow( 2.0f,  0.16f * MIN_QGAIN_DB );
     for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
         psEncCtrl->Gains[ k ] *= gain_mult;
         psEncCtrl->Gains[ k ] += gain_add;
@@ -344,7 +344,7 @@
             ( 1.0f - ( 1.0f - psEncCtrl->coding_quality ) * psEncCtrl->input_quality );
 
         /* Less harmonic noise shaping for less periodic signals */
-        HarmShapeGain *= ( SKP_float )sqrt( psEnc->LTPCorr );
+        HarmShapeGain *= ( silk_float )sqrt( psEnc->LTPCorr );
     } else {
         HarmShapeGain = 0.0f;
     }
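
Note on the warped_gain() routine in the hunks above: its loop is a plain Horner evaluation of the coefficient polynomial at the negated warping factor, after which the result is folded into 1.0f / ( 1.0f - lambda * gain ). A minimal, self-contained sketch of that evaluation step, with hypothetical names and toy data, for illustration only:

#include <stdio.h>

/* Horner's rule: p(x) = c[0] + c[1]*x + ... + c[n-1]*x^(n-1),
   evaluated the same way as the loop inside warped_gain(). */
static double horner_eval( const double *c, int n, double x )
{
    double acc = c[ n - 1 ];
    int i;
    for( i = n - 2; i >= 0; i-- ) {
        acc = x * acc + c[ i ];
    }
    return acc;
}

int main( void )
{
    const double c[ 4 ] = { 1.0, -0.5, 0.25, -0.125 };
    const double x      = -0.3;   /* plays the role of the negated lambda */
    double direct = c[ 0 ] + c[ 1 ] * x + c[ 2 ] * x * x + c[ 3 ] * x * x * x;
    printf( "horner = %f, direct = %f\n", horner_eval( c, 4, x ), direct );
    return 0;
}
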
diff --git a/silk/float/silk_pitch_analysis_core_FLP.c b/silk/float/silk_pitch_analysis_core_FLP.c
index 1ff0797..5aaf41a 100644
--- a/silk/float/silk_pitch_analysis_core_FLP.c
+++ b/silk/float/silk_pitch_analysis_core_FLP.c
@@ -49,8 +49,8 @@
 /* Internally used functions                                */
 /************************************************************/
 static void silk_P_Ana_calc_corr_st3(
-    SKP_float cross_corr_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
-    const SKP_float  frame[],            /* I vector to correlate                                            */
+    silk_float cross_corr_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
+    const silk_float  frame[],            /* I vector to correlate                                            */
     opus_int         start_lag,          /* I start lag                                                      */
     opus_int         sf_length,          /* I sub frame length                                               */
     opus_int         nb_subfr,           /* I number of subframes                                            */
@@ -58,8 +58,8 @@
 );
 
 static void silk_P_Ana_calc_energy_st3(
-    SKP_float energies_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
-    const SKP_float  frame[],            /* I vector to correlate                                            */
+    silk_float energies_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
+    const silk_float  frame[],            /* I vector to correlate                                            */
     opus_int         start_lag,          /* I start lag                                                      */
     opus_int         sf_length,          /* I sub frame length                                               */
     opus_int         nb_subfr,           /* I number of subframes                                            */
@@ -72,39 +72,39 @@
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 */
 opus_int silk_pitch_analysis_core_FLP( /* O voicing estimate: 0 voiced, 1 unvoiced                       */
-    const SKP_float  *frame,             /* I signal of length PE_FRAME_LENGTH_MS*Fs_kHz                     */
+    const silk_float  *frame,             /* I signal of length PE_FRAME_LENGTH_MS*Fs_kHz                     */
     opus_int         *pitch_out,         /* O 4 pitch lag values                                             */
     opus_int16        *lagIndex,         /* O lag Index                                                      */
     opus_int8        *contourIndex,      /* O pitch contour Index                                            */
-    SKP_float       *LTPCorr,           /* I/O normalized correlation; input: value from previous frame     */
+    silk_float       *LTPCorr,           /* I/O normalized correlation; input: value from previous frame     */
     opus_int         prevLag,            /* I last lag of previous frame; set to zero if unvoiced            */
-    const SKP_float search_thres1,      /* I first stage threshold for lag candidates 0 - 1                 */
-    const SKP_float search_thres2,      /* I final threshold for lag candidates 0 - 1                       */
+    const silk_float search_thres1,      /* I first stage threshold for lag candidates 0 - 1                 */
+    const silk_float search_thres2,      /* I final threshold for lag candidates 0 - 1                       */
     const opus_int   Fs_kHz,             /* I sample frequency (kHz)                                         */
     const opus_int   complexity,         /* I Complexity setting, 0-2, where 2 is highest                    */
     const opus_int   nb_subfr            /* I    number of 5 ms subframes                                    */
 )
 {
     opus_int   i, k, d, j;
-    SKP_float frame_8kHz[  PE_MAX_FRAME_LENGTH_MS * 8 ];
-    SKP_float frame_4kHz[  PE_MAX_FRAME_LENGTH_MS * 4 ];
+    silk_float frame_8kHz[  PE_MAX_FRAME_LENGTH_MS * 8 ];
+    silk_float frame_4kHz[  PE_MAX_FRAME_LENGTH_MS * 4 ];
     opus_int16 frame_8_FIX[ PE_MAX_FRAME_LENGTH_MS * 8 ];
     opus_int16 frame_4_FIX[ PE_MAX_FRAME_LENGTH_MS * 4 ];
     opus_int32 filt_state[ 6 ];
-    SKP_float threshold, contour_bias;
-    SKP_float C[ PE_MAX_NB_SUBFR][ (PE_MAX_LAG >> 1) + 5 ];
-    SKP_float CC[ PE_NB_CBKS_STAGE2_EXT ];
-    const SKP_float *target_ptr, *basis_ptr;
+    silk_float threshold, contour_bias;
+    silk_float C[ PE_MAX_NB_SUBFR][ (PE_MAX_LAG >> 1) + 5 ];
+    silk_float CC[ PE_NB_CBKS_STAGE2_EXT ];
+    const silk_float *target_ptr, *basis_ptr;
     double    cross_corr, normalizer, energy, energy_tmp;
     opus_int   d_srch[ PE_D_SRCH_LENGTH ];
     opus_int16 d_comp[ (PE_MAX_LAG >> 1) + 5 ];
     opus_int   length_d_srch, length_d_comp;
-    SKP_float Cmax, CCmax, CCmax_b, CCmax_new_b, CCmax_new;
+    silk_float Cmax, CCmax, CCmax_b, CCmax_new_b, CCmax_new;
     opus_int   CBimax, CBimax_new, lag, start_lag, end_lag, lag_new;
     opus_int   cbk_size;
-    SKP_float lag_log2, prevLag_log2, delta_lag_log2_sqr;
-    SKP_float energies_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ];
-    SKP_float cross_corr_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ];
+    silk_float lag_log2, prevLag_log2, delta_lag_log2_sqr;
+    silk_float energies_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ];
+    silk_float cross_corr_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ];
     opus_int   lag_counter;
     opus_int   frame_length, frame_length_8kHz, frame_length_4kHz;
     opus_int   sf_length, sf_length_8kHz, sf_length_4kHz;
@@ -114,14 +114,14 @@
     const opus_int8 *Lag_CB_ptr;
 
     /* Check for valid sampling frequency */
-    SKP_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 );
+    silk_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 );
 
     /* Check for valid complexity setting */
-    SKP_assert( complexity >= SigProc_PE_MIN_COMPLEX );
-    SKP_assert( complexity <= SigProc_PE_MAX_COMPLEX );
+    silk_assert( complexity >= SigProc_PE_MIN_COMPLEX );
+    silk_assert( complexity <= SigProc_PE_MAX_COMPLEX );
 
-    SKP_assert( search_thres1 >= 0.0f && search_thres1 <= 1.0f );
-    SKP_assert( search_thres2 >= 0.0f && search_thres2 <= 1.0f );
+    silk_assert( search_thres1 >= 0.0f && search_thres1 <= 1.0f );
+    silk_assert( search_thres2 >= 0.0f && search_thres2 <= 1.0f );
 
     /* Setup frame lengths max / min lag for the sampling frequency */
     frame_length      = ( PE_LTP_MEM_LENGTH_MS + nb_subfr * PE_SUBFR_LENGTH_MS ) * Fs_kHz;
@@ -137,32 +137,32 @@
     max_lag_4kHz      = PE_MAX_LAG_MS * 4;
     max_lag_8kHz      = PE_MAX_LAG_MS * 8 - 1;
 
-    SKP_memset(C, 0, sizeof(SKP_float) * nb_subfr * ((PE_MAX_LAG >> 1) + 5));
+    silk_memset(C, 0, sizeof(silk_float) * nb_subfr * ((PE_MAX_LAG >> 1) + 5));
 
     /* Resample from input sampled at Fs_kHz to 8 kHz */
     if( Fs_kHz == 16 ) {
         /* Resample to 16 -> 8 khz */
         opus_int16 frame_16_FIX[ 16 * PE_MAX_FRAME_LENGTH_MS ];
-        SKP_float2short_array( frame_16_FIX, frame, frame_length );
-        SKP_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );
+        silk_float2short_array( frame_16_FIX, frame, frame_length );
+        silk_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );
         silk_resampler_down2( filt_state, frame_8_FIX, frame_16_FIX, frame_length );
-        SKP_short2float_array( frame_8kHz, frame_8_FIX, frame_length_8kHz );
+        silk_short2float_array( frame_8kHz, frame_8_FIX, frame_length_8kHz );
     } else if( Fs_kHz == 12 ) {
         /* Resample to 12 -> 8 khz */
         opus_int16 frame_12_FIX[ 12 * PE_MAX_FRAME_LENGTH_MS ];
-        SKP_float2short_array( frame_12_FIX, frame, frame_length );
-        SKP_memset( filt_state, 0, 6 * sizeof( opus_int32 ) );
+        silk_float2short_array( frame_12_FIX, frame, frame_length );
+        silk_memset( filt_state, 0, 6 * sizeof( opus_int32 ) );
         silk_resampler_down2_3( filt_state, frame_8_FIX, frame_12_FIX, frame_length );
-        SKP_short2float_array( frame_8kHz, frame_8_FIX, frame_length_8kHz );
+        silk_short2float_array( frame_8kHz, frame_8_FIX, frame_length_8kHz );
     } else {
-        SKP_assert( Fs_kHz == 8 );
-        SKP_float2short_array( frame_8_FIX, frame, frame_length_8kHz );
+        silk_assert( Fs_kHz == 8 );
+        silk_float2short_array( frame_8_FIX, frame, frame_length_8kHz );
     }
 
     /* Decimate again to 4 kHz */
-    SKP_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );
+    silk_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );
     silk_resampler_down2( filt_state, frame_4_FIX, frame_8_FIX, frame_length_8kHz );
-    SKP_short2float_array( frame_4kHz, frame_4_FIX, frame_length_4kHz );
+    silk_short2float_array( frame_4kHz, frame_4_FIX, frame_length_4kHz );
 
     /* Low-pass filter */
     for( i = frame_length_4kHz - 1; i > 0; i-- ) {
@@ -172,31 +172,31 @@
     /******************************************************************************
     * FIRST STAGE, operating in 4 khz
     ******************************************************************************/
-    target_ptr = &frame_4kHz[ SKP_LSHIFT( sf_length_4kHz, 2 ) ];
+    target_ptr = &frame_4kHz[ silk_LSHIFT( sf_length_4kHz, 2 ) ];
     for( k = 0; k < nb_subfr >> 1; k++ ) {
         /* Check that we are within range of the array */
-        SKP_assert( target_ptr >= frame_4kHz );
-        SKP_assert( target_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
+        silk_assert( target_ptr >= frame_4kHz );
+        silk_assert( target_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
 
         basis_ptr = target_ptr - min_lag_4kHz;
 
         /* Check that we are within range of the array */
-        SKP_assert( basis_ptr >= frame_4kHz );
-        SKP_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
+        silk_assert( basis_ptr >= frame_4kHz );
+        silk_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
 
         /* Calculate first vector products before loop */
         cross_corr = silk_inner_product_FLP( target_ptr, basis_ptr, sf_length_8kHz );
         normalizer = silk_energy_FLP( basis_ptr, sf_length_8kHz ) + sf_length_8kHz * 4000.0f;
 
-        C[ 0 ][ min_lag_4kHz ] += (SKP_float)(cross_corr / sqrt(normalizer));
+        C[ 0 ][ min_lag_4kHz ] += (silk_float)(cross_corr / sqrt(normalizer));
 
         /* From now on normalizer is computed recursively */
         for(d = min_lag_4kHz + 1; d <= max_lag_4kHz; d++) {
             basis_ptr--;
 
             /* Check that we are within range of the array */
-            SKP_assert( basis_ptr >= frame_4kHz );
-            SKP_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
+            silk_assert( basis_ptr >= frame_4kHz );
+            silk_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
 
             cross_corr = silk_inner_product_FLP(target_ptr, basis_ptr, sf_length_8kHz);
 
@@ -204,7 +204,7 @@
             normalizer +=
                 basis_ptr[ 0 ] * basis_ptr[ 0 ] -
                 basis_ptr[ sf_length_8kHz ] * basis_ptr[ sf_length_8kHz ];
-            C[ 0 ][ d ] += (SKP_float)(cross_corr / sqrt( normalizer ));
+            C[ 0 ][ d ] += (silk_float)(cross_corr / sqrt( normalizer ));
         }
         /* Update target pointer */
         target_ptr += sf_length_8kHz;
@@ -217,19 +217,19 @@
 
     /* Sort */
     length_d_srch = 4 + 2 * complexity;
-    SKP_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH );
+    silk_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH );
     silk_insertion_sort_decreasing_FLP( &C[ 0 ][ min_lag_4kHz ], d_srch, max_lag_4kHz - min_lag_4kHz + 1, length_d_srch );
 
     /* Escape if correlation is very low already here */
     Cmax = C[ 0 ][ min_lag_4kHz ];
-    target_ptr = &frame_4kHz[ SKP_SMULBB( sf_length_4kHz, nb_subfr ) ];
+    target_ptr = &frame_4kHz[ silk_SMULBB( sf_length_4kHz, nb_subfr ) ];
     energy = 1000.0f;
-    for( i = 0; i < SKP_LSHIFT( sf_length_4kHz, 2 ); i++ ) {
+    for( i = 0; i < silk_LSHIFT( sf_length_4kHz, 2 ); i++ ) {
         energy += target_ptr[i] * target_ptr[i];
     }
     threshold = Cmax * Cmax;
     if( energy / 16.0f > threshold ) {
-        SKP_memset( pitch_out, 0, nb_subfr * sizeof( opus_int ) );
+        silk_memset( pitch_out, 0, nb_subfr * sizeof( opus_int ) );
         *LTPCorr      = 0.0f;
         *lagIndex     = 0;
         *contourIndex = 0;
@@ -240,13 +240,13 @@
     for( i = 0; i < length_d_srch; i++ ) {
         /* Convert to 8 kHz indices for the sorted correlation that exceeds the threshold */
         if( C[ 0 ][ min_lag_4kHz + i ] > threshold ) {
-            d_srch[ i ] = SKP_LSHIFT( d_srch[ i ] + min_lag_4kHz, 1 );
+            d_srch[ i ] = silk_LSHIFT( d_srch[ i ] + min_lag_4kHz, 1 );
         } else {
             length_d_srch = i;
             break;
         }
     }
-    SKP_assert( length_d_srch > 0 );
+    silk_assert( length_d_srch > 0 );
 
     for( i = min_lag_8kHz - 5; i < max_lag_8kHz + 5; i++ ) {
         d_comp[ i ] = 0;
@@ -287,7 +287,7 @@
     /*********************************************************************************
     * Find energy of each subframe projected onto its history, for a range of delays
     *********************************************************************************/
-    SKP_memset( C, 0, PE_MAX_NB_SUBFR*((PE_MAX_LAG >> 1) + 5) * sizeof(SKP_float)); /* Is this needed?*/
+    silk_memset( C, 0, PE_MAX_NB_SUBFR*((PE_MAX_LAG >> 1) + 5) * sizeof(silk_float)); /* Is this needed?*/
 
     if( Fs_kHz == 8 ) {
         target_ptr = &frame[ PE_LTP_MEM_LENGTH_MS * 8 ];
@@ -302,7 +302,7 @@
             cross_corr = silk_inner_product_FLP( basis_ptr, target_ptr, sf_length_8kHz );
             energy     = silk_energy_FLP( basis_ptr, sf_length_8kHz );
             if (cross_corr > 0.0f) {
-                C[ k ][ d ] = (SKP_float)(cross_corr * cross_corr / (energy * energy_tmp + eps));
+                C[ k ][ d ] = (silk_float)(cross_corr * cross_corr / (energy * energy_tmp + eps));
             } else {
                 C[ k ][ d ] = 0.0f;
             }
@@ -321,11 +321,11 @@
 
     if( prevLag > 0 ) {
         if( Fs_kHz == 12 ) {
-            prevLag = SKP_LSHIFT( prevLag, 1 ) / 3;
+            prevLag = silk_LSHIFT( prevLag, 1 ) / 3;
         } else if( Fs_kHz == 16 ) {
-            prevLag = SKP_RSHIFT( prevLag, 1 );
+            prevLag = silk_RSHIFT( prevLag, 1 );
         }
-        prevLag_log2 = silk_log2((SKP_float)prevLag);
+        prevLag_log2 = silk_log2((silk_float)prevLag);
     } else {
         prevLag_log2 = 0;
     }
@@ -364,11 +364,11 @@
                 CBimax_new = i;
             }
         }
-        CCmax_new = SKP_max_float(CCmax_new, 0.0f); /* To avoid taking square root of negative number later */
+        CCmax_new = silk_max_float(CCmax_new, 0.0f); /* To avoid taking square root of negative number later */
         CCmax_new_b = CCmax_new;
 
         /* Bias towards shorter lags */
-        lag_log2 = silk_log2((SKP_float)d);
+        lag_log2 = silk_log2((silk_float)d);
         CCmax_new_b -= PE_SHORTLAG_BIAS * nb_subfr * lag_log2;
 
         /* Bias towards previous lag */
@@ -391,7 +391,7 @@
 
     if( lag == -1 ) {
         /* No suitable candidate found */
-        SKP_memset( pitch_out, 0, PE_MAX_NB_SUBFR * sizeof(opus_int) );
+        silk_memset( pitch_out, 0, PE_MAX_NB_SUBFR * sizeof(opus_int) );
         *LTPCorr      = 0.0f;
         *lagIndex     = 0;
         *contourIndex = 0;
@@ -402,22 +402,22 @@
         /* Search in original signal */
 
         /* Compensate for decimation */
-        SKP_assert( lag == SKP_SAT16( lag ) );
+        silk_assert( lag == silk_SAT16( lag ) );
         if( Fs_kHz == 12 ) {
-            lag = SKP_RSHIFT_ROUND( SKP_SMULBB( lag, 3 ), 1 );
+            lag = silk_RSHIFT_ROUND( silk_SMULBB( lag, 3 ), 1 );
         } else if( Fs_kHz == 16 ) {
-            lag = SKP_LSHIFT( lag, 1 );
+            lag = silk_LSHIFT( lag, 1 );
         } else {
-            lag = SKP_SMULBB( lag, 3 );
+            lag = silk_SMULBB( lag, 3 );
         }
 
-        lag = SKP_LIMIT_int( lag, min_lag, max_lag );
-        start_lag = SKP_max_int( lag - 2, min_lag );
-        end_lag   = SKP_min_int( lag + 2, max_lag );
+        lag = silk_LIMIT_int( lag, min_lag, max_lag );
+        start_lag = silk_max_int( lag - 2, min_lag );
+        end_lag   = silk_min_int( lag + 2, max_lag );
         lag_new   = lag;                                    /* to avoid undefined lag */
         CBimax    = 0;                                      /* to avoid undefined lag */
-        SKP_assert( CCmax >= 0.0f );
-        *LTPCorr = (SKP_float)sqrt( CCmax / nb_subfr );     /* Output normalized correlation */
+        silk_assert( CCmax >= 0.0f );
+        *LTPCorr = (silk_float)sqrt( CCmax / nb_subfr );     /* Output normalized correlation */
 
         CCmax = -1000.0f;
 
@@ -426,7 +426,7 @@
         silk_P_Ana_calc_energy_st3( energies_st3, frame, start_lag, sf_length, nb_subfr, complexity );
 
         lag_counter = 0;
-        SKP_assert( lag == SKP_SAT16( lag ) );
+        silk_assert( lag == silk_SAT16( lag ) );
         contour_bias = PE_FLATCONTOUR_BIAS / lag;
 
         /* Setup cbk parameters according to complexity setting and frame length */
@@ -449,7 +449,7 @@
                     cross_corr += cross_corr_st3[ k ][ j ][ lag_counter ];
                 }
                 if( cross_corr > 0.0 ) {
-                    CCmax_new = (SKP_float)(cross_corr * cross_corr / energy);
+                    CCmax_new = (silk_float)(cross_corr * cross_corr / energy);
                     /* Reduce depending on flatness of contour */
                     CCmax_new *= 1.0f - contour_bias * j;
                 } else {
@@ -474,22 +474,22 @@
         *contourIndex = (opus_int8)CBimax;
     } else {
         /* Save Lags and correlation */
-        SKP_assert( CCmax >= 0.0f );
-        *LTPCorr = (SKP_float)sqrt( CCmax / nb_subfr ); /* Output normalized correlation */
+        silk_assert( CCmax >= 0.0f );
+        *LTPCorr = (silk_float)sqrt( CCmax / nb_subfr ); /* Output normalized correlation */
         for( k = 0; k < nb_subfr; k++ ) {
             pitch_out[ k ] = lag + matrix_ptr( Lag_CB_ptr, k, CBimax, cbk_size );
         }
         *lagIndex = (opus_int16)( lag - min_lag );
         *contourIndex = (opus_int8)CBimax;
     }
-    SKP_assert( *lagIndex >= 0 );
+    silk_assert( *lagIndex >= 0 );
     /* return as voiced */
     return 0;
 }
 
 static void silk_P_Ana_calc_corr_st3(
-    SKP_float cross_corr_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
-    const SKP_float  frame[],            /* I vector to correlate                                            */
+    silk_float cross_corr_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
+    const silk_float  frame[],            /* I vector to correlate                                            */
     opus_int         start_lag,          /* I start lag                                                      */
     opus_int         sf_length,          /* I sub frame length                                               */
     opus_int         nb_subfr,           /* I number of subframes                                            */
@@ -509,14 +509,14 @@
      4*12*5 = 240 correlations, but more likely around 120.
      **********************************************************************/
 {
-    const SKP_float *target_ptr, *basis_ptr;
+    const silk_float *target_ptr, *basis_ptr;
     opus_int   i, j, k, lag_counter, lag_low, lag_high;
     opus_int   nb_cbk_search, delta, idx, cbk_size;
-    SKP_float scratch_mem[ SCRATCH_SIZE ];
+    silk_float scratch_mem[ SCRATCH_SIZE ];
     const opus_int8 *Lag_range_ptr, *Lag_CB_ptr;
 
-    SKP_assert( complexity >= SigProc_PE_MIN_COMPLEX );
-    SKP_assert( complexity <= SigProc_PE_MAX_COMPLEX );
+    silk_assert( complexity >= SigProc_PE_MIN_COMPLEX );
+    silk_assert( complexity <= SigProc_PE_MAX_COMPLEX );
 
     if( nb_subfr == PE_MAX_NB_SUBFR ){
         Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ];
@@ -524,14 +524,14 @@
         nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ];
         cbk_size      = PE_NB_CBKS_STAGE3_MAX;
     } else {
-        SKP_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
+        silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
         Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ];
         Lag_CB_ptr    = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ];
         nb_cbk_search = PE_NB_CBKS_STAGE3_10MS;
         cbk_size      = PE_NB_CBKS_STAGE3_10MS;
     }
 
-    target_ptr = &frame[ SKP_LSHIFT( sf_length, 2 ) ]; /* Pointer to middle of frame */
+    target_ptr = &frame[ silk_LSHIFT( sf_length, 2 ) ]; /* Pointer to middle of frame */
     for( k = 0; k < nb_subfr; k++ ) {
         lag_counter = 0;
 
@@ -540,8 +540,8 @@
         lag_high = matrix_ptr( Lag_range_ptr, k, 1, 2 );
         for( j = lag_low; j <= lag_high; j++ ) {
             basis_ptr = target_ptr - ( start_lag + j );
-            SKP_assert( lag_counter < SCRATCH_SIZE );
-            scratch_mem[ lag_counter ] = (SKP_float)silk_inner_product_FLP( target_ptr, basis_ptr, sf_length );
+            silk_assert( lag_counter < SCRATCH_SIZE );
+            scratch_mem[ lag_counter ] = (silk_float)silk_inner_product_FLP( target_ptr, basis_ptr, sf_length );
             lag_counter++;
         }
 
@@ -551,8 +551,8 @@
             /* each code_book vector for each start lag */
             idx = matrix_ptr( Lag_CB_ptr, k, i, cbk_size ) - delta;
             for( j = 0; j < PE_NB_STAGE3_LAGS; j++ ) {
-                SKP_assert( idx + j < SCRATCH_SIZE );
-                SKP_assert( idx + j < lag_counter );
+                silk_assert( idx + j < SCRATCH_SIZE );
+                silk_assert( idx + j < lag_counter );
                 cross_corr_st3[ k ][ i ][ j ] = scratch_mem[ idx + j ];
             }
         }
@@ -561,8 +561,8 @@
 }
 
 static void silk_P_Ana_calc_energy_st3(
-    SKP_float energies_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
-    const SKP_float  frame[],            /* I vector to correlate                                            */
+    silk_float energies_st3[ PE_MAX_NB_SUBFR ][ PE_NB_CBKS_STAGE3_MAX ][ PE_NB_STAGE3_LAGS ], /* O 3 DIM correlation array */
+    const silk_float  frame[],            /* I vector to correlate                                            */
     opus_int         start_lag,          /* I start lag                                                      */
     opus_int         sf_length,          /* I sub frame length                                               */
     opus_int         nb_subfr,           /* I number of subframes                                            */
@@ -573,15 +573,15 @@
 calculated recursively.
 ****************************************************************/
 {
-    const SKP_float *target_ptr, *basis_ptr;
+    const silk_float *target_ptr, *basis_ptr;
     double    energy;
     opus_int   k, i, j, lag_counter;
     opus_int   nb_cbk_search, delta, idx, cbk_size, lag_diff;
-    SKP_float scratch_mem[ SCRATCH_SIZE ];
+    silk_float scratch_mem[ SCRATCH_SIZE ];
     const opus_int8 *Lag_range_ptr, *Lag_CB_ptr;
 
-    SKP_assert( complexity >= SigProc_PE_MIN_COMPLEX );
-    SKP_assert( complexity <= SigProc_PE_MAX_COMPLEX );
+    silk_assert( complexity >= SigProc_PE_MIN_COMPLEX );
+    silk_assert( complexity <= SigProc_PE_MAX_COMPLEX );
 
     if( nb_subfr == PE_MAX_NB_SUBFR ){
         Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ];
@@ -589,35 +589,35 @@
         nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ];
         cbk_size      = PE_NB_CBKS_STAGE3_MAX;
     } else {
-        SKP_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
+        silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
         Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ];
         Lag_CB_ptr    = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ];
         nb_cbk_search = PE_NB_CBKS_STAGE3_10MS;
         cbk_size      = PE_NB_CBKS_STAGE3_10MS;
     }
 
-    target_ptr = &frame[ SKP_LSHIFT( sf_length, 2 ) ];
+    target_ptr = &frame[ silk_LSHIFT( sf_length, 2 ) ];
     for( k = 0; k < nb_subfr; k++ ) {
         lag_counter = 0;
 
         /* Calculate the energy for first lag */
         basis_ptr = target_ptr - ( start_lag + matrix_ptr( Lag_range_ptr, k, 0, 2 ) );
         energy = silk_energy_FLP( basis_ptr, sf_length ) + 1e-3;
-        SKP_assert( energy >= 0.0 );
-        scratch_mem[lag_counter] = (SKP_float)energy;
+        silk_assert( energy >= 0.0 );
+        scratch_mem[lag_counter] = (silk_float)energy;
         lag_counter++;
 
         lag_diff = ( matrix_ptr( Lag_range_ptr, k, 1, 2 ) -  matrix_ptr( Lag_range_ptr, k, 0, 2 ) + 1 );
         for( i = 1; i < lag_diff; i++ ) {
             /* remove part outside new window */
             energy -= basis_ptr[sf_length - i] * basis_ptr[sf_length - i];
-            SKP_assert( energy >= 0.0 );
+            silk_assert( energy >= 0.0 );
 
             /* add part that comes into window */
             energy += basis_ptr[ -i ] * basis_ptr[ -i ];
-            SKP_assert( energy >= 0.0 );
-            SKP_assert( lag_counter < SCRATCH_SIZE );
-            scratch_mem[lag_counter] = (SKP_float)energy;
+            silk_assert( energy >= 0.0 );
+            silk_assert( lag_counter < SCRATCH_SIZE );
+            scratch_mem[lag_counter] = (silk_float)energy;
             lag_counter++;
         }
 
@@ -627,10 +627,10 @@
             /* each code_book vector for each start lag                     */
             idx = matrix_ptr( Lag_CB_ptr, k, i, cbk_size ) - delta;
             for( j = 0; j < PE_NB_STAGE3_LAGS; j++ ) {
-                SKP_assert( idx + j < SCRATCH_SIZE );
-                SKP_assert( idx + j < lag_counter );
+                silk_assert( idx + j < SCRATCH_SIZE );
+                silk_assert( idx + j < lag_counter );
                 energies_st3[ k ][ i ][ j ] = scratch_mem[ idx + j ];
-                SKP_assert( energies_st3[ k ][ i ][ j ] >= 0.0f );
+                silk_assert( energies_st3[ k ][ i ][ j ] >= 0.0f );
             }
         }
         target_ptr += sf_length;
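
The correlation search above avoids recomputing window energies from scratch: both the first-stage normalizer and silk_P_Ana_calc_energy_st3() update the energy recursively as the window slides back one sample, subtracting the sample that drops out and adding the one that comes in. A small standalone sketch of that O(1)-per-lag update, with made-up signal data, for illustration only:

#include <stdio.h>

/* Full recomputation of the window energy: O(len) per lag. */
static double energy_direct( const float *x, int len )
{
    double e = 0.0;
    int i;
    for( i = 0; i < len; i++ ) {
        e += (double)x[ i ] * x[ i ];
    }
    return e;
}

int main( void )
{
    float  sig[ 16 ];
    const int len = 8;
    const float *win;
    double e;
    int i;

    for( i = 0; i < 16; i++ ) {
        sig[ i ] = (float)( i % 5 ) - 2.0f;
    }
    win = &sig[ 8 ];                 /* last window: sig[ 8 .. 15 ] */
    e   = energy_direct( win, len );

    /* Slide the window one sample into the past per step, O(1) each,
       in the spirit of: energy -= basis[ sf_length - i ]^2; energy += basis[ -i ]^2 */
    for( i = 1; i <= 4; i++ ) {
        e -= (double)win[ len - i ] * win[ len - i ];  /* sample leaving  */
        e += (double)win[ -i ]      * win[ -i ];       /* sample entering */
        printf( "lag %d: recursive = %f, direct = %f\n", i, e, energy_direct( win - i, len ) );
    }
    return 0;
}
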
diff --git a/silk/float/silk_prefilter_FLP.c b/silk/float/silk_prefilter_FLP.c
index e52a494..6737559 100644
--- a/silk/float/silk_prefilter_FLP.c
+++ b/silk/float/silk_prefilter_FLP.c
@@ -37,31 +37,31 @@
 */
 static inline void silk_prefilt_FLP(
     silk_prefilter_state_FLP *P,/* I/O state */
-    SKP_float st_res[],             /* I */
-    SKP_float xw[],                 /* O */
-    SKP_float *HarmShapeFIR,        /* I */
-    SKP_float Tilt,                 /* I */
-    SKP_float LF_MA_shp,            /* I */
-    SKP_float LF_AR_shp,            /* I */
+    silk_float st_res[],             /* I */
+    silk_float xw[],                 /* O */
+    silk_float *HarmShapeFIR,        /* I */
+    silk_float Tilt,                 /* I */
+    silk_float LF_MA_shp,            /* I */
+    silk_float LF_AR_shp,            /* I */
     opus_int   lag,                  /* I */
     opus_int   length                /* I */
 );
 
 void silk_warped_LPC_analysis_filter_FLP(
-          SKP_float                 state[],            /* I/O  State [order + 1]                       */
-          SKP_float                 res[],              /* O    Residual signal [length]                */
-    const SKP_float                 coef[],             /* I    Coefficients [order]                    */
-    const SKP_float                 input[],            /* I    Input signal [length]                   */
-    const SKP_float                 lambda,             /* I    Warping factor                          */
+          silk_float                 state[],            /* I/O  State [order + 1]                       */
+          silk_float                 res[],              /* O    Residual signal [length]                */
+    const silk_float                 coef[],             /* I    Coefficients [order]                    */
+    const silk_float                 input[],            /* I    Input signal [length]                   */
+    const silk_float                 lambda,             /* I    Warping factor                          */
     const opus_int                   length,             /* I    Length of input signal                  */
     const opus_int                   order               /* I    Filter order (even)                     */
 )
 {
     opus_int     n, i;
-    SKP_float   acc, tmp1, tmp2;
+    silk_float   acc, tmp1, tmp2;
 
     /* Order must be even */
-    SKP_assert( ( order & 1 ) == 0 );
+    silk_assert( ( order & 1 ) == 0 );
 
     for( n = 0; n < length; n++ ) {
         /* Output of lowpass section */
@@ -94,19 +94,19 @@
 void silk_prefilter_FLP(
     silk_encoder_state_FLP          *psEnc,         /* I/O  Encoder state FLP                       */
     const silk_encoder_control_FLP  *psEncCtrl,     /* I    Encoder control FLP                     */
-          SKP_float                     xw[],           /* O    Weighted signal                         */
-    const SKP_float                     x[]             /* I    Speech signal                           */
+          silk_float                     xw[],           /* O    Weighted signal                         */
+    const silk_float                     x[]             /* I    Speech signal                           */
 )
 {
     silk_prefilter_state_FLP *P = &psEnc->sPrefilt;
     opus_int   j, k, lag;
-    SKP_float HarmShapeGain, Tilt, LF_MA_shp, LF_AR_shp;
-    SKP_float B[ 2 ];
-    const SKP_float *AR1_shp;
-    const SKP_float *px;
-    SKP_float *pxw;
-    SKP_float HarmShapeFIR[ 3 ];
-    SKP_float st_res[ MAX_SUB_FRAME_LENGTH + MAX_LPC_ORDER ];
+    silk_float HarmShapeGain, Tilt, LF_MA_shp, LF_AR_shp;
+    silk_float B[ 2 ];
+    const silk_float *AR1_shp;
+    const silk_float *px;
+    silk_float *pxw;
+    silk_float HarmShapeFIR[ 3 ];
+    silk_float st_res[ MAX_SUB_FRAME_LENGTH + MAX_LPC_ORDER ];
 
     /* Setup pointers */
     px  = x;
@@ -130,7 +130,7 @@
 
         /* Short term FIR filtering */
         silk_warped_LPC_analysis_filter_FLP( P->sAR_shp, st_res, AR1_shp, px,
-            (SKP_float)psEnc->sCmn.warping_Q16 / 65536.0f, psEnc->sCmn.subfr_length, psEnc->sCmn.shapingLPCOrder );
+            (silk_float)psEnc->sCmn.warping_Q16 / 65536.0f, psEnc->sCmn.subfr_length, psEnc->sCmn.shapingLPCOrder );
 
         /* Reduce (mainly) low frequencies during harmonic emphasis */
         B[ 0 ] =  psEncCtrl->GainsPre[ k ];
@@ -155,21 +155,21 @@
 */
 static inline void silk_prefilt_FLP(
     silk_prefilter_state_FLP *P,/* I/O state */
-    SKP_float st_res[],                /* I */
-    SKP_float xw[],                    /* O */
-    SKP_float *HarmShapeFIR,        /* I */
-    SKP_float Tilt,                    /* I */
-    SKP_float LF_MA_shp,            /* I */
-    SKP_float LF_AR_shp,            /* I */
+    silk_float st_res[],                /* I */
+    silk_float xw[],                    /* O */
+    silk_float *HarmShapeFIR,        /* I */
+    silk_float Tilt,                    /* I */
+    silk_float LF_MA_shp,            /* I */
+    silk_float LF_AR_shp,            /* I */
     opus_int   lag,                    /* I */
     opus_int   length                /* I */
 )
 {
     opus_int   i;
     opus_int   idx, LTP_shp_buf_idx;
-    SKP_float n_Tilt, n_LF, n_LTP;
-    SKP_float sLF_AR_shp, sLF_MA_shp;
-    SKP_float *LTP_shp_buf;
+    silk_float n_Tilt, n_LF, n_LTP;
+    silk_float sLF_AR_shp, sLF_MA_shp;
+    silk_float *LTP_shp_buf;
 
     /* To speed up use temp variables instead of using the struct */
     LTP_shp_buf     = P->sLTP_shp;
@@ -179,7 +179,7 @@
 
     for( i = 0; i < length; i++ ) {
         if( lag > 0 ) {
-            SKP_assert( HARM_SHAPE_FIR_TAPS == 3 );
+            silk_assert( HARM_SHAPE_FIR_TAPS == 3 );
             idx = lag + LTP_shp_buf_idx;
             n_LTP  = LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2 - 1) & LTP_MASK ] * HarmShapeFIR[ 0 ];
             n_LTP += LTP_shp_buf[ ( idx - HARM_SHAPE_FIR_TAPS / 2    ) & LTP_MASK ] * HarmShapeFIR[ 1 ];
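
In silk_prefilt_FLP() above, the harmonic shaping term n_LTP is a 3-tap FIR read from the circular LTP shaping buffer, where wrap-around is handled by masking the index with a power-of-two mask (LTP_MASK). A minimal sketch of that masked-index access pattern, with hypothetical buffer size and coefficients, for illustration only:

#include <stdio.h>

#define BUF_LEN   16                 /* power of two, so ( idx & BUF_MASK ) wraps */
#define BUF_MASK  ( BUF_LEN - 1 )

int main( void )
{
    float buf[ BUF_LEN ];
    const float fir[ 3 ] = { 0.25f, 0.5f, 0.25f };   /* made-up symmetric taps */
    int   lag = 5, buf_idx = 14, idx;
    float n_LTP;
    int   i;

    for( i = 0; i < BUF_LEN; i++ ) {
        buf[ i ] = (float)i;
    }

    /* Read three neighbouring (wrapped) samples around the lagged position,
       in the spirit of LTP_shp_buf[ ( idx - 1 ) & LTP_MASK ] * HarmShapeFIR[ 1 ]. */
    idx   = lag + buf_idx;
    n_LTP = buf[ ( idx - 2 ) & BUF_MASK ] * fir[ 0 ]
          + buf[ ( idx - 1 ) & BUF_MASK ] * fir[ 1 ]
          + buf[ ( idx     ) & BUF_MASK ] * fir[ 2 ];
    printf( "n_LTP = %f\n", n_LTP );
    return 0;
}
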
diff --git a/silk/float/silk_process_gains_FLP.c b/silk/float/silk_process_gains_FLP.c
index 2dceb02..33dada2 100644
--- a/silk/float/silk_process_gains_FLP.c
+++ b/silk/float/silk_process_gains_FLP.c
@@ -41,24 +41,24 @@
     silk_shape_state_FLP *psShapeSt = &psEnc->sShape;
     opus_int     k;
     opus_int32   pGains_Q16[ MAX_NB_SUBFR ];
-    SKP_float   s, InvMaxSqrVal, gain, quant_offset;
+    silk_float   s, InvMaxSqrVal, gain, quant_offset;
 
     /* Gain reduction when LTP coding gain is high */
     if( psEnc->sCmn.indices.signalType == TYPE_VOICED ) {
-        s = 1.0f - 0.5f * SKP_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) );
+        s = 1.0f - 0.5f * silk_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) );
         for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
             psEncCtrl->Gains[ k ] *= s;
         }
     }
 
     /* Limit the quantized signal */
-    InvMaxSqrVal = ( SKP_float )( pow( 2.0f, 0.33f * ( 21.0f - psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f ) ) ) / psEnc->sCmn.subfr_length );
+    InvMaxSqrVal = ( silk_float )( pow( 2.0f, 0.33f * ( 21.0f - psEnc->sCmn.SNR_dB_Q7 * ( 1 / 128.0f ) ) ) / psEnc->sCmn.subfr_length );
 
     for( k = 0; k < psEnc->sCmn.nb_subfr; k++ ) {
         /* Soft limit on ratio residual energy and squared gains */
         gain = psEncCtrl->Gains[ k ];
-        gain = ( SKP_float )sqrt( gain * gain + psEncCtrl->ResNrg[ k ] * InvMaxSqrVal );
-        psEncCtrl->Gains[ k ] = SKP_min_float( gain, 32767.0f );
+        gain = ( silk_float )sqrt( gain * gain + psEncCtrl->ResNrg[ k ] * InvMaxSqrVal );
+        psEncCtrl->Gains[ k ] = silk_min_float( gain, 32767.0f );
     }
 
     /* Prepare gains for noise shaping quantization */
@@ -93,6 +93,6 @@
                       + LAMBDA_CODING_QUALITY    * psEncCtrl->coding_quality
                       + LAMBDA_QUANT_OFFSET      * quant_offset;
 
-    SKP_assert( psEncCtrl->Lambda > 0.0f );
-    SKP_assert( psEncCtrl->Lambda < 2.0f );
+    silk_assert( psEncCtrl->Lambda > 0.0f );
+    silk_assert( psEncCtrl->Lambda < 2.0f );
 }
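
The gain reduction for voiced frames above computes s = 1 - 0.5 * silk_sigmoid( 0.25 * ( LTPredCodGain - 12 ) ), so gains shrink smoothly as the LTP coding gain rises. Assuming silk_sigmoid() is the usual logistic function 1 / ( 1 + exp( -x ) ), which is an assumption not confirmed by this patch, the mapping looks like this small sketch:

#include <math.h>
#include <stdio.h>

/* Assumed shape of the sigmoid helper: the logistic function. */
static float sigmoid_sketch( float x )
{
    return 1.0f / ( 1.0f + expf( -x ) );
}

int main( void )
{
    float gain_dB;
    /* s = 1 - 0.5 * sigmoid( 0.25 * ( LTPredCodGain - 12 ) ), applied per subframe for voiced frames */
    for( gain_dB = 0.0f; gain_dB <= 24.0f; gain_dB += 6.0f ) {
        float s = 1.0f - 0.5f * sigmoid_sketch( 0.25f * ( gain_dB - 12.0f ) );
        printf( "LTP coding gain %4.1f dB -> gain scale %.3f\n", gain_dB, s );
    }
    return 0;
}
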
diff --git a/silk/float/silk_regularize_correlations_FLP.c b/silk/float/silk_regularize_correlations_FLP.c
index 9741ca9..7034dd3 100644
--- a/silk/float/silk_regularize_correlations_FLP.c
+++ b/silk/float/silk_regularize_correlations_FLP.c
@@ -32,9 +32,9 @@
 #include "silk_main_FLP.h"
 
 void silk_regularize_correlations_FLP(
-          SKP_float                 *XX,                /* I/O  Correlation matrices                    */
-          SKP_float                 *xx,                /* I/O  Correlation values                      */
-    const SKP_float                 noise,              /* I    Noise energy to add                     */
+          silk_float                 *XX,                /* I/O  Correlation matrices                    */
+          silk_float                 *xx,                /* I/O  Correlation values                      */
+    const silk_float                 noise,              /* I    Noise energy to add                     */
     const opus_int                   D                   /* I    Dimension of XX                         */
 )
 {
diff --git a/silk/float/silk_residual_energy_FLP.c b/silk/float/silk_residual_energy_FLP.c
index fadd7cc..6ab9ecf 100644
--- a/silk/float/silk_residual_energy_FLP.c
+++ b/silk/float/silk_residual_energy_FLP.c
@@ -35,19 +35,19 @@
 #define REGULARIZATION_FACTOR               1e-8f
 
 /* Residual energy: nrg = wxx - 2 * wXx * c + c' * wXX * c */
-SKP_float silk_residual_energy_covar_FLP(           /* O    Weighted residual energy                */
-    const SKP_float                 *c,                 /* I    Filter coefficients                     */
-          SKP_float                 *wXX,               /* I/O  Weighted correlation matrix, reg. out   */
-    const SKP_float                 *wXx,               /* I    Weighted correlation vector             */
-    const SKP_float                 wxx,                /* I    Weighted correlation value              */
+silk_float silk_residual_energy_covar_FLP(           /* O    Weighted residual energy                */
+    const silk_float                 *c,                 /* I    Filter coefficients                     */
+          silk_float                 *wXX,               /* I/O  Weighted correlation matrix, reg. out   */
+    const silk_float                 *wXx,               /* I    Weighted correlation vector             */
+    const silk_float                 wxx,                /* I    Weighted correlation value              */
     const opus_int                   D                   /* I    Dimension                               */
 )
 {
     opus_int   i, j, k;
-    SKP_float tmp, nrg = 0.0f, regularization;
+    silk_float tmp, nrg = 0.0f, regularization;
 
     /* Safety checks */
-    SKP_assert( D >= 0 );
+    silk_assert( D >= 0 );
 
     regularization = REGULARIZATION_FACTOR * ( wXX[ 0 ] + wXX[ D * D - 1 ] );
     for( k = 0; k < MAX_ITERATIONS_RESIDUAL_NRG; k++ ) {
@@ -79,7 +79,7 @@
         }
     }
     if( k == MAX_ITERATIONS_RESIDUAL_NRG ) {
-        SKP_assert( nrg == 0 );
+        silk_assert( nrg == 0 );
         nrg = 1.0f;
     }
 
@@ -89,29 +89,29 @@
 /* Calculates residual energies of input subframes where all subframes have LPC_order   */
 /* of preceding samples                                                                 */
 void silk_residual_energy_FLP(
-          SKP_float nrgs[ MAX_NB_SUBFR ],       /* O    Residual energy per subframe    */
-    const SKP_float x[],                        /* I    Input signal                    */
-          SKP_float a[ 2 ][ MAX_LPC_ORDER ],    /* I    AR coefs for each frame half    */
-    const SKP_float gains[],                    /* I    Quantization gains              */
+          silk_float nrgs[ MAX_NB_SUBFR ],       /* O    Residual energy per subframe    */
+    const silk_float x[],                        /* I    Input signal                    */
+          silk_float a[ 2 ][ MAX_LPC_ORDER ],    /* I    AR coefs for each frame half    */
+    const silk_float gains[],                    /* I    Quantization gains              */
     const opus_int   subfr_length,               /* I    Subframe length                 */
     const opus_int   nb_subfr,                   /* I    number of subframes             */
     const opus_int   LPC_order                   /* I    LPC order                       */
 )
 {
     opus_int     shift;
-    SKP_float   *LPC_res_ptr, LPC_res[ ( MAX_FRAME_LENGTH + MAX_NB_SUBFR * MAX_LPC_ORDER ) / 2 ];
+    silk_float   *LPC_res_ptr, LPC_res[ ( MAX_FRAME_LENGTH + MAX_NB_SUBFR * MAX_LPC_ORDER ) / 2 ];
 
     LPC_res_ptr = LPC_res + LPC_order;
     shift = LPC_order + subfr_length;
 
     /* Filter input to create the LPC residual for each frame half, and measure subframe energies */
     silk_LPC_analysis_filter_FLP( LPC_res, a[ 0 ], x + 0 * shift, 2 * shift, LPC_order );
-    nrgs[ 0 ] = ( SKP_float )( gains[ 0 ] * gains[ 0 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) );
-    nrgs[ 1 ] = ( SKP_float )( gains[ 1 ] * gains[ 1 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) );
+    nrgs[ 0 ] = ( silk_float )( gains[ 0 ] * gains[ 0 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) );
+    nrgs[ 1 ] = ( silk_float )( gains[ 1 ] * gains[ 1 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) );
 
     if( nb_subfr == MAX_NB_SUBFR ) {
         silk_LPC_analysis_filter_FLP( LPC_res, a[ 1 ], x + 2 * shift, 2 * shift, LPC_order );
-        nrgs[ 2 ] = ( SKP_float )( gains[ 2 ] * gains[ 2 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) );
-        nrgs[ 3 ] = ( SKP_float )( gains[ 3 ] * gains[ 3 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) );
+        nrgs[ 2 ] = ( silk_float )( gains[ 2 ] * gains[ 2 ] * silk_energy_FLP( LPC_res_ptr + 0 * shift, subfr_length ) );
+        nrgs[ 3 ] = ( silk_float )( gains[ 3 ] * gains[ 3 ] * silk_energy_FLP( LPC_res_ptr + 1 * shift, subfr_length ) );
     }
 }
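
For the covariance-domain residual energy renamed above, the comment already gives the formula: nrg = wxx - 2 * wXx * c + c' * wXX * c, i.e. a scalar term, a dot product, and a quadratic form over the D x D matrix wXX (row-major). A minimal sketch of just that arithmetic, without the regularization retry loop, using hypothetical names and toy data:

#include <stdio.h>

/* nrg = wxx - 2 * wXx' * c + c' * wXX * c, with wXX stored row-major, D x D. */
static float residual_energy_sketch( const float *c, const float *wXX,
                                     const float *wXx, float wxx, int D )
{
    float nrg = wxx;
    int   i, j;

    for( i = 0; i < D; i++ ) {
        nrg -= 2.0f * wXx[ i ] * c[ i ];
    }
    for( i = 0; i < D; i++ ) {
        float tmp = 0.0f;
        for( j = 0; j < D; j++ ) {
            tmp += wXX[ i * D + j ] * c[ j ];
        }
        nrg += c[ i ] * tmp;
    }
    return nrg;
}

int main( void )
{
    /* 2 x 2 toy example */
    const float c[ 2 ]   = { 0.5f, -0.25f };
    const float wXX[ 4 ] = { 2.0f, 0.5f, 0.5f, 1.0f };
    const float wXx[ 2 ] = { 1.0f, 0.2f };
    printf( "nrg = %f\n", residual_energy_sketch( c, wXX, wXx, 3.0f, 2 ) );
    return 0;
}
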
diff --git a/silk/float/silk_scale_copy_vector_FLP.c b/silk/float/silk_scale_copy_vector_FLP.c
index 396203c..ca6867e 100644
--- a/silk/float/silk_scale_copy_vector_FLP.c
+++ b/silk/float/silk_scale_copy_vector_FLP.c
@@ -33,9 +33,9 @@
 
 /* copy and multiply a vector by a constant */
 void silk_scale_copy_vector_FLP(
-    SKP_float           *data_out,
-    const SKP_float     *data_in,
-    SKP_float           gain,
+    silk_float           *data_out,
+    const silk_float     *data_in,
+    silk_float           gain,
     opus_int             dataSize
 )
 {
diff --git a/silk/float/silk_scale_vector_FLP.c b/silk/float/silk_scale_vector_FLP.c
index b8fa92c..33982eb 100644
--- a/silk/float/silk_scale_vector_FLP.c
+++ b/silk/float/silk_scale_vector_FLP.c
@@ -33,8 +33,8 @@
 
 /* multiply a vector by a constant */
 void silk_scale_vector_FLP(
-    SKP_float           *data1,
-    SKP_float           gain,
+    silk_float           *data1,
+    silk_float           gain,
     opus_int             dataSize
 )
 {
diff --git a/silk/float/silk_schur_FLP.c b/silk/float/silk_schur_FLP.c
index f57ebda..d13d5cc 100644
--- a/silk/float/silk_schur_FLP.c
+++ b/silk/float/silk_schur_FLP.c
@@ -31,15 +31,15 @@
 
 #include "silk_SigProc_FLP.h"
 
-SKP_float silk_schur_FLP(           /* O    returns residual energy                     */
-    SKP_float       refl_coef[],        /* O    reflection coefficients (length order)      */
-    const SKP_float auto_corr[],        /* I    autotcorrelation sequence (length order+1)  */
+silk_float silk_schur_FLP(           /* O    returns residual energy                     */
+    silk_float       refl_coef[],        /* O    reflection coefficients (length order)      */
+    const silk_float auto_corr[],        /* I    autotcorrelation sequence (length order+1)  */
     opus_int         order               /* I    order                                       */
 )
 {
     opus_int   k, n;
-    SKP_float C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ];
-    SKP_float Ctmp1, Ctmp2, rc_tmp;
+    silk_float C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ];
+    silk_float Ctmp1, Ctmp2, rc_tmp;
 
     /* Copy correlations */
     for( k = 0; k < order+1; k++ ) {
@@ -48,7 +48,7 @@
 
     for( k = 0; k < order; k++ ) {
         /* Get reflection coefficient */
-        rc_tmp = -C[ k + 1 ][ 0 ] / SKP_max_float( C[ 0 ][ 1 ], 1e-9f );
+        rc_tmp = -C[ k + 1 ][ 0 ] / silk_max_float( C[ 0 ][ 1 ], 1e-9f );
 
         /* Save the output */
         refl_coef[ k ] = rc_tmp;
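
The hunks above only show the renamed lines of the Schur recursion, so here is a hedged plain-C restatement of the whole routine: reflection coefficients from an autocorrelation sequence, returning the residual energy. The correlation-update loop is reconstructed from the standard recursion and may differ in detail from the file; demo_schur is an illustrative name.

/* Schur recursion, float version; order assumed <= 32 for the local array */
static float demo_schur( float *refl_coef, const float *auto_corr, int order )
{
    float C[ 32 + 1 ][ 2 ];
    int   k, n;

    /* Copy correlations into the two working columns */
    for( k = 0; k < order + 1; k++ ) {
        C[ k ][ 0 ] = C[ k ][ 1 ] = auto_corr[ k ];
    }

    for( k = 0; k < order; k++ ) {
        /* Reflection coefficient, guarding against a zero denominator */
        float rc_tmp = -C[ k + 1 ][ 0 ] / ( C[ 0 ][ 1 ] > 1e-9f ? C[ 0 ][ 1 ] : 1e-9f );
        refl_coef[ k ] = rc_tmp;

        /* Update the correlation columns */
        for( n = 0; n < order - k; n++ ) {
            float Ctmp1 = C[ n + k + 1 ][ 0 ];
            float Ctmp2 = C[ n ][ 1 ];
            C[ n + k + 1 ][ 0 ] = Ctmp1 + Ctmp2 * rc_tmp;
            C[ n ][ 1 ]         = Ctmp2 + Ctmp1 * rc_tmp;
        }
    }

    /* Residual energy */
    return C[ 0 ][ 1 ];
}
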
diff --git a/silk/float/silk_solve_LS_FLP.c b/silk/float/silk_solve_LS_FLP.c
index a082b33..fc38c7f 100644
--- a/silk/float/silk_solve_LS_FLP.c
+++ b/silk/float/silk_solve_LS_FLP.c
@@ -38,10 +38,10 @@
  * the symmetric matric A is given by A = L*D*L'.
  **********************************************************************/
 void silk_LDL_FLP(
-    SKP_float           *A,      /* (I/O) Pointer to Symetric Square Matrix */
+    silk_float           *A,      /* (I/O) Pointer to Symetric Square Matrix */
     opus_int             M,       /* (I) Size of Matrix */
-    SKP_float           *L,      /* (I/O) Pointer to Square Upper triangular Matrix */
-    SKP_float           *Dinv    /* (I/O) Pointer to vector holding the inverse diagonal elements of D */
+    silk_float           *L,      /* (I/O) Pointer to Square Upper triangular Matrix */
+    silk_float           *Dinv    /* (I/O) Pointer to vector holding the inverse diagonal elements of D */
 );
 
 /**********************************************************************
@@ -49,10 +49,10 @@
  * triangular matrix, with ones on the diagonal.
  **********************************************************************/
 void silk_SolveWithLowerTriangularWdiagOnes_FLP(
-    const SKP_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
+    const silk_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
     opus_int             M,      /* (I) Dim of Matrix equation */
-    const SKP_float     *b,     /* (I) b Vector */
-    SKP_float           *x      /* (O) x Vector */
+    const silk_float     *b,     /* (I) b Vector */
+    silk_float           *x      /* (O) x Vector */
 );
 
 /**********************************************************************
@@ -60,10 +60,10 @@
  * triangular, with ones on the diagonal. (ie then A^T is upper triangular)
  **********************************************************************/
 void silk_SolveWithUpperTriangularFromLowerWdiagOnes_FLP(
-    const SKP_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
+    const silk_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
     opus_int             M,      /* (I) Dim of Matrix equation */
-    const SKP_float     *b,     /* (I) b Vector */
-    SKP_float           *x      /* (O) x Vector */
+    const silk_float     *b,     /* (I) b Vector */
+    silk_float           *x      /* (O) x Vector */
 );
 
 /**********************************************************************
@@ -71,18 +71,18 @@
  * symmetric square matrix - using LDL factorisation
  **********************************************************************/
 void silk_solve_LDL_FLP(
-          SKP_float                 *A,                 /* I/O  Symmetric square matrix, out: reg.      */
+          silk_float                 *A,                 /* I/O  Symmetric square matrix, out: reg.      */
     const opus_int                   M,                  /* I    Size of matrix                          */
-    const SKP_float                 *b,                 /* I    Pointer to b vector                     */
-          SKP_float                 *x                  /* O    Pointer to x solution vector            */
+    const silk_float                 *b,                 /* I    Pointer to b vector                     */
+          silk_float                 *x                  /* O    Pointer to x solution vector            */
 )
 {
     opus_int   i;
-    SKP_float L[    MAX_MATRIX_SIZE ][ MAX_MATRIX_SIZE ];
-    SKP_float T[    MAX_MATRIX_SIZE ];
-    SKP_float Dinv[ MAX_MATRIX_SIZE ]; /* inverse diagonal elements of D*/
+    silk_float L[    MAX_MATRIX_SIZE ][ MAX_MATRIX_SIZE ];
+    silk_float T[    MAX_MATRIX_SIZE ];
+    silk_float Dinv[ MAX_MATRIX_SIZE ]; /* inverse diagonal elements of D*/
 
-    SKP_assert( M <= MAX_MATRIX_SIZE );
+    silk_assert( M <= MAX_MATRIX_SIZE );
 
     /***************************************************
     Factorize A by LDL such that A = L*D*(L^T),
@@ -110,15 +110,15 @@
 }
 
 void silk_SolveWithUpperTriangularFromLowerWdiagOnes_FLP(
-    const SKP_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
+    const silk_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
     opus_int             M,      /* (I) Dim of Matrix equation */
-    const SKP_float     *b,     /* (I) b Vector */
-    SKP_float           *x      /* (O) x Vector */
+    const silk_float     *b,     /* (I) b Vector */
+    silk_float           *x      /* (O) x Vector */
 )
 {
     opus_int   i, j;
-    SKP_float temp;
-    const SKP_float *ptr1;
+    silk_float temp;
+    const silk_float *ptr1;
 
     for( i = M - 1; i >= 0; i-- ) {
         ptr1 =  matrix_adr( L, 0, i, M );
@@ -132,15 +132,15 @@
 }
 
 void silk_SolveWithLowerTriangularWdiagOnes_FLP(
-    const SKP_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
+    const silk_float     *L,     /* (I) Pointer to Lower Triangular Matrix */
     opus_int             M,      /* (I) Dim of Matrix equation */
-    const SKP_float     *b,     /* (I) b Vector */
-    SKP_float           *x      /* (O) x Vector */
+    const silk_float     *b,     /* (I) b Vector */
+    silk_float           *x      /* (O) x Vector */
 )
 {
     opus_int   i, j;
-    SKP_float temp;
-    const SKP_float *ptr1;
+    silk_float temp;
+    const silk_float *ptr1;
 
     for( i = 0; i < M; i++ ) {
         ptr1 =  matrix_adr( L, i, 0, M );
@@ -154,18 +154,18 @@
 }
 
 void silk_LDL_FLP(
-    SKP_float           *A,      /* (I/O) Pointer to Symetric Square Matrix */
+    silk_float           *A,      /* (I/O) Pointer to Symetric Square Matrix */
     opus_int             M,       /* (I) Size of Matrix */
-    SKP_float           *L,      /* (I/O) Pointer to Square Upper triangular Matrix */
-    SKP_float           *Dinv    /* (I/O) Pointer to vector holding the inverse diagonal elements of D */
+    silk_float           *L,      /* (I/O) Pointer to Square Upper triangular Matrix */
+    silk_float           *Dinv    /* (I/O) Pointer to vector holding the inverse diagonal elements of D */
 )
 {
     opus_int i, j, k, loop_count, err = 1;
-    SKP_float *ptr1, *ptr2;
+    silk_float *ptr1, *ptr2;
     double temp, diag_min_value;
-    SKP_float v[ MAX_MATRIX_SIZE ], D[ MAX_MATRIX_SIZE ]; /* temp arrays*/
+    silk_float v[ MAX_MATRIX_SIZE ], D[ MAX_MATRIX_SIZE ]; /* temp arrays*/
 
-    SKP_assert( M <= MAX_MATRIX_SIZE );
+    silk_assert( M <= MAX_MATRIX_SIZE );
 
     diag_min_value = FIND_LTP_COND_FAC * 0.5f * ( A[ 0 ] + A[ M * M - 1 ] );
     for( loop_count = 0; loop_count < M && err == 1; loop_count++ ) {
@@ -181,13 +181,13 @@
                 /* Badly conditioned matrix: add white noise and run again */
                 temp = ( loop_count + 1 ) * diag_min_value - temp;
                 for( i = 0; i < M; i++ ) {
-                    matrix_ptr( A, i, i, M ) += ( SKP_float )temp;
+                    matrix_ptr( A, i, i, M ) += ( silk_float )temp;
                 }
                 err = 1;
                 break;
             }
-            D[ j ]    = ( SKP_float )temp;
-            Dinv[ j ] = ( SKP_float )( 1.0f / temp );
+            D[ j ]    = ( silk_float )temp;
+            Dinv[ j ] = ( silk_float )( 1.0f / temp );
             matrix_ptr( L, j, j, M ) = 1.0f;
 
             ptr1 = matrix_adr( A, j, 0, M );
@@ -197,11 +197,11 @@
                 for( k = 0; k < j; k++ ) {
                     temp += ptr2[ k ] * v[ k ];
                 }
-                matrix_ptr( L, i, j, M ) = ( SKP_float )( ( ptr1[ i ] - temp ) * Dinv[ j ] );
+                matrix_ptr( L, i, j, M ) = ( silk_float )( ( ptr1[ i ] - temp ) * Dinv[ j ] );
                 ptr2 += M; /* go to next column*/
             }
         }
     }
-    SKP_assert( err == 0 );
+    silk_assert( err == 0 );
 }
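
The solver above chains an LDL factorisation, a forward substitution against the unit-diagonal L, a scaling by Dinv, and a back substitution against L^T. As a hedged sketch of the forward-substitution step (mirroring silk_SolveWithLowerTriangularWdiagOnes_FLP, assuming the row-major layout implied by matrix_ptr; demo_* names are illustrative):

/* Solve L * x = b where L is lower triangular with ones on the diagonal,
 * stored row-major as an M x M array; no division because L( i, i ) == 1 */
static void demo_solve_lower_unit_diag( const float *L, int M, const float *b, float *x )
{
    for( int i = 0; i < M; i++ ) {
        float temp = 0.0f;
        for( int j = 0; j < i; j++ ) {
            temp += L[ i * M + j ] * x[ j ];    /* L( i, j ) */
        }
        x[ i ] = b[ i ] - temp;
    }
}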
 
diff --git a/silk/float/silk_sort_FLP.c b/silk/float/silk_sort_FLP.c
index 62249b5..c08fb32 100644
--- a/silk/float/silk_sort_FLP.c
+++ b/silk/float/silk_sort_FLP.c
@@ -37,19 +37,19 @@
 #include "silk_SigProc_FLP.h"
 
 void silk_insertion_sort_decreasing_FLP(
-    SKP_float            *a,          /* I/O:  Unsorted / Sorted vector                */
+    silk_float            *a,          /* I/O:  Unsorted / Sorted vector                */
     opus_int              *idx,      /* O:    Index vector for the sorted elements    */
     const opus_int        L,           /* I:    Vector length                           */
     const opus_int        K            /* I:    Number of correctly sorted positions    */
 )
 {
-    SKP_float value;
+    silk_float value;
     opus_int   i, j;
 
     /* Safety checks */
-    SKP_assert( K >  0 );
-    SKP_assert( L >  0 );
-    SKP_assert( L >= K );
+    silk_assert( K >  0 );
+    silk_assert( L >  0 );
+    silk_assert( L >= K );
 
     /* Write start indices in index vector */
     for( i = 0; i < K; i++ ) {
diff --git a/silk/float/silk_structs_FLP.h b/silk/float/silk_structs_FLP.h
index e9c5b1a..68d358a 100644
--- a/silk/float/silk_structs_FLP.h
+++ b/silk/float/silk_structs_FLP.h
@@ -42,21 +42,21 @@
 /********************************/
 typedef struct {
     opus_int8    LastGainIndex;
-    SKP_float   HarmBoost_smth;
-    SKP_float   HarmShapeGain_smth;
-    SKP_float   Tilt_smth;
+    silk_float   HarmBoost_smth;
+    silk_float   HarmShapeGain_smth;
+    silk_float   Tilt_smth;
 } silk_shape_state_FLP;
 
 /********************************/
 /* Prefilter state              */
 /********************************/
 typedef struct {
-    SKP_float   sLTP_shp[ LTP_BUF_LENGTH ];
-    SKP_float   sAR_shp[ MAX_SHAPE_LPC_ORDER + 1 ];
+    silk_float   sLTP_shp[ LTP_BUF_LENGTH ];
+    silk_float   sAR_shp[ MAX_SHAPE_LPC_ORDER + 1 ];
     opus_int     sLTP_shp_buf_idx;
-    SKP_float   sLF_AR_shp;
-    SKP_float   sLF_MA_shp;
-    SKP_float   sHarmHP;
+    silk_float   sLF_AR_shp;
+    silk_float   sLF_MA_shp;
+    silk_float   sHarmHP;
     opus_int32   rand_seed;
     opus_int     lagPrev;
 } silk_prefilter_state_FLP;
@@ -70,12 +70,12 @@
     silk_prefilter_state_FLP    sPrefilt;                   /* Prefilter State */
 
     /* Buffer for find pitch and noise shape analysis */
-    SKP_float                   x_buf[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];/* Buffer for find pitch and noise shape analysis */
-    SKP_float                   LTPCorr;                    /* Normalized correlation from pitch lag estimator */
+    silk_float                   x_buf[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];/* Buffer for find pitch and noise shape analysis */
+    silk_float                   LTPCorr;                    /* Normalized correlation from pitch lag estimator */
 
     /* Parameters for LTP scaling control */
-    SKP_float                   prevLTPredCodGain;
-    SKP_float                   HPLTPredCodGain;
+    silk_float                   prevLTPredCodGain;
+    silk_float                   HPLTPredCodGain;
 } silk_encoder_state_FLP;
 
 /************************/
@@ -83,30 +83,30 @@
 /************************/
 typedef struct {
     /* Prediction and coding parameters */
-    SKP_float                    Gains[ MAX_NB_SUBFR ];
-    SKP_float                    PredCoef[ 2 ][ MAX_LPC_ORDER ];        /* holds interpolated and final coefficients */
-    SKP_float                    LTPCoef[LTP_ORDER * MAX_NB_SUBFR];
-    SKP_float                    LTP_scale;
+    silk_float                    Gains[ MAX_NB_SUBFR ];
+    silk_float                    PredCoef[ 2 ][ MAX_LPC_ORDER ];        /* holds interpolated and final coefficients */
+    silk_float                    LTPCoef[LTP_ORDER * MAX_NB_SUBFR];
+    silk_float                    LTP_scale;
     opus_int                     pitchL[ MAX_NB_SUBFR ];
 
     /* Noise shaping parameters */
-    SKP_float                    AR1[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
-    SKP_float                    AR2[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
-    SKP_float                    LF_MA_shp[     MAX_NB_SUBFR ];
-    SKP_float                    LF_AR_shp[     MAX_NB_SUBFR ];
-    SKP_float                    GainsPre[      MAX_NB_SUBFR ];
-    SKP_float                    HarmBoost[     MAX_NB_SUBFR ];
-    SKP_float                    Tilt[          MAX_NB_SUBFR ];
-    SKP_float                    HarmShapeGain[ MAX_NB_SUBFR ];
-    SKP_float                    Lambda;
-    SKP_float                    input_quality;
-    SKP_float                    coding_quality;
+    silk_float                    AR1[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
+    silk_float                    AR2[ MAX_NB_SUBFR * MAX_SHAPE_LPC_ORDER ];
+    silk_float                    LF_MA_shp[     MAX_NB_SUBFR ];
+    silk_float                    LF_AR_shp[     MAX_NB_SUBFR ];
+    silk_float                    GainsPre[      MAX_NB_SUBFR ];
+    silk_float                    HarmBoost[     MAX_NB_SUBFR ];
+    silk_float                    Tilt[          MAX_NB_SUBFR ];
+    silk_float                    HarmShapeGain[ MAX_NB_SUBFR ];
+    silk_float                    Lambda;
+    silk_float                    input_quality;
+    silk_float                    coding_quality;
 
     /* Measures */
-    SKP_float                    sparseness;
-    SKP_float                   predGain;
-    SKP_float                    LTPredCodGain;
-    SKP_float                    ResNrg[ MAX_NB_SUBFR ];                    /* Residual energy per subframe */
+    silk_float                    sparseness;
+    silk_float                   predGain;
+    silk_float                    LTPredCodGain;
+    silk_float                    ResNrg[ MAX_NB_SUBFR ];                    /* Residual energy per subframe */
 } silk_encoder_control_FLP;
 
 /************************/
diff --git a/silk/float/silk_warped_autocorrelation_FLP.c b/silk/float/silk_warped_autocorrelation_FLP.c
index e997f62..de08d49 100644
--- a/silk/float/silk_warped_autocorrelation_FLP.c
+++ b/silk/float/silk_warped_autocorrelation_FLP.c
@@ -33,9 +33,9 @@
 
 /* Autocorrelations for a warped frequency axis */
 void silk_warped_autocorrelation_FLP(
-          SKP_float                 *corr,              /* O    Result [order + 1]                      */
-    const SKP_float                 *input,             /* I    Input data to correlate                 */
-    const SKP_float                 warping,            /* I    Warping coefficient                     */
+          silk_float                 *corr,              /* O    Result [order + 1]                      */
+    const silk_float                 *input,             /* I    Input data to correlate                 */
+    const silk_float                 warping,            /* I    Warping coefficient                     */
     const opus_int                   length,             /* I    Length of input                         */
     const opus_int                   order               /* I    Correlation order (even)                */
 )
@@ -46,7 +46,7 @@
     double C[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 };
 
     /* Order must be even */
-    SKP_assert( ( order & 1 ) == 0 );
+    silk_assert( ( order & 1 ) == 0 );
 
     /* Loop over samples */
     for( n = 0; n < length; n++ ) {
@@ -66,8 +66,8 @@
         C[ order ] += state[ 0 ] * tmp1;
     }
 
-    /* Copy correlations in SKP_float output format */
+    /* Copy correlations in silk_float output format */
     for( i = 0; i < order + 1; i++ ) {
-        corr[ i ] = ( SKP_float )C[ i ];
+        corr[ i ] = ( silk_float )C[ i ];
     }
 }
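
A hedged, non-unrolled restatement of the loop above: the input is fed through a chain of first-order allpass sections with coefficient warping, and lag i accumulates the product of the current input sample with the input passed through i allpass sections. The real loop is partially unrolled and may differ in detail; demo_warped_autocorr is an illustrative name.

/* Warped autocorrelation, double accumulation; order assumed <= 24 */
static void demo_warped_autocorr( double *corr, const float *input,
                                  float warping, int length, int order )
{
    double state[ 24 + 1 ] = { 0 };
    double tmp1, tmp2;
    int    n, i;

    for( i = 0; i < order + 1; i++ ) {
        corr[ i ] = 0.0;
    }
    for( n = 0; n < length; n++ ) {
        tmp1 = input[ n ];
        for( i = 0; i < order; i++ ) {
            /* First-order allpass section */
            tmp2 = state[ i ] + warping * ( state[ i + 1 ] - tmp1 );
            state[ i ] = tmp1;
            corr[ i ] += state[ 0 ] * tmp1;
            tmp1 = tmp2;
        }
        state[ order ] = tmp1;
        corr[ order ] += state[ 0 ] * tmp1;
    }
}
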
diff --git a/silk/float/silk_wrappers_FLP.c b/silk/float/silk_wrappers_FLP.c
index 826251f..2e38f08 100644
--- a/silk/float/silk_wrappers_FLP.c
+++ b/silk/float/silk_wrappers_FLP.c
@@ -36,7 +36,7 @@
 /* Convert AR filter coefficients to NLSF parameters */
 void silk_A2NLSF_FLP(
           opus_int16                 *NLSF_Q15,          /* O    NLSF vector      [ LPC_order ]          */
-    const SKP_float                 *pAR,               /* I    LPC coefficients [ LPC_order ]          */
+    const silk_float                 *pAR,               /* I    LPC coefficients [ LPC_order ]          */
     const opus_int                   LPC_order           /* I    LPC order                               */
 )
 {
@@ -44,7 +44,7 @@
     opus_int32 a_fix_Q16[ MAX_LPC_ORDER ];
 
     for( i = 0; i < LPC_order; i++ ) {
-        a_fix_Q16[ i ] = SKP_float2int( pAR[ i ] * 65536.0f );
+        a_fix_Q16[ i ] = silk_float2int( pAR[ i ] * 65536.0f );
     }
 
     silk_A2NLSF( NLSF_Q15, a_fix_Q16, LPC_order );
@@ -52,7 +52,7 @@
 
 /* Convert LSF parameters to AR prediction filter coefficients */
 void silk_NLSF2A_FLP(
-          SKP_float                 *pAR,               /* O    LPC coefficients [ LPC_order ]          */
+          silk_float                 *pAR,               /* O    LPC coefficients [ LPC_order ]          */
     const opus_int16                 *NLSF_Q15,          /* I    NLSF vector      [ LPC_order ]          */
     const opus_int                   LPC_order           /* I    LPC order                               */
 )
@@ -63,7 +63,7 @@
     silk_NLSF2A( a_fix_Q12, NLSF_Q15, LPC_order );
 
     for( i = 0; i < LPC_order; i++ ) {
-        pAR[ i ] = ( SKP_float )a_fix_Q12[ i ] * ( 1.0f / 4096.0f );
+        pAR[ i ] = ( silk_float )a_fix_Q12[ i ] * ( 1.0f / 4096.0f );
     }
 }
 
@@ -72,7 +72,7 @@
 /******************************************/
 void silk_process_NLSFs_FLP(
     silk_encoder_state              *psEncC,                            /* I/O  Encoder state                               */
-    SKP_float                       PredCoef[ 2 ][ MAX_LPC_ORDER ],     /* O    Prediction coefficients                     */
+    silk_float                       PredCoef[ 2 ][ MAX_LPC_ORDER ],     /* O    Prediction coefficients                     */
     opus_int16                       NLSF_Q15[      MAX_LPC_ORDER ],     /* I/O  Normalized LSFs (quant out) (0 - (2^15-1))  */
     const opus_int16                 prev_NLSF_Q15[ MAX_LPC_ORDER ]      /* I    Previous Normalized LSFs (0 - (2^15-1))     */
 )
@@ -84,7 +84,7 @@
 
     for( j = 0; j < 2; j++ ) {
         for( i = 0; i < psEncC->predictLPCOrder; i++ ) {
-            PredCoef[ j ][ i ] = ( SKP_float )PredCoef_Q12[ j ][ i ] * ( 1.0f / 4096.0f );
+            PredCoef[ j ][ i ] = ( silk_float )PredCoef_Q12[ j ][ i ] * ( 1.0f / 4096.0f );
         }
     }
 }
@@ -98,13 +98,13 @@
     SideInfoIndices                 *psIndices,     /* I/O  Quantization indices                        */
     silk_nsq_state                  *psNSQ,         /* I/O  Noise Shaping Quantzation state             */
           opus_int8                  pulses[],       /* O    Quantized pulse signal                      */
-    const SKP_float                 x[]             /* I    Prefiltered input signal                    */
+    const silk_float                 x[]             /* I    Prefiltered input signal                    */
 )
 {
     opus_int     i, j;
     opus_int16   x_16[ MAX_FRAME_LENGTH ];
     opus_int32   Gains_Q16[ MAX_NB_SUBFR ];
-    SKP_DWORD_ALIGN opus_int16 PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ];
+    silk_DWORD_ALIGN opus_int16 PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ];
     opus_int16   LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ];
     opus_int     LTP_scale_Q14;
 
@@ -119,32 +119,32 @@
     /* Noise shape parameters */
     for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
         for( j = 0; j < psEnc->sCmn.shapingLPCOrder; j++ ) {
-            AR2_Q13[ i * MAX_SHAPE_LPC_ORDER + j ] = SKP_float2int( psEncCtrl->AR2[ i * MAX_SHAPE_LPC_ORDER + j ] * 8192.0f );
+            AR2_Q13[ i * MAX_SHAPE_LPC_ORDER + j ] = silk_float2int( psEncCtrl->AR2[ i * MAX_SHAPE_LPC_ORDER + j ] * 8192.0f );
         }
     }
 
     for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
-        LF_shp_Q14[ i ] =   SKP_LSHIFT32( SKP_float2int( psEncCtrl->LF_AR_shp[ i ]     * 16384.0f ), 16 ) |
-                              (opus_uint16)SKP_float2int( psEncCtrl->LF_MA_shp[ i ]     * 16384.0f );
-        Tilt_Q14[ i ]   =        (opus_int)SKP_float2int( psEncCtrl->Tilt[ i ]          * 16384.0f );
-        HarmShapeGain_Q14[ i ] = (opus_int)SKP_float2int( psEncCtrl->HarmShapeGain[ i ] * 16384.0f );
+        LF_shp_Q14[ i ] =   silk_LSHIFT32( silk_float2int( psEncCtrl->LF_AR_shp[ i ]     * 16384.0f ), 16 ) |
+                              (opus_uint16)silk_float2int( psEncCtrl->LF_MA_shp[ i ]     * 16384.0f );
+        Tilt_Q14[ i ]   =        (opus_int)silk_float2int( psEncCtrl->Tilt[ i ]          * 16384.0f );
+        HarmShapeGain_Q14[ i ] = (opus_int)silk_float2int( psEncCtrl->HarmShapeGain[ i ] * 16384.0f );
     }
-    Lambda_Q10 = ( opus_int )SKP_float2int( psEncCtrl->Lambda * 1024.0f );
+    Lambda_Q10 = ( opus_int )silk_float2int( psEncCtrl->Lambda * 1024.0f );
 
     /* prediction and coding parameters */
     for( i = 0; i < psEnc->sCmn.nb_subfr * LTP_ORDER; i++ ) {
-        LTPCoef_Q14[ i ] = ( opus_int16 )SKP_float2int( psEncCtrl->LTPCoef[ i ] * 16384.0f );
+        LTPCoef_Q14[ i ] = ( opus_int16 )silk_float2int( psEncCtrl->LTPCoef[ i ] * 16384.0f );
     }
 
     for( j = 0; j < 2; j++ ) {
         for( i = 0; i < psEnc->sCmn.predictLPCOrder; i++ ) {
-            PredCoef_Q12[ j ][ i ] = ( opus_int16 )SKP_float2int( psEncCtrl->PredCoef[ j ][ i ] * 4096.0f );
+            PredCoef_Q12[ j ][ i ] = ( opus_int16 )silk_float2int( psEncCtrl->PredCoef[ j ][ i ] * 4096.0f );
         }
     }
 
     for( i = 0; i < psEnc->sCmn.nb_subfr; i++ ) {
-        Gains_Q16[ i ] = SKP_float2int( psEncCtrl->Gains[ i ] * 65536.0f );
-        SKP_assert( Gains_Q16[ i ] > 0 );
+        Gains_Q16[ i ] = silk_float2int( psEncCtrl->Gains[ i ] * 65536.0f );
+        silk_assert( Gains_Q16[ i ] > 0 );
     }
 
     if( psIndices->signalType == TYPE_VOICED ) {
@@ -154,7 +154,7 @@
     }
 
     /* Convert input to fix */
-    SKP_float2short_array( x_16, x, psEnc->sCmn.frame_length );
+    silk_float2short_array( x_16, x, psEnc->sCmn.frame_length );
 
     /* Call NSQ */
     if( psEnc->sCmn.nStatesDelayedDecision > 1 || psEnc->sCmn.warping_Q16 > 0 ) {
@@ -170,10 +170,10 @@
 /* Floating-point Silk LTP quantiation wrapper */
 /***********************************************/
 void silk_quant_LTP_gains_FLP(
-          SKP_float B[ MAX_NB_SUBFR * LTP_ORDER ],              /* I/O  (Un-)quantized LTP gains                */
+          silk_float B[ MAX_NB_SUBFR * LTP_ORDER ],              /* I/O  (Un-)quantized LTP gains                */
           opus_int8  cbk_index[ MAX_NB_SUBFR ],                  /* O    Codebook index                          */
           opus_int8  *periodicity_index,                         /* O    Periodicity index                       */
-    const SKP_float W[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ],  /* I    Error weights                           */
+    const silk_float W[ MAX_NB_SUBFR * LTP_ORDER * LTP_ORDER ],  /* I    Error weights                           */
     const opus_int   mu_Q10,                                     /* I    Mu value (R/D tradeoff)                 */
     const opus_int   lowComplexity,                              /* I    Flag for low complexity                 */
     const opus_int   nb_subfr                                    /* I    number of subframes                     */
@@ -184,15 +184,15 @@
     opus_int32 W_Q18[ MAX_NB_SUBFR*LTP_ORDER*LTP_ORDER ];
 
     for( i = 0; i < nb_subfr * LTP_ORDER; i++ ) {
-        B_Q14[ i ] = (opus_int16)SKP_float2int( B[ i ] * 16384.0f );
+        B_Q14[ i ] = (opus_int16)silk_float2int( B[ i ] * 16384.0f );
     }
     for( i = 0; i < nb_subfr * LTP_ORDER * LTP_ORDER; i++ ) {
-        W_Q18[ i ] = (opus_int32)SKP_float2int( W[ i ] * 262144.0f );
+        W_Q18[ i ] = (opus_int32)silk_float2int( W[ i ] * 262144.0f );
     }
 
     silk_quant_LTP_gains( B_Q14, cbk_index, periodicity_index, W_Q18, mu_Q10, lowComplexity, nb_subfr );
 
     for( i = 0; i < nb_subfr * LTP_ORDER; i++ ) {
-        B[ i ] = (SKP_float)B_Q14[ i ] * ( 1.0f / 16384.0f );
+        B[ i ] = (silk_float)B_Q14[ i ] * ( 1.0f / 16384.0f );
     }
 }
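
The wrapper above converts floating-point parameters into their fixed-point Q domains by scaling and rounding: Q14 LTP coefficients multiply by 16384, Q12 prediction coefficients by 4096, Q16 gains by 65536, Q18 weights by 262144. A hedged stand-in for silk_float2int (assumed here to round to nearest), plus a worked example:

#include <math.h>

/* Round a float into a signed Qn integer; stand-in for silk_float2int( x * 2^n ) */
static int demo_float_to_Q( float x, int n )
{
    return (int)floorf( x * (float)( 1 << n ) + 0.5f );
}

/* Example: an LTP coefficient of 0.25 becomes 4096 in Q14 ( 0.25 * 16384 ),
 * and a quantization gain of 1.5 becomes 98304 in Q16 ( 1.5 * 65536 ). */
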
diff --git a/silk/silk_A2NLSF.c b/silk/silk_A2NLSF.c
index 1913534..028d770 100644
--- a/silk/silk_A2NLSF.c
+++ b/silk/silk_A2NLSF.c
@@ -59,7 +59,7 @@
         for( n = dd; n > k; n-- ) {
             p[ n - 2 ] -= p[ n ];
         }
-        p[ k - 2 ] -= SKP_LSHIFT( p[ k ], 1 );
+        p[ k - 2 ] -= silk_LSHIFT( p[ k ], 1 );
     }
 }
 /* Helper function for A2NLSF(..)                    */
@@ -74,9 +74,9 @@
     opus_int32 x_Q16, y32;
 
     y32 = p[ dd ];                                    /* QPoly */
-    x_Q16 = SKP_LSHIFT( x, 4 );
+    x_Q16 = silk_LSHIFT( x, 4 );
     for( n = dd - 1; n >= 0; n-- ) {
-        y32 = SKP_SMLAWW( p[ n ], y32, x_Q16 );       /* QPoly */
+        y32 = silk_SMLAWW( p[ n ], y32, x_Q16 );       /* QPoly */
     }
     return y32;
 }
@@ -91,18 +91,18 @@
     opus_int k;
 
     /* Convert filter coefs to even and odd polynomials */
-    P[dd] = SKP_LSHIFT( 1, QPoly );
-    Q[dd] = SKP_LSHIFT( 1, QPoly );
+    P[dd] = silk_LSHIFT( 1, QPoly );
+    Q[dd] = silk_LSHIFT( 1, QPoly );
     for( k = 0; k < dd; k++ ) {
 #if( QPoly < 16 )
-        P[ k ] = SKP_RSHIFT_ROUND( -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ], 16 - QPoly ); /* QPoly */
-        Q[ k ] = SKP_RSHIFT_ROUND( -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ], 16 - QPoly ); /* QPoly */
+        P[ k ] = silk_RSHIFT_ROUND( -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ], 16 - QPoly ); /* QPoly */
+        Q[ k ] = silk_RSHIFT_ROUND( -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ], 16 - QPoly ); /* QPoly */
 #elif( Qpoly == 16 )
         P[ k ] = -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ]; /* QPoly*/
         Q[ k ] = -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ]; /* QPoly*/
 #else
-        P[ k ] = SKP_LSHIFT( -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ], QPoly - 16 ); /* QPoly */
-        Q[ k ] = SKP_LSHIFT( -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ], QPoly - 16 ); /* QPoly */
+        P[ k ] = silk_LSHIFT( -a_Q16[ dd - k - 1 ] - a_Q16[ dd + k ], QPoly - 16 ); /* QPoly */
+        Q[ k ] = silk_LSHIFT( -a_Q16[ dd - k - 1 ] + a_Q16[ dd + k ], QPoly - 16 ); /* QPoly */
 #endif
     }
 
@@ -140,7 +140,7 @@
     PQ[ 0 ] = P;
     PQ[ 1 ] = Q;
 
-    dd = SKP_RSHIFT( d, 1 );
+    dd = silk_RSHIFT( d, 1 );
 
     silk_A2NLSF_init( a_Q16, P, Q, dd );
 
@@ -182,7 +182,7 @@
 #endif
             for( m = 0; m < BIN_DIV_STEPS_A2NLSF_FIX; m++ ) {
                 /* Evaluate polynomial */
-                xmid = SKP_RSHIFT_ROUND( xlo + xhi, 1 );
+                xmid = silk_RSHIFT_ROUND( xlo + xhi, 1 );
                 ymid = silk_A2NLSF_eval_poly( p, xmid, dd );
 
                 /* Detect zero crossing */
@@ -195,33 +195,33 @@
                     xlo = xmid;
                     ylo = ymid;
 #if OVERSAMPLE_COSINE_TABLE
-                    ffrac = SKP_ADD_RSHIFT( ffrac,  64, m );
+                    ffrac = silk_ADD_RSHIFT( ffrac,  64, m );
 #else
-                    ffrac = SKP_ADD_RSHIFT( ffrac, 128, m );
+                    ffrac = silk_ADD_RSHIFT( ffrac, 128, m );
 #endif
                 }
             }
 
             /* Interpolate */
-            if( SKP_abs( ylo ) < 65536 ) {
+            if( silk_abs( ylo ) < 65536 ) {
                 /* Avoid dividing by zero */
                 den = ylo - yhi;
-                nom = SKP_LSHIFT( ylo, 8 - BIN_DIV_STEPS_A2NLSF_FIX ) + SKP_RSHIFT( den, 1 );
+                nom = silk_LSHIFT( ylo, 8 - BIN_DIV_STEPS_A2NLSF_FIX ) + silk_RSHIFT( den, 1 );
                 if( den != 0 ) {
-                    ffrac += SKP_DIV32( nom, den );
+                    ffrac += silk_DIV32( nom, den );
                 }
             } else {
                 /* No risk of dividing by zero because abs(ylo - yhi) >= abs(ylo) >= 65536 */
-                ffrac += SKP_DIV32( ylo, SKP_RSHIFT( ylo - yhi, 8 - BIN_DIV_STEPS_A2NLSF_FIX ) );
+                ffrac += silk_DIV32( ylo, silk_RSHIFT( ylo - yhi, 8 - BIN_DIV_STEPS_A2NLSF_FIX ) );
             }
 #if OVERSAMPLE_COSINE_TABLE
-            NLSF[ root_ix ] = (opus_int16)SKP_min_32( SKP_LSHIFT( (opus_int32)k, 7 ) + ffrac, SKP_int16_MAX );
+            NLSF[ root_ix ] = (opus_int16)silk_min_32( silk_LSHIFT( (opus_int32)k, 7 ) + ffrac, silk_int16_MAX );
 #else
-            NLSF[ root_ix ] = (opus_int16)SKP_min_32( SKP_LSHIFT( (opus_int32)k, 8 ) + ffrac, SKP_int16_MAX );
+            NLSF[ root_ix ] = (opus_int16)silk_min_32( silk_LSHIFT( (opus_int32)k, 8 ) + ffrac, silk_int16_MAX );
 #endif
 
-            SKP_assert( NLSF[ root_ix ] >=     0 );
-            SKP_assert( NLSF[ root_ix ] <= 32767 );
+            silk_assert( NLSF[ root_ix ] >=     0 );
+            silk_assert( NLSF[ root_ix ] <= 32767 );
 
             root_ix++;        /* Next root */
             if( root_ix >= d ) {
@@ -239,7 +239,7 @@
 #else
             xlo = silk_LSFCosTab_FIX_Q12[ k - 1 ]; /* Q12*/
 #endif
-            ylo = SKP_LSHIFT( 1 - ( root_ix & 2 ), 12 );
+            ylo = silk_LSHIFT( 1 - ( root_ix & 2 ), 12 );
         } else {
             /* Increment loop counter */
             k++;
@@ -254,15 +254,15 @@
                 i++;
                 if( i > MAX_ITERATIONS_A2NLSF_FIX ) {
                     /* Set NLSFs to white spectrum and exit */
-                    NLSF[ 0 ] = (opus_int16)SKP_DIV32_16( 1 << 15, d + 1 );
+                    NLSF[ 0 ] = (opus_int16)silk_DIV32_16( 1 << 15, d + 1 );
                     for( k = 1; k < d; k++ ) {
-                        NLSF[ k ] = (opus_int16)SKP_SMULBB( k + 1, NLSF[ 0 ] );
+                        NLSF[ k ] = (opus_int16)silk_SMULBB( k + 1, NLSF[ 0 ] );
                     }
                     return;
                 }
 
                 /* Error: Apply progressively more bandwidth expansion and run again */
-                silk_bwexpander_32( a_Q16, d, 65536 - SKP_SMULBB( 10 + i, i ) ); /* 10_Q16 = 0.00015*/
+                silk_bwexpander_32( a_Q16, d, 65536 - silk_SMULBB( 10 + i, i ) ); /* 10_Q16 = 0.00015*/
 
                 silk_A2NLSF_init( a_Q16, P, Q, dd );
                 p = P;                            /* Pointer to polynomial */
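
silk_A2NLSF_eval_poly above is a Horner evaluation of the even/odd polynomial at a point taken from the Q12 cosine table (hence the shift up by 4 to Q16), with each multiply-accumulate done through silk_SMLAWW, i.e. a + ( ( b * c ) >> 16 ) on a 64-bit product. A hedged plain-C equivalent; demo_eval_poly_Q16 is an illustrative name:

#include <stdint.h>

/* Horner evaluation of p[ 0 ] + p[ 1 ]*x + ... + p[ dd ]*x^dd with x in Q16;
 * each step mirrors silk_SMLAWW( p[ n ], y, x_Q16 ) */
static int32_t demo_eval_poly_Q16( const int32_t *p, int32_t x_Q16, int dd )
{
    int32_t y32 = p[ dd ];
    for( int n = dd - 1; n >= 0; n-- ) {
        y32 = p[ n ] + (int32_t)( ( (int64_t)y32 * x_Q16 ) >> 16 );
    }
    return y32;
}
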
diff --git a/silk/silk_CNG.c b/silk/silk_CNG.c
index e2983eb..b746420 100644
--- a/silk/silk_CNG.c
+++ b/silk/silk_CNG.c
@@ -45,16 +45,16 @@
 
     exc_mask = CNG_BUF_MASK_MAX;
     while( exc_mask > length ) {
-        exc_mask = SKP_RSHIFT( exc_mask, 1 );
+        exc_mask = silk_RSHIFT( exc_mask, 1 );
     }
 
     seed = *rand_seed;
     for( i = 0; i < length; i++ ) {
-        seed = SKP_RAND( seed );
-        idx = ( opus_int )( SKP_RSHIFT( seed, 24 ) & exc_mask );
-        SKP_assert( idx >= 0 );
-        SKP_assert( idx <= CNG_BUF_MASK_MAX );
-        residual_Q10[ i ] = ( opus_int16 )SKP_SAT16( SKP_SMULWW( exc_buf_Q10[ idx ], Gain_Q16 ) );
+        seed = silk_RAND( seed );
+        idx = ( opus_int )( silk_RSHIFT( seed, 24 ) & exc_mask );
+        silk_assert( idx >= 0 );
+        silk_assert( idx <= CNG_BUF_MASK_MAX );
+        residual_Q10[ i ] = ( opus_int16 )silk_SAT16( silk_SMULWW( exc_buf_Q10[ idx ], Gain_Q16 ) );
     }
     *rand_seed = seed;
 }
@@ -65,7 +65,7 @@
 {
     opus_int i, NLSF_step_Q15, NLSF_acc_Q15;
 
-    NLSF_step_Q15 = SKP_DIV32_16( SKP_int16_MAX, psDec->LPC_order + 1 );
+    NLSF_step_Q15 = silk_DIV32_16( silk_int16_MAX, psDec->LPC_order + 1 );
     NLSF_acc_Q15 = 0;
     for( i = 0; i < psDec->LPC_order; i++ ) {
         NLSF_acc_Q15 += NLSF_step_Q15;
@@ -100,7 +100,7 @@
 
         /* Smoothing of LSF's  */
         for( i = 0; i < psDec->LPC_order; i++ ) {
-            psCNG->CNG_smth_NLSF_Q15[ i ] += SKP_SMULWB( psDec->prevNLSF_Q15[ i ] - psCNG->CNG_smth_NLSF_Q15[ i ], CNG_NLSF_SMTH_Q16 );
+            psCNG->CNG_smth_NLSF_Q15[ i ] += silk_SMULWB( psDec->prevNLSF_Q15[ i ] - psCNG->CNG_smth_NLSF_Q15[ i ], CNG_NLSF_SMTH_Q16 );
         }
         /* Find the subframe with the highest gain */
         max_Gain_Q16 = 0;
@@ -112,12 +112,12 @@
             }
         }
         /* Update CNG excitation buffer with excitation from this subframe */
-        SKP_memmove( &psCNG->CNG_exc_buf_Q10[ psDec->subfr_length ], psCNG->CNG_exc_buf_Q10, ( psDec->nb_subfr - 1 ) * psDec->subfr_length * sizeof( opus_int32 ) );
-        SKP_memcpy(   psCNG->CNG_exc_buf_Q10, &psDec->exc_Q10[ subfr * psDec->subfr_length ], psDec->subfr_length * sizeof( opus_int32 ) );
+        silk_memmove( &psCNG->CNG_exc_buf_Q10[ psDec->subfr_length ], psCNG->CNG_exc_buf_Q10, ( psDec->nb_subfr - 1 ) * psDec->subfr_length * sizeof( opus_int32 ) );
+        silk_memcpy(   psCNG->CNG_exc_buf_Q10, &psDec->exc_Q10[ subfr * psDec->subfr_length ], psDec->subfr_length * sizeof( opus_int32 ) );
 
         /* Smooth gains */
         for( i = 0; i < psDec->nb_subfr; i++ ) {
-            psCNG->CNG_smth_Gain_Q16 += SKP_SMULWB( psDecCtrl->Gains_Q16[ i ] - psCNG->CNG_smth_Gain_Q16, CNG_GAIN_SMTH_Q16 );
+            psCNG->CNG_smth_Gain_Q16 += silk_SMULWB( psDecCtrl->Gains_Q16[ i ] - psCNG->CNG_smth_Gain_Q16, CNG_GAIN_SMTH_Q16 );
         }
     }
 
@@ -131,30 +131,30 @@
         silk_NLSF2A( A_Q12, psCNG->CNG_smth_NLSF_Q15, psDec->LPC_order );
 
         /* Generate CNG signal, by synthesis filtering */
-        SKP_memcpy( CNG_sig_Q10, psCNG->CNG_synth_state, MAX_LPC_ORDER * sizeof( opus_int32 ) );
+        silk_memcpy( CNG_sig_Q10, psCNG->CNG_synth_state, MAX_LPC_ORDER * sizeof( opus_int32 ) );
         for( i = 0; i < length; i++ ) {
             /* Partially unrolled */
-            sum_Q6 = SKP_SMULWB(         CNG_sig_Q10[ MAX_LPC_ORDER + i -  1 ], A_Q12[ 0 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  2 ], A_Q12[ 1 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  3 ], A_Q12[ 2 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  4 ], A_Q12[ 3 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  5 ], A_Q12[ 4 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  6 ], A_Q12[ 5 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  7 ], A_Q12[ 6 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  8 ], A_Q12[ 7 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  9 ], A_Q12[ 8 ] );
-            sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i - 10 ], A_Q12[ 9 ] );
+            sum_Q6 = silk_SMULWB(         CNG_sig_Q10[ MAX_LPC_ORDER + i -  1 ], A_Q12[ 0 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  2 ], A_Q12[ 1 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  3 ], A_Q12[ 2 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  4 ], A_Q12[ 3 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  5 ], A_Q12[ 4 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  6 ], A_Q12[ 5 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  7 ], A_Q12[ 6 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  8 ], A_Q12[ 7 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i -  9 ], A_Q12[ 8 ] );
+            sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i - 10 ], A_Q12[ 9 ] );
             for( j = 10; j < psDec->LPC_order; j++ ) {
-                sum_Q6 = SKP_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i - j - 1 ], A_Q12[ j ] );
+                sum_Q6 = silk_SMLAWB( sum_Q6, CNG_sig_Q10[ MAX_LPC_ORDER + i - j - 1 ], A_Q12[ j ] );
             }
 
             /* Update states */
-            CNG_sig_Q10[ MAX_LPC_ORDER + i ] = SKP_ADD_LSHIFT( CNG_sig_Q10[ MAX_LPC_ORDER + i ], sum_Q6, 4 );
+            CNG_sig_Q10[ MAX_LPC_ORDER + i ] = silk_ADD_LSHIFT( CNG_sig_Q10[ MAX_LPC_ORDER + i ], sum_Q6, 4 );
 
-            frame[ i ] = SKP_ADD_SAT16( frame[ i ], SKP_RSHIFT_ROUND( sum_Q6, 6 ) );
+            frame[ i ] = silk_ADD_SAT16( frame[ i ], silk_RSHIFT_ROUND( sum_Q6, 6 ) );
         }
-        SKP_memcpy( psCNG->CNG_synth_state, &CNG_sig_Q10[ length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );
+        silk_memcpy( psCNG->CNG_synth_state, &CNG_sig_Q10[ length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );
     } else {
-        SKP_memset( psCNG->CNG_synth_state, 0, psDec->LPC_order *  sizeof( opus_int32 ) );
+        silk_memset( psCNG->CNG_synth_state, 0, psDec->LPC_order *  sizeof( opus_int32 ) );
     }
 }
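
The CNG_smth_NLSF_Q15 and CNG_smth_Gain_Q16 updates above are first-order smoothers of the form smth += coef * ( target - smth ), with the coefficient in Q16 and the multiply performed by silk_SMULWB, i.e. ( a * (int16)b ) >> 16. A hedged plain-C step, assuming the smoothing constants fit in 16 bits:

#include <stdint.h>

/* One smoothing step: smth += coef_Q16 * ( target - smth ), 0 < coef_Q16 < 32768 */
static int32_t demo_smooth_step( int32_t smth, int32_t target, int32_t coef_Q16 )
{
    return smth + (int32_t)( ( (int64_t)( target - smth ) * (int16_t)coef_Q16 ) >> 16 );
}
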
diff --git a/silk/silk_HP_variable_cutoff.c b/silk/silk_HP_variable_cutoff.c
index f76767c..5b8d538 100644
--- a/silk/silk_HP_variable_cutoff.c
+++ b/silk/silk_HP_variable_cutoff.c
@@ -48,31 +48,31 @@
    /* Adaptive cutoff frequency: estimate low end of pitch frequency range */
    if( psEncC1->prevSignalType == TYPE_VOICED ) {
       /* difference, in log domain */
-      pitch_freq_Hz_Q16 = SKP_DIV32_16( SKP_LSHIFT( SKP_MUL( psEncC1->fs_kHz, 1000 ), 16 ), psEncC1->prevLag );
+      pitch_freq_Hz_Q16 = silk_DIV32_16( silk_LSHIFT( silk_MUL( psEncC1->fs_kHz, 1000 ), 16 ), psEncC1->prevLag );
       pitch_freq_log_Q7 = silk_lin2log( pitch_freq_Hz_Q16 ) - ( 16 << 7 );
 
       /* adjustment based on quality */
       quality_Q15 = psEncC1->input_quality_bands_Q15[ 0 ];
-      pitch_freq_log_Q7 = SKP_SMLAWB( pitch_freq_log_Q7, SKP_SMULWB( SKP_LSHIFT( -quality_Q15, 2 ), quality_Q15 ),
+      pitch_freq_log_Q7 = silk_SMLAWB( pitch_freq_log_Q7, silk_SMULWB( silk_LSHIFT( -quality_Q15, 2 ), quality_Q15 ),
             pitch_freq_log_Q7 - ( silk_lin2log( SILK_FIX_CONST( VARIABLE_HP_MIN_CUTOFF_HZ, 16 ) ) - ( 16 << 7 ) ) );
 
       /* delta_freq = pitch_freq_log - psEnc->variable_HP_smth1; */
-      delta_freq_Q7 = pitch_freq_log_Q7 - SKP_RSHIFT( psEncC1->variable_HP_smth1_Q15, 8 );
+      delta_freq_Q7 = pitch_freq_log_Q7 - silk_RSHIFT( psEncC1->variable_HP_smth1_Q15, 8 );
       if( delta_freq_Q7 < 0 ) {
          /* less smoothing for decreasing pitch frequency, to track something close to the minimum */
-         delta_freq_Q7 = SKP_MUL( delta_freq_Q7, 3 );
+         delta_freq_Q7 = silk_MUL( delta_freq_Q7, 3 );
       }
 
       /* limit delta, to reduce impact of outliers in pitch estimation */
-      delta_freq_Q7 = SKP_LIMIT_32( delta_freq_Q7, -SILK_FIX_CONST( VARIABLE_HP_MAX_DELTA_FREQ, 7 ), SILK_FIX_CONST( VARIABLE_HP_MAX_DELTA_FREQ, 7 ) );
+      delta_freq_Q7 = silk_LIMIT_32( delta_freq_Q7, -SILK_FIX_CONST( VARIABLE_HP_MAX_DELTA_FREQ, 7 ), SILK_FIX_CONST( VARIABLE_HP_MAX_DELTA_FREQ, 7 ) );
 
       /* update smoother */
-      psEncC1->variable_HP_smth1_Q15 = SKP_SMLAWB( psEncC1->variable_HP_smth1_Q15,
-            SKP_SMULBB( psEncC1->speech_activity_Q8, delta_freq_Q7 ), SILK_FIX_CONST( VARIABLE_HP_SMTH_COEF1, 16 ) );
+      psEncC1->variable_HP_smth1_Q15 = silk_SMLAWB( psEncC1->variable_HP_smth1_Q15,
+            silk_SMULBB( psEncC1->speech_activity_Q8, delta_freq_Q7 ), SILK_FIX_CONST( VARIABLE_HP_SMTH_COEF1, 16 ) );
 
       /* limit frequency range */
-      psEncC1->variable_HP_smth1_Q15 = SKP_LIMIT_32( psEncC1->variable_HP_smth1_Q15,
-            SKP_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 ),
-            SKP_LSHIFT( silk_lin2log( VARIABLE_HP_MAX_CUTOFF_HZ ), 8 ) );
+      psEncC1->variable_HP_smth1_Q15 = silk_LIMIT_32( psEncC1->variable_HP_smth1_Q15,
+            silk_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 ),
+            silk_LSHIFT( silk_lin2log( VARIABLE_HP_MAX_CUTOFF_HZ ), 8 ) );
    }
 }
diff --git a/silk/silk_Inlines.h b/silk/silk_Inlines.h
index 0d5eb55..ec97cfb 100644
--- a/silk/silk_Inlines.h
+++ b/silk/silk_Inlines.h
@@ -42,7 +42,7 @@
 {
     opus_int32 in_upper;
 
-    in_upper = (opus_int32)SKP_RSHIFT64(in, 32);
+    in_upper = (opus_int32)silk_RSHIFT64(in, 32);
     if (in_upper == 0) {
         /* Search in the lower 32 bits */
         return 32 + silk_CLZ32( (opus_int32) in );
@@ -83,10 +83,10 @@
     }
 
     /* get scaling right */
-    y >>= SKP_RSHIFT(lz, 1);
+    y >>= silk_RSHIFT(lz, 1);
 
     /* increment using fractional part of input */
-    y = SKP_SMLAWB(y, y, SKP_SMULBB(213, frac_Q7));
+    y = silk_SMLAWB(y, y, silk_SMULBB(213, frac_Q7));
 
     return y;
 }
@@ -101,34 +101,34 @@
     opus_int   a_headrm, b_headrm, lshift;
     opus_int32 b32_inv, a32_nrm, b32_nrm, result;
 
-    SKP_assert( b32 != 0 );
-    SKP_assert( Qres >= 0 );
+    silk_assert( b32 != 0 );
+    silk_assert( Qres >= 0 );
 
     /* Compute number of bits head room and normalize inputs */
-    a_headrm = silk_CLZ32( SKP_abs(a32) ) - 1;
-    a32_nrm = SKP_LSHIFT(a32, a_headrm);                                    /* Q: a_headrm                    */
-    b_headrm = silk_CLZ32( SKP_abs(b32) ) - 1;
-    b32_nrm = SKP_LSHIFT(b32, b_headrm);                                    /* Q: b_headrm                    */
+    a_headrm = silk_CLZ32( silk_abs(a32) ) - 1;
+    a32_nrm = silk_LSHIFT(a32, a_headrm);                                    /* Q: a_headrm                    */
+    b_headrm = silk_CLZ32( silk_abs(b32) ) - 1;
+    b32_nrm = silk_LSHIFT(b32, b_headrm);                                    /* Q: b_headrm                    */
 
     /* Inverse of b32, with 14 bits of precision */
-    b32_inv = SKP_DIV32_16( SKP_int32_MAX >> 2, SKP_RSHIFT(b32_nrm, 16) );  /* Q: 29 + 16 - b_headrm        */
+    b32_inv = silk_DIV32_16( silk_int32_MAX >> 2, silk_RSHIFT(b32_nrm, 16) );  /* Q: 29 + 16 - b_headrm        */
 
     /* First approximation */
-    result = SKP_SMULWB(a32_nrm, b32_inv);                                  /* Q: 29 + a_headrm - b_headrm    */
+    result = silk_SMULWB(a32_nrm, b32_inv);                                  /* Q: 29 + a_headrm - b_headrm    */
 
     /* Compute residual by subtracting product of denominator and first approximation */
-    a32_nrm -= SKP_LSHIFT( SKP_SMMUL(b32_nrm, result), 3 );           /* Q: a_headrm                    */
+    a32_nrm -= silk_LSHIFT( silk_SMMUL(b32_nrm, result), 3 );           /* Q: a_headrm                    */
 
     /* Refinement */
-    result = SKP_SMLAWB(result, a32_nrm, b32_inv);                          /* Q: 29 + a_headrm - b_headrm    */
+    result = silk_SMLAWB(result, a32_nrm, b32_inv);                          /* Q: 29 + a_headrm - b_headrm    */
 
     /* Convert to Qres domain */
     lshift = 29 + a_headrm - b_headrm - Qres;
     if( lshift < 0 ) {
-        return SKP_LSHIFT_SAT32(result, -lshift);
+        return silk_LSHIFT_SAT32(result, -lshift);
     } else {
         if( lshift < 32){
-            return SKP_RSHIFT(result, lshift);
+            return silk_RSHIFT(result, lshift);
         } else {
             /* Avoid undefined result */
             return 0;
@@ -145,32 +145,32 @@
     opus_int   b_headrm, lshift;
     opus_int32 b32_inv, b32_nrm, err_Q32, result;
 
-    SKP_assert( b32 != 0 );
-    SKP_assert( Qres > 0 );
+    silk_assert( b32 != 0 );
+    silk_assert( Qres > 0 );
 
     /* Compute number of bits head room and normalize input */
-    b_headrm = silk_CLZ32( SKP_abs(b32) ) - 1;
-    b32_nrm = SKP_LSHIFT(b32, b_headrm);                                    /* Q: b_headrm                */
+    b_headrm = silk_CLZ32( silk_abs(b32) ) - 1;
+    b32_nrm = silk_LSHIFT(b32, b_headrm);                                    /* Q: b_headrm                */
 
     /* Inverse of b32, with 14 bits of precision */
-    b32_inv = SKP_DIV32_16( SKP_int32_MAX >> 2, SKP_RSHIFT(b32_nrm, 16) );  /* Q: 29 + 16 - b_headrm    */
+    b32_inv = silk_DIV32_16( silk_int32_MAX >> 2, silk_RSHIFT(b32_nrm, 16) );  /* Q: 29 + 16 - b_headrm    */
 
     /* First approximation */
-    result = SKP_LSHIFT(b32_inv, 16);                                       /* Q: 61 - b_headrm            */
+    result = silk_LSHIFT(b32_inv, 16);                                       /* Q: 61 - b_headrm            */
 
     /* Compute residual by subtracting product of denominator and first approximation from one */
-    err_Q32 = SKP_LSHIFT( (1<<29)-SKP_SMULWB(b32_nrm, b32_inv), 3 );         /* Q32                        */
+    err_Q32 = silk_LSHIFT( (1<<29)-silk_SMULWB(b32_nrm, b32_inv), 3 );         /* Q32                        */
 
     /* Refinement */
-    result = SKP_SMLAWW(result, err_Q32, b32_inv);                          /* Q: 61 - b_headrm            */
+    result = silk_SMLAWW(result, err_Q32, b32_inv);                          /* Q: 61 - b_headrm            */
 
     /* Convert to Qres domain */
     lshift = 61 - b_headrm - Qres;
     if( lshift <= 0 ) {
-        return SKP_LSHIFT_SAT32(result, -lshift);
+        return silk_LSHIFT_SAT32(result, -lshift);
     } else {
         if( lshift < 32){
-            return SKP_RSHIFT(result, lshift);
+            return silk_RSHIFT(result, lshift);
         }else{
             /* Avoid undefined result */
             return 0;
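
silk_DIV32_varQ and silk_INVERSE32_varQ above avoid a hardware divide by building a 14-bit reciprocal of the normalized denominator and applying one refinement step; what they approximate is simply a/b (or 1/b) scaled into the requested Q domain. A floating-point reference (ignoring the saturation the real code performs) that can be used to sanity-check results:

#include <math.h>

/* Reference value for silk_DIV32_varQ( a32, b32, Qres ): ( a32 / b32 ) * 2^Qres */
static int demo_DIV32_varQ_ref( int a32, int b32, int Qres )
{
    return (int)ldexp( (double)a32 / (double)b32, Qres );
}
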
diff --git a/silk/silk_LPC_analysis_filter.c b/silk/silk_LPC_analysis_filter.c
index 2a4b8e0..7859d73 100644
--- a/silk/silk_LPC_analysis_filter.c
+++ b/silk/silk_LPC_analysis_filter.c
@@ -50,34 +50,34 @@
     opus_int32       out32_Q12, out32;
     const opus_int16 *in_ptr;
 
-    SKP_assert( d >= 6 );
-    SKP_assert( (d & 1) == 0 );
-    SKP_assert( d <= len );
+    silk_assert( d >= 6 );
+    silk_assert( (d & 1) == 0 );
+    silk_assert( d <= len );
 
     for ( ix = d; ix < len; ix++) {
         in_ptr = &in[ ix - 1 ];
 
-        out32_Q12 = SKP_SMULBB(            in_ptr[  0 ], B[ 0 ] );
-        out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -1 ], B[ 1 ] );
-        out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -2 ], B[ 2 ] );
-        out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -3 ], B[ 3 ] );
-        out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -4 ], B[ 4 ] );
-        out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -5 ], B[ 5 ] );
+        out32_Q12 = silk_SMULBB(            in_ptr[  0 ], B[ 0 ] );
+        out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -1 ], B[ 1 ] );
+        out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -2 ], B[ 2 ] );
+        out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -3 ], B[ 3 ] );
+        out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -4 ], B[ 4 ] );
+        out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -5 ], B[ 5 ] );
         for( j = 6; j < d; j += 2 ) {
-            out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -j     ], B[ j     ] );
-            out32_Q12 = SKP_SMLABB( out32_Q12, in_ptr[ -j - 1 ], B[ j + 1 ] );
+            out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -j     ], B[ j     ] );
+            out32_Q12 = silk_SMLABB( out32_Q12, in_ptr[ -j - 1 ], B[ j + 1 ] );
         }
 
         /* Subtract prediction */
-        out32_Q12 = SKP_SUB32( SKP_LSHIFT( (opus_int32)in_ptr[ 1 ], 12 ), out32_Q12 );
+        out32_Q12 = silk_SUB32( silk_LSHIFT( (opus_int32)in_ptr[ 1 ], 12 ), out32_Q12 );
 
         /* Scale to Q0 */
-        out32 = SKP_RSHIFT_ROUND( out32_Q12, 12 );
+        out32 = silk_RSHIFT_ROUND( out32_Q12, 12 );
 
         /* Saturate output */
-        out[ ix ] = ( opus_int16 )SKP_SAT16( out32 );
+        out[ ix ] = ( opus_int16 )silk_SAT16( out32 );
     }
 
     /* Set first d output samples to zero */
-    SKP_memset( out, 0, d * sizeof( opus_int16 ) );
+    silk_memset( out, 0, d * sizeof( opus_int16 ) );
 }
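
For reference, a hedged plain-C restatement of the whitening filter above: the short-term prediction from the d previous samples (coefficients in Q12) is subtracted from the input, rounded back to Q0, saturated to 16 bits, and the first d outputs are zeroed. demo_* names are illustrative.

#include <stdint.h>

static int16_t demo_sat16( int32_t x )
{
    return (int16_t)( x > 32767 ? 32767 : ( x < -32768 ? -32768 : x ) );
}

static void demo_LPC_analysis_filter( int16_t *out, const int16_t *in,
                                      const int16_t *B_Q12, int len, int d )
{
    for( int ix = d; ix < len; ix++ ) {
        int32_t pred_Q12 = 0;
        for( int j = 0; j < d; j++ ) {
            pred_Q12 += (int32_t)in[ ix - 1 - j ] * B_Q12[ j ];   /* Q0 * Q12 */
        }
        int32_t res_Q12 = ( (int32_t)in[ ix ] << 12 ) - pred_Q12; /* subtract prediction */
        out[ ix ] = demo_sat16( ( res_Q12 + 2048 ) >> 12 );       /* round to Q0, saturate */
    }
    for( int i = 0; i < d; i++ ) {
        out[ i ] = 0;                                             /* first d samples zeroed */
    }
}
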
diff --git a/silk/silk_LPC_inv_pred_gain.c b/silk/silk_LPC_inv_pred_gain.c
index 820bb93..42765bd 100644
--- a/silk/silk_LPC_inv_pred_gain.c
+++ b/silk/silk_LPC_inv_pred_gain.c
@@ -57,21 +57,21 @@
         }
 
         /* Set RC equal to negated AR coef */
-        rc_Q31 = -SKP_LSHIFT( Anew_QA[ k ], 31 - QA );
+        rc_Q31 = -silk_LSHIFT( Anew_QA[ k ], 31 - QA );
 
         /* rc_mult1_Q30 range: [ 1 : 2^30-1 ] */
-        rc_mult1_Q30 = ( SKP_int32_MAX >> 1 ) - SKP_SMMUL( rc_Q31, rc_Q31 );
-        SKP_assert( rc_mult1_Q30 > ( 1 << 15 ) );                   /* reduce A_LIMIT if fails */
-        SKP_assert( rc_mult1_Q30 < ( 1 << 30 ) );
+        rc_mult1_Q30 = ( silk_int32_MAX >> 1 ) - silk_SMMUL( rc_Q31, rc_Q31 );
+        silk_assert( rc_mult1_Q30 > ( 1 << 15 ) );                   /* reduce A_LIMIT if fails */
+        silk_assert( rc_mult1_Q30 < ( 1 << 30 ) );
 
-        /* rc_mult2_Q16 range: [ 2^16 : SKP_int32_MAX ] */
+        /* rc_mult2_Q16 range: [ 2^16 : silk_int32_MAX ] */
         rc_mult2_Q16 = silk_INVERSE32_varQ( rc_mult1_Q30, 46 );      /* 16 = 46 - 30 */
 
         /* Update inverse gain */
         /* invGain_Q30 range: [ 0 : 2^30 ] */
-        *invGain_Q30 = SKP_LSHIFT( SKP_SMMUL( *invGain_Q30, rc_mult1_Q30 ), 2 );
-        SKP_assert( *invGain_Q30 >= 0           );
-        SKP_assert( *invGain_Q30 <= ( 1 << 30 ) );
+        *invGain_Q30 = silk_LSHIFT( silk_SMMUL( *invGain_Q30, rc_mult1_Q30 ), 2 );
+        silk_assert( *invGain_Q30 >= 0           );
+        silk_assert( *invGain_Q30 <= ( 1 << 30 ) );
 
         /* Swap pointers */
         Aold_QA = Anew_QA;
@@ -79,10 +79,10 @@
 
         /* Update AR coefficient */
         headrm = silk_CLZ32( rc_mult2_Q16 ) - 1;
-        rc_mult2_Q16 = SKP_LSHIFT( rc_mult2_Q16, headrm );          /* Q: 16 + headrm */
+        rc_mult2_Q16 = silk_LSHIFT( rc_mult2_Q16, headrm );          /* Q: 16 + headrm */
         for( n = 0; n < k; n++ ) {
-            tmp_QA = Aold_QA[ n ] - SKP_LSHIFT( SKP_SMMUL( Aold_QA[ k - n - 1 ], rc_Q31 ), 1 );
-            Anew_QA[ n ] = SKP_LSHIFT( SKP_SMMUL( tmp_QA, rc_mult2_Q16 ), 16 - headrm );
+            tmp_QA = Aold_QA[ n ] - silk_LSHIFT( silk_SMMUL( Aold_QA[ k - n - 1 ], rc_Q31 ), 1 );
+            Anew_QA[ n ] = silk_LSHIFT( silk_SMMUL( tmp_QA, rc_mult2_Q16 ), 16 - headrm );
         }
     }
 
@@ -92,16 +92,16 @@
     }
 
     /* Set RC equal to negated AR coef */
-    rc_Q31 = -SKP_LSHIFT( Anew_QA[ 0 ], 31 - QA );
+    rc_Q31 = -silk_LSHIFT( Anew_QA[ 0 ], 31 - QA );
 
     /* Range: [ 1 : 2^30 ] */
-    rc_mult1_Q30 = ( SKP_int32_MAX >> 1 ) - SKP_SMMUL( rc_Q31, rc_Q31 );
+    rc_mult1_Q30 = ( silk_int32_MAX >> 1 ) - silk_SMMUL( rc_Q31, rc_Q31 );
 
     /* Update inverse gain */
     /* Range: [ 0 : 2^30 ] */
-    *invGain_Q30 = SKP_LSHIFT( SKP_SMMUL( *invGain_Q30, rc_mult1_Q30 ), 2 );
-    SKP_assert( *invGain_Q30 >= 0     );
-    SKP_assert( *invGain_Q30 <= 1<<30 );
+    *invGain_Q30 = silk_LSHIFT( silk_SMMUL( *invGain_Q30, rc_mult1_Q30 ), 2 );
+    silk_assert( *invGain_Q30 >= 0     );
+    silk_assert( *invGain_Q30 <= 1<<30 );
 
     return 0;
 }
@@ -121,7 +121,7 @@
 
     /* Increase Q domain of the AR coefficients */
     for( k = 0; k < order; k++ ) {
-        Anew_QA[ k ] = SKP_LSHIFT( (opus_int32)A_Q12[ k ], QA - 12 );
+        Anew_QA[ k ] = silk_LSHIFT( (opus_int32)A_Q12[ k ], QA - 12 );
     }
 
     return LPC_inverse_pred_gain_QA( invGain_Q30, Atmp_QA, order );
@@ -142,7 +142,7 @@
 
     /* Increase Q domain of the AR coefficients */
     for( k = 0; k < order; k++ ) {
-        Anew_QA[ k ] = SKP_RSHIFT_ROUND( A_Q24[ k ], 24 - QA );
+        Anew_QA[ k ] = silk_RSHIFT_ROUND( A_Q24[ k ], 24 - QA );
     }
 
     return LPC_inverse_pred_gain_QA( invGain_Q30, Atmp_QA, order );
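
A hedged floating-point restatement of the stability test above: the AR polynomial is stepped down one order at a time, each step divides by ( 1 - rc^2 ), the inverse prediction gain is the running product of those factors, and a reflection coefficient at or beyond unit magnitude flags an unstable filter. The real code additionally limits |rc| against A_LIMIT; this sketch only checks |rc| >= 1, and demo_* names are illustrative.

/* Returns 0 and writes the inverse prediction gain, or 1 if unstable; order <= 24 assumed */
static int demo_LPC_inverse_pred_gain( double *invGain, const double *A, int order )
{
    double a[ 24 ], tmp[ 24 ];
    int    k, n;

    for( k = 0; k < order; k++ ) {
        a[ k ] = A[ k ];
    }
    *invGain = 1.0;
    for( k = order - 1; k > 0; k-- ) {
        double rc   = -a[ k ];
        double mult = 1.0 - rc * rc;
        if( mult <= 0.0 ) {
            return 1;                                   /* |rc| >= 1: unstable */
        }
        *invGain *= mult;
        /* Step the polynomial down one order */
        for( n = 0; n < k; n++ ) {
            tmp[ n ] = ( a[ n ] - a[ k - n - 1 ] * rc ) / mult;
        }
        for( n = 0; n < k; n++ ) {
            a[ n ] = tmp[ n ];
        }
    }
    {
        double rc   = -a[ 0 ];
        double mult = 1.0 - rc * rc;
        if( mult <= 0.0 ) {
            return 1;
        }
        *invGain *= mult;
    }
    return 0;
}
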
diff --git a/silk/silk_LP_variable_cutoff.c b/silk/silk_LP_variable_cutoff.c
index 0c758a0..9b10f29 100644
--- a/silk/silk_LP_variable_cutoff.c
+++ b/silk/silk_LP_variable_cutoff.c
@@ -53,31 +53,31 @@
             if( fac_Q16 < 32768 ) { /* fac_Q16 is in range of a 16-bit int */
                 /* Piece-wise linear interpolation of B and A */
                 for( nb = 0; nb < TRANSITION_NB; nb++ ) {
-                    B_Q28[ nb ] = SKP_SMLAWB(
+                    B_Q28[ nb ] = silk_SMLAWB(
                         silk_Transition_LP_B_Q28[ ind     ][ nb ],
                         silk_Transition_LP_B_Q28[ ind + 1 ][ nb ] -
                         silk_Transition_LP_B_Q28[ ind     ][ nb ],
                         fac_Q16 );
                 }
                 for( na = 0; na < TRANSITION_NA; na++ ) {
-                    A_Q28[ na ] = SKP_SMLAWB(
+                    A_Q28[ na ] = silk_SMLAWB(
                         silk_Transition_LP_A_Q28[ ind     ][ na ],
                         silk_Transition_LP_A_Q28[ ind + 1 ][ na ] -
                         silk_Transition_LP_A_Q28[ ind     ][ na ],
                         fac_Q16 );
                 }
             } else { /* ( fac_Q16 - ( 1 << 16 ) ) is in range of a 16-bit int */
-                SKP_assert( fac_Q16 - ( 1 << 16 ) == SKP_SAT16( fac_Q16 - ( 1 << 16 ) ) );
+                silk_assert( fac_Q16 - ( 1 << 16 ) == silk_SAT16( fac_Q16 - ( 1 << 16 ) ) );
                 /* Piece-wise linear interpolation of B and A */
                 for( nb = 0; nb < TRANSITION_NB; nb++ ) {
-                    B_Q28[ nb ] = SKP_SMLAWB(
+                    B_Q28[ nb ] = silk_SMLAWB(
                         silk_Transition_LP_B_Q28[ ind + 1 ][ nb ],
                         silk_Transition_LP_B_Q28[ ind + 1 ][ nb ] -
                         silk_Transition_LP_B_Q28[ ind     ][ nb ],
                         fac_Q16 - ( 1 << 16 ) );
                 }
                 for( na = 0; na < TRANSITION_NA; na++ ) {
-                    A_Q28[ na ] = SKP_SMLAWB(
+                    A_Q28[ na ] = silk_SMLAWB(
                         silk_Transition_LP_A_Q28[ ind + 1 ][ na ],
                         silk_Transition_LP_A_Q28[ ind + 1 ][ na ] -
                         silk_Transition_LP_A_Q28[ ind     ][ na ],
@@ -85,12 +85,12 @@
                 }
             }
         } else {
-            SKP_memcpy( B_Q28, silk_Transition_LP_B_Q28[ ind ], TRANSITION_NB * sizeof( opus_int32 ) );
-            SKP_memcpy( A_Q28, silk_Transition_LP_A_Q28[ ind ], TRANSITION_NA * sizeof( opus_int32 ) );
+            silk_memcpy( B_Q28, silk_Transition_LP_B_Q28[ ind ], TRANSITION_NB * sizeof( opus_int32 ) );
+            silk_memcpy( A_Q28, silk_Transition_LP_A_Q28[ ind ], TRANSITION_NA * sizeof( opus_int32 ) );
         }
     } else {
-        SKP_memcpy( B_Q28, silk_Transition_LP_B_Q28[ TRANSITION_INT_NUM - 1 ], TRANSITION_NB * sizeof( opus_int32 ) );
-        SKP_memcpy( A_Q28, silk_Transition_LP_A_Q28[ TRANSITION_INT_NUM - 1 ], TRANSITION_NA * sizeof( opus_int32 ) );
+        silk_memcpy( B_Q28, silk_Transition_LP_B_Q28[ TRANSITION_INT_NUM - 1 ], TRANSITION_NB * sizeof( opus_int32 ) );
+        silk_memcpy( A_Q28, silk_Transition_LP_A_Q28[ TRANSITION_INT_NUM - 1 ], TRANSITION_NA * sizeof( opus_int32 ) );
     }
 }
 
@@ -107,30 +107,30 @@
     opus_int32   B_Q28[ TRANSITION_NB ], A_Q28[ TRANSITION_NA ], fac_Q16 = 0;
     opus_int     ind = 0;
 
-    SKP_assert( psLP->transition_frame_no >= 0 && psLP->transition_frame_no <= TRANSITION_FRAMES );
+    silk_assert( psLP->transition_frame_no >= 0 && psLP->transition_frame_no <= TRANSITION_FRAMES );
 
     /* Run filter if needed */
     if( psLP->mode != 0 ) {
         /* Calculate index and interpolation factor for interpolation */
 #if( TRANSITION_INT_STEPS == 64 )
-        fac_Q16 = SKP_LSHIFT( TRANSITION_FRAMES - psLP->transition_frame_no, 16 - 6 );
+        fac_Q16 = silk_LSHIFT( TRANSITION_FRAMES - psLP->transition_frame_no, 16 - 6 );
 #else
-        fac_Q16 = SKP_DIV32_16( SKP_LSHIFT( TRANSITION_FRAMES - psLP->transition_frame_no, 16 ), TRANSITION_FRAMES );
+        fac_Q16 = silk_DIV32_16( silk_LSHIFT( TRANSITION_FRAMES - psLP->transition_frame_no, 16 ), TRANSITION_FRAMES );
 #endif
-        ind      = SKP_RSHIFT( fac_Q16, 16 );
-        fac_Q16 -= SKP_LSHIFT( ind, 16 );
+        ind      = silk_RSHIFT( fac_Q16, 16 );
+        fac_Q16 -= silk_LSHIFT( ind, 16 );
 
-        SKP_assert( ind >= 0 );
-        SKP_assert( ind < TRANSITION_INT_NUM );
+        silk_assert( ind >= 0 );
+        silk_assert( ind < TRANSITION_INT_NUM );
 
         /* Interpolate filter coefficients */
         silk_LP_interpolate_filter_taps( B_Q28, A_Q28, ind, fac_Q16 );
 
         /* Update transition frame number for next frame */
-        psLP->transition_frame_no = SKP_LIMIT( psLP->transition_frame_no + psLP->mode, 0, TRANSITION_FRAMES );
+        psLP->transition_frame_no = silk_LIMIT( psLP->transition_frame_no + psLP->mode, 0, TRANSITION_FRAMES );
 
         /* ARMA low-pass filtering */
-        SKP_assert( TRANSITION_NB == 3 && TRANSITION_NA == 2 );
+        silk_assert( TRANSITION_NB == 3 && TRANSITION_NA == 2 );
         silk_biquad_alt( frame, B_Q28, A_Q28, psLP->In_LP_State, frame, frame_length, 1);
     }
 }
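
The hunks above interpolate the transition low-pass filter taps between two rows of the Q28 coefficient tables using a Q16 factor. Below is a minimal standalone sketch of that piece-wise linear interpolation; the smlawb() helper only models silk_SMLAWB as a + ((b * (opus_int16)c) >> 16) and the tap values are invented for illustration, so treat it as an approximation rather than the real macro.

    /* Sketch only, not part of the patch: interpolate one Q28 filter tap between
       table rows 'ind' and 'ind + 1' with a Q16 factor, as done above for
       B_Q28[] and A_Q28[]. */
    #include <stdio.h>

    typedef short     opus_int16;
    typedef int       opus_int32;
    typedef long long opus_int64;

    static opus_int32 smlawb( opus_int32 a, opus_int32 b, opus_int32 c )
    {
        /* simplified model of silk_SMLAWB: a + ( b * bottom 16 bits of c ) >> 16 */
        return a + (opus_int32)( ( (opus_int64)b * (opus_int16)c ) >> 16 );
    }

    int main( void )
    {
        opus_int32 tap_lo  = 200000000;   /* row 'ind',     Q28 (made-up value) */
        opus_int32 tap_hi  = 240000000;   /* row 'ind + 1', Q28 (made-up value) */
        opus_int32 fac_Q16 = 1 << 14;     /* 0.25 in Q16, fits in an opus_int16 */

        /* tap = tap_lo + fac_Q16 * ( tap_hi - tap_lo ), as in the hunk above */
        opus_int32 tap = smlawb( tap_lo, tap_hi - tap_lo, fac_Q16 );
        printf( "interpolated tap (Q28): %d\n", tap );   /* 210000000: a quarter of the way up */
        return 0;
    }
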
diff --git a/silk/silk_MacroCount.h b/silk/silk_MacroCount.h
index 9541776..98eacfb 100644
--- a/silk/silk_MacroCount.h
+++ b/silk/silk_MacroCount.h
@@ -29,16 +29,16 @@
 #define _SIGPROCFIX_API_MACROCOUNT_H_
 #include <stdio.h>
 
-#ifdef    SKP_MACRO_COUNT
+#ifdef    silk_MACRO_COUNT
 #define varDefine opus_int64 ops_count = 0;
 
 extern opus_int64 ops_count;
 
-static inline opus_int64 SKP_SaveCount(){
+static inline opus_int64 silk_SaveCount(){
     return(ops_count);
 }
 
-static inline opus_int64 SKP_SaveResetCount(){
+static inline opus_int64 silk_SaveResetCount(){
     opus_int64 ret;
 
     ret = ops_count;
@@ -46,112 +46,112 @@
     return(ret);
 }
 
-static inline SKP_PrintCount(){
+static inline void silk_PrintCount(){
     printf("ops_count = %d \n ", (opus_int32)ops_count);
 }
 
-#undef SKP_MUL
-static inline opus_int32 SKP_MUL(opus_int32 a32, opus_int32 b32){
+#undef silk_MUL
+static inline opus_int32 silk_MUL(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ops_count += 4;
     ret = a32 * b32;
     return ret;
 }
 
-#undef SKP_MUL_uint
-static inline opus_uint32 SKP_MUL_uint(opus_uint32 a32, opus_uint32 b32){
+#undef silk_MUL_uint
+static inline opus_uint32 silk_MUL_uint(opus_uint32 a32, opus_uint32 b32){
     opus_uint32 ret;
     ops_count += 4;
     ret = a32 * b32;
     return ret;
 }
-#undef SKP_MLA
-static inline opus_int32 SKP_MLA(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef silk_MLA
+static inline opus_int32 silk_MLA(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ops_count += 4;
     ret = a32 + b32 * c32;
     return ret;
 }
 
-#undef SKP_MLA_uint
-static inline opus_int32 SKP_MLA_uint(opus_uint32 a32, opus_uint32 b32, opus_uint32 c32){
+#undef silk_MLA_uint
+static inline opus_int32 silk_MLA_uint(opus_uint32 a32, opus_uint32 b32, opus_uint32 c32){
     opus_uint32 ret;
     ops_count += 4;
     ret = a32 + b32 * c32;
     return ret;
 }
 
-#undef SKP_SMULWB
-static inline opus_int32 SKP_SMULWB(opus_int32 a32, opus_int32 b32){
+#undef silk_SMULWB
+static inline opus_int32 silk_SMULWB(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ops_count += 5;
     ret = (a32 >> 16) * (opus_int32)((opus_int16)b32) + (((a32 & 0x0000FFFF) * (opus_int32)((opus_int16)b32)) >> 16);
     return ret;
 }
-#undef    SKP_SMLAWB
-static inline opus_int32 SKP_SMLAWB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef    silk_SMLAWB
+static inline opus_int32 silk_SMLAWB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ops_count += 5;
     ret = ((a32) + ((((b32) >> 16) * (opus_int32)((opus_int16)(c32))) + ((((b32) & 0x0000FFFF) * (opus_int32)((opus_int16)(c32))) >> 16)));
     return ret;
 }
 
-#undef SKP_SMULWT
-static inline opus_int32 SKP_SMULWT(opus_int32 a32, opus_int32 b32){
+#undef silk_SMULWT
+static inline opus_int32 silk_SMULWT(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ops_count += 4;
     ret = (a32 >> 16) * (b32 >> 16) + (((a32 & 0x0000FFFF) * (b32 >> 16)) >> 16);
     return ret;
 }
-#undef SKP_SMLAWT
-static inline opus_int32 SKP_SMLAWT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef silk_SMLAWT
+static inline opus_int32 silk_SMLAWT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ops_count += 4;
     ret = a32 + ((b32 >> 16) * (c32 >> 16)) + (((b32 & 0x0000FFFF) * ((c32 >> 16)) >> 16));
     return ret;
 }
 
-#undef SKP_SMULBB
-static inline opus_int32 SKP_SMULBB(opus_int32 a32, opus_int32 b32){
+#undef silk_SMULBB
+static inline opus_int32 silk_SMULBB(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ops_count += 1;
     ret = (opus_int32)((opus_int16)a32) * (opus_int32)((opus_int16)b32);
     return ret;
 }
-#undef SKP_SMLABB
-static inline opus_int32 SKP_SMLABB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef silk_SMLABB
+static inline opus_int32 silk_SMLABB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ops_count += 1;
     ret = a32 + (opus_int32)((opus_int16)b32) * (opus_int32)((opus_int16)c32);
     return ret;
 }
 
-#undef SKP_SMULBT
-static inline opus_int32 SKP_SMULBT(opus_int32 a32, opus_int32 b32 ){
+#undef silk_SMULBT
+static inline opus_int32 silk_SMULBT(opus_int32 a32, opus_int32 b32 ){
     opus_int32 ret;
     ops_count += 4;
     ret = ((opus_int32)((opus_int16)a32)) * (b32 >> 16);
     return ret;
 }
 
-#undef SKP_SMLABT
-static inline opus_int32 SKP_SMLABT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef silk_SMLABT
+static inline opus_int32 silk_SMLABT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ops_count += 1;
     ret = a32 + ((opus_int32)((opus_int16)b32)) * (c32 >> 16);
     return ret;
 }
 
-#undef SKP_SMULTT
-static inline opus_int32 SKP_SMULTT(opus_int32 a32, opus_int32 b32){
+#undef silk_SMULTT
+static inline opus_int32 silk_SMULTT(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ops_count += 1;
     ret = (a32 >> 16) * (b32 >> 16);
     return ret;
 }
 
-#undef    SKP_SMLATT
-static inline opus_int32 SKP_SMLATT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef    silk_SMLATT
+static inline opus_int32 silk_SMLATT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ops_count += 1;
     ret = a32 + (b32 >> 16) * (c32 >> 16);
@@ -160,41 +160,41 @@
 
 
 /* multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode)*/
-#undef    SKP_MLA_ovflw
-#define SKP_MLA_ovflw SKP_MLA
+#undef    silk_MLA_ovflw
+#define silk_MLA_ovflw silk_MLA
 
-#undef SKP_SMLABB_ovflw
-#define SKP_SMLABB_ovflw SKP_SMLABB
+#undef silk_SMLABB_ovflw
+#define silk_SMLABB_ovflw silk_SMLABB
 
-#undef SKP_SMLABT_ovflw
-#define SKP_SMLABT_ovflw SKP_SMLABT
+#undef silk_SMLABT_ovflw
+#define silk_SMLABT_ovflw silk_SMLABT
 
-#undef SKP_SMLATT_ovflw
-#define SKP_SMLATT_ovflw SKP_SMLATT
+#undef silk_SMLATT_ovflw
+#define silk_SMLATT_ovflw silk_SMLATT
 
-#undef SKP_SMLAWB_ovflw
-#define SKP_SMLAWB_ovflw SKP_SMLAWB
+#undef silk_SMLAWB_ovflw
+#define silk_SMLAWB_ovflw silk_SMLAWB
 
-#undef SKP_SMLAWT_ovflw
-#define SKP_SMLAWT_ovflw SKP_SMLAWT
+#undef silk_SMLAWT_ovflw
+#define silk_SMLAWT_ovflw silk_SMLAWT
 
-#undef SKP_SMULL
-static inline opus_int64 SKP_SMULL(opus_int32 a32, opus_int32 b32){
+#undef silk_SMULL
+static inline opus_int64 silk_SMULL(opus_int32 a32, opus_int32 b32){
     opus_int64 ret;
     ops_count += 8;
     ret = ((opus_int64)(a32) * /*(opus_int64)*/(b32));
     return ret;
 }
 
-#undef    SKP_SMLAL
-static inline opus_int64 SKP_SMLAL(opus_int64 a64, opus_int32 b32, opus_int32 c32){
+#undef    silk_SMLAL
+static inline opus_int64 silk_SMLAL(opus_int64 a64, opus_int32 b32, opus_int32 c32){
     opus_int64 ret;
     ops_count += 8;
     ret = a64 + ((opus_int64)(b32) * /*(opus_int64)*/(c32));
     return ret;
 }
-#undef    SKP_SMLALBB
-static inline opus_int64 SKP_SMLALBB(opus_int64 a64, opus_int16 b16, opus_int16 c16){
+#undef    silk_SMLALBB
+static inline opus_int64 silk_SMLALBB(opus_int64 a64, opus_int16 b16, opus_int16 c16){
     opus_int64 ret;
     ops_count += 4;
     ret = a64 + ((opus_int64)(b16) * /*(opus_int64)*/(c16));
@@ -251,430 +251,430 @@
     }
 }
 
-#undef SKP_DIV32
-static inline opus_int32 SKP_DIV32(opus_int32 a32, opus_int32 b32){
+#undef silk_DIV32
+static inline opus_int32 silk_DIV32(opus_int32 a32, opus_int32 b32){
     ops_count += 64;
     return a32 / b32;
 }
 
-#undef SKP_DIV32_16
-static inline opus_int32 SKP_DIV32_16(opus_int32 a32, opus_int32 b32){
+#undef silk_DIV32_16
+static inline opus_int32 silk_DIV32_16(opus_int32 a32, opus_int32 b32){
     ops_count += 32;
     return a32 / b32;
 }
 
-#undef SKP_SAT8
-static inline opus_int8 SKP_SAT8(opus_int64 a){
+#undef silk_SAT8
+static inline opus_int8 silk_SAT8(opus_int64 a){
     opus_int8 tmp;
     ops_count += 1;
-    tmp = (opus_int8)((a) > SKP_int8_MAX ? SKP_int8_MAX  : \
-                    ((a) < SKP_int8_MIN ? SKP_int8_MIN  : (a)));
+    tmp = (opus_int8)((a) > silk_int8_MAX ? silk_int8_MAX  : \
+                    ((a) < silk_int8_MIN ? silk_int8_MIN  : (a)));
     return(tmp);
 }
 
-#undef SKP_SAT16
-static inline opus_int16 SKP_SAT16(opus_int64 a){
+#undef silk_SAT16
+static inline opus_int16 silk_SAT16(opus_int64 a){
     opus_int16 tmp;
     ops_count += 1;
-    tmp = (opus_int16)((a) > SKP_int16_MAX ? SKP_int16_MAX  : \
-                     ((a) < SKP_int16_MIN ? SKP_int16_MIN  : (a)));
+    tmp = (opus_int16)((a) > silk_int16_MAX ? silk_int16_MAX  : \
+                     ((a) < silk_int16_MIN ? silk_int16_MIN  : (a)));
     return(tmp);
 }
-#undef SKP_SAT32
-static inline opus_int32 SKP_SAT32(opus_int64 a){
+#undef silk_SAT32
+static inline opus_int32 silk_SAT32(opus_int64 a){
     opus_int32 tmp;
     ops_count += 1;
-    tmp = (opus_int32)((a) > SKP_int32_MAX ? SKP_int32_MAX  : \
-                     ((a) < SKP_int32_MIN ? SKP_int32_MIN  : (a)));
+    tmp = (opus_int32)((a) > silk_int32_MAX ? silk_int32_MAX  : \
+                     ((a) < silk_int32_MIN ? silk_int32_MIN  : (a)));
     return(tmp);
 }
-#undef SKP_POS_SAT32
-static inline opus_int32 SKP_POS_SAT32(opus_int64 a){
+#undef silk_POS_SAT32
+static inline opus_int32 silk_POS_SAT32(opus_int64 a){
     opus_int32 tmp;
     ops_count += 1;
-    tmp = (opus_int32)((a) > SKP_int32_MAX ? SKP_int32_MAX : (a));
+    tmp = (opus_int32)((a) > silk_int32_MAX ? silk_int32_MAX : (a));
     return(tmp);
 }
 
-#undef SKP_ADD_POS_SAT8
-static inline opus_int8 SKP_ADD_POS_SAT8(opus_int64 a, opus_int64 b){
+#undef silk_ADD_POS_SAT8
+static inline opus_int8 silk_ADD_POS_SAT8(opus_int64 a, opus_int64 b){
     opus_int8 tmp;
     ops_count += 1;
-    tmp = (opus_int8)((((a)+(b)) & 0x80) ? SKP_int8_MAX  : ((a)+(b)));
+    tmp = (opus_int8)((((a)+(b)) & 0x80) ? silk_int8_MAX  : ((a)+(b)));
     return(tmp);
 }
-#undef SKP_ADD_POS_SAT16
-static inline opus_int16 SKP_ADD_POS_SAT16(opus_int64 a, opus_int64 b){
+#undef silk_ADD_POS_SAT16
+static inline opus_int16 silk_ADD_POS_SAT16(opus_int64 a, opus_int64 b){
     opus_int16 tmp;
     ops_count += 1;
-    tmp = (opus_int16)((((a)+(b)) & 0x8000) ? SKP_int16_MAX : ((a)+(b)));
+    tmp = (opus_int16)((((a)+(b)) & 0x8000) ? silk_int16_MAX : ((a)+(b)));
     return(tmp);
 }
 
-#undef SKP_ADD_POS_SAT32
-static inline opus_int32 SKP_ADD_POS_SAT32(opus_int64 a, opus_int64 b){
+#undef silk_ADD_POS_SAT32
+static inline opus_int32 silk_ADD_POS_SAT32(opus_int64 a, opus_int64 b){
     opus_int32 tmp;
     ops_count += 1;
-    tmp = (opus_int32)((((a)+(b)) & 0x80000000) ? SKP_int32_MAX : ((a)+(b)));
+    tmp = (opus_int32)((((a)+(b)) & 0x80000000) ? silk_int32_MAX : ((a)+(b)));
     return(tmp);
 }
 
-#undef SKP_ADD_POS_SAT64
-static inline opus_int64 SKP_ADD_POS_SAT64(opus_int64 a, opus_int64 b){
+#undef silk_ADD_POS_SAT64
+static inline opus_int64 silk_ADD_POS_SAT64(opus_int64 a, opus_int64 b){
     opus_int64 tmp;
     ops_count += 1;
-    tmp = ((((a)+(b)) & 0x8000000000000000LL) ? SKP_int64_MAX : ((a)+(b)));
+    tmp = ((((a)+(b)) & 0x8000000000000000LL) ? silk_int64_MAX : ((a)+(b)));
     return(tmp);
 }
 
-#undef    SKP_LSHIFT8
-static inline opus_int8 SKP_LSHIFT8(opus_int8 a, opus_int32 shift){
+#undef    silk_LSHIFT8
+static inline opus_int8 silk_LSHIFT8(opus_int8 a, opus_int32 shift){
     opus_int8 ret;
     ops_count += 1;
     ret = a << shift;
     return ret;
 }
-#undef    SKP_LSHIFT16
-static inline opus_int16 SKP_LSHIFT16(opus_int16 a, opus_int32 shift){
+#undef    silk_LSHIFT16
+static inline opus_int16 silk_LSHIFT16(opus_int16 a, opus_int32 shift){
     opus_int16 ret;
     ops_count += 1;
     ret = a << shift;
     return ret;
 }
-#undef    SKP_LSHIFT32
-static inline opus_int32 SKP_LSHIFT32(opus_int32 a, opus_int32 shift){
+#undef    silk_LSHIFT32
+static inline opus_int32 silk_LSHIFT32(opus_int32 a, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a << shift;
     return ret;
 }
-#undef    SKP_LSHIFT64
-static inline opus_int64 SKP_LSHIFT64(opus_int64 a, opus_int shift){
+#undef    silk_LSHIFT64
+static inline opus_int64 silk_LSHIFT64(opus_int64 a, opus_int shift){
     ops_count += 1;
     return a << shift;
 }
 
-#undef    SKP_LSHIFT_ovflw
-static inline opus_int32 SKP_LSHIFT_ovflw(opus_int32 a, opus_int32 shift){
+#undef    silk_LSHIFT_ovflw
+static inline opus_int32 silk_LSHIFT_ovflw(opus_int32 a, opus_int32 shift){
     ops_count += 1;
     return a << shift;
 }
 
-#undef    SKP_LSHIFT_uint
-static inline opus_uint32 SKP_LSHIFT_uint(opus_uint32 a, opus_int32 shift){
+#undef    silk_LSHIFT_uint
+static inline opus_uint32 silk_LSHIFT_uint(opus_uint32 a, opus_int32 shift){
     opus_uint32 ret;
     ops_count += 1;
     ret = a << shift;
     return ret;
 }
 
-#undef    SKP_RSHIFT8
-static inline opus_int8 SKP_RSHIFT8(opus_int8 a, opus_int32 shift){
+#undef    silk_RSHIFT8
+static inline opus_int8 silk_RSHIFT8(opus_int8 a, opus_int32 shift){
     ops_count += 1;
     return a >> shift;
 }
-#undef    SKP_RSHIFT16
-static inline opus_int16 SKP_RSHIFT16(opus_int16 a, opus_int32 shift){
+#undef    silk_RSHIFT16
+static inline opus_int16 silk_RSHIFT16(opus_int16 a, opus_int32 shift){
     ops_count += 1;
     return a >> shift;
 }
-#undef    SKP_RSHIFT32
-static inline opus_int32 SKP_RSHIFT32(opus_int32 a, opus_int32 shift){
+#undef    silk_RSHIFT32
+static inline opus_int32 silk_RSHIFT32(opus_int32 a, opus_int32 shift){
     ops_count += 1;
     return a >> shift;
 }
-#undef    SKP_RSHIFT64
-static inline opus_int64 SKP_RSHIFT64(opus_int64 a, opus_int64 shift){
+#undef    silk_RSHIFT64
+static inline opus_int64 silk_RSHIFT64(opus_int64 a, opus_int64 shift){
     ops_count += 1;
     return a >> shift;
 }
 
-#undef    SKP_RSHIFT_uint
-static inline opus_uint32 SKP_RSHIFT_uint(opus_uint32 a, opus_int32 shift){
+#undef    silk_RSHIFT_uint
+static inline opus_uint32 silk_RSHIFT_uint(opus_uint32 a, opus_int32 shift){
     ops_count += 1;
     return a >> shift;
 }
 
-#undef    SKP_ADD_LSHIFT
-static inline opus_int32 SKP_ADD_LSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_LSHIFT
+static inline opus_int32 silk_ADD_LSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a + (b << shift);
     return ret;                /* shift >= 0*/
 }
-#undef    SKP_ADD_LSHIFT32
-static inline opus_int32 SKP_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_LSHIFT32
+static inline opus_int32 silk_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a + (b << shift);
     return ret;                /* shift >= 0*/
 }
-#undef    SKP_ADD_LSHIFT_uint
-static inline opus_uint32 SKP_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
+#undef    silk_ADD_LSHIFT_uint
+static inline opus_uint32 silk_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
     opus_uint32 ret;
     ops_count += 1;
     ret = a + (b << shift);
     return ret;                /* shift >= 0*/
 }
-#undef    SKP_ADD_RSHIFT
-static inline opus_int32 SKP_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_RSHIFT
+static inline opus_int32 silk_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a + (b >> shift);
     return ret;                /* shift  > 0*/
 }
-#undef    SKP_ADD_RSHIFT32
-static inline opus_int32 SKP_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_RSHIFT32
+static inline opus_int32 silk_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a + (b >> shift);
     return ret;                /* shift  > 0*/
 }
-#undef    SKP_ADD_RSHIFT_uint
-static inline opus_uint32 SKP_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
+#undef    silk_ADD_RSHIFT_uint
+static inline opus_uint32 silk_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
     opus_uint32 ret;
     ops_count += 1;
     ret = a + (b >> shift);
     return ret;                /* shift  > 0*/
 }
-#undef    SKP_SUB_LSHIFT32
-static inline opus_int32 SKP_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_SUB_LSHIFT32
+static inline opus_int32 silk_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a - (b << shift);
     return ret;                /* shift >= 0*/
 }
-#undef    SKP_SUB_RSHIFT32
-static inline opus_int32 SKP_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_SUB_RSHIFT32
+static inline opus_int32 silk_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
     ops_count += 1;
     ret = a - (b >> shift);
     return ret;                /* shift  > 0*/
 }
 
-#undef    SKP_RSHIFT_ROUND
-static inline opus_int32 SKP_RSHIFT_ROUND(opus_int32 a, opus_int32 shift){
+#undef    silk_RSHIFT_ROUND
+static inline opus_int32 silk_RSHIFT_ROUND(opus_int32 a, opus_int32 shift){
     opus_int32 ret;
     ops_count += 3;
     ret = shift == 1 ? (a >> 1) + (a & 1) : ((a >> (shift - 1)) + 1) >> 1;
     return ret;
 }
 
-#undef    SKP_RSHIFT_ROUND64
-static inline opus_int64 SKP_RSHIFT_ROUND64(opus_int64 a, opus_int32 shift){
+#undef    silk_RSHIFT_ROUND64
+static inline opus_int64 silk_RSHIFT_ROUND64(opus_int64 a, opus_int32 shift){
     opus_int64 ret;
     ops_count += 6;
     ret = shift == 1 ? (a >> 1) + (a & 1) : ((a >> (shift - 1)) + 1) >> 1;
     return ret;
 }
 
-#undef    SKP_abs_int64
-static inline opus_int64 SKP_abs_int64(opus_int64 a){
+#undef    silk_abs_int64
+static inline opus_int64 silk_abs_int64(opus_int64 a){
     ops_count += 1;
-    return (((a) >  0)  ? (a) : -(a));            /* Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN*/
+    return (((a) >  0)  ? (a) : -(a));            /* Be careful: silk_abs gives the wrong result when the input equals silk_intXX_MIN */
 }
 
-#undef    SKP_abs_int32
-static inline opus_int32 SKP_abs_int32(opus_int32 a){
+#undef    silk_abs_int32
+static inline opus_int32 silk_abs_int32(opus_int32 a){
     ops_count += 1;
     return abs(a);
 }
 
 
-#undef SKP_min
-static SKP_min(a, b){
+#undef silk_min
+static silk_min(a, b){
     ops_count += 1;
     return (((a) < (b)) ? (a) :  (b));
 }
-#undef SKP_max
-static SKP_max(a, b){
+#undef silk_max
+static silk_max(a, b){
     ops_count += 1;
     return (((a) > (b)) ? (a) :  (b));
 }
-#undef SKP_sign
-static SKP_sign(a){
+#undef silk_sign
+static silk_sign(a){
     ops_count += 1;
     return ((a) > 0 ? 1 : ( (a) < 0 ? -1 : 0 ));
 }
 
-#undef    SKP_ADD16
-static inline opus_int16 SKP_ADD16(opus_int16 a, opus_int16 b){
+#undef    silk_ADD16
+static inline opus_int16 silk_ADD16(opus_int16 a, opus_int16 b){
     opus_int16 ret;
     ops_count += 1;
     ret = a + b;
     return ret;
 }
 
-#undef    SKP_ADD32
-static inline opus_int32 SKP_ADD32(opus_int32 a, opus_int32 b){
+#undef    silk_ADD32
+static inline opus_int32 silk_ADD32(opus_int32 a, opus_int32 b){
     opus_int32 ret;
     ops_count += 1;
     ret = a + b;
     return ret;
 }
 
-#undef    SKP_ADD64
-static inline opus_int64 SKP_ADD64(opus_int64 a, opus_int64 b){
+#undef    silk_ADD64
+static inline opus_int64 silk_ADD64(opus_int64 a, opus_int64 b){
     opus_int64 ret;
     ops_count += 2;
     ret = a + b;
     return ret;
 }
 
-#undef    SKP_SUB16
-static inline opus_int16 SKP_SUB16(opus_int16 a, opus_int16 b){
+#undef    silk_SUB16
+static inline opus_int16 silk_SUB16(opus_int16 a, opus_int16 b){
     opus_int16 ret;
     ops_count += 1;
     ret = a - b;
     return ret;
 }
 
-#undef    SKP_SUB32
-static inline opus_int32 SKP_SUB32(opus_int32 a, opus_int32 b){
+#undef    silk_SUB32
+static inline opus_int32 silk_SUB32(opus_int32 a, opus_int32 b){
     opus_int32 ret;
     ops_count += 1;
     ret = a - b;
     return ret;
 }
 
-#undef    SKP_SUB64
-static inline opus_int64 SKP_SUB64(opus_int64 a, opus_int64 b){
+#undef    silk_SUB64
+static inline opus_int64 silk_SUB64(opus_int64 a, opus_int64 b){
     opus_int64 ret;
     ops_count += 2;
     ret = a - b;
     return ret;
 }
 
-#undef SKP_ADD_SAT16
-static inline opus_int16 SKP_ADD_SAT16( opus_int16 a16, opus_int16 b16 ) {
+#undef silk_ADD_SAT16
+static inline opus_int16 silk_ADD_SAT16( opus_int16 a16, opus_int16 b16 ) {
     opus_int16 res;
-    /* Nb will be counted in AKP_add32 and SKP_SAT16*/
-    res = (opus_int16)SKP_SAT16( SKP_ADD32( (opus_int32)(a16), (b16) ) );
+    /* Nb will be counted in silk_ADD32 and silk_SAT16 */
+    res = (opus_int16)silk_SAT16( silk_ADD32( (opus_int32)(a16), (b16) ) );
     return res;
 }
 
-#undef SKP_ADD_SAT32
-static inline opus_int32 SKP_ADD_SAT32(opus_int32 a32, opus_int32 b32){
+#undef silk_ADD_SAT32
+static inline opus_int32 silk_ADD_SAT32(opus_int32 a32, opus_int32 b32){
     opus_int32 res;
     ops_count += 1;
     res =    ((((a32) + (b32)) & 0x80000000) == 0 ?                                    \
-            ((((a32) & (b32)) & 0x80000000) != 0 ? SKP_int32_MIN : (a32)+(b32)) :    \
-            ((((a32) | (b32)) & 0x80000000) == 0 ? SKP_int32_MAX : (a32)+(b32)) );
+            ((((a32) & (b32)) & 0x80000000) != 0 ? silk_int32_MIN : (a32)+(b32)) :    \
+            ((((a32) | (b32)) & 0x80000000) == 0 ? silk_int32_MAX : (a32)+(b32)) );
     return res;
 }
 
-#undef SKP_ADD_SAT64
-static inline opus_int64 SKP_ADD_SAT64( opus_int64 a64, opus_int64 b64 ) {
+#undef silk_ADD_SAT64
+static inline opus_int64 silk_ADD_SAT64( opus_int64 a64, opus_int64 b64 ) {
     opus_int64 res;
     ops_count += 1;
     res =    ((((a64) + (b64)) & 0x8000000000000000LL) == 0 ?                                \
-            ((((a64) & (b64)) & 0x8000000000000000LL) != 0 ? SKP_int64_MIN : (a64)+(b64)) :    \
-            ((((a64) | (b64)) & 0x8000000000000000LL) == 0 ? SKP_int64_MAX : (a64)+(b64)) );
+            ((((a64) & (b64)) & 0x8000000000000000LL) != 0 ? silk_int64_MIN : (a64)+(b64)) :    \
+            ((((a64) | (b64)) & 0x8000000000000000LL) == 0 ? silk_int64_MAX : (a64)+(b64)) );
     return res;
 }
 
-#undef SKP_SUB_SAT16
-static inline opus_int16 SKP_SUB_SAT16( opus_int16 a16, opus_int16 b16 ) {
+#undef silk_SUB_SAT16
+static inline opus_int16 silk_SUB_SAT16( opus_int16 a16, opus_int16 b16 ) {
     opus_int16 res;
-    SKP_assert(0);
+    silk_assert(0);
     /* Nb will be counted in sub-macros*/
-    res = (opus_int16)SKP_SAT16( SKP_SUB32( (opus_int32)(a16), (b16) ) );
+    res = (opus_int16)silk_SAT16( silk_SUB32( (opus_int32)(a16), (b16) ) );
     return res;
 }
 
-#undef SKP_SUB_SAT32
-static inline opus_int32 SKP_SUB_SAT32( opus_int32 a32, opus_int32 b32 ) {
+#undef silk_SUB_SAT32
+static inline opus_int32 silk_SUB_SAT32( opus_int32 a32, opus_int32 b32 ) {
     opus_int32 res;
     ops_count += 1;
     res =     ((((a32)-(b32)) & 0x80000000) == 0 ?                                            \
-            (( (a32) & ((b32)^0x80000000) & 0x80000000) ? SKP_int32_MIN : (a32)-(b32)) :    \
-            ((((a32)^0x80000000) & (b32)  & 0x80000000) ? SKP_int32_MAX : (a32)-(b32)) );
+            (( (a32) & ((b32)^0x80000000) & 0x80000000) ? silk_int32_MIN : (a32)-(b32)) :    \
+            ((((a32)^0x80000000) & (b32)  & 0x80000000) ? silk_int32_MAX : (a32)-(b32)) );
     return res;
 }
 
-#undef SKP_SUB_SAT64
-static inline opus_int64 SKP_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
+#undef silk_SUB_SAT64
+static inline opus_int64 silk_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
     opus_int64 res;
     ops_count += 1;
     res =    ((((a64)-(b64)) & 0x8000000000000000LL) == 0 ?                                                        \
-            (( (a64) & ((b64)^0x8000000000000000LL) & 0x8000000000000000LL) ? SKP_int64_MIN : (a64)-(b64)) :    \
-            ((((a64)^0x8000000000000000LL) & (b64)  & 0x8000000000000000LL) ? SKP_int64_MAX : (a64)-(b64)) );
+            (( (a64) & ((b64)^0x8000000000000000LL) & 0x8000000000000000LL) ? silk_int64_MIN : (a64)-(b64)) :    \
+            ((((a64)^0x8000000000000000LL) & (b64)  & 0x8000000000000000LL) ? silk_int64_MAX : (a64)-(b64)) );
 
     return res;
 }
 
-#undef    SKP_SMULWW
-static inline opus_int32 SKP_SMULWW(opus_int32 a32, opus_int32 b32){
+#undef    silk_SMULWW
+static inline opus_int32 silk_SMULWW(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     /* Nb will be counted in sub-macros*/
-    ret = SKP_MLA(SKP_SMULWB((a32), (b32)), (a32), SKP_RSHIFT_ROUND((b32), 16));
+    ret = silk_MLA(silk_SMULWB((a32), (b32)), (a32), silk_RSHIFT_ROUND((b32), 16));
     return ret;
 }
 
-#undef    SKP_SMLAWW
-static inline opus_int32 SKP_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef    silk_SMLAWW
+static inline opus_int32 silk_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     /* Nb will be counted in sub-macros*/
-    ret = SKP_MLA(SKP_SMLAWB((a32), (b32), (c32)), (b32), SKP_RSHIFT_ROUND((c32), 16));
+    ret = silk_MLA(silk_SMLAWB((a32), (b32), (c32)), (b32), silk_RSHIFT_ROUND((c32), 16));
     return ret;
 }
 
-#undef    SKP_min_int
-static inline opus_int SKP_min_int(opus_int a, opus_int b)
+#undef    silk_min_int
+static inline opus_int silk_min_int(opus_int a, opus_int b)
 {
     ops_count += 1;
     return (((a) < (b)) ? (a) : (b));
 }
 
-#undef    SKP_min_16
-static inline opus_int16 SKP_min_16(opus_int16 a, opus_int16 b)
+#undef    silk_min_16
+static inline opus_int16 silk_min_16(opus_int16 a, opus_int16 b)
 {
     ops_count += 1;
     return (((a) < (b)) ? (a) : (b));
 }
-#undef    SKP_min_32
-static inline opus_int32 SKP_min_32(opus_int32 a, opus_int32 b)
+#undef    silk_min_32
+static inline opus_int32 silk_min_32(opus_int32 a, opus_int32 b)
 {
     ops_count += 1;
     return (((a) < (b)) ? (a) : (b));
 }
-#undef    SKP_min_64
-static inline opus_int64 SKP_min_64(opus_int64 a, opus_int64 b)
+#undef    silk_min_64
+static inline opus_int64 silk_min_64(opus_int64 a, opus_int64 b)
 {
     ops_count += 1;
     return (((a) < (b)) ? (a) : (b));
 }
 
-/* SKP_min() versions with typecast in the function call */
-#undef    SKP_max_int
-static inline opus_int SKP_max_int(opus_int a, opus_int b)
+/* silk_max() versions with typecast in the function call */
+#undef    silk_max_int
+static inline opus_int silk_max_int(opus_int a, opus_int b)
 {
     ops_count += 1;
     return (((a) > (b)) ? (a) : (b));
 }
-#undef    SKP_max_16
-static inline opus_int16 SKP_max_16(opus_int16 a, opus_int16 b)
+#undef    silk_max_16
+static inline opus_int16 silk_max_16(opus_int16 a, opus_int16 b)
 {
     ops_count += 1;
     return (((a) > (b)) ? (a) : (b));
 }
-#undef    SKP_max_32
-static inline opus_int32 SKP_max_32(opus_int32 a, opus_int32 b)
+#undef    silk_max_32
+static inline opus_int32 silk_max_32(opus_int32 a, opus_int32 b)
 {
     ops_count += 1;
     return (((a) > (b)) ? (a) : (b));
 }
 
-#undef    SKP_max_64
-static inline opus_int64 SKP_max_64(opus_int64 a, opus_int64 b)
+#undef    silk_max_64
+static inline opus_int64 silk_max_64(opus_int64 a, opus_int64 b)
 {
     ops_count += 1;
     return (((a) > (b)) ? (a) : (b));
 }
 
 
-#undef SKP_LIMIT_int
-static inline opus_int SKP_LIMIT_int(opus_int a, opus_int limit1, opus_int limit2)
+#undef silk_LIMIT_int
+static inline opus_int silk_LIMIT_int(opus_int a, opus_int limit1, opus_int limit2)
 {
     opus_int ret;
     ops_count += 6;
@@ -685,8 +685,8 @@
     return(ret);
 }
 
-#undef SKP_LIMIT_16
-static inline opus_int16 SKP_LIMIT_16(opus_int16 a, opus_int16 limit1, opus_int16 limit2)
+#undef silk_LIMIT_16
+static inline opus_int16 silk_LIMIT_16(opus_int16 a, opus_int16 limit1, opus_int16 limit2)
 {
     opus_int16 ret;
     ops_count += 6;
@@ -698,8 +698,8 @@
 }
 
 
-#undef SKP_LIMIT_32
-static inline opus_int SKP_LIMIT_32(opus_int32 a, opus_int32 limit1, opus_int32 limit2)
+#undef silk_LIMIT_32
+static inline opus_int silk_LIMIT_32(opus_int32 a, opus_int32 limit1, opus_int32 limit2)
 {
     opus_int32 ret;
     ops_count += 6;
@@ -712,7 +712,7 @@
 #else
 #define exVarDefine
 #define varDefine
-#define SKP_SaveCount()
+#define silk_SaveCount()
 
 #endif
 #endif
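
When silk_MACRO_COUNT is defined, the header above replaces the arithmetic macros with counting wrappers: each wrapper adds a fixed weight to the global ops_count, and silk_SaveResetCount() returns and clears the running total. The standalone model below shows that usage pattern; my_SMULBB() mirrors the weight-1 silk_SMULBB wrapper and save_reset_count() mirrors silk_SaveResetCount(), but the main() driver itself is only an assumed example, not code from the tree.

    /* Standalone model, not part of the patch. */
    #include <stdio.h>

    typedef short     opus_int16;
    typedef int       opus_int32;
    typedef long long opus_int64;

    static opus_int64 ops_count = 0;

    static opus_int32 my_SMULBB( opus_int32 a32, opus_int32 b32 )
    {
        ops_count += 1;   /* same weight as the silk_SMULBB wrapper in the header */
        return (opus_int32)(opus_int16)a32 * (opus_int32)(opus_int16)b32;
    }

    static opus_int64 save_reset_count( void )
    {
        opus_int64 ret = ops_count;
        ops_count = 0;
        return ret;
    }

    int main( void )
    {
        opus_int32 acc = 0;
        int        i;
        for( i = 0; i < 10; i++ ) {
            acc += my_SMULBB( i, i + 1 );   /* ten counted 16x16 multiplies */
        }
        printf( "acc = %d, weighted op count = %lld\n", acc, save_reset_count() );
        return 0;
    }
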
diff --git a/silk/silk_MacroDebug.h b/silk/silk_MacroDebug.h
index ebf1e8f..f91da06 100644
--- a/silk/silk_MacroDebug.h
+++ b/silk/silk_MacroDebug.h
@@ -31,521 +31,521 @@
 /* Redefine macro functions with extensive assertion in Win32_DEBUG mode.
    As function can't be undefined, this file can't work with SigProcFIX_MacroCount.h */
 
-#if 0 && defined (_WIN32) && defined (_DEBUG) && !defined (SKP_MACRO_COUNT)
+#if 0 && defined (_WIN32) && defined (_DEBUG) && !defined (silk_MACRO_COUNT)
 
-#undef    SKP_ADD16
-static inline opus_int16 SKP_ADD16(opus_int16 a, opus_int16 b){
+#undef    silk_ADD16
+static inline opus_int16 silk_ADD16(opus_int16 a, opus_int16 b){
     opus_int16 ret;
 
     ret = a + b;
-    SKP_assert( ret == SKP_ADD_SAT16( a, b ));
+    silk_assert( ret == silk_ADD_SAT16( a, b ));
     return ret;
 }
 
-#undef    SKP_ADD32
-static inline opus_int32 SKP_ADD32(opus_int32 a, opus_int32 b){
+#undef    silk_ADD32
+static inline opus_int32 silk_ADD32(opus_int32 a, opus_int32 b){
     opus_int32 ret;
 
     ret = a + b;
-    SKP_assert( ret == SKP_ADD_SAT32( a, b ));
+    silk_assert( ret == silk_ADD_SAT32( a, b ));
     return ret;
 }
 
-#undef    SKP_ADD64
-static inline opus_int64 SKP_ADD64(opus_int64 a, opus_int64 b){
+#undef    silk_ADD64
+static inline opus_int64 silk_ADD64(opus_int64 a, opus_int64 b){
     opus_int64 ret;
 
     ret = a + b;
-    SKP_assert( ret == SKP_ADD_SAT64( a, b ));
+    silk_assert( ret == silk_ADD_SAT64( a, b ));
     return ret;
 }
 
-#undef    SKP_SUB16
-static inline opus_int16 SKP_SUB16(opus_int16 a, opus_int16 b){
+#undef    silk_SUB16
+static inline opus_int16 silk_SUB16(opus_int16 a, opus_int16 b){
     opus_int16 ret;
 
     ret = a - b;
-    SKP_assert( ret == SKP_SUB_SAT16( a, b ));
+    silk_assert( ret == silk_SUB_SAT16( a, b ));
     return ret;
 }
 
-#undef    SKP_SUB32
-static inline opus_int32 SKP_SUB32(opus_int32 a, opus_int32 b){
+#undef    silk_SUB32
+static inline opus_int32 silk_SUB32(opus_int32 a, opus_int32 b){
     opus_int32 ret;
 
     ret = a - b;
-    SKP_assert( ret == SKP_SUB_SAT32( a, b ));
+    silk_assert( ret == silk_SUB_SAT32( a, b ));
     return ret;
 }
 
-#undef    SKP_SUB64
-static inline opus_int64 SKP_SUB64(opus_int64 a, opus_int64 b){
+#undef    silk_SUB64
+static inline opus_int64 silk_SUB64(opus_int64 a, opus_int64 b){
     opus_int64 ret;
 
     ret = a - b;
-    SKP_assert( ret == SKP_SUB_SAT64( a, b ));
+    silk_assert( ret == silk_SUB_SAT64( a, b ));
     return ret;
 }
 
-#undef SKP_ADD_SAT16
-static inline opus_int16 SKP_ADD_SAT16( opus_int16 a16, opus_int16 b16 ) {
+#undef silk_ADD_SAT16
+static inline opus_int16 silk_ADD_SAT16( opus_int16 a16, opus_int16 b16 ) {
     opus_int16 res;
-    res = (opus_int16)SKP_SAT16( SKP_ADD32( (opus_int32)(a16), (b16) ) );
-    SKP_assert( res == SKP_SAT16( ( opus_int32 )a16 + ( opus_int32 )b16 ) );
+    res = (opus_int16)silk_SAT16( silk_ADD32( (opus_int32)(a16), (b16) ) );
+    silk_assert( res == silk_SAT16( ( opus_int32 )a16 + ( opus_int32 )b16 ) );
     return res;
 }
 
-#undef SKP_ADD_SAT32
-static inline opus_int32 SKP_ADD_SAT32(opus_int32 a32, opus_int32 b32){
+#undef silk_ADD_SAT32
+static inline opus_int32 silk_ADD_SAT32(opus_int32 a32, opus_int32 b32){
     opus_int32 res;
     res =    ((((a32) + (b32)) & 0x80000000) == 0 ?                                    \
-            ((((a32) & (b32)) & 0x80000000) != 0 ? SKP_int32_MIN : (a32)+(b32)) :    \
-            ((((a32) | (b32)) & 0x80000000) == 0 ? SKP_int32_MAX : (a32)+(b32)) );
-    SKP_assert( res == SKP_SAT32( ( opus_int64 )a32 + ( opus_int64 )b32 ) );
+            ((((a32) & (b32)) & 0x80000000) != 0 ? silk_int32_MIN : (a32)+(b32)) :    \
+            ((((a32) | (b32)) & 0x80000000) == 0 ? silk_int32_MAX : (a32)+(b32)) );
+    silk_assert( res == silk_SAT32( ( opus_int64 )a32 + ( opus_int64 )b32 ) );
     return res;
 }
 
-#undef SKP_ADD_SAT64
-static inline opus_int64 SKP_ADD_SAT64( opus_int64 a64, opus_int64 b64 ) {
+#undef silk_ADD_SAT64
+static inline opus_int64 silk_ADD_SAT64( opus_int64 a64, opus_int64 b64 ) {
     opus_int64 res;
     res =    ((((a64) + (b64)) & 0x8000000000000000LL) == 0 ?                                \
-            ((((a64) & (b64)) & 0x8000000000000000LL) != 0 ? SKP_int64_MIN : (a64)+(b64)) :    \
-            ((((a64) | (b64)) & 0x8000000000000000LL) == 0 ? SKP_int64_MAX : (a64)+(b64)) );
+            ((((a64) & (b64)) & 0x8000000000000000LL) != 0 ? silk_int64_MIN : (a64)+(b64)) :    \
+            ((((a64) | (b64)) & 0x8000000000000000LL) == 0 ? silk_int64_MAX : (a64)+(b64)) );
     if( res != a64 + b64 ) {
         /* Check that we saturated to the correct extreme value */
-        SKP_assert( ( res == SKP_int64_MAX && ( ( a64 >> 1 ) + ( b64 >> 1 ) > ( SKP_int64_MAX >> 3 ) ) ) ||
-                    ( res == SKP_int64_MIN && ( ( a64 >> 1 ) + ( b64 >> 1 ) < ( SKP_int64_MIN >> 3 ) ) ) );
+        silk_assert( ( res == silk_int64_MAX && ( ( a64 >> 1 ) + ( b64 >> 1 ) > ( silk_int64_MAX >> 3 ) ) ) ||
+                    ( res == silk_int64_MIN && ( ( a64 >> 1 ) + ( b64 >> 1 ) < ( silk_int64_MIN >> 3 ) ) ) );
     } else {
         /* Saturation not necessary */
-        SKP_assert( res == a64 + b64 );
+        silk_assert( res == a64 + b64 );
     }
     return res;
 }
 
-#undef SKP_SUB_SAT16
-static inline opus_int16 SKP_SUB_SAT16( opus_int16 a16, opus_int16 b16 ) {
+#undef silk_SUB_SAT16
+static inline opus_int16 silk_SUB_SAT16( opus_int16 a16, opus_int16 b16 ) {
     opus_int16 res;
-    res = (opus_int16)SKP_SAT16( SKP_SUB32( (opus_int32)(a16), (b16) ) );
-    SKP_assert( res == SKP_SAT16( ( opus_int32 )a16 - ( opus_int32 )b16 ) );
+    res = (opus_int16)silk_SAT16( silk_SUB32( (opus_int32)(a16), (b16) ) );
+    silk_assert( res == silk_SAT16( ( opus_int32 )a16 - ( opus_int32 )b16 ) );
     return res;
 }
 
-#undef SKP_SUB_SAT32
-static inline opus_int32 SKP_SUB_SAT32( opus_int32 a32, opus_int32 b32 ) {
+#undef silk_SUB_SAT32
+static inline opus_int32 silk_SUB_SAT32( opus_int32 a32, opus_int32 b32 ) {
     opus_int32 res;
     res =     ((((a32)-(b32)) & 0x80000000) == 0 ?                                            \
-            (( (a32) & ((b32)^0x80000000) & 0x80000000) ? SKP_int32_MIN : (a32)-(b32)) :    \
-            ((((a32)^0x80000000) & (b32)  & 0x80000000) ? SKP_int32_MAX : (a32)-(b32)) );
-    SKP_assert( res == SKP_SAT32( ( opus_int64 )a32 - ( opus_int64 )b32 ) );
+            (( (a32) & ((b32)^0x80000000) & 0x80000000) ? silk_int32_MIN : (a32)-(b32)) :    \
+            ((((a32)^0x80000000) & (b32)  & 0x80000000) ? silk_int32_MAX : (a32)-(b32)) );
+    silk_assert( res == silk_SAT32( ( opus_int64 )a32 - ( opus_int64 )b32 ) );
     return res;
 }
 
-#undef SKP_SUB_SAT64
-static inline opus_int64 SKP_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
+#undef silk_SUB_SAT64
+static inline opus_int64 silk_SUB_SAT64( opus_int64 a64, opus_int64 b64 ) {
     opus_int64 res;
     res =    ((((a64)-(b64)) & 0x8000000000000000LL) == 0 ?                                                        \
-            (( (a64) & ((b64)^0x8000000000000000LL) & 0x8000000000000000LL) ? SKP_int64_MIN : (a64)-(b64)) :    \
-            ((((a64)^0x8000000000000000LL) & (b64)  & 0x8000000000000000LL) ? SKP_int64_MAX : (a64)-(b64)) );
+            (( (a64) & ((b64)^0x8000000000000000LL) & 0x8000000000000000LL) ? silk_int64_MIN : (a64)-(b64)) :    \
+            ((((a64)^0x8000000000000000LL) & (b64)  & 0x8000000000000000LL) ? silk_int64_MAX : (a64)-(b64)) );
 
     if( res != a64 - b64 ) {
         /* Check that we saturated to the correct extreme value */
-        SKP_assert( ( res == SKP_int64_MAX && ( ( a64 >> 1 ) + ( b64 >> 1 ) > ( SKP_int64_MAX >> 3 ) ) ) ||
-                    ( res == SKP_int64_MIN && ( ( a64 >> 1 ) + ( b64 >> 1 ) < ( SKP_int64_MIN >> 3 ) ) ) );
+        silk_assert( ( res == silk_int64_MAX && ( ( a64 >> 1 ) + ( b64 >> 1 ) > ( silk_int64_MAX >> 3 ) ) ) ||
+                    ( res == silk_int64_MIN && ( ( a64 >> 1 ) + ( b64 >> 1 ) < ( silk_int64_MIN >> 3 ) ) ) );
     } else {
         /* Saturation not necessary */
-        SKP_assert( res == a64 - b64 );
+        silk_assert( res == a64 - b64 );
     }
     return res;
 }
 
-#undef SKP_MUL
-static inline opus_int32 SKP_MUL(opus_int32 a32, opus_int32 b32){
+#undef silk_MUL
+static inline opus_int32 silk_MUL(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     opus_int64 ret64; /* Will easily show how many bits that are needed */
     ret = a32 * b32;
     ret64 = (opus_int64)a32 * (opus_int64)b32;
-    SKP_assert((opus_int64)ret == ret64 );        /* Check output overflow */
+    silk_assert((opus_int64)ret == ret64 );        /* Check output overflow */
     return ret;
 }
 
-#undef SKP_MUL_uint
-static inline opus_uint32 SKP_MUL_uint(opus_uint32 a32, opus_uint32 b32){
+#undef silk_MUL_uint
+static inline opus_uint32 silk_MUL_uint(opus_uint32 a32, opus_uint32 b32){
     opus_uint32 ret;
     ret = a32 * b32;
-    SKP_assert((opus_uint64)ret == (opus_uint64)a32 * (opus_uint64)b32);        /* Check output overflow */
+    silk_assert((opus_uint64)ret == (opus_uint64)a32 * (opus_uint64)b32);        /* Check output overflow */
     return ret;
 }
-#undef SKP_MLA
-static inline opus_int32 SKP_MLA(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef silk_MLA
+static inline opus_int32 silk_MLA(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ret = a32 + b32 * c32;
-    SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32);    /* Check output overflow */
+    silk_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32);    /* Check output overflow */
     return ret;
 }
 
-#undef SKP_MLA_uint
-static inline opus_int32 SKP_MLA_uint(opus_uint32 a32, opus_uint32 b32, opus_uint32 c32){
+#undef silk_MLA_uint
+static inline opus_int32 silk_MLA_uint(opus_uint32 a32, opus_uint32 b32, opus_uint32 c32){
     opus_uint32 ret;
     ret = a32 + b32 * c32;
-    SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32);    /* Check output overflow */
+    silk_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int64)c32);    /* Check output overflow */
     return ret;
 }
 
-#undef    SKP_SMULWB
-static inline opus_int32 SKP_SMULWB(opus_int32 a32, opus_int32 b32){
+#undef    silk_SMULWB
+static inline opus_int32 silk_SMULWB(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ret = (a32 >> 16) * (opus_int32)((opus_int16)b32) + (((a32 & 0x0000FFFF) * (opus_int32)((opus_int16)b32)) >> 16);
-    SKP_assert((opus_int64)ret == ((opus_int64)a32 * (opus_int16)b32) >> 16);
+    silk_assert((opus_int64)ret == ((opus_int64)a32 * (opus_int16)b32) >> 16);
     return ret;
 }
-#undef    SKP_SMLAWB
-static inline opus_int32 SKP_SMLAWB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef    silk_SMLAWB
+static inline opus_int32 silk_SMLAWB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
-    ret = SKP_ADD32( a32, SKP_SMULWB( b32, c32 ) );
-    SKP_assert(SKP_ADD32( a32, SKP_SMULWB( b32, c32 ) ) == SKP_ADD_SAT32( a32, SKP_SMULWB( b32, c32 ) ));
+    ret = silk_ADD32( a32, silk_SMULWB( b32, c32 ) );
+    silk_assert(silk_ADD32( a32, silk_SMULWB( b32, c32 ) ) == silk_ADD_SAT32( a32, silk_SMULWB( b32, c32 ) ));
     return ret;
 }
 
-#undef SKP_SMULWT
-static inline opus_int32 SKP_SMULWT(opus_int32 a32, opus_int32 b32){
+#undef silk_SMULWT
+static inline opus_int32 silk_SMULWT(opus_int32 a32, opus_int32 b32){
     opus_int32 ret;
     ret = (a32 >> 16) * (b32 >> 16) + (((a32 & 0x0000FFFF) * (b32 >> 16)) >> 16);
-    SKP_assert((opus_int64)ret == ((opus_int64)a32 * (b32 >> 16)) >> 16);
+    silk_assert((opus_int64)ret == ((opus_int64)a32 * (b32 >> 16)) >> 16);
     return ret;
 }
-#undef SKP_SMLAWT
-static inline opus_int32 SKP_SMLAWT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef silk_SMLAWT
+static inline opus_int32 silk_SMLAWT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ret = a32 + ((b32 >> 16) * (c32 >> 16)) + (((b32 & 0x0000FFFF) * ((c32 >> 16)) >> 16));
-    SKP_assert((opus_int64)ret == (opus_int64)a32 + (((opus_int64)b32 * (c32 >> 16)) >> 16));
+    silk_assert((opus_int64)ret == (opus_int64)a32 + (((opus_int64)b32 * (c32 >> 16)) >> 16));
     return ret;
 }
 
-#undef SKP_SMULL
-static inline opus_int64 SKP_SMULL(opus_int64 a64, opus_int64 b64){
+#undef silk_SMULL
+static inline opus_int64 silk_SMULL(opus_int64 a64, opus_int64 b64){
     opus_int64 ret64;
     ret64 = a64 * b64;
     if( b64 != 0 ) {
-        SKP_assert( a64 == (ret64 / b64) );
+        silk_assert( a64 == (ret64 / b64) );
     } else if( a64 != 0 ) {
-        SKP_assert( b64 == (ret64 / a64) );
+        silk_assert( b64 == (ret64 / a64) );
     }
     return ret64;
 }
 
-/* no checking needed for SKP_SMULBB */
-#undef    SKP_SMLABB
-static inline opus_int32 SKP_SMLABB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+/* no checking needed for silk_SMULBB */
+#undef    silk_SMLABB
+static inline opus_int32 silk_SMLABB(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ret = a32 + (opus_int32)((opus_int16)b32) * (opus_int32)((opus_int16)c32);
-    SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int16)c32);
+    silk_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (opus_int16)c32);
     return ret;
 }
 
-/* no checking needed for SKP_SMULBT */
-#undef    SKP_SMLABT
-static inline opus_int32 SKP_SMLABT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+/* no checking needed for silk_SMULBT */
+#undef    silk_SMLABT
+static inline opus_int32 silk_SMLABT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ret = a32 + ((opus_int32)((opus_int16)b32)) * (c32 >> 16);
-    SKP_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (c32 >> 16));
+    silk_assert((opus_int64)ret == (opus_int64)a32 + (opus_int64)b32 * (c32 >> 16));
     return ret;
 }
 
-/* no checking needed for SKP_SMULTT */
-#undef    SKP_SMLATT
-static inline opus_int32 SKP_SMLATT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+/* no checking needed for silk_SMULTT */
+#undef    silk_SMLATT
+static inline opus_int32 silk_SMLATT(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret;
     ret = a32 + (b32 >> 16) * (c32 >> 16);
-    SKP_assert((opus_int64)ret == (opus_int64)a32 + (b32 >> 16) * (c32 >> 16));
+    silk_assert((opus_int64)ret == (opus_int64)a32 + (b32 >> 16) * (c32 >> 16));
     return ret;
 }
 
-#undef    SKP_SMULWW
-static inline opus_int32 SKP_SMULWW(opus_int32 a32, opus_int32 b32){
+#undef    silk_SMULWW
+static inline opus_int32 silk_SMULWW(opus_int32 a32, opus_int32 b32){
     opus_int32 ret, tmp1, tmp2;
     opus_int64 ret64;
 
-    ret  = SKP_SMULWB( a32, b32 );
-    tmp1 = SKP_RSHIFT_ROUND( b32, 16 );
-    tmp2 = SKP_MUL( a32, tmp1 );
+    ret  = silk_SMULWB( a32, b32 );
+    tmp1 = silk_RSHIFT_ROUND( b32, 16 );
+    tmp2 = silk_MUL( a32, tmp1 );
 
-    SKP_assert( (opus_int64)tmp2 == (opus_int64) a32 * (opus_int64) tmp1 );
+    silk_assert( (opus_int64)tmp2 == (opus_int64) a32 * (opus_int64) tmp1 );
 
     tmp1 = ret;
-    ret  = SKP_ADD32( tmp1, tmp2 );
-    SKP_assert( SKP_ADD32( tmp1, tmp2 ) == SKP_ADD_SAT32( tmp1, tmp2 ) );
+    ret  = silk_ADD32( tmp1, tmp2 );
+    silk_assert( silk_ADD32( tmp1, tmp2 ) == silk_ADD_SAT32( tmp1, tmp2 ) );
 
-    ret64 = SKP_RSHIFT64( SKP_SMULL( a32, b32 ), 16 );
-    SKP_assert( (opus_int64)ret == ret64 );
+    ret64 = silk_RSHIFT64( silk_SMULL( a32, b32 ), 16 );
+    silk_assert( (opus_int64)ret == ret64 );
 
     return ret;
 }
 
-#undef    SKP_SMLAWW
-static inline opus_int32 SKP_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c32){
+#undef    silk_SMLAWW
+static inline opus_int32 silk_SMLAWW(opus_int32 a32, opus_int32 b32, opus_int32 c32){
     opus_int32 ret, tmp;
 
-    tmp = SKP_SMULWW( b32, c32 );
-    ret = SKP_ADD32( a32, tmp );
-    SKP_assert( ret == SKP_ADD_SAT32( a32, tmp ) );
+    tmp = silk_SMULWW( b32, c32 );
+    ret = silk_ADD32( a32, tmp );
+    silk_assert( ret == silk_ADD_SAT32( a32, tmp ) );
     return ret;
 }
 
 /* multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode) */
-#undef    SKP_MLA_ovflw
-#define SKP_MLA_ovflw(a32, b32, c32)    ((a32) + ((b32) * (c32)))
-#undef    SKP_SMLABB_ovflw
-#define SKP_SMLABB_ovflw(a32, b32, c32)    ((a32) + ((opus_int32)((opus_int16)(b32))) * (opus_int32)((opus_int16)(c32)))
+#undef    silk_MLA_ovflw
+#define silk_MLA_ovflw(a32, b32, c32)    ((a32) + ((b32) * (c32)))
+#undef    silk_SMLABB_ovflw
+#define silk_SMLABB_ovflw(a32, b32, c32)    ((a32) + ((opus_int32)((opus_int16)(b32))) * (opus_int32)((opus_int16)(c32)))
 
-/* no checking needed for SKP_SMULL
-   no checking needed for SKP_SMLAL
-   no checking needed for SKP_SMLALBB
+/* no checking needed for silk_SMULL
+   no checking needed for silk_SMLAL
+   no checking needed for silk_SMLALBB
    no checking needed for SigProcFIX_CLZ16
    no checking needed for SigProcFIX_CLZ32*/
 
-#undef SKP_DIV32
-static inline opus_int32 SKP_DIV32(opus_int32 a32, opus_int32 b32){
-    SKP_assert( b32 != 0 );
+#undef silk_DIV32
+static inline opus_int32 silk_DIV32(opus_int32 a32, opus_int32 b32){
+    silk_assert( b32 != 0 );
     return a32 / b32;
 }
 
-#undef SKP_DIV32_16
-static inline opus_int32 SKP_DIV32_16(opus_int32 a32, opus_int32 b32){
-    SKP_assert( b32 != 0 );
-    SKP_assert( b32 <= SKP_int16_MAX );
-    SKP_assert( b32 >= SKP_int16_MIN );
+#undef silk_DIV32_16
+static inline opus_int32 silk_DIV32_16(opus_int32 a32, opus_int32 b32){
+    silk_assert( b32 != 0 );
+    silk_assert( b32 <= silk_int16_MAX );
+    silk_assert( b32 >= silk_int16_MIN );
     return a32 / b32;
 }
 
-/* no checking needed for SKP_SAT8
-   no checking needed for SKP_SAT16
-   no checking needed for SKP_SAT32
-   no checking needed for SKP_POS_SAT32
-   no checking needed for SKP_ADD_POS_SAT8
-   no checking needed for SKP_ADD_POS_SAT16
-   no checking needed for SKP_ADD_POS_SAT32
-   no checking needed for SKP_ADD_POS_SAT64 */
-#undef    SKP_LSHIFT8
-static inline opus_int8 SKP_LSHIFT8(opus_int8 a, opus_int32 shift){
+/* no checking needed for silk_SAT8
+   no checking needed for silk_SAT16
+   no checking needed for silk_SAT32
+   no checking needed for silk_POS_SAT32
+   no checking needed for silk_ADD_POS_SAT8
+   no checking needed for silk_ADD_POS_SAT16
+   no checking needed for silk_ADD_POS_SAT32
+   no checking needed for silk_ADD_POS_SAT64 */
+#undef    silk_LSHIFT8
+static inline opus_int8 silk_LSHIFT8(opus_int8 a, opus_int32 shift){
     opus_int8 ret;
     ret = a << shift;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift < 8);
-    SKP_assert((opus_int64)ret == ((opus_int64)a) << shift);
+    silk_assert(shift >= 0);
+    silk_assert(shift < 8);
+    silk_assert((opus_int64)ret == ((opus_int64)a) << shift);
     return ret;
 }
-#undef    SKP_LSHIFT16
-static inline opus_int16 SKP_LSHIFT16(opus_int16 a, opus_int32 shift){
+#undef    silk_LSHIFT16
+static inline opus_int16 silk_LSHIFT16(opus_int16 a, opus_int32 shift){
     opus_int16 ret;
     ret = a << shift;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift < 16);
-    SKP_assert((opus_int64)ret == ((opus_int64)a) << shift);
+    silk_assert(shift >= 0);
+    silk_assert(shift < 16);
+    silk_assert((opus_int64)ret == ((opus_int64)a) << shift);
     return ret;
 }
-#undef    SKP_LSHIFT32
-static inline opus_int32 SKP_LSHIFT32(opus_int32 a, opus_int32 shift){
+#undef    silk_LSHIFT32
+static inline opus_int32 silk_LSHIFT32(opus_int32 a, opus_int32 shift){
     opus_int32 ret;
     ret = a << shift;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift < 32);
-    SKP_assert((opus_int64)ret == ((opus_int64)a) << shift);
+    silk_assert(shift >= 0);
+    silk_assert(shift < 32);
+    silk_assert((opus_int64)ret == ((opus_int64)a) << shift);
     return ret;
 }
-#undef    SKP_LSHIFT64
-static inline opus_int64 SKP_LSHIFT64(opus_int64 a, opus_int shift){
-    SKP_assert(shift >= 0);
-    SKP_assert(shift < 64);
+#undef    silk_LSHIFT64
+static inline opus_int64 silk_LSHIFT64(opus_int64 a, opus_int shift){
+    silk_assert(shift >= 0);
+    silk_assert(shift < 64);
     return a << shift;
 }
 
-#undef    SKP_LSHIFT_ovflw
-static inline opus_int32 SKP_LSHIFT_ovflw(opus_int32 a, opus_int32 shift){
-    SKP_assert(shift >= 0);            /* no check for overflow */
+#undef    silk_LSHIFT_ovflw
+static inline opus_int32 silk_LSHIFT_ovflw(opus_int32 a, opus_int32 shift){
+    silk_assert(shift >= 0);            /* no check for overflow */
     return a << shift;
 }
 
-#undef    SKP_LSHIFT_uint
-static inline opus_uint32 SKP_LSHIFT_uint(opus_uint32 a, opus_int32 shift){
+#undef    silk_LSHIFT_uint
+static inline opus_uint32 silk_LSHIFT_uint(opus_uint32 a, opus_int32 shift){
     opus_uint32 ret;
     ret = a << shift;
-    SKP_assert(shift >= 0);
-    SKP_assert((opus_int64)ret == ((opus_int64)a) << shift);
+    silk_assert(shift >= 0);
+    silk_assert((opus_int64)ret == ((opus_int64)a) << shift);
     return ret;
 }
 
-#undef    SKP_RSHIFT8
-static inline opus_int8 SKP_RSHIFT8(opus_int8 a, opus_int32 shift){
-    SKP_assert(shift >=  0);
-    SKP_assert(shift < 8);
+#undef    silk_RSHIFT8
+static inline opus_int8 silk_RSHIFT8(opus_int8 a, opus_int32 shift){
+    silk_assert(shift >=  0);
+    silk_assert(shift < 8);
     return a >> shift;
 }
-#undef    SKP_RSHIFT16
-static inline opus_int16 SKP_RSHIFT16(opus_int16 a, opus_int32 shift){
-    SKP_assert(shift >=  0);
-    SKP_assert(shift < 16);
+#undef    silk_RSHIFT16
+static inline opus_int16 silk_RSHIFT16(opus_int16 a, opus_int32 shift){
+    silk_assert(shift >=  0);
+    silk_assert(shift < 16);
     return a >> shift;
 }
-#undef    SKP_RSHIFT32
-static inline opus_int32 SKP_RSHIFT32(opus_int32 a, opus_int32 shift){
-    SKP_assert(shift >=  0);
-    SKP_assert(shift < 32);
+#undef    silk_RSHIFT32
+static inline opus_int32 silk_RSHIFT32(opus_int32 a, opus_int32 shift){
+    silk_assert(shift >=  0);
+    silk_assert(shift < 32);
     return a >> shift;
 }
-#undef    SKP_RSHIFT64
-static inline opus_int64 SKP_RSHIFT64(opus_int64 a, opus_int64 shift){
-    SKP_assert(shift >=  0);
-    SKP_assert(shift <= 63);
+#undef    silk_RSHIFT64
+static inline opus_int64 silk_RSHIFT64(opus_int64 a, opus_int64 shift){
+    silk_assert(shift >=  0);
+    silk_assert(shift <= 63);
     return a >> shift;
 }
 
-#undef    SKP_RSHIFT_uint
-static inline opus_uint32 SKP_RSHIFT_uint(opus_uint32 a, opus_int32 shift){
-    SKP_assert(shift >=  0);
-    SKP_assert(shift <= 32);
+#undef    silk_RSHIFT_uint
+static inline opus_uint32 silk_RSHIFT_uint(opus_uint32 a, opus_int32 shift){
+    silk_assert(shift >=  0);
+    silk_assert(shift <= 32);
     return a >> shift;
 }
 
-#undef    SKP_ADD_LSHIFT
-static inline opus_int32 SKP_ADD_LSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_LSHIFT
+static inline opus_int32 silk_ADD_LSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 31);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 31);
     ret = a + (b << shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
+    silk_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
     return ret;                /* shift >= 0 */
 }
-#undef    SKP_ADD_LSHIFT32
-static inline opus_int32 SKP_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_LSHIFT32
+static inline opus_int32 silk_ADD_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 31);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 31);
     ret = a + (b << shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
+    silk_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
     return ret;                /* shift >= 0 */
 }
-#undef    SKP_ADD_LSHIFT_uint
-static inline opus_uint32 SKP_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
+#undef    silk_ADD_LSHIFT_uint
+static inline opus_uint32 silk_ADD_LSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
     opus_uint32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 32);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 32);
     ret = a + (b << shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
+    silk_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) << shift));
     return ret;                /* shift >= 0 */
 }
-#undef    SKP_ADD_RSHIFT
-static inline opus_int32 SKP_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_RSHIFT
+static inline opus_int32 silk_ADD_RSHIFT(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 31);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 31);
     ret = a + (b >> shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
+    silk_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
     return ret;                /* shift  > 0 */
 }
-#undef    SKP_ADD_RSHIFT32
-static inline opus_int32 SKP_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_ADD_RSHIFT32
+static inline opus_int32 silk_ADD_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 31);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 31);
     ret = a + (b >> shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
+    silk_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
     return ret;                /* shift  > 0 */
 }
-#undef    SKP_ADD_RSHIFT_uint
-static inline opus_uint32 SKP_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
+#undef    silk_ADD_RSHIFT_uint
+static inline opus_uint32 silk_ADD_RSHIFT_uint(opus_uint32 a, opus_uint32 b, opus_int32 shift){
     opus_uint32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 32);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 32);
     ret = a + (b >> shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
+    silk_assert((opus_int64)ret == (opus_int64)a + (((opus_int64)b) >> shift));
     return ret;                /* shift  > 0 */
 }
-#undef    SKP_SUB_LSHIFT32
-static inline opus_int32 SKP_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_SUB_LSHIFT32
+static inline opus_int32 silk_SUB_LSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 31);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 31);
     ret = a - (b << shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a - (((opus_int64)b) << shift));
+    silk_assert((opus_int64)ret == (opus_int64)a - (((opus_int64)b) << shift));
     return ret;                /* shift >= 0 */
 }
-#undef    SKP_SUB_RSHIFT32
-static inline opus_int32 SKP_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
+#undef    silk_SUB_RSHIFT32
+static inline opus_int32 silk_SUB_RSHIFT32(opus_int32 a, opus_int32 b, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift >= 0);
-    SKP_assert(shift <= 31);
+    silk_assert(shift >= 0);
+    silk_assert(shift <= 31);
     ret = a - (b >> shift);
-    SKP_assert((opus_int64)ret == (opus_int64)a - (((opus_int64)b) >> shift));
+    silk_assert((opus_int64)ret == (opus_int64)a - (((opus_int64)b) >> shift));
     return ret;                /* shift  > 0 */
 }
 
-#undef    SKP_RSHIFT_ROUND
-static inline opus_int32 SKP_RSHIFT_ROUND(opus_int32 a, opus_int32 shift){
+#undef    silk_RSHIFT_ROUND
+static inline opus_int32 silk_RSHIFT_ROUND(opus_int32 a, opus_int32 shift){
     opus_int32 ret;
-    SKP_assert(shift > 0);        /* the marco definition can't handle a shift of zero */
-    SKP_assert(shift < 32);
+    silk_assert(shift > 0);        /* the macro definition can't handle a shift of zero */
+    silk_assert(shift < 32);
     ret = shift == 1 ? (a >> 1) + (a & 1) : ((a >> (shift - 1)) + 1) >> 1;
-    SKP_assert((opus_int64)ret == ((opus_int64)a + ((opus_int64)1 << (shift - 1))) >> shift);
+    silk_assert((opus_int64)ret == ((opus_int64)a + ((opus_int64)1 << (shift - 1))) >> shift);
     return ret;
 }
 
-#undef    SKP_RSHIFT_ROUND64
-static inline opus_int64 SKP_RSHIFT_ROUND64(opus_int64 a, opus_int32 shift){
+#undef    silk_RSHIFT_ROUND64
+static inline opus_int64 silk_RSHIFT_ROUND64(opus_int64 a, opus_int32 shift){
     opus_int64 ret;
-    SKP_assert(shift > 0);        /* the marco definition can't handle a shift of zero */
-    SKP_assert(shift < 64);
+    silk_assert(shift > 0);        /* the macro definition can't handle a shift of zero */
+    silk_assert(shift < 64);
     ret = shift == 1 ? (a >> 1) + (a & 1) : ((a >> (shift - 1)) + 1) >> 1;
     return ret;
 }
 
-/* SKP_abs is used on floats also, so doesn't work... */
-/*#undef    SKP_abs
-static inline opus_int32 SKP_abs(opus_int32 a){
-    SKP_assert(a != 0x80000000);
-    return (((a) >  0)  ? (a) : -(a));            // Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN
+/* silk_abs is used on floats also, so doesn't work... */
+/*#undef    silk_abs
+static inline opus_int32 silk_abs(opus_int32 a){
+    silk_assert(a != 0x80000000);
+    return (((a) >  0)  ? (a) : -(a));            // Be careful, silk_abs returns the wrong result when the input equals silk_intXX_MIN
 }*/
 
-#undef    SKP_abs_int64
-static inline opus_int64 SKP_abs_int64(opus_int64 a){
-    SKP_assert(a != 0x8000000000000000);
-    return (((a) >  0)  ? (a) : -(a));            /* Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN */
+#undef    silk_abs_int64
+static inline opus_int64 silk_abs_int64(opus_int64 a){
+    silk_assert(a != 0x8000000000000000);
+    return (((a) >  0)  ? (a) : -(a));            /* Be careful, silk_abs returns the wrong result when the input equals silk_intXX_MIN */
 }
 
-#undef    SKP_abs_int32
-static inline opus_int32 SKP_abs_int32(opus_int32 a){
-    SKP_assert(a != 0x80000000);
+#undef    silk_abs_int32
+static inline opus_int32 silk_abs_int32(opus_int32 a){
+    silk_assert(a != 0x80000000);
     return abs(a);
 }
 
-#undef    SKP_CHECK_FIT8
-static inline opus_int8 SKP_CHECK_FIT8( opus_int64 a ){
+#undef    silk_CHECK_FIT8
+static inline opus_int8 silk_CHECK_FIT8( opus_int64 a ){
     opus_int8 ret;
     ret = (opus_int8)a;
-    SKP_assert( (opus_int64)ret == a );
+    silk_assert( (opus_int64)ret == a );
     return( ret );
 }
 
-#undef    SKP_CHECK_FIT16
-static inline opus_int16 SKP_CHECK_FIT16( opus_int64 a ){
+#undef    silk_CHECK_FIT16
+static inline opus_int16 silk_CHECK_FIT16( opus_int64 a ){
     opus_int16 ret;
     ret = (opus_int16)a;
-    SKP_assert( (opus_int64)ret == a );
+    silk_assert( (opus_int64)ret == a );
     return( ret );
 }
 
-#undef    SKP_CHECK_FIT32
-static inline opus_int32 SKP_CHECK_FIT32( opus_int64 a ){
+#undef    silk_CHECK_FIT32
+static inline opus_int32 silk_CHECK_FIT32( opus_int64 a ){
     opus_int32 ret;
     ret = (opus_int32)a;
-    SKP_assert( (opus_int64)ret == a );
+    silk_assert( (opus_int64)ret == a );
     return( ret );
 }
 
-/* no checking for SKP_NSHIFT_MUL_32_32
-   no checking for SKP_NSHIFT_MUL_16_16
-   no checking needed for SKP_min
-   no checking needed for SKP_max
-   no checking needed for SKP_sign
+/* no checking for silk_NSHIFT_MUL_32_32
+   no checking for silk_NSHIFT_MUL_16_16
+   no checking needed for silk_min
+   no checking needed for silk_max
+   no checking needed for silk_sign
 */
 
 #endif
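
For reference, every checked helper above follows the same pattern: perform the operation in 32-bit (or unsigned 32-bit) arithmetic, then assert that the result matches a 64-bit reference computation. A minimal standalone sketch in standard C, using <assert.h> and <stdint.h> in place of silk_assert and the opus_* typedefs (illustrative only, not part of this patch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Checked add-with-right-shift: 32-bit result verified against 64-bit math */
    static int32_t add_rshift32_checked(int32_t a, int32_t b, int shift)
    {
        int32_t ret;
        assert(shift >= 0 && shift <= 31);
        ret = a + (b >> shift);
        assert((int64_t)ret == (int64_t)a + ((int64_t)b >> shift));
        return ret;
    }

    int main(void)
    {
        printf("%d\n", (int)add_rshift32_checked(100, 64, 4));   /* prints 104 */
        return 0;
    }
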
diff --git a/silk/silk_NLSF2A.c b/silk/silk_NLSF2A.c
index fa60c32..5461bdc 100644
--- a/silk/silk_NLSF2A.c
+++ b/silk/silk_NLSF2A.c
@@ -50,13 +50,13 @@
     opus_int   k, n;
     opus_int32 ftmp;
 
-    out[0] = SKP_LSHIFT( 1, QA );
+    out[0] = silk_LSHIFT( 1, QA );
     out[1] = -cLSF[0];
     for( k = 1; k < dd; k++ ) {
         ftmp = cLSF[2*k];            /* QA*/
-        out[k+1] = SKP_LSHIFT( out[k-1], 1 ) - (opus_int32)SKP_RSHIFT_ROUND64( SKP_SMULL( ftmp, out[k] ), QA );
+        out[k+1] = silk_LSHIFT( out[k-1], 1 ) - (opus_int32)silk_RSHIFT_ROUND64( silk_SMULL( ftmp, out[k] ), QA );
         for( n = k; n > 1; n-- ) {
-            out[n] += out[n-2] - (opus_int32)SKP_RSHIFT_ROUND64( SKP_SMULL( ftmp, out[n-1] ), QA );
+            out[n] += out[n-2] - (opus_int32)silk_RSHIFT_ROUND64( silk_SMULL( ftmp, out[n-1] ), QA );
         }
         out[1] -= ftmp;
     }
@@ -76,31 +76,31 @@
     opus_int32 a32_QA1[ SILK_MAX_ORDER_LPC ];
     opus_int32 maxabs, absval, idx=0, sc_Q16, invGain_Q30;
 
-    SKP_assert( LSF_COS_TAB_SZ_FIX == 128 );
+    silk_assert( LSF_COS_TAB_SZ_FIX == 128 );
 
     /* convert LSFs to 2*cos(LSF), using piecewise linear curve from table */
     for( k = 0; k < d; k++ ) {
-        SKP_assert(NLSF[k] >= 0 );
-        SKP_assert(NLSF[k] <= 32767 );
+        silk_assert(NLSF[k] >= 0 );
+        silk_assert(NLSF[k] <= 32767 );
 
         /* f_int on a scale 0-127 (rounded down) */
-        f_int = SKP_RSHIFT( NLSF[k], 15 - 7 );
+        f_int = silk_RSHIFT( NLSF[k], 15 - 7 );
 
         /* f_frac, range: 0..255 */
-        f_frac = NLSF[k] - SKP_LSHIFT( f_int, 15 - 7 );
+        f_frac = NLSF[k] - silk_LSHIFT( f_int, 15 - 7 );
 
-        SKP_assert(f_int >= 0);
-        SKP_assert(f_int < LSF_COS_TAB_SZ_FIX );
+        silk_assert(f_int >= 0);
+        silk_assert(f_int < LSF_COS_TAB_SZ_FIX );
 
         /* Read start and end value from table */
         cos_val = silk_LSFCosTab_FIX_Q12[ f_int ];                /* Q12 */
         delta   = silk_LSFCosTab_FIX_Q12[ f_int + 1 ] - cos_val;  /* Q12, with a range of 0..200 */
 
         /* Linear interpolation */
-        cos_LSF_QA[k] = SKP_RSHIFT_ROUND( SKP_LSHIFT( cos_val, 8 ) + SKP_MUL( delta, f_frac ), 20 - QA ); /* QA */
+        cos_LSF_QA[k] = silk_RSHIFT_ROUND( silk_LSHIFT( cos_val, 8 ) + silk_MUL( delta, f_frac ), 20 - QA ); /* QA */
     }
 
-    dd = SKP_RSHIFT( d, 1 );
+    dd = silk_RSHIFT( d, 1 );
 
     /* generate even and odd polynomials using convolution */
     silk_NLSF2A_find_poly( P, &cos_LSF_QA[ 0 ], dd );
@@ -121,19 +121,19 @@
         /* Find maximum absolute value and its index */
         maxabs = 0;
         for( k = 0; k < d; k++ ) {
-            absval = SKP_abs( a32_QA1[k] );
+            absval = silk_abs( a32_QA1[k] );
             if( absval > maxabs ) {
                 maxabs = absval;
                 idx    = k;
             }
         }
-        maxabs = SKP_RSHIFT_ROUND( maxabs, QA + 1 - 12 );       /* QA+1 -> Q12 */
+        maxabs = silk_RSHIFT_ROUND( maxabs, QA + 1 - 12 );       /* QA+1 -> Q12 */
 
-        if( maxabs > SKP_int16_MAX ) {
+        if( maxabs > silk_int16_MAX ) {
             /* Reduce magnitude of prediction coefficients */
-            maxabs = SKP_min( maxabs, 163838 );  /* ( SKP_int32_MAX >> 14 ) + SKP_int16_MAX = 163838 */
-            sc_Q16 = SILK_FIX_CONST( 0.999, 16 ) - SKP_DIV32( SKP_LSHIFT( maxabs - SKP_int16_MAX, 14 ),
-                                        SKP_RSHIFT32( SKP_MUL( maxabs, idx + 1), 2 ) );
+            maxabs = silk_min( maxabs, 163838 );  /* ( silk_int32_MAX >> 14 ) + silk_int16_MAX = 163838 */
+            sc_Q16 = SILK_FIX_CONST( 0.999, 16 ) - silk_DIV32( silk_LSHIFT( maxabs - silk_int16_MAX, 14 ),
+                                        silk_RSHIFT32( silk_MUL( maxabs, idx + 1), 2 ) );
             silk_bwexpander_32( a32_QA1, d, sc_Q16 );
         } else {
             break;
@@ -143,12 +143,12 @@
     if( i == 10 ) {
         /* Reached the last iteration, clip the coefficients */
         for( k = 0; k < d; k++ ) {
-            a_Q12[ k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ) ); /* QA+1 -> Q12 */
-            a32_QA1[ k ] = SKP_LSHIFT( (opus_int32)a_Q12[ k ], QA + 1 - 12 );
+            a_Q12[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ) ); /* QA+1 -> Q12 */
+            a32_QA1[ k ] = silk_LSHIFT( (opus_int32)a_Q12[ k ], QA + 1 - 12 );
         }
     } else {
         for( k = 0; k < d; k++ ) {
-            a_Q12[ k ] = (opus_int16)SKP_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 );       /* QA+1 -> Q12 */
+            a_Q12[ k ] = (opus_int16)silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 );       /* QA+1 -> Q12 */
         }
     }
 
@@ -156,9 +156,9 @@
         if( silk_LPC_inverse_pred_gain( &invGain_Q30, a_Q12, d ) == 1 ) {
             /* Prediction coefficients are (too close to) unstable; apply bandwidth expansion   */
             /* on the unscaled coefficients, convert to Q12 and measure again                   */
-            silk_bwexpander_32( a32_QA1, d, 65536 - SKP_SMULBB( 9 + i, i ) );            /* 10_Q16 = 0.00015 */
+            silk_bwexpander_32( a32_QA1, d, 65536 - silk_SMULBB( 9 + i, i ) );            /* 10_Q16 = 0.00015 */
             for( k = 0; k < d; k++ ) {
-                a_Q12[ k ] = (opus_int16)SKP_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 );  /* QA+1 -> Q12 */
+                a_Q12[ k ] = (opus_int16)silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 );  /* QA+1 -> Q12 */
             }
         } else {
             break;
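
The conversion in this file maps each NLSF on the 0..32768 (Q15) scale to 2*cos of the corresponding normalized frequency, with silk_LSFCosTab_FIX_Q12 supplying coarse Q12 values and the f_int/f_frac split doing linear interpolation between table entries. A floating-point reference of what the fixed-point interpolation approximates (a sketch under those assumptions, not part of the patch):

    #include <math.h>
    #include <stdio.h>

    /* What cos_LSF_QA[k] approximates: 2*cos of the normalized NLSF frequency */
    static double nlsf_to_2cos(int nlsf_q15)
    {
        const double pi = 3.14159265358979323846;
        double omega = pi * (double)nlsf_q15 / 32768.0;   /* 0..32768 maps to 0..pi */
        return 2.0 * cos(omega);
    }

    int main(void)
    {
        printf("%f\n", nlsf_to_2cos(8192));   /* NLSF at pi/4 -> about 1.414 */
        return 0;
    }
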
diff --git a/silk/silk_NLSF_VQ.c b/silk/silk_NLSF_VQ.c
index 14c297c..7318385 100644
--- a/silk/silk_NLSF_VQ.c
+++ b/silk/silk_NLSF_VQ.c
@@ -43,25 +43,25 @@
     opus_int        i, m;
     opus_int32      diff_Q15, sum_error_Q30, sum_error_Q26;
 
-    SKP_assert( LPC_order <= 16 );
-    SKP_assert( ( LPC_order & 1 ) == 0 );
+    silk_assert( LPC_order <= 16 );
+    silk_assert( ( LPC_order & 1 ) == 0 );
 
     /* Loop over codebook */
     for( i = 0; i < K; i++ ) {
         sum_error_Q26 = 0;
         for( m = 0; m < LPC_order; m += 2 ) {
             /* Compute weighted squared quantization error for index m */
-            diff_Q15 = SKP_SUB_LSHIFT32( in_Q15[ m ], ( opus_int32 )*pCB_Q8++, 7 ); /* range: [ -32767 : 32767 ]*/
-            sum_error_Q30 = SKP_SMULBB( diff_Q15, diff_Q15 );
+            diff_Q15 = silk_SUB_LSHIFT32( in_Q15[ m ], ( opus_int32 )*pCB_Q8++, 7 ); /* range: [ -32767 : 32767 ]*/
+            sum_error_Q30 = silk_SMULBB( diff_Q15, diff_Q15 );
 
             /* Compute weighted squared quantization error for index m + 1 */
-            diff_Q15 = SKP_SUB_LSHIFT32( in_Q15[m + 1], ( opus_int32 )*pCB_Q8++, 7 ); /* range: [ -32767 : 32767 ]*/
-            sum_error_Q30 = SKP_SMLABB( sum_error_Q30, diff_Q15, diff_Q15 );
+            diff_Q15 = silk_SUB_LSHIFT32( in_Q15[m + 1], ( opus_int32 )*pCB_Q8++, 7 ); /* range: [ -32767 : 32767 ]*/
+            sum_error_Q30 = silk_SMLABB( sum_error_Q30, diff_Q15, diff_Q15 );
 
-            sum_error_Q26 = SKP_ADD_RSHIFT32( sum_error_Q26, sum_error_Q30, 4 );
+            sum_error_Q26 = silk_ADD_RSHIFT32( sum_error_Q26, sum_error_Q30, 4 );
 
-            SKP_assert( sum_error_Q26 >= 0 );
-            SKP_assert( sum_error_Q30 >= 0 );
+            silk_assert( sum_error_Q26 >= 0 );
+            silk_assert( sum_error_Q30 >= 0 );
         }
         err_Q26[ i ] = sum_error_Q26;
     }
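
In the loop above, codebook entries are stored in Q8 and the input NLSFs in Q15, so each entry is shifted up by 7 before subtracting; squaring the Q15 difference yields Q30, and the running error is kept in Q26 by dropping 4 bits per accumulated pair. A plain-C sketch of the same bookkeeping, folding the per-pair accumulation into a per-coefficient one (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Squared quantization error against one codebook vector, accumulated in Q26 */
    static int32_t nlsf_vq_err_q26(const int16_t *in_q15, const uint8_t *cb_q8, int order)
    {
        int32_t sum_q26 = 0;
        for (int m = 0; m < order; m++) {
            int32_t diff_q15 = (int32_t)in_q15[m] - ((int32_t)cb_q8[m] << 7);  /* Q8 -> Q15 */
            sum_q26 += (diff_q15 * diff_q15) >> 4;                             /* Q30 -> Q26 */
        }
        return sum_q26;
    }

    int main(void)
    {
        int16_t in[2] = { 4000, 9000 };
        uint8_t cb[2] = { 30, 72 };        /* 30<<7 = 3840, 72<<7 = 9216 */
        printf("%ld\n", (long)nlsf_vq_err_q26(in, cb, 2));
        return 0;
    }
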
diff --git a/silk/silk_NLSF_VQ_weights_laroia.c b/silk/silk_NLSF_VQ_weights_laroia.c
index 4b3a36c..ce7a3e5 100644
--- a/silk/silk_NLSF_VQ_weights_laroia.c
+++ b/silk/silk_NLSF_VQ_weights_laroia.c
@@ -48,33 +48,33 @@
     opus_int   k;
     opus_int32 tmp1_int, tmp2_int;
 
-    SKP_assert( D > 0 );
-    SKP_assert( ( D & 1 ) == 0 );
+    silk_assert( D > 0 );
+    silk_assert( ( D & 1 ) == 0 );
 
     /* First value */
-    tmp1_int = SKP_max_int( pNLSF_Q15[ 0 ], 1 );
-    tmp1_int = SKP_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp1_int );
-    tmp2_int = SKP_max_int( pNLSF_Q15[ 1 ] - pNLSF_Q15[ 0 ], 1 );
-    tmp2_int = SKP_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp2_int );
-    pNLSFW_Q_OUT[ 0 ] = (opus_int16)SKP_min_int( tmp1_int + tmp2_int, SKP_int16_MAX );
-    SKP_assert( pNLSFW_Q_OUT[ 0 ] > 0 );
+    tmp1_int = silk_max_int( pNLSF_Q15[ 0 ], 1 );
+    tmp1_int = silk_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp1_int );
+    tmp2_int = silk_max_int( pNLSF_Q15[ 1 ] - pNLSF_Q15[ 0 ], 1 );
+    tmp2_int = silk_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp2_int );
+    pNLSFW_Q_OUT[ 0 ] = (opus_int16)silk_min_int( tmp1_int + tmp2_int, silk_int16_MAX );
+    silk_assert( pNLSFW_Q_OUT[ 0 ] > 0 );
 
     /* Main loop */
     for( k = 1; k < D - 1; k += 2 ) {
-        tmp1_int = SKP_max_int( pNLSF_Q15[ k + 1 ] - pNLSF_Q15[ k ], 1 );
-        tmp1_int = SKP_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp1_int );
-        pNLSFW_Q_OUT[ k ] = (opus_int16)SKP_min_int( tmp1_int + tmp2_int, SKP_int16_MAX );
-        SKP_assert( pNLSFW_Q_OUT[ k ] > 0 );
+        tmp1_int = silk_max_int( pNLSF_Q15[ k + 1 ] - pNLSF_Q15[ k ], 1 );
+        tmp1_int = silk_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp1_int );
+        pNLSFW_Q_OUT[ k ] = (opus_int16)silk_min_int( tmp1_int + tmp2_int, silk_int16_MAX );
+        silk_assert( pNLSFW_Q_OUT[ k ] > 0 );
 
-        tmp2_int = SKP_max_int( pNLSF_Q15[ k + 2 ] - pNLSF_Q15[ k + 1 ], 1 );
-        tmp2_int = SKP_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp2_int );
-        pNLSFW_Q_OUT[ k + 1 ] = (opus_int16)SKP_min_int( tmp1_int + tmp2_int, SKP_int16_MAX );
-        SKP_assert( pNLSFW_Q_OUT[ k + 1 ] > 0 );
+        tmp2_int = silk_max_int( pNLSF_Q15[ k + 2 ] - pNLSF_Q15[ k + 1 ], 1 );
+        tmp2_int = silk_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp2_int );
+        pNLSFW_Q_OUT[ k + 1 ] = (opus_int16)silk_min_int( tmp1_int + tmp2_int, silk_int16_MAX );
+        silk_assert( pNLSFW_Q_OUT[ k + 1 ] > 0 );
     }
 
     /* Last value */
-    tmp1_int = SKP_max_int( ( 1 << 15 ) - pNLSF_Q15[ D - 1 ], 1 );
-    tmp1_int = SKP_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp1_int );
-    pNLSFW_Q_OUT[ D - 1 ] = (opus_int16)SKP_min_int( tmp1_int + tmp2_int, SKP_int16_MAX );
-    SKP_assert( pNLSFW_Q_OUT[ D - 1 ] > 0 );
+    tmp1_int = silk_max_int( ( 1 << 15 ) - pNLSF_Q15[ D - 1 ], 1 );
+    tmp1_int = silk_DIV32_16( 1 << ( 15 + NLSF_W_Q ), tmp1_int );
+    pNLSFW_Q_OUT[ D - 1 ] = (opus_int16)silk_min_int( tmp1_int + tmp2_int, silk_int16_MAX );
+    silk_assert( pNLSFW_Q_OUT[ D - 1 ] > 0 );
 }
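
The weights computed above are the Laroia-style inverse-harmonic weights: each NLSF gets weight 1/(distance to the lower neighbour) + 1/(distance to the upper neighbour), with 0 and 1 (0 and 32768 in Q15) as the outer boundaries and each distance clamped to avoid division by zero. A floating-point sketch on a normalized 0..1 scale (not part of the patch):

    #include <stdio.h>

    /* Inverse-harmonic weights: large weight when neighbouring NLSFs are close */
    static void laroia_weights(const double *f, double *w, int D)
    {
        for (int k = 0; k < D; k++) {
            double lo = (k == 0)     ? f[k]       : f[k] - f[k - 1];
            double hi = (k == D - 1) ? 1.0 - f[k] : f[k + 1] - f[k];
            w[k] = 1.0 / lo + 1.0 / hi;
        }
    }

    int main(void)
    {
        double f[4] = { 0.1, 0.25, 0.3, 0.7 }, w[4];
        laroia_weights(f, w, 4);
        for (int k = 0; k < 4; k++) printf("w[%d] = %.2f\n", k, w[k]);
        return 0;
    }
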
diff --git a/silk/silk_NLSF_decode.c b/silk/silk_NLSF_decode.c
index ebe5786..acd2c49 100644
--- a/silk/silk_NLSF_decode.c
+++ b/silk/silk_NLSF_decode.c
@@ -44,14 +44,14 @@
 
     out_Q10 = 0;
     for( i = order-1; i >= 0; i-- ) {
-        pred_Q10 = SKP_RSHIFT( SKP_SMULBB( out_Q10, (opus_int16)pred_coef_Q8[ i ] ), 8 );
-        out_Q10  = SKP_LSHIFT( indices[ i ], 10 );
+        pred_Q10 = silk_RSHIFT( silk_SMULBB( out_Q10, (opus_int16)pred_coef_Q8[ i ] ), 8 );
+        out_Q10  = silk_LSHIFT( indices[ i ], 10 );
         if( out_Q10 > 0 ) {
-            out_Q10 = SKP_SUB16( out_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+            out_Q10 = silk_SUB16( out_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
         } else if( out_Q10 < 0 ) {
-            out_Q10 = SKP_ADD16( out_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+            out_Q10 = silk_ADD16( out_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
         }
-        out_Q10  = SKP_SMLAWB( pred_Q10, out_Q10, quant_step_size_Q16 );
+        out_Q10  = silk_SMLAWB( pred_Q10, out_Q10, quant_step_size_Q16 );
         x_Q10[ i ] = out_Q10;
     }
 }
@@ -77,7 +77,7 @@
     /* Decode first stage */
     pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ NLSFIndices[ 0 ] * psNLSF_CB->order ];
     for( i = 0; i < psNLSF_CB->order; i++ ) {
-        pNLSF_Q15[ i ] = SKP_LSHIFT( ( opus_int16 )pCB_element[ i ], 7 );
+        pNLSF_Q15[ i ] = silk_LSHIFT( ( opus_int16 )pCB_element[ i ], 7 );
     }
 
     /* Unpack entropy table indices and predictor for current CB1 index */
@@ -91,9 +91,9 @@
 
     /* Apply inverse square-rooted weights and add to output */
     for( i = 0; i < psNLSF_CB->order; i++ ) {
-        W_tmp_Q9 = silk_SQRT_APPROX( SKP_LSHIFT( ( opus_int32 )W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
-        NLSF_Q15_tmp = SKP_ADD32( pNLSF_Q15[ i ], SKP_DIV32_16( SKP_LSHIFT( ( opus_int32 )res_Q10[ i ], 14 ), W_tmp_Q9 ) );
-        pNLSF_Q15[ i ] = (opus_int16)SKP_LIMIT( NLSF_Q15_tmp, 0, 32767 );
+        W_tmp_Q9 = silk_SQRT_APPROX( silk_LSHIFT( ( opus_int32 )W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
+        NLSF_Q15_tmp = silk_ADD32( pNLSF_Q15[ i ], silk_DIV32_16( silk_LSHIFT( ( opus_int32 )res_Q10[ i ], 14 ), W_tmp_Q9 ) );
+        pNLSF_Q15[ i ] = (opus_int16)silk_LIMIT( NLSF_Q15_tmp, 0, 32767 );
     }
 
     /* NLSF stabilization */
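
The first hunk in this file is the backward residual de-quantizer: it walks the indices from the highest coefficient down, forms a Q10 prediction from the previous output and the Q8 predictor, removes the level-adjust offset from the index, and scales by the Q16 quantization step. A plain-C sketch under those Q-format assumptions (the level-adjust constant below assumes NLSF_QUANT_LEVEL_ADJ is 0.1, i.e. 102 in Q10; not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define LEVEL_ADJ_Q10 102   /* assumed Q10 value of NLSF_QUANT_LEVEL_ADJ (0.1) */

    static void residual_dequant(int16_t *x_q10, const int8_t *idx,
                                 const uint8_t *pred_q8, int32_t step_q16, int order)
    {
        int32_t out_q10 = 0;
        for (int i = order - 1; i >= 0; i--) {
            int32_t pred_q10 = (out_q10 * (int32_t)pred_q8[i]) >> 8;   /* backward prediction */
            out_q10 = (int32_t)idx[i] << 10;
            if (out_q10 > 0)      out_q10 -= LEVEL_ADJ_Q10;            /* undo level adjust */
            else if (out_q10 < 0) out_q10 += LEVEL_ADJ_Q10;
            out_q10 = pred_q10 + (int32_t)(((int64_t)out_q10 * step_q16) >> 16);
            x_q10[i] = (int16_t)out_q10;
        }
    }

    int main(void)
    {
        int8_t  idx[4]  = { 1, -2, 0, 3 };
        uint8_t pred[4] = { 190, 190, 190, 190 };
        int16_t x[4];
        residual_dequant(x, idx, pred, 1 << 14 /* step = 0.25 in Q16 */, 4);
        for (int i = 0; i < 4; i++) printf("%d ", (int)x[i]);
        printf("\n");
        return 0;
    }
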
diff --git a/silk/silk_NLSF_del_dec_quant.c b/silk/silk_NLSF_del_dec_quant.c
index 380bb86..03a9cf5 100644
--- a/silk/silk_NLSF_del_dec_quant.c
+++ b/silk/silk_NLSF_del_dec_quant.c
@@ -56,40 +56,40 @@
     opus_int32       RD_max_Q25[       NLSF_QUANT_DEL_DEC_STATES ];
     const opus_uint8 *rates_Q5;
 
-    SKP_assert( (NLSF_QUANT_DEL_DEC_STATES & (NLSF_QUANT_DEL_DEC_STATES-1)) == 0 );     /* must be power of two */
+    silk_assert( (NLSF_QUANT_DEL_DEC_STATES & (NLSF_QUANT_DEL_DEC_STATES-1)) == 0 );     /* must be power of two */
 
     nStates = 1;
     RD_Q25[ 0 ] = 0;
     prev_out_Q10[ 0 ] = 0;
     for( i = order - 1; ; i-- ) {
         rates_Q5 = &ec_rates_Q5[ ec_ix[ i ] ];
-        pred_coef_Q16 = SKP_LSHIFT( (opus_int32)pred_coef_Q8[ i ], 8 );
+        pred_coef_Q16 = silk_LSHIFT( (opus_int32)pred_coef_Q8[ i ], 8 );
         in_Q10 = x_Q10[ i ];
         for( j = 0; j < nStates; j++ ) {
-            pred_Q10 = SKP_SMULWB( pred_coef_Q16, prev_out_Q10[ j ] );
-            res_Q10  = SKP_SUB16( in_Q10, pred_Q10 );
-            ind_tmp  = SKP_SMULWB( inv_quant_step_size_Q6, res_Q10 );
-            ind_tmp  = SKP_LIMIT( ind_tmp, -NLSF_QUANT_MAX_AMPLITUDE_EXT, NLSF_QUANT_MAX_AMPLITUDE_EXT-1 );
+            pred_Q10 = silk_SMULWB( pred_coef_Q16, prev_out_Q10[ j ] );
+            res_Q10  = silk_SUB16( in_Q10, pred_Q10 );
+            ind_tmp  = silk_SMULWB( inv_quant_step_size_Q6, res_Q10 );
+            ind_tmp  = silk_LIMIT( ind_tmp, -NLSF_QUANT_MAX_AMPLITUDE_EXT, NLSF_QUANT_MAX_AMPLITUDE_EXT-1 );
             ind[ j ][ i ] = (opus_int8)ind_tmp;
 
             /* compute outputs for ind_tmp and ind_tmp + 1 */
-            out0_Q10 = SKP_LSHIFT( ind_tmp, 10 );
-            out1_Q10 = SKP_ADD16( out0_Q10, 1024 );
+            out0_Q10 = silk_LSHIFT( ind_tmp, 10 );
+            out1_Q10 = silk_ADD16( out0_Q10, 1024 );
             if( ind_tmp > 0 ) {
-                out0_Q10 = SKP_SUB16( out0_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
-                out1_Q10 = SKP_SUB16( out1_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+                out0_Q10 = silk_SUB16( out0_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+                out1_Q10 = silk_SUB16( out1_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
             } else if( ind_tmp == 0 ) {
-                out1_Q10 = SKP_SUB16( out1_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+                out1_Q10 = silk_SUB16( out1_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
             } else if( ind_tmp == -1 ) {
-                out0_Q10 = SKP_ADD16( out0_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+                out0_Q10 = silk_ADD16( out0_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
             } else {
-                out0_Q10 = SKP_ADD16( out0_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
-                out1_Q10 = SKP_ADD16( out1_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+                out0_Q10 = silk_ADD16( out0_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
+                out1_Q10 = silk_ADD16( out1_Q10, SILK_FIX_CONST( NLSF_QUANT_LEVEL_ADJ, 10 ) );
             }
-            out0_Q10  = SKP_SMULWB( out0_Q10, quant_step_size_Q16 );
-            out1_Q10  = SKP_SMULWB( out1_Q10, quant_step_size_Q16 );
-            out0_Q10  = SKP_ADD16( out0_Q10, pred_Q10 );
-            out1_Q10  = SKP_ADD16( out1_Q10, pred_Q10 );
+            out0_Q10  = silk_SMULWB( out0_Q10, quant_step_size_Q16 );
+            out1_Q10  = silk_SMULWB( out1_Q10, quant_step_size_Q16 );
+            out0_Q10  = silk_ADD16( out0_Q10, pred_Q10 );
+            out1_Q10  = silk_ADD16( out1_Q10, pred_Q10 );
             prev_out_Q10[ j           ] = out0_Q10;
             prev_out_Q10[ j + nStates ] = out1_Q10;
 
@@ -99,26 +99,26 @@
                     rate0_Q5 = rates_Q5[ ind_tmp + NLSF_QUANT_MAX_AMPLITUDE ];
                     rate1_Q5 = 280;
                 } else {
-                    rate0_Q5 = SKP_SMLABB( 280 - 43 * NLSF_QUANT_MAX_AMPLITUDE, 43, ind_tmp );
-                    rate1_Q5 = SKP_ADD16( rate0_Q5, 43 );
+                    rate0_Q5 = silk_SMLABB( 280 - 43 * NLSF_QUANT_MAX_AMPLITUDE, 43, ind_tmp );
+                    rate1_Q5 = silk_ADD16( rate0_Q5, 43 );
                 }
             } else if( ind_tmp <= -NLSF_QUANT_MAX_AMPLITUDE ) {
                 if( ind_tmp == -NLSF_QUANT_MAX_AMPLITUDE ) {
                     rate0_Q5 = 280;
                     rate1_Q5 = rates_Q5[ ind_tmp + 1 + NLSF_QUANT_MAX_AMPLITUDE ];
                 } else {
-                    rate0_Q5 = SKP_SMLABB( 280 - 43 * NLSF_QUANT_MAX_AMPLITUDE, -43, ind_tmp );
-                    rate1_Q5 = SKP_SUB16( rate0_Q5, 43 );
+                    rate0_Q5 = silk_SMLABB( 280 - 43 * NLSF_QUANT_MAX_AMPLITUDE, -43, ind_tmp );
+                    rate1_Q5 = silk_SUB16( rate0_Q5, 43 );
                 }
             } else {
                 rate0_Q5 = rates_Q5[ ind_tmp +     NLSF_QUANT_MAX_AMPLITUDE ];
                 rate1_Q5 = rates_Q5[ ind_tmp + 1 + NLSF_QUANT_MAX_AMPLITUDE ];
             }
             RD_tmp_Q25            = RD_Q25[ j ];
-            diff_Q10              = SKP_SUB16( in_Q10, out0_Q10 );
-            RD_Q25[ j ]           = SKP_SMLABB( SKP_MLA( RD_tmp_Q25, SKP_SMULBB( diff_Q10, diff_Q10 ), w_Q5[ i ] ), mu_Q20, rate0_Q5 );
-            diff_Q10              = SKP_SUB16( in_Q10, out1_Q10 );
-            RD_Q25[ j + nStates ] = SKP_SMLABB( SKP_MLA( RD_tmp_Q25, SKP_SMULBB( diff_Q10, diff_Q10 ), w_Q5[ i ] ), mu_Q20, rate1_Q5 );
+            diff_Q10              = silk_SUB16( in_Q10, out0_Q10 );
+            RD_Q25[ j ]           = silk_SMLABB( silk_MLA( RD_tmp_Q25, silk_SMULBB( diff_Q10, diff_Q10 ), w_Q5[ i ] ), mu_Q20, rate0_Q5 );
+            diff_Q10              = silk_SUB16( in_Q10, out1_Q10 );
+            RD_Q25[ j + nStates ] = silk_SMLABB( silk_MLA( RD_tmp_Q25, silk_SMULBB( diff_Q10, diff_Q10 ), w_Q5[ i ] ), mu_Q20, rate1_Q5 );
         }
 
         if( nStates < NLSF_QUANT_DEL_DEC_STATES ) {
@@ -126,7 +126,7 @@
             for( j = 0; j < nStates; j++ ) {
                 ind[ j + nStates ][ i ] = ind[ j ][ i ] + 1;
             }
-            nStates = SKP_LSHIFT( nStates, 1 );
+            nStates = silk_LSHIFT( nStates, 1 );
             for( j = nStates; j < NLSF_QUANT_DEL_DEC_STATES; j++ ) {
                 ind[ j ][ i ] = ind[ j - nStates ][ i ];
             }
@@ -152,7 +152,7 @@
             /* compare the highest RD values of the winning half with the lowest one in the losing half, and copy if necessary */
             /* afterwards ind_sort[] will contain the indices of the NLSF_QUANT_DEL_DEC_STATES winning RD values */
             while( 1 ) {
-                min_max_Q25 = SKP_int32_MAX;
+                min_max_Q25 = silk_int32_MAX;
                 max_min_Q25 = 0;
                 ind_min_max = 0;
                 ind_max_min = 0;
@@ -174,17 +174,17 @@
                 RD_Q25[       ind_max_min ] = RD_Q25[       ind_min_max + NLSF_QUANT_DEL_DEC_STATES ];
                 prev_out_Q10[ ind_max_min ] = prev_out_Q10[ ind_min_max + NLSF_QUANT_DEL_DEC_STATES ];
                 RD_min_Q25[   ind_max_min ] = 0;
-                RD_max_Q25[   ind_min_max ] = SKP_int32_MAX;
-                SKP_memcpy( ind[ ind_max_min ], ind[ ind_min_max ], MAX_LPC_ORDER * sizeof( opus_int8 ) );
+                RD_max_Q25[   ind_min_max ] = silk_int32_MAX;
+                silk_memcpy( ind[ ind_max_min ], ind[ ind_min_max ], MAX_LPC_ORDER * sizeof( opus_int8 ) );
             }
             /* increment index if it comes from the upper half */
             for( j = 0; j < NLSF_QUANT_DEL_DEC_STATES; j++ ) {
-                ind[ j ][ i ] += SKP_RSHIFT( ind_sort[ j ], NLSF_QUANT_DEL_DEC_STATES_LOG2 );
+                ind[ j ][ i ] += silk_RSHIFT( ind_sort[ j ], NLSF_QUANT_DEL_DEC_STATES_LOG2 );
             }
         } else {  /* i == 0 */
             /* last sample: find winner, copy indices and return RD value */
             ind_tmp = 0;
-            min_Q25 = SKP_int32_MAX;
+            min_Q25 = silk_int32_MAX;
             for( j = 0; j < 2 * NLSF_QUANT_DEL_DEC_STATES; j++ ) {
                 if( min_Q25 > RD_Q25[ j ] ) {
                     min_Q25 = RD_Q25[ j ];
@@ -193,12 +193,12 @@
             }
             for( j = 0; j < order; j++ ) {
                 indices[ j ] = ind[ ind_tmp & ( NLSF_QUANT_DEL_DEC_STATES - 1 ) ][ j ];
-                SKP_assert( indices[ j ] >= -NLSF_QUANT_MAX_AMPLITUDE_EXT );
-                SKP_assert( indices[ j ] <=  NLSF_QUANT_MAX_AMPLITUDE_EXT );
+                silk_assert( indices[ j ] >= -NLSF_QUANT_MAX_AMPLITUDE_EXT );
+                silk_assert( indices[ j ] <=  NLSF_QUANT_MAX_AMPLITUDE_EXT );
             }
-            indices[ 0 ] += SKP_RSHIFT( ind_tmp, NLSF_QUANT_DEL_DEC_STATES_LOG2 );
-            SKP_assert( indices[ 0 ] <= NLSF_QUANT_MAX_AMPLITUDE_EXT );
-            SKP_assert( min_Q25 >= 0 );
+            indices[ 0 ] += silk_RSHIFT( ind_tmp, NLSF_QUANT_DEL_DEC_STATES_LOG2 );
+            silk_assert( indices[ 0 ] <= NLSF_QUANT_MAX_AMPLITUDE_EXT );
+            silk_assert( min_Q25 >= 0 );
             return min_Q25;
         }
     }
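
Each delayed-decision state above accumulates a rate-distortion cost of the form RD += w * err^2 + mu * rate, kept in Q25 with the error in Q10, the error weight in Q5, the rate in Q5 and mu in Q20. Stripped of the fixed-point bookkeeping, the per-coefficient update is simply (a sketch, not part of the patch):

    #include <stdio.h>

    /* Rate-distortion accumulation: weighted squared error plus rate penalty */
    static double rd_update(double rd, double err, double w, double mu, double rate)
    {
        return rd + w * err * err + mu * rate;
    }

    int main(void)
    {
        double rd = 0.0;
        rd = rd_update(rd, 0.03, 2.0, 0.001, 43.0);   /* one quantized coefficient */
        rd = rd_update(rd, -0.01, 1.5, 0.001, 35.0);  /* the next coefficient */
        printf("RD = %f\n", rd);
        return 0;
    }
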
diff --git a/silk/silk_NLSF_encode.c b/silk/silk_NLSF_encode.c
index 6f06745..83a26d1 100644
--- a/silk/silk_NLSF_encode.c
+++ b/silk/silk_NLSF_encode.c
@@ -67,12 +67,12 @@
     DEBUG_STORE_DATA( WNLSF.dat,   pW_Q5,        psNLSF_CB->order * sizeof( opus_int16 ) );
     DEBUG_STORE_DATA( NLSF_mu.dat, &NLSF_mu_Q20,                    sizeof( opus_int   ) );
     DEBUG_STORE_DATA( sigType.dat, &signalType,                     sizeof( opus_int   ) );
-    SKP_memcpy(pNLSF_Q15_orig, pNLSF_Q15, sizeof( pNLSF_Q15_orig ));
+    silk_memcpy(pNLSF_Q15_orig, pNLSF_Q15, sizeof( pNLSF_Q15_orig ));
 #endif
 
-    SKP_assert( nSurvivors <= NLSF_VQ_MAX_SURVIVORS );
-    SKP_assert( signalType >= 0 && signalType <= 2 );
-    SKP_assert( NLSF_mu_Q20 <= 32767 && NLSF_mu_Q20 >= 0 );
+    silk_assert( nSurvivors <= NLSF_VQ_MAX_SURVIVORS );
+    silk_assert( signalType >= 0 && signalType <= 2 );
+    silk_assert( NLSF_mu_Q20 <= 32767 && NLSF_mu_Q20 >= 0 );
 
     /* NLSF stabilization */
     silk_NLSF_stabilize( pNLSF_Q15, psNLSF_CB->deltaMin_Q15, psNLSF_CB->order );
@@ -90,7 +90,7 @@
         /* Residual after first stage */
         pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ ind1 * psNLSF_CB->order ];
         for( i = 0; i < psNLSF_CB->order; i++ ) {
-            NLSF_tmp_Q15[ i ] = SKP_LSHIFT16( ( opus_int16 )pCB_element[ i ], 7 );
+            NLSF_tmp_Q15[ i ] = silk_LSHIFT16( ( opus_int16 )pCB_element[ i ], 7 );
             res_Q15[ i ] = pNLSF_Q15[ i ] - NLSF_tmp_Q15[ i ];
         }
 
@@ -99,13 +99,13 @@
 
         /* Apply square-rooted weights */
         for( i = 0; i < psNLSF_CB->order; i++ ) {
-            W_tmp_Q9 = silk_SQRT_APPROX( SKP_LSHIFT( ( opus_int32 )W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
-            res_Q10[ i ] = ( opus_int16 )SKP_RSHIFT( SKP_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 );
+            W_tmp_Q9 = silk_SQRT_APPROX( silk_LSHIFT( ( opus_int32 )W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
+            res_Q10[ i ] = ( opus_int16 )silk_RSHIFT( silk_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 );
         }
 
         /* Modify input weights accordingly */
         for( i = 0; i < psNLSF_CB->order; i++ ) {
-            W_adj_Q5[ i ] = SKP_DIV32_16( SKP_LSHIFT( ( opus_int32 )pW_QW[ i ], 5 ), W_tmp_QW[ i ] );
+            W_adj_Q5[ i ] = silk_DIV32_16( silk_LSHIFT( ( opus_int32 )pW_QW[ i ], 5 ), W_tmp_QW[ i ] );
         }
 
         /* Unpack entropy table indices and predictor for current CB1 index */
@@ -123,14 +123,14 @@
             prob_Q8 = iCDF_ptr[ ind1 - 1 ] - iCDF_ptr[ ind1 ];
         }
         bits_q7 = ( 8 << 7 ) - silk_lin2log( prob_Q8 );
-        RD_Q25[ s ] = SKP_SMLABB( RD_Q25[ s ], bits_q7, SKP_RSHIFT( NLSF_mu_Q20, 2 ) );
+        RD_Q25[ s ] = silk_SMLABB( RD_Q25[ s ], bits_q7, silk_RSHIFT( NLSF_mu_Q20, 2 ) );
     }
 
     /* Find the lowest rate-distortion error */
     silk_insertion_sort_increasing( RD_Q25, &bestIndex, nSurvivors, 1 );
 
     NLSFIndices[ 0 ] = ( opus_int8 )tempIndices1[ bestIndex ];
-    SKP_memcpy( &NLSFIndices[ 1 ], &tempIndices2[ bestIndex * MAX_LPC_ORDER ], psNLSF_CB->order * sizeof( opus_int8 ) );
+    silk_memcpy( &NLSFIndices[ 1 ], &tempIndices2[ bestIndex * MAX_LPC_ORDER ], psNLSF_CB->order * sizeof( opus_int8 ) );
 
     /* Decode */
     silk_NLSF_decode( pNLSF_Q15, NLSFIndices, psNLSF_CB );
@@ -144,16 +144,16 @@
 
         pCB_element = &psNLSF_CB->CB1_NLSF_Q8[ ind1 * psNLSF_CB->order ];
         for( i = 0; i < psNLSF_CB->order; i++ ) {
-            NLSF_tmp_Q15[ i ] = SKP_LSHIFT16( ( opus_int16 )pCB_element[ i ], 7 );
+            NLSF_tmp_Q15[ i ] = silk_LSHIFT16( ( opus_int16 )pCB_element[ i ], 7 );
         }
         silk_NLSF_VQ_weights_laroia( W_tmp_QW, NLSF_tmp_Q15, psNLSF_CB->order );
         for( i = 0; i < psNLSF_CB->order; i++ ) {
-            W_tmp_Q9 = silk_SQRT_APPROX( SKP_LSHIFT( ( opus_int32 )W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
+            W_tmp_Q9 = silk_SQRT_APPROX( silk_LSHIFT( ( opus_int32 )W_tmp_QW[ i ], 18 - NLSF_W_Q ) );
             res_Q15[ i ] = pNLSF_Q15_orig[ i ] - NLSF_tmp_Q15[ i ];
-            res_Q10[ i ] = (opus_int16)SKP_RSHIFT( SKP_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 );
+            res_Q10[ i ] = (opus_int16)silk_RSHIFT( silk_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 );
             DEBUG_STORE_DATA( NLSF_res_q10.dat, &res_Q10[ i ], sizeof( opus_int16 ) );
             res_Q15[ i ] = pNLSF_Q15[ i ] - NLSF_tmp_Q15[ i ];
-            res_Q10[ i ] = (opus_int16)SKP_RSHIFT( SKP_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 );
+            res_Q10[ i ] = (opus_int16)silk_RSHIFT( silk_SMULBB( res_Q15[ i ], W_tmp_Q9 ), 14 );
             DEBUG_STORE_DATA( NLSF_resq_q10.dat, &res_Q10[ i ], sizeof( opus_int16 ) );
         }
 
@@ -170,9 +170,9 @@
         }
         Rate_Q7 = ( 8 << 7 ) - silk_lin2log( prob_Q8 );
         for( i = 0; i < psNLSF_CB->order; i++ ) {
-            Rate_Q7 += ((int)psNLSF_CB->ec_Rates_Q5[ ec_ix[ i ] + SKP_LIMIT( NLSFIndices[ i + 1 ] + NLSF_QUANT_MAX_AMPLITUDE, 0, 2 * NLSF_QUANT_MAX_AMPLITUDE ) ] ) << 2;
-            if( SKP_abs( NLSFIndices[ i + 1 ] ) >= NLSF_QUANT_MAX_AMPLITUDE ) {
-                Rate_Q7 += 128 << ( SKP_abs( NLSFIndices[ i + 1 ] ) - NLSF_QUANT_MAX_AMPLITUDE );
+            Rate_Q7 += ((int)psNLSF_CB->ec_Rates_Q5[ ec_ix[ i ] + silk_LIMIT( NLSFIndices[ i + 1 ] + NLSF_QUANT_MAX_AMPLITUDE, 0, 2 * NLSF_QUANT_MAX_AMPLITUDE ) ] ) << 2;
+            if( silk_abs( NLSFIndices[ i + 1 ] ) >= NLSF_QUANT_MAX_AMPLITUDE ) {
+                Rate_Q7 += 128 << ( silk_abs( NLSFIndices[ i + 1 ] ) - NLSF_QUANT_MAX_AMPLITUDE );
             }
         }
         RD_dec_Q22 = Dist_Q22_dec + Rate_Q7 * NLSF_mu_Q20 >> 5;
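
The first-stage rate used in this file is derived from the index probability: with prob_Q8 being the probability scaled by 256, bits_q7 = (8 << 7) - silk_lin2log(prob_Q8) is just -log2(probability) expressed in Q7 bits, assuming silk_lin2log(x) approximates 128*log2(x). A floating-point equivalent (sketch only, not part of the patch):

    #include <math.h>
    #include <stdio.h>

    /* Information content of a first-stage index with probability prob_q8/256 */
    static double first_stage_bits(int prob_q8)
    {
        return -log2((double)prob_q8 / 256.0);
    }

    int main(void)
    {
        printf("%.2f bits\n", first_stage_bits(64));   /* probability 0.25 -> 2 bits */
        return 0;
    }
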
diff --git a/silk/silk_NLSF_stabilize.c b/silk/silk_NLSF_stabilize.c
index a7e7ea3..fd831f1 100644
--- a/silk/silk_NLSF_stabilize.c
+++ b/silk/silk_NLSF_stabilize.c
@@ -55,7 +55,7 @@
     opus_int32 diff_Q15, min_diff_Q15, min_center_Q15, max_center_Q15;
 
     /* This is necessary to ensure an output within range of a opus_int16 */
-    SKP_assert( NDeltaMin_Q15[L] >= 1 );
+    silk_assert( NDeltaMin_Q15[L] >= 1 );
 
     for( loops = 0; loops < MAX_LOOPS; loops++ ) {
         /**************************/
@@ -100,19 +100,19 @@
             for( k = 0; k < I; k++ ) {
                 min_center_Q15 += NDeltaMin_Q15[k];
             }
-            min_center_Q15 += SKP_RSHIFT( NDeltaMin_Q15[I], 1 );
+            min_center_Q15 += silk_RSHIFT( NDeltaMin_Q15[I], 1 );
 
             /* Find the upper extreme for the location of the current center frequency */
             max_center_Q15 = 1 << 15;
             for( k = L; k > I; k-- ) {
                 max_center_Q15 -= NDeltaMin_Q15[k];
             }
-            max_center_Q15 -= SKP_RSHIFT( NDeltaMin_Q15[I], 1 );
+            max_center_Q15 -= silk_RSHIFT( NDeltaMin_Q15[I], 1 );
 
             /* Move apart, sorted by value, keeping the same center frequency */
-            center_freq_Q15 = (opus_int16)SKP_LIMIT_32( SKP_RSHIFT_ROUND( (opus_int32)NLSF_Q15[I-1] + (opus_int32)NLSF_Q15[I], 1 ),
+            center_freq_Q15 = (opus_int16)silk_LIMIT_32( silk_RSHIFT_ROUND( (opus_int32)NLSF_Q15[I-1] + (opus_int32)NLSF_Q15[I], 1 ),
                 min_center_Q15, max_center_Q15 );
-            NLSF_Q15[I-1] = center_freq_Q15 - SKP_RSHIFT( NDeltaMin_Q15[I], 1 );
+            NLSF_Q15[I-1] = center_freq_Q15 - silk_RSHIFT( NDeltaMin_Q15[I], 1 );
             NLSF_Q15[I] = NLSF_Q15[I-1] + NDeltaMin_Q15[I];
         }
     }
@@ -126,17 +126,17 @@
         silk_insertion_sort_increasing_all_values_int16( &NLSF_Q15[0], L );
 
         /* First NLSF should be no less than NDeltaMin[0] */
-        NLSF_Q15[0] = SKP_max_int( NLSF_Q15[0], NDeltaMin_Q15[0] );
+        NLSF_Q15[0] = silk_max_int( NLSF_Q15[0], NDeltaMin_Q15[0] );
 
         /* Keep delta_min distance between the NLSFs */
         for( i = 1; i < L; i++ )
-            NLSF_Q15[i] = SKP_max_int( NLSF_Q15[i], NLSF_Q15[i-1] + NDeltaMin_Q15[i] );
+            NLSF_Q15[i] = silk_max_int( NLSF_Q15[i], NLSF_Q15[i-1] + NDeltaMin_Q15[i] );
 
         /* Last NLSF should be no higher than 1 - NDeltaMin[L] */
-        NLSF_Q15[L-1] = SKP_min_int( NLSF_Q15[L-1], (1<<15) - NDeltaMin_Q15[L] );
+        NLSF_Q15[L-1] = silk_min_int( NLSF_Q15[L-1], (1<<15) - NDeltaMin_Q15[L] );
 
         /* Keep NDeltaMin distance between the NLSFs */
         for( i = L-2; i >= 0; i-- )
-            NLSF_Q15[i] = SKP_min_int( NLSF_Q15[i], NLSF_Q15[i+1] - NDeltaMin_Q15[i+1] );
+            NLSF_Q15[i] = silk_min_int( NLSF_Q15[i], NLSF_Q15[i+1] - NDeltaMin_Q15[i+1] );
     }
 }
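
The final hunk in this file is the give-up path taken when the gradual adjustment has not converged after MAX_LOOPS iterations: sort the NLSFs, then enforce the minimum spacing in one forward and one backward sweep. A plain-C sketch of that fall-back pass, with qsort standing in for the insertion sort (not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_i16(const void *a, const void *b)
    {
        return *(const short *)a - *(const short *)b;
    }

    /* Fall-back stabilization: sort, then clamp spacing forward and backward */
    static void stabilize_fallback(short *nlsf, const short *dmin, int L)
    {
        qsort(nlsf, (size_t)L, sizeof(short), cmp_i16);
        if (nlsf[0] < dmin[0]) nlsf[0] = dmin[0];                       /* floor for first NLSF */
        for (int i = 1; i < L; i++)                                     /* forward sweep */
            if (nlsf[i] < nlsf[i-1] + dmin[i]) nlsf[i] = (short)(nlsf[i-1] + dmin[i]);
        if (nlsf[L-1] > (1 << 15) - dmin[L]) nlsf[L-1] = (short)((1 << 15) - dmin[L]);
        for (int i = L - 2; i >= 0; i--)                                /* backward sweep */
            if (nlsf[i] > nlsf[i+1] - dmin[i+1]) nlsf[i] = (short)(nlsf[i+1] - dmin[i+1]);
    }

    int main(void)
    {
        short nlsf[3] = { 5000, 4900, 20000 };
        short dmin[4] = { 250, 250, 250, 250 };     /* dmin has L+1 entries */
        stabilize_fallback(nlsf, dmin, 3);
        printf("%d %d %d\n", nlsf[0], nlsf[1], nlsf[2]);
        return 0;
    }
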
diff --git a/silk/silk_NLSF_unpack.c b/silk/silk_NLSF_unpack.c
index 0021b2c..00845a2 100644
--- a/silk/silk_NLSF_unpack.c
+++ b/silk/silk_NLSF_unpack.c
@@ -46,10 +46,10 @@
     ec_sel_ptr = &psNLSF_CB->ec_sel[ CB1_index * psNLSF_CB->order / 2 ];
     for( i = 0; i < psNLSF_CB->order; i += 2 ) {
         entry = *ec_sel_ptr++;
-        ec_ix  [ i     ] = SKP_SMULBB( SKP_RSHIFT( entry, 1 ) & 7, 2 * NLSF_QUANT_MAX_AMPLITUDE + 1 );
+        ec_ix  [ i     ] = silk_SMULBB( silk_RSHIFT( entry, 1 ) & 7, 2 * NLSF_QUANT_MAX_AMPLITUDE + 1 );
         pred_Q8[ i     ] = psNLSF_CB->pred_Q8[ i + ( entry & 1 ) * ( psNLSF_CB->order - 1 ) ];
-        ec_ix  [ i + 1 ] = SKP_SMULBB( SKP_RSHIFT( entry, 5 ) & 7, 2 * NLSF_QUANT_MAX_AMPLITUDE + 1 );
-        pred_Q8[ i + 1 ] = psNLSF_CB->pred_Q8[ i + ( SKP_RSHIFT( entry, 4 ) & 1 ) * ( psNLSF_CB->order - 1 ) + 1 ];
+        ec_ix  [ i + 1 ] = silk_SMULBB( silk_RSHIFT( entry, 5 ) & 7, 2 * NLSF_QUANT_MAX_AMPLITUDE + 1 );
+        pred_Q8[ i + 1 ] = psNLSF_CB->pred_Q8[ i + ( silk_RSHIFT( entry, 4 ) & 1 ) * ( psNLSF_CB->order - 1 ) + 1 ];
     }
 }
 
diff --git a/silk/silk_NSQ.c b/silk/silk_NSQ.c
index 9612a0a..e32fd61 100644
--- a/silk/silk_NSQ.c
+++ b/silk/silk_NSQ.c
@@ -98,7 +98,7 @@
     /* Set unvoiced lag to the previous one, overwrite later for voiced */
     lag = NSQ->lagPrev;
 
-    SKP_assert( NSQ->prev_inv_gain_Q16 != 0 );
+    silk_assert( NSQ->prev_inv_gain_Q16 != 0 );
 
     offset_Q10 = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ];
 
@@ -118,9 +118,9 @@
         AR_shp_Q13 = &AR2_Q13[     k * MAX_SHAPE_LPC_ORDER ];
 
         /* Noise shape parameters */
-        SKP_assert( HarmShapeGain_Q14[ k ] >= 0 );
-        HarmShapeFIRPacked_Q14  =                          SKP_RSHIFT( HarmShapeGain_Q14[ k ], 2 );
-        HarmShapeFIRPacked_Q14 |= SKP_LSHIFT( ( opus_int32 )SKP_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 );
+        silk_assert( HarmShapeGain_Q14[ k ] >= 0 );
+        HarmShapeFIRPacked_Q14  =                          silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 );
+        HarmShapeFIRPacked_Q14 |= silk_LSHIFT( ( opus_int32 )silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 );
 
         NSQ->rewhite_flag = 0;
         if( psIndices->signalType == TYPE_VOICED ) {
@@ -128,10 +128,10 @@
             lag = pitchL[ k ];
 
             /* Re-whitening */
-            if( ( k & ( 3 - SKP_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) {
+            if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) {
                 /* Rewhiten with new A coefs */
                 start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2;
-                SKP_assert( start_idx > 0 );
+                silk_assert( start_idx > 0 );
 
                 silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ],
                     A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder );
@@ -156,8 +156,8 @@
     NSQ->lagPrev = pitchL[ psEncC->nb_subfr - 1 ];
 
     /* Save quantized speech and noise shaping signals */
-    SKP_memmove( NSQ->xq,           &NSQ->xq[           psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) );
-    SKP_memmove( NSQ->sLTP_shp_Q10, &NSQ->sLTP_shp_Q10[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) );
+    silk_memmove( NSQ->xq,           &NSQ->xq[           psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) );
+    silk_memmove( NSQ->sLTP_shp_Q10, &NSQ->sLTP_shp_Q10[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) );
 
 #ifdef SAVE_ALL_INTERNAL_DATA
     DEBUG_STORE_DATA( xq.dat,       &pxq[ -psEncC->frame_length ],       psEncC->frame_length * sizeof( opus_int16 ) );
@@ -206,158 +206,158 @@
 
     for( i = 0; i < length; i++ ) {
         /* Generate dither */
-        NSQ->rand_seed = SKP_RAND( NSQ->rand_seed );
+        NSQ->rand_seed = silk_RAND( NSQ->rand_seed );
 
         /* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
-        dither = SKP_RSHIFT( NSQ->rand_seed, 31 );
+        dither = silk_RSHIFT( NSQ->rand_seed, 31 );
 
         /* Short-term prediction */
-        SKP_assert( ( predictLPCOrder  & 1 ) == 0 );    /* check that order is even */
-        SKP_assert( ( (opus_int64)a_Q12 & 3 ) == 0 );    /* check that array starts at 4-byte aligned address */
-        SKP_assert( predictLPCOrder >= 10 );            /* check that unrolling works */
+        silk_assert( ( predictLPCOrder  & 1 ) == 0 );    /* check that order is even */
+        silk_assert( ( (opus_int64)a_Q12 & 3 ) == 0 );    /* check that array starts at 4-byte aligned address */
+        silk_assert( predictLPCOrder >= 10 );            /* check that unrolling works */
 
         /* Partially unrolled */
-        LPC_pred_Q10 = SKP_SMULWB(               psLPC_Q14[  0 ], a_Q12[ 0 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -1 ], a_Q12[ 1 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -2 ], a_Q12[ 2 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -3 ], a_Q12[ 3 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -4 ], a_Q12[ 4 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -5 ], a_Q12[ 5 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -6 ], a_Q12[ 6 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -7 ], a_Q12[ 7 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -8 ], a_Q12[ 8 ] );
-        LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -9 ], a_Q12[ 9 ] );
+        LPC_pred_Q10 = silk_SMULWB(               psLPC_Q14[  0 ], a_Q12[ 0 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -1 ], a_Q12[ 1 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -2 ], a_Q12[ 2 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -3 ], a_Q12[ 3 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -4 ], a_Q12[ 4 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -5 ], a_Q12[ 5 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -6 ], a_Q12[ 6 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -7 ], a_Q12[ 7 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -8 ], a_Q12[ 8 ] );
+        LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -9 ], a_Q12[ 9 ] );
         for( j = 10; j < predictLPCOrder; j ++ ) {
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -j ], a_Q12[ j ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -j ], a_Q12[ j ] );
         }
 
         /* Long-term prediction */
         if( signalType == TYPE_VOICED ) {
             /* Unrolled loop */
-            LTP_pred_Q14 = SKP_SMULWB(               pred_lag_ptr[  0 ], b_Q14[ 0 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] );
+            LTP_pred_Q14 = silk_SMULWB(               pred_lag_ptr[  0 ], b_Q14[ 0 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] );
             pred_lag_ptr++;
         } else {
             LTP_pred_Q14 = 0;
         }
 
         /* Noise shape feedback */
-        SKP_assert( ( shapingLPCOrder & 1 ) == 0 );   /* check that order is even */
+        silk_assert( ( shapingLPCOrder & 1 ) == 0 );   /* check that order is even */
         tmp2 = psLPC_Q14[ 0 ];
         tmp1 = NSQ->sAR2_Q14[ 0 ];
         NSQ->sAR2_Q14[ 0 ] = tmp2;
-        n_AR_Q10 = SKP_SMULWB( tmp2, AR_shp_Q13[ 0 ] );
+        n_AR_Q10 = silk_SMULWB( tmp2, AR_shp_Q13[ 0 ] );
         for( j = 2; j < shapingLPCOrder; j += 2 ) {
             tmp2 = NSQ->sAR2_Q14[ j - 1 ];
             NSQ->sAR2_Q14[ j - 1 ] = tmp1;
-            n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ j - 1 ] );
+            n_AR_Q10 = silk_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ j - 1 ] );
             tmp1 = NSQ->sAR2_Q14[ j + 0 ];
             NSQ->sAR2_Q14[ j + 0 ] = tmp2;
-            n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, tmp2, AR_shp_Q13[ j ] );
+            n_AR_Q10 = silk_SMLAWB( n_AR_Q10, tmp2, AR_shp_Q13[ j ] );
         }
         NSQ->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1;
-        n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] );
+        n_AR_Q10 = silk_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] );
 
-        n_AR_Q10 = SKP_RSHIFT( n_AR_Q10, 1 );   /* Q11 -> Q10 */
-        n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, NSQ->sLF_AR_shp_Q12, Tilt_Q14 );
+        n_AR_Q10 = silk_RSHIFT( n_AR_Q10, 1 );   /* Q11 -> Q10 */
+        n_AR_Q10 = silk_SMLAWB( n_AR_Q10, NSQ->sLF_AR_shp_Q12, Tilt_Q14 );
 
-        n_LF_Q10 = SKP_LSHIFT( SKP_SMULWB( NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx - 1 ], LF_shp_Q14 ), 2 );
-        n_LF_Q10 = SKP_SMLAWT( n_LF_Q10, NSQ->sLF_AR_shp_Q12, LF_shp_Q14 );
+        n_LF_Q10 = silk_LSHIFT( silk_SMULWB( NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx - 1 ], LF_shp_Q14 ), 2 );
+        n_LF_Q10 = silk_SMLAWT( n_LF_Q10, NSQ->sLF_AR_shp_Q12, LF_shp_Q14 );
 
-        SKP_assert( lag > 0 || signalType != TYPE_VOICED );
+        silk_assert( lag > 0 || signalType != TYPE_VOICED );
 
         /* Long-term shaping */
         if( lag > 0 ) {
             /* Symmetric, packed FIR coefficients */
-            n_LTP_Q14 = SKP_SMULWB( SKP_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 );
-            n_LTP_Q14 = SKP_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ],                     HarmShapeFIRPacked_Q14 );
-            n_LTP_Q14 = SKP_LSHIFT( n_LTP_Q14, 6 );
+            n_LTP_Q14 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 );
+            n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ],                     HarmShapeFIRPacked_Q14 );
+            n_LTP_Q14 = silk_LSHIFT( n_LTP_Q14, 6 );
             shp_lag_ptr++;
 
-            tmp1 = SKP_SUB32( LTP_pred_Q14, n_LTP_Q14 );                        /* Add Q14 stuff */
-            tmp1 = SKP_RSHIFT( tmp1, 4 );                                       /* convert to Q10  */
-            tmp1 = SKP_ADD32( tmp1, LPC_pred_Q10 );                             /* add Q10 stuff */
-            tmp1 = SKP_SUB32( tmp1, n_AR_Q10 );                                 /* subtract Q10 stuff */
+            tmp1 = silk_SUB32( LTP_pred_Q14, n_LTP_Q14 );                        /* Add Q14 stuff */
+            tmp1 = silk_RSHIFT( tmp1, 4 );                                       /* convert to Q10  */
+            tmp1 = silk_ADD32( tmp1, LPC_pred_Q10 );                             /* add Q10 stuff */
+            tmp1 = silk_SUB32( tmp1, n_AR_Q10 );                                 /* subtract Q10 stuff */
         } else {
-            tmp1 = SKP_SUB32( LPC_pred_Q10, n_AR_Q10 );                         /* subtract Q10 stuff */
+            tmp1 = silk_SUB32( LPC_pred_Q10, n_AR_Q10 );                         /* subtract Q10 stuff */
         }
 
         /* Input minus prediction plus noise feedback  */
         /*r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP;*/
-        tmp1  = SKP_SUB32( tmp1, n_LF_Q10 );                                    /* subtract Q10 stuff */
-        r_Q10 = SKP_SUB32( x_sc_Q10[ i ], tmp1 );
+        tmp1  = silk_SUB32( tmp1, n_LF_Q10 );                                    /* subtract Q10 stuff */
+        r_Q10 = silk_SUB32( x_sc_Q10[ i ], tmp1 );
 
         /* Flip sign depending on dither */
         r_Q10 = r_Q10 ^ dither;
-        r_Q10 = SKP_LIMIT_32( r_Q10, -31 << 10, 30 << 10 );
+        r_Q10 = silk_LIMIT_32( r_Q10, -31 << 10, 30 << 10 );
 
         /* Find two quantization level candidates and measure their rate-distortion */
-        q1_Q10 = SKP_SUB32( r_Q10, offset_Q10 );
-        q1_Q10 = SKP_RSHIFT( q1_Q10, 10 );
+        q1_Q10 = silk_SUB32( r_Q10, offset_Q10 );
+        q1_Q10 = silk_RSHIFT( q1_Q10, 10 );
         if( q1_Q10 > 0 ) {
-            q1_Q10  = SKP_SUB32( SKP_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
-            q1_Q10  = SKP_ADD32( q1_Q10, offset_Q10 );
-            q2_Q10  = SKP_ADD32( q1_Q10, 1024 );
-            rd1_Q10 = SKP_SMULBB( q1_Q10, Lambda_Q10 );
-            rd2_Q10 = SKP_SMULBB( q2_Q10, Lambda_Q10 );
+            q1_Q10  = silk_SUB32( silk_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
+            q1_Q10  = silk_ADD32( q1_Q10, offset_Q10 );
+            q2_Q10  = silk_ADD32( q1_Q10, 1024 );
+            rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 );
+            rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 );
         } else if( q1_Q10 == 0 ) {
             q1_Q10  = offset_Q10;
-            q2_Q10  = SKP_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
-            rd1_Q10 = SKP_SMULBB( q1_Q10, Lambda_Q10 );
-            rd2_Q10 = SKP_SMULBB( q2_Q10, Lambda_Q10 );
+            q2_Q10  = silk_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
+            rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 );
+            rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 );
         } else if( q1_Q10 == -1 ) {
             q2_Q10  = offset_Q10;
-            q1_Q10  = SKP_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
-            rd1_Q10 = SKP_SMULBB( -q1_Q10, Lambda_Q10 );
-            rd2_Q10 = SKP_SMULBB(  q2_Q10, Lambda_Q10 );
+            q1_Q10  = silk_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
+            rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 );
+            rd2_Q10 = silk_SMULBB(  q2_Q10, Lambda_Q10 );
         } else {            /* Q1_Q10 < -1 */
-            q1_Q10  = SKP_ADD32( SKP_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
-            q1_Q10  = SKP_ADD32( q1_Q10, offset_Q10 );
-            q2_Q10  = SKP_ADD32( q1_Q10, 1024 );
-            rd1_Q10 = SKP_SMULBB( -q1_Q10, Lambda_Q10 );
-            rd2_Q10 = SKP_SMULBB( -q2_Q10, Lambda_Q10 );
+            q1_Q10  = silk_ADD32( silk_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
+            q1_Q10  = silk_ADD32( q1_Q10, offset_Q10 );
+            q2_Q10  = silk_ADD32( q1_Q10, 1024 );
+            rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 );
+            rd2_Q10 = silk_SMULBB( -q2_Q10, Lambda_Q10 );
         }
-        rr_Q10  = SKP_SUB32( r_Q10, q1_Q10 );
-        rd1_Q10 = SKP_RSHIFT( SKP_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 );
-        rr_Q10  = SKP_SUB32( r_Q10, q2_Q10 );
-        rd2_Q10 = SKP_RSHIFT( SKP_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 );
+        rr_Q10  = silk_SUB32( r_Q10, q1_Q10 );
+        rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 );
+        rr_Q10  = silk_SUB32( r_Q10, q2_Q10 );
+        rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 );
 
         if( rd2_Q10 < rd1_Q10 ) {
             q1_Q10 = q2_Q10;
         }
 
-        pulses[ i ] = ( opus_int8 )SKP_RSHIFT_ROUND( q1_Q10, 10 );
+        pulses[ i ] = ( opus_int8 )silk_RSHIFT_ROUND( q1_Q10, 10 );
 
         /* Excitation */
         exc_Q10 = q1_Q10 ^ dither;
 
         /* Add predictions */
-        LPC_exc_Q10 = SKP_ADD32( exc_Q10, SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );
-        xq_Q10      = SKP_ADD32( LPC_exc_Q10, LPC_pred_Q10 );
+        LPC_exc_Q10 = silk_ADD32( exc_Q10, silk_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );
+        xq_Q10      = silk_ADD32( LPC_exc_Q10, LPC_pred_Q10 );
 
         /* Scale XQ back to normal level before saving */
-        xq[ i ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SMULWW( xq_Q10, Gain_Q16 ), 10 ) );
+        xq[ i ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( xq_Q10, Gain_Q16 ), 10 ) );
 
         /* Update states */
         psLPC_Q14++;
-        *psLPC_Q14 = SKP_LSHIFT( xq_Q10, 4 );
-        sLF_AR_shp_Q10 = SKP_SUB32( xq_Q10, n_AR_Q10 );
-        NSQ->sLF_AR_shp_Q12 = SKP_LSHIFT( sLF_AR_shp_Q10, 2 );
+        *psLPC_Q14 = silk_LSHIFT( xq_Q10, 4 );
+        sLF_AR_shp_Q10 = silk_SUB32( xq_Q10, n_AR_Q10 );
+        NSQ->sLF_AR_shp_Q12 = silk_LSHIFT( sLF_AR_shp_Q10, 2 );
 
-        NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx ] = SKP_SUB32( sLF_AR_shp_Q10, n_LF_Q10 );
-        sLTP_Q16[ NSQ->sLTP_buf_idx ] = SKP_LSHIFT( LPC_exc_Q10, 6 );
+        NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx ] = silk_SUB32( sLF_AR_shp_Q10, n_LF_Q10 );
+        sLTP_Q16[ NSQ->sLTP_buf_idx ] = silk_LSHIFT( LPC_exc_Q10, 6 );
         NSQ->sLTP_shp_buf_idx++;
         NSQ->sLTP_buf_idx++;
 
         /* Make dither dependent on quantized signal */
-        NSQ->rand_seed = SKP_ADD32_ovflw(NSQ->rand_seed, pulses[ i ]);
+        NSQ->rand_seed = silk_ADD32_ovflw(NSQ->rand_seed, pulses[ i ]);
     }
 
     /* Update LPC synth buffer */
-    SKP_memcpy( NSQ->sLPC_Q14, &NSQ->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
+    silk_memcpy( NSQ->sLPC_Q14, &NSQ->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
 }
 
 static inline void silk_nsq_scale_states(
@@ -376,20 +376,20 @@
     opus_int   i, lag;
     opus_int32 inv_gain_Q16, gain_adj_Q16, inv_gain_Q32;
 
-    inv_gain_Q16 = silk_INVERSE32_varQ( SKP_max( Gains_Q16[ subfr ], 1 ), 32 );
-    inv_gain_Q16 = SKP_min( inv_gain_Q16, SKP_int16_MAX );
+    inv_gain_Q16 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 32 );
+    inv_gain_Q16 = silk_min( inv_gain_Q16, silk_int16_MAX );
     lag          = pitchL[ subfr ];
 
     /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */
     if( NSQ->rewhite_flag ) {
-        inv_gain_Q32 = SKP_LSHIFT( inv_gain_Q16, 16 );
+        inv_gain_Q32 = silk_LSHIFT( inv_gain_Q16, 16 );
         if( subfr == 0 ) {
             /* Do LTP downscaling */
-            inv_gain_Q32 = SKP_LSHIFT( SKP_SMULWB( inv_gain_Q32, LTP_scale_Q14 ), 2 );
+            inv_gain_Q32 = silk_LSHIFT( silk_SMULWB( inv_gain_Q32, LTP_scale_Q14 ), 2 );
         }
         for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) {
-            SKP_assert( i < MAX_FRAME_LENGTH );
-            sLTP_Q16[ i ] = SKP_SMULWB( inv_gain_Q32, sLTP[ i ] );
+            silk_assert( i < MAX_FRAME_LENGTH );
+            sLTP_Q16[ i ] = silk_SMULWB( inv_gain_Q32, sLTP[ i ] );
         }
     }
 
@@ -399,33 +399,33 @@
 
         /* Scale long-term shaping state */
         for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx; i++ ) {
-            NSQ->sLTP_shp_Q10[ i ] = SKP_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q10[ i ] );
+            NSQ->sLTP_shp_Q10[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q10[ i ] );
         }
 
         /* Scale long-term prediction state */
         if( NSQ->rewhite_flag == 0 ) {
             for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) {
-                sLTP_Q16[ i ] = SKP_SMULWW( gain_adj_Q16, sLTP_Q16[ i ] );
+                sLTP_Q16[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q16[ i ] );
             }
         }
 
-        NSQ->sLF_AR_shp_Q12 = SKP_SMULWW( gain_adj_Q16, NSQ->sLF_AR_shp_Q12 );
+        NSQ->sLF_AR_shp_Q12 = silk_SMULWW( gain_adj_Q16, NSQ->sLF_AR_shp_Q12 );
 
         /* Scale short-term prediction and shaping states */
         for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) {
-            NSQ->sLPC_Q14[ i ] = SKP_SMULWW( gain_adj_Q16, NSQ->sLPC_Q14[ i ] );
+            NSQ->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLPC_Q14[ i ] );
         }
         for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) {
-            NSQ->sAR2_Q14[ i ] = SKP_SMULWW( gain_adj_Q16, NSQ->sAR2_Q14[ i ] );
+            NSQ->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sAR2_Q14[ i ] );
         }
     }
 
     /* Scale input */
     for( i = 0; i < psEncC->subfr_length; i++ ) {
-        x_sc_Q10[ i ] = SKP_RSHIFT( SKP_SMULBB( x[ i ], ( opus_int16 )inv_gain_Q16 ), 6 );
+        x_sc_Q10[ i ] = silk_RSHIFT( silk_SMULBB( x[ i ], ( opus_int16 )inv_gain_Q16 ), 6 );
     }
 
     /* save inv_gain */
-    SKP_assert( inv_gain_Q16 != 0 );
+    silk_assert( inv_gain_Q16 != 0 );
     NSQ->prev_inv_gain_Q16 = inv_gain_Q16;
 }
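
The core decision in silk_noise_shape_quantizer above is made between two neighbouring quantization levels: each candidate is charged a rate term proportional to its magnitude (via Lambda_Q10) plus its squared quantization error, and the cheaper one is kept. The same decision in floating point, with the dither, offset and level-adjust details omitted (a sketch, not part of the patch):

    #include <math.h>
    #include <stdio.h>

    /* Pick the cheaper of the two levels bracketing the residual r */
    static double quantize_two_candidates(double r, double lambda)
    {
        double q1 = floor(r);            /* lower neighbouring level */
        double q2 = q1 + 1.0;            /* upper neighbouring level */
        double rd1 = fabs(q1) * lambda + (r - q1) * (r - q1);
        double rd2 = fabs(q2) * lambda + (r - q2) * (r - q2);
        return rd2 < rd1 ? q2 : q1;
    }

    int main(void)
    {
        printf("%.0f\n", quantize_two_candidates(1.7, 0.1));  /* cheap rate: picks 2 */
        printf("%.0f\n", quantize_two_candidates(1.7, 2.0));  /* heavy rate penalty: picks 1 */
        return 0;
    }
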
diff --git a/silk/silk_NSQ_del_dec.c b/silk/silk_NSQ_del_dec.c
index d150c78..e61fd79 100644
--- a/silk/silk_NSQ_del_dec.c
+++ b/silk/silk_NSQ_del_dec.c
@@ -137,10 +137,10 @@
     /* Set unvoiced lag to the previous one, overwrite later for voiced */
     lag = NSQ->lagPrev;
 
-    SKP_assert( NSQ->prev_inv_gain_Q16 != 0 );
+    silk_assert( NSQ->prev_inv_gain_Q16 != 0 );
 
     /* Initialize delayed decision states */
-    SKP_memset( psDelDec, 0, psEncC->nStatesDelayedDecision * sizeof( NSQ_del_dec_struct ) );
+    silk_memset( psDelDec, 0, psEncC->nStatesDelayedDecision * sizeof( NSQ_del_dec_struct ) );
     for( k = 0; k < psEncC->nStatesDelayedDecision; k++ ) {
         psDD                 = &psDelDec[ k ];
         psDD->Seed           = ( k + psIndices->Seed ) & 3;
@@ -148,23 +148,23 @@
         psDD->RD_Q10         = 0;
         psDD->LF_AR_Q12      = NSQ->sLF_AR_shp_Q12;
         psDD->Shape_Q10[ 0 ] = NSQ->sLTP_shp_Q10[ psEncC->ltp_mem_length - 1 ];
-        SKP_memcpy( psDD->sLPC_Q14, NSQ->sLPC_Q14, NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
-        SKP_memcpy( psDD->sAR2_Q14, NSQ->sAR2_Q14, sizeof( NSQ->sAR2_Q14 ) );
+        silk_memcpy( psDD->sLPC_Q14, NSQ->sLPC_Q14, NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
+        silk_memcpy( psDD->sAR2_Q14, NSQ->sAR2_Q14, sizeof( NSQ->sAR2_Q14 ) );
     }
 
     offset_Q10   = silk_Quantization_Offsets_Q10[ psIndices->signalType >> 1 ][ psIndices->quantOffsetType ];
     smpl_buf_idx = 0; /* index of oldest samples */
 
-    decisionDelay = SKP_min_int( DECISION_DELAY, psEncC->subfr_length );
+    decisionDelay = silk_min_int( DECISION_DELAY, psEncC->subfr_length );
 
     /* For voiced frames limit the decision delay to lower than the pitch lag */
     if( psIndices->signalType == TYPE_VOICED ) {
         for( k = 0; k < psEncC->nb_subfr; k++ ) {
-            decisionDelay = SKP_min_int( decisionDelay, pitchL[ k ] - LTP_ORDER / 2 - 1 );
+            decisionDelay = silk_min_int( decisionDelay, pitchL[ k ] - LTP_ORDER / 2 - 1 );
         }
     } else {
         if( lag > 0 ) {
-            decisionDelay = SKP_min_int( decisionDelay, lag - LTP_ORDER / 2 - 1 );
+            decisionDelay = silk_min_int( decisionDelay, lag - LTP_ORDER / 2 - 1 );
         }
     }
 
@@ -185,9 +185,9 @@
         AR_shp_Q13 = &AR2_Q13[     k * MAX_SHAPE_LPC_ORDER ];
 
         /* Noise shape parameters */
-        SKP_assert( HarmShapeGain_Q14[ k ] >= 0 );
-        HarmShapeFIRPacked_Q14  =                          SKP_RSHIFT( HarmShapeGain_Q14[ k ], 2 );
-        HarmShapeFIRPacked_Q14 |= SKP_LSHIFT( ( opus_int32 )SKP_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 );
+        silk_assert( HarmShapeGain_Q14[ k ] >= 0 );
+        HarmShapeFIRPacked_Q14  =                          silk_RSHIFT( HarmShapeGain_Q14[ k ], 2 );
+        HarmShapeFIRPacked_Q14 |= silk_LSHIFT( ( opus_int32 )silk_RSHIFT( HarmShapeGain_Q14[ k ], 1 ), 16 );
 
         NSQ->rewhite_flag = 0;
         if( psIndices->signalType == TYPE_VOICED ) {
@@ -195,7 +195,7 @@
             lag = pitchL[ k ];
 
             /* Re-whitening */
-            if( ( k & ( 3 - SKP_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) {
+            if( ( k & ( 3 - silk_LSHIFT( LSF_interpolation_flag, 1 ) ) ) == 0 ) {
                 if( k == 2 ) {
                     /* RESET DELAYED DECISIONS */
                     /* Find winner */
@@ -209,8 +209,8 @@
                     }
                     for( i = 0; i < psEncC->nStatesDelayedDecision; i++ ) {
                         if( i != Winner_ind ) {
-                            psDelDec[ i ].RD_Q10 += ( SKP_int32_MAX >> 4 );
-                            SKP_assert( psDelDec[ i ].RD_Q10 >= 0 );
+                            psDelDec[ i ].RD_Q10 += ( silk_int32_MAX >> 4 );
+                            silk_assert( psDelDec[ i ].RD_Q10 >= 0 );
                         }
                     }
 
@@ -219,9 +219,9 @@
                     last_smple_idx = smpl_buf_idx + decisionDelay;
                     for( i = 0; i < decisionDelay; i++ ) {
                         last_smple_idx = ( last_smple_idx - 1 ) & DECISION_DELAY_MASK;
-                        pulses[   i - decisionDelay ] = ( opus_int8 )SKP_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 );
-                        pxq[ i - decisionDelay ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND(
-                            SKP_SMULWW( psDD->Xq_Q10[ last_smple_idx ], Gains_Q16[ 1 ] ), 10 ) );
+                        pulses[   i - decisionDelay ] = ( opus_int8 )silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 );
+                        pxq[ i - decisionDelay ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND(
+                            silk_SMULWW( psDD->Xq_Q10[ last_smple_idx ], Gains_Q16[ 1 ] ), 10 ) );
                         NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDD->Shape_Q10[ last_smple_idx ];
                     }
 
@@ -230,7 +230,7 @@
 
                 /* Rewhiten with new A coefs */
                 start_idx = psEncC->ltp_mem_length - lag - psEncC->predictLPCOrder - LTP_ORDER / 2;
-                SKP_assert( start_idx > 0 );
+                silk_assert( start_idx > 0 );
 
                 silk_LPC_analysis_filter( &sLTP[ start_idx ], &NSQ->xq[ start_idx + k * psEncC->subfr_length ],
                     A_Q12, psEncC->ltp_mem_length - start_idx, psEncC->predictLPCOrder );
@@ -269,22 +269,22 @@
     last_smple_idx = smpl_buf_idx + decisionDelay;
     for( i = 0; i < decisionDelay; i++ ) {
         last_smple_idx = ( last_smple_idx - 1 ) & DECISION_DELAY_MASK;
-        pulses[   i - decisionDelay ] = ( opus_int8 )SKP_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 );
-        pxq[ i - decisionDelay ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND(
-            SKP_SMULWW( psDD->Xq_Q10[ last_smple_idx ], Gains_Q16[ psEncC->nb_subfr - 1 ] ), 10 ) );
+        pulses[   i - decisionDelay ] = ( opus_int8 )silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 );
+        pxq[ i - decisionDelay ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND(
+            silk_SMULWW( psDD->Xq_Q10[ last_smple_idx ], Gains_Q16[ psEncC->nb_subfr - 1 ] ), 10 ) );
         NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx - decisionDelay + i ] = psDD->Shape_Q10[ last_smple_idx ];
         sLTP_Q16[          NSQ->sLTP_buf_idx     - decisionDelay + i ] = psDD->Pred_Q16[  last_smple_idx ];
     }
-    SKP_memcpy( NSQ->sLPC_Q14, &psDD->sLPC_Q14[ psEncC->subfr_length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
-    SKP_memcpy( NSQ->sAR2_Q14, psDD->sAR2_Q14, sizeof( psDD->sAR2_Q14 ) );
+    silk_memcpy( NSQ->sLPC_Q14, &psDD->sLPC_Q14[ psEncC->subfr_length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
+    silk_memcpy( NSQ->sAR2_Q14, psDD->sAR2_Q14, sizeof( psDD->sAR2_Q14 ) );
 
     /* Update states */
     NSQ->sLF_AR_shp_Q12 = psDD->LF_AR_Q12;
     NSQ->lagPrev        = pitchL[ psEncC->nb_subfr - 1 ];
 
     /* Save quantized speech and noise shaping signals */
-    SKP_memmove( NSQ->xq,           &NSQ->xq[           psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) );
-    SKP_memmove( NSQ->sLTP_shp_Q10, &NSQ->sLTP_shp_Q10[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) );
+    silk_memmove( NSQ->xq,           &NSQ->xq[           psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int16 ) );
+    silk_memmove( NSQ->sLTP_shp_Q10, &NSQ->sLTP_shp_Q10[ psEncC->frame_length ], psEncC->ltp_mem_length * sizeof( opus_int32 ) );
 
 #ifdef SAVE_ALL_INTERNAL_DATA
     DEBUG_STORE_DATA( xq.dat,       &pxq[ -psEncC->frame_length ],       psEncC->frame_length * sizeof( opus_int16 ) );
@@ -345,11 +345,11 @@
         /* Long-term prediction */
         if( signalType == TYPE_VOICED ) {
             /* Unrolled loop */
-            LTP_pred_Q14 = SKP_SMULWB(               pred_lag_ptr[  0 ], b_Q14[ 0 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] );
+            LTP_pred_Q14 = silk_SMULWB(               pred_lag_ptr[  0 ], b_Q14[ 0 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] );
             pred_lag_ptr++;
         } else {
             LTP_pred_Q14 = 0;
@@ -358,12 +358,12 @@
         /* Long-term shaping */
         if( lag > 0 ) {
             /* Symmetric, packed FIR coefficients */
-            n_LTP_Q14 = SKP_SMULWB( SKP_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 );
-            n_LTP_Q14 = SKP_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ],                     HarmShapeFIRPacked_Q14 );
-            n_LTP_Q14 = SKP_LSHIFT( n_LTP_Q14, 6 );
+            n_LTP_Q14 = silk_SMULWB( silk_ADD32( shp_lag_ptr[ 0 ], shp_lag_ptr[ -2 ] ), HarmShapeFIRPacked_Q14 );
+            n_LTP_Q14 = silk_SMLAWT( n_LTP_Q14, shp_lag_ptr[ -1 ],                     HarmShapeFIRPacked_Q14 );
+            n_LTP_Q14 = silk_LSHIFT( n_LTP_Q14, 6 );
             shp_lag_ptr++;
 
-            LTP_Q10 = SKP_RSHIFT( SKP_SUB32( LTP_pred_Q14, n_LTP_Q14 ), 4 );
+            LTP_Q10 = silk_RSHIFT( silk_SUB32( LTP_pred_Q14, n_LTP_Q14 ), 4 );
         } else {
             LTP_Q10 = 0;
         }
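
The HarmShapeFIRPacked_Q14 word built earlier in this file holds gain/4 in its low 16 bits and gain/2 in its high 16 bits, so the two multiplies above apply a symmetric gain * [1/4, 1/2, 1/4] FIR to the shaping history with only two MACs. A hedged sketch of that packing trick, using simplified 64-bit equivalents of the Q16 multiply macros and made-up sample values:

#include <stdio.h>
#include <stdint.h>

/* Simplified equivalents for illustration only:
   (a * bottom16(b)) >> 16   and   a + ((b * top16(c)) >> 16) */
#define silk_SMULWB(a, b)     (int32_t)( ( (int64_t)(a) * (int16_t)(b) ) >> 16 )
#define silk_SMLAWT(a, b, c)  (int32_t)( (a) + ( ( (int64_t)(b) * ( (c) >> 16 ) ) >> 16 ) )

int main(void)
{
    int32_t HarmShapeGain_Q14 = 8192;                       /* 0.5 in Q14 (example value) */
    int32_t shp0 = 4000, shp1 = 5000, shp2 = 6000;          /* shaping history samples    */
    int32_t packed_Q14, n_LTP;

    /* Pack: low half = gain/4, high half = gain/2 */
    packed_Q14  = HarmShapeGain_Q14 >> 2;
    packed_Q14 |= ( HarmShapeGain_Q14 >> 1 ) << 16;

    /* One multiply covers both outer taps, one covers the centre tap */
    n_LTP = silk_SMULWB( shp0 + shp2, packed_Q14 );
    n_LTP = silk_SMLAWT( n_LTP, shp1, packed_Q14 );

    printf( "%d\n", (int)n_LTP );   /* ~625 = gain*(shp0/4 + shp1/2 + shp2/4) >> 2 */
    return 0;
}

In the code above the result is then left-shifted by 6 (the silk_LSHIFT( n_LTP_Q14, 6 ) line) to reach the Q14 working format.
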
@@ -376,110 +376,110 @@
             psSS = psSampleState[ k ];
 
             /* Generate dither */
-            psDD->Seed = SKP_RAND( psDD->Seed );
+            psDD->Seed = silk_RAND( psDD->Seed );
 
             /* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
-            dither = SKP_RSHIFT( psDD->Seed, 31 );
+            dither = silk_RSHIFT( psDD->Seed, 31 );
 
             /* Pointer used in short term prediction and shaping */
             psLPC_Q14 = &psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 + i ];
             /* Short-term prediction */
-            SKP_assert( predictLPCOrder >= 10 );            /* check that unrolling works */
-            SKP_assert( ( predictLPCOrder  & 1 ) == 0 );    /* check that order is even */
-            SKP_assert( ( (opus_int64)a_Q12 & 3 ) == 0 );    /* check that array starts at 4-byte aligned address */
+            silk_assert( predictLPCOrder >= 10 );            /* check that unrolling works */
+            silk_assert( ( predictLPCOrder  & 1 ) == 0 );    /* check that order is even */
+            silk_assert( ( (opus_int64)a_Q12 & 3 ) == 0 );    /* check that array starts at 4-byte aligned address */
             /* Partially unrolled */
-            LPC_pred_Q10 = SKP_SMULWB(               psLPC_Q14[  0 ], a_Q12[ 0 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -1 ], a_Q12[ 1 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -2 ], a_Q12[ 2 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -3 ], a_Q12[ 3 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -4 ], a_Q12[ 4 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -5 ], a_Q12[ 5 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -6 ], a_Q12[ 6 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -7 ], a_Q12[ 7 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -8 ], a_Q12[ 8 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -9 ], a_Q12[ 9 ] );
+            LPC_pred_Q10 = silk_SMULWB(               psLPC_Q14[  0 ], a_Q12[ 0 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -1 ], a_Q12[ 1 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -2 ], a_Q12[ 2 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -3 ], a_Q12[ 3 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -4 ], a_Q12[ 4 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -5 ], a_Q12[ 5 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -6 ], a_Q12[ 6 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -7 ], a_Q12[ 7 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -8 ], a_Q12[ 8 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -9 ], a_Q12[ 9 ] );
             for( j = 10; j < predictLPCOrder; j ++ ) {
-                LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -j ], a_Q12[ j ] );
+                LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psLPC_Q14[ -j ], a_Q12[ j ] );
             }
 
             /* Noise shape feedback */
-            SKP_assert( ( shapingLPCOrder & 1 ) == 0 );   /* check that order is even */
+            silk_assert( ( shapingLPCOrder & 1 ) == 0 );   /* check that order is even */
             /* Output of lowpass section */
-            tmp2 = SKP_SMLAWB( psLPC_Q14[ 0 ], psDD->sAR2_Q14[ 0 ], warping_Q16 );
+            tmp2 = silk_SMLAWB( psLPC_Q14[ 0 ], psDD->sAR2_Q14[ 0 ], warping_Q16 );
             /* Output of allpass section */
-            tmp1 = SKP_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 );
+            tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 );
             psDD->sAR2_Q14[ 0 ] = tmp2;
-            n_AR_Q10 = SKP_SMULWB( tmp2, AR_shp_Q13[ 0 ] );
+            n_AR_Q10 = silk_SMULWB( tmp2, AR_shp_Q13[ 0 ] );
             /* Loop over allpass sections */
             for( j = 2; j < shapingLPCOrder; j += 2 ) {
                 /* Output of allpass section */
-                tmp2 = SKP_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 );
+                tmp2 = silk_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 );
                 psDD->sAR2_Q14[ j - 1 ] = tmp1;
-                n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ j - 1 ] );
+                n_AR_Q10 = silk_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ j - 1 ] );
                 /* Output of allpass section */
-                tmp1 = SKP_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 );
+                tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 );
                 psDD->sAR2_Q14[ j + 0 ] = tmp2;
-                n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, tmp2, AR_shp_Q13[ j ] );
+                n_AR_Q10 = silk_SMLAWB( n_AR_Q10, tmp2, AR_shp_Q13[ j ] );
             }
             psDD->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1;
-            n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] );
+            n_AR_Q10 = silk_SMLAWB( n_AR_Q10, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] );
 
-            n_AR_Q10 = SKP_RSHIFT( n_AR_Q10, 1 );           /* Q11 -> Q10 */
-            n_AR_Q10 = SKP_SMLAWB( n_AR_Q10, psDD->LF_AR_Q12, Tilt_Q14 );
+            n_AR_Q10 = silk_RSHIFT( n_AR_Q10, 1 );           /* Q11 -> Q10 */
+            n_AR_Q10 = silk_SMLAWB( n_AR_Q10, psDD->LF_AR_Q12, Tilt_Q14 );
 
-            n_LF_Q10 = SKP_LSHIFT( SKP_SMULWB( psDD->Shape_Q10[ *smpl_buf_idx ], LF_shp_Q14 ), 2 );
-            n_LF_Q10 = SKP_SMLAWT( n_LF_Q10, psDD->LF_AR_Q12, LF_shp_Q14 );
+            n_LF_Q10 = silk_LSHIFT( silk_SMULWB( psDD->Shape_Q10[ *smpl_buf_idx ], LF_shp_Q14 ), 2 );
+            n_LF_Q10 = silk_SMLAWT( n_LF_Q10, psDD->LF_AR_Q12, LF_shp_Q14 );
 
             /* Input minus prediction plus noise feedback                       */
             /* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP  */
-            tmp1  = SKP_ADD32( LTP_Q10, LPC_pred_Q10 );                         /* add Q10 stuff */
-            tmp1  = SKP_SUB32( tmp1, n_AR_Q10 );                                /* subtract Q10 stuff */
-            tmp1  = SKP_SUB32( tmp1, n_LF_Q10 );                                /* subtract Q10 stuff */
-            r_Q10 = SKP_SUB32( x_Q10[ i ], tmp1 );                              /* residual error Q10 */
+            tmp1  = silk_ADD32( LTP_Q10, LPC_pred_Q10 );                         /* add Q10 stuff */
+            tmp1  = silk_SUB32( tmp1, n_AR_Q10 );                                /* subtract Q10 stuff */
+            tmp1  = silk_SUB32( tmp1, n_LF_Q10 );                                /* subtract Q10 stuff */
+            r_Q10 = silk_SUB32( x_Q10[ i ], tmp1 );                              /* residual error Q10 */
 
             /* Flip sign depending on dither */
             r_Q10 = r_Q10 ^ dither;
-            r_Q10 = SKP_LIMIT_32( r_Q10, -31 << 10, 30 << 10 );
+            r_Q10 = silk_LIMIT_32( r_Q10, -31 << 10, 30 << 10 );
 
             /* Find two quantization level candidates and measure their rate-distortion */
-            q1_Q10 = SKP_SUB32( r_Q10, offset_Q10 );
-            q1_Q10 = SKP_RSHIFT( q1_Q10, 10 );
+            q1_Q10 = silk_SUB32( r_Q10, offset_Q10 );
+            q1_Q10 = silk_RSHIFT( q1_Q10, 10 );
             if( q1_Q10 > 0 ) {
-                q1_Q10  = SKP_SUB32( SKP_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
-                q1_Q10  = SKP_ADD32( q1_Q10, offset_Q10 );
-                q2_Q10  = SKP_ADD32( q1_Q10, 1024 );
-                rd1_Q10 = SKP_SMULBB( q1_Q10, Lambda_Q10 );
-                rd2_Q10 = SKP_SMULBB( q2_Q10, Lambda_Q10 );
+                q1_Q10  = silk_SUB32( silk_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
+                q1_Q10  = silk_ADD32( q1_Q10, offset_Q10 );
+                q2_Q10  = silk_ADD32( q1_Q10, 1024 );
+                rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 );
+                rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 );
             } else if( q1_Q10 == 0 ) {
                 q1_Q10  = offset_Q10;
-                q2_Q10  = SKP_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
-                rd1_Q10 = SKP_SMULBB( q1_Q10, Lambda_Q10 );
-                rd2_Q10 = SKP_SMULBB( q2_Q10, Lambda_Q10 );
+                q2_Q10  = silk_ADD32( q1_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
+                rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 );
+                rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 );
             } else if( q1_Q10 == -1 ) {
                 q2_Q10  = offset_Q10;
-                q1_Q10  = SKP_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
-                rd1_Q10 = SKP_SMULBB( -q1_Q10, Lambda_Q10 );
-                rd2_Q10 = SKP_SMULBB(  q2_Q10, Lambda_Q10 );
+                q1_Q10  = silk_SUB32( q2_Q10, 1024 - QUANT_LEVEL_ADJUST_Q10 );
+                rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 );
+                rd2_Q10 = silk_SMULBB(  q2_Q10, Lambda_Q10 );
             } else {            /* Q1_Q10 < -1 */
-                q1_Q10  = SKP_ADD32( SKP_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
-                q1_Q10  = SKP_ADD32( q1_Q10, offset_Q10 );
-                q2_Q10  = SKP_ADD32( q1_Q10, 1024 );
-                rd1_Q10 = SKP_SMULBB( -q1_Q10, Lambda_Q10 );
-                rd2_Q10 = SKP_SMULBB( -q2_Q10, Lambda_Q10 );
+                q1_Q10  = silk_ADD32( silk_LSHIFT( q1_Q10, 10 ), QUANT_LEVEL_ADJUST_Q10 );
+                q1_Q10  = silk_ADD32( q1_Q10, offset_Q10 );
+                q2_Q10  = silk_ADD32( q1_Q10, 1024 );
+                rd1_Q10 = silk_SMULBB( -q1_Q10, Lambda_Q10 );
+                rd2_Q10 = silk_SMULBB( -q2_Q10, Lambda_Q10 );
             }
-            rr_Q10  = SKP_SUB32( r_Q10, q1_Q10 );
-            rd1_Q10 = SKP_RSHIFT( SKP_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 );
-            rr_Q10  = SKP_SUB32( r_Q10, q2_Q10 );
-            rd2_Q10 = SKP_RSHIFT( SKP_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 );
+            rr_Q10  = silk_SUB32( r_Q10, q1_Q10 );
+            rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 );
+            rr_Q10  = silk_SUB32( r_Q10, q2_Q10 );
+            rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 );
 
             if( rd1_Q10 < rd2_Q10 ) {
-                psSS[ 0 ].RD_Q10 = SKP_ADD32( psDD->RD_Q10, rd1_Q10 );
-                psSS[ 1 ].RD_Q10 = SKP_ADD32( psDD->RD_Q10, rd2_Q10 );
+                psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 );
+                psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 );
                 psSS[ 0 ].Q_Q10  = q1_Q10;
                 psSS[ 1 ].Q_Q10  = q2_Q10;
             } else {
-                psSS[ 0 ].RD_Q10 = SKP_ADD32( psDD->RD_Q10, rd2_Q10 );
-                psSS[ 1 ].RD_Q10 = SKP_ADD32( psDD->RD_Q10, rd1_Q10 );
+                psSS[ 0 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd2_Q10 );
+                psSS[ 1 ].RD_Q10 = silk_ADD32( psDD->RD_Q10, rd1_Q10 );
                 psSS[ 0 ].Q_Q10  = q2_Q10;
                 psSS[ 1 ].Q_Q10  = q1_Q10;
             }
@@ -490,15 +490,15 @@
             exc_Q10 = psSS[ 0 ].Q_Q10 ^ dither;
 
             /* Add predictions */
-            LPC_exc_Q10 = exc_Q10 + SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 );
-            xq_Q10      = SKP_ADD32( LPC_exc_Q10, LPC_pred_Q10 );
+            LPC_exc_Q10 = exc_Q10 + silk_RSHIFT_ROUND( LTP_pred_Q14, 4 );
+            xq_Q10      = silk_ADD32( LPC_exc_Q10, LPC_pred_Q10 );
 
             /* Update states */
-            sLF_AR_shp_Q10         = SKP_SUB32(  xq_Q10, n_AR_Q10 );
-            psSS[ 0 ].sLTP_shp_Q10 = SKP_SUB32(  sLF_AR_shp_Q10, n_LF_Q10 );
-            psSS[ 0 ].LF_AR_Q12    = SKP_LSHIFT( sLF_AR_shp_Q10, 2 );
-            psSS[ 0 ].xq_Q14       = SKP_LSHIFT( xq_Q10,         4 );
-            psSS[ 0 ].LPC_exc_Q16  = SKP_LSHIFT( LPC_exc_Q10,    6 );
+            sLF_AR_shp_Q10         = silk_SUB32(  xq_Q10, n_AR_Q10 );
+            psSS[ 0 ].sLTP_shp_Q10 = silk_SUB32(  sLF_AR_shp_Q10, n_LF_Q10 );
+            psSS[ 0 ].LF_AR_Q12    = silk_LSHIFT( sLF_AR_shp_Q10, 2 );
+            psSS[ 0 ].xq_Q14       = silk_LSHIFT( xq_Q10,         4 );
+            psSS[ 0 ].LPC_exc_Q16  = silk_LSHIFT( LPC_exc_Q10,    6 );
 
             /* Update states for second best quantization */
 
@@ -506,15 +506,15 @@
             exc_Q10 = psSS[ 1 ].Q_Q10 ^ dither;
 
             /* Add predictions */
-            LPC_exc_Q10 = exc_Q10 + SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 );
-            xq_Q10      = SKP_ADD32( LPC_exc_Q10, LPC_pred_Q10 );
+            LPC_exc_Q10 = exc_Q10 + silk_RSHIFT_ROUND( LTP_pred_Q14, 4 );
+            xq_Q10      = silk_ADD32( LPC_exc_Q10, LPC_pred_Q10 );
 
             /* Update states */
-            sLF_AR_shp_Q10         = SKP_SUB32(  xq_Q10, n_AR_Q10 );
-            psSS[ 1 ].sLTP_shp_Q10 = SKP_SUB32(  sLF_AR_shp_Q10, n_LF_Q10 );
-            psSS[ 1 ].LF_AR_Q12    = SKP_LSHIFT( sLF_AR_shp_Q10, 2 );
-            psSS[ 1 ].xq_Q14       = SKP_LSHIFT( xq_Q10,         4 );
-            psSS[ 1 ].LPC_exc_Q16  = SKP_LSHIFT( LPC_exc_Q10,    6 );
+            sLF_AR_shp_Q10         = silk_SUB32(  xq_Q10, n_AR_Q10 );
+            psSS[ 1 ].sLTP_shp_Q10 = silk_SUB32(  sLF_AR_shp_Q10, n_LF_Q10 );
+            psSS[ 1 ].LF_AR_Q12    = silk_LSHIFT( sLF_AR_shp_Q10, 2 );
+            psSS[ 1 ].xq_Q14       = silk_LSHIFT( xq_Q10,         4 );
+            psSS[ 1 ].LPC_exc_Q16  = silk_LSHIFT( LPC_exc_Q10,    6 );
         }
 
         *smpl_buf_idx  = ( *smpl_buf_idx - 1 ) & DECISION_DELAY_MASK;                   /* Index to newest samples              */
@@ -534,9 +534,9 @@
         Winner_rand_state = psDelDec[ Winner_ind ].RandState[ last_smple_idx ];
         for( k = 0; k < nStatesDelayedDecision; k++ ) {
             if( psDelDec[ k ].RandState[ last_smple_idx ] != Winner_rand_state ) {
-                psSampleState[ k ][ 0 ].RD_Q10 = SKP_ADD32( psSampleState[ k ][ 0 ].RD_Q10, ( SKP_int32_MAX >> 4 ) );
-                psSampleState[ k ][ 1 ].RD_Q10 = SKP_ADD32( psSampleState[ k ][ 1 ].RD_Q10, ( SKP_int32_MAX >> 4 ) );
-                SKP_assert( psSampleState[ k ][ 0 ].RD_Q10 >= 0 );
+                psSampleState[ k ][ 0 ].RD_Q10 = silk_ADD32( psSampleState[ k ][ 0 ].RD_Q10, ( silk_int32_MAX >> 4 ) );
+                psSampleState[ k ][ 1 ].RD_Q10 = silk_ADD32( psSampleState[ k ][ 1 ].RD_Q10, ( silk_int32_MAX >> 4 ) );
+                silk_assert( psSampleState[ k ][ 0 ].RD_Q10 >= 0 );
             }
         }
 
@@ -560,17 +560,17 @@
 
         /* Replace a state if best from second set outperforms worst in first set */
         if( RDmin_Q10 < RDmax_Q10 ) {
-            SKP_memcpy( ((opus_int32 *)&psDelDec[ RDmax_ind ]) + i,
+            silk_memcpy( ((opus_int32 *)&psDelDec[ RDmax_ind ]) + i,
                         ((opus_int32 *)&psDelDec[ RDmin_ind ]) + i, sizeof( NSQ_del_dec_struct ) - i * sizeof( opus_int32) );
-            SKP_memcpy( &psSampleState[ RDmax_ind ][ 0 ], &psSampleState[ RDmin_ind ][ 1 ], sizeof( NSQ_sample_struct ) );
+            silk_memcpy( &psSampleState[ RDmax_ind ][ 0 ], &psSampleState[ RDmin_ind ][ 1 ], sizeof( NSQ_sample_struct ) );
         }
 
         /* Write samples from winner to output and long-term filter states */
         psDD = &psDelDec[ Winner_ind ];
         if( subfr > 0 || i >= decisionDelay ) {
-            pulses[  i - decisionDelay ] = ( opus_int8 )SKP_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 );
-            xq[ i - decisionDelay ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND(
-                SKP_SMULWW( psDD->Xq_Q10[ last_smple_idx ], delayedGain_Q16[ last_smple_idx ] ), 10 ) );
+            pulses[  i - decisionDelay ] = ( opus_int8 )silk_RSHIFT_ROUND( psDD->Q_Q10[ last_smple_idx ], 10 );
+            xq[ i - decisionDelay ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND(
+                silk_SMULWW( psDD->Xq_Q10[ last_smple_idx ], delayedGain_Q16[ last_smple_idx ] ), 10 ) );
             NSQ->sLTP_shp_Q10[ NSQ->sLTP_shp_buf_idx - decisionDelay ] = psDD->Shape_Q10[ last_smple_idx ];
             sLTP_Q16[          NSQ->sLTP_buf_idx     - decisionDelay ] = psDD->Pred_Q16[  last_smple_idx ];
         }
@@ -583,11 +583,11 @@
             psSS                                     = &psSampleState[ k ][ 0 ];
             psDD->LF_AR_Q12                          = psSS->LF_AR_Q12;
             psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH + i ] = psSS->xq_Q14;
-            psDD->Xq_Q10[    *smpl_buf_idx ]         = SKP_RSHIFT( psSS->xq_Q14, 4 );
+            psDD->Xq_Q10[    *smpl_buf_idx ]         = silk_RSHIFT( psSS->xq_Q14, 4 );
             psDD->Q_Q10[     *smpl_buf_idx ]         = psSS->Q_Q10;
             psDD->Pred_Q16[  *smpl_buf_idx ]         = psSS->LPC_exc_Q16;
             psDD->Shape_Q10[ *smpl_buf_idx ]         = psSS->sLTP_shp_Q10;
-            psDD->Seed                               = SKP_ADD32( psDD->Seed, SKP_RSHIFT_ROUND( psSS->Q_Q10, 10 ) );
+            psDD->Seed                               = silk_ADD32( psDD->Seed, silk_RSHIFT_ROUND( psSS->Q_Q10, 10 ) );
             psDD->RandState[ *smpl_buf_idx ]         = psDD->Seed;
             psDD->RD_Q10                             = psSS->RD_Q10;
         }
@@ -596,7 +596,7 @@
     /* Update LPC states */
     for( k = 0; k < nStatesDelayedDecision; k++ ) {
         psDD = &psDelDec[ k ];
-        SKP_memcpy( psDD->sLPC_Q14, &psDD->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
+        silk_memcpy( psDD->sLPC_Q14, &psDD->sLPC_Q14[ length ], NSQ_LPC_BUF_LENGTH * sizeof( opus_int32 ) );
     }
 }
 
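
The delayed-decision history in this function is addressed as a small ring buffer: stepping an index with ( idx - 1 ) & DECISION_DELAY_MASK wraps around without a branch, which works because the buffer length is a power of two. A short sketch of the pattern, with an example length standing in for the real DECISION_DELAY constant:

#include <stdio.h>

#define BUF_LEN  8                 /* example; must be a power of two for the mask trick */
#define BUF_MASK ( BUF_LEN - 1 )

int main(void)
{
    int idx = 0, n;

    /* Walking backwards wraps from 0 to BUF_LEN - 1 without any branch */
    for( n = 0; n < 10; n++ ) {
        idx = ( idx - 1 ) & BUF_MASK;
        printf( "%d ", idx );      /* prints: 7 6 5 4 3 2 1 0 7 6 */
    }
    printf( "\n" );
    return 0;
}
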
@@ -620,20 +620,20 @@
     opus_int32          inv_gain_Q16, gain_adj_Q16, inv_gain_Q32;
     NSQ_del_dec_struct *psDD;
 
-    inv_gain_Q16 = silk_INVERSE32_varQ( SKP_max( Gains_Q16[ subfr ], 1 ), 32 );
-    inv_gain_Q16 = SKP_min( inv_gain_Q16, SKP_int16_MAX );
+    inv_gain_Q16 = silk_INVERSE32_varQ( silk_max( Gains_Q16[ subfr ], 1 ), 32 );
+    inv_gain_Q16 = silk_min( inv_gain_Q16, silk_int16_MAX );
     lag          = pitchL[ subfr ];
 
     /* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */
     if( NSQ->rewhite_flag ) {
-        inv_gain_Q32 = SKP_LSHIFT( inv_gain_Q16, 16 );
+        inv_gain_Q32 = silk_LSHIFT( inv_gain_Q16, 16 );
         if( subfr == 0 ) {
             /* Do LTP downscaling */
-            inv_gain_Q32 = SKP_LSHIFT( SKP_SMULWB( inv_gain_Q32, LTP_scale_Q14 ), 2 );
+            inv_gain_Q32 = silk_LSHIFT( silk_SMULWB( inv_gain_Q32, LTP_scale_Q14 ), 2 );
         }
         for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) {
-            SKP_assert( i < MAX_FRAME_LENGTH );
-            sLTP_Q16[ i ] = SKP_SMULWB( inv_gain_Q32, sLTP[ i ] );
+            silk_assert( i < MAX_FRAME_LENGTH );
+            sLTP_Q16[ i ] = silk_SMULWB( inv_gain_Q32, sLTP[ i ] );
         }
     }
 
@@ -643,13 +643,13 @@
 
         /* Scale long-term shaping state */
         for( i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx; i++ ) {
-            NSQ->sLTP_shp_Q10[ i ] = SKP_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q10[ i ] );
+            NSQ->sLTP_shp_Q10[ i ] = silk_SMULWW( gain_adj_Q16, NSQ->sLTP_shp_Q10[ i ] );
         }
 
         /* Scale long-term prediction state */
         if( NSQ->rewhite_flag == 0 ) {
             for( i = NSQ->sLTP_buf_idx - lag - LTP_ORDER / 2; i < NSQ->sLTP_buf_idx; i++ ) {
-                sLTP_Q16[ i ] = SKP_SMULWW( gain_adj_Q16, sLTP_Q16[ i ] );
+                sLTP_Q16[ i ] = silk_SMULWW( gain_adj_Q16, sLTP_Q16[ i ] );
             }
         }
 
@@ -657,28 +657,28 @@
             psDD = &psDelDec[ k ];
 
             /* Scale scalar states */
-            psDD->LF_AR_Q12 = SKP_SMULWW( gain_adj_Q16, psDD->LF_AR_Q12 );
+            psDD->LF_AR_Q12 = silk_SMULWW( gain_adj_Q16, psDD->LF_AR_Q12 );
 
             /* Scale short-term prediction and shaping states */
             for( i = 0; i < NSQ_LPC_BUF_LENGTH; i++ ) {
-                psDD->sLPC_Q14[ i ] = SKP_SMULWW( gain_adj_Q16, psDD->sLPC_Q14[ i ] );
+                psDD->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sLPC_Q14[ i ] );
             }
             for( i = 0; i < MAX_SHAPE_LPC_ORDER; i++ ) {
-                psDD->sAR2_Q14[ i ] = SKP_SMULWW( gain_adj_Q16, psDD->sAR2_Q14[ i ] );
+                psDD->sAR2_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDD->sAR2_Q14[ i ] );
             }
             for( i = 0; i < DECISION_DELAY; i++ ) {
-                psDD->Pred_Q16[  i ] = SKP_SMULWW( gain_adj_Q16, psDD->Pred_Q16[  i ] );
-                psDD->Shape_Q10[ i ] = SKP_SMULWW( gain_adj_Q16, psDD->Shape_Q10[ i ] );
+                psDD->Pred_Q16[  i ] = silk_SMULWW( gain_adj_Q16, psDD->Pred_Q16[  i ] );
+                psDD->Shape_Q10[ i ] = silk_SMULWW( gain_adj_Q16, psDD->Shape_Q10[ i ] );
             }
         }
     }
 
     /* Scale input */
     for( i = 0; i < psEncC->subfr_length; i++ ) {
-        x_sc_Q10[ i ] = SKP_RSHIFT( SKP_SMULBB( x[ i ], ( opus_int16 )inv_gain_Q16 ), 6 );
+        x_sc_Q10[ i ] = silk_RSHIFT( silk_SMULBB( x[ i ], ( opus_int16 )inv_gain_Q16 ), 6 );
     }
 
     /* save inv_gain */
-    SKP_assert( inv_gain_Q16 != 0 );
+    silk_assert( inv_gain_Q16 != 0 );
     NSQ->prev_inv_gain_Q16 = inv_gain_Q16;
 }
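
The quantizer loop in this file weighs two neighbouring quantization levels by the cost Lambda*|q| + (r - q)^2, computed in Q10 as rd = ( q*Lambda + rr*rr ) >> 10, and keeps the cheaper one as the primary candidate. A self-contained sketch of that comparison for the positive-q branch, with simplified macro stand-ins and made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Simplified equivalents for illustration only */
#define silk_SMULBB(a, b)     ( (int32_t)(int16_t)(a) * (int32_t)(int16_t)(b) )
#define silk_SMLABB(a, b, c)  ( (a) + silk_SMULBB( (b), (c) ) )
#define silk_RSHIFT(a, s)     ( (a) >> (s) )

int main(void)
{
    int32_t r_Q10      = 1750;     /* residual to be quantized, Q10        */
    int32_t Lambda_Q10 = 400;      /* rate penalty per unit magnitude, Q10 */
    int32_t q1_Q10     = 1024;     /* candidate level 1.0                  */
    int32_t q2_Q10     = 2048;     /* candidate level 2.0                  */
    int32_t rr_Q10, rd1_Q10, rd2_Q10;

    /* rate term: Lambda * |q| (Q20 before the final shift) */
    rd1_Q10 = silk_SMULBB( q1_Q10, Lambda_Q10 );
    rd2_Q10 = silk_SMULBB( q2_Q10, Lambda_Q10 );

    /* add the squared quantization error and return to Q10 */
    rr_Q10  = r_Q10 - q1_Q10;
    rd1_Q10 = silk_RSHIFT( silk_SMLABB( rd1_Q10, rr_Q10, rr_Q10 ), 10 );
    rr_Q10  = r_Q10 - q2_Q10;
    rd2_Q10 = silk_RSHIFT( silk_SMLABB( rd2_Q10, rr_Q10, rr_Q10 ), 10 );

    printf( "rd1 = %d, rd2 = %d -> candidate %c wins\n",
            (int)rd1_Q10, (int)rd2_Q10, rd1_Q10 < rd2_Q10 ? '1' : '2' );
    return 0;
}
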
diff --git a/silk/silk_PLC.c b/silk/silk_PLC.c
index 77c3d07..d0658a4 100644
--- a/silk/silk_PLC.c
+++ b/silk/silk_PLC.c
@@ -41,7 +41,7 @@
     silk_decoder_state      *psDec              /* I/O Decoder state        */
 )
 {
-    psDec->sPLC.pitchL_Q8 = SKP_RSHIFT( psDec->frame_length, 1 );
+    psDec->sPLC.pitchL_Q8 = silk_RSHIFT( psDec->frame_length, 1 );
 }
 
 void silk_PLC(
@@ -104,16 +104,16 @@
             }
             if( temp_LTP_Gain_Q14 > LTP_Gain_Q14 ) {
                 LTP_Gain_Q14 = temp_LTP_Gain_Q14;
-                SKP_memcpy( psPLC->LTPCoef_Q14,
-                    &psDecCtrl->LTPCoef_Q14[ SKP_SMULBB( psDec->nb_subfr - 1 - j, LTP_ORDER ) ],
+                silk_memcpy( psPLC->LTPCoef_Q14,
+                    &psDecCtrl->LTPCoef_Q14[ silk_SMULBB( psDec->nb_subfr - 1 - j, LTP_ORDER ) ],
                     LTP_ORDER * sizeof( opus_int16 ) );
 
-                psPLC->pitchL_Q8 = SKP_LSHIFT( psDecCtrl->pitchL[ psDec->nb_subfr - 1 - j ], 8 );
+                psPLC->pitchL_Q8 = silk_LSHIFT( psDecCtrl->pitchL[ psDec->nb_subfr - 1 - j ], 8 );
             }
         }
 
 #if USE_SINGLE_TAP
-        SKP_memset( psPLC->LTPCoef_Q14, 0, LTP_ORDER * sizeof( opus_int16 ) );
+        silk_memset( psPLC->LTPCoef_Q14, 0, LTP_ORDER * sizeof( opus_int16 ) );
         psPLC->LTPCoef_Q14[ LTP_ORDER / 2 ] = LTP_Gain_Q14;
 #endif
 
@@ -122,32 +122,32 @@
             opus_int   scale_Q10;
             opus_int32 tmp;
 
-            tmp = SKP_LSHIFT( V_PITCH_GAIN_START_MIN_Q14, 10 );
-            scale_Q10 = SKP_DIV32( tmp, SKP_max( LTP_Gain_Q14, 1 ) );
+            tmp = silk_LSHIFT( V_PITCH_GAIN_START_MIN_Q14, 10 );
+            scale_Q10 = silk_DIV32( tmp, silk_max( LTP_Gain_Q14, 1 ) );
             for( i = 0; i < LTP_ORDER; i++ ) {
-                psPLC->LTPCoef_Q14[ i ] = SKP_RSHIFT( SKP_SMULBB( psPLC->LTPCoef_Q14[ i ], scale_Q10 ), 10 );
+                psPLC->LTPCoef_Q14[ i ] = silk_RSHIFT( silk_SMULBB( psPLC->LTPCoef_Q14[ i ], scale_Q10 ), 10 );
             }
         } else if( LTP_Gain_Q14 > V_PITCH_GAIN_START_MAX_Q14 ) {
             opus_int   scale_Q14;
             opus_int32 tmp;
 
-            tmp = SKP_LSHIFT( V_PITCH_GAIN_START_MAX_Q14, 14 );
-            scale_Q14 = SKP_DIV32( tmp, SKP_max( LTP_Gain_Q14, 1 ) );
+            tmp = silk_LSHIFT( V_PITCH_GAIN_START_MAX_Q14, 14 );
+            scale_Q14 = silk_DIV32( tmp, silk_max( LTP_Gain_Q14, 1 ) );
             for( i = 0; i < LTP_ORDER; i++ ) {
-                psPLC->LTPCoef_Q14[ i ] = SKP_RSHIFT( SKP_SMULBB( psPLC->LTPCoef_Q14[ i ], scale_Q14 ), 14 );
+                psPLC->LTPCoef_Q14[ i ] = silk_RSHIFT( silk_SMULBB( psPLC->LTPCoef_Q14[ i ], scale_Q14 ), 14 );
             }
         }
     } else {
-        psPLC->pitchL_Q8 = SKP_LSHIFT( SKP_SMULBB( psDec->fs_kHz, 18 ), 8 );
-        SKP_memset( psPLC->LTPCoef_Q14, 0, LTP_ORDER * sizeof( opus_int16 ));
+        psPLC->pitchL_Q8 = silk_LSHIFT( silk_SMULBB( psDec->fs_kHz, 18 ), 8 );
+        silk_memset( psPLC->LTPCoef_Q14, 0, LTP_ORDER * sizeof( opus_int16 ));
     }
 
     /* Save LPC coeficients */
-    SKP_memcpy( psPLC->prevLPC_Q12, psDecCtrl->PredCoef_Q12[ 1 ], psDec->LPC_order * sizeof( opus_int16 ) );
+    silk_memcpy( psPLC->prevLPC_Q12, psDecCtrl->PredCoef_Q12[ 1 ], psDec->LPC_order * sizeof( opus_int16 ) );
     psPLC->prevLTP_scale_Q14 = psDecCtrl->LTP_scale_Q14;
 
     /* Save Gains */
-    SKP_memcpy( psPLC->prevGain_Q16, psDecCtrl->Gains_Q16, psDec->nb_subfr * sizeof( opus_int32 ) );
+    silk_memcpy( psPLC->prevGain_Q16, psDecCtrl->Gains_Q16, psDec->nb_subfr * sizeof( opus_int32 ) );
 }
 
 void silk_PLC_conceal(
@@ -168,7 +168,7 @@
     psPLC = &psDec->sPLC;
 
     /* Update LTP buffer */
-    SKP_memmove( psDec->sLTP_Q16, &psDec->sLTP_Q16[ psDec->frame_length ], psDec->ltp_mem_length * sizeof( opus_int32 ) );
+    silk_memmove( psDec->sLTP_Q16, &psDec->sLTP_Q16[ psDec->frame_length ], psDec->ltp_mem_length * sizeof( opus_int32 ) );
 
     /* LPC concealment. Apply BWE to previous LPC */
     silk_bwexpander( psPLC->prevLPC_Q12, psDec->LPC_order, SILK_FIX_CONST( BWE_COEF, 16 ) );
@@ -181,8 +181,8 @@
         exc_buf[i] = 0;
     for( k = ( psDec->nb_subfr >> 1 ); k < psDec->nb_subfr; k++ ) {
         for( i = 0; i < psDec->subfr_length; i++ ) {
-            exc_buf_ptr[ i ] = ( opus_int16 )SKP_RSHIFT(
-                SKP_SMULWW( psDec->exc_Q10[ i + k * psDec->subfr_length ], psPLC->prevGain_Q16[ k ] ), 10 );
+            exc_buf_ptr[ i ] = ( opus_int16 )silk_RSHIFT(
+                silk_SMULWW( psDec->exc_Q10[ i + k * psDec->subfr_length ], psPLC->prevGain_Q16[ k ] ), 10 );
         }
         exc_buf_ptr += psDec->subfr_length;
     }
@@ -190,12 +190,12 @@
     silk_sum_sqr_shift( &energy1, &shift1, exc_buf,                         psDec->subfr_length );
     silk_sum_sqr_shift( &energy2, &shift2, &exc_buf[ psDec->subfr_length ], psDec->subfr_length );
 
-    if( SKP_RSHIFT( energy1, shift2 ) < SKP_RSHIFT( energy2, shift1 ) ) {
+    if( silk_RSHIFT( energy1, shift2 ) < silk_RSHIFT( energy2, shift1 ) ) {
         /* First sub-frame has lowest energy */
-        rand_ptr = &psDec->exc_Q10[ SKP_max_int( 0, 3 * psDec->subfr_length - RAND_BUF_SIZE ) ];
+        rand_ptr = &psDec->exc_Q10[ silk_max_int( 0, 3 * psDec->subfr_length - RAND_BUF_SIZE ) ];
     } else {
         /* Second sub-frame has lowest energy */
-        rand_ptr = &psDec->exc_Q10[ SKP_max_int( 0, psDec->frame_length - RAND_BUF_SIZE ) ];
+        rand_ptr = &psDec->exc_Q10[ silk_max_int( 0, psDec->frame_length - RAND_BUF_SIZE ) ];
     }
 
     /* Setup Gain to random noise component */
@@ -203,11 +203,11 @@
     rand_scale_Q14 = psPLC->randScale_Q14;
 
     /* Setup attenuation gains */
-    harm_Gain_Q15 = HARM_ATT_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
+    harm_Gain_Q15 = HARM_ATT_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
     if( psDec->prevSignalType == TYPE_VOICED ) {
-        rand_Gain_Q15 = PLC_RAND_ATTENUATE_V_Q15[  SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
+        rand_Gain_Q15 = PLC_RAND_ATTENUATE_V_Q15[  silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
     } else {
-        rand_Gain_Q15 = PLC_RAND_ATTENUATE_UV_Q15[ SKP_min_int( NB_ATT - 1, psDec->lossCnt ) ];
+        rand_Gain_Q15 = PLC_RAND_ATTENUATE_UV_Q15[ silk_min_int( NB_ATT - 1, psDec->lossCnt ) ];
     }
 
     /* First Lost frame */
@@ -219,24 +219,24 @@
             for( i = 0; i < LTP_ORDER; i++ ) {
                 rand_scale_Q14 -= B_Q14[ i ];
             }
-            rand_scale_Q14 = SKP_max_16( 3277, rand_scale_Q14 ); /* 0.2 */
-            rand_scale_Q14 = ( opus_int16 )SKP_RSHIFT( SKP_SMULBB( rand_scale_Q14, psPLC->prevLTP_scale_Q14 ), 14 );
+            rand_scale_Q14 = silk_max_16( 3277, rand_scale_Q14 ); /* 0.2 */
+            rand_scale_Q14 = ( opus_int16 )silk_RSHIFT( silk_SMULBB( rand_scale_Q14, psPLC->prevLTP_scale_Q14 ), 14 );
         } else {
             /* Reduce random noise for unvoiced frames with high LPC gain */
             opus_int32 invGain_Q30, down_scale_Q30;
 
             silk_LPC_inverse_pred_gain( &invGain_Q30, psPLC->prevLPC_Q12, psDec->LPC_order );
 
-            down_scale_Q30 = SKP_min_32( SKP_RSHIFT( 1 << 30, LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 );
-            down_scale_Q30 = SKP_max_32( SKP_RSHIFT( 1 << 30, LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 );
-            down_scale_Q30 = SKP_LSHIFT( down_scale_Q30, LOG2_INV_LPC_GAIN_HIGH_THRES );
+            down_scale_Q30 = silk_min_32( silk_RSHIFT( 1 << 30, LOG2_INV_LPC_GAIN_HIGH_THRES ), invGain_Q30 );
+            down_scale_Q30 = silk_max_32( silk_RSHIFT( 1 << 30, LOG2_INV_LPC_GAIN_LOW_THRES ), down_scale_Q30 );
+            down_scale_Q30 = silk_LSHIFT( down_scale_Q30, LOG2_INV_LPC_GAIN_HIGH_THRES );
 
-            rand_Gain_Q15 = SKP_RSHIFT( SKP_SMULWB( down_scale_Q30, rand_Gain_Q15 ), 14 );
+            rand_Gain_Q15 = silk_RSHIFT( silk_SMULWB( down_scale_Q30, rand_Gain_Q15 ), 14 );
         }
     }
 
     rand_seed    = psPLC->rand_seed;
-    lag          = SKP_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
+    lag          = silk_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
     sLTP_buf_idx = psDec->ltp_mem_length;
 
     /***************************/
@@ -247,23 +247,23 @@
         /* Setup pointer */
         pred_lag_ptr = &psDec->sLTP_Q16[ sLTP_buf_idx - lag + LTP_ORDER / 2 ];
         for( i = 0; i < psDec->subfr_length; i++ ) {
-            rand_seed = SKP_RAND( rand_seed );
-            idx = SKP_RSHIFT( rand_seed, 25 ) & RAND_BUF_MASK;
+            rand_seed = silk_RAND( rand_seed );
+            idx = silk_RSHIFT( rand_seed, 25 ) & RAND_BUF_MASK;
 
             /* Unrolled loop */
-            LTP_pred_Q14 = SKP_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
-            LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
+            LTP_pred_Q14 = silk_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
+            LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
             pred_lag_ptr++;
 
             /* Generate LPC residual */
-            LPC_exc_Q10 = SKP_LSHIFT( SKP_SMULWB( rand_ptr[ idx ], rand_scale_Q14 ), 2 ); /* Random noise part */
-            LPC_exc_Q10 = SKP_ADD32( LPC_exc_Q10, SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );  /* Harmonic part */
+            LPC_exc_Q10 = silk_LSHIFT( silk_SMULWB( rand_ptr[ idx ], rand_scale_Q14 ), 2 ); /* Random noise part */
+            LPC_exc_Q10 = silk_ADD32( LPC_exc_Q10, silk_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );  /* Harmonic part */
 
             /* Update states */
-            psDec->sLTP_Q16[ sLTP_buf_idx ] = SKP_LSHIFT( LPC_exc_Q10, 6 );
+            psDec->sLTP_Q16[ sLTP_buf_idx ] = silk_LSHIFT( LPC_exc_Q10, 6 );
             sLTP_buf_idx++;
 
             /* Save LPC residual */
@@ -272,15 +272,15 @@
         sig_Q10_ptr += psDec->subfr_length;
         /* Gradually reduce LTP gain */
         for( j = 0; j < LTP_ORDER; j++ ) {
-            B_Q14[ j ] = SKP_RSHIFT( SKP_SMULBB( harm_Gain_Q15, B_Q14[ j ] ), 15 );
+            B_Q14[ j ] = silk_RSHIFT( silk_SMULBB( harm_Gain_Q15, B_Q14[ j ] ), 15 );
         }
         /* Gradually reduce excitation gain */
-        rand_scale_Q14 = SKP_RSHIFT( SKP_SMULBB( rand_scale_Q14, rand_Gain_Q15 ), 15 );
+        rand_scale_Q14 = silk_RSHIFT( silk_SMULBB( rand_scale_Q14, rand_Gain_Q15 ), 15 );
 
         /* Slowly increase pitch lag */
-        psPLC->pitchL_Q8 += SKP_SMULWB( psPLC->pitchL_Q8, PITCH_DRIFT_FAC_Q16 );
-        psPLC->pitchL_Q8 = SKP_min_32( psPLC->pitchL_Q8, SKP_LSHIFT( SKP_SMULBB( MAX_PITCH_LAG_MS, psDec->fs_kHz ), 8 ) );
-        lag = SKP_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
+        psPLC->pitchL_Q8 += silk_SMULWB( psPLC->pitchL_Q8, PITCH_DRIFT_FAC_Q16 );
+        psPLC->pitchL_Q8 = silk_min_32( psPLC->pitchL_Q8, silk_LSHIFT( silk_SMULBB( MAX_PITCH_LAG_MS, psDec->fs_kHz ), 8 ) );
+        lag = silk_RSHIFT_ROUND( psPLC->pitchL_Q8, 8 );
     }
 
     /***************************/
@@ -288,40 +288,40 @@
     /***************************/
     sig_Q10_ptr = sig_Q10;
     /* Preload LPC coeficients to array on stack. Gives small performance gain */
-    SKP_memcpy( A_Q12_tmp, psPLC->prevLPC_Q12, psDec->LPC_order * sizeof( opus_int16 ) );
-    SKP_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */
+    silk_memcpy( A_Q12_tmp, psPLC->prevLPC_Q12, psDec->LPC_order * sizeof( opus_int16 ) );
+    silk_assert( psDec->LPC_order >= 10 ); /* check that unrolling works */
     for( k = 0; k < psDec->nb_subfr; k++ ) {
         for( i = 0; i < psDec->subfr_length; i++ ){
             /* partly unrolled */
-            LPC_pred_Q10 = SKP_SMULWB(               psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  1 ], A_Q12_tmp[ 0 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  2 ], A_Q12_tmp[ 1 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  3 ], A_Q12_tmp[ 2 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  4 ], A_Q12_tmp[ 3 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  5 ], A_Q12_tmp[ 4 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  6 ], A_Q12_tmp[ 5 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  7 ], A_Q12_tmp[ 6 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  8 ], A_Q12_tmp[ 7 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  9 ], A_Q12_tmp[ 8 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 10 ], A_Q12_tmp[ 9 ] );
+            LPC_pred_Q10 = silk_SMULWB(               psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  1 ], A_Q12_tmp[ 0 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  2 ], A_Q12_tmp[ 1 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  3 ], A_Q12_tmp[ 2 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  4 ], A_Q12_tmp[ 3 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  5 ], A_Q12_tmp[ 4 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  6 ], A_Q12_tmp[ 5 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  7 ], A_Q12_tmp[ 6 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  8 ], A_Q12_tmp[ 7 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  9 ], A_Q12_tmp[ 8 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 10 ], A_Q12_tmp[ 9 ] );
 
             for( j = 10; j < psDec->LPC_order; j++ ) {
-                LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - j - 1 ], A_Q12_tmp[ j ] );
+                LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - j - 1 ], A_Q12_tmp[ j ] );
             }
 
             /* Add prediction to LPC residual */
-            sig_Q10_ptr[ i ] = SKP_ADD32( sig_Q10_ptr[ i ], LPC_pred_Q10 );
+            sig_Q10_ptr[ i ] = silk_ADD32( sig_Q10_ptr[ i ], LPC_pred_Q10 );
 
             /* Update states */
-            psDec->sLPC_Q14[ MAX_LPC_ORDER + i ] = SKP_LSHIFT( sig_Q10_ptr[ i ], 4 );
+            psDec->sLPC_Q14[ MAX_LPC_ORDER + i ] = silk_LSHIFT( sig_Q10_ptr[ i ], 4 );
         }
         sig_Q10_ptr += psDec->subfr_length;
         /* Update LPC filter state */
-        SKP_memcpy( psDec->sLPC_Q14, &psDec->sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );
+        silk_memcpy( psDec->sLPC_Q14, &psDec->sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );
     }
 
     /* Scale with Gain */
     for( i = 0; i < psDec->frame_length; i++ ) {
-        frame[ i ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SMULWW( sig_Q10[ i ], psPLC->prevGain_Q16[ psDec->nb_subfr - 1 ] ), 10 ) );
+        frame[ i ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( sig_Q10[ i ], psPLC->prevGain_Q16[ psDec->nb_subfr - 1 ] ), 10 ) );
     }
 
     /**************************************/
@@ -359,9 +359,9 @@
 
             /* Normalize energies */
             if( energy_shift > psPLC->conc_energy_shift ) {
-                psPLC->conc_energy = SKP_RSHIFT( psPLC->conc_energy, energy_shift - psPLC->conc_energy_shift );
+                psPLC->conc_energy = silk_RSHIFT( psPLC->conc_energy, energy_shift - psPLC->conc_energy_shift );
             } else if( energy_shift < psPLC->conc_energy_shift ) {
-                energy = SKP_RSHIFT( energy, psPLC->conc_energy_shift - energy_shift );
+                energy = silk_RSHIFT( energy, psPLC->conc_energy_shift - energy_shift );
             }
 
             /* Fade in the energy difference */
@@ -371,18 +371,18 @@
 
                 LZ = silk_CLZ32( psPLC->conc_energy );
                 LZ = LZ - 1;
-                psPLC->conc_energy = SKP_LSHIFT( psPLC->conc_energy, LZ );
-                energy = SKP_RSHIFT( energy, SKP_max_32( 24 - LZ, 0 ) );
+                psPLC->conc_energy = silk_LSHIFT( psPLC->conc_energy, LZ );
+                energy = silk_RSHIFT( energy, silk_max_32( 24 - LZ, 0 ) );
 
-                frac_Q24 = SKP_DIV32( psPLC->conc_energy, SKP_max( energy, 1 ) );
+                frac_Q24 = silk_DIV32( psPLC->conc_energy, silk_max( energy, 1 ) );
 
-                gain_Q16 = SKP_LSHIFT( silk_SQRT_APPROX( frac_Q24 ), 4 );
-                slope_Q16 = SKP_DIV32_16( ( 1 << 16 ) - gain_Q16, length );
+                gain_Q16 = silk_LSHIFT( silk_SQRT_APPROX( frac_Q24 ), 4 );
+                slope_Q16 = silk_DIV32_16( ( 1 << 16 ) - gain_Q16, length );
                 /* Make slope 4x steeper to avoid missing onsets after DTX */
-                slope_Q16 = SKP_LSHIFT( slope_Q16, 2 );
+                slope_Q16 = silk_LSHIFT( slope_Q16, 2 );
 
                 for( i = 0; i < length; i++ ) {
-                    frame[ i ] = SKP_SMULWB( gain_Q16, frame[ i ] );
+                    frame[ i ] = silk_SMULWB( gain_Q16, frame[ i ] );
                     gain_Q16 += slope_Q16;
                     if( gain_Q16 > 1 << 16 ) {
                         break;
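
The concealment loop in this file attenuates the LTP taps and the random-excitation gain once per subframe ( B_Q14[ j ] = ( harm_Gain_Q15 * B_Q14[ j ] ) >> 15, plus the matching rand_scale_Q14 update ), so the synthesized signal fades out over consecutive lost frames. A hedged sketch of that Q14/Q15 geometric decay; the attenuation factor below is an example value, not an entry from the HARM_ATT_Q15 / PLC_RAND_ATTENUATE tables:

#include <stdio.h>
#include <stdint.h>

/* Simplified equivalents for illustration only */
#define silk_SMULBB(a, b)  ( (int32_t)(int16_t)(a) * (int32_t)(int16_t)(b) )
#define silk_RSHIFT(a, s)  ( (a) >> (s) )

int main(void)
{
    int16_t ltp_tap_Q14   = 11469;   /* ~0.70 LTP tap in Q14                */
    int16_t harm_Gain_Q15 = 31130;   /* ~0.95 attenuation per subframe, Q15 */
    int k;

    for( k = 1; k <= 4; k++ ) {
        /* Q14 * Q15 >> 15 stays in Q14, so the tap decays geometrically */
        ltp_tap_Q14 = (int16_t)silk_RSHIFT( silk_SMULBB( harm_Gain_Q15, ltp_tap_Q14 ), 15 );
        printf( "after %d subframes: tap = %.3f\n", k, ltp_tap_Q14 / 16384.0 );
    }
    return 0;
}
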
diff --git a/silk/silk_SigProc_FIX.h b/silk/silk_SigProc_FIX.h
index e015775..7514eb7 100644
--- a/silk/silk_SigProc_FIX.h
+++ b/silk/silk_SigProc_FIX.h
@@ -33,7 +33,7 @@
 {
 #endif
 
-/*#define SKP_MACRO_COUNT */          /* Used to enable WMOPS counting */
+/*#define silk_MACRO_COUNT */          /* Used to enable WMOPS counting */
 
 #define SILK_MAX_ORDER_LPC            16                        /* max order of the LPC analysis in schur() and k2a()    */
 
@@ -408,206 +408,206 @@
 
 /* Allocate opus_int16 alligned to 4-byte memory address */
 #if EMBEDDED_ARM
-#define SKP_DWORD_ALIGN __attribute__((aligned(4)))
+#define silk_DWORD_ALIGN __attribute__((aligned(4)))
 #else
-#define SKP_DWORD_ALIGN
+#define silk_DWORD_ALIGN
 #endif
 
 /* Useful Macros that can be adjusted to other platforms */
-#define SKP_memcpy(a, b, c)                memcpy((a), (b), (c))    /* Dest, Src, ByteCount */
-#define SKP_memset(a, b, c)                memset((a), (b), (c))    /* Dest, value, ByteCount */
-#define SKP_memmove(a, b, c)               memmove((a), (b), (c))   /* Dest, Src, ByteCount */
+#define silk_memcpy(a, b, c)                memcpy((a), (b), (c))    /* Dest, Src, ByteCount */
+#define silk_memset(a, b, c)                memset((a), (b), (c))    /* Dest, value, ByteCount */
+#define silk_memmove(a, b, c)               memmove((a), (b), (c))   /* Dest, Src, ByteCount */
 /* fixed point macros */
 
 /* (a32 * b32) output have to be 32bit int */
-#define SKP_MUL(a32, b32)                  ((a32) * (b32))
+#define silk_MUL(a32, b32)                  ((a32) * (b32))
 
 /* (a32 * b32) output have to be 32bit uint */
-#define SKP_MUL_uint(a32, b32)             SKP_MUL(a32, b32)
+#define silk_MUL_uint(a32, b32)             silk_MUL(a32, b32)
 
 /* a32 + (b32 * c32) output have to be 32bit int */
-#define SKP_MLA(a32, b32, c32)             SKP_ADD32((a32),((b32) * (c32)))
+#define silk_MLA(a32, b32, c32)             silk_ADD32((a32),((b32) * (c32)))
 
 /* a32 + (b32 * c32) output have to be 32bit uint */
-#define SKP_MLA_uint(a32, b32, c32)        SKP_MLA(a32, b32, c32)
+#define silk_MLA_uint(a32, b32, c32)        silk_MLA(a32, b32, c32)
 
 /* ((a32 >> 16)  * (b32 >> 16)) output have to be 32bit int */
-#define SKP_SMULTT(a32, b32)               (((a32) >> 16) * ((b32) >> 16))
+#define silk_SMULTT(a32, b32)               (((a32) >> 16) * ((b32) >> 16))
 
 /* a32 + ((a32 >> 16)  * (b32 >> 16)) output have to be 32bit int */
-#define SKP_SMLATT(a32, b32, c32)          SKP_ADD32((a32),((b32) >> 16) * ((c32) >> 16))
+#define silk_SMLATT(a32, b32, c32)          silk_ADD32((a32),((b32) >> 16) * ((c32) >> 16))
 
-#define SKP_SMLALBB(a64, b16, c16)         SKP_ADD64((a64),(opus_int64)((opus_int32)(b16) * (opus_int32)(c16)))
+#define silk_SMLALBB(a64, b16, c16)         silk_ADD64((a64),(opus_int64)((opus_int32)(b16) * (opus_int32)(c16)))
 
 /* (a32 * b32) */
-#define SKP_SMULL(a32, b32)                ((opus_int64)(a32) * /*(opus_int64)*/(b32))
+#define silk_SMULL(a32, b32)                ((opus_int64)(a32) * /*(opus_int64)*/(b32))
 
 /* Adds two signed 32-bit values in a way that can overflow, while not relying on undefined behaviour
    (just standard two's complement implementation-specific behaviour) */
-#define SKP_ADD32_ovflw(a, b)              ((opus_int32)((opus_uint32)(a) + (opus_uint32)(b)))
+#define silk_ADD32_ovflw(a, b)              ((opus_int32)((opus_uint32)(a) + (opus_uint32)(b)))
 
 /* multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode)*/
-#define SKP_MLA_ovflw(a32, b32, c32)       SKP_ADD32_ovflw((a32),(opus_uint32)(b32) * (opus_uint32)(c32))
-#ifndef SKP_SMLABB_ovflw
-#    define SKP_SMLABB_ovflw(a32, b32, c32)    SKP_ADD32_ovflw((a32), (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)))
+#define silk_MLA_ovflw(a32, b32, c32)       silk_ADD32_ovflw((a32),(opus_uint32)(b32) * (opus_uint32)(c32))
+#ifndef silk_SMLABB_ovflw
+#    define silk_SMLABB_ovflw(a32, b32, c32)    silk_ADD32_ovflw((a32), (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)))
 #endif
 
-#define SKP_DIV32_16(a32, b16)             ((opus_int32)((a32) / (b16)))
-#define SKP_DIV32(a32, b32)                ((opus_int32)((a32) / (b32)))
+#define silk_DIV32_16(a32, b16)             ((opus_int32)((a32) / (b16)))
+#define silk_DIV32(a32, b32)                ((opus_int32)((a32) / (b32)))
 
 /* These macros enables checking for overflow in silk_API_Debug.h*/
-#define SKP_ADD16(a, b)                    ((a) + (b))
-#define SKP_ADD32(a, b)                    ((a) + (b))
-#define SKP_ADD64(a, b)                    ((a) + (b))
+#define silk_ADD16(a, b)                    ((a) + (b))
+#define silk_ADD32(a, b)                    ((a) + (b))
+#define silk_ADD64(a, b)                    ((a) + (b))
 
-#define SKP_SUB16(a, b)                    ((a) - (b))
-#define SKP_SUB32(a, b)                    ((a) - (b))
-#define SKP_SUB64(a, b)                    ((a) - (b))
+#define silk_SUB16(a, b)                    ((a) - (b))
+#define silk_SUB32(a, b)                    ((a) - (b))
+#define silk_SUB64(a, b)                    ((a) - (b))
 
-#define SKP_SAT8(a)                        ((a) > SKP_int8_MAX ? SKP_int8_MAX  : \
-                                           ((a) < SKP_int8_MIN ? SKP_int8_MIN  : (a)))
-#define SKP_SAT16(a)                       ((a) > SKP_int16_MAX ? SKP_int16_MAX : \
-                                           ((a) < SKP_int16_MIN ? SKP_int16_MIN : (a)))
-#define SKP_SAT32(a)                       ((a) > SKP_int32_MAX ? SKP_int32_MAX : \
-                                           ((a) < SKP_int32_MIN ? SKP_int32_MIN : (a)))
+#define silk_SAT8(a)                        ((a) > silk_int8_MAX ? silk_int8_MAX  : \
+                                           ((a) < silk_int8_MIN ? silk_int8_MIN  : (a)))
+#define silk_SAT16(a)                       ((a) > silk_int16_MAX ? silk_int16_MAX : \
+                                           ((a) < silk_int16_MIN ? silk_int16_MIN : (a)))
+#define silk_SAT32(a)                       ((a) > silk_int32_MAX ? silk_int32_MAX : \
+                                           ((a) < silk_int32_MIN ? silk_int32_MIN : (a)))
 
-#define SKP_CHECK_FIT8(a)                  (a)
-#define SKP_CHECK_FIT16(a)                 (a)
-#define SKP_CHECK_FIT32(a)                 (a)
+#define silk_CHECK_FIT8(a)                  (a)
+#define silk_CHECK_FIT16(a)                 (a)
+#define silk_CHECK_FIT32(a)                 (a)
 
-#define SKP_ADD_SAT16(a, b)                (opus_int16)SKP_SAT16( SKP_ADD32( (opus_int32)(a), (b) ) )
-#define SKP_ADD_SAT64(a, b)                ((((a) + (b)) & 0x8000000000000000LL) == 0 ?                            \
-                                           ((((a) & (b)) & 0x8000000000000000LL) != 0 ? SKP_int64_MIN : (a)+(b)) :    \
-                                           ((((a) | (b)) & 0x8000000000000000LL) == 0 ? SKP_int64_MAX : (a)+(b)) )
+#define silk_ADD_SAT16(a, b)                (opus_int16)silk_SAT16( silk_ADD32( (opus_int32)(a), (b) ) )
+#define silk_ADD_SAT64(a, b)                ((((a) + (b)) & 0x8000000000000000LL) == 0 ?                            \
+                                           ((((a) & (b)) & 0x8000000000000000LL) != 0 ? silk_int64_MIN : (a)+(b)) :    \
+                                           ((((a) | (b)) & 0x8000000000000000LL) == 0 ? silk_int64_MAX : (a)+(b)) )
 
-#define SKP_SUB_SAT16(a, b)                (opus_int16)SKP_SAT16( SKP_SUB32( (opus_int32)(a), (b) ) )
-#define SKP_SUB_SAT64(a, b)                ((((a)-(b)) & 0x8000000000000000LL) == 0 ?                                                    \
-                                           (( (a) & ((b)^0x8000000000000000LL) & 0x8000000000000000LL) ? SKP_int64_MIN : (a)-(b)) :    \
-                                           ((((a)^0x8000000000000000LL) & (b)  & 0x8000000000000000LL) ? SKP_int64_MAX : (a)-(b)) )
+#define silk_SUB_SAT16(a, b)                (opus_int16)silk_SAT16( silk_SUB32( (opus_int32)(a), (b) ) )
+#define silk_SUB_SAT64(a, b)                ((((a)-(b)) & 0x8000000000000000LL) == 0 ?                                                    \
+                                           (( (a) & ((b)^0x8000000000000000LL) & 0x8000000000000000LL) ? silk_int64_MIN : (a)-(b)) :    \
+                                           ((((a)^0x8000000000000000LL) & (b)  & 0x8000000000000000LL) ? silk_int64_MAX : (a)-(b)) )
 
 /* Saturation for positive input values */
-#define SKP_POS_SAT32(a)                   ((a) > SKP_int32_MAX ? SKP_int32_MAX : (a))
+#define silk_POS_SAT32(a)                   ((a) > silk_int32_MAX ? silk_int32_MAX : (a))
 
 /* Add with saturation for positive input values */
-#define SKP_ADD_POS_SAT8(a, b)             ((((a)+(b)) & 0x80)                 ? SKP_int8_MAX  : ((a)+(b)))
-#define SKP_ADD_POS_SAT16(a, b)            ((((a)+(b)) & 0x8000)               ? SKP_int16_MAX : ((a)+(b)))
-#define SKP_ADD_POS_SAT32(a, b)            ((((a)+(b)) & 0x80000000)           ? SKP_int32_MAX : ((a)+(b)))
-#define SKP_ADD_POS_SAT64(a, b)            ((((a)+(b)) & 0x8000000000000000LL) ? SKP_int64_MAX : ((a)+(b)))
+#define silk_ADD_POS_SAT8(a, b)             ((((a)+(b)) & 0x80)                 ? silk_int8_MAX  : ((a)+(b)))
+#define silk_ADD_POS_SAT16(a, b)            ((((a)+(b)) & 0x8000)               ? silk_int16_MAX : ((a)+(b)))
+#define silk_ADD_POS_SAT32(a, b)            ((((a)+(b)) & 0x80000000)           ? silk_int32_MAX : ((a)+(b)))
+#define silk_ADD_POS_SAT64(a, b)            ((((a)+(b)) & 0x8000000000000000LL) ? silk_int64_MAX : ((a)+(b)))
 
-#define SKP_LSHIFT8(a, shift)              ((a)<<(shift))                /* shift >= 0, shift < 8  */
-#define SKP_LSHIFT16(a, shift)             ((a)<<(shift))                /* shift >= 0, shift < 16 */
-#define SKP_LSHIFT32(a, shift)             ((a)<<(shift))                /* shift >= 0, shift < 32 */
-#define SKP_LSHIFT64(a, shift)             ((a)<<(shift))                /* shift >= 0, shift < 64 */
-#define SKP_LSHIFT(a, shift)               SKP_LSHIFT32(a, shift)        /* shift >= 0, shift < 32 */
+#define silk_LSHIFT8(a, shift)              ((a)<<(shift))                /* shift >= 0, shift < 8  */
+#define silk_LSHIFT16(a, shift)             ((a)<<(shift))                /* shift >= 0, shift < 16 */
+#define silk_LSHIFT32(a, shift)             ((a)<<(shift))                /* shift >= 0, shift < 32 */
+#define silk_LSHIFT64(a, shift)             ((a)<<(shift))                /* shift >= 0, shift < 64 */
+#define silk_LSHIFT(a, shift)               silk_LSHIFT32(a, shift)        /* shift >= 0, shift < 32 */
 
-#define SKP_RSHIFT8(a, shift)              ((a)>>(shift))                /* shift >= 0, shift < 8  */
-#define SKP_RSHIFT16(a, shift)             ((a)>>(shift))                /* shift >= 0, shift < 16 */
-#define SKP_RSHIFT32(a, shift)             ((a)>>(shift))                /* shift >= 0, shift < 32 */
-#define SKP_RSHIFT64(a, shift)             ((a)>>(shift))                /* shift >= 0, shift < 64 */
-#define SKP_RSHIFT(a, shift)               SKP_RSHIFT32(a, shift)        /* shift >= 0, shift < 32 */
+#define silk_RSHIFT8(a, shift)              ((a)>>(shift))                /* shift >= 0, shift < 8  */
+#define silk_RSHIFT16(a, shift)             ((a)>>(shift))                /* shift >= 0, shift < 16 */
+#define silk_RSHIFT32(a, shift)             ((a)>>(shift))                /* shift >= 0, shift < 32 */
+#define silk_RSHIFT64(a, shift)             ((a)>>(shift))                /* shift >= 0, shift < 64 */
+#define silk_RSHIFT(a, shift)               silk_RSHIFT32(a, shift)        /* shift >= 0, shift < 32 */
 
 /* saturates before shifting */
-#define SKP_LSHIFT_SAT16(a, shift)         (SKP_LSHIFT16( SKP_LIMIT( (a), SKP_RSHIFT16( SKP_int16_MIN, (shift) ),    \
-                                                                          SKP_RSHIFT16( SKP_int16_MAX, (shift) ) ), (shift) ))
-#define SKP_LSHIFT_SAT32(a, shift)         (SKP_LSHIFT32( SKP_LIMIT( (a), SKP_RSHIFT32( SKP_int32_MIN, (shift) ),    \
-                                                                          SKP_RSHIFT32( SKP_int32_MAX, (shift) ) ), (shift) ))
+#define silk_LSHIFT_SAT16(a, shift)         (silk_LSHIFT16( silk_LIMIT( (a), silk_RSHIFT16( silk_int16_MIN, (shift) ),    \
+                                                                          silk_RSHIFT16( silk_int16_MAX, (shift) ) ), (shift) ))
+#define silk_LSHIFT_SAT32(a, shift)         (silk_LSHIFT32( silk_LIMIT( (a), silk_RSHIFT32( silk_int32_MIN, (shift) ),    \
+                                                                          silk_RSHIFT32( silk_int32_MAX, (shift) ) ), (shift) ))
 
-#define SKP_LSHIFT_ovflw(a, shift)        ((a)<<(shift))        /* shift >= 0, allowed to overflow */
-#define SKP_LSHIFT_uint(a, shift)         ((a)<<(shift))        /* shift >= 0 */
-#define SKP_RSHIFT_uint(a, shift)         ((a)>>(shift))        /* shift >= 0 */
+#define silk_LSHIFT_ovflw(a, shift)        ((a)<<(shift))        /* shift >= 0, allowed to overflow */
+#define silk_LSHIFT_uint(a, shift)         ((a)<<(shift))        /* shift >= 0 */
+#define silk_RSHIFT_uint(a, shift)         ((a)>>(shift))        /* shift >= 0 */
 
-#define SKP_ADD_LSHIFT(a, b, shift)       ((a) + SKP_LSHIFT((b), (shift)))             /* shift >= 0 */
-#define SKP_ADD_LSHIFT32(a, b, shift)     SKP_ADD32((a), SKP_LSHIFT32((b), (shift)))   /* shift >= 0 */
-#define SKP_ADD_LSHIFT_uint(a, b, shift)  ((a) + SKP_LSHIFT_uint((b), (shift)))        /* shift >= 0 */
-#define SKP_ADD_RSHIFT(a, b, shift)       ((a) + SKP_RSHIFT((b), (shift)))             /* shift >= 0 */
-#define SKP_ADD_RSHIFT32(a, b, shift)     SKP_ADD32((a), SKP_RSHIFT32((b), (shift)))   /* shift >= 0 */
-#define SKP_ADD_RSHIFT_uint(a, b, shift)  ((a) + SKP_RSHIFT_uint((b), (shift)))        /* shift >= 0 */
-#define SKP_SUB_LSHIFT32(a, b, shift)     SKP_SUB32((a), SKP_LSHIFT32((b), (shift)))   /* shift >= 0 */
-#define SKP_SUB_RSHIFT32(a, b, shift)     SKP_SUB32((a), SKP_RSHIFT32((b), (shift)))   /* shift >= 0 */
+#define silk_ADD_LSHIFT(a, b, shift)       ((a) + silk_LSHIFT((b), (shift)))             /* shift >= 0 */
+#define silk_ADD_LSHIFT32(a, b, shift)     silk_ADD32((a), silk_LSHIFT32((b), (shift)))   /* shift >= 0 */
+#define silk_ADD_LSHIFT_uint(a, b, shift)  ((a) + silk_LSHIFT_uint((b), (shift)))        /* shift >= 0 */
+#define silk_ADD_RSHIFT(a, b, shift)       ((a) + silk_RSHIFT((b), (shift)))             /* shift >= 0 */
+#define silk_ADD_RSHIFT32(a, b, shift)     silk_ADD32((a), silk_RSHIFT32((b), (shift)))   /* shift >= 0 */
+#define silk_ADD_RSHIFT_uint(a, b, shift)  ((a) + silk_RSHIFT_uint((b), (shift)))        /* shift >= 0 */
+#define silk_SUB_LSHIFT32(a, b, shift)     silk_SUB32((a), silk_LSHIFT32((b), (shift)))   /* shift >= 0 */
+#define silk_SUB_RSHIFT32(a, b, shift)     silk_SUB32((a), silk_RSHIFT32((b), (shift)))   /* shift >= 0 */
 
 /* Requires that shift > 0 */
-#define SKP_RSHIFT_ROUND(a, shift)        ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
-#define SKP_RSHIFT_ROUND64(a, shift)      ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
+#define silk_RSHIFT_ROUND(a, shift)        ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
+#define silk_RSHIFT_ROUND64(a, shift)      ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
 
 /* Number of rightshift required to fit the multiplication */
-#define SKP_NSHIFT_MUL_32_32(a, b)        ( -(31- (32-silk_CLZ32(SKP_abs(a)) + (32-silk_CLZ32(SKP_abs(b))))) )
-#define SKP_NSHIFT_MUL_16_16(a, b)        ( -(15- (16-silk_CLZ16(SKP_abs(a)) + (16-silk_CLZ16(SKP_abs(b))))) )
+#define silk_NSHIFT_MUL_32_32(a, b)        ( -(31- (32-silk_CLZ32(silk_abs(a)) + (32-silk_CLZ32(silk_abs(b))))) )
+#define silk_NSHIFT_MUL_16_16(a, b)        ( -(15- (16-silk_CLZ16(silk_abs(a)) + (16-silk_CLZ16(silk_abs(b))))) )
 
 
-#define SKP_min(a, b)                     (((a) < (b)) ? (a) : (b))
-#define SKP_max(a, b)                     (((a) > (b)) ? (a) : (b))
+#define silk_min(a, b)                     (((a) < (b)) ? (a) : (b))
+#define silk_max(a, b)                     (((a) > (b)) ? (a) : (b))
 
 /* Macro to convert floating-point constants to fixed-point */
 #define SILK_FIX_CONST( C, Q )           ((opus_int32)((C) * ((opus_int64)1 << (Q)) + 0.5))
 
-/* SKP_min() versions with typecast in the function call */
-static inline opus_int SKP_min_int(opus_int a, opus_int b)
+/* silk_min() versions with typecast in the function call */
+static inline opus_int silk_min_int(opus_int a, opus_int b)
 {
     return (((a) < (b)) ? (a) : (b));
 }
-static inline opus_int16 SKP_min_16(opus_int16 a, opus_int16 b)
+static inline opus_int16 silk_min_16(opus_int16 a, opus_int16 b)
 {
     return (((a) < (b)) ? (a) : (b));
 }
-static inline opus_int32 SKP_min_32(opus_int32 a, opus_int32 b)
+static inline opus_int32 silk_min_32(opus_int32 a, opus_int32 b)
 {
     return (((a) < (b)) ? (a) : (b));
 }
-static inline opus_int64 SKP_min_64(opus_int64 a, opus_int64 b)
+static inline opus_int64 silk_min_64(opus_int64 a, opus_int64 b)
 {
     return (((a) < (b)) ? (a) : (b));
 }
 
-/* SKP_min() versions with typecast in the function call */
-static inline opus_int SKP_max_int(opus_int a, opus_int b)
+/* silk_min() versions with typecast in the function call */
+static inline opus_int silk_max_int(opus_int a, opus_int b)
 {
     return (((a) > (b)) ? (a) : (b));
 }
-static inline opus_int16 SKP_max_16(opus_int16 a, opus_int16 b)
+static inline opus_int16 silk_max_16(opus_int16 a, opus_int16 b)
 {
     return (((a) > (b)) ? (a) : (b));
 }
-static inline opus_int32 SKP_max_32(opus_int32 a, opus_int32 b)
+static inline opus_int32 silk_max_32(opus_int32 a, opus_int32 b)
 {
     return (((a) > (b)) ? (a) : (b));
 }
-static inline opus_int64 SKP_max_64(opus_int64 a, opus_int64 b)
+static inline opus_int64 silk_max_64(opus_int64 a, opus_int64 b)
 {
     return (((a) > (b)) ? (a) : (b));
 }
 
-#define SKP_LIMIT( a, limit1, limit2)    ((limit1) > (limit2) ? ((a) > (limit1) ? (limit1) : ((a) < (limit2) ? (limit2) : (a))) \
+#define silk_LIMIT( a, limit1, limit2)    ((limit1) > (limit2) ? ((a) > (limit1) ? (limit1) : ((a) < (limit2) ? (limit2) : (a))) \
                                                              : ((a) > (limit2) ? (limit2) : ((a) < (limit1) ? (limit1) : (a))))
 
-#define SKP_LIMIT_int SKP_LIMIT
-#define SKP_LIMIT_16 SKP_LIMIT
-#define SKP_LIMIT_32 SKP_LIMIT
+#define silk_LIMIT_int silk_LIMIT
+#define silk_LIMIT_16 silk_LIMIT
+#define silk_LIMIT_32 silk_LIMIT
 
-/*#define SKP_non_neg(a)                 ((a) & ((-(a)) >> (8 * sizeof(a) - 1)))*/   /* doesn't seem faster than SKP_max(0, a);*/
+/*#define silk_non_neg(a)                 ((a) & ((-(a)) >> (8 * sizeof(a) - 1)))*/   /* doesn't seem faster than silk_max(0, a);*/
 
-#define SKP_abs(a)                       (((a) >  0)  ? (a) : -(a))            /* Be careful, SKP_abs returns wrong when input equals to SKP_intXX_MIN */
-#define SKP_abs_int(a)                   (((a) ^ ((a) >> (8 * sizeof(a) - 1))) - ((a) >> (8 * sizeof(a) - 1)))
-#define SKP_abs_int32(a)                 (((a) ^ ((a) >> 31)) - ((a) >> 31))
-#define SKP_abs_int64(a)                 (((a) >  0)  ? (a) : -(a))
+#define silk_abs(a)                       (((a) >  0)  ? (a) : -(a))            /* Be careful, silk_abs returns the wrong result when the input equals silk_intXX_MIN */
+#define silk_abs_int(a)                   (((a) ^ ((a) >> (8 * sizeof(a) - 1))) - ((a) >> (8 * sizeof(a) - 1)))
+#define silk_abs_int32(a)                 (((a) ^ ((a) >> 31)) - ((a) >> 31))
+#define silk_abs_int64(a)                 (((a) >  0)  ? (a) : -(a))
 
-#define SKP_sign(a)                      ((a) > 0 ? 1 : ( (a) < 0 ? -1 : 0 ))
+#define silk_sign(a)                      ((a) > 0 ? 1 : ( (a) < 0 ? -1 : 0 ))
 
-#define SKP_sqrt(a)                      (sqrt(a))
+#define silk_sqrt(a)                      (sqrt(a))
 
 /* PSEUDO-RANDOM GENERATOR                                                          */
 /* Make sure to store the result as the seed for the next call (also in between     */
 /* frames), otherwise result won't be random at all. When only using some of the    */
 /* bits, take the most significant bits by right-shifting.                          */
-#define SKP_RAND(seed)                   (SKP_MLA_ovflw(907633515, (seed), 196314165))
+#define silk_RAND(seed)                   (silk_MLA_ovflw(907633515, (seed), 196314165))
 
 /*  Add some multiplication functions that can be easily mapped to ARM. */
 
-/*    SKP_SMMUL: Signed top word multiply.
+/*    silk_SMMUL: Signed top word multiply.
           ARMv6        2 instruction cycles.
           ARMv3M+        3 instruction cycles. use SMULL and ignore LSB registers.(except xM)*/
-/*#define SKP_SMMUL(a32, b32)            (opus_int32)SKP_RSHIFT(SKP_SMLAL(SKP_SMULWB((a32), (b32)), (a32), SKP_RSHIFT_ROUND((b32), 16)), 16)*/
+/*#define silk_SMMUL(a32, b32)            (opus_int32)silk_RSHIFT(silk_SMLAL(silk_SMULWB((a32), (b32)), (a32), silk_RSHIFT_ROUND((b32), 16)), 16)*/
 /* the following seems faster on x86 */
-#define SKP_SMMUL(a32, b32)              (opus_int32)SKP_RSHIFT64(SKP_SMULL((a32), (b32)), 32)
+#define silk_SMMUL(a32, b32)              (opus_int32)silk_RSHIFT64(silk_SMULL((a32), (b32)), 32)
 
 #include "silk_Inlines.h"
 #include "silk_MacroCount.h"
diff --git a/silk/silk_VAD.c b/silk/silk_VAD.c
index aef2b20..f9470da 100644
--- a/silk/silk_VAD.c
+++ b/silk/silk_VAD.c
@@ -42,18 +42,18 @@
     opus_int b, ret = 0;
 
     /* reset state memory */
-    SKP_memset( psSilk_VAD, 0, sizeof( silk_VAD_state ) );
+    silk_memset( psSilk_VAD, 0, sizeof( silk_VAD_state ) );
 
     /* init noise levels */
     /* Initialize array with approx pink noise levels (psd proportional to inverse of frequency) */
     for( b = 0; b < VAD_N_BANDS; b++ ) {
-        psSilk_VAD->NoiseLevelBias[ b ] = SKP_max_32( SKP_DIV32_16( VAD_NOISE_LEVELS_BIAS, b + 1 ), 1 );
+        psSilk_VAD->NoiseLevelBias[ b ] = silk_max_32( silk_DIV32_16( VAD_NOISE_LEVELS_BIAS, b + 1 ), 1 );
     }
 
     /* Initialize state */
     for( b = 0; b < VAD_N_BANDS; b++ ) {
-        psSilk_VAD->NL[ b ]     = SKP_MUL( 100, psSilk_VAD->NoiseLevelBias[ b ] );
-        psSilk_VAD->inv_NL[ b ] = SKP_DIV32( SKP_int32_MAX, psSilk_VAD->NL[ b ] );
+        psSilk_VAD->NL[ b ]     = silk_MUL( 100, psSilk_VAD->NoiseLevelBias[ b ] );
+        psSilk_VAD->inv_NL[ b ] = silk_DIV32( silk_int32_MAX, psSilk_VAD->NL[ b ] );
     }
     psSilk_VAD->counter = 15;
 
@@ -88,10 +88,10 @@
     silk_VAD_state *psSilk_VAD = &psEncC->sVAD;
 
     /* Safety checks */
-    SKP_assert( VAD_N_BANDS == 4 );
-    SKP_assert( MAX_FRAME_LENGTH >= psEncC->frame_length );
-    SKP_assert( psEncC->frame_length <= 512 );
-    SKP_assert( psEncC->frame_length == 8 * SKP_RSHIFT( psEncC->frame_length, 3 ) );
+    silk_assert( VAD_N_BANDS == 4 );
+    silk_assert( MAX_FRAME_LENGTH >= psEncC->frame_length );
+    silk_assert( psEncC->frame_length <= 512 );
+    silk_assert( psEncC->frame_length == 8 * silk_RSHIFT( psEncC->frame_length, 3 ) );
 
     /***********************/
     /* Filter and Decimate */
@@ -100,19 +100,19 @@
     silk_ana_filt_bank_1( pIn,          &psSilk_VAD->AnaState[  0 ], &X[ 0 ][ 0 ], &X[ 3 ][ 0 ], psEncC->frame_length );
 
     /* 0-4 kHz to 0-2 kHz and 2-4 kHz */
-    silk_ana_filt_bank_1( &X[ 0 ][ 0 ], &psSilk_VAD->AnaState1[ 0 ], &X[ 0 ][ 0 ], &X[ 2 ][ 0 ], SKP_RSHIFT( psEncC->frame_length, 1 ) );
+    silk_ana_filt_bank_1( &X[ 0 ][ 0 ], &psSilk_VAD->AnaState1[ 0 ], &X[ 0 ][ 0 ], &X[ 2 ][ 0 ], silk_RSHIFT( psEncC->frame_length, 1 ) );
 
     /* 0-2 kHz to 0-1 kHz and 1-2 kHz */
-    silk_ana_filt_bank_1( &X[ 0 ][ 0 ], &psSilk_VAD->AnaState2[ 0 ], &X[ 0 ][ 0 ], &X[ 1 ][ 0 ], SKP_RSHIFT( psEncC->frame_length, 2 ) );
+    silk_ana_filt_bank_1( &X[ 0 ][ 0 ], &psSilk_VAD->AnaState2[ 0 ], &X[ 0 ][ 0 ], &X[ 1 ][ 0 ], silk_RSHIFT( psEncC->frame_length, 2 ) );
 
     /*********************************************/
     /* HP filter on lowest band (differentiator) */
     /*********************************************/
-    decimated_framelength = SKP_RSHIFT( psEncC->frame_length, 3 );
-    X[ 0 ][ decimated_framelength - 1 ] = SKP_RSHIFT( X[ 0 ][ decimated_framelength - 1 ], 1 );
+    decimated_framelength = silk_RSHIFT( psEncC->frame_length, 3 );
+    X[ 0 ][ decimated_framelength - 1 ] = silk_RSHIFT( X[ 0 ][ decimated_framelength - 1 ], 1 );
     HPstateTmp = X[ 0 ][ decimated_framelength - 1 ];
     for( i = decimated_framelength - 1; i > 0; i-- ) {
-        X[ 0 ][ i - 1 ]  = SKP_RSHIFT( X[ 0 ][ i - 1 ], 1 );
+        X[ 0 ][ i - 1 ]  = silk_RSHIFT( X[ 0 ][ i - 1 ], 1 );
         X[ 0 ][ i ]     -= X[ 0 ][ i - 1 ];
     }
     X[ 0 ][ 0 ] -= psSilk_VAD->HPstate;
@@ -123,10 +123,10 @@
     /*************************************/
     for( b = 0; b < VAD_N_BANDS; b++ ) {
         /* Find the decimated framelength in the non-uniformly divided bands */
-        decimated_framelength = SKP_RSHIFT( psEncC->frame_length, SKP_min_int( VAD_N_BANDS - b, VAD_N_BANDS - 1 ) );
+        decimated_framelength = silk_RSHIFT( psEncC->frame_length, silk_min_int( VAD_N_BANDS - b, VAD_N_BANDS - 1 ) );
 
         /* Split length into subframe lengths */
-        dec_subframe_length = SKP_RSHIFT( decimated_framelength, VAD_INTERNAL_SUBFRAMES_LOG2 );
+        dec_subframe_length = silk_RSHIFT( decimated_framelength, VAD_INTERNAL_SUBFRAMES_LOG2 );
         dec_subframe_offset = 0;
 
         /* Compute energy per sub-frame */
@@ -135,21 +135,21 @@
         for( s = 0; s < VAD_INTERNAL_SUBFRAMES; s++ ) {
             sumSquared = 0;
             for( i = 0; i < dec_subframe_length; i++ ) {
-                /* The energy will be less than dec_subframe_length * ( SKP_int16_MIN / 8 ) ^ 2.            */
+                /* The energy will be less than dec_subframe_length * ( silk_int16_MIN / 8 ) ^ 2.            */
                 /* Therefore we can accumulate with no risk of overflow (unless dec_subframe_length > 128)  */
-                x_tmp = SKP_RSHIFT( X[ b ][ i + dec_subframe_offset ], 3 );
-                sumSquared = SKP_SMLABB( sumSquared, x_tmp, x_tmp );
+                x_tmp = silk_RSHIFT( X[ b ][ i + dec_subframe_offset ], 3 );
+                sumSquared = silk_SMLABB( sumSquared, x_tmp, x_tmp );
 
                 /* Safety check */
-                SKP_assert( sumSquared >= 0 );
+                silk_assert( sumSquared >= 0 );
             }
 
             /* Add/saturate summed energy of current subframe */
             if( s < VAD_INTERNAL_SUBFRAMES - 1 ) {
-                Xnrg[ b ] = SKP_ADD_POS_SAT32( Xnrg[ b ], sumSquared );
+                Xnrg[ b ] = silk_ADD_POS_SAT32( Xnrg[ b ], sumSquared );
             } else {
                 /* Look-ahead subframe */
-                Xnrg[ b ] = SKP_ADD_POS_SAT32( Xnrg[ b ], SKP_RSHIFT( sumSquared, 1 ) );
+                Xnrg[ b ] = silk_ADD_POS_SAT32( Xnrg[ b ], silk_RSHIFT( sumSquared, 1 ) );
             }
 
             dec_subframe_offset += dec_subframe_length;
@@ -172,30 +172,30 @@
         if( speech_nrg > 0 ) {
             /* Divide, with sufficient resolution */
             if( ( Xnrg[ b ] & 0xFF800000 ) == 0 ) {
-                NrgToNoiseRatio_Q8[ b ] = SKP_DIV32( SKP_LSHIFT( Xnrg[ b ], 8 ), psSilk_VAD->NL[ b ] + 1 );
+                NrgToNoiseRatio_Q8[ b ] = silk_DIV32( silk_LSHIFT( Xnrg[ b ], 8 ), psSilk_VAD->NL[ b ] + 1 );
             } else {
-                NrgToNoiseRatio_Q8[ b ] = SKP_DIV32( Xnrg[ b ], SKP_RSHIFT( psSilk_VAD->NL[ b ], 8 ) + 1 );
+                NrgToNoiseRatio_Q8[ b ] = silk_DIV32( Xnrg[ b ], silk_RSHIFT( psSilk_VAD->NL[ b ], 8 ) + 1 );
             }
 
             /* Convert to log domain */
             SNR_Q7 = silk_lin2log( NrgToNoiseRatio_Q8[ b ] ) - 8 * 128;
 
             /* Sum-of-squares */
-            sumSquared = SKP_SMLABB( sumSquared, SNR_Q7, SNR_Q7 );          /* Q14 */
+            sumSquared = silk_SMLABB( sumSquared, SNR_Q7, SNR_Q7 );          /* Q14 */
 
             /* Tilt measure */
             if( speech_nrg < ( 1 << 20 ) ) {
                 /* Scale down SNR value for small subband speech energies */
-                SNR_Q7 = SKP_SMULWB( SKP_LSHIFT( silk_SQRT_APPROX( speech_nrg ), 6 ), SNR_Q7 );
+                SNR_Q7 = silk_SMULWB( silk_LSHIFT( silk_SQRT_APPROX( speech_nrg ), 6 ), SNR_Q7 );
             }
-            input_tilt = SKP_SMLAWB( input_tilt, tiltWeights[ b ], SNR_Q7 );
+            input_tilt = silk_SMLAWB( input_tilt, tiltWeights[ b ], SNR_Q7 );
         } else {
             NrgToNoiseRatio_Q8[ b ] = 256;
         }
     }
 
     /* Mean-of-squares */
-    sumSquared = SKP_DIV32_16( sumSquared, VAD_N_BANDS ); /* Q14 */
+    sumSquared = silk_DIV32_16( sumSquared, VAD_N_BANDS ); /* Q14 */
 
     /* Root-mean-square approximation, scale to dBs, and write to output pointer */
     pSNR_dB_Q7 = ( opus_int16 )( 3 * silk_SQRT_APPROX( sumSquared ) ); /* Q7 */
@@ -203,12 +203,12 @@
     /*********************************/
     /* Speech Probability Estimation */
     /*********************************/
-    SA_Q15 = silk_sigm_Q15( SKP_SMULWB( VAD_SNR_FACTOR_Q16, pSNR_dB_Q7 ) - VAD_NEGATIVE_OFFSET_Q5 );
+    SA_Q15 = silk_sigm_Q15( silk_SMULWB( VAD_SNR_FACTOR_Q16, pSNR_dB_Q7 ) - VAD_NEGATIVE_OFFSET_Q5 );
 
     /**************************/
     /* Frequency Tilt Measure */
     /**************************/
-    psEncC->input_tilt_Q15 = SKP_LSHIFT( silk_sigm_Q15( input_tilt ) - 16384, 1 );
+    psEncC->input_tilt_Q15 = silk_LSHIFT( silk_sigm_Q15( input_tilt ) - 16384, 1 );
 
     /**************************************************/
     /* Scale the sigmoid output based on power levels */
@@ -216,32 +216,32 @@
     speech_nrg = 0;
     for( b = 0; b < VAD_N_BANDS; b++ ) {
         /* Accumulate signal-without-noise energies, higher frequency bands have more weight */
-        speech_nrg += ( b + 1 ) * SKP_RSHIFT( Xnrg[ b ] - psSilk_VAD->NL[ b ], 4 );
+        speech_nrg += ( b + 1 ) * silk_RSHIFT( Xnrg[ b ] - psSilk_VAD->NL[ b ], 4 );
     }
 
     /* Power scaling */
     if( speech_nrg <= 0 ) {
-        SA_Q15 = SKP_RSHIFT( SA_Q15, 1 );
+        SA_Q15 = silk_RSHIFT( SA_Q15, 1 );
     } else if( speech_nrg < 32768 ) {
         if( psEncC->frame_length == 10 * psEncC->fs_kHz ) {
-            speech_nrg = SKP_LSHIFT_SAT32( speech_nrg, 16 );
+            speech_nrg = silk_LSHIFT_SAT32( speech_nrg, 16 );
         } else {
-            speech_nrg = SKP_LSHIFT_SAT32( speech_nrg, 15 );
+            speech_nrg = silk_LSHIFT_SAT32( speech_nrg, 15 );
         }
 
         /* square-root */
         speech_nrg = silk_SQRT_APPROX( speech_nrg );
-        SA_Q15 = SKP_SMULWB( 32768 + speech_nrg, SA_Q15 );
+        SA_Q15 = silk_SMULWB( 32768 + speech_nrg, SA_Q15 );
     }
 
     /* Copy the resulting speech activity in Q8 */
-    psEncC->speech_activity_Q8 = SKP_min_int( SKP_RSHIFT( SA_Q15, 7 ), SKP_uint8_MAX );
+    psEncC->speech_activity_Q8 = silk_min_int( silk_RSHIFT( SA_Q15, 7 ), silk_uint8_MAX );
 
     /***********************************/
     /* Energy Level and SNR estimation */
     /***********************************/
     /* Smoothing coefficient */
-    smooth_coef_Q16 = SKP_SMULWB( VAD_SNR_SMOOTH_COEF_Q18, SKP_SMULWB( SA_Q15, SA_Q15 ) );
+    smooth_coef_Q16 = silk_SMULWB( VAD_SNR_SMOOTH_COEF_Q18, silk_SMULWB( SA_Q15, SA_Q15 ) );
 
     if( psEncC->frame_length == 10 * psEncC->fs_kHz ) {
         smooth_coef_Q16 >>= 1;
@@ -249,13 +249,13 @@
 
     for( b = 0; b < VAD_N_BANDS; b++ ) {
         /* compute smoothed energy-to-noise ratio per band */
-        psSilk_VAD->NrgRatioSmth_Q8[ b ] = SKP_SMLAWB( psSilk_VAD->NrgRatioSmth_Q8[ b ],
+        psSilk_VAD->NrgRatioSmth_Q8[ b ] = silk_SMLAWB( psSilk_VAD->NrgRatioSmth_Q8[ b ],
             NrgToNoiseRatio_Q8[ b ] - psSilk_VAD->NrgRatioSmth_Q8[ b ], smooth_coef_Q16 );
 
         /* signal to noise ratio in dB per band */
         SNR_Q7 = 3 * ( silk_lin2log( psSilk_VAD->NrgRatioSmth_Q8[b] ) - 8 * 128 );
         /* quality = sigmoid( 0.25 * ( SNR_dB - 16 ) ); */
-        psEncC->input_quality_bands_Q15[ b ] = silk_sigm_Q15( SKP_RSHIFT( SNR_Q7 - 16 * 128, 4 ) );
+        psEncC->input_quality_bands_Q15[ b ] = silk_sigm_Q15( silk_RSHIFT( SNR_Q7 - 16 * 128, 4 ) );
     }
 
     return( ret );
@@ -275,7 +275,7 @@
 
     /* Initially faster smoothing */
     if( psSilk_VAD->counter < 1000 ) { /* 1000 = 20 sec */
-        min_coef = SKP_DIV32_16( SKP_int16_MAX, SKP_RSHIFT( psSilk_VAD->counter, 4 ) + 1 );
+        min_coef = silk_DIV32_16( silk_int16_MAX, silk_RSHIFT( psSilk_VAD->counter, 4 ) + 1 );
     } else {
         min_coef = 0;
     }
@@ -283,38 +283,38 @@
     for( k = 0; k < VAD_N_BANDS; k++ ) {
         /* Get old noise level estimate for current band */
         nl = psSilk_VAD->NL[ k ];
-        SKP_assert( nl >= 0 );
+        silk_assert( nl >= 0 );
 
         /* Add bias */
-        nrg = SKP_ADD_POS_SAT32( pX[ k ], psSilk_VAD->NoiseLevelBias[ k ] );
-        SKP_assert( nrg > 0 );
+        nrg = silk_ADD_POS_SAT32( pX[ k ], psSilk_VAD->NoiseLevelBias[ k ] );
+        silk_assert( nrg > 0 );
 
         /* Invert energies */
-        inv_nrg = SKP_DIV32( SKP_int32_MAX, nrg );
-        SKP_assert( inv_nrg >= 0 );
+        inv_nrg = silk_DIV32( silk_int32_MAX, nrg );
+        silk_assert( inv_nrg >= 0 );
 
         /* Less update when subband energy is high */
-        if( nrg > SKP_LSHIFT( nl, 3 ) ) {
+        if( nrg > silk_LSHIFT( nl, 3 ) ) {
             coef = VAD_NOISE_LEVEL_SMOOTH_COEF_Q16 >> 3;
         } else if( nrg < nl ) {
             coef = VAD_NOISE_LEVEL_SMOOTH_COEF_Q16;
         } else {
-            coef = SKP_SMULWB( SKP_SMULWW( inv_nrg, nl ), VAD_NOISE_LEVEL_SMOOTH_COEF_Q16 << 1 );
+            coef = silk_SMULWB( silk_SMULWW( inv_nrg, nl ), VAD_NOISE_LEVEL_SMOOTH_COEF_Q16 << 1 );
         }
 
         /* Initially faster smoothing */
-        coef = SKP_max_int( coef, min_coef );
+        coef = silk_max_int( coef, min_coef );
 
         /* Smooth inverse energies */
-        psSilk_VAD->inv_NL[ k ] = SKP_SMLAWB( psSilk_VAD->inv_NL[ k ], inv_nrg - psSilk_VAD->inv_NL[ k ], coef );
-        SKP_assert( psSilk_VAD->inv_NL[ k ] >= 0 );
+        psSilk_VAD->inv_NL[ k ] = silk_SMLAWB( psSilk_VAD->inv_NL[ k ], inv_nrg - psSilk_VAD->inv_NL[ k ], coef );
+        silk_assert( psSilk_VAD->inv_NL[ k ] >= 0 );
 
         /* Compute noise level by inverting again */
-        nl = SKP_DIV32( SKP_int32_MAX, psSilk_VAD->inv_NL[ k ] );
-        SKP_assert( nl >= 0 );
+        nl = silk_DIV32( silk_int32_MAX, psSilk_VAD->inv_NL[ k ] );
+        silk_assert( nl >= 0 );
 
         /* Limit noise levels (guarantee 7 bits of head room) */
-        nl = SKP_min( nl, 0x00FFFFFF );
+        nl = silk_min( nl, 0x00FFFFFF );
 
         /* Store as part of state */
         psSilk_VAD->NL[ k ] = nl;
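
The VAD edits above are again name-only, but the noise-level update they touch is easy to lose in the Q-format details. The sketch below is a floating-point paraphrase of silk_VAD_GetNoiseLevels(): the first-order smoothing is applied to the inverted band energy, with a coefficient that is reduced when the energy is far above the current noise level (the proportional branch is an approximation of the fixed-point arithmetic), and the result is inverted back and capped at 0x00FFFFFF. Function and parameter names here are illustrative, not part of the SILK sources.

    #include <math.h>

    static void noise_level_update_sketch(double *NL, double *inv_NL, double bandEnergy,
                                          double bias, double baseCoef /* ~ VAD_NOISE_LEVEL_SMOOTH_COEF */)
    {
        double nrg = bandEnergy + bias;            /* add bias, keep strictly positive     */
        double inv = 1.0 / nrg;                    /* work on the inverted energy          */
        double coef;

        if( nrg > 8.0 * *NL ) {                    /* much louder than noise: slow update  */
            coef = baseCoef / 8.0;
        } else if( nrg < *NL ) {                   /* quieter than noise: full-speed update*/
            coef = baseCoef;
        } else {                                   /* in between: roughly proportional     */
            coef = baseCoef * ( *NL / nrg );
        }

        *inv_NL += coef * ( inv - *inv_NL );       /* first-order smoothing                */
        *NL      = fmin( 1.0 / *inv_NL, 16777215.0 /* 0x00FFFFFF head-room limit */ );
    }
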
diff --git a/silk/silk_VQ_WMat_EC.c b/silk/silk_VQ_WMat_EC.c
index a6bb7cd..1720366 100644
--- a/silk/silk_VQ_WMat_EC.c
+++ b/silk/silk_VQ_WMat_EC.c
@@ -49,55 +49,55 @@
     opus_int32 sum1_Q14, sum2_Q16;
 
     /* Loop over codebook */
-    *rate_dist_Q14 = SKP_int32_MAX;
+    *rate_dist_Q14 = silk_int32_MAX;
     cb_row_Q7 = cb_Q7;
     for( k = 0; k < L; k++ ) {
-        diff_Q14[ 0 ] = in_Q14[ 0 ] - SKP_LSHIFT( cb_row_Q7[ 0 ], 7 );
-        diff_Q14[ 1 ] = in_Q14[ 1 ] - SKP_LSHIFT( cb_row_Q7[ 1 ], 7 );
-        diff_Q14[ 2 ] = in_Q14[ 2 ] - SKP_LSHIFT( cb_row_Q7[ 2 ], 7 );
-        diff_Q14[ 3 ] = in_Q14[ 3 ] - SKP_LSHIFT( cb_row_Q7[ 3 ], 7 );
-        diff_Q14[ 4 ] = in_Q14[ 4 ] - SKP_LSHIFT( cb_row_Q7[ 4 ], 7 );
+        diff_Q14[ 0 ] = in_Q14[ 0 ] - silk_LSHIFT( cb_row_Q7[ 0 ], 7 );
+        diff_Q14[ 1 ] = in_Q14[ 1 ] - silk_LSHIFT( cb_row_Q7[ 1 ], 7 );
+        diff_Q14[ 2 ] = in_Q14[ 2 ] - silk_LSHIFT( cb_row_Q7[ 2 ], 7 );
+        diff_Q14[ 3 ] = in_Q14[ 3 ] - silk_LSHIFT( cb_row_Q7[ 3 ], 7 );
+        diff_Q14[ 4 ] = in_Q14[ 4 ] - silk_LSHIFT( cb_row_Q7[ 4 ], 7 );
 
         /* Weighted rate */
-        sum1_Q14 = SKP_SMULBB( mu_Q9, cl_Q5[ k ] );
+        sum1_Q14 = silk_SMULBB( mu_Q9, cl_Q5[ k ] );
 
-        SKP_assert( sum1_Q14 >= 0 );
+        silk_assert( sum1_Q14 >= 0 );
 
         /* first row of W_Q18 */
-        sum2_Q16 = SKP_SMULWB(           W_Q18[  1 ], diff_Q14[ 1 ] );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  2 ], diff_Q14[ 2 ] );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  3 ], diff_Q14[ 3 ] );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  4 ], diff_Q14[ 4 ] );
-        sum2_Q16 = SKP_LSHIFT( sum2_Q16, 1 );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  0 ], diff_Q14[ 0 ] );
-        sum1_Q14 = SKP_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 0 ] );
+        sum2_Q16 = silk_SMULWB(           W_Q18[  1 ], diff_Q14[ 1 ] );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  2 ], diff_Q14[ 2 ] );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  3 ], diff_Q14[ 3 ] );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  4 ], diff_Q14[ 4 ] );
+        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  0 ], diff_Q14[ 0 ] );
+        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 0 ] );
 
         /* second row of W_Q18 */
-        sum2_Q16 = SKP_SMULWB(           W_Q18[  7 ], diff_Q14[ 2 ] );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  8 ], diff_Q14[ 3 ] );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  9 ], diff_Q14[ 4 ] );
-        sum2_Q16 = SKP_LSHIFT( sum2_Q16, 1 );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[  6 ], diff_Q14[ 1 ] );
-        sum1_Q14 = SKP_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 1 ] );
+        sum2_Q16 = silk_SMULWB(           W_Q18[  7 ], diff_Q14[ 2 ] );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  8 ], diff_Q14[ 3 ] );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  9 ], diff_Q14[ 4 ] );
+        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[  6 ], diff_Q14[ 1 ] );
+        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 1 ] );
 
         /* third row of W_Q18 */
-        sum2_Q16 = SKP_SMULWB(           W_Q18[ 13 ], diff_Q14[ 3 ] );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[ 14 ], diff_Q14[ 4 ] );
-        sum2_Q16 = SKP_LSHIFT( sum2_Q16, 1 );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[ 12 ], diff_Q14[ 2 ] );
-        sum1_Q14 = SKP_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 2 ] );
+        sum2_Q16 = silk_SMULWB(           W_Q18[ 13 ], diff_Q14[ 3 ] );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 14 ], diff_Q14[ 4 ] );
+        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 12 ], diff_Q14[ 2 ] );
+        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 2 ] );
 
         /* fourth row of W_Q18 */
-        sum2_Q16 = SKP_SMULWB(           W_Q18[ 19 ], diff_Q14[ 4 ] );
-        sum2_Q16 = SKP_LSHIFT( sum2_Q16, 1 );
-        sum2_Q16 = SKP_SMLAWB( sum2_Q16, W_Q18[ 18 ], diff_Q14[ 3 ] );
-        sum1_Q14 = SKP_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 3 ] );
+        sum2_Q16 = silk_SMULWB(           W_Q18[ 19 ], diff_Q14[ 4 ] );
+        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
+        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 18 ], diff_Q14[ 3 ] );
+        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 3 ] );
 
         /* last row of W_Q18 */
-        sum2_Q16 = SKP_SMULWB(           W_Q18[ 24 ], diff_Q14[ 4 ] );
-        sum1_Q14 = SKP_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 4 ] );
+        sum2_Q16 = silk_SMULWB(           W_Q18[ 24 ], diff_Q14[ 4 ] );
+        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16,    diff_Q14[ 4 ] );
 
-        SKP_assert( sum1_Q14 >= 0 );
+        silk_assert( sum1_Q14 >= 0 );
 
         /* find best */
         if( sum1_Q14 < *rate_dist_Q14 ) {
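
For context on the renamed silk_SMULWB/silk_SMLAWB chains above: the unrolled rows evaluate the quadratic form diff' * W * diff over the symmetric 5x5 weighting matrix stored row-major in W_Q18, visiting each diagonal element once and doubling each off-diagonal term, and add the rate term mu * codelength. A plain-C restatement of that error measure (names are assumptions for this sketch only):

    static double weighted_rate_distortion_sketch(const double in[5], const double cb_row[5],
                                                  const double W[25], double mu, double codelength)
    {
        double diff[5], acc, err = mu * codelength;
        int i, j;

        for( i = 0; i < 5; i++ ) {
            diff[ i ] = in[ i ] - cb_row[ i ];
        }
        /* Upper-triangular evaluation: diagonal once, off-diagonal terms doubled,
           mirroring the unrolled fixed-point rows above (W[0], W[6], W[12], W[18], W[24]). */
        for( i = 0; i < 5; i++ ) {
            acc = W[ 5 * i + i ] * diff[ i ];
            for( j = i + 1; j < 5; j++ ) {
                acc += 2.0 * W[ 5 * i + j ] * diff[ j ];
            }
            err += acc * diff[ i ];
        }
        return err;
    }
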
diff --git a/silk/silk_ana_filt_bank_1.c b/silk/silk_ana_filt_bank_1.c
index 1fa819f..d19790f 100644
--- a/silk/silk_ana_filt_bank_1.c
+++ b/silk/silk_ana_filt_bank_1.c
@@ -45,31 +45,31 @@
     const opus_int32      N           /* I:   Number of input samples */
 )
 {
-    opus_int      k, N2 = SKP_RSHIFT( N, 1 );
+    opus_int      k, N2 = silk_RSHIFT( N, 1 );
     opus_int32    in32, X, Y, out_1, out_2;
 
     /* Internal variables and state are in Q10 format */
     for( k = 0; k < N2; k++ ) {
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ 2 * k ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ 2 * k ], 10 );
 
         /* All-pass section for even input sample */
-        Y      = SKP_SUB32( in32, S[ 0 ] );
-        X      = SKP_SMLAWB( Y, Y, A_fb1_21[ 0 ] );
-        out_1  = SKP_ADD32( S[ 0 ], X );
-        S[ 0 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 0 ] );
+        X      = silk_SMLAWB( Y, Y, A_fb1_21[ 0 ] );
+        out_1  = silk_ADD32( S[ 0 ], X );
+        S[ 0 ] = silk_ADD32( in32, X );
 
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ 2 * k + 1 ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ 2 * k + 1 ], 10 );
 
         /* All-pass section for odd input sample, and add to output of previous section */
-        Y      = SKP_SUB32( in32, S[ 1 ] );
-        X      = SKP_SMULWB( Y, A_fb1_20[ 0 ] );
-        out_2  = SKP_ADD32( S[ 1 ], X );
-        S[ 1 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 1 ] );
+        X      = silk_SMULWB( Y, A_fb1_20[ 0 ] );
+        out_2  = silk_ADD32( S[ 1 ], X );
+        S[ 1 ] = silk_ADD32( in32, X );
 
         /* Add/subtract, convert back to int16 and store to output */
-        outL[ k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( SKP_ADD32( out_2, out_1 ), 11 ) );
-        outH[ k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SUB32( out_2, out_1 ), 11 ) );
+        outL[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_ADD32( out_2, out_1 ), 11 ) );
+        outH[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( silk_SUB32( out_2, out_1 ), 11 ) );
     }
 }
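
The filter above splits the input into two half-rate bands using one first-order all-pass section per input phase; the renamed macros only implement the Q10 arithmetic. In floating point both phases reduce to the same all-pass update (the even phase's silk_SMLAWB folds an extra +Y into its effective coefficient). A rough sketch, with the effective coefficients behind A_fb1_21[0] and A_fb1_20[0] passed in as parameters since the tables are not shown in this hunk:

    static void ana_filt_bank_1_sketch(const float *in, float S[2], float *outL, float *outH,
                                       int N, float c_even, float c_odd)
    {
        int k;
        for( k = 0; k < N / 2; k++ ) {
            float x0 = in[ 2 * k ], x1 = in[ 2 * k + 1 ];
            float Y, X, out_1, out_2;

            /* All-pass section on the even phase */
            Y = x0 - S[ 0 ];   X = c_even * Y;   out_1 = S[ 0 ] + X;   S[ 0 ] = x0 + X;
            /* All-pass section on the odd phase */
            Y = x1 - S[ 1 ];   X = c_odd  * Y;   out_2 = S[ 1 ] + X;   S[ 1 ] = x1 + X;

            /* Sum and difference give the low and high band at half the sample rate */
            outL[ k ] = 0.5f * ( out_2 + out_1 );
            outH[ k ] = 0.5f * ( out_2 - out_1 );
        }
    }
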
diff --git a/silk/silk_apply_sine_window.c b/silk/silk_apply_sine_window.c
index 3fdb1d8..dcc1989 100644
--- a/silk/silk_apply_sine_window.c
+++ b/silk/silk_apply_sine_window.c
@@ -57,45 +57,45 @@
     opus_int   k, f_Q16, c_Q16;
     opus_int32 S0_Q16, S1_Q16;
 
-    SKP_assert( win_type == 1 || win_type == 2 );
+    silk_assert( win_type == 1 || win_type == 2 );
 
     /* Length must be in a range from 16 to 120 and a multiple of 4 */
-    SKP_assert( length >= 16 && length <= 120 );
-    SKP_assert( ( length & 3 ) == 0 );
+    silk_assert( length >= 16 && length <= 120 );
+    silk_assert( ( length & 3 ) == 0 );
 
     /* Frequency */
     k = ( length >> 2 ) - 4;
-    SKP_assert( k >= 0 && k <= 26 );
+    silk_assert( k >= 0 && k <= 26 );
     f_Q16 = (opus_int)freq_table_Q16[ k ];
 
     /* Factor used for cosine approximation */
-    c_Q16 = SKP_SMULWB( f_Q16, -f_Q16 );
-    SKP_assert( c_Q16 >= -32768 );
+    c_Q16 = silk_SMULWB( f_Q16, -f_Q16 );
+    silk_assert( c_Q16 >= -32768 );
 
     /* initialize state */
     if( win_type == 1 ) {
         /* start from 0 */
         S0_Q16 = 0;
         /* approximation of sin(f) */
-        S1_Q16 = f_Q16 + SKP_RSHIFT( length, 3 );
+        S1_Q16 = f_Q16 + silk_RSHIFT( length, 3 );
     } else {
         /* start from 1 */
         S0_Q16 = ( 1 << 16 );
         /* approximation of cos(f) */
-        S1_Q16 = ( 1 << 16 ) + SKP_RSHIFT( c_Q16, 1 ) + SKP_RSHIFT( length, 4 );
+        S1_Q16 = ( 1 << 16 ) + silk_RSHIFT( c_Q16, 1 ) + silk_RSHIFT( length, 4 );
     }
 
     /* Uses the recursive equation:   sin(n*f) = 2 * cos(f) * sin((n-1)*f) - sin((n-2)*f)    */
     /* 4 samples at a time */
     for( k = 0; k < length; k += 4 ) {
-        px_win[ k ]     = (opus_int16)SKP_SMULWB( SKP_RSHIFT( S0_Q16 + S1_Q16, 1 ), px[ k ] );
-        px_win[ k + 1 ] = (opus_int16)SKP_SMULWB( S1_Q16, px[ k + 1] );
-        S0_Q16 = SKP_SMULWB( S1_Q16, c_Q16 ) + SKP_LSHIFT( S1_Q16, 1 ) - S0_Q16 + 1;
-        S0_Q16 = SKP_min( S0_Q16, ( 1 << 16 ) );
+        px_win[ k ]     = (opus_int16)silk_SMULWB( silk_RSHIFT( S0_Q16 + S1_Q16, 1 ), px[ k ] );
+        px_win[ k + 1 ] = (opus_int16)silk_SMULWB( S1_Q16, px[ k + 1] );
+        S0_Q16 = silk_SMULWB( S1_Q16, c_Q16 ) + silk_LSHIFT( S1_Q16, 1 ) - S0_Q16 + 1;
+        S0_Q16 = silk_min( S0_Q16, ( 1 << 16 ) );
 
-        px_win[ k + 2 ] = (opus_int16)SKP_SMULWB( SKP_RSHIFT( S0_Q16 + S1_Q16, 1 ), px[ k + 2] );
-        px_win[ k + 3 ] = (opus_int16)SKP_SMULWB( S0_Q16, px[ k + 3 ] );
-        S1_Q16 = SKP_SMULWB( S0_Q16, c_Q16 ) + SKP_LSHIFT( S0_Q16, 1 ) - S1_Q16;
-        S1_Q16 = SKP_min( S1_Q16, ( 1 << 16 ) );
+        px_win[ k + 2 ] = (opus_int16)silk_SMULWB( silk_RSHIFT( S0_Q16 + S1_Q16, 1 ), px[ k + 2] );
+        px_win[ k + 3 ] = (opus_int16)silk_SMULWB( S0_Q16, px[ k + 3 ] );
+        S1_Q16 = silk_SMULWB( S0_Q16, c_Q16 ) + silk_LSHIFT( S0_Q16, 1 ) - S1_Q16;
+        S1_Q16 = silk_min( S1_Q16, ( 1 << 16 ) );
     }
 }
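
The window generation above relies on the recursion sin(n*f) = 2*cos(f)*sin((n-1)*f) - sin((n-2)*f), with 2*cos(f) approximated by 2 - f^2 (c_Q16 holds roughly -f^2). A floating-point sketch of the same recursion, processing two samples per state update instead of the unrolled four, and omitting the small length-dependent bias terms of the fixed-point initialization (names are illustrative):

    static void apply_sine_window_sketch(float *x_win, const float *x, float f, int win_type, int length)
    {
        float s0 = ( win_type == 1 ) ? 0.0f : 1.0f;                /* sin(0) or cos(0)                  */
        float s1 = ( win_type == 1 ) ? f    : 1.0f - 0.5f * f * f; /* ~sin(f) or ~cos(f)                */
        float two_cos_f = 2.0f - f * f;                            /* 2*cos(f) approximated as 2 - f^2  */
        int   k;

        for( k = 0; k < length; k += 2 ) {
            float s_new;
            x_win[ k ]     = 0.5f * ( s0 + s1 ) * x[ k ];          /* halfway between the two states */
            x_win[ k + 1 ] = s1 * x[ k + 1 ];
            s_new = two_cos_f * s1 - s0;                           /* advance the sine recursion     */
            if( s_new > 1.0f ) s_new = 1.0f;                       /* the fixed-point code clamps too */
            s0 = s1;
            s1 = s_new;
        }
    }
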
diff --git a/silk/silk_array_maxabs.c b/silk/silk_array_maxabs.c
index 13f6cda..2df83a6 100644
--- a/silk/silk_array_maxabs.c
+++ b/silk/silk_array_maxabs.c
@@ -41,9 +41,9 @@
     if( len == 0 ) return 0;
 
     ind = len - 1;
-    max = SKP_SMULBB( vec[ ind ], vec[ ind ] );
+    max = silk_SMULBB( vec[ ind ], vec[ ind ] );
     for( i = len - 2; i >= 0; i-- ) {
-        lvl = SKP_SMULBB( vec[ i ], vec[ i ] );
+        lvl = silk_SMULBB( vec[ i ], vec[ i ] );
         if( lvl > max ) {
             max = lvl;
             ind = i;
@@ -52,7 +52,7 @@
 
     /* Do not return 32768, as it will not fit in an int16 so may lead to problems later on */
     if( max >= 1073676289 ) { /* (2^15-1)^2 = 1073676289*/
-        return( SKP_int16_MAX );
+        return( silk_int16_MAX );
     } else {
         if( vec[ ind ] < 0 ) {
             return( -vec[ ind ] );
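
The function above compares squared values so that a single comparison covers both signs, and clamps the result to 32767 so the returned magnitude always fits an opus_int16 (|int16 minimum| would not). A plain-integer sketch of the same idea, not taken from the SILK sources:

    #include <stdlib.h>

    static int maxabs_sketch(const short *vec, int len)
    {
        int i, best = 0;
        long long max_sq = -1;

        if( len == 0 ) return 0;
        for( i = 0; i < len; i++ ) {
            long long sq = (long long)vec[ i ] * vec[ i ];   /* squaring removes the sign */
            if( sq > max_sq ) { max_sq = sq; best = i; }
        }
        /* clamp so the result always fits in 16 bits, as the fixed-point code does */
        return abs( vec[ best ] ) > 32767 ? 32767 : abs( vec[ best ] );
    }
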
diff --git a/silk/silk_autocorr.c b/silk/silk_autocorr.c
index 1da028d..098d96a 100644
--- a/silk/silk_autocorr.c
+++ b/silk/silk_autocorr.c
@@ -43,7 +43,7 @@
     opus_int   i, lz, nRightShifts, corrCount;
     opus_int64 corr64;
 
-    corrCount = SKP_min_int( inputDataSize, correlationCount );
+    corrCount = silk_min_int( inputDataSize, correlationCount );
 
     /* compute energy (zero-lag correlation) */
     corr64 = silk_inner_prod16_aligned_64( inputData, inputData, inputDataSize );
@@ -59,18 +59,18 @@
     *scale = nRightShifts;
 
     if( nRightShifts <= 0 ) {
-        results[ 0 ] = SKP_LSHIFT( (opus_int32)SKP_CHECK_FIT32( corr64 ), -nRightShifts );
+        results[ 0 ] = silk_LSHIFT( (opus_int32)silk_CHECK_FIT32( corr64 ), -nRightShifts );
 
         /* compute remaining correlations based on int32 inner product */
           for( i = 1; i < corrCount; i++ ) {
-            results[ i ] = SKP_LSHIFT( silk_inner_prod_aligned( inputData, inputData + i, inputDataSize - i ), -nRightShifts );
+            results[ i ] = silk_LSHIFT( silk_inner_prod_aligned( inputData, inputData + i, inputDataSize - i ), -nRightShifts );
         }
     } else {
-        results[ 0 ] = (opus_int32)SKP_CHECK_FIT32( SKP_RSHIFT64( corr64, nRightShifts ) );
+        results[ 0 ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( corr64, nRightShifts ) );
 
         /* compute remaining correlations based on int64 inner product */
           for( i = 1; i < corrCount; i++ ) {
-            results[ i ] =  (opus_int32)SKP_CHECK_FIT32( SKP_RSHIFT64( silk_inner_prod16_aligned_64( inputData, inputData + i, inputDataSize - i ), nRightShifts ) );
+            results[ i ] =  (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( silk_inner_prod16_aligned_64( inputData, inputData + i, inputDataSize - i ), nRightShifts ) );
         }
     }
 }
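
silk_autocorr() above computes the usual sample autocorrelation; the silk_CHECK_FIT32/silk_RSHIFT64 calls only manage a common right-shift, derived from the zero-lag energy, so that every correlation fits in 32 bits. A double-precision reference of the quantity being computed, with the scaling omitted (names are for this sketch only):

    static void autocorr_sketch(double *results, const short *x, int inputDataSize, int corrCount)
    {
        int i, n;
        double acc;
        for( i = 0; i < corrCount; i++ ) {
            acc = 0.0;
            for( n = 0; n < inputDataSize - i; n++ ) {
                acc += (double)x[ n ] * x[ n + i ];   /* results[i] = sum_n x[n]*x[n+i] */
            }
            results[ i ] = acc;
        }
    }
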
diff --git a/silk/silk_biquad_alt.c b/silk/silk_biquad_alt.c
index 966ba28..ef95bf8 100644
--- a/silk/silk_biquad_alt.c
+++ b/silk/silk_biquad_alt.c
@@ -56,24 +56,24 @@
 
     /* Negate A_Q28 values and split in two parts */
     A0_L_Q28 = ( -A_Q28[ 0 ] ) & 0x00003FFF;        /* lower part */
-    A0_U_Q28 = SKP_RSHIFT( -A_Q28[ 0 ], 14 );       /* upper part */
+    A0_U_Q28 = silk_RSHIFT( -A_Q28[ 0 ], 14 );       /* upper part */
     A1_L_Q28 = ( -A_Q28[ 1 ] ) & 0x00003FFF;        /* lower part */
-    A1_U_Q28 = SKP_RSHIFT( -A_Q28[ 1 ], 14 );       /* upper part */
+    A1_U_Q28 = silk_RSHIFT( -A_Q28[ 1 ], 14 );       /* upper part */
 
     for( k = 0; k < len; k++ ) {
         /* S[ 0 ], S[ 1 ]: Q12 */
         inval = in[ k*stride ];
-        out32_Q14 = SKP_LSHIFT( SKP_SMLAWB( S[ 0 ], B_Q28[ 0 ], inval ), 2 );
+        out32_Q14 = silk_LSHIFT( silk_SMLAWB( S[ 0 ], B_Q28[ 0 ], inval ), 2 );
 
-        S[ 0 ] = S[1] + SKP_RSHIFT_ROUND( SKP_SMULWB( out32_Q14, A0_L_Q28 ), 14 );
-        S[ 0 ] = SKP_SMLAWB( S[ 0 ], out32_Q14, A0_U_Q28 );
-        S[ 0 ] = SKP_SMLAWB( S[ 0 ], B_Q28[ 1 ], inval);
+        S[ 0 ] = S[1] + silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14, A0_L_Q28 ), 14 );
+        S[ 0 ] = silk_SMLAWB( S[ 0 ], out32_Q14, A0_U_Q28 );
+        S[ 0 ] = silk_SMLAWB( S[ 0 ], B_Q28[ 1 ], inval);
 
-        S[ 1 ] = SKP_RSHIFT_ROUND( SKP_SMULWB( out32_Q14, A1_L_Q28 ), 14 );
-        S[ 1 ] = SKP_SMLAWB( S[ 1 ], out32_Q14, A1_U_Q28 );
-        S[ 1 ] = SKP_SMLAWB( S[ 1 ], B_Q28[ 2 ], inval );
+        S[ 1 ] = silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14, A1_L_Q28 ), 14 );
+        S[ 1 ] = silk_SMLAWB( S[ 1 ], out32_Q14, A1_U_Q28 );
+        S[ 1 ] = silk_SMLAWB( S[ 1 ], B_Q28[ 2 ], inval );
 
         /* Scale back to Q0 and saturate */
-        out[ k*stride ] = (opus_int16)SKP_SAT16( SKP_RSHIFT( out32_Q14 + (1<<14) - 1, 14 ) );
+        out[ k*stride ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14 + (1<<14) - 1, 14 ) );
     }
 }
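
The loop above is a second-order IIR in transposed direct form II; the negated A_Q28 coefficients are split into 14-bit halves only to retain full Q28 precision in the 16x32-bit multiplies. The underlying difference equations in floating point, with hypothetical names:

    static void biquad_alt_sketch(const float *in, const float B[3], const float A[2],
                                  float S[2], float *out, int len, int stride)
    {
        int k;
        for( k = 0; k < len; k++ ) {
            float x = in[ k * stride ];
            float y = B[ 0 ] * x + S[ 0 ];            /* y[n] = B0*x[n] + s0            */
            S[ 0 ] = S[ 1 ] + B[ 1 ] * x - A[ 0 ] * y; /* s0  = s1 + B1*x[n] - A1*y[n]  */
            S[ 1 ] = B[ 2 ] * x - A[ 1 ] * y;          /* s1  = B2*x[n] - A2*y[n]       */
            out[ k * stride ] = y;
        }
    }
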
diff --git a/silk/silk_burg_modified.c b/silk/silk_burg_modified.c
index 2d525b2..f41fcc9 100644
--- a/silk/silk_burg_modified.c
+++ b/silk/silk_burg_modified.c
@@ -62,34 +62,34 @@
     opus_int32       CAf[ SILK_MAX_ORDER_LPC + 1 ];
     opus_int32       CAb[ SILK_MAX_ORDER_LPC + 1 ];
 
-    SKP_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );
-    SKP_assert( nb_subfr <= MAX_NB_SUBFR );
+    silk_assert( subfr_length * nb_subfr <= MAX_FRAME_SIZE );
+    silk_assert( nb_subfr <= MAX_NB_SUBFR );
 
 
     /* Compute autocorrelations, added over subframes */
     silk_sum_sqr_shift( &C0, &rshifts, x, nb_subfr * subfr_length );
     if( rshifts > MAX_RSHIFTS ) {
-        C0 = SKP_LSHIFT32( C0, rshifts - MAX_RSHIFTS );
-        SKP_assert( C0 > 0 );
+        C0 = silk_LSHIFT32( C0, rshifts - MAX_RSHIFTS );
+        silk_assert( C0 > 0 );
         rshifts = MAX_RSHIFTS;
     } else {
         lz = silk_CLZ32( C0 ) - 1;
         rshifts_extra = N_BITS_HEAD_ROOM - lz;
         if( rshifts_extra > 0 ) {
-            rshifts_extra = SKP_min( rshifts_extra, MAX_RSHIFTS - rshifts );
-            C0 = SKP_RSHIFT32( C0, rshifts_extra );
+            rshifts_extra = silk_min( rshifts_extra, MAX_RSHIFTS - rshifts );
+            C0 = silk_RSHIFT32( C0, rshifts_extra );
         } else {
-            rshifts_extra = SKP_max( rshifts_extra, MIN_RSHIFTS - rshifts );
-            C0 = SKP_LSHIFT32( C0, -rshifts_extra );
+            rshifts_extra = silk_max( rshifts_extra, MIN_RSHIFTS - rshifts );
+            C0 = silk_LSHIFT32( C0, -rshifts_extra );
         }
         rshifts += rshifts_extra;
     }
-    SKP_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );
+    silk_memset( C_first_row, 0, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );
     if( rshifts > 0 ) {
         for( s = 0; s < nb_subfr; s++ ) {
             x_ptr = x + s * subfr_length;
             for( n = 1; n < D + 1; n++ ) {
-                C_first_row[ n - 1 ] += (opus_int32)SKP_RSHIFT64(
+                C_first_row[ n - 1 ] += (opus_int32)silk_RSHIFT64(
                     silk_inner_prod16_aligned_64( x_ptr, x_ptr + n, subfr_length - n ), rshifts );
             }
         }
@@ -97,15 +97,15 @@
         for( s = 0; s < nb_subfr; s++ ) {
             x_ptr = x + s * subfr_length;
             for( n = 1; n < D + 1; n++ ) {
-                C_first_row[ n - 1 ] += SKP_LSHIFT32(
+                C_first_row[ n - 1 ] += silk_LSHIFT32(
                     silk_inner_prod_aligned( x_ptr, x_ptr + n, subfr_length - n ), -rshifts );
             }
         }
     }
-    SKP_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );
+    silk_memcpy( C_last_row, C_first_row, SILK_MAX_ORDER_LPC * sizeof( opus_int32 ) );
 
     /* Initialize */
-    CAb[ 0 ] = CAf[ 0 ] = C0 + SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ) + 1;         /* Q(-rshifts)*/
+    CAb[ 0 ] = CAf[ 0 ] = C0 + silk_SMMUL( WhiteNoiseFrac_Q32, C0 ) + 1;         /* Q(-rshifts)*/
 
     for( n = 0; n < D; n++ ) {
         /* Update first row of correlation matrix (without first element) */
@@ -115,45 +115,45 @@
         if( rshifts > -2 ) {
             for( s = 0; s < nb_subfr; s++ ) {
                 x_ptr = x + s * subfr_length;
-                x1  = -SKP_LSHIFT32( (opus_int32)x_ptr[ n ],                    16 - rshifts );      /* Q(16-rshifts)*/
-                x2  = -SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts );      /* Q(16-rshifts)*/
-                tmp1 = SKP_LSHIFT32( (opus_int32)x_ptr[ n ],                    QA - 16 );           /* Q(QA-16)*/
-                tmp2 = SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 );           /* Q(QA-16)*/
+                x1  = -silk_LSHIFT32( (opus_int32)x_ptr[ n ],                    16 - rshifts );      /* Q(16-rshifts)*/
+                x2  = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 16 - rshifts );      /* Q(16-rshifts)*/
+                tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ],                    QA - 16 );           /* Q(QA-16)*/
+                tmp2 = silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], QA - 16 );           /* Q(QA-16)*/
                 for( k = 0; k < n; k++ ) {
-                    C_first_row[ k ] = SKP_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ]            ); /* Q( -rshifts )*/
-                    C_last_row[ k ]  = SKP_SMLAWB( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts )*/
+                    C_first_row[ k ] = silk_SMLAWB( C_first_row[ k ], x1, x_ptr[ n - k - 1 ]            ); /* Q( -rshifts )*/
+                    C_last_row[ k ]  = silk_SMLAWB( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts )*/
                     Atmp_QA = Af_QA[ k ];
-                    tmp1 = SKP_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ]            );              /* Q(QA-16)*/
-                    tmp2 = SKP_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] );              /* Q(QA-16)*/
+                    tmp1 = silk_SMLAWB( tmp1, Atmp_QA, x_ptr[ n - k - 1 ]            );              /* Q(QA-16)*/
+                    tmp2 = silk_SMLAWB( tmp2, Atmp_QA, x_ptr[ subfr_length - n + k ] );              /* Q(QA-16)*/
                 }
-                tmp1 = SKP_LSHIFT32( -tmp1, 32 - QA - rshifts );                                    /* Q(16-rshifts)*/
-                tmp2 = SKP_LSHIFT32( -tmp2, 32 - QA - rshifts );                                    /* Q(16-rshifts)*/
+                tmp1 = silk_LSHIFT32( -tmp1, 32 - QA - rshifts );                                    /* Q(16-rshifts)*/
+                tmp2 = silk_LSHIFT32( -tmp2, 32 - QA - rshifts );                                    /* Q(16-rshifts)*/
                 for( k = 0; k <= n; k++ ) {
-                    CAf[ k ] = SKP_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ]                    );     /* Q( -rshift )*/
-                    CAb[ k ] = SKP_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] );     /* Q( -rshift )*/
+                    CAf[ k ] = silk_SMLAWB( CAf[ k ], tmp1, x_ptr[ n - k ]                    );     /* Q( -rshift )*/
+                    CAb[ k ] = silk_SMLAWB( CAb[ k ], tmp2, x_ptr[ subfr_length - n + k - 1 ] );     /* Q( -rshift )*/
                 }
             }
         } else {
             for( s = 0; s < nb_subfr; s++ ) {
                 x_ptr = x + s * subfr_length;
-                x1  = -SKP_LSHIFT32( (opus_int32)x_ptr[ n ],                    -rshifts );          /* Q( -rshifts )*/
-                x2  = -SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts );          /* Q( -rshifts )*/
-                tmp1 = SKP_LSHIFT32( (opus_int32)x_ptr[ n ],                    17 );                /* Q17*/
-                tmp2 = SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 );                /* Q17*/
+                x1  = -silk_LSHIFT32( (opus_int32)x_ptr[ n ],                    -rshifts );          /* Q( -rshifts )*/
+                x2  = -silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], -rshifts );          /* Q( -rshifts )*/
+                tmp1 = silk_LSHIFT32( (opus_int32)x_ptr[ n ],                    17 );                /* Q17*/
+                tmp2 = silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n - 1 ], 17 );                /* Q17*/
                 for( k = 0; k < n; k++ ) {
-                    C_first_row[ k ] = SKP_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ]            ); /* Q( -rshifts )*/
-                    C_last_row[ k ]  = SKP_MLA( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts )*/
-                    Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 17 );                                /* Q17*/
-                    tmp1 = SKP_MLA( tmp1, x_ptr[ n - k - 1 ],            Atmp1 );                   /* Q17*/
-                    tmp2 = SKP_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 );                   /* Q17*/
+                    C_first_row[ k ] = silk_MLA( C_first_row[ k ], x1, x_ptr[ n - k - 1 ]            ); /* Q( -rshifts )*/
+                    C_last_row[ k ]  = silk_MLA( C_last_row[ k ],  x2, x_ptr[ subfr_length - n + k ] ); /* Q( -rshifts )*/
+                    Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 17 );                                /* Q17*/
+                    tmp1 = silk_MLA( tmp1, x_ptr[ n - k - 1 ],            Atmp1 );                   /* Q17*/
+                    tmp2 = silk_MLA( tmp2, x_ptr[ subfr_length - n + k ], Atmp1 );                   /* Q17*/
                 }
                 tmp1 = -tmp1;                                                                       /* Q17*/
                 tmp2 = -tmp2;                                                                       /* Q17*/
                 for( k = 0; k <= n; k++ ) {
-                    CAf[ k ] = SKP_SMLAWW( CAf[ k ], tmp1,
-                        SKP_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) );                  /* Q( -rshift )*/
-                    CAb[ k ] = SKP_SMLAWW( CAb[ k ], tmp2,
-                        SKP_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) );/* Q( -rshift )*/
+                    CAf[ k ] = silk_SMLAWW( CAf[ k ], tmp1,
+                        silk_LSHIFT32( (opus_int32)x_ptr[ n - k ], -rshifts - 1 ) );                  /* Q( -rshift )*/
+                    CAb[ k ] = silk_SMLAWW( CAb[ k ], tmp2,
+                        silk_LSHIFT32( (opus_int32)x_ptr[ subfr_length - n + k - 1 ], -rshifts - 1 ) );/* Q( -rshift )*/
                 }
             }
         }
@@ -162,31 +162,31 @@
         tmp1 = C_first_row[ n ];                                                            /* Q( -rshifts )*/
         tmp2 = C_last_row[ n ];                                                             /* Q( -rshifts )*/
         num  = 0;                                                                           /* Q( -rshifts )*/
-        nrg  = SKP_ADD32( CAb[ 0 ], CAf[ 0 ] );                                             /* Q( 1-rshifts )*/
+        nrg  = silk_ADD32( CAb[ 0 ], CAf[ 0 ] );                                             /* Q( 1-rshifts )*/
         for( k = 0; k < n; k++ ) {
             Atmp_QA = Af_QA[ k ];
-            lz = silk_CLZ32( SKP_abs( Atmp_QA ) ) - 1;
-            lz = SKP_min( 32 - QA, lz );
-            Atmp1 = SKP_LSHIFT32( Atmp_QA, lz );                                            /* Q( QA + lz )*/
+            lz = silk_CLZ32( silk_abs( Atmp_QA ) ) - 1;
+            lz = silk_min( 32 - QA, lz );
+            Atmp1 = silk_LSHIFT32( Atmp_QA, lz );                                            /* Q( QA + lz )*/
 
-            tmp1 = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( C_last_row[  n - k - 1 ], Atmp1 ), 32 - QA - lz );    /* Q( -rshifts )*/
-            tmp2 = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz );    /* Q( -rshifts )*/
-            num  = SKP_ADD_LSHIFT32( num,  SKP_SMMUL( CAb[ n - k ],             Atmp1 ), 32 - QA - lz );    /* Q( -rshifts )*/
-            nrg  = SKP_ADD_LSHIFT32( nrg,  SKP_SMMUL( SKP_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ),
+            tmp1 = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( C_last_row[  n - k - 1 ], Atmp1 ), 32 - QA - lz );    /* Q( -rshifts )*/
+            tmp2 = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( C_first_row[ n - k - 1 ], Atmp1 ), 32 - QA - lz );    /* Q( -rshifts )*/
+            num  = silk_ADD_LSHIFT32( num,  silk_SMMUL( CAb[ n - k ],             Atmp1 ), 32 - QA - lz );    /* Q( -rshifts )*/
+            nrg  = silk_ADD_LSHIFT32( nrg,  silk_SMMUL( silk_ADD32( CAb[ k + 1 ], CAf[ k + 1 ] ),
                                                                                 Atmp1 ), 32 - QA - lz );    /* Q( 1-rshifts )*/
         }
         CAf[ n + 1 ] = tmp1;                                                                /* Q( -rshifts )*/
         CAb[ n + 1 ] = tmp2;                                                                /* Q( -rshifts )*/
-        num = SKP_ADD32( num, tmp2 );                                                       /* Q( -rshifts )*/
-        num = SKP_LSHIFT32( -num, 1 );                                                      /* Q( 1-rshifts )*/
+        num = silk_ADD32( num, tmp2 );                                                       /* Q( -rshifts )*/
+        num = silk_LSHIFT32( -num, 1 );                                                      /* Q( 1-rshifts )*/
 
         /* Calculate the next order reflection (parcor) coefficient */
-        if( SKP_abs( num ) < nrg ) {
+        if( silk_abs( num ) < nrg ) {
             rc_Q31 = silk_DIV32_varQ( num, nrg, 31 );
         } else {
             /* Negative energy or ratio too high; set remaining coefficients to zero and exit loop */
-            SKP_memset( &Af_QA[ n ], 0, ( D - n ) * sizeof( opus_int32 ) );
-            SKP_assert( 0 );
+            silk_memset( &Af_QA[ n ], 0, ( D - n ) * sizeof( opus_int32 ) );
+            silk_assert( 0 );
             break;
         }
 
@@ -194,17 +194,17 @@
         for( k = 0; k < (n + 1) >> 1; k++ ) {
             tmp1 = Af_QA[ k ];                                                              /* QA*/
             tmp2 = Af_QA[ n - k - 1 ];                                                      /* QA*/
-            Af_QA[ k ]         = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 );    /* QA*/
-            Af_QA[ n - k - 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 );    /* QA*/
+            Af_QA[ k ]         = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 );    /* QA*/
+            Af_QA[ n - k - 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 );    /* QA*/
         }
-        Af_QA[ n ] = SKP_RSHIFT32( rc_Q31, 31 - QA );                                       /* QA*/
+        Af_QA[ n ] = silk_RSHIFT32( rc_Q31, 31 - QA );                                       /* QA*/
 
         /* Update C * Af and C * Ab */
         for( k = 0; k <= n + 1; k++ ) {
             tmp1 = CAf[ k ];                                                                /* Q( -rshifts )*/
             tmp2 = CAb[ n - k + 1 ];                                                        /* Q( -rshifts )*/
-            CAf[ k ]         = SKP_ADD_LSHIFT32( tmp1, SKP_SMMUL( tmp2, rc_Q31 ), 1 );      /* Q( -rshifts )*/
-            CAb[ n - k + 1 ] = SKP_ADD_LSHIFT32( tmp2, SKP_SMMUL( tmp1, rc_Q31 ), 1 );      /* Q( -rshifts )*/
+            CAf[ k ]         = silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 );      /* Q( -rshifts )*/
+            CAb[ n - k + 1 ] = silk_ADD_LSHIFT32( tmp2, silk_SMMUL( tmp1, rc_Q31 ), 1 );      /* Q( -rshifts )*/
         }
     }
 
@@ -212,11 +212,11 @@
     nrg  = CAf[ 0 ];                                                                        /* Q( -rshifts )*/
     tmp1 = 1 << 16;                                                                         /* Q16*/
     for( k = 0; k < D; k++ ) {
-        Atmp1 = SKP_RSHIFT_ROUND( Af_QA[ k ], QA - 16 );                                    /* Q16*/
-        nrg  = SKP_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 );                                      /* Q( -rshifts )*/
-        tmp1 = SKP_SMLAWW( tmp1, Atmp1, Atmp1 );                                            /* Q16*/
+        Atmp1 = silk_RSHIFT_ROUND( Af_QA[ k ], QA - 16 );                                    /* Q16*/
+        nrg  = silk_SMLAWW( nrg, CAf[ k + 1 ], Atmp1 );                                      /* Q( -rshifts )*/
+        tmp1 = silk_SMLAWW( tmp1, Atmp1, Atmp1 );                                            /* Q16*/
         A_Q16[ k ] = -Atmp1;
     }
-    *res_nrg = SKP_SMLAWW( nrg, SKP_SMMUL( WhiteNoiseFrac_Q32, C0 ), -tmp1 );               /* Q( -rshifts )*/
+    *res_nrg = silk_SMLAWW( nrg, silk_SMMUL( WhiteNoiseFrac_Q32, C0 ), -tmp1 );               /* Q( -rshifts )*/
     *res_nrg_Q = -rshifts;
 }
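
Most of silk_burg_modified() above is correlation bookkeeping in mixed Q formats; the step that is easy to lose among the renamed macros is the order-recursive coefficient update once the reflection (parcor) coefficient rc has been found. In floating point it is simply the symmetric update below; the CAf/CAb maintenance is omitted and the names are for this sketch only.

    static void burg_update_step_sketch(double *Af, int n, double rc)
    {
        int k;
        for( k = 0; k < ( n + 1 ) / 2; k++ ) {
            double tmp1 = Af[ k ];
            double tmp2 = Af[ n - k - 1 ];
            Af[ k ]         = tmp1 + rc * tmp2;   /* mirrors silk_ADD_LSHIFT32( tmp1, silk_SMMUL( tmp2, rc_Q31 ), 1 ) */
            Af[ n - k - 1 ] = tmp2 + rc * tmp1;
        }
        Af[ n ] = rc;                             /* the new highest-order coefficient is rc itself */
    }
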
diff --git a/silk/silk_bwexpander.c b/silk/silk_bwexpander.c
index 06ed0e7..ba0f4ec 100644
--- a/silk/silk_bwexpander.c
+++ b/silk/silk_bwexpander.c
@@ -41,11 +41,11 @@
     opus_int   i;
     opus_int32 chirp_minus_one_Q16 = chirp_Q16 - 65536;
 
-    /* NB: Dont use SKP_SMULWB, instead of SKP_RSHIFT_ROUND( SKP_MUL(), 16 ), below.  */
-    /* Bias in SKP_SMULWB can lead to unstable filters                                */
+    /* NB: Do not use silk_SMULWB in place of silk_RSHIFT_ROUND( silk_MUL(), 16 ) below; */
+    /* the bias in silk_SMULWB can lead to unstable filters                              */
     for( i = 0; i < d - 1; i++ ) {
-        ar[ i ]    = (opus_int16)SKP_RSHIFT_ROUND( SKP_MUL( chirp_Q16, ar[ i ]             ), 16 );
-        chirp_Q16 +=            SKP_RSHIFT_ROUND( SKP_MUL( chirp_Q16, chirp_minus_one_Q16 ), 16 );
+        ar[ i ]    = (opus_int16)silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, ar[ i ]             ), 16 );
+        chirp_Q16 +=            silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, chirp_minus_one_Q16 ), 16 );
     }
-    ar[ d - 1 ] = (opus_int16)SKP_RSHIFT_ROUND( SKP_MUL( chirp_Q16, ar[ d - 1 ] ), 16 );
+    ar[ d - 1 ] = (opus_int16)silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, ar[ d - 1 ] ), 16 );
 }
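For reference, the bias the comment above warns about comes from truncation: silk_SMULWB keeps the high bits of the product, rounding toward negative infinity, while silk_RSHIFT_ROUND( silk_MUL( a, b ), 16 ) rounds to nearest. A minimal sketch with simplified stand-in functions (written only to show the rounding behaviour, not copied from the reference macros):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the fixed-point macros. */
static int32_t smulwb_trunc( int32_t a32, int32_t b32 )
{
    /* ( a32 * bottom 16 bits of b32 ) >> 16, truncated toward negative infinity */
    return (int32_t)( ( (int64_t)a32 * (int16_t)b32 ) >> 16 );
}

static int32_t mul_rshift_round( int32_t a32, int32_t b32 )
{
    /* ( a32 * b32 ) >> 16, rounded to nearest */
    return (int32_t)( ( (int64_t)a32 * b32 + 32768 ) >> 16 );
}

int main( void )
{
    int32_t chirp_Q16 = 64225;   /* roughly 0.98 in Q16 */
    int32_t ar_coef   = 1;       /* small positive AR coefficient */

    /* Truncation pulls 0.98 * 1 down to 0; rounding keeps it at 1. Repeated over
       many coefficients, that systematic bias is what the comment above says can
       destabilize the filter. */
    printf( "truncated: %ld  rounded: %ld\n",
            (long)smulwb_trunc( chirp_Q16, ar_coef ),
            (long)mul_rshift_round( chirp_Q16, ar_coef ) );
    return 0;
}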
diff --git a/silk/silk_bwexpander_32.c b/silk/silk_bwexpander_32.c
index 005c142..c58c743 100644
--- a/silk/silk_bwexpander_32.c
+++ b/silk/silk_bwexpander_32.c
@@ -42,9 +42,9 @@
     opus_int32 chirp_minus_one_Q16 = chirp_Q16 - 65536;
 
     for( i = 0; i < d - 1; i++ ) {
-        ar[ i ]    = SKP_SMULWW( chirp_Q16, ar[ i ] );
-        chirp_Q16 += SKP_RSHIFT_ROUND( SKP_MUL( chirp_Q16, chirp_minus_one_Q16 ), 16 );
+        ar[ i ]    = silk_SMULWW( chirp_Q16, ar[ i ] );
+        chirp_Q16 += silk_RSHIFT_ROUND( silk_MUL( chirp_Q16, chirp_minus_one_Q16 ), 16 );
     }
-    ar[ d - 1 ] = SKP_SMULWW( chirp_Q16, ar[ d - 1 ] );
+    ar[ d - 1 ] = silk_SMULWW( chirp_Q16, ar[ d - 1 ] );
 }
 
diff --git a/silk/silk_check_control_input.c b/silk/silk_check_control_input.c
index b84b98d..aa8ee6d 100644
--- a/silk/silk_check_control_input.c
+++ b/silk/silk_check_control_input.c
@@ -38,7 +38,7 @@
     silk_EncControlStruct        *encControl     /* I:   Control structure                               */
 )
 {
-    SKP_assert( encControl != NULL );
+    silk_assert( encControl != NULL );
 
     if( ( ( encControl->API_sampleRate            !=  8000 ) &&
           ( encControl->API_sampleRate            != 12000 ) &&
@@ -59,46 +59,46 @@
           ( encControl->minInternalSampleRate > encControl->desiredInternalSampleRate ) ||
           ( encControl->maxInternalSampleRate < encControl->desiredInternalSampleRate ) ||
           ( encControl->minInternalSampleRate > encControl->maxInternalSampleRate ) ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_FS_NOT_SUPPORTED;
     }
     if( encControl->payloadSize_ms != 10 &&
         encControl->payloadSize_ms != 20 &&
         encControl->payloadSize_ms != 40 &&
         encControl->payloadSize_ms != 60 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_PACKET_SIZE_NOT_SUPPORTED;
     }
     if( encControl->packetLossPercentage < 0 || encControl->packetLossPercentage > 100 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_LOSS_RATE;
     }
     if( encControl->useDTX < 0 || encControl->useDTX > 1 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_DTX_SETTING;
     }
     if( encControl->useCBR < 0 || encControl->useCBR > 1 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_CBR_SETTING;
     }
     if( encControl->useInBandFEC < 0 || encControl->useInBandFEC > 1 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_INBAND_FEC_SETTING;
     }
     if( encControl->nChannelsAPI < 1 || encControl->nChannelsAPI > ENCODER_NUM_CHANNELS ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_NUMBER_OF_CHANNELS_ERROR;
     }
     if( encControl->nChannelsInternal < 1 || encControl->nChannelsInternal > ENCODER_NUM_CHANNELS ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_NUMBER_OF_CHANNELS_ERROR;
     }
     if( encControl->nChannelsInternal > encControl->nChannelsAPI ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_NUMBER_OF_CHANNELS_ERROR;
     }
     if( encControl->complexity < 0 || encControl->complexity > 10 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return SILK_ENC_INVALID_COMPLEXITY_SETTING;
     }
 
diff --git a/silk/silk_code_signs.c b/silk/silk_code_signs.c
index f9e352a..2697b36 100644
--- a/silk/silk_code_signs.c
+++ b/silk/silk_code_signs.c
@@ -31,11 +31,11 @@
 
 #include "silk_main.h"
 
-/*#define SKP_enc_map(a)                ((a) > 0 ? 1 : 0)*/
-/*#define SKP_dec_map(a)                ((a) > 0 ? 1 : -1)*/
+/*#define silk_enc_map(a)                ((a) > 0 ? 1 : 0)*/
+/*#define silk_dec_map(a)                ((a) > 0 ? 1 : -1)*/
 /* shifting avoids if-statement */
-#define SKP_enc_map(a)                  ( SKP_RSHIFT( (a), 15 ) + 1 )
-#define SKP_dec_map(a)                  ( SKP_LSHIFT( (a),  1 ) - 1 )
+#define silk_enc_map(a)                  ( silk_RSHIFT( (a), 15 ) + 1 )
+#define silk_dec_map(a)                  ( silk_LSHIFT( (a),  1 ) - 1 )
 
 /* Encodes signs of excitation */
 void silk_encode_signs(
@@ -54,16 +54,16 @@
 
     icdf[ 1 ] = 0;
     q_ptr = pulses;
-    i = SKP_SMULBB( 6, SKP_ADD_LSHIFT( quantOffsetType, signalType, 1 ) );
+    i = silk_SMULBB( 6, silk_ADD_LSHIFT( quantOffsetType, signalType, 1 ) );
     icdf_ptr = &silk_sign_iCDF[ i ];
-    length = SKP_RSHIFT( length + SHELL_CODEC_FRAME_LENGTH/2, LOG2_SHELL_CODEC_FRAME_LENGTH );
+    length = silk_RSHIFT( length + SHELL_CODEC_FRAME_LENGTH/2, LOG2_SHELL_CODEC_FRAME_LENGTH );
     for( i = 0; i < length; i++ ) {
         p = sum_pulses[ i ];
         if( p > 0 ) {
-            icdf[ 0 ] = icdf_ptr[ SKP_min( p - 1, 5 ) ];
+            icdf[ 0 ] = icdf_ptr[ silk_min( p - 1, 5 ) ];
             for( j = 0; j < SHELL_CODEC_FRAME_LENGTH; j++ ) {
                 if( q_ptr[ j ] != 0 ) {
-                    ec_enc_icdf( psRangeEnc, SKP_enc_map( q_ptr[ j ]), icdf, 8 );
+                    ec_enc_icdf( psRangeEnc, silk_enc_map( q_ptr[ j ]), icdf, 8 );
                 }
             }
         }
@@ -88,13 +88,13 @@
 
     icdf[ 1 ] = 0;
     q_ptr = pulses;
-    i = SKP_SMULBB( 6, SKP_ADD_LSHIFT( quantOffsetType, signalType, 1 ) );
+    i = silk_SMULBB( 6, silk_ADD_LSHIFT( quantOffsetType, signalType, 1 ) );
     icdf_ptr = &silk_sign_iCDF[ i ];
-    length = SKP_RSHIFT( length + SHELL_CODEC_FRAME_LENGTH/2, LOG2_SHELL_CODEC_FRAME_LENGTH );
+    length = silk_RSHIFT( length + SHELL_CODEC_FRAME_LENGTH/2, LOG2_SHELL_CODEC_FRAME_LENGTH );
     for( i = 0; i < length; i++ ) {
         p = sum_pulses[ i ];
         if( p > 0 ) {
-            icdf[ 0 ] = icdf_ptr[ SKP_min( p - 1, 5 ) ];
+            icdf[ 0 ] = icdf_ptr[ silk_min( p - 1, 5 ) ];
             for( j = 0; j < SHELL_CODEC_FRAME_LENGTH; j++ ) {
                 if( q_ptr[ j ] > 0 ) {
                     /* attach sign */
@@ -105,7 +105,7 @@
                     }
 #else
                     /* implementation with shift, subtraction, multiplication */
-                    q_ptr[ j ] *= SKP_dec_map( ec_dec_icdf( psRangeDec, icdf, 8 ) );
+                    q_ptr[ j ] *= silk_dec_map( ec_dec_icdf( psRangeDec, icdf, 8 ) );
 #endif
                 }
             }
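The two remapping macros above are branch-free: silk_enc_map extracts the sign of a non-zero pulse as 0 or 1, and silk_dec_map turns the decoded 0/1 symbol back into a -1/+1 factor. A small self-contained check with local copies of the two definitions, assuming arithmetic right shift as the codec's macros do:

#include <assert.h>

/* Local copies of the shift-based maps from the hunk above. */
#define enc_map( a )  ( ( (a) >> 15 ) + 1 )   /* negative -> 0, positive -> 1 */
#define dec_map( a )  ( ( (a) << 1 ) - 1 )    /* 0 -> -1, 1 -> +1             */

int main( void )
{
    /* Only non-zero pulses are mapped (the caller skips zeros), and pulse
       magnitudes stay far below 2^15, so shifting by 15 just isolates the sign. */
    assert( enc_map( -7 ) == 0 );
    assert( enc_map(  7 ) == 1 );

    /* The decoder multiplies each non-zero pulse by the mapped sign factor. */
    assert( dec_map( 0 ) == -1 );
    assert( dec_map( 1 ) ==  1 );
    return 0;
}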
diff --git a/silk/silk_control_SNR.c b/silk/silk_control_SNR.c
index f8cd3dc..d9dd8f8 100644
--- a/silk/silk_control_SNR.c
+++ b/silk/silk_control_SNR.c
@@ -43,7 +43,7 @@
     const opus_int32 *rateTable;
 
     /* Set bitrate/coding quality */
-    TargetRate_bps = SKP_LIMIT( TargetRate_bps, MIN_TARGET_RATE_BPS, MAX_TARGET_RATE_BPS );
+    TargetRate_bps = silk_LIMIT( TargetRate_bps, MIN_TARGET_RATE_BPS, MAX_TARGET_RATE_BPS );
     if( TargetRate_bps != psEncC->TargetRate_bps ) {
         psEncC->TargetRate_bps = TargetRate_bps;
 
@@ -64,16 +64,16 @@
         /* Find bitrate interval in table and interpolate */
         for( k = 1; k < TARGET_RATE_TAB_SZ; k++ ) {
             if( TargetRate_bps <= rateTable[ k ] ) {
-                frac_Q6 = SKP_DIV32( SKP_LSHIFT( TargetRate_bps - rateTable[ k - 1 ], 6 ),
+                frac_Q6 = silk_DIV32( silk_LSHIFT( TargetRate_bps - rateTable[ k - 1 ], 6 ),
                                                  rateTable[ k ] - rateTable[ k - 1 ] );
-                psEncC->SNR_dB_Q7 = SKP_LSHIFT( silk_SNR_table_Q1[ k - 1 ], 6 ) + SKP_MUL( frac_Q6, silk_SNR_table_Q1[ k ] - silk_SNR_table_Q1[ k - 1 ] );
+                psEncC->SNR_dB_Q7 = silk_LSHIFT( silk_SNR_table_Q1[ k - 1 ], 6 ) + silk_MUL( frac_Q6, silk_SNR_table_Q1[ k ] - silk_SNR_table_Q1[ k - 1 ] );
                 break;
             }
         }
 
         /* Reduce coding quality whenever LBRR is enabled, to free up some bits */
         if( psEncC->LBRR_enabled ) {
-            psEncC->SNR_dB_Q7 = SKP_SMLABB( psEncC->SNR_dB_Q7, 12 - psEncC->LBRR_GainIncreases, SILK_FIX_CONST( -0.25, 7 ) );
+            psEncC->SNR_dB_Q7 = silk_SMLABB( psEncC->SNR_dB_Q7, 12 - psEncC->LBRR_GainIncreases, SILK_FIX_CONST( -0.25, 7 ) );
         }
     }
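The interpolation above works entirely in fixed point: frac_Q6 is the Q6 position of the target rate between two table entries, and adding frac_Q6 times the Q1 SNR difference to the lower entry shifted up by 6 lands the result in Q7. A sketch with invented table values (the real tables live elsewhere in silk/):

#include <stdio.h>

int main( void )
{
    /* Hypothetical neighbouring table entries, only to show the arithmetic. */
    int rate_lo = 10000, rate_hi = 15000;    /* bits/s               */
    int snr_lo_Q1 = 24,  snr_hi_Q1 = 32;     /* 12.0 dB and 16.0 dB  */
    int target    = 12500;

    /* Fractional position between the two rates, in Q6. */
    int frac_Q6 = ( ( target - rate_lo ) << 6 ) / ( rate_hi - rate_lo );

    /* Q1 << 6 gives Q7, and frac_Q6 * (Q1 difference) is also Q7. */
    int snr_Q7 = ( snr_lo_Q1 << 6 ) + frac_Q6 * ( snr_hi_Q1 - snr_lo_Q1 );

    printf( "SNR = %.2f dB\n", snr_Q7 / 128.0 );   /* expect 14.00 dB */
    return 0;
}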
 
diff --git a/silk/silk_control_audio_bandwidth.c b/silk/silk_control_audio_bandwidth.c
index 4e7ef84..2f995a5 100644
--- a/silk/silk_control_audio_bandwidth.c
+++ b/silk/silk_control_audio_bandwidth.c
@@ -41,17 +41,17 @@
     opus_int32 fs_Hz;
 
     fs_kHz = psEncC->fs_kHz;
-    fs_Hz = SKP_SMULBB( fs_kHz, 1000 );
+    fs_Hz = silk_SMULBB( fs_kHz, 1000 );
     if( fs_Hz == 0 ) {
         /* Encoder has just been initialized */
-        fs_Hz  = SKP_min( psEncC->desiredInternal_fs_Hz, psEncC->API_fs_Hz );
-        fs_kHz = SKP_DIV32_16( fs_Hz, 1000 );
+        fs_Hz  = silk_min( psEncC->desiredInternal_fs_Hz, psEncC->API_fs_Hz );
+        fs_kHz = silk_DIV32_16( fs_Hz, 1000 );
     } else if( fs_Hz > psEncC->API_fs_Hz || fs_Hz > psEncC->maxInternal_fs_Hz || fs_Hz < psEncC->minInternal_fs_Hz ) {
         /* Make sure internal rate is not higher than external rate or maximum allowed, or lower than minimum allowed */
         fs_Hz  = psEncC->API_fs_Hz;
-        fs_Hz  = SKP_min( fs_Hz, psEncC->maxInternal_fs_Hz );
-        fs_Hz  = SKP_max( fs_Hz, psEncC->minInternal_fs_Hz );
-        fs_kHz = SKP_DIV32_16( fs_Hz, 1000 );
+        fs_Hz  = silk_min( fs_Hz, psEncC->maxInternal_fs_Hz );
+        fs_Hz  = silk_max( fs_Hz, psEncC->minInternal_fs_Hz );
+        fs_kHz = silk_DIV32_16( fs_Hz, 1000 );
     } else {
         /* State machine for the internal sampling rate switching */
         if( psEncC->sLP.transition_frame_no >= TRANSITION_FRAMES ) {
@@ -60,7 +60,7 @@
         }
         if( psEncC->allow_bandwidth_switch ) {
             /* Check if we should switch down */
-            if( SKP_SMULBB( psEncC->fs_kHz, 1000 ) > psEncC->desiredInternal_fs_Hz )
+            if( silk_SMULBB( psEncC->fs_kHz, 1000 ) > psEncC->desiredInternal_fs_Hz )
             {
                 /* Switch down */
                 if( psEncC->sLP.mode == 0 ) {
@@ -68,7 +68,7 @@
                     psEncC->sLP.transition_frame_no = TRANSITION_FRAMES;
 
                     /* Reset transition filter state */
-                    SKP_memset( psEncC->sLP.In_LP_State, 0, sizeof( psEncC->sLP.In_LP_State ) );
+                    silk_memset( psEncC->sLP.In_LP_State, 0, sizeof( psEncC->sLP.In_LP_State ) );
                 }
                 if( psEncC->sLP.transition_frame_no <= 0 ) {
                     /* Stop transition phase */
@@ -83,7 +83,7 @@
             }
             else
             /* Check if we should switch up */
-            if( SKP_SMULBB( psEncC->fs_kHz, 1000 ) < psEncC->desiredInternal_fs_Hz )
+            if( silk_SMULBB( psEncC->fs_kHz, 1000 ) < psEncC->desiredInternal_fs_Hz )
             {
                 /* Switch up */
                 if( psEncC->sLP.mode == 0 ) {
@@ -94,7 +94,7 @@
                     psEncC->sLP.transition_frame_no = 0;
 
                     /* Reset transition filter state */
-                    SKP_memset( psEncC->sLP.In_LP_State, 0, sizeof( psEncC->sLP.In_LP_State ) );
+                    silk_memset( psEncC->sLP.In_LP_State, 0, sizeof( psEncC->sLP.In_LP_State ) );
                 }
                 /* Direction: up */
                 psEncC->sLP.mode = 1;
diff --git a/silk/silk_control_codec.c b/silk/silk_control_codec.c
index 34bad44..0e80724 100644
--- a/silk/silk_control_codec.c
+++ b/silk/silk_control_codec.c
@@ -151,32 +151,32 @@
             opus_int16 x_bufFIX[ 2 * MAX_FRAME_LENGTH + LA_SHAPE_MAX ];
 #endif
 
-            nSamples_temp = SKP_LSHIFT( psEnc->sCmn.frame_length, 1 ) + LA_SHAPE_MS * psEnc->sCmn.fs_kHz;
+            nSamples_temp = silk_LSHIFT( psEnc->sCmn.frame_length, 1 ) + LA_SHAPE_MS * psEnc->sCmn.fs_kHz;
 
 #ifndef FIXED_POINT
-            SKP_float2short_array( x_bufFIX, psEnc->x_buf, nSamples_temp );
+            silk_float2short_array( x_bufFIX, psEnc->x_buf, nSamples_temp );
 #endif
 
-            if( SKP_SMULBB( fs_kHz, 1000 ) < psEnc->sCmn.API_fs_Hz && psEnc->sCmn.fs_kHz != 0 ) {
+            if( silk_SMULBB( fs_kHz, 1000 ) < psEnc->sCmn.API_fs_Hz && psEnc->sCmn.fs_kHz != 0 ) {
                 /* Resample buffered data in x_buf to API_fs_Hz */
 
                 silk_resampler_state_struct  temp_resampler_state;
 
                 /* Initialize resampler for temporary resampling of x_buf data to API_fs_Hz */
-                ret += silk_resampler_init( &temp_resampler_state, SKP_SMULBB( psEnc->sCmn.fs_kHz, 1000 ), psEnc->sCmn.API_fs_Hz );
+                ret += silk_resampler_init( &temp_resampler_state, silk_SMULBB( psEnc->sCmn.fs_kHz, 1000 ), psEnc->sCmn.API_fs_Hz );
 
                 /* Temporary resampling of x_buf data to API_fs_Hz */
                 ret += silk_resampler( &temp_resampler_state, x_buf_API_fs_Hz, x_bufFIX, nSamples_temp );
 
                 /* Calculate number of samples that has been temporarily upsampled */
-                nSamples_temp = SKP_DIV32_16( nSamples_temp * psEnc->sCmn.API_fs_Hz, SKP_SMULBB( psEnc->sCmn.fs_kHz, 1000 ) );
+                nSamples_temp = silk_DIV32_16( nSamples_temp * psEnc->sCmn.API_fs_Hz, silk_SMULBB( psEnc->sCmn.fs_kHz, 1000 ) );
 
                 /* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
-                ret += silk_resampler_init( &psEnc->sCmn.resampler_state, psEnc->sCmn.API_fs_Hz, SKP_SMULBB( fs_kHz, 1000 ) );
+                ret += silk_resampler_init( &psEnc->sCmn.resampler_state, psEnc->sCmn.API_fs_Hz, silk_SMULBB( fs_kHz, 1000 ) );
 
             } else {
                 /* Copy data */
-                SKP_memcpy( x_buf_API_fs_Hz, x_bufFIX, nSamples_temp * sizeof( opus_int16 ) );
+                silk_memcpy( x_buf_API_fs_Hz, x_bufFIX, nSamples_temp * sizeof( opus_int16 ) );
             }
 
             if( 1000 * fs_kHz != psEnc->sCmn.API_fs_Hz ) {
@@ -184,7 +184,7 @@
                 ret += silk_resampler( &psEnc->sCmn.resampler_state, x_bufFIX, x_buf_API_fs_Hz, nSamples_temp );
             }
 #ifndef FIXED_POINT
-            SKP_short2float_array( psEnc->x_buf, x_bufFIX, ( 2 * MAX_FRAME_LENGTH_MS + LA_SHAPE_MS ) * fs_kHz );
+            silk_short2float_array( psEnc->x_buf, x_bufFIX, ( 2 * MAX_FRAME_LENGTH_MS + LA_SHAPE_MS ) * fs_kHz );
 #endif
         }
     }
@@ -213,18 +213,18 @@
         if( PacketSize_ms <= 10 ) {
             psEnc->sCmn.nFramesPerPacket = 1;
             psEnc->sCmn.nb_subfr = PacketSize_ms == 10 ? 2 : 1;
-            psEnc->sCmn.frame_length = SKP_SMULBB( PacketSize_ms, fs_kHz );
-            psEnc->sCmn.pitch_LPC_win_length = SKP_SMULBB( FIND_PITCH_LPC_WIN_MS_2_SF, fs_kHz );
+            psEnc->sCmn.frame_length = silk_SMULBB( PacketSize_ms, fs_kHz );
+            psEnc->sCmn.pitch_LPC_win_length = silk_SMULBB( FIND_PITCH_LPC_WIN_MS_2_SF, fs_kHz );
             if( psEnc->sCmn.fs_kHz == 8 ) {
                 psEnc->sCmn.pitch_contour_iCDF = silk_pitch_contour_10_ms_NB_iCDF;
             } else {
                 psEnc->sCmn.pitch_contour_iCDF = silk_pitch_contour_10_ms_iCDF;
             }
         } else {
-            psEnc->sCmn.nFramesPerPacket = SKP_DIV32_16( PacketSize_ms, MAX_FRAME_LENGTH_MS );
+            psEnc->sCmn.nFramesPerPacket = silk_DIV32_16( PacketSize_ms, MAX_FRAME_LENGTH_MS );
             psEnc->sCmn.nb_subfr = MAX_NB_SUBFR;
-            psEnc->sCmn.frame_length = SKP_SMULBB( 20, fs_kHz );
-            psEnc->sCmn.pitch_LPC_win_length = SKP_SMULBB( FIND_PITCH_LPC_WIN_MS, fs_kHz );
+            psEnc->sCmn.frame_length = silk_SMULBB( 20, fs_kHz );
+            psEnc->sCmn.pitch_LPC_win_length = silk_SMULBB( FIND_PITCH_LPC_WIN_MS, fs_kHz );
             if( psEnc->sCmn.fs_kHz == 8 ) {
                 psEnc->sCmn.pitch_contour_iCDF = silk_pitch_contour_NB_iCDF;
             } else {
@@ -236,20 +236,20 @@
     }
 
     /* Set internal sampling frequency */
-    SKP_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 );
-    SKP_assert( psEnc->sCmn.nb_subfr == 2 || psEnc->sCmn.nb_subfr == 4 );
+    silk_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 );
+    silk_assert( psEnc->sCmn.nb_subfr == 2 || psEnc->sCmn.nb_subfr == 4 );
     if( psEnc->sCmn.fs_kHz != fs_kHz ) {
         /* reset part of the state */
 #ifdef FIXED_POINT
-        SKP_memset( &psEnc->sShape,               0, sizeof( silk_shape_state_FIX ) );
-        SKP_memset( &psEnc->sPrefilt,             0, sizeof( silk_prefilter_state_FIX ) );
+        silk_memset( &psEnc->sShape,               0, sizeof( silk_shape_state_FIX ) );
+        silk_memset( &psEnc->sPrefilt,             0, sizeof( silk_prefilter_state_FIX ) );
 #else
-        SKP_memset( &psEnc->sShape,               0, sizeof( silk_shape_state_FLP ) );
-        SKP_memset( &psEnc->sPrefilt,             0, sizeof( silk_prefilter_state_FLP ) );
+        silk_memset( &psEnc->sShape,               0, sizeof( silk_shape_state_FLP ) );
+        silk_memset( &psEnc->sPrefilt,             0, sizeof( silk_prefilter_state_FLP ) );
 #endif
-        SKP_memset( &psEnc->sCmn.sNSQ,            0, sizeof( silk_nsq_state ) );
-        SKP_memset( psEnc->sCmn.prev_NLSFq_Q15,   0, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) );
-        SKP_memset( &psEnc->sCmn.sLP.In_LP_State, 0, sizeof( psEnc->sCmn.sLP.In_LP_State ) );
+        silk_memset( &psEnc->sCmn.sNSQ,            0, sizeof( silk_nsq_state ) );
+        silk_memset( psEnc->sCmn.prev_NLSFq_Q15,   0, sizeof( psEnc->sCmn.prev_NLSFq_Q15 ) );
+        silk_memset( &psEnc->sCmn.sLP.In_LP_State, 0, sizeof( psEnc->sCmn.sLP.In_LP_State ) );
         psEnc->sCmn.inputBufIx                  = 0;
         psEnc->sCmn.nFramesEncoded              = 0;
         psEnc->sCmn.TargetRate_bps              = 0;     /* trigger new SNR computation */
@@ -284,14 +284,14 @@
             psEnc->sCmn.psNLSF_CB  = &silk_NLSF_CB_WB;
         }
         psEnc->sCmn.subfr_length   = SUB_FRAME_LENGTH_MS * fs_kHz;
-        psEnc->sCmn.frame_length   = SKP_SMULBB( psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr );
-        psEnc->sCmn.ltp_mem_length = SKP_SMULBB( LTP_MEM_LENGTH_MS, fs_kHz );
-        psEnc->sCmn.la_pitch       = SKP_SMULBB( LA_PITCH_MS, fs_kHz );
-        psEnc->sCmn.max_pitch_lag  = SKP_SMULBB( 18, fs_kHz );
+        psEnc->sCmn.frame_length   = silk_SMULBB( psEnc->sCmn.subfr_length, psEnc->sCmn.nb_subfr );
+        psEnc->sCmn.ltp_mem_length = silk_SMULBB( LTP_MEM_LENGTH_MS, fs_kHz );
+        psEnc->sCmn.la_pitch       = silk_SMULBB( LA_PITCH_MS, fs_kHz );
+        psEnc->sCmn.max_pitch_lag  = silk_SMULBB( 18, fs_kHz );
         if( psEnc->sCmn.nb_subfr == MAX_NB_SUBFR ) {
-            psEnc->sCmn.pitch_LPC_win_length = SKP_SMULBB( FIND_PITCH_LPC_WIN_MS, fs_kHz );
+            psEnc->sCmn.pitch_LPC_win_length = silk_SMULBB( FIND_PITCH_LPC_WIN_MS, fs_kHz );
         } else {
-            psEnc->sCmn.pitch_LPC_win_length = SKP_SMULBB( FIND_PITCH_LPC_WIN_MS_2_SF, fs_kHz );
+            psEnc->sCmn.pitch_LPC_win_length = silk_SMULBB( FIND_PITCH_LPC_WIN_MS_2_SF, fs_kHz );
         }
         if( psEnc->sCmn.fs_kHz == 16 ) {
             psEnc->sCmn.mu_LTP_Q9 = SILK_FIX_CONST( MU_LTP_QUANT_WB, 9 );
@@ -306,7 +306,7 @@
     }
 
     /* Check that settings are valid */
-    SKP_assert( ( psEnc->sCmn.subfr_length * psEnc->sCmn.nb_subfr ) == psEnc->sCmn.frame_length );
+    silk_assert( ( psEnc->sCmn.subfr_length * psEnc->sCmn.nb_subfr ) == psEnc->sCmn.frame_length );
 
     return ret;
 }
@@ -319,7 +319,7 @@
     opus_int ret = 0;
 
     /* Set encoding complexity */
-    SKP_assert( Complexity >= 0 && Complexity <= 10 );
+    silk_assert( Complexity >= 0 && Complexity <= 10 );
     if( Complexity < 2 ) {
         psEncC->pitchEstimationComplexity       = SILK_PE_MIN_COMPLEX;
         psEncC->pitchEstimationThreshold_Q16    = SILK_FIX_CONST( 0.8, 16 );
@@ -378,17 +378,17 @@
     }
 
     /* Do not allow higher pitch estimation LPC order than predict LPC order */
-    psEncC->pitchEstimationLPCOrder = SKP_min_int( psEncC->pitchEstimationLPCOrder, psEncC->predictLPCOrder );
+    psEncC->pitchEstimationLPCOrder = silk_min_int( psEncC->pitchEstimationLPCOrder, psEncC->predictLPCOrder );
     psEncC->shapeWinLength          = SUB_FRAME_LENGTH_MS * psEncC->fs_kHz + 2 * psEncC->la_shape;
     psEncC->Complexity              = Complexity;
 
-    SKP_assert( psEncC->pitchEstimationLPCOrder <= MAX_FIND_PITCH_LPC_ORDER );
-    SKP_assert( psEncC->shapingLPCOrder         <= MAX_SHAPE_LPC_ORDER      );
-    SKP_assert( psEncC->nStatesDelayedDecision  <= MAX_DEL_DEC_STATES       );
-    SKP_assert( psEncC->warping_Q16             <= 32767                    );
-    SKP_assert( psEncC->la_shape                <= LA_SHAPE_MAX             );
-    SKP_assert( psEncC->shapeWinLength          <= SHAPE_LPC_WIN_MAX        );
-    SKP_assert( psEncC->NLSF_MSVQ_Survivors     <= NLSF_VQ_MAX_SURVIVORS    );
+    silk_assert( psEncC->pitchEstimationLPCOrder <= MAX_FIND_PITCH_LPC_ORDER );
+    silk_assert( psEncC->shapingLPCOrder         <= MAX_SHAPE_LPC_ORDER      );
+    silk_assert( psEncC->nStatesDelayedDecision  <= MAX_DEL_DEC_STATES       );
+    silk_assert( psEncC->warping_Q16             <= 32767                    );
+    silk_assert( psEncC->la_shape                <= LA_SHAPE_MAX             );
+    silk_assert( psEncC->shapeWinLength          <= SHAPE_LPC_WIN_MAX        );
+    silk_assert( psEncC->NLSF_MSVQ_Survivors     <= NLSF_VQ_MAX_SURVIVORS    );
 
     return ret;
 }
@@ -410,12 +410,12 @@
         } else {
             LBRR_rate_thres_bps = LBRR_WB_MIN_RATE_BPS;
         }
-        LBRR_rate_thres_bps = SKP_SMULWB( SKP_MUL( LBRR_rate_thres_bps, 125 - SKP_min( psEncC->PacketLoss_perc, 25 ) ), SILK_FIX_CONST( 0.01, 16 ) );
+        LBRR_rate_thres_bps = silk_SMULWB( silk_MUL( LBRR_rate_thres_bps, 125 - silk_min( psEncC->PacketLoss_perc, 25 ) ), SILK_FIX_CONST( 0.01, 16 ) );
 
         if( TargetRate_bps > LBRR_rate_thres_bps ) {
             /* Set gain increase for coding LBRR excitation */
             psEncC->LBRR_enabled = 1;
-            psEncC->LBRR_GainIncreases = SKP_max_int( 7 - SKP_SMULWB( psEncC->PacketLoss_perc, SILK_FIX_CONST( 0.4, 16 ) ), 2 );
+            psEncC->LBRR_GainIncreases = silk_max_int( 7 - silk_SMULWB( psEncC->PacketLoss_perc, SILK_FIX_CONST( 0.4, 16 ) ), 2 );
         }
     }
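A quick worked example of the frame partitioning above, assuming MAX_FRAME_LENGTH_MS is 20 ms (consistent with 60 ms payloads mapping to three frames):

#include <stdio.h>

int main( void )
{
    int fs_kHz = 16, PacketSize_ms = 40;

    /* Payloads longer than 10 ms are split into 20 ms frames of four 5 ms subframes. */
    int nFramesPerPacket = PacketSize_ms / 20;   /* 2 frames                 */
    int frame_length     = 20 * fs_kHz;          /* 320 samples per frame    */
    int subfr_length     = 5  * fs_kHz;          /* 80 samples per subframe  */

    printf( "%d frames, %d samples each, %d per subframe\n",
            nFramesPerPacket, frame_length, subfr_length );
    return 0;
}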
 
diff --git a/silk/silk_create_init_destroy.c b/silk/silk_create_init_destroy.c
index 261fa45..236a745 100644
--- a/silk/silk_create_init_destroy.c
+++ b/silk/silk_create_init_destroy.c
@@ -40,7 +40,7 @@
 )
 {
     /* Clear the entire encoder state, except anything copied */
-    SKP_memset( psDec, 0, sizeof( silk_decoder_state ) );
+    silk_memset( psDec, 0, sizeof( silk_decoder_state ) );
 
     /* Used to deactivate e.g. LSF interpolation and fluctuation reduction */
     psDec->first_frame_after_reset = 1;
diff --git a/silk/silk_debug.c b/silk/silk_debug.c
index b2f2da9..ecd2d95 100644
--- a/silk/silk_debug.c
+++ b/silk/silk_debug.c
@@ -64,17 +64,17 @@
 
 int           silk_Timer_nTimers = 0;
 int           silk_Timer_depth_ctr = 0;
-char          silk_Timer_tags[SKP_NUM_TIMERS_MAX][SKP_NUM_TIMERS_MAX_TAG_LEN];
+char          silk_Timer_tags[silk_NUM_TIMERS_MAX][silk_NUM_TIMERS_MAX_TAG_LEN];
 #ifdef WIN32
-LARGE_INTEGER silk_Timer_start[SKP_NUM_TIMERS_MAX];
+LARGE_INTEGER silk_Timer_start[silk_NUM_TIMERS_MAX];
 #else
-unsigned long silk_Timer_start[SKP_NUM_TIMERS_MAX];
+unsigned long silk_Timer_start[silk_NUM_TIMERS_MAX];
 #endif
-unsigned int  silk_Timer_cnt[SKP_NUM_TIMERS_MAX];
-opus_int64     silk_Timer_min[SKP_NUM_TIMERS_MAX];
-opus_int64     silk_Timer_sum[SKP_NUM_TIMERS_MAX];
-opus_int64     silk_Timer_max[SKP_NUM_TIMERS_MAX];
-opus_int64     silk_Timer_depth[SKP_NUM_TIMERS_MAX];
+unsigned int  silk_Timer_cnt[silk_NUM_TIMERS_MAX];
+opus_int64     silk_Timer_min[silk_NUM_TIMERS_MAX];
+opus_int64     silk_Timer_sum[silk_NUM_TIMERS_MAX];
+opus_int64     silk_Timer_max[silk_NUM_TIMERS_MAX];
+opus_int64     silk_Timer_depth[silk_NUM_TIMERS_MAX];
 
 #ifdef WIN32
 void silk_TimerSave(char *file_name)
@@ -118,9 +118,9 @@
                 fprintf(fp, "    %-24s", silk_Timer_tags[k]);
             }
             avg = (1e6 * silk_Timer_sum[k] / silk_Timer_cnt[k] - del) / lpFrequency.QuadPart;
-            fprintf(fp, "%8.2f", (1e6 * (SKP_max_64(silk_Timer_min[k] - del, 0))) / lpFrequency.QuadPart);
+            fprintf(fp, "%8.2f", (1e6 * (silk_max_64(silk_Timer_min[k] - del, 0))) / lpFrequency.QuadPart);
             fprintf(fp, "%12.2f %6.2f", avg, 100.0 * avg / sum_avg * silk_Timer_cnt[k]);
-            fprintf(fp, "%12.2f", (1e6 * (SKP_max_64(silk_Timer_max[k] - del, 0))) / lpFrequency.QuadPart);
+            fprintf(fp, "%12.2f", (1e6 * (silk_max_64(silk_Timer_max[k] - del, 0))) / lpFrequency.QuadPart);
             fprintf(fp, "%10d\n", silk_Timer_cnt[k]);
         }
         fprintf(fp, "                                microseconds\n");
@@ -164,7 +164,7 @@
 #endif /* SILK_TIC_TOC */
 
 #if SILK_DEBUG
-FILE *silk_debug_store_fp[ SKP_NUM_STORES_MAX ];
+FILE *silk_debug_store_fp[ silk_NUM_STORES_MAX ];
 int silk_debug_store_count = 0;
 #endif /* SILK_DEBUG */
 
diff --git a/silk/silk_debug.h b/silk/silk_debug.h
index 835b55e..77d390d 100644
--- a/silk/silk_debug.h
+++ b/silk/silk_debug.h
@@ -84,30 +84,30 @@
 /*                                                                  */
 /* and call the following just before exiting (from main)           */
 /*                                                                  */
-/* silk_TimerSave("SKP_TimingData.txt");                             */
+/* silk_TimerSave("silk_TimingData.txt");                             */
 /*                                                                  */
-/* results are now in SKP_TimingData.txt                            */
+/* results are now in silk_TimingData.txt                            */
 
 void silk_TimerSave(char *file_name);
 
 /* max number of timers (in different locations) */
-#define SKP_NUM_TIMERS_MAX                  50
+#define silk_NUM_TIMERS_MAX                  50
 /* max length of name tags in TIC(..), TOC(..) */
-#define SKP_NUM_TIMERS_MAX_TAG_LEN          30
+#define silk_NUM_TIMERS_MAX_TAG_LEN          30
 
 extern int           silk_Timer_nTimers;
 extern int           silk_Timer_depth_ctr;
-extern char          silk_Timer_tags[SKP_NUM_TIMERS_MAX][SKP_NUM_TIMERS_MAX_TAG_LEN];
+extern char          silk_Timer_tags[silk_NUM_TIMERS_MAX][silk_NUM_TIMERS_MAX_TAG_LEN];
 #ifdef _WIN32
-extern LARGE_INTEGER silk_Timer_start[SKP_NUM_TIMERS_MAX];
+extern LARGE_INTEGER silk_Timer_start[silk_NUM_TIMERS_MAX];
 #else
-extern unsigned long silk_Timer_start[SKP_NUM_TIMERS_MAX];
+extern unsigned long silk_Timer_start[silk_NUM_TIMERS_MAX];
 #endif
-extern unsigned int  silk_Timer_cnt[SKP_NUM_TIMERS_MAX];
-extern opus_int64     silk_Timer_sum[SKP_NUM_TIMERS_MAX];
-extern opus_int64     silk_Timer_max[SKP_NUM_TIMERS_MAX];
-extern opus_int64     silk_Timer_min[SKP_NUM_TIMERS_MAX];
-extern opus_int64     silk_Timer_depth[SKP_NUM_TIMERS_MAX];
+extern unsigned int  silk_Timer_cnt[silk_NUM_TIMERS_MAX];
+extern opus_int64     silk_Timer_sum[silk_NUM_TIMERS_MAX];
+extern opus_int64     silk_Timer_max[silk_NUM_TIMERS_MAX];
+extern opus_int64     silk_Timer_min[silk_NUM_TIMERS_MAX];
+extern opus_int64     silk_Timer_depth[silk_NUM_TIMERS_MAX];
 
 /* WARNING: TIC()/TOC can measure only up to 0.1 seconds at a time */
 #ifdef _WIN32
@@ -267,8 +267,8 @@
 
 #else
 
-#define SKP_NUM_STORES_MAX                                  100
-extern FILE *silk_debug_store_fp[ SKP_NUM_STORES_MAX ];
+#define silk_NUM_STORES_MAX                                  100
+extern FILE *silk_debug_store_fp[ silk_NUM_STORES_MAX ];
 extern int silk_debug_store_count;
 
 /* Faster way of storing the data */
@@ -293,7 +293,7 @@
 #endif
 
 /* micro sec */
-#define SKP_GETTIME(void)       time = (opus_int64) silk_GetHighResolutionTime();
+#define silk_GETTIME(void)       time = (opus_int64) silk_GetHighResolutionTime();
 
 #else /* SILK_DEBUG */
 
diff --git a/silk/silk_dec_API.c b/silk/silk_dec_API.c
index 18a0024..9ae47d1 100644
--- a/silk/silk_dec_API.c
+++ b/silk/silk_dec_API.c
@@ -105,7 +105,7 @@
     if( decControl->nChannelsInternal > psDec->nChannelsInternal ) {
         ret += silk_init_decoder( &channel_state[ 1 ] );
         if( psDec->nChannelsAPI == 2 ) {
-            SKP_memcpy( &channel_state[ 1 ].resampler_state, &channel_state[ 0 ].resampler_state, sizeof( silk_resampler_state_struct ) );
+            silk_memcpy( &channel_state[ 1 ].resampler_state, &channel_state[ 0 ].resampler_state, sizeof( silk_resampler_state_struct ) );
         }
     }
 
@@ -129,12 +129,12 @@
                 channel_state[ n ].nFramesPerPacket = 3;
                 channel_state[ n ].nb_subfr = 4;
             } else {
-                SKP_assert( 0 );
+                silk_assert( 0 );
                 return SILK_DEC_INVALID_FRAME_SIZE;
             }
             fs_kHz_dec = ( decControl->internalSampleRate >> 10 ) + 1;
             if( fs_kHz_dec != 8 && fs_kHz_dec != 12 && fs_kHz_dec != 16 ) {
-                SKP_assert( 0 );
+                silk_assert( 0 );
                 return SILK_DEC_INVALID_SAMPLING_FREQUENCY;
             }
             silk_decoder_set_fs( &channel_state[ n ], fs_kHz_dec );
@@ -143,15 +143,15 @@
 
     /* Initialize resampler when switching internal or external sampling frequency */
     if( prev_fs_kHz != channel_state[ 0 ].fs_kHz || channel_state[ 0 ].prev_API_sampleRate != decControl->API_sampleRate ) {
-        ret = silk_resampler_init( &channel_state[ 0 ].resampler_state, SKP_SMULBB( channel_state[ 0 ].fs_kHz, 1000 ), decControl->API_sampleRate );
+        ret = silk_resampler_init( &channel_state[ 0 ].resampler_state, silk_SMULBB( channel_state[ 0 ].fs_kHz, 1000 ), decControl->API_sampleRate );
         if( decControl->nChannelsAPI == 2 && decControl->nChannelsInternal == 2 ) {
-            SKP_memcpy( &channel_state[ 1 ].resampler_state, &channel_state[ 0 ].resampler_state, sizeof( silk_resampler_state_struct ) );
+            silk_memcpy( &channel_state[ 1 ].resampler_state, &channel_state[ 0 ].resampler_state, sizeof( silk_resampler_state_struct ) );
         }
     }
     channel_state[ 0 ].prev_API_sampleRate = decControl->API_sampleRate;
     if( decControl->nChannelsAPI == 2 && decControl->nChannelsInternal == 2 && ( psDec->nChannelsAPI == 1 || psDec->nChannelsInternal == 1 ) ) {
-        SKP_memset( psDec->sStereo.pred_prev_Q13, 0, sizeof( psDec->sStereo.pred_prev_Q13 ) );
-        SKP_memset( psDec->sStereo.sSide, 0, sizeof( psDec->sStereo.sSide ) );
+        silk_memset( psDec->sStereo.pred_prev_Q13, 0, sizeof( psDec->sStereo.pred_prev_Q13 ) );
+        silk_memset( psDec->sStereo.sSide, 0, sizeof( psDec->sStereo.sSide ) );
     }
     psDec->nChannelsAPI      = decControl->nChannelsAPI;
     psDec->nChannelsInternal = decControl->nChannelsInternal;
@@ -172,14 +172,14 @@
         }
         /* Decode LBRR flags */
         for( n = 0; n < decControl->nChannelsInternal; n++ ) {
-            SKP_memset( channel_state[ n ].LBRR_flags, 0, sizeof( channel_state[ n ].LBRR_flags ) );
+            silk_memset( channel_state[ n ].LBRR_flags, 0, sizeof( channel_state[ n ].LBRR_flags ) );
             if( channel_state[ n ].LBRR_flag ) {
                 if( channel_state[ n ].nFramesPerPacket == 1 ) {
                     channel_state[ n ].LBRR_flags[ 0 ] = 1;
                 } else {
                     LBRR_symbol = ec_dec_icdf( psRangeDec, silk_LBRR_flags_iCDF_ptr[ channel_state[ n ].nFramesPerPacket - 2 ], 8 ) + 1;
                     for( i = 0; i < channel_state[ n ].nFramesPerPacket; i++ ) {
-                        channel_state[ n ].LBRR_flags[ i ] = SKP_RSHIFT( LBRR_symbol, i ) & 1;
+                        channel_state[ n ].LBRR_flags[ i ] = silk_RSHIFT( LBRR_symbol, i ) & 1;
                     }
                 }
             }
@@ -232,7 +232,7 @@
         if( n == 0 || decode_only_middle == 0 ) {
             ret += silk_decode_frame( &channel_state[ n ], psRangeDec, &samplesOut1_tmp[ n ][ 2 ], &nSamplesOutDec, lostFlag );
         } else {
-            SKP_memset( &samplesOut1_tmp[ n ][ 2 ], 0, nSamplesOutDec * sizeof( opus_int16 ) );
+            silk_memset( &samplesOut1_tmp[ n ][ 2 ], 0, nSamplesOutDec * sizeof( opus_int16 ) );
         }
     }
 
@@ -241,12 +241,12 @@
         silk_stereo_MS_to_LR( &psDec->sStereo, samplesOut1_tmp[ 0 ], samplesOut1_tmp[ 1 ], MS_pred_Q13, channel_state[ 0 ].fs_kHz, nSamplesOutDec );
     } else {
         /* Buffering */
-        SKP_memcpy( samplesOut1_tmp[ 0 ], psDec->sStereo.sMid, 2 * sizeof( opus_int16 ) );
-        SKP_memcpy( psDec->sStereo.sMid, &samplesOut1_tmp[ 0 ][ nSamplesOutDec ], 2 * sizeof( opus_int16 ) );
+        silk_memcpy( samplesOut1_tmp[ 0 ], psDec->sStereo.sMid, 2 * sizeof( opus_int16 ) );
+        silk_memcpy( psDec->sStereo.sMid, &samplesOut1_tmp[ 0 ][ nSamplesOutDec ], 2 * sizeof( opus_int16 ) );
     }
 
     /* Number of output samples */
-    *nSamplesOut = SKP_DIV32( nSamplesOutDec * decControl->API_sampleRate, SKP_SMULBB( channel_state[ 0 ].fs_kHz, 1000 ) );
+    *nSamplesOut = silk_DIV32( nSamplesOutDec * decControl->API_sampleRate, silk_SMULBB( channel_state[ 0 ].fs_kHz, 1000 ) );
 
     /* Set up pointers to temp buffers */
     if( decControl->nChannelsAPI == 2 ) {
@@ -255,7 +255,7 @@
         resample_out_ptr = samplesOut;
     }
 
-    for( n = 0; n < SKP_min( decControl->nChannelsAPI, decControl->nChannelsInternal ); n++ ) {
+    for( n = 0; n < silk_min( decControl->nChannelsAPI, decControl->nChannelsInternal ); n++ ) {
         /* Resample decoded signal to API_sampleRate */
         ret += silk_resampler( &channel_state[ n ].resampler_state, resample_out_ptr, &samplesOut1_tmp[ n ][ 1 ], nSamplesOutDec );
 
@@ -294,14 +294,14 @@
         return -1;
     }
 
-    SKP_memset( Silk_TOC, 0, sizeof( Silk_TOC ) );
+    silk_memset( Silk_TOC, 0, sizeof( Silk_TOC ) );
 
     /* For stereo, extract the flags for the mid channel */
-    flags = SKP_RSHIFT( payload[ 0 ], 7 - nFramesPerPayload ) & ( SKP_LSHIFT( 1, nFramesPerPayload + 1 ) - 1 );
+    flags = silk_RSHIFT( payload[ 0 ], 7 - nFramesPerPayload ) & ( silk_LSHIFT( 1, nFramesPerPayload + 1 ) - 1 );
 
     Silk_TOC->inbandFECFlag = flags & 1;
     for( i = nFramesPerPayload - 1; i >= 0 ; i-- ) {
-        flags = SKP_RSHIFT( flags, 1 );
+        flags = silk_RSHIFT( flags, 1 );
         Silk_TOC->VADFlags[ i ] = flags & 1;
         Silk_TOC->VADFlag |= flags & 1;
     }
diff --git a/silk/silk_decode_core.c b/silk/silk_decode_core.c
index 550a86c..a6554c8 100644
--- a/silk/silk_decode_core.c
+++ b/silk/silk_decode_core.c
@@ -49,7 +49,7 @@
     opus_int32 res_Q10[ MAX_SUB_FRAME_LENGTH ];
     opus_int32 vec_Q10[ MAX_SUB_FRAME_LENGTH ];
 
-    SKP_assert( psDec->prev_inv_gain_Q16 != 0 );
+    silk_assert( psDec->prev_inv_gain_Q16 != 0 );
 
     offset_Q10 = silk_Quantization_Offsets_Q10[ psDec->indices.signalType >> 1 ][ psDec->indices.quantOffsetType ];
 
@@ -62,8 +62,8 @@
     /* Decode excitation */
     rand_seed = psDec->indices.Seed;
     for( i = 0; i < psDec->frame_length; i++ ) {
-        rand_seed = SKP_RAND( rand_seed );
-        psDec->exc_Q10[ i ] = SKP_LSHIFT( ( opus_int32 )pulses[ i ], 10 );
+        rand_seed = silk_RAND( rand_seed );
+        psDec->exc_Q10[ i ] = silk_LSHIFT( ( opus_int32 )pulses[ i ], 10 );
         if( psDec->exc_Q10[ i ] > 0 ) {
             psDec->exc_Q10[ i ] -= QUANT_LEVEL_ADJUST_Q10;
         } else
@@ -71,9 +71,9 @@
             psDec->exc_Q10[ i ] += QUANT_LEVEL_ADJUST_Q10;
         }
         psDec->exc_Q10[ i ] += offset_Q10;
-        psDec->exc_Q10[ i ] ^= SKP_RSHIFT( rand_seed, 31 );
+        psDec->exc_Q10[ i ] ^= silk_RSHIFT( rand_seed, 31 );
 
-        rand_seed = SKP_ADD32_ovflw(rand_seed, pulses[ i ]);
+        rand_seed = silk_ADD32_ovflw(rand_seed, pulses[ i ]);
     }
 
 #ifdef SAVE_ALL_INTERNAL_DATA
@@ -89,13 +89,13 @@
         A_Q12 = psDecCtrl->PredCoef_Q12[ k >> 1 ];
 
         /* Preload LPC coeficients to array on stack. Gives small performance gain */
-        SKP_memcpy( A_Q12_tmp, A_Q12, psDec->LPC_order * sizeof( opus_int16 ) );
+        silk_memcpy( A_Q12_tmp, A_Q12, psDec->LPC_order * sizeof( opus_int16 ) );
         B_Q14        = &psDecCtrl->LTPCoef_Q14[ k * LTP_ORDER ];
         Gain_Q16     = psDecCtrl->Gains_Q16[ k ];
         signalType   = psDec->indices.signalType;
 
-        inv_gain_Q16 = silk_INVERSE32_varQ( SKP_max( Gain_Q16, 1 ), 32 );
-        inv_gain_Q16 = SKP_min( inv_gain_Q16, SKP_int16_MAX );
+        inv_gain_Q16 = silk_INVERSE32_varQ( silk_max( Gain_Q16, 1 ), 32 );
+        inv_gain_Q16 = silk_min( inv_gain_Q16, silk_int16_MAX );
 
         /* Calculate Gain adjustment factor */
         gain_adj_Q16 = 1 << 16;
@@ -104,19 +104,19 @@
 
             /* Scale short term state */
             for( i = 0; i < MAX_LPC_ORDER; i++ ) {
-                psDec->sLPC_Q14[ i ] = SKP_SMULWW( gain_adj_Q16, psDec->sLPC_Q14[ i ] );
+                psDec->sLPC_Q14[ i ] = silk_SMULWW( gain_adj_Q16, psDec->sLPC_Q14[ i ] );
             }
         }
 
         /* Save inv_gain */
-        SKP_assert( inv_gain_Q16 != 0 );
+        silk_assert( inv_gain_Q16 != 0 );
         psDec->prev_inv_gain_Q16 = inv_gain_Q16;
 
         /* Avoid abrupt transition from voiced PLC to unvoiced normal decoding */
         if( psDec->lossCnt && psDec->prevSignalType == TYPE_VOICED &&
             psDec->indices.signalType != TYPE_VOICED && k < MAX_NB_SUBFR/2 ) {
 
-            SKP_memset( B_Q14, 0, LTP_ORDER * sizeof( opus_int16 ) );
+            silk_memset( B_Q14, 0, LTP_ORDER * sizeof( opus_int16 ) );
             B_Q14[ LTP_ORDER/2 ] = SILK_FIX_CONST( 0.25, 14 );
 
             signalType = TYPE_VOICED;
@@ -128,28 +128,28 @@
             lag = psDecCtrl->pitchL[ k ];
 
             /* Re-whitening */
-            if( ( k & ( 3 - SKP_LSHIFT( NLSF_interpolation_flag, 1 ) ) ) == 0 ) {
+            if( ( k & ( 3 - silk_LSHIFT( NLSF_interpolation_flag, 1 ) ) ) == 0 ) {
                 /* Rewhiten with new A coefs */
                 start_idx = psDec->ltp_mem_length - lag - psDec->LPC_order - LTP_ORDER / 2;
-                SKP_assert( start_idx > 0 );
+                silk_assert( start_idx > 0 );
 
                 silk_LPC_analysis_filter( &sLTP[ start_idx ], &psDec->outBuf[ start_idx + k * psDec->subfr_length ],
                     A_Q12, psDec->ltp_mem_length - start_idx, psDec->LPC_order );
 
                 /* After rewhitening the LTP state is unscaled */
-                inv_gain_Q32 = SKP_LSHIFT( inv_gain_Q16, 16 );
+                inv_gain_Q32 = silk_LSHIFT( inv_gain_Q16, 16 );
                 if( k == 0 ) {
                     /* Do LTP downscaling */
-                    inv_gain_Q32 = SKP_LSHIFT( SKP_SMULWB( inv_gain_Q32, psDecCtrl->LTP_scale_Q14 ), 2 );
+                    inv_gain_Q32 = silk_LSHIFT( silk_SMULWB( inv_gain_Q32, psDecCtrl->LTP_scale_Q14 ), 2 );
                 }
                 for( i = 0; i < lag + LTP_ORDER/2; i++ ) {
-                    psDec->sLTP_Q16[ sLTP_buf_idx - i - 1 ] = SKP_SMULWB( inv_gain_Q32, sLTP[ psDec->ltp_mem_length - i - 1 ] );
+                    psDec->sLTP_Q16[ sLTP_buf_idx - i - 1 ] = silk_SMULWB( inv_gain_Q32, sLTP[ psDec->ltp_mem_length - i - 1 ] );
                 }
             } else {
                 /* Update LTP state when Gain changes */
                 if( gain_adj_Q16 != 1 << 16 ) {
                     for( i = 0; i < lag + LTP_ORDER/2; i++ ) {
-                        psDec->sLTP_Q16[ sLTP_buf_idx - i - 1 ] = SKP_SMULWW( gain_adj_Q16, psDec->sLTP_Q16[ sLTP_buf_idx - i - 1 ] );
+                        psDec->sLTP_Q16[ sLTP_buf_idx - i - 1 ] = silk_SMULWW( gain_adj_Q16, psDec->sLTP_Q16[ sLTP_buf_idx - i - 1 ] );
                     }
                 }
             }
@@ -161,18 +161,18 @@
             pred_lag_ptr = &psDec->sLTP_Q16[ sLTP_buf_idx - lag + LTP_ORDER / 2 ];
             for( i = 0; i < psDec->subfr_length; i++ ) {
                 /* Unrolled loop */
-                LTP_pred_Q14 = SKP_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
-                LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
-                LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
-                LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
-                LTP_pred_Q14 = SKP_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
+                LTP_pred_Q14 = silk_SMULWB(               pred_lag_ptr[  0 ], B_Q14[ 0 ] );
+                LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], B_Q14[ 1 ] );
+                LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], B_Q14[ 2 ] );
+                LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], B_Q14[ 3 ] );
+                LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], B_Q14[ 4 ] );
                 pred_lag_ptr++;
 
                 /* Generate LPC excitation */
-                pres_Q10[ i ] = SKP_ADD32( pexc_Q10[ i ], SKP_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );
+                pres_Q10[ i ] = silk_ADD32( pexc_Q10[ i ], silk_RSHIFT_ROUND( LTP_pred_Q14, 4 ) );
 
                 /* Update states */
-                psDec->sLTP_Q16[ sLTP_buf_idx ] = SKP_LSHIFT( pres_Q10[ i ], 6 );
+                psDec->sLTP_Q16[ sLTP_buf_idx ] = silk_LSHIFT( pres_Q10[ i ], 6 );
                 sLTP_buf_idx++;
             }
         } else {
@@ -186,40 +186,40 @@
 
         for( i = 0; i < psDec->subfr_length; i++ ) {
             /* Partially unrolled */
-            LPC_pred_Q10 = SKP_SMULWB(               psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  1 ], A_Q12_tmp[ 0 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  2 ], A_Q12_tmp[ 1 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  3 ], A_Q12_tmp[ 2 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  4 ], A_Q12_tmp[ 3 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  5 ], A_Q12_tmp[ 4 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  6 ], A_Q12_tmp[ 5 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  7 ], A_Q12_tmp[ 6 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  8 ], A_Q12_tmp[ 7 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  9 ], A_Q12_tmp[ 8 ] );
-            LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 10 ], A_Q12_tmp[ 9 ] );
+            LPC_pred_Q10 = silk_SMULWB(               psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  1 ], A_Q12_tmp[ 0 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  2 ], A_Q12_tmp[ 1 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  3 ], A_Q12_tmp[ 2 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  4 ], A_Q12_tmp[ 3 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  5 ], A_Q12_tmp[ 4 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  6 ], A_Q12_tmp[ 5 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  7 ], A_Q12_tmp[ 6 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  8 ], A_Q12_tmp[ 7 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i -  9 ], A_Q12_tmp[ 8 ] );
+            LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - 10 ], A_Q12_tmp[ 9 ] );
             for( j = 10; j < psDec->LPC_order; j++ ) {
-                LPC_pred_Q10 = SKP_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - j - 1 ], A_Q12_tmp[ j ] );
+                LPC_pred_Q10 = silk_SMLAWB( LPC_pred_Q10, psDec->sLPC_Q14[ MAX_LPC_ORDER + i - j - 1 ], A_Q12_tmp[ j ] );
             }
 
             /* Add prediction to LPC excitation */
-            vec_Q10[ i ] = SKP_ADD32( pres_Q10[ i ], LPC_pred_Q10 );
+            vec_Q10[ i ] = silk_ADD32( pres_Q10[ i ], LPC_pred_Q10 );
 
             /* Update states */
-            psDec->sLPC_Q14[ MAX_LPC_ORDER + i ] = SKP_LSHIFT( vec_Q10[ i ], 4 );
+            psDec->sLPC_Q14[ MAX_LPC_ORDER + i ] = silk_LSHIFT( vec_Q10[ i ], 4 );
         }
 
         /* Scale with Gain */
         for( i = 0; i < psDec->subfr_length; i++ ) {
-            pxq[ i ] = ( opus_int16 )SKP_SAT16( SKP_RSHIFT_ROUND( SKP_SMULWW( vec_Q10[ i ], Gain_Q16 ), 10 ) );
+            pxq[ i ] = ( opus_int16 )silk_SAT16( silk_RSHIFT_ROUND( silk_SMULWW( vec_Q10[ i ], Gain_Q16 ), 10 ) );
         }
 
         /* Update LPC filter state */
-        SKP_memcpy( psDec->sLPC_Q14, &psDec->sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );
+        silk_memcpy( psDec->sLPC_Q14, &psDec->sLPC_Q14[ psDec->subfr_length ], MAX_LPC_ORDER * sizeof( opus_int32 ) );
         pexc_Q10 += psDec->subfr_length;
         pxq      += psDec->subfr_length;
     }
 
     /* Copy to output */
-    SKP_memcpy( xq, &psDec->outBuf[ psDec->ltp_mem_length ], psDec->frame_length * sizeof( opus_int16 ) );
+    silk_memcpy( xq, &psDec->outBuf[ psDec->ltp_mem_length ], psDec->frame_length * sizeof( opus_int16 ) );
 
 #ifdef SAVE_ALL_INTERNAL_DATA
     DEBUG_STORE_DATA( dec_sLTP_Q16.dat, &psDec->sLTP_Q16[ psDec->ltp_mem_length ], psDec->frame_length * sizeof( opus_int32 ));
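The excitation sign dither above is also branch-free: silk_RSHIFT( rand_seed, 31 ) replicates the seed's sign bit across the word, and XOR-ing with that mask either leaves the sample alone or replaces it with its one's complement ( -x - 1 ), i.e. flips its sign up to an offset of one. A minimal sketch, assuming arithmetic right shift as the fixed-point macros do:

#include <stdio.h>
#include <stdint.h>

int main( void )
{
    int32_t sample_Q10 = 5 << 10;        /* 5.0 in Q10     */

    int32_t seed_pos = 123456789;        /* sign bit clear */
    int32_t seed_neg = -123456789;       /* sign bit set   */

    int32_t mask_pos = seed_pos >> 31;   /* 0x00000000 */
    int32_t mask_neg = seed_neg >> 31;   /* 0xFFFFFFFF */

    /* XOR with all-zeros is a no-op; XOR with all-ones is ~x = -x - 1. */
    printf( "unchanged: %ld  flipped: %ld\n",
            (long)( sample_Q10 ^ mask_pos ),
            (long)( sample_Q10 ^ mask_neg ) );
    return 0;
}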
diff --git a/silk/silk_decode_frame.c b/silk/silk_decode_frame.c
index 50ff38d..3e918cf 100644
--- a/silk/silk_decode_frame.c
+++ b/silk/silk_decode_frame.c
@@ -53,7 +53,7 @@
     sDecCtrl.LTP_scale_Q14 = 0;
 
     /* Safety checks */
-    SKP_assert( L > 0 && L <= MAX_FRAME_LENGTH );
+    silk_assert( L > 0 && L <= MAX_FRAME_LENGTH );
 
     if(   lostFlag == FLAG_DECODE_NORMAL ||
         ( lostFlag == FLAG_DECODE_LBRR && psDec->LBRR_flags[ psDec->nFramesDecoded ] == 1 ) )
@@ -97,7 +97,7 @@
 
         psDec->lossCnt = 0;
         psDec->prevSignalType = psDec->indices.signalType;
-        SKP_assert( psDec->prevSignalType >= 0 && psDec->prevSignalType <= 2 );
+        silk_assert( psDec->prevSignalType >= 0 && psDec->prevSignalType <= 2 );
 
         /* A frame has been decoded without errors */
         psDec->first_frame_after_reset = 0;
@@ -109,10 +109,10 @@
     /*************************/
     /* Update output buffer. */
     /*************************/
-    SKP_assert( psDec->ltp_mem_length >= psDec->frame_length );
+    silk_assert( psDec->ltp_mem_length >= psDec->frame_length );
     mv_len = psDec->ltp_mem_length - psDec->frame_length;
-    SKP_memmove( psDec->outBuf, &psDec->outBuf[ psDec->frame_length ], mv_len * sizeof(opus_int16) );
-    SKP_memcpy( &psDec->outBuf[ mv_len ], pOut, psDec->frame_length * sizeof( opus_int16 ) );
+    silk_memmove( psDec->outBuf, &psDec->outBuf[ psDec->frame_length ], mv_len * sizeof(opus_int16) );
+    silk_memcpy( &psDec->outBuf[ mv_len ], pOut, psDec->frame_length * sizeof( opus_int16 ) );
 
     /****************************************************************/
     /* Ensure smooth connection of extrapolated and good frames     */
diff --git a/silk/silk_decode_indices.c b/silk/silk_decode_indices.c
index 06dd4ce..99afd54 100644
--- a/silk/silk_decode_indices.c
+++ b/silk/silk_decode_indices.c
@@ -59,7 +59,7 @@
     } else {
         Ix = ec_dec_icdf( psRangeDec, silk_type_offset_no_VAD_iCDF, 8 );
     }
-    psDec->indices.signalType      = (opus_int8)SKP_RSHIFT( Ix, 1 );
+    psDec->indices.signalType      = (opus_int8)silk_RSHIFT( Ix, 1 );
     psDec->indices.quantOffsetType = (opus_int8)( Ix & 1 );
 
     /****************/
@@ -71,7 +71,7 @@
         psDec->indices.GainsIndices[ 0 ] = (opus_int8)ec_dec_icdf( psRangeDec, silk_delta_gain_iCDF, 8 );
     } else {
         /* Independent coding, in two stages: MSB bits followed by 3 LSBs */
-        psDec->indices.GainsIndices[ 0 ]  = (opus_int8)SKP_LSHIFT( ec_dec_icdf( psRangeDec, silk_gain_iCDF[ psDec->indices.signalType ], 8 ), 3 );
+        psDec->indices.GainsIndices[ 0 ]  = (opus_int8)silk_LSHIFT( ec_dec_icdf( psRangeDec, silk_gain_iCDF[ psDec->indices.signalType ], 8 ), 3 );
         psDec->indices.GainsIndices[ 0 ] += (opus_int8)ec_dec_icdf( psRangeDec, silk_uniform8_iCDF, 8 );
     }
 
@@ -85,7 +85,7 @@
     /**********************/
     psDec->indices.NLSFIndices[ 0 ] = (opus_int8)ec_dec_icdf( psRangeDec, &psDec->psNLSF_CB->CB1_iCDF[ ( psDec->indices.signalType >> 1 ) * psDec->psNLSF_CB->nVectors ], 8 );
     silk_NLSF_unpack( ec_ix, pred_Q8, psDec->psNLSF_CB, psDec->indices.NLSFIndices[ 0 ] );
-    SKP_assert( psDec->psNLSF_CB->order == psDec->LPC_order );
+    silk_assert( psDec->psNLSF_CB->order == psDec->LPC_order );
     for( i = 0; i < psDec->psNLSF_CB->order; i++ ) {
         Ix = ec_dec_icdf( psRangeDec, &psDec->psNLSF_CB->ec_iCDF[ ec_ix[ i ] ], 8 );
         if( Ix == 0 ) {
@@ -121,7 +121,7 @@
         }
         if( decode_absolute_lagIndex ) {
             /* Absolute decoding */
-            psDec->indices.lagIndex  = (opus_int16)ec_dec_icdf( psRangeDec, silk_pitch_lag_iCDF, 8 ) * SKP_RSHIFT( psDec->fs_kHz, 1 );
+            psDec->indices.lagIndex  = (opus_int16)ec_dec_icdf( psRangeDec, silk_pitch_lag_iCDF, 8 ) * silk_RSHIFT( psDec->fs_kHz, 1 );
             psDec->indices.lagIndex += (opus_int16)ec_dec_icdf( psRangeDec, psDec->pitch_lag_low_bits_iCDF, 8 );
         }
         psDec->ec_prevLagIndex = psDec->indices.lagIndex;
diff --git a/silk/silk_decode_parameters.c b/silk/silk_decode_parameters.c
index 9840439..a4b1fc0 100644
--- a/silk/silk_decode_parameters.c
+++ b/silk/silk_decode_parameters.c
@@ -63,7 +63,7 @@
         /* Calculation of the interpolated NLSF0 vector from the interpolation factor, */
         /* the previous NLSF1, and the current NLSF1                                   */
         for( i = 0; i < psDec->LPC_order; i++ ) {
-            pNLSF0_Q15[ i ] = psDec->prevNLSF_Q15[ i ] + SKP_RSHIFT( SKP_MUL( psDec->indices.NLSFInterpCoef_Q2,
+            pNLSF0_Q15[ i ] = psDec->prevNLSF_Q15[ i ] + silk_RSHIFT( silk_MUL( psDec->indices.NLSFInterpCoef_Q2,
                 pNLSF_Q15[ i ] - psDec->prevNLSF_Q15[ i ] ), 2 );
         }
 
@@ -71,11 +71,11 @@
         silk_NLSF2A( psDecCtrl->PredCoef_Q12[ 0 ], pNLSF0_Q15, psDec->LPC_order );
     } else {
         /* Copy LPC coefficients for first half from second half */
-        SKP_memcpy( psDecCtrl->PredCoef_Q12[ 0 ], psDecCtrl->PredCoef_Q12[ 1 ],
+        silk_memcpy( psDecCtrl->PredCoef_Q12[ 0 ], psDecCtrl->PredCoef_Q12[ 1 ],
             psDec->LPC_order * sizeof( opus_int16 ) );
     }
 
-    SKP_memcpy( psDec->prevNLSF_Q15, pNLSF_Q15, psDec->LPC_order * sizeof( opus_int16 ) );
+    silk_memcpy( psDec->prevNLSF_Q15, pNLSF_Q15, psDec->LPC_order * sizeof( opus_int16 ) );
 
     /* After a packet loss do BWE of LPC coefs */
     if( psDec->lossCnt ) {
@@ -97,7 +97,7 @@
         for( k = 0; k < psDec->nb_subfr; k++ ) {
             Ix = psDec->indices.LTPIndex[ k ];
             for( i = 0; i < LTP_ORDER; i++ ) {
-                psDecCtrl->LTPCoef_Q14[ k * LTP_ORDER + i ] = SKP_LSHIFT( cbk_ptr_Q7[ Ix * LTP_ORDER + i ], 7 );
+                psDecCtrl->LTPCoef_Q14[ k * LTP_ORDER + i ] = silk_LSHIFT( cbk_ptr_Q7[ Ix * LTP_ORDER + i ], 7 );
             }
         }
 
@@ -107,8 +107,8 @@
         Ix = psDec->indices.LTP_scaleIndex;
         psDecCtrl->LTP_scale_Q14 = silk_LTPScales_table_Q14[ Ix ];
     } else {
-        SKP_memset( psDecCtrl->pitchL,      0,             psDec->nb_subfr * sizeof( opus_int   ) );
-        SKP_memset( psDecCtrl->LTPCoef_Q14, 0, LTP_ORDER * psDec->nb_subfr * sizeof( opus_int16 ) );
+        silk_memset( psDecCtrl->pitchL,      0,             psDec->nb_subfr * sizeof( opus_int   ) );
+        silk_memset( psDecCtrl->LTPCoef_Q14, 0, LTP_ORDER * psDec->nb_subfr * sizeof( opus_int16 ) );
         psDec->indices.PERIndex  = 0;
         psDecCtrl->LTP_scale_Q14 = 0;
     }
diff --git a/silk/silk_decode_pitch.c b/silk/silk_decode_pitch.c
index f2ac5d2..00df918 100644
--- a/silk/silk_decode_pitch.c
+++ b/silk/silk_decode_pitch.c
@@ -51,7 +51,7 @@
             Lag_CB_ptr = &silk_CB_lags_stage2[ 0 ][ 0 ];
             cbk_size   = PE_NB_CBKS_STAGE2_EXT;
         } else {
-            SKP_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 );
+            silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 );
             Lag_CB_ptr = &silk_CB_lags_stage2_10_ms[ 0 ][ 0 ];
             cbk_size   = PE_NB_CBKS_STAGE2_10MS;
         }
@@ -60,18 +60,18 @@
             Lag_CB_ptr = &silk_CB_lags_stage3[ 0 ][ 0 ];
             cbk_size   = PE_NB_CBKS_STAGE3_MAX;
         } else {
-            SKP_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 );
+            silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1 );
             Lag_CB_ptr = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ];
             cbk_size   = PE_NB_CBKS_STAGE3_10MS;
         }
     }
 
-    min_lag = SKP_SMULBB( PE_MIN_LAG_MS, Fs_kHz );
-    max_lag = SKP_SMULBB( PE_MAX_LAG_MS, Fs_kHz );
+    min_lag = silk_SMULBB( PE_MIN_LAG_MS, Fs_kHz );
+    max_lag = silk_SMULBB( PE_MAX_LAG_MS, Fs_kHz );
     lag = min_lag + lagIndex;
 
     for( k = 0; k < nb_subfr; k++ ) {
         pitch_lags[ k ] = lag + matrix_ptr( Lag_CB_ptr, k, contourIndex, cbk_size );
-        pitch_lags[ k ] = SKP_LIMIT( pitch_lags[ k ], min_lag, max_lag );
+        pitch_lags[ k ] = silk_LIMIT( pitch_lags[ k ], min_lag, max_lag );
     }
 }
diff --git a/silk/silk_decode_pulses.c b/silk/silk_decode_pulses.c
index f4f21c8..924bcb9 100644
--- a/silk/silk_decode_pulses.c
+++ b/silk/silk_decode_pulses.c
@@ -53,10 +53,10 @@
     RateLevelIndex = ec_dec_icdf( psRangeDec, silk_rate_levels_iCDF[ signalType >> 1 ], 8 );
 
     /* Calculate number of shell blocks */
-    SKP_assert( 1 << LOG2_SHELL_CODEC_FRAME_LENGTH == SHELL_CODEC_FRAME_LENGTH );
-    iter = SKP_RSHIFT( frame_length, LOG2_SHELL_CODEC_FRAME_LENGTH );
+    silk_assert( 1 << LOG2_SHELL_CODEC_FRAME_LENGTH == SHELL_CODEC_FRAME_LENGTH );
+    iter = silk_RSHIFT( frame_length, LOG2_SHELL_CODEC_FRAME_LENGTH );
     if( iter * SHELL_CODEC_FRAME_LENGTH < frame_length ){
-        SKP_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */
+        silk_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */
         iter++;
     }
 
@@ -82,9 +82,9 @@
     /***************************************************/
     for( i = 0; i < iter; i++ ) {
         if( sum_pulses[ i ] > 0 ) {
-            silk_shell_decoder( &pulses[ SKP_SMULBB( i, SHELL_CODEC_FRAME_LENGTH ) ], psRangeDec, sum_pulses[ i ] );
+            silk_shell_decoder( &pulses[ silk_SMULBB( i, SHELL_CODEC_FRAME_LENGTH ) ], psRangeDec, sum_pulses[ i ] );
         } else {
-            SKP_memset( &pulses[ SKP_SMULBB( i, SHELL_CODEC_FRAME_LENGTH ) ], 0, SHELL_CODEC_FRAME_LENGTH * sizeof( opus_int ) );
+            silk_memset( &pulses[ silk_SMULBB( i, SHELL_CODEC_FRAME_LENGTH ) ], 0, SHELL_CODEC_FRAME_LENGTH * sizeof( opus_int ) );
         }
     }
 
@@ -94,11 +94,11 @@
     for( i = 0; i < iter; i++ ) {
         if( nLshifts[ i ] > 0 ) {
             nLS = nLshifts[ i ];
-            pulses_ptr = &pulses[ SKP_SMULBB( i, SHELL_CODEC_FRAME_LENGTH ) ];
+            pulses_ptr = &pulses[ silk_SMULBB( i, SHELL_CODEC_FRAME_LENGTH ) ];
             for( k = 0; k < SHELL_CODEC_FRAME_LENGTH; k++ ) {
                 abs_q = pulses_ptr[ k ];
                 for( j = 0; j < nLS; j++ ) {
-                    abs_q = SKP_LSHIFT( abs_q, 1 );
+                    abs_q = silk_LSHIFT( abs_q, 1 );
                     abs_q += ec_dec_icdf( psRangeDec, silk_lsb_iCDF, 8 );
                 }
                 pulses_ptr[ k ] = abs_q;
diff --git a/silk/silk_decoder_set_fs.c b/silk/silk_decoder_set_fs.c
index 1f7e275..dea1836 100644
--- a/silk/silk_decoder_set_fs.c
+++ b/silk/silk_decoder_set_fs.c
@@ -39,15 +39,15 @@
 {
     opus_int frame_length;
 
-    SKP_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 );
-    SKP_assert( psDec->nb_subfr == MAX_NB_SUBFR || psDec->nb_subfr == MAX_NB_SUBFR/2 );
+    silk_assert( fs_kHz == 8 || fs_kHz == 12 || fs_kHz == 16 );
+    silk_assert( psDec->nb_subfr == MAX_NB_SUBFR || psDec->nb_subfr == MAX_NB_SUBFR/2 );
 
-    psDec->subfr_length = SKP_SMULBB( SUB_FRAME_LENGTH_MS, fs_kHz );
-    frame_length = SKP_SMULBB( psDec->nb_subfr, psDec->subfr_length );
+    psDec->subfr_length = silk_SMULBB( SUB_FRAME_LENGTH_MS, fs_kHz );
+    frame_length = silk_SMULBB( psDec->nb_subfr, psDec->subfr_length );
     if( psDec->fs_kHz != fs_kHz || frame_length != psDec->frame_length ) {
         psDec->fs_kHz  = fs_kHz;
         psDec->frame_length   = frame_length;
-        psDec->ltp_mem_length = SKP_SMULBB( LTP_MEM_LENGTH_MS, fs_kHz );
+        psDec->ltp_mem_length = silk_SMULBB( LTP_MEM_LENGTH_MS, fs_kHz );
         if( psDec->fs_kHz == 8 ) {
             if( psDec->nb_subfr == MAX_NB_SUBFR ) {
                 psDec->pitch_contour_iCDF = silk_pitch_contour_NB_iCDF;
@@ -70,9 +70,9 @@
         }
 
         /* Reset part of the decoder state */
-        SKP_memset( psDec->sLPC_Q14,     0,                    sizeof( psDec->sLPC_Q14 ) );
-        SKP_memset( psDec->outBuf,       0, MAX_FRAME_LENGTH * sizeof( opus_int16 ) );
-        SKP_memset( psDec->prevNLSF_Q15, 0,                    sizeof( psDec->prevNLSF_Q15 ) );
+        silk_memset( psDec->sLPC_Q14,     0,                    sizeof( psDec->sLPC_Q14 ) );
+        silk_memset( psDec->outBuf,       0, MAX_FRAME_LENGTH * sizeof( opus_int16 ) );
+        silk_memset( psDec->prevNLSF_Q15, 0,                    sizeof( psDec->prevNLSF_Q15 ) );
 
         psDec->lagPrev                 = 100;
         psDec->LastGainIndex           = 10;
@@ -87,11 +87,11 @@
             psDec->pitch_lag_low_bits_iCDF = silk_uniform4_iCDF;
         } else {
             /* unsupported sampling rate */
-            SKP_assert( 0 );
+            silk_assert( 0 );
         }
     }
 
     /* Check that settings are valid */
-    SKP_assert( psDec->frame_length > 0 && psDec->frame_length <= MAX_FRAME_LENGTH );
+    silk_assert( psDec->frame_length > 0 && psDec->frame_length <= MAX_FRAME_LENGTH );
 }
 
diff --git a/silk/silk_enc_API.c b/silk/silk_enc_API.c
index c447b26..ecd8abe 100644
--- a/silk/silk_enc_API.c
+++ b/silk/silk_enc_API.c
@@ -67,10 +67,10 @@
     psEnc = (silk_encoder *)encState;
 
     /* Reset encoder */
-    SKP_memset( psEnc, 0, sizeof( silk_encoder ) );
+    silk_memset( psEnc, 0, sizeof( silk_encoder ) );
     for( n = 0; n < ENCODER_NUM_CHANNELS; n++ ) {
         if( ret += silk_init_encoder( &psEnc->state_Fxx[ n ] ) ) {
-            SKP_assert( 0 );
+            silk_assert( 0 );
         }
     }
 
@@ -79,7 +79,7 @@
 
     /* Read control structure */
     if( ret += silk_QueryEncoder( encState, encStatus ) ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
     }
 
     return ret;
@@ -112,7 +112,7 @@
     encStatus->useInBandFEC              = state_Fxx[ 0 ].sCmn.useInBandFEC;
     encStatus->useDTX                    = state_Fxx[ 0 ].sCmn.useDTX;
     encStatus->useCBR                    = state_Fxx[ 0 ].sCmn.useCBR;
-    encStatus->internalSampleRate        = SKP_SMULBB( state_Fxx[ 0 ].sCmn.fs_kHz, 1000 );
+    encStatus->internalSampleRate        = silk_SMULBB( state_Fxx[ 0 ].sCmn.fs_kHz, 1000 );
     encStatus->allowBandwidthSwitch      = state_Fxx[ 0 ].sCmn.allow_bandwidth_switch;
     encStatus->inWBmodeWithoutVariableLP = state_Fxx[ 0 ].sCmn.fs_kHz == 16 && state_Fxx[ 0 ].sCmn.sLP.mode == 0;
 
@@ -141,38 +141,38 @@
 
     /* Check values in encoder control structure */
     if( ( ret = check_control_input( encControl ) != 0 ) ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return ret;
     }
 
     if( encControl->nChannelsInternal > psEnc->nChannelsInternal ) {
         /* Mono -> Stereo transition: init state of second channel and stereo state */
         ret += silk_init_encoder( &psEnc->state_Fxx[ 1 ] );
-        SKP_memset( psEnc->sStereo.pred_prev_Q13, 0, sizeof( psEnc->sStereo.pred_prev_Q13 ) );
-        SKP_memset( psEnc->sStereo.sSide, 0, sizeof( psEnc->sStereo.sSide ) );
-        SKP_memset( psEnc->sStereo.mid_side_amp_Q0, 0, sizeof( psEnc->sStereo.mid_side_amp_Q0 ) );
+        silk_memset( psEnc->sStereo.pred_prev_Q13, 0, sizeof( psEnc->sStereo.pred_prev_Q13 ) );
+        silk_memset( psEnc->sStereo.sSide, 0, sizeof( psEnc->sStereo.sSide ) );
+        silk_memset( psEnc->sStereo.mid_side_amp_Q0, 0, sizeof( psEnc->sStereo.mid_side_amp_Q0 ) );
         psEnc->sStereo.width_prev_Q14 = 0;
         psEnc->sStereo.smth_width_Q14 = SILK_FIX_CONST( 1, 14 );
         if( psEnc->nChannelsAPI == 2 ) {
-            SKP_memcpy( &psEnc->state_Fxx[ 1 ].sCmn.resampler_state, &psEnc->state_Fxx[ 0 ].sCmn.resampler_state, sizeof( silk_resampler_state_struct ) );
-            SKP_memcpy( &psEnc->state_Fxx[ 1 ].sCmn.In_HP_State,     &psEnc->state_Fxx[ 0 ].sCmn.In_HP_State,     sizeof( psEnc->state_Fxx[ 1 ].sCmn.In_HP_State ) );
+            silk_memcpy( &psEnc->state_Fxx[ 1 ].sCmn.resampler_state, &psEnc->state_Fxx[ 0 ].sCmn.resampler_state, sizeof( silk_resampler_state_struct ) );
+            silk_memcpy( &psEnc->state_Fxx[ 1 ].sCmn.In_HP_State,     &psEnc->state_Fxx[ 0 ].sCmn.In_HP_State,     sizeof( psEnc->state_Fxx[ 1 ].sCmn.In_HP_State ) );
         }
     }
     psEnc->nChannelsAPI = encControl->nChannelsAPI;
     psEnc->nChannelsInternal = encControl->nChannelsInternal;
 
-    nBlocksOf10ms = SKP_DIV32( 100 * nSamplesIn, encControl->API_sampleRate );
+    nBlocksOf10ms = silk_DIV32( 100 * nSamplesIn, encControl->API_sampleRate );
     if( prefillFlag ) {
         /* Only accept input length of 10 ms */
         if( nBlocksOf10ms != 1 ) {
             ret = SILK_ENC_INPUT_INVALID_NO_OF_SAMPLES;
-            SKP_assert( 0 );
+            silk_assert( 0 );
             return ret;
         }
         /* Reset Encoder */
         for( n = 0; n < encControl->nChannelsInternal; n++ ) {
             if( (ret = silk_init_encoder( &psEnc->state_Fxx[ n ] ) ) != 0 ) {
-                SKP_assert( 0 );
+                silk_assert( 0 );
             }
         }
         tmp_payloadSize_ms = encControl->payloadSize_ms;
@@ -187,33 +187,33 @@
         /* Only accept input lengths that are a multiple of 10 ms */
         if( nBlocksOf10ms * encControl->API_sampleRate != 100 * nSamplesIn || nSamplesIn < 0 ) {
             ret = SILK_ENC_INPUT_INVALID_NO_OF_SAMPLES;
-            SKP_assert( 0 );
+            silk_assert( 0 );
             return ret;
         }
         /* Make sure no more than one packet can be produced */
         if( 1000 * (opus_int32)nSamplesIn > encControl->payloadSize_ms * encControl->API_sampleRate ) {
             ret = SILK_ENC_INPUT_INVALID_NO_OF_SAMPLES;
-            SKP_assert( 0 );
+            silk_assert( 0 );
             return ret;
         }
     }
 
-    TargetRate_bps = SKP_RSHIFT32( encControl->bitRate, encControl->nChannelsInternal - 1 );
+    TargetRate_bps = silk_RSHIFT32( encControl->bitRate, encControl->nChannelsInternal - 1 );
     for( n = 0; n < encControl->nChannelsInternal; n++ ) {
         /* JMV: Force the side channel to the same rate as the mid. Is this the right way? */
         int force_fs_kHz = (n==1) ? psEnc->state_Fxx[0].sCmn.fs_kHz : 0;
         if( ( ret = silk_control_encoder( &psEnc->state_Fxx[ n ], encControl, TargetRate_bps, psEnc->allowBandwidthSwitch, n, force_fs_kHz ) ) != 0 ) {
-            SKP_assert( 0 );
+            silk_assert( 0 );
             return ret;
         }
     }
-    SKP_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 0 ].sCmn.fs_kHz == psEnc->state_Fxx[ 1 ].sCmn.fs_kHz );
+    silk_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 0 ].sCmn.fs_kHz == psEnc->state_Fxx[ 1 ].sCmn.fs_kHz );
 
     /* Input buffering/resampling and encoding */
     while( 1 ) {
         nSamplesToBuffer  = psEnc->state_Fxx[ 0 ].sCmn.frame_length - psEnc->state_Fxx[ 0 ].sCmn.inputBufIx;
-        nSamplesToBuffer  = SKP_min( nSamplesToBuffer, 10 * nBlocksOf10ms * psEnc->state_Fxx[ 0 ].sCmn.fs_kHz );
-        nSamplesFromInput = SKP_DIV32_16( nSamplesToBuffer * psEnc->state_Fxx[ 0 ].sCmn.API_fs_Hz, psEnc->state_Fxx[ 0 ].sCmn.fs_kHz * 1000 );
+        nSamplesToBuffer  = silk_min( nSamplesToBuffer, 10 * nBlocksOf10ms * psEnc->state_Fxx[ 0 ].sCmn.fs_kHz );
+        nSamplesFromInput = silk_DIV32_16( nSamplesToBuffer * psEnc->state_Fxx[ 0 ].sCmn.API_fs_Hz, psEnc->state_Fxx[ 0 ].sCmn.fs_kHz * 1000 );
         /* Resample and write to buffer */
         if( encControl->nChannelsAPI == 2 && encControl->nChannelsInternal == 2 ) {
             for( n = 0; n < nSamplesFromInput; n++ ) {
@@ -224,7 +224,7 @@
             psEnc->state_Fxx[ 0 ].sCmn.inputBufIx += nSamplesToBuffer;
 
             nSamplesToBuffer  = psEnc->state_Fxx[ 1 ].sCmn.frame_length - psEnc->state_Fxx[ 1 ].sCmn.inputBufIx;
-            nSamplesToBuffer  = SKP_min( nSamplesToBuffer, 10 * nBlocksOf10ms * psEnc->state_Fxx[ 1 ].sCmn.fs_kHz );
+            nSamplesToBuffer  = silk_min( nSamplesToBuffer, 10 * nBlocksOf10ms * psEnc->state_Fxx[ 1 ].sCmn.fs_kHz );
             for( n = 0; n < nSamplesFromInput; n++ ) {
                 buf[ n ] = samplesIn[ 2 * n + 1 ];
             }
@@ -234,13 +234,13 @@
         } else if( encControl->nChannelsAPI == 2 && encControl->nChannelsInternal == 1 ) {
             /* Combine left and right channels before resampling */
             for( n = 0; n < nSamplesFromInput; n++ ) {
-                buf[ n ] = (opus_int16)SKP_RSHIFT_ROUND( samplesIn[ 2 * n ] + samplesIn[ 2 * n + 1 ],  1 );
+                buf[ n ] = (opus_int16)silk_RSHIFT_ROUND( samplesIn[ 2 * n ] + samplesIn[ 2 * n + 1 ],  1 );
             }
             ret += silk_resampler( &psEnc->state_Fxx[ 0 ].sCmn.resampler_state,
                 &psEnc->state_Fxx[ 0 ].sCmn.inputBuf[ psEnc->state_Fxx[ 0 ].sCmn.inputBufIx + 2 ], buf, nSamplesFromInput );
             psEnc->state_Fxx[ 0 ].sCmn.inputBufIx += nSamplesToBuffer;
         } else {
-            SKP_assert( encControl->nChannelsAPI == 1 && encControl->nChannelsInternal == 1 );
+            silk_assert( encControl->nChannelsAPI == 1 && encControl->nChannelsInternal == 1 );
             ret += silk_resampler( &psEnc->state_Fxx[ 0 ].sCmn.resampler_state,
                 &psEnc->state_Fxx[ 0 ].sCmn.inputBuf[ psEnc->state_Fxx[ 0 ].sCmn.inputBufIx + 2 ], samplesIn, nSamplesFromInput );
             psEnc->state_Fxx[ 0 ].sCmn.inputBufIx += nSamplesToBuffer;
@@ -254,14 +254,14 @@
         /* Silk encoder */
         if( psEnc->state_Fxx[ 0 ].sCmn.inputBufIx >= psEnc->state_Fxx[ 0 ].sCmn.frame_length ) {
             /* Enough data in input buffer, so encode */
-            SKP_assert( psEnc->state_Fxx[ 0 ].sCmn.inputBufIx == psEnc->state_Fxx[ 0 ].sCmn.frame_length );
-            SKP_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 1 ].sCmn.inputBufIx == psEnc->state_Fxx[ 1 ].sCmn.frame_length );
+            silk_assert( psEnc->state_Fxx[ 0 ].sCmn.inputBufIx == psEnc->state_Fxx[ 0 ].sCmn.frame_length );
+            silk_assert( encControl->nChannelsInternal == 1 || psEnc->state_Fxx[ 1 ].sCmn.inputBufIx == psEnc->state_Fxx[ 1 ].sCmn.frame_length );
 
             /* Deal with LBRR data */
             if( psEnc->state_Fxx[ 0 ].sCmn.nFramesEncoded == 0 && !prefillFlag ) {
                 /* Create space at start of payload for VAD and FEC flags */
                 opus_uint8 iCDF[ 2 ] = { 0, 0 };
-                iCDF[ 0 ] = 256 - SKP_RSHIFT( 256, ( psEnc->state_Fxx[ 0 ].sCmn.nFramesPerPacket + 1 ) * encControl->nChannelsInternal );
+                iCDF[ 0 ] = 256 - silk_RSHIFT( 256, ( psEnc->state_Fxx[ 0 ].sCmn.nFramesPerPacket + 1 ) * encControl->nChannelsInternal );
                 ec_enc_icdf( psRangeEnc, 0, iCDF, 8 );
 
                 /* Encode any LBRR data from previous packet */
@@ -269,7 +269,7 @@
                 for( n = 0; n < encControl->nChannelsInternal; n++ ) {
                     LBRR_symbol = 0;
                     for( i = 0; i < psEnc->state_Fxx[ n ].sCmn.nFramesPerPacket; i++ ) {
-                        LBRR_symbol |= SKP_LSHIFT( psEnc->state_Fxx[ n ].sCmn.LBRR_flags[ i ], i );
+                        LBRR_symbol |= silk_LSHIFT( psEnc->state_Fxx[ n ].sCmn.LBRR_flags[ i ], i );
                     }
                     psEnc->state_Fxx[ n ].sCmn.LBRR_flag = LBRR_symbol > 0 ? 1 : 0;
                     if( LBRR_symbol && psEnc->state_Fxx[ n ].sCmn.nFramesPerPacket > 1 ) {
@@ -297,29 +297,29 @@
 
                 /* Reset LBRR flags */
                 for( n = 0; n < encControl->nChannelsInternal; n++ ) {
-                    SKP_memset( psEnc->state_Fxx[ n ].sCmn.LBRR_flags, 0, sizeof( psEnc->state_Fxx[ n ].sCmn.LBRR_flags ) );
+                    silk_memset( psEnc->state_Fxx[ n ].sCmn.LBRR_flags, 0, sizeof( psEnc->state_Fxx[ n ].sCmn.LBRR_flags ) );
                 }
             }
 
             silk_HP_variable_cutoff( psEnc->state_Fxx, psEnc->nChannelsInternal );
 
             /* Total target bits for packet */
-            nBits = SKP_DIV32_16( SKP_MUL( encControl->bitRate, encControl->payloadSize_ms ), 1000 );
+            nBits = silk_DIV32_16( silk_MUL( encControl->bitRate, encControl->payloadSize_ms ), 1000 );
             /* Subtract half of the bits already used */
             if (!prefillFlag)
                 nBits -= ec_tell( psRangeEnc ) >> 1;
             /* Divide by number of uncoded frames left in packet */
-            nBits = SKP_DIV32_16( nBits, psEnc->state_Fxx[ 0 ].sCmn.nFramesPerPacket - psEnc->state_Fxx[ 0 ].sCmn.nFramesEncoded );
+            nBits = silk_DIV32_16( nBits, psEnc->state_Fxx[ 0 ].sCmn.nFramesPerPacket - psEnc->state_Fxx[ 0 ].sCmn.nFramesEncoded );
             /* Convert to bits/second */
             if( encControl->payloadSize_ms == 10 ) {
-                TargetRate_bps = SKP_SMULBB( nBits, 100 );
+                TargetRate_bps = silk_SMULBB( nBits, 100 );
             } else {
-                TargetRate_bps = SKP_SMULBB( nBits, 50 );
+                TargetRate_bps = silk_SMULBB( nBits, 50 );
             }
             /* Subtract fraction of bits in excess of target in previous packets */
-            TargetRate_bps -= SKP_DIV32_16( SKP_MUL( psEnc->nBitsExceeded, 1000 ), BITRESERVOIR_DECAY_TIME_MS );
+            TargetRate_bps -= silk_DIV32_16( silk_MUL( psEnc->nBitsExceeded, 1000 ), BITRESERVOIR_DECAY_TIME_MS );
             /* Never exceed input bitrate */
-            TargetRate_bps = SKP_LIMIT( TargetRate_bps, encControl->bitRate, 5000 );
+            TargetRate_bps = silk_LIMIT( TargetRate_bps, encControl->bitRate, 5000 );
 
             /* Convert Left/Right to Mid/Side */
             if( encControl->nChannelsInternal == 2 ) {
@@ -333,8 +333,8 @@
                 }
             } else {
                 /* Buffering */
-                SKP_memcpy( psEnc->state_Fxx[ 0 ].sCmn.inputBuf, psEnc->sStereo.sMid, 2 * sizeof( opus_int16 ) );
-                SKP_memcpy( psEnc->sStereo.sMid, &psEnc->state_Fxx[ 0 ].sCmn.inputBuf[ psEnc->state_Fxx[ 0 ].sCmn.frame_length ], 2 * sizeof( opus_int16 ) );
+                silk_memcpy( psEnc->state_Fxx[ 0 ].sCmn.inputBuf, psEnc->sStereo.sMid, 2 * sizeof( opus_int16 ) );
+                silk_memcpy( psEnc->sStereo.sMid, &psEnc->state_Fxx[ 0 ].sCmn.inputBuf[ psEnc->state_Fxx[ 0 ].sCmn.frame_length ], 2 * sizeof( opus_int16 ) );
             }
 
             /* Encode */
@@ -349,7 +349,7 @@
                     silk_control_SNR( &psEnc->state_Fxx[ n ].sCmn, channelRate_bps );
 
                     if( ( ret = silk_encode_frame_Fxx( &psEnc->state_Fxx[ n ], nBytesOut, psRangeEnc ) ) != 0 ) {
-                        SKP_assert( 0 );
+                        silk_assert( 0 );
                     }
                 }
 
@@ -362,10 +362,10 @@
                 flags = 0;
                 for( n = 0; n < encControl->nChannelsInternal; n++ ) {
                     for( i = 0; i < psEnc->state_Fxx[ n ].sCmn.nFramesPerPacket; i++ ) {
-                        flags  = SKP_LSHIFT( flags, 1 );
+                        flags  = silk_LSHIFT( flags, 1 );
                         flags |= psEnc->state_Fxx[ n ].sCmn.VAD_flags[ i ];
                     }
-                    flags  = SKP_LSHIFT( flags, 1 );
+                    flags  = silk_LSHIFT( flags, 1 );
                     flags |= psEnc->state_Fxx[ n ].sCmn.LBRR_flag;
                 }
                 if (!prefillFlag)
@@ -377,11 +377,11 @@
                 }
 
                 psEnc->nBitsExceeded += *nBytesOut * 8;
-                psEnc->nBitsExceeded -= SKP_DIV32_16( SKP_MUL( encControl->bitRate, encControl->payloadSize_ms ), 1000 );
-                psEnc->nBitsExceeded  = SKP_LIMIT( psEnc->nBitsExceeded, 0, 10000 );
+                psEnc->nBitsExceeded -= silk_DIV32_16( silk_MUL( encControl->bitRate, encControl->payloadSize_ms ), 1000 );
+                psEnc->nBitsExceeded  = silk_LIMIT( psEnc->nBitsExceeded, 0, 10000 );
 
                 /* Update flag indicating if bandwidth switching is allowed */
-                speech_act_thr_for_switch_Q8 = SKP_SMLAWB( SILK_FIX_CONST( SPEECH_ACTIVITY_DTX_THRES, 8 ),
+                speech_act_thr_for_switch_Q8 = silk_SMLAWB( SILK_FIX_CONST( SPEECH_ACTIVITY_DTX_THRES, 8 ),
                     SILK_FIX_CONST( ( 1 - SPEECH_ACTIVITY_DTX_THRES ) / MAX_BANDWIDTH_SWITCH_DELAY_MS, 16 + 8 ), psEnc->timeSinceSwitchAllowed_ms );
                 if( psEnc->state_Fxx[ 0 ].sCmn.speech_activity_Q8 < speech_act_thr_for_switch_Q8 ) {
                     psEnc->allowBandwidthSwitch = 1;
@@ -402,7 +402,7 @@
 
     encControl->allowBandwidthSwitch = psEnc->allowBandwidthSwitch;
     encControl->inWBmodeWithoutVariableLP = psEnc->state_Fxx[ 0 ].sCmn.fs_kHz == 16 && psEnc->state_Fxx[ 0 ].sCmn.sLP.mode == 0;
-    encControl->internalSampleRate = SKP_SMULBB( psEnc->state_Fxx[ 0 ].sCmn.fs_kHz, 1000 );
+    encControl->internalSampleRate = silk_SMULBB( psEnc->state_Fxx[ 0 ].sCmn.fs_kHz, 1000 );
     encControl->stereoWidth_Q14 = psEnc->sStereo.width_prev_Q14;
     if( prefillFlag ) {
         encControl->payloadSize_ms = tmp_payloadSize_ms;
diff --git a/silk/silk_encode_indices.c b/silk/silk_encode_indices.c
index 31a7aab..61f174f 100644
--- a/silk/silk_encode_indices.c
+++ b/silk/silk_encode_indices.c
@@ -66,8 +66,8 @@
     /* Encode signal type and quantizer offset */
     /*******************************************/
     typeOffset = 2 * psIndices->signalType + psIndices->quantOffsetType;
-    SKP_assert( typeOffset >= 0 && typeOffset < 6 );
-    SKP_assert( encode_LBRR == 0 || typeOffset >= 2 );
+    silk_assert( typeOffset >= 0 && typeOffset < 6 );
+    silk_assert( encode_LBRR == 0 || typeOffset >= 2 );
     if( encode_LBRR || typeOffset >= 2 ) {
         ec_enc_icdf( psRangeEnc, typeOffset - 2, silk_type_offset_VAD_iCDF, 8 );
     } else {
@@ -78,28 +78,28 @@
     /* Encode gains */
     /****************/
 #ifdef SAVE_ALL_INTERNAL_DATA
-    nBytes_before = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+    nBytes_before = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 #endif
     /* first subframe */
     if( condCoding ) {
         /* conditional coding */
-        SKP_assert( psIndices->GainsIndices[ 0 ] >= 0 && psIndices->GainsIndices[ 0 ] < MAX_DELTA_GAIN_QUANT - MIN_DELTA_GAIN_QUANT + 1 );
+        silk_assert( psIndices->GainsIndices[ 0 ] >= 0 && psIndices->GainsIndices[ 0 ] < MAX_DELTA_GAIN_QUANT - MIN_DELTA_GAIN_QUANT + 1 );
         ec_enc_icdf( psRangeEnc, psIndices->GainsIndices[ 0 ], silk_delta_gain_iCDF, 8 );
     } else {
         /* independent coding, in two stages: MSB bits followed by 3 LSBs */
-        SKP_assert( psIndices->GainsIndices[ 0 ] >= 0 && psIndices->GainsIndices[ 0 ] < N_LEVELS_QGAIN );
-        ec_enc_icdf( psRangeEnc, SKP_RSHIFT( psIndices->GainsIndices[ 0 ], 3 ), silk_gain_iCDF[ psIndices->signalType ], 8 );
+        silk_assert( psIndices->GainsIndices[ 0 ] >= 0 && psIndices->GainsIndices[ 0 ] < N_LEVELS_QGAIN );
+        ec_enc_icdf( psRangeEnc, silk_RSHIFT( psIndices->GainsIndices[ 0 ], 3 ), silk_gain_iCDF[ psIndices->signalType ], 8 );
         ec_enc_icdf( psRangeEnc, psIndices->GainsIndices[ 0 ] & 7, silk_uniform8_iCDF, 8 );
     }
 
     /* remaining subframes */
     for( i = 1; i < psEncC->nb_subfr; i++ ) {
-        SKP_assert( psIndices->GainsIndices[ i ] >= 0 && psIndices->GainsIndices[ i ] < MAX_DELTA_GAIN_QUANT - MIN_DELTA_GAIN_QUANT + 1 );
+        silk_assert( psIndices->GainsIndices[ i ] >= 0 && psIndices->GainsIndices[ i ] < MAX_DELTA_GAIN_QUANT - MIN_DELTA_GAIN_QUANT + 1 );
         ec_enc_icdf( psRangeEnc, psIndices->GainsIndices[ i ], silk_delta_gain_iCDF, 8 );
     }
 
 #ifdef SAVE_ALL_INTERNAL_DATA
-    nBytes_after = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+    nBytes_after = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
     nBytes_after -= nBytes_before; /* bytes just added*/
     DEBUG_STORE_DATA( nBytes_gains.dat, &nBytes_after, sizeof( opus_int ) );
 #endif
@@ -108,11 +108,11 @@
     /* Encode NLSFs */
     /****************/
 #ifdef SAVE_ALL_INTERNAL_DATA
-    nBytes_before = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+    nBytes_before = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 #endif
     ec_enc_icdf( psRangeEnc, psIndices->NLSFIndices[ 0 ], &psEncC->psNLSF_CB->CB1_iCDF[ ( psIndices->signalType >> 1 ) * psEncC->psNLSF_CB->nVectors ], 8 );
     silk_NLSF_unpack( ec_ix, pred_Q8, psEncC->psNLSF_CB, psIndices->NLSFIndices[ 0 ] );
-    SKP_assert( psEncC->psNLSF_CB->order == psEncC->predictLPCOrder );
+    silk_assert( psEncC->psNLSF_CB->order == psEncC->predictLPCOrder );
     for( i = 0; i < psEncC->psNLSF_CB->order; i++ ) {
         if( psIndices->NLSFIndices[ i+1 ] >= NLSF_QUANT_MAX_AMPLITUDE ) {
             ec_enc_icdf( psRangeEnc, 2 * NLSF_QUANT_MAX_AMPLITUDE, &psEncC->psNLSF_CB->ec_iCDF[ ec_ix[ i ] ], 8 );
@@ -127,14 +127,14 @@
 
     /* Encode NLSF interpolation factor */
     if( psEncC->nb_subfr == MAX_NB_SUBFR ) {
-        SKP_assert( psEncC->useInterpolatedNLSFs == 1 || psIndices->NLSFInterpCoef_Q2 == ( 1 << 2 ) );
-        SKP_assert( psIndices->NLSFInterpCoef_Q2 >= 0 && psIndices->NLSFInterpCoef_Q2 < 5 );
+        silk_assert( psEncC->useInterpolatedNLSFs == 1 || psIndices->NLSFInterpCoef_Q2 == ( 1 << 2 ) );
+        silk_assert( psIndices->NLSFInterpCoef_Q2 >= 0 && psIndices->NLSFInterpCoef_Q2 < 5 );
         ec_enc_icdf( psRangeEnc, psIndices->NLSFInterpCoef_Q2, silk_NLSF_interpolation_factor_iCDF, 8 );
     }
 
 #ifdef SAVE_ALL_INTERNAL_DATA
     DEBUG_STORE_DATA( lsf_interpol.dat, &psIndices->NLSFInterpCoef_Q2, sizeof(int) );
-    nBytes_after = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+    nBytes_after = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
     nBytes_after -= nBytes_before; /* bytes just added*/
     DEBUG_STORE_DATA( nBytes_LSF.dat, &nBytes_after, sizeof( opus_int ) );
 #endif
@@ -145,7 +145,7 @@
         /* Encode pitch lags */
         /*********************/
 #ifdef SAVE_ALL_INTERNAL_DATA
-        nBytes_before = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        nBytes_before = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 #endif
         /* lag index */
         encode_absolute_lagIndex = 1;
@@ -158,38 +158,38 @@
                 delta_lagIndex = delta_lagIndex + 9;
                 encode_absolute_lagIndex = 0; /* Only use delta */
             }
-            SKP_assert( delta_lagIndex >= 0 && delta_lagIndex < 21 );
+            silk_assert( delta_lagIndex >= 0 && delta_lagIndex < 21 );
             ec_enc_icdf( psRangeEnc, delta_lagIndex, silk_pitch_delta_iCDF, 8 );
         }
         if( encode_absolute_lagIndex ) {
             /* Absolute encoding */
             opus_int32 pitch_high_bits, pitch_low_bits;
-            pitch_high_bits = SKP_DIV32_16( psIndices->lagIndex, SKP_RSHIFT( psEncC->fs_kHz, 1 ) );
-            pitch_low_bits = psIndices->lagIndex - SKP_SMULBB( pitch_high_bits, SKP_RSHIFT( psEncC->fs_kHz, 1 ) );
-            SKP_assert( pitch_low_bits < psEncC->fs_kHz / 2 );
-            SKP_assert( pitch_high_bits < 32 );
+            pitch_high_bits = silk_DIV32_16( psIndices->lagIndex, silk_RSHIFT( psEncC->fs_kHz, 1 ) );
+            pitch_low_bits = psIndices->lagIndex - silk_SMULBB( pitch_high_bits, silk_RSHIFT( psEncC->fs_kHz, 1 ) );
+            silk_assert( pitch_low_bits < psEncC->fs_kHz / 2 );
+            silk_assert( pitch_high_bits < 32 );
             ec_enc_icdf( psRangeEnc, pitch_high_bits, silk_pitch_lag_iCDF, 8 );
             ec_enc_icdf( psRangeEnc, pitch_low_bits, psEncC->pitch_lag_low_bits_iCDF, 8 );
         }
         psEncC->ec_prevLagIndex = psIndices->lagIndex;
 
 #ifdef SAVE_ALL_INTERNAL_DATA
-        nBytes_after = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        nBytes_after = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
         nBytes_lagIndex = nBytes_after - nBytes_before; /* bytes just added*/
 #endif
 
 #ifdef SAVE_ALL_INTERNAL_DATA
-        nBytes_before = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        nBytes_before = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 #endif
         /* Contour index */
-        SKP_assert(   psIndices->contourIndex  >= 0 );
-        SKP_assert( ( psIndices->contourIndex < 34 && psEncC->fs_kHz  > 8 && psEncC->nb_subfr == 4 ) ||
+        silk_assert(   psIndices->contourIndex  >= 0 );
+        silk_assert( ( psIndices->contourIndex < 34 && psEncC->fs_kHz  > 8 && psEncC->nb_subfr == 4 ) ||
                     ( psIndices->contourIndex < 11 && psEncC->fs_kHz == 8 && psEncC->nb_subfr == 4 ) ||
                     ( psIndices->contourIndex < 12 && psEncC->fs_kHz  > 8 && psEncC->nb_subfr == 2 ) ||
                     ( psIndices->contourIndex <  3 && psEncC->fs_kHz == 8 && psEncC->nb_subfr == 2 ) );
         ec_enc_icdf( psRangeEnc, psIndices->contourIndex, psEncC->pitch_contour_iCDF, 8 );
 #ifdef SAVE_ALL_INTERNAL_DATA
-        nBytes_after = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        nBytes_after = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
         nBytes_contourIndex = nBytes_after - nBytes_before; /* bytes just added*/
 #endif
 
@@ -197,16 +197,16 @@
         /* Encode LTP gains */
         /********************/
 #ifdef SAVE_ALL_INTERNAL_DATA
-        nBytes_before = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        nBytes_before = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 #endif
 
         /* PERIndex value */
-        SKP_assert( psIndices->PERIndex >= 0 && psIndices->PERIndex < 3 );
+        silk_assert( psIndices->PERIndex >= 0 && psIndices->PERIndex < 3 );
         ec_enc_icdf( psRangeEnc, psIndices->PERIndex, silk_LTP_per_index_iCDF, 8 );
 
         /* Codebook Indices */
         for( k = 0; k < psEncC->nb_subfr; k++ ) {
-            SKP_assert( psIndices->LTPIndex[ k ] >= 0 && psIndices->LTPIndex[ k ] < ( 8 << psIndices->PERIndex ) );
+            silk_assert( psIndices->LTPIndex[ k ] >= 0 && psIndices->LTPIndex[ k ] < ( 8 << psIndices->PERIndex ) );
             ec_enc_icdf( psRangeEnc, psIndices->LTPIndex[ k ], silk_LTP_gain_iCDF_ptrs[ psIndices->PERIndex ], 8 );
         }
 
@@ -214,13 +214,13 @@
         /* Encode LTP scaling */
         /**********************/
         if( !condCoding ) {
-            SKP_assert( psIndices->LTP_scaleIndex >= 0 && psIndices->LTP_scaleIndex < 3 );
+            silk_assert( psIndices->LTP_scaleIndex >= 0 && psIndices->LTP_scaleIndex < 3 );
             ec_enc_icdf( psRangeEnc, psIndices->LTP_scaleIndex, silk_LTPscale_iCDF, 8 );
         }
-        SKP_assert( !condCoding || psIndices->LTP_scaleIndex == 0 );
+        silk_assert( !condCoding || psIndices->LTP_scaleIndex == 0 );
 
 #ifdef SAVE_ALL_INTERNAL_DATA
-        nBytes_after = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+        nBytes_after = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
         nBytes_LTP = nBytes_after - nBytes_before; /* bytes just added*/
 #endif
     }
@@ -239,12 +239,12 @@
     psEncC->ec_prevSignalType = psIndices->signalType;
 
 #ifdef SAVE_ALL_INTERNAL_DATA
-    nBytes_before = SKP_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
+    nBytes_before = silk_RSHIFT( ec_tell( psRangeEnc ) + 7, 3 );
 #endif
 
     /***************/
     /* Encode seed */
     /***************/
-    SKP_assert( psIndices->Seed >= 0 && psIndices->Seed < 4 );
+    silk_assert( psIndices->Seed >= 0 && psIndices->Seed < 4 );
     ec_enc_icdf( psRangeEnc, psIndices->Seed, silk_uniform4_iCDF, 8 );
 }
diff --git a/silk/silk_encode_pulses.c b/silk/silk_encode_pulses.c
index 3f1369e..9c529ff 100644
--- a/silk/silk_encode_pulses.c
+++ b/silk/silk_encode_pulses.c
@@ -75,26 +75,26 @@
     const opus_uint8 *cdf_ptr;
     const opus_uint8 *nBits_ptr;
 
-    SKP_memset( pulses_comb, 0, 8 * sizeof( opus_int ) ); /* Fixing Valgrind reported problem*/
+    silk_memset( pulses_comb, 0, 8 * sizeof( opus_int ) ); /* Fixing Valgrind reported problem*/
 
     /****************************/
     /* Prepare for shell coding */
     /****************************/
     /* Calculate number of shell blocks */
-    SKP_assert( 1 << LOG2_SHELL_CODEC_FRAME_LENGTH == SHELL_CODEC_FRAME_LENGTH );
-    iter = SKP_RSHIFT( frame_length, LOG2_SHELL_CODEC_FRAME_LENGTH );
+    silk_assert( 1 << LOG2_SHELL_CODEC_FRAME_LENGTH == SHELL_CODEC_FRAME_LENGTH );
+    iter = silk_RSHIFT( frame_length, LOG2_SHELL_CODEC_FRAME_LENGTH );
     if( iter * SHELL_CODEC_FRAME_LENGTH < frame_length ){
-        SKP_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */
+        silk_assert( frame_length == 12 * 10 ); /* Make sure only happens for 10 ms @ 12 kHz */
         iter++;
-        SKP_memset( &pulses[ frame_length ], 0, SHELL_CODEC_FRAME_LENGTH * sizeof(opus_int8));
+        silk_memset( &pulses[ frame_length ], 0, SHELL_CODEC_FRAME_LENGTH * sizeof(opus_int8));
     }
 
     /* Take the absolute value of the pulses */
     for( i = 0; i < iter * SHELL_CODEC_FRAME_LENGTH; i+=4 ) {
-        abs_pulses[i+0] = ( opus_int )SKP_abs( pulses[ i + 0 ] );
-        abs_pulses[i+1] = ( opus_int )SKP_abs( pulses[ i + 1 ] );
-        abs_pulses[i+2] = ( opus_int )SKP_abs( pulses[ i + 2 ] );
-        abs_pulses[i+3] = ( opus_int )SKP_abs( pulses[ i + 3 ] );
+        abs_pulses[i+0] = ( opus_int )silk_abs( pulses[ i + 0 ] );
+        abs_pulses[i+1] = ( opus_int )silk_abs( pulses[ i + 1 ] );
+        abs_pulses[i+2] = ( opus_int )silk_abs( pulses[ i + 2 ] );
+        abs_pulses[i+3] = ( opus_int )silk_abs( pulses[ i + 3 ] );
     }
 
     /* Calc sum pulses per shell code frame */
@@ -116,7 +116,7 @@
                 /* We need to downscale the quantization signal */
                 nRshifts[ i ]++;
                 for( k = 0; k < SHELL_CODEC_FRAME_LENGTH; k++ ) {
-                    abs_pulses_ptr[ k ] = SKP_RSHIFT( abs_pulses_ptr[ k ], 1 );
+                    abs_pulses_ptr[ k ] = silk_RSHIFT( abs_pulses_ptr[ k ], 1 );
                 }
             } else {
                 /* Jump out of while(1) loop and go to next shell coding frame */
@@ -130,7 +130,7 @@
     /* Rate level */
     /**************/
     /* find rate level that leads to fewest bits for coding of pulses per block info */
-    minSumBits_Q5 = SKP_int32_MAX;
+    minSumBits_Q5 = silk_int32_MAX;
     for( k = 0; k < N_RATE_LEVELS - 1; k++ ) {
         nBits_ptr  = silk_pulses_per_block_BITS_Q5[ k ];
         sumBits_Q5 = silk_rate_levels_BITS_Q5[ signalType >> 1 ][ k ];
@@ -181,9 +181,9 @@
             pulses_ptr = &pulses[ i * SHELL_CODEC_FRAME_LENGTH ];
             nLS = nRshifts[ i ] - 1;
             for( k = 0; k < SHELL_CODEC_FRAME_LENGTH; k++ ) {
-                abs_q = (opus_int8)SKP_abs( pulses_ptr[ k ] );
+                abs_q = (opus_int8)silk_abs( pulses_ptr[ k ] );
                 for( j = nLS; j > 0; j-- ) {
-                    bit = SKP_RSHIFT( abs_q, j ) & 1;
+                    bit = silk_RSHIFT( abs_q, j ) & 1;
                     ec_enc_icdf( psRangeEnc, bit, silk_lsb_iCDF, 8 );
                 }
                 bit = abs_q & 1;
diff --git a/silk/silk_gain_quant.c b/silk/silk_gain_quant.c
index 90ee5bd..0092601 100644
--- a/silk/silk_gain_quant.c
+++ b/silk/silk_gain_quant.c
@@ -48,18 +48,18 @@
 
     for( k = 0; k < nb_subfr; k++ ) {
         /* Add half of previous quantization error, convert to log scale, scale, floor() */
-        ind[ k ] = SKP_SMULWB( SCALE_Q16, silk_lin2log( gain_Q16[ k ] ) - OFFSET );
+        ind[ k ] = silk_SMULWB( SCALE_Q16, silk_lin2log( gain_Q16[ k ] ) - OFFSET );
 
         /* Round towards previous quantized gain (hysteresis) */
         if( ind[ k ] < *prev_ind ) {
             ind[ k ]++;
         }
-        ind[ k ] = SKP_max_int( ind[ k ], 0 );
+        ind[ k ] = silk_max_int( ind[ k ], 0 );
 
         /* Compute delta indices and limit */
         if( k == 0 && conditional == 0 ) {
             /* Full index */
-            ind[ k ] = SKP_LIMIT_int( ind[ k ], *prev_ind + MIN_DELTA_GAIN_QUANT, N_LEVELS_QGAIN - 1 );
+            ind[ k ] = silk_LIMIT_int( ind[ k ], *prev_ind + MIN_DELTA_GAIN_QUANT, N_LEVELS_QGAIN - 1 );
             *prev_ind = ind[ k ];
         } else {
             /* Delta index */
@@ -68,14 +68,14 @@
             /* Double the quantization step size for large gain increases, so that the max gain level can be reached */
             double_step_size_threshold = 2 * MAX_DELTA_GAIN_QUANT - N_LEVELS_QGAIN + *prev_ind;
             if( ind[ k ] > double_step_size_threshold ) {
-                ind[ k ] = double_step_size_threshold + SKP_RSHIFT( ind[ k ] - double_step_size_threshold + 1, 1 );
+                ind[ k ] = double_step_size_threshold + silk_RSHIFT( ind[ k ] - double_step_size_threshold + 1, 1 );
             }
 
-            ind[ k ] = SKP_LIMIT_int( ind[ k ], MIN_DELTA_GAIN_QUANT, MAX_DELTA_GAIN_QUANT );
+            ind[ k ] = silk_LIMIT_int( ind[ k ], MIN_DELTA_GAIN_QUANT, MAX_DELTA_GAIN_QUANT );
 
             /* Accumulate deltas */
             if( ind[ k ] > double_step_size_threshold ) {
-                *prev_ind += SKP_LSHIFT( ind[ k ], 1 ) - double_step_size_threshold;
+                *prev_ind += silk_LSHIFT( ind[ k ], 1 ) - double_step_size_threshold;
             } else {
                 *prev_ind += ind[ k ];
             }
@@ -85,7 +85,7 @@
         }
 
         /* Convert to linear scale and scale */
-        gain_Q16[ k ] = silk_log2lin( SKP_min_32( SKP_SMULWB( INV_SCALE_Q16, *prev_ind ) + OFFSET, 3967 ) ); /* 3967 = 31 in Q7 */
+        gain_Q16[ k ] = silk_log2lin( silk_min_32( silk_SMULWB( INV_SCALE_Q16, *prev_ind ) + OFFSET, 3967 ) ); /* 3967 = 31 in Q7 */
     }
 }
 
@@ -110,14 +110,14 @@
             /* Accumulate deltas */
             double_step_size_threshold = 2 * MAX_DELTA_GAIN_QUANT - N_LEVELS_QGAIN + *prev_ind;
             if( ind_tmp > double_step_size_threshold ) {
-                *prev_ind += SKP_LSHIFT( ind_tmp, 1 ) - double_step_size_threshold;
+                *prev_ind += silk_LSHIFT( ind_tmp, 1 ) - double_step_size_threshold;
             } else {
                 *prev_ind += ind_tmp;
             }
         }
-        *prev_ind = SKP_min( *prev_ind, N_LEVELS_QGAIN - 1 );
+        *prev_ind = silk_min( *prev_ind, N_LEVELS_QGAIN - 1 );
 
         /* Convert to linear scale and scale */
-        gain_Q16[ k ] = silk_log2lin( SKP_min_32( SKP_SMULWB( INV_SCALE_Q16, *prev_ind ) + OFFSET, 3967 ) ); /* 3967 = 31 in Q7 */
+        gain_Q16[ k ] = silk_log2lin( silk_min_32( silk_SMULWB( INV_SCALE_Q16, *prev_ind ) + OFFSET, 3967 ) ); /* 3967 = 31 in Q7 */
     }
 }
diff --git a/silk/silk_init_encoder.c b/silk/silk_init_encoder.c
index 63b43b4..540e4fc 100644
--- a/silk/silk_init_encoder.c
+++ b/silk/silk_init_encoder.c
@@ -44,9 +44,9 @@
     opus_int ret = 0;
 
     /* Clear the entire encoder state */
-    SKP_memset( psEnc, 0, sizeof( silk_encoder_state_Fxx ) );
+    silk_memset( psEnc, 0, sizeof( silk_encoder_state_Fxx ) );
 
-    psEnc->sCmn.variable_HP_smth1_Q15 = SKP_LSHIFT( silk_lin2log( SILK_FIX_CONST( VARIABLE_HP_MIN_CUTOFF_HZ, 16 ) ) - ( 16 << 7 ), 8 );
+    psEnc->sCmn.variable_HP_smth1_Q15 = silk_LSHIFT( silk_lin2log( SILK_FIX_CONST( VARIABLE_HP_MIN_CUTOFF_HZ, 16 ) ) - ( 16 << 7 ), 8 );
     psEnc->sCmn.variable_HP_smth2_Q15 = psEnc->sCmn.variable_HP_smth1_Q15;
 
     /* Used to deactivate LSF interpolation, fluctuation reduction, pitch prediction */
diff --git a/silk/silk_inner_prod_aligned.c b/silk/silk_inner_prod_aligned.c
index 187d143..9897ef1 100644
--- a/silk/silk_inner_prod_aligned.c
+++ b/silk/silk_inner_prod_aligned.c
@@ -46,7 +46,7 @@
     opus_int   i;
     opus_int32 sum = 0;
     for( i = 0; i < len; i++ ) {
-        sum = SKP_SMLABB( sum, inVec1[ i ], inVec2[ i ] );
+        sum = silk_SMLABB( sum, inVec1[ i ], inVec2[ i ] );
     }
     return sum;
 }
@@ -61,7 +61,7 @@
     opus_int   i;
     opus_int32 sum = 0;
     for( i = 0; i < len; i++ ) {
-        sum = SKP_ADD_RSHIFT32( sum, SKP_SMULBB( inVec1[ i ], inVec2[ i ] ), scale );
+        sum = silk_ADD_RSHIFT32( sum, silk_SMULBB( inVec1[ i ], inVec2[ i ] ), scale );
     }
     return sum;
 }
@@ -75,7 +75,7 @@
     opus_int   i;
     opus_int64 sum = 0;
     for( i = 0; i < len; i++ ) {
-        sum = SKP_SMLALBB( sum, inVec1[ i ], inVec2[ i ] );
+        sum = silk_SMLALBB( sum, inVec1[ i ], inVec2[ i ] );
     }
     return sum;
 }
diff --git a/silk/silk_interpolate.c b/silk/silk_interpolate.c
index 1b8e79b..4029256 100644
--- a/silk/silk_interpolate.c
+++ b/silk/silk_interpolate.c
@@ -42,10 +42,10 @@
 {
     opus_int i;
 
-    SKP_assert( ifact_Q2 >= 0 );
-    SKP_assert( ifact_Q2 <= 4 );
+    silk_assert( ifact_Q2 >= 0 );
+    silk_assert( ifact_Q2 <= 4 );
 
     for( i = 0; i < d; i++ ) {
-        xi[ i ] = ( opus_int16 )SKP_ADD_RSHIFT( x0[ i ], SKP_SMULBB( x1[ i ] - x0[ i ], ifact_Q2 ), 2 );
+        xi[ i ] = ( opus_int16 )silk_ADD_RSHIFT( x0[ i ], silk_SMULBB( x1[ i ] - x0[ i ], ifact_Q2 ), 2 );
     }
 }
diff --git a/silk/silk_k2a.c b/silk/silk_k2a.c
index 620d4cb..04f8f9f 100644
--- a/silk/silk_k2a.c
+++ b/silk/silk_k2a.c
@@ -46,8 +46,8 @@
             Atmp[ n ] = A_Q24[ n ];
         }
         for( n = 0; n < k; n++ ) {
-            A_Q24[ n ] = SKP_SMLAWB( A_Q24[ n ], SKP_LSHIFT( Atmp[ k - n - 1 ], 1 ), rc_Q15[ k ] );
+            A_Q24[ n ] = silk_SMLAWB( A_Q24[ n ], silk_LSHIFT( Atmp[ k - n - 1 ], 1 ), rc_Q15[ k ] );
         }
-        A_Q24[ k ] = -SKP_LSHIFT( (opus_int32)rc_Q15[ k ], 9 );
+        A_Q24[ k ] = -silk_LSHIFT( (opus_int32)rc_Q15[ k ], 9 );
     }
 }
diff --git a/silk/silk_k2a_Q16.c b/silk/silk_k2a_Q16.c
index fd7d535..41f661e 100644
--- a/silk/silk_k2a_Q16.c
+++ b/silk/silk_k2a_Q16.c
@@ -46,8 +46,8 @@
             Atmp[ n ] = A_Q24[ n ];
         }
         for( n = 0; n < k; n++ ) {
-            A_Q24[ n ] = SKP_SMLAWW( A_Q24[ n ], Atmp[ k - n - 1 ], rc_Q16[ k ] );
+            A_Q24[ n ] = silk_SMLAWW( A_Q24[ n ], Atmp[ k - n - 1 ], rc_Q16[ k ] );
         }
-        A_Q24[ k ] = -SKP_LSHIFT( rc_Q16[ k ], 8 );
+        A_Q24[ k ] = -silk_LSHIFT( rc_Q16[ k ], 8 );
     }
 }
diff --git a/silk/silk_lin2log.c b/silk/silk_lin2log.c
index e975c58..a054e40 100644
--- a/silk/silk_lin2log.c
+++ b/silk/silk_lin2log.c
@@ -39,6 +39,6 @@
     silk_CLZ_FRAC( inLin, &lz, &frac_Q7 );
 
     /* Piece-wise parabolic approximation */
-    return SKP_LSHIFT( 31 - lz, 7 ) + SKP_SMLAWB( frac_Q7, SKP_MUL( frac_Q7, 128 - frac_Q7 ), 179 );
+    return silk_LSHIFT( 31 - lz, 7 ) + silk_SMLAWB( frac_Q7, silk_MUL( frac_Q7, 128 - frac_Q7 ), 179 );
 }
 
diff --git a/silk/silk_log2lin.c b/silk/silk_log2lin.c
index 9fe9003..ae81f9c 100644
--- a/silk/silk_log2lin.c
+++ b/silk/silk_log2lin.c
@@ -41,14 +41,14 @@
         return 0;
     }
 
-    out = SKP_LSHIFT( 1, SKP_RSHIFT( inLog_Q7, 7 ) );
+    out = silk_LSHIFT( 1, silk_RSHIFT( inLog_Q7, 7 ) );
     frac_Q7 = inLog_Q7 & 0x7F;
     if( inLog_Q7 < 2048 ) {
         /* Piece-wise parabolic approximation */
-        out = SKP_ADD_RSHIFT( out, SKP_MUL( out, SKP_SMLAWB( frac_Q7, SKP_MUL( frac_Q7, 128 - frac_Q7 ), -174 ) ), 7 );
+        out = silk_ADD_RSHIFT( out, silk_MUL( out, silk_SMLAWB( frac_Q7, silk_MUL( frac_Q7, 128 - frac_Q7 ), -174 ) ), 7 );
     } else {
         /* Piece-wise parabolic approximation */
-        out = SKP_MLA( out, SKP_RSHIFT( out, 7 ), SKP_SMLAWB( frac_Q7, SKP_MUL( frac_Q7, 128 - frac_Q7 ), -174 ) );
+        out = silk_MLA( out, silk_RSHIFT( out, 7 ), silk_SMLAWB( frac_Q7, silk_MUL( frac_Q7, 128 - frac_Q7 ), -174 ) );
     }
     return out;
 }
diff --git a/silk/silk_macros.h b/silk/silk_macros.h
index a507de1..9135dde 100644
--- a/silk/silk_macros.h
+++ b/silk/silk_macros.h
@@ -31,46 +31,46 @@
 /* This is an inline header file for general platform. */
 
 /* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */
-#define SKP_SMULWB(a32, b32)            ((((a32) >> 16) * (opus_int32)((opus_int16)(b32))) + ((((a32) & 0x0000FFFF) * (opus_int32)((opus_int16)(b32))) >> 16))
+#define silk_SMULWB(a32, b32)            ((((a32) >> 16) * (opus_int32)((opus_int16)(b32))) + ((((a32) & 0x0000FFFF) * (opus_int32)((opus_int16)(b32))) >> 16))
 
 /* a32 + (b32 * (opus_int32)((opus_int16)(c32))) >> 16 output have to be 32bit int */
-#define SKP_SMLAWB(a32, b32, c32)       ((a32) + ((((b32) >> 16) * (opus_int32)((opus_int16)(c32))) + ((((b32) & 0x0000FFFF) * (opus_int32)((opus_int16)(c32))) >> 16)))
+#define silk_SMLAWB(a32, b32, c32)       ((a32) + ((((b32) >> 16) * (opus_int32)((opus_int16)(c32))) + ((((b32) & 0x0000FFFF) * (opus_int32)((opus_int16)(c32))) >> 16)))
 
 /* (a32 * (b32 >> 16)) >> 16 */
-#define SKP_SMULWT(a32, b32)            (((a32) >> 16) * ((b32) >> 16) + ((((a32) & 0x0000FFFF) * ((b32) >> 16)) >> 16))
+#define silk_SMULWT(a32, b32)            (((a32) >> 16) * ((b32) >> 16) + ((((a32) & 0x0000FFFF) * ((b32) >> 16)) >> 16))
 
 /* a32 + (b32 * (c32 >> 16)) >> 16 */
-#define SKP_SMLAWT(a32, b32, c32)       ((a32) + (((b32) >> 16) * ((c32) >> 16)) + ((((b32) & 0x0000FFFF) * ((c32) >> 16)) >> 16))
+#define silk_SMLAWT(a32, b32, c32)       ((a32) + (((b32) >> 16) * ((c32) >> 16)) + ((((b32) & 0x0000FFFF) * ((c32) >> 16)) >> 16))
 
 /* (opus_int32)((opus_int16)(a3))) * (opus_int32)((opus_int16)(b32)) output have to be 32bit int */
-#define SKP_SMULBB(a32, b32)            ((opus_int32)((opus_int16)(a32)) * (opus_int32)((opus_int16)(b32)))
+#define silk_SMULBB(a32, b32)            ((opus_int32)((opus_int16)(a32)) * (opus_int32)((opus_int16)(b32)))
 
 /* a32 + (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)) output have to be 32bit int */
-#define SKP_SMLABB(a32, b32, c32)       ((a32) + ((opus_int32)((opus_int16)(b32))) * (opus_int32)((opus_int16)(c32)))
+#define silk_SMLABB(a32, b32, c32)       ((a32) + ((opus_int32)((opus_int16)(b32))) * (opus_int32)((opus_int16)(c32)))
 
 /* (opus_int32)((opus_int16)(a32)) * (b32 >> 16) */
-#define SKP_SMULBT(a32, b32)            ((opus_int32)((opus_int16)(a32)) * ((b32) >> 16))
+#define silk_SMULBT(a32, b32)            ((opus_int32)((opus_int16)(a32)) * ((b32) >> 16))
 
 /* a32 + (opus_int32)((opus_int16)(b32)) * (c32 >> 16) */
-#define SKP_SMLABT(a32, b32, c32)       ((a32) + ((opus_int32)((opus_int16)(b32))) * ((c32) >> 16))
+#define silk_SMLABT(a32, b32, c32)       ((a32) + ((opus_int32)((opus_int16)(b32))) * ((c32) >> 16))
 
 /* a64 + (b32 * c32) */
-#define SKP_SMLAL(a64, b32, c32)        (SKP_ADD64((a64), ((opus_int64)(b32) * (opus_int64)(c32))))
+#define silk_SMLAL(a64, b32, c32)        (silk_ADD64((a64), ((opus_int64)(b32) * (opus_int64)(c32))))
 
 /* (a32 * b32) >> 16 */
-#define SKP_SMULWW(a32, b32)            SKP_MLA(SKP_SMULWB((a32), (b32)), (a32), SKP_RSHIFT_ROUND((b32), 16))
+#define silk_SMULWW(a32, b32)            silk_MLA(silk_SMULWB((a32), (b32)), (a32), silk_RSHIFT_ROUND((b32), 16))
 
 /* a32 + ((b32 * c32) >> 16) */
-#define SKP_SMLAWW(a32, b32, c32)       SKP_MLA(SKP_SMLAWB((a32), (b32), (c32)), (b32), SKP_RSHIFT_ROUND((c32), 16))
+#define silk_SMLAWW(a32, b32, c32)       silk_MLA(silk_SMLAWB((a32), (b32), (c32)), (b32), silk_RSHIFT_ROUND((c32), 16))
 
 /* add/subtract with output saturated */
-#define SKP_ADD_SAT32(a, b)             ((((a) + (b)) & 0x80000000) == 0 ?                              \
-                                        ((((a) & (b)) & 0x80000000) != 0 ? SKP_int32_MIN : (a)+(b)) :   \
-                                        ((((a) | (b)) & 0x80000000) == 0 ? SKP_int32_MAX : (a)+(b)) )
+#define silk_ADD_SAT32(a, b)             ((((a) + (b)) & 0x80000000) == 0 ?                              \
+                                        ((((a) & (b)) & 0x80000000) != 0 ? silk_int32_MIN : (a)+(b)) :   \
+                                        ((((a) | (b)) & 0x80000000) == 0 ? silk_int32_MAX : (a)+(b)) )
 
-#define SKP_SUB_SAT32(a, b)             ((((a)-(b)) & 0x80000000) == 0 ?                                        \
-                                        (( (a) & ((b)^0x80000000) & 0x80000000) ? SKP_int32_MIN : (a)-(b)) :    \
-                                        ((((a)^0x80000000) & (b)  & 0x80000000) ? SKP_int32_MAX : (a)-(b)) )
+#define silk_SUB_SAT32(a, b)             ((((a)-(b)) & 0x80000000) == 0 ?                                        \
+                                        (( (a) & ((b)^0x80000000) & 0x80000000) ? silk_int32_MIN : (a)-(b)) :    \
+                                        ((((a)^0x80000000) & (b)  & 0x80000000) ? silk_int32_MAX : (a)-(b)) )
 
 static inline opus_int32 silk_CLZ16(opus_int16 in16)
 {
diff --git a/silk/silk_pitch_analysis_core.c b/silk/silk_pitch_analysis_core.c
index 28d9523..bd6dba1 100644
--- a/silk/silk_pitch_analysis_core.c
+++ b/silk/silk_pitch_analysis_core.c
@@ -107,14 +107,14 @@
     opus_int32 delta_lag_log2_sqr_Q7, lag_log2_Q7, prevLag_log2_Q7, prev_lag_bias_Q15, corr_thres_Q15;
     const opus_int8 *Lag_CB_ptr;
     /* Check for valid sampling frequency */
-    SKP_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 );
+    silk_assert( Fs_kHz == 8 || Fs_kHz == 12 || Fs_kHz == 16 );
 
     /* Check for valid complexity setting */
-    SKP_assert( complexity >= SILK_PE_MIN_COMPLEX );
-    SKP_assert( complexity <= SILK_PE_MAX_COMPLEX );
+    silk_assert( complexity >= SILK_PE_MIN_COMPLEX );
+    silk_assert( complexity <= SILK_PE_MAX_COMPLEX );
 
-    SKP_assert( search_thres1_Q16 >= 0 && search_thres1_Q16 <= (1<<16) );
-    SKP_assert( search_thres2_Q15 >= 0 && search_thres2_Q15 <= (1<<15) );
+    silk_assert( search_thres1_Q16 >= 0 && search_thres1_Q16 <= (1<<16) );
+    silk_assert( search_thres2_Q15 >= 0 && search_thres2_Q15 <= (1<<15) );
 
     /* Setup frame lengths max / min lag for the sampling frequency */
     frame_length      = ( PE_LTP_MEM_LENGTH_MS + nb_subfr * PE_SUBFR_LENGTH_MS ) * Fs_kHz;
@@ -130,27 +130,27 @@
     max_lag_4kHz      = PE_MAX_LAG_MS * 4;
     max_lag_8kHz      = PE_MAX_LAG_MS * 8 - 1;
 
-    SKP_memset( C, 0, sizeof( opus_int16 ) * nb_subfr * ( ( PE_MAX_LAG >> 1 ) + 5) );
+    silk_memset( C, 0, sizeof( opus_int16 ) * nb_subfr * ( ( PE_MAX_LAG >> 1 ) + 5) );
 
     /* Resample from input sampled at Fs_kHz to 8 kHz */
     if( Fs_kHz == 16 ) {
-        SKP_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );
+        silk_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );
         silk_resampler_down2( filt_state, frame_8kHz, frame, frame_length );
     } else if ( Fs_kHz == 12 ) {
-        SKP_memset( filt_state, 0, 6 * sizeof( opus_int32 ) );
+        silk_memset( filt_state, 0, 6 * sizeof( opus_int32 ) );
         silk_resampler_down2_3( filt_state, frame_8kHz, frame, frame_length );
     } else {
-        SKP_assert( Fs_kHz == 8 );
-        SKP_memcpy( frame_8kHz, frame, frame_length_8kHz * sizeof(opus_int16) );
+        silk_assert( Fs_kHz == 8 );
+        silk_memcpy( frame_8kHz, frame, frame_length_8kHz * sizeof(opus_int16) );
     }
 
     /* Decimate again to 4 kHz */
-    SKP_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );/* Set state to zero */
+    silk_memset( filt_state, 0, 2 * sizeof( opus_int32 ) );/* Set state to zero */
     silk_resampler_down2( filt_state, frame_4kHz, frame_8kHz, frame_length_8kHz );
 
     /* Low-pass filter */
     for( i = frame_length_4kHz - 1; i > 0; i-- ) {
-        frame_4kHz[ i ] = SKP_ADD_SAT16( frame_4kHz[ i ], frame_4kHz[ i - 1 ] );
+        frame_4kHz[ i ] = silk_ADD_SAT16( frame_4kHz[ i ], frame_4kHz[ i - 1 ] );
     }
 
     /*******************************************************************************
@@ -159,56 +159,56 @@
     *******************************************************************************/
 
     /* Inner product is calculated with different lengths, so scale for the worst case */
-    max_sum_sq_length = SKP_max_32( sf_length_8kHz, SKP_LSHIFT( sf_length_4kHz, 2 ) );
+    max_sum_sq_length = silk_max_32( sf_length_8kHz, silk_LSHIFT( sf_length_4kHz, 2 ) );
     shift = silk_P_Ana_find_scaling( frame_4kHz, frame_length_4kHz, max_sum_sq_length );
     if( shift > 0 ) {
         for( i = 0; i < frame_length_4kHz; i++ ) {
-            frame_4kHz[ i ] = SKP_RSHIFT( frame_4kHz[ i ], shift );
+            frame_4kHz[ i ] = silk_RSHIFT( frame_4kHz[ i ], shift );
         }
     }
 
     /******************************************************************************
     * FIRST STAGE, operating in 4 khz
     ******************************************************************************/
-    target_ptr = &frame_4kHz[ SKP_LSHIFT( sf_length_4kHz, 2 ) ];
+    target_ptr = &frame_4kHz[ silk_LSHIFT( sf_length_4kHz, 2 ) ];
     for( k = 0; k < nb_subfr >> 1; k++ ) {
         /* Check that we are within range of the array */
-        SKP_assert( target_ptr >= frame_4kHz );
-        SKP_assert( target_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
+        silk_assert( target_ptr >= frame_4kHz );
+        silk_assert( target_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
 
         basis_ptr = target_ptr - min_lag_4kHz;
 
         /* Check that we are within range of the array */
-        SKP_assert( basis_ptr >= frame_4kHz );
-        SKP_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
+        silk_assert( basis_ptr >= frame_4kHz );
+        silk_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
 
         normalizer = 0;
         cross_corr = 0;
         /* Calculate first vector products before loop */
         cross_corr = silk_inner_prod_aligned( target_ptr, basis_ptr, sf_length_8kHz );
         normalizer = silk_inner_prod_aligned( basis_ptr,  basis_ptr, sf_length_8kHz );
-        normalizer = SKP_ADD_SAT32( normalizer, SKP_SMULBB( sf_length_8kHz, 4000 ) );
+        normalizer = silk_ADD_SAT32( normalizer, silk_SMULBB( sf_length_8kHz, 4000 ) );
 
-        temp32 = SKP_DIV32( cross_corr, silk_SQRT_APPROX( normalizer ) + 1 );
-        C[ k ][ min_lag_4kHz ] = (opus_int16)SKP_SAT16( temp32 );        /* Q0 */
+        temp32 = silk_DIV32( cross_corr, silk_SQRT_APPROX( normalizer ) + 1 );
+        C[ k ][ min_lag_4kHz ] = (opus_int16)silk_SAT16( temp32 );        /* Q0 */
 
         /* From now on normalizer is computed recursively */
         for( d = min_lag_4kHz + 1; d <= max_lag_4kHz; d++ ) {
             basis_ptr--;
 
             /* Check that we are within range of the array */
-            SKP_assert( basis_ptr >= frame_4kHz );
-            SKP_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
+            silk_assert( basis_ptr >= frame_4kHz );
+            silk_assert( basis_ptr + sf_length_8kHz <= frame_4kHz + frame_length_4kHz );
 
             cross_corr = silk_inner_prod_aligned( target_ptr, basis_ptr, sf_length_8kHz );
 
             /* Add contribution of new sample and remove contribution from oldest sample */
             normalizer +=
-                SKP_SMULBB( basis_ptr[ 0 ], basis_ptr[ 0 ] ) -
-                SKP_SMULBB( basis_ptr[ sf_length_8kHz ], basis_ptr[ sf_length_8kHz ] );
+                silk_SMULBB( basis_ptr[ 0 ], basis_ptr[ 0 ] ) -
+                silk_SMULBB( basis_ptr[ sf_length_8kHz ], basis_ptr[ sf_length_8kHz ] );
 
-            temp32 = SKP_DIV32( cross_corr, silk_SQRT_APPROX( normalizer ) + 1 );
-            C[ k ][ d ] = (opus_int16)SKP_SAT16( temp32 );                        /* Q0 */
+            temp32 = silk_DIV32( cross_corr, silk_SQRT_APPROX( normalizer ) + 1 );
+            C[ k ][ d ] = (opus_int16)silk_SAT16( temp32 );                        /* Q0 */
         }
         /* Update target pointer */
         target_ptr += sf_length_8kHz;
@@ -218,54 +218,54 @@
     if( nb_subfr == PE_MAX_NB_SUBFR ) {
         for( i = max_lag_4kHz; i >= min_lag_4kHz; i-- ) {
             sum = (opus_int32)C[ 0 ][ i ] + (opus_int32)C[ 1 ][ i ];                /* Q0 */
-            SKP_assert( SKP_RSHIFT( sum, 1 ) == SKP_SAT16( SKP_RSHIFT( sum, 1 ) ) );
-            sum = SKP_RSHIFT( sum, 1 );                                           /* Q-1 */
-            SKP_assert( SKP_LSHIFT( (opus_int32)-i, 4 ) == SKP_SAT16( SKP_LSHIFT( (opus_int32)-i, 4 ) ) );
-            sum = SKP_SMLAWB( sum, sum, SKP_LSHIFT( -i, 4 ) );                    /* Q-1 */
-            SKP_assert( sum == SKP_SAT16( sum ) );
+            silk_assert( silk_RSHIFT( sum, 1 ) == silk_SAT16( silk_RSHIFT( sum, 1 ) ) );
+            sum = silk_RSHIFT( sum, 1 );                                           /* Q-1 */
+            silk_assert( silk_LSHIFT( (opus_int32)-i, 4 ) == silk_SAT16( silk_LSHIFT( (opus_int32)-i, 4 ) ) );
+            sum = silk_SMLAWB( sum, sum, silk_LSHIFT( -i, 4 ) );                    /* Q-1 */
+            silk_assert( sum == silk_SAT16( sum ) );
             C[ 0 ][ i ] = (opus_int16)sum;                                         /* Q-1 */
         }
     } else {
         /* Only short-lag bias */
         for( i = max_lag_4kHz; i >= min_lag_4kHz; i-- ) {
             sum = (opus_int32)C[ 0 ][ i ];
-            sum = SKP_SMLAWB( sum, sum, SKP_LSHIFT( -i, 4 ) );                    /* Q-1 */
+            sum = silk_SMLAWB( sum, sum, silk_LSHIFT( -i, 4 ) );                    /* Q-1 */
             C[ 0 ][ i ] = (opus_int16)sum;                                         /* Q-1 */
         }
     }
 
     /* Sort */
-    length_d_srch = SKP_ADD_LSHIFT32( 4, complexity, 1 );
-    SKP_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH );
+    length_d_srch = silk_ADD_LSHIFT32( 4, complexity, 1 );
+    silk_assert( 3 * length_d_srch <= PE_D_SRCH_LENGTH );
     silk_insertion_sort_decreasing_int16( &C[ 0 ][ min_lag_4kHz ], d_srch, max_lag_4kHz - min_lag_4kHz + 1, length_d_srch );
 
     /* Escape if correlation is very low already here */
-    target_ptr = &frame_4kHz[ SKP_SMULBB( sf_length_4kHz, nb_subfr ) ];
-    energy = silk_inner_prod_aligned( target_ptr, target_ptr, SKP_LSHIFT( sf_length_4kHz, 2 ) );
-    energy = SKP_ADD_SAT32( energy, 1000 );                                  /* Q0 */
+    target_ptr = &frame_4kHz[ silk_SMULBB( sf_length_4kHz, nb_subfr ) ];
+    energy = silk_inner_prod_aligned( target_ptr, target_ptr, silk_LSHIFT( sf_length_4kHz, 2 ) );
+    energy = silk_ADD_SAT32( energy, 1000 );                                  /* Q0 */
     Cmax = (opus_int)C[ 0 ][ min_lag_4kHz ];                                  /* Q-1 */
-    threshold = SKP_SMULBB( Cmax, Cmax );                                    /* Q-2 */
+    threshold = silk_SMULBB( Cmax, Cmax );                                    /* Q-2 */
 
     /* Compare in Q-2 domain */
-    if( SKP_RSHIFT( energy, 4 + 2 ) > threshold ) {
-        SKP_memset( pitch_out, 0, nb_subfr * sizeof( opus_int ) );
+    if( silk_RSHIFT( energy, 4 + 2 ) > threshold ) {
+        silk_memset( pitch_out, 0, nb_subfr * sizeof( opus_int ) );
         *LTPCorr_Q15  = 0;
         *lagIndex     = 0;
         *contourIndex = 0;
         return 1;
     }
 
-    threshold = SKP_SMULWB( search_thres1_Q16, Cmax );
+    threshold = silk_SMULWB( search_thres1_Q16, Cmax );
     for( i = 0; i < length_d_srch; i++ ) {
         /* Convert to 8 kHz indices for the sorted correlation that exceeds the threshold */
         if( C[ 0 ][ min_lag_4kHz + i ] > threshold ) {
-            d_srch[ i ] = SKP_LSHIFT( d_srch[ i ] + min_lag_4kHz, 1 );
+            d_srch[ i ] = silk_LSHIFT( d_srch[ i ] + min_lag_4kHz, 1 );
         } else {
             length_d_srch = i;
             break;
         }
     }
-    SKP_assert( length_d_srch > 0 );
+    silk_assert( length_d_srch > 0 );
 
     for( i = min_lag_8kHz - 5; i < max_lag_8kHz + 5; i++ ) {
         d_comp[ i ] = 0;
@@ -311,21 +311,21 @@
     shift = silk_P_Ana_find_scaling( frame_8kHz, frame_length_8kHz, sf_length_8kHz );
     if( shift > 0 ) {
         for( i = 0; i < frame_length_8kHz; i++ ) {
-            frame_8kHz[ i ] = SKP_RSHIFT( frame_8kHz[ i ], shift );
+            frame_8kHz[ i ] = silk_RSHIFT( frame_8kHz[ i ], shift );
         }
     }
 
     /*********************************************************************************
     * Find energy of each subframe projected onto its history, for a range of delays
     *********************************************************************************/
-    SKP_memset( C, 0, PE_MAX_NB_SUBFR * ( ( PE_MAX_LAG >> 1 ) + 5 ) * sizeof( opus_int16 ) );
+    silk_memset( C, 0, PE_MAX_NB_SUBFR * ( ( PE_MAX_LAG >> 1 ) + 5 ) * sizeof( opus_int16 ) );
 
     target_ptr = &frame_8kHz[ PE_LTP_MEM_LENGTH_MS * 8 ];
     for( k = 0; k < nb_subfr; k++ ) {
 
         /* Check that we are within range of the array */
-        SKP_assert( target_ptr >= frame_8kHz );
-        SKP_assert( target_ptr + sf_length_8kHz <= frame_8kHz + frame_length_8kHz );
+        silk_assert( target_ptr >= frame_8kHz );
+        silk_assert( target_ptr + sf_length_8kHz <= frame_8kHz + frame_length_8kHz );
 
         energy_target = silk_inner_prod_aligned( target_ptr, target_ptr, sf_length_8kHz );
         /* ToDo: Calculate 1 / energy_target here and save one division inside next for loop */
@@ -334,23 +334,23 @@
             basis_ptr = target_ptr - d;
 
             /* Check that we are within range of the array */
-            SKP_assert( basis_ptr >= frame_8kHz );
-            SKP_assert( basis_ptr + sf_length_8kHz <= frame_8kHz + frame_length_8kHz );
+            silk_assert( basis_ptr >= frame_8kHz );
+            silk_assert( basis_ptr + sf_length_8kHz <= frame_8kHz + frame_length_8kHz );
 
             cross_corr   = silk_inner_prod_aligned( target_ptr, basis_ptr, sf_length_8kHz );
             energy_basis = silk_inner_prod_aligned( basis_ptr,  basis_ptr, sf_length_8kHz );
             if( cross_corr > 0 ) {
-                energy = SKP_max( energy_target, energy_basis ); /* Find max to make sure first division < 1.0 */
+                energy = silk_max( energy_target, energy_basis ); /* Find max to make sure first division < 1.0 */
                 lz = silk_CLZ32( cross_corr );
-                lshift = SKP_LIMIT_32( lz - 1, 0, 15 );
-                temp32 = SKP_DIV32( SKP_LSHIFT( cross_corr, lshift ), SKP_RSHIFT( energy, 15 - lshift ) + 1 ); /* Q15 */
-                SKP_assert( temp32 == SKP_SAT16( temp32 ) );
-                temp32 = SKP_SMULWB( cross_corr, temp32 ); /* Q(-1), cc * ( cc / max(b, t) ) */
-                temp32 = SKP_ADD_SAT32( temp32, temp32 );  /* Q(0) */
+                lshift = silk_LIMIT_32( lz - 1, 0, 15 );
+                temp32 = silk_DIV32( silk_LSHIFT( cross_corr, lshift ), silk_RSHIFT( energy, 15 - lshift ) + 1 ); /* Q15 */
+                silk_assert( temp32 == silk_SAT16( temp32 ) );
+                temp32 = silk_SMULWB( cross_corr, temp32 ); /* Q(-1), cc * ( cc / max(b, t) ) */
+                temp32 = silk_ADD_SAT32( temp32, temp32 );  /* Q(0) */
                 lz = silk_CLZ32( temp32 );
-                lshift = SKP_LIMIT_32( lz - 1, 0, 15 );
-                energy = SKP_min( energy_target, energy_basis );
-                C[ k ][ d ] = SKP_DIV32( SKP_LSHIFT( temp32, lshift ), SKP_RSHIFT( energy, 15 - lshift ) + 1 ); /* Q15*/
+                lshift = silk_LIMIT_32( lz - 1, 0, 15 );
+                energy = silk_min( energy_target, energy_basis );
+                C[ k ][ d ] = silk_DIV32( silk_LSHIFT( temp32, lshift ), silk_RSHIFT( energy, 15 - lshift ) + 1 ); /* Q15*/
             } else {
                 C[ k ][ d ] = 0;
             }
@@ -361,23 +361,23 @@
     /* search over lag range and lags codebook */
     /* scale factor for lag codebook, as a function of center lag */
 
-    CCmax   = SKP_int32_MIN;
-    CCmax_b = SKP_int32_MIN;
+    CCmax   = silk_int32_MIN;
+    CCmax_b = silk_int32_MIN;
 
     CBimax = 0; /* To avoid returning undefined lag values */
     lag = -1;   /* To check if lag with strong enough correlation has been found */
 
     if( prevLag > 0 ) {
         if( Fs_kHz == 12 ) {
-            prevLag = SKP_DIV32_16( SKP_LSHIFT( prevLag, 1 ), 3 );
+            prevLag = silk_DIV32_16( silk_LSHIFT( prevLag, 1 ), 3 );
         } else if( Fs_kHz == 16 ) {
-            prevLag = SKP_RSHIFT( prevLag, 1 );
+            prevLag = silk_RSHIFT( prevLag, 1 );
         }
         prevLag_log2_Q7 = silk_lin2log( (opus_int32)prevLag );
     } else {
         prevLag_log2_Q7 = 0;
     }
-    SKP_assert( search_thres2_Q15 == SKP_SAT16( search_thres2_Q15 ) );
+    silk_assert( search_thres2_Q15 == silk_SAT16( search_thres2_Q15 ) );
     /* Setup stage 2 codebook based on number of subframes */
     if( nb_subfr == PE_MAX_NB_SUBFR ) {
         cbk_size   = PE_NB_CBKS_STAGE2_EXT;
@@ -388,12 +388,12 @@
         } else {
             nb_cbk_search = PE_NB_CBKS_STAGE2;
         }
-        corr_thres_Q15 = SKP_RSHIFT( SKP_SMULBB( search_thres2_Q15, search_thres2_Q15 ), 13 );
+        corr_thres_Q15 = silk_RSHIFT( silk_SMULBB( search_thres2_Q15, search_thres2_Q15 ), 13 );
     } else {
         cbk_size       = PE_NB_CBKS_STAGE2_10MS;
         Lag_CB_ptr     = &silk_CB_lags_stage2_10_ms[ 0 ][ 0 ];
         nb_cbk_search  = PE_NB_CBKS_STAGE2_10MS;
-        corr_thres_Q15 = SKP_RSHIFT( SKP_SMULBB( search_thres2_Q15, search_thres2_Q15 ), 14 );
+        corr_thres_Q15 = silk_RSHIFT( silk_SMULBB( search_thres2_Q15, search_thres2_Q15 ), 14 );
     }
 
     for( k = 0; k < length_d_srch; k++ ) {
@@ -406,7 +406,7 @@
             }
         }
         /* Find best codebook */
-        CCmax_new = SKP_int32_MIN;
+        CCmax_new = silk_int32_MIN;
         CBimax_new = 0;
         for( i = 0; i < nb_cbk_search; i++ ) {
             if( CC[ i ] > CCmax_new ) {
@@ -417,18 +417,18 @@
 
         /* Bias towards shorter lags */
         lag_log2_Q7 = silk_lin2log( (opus_int32)d ); /* Q7 */
-        SKP_assert( lag_log2_Q7 == SKP_SAT16( lag_log2_Q7 ) );
-        SKP_assert( nb_subfr * SILK_FIX_CONST( PE_SHORTLAG_BIAS, 15 ) == SKP_SAT16( nb_subfr * SILK_FIX_CONST( PE_SHORTLAG_BIAS, 15 ) ) );
-        CCmax_new_b = CCmax_new - SKP_RSHIFT( SKP_SMULBB( nb_subfr * SILK_FIX_CONST( PE_SHORTLAG_BIAS, 15 ), lag_log2_Q7 ), 7 ); /* Q15 */
+        silk_assert( lag_log2_Q7 == silk_SAT16( lag_log2_Q7 ) );
+        silk_assert( nb_subfr * SILK_FIX_CONST( PE_SHORTLAG_BIAS, 15 ) == silk_SAT16( nb_subfr * SILK_FIX_CONST( PE_SHORTLAG_BIAS, 15 ) ) );
+        CCmax_new_b = CCmax_new - silk_RSHIFT( silk_SMULBB( nb_subfr * SILK_FIX_CONST( PE_SHORTLAG_BIAS, 15 ), lag_log2_Q7 ), 7 ); /* Q15 */
 
         /* Bias towards previous lag */
-        SKP_assert( nb_subfr * SILK_FIX_CONST( PE_PREVLAG_BIAS, 15 ) == SKP_SAT16( nb_subfr * SILK_FIX_CONST( PE_PREVLAG_BIAS, 15 ) ) );
+        silk_assert( nb_subfr * SILK_FIX_CONST( PE_PREVLAG_BIAS, 15 ) == silk_SAT16( nb_subfr * SILK_FIX_CONST( PE_PREVLAG_BIAS, 15 ) ) );
         if( prevLag > 0 ) {
             delta_lag_log2_sqr_Q7 = lag_log2_Q7 - prevLag_log2_Q7;
-            SKP_assert( delta_lag_log2_sqr_Q7 == SKP_SAT16( delta_lag_log2_sqr_Q7 ) );
-            delta_lag_log2_sqr_Q7 = SKP_RSHIFT( SKP_SMULBB( delta_lag_log2_sqr_Q7, delta_lag_log2_sqr_Q7 ), 7 );
-            prev_lag_bias_Q15 = SKP_RSHIFT( SKP_SMULBB( nb_subfr * SILK_FIX_CONST( PE_PREVLAG_BIAS, 15 ), *LTPCorr_Q15 ), 15 ); /* Q15 */
-            prev_lag_bias_Q15 = SKP_DIV32( SKP_MUL( prev_lag_bias_Q15, delta_lag_log2_sqr_Q7 ), delta_lag_log2_sqr_Q7 + ( 1 << 6 ) );
+            silk_assert( delta_lag_log2_sqr_Q7 == silk_SAT16( delta_lag_log2_sqr_Q7 ) );
+            delta_lag_log2_sqr_Q7 = silk_RSHIFT( silk_SMULBB( delta_lag_log2_sqr_Q7, delta_lag_log2_sqr_Q7 ), 7 );
+            prev_lag_bias_Q15 = silk_RSHIFT( silk_SMULBB( nb_subfr * SILK_FIX_CONST( PE_PREVLAG_BIAS, 15 ), *LTPCorr_Q15 ), 15 ); /* Q15 */
+            prev_lag_bias_Q15 = silk_DIV32( silk_MUL( prev_lag_bias_Q15, delta_lag_log2_sqr_Q7 ), delta_lag_log2_sqr_Q7 + ( 1 << 6 ) );
             CCmax_new_b -= prev_lag_bias_Q15; /* Q15 */
         }
 
@@ -445,7 +445,7 @@
 
     if( lag == -1 ) {
         /* No suitable candidate found */
-        SKP_memset( pitch_out, 0, nb_subfr * sizeof( opus_int ) );
+        silk_memset( pitch_out, 0, nb_subfr * sizeof( opus_int ) );
         *LTPCorr_Q15  = 0;
         *lagIndex     = 0;
         *contourIndex = 0;
@@ -464,7 +464,7 @@
             /* Reuse the 32 bit scratch mem vector, use a 16 bit pointer from now on */
             input_frame_ptr = (opus_int16*)scratch_mem;
             for( i = 0; i < frame_length; i++ ) {
-                input_frame_ptr[ i ] = SKP_RSHIFT( frame[ i ], shift );
+                input_frame_ptr[ i ] = silk_RSHIFT( frame[ i ], shift );
             }
         } else {
             input_frame_ptr = (opus_int16*)frame;
@@ -475,24 +475,24 @@
 
         CBimax_old = CBimax;
         /* Compensate for decimation */
-        SKP_assert( lag == SKP_SAT16( lag ) );
+        silk_assert( lag == silk_SAT16( lag ) );
         if( Fs_kHz == 12 ) {
-            lag = SKP_RSHIFT( SKP_SMULBB( lag, 3 ), 1 );
+            lag = silk_RSHIFT( silk_SMULBB( lag, 3 ), 1 );
         } else if( Fs_kHz == 16 ) {
-            lag = SKP_LSHIFT( lag, 1 );
+            lag = silk_LSHIFT( lag, 1 );
         } else {
-            lag = SKP_SMULBB( lag, 3 );
+            lag = silk_SMULBB( lag, 3 );
         }
 
-        lag = SKP_LIMIT_int( lag, min_lag, max_lag );
-        start_lag = SKP_max_int( lag - 2, min_lag );
-        end_lag   = SKP_min_int( lag + 2, max_lag );
+        lag = silk_LIMIT_int( lag, min_lag, max_lag );
+        start_lag = silk_max_int( lag - 2, min_lag );
+        end_lag   = silk_min_int( lag + 2, max_lag );
         lag_new   = lag;                                    /* to avoid undefined lag */
         CBimax    = 0;                                        /* to avoid undefined lag */
-        SKP_assert( SKP_LSHIFT( CCmax, 13 ) >= 0 );
-        *LTPCorr_Q15 = (opus_int)silk_SQRT_APPROX( SKP_LSHIFT( CCmax, 13 ) ); /* Output normalized correlation */
+        silk_assert( silk_LSHIFT( CCmax, 13 ) >= 0 );
+        *LTPCorr_Q15 = (opus_int)silk_SQRT_APPROX( silk_LSHIFT( CCmax, 13 ) ); /* Output normalized correlation */
 
-        CCmax = SKP_int32_MIN;
+        CCmax = silk_int32_MIN;
         /* pitch lags according to second stage */
         for( k = 0; k < nb_subfr; k++ ) {
             pitch_out[ k ] = lag + 2 * silk_CB_lags_stage2[ k ][ CBimax_old ];
@@ -502,8 +502,8 @@
         silk_P_Ana_calc_energy_st3( energies_st3, input_frame_ptr, start_lag, sf_length, nb_subfr, complexity );
 
         lag_counter = 0;
-        SKP_assert( lag == SKP_SAT16( lag ) );
-        contour_bias_Q20 = SKP_DIV32_16( SILK_FIX_CONST( PE_FLATCONTOUR_BIAS, 20 ), lag );
+        silk_assert( lag == silk_SAT16( lag ) );
+        contour_bias_Q20 = silk_DIV32_16( SILK_FIX_CONST( PE_FLATCONTOUR_BIAS, 20 ), lag );
 
         /* Set up cbk parameters according to complexity setting and frame length */
         if( nb_subfr == PE_MAX_NB_SUBFR ) {
@@ -520,29 +520,29 @@
                 cross_corr = 0;
                 energy     = 0;
                 for( k = 0; k < nb_subfr; k++ ) {
-                    SKP_assert( PE_MAX_NB_SUBFR == 4 );
-                    energy     += SKP_RSHIFT( energies_st3[  k ][ j ][ lag_counter ], 2 ); /* use mean, to avoid overflow */
-                    SKP_assert( energy >= 0 );
-                    cross_corr += SKP_RSHIFT( crosscorr_st3[ k ][ j ][ lag_counter ], 2 ); /* use mean, to avoid overflow */
+                    silk_assert( PE_MAX_NB_SUBFR == 4 );
+                    energy     += silk_RSHIFT( energies_st3[  k ][ j ][ lag_counter ], 2 ); /* use mean, to avoid overflow */
+                    silk_assert( energy >= 0 );
+                    cross_corr += silk_RSHIFT( crosscorr_st3[ k ][ j ][ lag_counter ], 2 ); /* use mean, to avoid overflow */
                 }
                 if( cross_corr > 0 ) {
                     /* Divide cross_corr / energy and get result in Q15 */
                     lz = silk_CLZ32( cross_corr );
                     /* Divide with result in Q13, cross_corr could be larger than energy */
-                    lshift = SKP_LIMIT_32( lz - 1, 0, 13 );
-                    CCmax_new = SKP_DIV32( SKP_LSHIFT( cross_corr, lshift ), SKP_RSHIFT( energy, 13 - lshift ) + 1 );
-                    CCmax_new = SKP_SAT16( CCmax_new );
-                    CCmax_new = SKP_SMULWB( cross_corr, CCmax_new );
+                    lshift = silk_LIMIT_32( lz - 1, 0, 13 );
+                    CCmax_new = silk_DIV32( silk_LSHIFT( cross_corr, lshift ), silk_RSHIFT( energy, 13 - lshift ) + 1 );
+                    CCmax_new = silk_SAT16( CCmax_new );
+                    CCmax_new = silk_SMULWB( cross_corr, CCmax_new );
                     /* Saturate */
-                    if( CCmax_new > SKP_RSHIFT( SKP_int32_MAX, 3 ) ) {
-                        CCmax_new = SKP_int32_MAX;
+                    if( CCmax_new > silk_RSHIFT( silk_int32_MAX, 3 ) ) {
+                        CCmax_new = silk_int32_MAX;
                     } else {
-                        CCmax_new = SKP_LSHIFT( CCmax_new, 3 );
+                        CCmax_new = silk_LSHIFT( CCmax_new, 3 );
                     }
                     /* Reduce depending on flatness of contour */
-                    diff = SKP_int16_MAX - SKP_RSHIFT( SKP_MUL( contour_bias_Q20, j ), 5 ); /* Q20 -> Q15 */
-                    SKP_assert( diff == SKP_SAT16( diff ) );
-                    CCmax_new = SKP_LSHIFT( SKP_SMULWB( CCmax_new, diff ), 1 );
+                    diff = silk_int16_MAX - silk_RSHIFT( silk_MUL( contour_bias_Q20, j ), 5 ); /* Q20 -> Q15 */
+                    silk_assert( diff == silk_SAT16( diff ) );
+                    CCmax_new = silk_LSHIFT( silk_SMULWB( CCmax_new, diff ), 1 );
                 } else {
                     CCmax_new = 0;
                 }
@@ -565,15 +565,15 @@
         *contourIndex = (opus_int8)CBimax;
     } else {
         /* Save Lags and correlation */
-        CCmax = SKP_max( CCmax, 0 );
-        *LTPCorr_Q15 = (opus_int)silk_SQRT_APPROX( SKP_LSHIFT( CCmax, 13 ) ); /* Output normalized correlation */
+        CCmax = silk_max( CCmax, 0 );
+        *LTPCorr_Q15 = (opus_int)silk_SQRT_APPROX( silk_LSHIFT( CCmax, 13 ) ); /* Output normalized correlation */
         for( k = 0; k < nb_subfr; k++ ) {
             pitch_out[ k ] = lag + matrix_ptr( Lag_CB_ptr, k, CBimax, cbk_size );
         }
         *lagIndex = (opus_int16)( lag - min_lag_8kHz );
         *contourIndex = (opus_int8)CBimax;
     }
-    SKP_assert( *lagIndex >= 0 );
+    silk_assert( *lagIndex >= 0 );
     /* return as voiced */
     return 0;
 }
@@ -598,8 +598,8 @@
     opus_int32 scratch_mem[ SCRATCH_SIZE ];
     const opus_int8 *Lag_range_ptr, *Lag_CB_ptr;
 
-    SKP_assert( complexity >= SILK_PE_MIN_COMPLEX );
-    SKP_assert( complexity <= SILK_PE_MAX_COMPLEX );
+    silk_assert( complexity >= SILK_PE_MIN_COMPLEX );
+    silk_assert( complexity <= SILK_PE_MAX_COMPLEX );
 
     if( nb_subfr == PE_MAX_NB_SUBFR ){
         Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ];
@@ -607,14 +607,14 @@
         nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ];
         cbk_size      = PE_NB_CBKS_STAGE3_MAX;
     } else {
-        SKP_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
+        silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
         Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ];
         Lag_CB_ptr    = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ];
         nb_cbk_search = PE_NB_CBKS_STAGE3_10MS;
         cbk_size      = PE_NB_CBKS_STAGE3_10MS;
     }
 
-    target_ptr = &frame[ SKP_LSHIFT( sf_length, 2 ) ]; /* Pointer to middle of frame */
+    target_ptr = &frame[ silk_LSHIFT( sf_length, 2 ) ]; /* Pointer to middle of frame */
     for( k = 0; k < nb_subfr; k++ ) {
         lag_counter = 0;
 
@@ -624,7 +624,7 @@
         for( j = lag_low; j <= lag_high; j++ ) {
             basis_ptr = target_ptr - ( start_lag + j );
             cross_corr = silk_inner_prod_aligned( (opus_int16*)target_ptr, (opus_int16*)basis_ptr, sf_length );
-            SKP_assert( lag_counter < SCRATCH_SIZE );
+            silk_assert( lag_counter < SCRATCH_SIZE );
             scratch_mem[ lag_counter ] = cross_corr;
             lag_counter++;
         }
@@ -635,8 +635,8 @@
             /* each code_book vector for each start lag */
             idx = matrix_ptr( Lag_CB_ptr, k, i, cbk_size ) - delta;
             for( j = 0; j < PE_NB_STAGE3_LAGS; j++ ) {
-                SKP_assert( idx + j < SCRATCH_SIZE );
-                SKP_assert( idx + j < lag_counter );
+                silk_assert( idx + j < SCRATCH_SIZE );
+                silk_assert( idx + j < lag_counter );
                 cross_corr_st3[ k ][ i ][ j ] = scratch_mem[ idx + j ];
             }
         }
@@ -664,8 +664,8 @@
     opus_int32 scratch_mem[ SCRATCH_SIZE ];
     const opus_int8 *Lag_range_ptr, *Lag_CB_ptr;
 
-    SKP_assert( complexity >= SILK_PE_MIN_COMPLEX );
-    SKP_assert( complexity <= SILK_PE_MAX_COMPLEX );
+    silk_assert( complexity >= SILK_PE_MIN_COMPLEX );
+    silk_assert( complexity <= SILK_PE_MAX_COMPLEX );
 
     if( nb_subfr == PE_MAX_NB_SUBFR ){
         Lag_range_ptr = &silk_Lag_range_stage3[ complexity ][ 0 ][ 0 ];
@@ -673,33 +673,33 @@
         nb_cbk_search = silk_nb_cbk_searchs_stage3[ complexity ];
         cbk_size      = PE_NB_CBKS_STAGE3_MAX;
     } else {
-        SKP_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
+        silk_assert( nb_subfr == PE_MAX_NB_SUBFR >> 1);
         Lag_range_ptr = &silk_Lag_range_stage3_10_ms[ 0 ][ 0 ];
         Lag_CB_ptr    = &silk_CB_lags_stage3_10_ms[ 0 ][ 0 ];
         nb_cbk_search = PE_NB_CBKS_STAGE3_10MS;
         cbk_size      = PE_NB_CBKS_STAGE3_10MS;
     }
-    target_ptr = &frame[ SKP_LSHIFT( sf_length, 2 ) ];
+    target_ptr = &frame[ silk_LSHIFT( sf_length, 2 ) ];
     for( k = 0; k < nb_subfr; k++ ) {
         lag_counter = 0;
 
         /* Calculate the energy for first lag */
         basis_ptr = target_ptr - ( start_lag + matrix_ptr( Lag_range_ptr, k, 0, 2 ) );
         energy = silk_inner_prod_aligned( basis_ptr, basis_ptr, sf_length );
-        SKP_assert( energy >= 0 );
+        silk_assert( energy >= 0 );
         scratch_mem[ lag_counter ] = energy;
         lag_counter++;
 
         lag_diff = ( matrix_ptr( Lag_range_ptr, k, 1, 2 ) -  matrix_ptr( Lag_range_ptr, k, 0, 2 ) + 1 );
         for( i = 1; i < lag_diff; i++ ) {
             /* remove part outside new window */
-            energy -= SKP_SMULBB( basis_ptr[ sf_length - i ], basis_ptr[ sf_length - i ] );
-            SKP_assert( energy >= 0 );
+            energy -= silk_SMULBB( basis_ptr[ sf_length - i ], basis_ptr[ sf_length - i ] );
+            silk_assert( energy >= 0 );
 
             /* add part that comes into window */
-            energy = SKP_ADD_SAT32( energy, SKP_SMULBB( basis_ptr[ -i ], basis_ptr[ -i ] ) );
-            SKP_assert( energy >= 0 );
-            SKP_assert( lag_counter < SCRATCH_SIZE );
+            energy = silk_ADD_SAT32( energy, silk_SMULBB( basis_ptr[ -i ], basis_ptr[ -i ] ) );
+            silk_assert( energy >= 0 );
+            silk_assert( lag_counter < SCRATCH_SIZE );
             scratch_mem[ lag_counter ] = energy;
             lag_counter++;
         }
@@ -710,10 +710,10 @@
             /* each code_book vector for each start lag                     */
             idx = matrix_ptr( Lag_CB_ptr, k, i, cbk_size ) - delta;
             for( j = 0; j < PE_NB_STAGE3_LAGS; j++ ) {
-                SKP_assert( idx + j < SCRATCH_SIZE );
-                SKP_assert( idx + j < lag_counter );
+                silk_assert( idx + j < SCRATCH_SIZE );
+                silk_assert( idx + j < lag_counter );
                 energies_st3[ k ][ i ][ j ] = scratch_mem[ idx + j ];
-                SKP_assert( energies_st3[ k ][ i ][ j ] >= 0 );
+                silk_assert( energies_st3[ k ][ i ][ j ] >= 0 );
             }
         }
         target_ptr += sf_length;
@@ -730,11 +730,11 @@
 
     x_max = silk_int16_array_maxabs( frame, frame_length );
 
-    if( x_max < SKP_int16_MAX ) {
+    if( x_max < silk_int16_MAX ) {
         /* Number of bits needed for the sum of the squares */
-        nbits = 32 - silk_CLZ32( SKP_SMULBB( x_max, x_max ) );
+        nbits = 32 - silk_CLZ32( silk_SMULBB( x_max, x_max ) );
     } else {
-        /* Here we don't know if x_max should have been SKP_int16_MAX + 1, so we expect the worst case */
+        /* Here we don't know if x_max should have been silk_int16_MAX + 1, so we expect the worst case */
         nbits = 30;
     }
     nbits += 17 - silk_CLZ16( sum_sqr_len );
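
For reference while reading the Q-format arithmetic in the hunks above, the renamed macros compute roughly the following. This is a plain-C sketch only: the real definitions live in silk/macros.h and silk/SigProc_FIX.h, avoid 64-bit arithmetic on most targets, and can differ in overflow and rounding corner cases.

#include <stdint.h>

/* Rough reference semantics only; see silk/macros.h and silk/SigProc_FIX.h for the real macros. */
static inline int32_t ref_SMULBB( int32_t a, int32_t b ) {        /* 16 x 16 -> 32 multiply */
    return (int32_t)(int16_t)a * (int32_t)(int16_t)b;
}
static inline int32_t ref_SMULWB( int32_t a, int32_t b ) {        /* ( a * (int16)b ) >> 16 */
    return (int32_t)( ( (int64_t)a * (int16_t)b ) >> 16 );
}
static inline int32_t ref_SMLAWB( int32_t acc, int32_t a, int32_t b ) {  /* acc + SMULWB( a, b ) */
    return acc + ref_SMULWB( a, b );
}
static inline int32_t ref_SAT16( int32_t a ) {                    /* clamp to the int16 range */
    return a > 32767 ? 32767 : ( a < -32768 ? -32768 : a );
}
static inline int32_t ref_RSHIFT_ROUND( int32_t a, int shift ) {  /* right shift with rounding */
    return shift == 1 ? ( a >> 1 ) + ( a & 1 ) : ( ( a >> ( shift - 1 ) ) + 1 ) >> 1;
}

In this notation, threshold = silk_SMULWB( search_thres1_Q16, Cmax ) above multiplies the Q(-1) correlation Cmax by a Q16 factor and shifts the product back down by 16 bits, so the threshold stays in the same Q(-1) domain as Cmax.
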
diff --git a/silk/silk_process_NLSFs.c b/silk/silk_process_NLSFs.c
index 647b841..7920e4b 100644
--- a/silk/silk_process_NLSFs.c
+++ b/silk/silk_process_NLSFs.c
@@ -46,21 +46,21 @@
     opus_int16   pNLSFW_QW[ MAX_LPC_ORDER ];
     opus_int16   pNLSFW0_temp_QW[ MAX_LPC_ORDER ];
 
-    SKP_assert( psEncC->speech_activity_Q8 >=   0 );
-    SKP_assert( psEncC->speech_activity_Q8 <= SILK_FIX_CONST( 1.0, 8 ) );
+    silk_assert( psEncC->speech_activity_Q8 >=   0 );
+    silk_assert( psEncC->speech_activity_Q8 <= SILK_FIX_CONST( 1.0, 8 ) );
 
     /***********************/
     /* Calculate mu values */
     /***********************/
     /* NLSF_mu  = 0.0025 - 0.001 * psEnc->speech_activity; */
-    NLSF_mu_Q20 = SKP_SMLAWB( SILK_FIX_CONST( 0.0025, 20 ), SILK_FIX_CONST( -0.001, 28 ), psEncC->speech_activity_Q8 );
+    NLSF_mu_Q20 = silk_SMLAWB( SILK_FIX_CONST( 0.0025, 20 ), SILK_FIX_CONST( -0.001, 28 ), psEncC->speech_activity_Q8 );
     if( psEncC->nb_subfr == 2 ) {
         /* Multiply by 1.5 for 10 ms packets */
-        NLSF_mu_Q20 = SKP_ADD_RSHIFT( NLSF_mu_Q20, NLSF_mu_Q20, 1 );
+        NLSF_mu_Q20 = silk_ADD_RSHIFT( NLSF_mu_Q20, NLSF_mu_Q20, 1 );
     }
 
-    SKP_assert( NLSF_mu_Q20 >  0 );
-    SKP_assert( NLSF_mu_Q20 <= SILK_FIX_CONST( 0.0045, 20 ) );
+    silk_assert( NLSF_mu_Q20 >  0 );
+    silk_assert( NLSF_mu_Q20 <= SILK_FIX_CONST( 0.0045, 20 ) );
 
     /* Calculate NLSF weights */
     silk_NLSF_VQ_weights_laroia( pNLSFW_QW, pNLSF_Q15, psEncC->predictLPCOrder );
@@ -76,11 +76,11 @@
         silk_NLSF_VQ_weights_laroia( pNLSFW0_temp_QW, pNLSF0_temp_Q15, psEncC->predictLPCOrder );
 
         /* Update NLSF weights with contribution from first half */
-        i_sqr_Q15 = SKP_LSHIFT( SKP_SMULBB( psEncC->indices.NLSFInterpCoef_Q2, psEncC->indices.NLSFInterpCoef_Q2 ), 11 );
+        i_sqr_Q15 = silk_LSHIFT( silk_SMULBB( psEncC->indices.NLSFInterpCoef_Q2, psEncC->indices.NLSFInterpCoef_Q2 ), 11 );
         for( i = 0; i < psEncC->predictLPCOrder; i++ ) {
-            pNLSFW_QW[ i ] = SKP_SMLAWB( SKP_RSHIFT( pNLSFW_QW[ i ], 1 ), pNLSFW0_temp_QW[ i ], i_sqr_Q15 );
-            SKP_assert( pNLSFW_QW[ i ] <= SKP_int16_MAX );
-            SKP_assert( pNLSFW_QW[ i ] >= 1 );
+            pNLSFW_QW[ i ] = silk_SMLAWB( silk_RSHIFT( pNLSFW_QW[ i ], 1 ), pNLSFW0_temp_QW[ i ], i_sqr_Q15 );
+            silk_assert( pNLSFW_QW[ i ] <= silk_int16_MAX );
+            silk_assert( pNLSFW_QW[ i ] >= 1 );
         }
     }
 
@@ -102,6 +102,6 @@
 
     } else {
         /* Copy LPC coefficients for first half from second half */
-        SKP_memcpy( PredCoef_Q12[ 0 ], PredCoef_Q12[ 1 ], psEncC->predictLPCOrder * sizeof( opus_int16 ) );
+        silk_memcpy( PredCoef_Q12[ 0 ], PredCoef_Q12[ 1 ], psEncC->predictLPCOrder * sizeof( opus_int16 ) );
     }
 }
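
In the NLSF_mu computation above, the Q exponents line up as follows: SILK_FIX_CONST( -0.001, 28 ) is a Q28 constant, speech_activity_Q8 is Q8, and silk_SMLAWB shifts their product down by 16 bits, so the correction term lands in Q( 28 + 8 - 16 ) = Q20, the same domain as the SILK_FIX_CONST( 0.0025, 20 ) base term:

    NLSF_mu_Q20  = 0.0025 * 2^20 + ( ( -0.001 * 2^28 ) * speech_activity_Q8 ) >> 16
                ~= ( 0.0025 - 0.001 * speech_activity ) * 2^20

The silk_ADD_RSHIFT branch then scales this by 1.5 ( x + ( x >> 1 ) ) for 10 ms packets, which keeps the result within the asserted upper bound of SILK_FIX_CONST( 0.0045, 20 ).
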
diff --git a/silk/silk_quant_LTP_gains.c b/silk/silk_quant_LTP_gains.c
index 8aeb215..7ae0911 100644
--- a/silk/silk_quant_LTP_gains.c
+++ b/silk/silk_quant_LTP_gains.c
@@ -55,7 +55,7 @@
     /* iterate over different codebooks with different */
     /* rates/distortions, and choose best */
     /***************************************************/
-    min_rate_dist_Q14 = SKP_int32_MAX;
+    min_rate_dist_Q14 = silk_int32_MAX;
     for( k = 0; k < 3; k++ ) {
         cl_ptr_Q5  = silk_LTP_gain_BITS_Q5_ptrs[ k ];
         cbk_ptr_Q7 = silk_LTP_vq_ptrs_Q7[        k ];
@@ -79,19 +79,19 @@
                 cbk_size                /* I    number of vectors in codebook                           */
             );
 
-            rate_dist_Q14 = SKP_ADD_POS_SAT32( rate_dist_Q14, rate_dist_Q14_subfr );
+            rate_dist_Q14 = silk_ADD_POS_SAT32( rate_dist_Q14, rate_dist_Q14_subfr );
 
             b_Q14_ptr += LTP_ORDER;
             W_Q18_ptr += LTP_ORDER * LTP_ORDER;
         }
 
         /* Avoid never finding a codebook */
-        rate_dist_Q14 = SKP_min( SKP_int32_MAX - 1, rate_dist_Q14 );
+        rate_dist_Q14 = silk_min( silk_int32_MAX - 1, rate_dist_Q14 );
 
         if( rate_dist_Q14 < min_rate_dist_Q14 ) {
             min_rate_dist_Q14 = rate_dist_Q14;
             *periodicity_index = (opus_int8)k;
-            SKP_memcpy( cbk_index, temp_idx, nb_subfr * sizeof( opus_int8 ) );
+            silk_memcpy( cbk_index, temp_idx, nb_subfr * sizeof( opus_int8 ) );
         }
 
         /* Break early in low-complexity mode if rate distortion is below threshold */
@@ -103,7 +103,7 @@
     cbk_ptr_Q7 = silk_LTP_vq_ptrs_Q7[ *periodicity_index ];
     for( j = 0; j < nb_subfr; j++ ) {
         for( k = 0; k < LTP_ORDER; k++ ) {
-            B_Q14[ j * LTP_ORDER + k ] = SKP_LSHIFT( cbk_ptr_Q7[ cbk_index[ j ] * LTP_ORDER + k ], 7 );
+            B_Q14[ j * LTP_ORDER + k ] = silk_LSHIFT( cbk_ptr_Q7[ cbk_index[ j ] * LTP_ORDER + k ], 7 );
         }
     }
 TOC(quant_LTP)
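
The selection loop above follows a simple pattern: accumulate a per-subframe rate/distortion cost with a saturating add so that an outlier cannot wrap the 32-bit total negative, clamp the total strictly below silk_int32_MAX so at least one codebook always wins the comparison, and keep the codebook with the smallest total. A minimal sketch of that pattern follows; subframe_cost() is a hypothetical stand-in for the per-subframe codebook search invoked above, not a real SILK function.

#include <stdint.h>

/* Saturating add for non-negative costs; mirrors what silk_ADD_POS_SAT32 is used for above. */
static int32_t add_pos_sat32( int32_t a, int32_t b ) {
    uint32_t s = (uint32_t)a + (uint32_t)b;
    return ( s & 0x80000000u ) ? INT32_MAX : (int32_t)s;
}

/* subframe_cost() is a hypothetical callback standing in for the per-subframe search. */
int pick_codebook( int nb_cbks, int nb_subfr,
                   int32_t (*subframe_cost)( int cbk, int subfr ) )
{
    int32_t best_cost = INT32_MAX;
    int     best_cbk  = 0;
    for( int k = 0; k < nb_cbks; k++ ) {
        int32_t cost = 0;
        for( int j = 0; j < nb_subfr; j++ ) {
            cost = add_pos_sat32( cost, subframe_cost( k, j ) );
        }
        if( cost > INT32_MAX - 1 ) {
            cost = INT32_MAX - 1;   /* keep strictly below the initial best_cost sentinel */
        }
        if( cost < best_cost ) {
            best_cost = cost;
            best_cbk  = k;
        }
    }
    return best_cbk;
}
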
diff --git a/silk/silk_resampler.c b/silk/silk_resampler.c
index 0c62b94..af6fda6 100644
--- a/silk/silk_resampler.c
+++ b/silk/silk_resampler.c
@@ -65,7 +65,7 @@
 {
     opus_int32 tmp;
     while( b > 0 ) {
-        tmp = a - b * SKP_DIV32( a, b );
+        tmp = a - b * silk_DIV32( a, b );
         a   = b;
         b   = tmp;
     }
@@ -82,7 +82,7 @@
     opus_int32 cycleLen, cyclesPerBatch, up2 = 0, down2 = 0;
 
     /* Clear state */
-    SKP_memset( S, 0, sizeof( silk_resampler_state_struct ) );
+    silk_memset( S, 0, sizeof( silk_resampler_state_struct ) );
 
     /* Input checking */
 #if RESAMPLER_SUPPORT_ABOVE_48KHZ
@@ -90,7 +90,7 @@
 #else
     if( Fs_Hz_in < 8000 || Fs_Hz_in >  48000 || Fs_Hz_out < 8000 || Fs_Hz_out >  48000 ) {
 #endif
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return -1;
     }
 
@@ -120,32 +120,32 @@
 
     if( S->nPreDownsamplers + S->nPostUpsamplers > 0 ) {
         /* Ratio of output/input samples */
-        S->ratio_Q16 = SKP_LSHIFT32( SKP_DIV32( SKP_LSHIFT32( Fs_Hz_out, 13 ), Fs_Hz_in ), 3 );
+        S->ratio_Q16 = silk_LSHIFT32( silk_DIV32( silk_LSHIFT32( Fs_Hz_out, 13 ), Fs_Hz_in ), 3 );
         /* Make sure the ratio is rounded up */
-        while( SKP_SMULWW( S->ratio_Q16, Fs_Hz_in ) < Fs_Hz_out ) S->ratio_Q16++;
+        while( silk_SMULWW( S->ratio_Q16, Fs_Hz_in ) < Fs_Hz_out ) S->ratio_Q16++;
 
         /* Batch size is 10 ms */
-        S->batchSizePrePost = SKP_DIV32_16( Fs_Hz_in, 100 );
+        S->batchSizePrePost = silk_DIV32_16( Fs_Hz_in, 100 );
 
         /* Convert sampling rate to those after pre-downsampling and before post-upsampling */
-        Fs_Hz_in  = SKP_RSHIFT( Fs_Hz_in,  S->nPreDownsamplers  );
-        Fs_Hz_out = SKP_RSHIFT( Fs_Hz_out, S->nPostUpsamplers  );
+        Fs_Hz_in  = silk_RSHIFT( Fs_Hz_in,  S->nPreDownsamplers  );
+        Fs_Hz_out = silk_RSHIFT( Fs_Hz_out, S->nPostUpsamplers  );
     }
 #endif
 
     /* Number of samples processed per batch */
     /* First, try 10 ms frames */
-    S->batchSize = SKP_DIV32_16( Fs_Hz_in, 100 );
-    if( ( SKP_MUL( S->batchSize, 100 ) != Fs_Hz_in ) || ( Fs_Hz_in % 100 != 0 ) ) {
+    S->batchSize = silk_DIV32_16( Fs_Hz_in, 100 );
+    if( ( silk_MUL( S->batchSize, 100 ) != Fs_Hz_in ) || ( Fs_Hz_in % 100 != 0 ) ) {
         /* No integer number of input or output samples with 10 ms frames, use greatest common divisor */
-        cycleLen = SKP_DIV32( Fs_Hz_in, gcd( Fs_Hz_in, Fs_Hz_out ) );
-        cyclesPerBatch = SKP_DIV32( RESAMPLER_MAX_BATCH_SIZE_IN, cycleLen );
+        cycleLen = silk_DIV32( Fs_Hz_in, gcd( Fs_Hz_in, Fs_Hz_out ) );
+        cyclesPerBatch = silk_DIV32( RESAMPLER_MAX_BATCH_SIZE_IN, cycleLen );
         if( cyclesPerBatch == 0 ) {
             /* cycleLen too big, let's just use the maximum batch size. Some distortion will result. */
             S->batchSize = RESAMPLER_MAX_BATCH_SIZE_IN;
-            SKP_assert( 0 );
+            silk_assert( 0 );
         } else {
-            S->batchSize = SKP_MUL( cyclesPerBatch, cycleLen );
+            S->batchSize = silk_MUL( cyclesPerBatch, cycleLen );
         }
     }
 
@@ -153,7 +153,7 @@
     /* Find resampler with the right sampling ratio */
     if( Fs_Hz_out > Fs_Hz_in ) {
         /* Upsample */
-        if( Fs_Hz_out == SKP_MUL( Fs_Hz_in, 2 ) ) {                             /* Fs_out : Fs_in = 2 : 1 */
+        if( Fs_Hz_out == silk_MUL( Fs_Hz_in, 2 ) ) {                             /* Fs_out : Fs_in = 2 : 1 */
             /* Special case: directly use 2x upsampler */
             S->resampler_function = silk_resampler_private_up2_HQ_wrapper;
         } else {
@@ -170,49 +170,49 @@
         }
     } else if ( Fs_Hz_out < Fs_Hz_in ) {
         /* Downsample */
-        if( SKP_MUL( Fs_Hz_out, 4 ) == SKP_MUL( Fs_Hz_in, 3 ) ) {               /* Fs_out : Fs_in = 3 : 4 */
+        if( silk_MUL( Fs_Hz_out, 4 ) == silk_MUL( Fs_Hz_in, 3 ) ) {               /* Fs_out : Fs_in = 3 : 4 */
             S->FIR_Fracs = 3;
             S->Coefs = silk_Resampler_3_4_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 3 ) == SKP_MUL( Fs_Hz_in, 2 ) ) {        /* Fs_out : Fs_in = 2 : 3 */
+        } else if( silk_MUL( Fs_Hz_out, 3 ) == silk_MUL( Fs_Hz_in, 2 ) ) {        /* Fs_out : Fs_in = 2 : 3 */
             S->FIR_Fracs = 2;
             S->Coefs = silk_Resampler_2_3_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 2 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 2 */
+        } else if( silk_MUL( Fs_Hz_out, 2 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 2 */
             S->FIR_Fracs = 1;
             S->Coefs = silk_Resampler_1_2_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 8 ) == SKP_MUL( Fs_Hz_in, 3 ) ) {        /* Fs_out : Fs_in = 3 : 8 */
+        } else if( silk_MUL( Fs_Hz_out, 8 ) == silk_MUL( Fs_Hz_in, 3 ) ) {        /* Fs_out : Fs_in = 3 : 8 */
             S->FIR_Fracs = 3;
             S->Coefs = silk_Resampler_3_8_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 3 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 3 */
+        } else if( silk_MUL( Fs_Hz_out, 3 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 3 */
             S->FIR_Fracs = 1;
             S->Coefs = silk_Resampler_1_3_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 4 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 4 */
+        } else if( silk_MUL( Fs_Hz_out, 4 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 4 */
             S->FIR_Fracs = 1;
             down2 = 1;
             S->Coefs = silk_Resampler_1_2_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 6 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 6 */
+        } else if( silk_MUL( Fs_Hz_out, 6 ) == Fs_Hz_in ) {                      /* Fs_out : Fs_in = 1 : 6 */
             S->FIR_Fracs = 1;
             down2 = 1;
             S->Coefs = silk_Resampler_1_3_COEFS;
             S->resampler_function = silk_resampler_private_down_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 441 ) == SKP_MUL( Fs_Hz_in, 80 ) ) {     /* Fs_out : Fs_in = 80 : 441 */
+        } else if( silk_MUL( Fs_Hz_out, 441 ) == silk_MUL( Fs_Hz_in, 80 ) ) {     /* Fs_out : Fs_in = 80 : 441 */
             S->Coefs = silk_Resampler_80_441_ARMA4_COEFS;
             S->resampler_function = silk_resampler_private_IIR_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 441 ) == SKP_MUL( Fs_Hz_in, 120 ) ) {    /* Fs_out : Fs_in = 120 : 441 */
+        } else if( silk_MUL( Fs_Hz_out, 441 ) == silk_MUL( Fs_Hz_in, 120 ) ) {    /* Fs_out : Fs_in = 120 : 441 */
             S->Coefs = silk_Resampler_120_441_ARMA4_COEFS;
             S->resampler_function = silk_resampler_private_IIR_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 441 ) == SKP_MUL( Fs_Hz_in, 160 ) ) {    /* Fs_out : Fs_in = 160 : 441 */
+        } else if( silk_MUL( Fs_Hz_out, 441 ) == silk_MUL( Fs_Hz_in, 160 ) ) {    /* Fs_out : Fs_in = 160 : 441 */
             S->Coefs = silk_Resampler_160_441_ARMA4_COEFS;
             S->resampler_function = silk_resampler_private_IIR_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 441 ) == SKP_MUL( Fs_Hz_in, 240 ) ) {    /* Fs_out : Fs_in = 240 : 441 */
+        } else if( silk_MUL( Fs_Hz_out, 441 ) == silk_MUL( Fs_Hz_in, 240 ) ) {    /* Fs_out : Fs_in = 240 : 441 */
             S->Coefs = silk_Resampler_240_441_ARMA4_COEFS;
             S->resampler_function = silk_resampler_private_IIR_FIR;
-        } else if( SKP_MUL( Fs_Hz_out, 441 ) == SKP_MUL( Fs_Hz_in, 320 ) ) {    /* Fs_out : Fs_in = 320 : 441 */
+        } else if( silk_MUL( Fs_Hz_out, 441 ) == silk_MUL( Fs_Hz_in, 320 ) ) {    /* Fs_out : Fs_in = 320 : 441 */
             S->Coefs = silk_Resampler_320_441_ARMA4_COEFS;
             S->resampler_function = silk_resampler_private_IIR_FIR;
         } else {
@@ -235,9 +235,9 @@
     S->input2x = up2 | down2;
 
     /* Ratio of input/output samples */
-    S->invRatio_Q16 = SKP_LSHIFT32( SKP_DIV32( SKP_LSHIFT32( Fs_Hz_in, 14 + up2 - down2 ), Fs_Hz_out ), 2 );
+    S->invRatio_Q16 = silk_LSHIFT32( silk_DIV32( silk_LSHIFT32( Fs_Hz_in, 14 + up2 - down2 ), Fs_Hz_out ), 2 );
     /* Make sure the ratio is rounded up */
-    while( SKP_SMULWW( S->invRatio_Q16, SKP_LSHIFT32( Fs_Hz_out, down2 ) ) < SKP_LSHIFT32( Fs_Hz_in, up2 ) ) {
+    while( silk_SMULWW( S->invRatio_Q16, silk_LSHIFT32( Fs_Hz_out, down2 ) ) < silk_LSHIFT32( Fs_Hz_in, up2 ) ) {
         S->invRatio_Q16++;
     }
 
@@ -252,12 +252,12 @@
 )
 {
     /* Clear state */
-    SKP_memset( S->sDown2, 0, sizeof( S->sDown2 ) );
-    SKP_memset( S->sIIR,   0, sizeof( S->sIIR ) );
-    SKP_memset( S->sFIR,   0, sizeof( S->sFIR ) );
+    silk_memset( S->sDown2, 0, sizeof( S->sDown2 ) );
+    silk_memset( S->sIIR,   0, sizeof( S->sIIR ) );
+    silk_memset( S->sFIR,   0, sizeof( S->sFIR ) );
 #if RESAMPLER_SUPPORT_ABOVE_48KHZ
-    SKP_memset( S->sDownPre, 0, sizeof( S->sDownPre ) );
-    SKP_memset( S->sUpPost,  0, sizeof( S->sUpPost ) );
+    silk_memset( S->sDownPre, 0, sizeof( S->sDownPre ) );
+    silk_memset( S->sUpPost,  0, sizeof( S->sUpPost ) );
 #endif
     return 0;
 }
@@ -272,7 +272,7 @@
 {
     /* Verify that state was initialized and has not been corrupted */
     if( S->magic_number != 123456789 ) {
-        SKP_assert( 0 );
+        silk_assert( 0 );
         return -1;
     }
 
@@ -284,23 +284,23 @@
 
         while( inLen > 0 ) {
             /* Number of input and output samples to process */
-            nSamplesIn = SKP_min( inLen, S->batchSizePrePost );
-            nSamplesOut = SKP_SMULWB( S->ratio_Q16, nSamplesIn );
+            nSamplesIn = silk_min( inLen, S->batchSizePrePost );
+            nSamplesOut = silk_SMULWB( S->ratio_Q16, nSamplesIn );
 
-            SKP_assert( SKP_RSHIFT32( nSamplesIn,  S->nPreDownsamplers ) <= 480 );
-            SKP_assert( SKP_RSHIFT32( nSamplesOut, S->nPostUpsamplers  ) <= 480 );
+            silk_assert( silk_RSHIFT32( nSamplesIn,  S->nPreDownsamplers ) <= 480 );
+            silk_assert( silk_RSHIFT32( nSamplesOut, S->nPostUpsamplers  ) <= 480 );
 
             if( S->nPreDownsamplers > 0 ) {
                 S->down_pre_function( S->sDownPre, in_buf, in, nSamplesIn );
                 if( S->nPostUpsamplers > 0 ) {
-                    S->resampler_function( S, out_buf, in_buf, SKP_RSHIFT32( nSamplesIn, S->nPreDownsamplers ) );
-                    S->up_post_function( S->sUpPost, out, out_buf, SKP_RSHIFT32( nSamplesOut, S->nPostUpsamplers ) );
+                    S->resampler_function( S, out_buf, in_buf, silk_RSHIFT32( nSamplesIn, S->nPreDownsamplers ) );
+                    S->up_post_function( S->sUpPost, out, out_buf, silk_RSHIFT32( nSamplesOut, S->nPostUpsamplers ) );
                 } else {
-                    S->resampler_function( S, out, in_buf, SKP_RSHIFT32( nSamplesIn, S->nPreDownsamplers ) );
+                    S->resampler_function( S, out, in_buf, silk_RSHIFT32( nSamplesIn, S->nPreDownsamplers ) );
                 }
             } else {
-                S->resampler_function( S, out_buf, in, SKP_RSHIFT32( nSamplesIn, S->nPreDownsamplers ) );
-                S->up_post_function( S->sUpPost, out, out_buf, SKP_RSHIFT32( nSamplesOut, S->nPostUpsamplers ) );
+                S->resampler_function( S, out_buf, in, silk_RSHIFT32( nSamplesIn, S->nPreDownsamplers ) );
+                S->up_post_function( S->sUpPost, out, out_buf, silk_RSHIFT32( nSamplesOut, S->nPostUpsamplers ) );
             }
 
             in += nSamplesIn;
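
Two of the integer computations above are easy to misread, so here is a 64-bit sketch of what they produce: the Q16 output/input ratio is bumped up until multiplying it back by Fs_Hz_in reaches Fs_Hz_out, and when 10 ms is not a whole number of input samples the batch size is built from whole conversion cycles of Fs_Hz_in / gcd( Fs_Hz_in, Fs_Hz_out ) input samples. The sketch uses int64_t for clarity; the real silk_DIV32/silk_SMULWW path stays in 32-bit arithmetic and rounds slightly differently.

#include <stdint.h>
#include <stdio.h>

/* Same Euclidean loop as the gcd() helper above. */
static int32_t gcd_i32( int32_t a, int32_t b ) {
    while( b > 0 ) {
        int32_t tmp = a - b * ( a / b );
        a = b;
        b = tmp;
    }
    return a;
}

int main( void ) {
    int32_t Fs_in = 48000, Fs_out = 44100;

    /* Output/input ratio in Q16, rounded up so the computed output count never falls short */
    int32_t ratio_Q16 = (int32_t)( ( ( (int64_t)Fs_out << 13 ) / Fs_in ) << 3 );
    while( (int32_t)( ( (int64_t)ratio_Q16 * Fs_in ) >> 16 ) < Fs_out ) {
        ratio_Q16++;
    }

    /* When 10 ms is not an integer number of samples, batch on whole conversion cycles */
    int32_t cycleLen = Fs_in / gcd_i32( Fs_in, Fs_out );

    printf( "ratio_Q16 = %d, cycleLen = %d input samples per cycle\n",
            (int)ratio_Q16, (int)cycleLen );
    return 0;
}
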
diff --git a/silk/silk_resampler_down2.c b/silk/silk_resampler_down2.c
index 4881799..52743a3 100644
--- a/silk/silk_resampler_down2.c
+++ b/silk/silk_resampler_down2.c
@@ -40,35 +40,35 @@
     opus_int32                           inLen       /* I:   Number of input samples             */
 )
 {
-    opus_int32 k, len2 = SKP_RSHIFT32( inLen, 1 );
+    opus_int32 k, len2 = silk_RSHIFT32( inLen, 1 );
     opus_int32 in32, out32, Y, X;
 
-    SKP_assert( silk_resampler_down2_0 > 0 );
-    SKP_assert( silk_resampler_down2_1 < 0 );
+    silk_assert( silk_resampler_down2_0 > 0 );
+    silk_assert( silk_resampler_down2_1 < 0 );
 
     /* Internal variables and state are in Q10 format */
     for( k = 0; k < len2; k++ ) {
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ 2 * k ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ 2 * k ], 10 );
 
         /* All-pass section for even input sample */
-        Y      = SKP_SUB32( in32, S[ 0 ] );
-        X      = SKP_SMLAWB( Y, Y, silk_resampler_down2_1 );
-        out32  = SKP_ADD32( S[ 0 ], X );
-        S[ 0 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 0 ] );
+        X      = silk_SMLAWB( Y, Y, silk_resampler_down2_1 );
+        out32  = silk_ADD32( S[ 0 ], X );
+        S[ 0 ] = silk_ADD32( in32, X );
 
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ 2 * k + 1 ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ 2 * k + 1 ], 10 );
 
         /* All-pass section for odd input sample, and add to output of previous section */
-        Y      = SKP_SUB32( in32, S[ 1 ] );
-        X      = SKP_SMULWB( Y, silk_resampler_down2_0 );
-        out32  = SKP_ADD32( out32, S[ 1 ] );
-        out32  = SKP_ADD32( out32, X );
-        S[ 1 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 1 ] );
+        X      = silk_SMULWB( Y, silk_resampler_down2_0 );
+        out32  = silk_ADD32( out32, S[ 1 ] );
+        out32  = silk_ADD32( out32, X );
+        S[ 1 ] = silk_ADD32( in32, X );
 
         /* Add, convert back to int16 and store to output */
-        out[ k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( out32, 11 ) );
+        out[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( out32, 11 ) );
     }
 }
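
Structurally, the loop above is a polyphase half-band decimator: the even and odd input phases each run through a first-order all-pass section and the two branch outputs are averaged (the Q10 scaling followed by the final right shift of 11 is exactly that average), producing one output sample per input pair. Below is a float sketch of the same structure; c0 and c1 play the roles of silk_resampler_down2_0 / 2^16 and silk_resampler_down2_1 / 2^16, and their actual values from the resampler ROM are not reproduced here.

#include <stddef.h>

void down2_float( float S[ 2 ], float *out, const float *in, size_t inLen,
                  float c0, float c1 )
{
    for( size_t k = 0; k < inLen / 2; k++ ) {
        /* All-pass section for the even phase (mirrors the S[ 0 ] update above) */
        float in0 = in[ 2 * k ];
        float Y0  = in0 - S[ 0 ];
        float X0  = Y0 + Y0 * c1;
        float o0  = S[ 0 ] + X0;
        S[ 0 ]    = in0 + X0;

        /* All-pass section for the odd phase (mirrors the S[ 1 ] update above) */
        float in1 = in[ 2 * k + 1 ];
        float Y1  = in1 - S[ 1 ];
        float X1  = Y1 * c0;
        float o1  = S[ 1 ] + X1;
        S[ 1 ]    = in1 + X1;

        /* Average the two branches: one output sample per input pair */
        out[ k ] = 0.5f * ( o0 + o1 );
    }
}
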
 
diff --git a/silk/silk_resampler_down2_3.c b/silk/silk_resampler_down2_3.c
index be2cb45..0057c84 100644
--- a/silk/silk_resampler_down2_3.c
+++ b/silk/silk_resampler_down2_3.c
@@ -47,11 +47,11 @@
     opus_int32 *buf_ptr;
 
     /* Copy buffered samples to start of buffer */
-    SKP_memcpy( buf, S, ORDER_FIR * sizeof( opus_int32 ) );
+    silk_memcpy( buf, S, ORDER_FIR * sizeof( opus_int32 ) );
 
     /* Iterate over blocks of frameSizeIn input samples */
     while( 1 ) {
-        nSamplesIn = SKP_min( inLen, RESAMPLER_MAX_BATCH_SIZE_IN );
+        nSamplesIn = silk_min( inLen, RESAMPLER_MAX_BATCH_SIZE_IN );
 
         /* Second-order AR filter (output in Q8) */
         silk_resampler_private_AR2( &S[ ORDER_FIR ], &buf[ ORDER_FIR ], in,
@@ -62,21 +62,21 @@
         counter = nSamplesIn;
         while( counter > 2 ) {
             /* Inner product */
-            res_Q6 = SKP_SMULWB(         buf_ptr[ 0 ], silk_Resampler_2_3_COEFS_LQ[ 2 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 1 ], silk_Resampler_2_3_COEFS_LQ[ 3 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 2 ], silk_Resampler_2_3_COEFS_LQ[ 5 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 3 ], silk_Resampler_2_3_COEFS_LQ[ 4 ] );
+            res_Q6 = silk_SMULWB(         buf_ptr[ 0 ], silk_Resampler_2_3_COEFS_LQ[ 2 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 1 ], silk_Resampler_2_3_COEFS_LQ[ 3 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 2 ], silk_Resampler_2_3_COEFS_LQ[ 5 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 3 ], silk_Resampler_2_3_COEFS_LQ[ 4 ] );
 
             /* Scale down, saturate and store in output array */
-            *out++ = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
+            *out++ = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( res_Q6, 6 ) );
 
-            res_Q6 = SKP_SMULWB(         buf_ptr[ 1 ], silk_Resampler_2_3_COEFS_LQ[ 4 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 2 ], silk_Resampler_2_3_COEFS_LQ[ 5 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 3 ], silk_Resampler_2_3_COEFS_LQ[ 3 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 4 ], silk_Resampler_2_3_COEFS_LQ[ 2 ] );
+            res_Q6 = silk_SMULWB(         buf_ptr[ 1 ], silk_Resampler_2_3_COEFS_LQ[ 4 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 2 ], silk_Resampler_2_3_COEFS_LQ[ 5 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 3 ], silk_Resampler_2_3_COEFS_LQ[ 3 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 4 ], silk_Resampler_2_3_COEFS_LQ[ 2 ] );
 
             /* Scale down, saturate and store in output array */
-            *out++ = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
+            *out++ = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( res_Q6, 6 ) );
 
             buf_ptr += 3;
             counter -= 3;
@@ -87,12 +87,12 @@
 
         if( inLen > 0 ) {
             /* More iterations to do; copy last part of filtered signal to beginning of buffer */
-            SKP_memcpy( buf, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
+            silk_memcpy( buf, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
         } else {
             break;
         }
     }
 
     /* Copy last part of filtered signal to the state for the next call */
-    SKP_memcpy( S, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
+    silk_memcpy( S, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
 }
diff --git a/silk/silk_resampler_down3.c b/silk/silk_resampler_down3.c
index d539814..73edf17 100644
--- a/silk/silk_resampler_down3.c
+++ b/silk/silk_resampler_down3.c
@@ -47,11 +47,11 @@
     opus_int32 *buf_ptr;
 
     /* Copy buffered samples to start of buffer */
-    SKP_memcpy( buf, S, ORDER_FIR * sizeof( opus_int32 ) );
+    silk_memcpy( buf, S, ORDER_FIR * sizeof( opus_int32 ) );
 
     /* Iterate over blocks of frameSizeIn input samples */
     while( 1 ) {
-        nSamplesIn = SKP_min( inLen, RESAMPLER_MAX_BATCH_SIZE_IN );
+        nSamplesIn = silk_min( inLen, RESAMPLER_MAX_BATCH_SIZE_IN );
 
         /* Second-order AR filter (output in Q8) */
         silk_resampler_private_AR2( &S[ ORDER_FIR ], &buf[ ORDER_FIR ], in,
@@ -62,12 +62,12 @@
         counter = nSamplesIn;
         while( counter > 2 ) {
             /* Inner product */
-            res_Q6 = SKP_SMULWB(         SKP_ADD32( buf_ptr[ 0 ], buf_ptr[ 5 ] ), silk_Resampler_1_3_COEFS_LQ[ 2 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 1 ], buf_ptr[ 4 ] ), silk_Resampler_1_3_COEFS_LQ[ 3 ] );
-            res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 2 ], buf_ptr[ 3 ] ), silk_Resampler_1_3_COEFS_LQ[ 4 ] );
+            res_Q6 = silk_SMULWB(         silk_ADD32( buf_ptr[ 0 ], buf_ptr[ 5 ] ), silk_Resampler_1_3_COEFS_LQ[ 2 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 1 ], buf_ptr[ 4 ] ), silk_Resampler_1_3_COEFS_LQ[ 3 ] );
+            res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 2 ], buf_ptr[ 3 ] ), silk_Resampler_1_3_COEFS_LQ[ 4 ] );
 
             /* Scale down, saturate and store in output array */
-            *out++ = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
+            *out++ = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( res_Q6, 6 ) );
 
             buf_ptr += 3;
             counter -= 3;
@@ -78,12 +78,12 @@
 
         if( inLen > 0 ) {
             /* More iterations to do; copy last part of filtered signal to beginning of buffer */
-            SKP_memcpy( buf, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
+            silk_memcpy( buf, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
         } else {
             break;
         }
     }
 
     /* Copy last part of filtered signal to the state for the next call */
-    SKP_memcpy( S, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
+    silk_memcpy( S, &buf[ nSamplesIn ], ORDER_FIR * sizeof( opus_int32 ) );
 }
diff --git a/silk/silk_resampler_private_AR2.c b/silk/silk_resampler_private_AR2.c
index b6fb325..b0db789 100644
--- a/silk/silk_resampler_private_AR2.c
+++ b/silk/silk_resampler_private_AR2.c
@@ -45,11 +45,11 @@
     opus_int32    out32;
 
     for( k = 0; k < len; k++ ) {
-        out32       = SKP_ADD_LSHIFT32( S[ 0 ], (opus_int32)in[ k ], 8 );
+        out32       = silk_ADD_LSHIFT32( S[ 0 ], (opus_int32)in[ k ], 8 );
         out_Q8[ k ] = out32;
-        out32       = SKP_LSHIFT( out32, 2 );
-        S[ 0 ]      = SKP_SMLAWB( S[ 1 ], out32, A_Q14[ 0 ] );
-        S[ 1 ]      = SKP_SMULWB( out32, A_Q14[ 1 ] );
+        out32       = silk_LSHIFT( out32, 2 );
+        S[ 0 ]      = silk_SMLAWB( S[ 1 ], out32, A_Q14[ 0 ] );
+        S[ 1 ]      = silk_SMULWB( out32, A_Q14[ 1 ] );
     }
 }
 
diff --git a/silk/silk_resampler_private_ARMA4.c b/silk/silk_resampler_private_ARMA4.c
index 711b192..1690ce3 100644
--- a/silk/silk_resampler_private_ARMA4.c
+++ b/silk/silk_resampler_private_ARMA4.c
@@ -51,24 +51,24 @@
     opus_int32 in_Q8, out1_Q8, out2_Q8, X;
 
     for( k = 0; k < len; k++ ) {
-        in_Q8  = SKP_LSHIFT32( (opus_int32)in[ k ], 8 );
+        in_Q8  = silk_LSHIFT32( (opus_int32)in[ k ], 8 );
 
         /* Outputs of first and second biquad */
-        out1_Q8 = SKP_ADD_LSHIFT32( in_Q8,   S[ 0 ], 2 );
-        out2_Q8 = SKP_ADD_LSHIFT32( out1_Q8, S[ 2 ], 2 );
+        out1_Q8 = silk_ADD_LSHIFT32( in_Q8,   S[ 0 ], 2 );
+        out2_Q8 = silk_ADD_LSHIFT32( out1_Q8, S[ 2 ], 2 );
 
         /* Update states, which are stored in Q6. Coefficients are in Q14 here */
-        X      = SKP_SMLAWB( S[ 1 ], in_Q8,   Coef[ 0 ] );
-        S[ 0 ] = SKP_SMLAWB( X,      out1_Q8, Coef[ 2 ] );
+        X      = silk_SMLAWB( S[ 1 ], in_Q8,   Coef[ 0 ] );
+        S[ 0 ] = silk_SMLAWB( X,      out1_Q8, Coef[ 2 ] );
 
-        X      = SKP_SMLAWB( S[ 3 ], out1_Q8, Coef[ 1 ] );
-        S[ 2 ] = SKP_SMLAWB( X,      out2_Q8, Coef[ 4 ] );
+        X      = silk_SMLAWB( S[ 3 ], out1_Q8, Coef[ 1 ] );
+        S[ 2 ] = silk_SMLAWB( X,      out2_Q8, Coef[ 4 ] );
 
-        S[ 1 ] = SKP_SMLAWB( SKP_RSHIFT32( in_Q8,   2 ), out1_Q8, Coef[ 3 ] );
-        S[ 3 ] = SKP_SMLAWB( SKP_RSHIFT32( out1_Q8, 2 ), out2_Q8, Coef[ 5 ] );
+        S[ 1 ] = silk_SMLAWB( silk_RSHIFT32( in_Q8,   2 ), out1_Q8, Coef[ 3 ] );
+        S[ 3 ] = silk_SMLAWB( silk_RSHIFT32( out1_Q8, 2 ), out2_Q8, Coef[ 5 ] );
 
         /* Apply gain and store to output. The coefficient is in Q16 */
-        out[ k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT32( SKP_SMLAWB( 128, out2_Q8, Coef[ 6 ] ), 8 ) );
+        out[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT32( silk_SMLAWB( 128, out2_Q8, Coef[ 6 ] ), 8 ) );
     }
 }
 
diff --git a/silk/silk_resampler_private_IIR_FIR.c b/silk/silk_resampler_private_IIR_FIR.c
index ec1cb54..8becba3 100644
--- a/silk/silk_resampler_private_IIR_FIR.c
+++ b/silk/silk_resampler_private_IIR_FIR.c
@@ -39,16 +39,16 @@
     opus_int32 table_index;
     /* Interpolate upsampled signal and store in output array */
     for( index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 += index_increment_Q16 ) {
-        table_index = SKP_SMULWB( index_Q16 & 0xFFFF, 144 );
+        table_index = silk_SMULWB( index_Q16 & 0xFFFF, 144 );
         buf_ptr = &buf[ index_Q16 >> 16 ];
 
-        res_Q15 = SKP_SMULBB(          buf_ptr[ 0 ], silk_resampler_frac_FIR_144[       table_index ][ 0 ] );
-        res_Q15 = SKP_SMLABB( res_Q15, buf_ptr[ 1 ], silk_resampler_frac_FIR_144[       table_index ][ 1 ] );
-        res_Q15 = SKP_SMLABB( res_Q15, buf_ptr[ 2 ], silk_resampler_frac_FIR_144[       table_index ][ 2 ] );
-        res_Q15 = SKP_SMLABB( res_Q15, buf_ptr[ 3 ], silk_resampler_frac_FIR_144[ 143 - table_index ][ 2 ] );
-        res_Q15 = SKP_SMLABB( res_Q15, buf_ptr[ 4 ], silk_resampler_frac_FIR_144[ 143 - table_index ][ 1 ] );
-        res_Q15 = SKP_SMLABB( res_Q15, buf_ptr[ 5 ], silk_resampler_frac_FIR_144[ 143 - table_index ][ 0 ] );
-        *out++ = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q15, 15 ) );
+        res_Q15 = silk_SMULBB(          buf_ptr[ 0 ], silk_resampler_frac_FIR_144[       table_index ][ 0 ] );
+        res_Q15 = silk_SMLABB( res_Q15, buf_ptr[ 1 ], silk_resampler_frac_FIR_144[       table_index ][ 1 ] );
+        res_Q15 = silk_SMLABB( res_Q15, buf_ptr[ 2 ], silk_resampler_frac_FIR_144[       table_index ][ 2 ] );
+        res_Q15 = silk_SMLABB( res_Q15, buf_ptr[ 3 ], silk_resampler_frac_FIR_144[ 143 - table_index ][ 2 ] );
+        res_Q15 = silk_SMLABB( res_Q15, buf_ptr[ 4 ], silk_resampler_frac_FIR_144[ 143 - table_index ][ 1 ] );
+        res_Q15 = silk_SMLABB( res_Q15, buf_ptr[ 5 ], silk_resampler_frac_FIR_144[ 143 - table_index ][ 0 ] );
+        *out++ = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( res_Q15, 15 ) );
     }
     return out;
 }
@@ -67,12 +67,12 @@
 
 
     /* Copy buffered samples to start of buffer */
-    SKP_memcpy( buf, S->sFIR, RESAMPLER_ORDER_FIR_144 * sizeof( opus_int32 ) );
+    silk_memcpy( buf, S->sFIR, RESAMPLER_ORDER_FIR_144 * sizeof( opus_int32 ) );
 
     /* Iterate over blocks of frameSizeIn input samples */
     index_increment_Q16 = S->invRatio_Q16;
     while( 1 ) {
-        nSamplesIn = SKP_min( inLen, S->batchSize );
+        nSamplesIn = silk_min( inLen, S->batchSize );
 
         if( S->input2x == 1 ) {
             /* Upsample 2x */
@@ -82,20 +82,20 @@
             silk_resampler_private_ARMA4( S->sIIR, &buf[ RESAMPLER_ORDER_FIR_144 ], in, S->Coefs, nSamplesIn );
         }
 
-        max_index_Q16 = SKP_LSHIFT32( nSamplesIn, 16 + S->input2x );         /* +1 if 2x upsampling */
+        max_index_Q16 = silk_LSHIFT32( nSamplesIn, 16 + S->input2x );         /* +1 if 2x upsampling */
         out = silk_resampler_private_IIR_FIR_INTERPOL(out, buf, max_index_Q16, index_increment_Q16);
         in += nSamplesIn;
         inLen -= nSamplesIn;
 
         if( inLen > 0 ) {
             /* More iterations to do; copy last part of filtered signal to beginning of buffer */
-            SKP_memcpy( buf, &buf[ nSamplesIn << S->input2x ], RESAMPLER_ORDER_FIR_144 * sizeof( opus_int32 ) );
+            silk_memcpy( buf, &buf[ nSamplesIn << S->input2x ], RESAMPLER_ORDER_FIR_144 * sizeof( opus_int32 ) );
         } else {
             break;
         }
     }
 
     /* Copy last part of filtered signal to the state for the next call */
-    SKP_memcpy( S->sFIR, &buf[nSamplesIn << S->input2x ], RESAMPLER_ORDER_FIR_144 * sizeof( opus_int32 ) );
+    silk_memcpy( S->sFIR, &buf[nSamplesIn << S->input2x ], RESAMPLER_ORDER_FIR_144 * sizeof( opus_int32 ) );
 }
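
In the interpolation loop above, the integer part of index_Q16 selects the position in the upsampled buffer and the fractional part selects one of 144 filter phases: silk_SMULWB( index_Q16 & 0xFFFF, 144 ) is simply ( frac * 144 ) >> 16. Taps 3..5 then reuse row 143 - table_index of the table in reverse order, i.e. only half of what appears to be a symmetric set of 6-tap interpolation filters is stored. A small sketch of that indexing (the inner product itself is left out, and the table argument just mirrors the shape of silk_resampler_frac_FIR_144):

#include <stdint.h>

static void get_6tap_phase( const int16_t table[ 144 ][ 3 ], int32_t index_Q16,
                            int16_t taps[ 6 ], int32_t *buf_offset )
{
    int32_t frac        = index_Q16 & 0xFFFF;       /* Q16 fractional position */
    int32_t table_index = ( frac * 144 ) >> 16;     /* phase index 0..143 */

    *buf_offset = index_Q16 >> 16;                  /* integer part: offset into the buffer */
    taps[ 0 ] = table[ table_index ][ 0 ];
    taps[ 1 ] = table[ table_index ][ 1 ];
    taps[ 2 ] = table[ table_index ][ 2 ];
    taps[ 3 ] = table[ 143 - table_index ][ 2 ];    /* mirrored second half */
    taps[ 4 ] = table[ 143 - table_index ][ 1 ];
    taps[ 5 ] = table[ 143 - table_index ][ 0 ];
}
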
 
diff --git a/silk/silk_resampler_private_copy.c b/silk/silk_resampler_private_copy.c
index 824b2fa..4bc7bb8 100644
--- a/silk/silk_resampler_private_copy.c
+++ b/silk/silk_resampler_private_copy.c
@@ -40,5 +40,5 @@
     opus_int32                        inLen            /* I:    Number of input samples                    */
 )
 {
-    SKP_memcpy( out, in, inLen * sizeof( opus_int16 ) );
+    silk_memcpy( out, in, inLen * sizeof( opus_int16 ) );
 }
diff --git a/silk/silk_resampler_private_down4.c b/silk/silk_resampler_private_down4.c
index c7dd952..cb63d29 100644
--- a/silk/silk_resampler_private_down4.c
+++ b/silk/silk_resampler_private_down4.c
@@ -40,34 +40,34 @@
     opus_int32                       inLen           /* I:   Number of input samples                 */
 )
 {
-    opus_int32 k, len4 = SKP_RSHIFT32( inLen, 2 );
+    opus_int32 k, len4 = silk_RSHIFT32( inLen, 2 );
     opus_int32 in32, out32, Y, X;
 
-    SKP_assert( silk_resampler_down2_0 > 0 );
-    SKP_assert( silk_resampler_down2_1 < 0 );
+    silk_assert( silk_resampler_down2_0 > 0 );
+    silk_assert( silk_resampler_down2_1 < 0 );
 
     /* Internal variables and state are in Q10 format */
     for( k = 0; k < len4; k++ ) {
         /* Add two input samples and convert to Q10 */
-        in32 = SKP_LSHIFT( SKP_ADD32( (opus_int32)in[ 4 * k ], (opus_int32)in[ 4 * k + 1 ] ), 9 );
+        in32 = silk_LSHIFT( silk_ADD32( (opus_int32)in[ 4 * k ], (opus_int32)in[ 4 * k + 1 ] ), 9 );
 
         /* All-pass section for even input sample */
-        Y      = SKP_SUB32( in32, S[ 0 ] );
-        X      = SKP_SMLAWB( Y, Y, silk_resampler_down2_1 );
-        out32  = SKP_ADD32( S[ 0 ], X );
-        S[ 0 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 0 ] );
+        X      = silk_SMLAWB( Y, Y, silk_resampler_down2_1 );
+        out32  = silk_ADD32( S[ 0 ], X );
+        S[ 0 ] = silk_ADD32( in32, X );
 
         /* Add two input samples and convert to Q10 */
-        in32 = SKP_LSHIFT( SKP_ADD32( (opus_int32)in[ 4 * k + 2 ], (opus_int32)in[ 4 * k + 3 ] ), 9 );
+        in32 = silk_LSHIFT( silk_ADD32( (opus_int32)in[ 4 * k + 2 ], (opus_int32)in[ 4 * k + 3 ] ), 9 );
 
         /* All-pass section for odd input sample */
-        Y      = SKP_SUB32( in32, S[ 1 ] );
-        X      = SKP_SMULWB( Y, silk_resampler_down2_0 );
-        out32  = SKP_ADD32( out32, S[ 1 ] );
-        out32  = SKP_ADD32( out32, X );
-        S[ 1 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 1 ] );
+        X      = silk_SMULWB( Y, silk_resampler_down2_0 );
+        out32  = silk_ADD32( out32, S[ 1 ] );
+        out32  = silk_ADD32( out32, X );
+        S[ 1 ] = silk_ADD32( in32, X );
 
         /* Add, convert back to int16 and store to output */
-        out[ k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( out32, 11 ) );
+        out[ k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( out32, 11 ) );
     }
 }
diff --git a/silk/silk_resampler_private_down_FIR.c b/silk/silk_resampler_private_down_FIR.c
index 235346f..010c840 100644
--- a/silk/silk_resampler_private_down_FIR.c
+++ b/silk/silk_resampler_private_down_FIR.c
@@ -39,20 +39,20 @@
     opus_int32 *buf_ptr;
     for( index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 += index_increment_Q16 ) {
         /* Integer part gives pointer to buffered input */
-        buf_ptr = buf2 + SKP_RSHIFT( index_Q16, 16 );
+        buf_ptr = buf2 + silk_RSHIFT( index_Q16, 16 );
 
         /* Inner product */
-        res_Q6 = SKP_SMULWB(         SKP_ADD32( buf_ptr[ 0 ], buf_ptr[ 15 ] ), FIR_Coefs[ 0 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 1 ], buf_ptr[ 14 ] ), FIR_Coefs[ 1 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 2 ], buf_ptr[ 13 ] ), FIR_Coefs[ 2 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 3 ], buf_ptr[ 12 ] ), FIR_Coefs[ 3 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 4 ], buf_ptr[ 11 ] ), FIR_Coefs[ 4 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 5 ], buf_ptr[ 10 ] ), FIR_Coefs[ 5 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 6 ], buf_ptr[  9 ] ), FIR_Coefs[ 6 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, SKP_ADD32( buf_ptr[ 7 ], buf_ptr[  8 ] ), FIR_Coefs[ 7 ] );
+        res_Q6 = silk_SMULWB(         silk_ADD32( buf_ptr[ 0 ], buf_ptr[ 15 ] ), FIR_Coefs[ 0 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 1 ], buf_ptr[ 14 ] ), FIR_Coefs[ 1 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 2 ], buf_ptr[ 13 ] ), FIR_Coefs[ 2 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 3 ], buf_ptr[ 12 ] ), FIR_Coefs[ 3 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 4 ], buf_ptr[ 11 ] ), FIR_Coefs[ 4 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 5 ], buf_ptr[ 10 ] ), FIR_Coefs[ 5 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 6 ], buf_ptr[  9 ] ), FIR_Coefs[ 6 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, silk_ADD32( buf_ptr[ 7 ], buf_ptr[  8 ] ), FIR_Coefs[ 7 ] );
 
         /* Scale down, saturate and store in output array */
-        *out++ = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
+        *out++ = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( res_Q6, 6 ) );
     }
     return out;
 }
@@ -66,33 +66,33 @@
     const opus_int16 *interpol_ptr;
     for( index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 += index_increment_Q16 ) {
         /* Integer part gives pointer to buffered input */
-        buf_ptr = buf2 + SKP_RSHIFT( index_Q16, 16 );
+        buf_ptr = buf2 + silk_RSHIFT( index_Q16, 16 );
 
         /* Fractional part gives interpolation coefficients */
-        interpol_ind = SKP_SMULWB( index_Q16 & 0xFFFF, FIR_Fracs );
+        interpol_ind = silk_SMULWB( index_Q16 & 0xFFFF, FIR_Fracs );
 
         /* Inner product */
         interpol_ptr = &FIR_Coefs[ RESAMPLER_DOWN_ORDER_FIR / 2 * interpol_ind ];
-        res_Q6 = SKP_SMULWB(         buf_ptr[ 0 ], interpol_ptr[ 0 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 1 ], interpol_ptr[ 1 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 2 ], interpol_ptr[ 2 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 3 ], interpol_ptr[ 3 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 4 ], interpol_ptr[ 4 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 5 ], interpol_ptr[ 5 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 6 ], interpol_ptr[ 6 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 7 ], interpol_ptr[ 7 ] );
+        res_Q6 = silk_SMULWB(         buf_ptr[ 0 ], interpol_ptr[ 0 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 1 ], interpol_ptr[ 1 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 2 ], interpol_ptr[ 2 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 3 ], interpol_ptr[ 3 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 4 ], interpol_ptr[ 4 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 5 ], interpol_ptr[ 5 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 6 ], interpol_ptr[ 6 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 7 ], interpol_ptr[ 7 ] );
         interpol_ptr = &FIR_Coefs[ RESAMPLER_DOWN_ORDER_FIR / 2 * ( FIR_Fracs - 1 - interpol_ind ) ];
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 15 ], interpol_ptr[ 0 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 14 ], interpol_ptr[ 1 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 13 ], interpol_ptr[ 2 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 12 ], interpol_ptr[ 3 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 11 ], interpol_ptr[ 4 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[ 10 ], interpol_ptr[ 5 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[  9 ], interpol_ptr[ 6 ] );
-        res_Q6 = SKP_SMLAWB( res_Q6, buf_ptr[  8 ], interpol_ptr[ 7 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 15 ], interpol_ptr[ 0 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 14 ], interpol_ptr[ 1 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 13 ], interpol_ptr[ 2 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 12 ], interpol_ptr[ 3 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 11 ], interpol_ptr[ 4 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[ 10 ], interpol_ptr[ 5 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[  9 ], interpol_ptr[ 6 ] );
+        res_Q6 = silk_SMLAWB( res_Q6, buf_ptr[  8 ], interpol_ptr[ 7 ] );
 
         /* Scale down, saturate and store in output array */
-        *out++ = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( res_Q6, 6 ) );
+        *out++ = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( res_Q6, 6 ) );
     }
     return out;
 }
@@ -114,20 +114,20 @@
     const opus_int16 *FIR_Coefs;
 
     /* Copy buffered samples to start of buffer */
-    SKP_memcpy( buf2, S->sFIR, RESAMPLER_DOWN_ORDER_FIR * sizeof( opus_int32 ) );
+    silk_memcpy( buf2, S->sFIR, RESAMPLER_DOWN_ORDER_FIR * sizeof( opus_int32 ) );
 
     FIR_Coefs = &S->Coefs[ 2 ];
 
     /* Iterate over blocks of frameSizeIn input samples */
     index_increment_Q16 = S->invRatio_Q16;
     while( 1 ) {
-        nSamplesIn = SKP_min( inLen, S->batchSize );
+        nSamplesIn = silk_min( inLen, S->batchSize );
 
         if( S->input2x == 1 ) {
             /* Downsample 2x */
             silk_resampler_down2( S->sDown2, buf1, in, nSamplesIn );
 
-            nSamplesIn = SKP_RSHIFT32( nSamplesIn, 1 );
+            nSamplesIn = silk_RSHIFT32( nSamplesIn, 1 );
 
             /* Second-order AR filter (output in Q8) */
             silk_resampler_private_AR2( S->sIIR, &buf2[ RESAMPLER_DOWN_ORDER_FIR ], buf1, S->Coefs, nSamplesIn );
@@ -136,7 +136,7 @@
             silk_resampler_private_AR2( S->sIIR, &buf2[ RESAMPLER_DOWN_ORDER_FIR ], in, S->Coefs, nSamplesIn );
         }
 
-        max_index_Q16 = SKP_LSHIFT32( nSamplesIn, 16 );
+        max_index_Q16 = silk_LSHIFT32( nSamplesIn, 16 );
 
         /* Interpolate filtered signal */
         if( S->FIR_Fracs == 1 ) {
@@ -150,13 +150,13 @@
 
         if( inLen > S->input2x ) {
             /* More iterations to do; copy last part of filtered signal to beginning of buffer */
-            SKP_memcpy( buf2, &buf2[ nSamplesIn ], RESAMPLER_DOWN_ORDER_FIR * sizeof( opus_int32 ) );
+            silk_memcpy( buf2, &buf2[ nSamplesIn ], RESAMPLER_DOWN_ORDER_FIR * sizeof( opus_int32 ) );
         } else {
             break;
         }
     }
 
     /* Copy last part of filtered signal to the state for the next call */
-    SKP_memcpy( S->sFIR, &buf2[ nSamplesIn ], RESAMPLER_DOWN_ORDER_FIR * sizeof( opus_int32 ) );
+    silk_memcpy( S->sFIR, &buf2[ nSamplesIn ], RESAMPLER_DOWN_ORDER_FIR * sizeof( opus_int32 ) );
 }
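
Aside from the rename, the hunks above show the shape of the fractional downsampler: a Q16 phase accumulator picks the integer read position in the buffered input, and the symmetric 16-tap FIR is folded into eight 32x16 multiply-accumulates. A minimal plain-C sketch of that inner loop, assuming the usual SILK semantics silk_SMULWB(a,b) = (a*(opus_int16)b)>>16 and silk_SMLAWB(acc,a,b) = acc + ((a*(opus_int16)b)>>16); the helper names below are illustrative and not part of the patch:

#include <stdint.h>

/* Portable stand-ins for the SILK 32x16 fixed-point macros (assumed semantics). */
static int32_t smulwb(int32_t a32, int32_t b32) {            /* (a32 * (int16)b32) >> 16 */
    return (int32_t)(((int64_t)a32 * (int16_t)b32) >> 16);
}
static int32_t smlawb(int32_t acc, int32_t a32, int32_t b32) {
    return acc + smulwb(a32, b32);
}
static int16_t sat16(int32_t x) {
    return (int16_t)(x > 32767 ? 32767 : (x < -32768 ? -32768 : x));
}

/* Decimate with a symmetric 16-tap FIR whose 8 unique taps are in coefs[].
   buf must hold (max_index_q16 >> 16) + 16 filtered samples; the Q16 index
   step is what lets ratios like 2/3 or 3/4 fall out of the same loop. */
static void fir_down_q16(const int32_t *buf, const int16_t *coefs,
                         int32_t index_inc_q16, int32_t max_index_q16, int16_t *out)
{
    for (int32_t index_q16 = 0; index_q16 < max_index_q16; index_q16 += index_inc_q16) {
        const int32_t *p = buf + (index_q16 >> 16);
        int32_t acc = 0;
        for (int k = 0; k < 8; k++) {                        /* exploit coefficient symmetry */
            acc = smlawb(acc, p[k] + p[15 - k], coefs[k]);
        }
        *out++ = sat16((acc + 32) >> 6);                     /* round Q6 down to Q0 */
    }
}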
 
diff --git a/silk/silk_resampler_private_up2_HQ.c b/silk/silk_resampler_private_up2_HQ.c
index b75190c..23836ce 100644
--- a/silk/silk_resampler_private_up2_HQ.c
+++ b/silk/silk_resampler_private_up2_HQ.c
@@ -45,59 +45,59 @@
     opus_int32 k;
     opus_int32 in32, out32_1, out32_2, Y, X;
 
-    SKP_assert( silk_resampler_up2_hq_0[ 0 ] > 0 );
-    SKP_assert( silk_resampler_up2_hq_0[ 1 ] < 0 );
-    SKP_assert( silk_resampler_up2_hq_1[ 0 ] > 0 );
-    SKP_assert( silk_resampler_up2_hq_1[ 1 ] < 0 );
+    silk_assert( silk_resampler_up2_hq_0[ 0 ] > 0 );
+    silk_assert( silk_resampler_up2_hq_0[ 1 ] < 0 );
+    silk_assert( silk_resampler_up2_hq_1[ 0 ] > 0 );
+    silk_assert( silk_resampler_up2_hq_1[ 1 ] < 0 );
 
     /* Internal variables and state are in Q10 format */
     for( k = 0; k < len; k++ ) {
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ k ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ k ], 10 );
 
         /* First all-pass section for even output sample */
-        Y       = SKP_SUB32( in32, S[ 0 ] );
-        X       = SKP_SMULWB( Y, silk_resampler_up2_hq_0[ 0 ] );
-        out32_1 = SKP_ADD32( S[ 0 ], X );
-        S[ 0 ]  = SKP_ADD32( in32, X );
+        Y       = silk_SUB32( in32, S[ 0 ] );
+        X       = silk_SMULWB( Y, silk_resampler_up2_hq_0[ 0 ] );
+        out32_1 = silk_ADD32( S[ 0 ], X );
+        S[ 0 ]  = silk_ADD32( in32, X );
 
         /* Second all-pass section for even output sample */
-        Y       = SKP_SUB32( out32_1, S[ 1 ] );
-        X       = SKP_SMLAWB( Y, Y, silk_resampler_up2_hq_0[ 1 ] );
-        out32_2 = SKP_ADD32( S[ 1 ], X );
-        S[ 1 ]  = SKP_ADD32( out32_1, X );
+        Y       = silk_SUB32( out32_1, S[ 1 ] );
+        X       = silk_SMLAWB( Y, Y, silk_resampler_up2_hq_0[ 1 ] );
+        out32_2 = silk_ADD32( S[ 1 ], X );
+        S[ 1 ]  = silk_ADD32( out32_1, X );
 
         /* Biquad notch filter */
-        out32_2 = SKP_SMLAWB( out32_2, S[ 5 ], silk_resampler_up2_hq_notch[ 2 ] );
-        out32_2 = SKP_SMLAWB( out32_2, S[ 4 ], silk_resampler_up2_hq_notch[ 1 ] );
-        out32_1 = SKP_SMLAWB( out32_2, S[ 4 ], silk_resampler_up2_hq_notch[ 0 ] );
-        S[ 5 ]  = SKP_SUB32(  out32_2, S[ 5 ] );
+        out32_2 = silk_SMLAWB( out32_2, S[ 5 ], silk_resampler_up2_hq_notch[ 2 ] );
+        out32_2 = silk_SMLAWB( out32_2, S[ 4 ], silk_resampler_up2_hq_notch[ 1 ] );
+        out32_1 = silk_SMLAWB( out32_2, S[ 4 ], silk_resampler_up2_hq_notch[ 0 ] );
+        S[ 5 ]  = silk_SUB32(  out32_2, S[ 5 ] );
 
         /* Apply gain in Q15, convert back to int16 and store to output */
-        out[ 2 * k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT32(
-            SKP_SMLAWB( 256, out32_1, silk_resampler_up2_hq_notch[ 3 ] ), 9 ) );
+        out[ 2 * k ] = (opus_int16)silk_SAT16( silk_RSHIFT32(
+            silk_SMLAWB( 256, out32_1, silk_resampler_up2_hq_notch[ 3 ] ), 9 ) );
 
         /* First all-pass section for odd output sample */
-        Y       = SKP_SUB32( in32, S[ 2 ] );
-        X       = SKP_SMULWB( Y, silk_resampler_up2_hq_1[ 0 ] );
-        out32_1 = SKP_ADD32( S[ 2 ], X );
-        S[ 2 ]  = SKP_ADD32( in32, X );
+        Y       = silk_SUB32( in32, S[ 2 ] );
+        X       = silk_SMULWB( Y, silk_resampler_up2_hq_1[ 0 ] );
+        out32_1 = silk_ADD32( S[ 2 ], X );
+        S[ 2 ]  = silk_ADD32( in32, X );
 
         /* Second all-pass section for odd output sample */
-        Y       = SKP_SUB32( out32_1, S[ 3 ] );
-        X       = SKP_SMLAWB( Y, Y, silk_resampler_up2_hq_1[ 1 ] );
-        out32_2 = SKP_ADD32( S[ 3 ], X );
-        S[ 3 ]  = SKP_ADD32( out32_1, X );
+        Y       = silk_SUB32( out32_1, S[ 3 ] );
+        X       = silk_SMLAWB( Y, Y, silk_resampler_up2_hq_1[ 1 ] );
+        out32_2 = silk_ADD32( S[ 3 ], X );
+        S[ 3 ]  = silk_ADD32( out32_1, X );
 
         /* Biquad notch filter */
-        out32_2 = SKP_SMLAWB( out32_2, S[ 4 ], silk_resampler_up2_hq_notch[ 2 ] );
-        out32_2 = SKP_SMLAWB( out32_2, S[ 5 ], silk_resampler_up2_hq_notch[ 1 ] );
-        out32_1 = SKP_SMLAWB( out32_2, S[ 5 ], silk_resampler_up2_hq_notch[ 0 ] );
-        S[ 4 ]  = SKP_SUB32(  out32_2, S[ 4 ] );
+        out32_2 = silk_SMLAWB( out32_2, S[ 4 ], silk_resampler_up2_hq_notch[ 2 ] );
+        out32_2 = silk_SMLAWB( out32_2, S[ 5 ], silk_resampler_up2_hq_notch[ 1 ] );
+        out32_1 = silk_SMLAWB( out32_2, S[ 5 ], silk_resampler_up2_hq_notch[ 0 ] );
+        S[ 4 ]  = silk_SUB32(  out32_2, S[ 4 ] );
 
         /* Apply gain in Q15, convert back to int16 and store to output */
-        out[ 2 * k + 1 ] = (opus_int16)SKP_SAT16( SKP_RSHIFT32(
-            SKP_SMLAWB( 256, out32_1, silk_resampler_up2_hq_notch[ 3 ] ), 9 ) );
+        out[ 2 * k + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT32(
+            silk_SMLAWB( 256, out32_1, silk_resampler_up2_hq_notch[ 3 ] ), 9 ) );
     }
 }
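
The up2_HQ routine above is a polyphase half-band interpolator: each output phase sends the input through two cascaded first-order all-pass sections (coefficients silk_resampler_up2_hq_0/1) and then through a shared biquad notch. A minimal sketch of one such all-pass section in the one-multiplier form used by the patch, under the same assumed SMULWB semantics as earlier (names illustrative):

#include <stdint.h>

/* One first-order all-pass section:
       y   = in - S
       x   = c * y            (c is a Q16 coefficient, applied as (y*c) >> 16)
       out = S + x
       S'  = in + x
   which realizes H(z) = (c + z^-1) / (1 + c*z^-1); two such sections with
   different c form the even and odd polyphase branches of the upsampler. */
static int32_t allpass_q16(int32_t in, int32_t *state, int32_t c_q16)
{
    int32_t y   = in - *state;
    int32_t x   = (int32_t)(((int64_t)y * (int16_t)c_q16) >> 16);
    int32_t out = *state + x;
    *state = in + x;
    return out;
}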
 
diff --git a/silk/silk_resampler_private_up4.c b/silk/silk_resampler_private_up4.c
index c3904fd..c9e9dda 100644
--- a/silk/silk_resampler_private_up4.c
+++ b/silk/silk_resampler_private_up4.c
@@ -44,33 +44,33 @@
     opus_int32 in32, out32, Y, X;
     opus_int16 out16;
 
-    SKP_assert( silk_resampler_up2_lq_0 > 0 );
-    SKP_assert( silk_resampler_up2_lq_1 < 0 );
+    silk_assert( silk_resampler_up2_lq_0 > 0 );
+    silk_assert( silk_resampler_up2_lq_1 < 0 );
 
     /* Internal variables and state are in Q10 format */
     for( k = 0; k < len; k++ ) {
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ k ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ k ], 10 );
 
         /* All-pass section for even output sample */
-        Y      = SKP_SUB32( in32, S[ 0 ] );
-        X      = SKP_SMULWB( Y, silk_resampler_up2_lq_0 );
-        out32  = SKP_ADD32( S[ 0 ], X );
-        S[ 0 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 0 ] );
+        X      = silk_SMULWB( Y, silk_resampler_up2_lq_0 );
+        out32  = silk_ADD32( S[ 0 ], X );
+        S[ 0 ] = silk_ADD32( in32, X );
 
         /* Convert back to int16 and store to output */
-        out16 = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( out32, 10 ) );
+        out16 = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( out32, 10 ) );
         out[ 4 * k ]     = out16;
         out[ 4 * k + 1 ] = out16;
 
         /* All-pass section for odd output sample */
-        Y      = SKP_SUB32( in32, S[ 1 ] );
-        X      = SKP_SMLAWB( Y, Y, silk_resampler_up2_lq_1 );
-        out32  = SKP_ADD32( S[ 1 ], X );
-        S[ 1 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 1 ] );
+        X      = silk_SMLAWB( Y, Y, silk_resampler_up2_lq_1 );
+        out32  = silk_ADD32( S[ 1 ], X );
+        S[ 1 ] = silk_ADD32( in32, X );
 
         /* Convert back to int16 and store to output */
-        out16 = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( out32, 10 ) );
+        out16 = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( out32, 10 ) );
         out[ 4 * k + 2 ] = out16;
         out[ 4 * k + 3 ] = out16;
     }
diff --git a/silk/silk_resampler_rom.c b/silk/silk_resampler_rom.c
index 11d260b..02b79bf 100644
--- a/silk/silk_resampler_rom.c
+++ b/silk/silk_resampler_rom.c
@@ -48,43 +48,43 @@
 const opus_int16 silk_resampler_up2_hq_notch[ 4 ] = { 6554,  -3932,   6554,  30573 };
 
 /* Tables with IIR and FIR coefficients for fractional downsamplers (90 Words) */
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_3_4_COEFS[ 2 + 3 * RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_3_4_COEFS[ 2 + 3 * RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
     -20253, -13986,
         86,      7,   -151,    368,   -542,    232,  11041,  21904,
         39,     90,   -181,    216,    -17,   -877,   6408,  19695,
          2,    113,   -108,      2,    314,   -977,   2665,  15787,
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_2_3_COEFS[ 2 + 2 * RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_2_3_COEFS[ 2 + 2 * RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
     -13997, -14120,
         60,   -174,     71,    298,   -800,    659,   9238,  17461,
         48,    -40,   -150,    314,   -155,   -845,   4188,  14293,
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_1_2_COEFS[ 2 + RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_1_2_COEFS[ 2 + RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
       1233, -14293,
        -91,    162,    169,   -342,   -505,   1332,   5281,   8742,
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_3_8_COEFS[ 2 + 3 * RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_3_8_COEFS[ 2 + 3 * RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
      12634, -14550,
        246,   -175,   -326,   -113,    764,   2209,   3664,   4402,
        171,      3,   -301,   -258,    391,   1693,   3227,   4272,
         88,    138,   -236,   -327,     95,   1203,   2733,   4022,
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_1_3_COEFS[ 2 + RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_1_3_COEFS[ 2 + RESAMPLER_DOWN_ORDER_FIR / 2 ] = {
      16306, -14409,
         99,   -201,   -220,    -16,    572,   1483,   2433,   3043,
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_2_3_COEFS_LQ[ 2 + 2 * 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_2_3_COEFS_LQ[ 2 + 2 * 2 ] = {
      -2797,  -6507,
       4697,  10739,
       1567,   8276,
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_1_3_COEFS_LQ[ 2 + 3 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_1_3_COEFS_LQ[ 2 + 3 ] = {
      16777,  -9792,
        890,   1614,   2148,
 };
@@ -93,28 +93,28 @@
 /* Tables with coefficients for 4th order ARMA filter (35 Words), in a packed format:       */
 /*    { B1_Q14[1], B2_Q14[1], -A1_Q14[1], -A1_Q14[2], -A2_Q14[1], -A2_Q14[2], gain_Q16 }    */
 /* where it is assumed that B*_Q14[0], B*_Q14[2], A*_Q14[0] are all 16384                   */
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_320_441_ARMA4_COEFS[ 7 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_320_441_ARMA4_COEFS[ 7 ] = {
      31454,  24746,  -9706,  -3386, -17911, -13243,  24797
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_240_441_ARMA4_COEFS[ 7 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_240_441_ARMA4_COEFS[ 7 ] = {
      28721,  11254,   3189,  -2546,  -1495, -12618,  11562
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_160_441_ARMA4_COEFS[ 7 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_160_441_ARMA4_COEFS[ 7 ] = {
      23492,  -6457,  14358,  -4856,  14654, -13008,   4456
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_120_441_ARMA4_COEFS[ 7 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_120_441_ARMA4_COEFS[ 7 ] = {
      19311, -15569,  19489,  -6950,  21441, -13559,   2370
 };
 
-SKP_DWORD_ALIGN const opus_int16 silk_Resampler_80_441_ARMA4_COEFS[ 7 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_Resampler_80_441_ARMA4_COEFS[ 7 ] = {
      13248, -23849,  24126,  -9486,  26806, -14286,   1065
 };
 
 /* Table with interpolation fractions of 1/288 : 2/288 : 287/288 (432 Words) */
-SKP_DWORD_ALIGN const opus_int16 silk_resampler_frac_FIR_144[ 144 ][ RESAMPLER_ORDER_FIR_144 / 2 ] = {
+silk_DWORD_ALIGN const opus_int16 silk_resampler_frac_FIR_144[ 144 ][ RESAMPLER_ORDER_FIR_144 / 2 ] = {
     {  -25,    58, 32526},
     {   -8,   -69, 32461},
     {    8,  -195, 32393},
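
The packed ARMA4 tables above expand into two ordinary biquad sections plus an overall gain. A sketch of that unpacking, relying only on the packing rule stated in the comment (B*_Q14[0], B*_Q14[2] and A*_Q14[0] are implicitly 16384, the A coefficients are stored negated, and the last word is a Q16 gain); the struct and function names are illustrative:

#include <stdint.h>

typedef struct { double b[3], a[3]; } biquad;

/* Unpack one 7-word ARMA4 table into two float biquads and a gain. */
static void arma4_unpack(const int16_t tab[7], biquad *sec1, biquad *sec2, double *gain)
{
    const double q14 = 1.0 / 16384.0, q16 = 1.0 / 65536.0;
    sec1->b[0] = 1.0;  sec1->b[1] = tab[0] * q14;   sec1->b[2] = 1.0;
    sec2->b[0] = 1.0;  sec2->b[1] = tab[1] * q14;   sec2->b[2] = 1.0;
    sec1->a[0] = 1.0;  sec1->a[1] = -tab[2] * q14;  sec1->a[2] = -tab[3] * q14;
    sec2->a[0] = 1.0;  sec2->a[1] = -tab[4] * q14;  sec2->a[2] = -tab[5] * q14;
    *gain = tab[6] * q16;
}
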
diff --git a/silk/silk_resampler_up2.c b/silk/silk_resampler_up2.c
index 82e2eb0..5cb6973 100644
--- a/silk/silk_resampler_up2.c
+++ b/silk/silk_resampler_up2.c
@@ -43,29 +43,29 @@
     opus_int32 k;
     opus_int32 in32, out32, Y, X;
 
-    SKP_assert( silk_resampler_up2_lq_0 > 0 );
-    SKP_assert( silk_resampler_up2_lq_1 < 0 );
+    silk_assert( silk_resampler_up2_lq_0 > 0 );
+    silk_assert( silk_resampler_up2_lq_1 < 0 );
     /* Internal variables and state are in Q10 format */
     for( k = 0; k < len; k++ ) {
         /* Convert to Q10 */
-        in32 = SKP_LSHIFT( (opus_int32)in[ k ], 10 );
+        in32 = silk_LSHIFT( (opus_int32)in[ k ], 10 );
 
         /* All-pass section for even output sample */
-        Y      = SKP_SUB32( in32, S[ 0 ] );
-        X      = SKP_SMULWB( Y, silk_resampler_up2_lq_0 );
-        out32  = SKP_ADD32( S[ 0 ], X );
-        S[ 0 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 0 ] );
+        X      = silk_SMULWB( Y, silk_resampler_up2_lq_0 );
+        out32  = silk_ADD32( S[ 0 ], X );
+        S[ 0 ] = silk_ADD32( in32, X );
 
         /* Convert back to int16 and store to output */
-        out[ 2 * k ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( out32, 10 ) );
+        out[ 2 * k ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( out32, 10 ) );
 
         /* All-pass section for odd output sample */
-        Y      = SKP_SUB32( in32, S[ 1 ] );
-        X      = SKP_SMLAWB( Y, Y, silk_resampler_up2_lq_1 );
-        out32  = SKP_ADD32( S[ 1 ], X );
-        S[ 1 ] = SKP_ADD32( in32, X );
+        Y      = silk_SUB32( in32, S[ 1 ] );
+        X      = silk_SMLAWB( Y, Y, silk_resampler_up2_lq_1 );
+        out32  = silk_ADD32( S[ 1 ], X );
+        S[ 1 ] = silk_ADD32( in32, X );
 
         /* Convert back to int16 and store to output */
-        out[ 2 * k + 1 ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( out32, 10 ) );
+        out[ 2 * k + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( out32, 10 ) );
     }
 }
diff --git a/silk/silk_scale_copy_vector16.c b/silk/silk_scale_copy_vector16.c
index ce17979..6eff9ab 100644
--- a/silk/silk_scale_copy_vector16.c
+++ b/silk/silk_scale_copy_vector16.c
@@ -43,7 +43,7 @@
     opus_int32 tmp32;
 
     for( i = 0; i < dataSize; i++ ) {
-        tmp32 = SKP_SMULWB( gain_Q16, data_in[ i ] );
-        data_out[ i ] = (opus_int16)SKP_CHECK_FIT16( tmp32 );
+        tmp32 = silk_SMULWB( gain_Q16, data_in[ i ] );
+        data_out[ i ] = (opus_int16)silk_CHECK_FIT16( tmp32 );
     }
 }
diff --git a/silk/silk_scale_vector.c b/silk/silk_scale_vector.c
index 9794cd9..2596e71 100644
--- a/silk/silk_scale_vector.c
+++ b/silk/silk_scale_vector.c
@@ -41,6 +41,6 @@
     opus_int  i;
 
     for( i = 0; i < dataSize; i++ ) {
-        data1[ i ] = (opus_int32)SKP_CHECK_FIT32( SKP_RSHIFT64( SKP_SMULL( data1[ i ], gain_Q26 ), 8 ) );/* OUTPUT: Q18*/
+        data1[ i ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( silk_SMULL( data1[ i ], gain_Q26 ), 8 ) );/* OUTPUT: Q18*/
     }
 }
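
Both scaling routines here are straight Q-format gain applications: silk_scale_copy_vector16 multiplies int16 samples by a Q16 gain and keeps the integer part, and silk_scale_vector applies a Q26 gain with an extra right-shift by 8 so the output lands in Q18. A small stand-alone sketch of the 16-bit case, saturating where the original only asserts via silk_CHECK_FIT16 (a deliberate simplification):

#include <stdint.h>

/* out[i] = (gain_q16 * in[i]) >> 16, so gain_q16 == 65536 is unity gain. */
static void scale_copy16(int16_t *out, const int16_t *in, int32_t gain_q16, int n)
{
    for (int i = 0; i < n; i++) {
        int32_t v = (int32_t)(((int64_t)gain_q16 * in[i]) >> 16);
        out[i] = (int16_t)(v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
    }
}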
diff --git a/silk/silk_schur.c b/silk/silk_schur.c
index 39e680a..ad4d3ef 100644
--- a/silk/silk_schur.c
+++ b/silk/silk_schur.c
@@ -50,13 +50,13 @@
     if( lz < 2 ) {
         /* lz must be 1, so shift one to the right */
         for( k = 0; k < order + 1; k++ ) {
-            C[ k ][ 0 ] = C[ k ][ 1 ] = SKP_RSHIFT( c[ k ], 1 );
+            C[ k ][ 0 ] = C[ k ][ 1 ] = silk_RSHIFT( c[ k ], 1 );
         }
     } else if( lz > 2 ) {
         /* Shift to the left */
         lz -= 2;
         for( k = 0; k < order + 1; k++ ) {
-            C[ k ][ 0 ] = C[ k ][ 1 ] = SKP_LSHIFT( c[ k ], lz );
+            C[ k ][ 0 ] = C[ k ][ 1 ] = silk_LSHIFT( c[ k ], lz );
         }
     } else {
         /* No need to shift */
@@ -68,10 +68,10 @@
     for( k = 0; k < order; k++ ) {
 
         /* Get reflection coefficient */
-        rc_tmp_Q15 = -SKP_DIV32_16( C[ k + 1 ][ 0 ], SKP_max_32( SKP_RSHIFT( C[ 0 ][ 1 ], 15 ), 1 ) );
+        rc_tmp_Q15 = -silk_DIV32_16( C[ k + 1 ][ 0 ], silk_max_32( silk_RSHIFT( C[ 0 ][ 1 ], 15 ), 1 ) );
 
         /* Clip (shouldn't happen for properly conditioned inputs) */
-        rc_tmp_Q15 = SKP_SAT16( rc_tmp_Q15 );
+        rc_tmp_Q15 = silk_SAT16( rc_tmp_Q15 );
 
         /* Store */
         rc_Q15[ k ] = ( opus_int16 )rc_tmp_Q15;
@@ -80,8 +80,8 @@
         for( n = 0; n < order - k; n++ ) {
             Ctmp1 = C[ n + k + 1 ][ 0 ];
             Ctmp2 = C[ n ][ 1 ];
-            C[ n + k + 1 ][ 0 ] = SKP_SMLAWB( Ctmp1, SKP_LSHIFT( Ctmp2, 1 ), rc_tmp_Q15 );
-            C[ n ][ 1 ]         = SKP_SMLAWB( Ctmp2, SKP_LSHIFT( Ctmp1, 1 ), rc_tmp_Q15 );
+            C[ n + k + 1 ][ 0 ] = silk_SMLAWB( Ctmp1, silk_LSHIFT( Ctmp2, 1 ), rc_tmp_Q15 );
+            C[ n ][ 1 ]         = silk_SMLAWB( Ctmp2, silk_LSHIFT( Ctmp1, 1 ), rc_tmp_Q15 );
         }
     }
 
diff --git a/silk/silk_schur64.c b/silk/silk_schur64.c
index 90d582b..e8be5b7 100644
--- a/silk/silk_schur64.c
+++ b/silk/silk_schur64.c
@@ -45,7 +45,7 @@
 
     /* Check for invalid input */
     if( c[ 0 ] <= 0 ) {
-        SKP_memset( rc_Q16, 0, order * sizeof( opus_int32 ) );
+        silk_memset( rc_Q16, 0, order * sizeof( opus_int32 ) );
         return 0;
     }
 
@@ -58,7 +58,7 @@
         rc_tmp_Q31 = silk_DIV32_varQ( -C[ k + 1 ][ 0 ], C[ 0 ][ 1 ], 31 );
 
         /* Save the output */
-        rc_Q16[ k ] = SKP_RSHIFT_ROUND( rc_tmp_Q31, 15 );
+        rc_Q16[ k ] = silk_RSHIFT_ROUND( rc_tmp_Q31, 15 );
 
         /* Update correlations */
         for( n = 0; n < order - k; n++ ) {
@@ -66,8 +66,8 @@
             Ctmp2_Q30 = C[ n ][ 1 ];
 
             /* Multiply and add the highest int32 */
-            C[ n + k + 1 ][ 0 ] = Ctmp1_Q30 + SKP_SMMUL( SKP_LSHIFT( Ctmp2_Q30, 1 ), rc_tmp_Q31 );
-            C[ n ][ 1 ]         = Ctmp2_Q30 + SKP_SMMUL( SKP_LSHIFT( Ctmp1_Q30, 1 ), rc_tmp_Q31 );
+            C[ n + k + 1 ][ 0 ] = Ctmp1_Q30 + silk_SMMUL( silk_LSHIFT( Ctmp2_Q30, 1 ), rc_tmp_Q31 );
+            C[ n ][ 1 ]         = Ctmp2_Q30 + silk_SMMUL( silk_LSHIFT( Ctmp1_Q30, 1 ), rc_tmp_Q31 );
         }
     }
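
silk_schur and silk_schur64 are the same Schur recursion at two precisions: every iteration derives a reflection coefficient from the current correlation rows and then updates both rows with it. A floating-point reference of the recursion, without the Q15/Q16 scaling and clipping shown in the hunks (SCHUR_REF_MAX_ORDER is an illustrative bound, not a SILK constant):

#include <assert.h>

#define SCHUR_REF_MAX_ORDER 24

/* c[0..order] are autocorrelations; rc[0..order-1] receive the reflection
   coefficients; the return value is the residual prediction error energy. */
static double schur_ref(double *rc, const double *c, int order)
{
    double C0[SCHUR_REF_MAX_ORDER + 1], C1[SCHUR_REF_MAX_ORDER + 1];
    assert(order <= SCHUR_REF_MAX_ORDER && c[0] > 0.0);

    for (int k = 0; k <= order; k++) {
        C0[k] = C1[k] = c[k];
    }
    for (int k = 0; k < order; k++) {
        double r = -C0[k + 1] / C1[0];            /* reflection coefficient */
        rc[k] = r;
        for (int n = 0; n < order - k; n++) {     /* update both correlation rows */
            double t0 = C0[n + k + 1], t1 = C1[n];
            C0[n + k + 1] = t0 + r * t1;
            C1[n]         = t1 + r * t0;
        }
    }
    return C1[0];                                 /* residual energy */
}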
 
diff --git a/silk/silk_shell_coder.c b/silk/silk_shell_coder.c
index 918e817..c96131a 100644
--- a/silk/silk_shell_coder.c
+++ b/silk/silk_shell_coder.c
@@ -83,7 +83,7 @@
     opus_int pulses1[ 8 ], pulses2[ 4 ], pulses3[ 2 ], pulses4[ 1 ];
 
     /* this function operates on one shell code frame of 16 pulses */
-    SKP_assert( SHELL_CODEC_FRAME_LENGTH == 16 );
+    silk_assert( SHELL_CODEC_FRAME_LENGTH == 16 );
 
     /* tree representation per pulse-subframe */
     combine_pulses( pulses1, pulses0, 8 );
@@ -125,7 +125,7 @@
     opus_int pulses3[ 2 ], pulses2[ 4 ], pulses1[ 8 ];
 
     /* this function operates on one shell code frame of 16 pulses */
-    SKP_assert( SHELL_CODEC_FRAME_LENGTH == 16 );
+    silk_assert( SHELL_CODEC_FRAME_LENGTH == 16 );
 
     decode_split( &pulses3[  0 ], &pulses3[  1 ], psRangeDec, pulses4,      silk_shell_code_table3 );
 
diff --git a/silk/silk_sigm_Q15.c b/silk/silk_sigm_Q15.c
index b2d8510..d4979cb 100644
--- a/silk/silk_sigm_Q15.c
+++ b/silk/silk_sigm_Q15.c
@@ -57,8 +57,8 @@
             return 0;        /* Clip */
         } else {
             /* Linear interpolation of look up table */
-            ind = SKP_RSHIFT( in_Q5, 5 );
-            return( sigm_LUT_neg_Q15[ ind ] - SKP_SMULBB( sigm_LUT_slope_Q10[ ind ], in_Q5 & 0x1F ) );
+            ind = silk_RSHIFT( in_Q5, 5 );
+            return( sigm_LUT_neg_Q15[ ind ] - silk_SMULBB( sigm_LUT_slope_Q10[ ind ], in_Q5 & 0x1F ) );
         }
     } else {
         /* Positive input */
@@ -66,8 +66,8 @@
             return 32767;        /* clip */
         } else {
             /* Linear interpolation of look up table */
-            ind = SKP_RSHIFT( in_Q5, 5 );
-            return( sigm_LUT_pos_Q15[ ind ] + SKP_SMULBB( sigm_LUT_slope_Q10[ ind ], in_Q5 & 0x1F ) );
+            ind = silk_RSHIFT( in_Q5, 5 );
+            return( sigm_LUT_pos_Q15[ ind ] + silk_SMULBB( sigm_LUT_slope_Q10[ ind ], in_Q5 & 0x1F ) );
         }
     }
 }
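
silk_sigm_Q15 evaluates the sigmoid by piecewise-linear table lookup: the upper bits of the Q5 input select a segment and the low five bits walk along that segment's precomputed slope. The generic form of that lookup, with the tables passed in rather than the real sigm_LUT_* arrays, and covering only the positive branch (the negative branch subtracts instead of adds):

#include <stdint.h>

/* val_q15[] holds the function samples in Q15; slope_q10[] holds the Q15 step
   between adjacent samples divided by 32, so slope * frac is directly a Q15 term. */
static int32_t lut_interp_q15(int32_t in_q5, const uint16_t *val_q15, const int16_t *slope_q10)
{
    int32_t ind  = in_q5 >> 5;       /* integer part: segment index */
    int32_t frac = in_q5 & 0x1F;     /* fractional part: 0..31 thirty-seconds */
    return val_q15[ind] + slope_q10[ind] * frac;
}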
diff --git a/silk/silk_sort.c b/silk/silk_sort.c
index 31a564d..2c539ca 100644
--- a/silk/silk_sort.c
+++ b/silk/silk_sort.c
@@ -48,9 +48,9 @@
     opus_int        i, j;
 
     /* Safety checks */
-    SKP_assert( K >  0 );
-    SKP_assert( L >  0 );
-    SKP_assert( L >= K );
+    silk_assert( K >  0 );
+    silk_assert( L >  0 );
+    silk_assert( L >= K );
 
     /* Write start indices in index vector */
     for( i = 0; i < K; i++ ) {
@@ -94,9 +94,9 @@
     opus_int value;
 
     /* Safety checks */
-    SKP_assert( K >  0 );
-    SKP_assert( L >  0 );
-    SKP_assert( L >= K );
+    silk_assert( K >  0 );
+    silk_assert( L >  0 );
+    silk_assert( L >= K );
 
     /* Write start indices in index vector */
     for( i = 0; i < K; i++ ) {
@@ -138,7 +138,7 @@
     opus_int    i, j;
 
     /* Safety checks */
-    SKP_assert( L >  0 );
+    silk_assert( L >  0 );
 
     /* Sort vector elements by value, increasing order */
     for( i = 1; i < L; i++ ) {
diff --git a/silk/silk_stereo_LR_to_MS.c b/silk/silk_stereo_LR_to_MS.c
index f9f59eb..1dc7434 100644
--- a/silk/silk_stereo_LR_to_MS.c
+++ b/silk/silk_stereo_LR_to_MS.c
@@ -57,26 +57,26 @@
     for( n = 0; n < frame_length + 2; n++ ) {
         sum  = x1[ n - 2 ] + (opus_int32)x2[ n - 2 ];
         diff = x1[ n - 2 ] - (opus_int32)x2[ n - 2 ];
-        mid[  n ] = (opus_int16)SKP_RSHIFT_ROUND( sum, 1 );
-        side[ n ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( diff, 1 ) );
+        mid[  n ] = (opus_int16)silk_RSHIFT_ROUND( sum, 1 );
+        side[ n ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( diff, 1 ) );
     }
 
     /* Buffering */
-    SKP_memcpy( mid,  state->sMid,  2 * sizeof( opus_int16 ) );
-    SKP_memcpy( side, state->sSide, 2 * sizeof( opus_int16 ) );
-    SKP_memcpy( state->sMid,  &mid[  frame_length ], 2 * sizeof( opus_int16 ) );
-    SKP_memcpy( state->sSide, &side[ frame_length ], 2 * sizeof( opus_int16 ) );
+    silk_memcpy( mid,  state->sMid,  2 * sizeof( opus_int16 ) );
+    silk_memcpy( side, state->sSide, 2 * sizeof( opus_int16 ) );
+    silk_memcpy( state->sMid,  &mid[  frame_length ], 2 * sizeof( opus_int16 ) );
+    silk_memcpy( state->sSide, &side[ frame_length ], 2 * sizeof( opus_int16 ) );
 
     /* LP and HP filter mid signal */
     for( n = 0; n < frame_length; n++ ) {
-        sum = SKP_RSHIFT_ROUND( SKP_ADD_LSHIFT( mid[ n ] + mid[ n + 2 ], mid[ n + 1 ], 1 ), 2 );
+        sum = silk_RSHIFT_ROUND( silk_ADD_LSHIFT( mid[ n ] + mid[ n + 2 ], mid[ n + 1 ], 1 ), 2 );
         LP_mid[ n ] = sum;
         HP_mid[ n ] = mid[ n + 1 ] - sum;
     }
 
     /* LP and HP filter side signal */
     for( n = 0; n < frame_length; n++ ) {
-        sum = SKP_RSHIFT_ROUND( SKP_ADD_LSHIFT( side[ n ] + side[ n + 2 ], side[ n + 1 ], 1 ), 2 );
+        sum = silk_RSHIFT_ROUND( silk_ADD_LSHIFT( side[ n ] + side[ n + 2 ], side[ n + 1 ], 1 ), 2 );
         LP_side[ n ] = sum;
         HP_side[ n ] = side[ n + 1 ] - sum;
     }
@@ -86,46 +86,46 @@
     smooth_coef_Q16 = is10msFrame ?
         SILK_FIX_CONST( STEREO_RATIO_SMOOTH_COEF / 2, 16 ) :
         SILK_FIX_CONST( STEREO_RATIO_SMOOTH_COEF,     16 );
-    smooth_coef_Q16 = SKP_SMULWB( SKP_SMULBB( prev_speech_act_Q8 , prev_speech_act_Q8 ), smooth_coef_Q16 );
+    smooth_coef_Q16 = silk_SMULWB( silk_SMULBB( prev_speech_act_Q8 , prev_speech_act_Q8 ), smooth_coef_Q16 );
 
     pred_Q13[ 0 ] = silk_stereo_find_predictor( &LP_ratio_Q14, LP_mid, LP_side, &state->mid_side_amp_Q0[ 0 ], frame_length, smooth_coef_Q16 );
     pred_Q13[ 1 ] = silk_stereo_find_predictor( &HP_ratio_Q14, HP_mid, HP_side, &state->mid_side_amp_Q0[ 2 ], frame_length, smooth_coef_Q16 );
     /* Ratio of the norms of residual and mid signals */
-    frac_Q16 = SKP_SMLABB( HP_ratio_Q14, LP_ratio_Q14, 3 );
-    frac_Q16 = SKP_min( frac_Q16, SILK_FIX_CONST( 1, 16 ) );
+    frac_Q16 = silk_SMLABB( HP_ratio_Q14, LP_ratio_Q14, 3 );
+    frac_Q16 = silk_min( frac_Q16, SILK_FIX_CONST( 1, 16 ) );
 
     /* Determine bitrate distribution between mid and side, and possibly reduce stereo width */
     total_rate_bps -= is10msFrame ? 1200 : 600;      /* Subtract approximate bitrate for coding stereo parameters */
     if (total_rate_bps < 1)
         total_rate_bps = 1;
-    min_mid_rate_bps = SKP_SMLABB( 2000, fs_kHz, 900 );
-    SKP_assert( min_mid_rate_bps < 32767 );
+    min_mid_rate_bps = silk_SMLABB( 2000, fs_kHz, 900 );
+    silk_assert( min_mid_rate_bps < 32767 );
     /* Default bitrate distribution: 8 parts for Mid and (5+3*frac) parts for Side, so mid_rate = ( 8 / ( 13 + 3 * frac ) ) * total_rate */
-    frac_3_Q16 = SKP_MUL( 3, frac_Q16 );
+    frac_3_Q16 = silk_MUL( 3, frac_Q16 );
     mid_side_rates_bps[ 0 ] = silk_DIV32_varQ( total_rate_bps, SILK_FIX_CONST( 8 + 5, 16 ) + frac_3_Q16, 16+3 );
     /* If Mid bitrate below minimum, reduce stereo width */
     if( mid_side_rates_bps[ 0 ] < min_mid_rate_bps ) {
         mid_side_rates_bps[ 0 ] = min_mid_rate_bps;
         mid_side_rates_bps[ 1 ] = total_rate_bps - mid_side_rates_bps[ 0 ];
         /* width = 4 * ( 2 * side_rate - min_rate ) / ( ( 1 + 3 * frac ) * min_rate ) */
-        width_Q14 = silk_DIV32_varQ( SKP_LSHIFT( mid_side_rates_bps[ 1 ], 1 ) - min_mid_rate_bps,
-            SKP_SMULWB( SILK_FIX_CONST( 1, 16 ) + frac_3_Q16, min_mid_rate_bps ), 14+2 );
-        width_Q14 = SKP_LIMIT( width_Q14, 0, SILK_FIX_CONST( 1, 14 ) );
+        width_Q14 = silk_DIV32_varQ( silk_LSHIFT( mid_side_rates_bps[ 1 ], 1 ) - min_mid_rate_bps,
+            silk_SMULWB( SILK_FIX_CONST( 1, 16 ) + frac_3_Q16, min_mid_rate_bps ), 14+2 );
+        width_Q14 = silk_LIMIT( width_Q14, 0, SILK_FIX_CONST( 1, 14 ) );
     } else {
         mid_side_rates_bps[ 1 ] = total_rate_bps - mid_side_rates_bps[ 0 ];
         width_Q14 = SILK_FIX_CONST( 1, 14 );
     }
 
     /* Smoother */
-    state->smth_width_Q14 = (opus_int16)SKP_SMLAWB( state->smth_width_Q14, width_Q14 - state->smth_width_Q14, smooth_coef_Q16 );
+    state->smth_width_Q14 = (opus_int16)silk_SMLAWB( state->smth_width_Q14, width_Q14 - state->smth_width_Q14, smooth_coef_Q16 );
 
     /* Reduce predictors */
-    pred_Q13[ 0 ] = SKP_RSHIFT( SKP_SMULBB( state->smth_width_Q14, pred_Q13[ 0 ] ), 14 );
-    pred_Q13[ 1 ] = SKP_RSHIFT( SKP_SMULBB( state->smth_width_Q14, pred_Q13[ 1 ] ), 14 );
+    pred_Q13[ 0 ] = silk_RSHIFT( silk_SMULBB( state->smth_width_Q14, pred_Q13[ 0 ] ), 14 );
+    pred_Q13[ 1 ] = silk_RSHIFT( silk_SMULBB( state->smth_width_Q14, pred_Q13[ 1 ] ), 14 );
 
     *mid_only_flag = 0;
     if( state->width_prev_Q14 == 0 &&
-        ( 8 * total_rate_bps < 13 * min_mid_rate_bps || SKP_SMULWB( frac_Q16, state->smth_width_Q14 ) < SILK_FIX_CONST( 0.05, 14 ) ) )
+        ( 8 * total_rate_bps < 13 * min_mid_rate_bps || silk_SMULWB( frac_Q16, state->smth_width_Q14 ) < SILK_FIX_CONST( 0.05, 14 ) ) )
     {
         width_Q14 = 0;
         /* Only encode mid channel */
@@ -133,7 +133,7 @@
         mid_side_rates_bps[ 1 ] = 0;
         *mid_only_flag = 1;
     } else if( state->width_prev_Q14 != 0 &&
-        ( 8 * total_rate_bps < 11 * min_mid_rate_bps || SKP_SMULWB( frac_Q16, state->smth_width_Q14 ) < SILK_FIX_CONST( 0.02, 14 ) ) )
+        ( 8 * total_rate_bps < 11 * min_mid_rate_bps || silk_SMULWB( frac_Q16, state->smth_width_Q14 ) < SILK_FIX_CONST( 0.02, 14 ) ) )
     {
         width_Q14 = 0;
     } else if( state->smth_width_Q14 > SILK_FIX_CONST( 0.95, 14 ) ) {
@@ -155,28 +155,28 @@
     /* Interpolate predictors and subtract prediction from side channel */
     pred0_Q13  = -state->pred_prev_Q13[ 0 ];
     pred1_Q13  = -state->pred_prev_Q13[ 1 ];
-    w_Q24      =  SKP_LSHIFT( state->width_prev_Q14, 10 );
-    denom_Q16  = SKP_DIV32_16( 1 << 16, STEREO_INTERP_LEN_MS * fs_kHz );
-    delta0_Q13 = -SKP_RSHIFT_ROUND( SKP_SMULBB( pred_Q13[ 0 ] - state->pred_prev_Q13[ 0 ], denom_Q16 ), 16 );
-    delta1_Q13 = -SKP_RSHIFT_ROUND( SKP_SMULBB( pred_Q13[ 1 ] - state->pred_prev_Q13[ 1 ], denom_Q16 ), 16 );
-    deltaw_Q24 =  SKP_LSHIFT( SKP_SMULWB( width_Q14 - state->width_prev_Q14, denom_Q16 ), 10 );
+    w_Q24      =  silk_LSHIFT( state->width_prev_Q14, 10 );
+    denom_Q16  = silk_DIV32_16( 1 << 16, STEREO_INTERP_LEN_MS * fs_kHz );
+    delta0_Q13 = -silk_RSHIFT_ROUND( silk_SMULBB( pred_Q13[ 0 ] - state->pred_prev_Q13[ 0 ], denom_Q16 ), 16 );
+    delta1_Q13 = -silk_RSHIFT_ROUND( silk_SMULBB( pred_Q13[ 1 ] - state->pred_prev_Q13[ 1 ], denom_Q16 ), 16 );
+    deltaw_Q24 =  silk_LSHIFT( silk_SMULWB( width_Q14 - state->width_prev_Q14, denom_Q16 ), 10 );
     for( n = 0; n < STEREO_INTERP_LEN_MS * fs_kHz; n++ ) {
         pred0_Q13 += delta0_Q13;
         pred1_Q13 += delta1_Q13;
         w_Q24   += deltaw_Q24;
-        sum = SKP_LSHIFT( SKP_ADD_LSHIFT( mid[ n ] + mid[ n + 2 ], mid[ n + 1 ], 1 ), 9 );      /* Q11 */
-        sum = SKP_SMLAWB( SKP_SMULWB( w_Q24, side[ n + 1 ] ), sum, pred0_Q13 );                 /* Q8  */
-        sum = SKP_SMLAWB( sum, SKP_LSHIFT( ( opus_int32 )mid[ n + 1 ], 11 ), pred1_Q13 );        /* Q8  */
-        x2[ n - 1 ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( sum, 8 ) );
+        sum = silk_LSHIFT( silk_ADD_LSHIFT( mid[ n ] + mid[ n + 2 ], mid[ n + 1 ], 1 ), 9 );      /* Q11 */
+        sum = silk_SMLAWB( silk_SMULWB( w_Q24, side[ n + 1 ] ), sum, pred0_Q13 );                 /* Q8  */
+        sum = silk_SMLAWB( sum, silk_LSHIFT( ( opus_int32 )mid[ n + 1 ], 11 ), pred1_Q13 );        /* Q8  */
+        x2[ n - 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sum, 8 ) );
     }
     pred0_Q13 = -pred_Q13[ 0 ];
     pred1_Q13 = -pred_Q13[ 1 ];
-    w_Q24     =  SKP_LSHIFT( width_Q14, 10 );
+    w_Q24     =  silk_LSHIFT( width_Q14, 10 );
     for( n = STEREO_INTERP_LEN_MS * fs_kHz; n < frame_length; n++ ) {
-        sum = SKP_LSHIFT( SKP_ADD_LSHIFT( mid[ n ] + mid[ n + 2 ], mid[ n + 1 ], 1 ), 9 );      /* Q11 */
-        sum = SKP_SMLAWB( SKP_SMULWB( w_Q24, side[ n + 1 ] ), sum, pred0_Q13 );                 /* Q8  */
-        sum = SKP_SMLAWB( sum, SKP_LSHIFT( ( opus_int32 )mid[ n + 1 ], 11 ), pred1_Q13 );        /* Q8  */
-        x2[ n - 1 ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( sum, 8 ) );
+        sum = silk_LSHIFT( silk_ADD_LSHIFT( mid[ n ] + mid[ n + 2 ], mid[ n + 1 ], 1 ), 9 );      /* Q11 */
+        sum = silk_SMLAWB( silk_SMULWB( w_Q24, side[ n + 1 ] ), sum, pred0_Q13 );                 /* Q8  */
+        sum = silk_SMLAWB( sum, silk_LSHIFT( ( opus_int32 )mid[ n + 1 ], 11 ), pred1_Q13 );        /* Q8  */
+        x2[ n - 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sum, 8 ) );
     }
     state->pred_prev_Q13[ 0 ] = (opus_int16)pred_Q13[ 0 ];
     state->pred_prev_Q13[ 1 ] = (opus_int16)pred_Q13[ 1 ];
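
The first loop of silk_stereo_LR_to_MS is a plain mid/side transform: mid = (L+R)/2 with rounding, side = (L-R)/2 with rounding and saturation (only the difference can overflow 16 bits after halving). A stand-alone sketch of just that step, leaving out the two-sample look-ahead buffering and all of the predictor/width logic above, and assuming arithmetic right shifts of negative values as the fixed-point macros do:

#include <stdint.h>

static void lr_to_ms(int16_t *mid, int16_t *side, const int16_t *L, const int16_t *R, int n)
{
    for (int i = 0; i < n; i++) {
        int32_t sum  = (int32_t)L[i] + R[i];
        int32_t diff = (int32_t)L[i] - R[i];
        int32_t m = (sum  + 1) >> 1;                  /* rounded halving, always fits int16 */
        int32_t s = (diff + 1) >> 1;                  /* can hit +32768, hence saturation */
        mid[i]  = (int16_t)m;
        side[i] = (int16_t)(s > 32767 ? 32767 : (s < -32768 ? -32768 : s));
    }
}
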
diff --git a/silk/silk_stereo_MS_to_LR.c b/silk/silk_stereo_MS_to_LR.c
index 96097b3..133755a 100644
--- a/silk/silk_stereo_MS_to_LR.c
+++ b/silk/silk_stereo_MS_to_LR.c
@@ -45,32 +45,32 @@
     opus_int32 sum, diff, pred0_Q13, pred1_Q13;
 
     /* Buffering */
-    SKP_memcpy( x1, state->sMid,  2 * sizeof( opus_int16 ) );
-    SKP_memcpy( x2, state->sSide, 2 * sizeof( opus_int16 ) );
-    SKP_memcpy( state->sMid,  &x1[ frame_length ], 2 * sizeof( opus_int16 ) );
-    SKP_memcpy( state->sSide, &x2[ frame_length ], 2 * sizeof( opus_int16 ) );
+    silk_memcpy( x1, state->sMid,  2 * sizeof( opus_int16 ) );
+    silk_memcpy( x2, state->sSide, 2 * sizeof( opus_int16 ) );
+    silk_memcpy( state->sMid,  &x1[ frame_length ], 2 * sizeof( opus_int16 ) );
+    silk_memcpy( state->sSide, &x2[ frame_length ], 2 * sizeof( opus_int16 ) );
 
     /* Interpolate predictors and add prediction to side channel */
     pred0_Q13  = state->pred_prev_Q13[ 0 ];
     pred1_Q13  = state->pred_prev_Q13[ 1 ];
-    denom_Q16  = SKP_DIV32_16( 1 << 16, STEREO_INTERP_LEN_MS * fs_kHz );
-    delta0_Q13 = SKP_RSHIFT_ROUND( SKP_SMULBB( pred_Q13[ 0 ] - state->pred_prev_Q13[ 0 ], denom_Q16 ), 16 );
-    delta1_Q13 = SKP_RSHIFT_ROUND( SKP_SMULBB( pred_Q13[ 1 ] - state->pred_prev_Q13[ 1 ], denom_Q16 ), 16 );
+    denom_Q16  = silk_DIV32_16( 1 << 16, STEREO_INTERP_LEN_MS * fs_kHz );
+    delta0_Q13 = silk_RSHIFT_ROUND( silk_SMULBB( pred_Q13[ 0 ] - state->pred_prev_Q13[ 0 ], denom_Q16 ), 16 );
+    delta1_Q13 = silk_RSHIFT_ROUND( silk_SMULBB( pred_Q13[ 1 ] - state->pred_prev_Q13[ 1 ], denom_Q16 ), 16 );
     for( n = 0; n < STEREO_INTERP_LEN_MS * fs_kHz; n++ ) {
         pred0_Q13 += delta0_Q13;
         pred1_Q13 += delta1_Q13;
-        sum = SKP_LSHIFT( SKP_ADD_LSHIFT( x1[ n ] + x1[ n + 2 ], x1[ n + 1 ], 1 ), 9 );         /* Q11 */
-        sum = SKP_SMLAWB( SKP_LSHIFT( ( opus_int32 )x2[ n + 1 ], 8 ), sum, pred0_Q13 );          /* Q8  */
-        sum = SKP_SMLAWB( sum, SKP_LSHIFT( ( opus_int32 )x1[ n + 1 ], 11 ), pred1_Q13 );         /* Q8  */
-        x2[ n + 1 ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( sum, 8 ) );
+        sum = silk_LSHIFT( silk_ADD_LSHIFT( x1[ n ] + x1[ n + 2 ], x1[ n + 1 ], 1 ), 9 );         /* Q11 */
+        sum = silk_SMLAWB( silk_LSHIFT( ( opus_int32 )x2[ n + 1 ], 8 ), sum, pred0_Q13 );          /* Q8  */
+        sum = silk_SMLAWB( sum, silk_LSHIFT( ( opus_int32 )x1[ n + 1 ], 11 ), pred1_Q13 );         /* Q8  */
+        x2[ n + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sum, 8 ) );
     }
     pred0_Q13 = pred_Q13[ 0 ];
     pred1_Q13 = pred_Q13[ 1 ];
     for( n = STEREO_INTERP_LEN_MS * fs_kHz; n < frame_length; n++ ) {
-        sum = SKP_LSHIFT( SKP_ADD_LSHIFT( x1[ n ] + x1[ n + 2 ], x1[ n + 1 ], 1 ), 9 );         /* Q11 */
-        sum = SKP_SMLAWB( SKP_LSHIFT( ( opus_int32 )x2[ n + 1 ], 8 ), sum, pred0_Q13 );          /* Q8  */
-        sum = SKP_SMLAWB( sum, SKP_LSHIFT( ( opus_int32 )x1[ n + 1 ], 11 ), pred1_Q13 );         /* Q8  */
-        x2[ n + 1 ] = (opus_int16)SKP_SAT16( SKP_RSHIFT_ROUND( sum, 8 ) );
+        sum = silk_LSHIFT( silk_ADD_LSHIFT( x1[ n ] + x1[ n + 2 ], x1[ n + 1 ], 1 ), 9 );         /* Q11 */
+        sum = silk_SMLAWB( silk_LSHIFT( ( opus_int32 )x2[ n + 1 ], 8 ), sum, pred0_Q13 );          /* Q8  */
+        sum = silk_SMLAWB( sum, silk_LSHIFT( ( opus_int32 )x1[ n + 1 ], 11 ), pred1_Q13 );         /* Q8  */
+        x2[ n + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT_ROUND( sum, 8 ) );
     }
     state->pred_prev_Q13[ 0 ] = pred_Q13[ 0 ];
     state->pred_prev_Q13[ 1 ] = pred_Q13[ 1 ];
@@ -79,7 +79,7 @@
     for( n = 0; n < frame_length; n++ ) {
         sum  = x1[ n + 1 ] + (opus_int32)x2[ n + 1 ];
         diff = x1[ n + 1 ] - (opus_int32)x2[ n + 1 ];
-        x1[ n + 1 ] = (opus_int16)SKP_SAT16( sum );
-        x2[ n + 1 ] = (opus_int16)SKP_SAT16( diff );
+        x1[ n + 1 ] = (opus_int16)silk_SAT16( sum );
+        x2[ n + 1 ] = (opus_int16)silk_SAT16( diff );
     }
 }
diff --git a/silk/silk_stereo_decode_pred.c b/silk/silk_stereo_decode_pred.c
index 027a236..e9bd571 100644
--- a/silk/silk_stereo_decode_pred.c
+++ b/silk/silk_stereo_decode_pred.c
@@ -42,7 +42,7 @@
 
     /* Entropy decoding */
     n = ec_dec_icdf( psRangeDec, silk_stereo_pred_joint_iCDF, 8 );
-    ix[ 0 ][ 2 ] = SKP_DIV32_16( n, 5 );
+    ix[ 0 ][ 2 ] = silk_DIV32_16( n, 5 );
     ix[ 1 ][ 2 ] = n - 5 * ix[ 0 ][ 2 ];
     for( n = 0; n < 2; n++ ) {
         ix[ n ][ 0 ] = ec_dec_icdf( psRangeDec, silk_uniform3_iCDF, 8 );
@@ -53,9 +53,9 @@
     for( n = 0; n < 2; n++ ) {
         ix[ n ][ 0 ] += 3 * ix[ n ][ 2 ];
         low_Q13 = silk_stereo_pred_quant_Q13[ ix[ n ][ 0 ] ];
-        step_Q13 = SKP_SMULWB( silk_stereo_pred_quant_Q13[ ix[ n ][ 0 ] + 1 ] - low_Q13,
+        step_Q13 = silk_SMULWB( silk_stereo_pred_quant_Q13[ ix[ n ][ 0 ] + 1 ] - low_Q13,
             SILK_FIX_CONST( 0.5 / STEREO_QUANT_SUB_STEPS, 16 ) );
-        pred_Q13[ n ] = SKP_SMLABB( low_Q13, step_Q13, 2 * ix[ n ][ 1 ] + 1 );
+        pred_Q13[ n ] = silk_SMLABB( low_Q13, step_Q13, 2 * ix[ n ][ 1 ] + 1 );
     }
 
     /* Subtract second from first predictor (helps when actually applying these) */
diff --git a/silk/silk_stereo_encode_pred.c b/silk/silk_stereo_encode_pred.c
index a2ee8f3..ad83ab0 100644
--- a/silk/silk_stereo_encode_pred.c
+++ b/silk/silk_stereo_encode_pred.c
@@ -41,11 +41,11 @@
 
     /* Entropy coding */
     n = 5 * ix[ 0 ][ 2 ] + ix[ 1 ][ 2 ];
-    SKP_assert( n < 25 );
+    silk_assert( n < 25 );
     ec_enc_icdf( psRangeEnc, n, silk_stereo_pred_joint_iCDF, 8 );
     for( n = 0; n < 2; n++ ) {
-        SKP_assert( ix[ n ][ 0 ] < 3 );
-        SKP_assert( ix[ n ][ 1 ] < STEREO_QUANT_SUB_STEPS );
+        silk_assert( ix[ n ][ 0 ] < 3 );
+        silk_assert( ix[ n ][ 1 ] < STEREO_QUANT_SUB_STEPS );
         ec_enc_icdf( psRangeEnc, ix[ n ][ 0 ], silk_uniform3_iCDF, 8 );
         ec_enc_icdf( psRangeEnc, ix[ n ][ 1 ], silk_uniform5_iCDF, 8 );
     }
diff --git a/silk/silk_stereo_find_predictor.c b/silk/silk_stereo_find_predictor.c
index 20b29ff..d491956 100644
--- a/silk/silk_stereo_find_predictor.c
+++ b/silk/silk_stereo_find_predictor.c
@@ -47,27 +47,27 @@
     /* Find  predictor */
     silk_sum_sqr_shift( &nrgx, &scale1, x, length );
     silk_sum_sqr_shift( &nrgy, &scale2, y, length );
-    scale = SKP_max( scale1, scale2 );
+    scale = silk_max( scale1, scale2 );
     scale = scale + ( scale & 1 );          /* make even */
-    nrgy = SKP_RSHIFT32( nrgy, scale - scale2 );
-    nrgx = SKP_RSHIFT32( nrgx, scale - scale1 );
-    nrgx = SKP_max( nrgx, 1 );
+    nrgy = silk_RSHIFT32( nrgy, scale - scale2 );
+    nrgx = silk_RSHIFT32( nrgx, scale - scale1 );
+    nrgx = silk_max( nrgx, 1 );
     corr = silk_inner_prod_aligned_scale( x, y, scale, length );
     pred_Q13 = silk_DIV32_varQ( corr, nrgx, 13 );
-    pred_Q13 = SKP_SAT16( pred_Q13 );
+    pred_Q13 = silk_SAT16( pred_Q13 );
 
     /* Smoothed mid and residual norms */
-    SKP_assert( smooth_coef_Q16 < 32768 );
-    scale = SKP_RSHIFT( scale, 1 );
-    mid_res_amp_Q0[ 0 ] = SKP_SMLAWB( mid_res_amp_Q0[ 0 ], SKP_LSHIFT( silk_SQRT_APPROX( nrgx ), scale ) - mid_res_amp_Q0[ 0 ],
+    silk_assert( smooth_coef_Q16 < 32768 );
+    scale = silk_RSHIFT( scale, 1 );
+    mid_res_amp_Q0[ 0 ] = silk_SMLAWB( mid_res_amp_Q0[ 0 ], silk_LSHIFT( silk_SQRT_APPROX( nrgx ), scale ) - mid_res_amp_Q0[ 0 ],
         smooth_coef_Q16 );
-    nrgy = SKP_SUB_LSHIFT32( nrgy, SKP_SMULWB( corr, pred_Q13 ), 3 );
-    mid_res_amp_Q0[ 1 ] = SKP_SMLAWB( mid_res_amp_Q0[ 1 ], SKP_LSHIFT( silk_SQRT_APPROX( nrgy ), scale ) - mid_res_amp_Q0[ 1 ],
+    nrgy = silk_SUB_LSHIFT32( nrgy, silk_SMULWB( corr, pred_Q13 ), 3 );
+    mid_res_amp_Q0[ 1 ] = silk_SMLAWB( mid_res_amp_Q0[ 1 ], silk_LSHIFT( silk_SQRT_APPROX( nrgy ), scale ) - mid_res_amp_Q0[ 1 ],
         smooth_coef_Q16 );
 
     /* Ratio of smoothed residual and mid norms */
-    *ratio_Q14 = silk_DIV32_varQ( mid_res_amp_Q0[ 1 ], SKP_max( mid_res_amp_Q0[ 0 ], 1 ), 14 );
-    *ratio_Q14 = SKP_LIMIT( *ratio_Q14, 0, 32767 );
+    *ratio_Q14 = silk_DIV32_varQ( mid_res_amp_Q0[ 1 ], silk_max( mid_res_amp_Q0[ 0 ], 1 ), 14 );
+    *ratio_Q14 = silk_LIMIT( *ratio_Q14, 0, 32767 );
 
     return pred_Q13;
 }
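
silk_stereo_find_predictor computes the least-squares gain that predicts the side channel from the mid channel, then turns the leftover side energy into an amplitude ratio that feeds the width and bitrate decisions in silk_stereo_LR_to_MS. A floating-point view of that calculation, without the smoothing state (mid_res_amp_Q0) or the Q13/Q14 quantization:

#include <math.h>

static double stereo_predictor_ref(const float *x, const float *y, int n, double *ratio)
{
    double nrgx = 1e-9, nrgy = 0.0, corr = 0.0;       /* tiny bias avoids divide-by-zero */
    for (int i = 0; i < n; i++) {
        nrgx += (double)x[i] * x[i];
        nrgy += (double)y[i] * y[i];
        corr += (double)x[i] * y[i];
    }
    double pred = corr / nrgx;                        /* least-squares predictor */
    double res  = nrgy - pred * corr;                 /* residual energy after prediction */
    if (res < 0.0) res = 0.0;
    *ratio = sqrt(res) / sqrt(nrgx);                  /* residual-to-mid amplitude ratio */
    return pred;
}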
diff --git a/silk/silk_stereo_quant_pred.c b/silk/silk_stereo_quant_pred.c
index 9ff9550..3bd2836 100644
--- a/silk/silk_stereo_quant_pred.c
+++ b/silk/silk_stereo_quant_pred.c
@@ -44,14 +44,14 @@
     /* Quantize */
     for( n = 0; n < 2; n++ ) {
         /* Brute-force search over quantization levels */
-        err_min_Q13 = SKP_int32_MAX;
+        err_min_Q13 = silk_int32_MAX;
         for( i = 0; i < STEREO_QUANT_TAB_SIZE - 1; i++ ) {
             low_Q13 = silk_stereo_pred_quant_Q13[ i ];
-            step_Q13 = SKP_SMULWB( silk_stereo_pred_quant_Q13[ i + 1 ] - low_Q13,
+            step_Q13 = silk_SMULWB( silk_stereo_pred_quant_Q13[ i + 1 ] - low_Q13,
                 SILK_FIX_CONST( 0.5 / STEREO_QUANT_SUB_STEPS, 16 ) );
             for( j = 0; j < STEREO_QUANT_SUB_STEPS; j++ ) {
-                lvl_Q13 = SKP_SMLABB( low_Q13, step_Q13, 2 * j + 1 );
-                err_Q13 = SKP_abs( pred_Q13[ n ] - lvl_Q13 );
+                lvl_Q13 = silk_SMLABB( low_Q13, step_Q13, 2 * j + 1 );
+                err_Q13 = silk_abs( pred_Q13[ n ] - lvl_Q13 );
                 if( err_Q13 < err_min_Q13 ) {
                     err_min_Q13 = err_Q13;
                     quant_pred_Q13 = lvl_Q13;
@@ -64,7 +64,7 @@
             }
         }
         done:
-        ix[ n ][ 2 ]  = SKP_DIV32_16( ix[ n ][ 0 ], 3 );
+        ix[ n ][ 2 ]  = silk_DIV32_16( ix[ n ][ 0 ], 3 );
         ix[ n ][ 0 ] -= ix[ n ][ 2 ] * 3;
         pred_Q13[ n ] = quant_pred_Q13;
     }
diff --git a/silk/silk_structs.h b/silk/silk_structs.h
index 854d0cd..e4fd1d7 100644
--- a/silk/silk_structs.h
+++ b/silk/silk_structs.h
@@ -309,7 +309,7 @@
     opus_int             pitchL[ MAX_NB_SUBFR ];
     opus_int32           Gains_Q16[ MAX_NB_SUBFR ];
     /* holds interpolated and final coefficients, 4-byte aligned */
-    SKP_DWORD_ALIGN opus_int16 PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ];
+    silk_DWORD_ALIGN opus_int16 PredCoef_Q12[ 2 ][ MAX_LPC_ORDER ];
     opus_int16           LTPCoef_Q14[ LTP_ORDER * MAX_NB_SUBFR ];
     opus_int             LTP_scale_Q14;
 } silk_decoder_control;
diff --git a/silk/silk_sum_sqr_shift.c b/silk/silk_sum_sqr_shift.c
index 70a8aa3..0f2ed98 100644
--- a/silk/silk_sum_sqr_shift.c
+++ b/silk/silk_sum_sqr_shift.c
@@ -47,34 +47,34 @@
     shft = 0;
     len--;
     for( i = 0; i < len; i += 2 ) {
-        nrg = SKP_SMLABB_ovflw( nrg, x[ i ], x[ i ] );
-        nrg = SKP_SMLABB_ovflw( nrg, x[ i + 1 ], x[ i + 1 ] );
+        nrg = silk_SMLABB_ovflw( nrg, x[ i ], x[ i ] );
+        nrg = silk_SMLABB_ovflw( nrg, x[ i + 1 ], x[ i + 1 ] );
         if( nrg < 0 ) {
             /* Scale down */
-            nrg = (opus_int32)SKP_RSHIFT_uint( (opus_uint32)nrg, 2 );
+            nrg = (opus_int32)silk_RSHIFT_uint( (opus_uint32)nrg, 2 );
             shft = 2;
             break;
         }
     }
     for( ; i < len; i += 2 ) {
-        nrg_tmp = SKP_SMULBB( x[ i ], x[ i ] );
-        nrg_tmp = SKP_SMLABB_ovflw( nrg_tmp, x[ i + 1 ], x[ i + 1 ] );
-        nrg = (opus_int32)SKP_ADD_RSHIFT_uint( nrg, (opus_uint32)nrg_tmp, shft );
+        nrg_tmp = silk_SMULBB( x[ i ], x[ i ] );
+        nrg_tmp = silk_SMLABB_ovflw( nrg_tmp, x[ i + 1 ], x[ i + 1 ] );
+        nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, (opus_uint32)nrg_tmp, shft );
         if( nrg < 0 ) {
             /* Scale down */
-            nrg = (opus_int32)SKP_RSHIFT_uint( (opus_uint32)nrg, 2 );
+            nrg = (opus_int32)silk_RSHIFT_uint( (opus_uint32)nrg, 2 );
             shft += 2;
         }
     }
     if( i == len ) {
         /* One sample left to process */
-        nrg_tmp = SKP_SMULBB( x[ i ], x[ i ] );
-        nrg = (opus_int32)SKP_ADD_RSHIFT_uint( nrg, nrg_tmp, shft );
+        nrg_tmp = silk_SMULBB( x[ i ], x[ i ] );
+        nrg = (opus_int32)silk_ADD_RSHIFT_uint( nrg, nrg_tmp, shft );
     }
 
     /* Make sure to have at least one extra leading zero (two leading zeros in total) */
     if( nrg & 0xC0000000 ) {
-        nrg = SKP_RSHIFT_uint( (opus_uint32)nrg, 2 );
+        nrg = silk_RSHIFT_uint( (opus_uint32)nrg, 2 );
         shft += 2;
     }
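
silk_sum_sqr_shift keeps its 32-bit energy accumulator from overflowing by coarsening it whenever it grows too large, reporting the applied shift together with the energy so callers can compare results at a common scale. A simplified variant that checks after every sample instead of every pair, which also directly enforces the final two-leading-zero property:

#include <stdint.h>

static void sum_sqr_shift_ref(int32_t *energy, int *shift, const int16_t *x, int len)
{
    uint32_t nrg = 0;
    int shft = 0;
    for (int i = 0; i < len; i++) {
        uint32_t sq = (uint32_t)((int32_t)x[i] * x[i]);   /* at most 2^30, never negative */
        nrg += sq >> shft;
        if (nrg & 0xC0000000u) {                          /* fewer than two leading zeros */
            nrg >>= 2;
            shft += 2;
        }
    }
    *energy = (int32_t)nrg;                               /* always < 2^30, so it fits */
    *shift  = shft;
}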
 
diff --git a/silk/silk_typedef.h b/silk/silk_typedef.h
index 6183b9c..53bdc7c 100644
--- a/silk/silk_typedef.h
+++ b/silk/silk_typedef.h
@@ -30,8 +30,8 @@
 
 #include "opus_types.h"
 
-#ifndef SKP_USE_DOUBLE_PRECISION_FLOATS
-#define SKP_USE_DOUBLE_PRECISION_FLOATS     0
+#ifndef silk_USE_DOUBLE_PRECISION_FLOATS
+#define silk_USE_DOUBLE_PRECISION_FLOATS     0
 #endif
 
 #include <float.h>
@@ -39,49 +39,49 @@
 #include <stdint.h>
 #endif
 
-#define SKP_int_ptr_size intptr_t
+#define silk_int_ptr_size intptr_t
 
-#if SKP_USE_DOUBLE_PRECISION_FLOATS
-# define SKP_float      double
-# define SKP_float_MAX  DBL_MAX
+#if silk_USE_DOUBLE_PRECISION_FLOATS
+# define silk_float      double
+# define silk_float_MAX  DBL_MAX
 #else
-# define SKP_float      float
-# define SKP_float_MAX  FLT_MAX
+# define silk_float      float
+# define silk_float_MAX  FLT_MAX
 #endif
 
 #ifdef _WIN32
-# define SKP_STR_CASEINSENSITIVE_COMPARE(x, y) _stricmp(x, y)
+# define silk_STR_CASEINSENSITIVE_COMPARE(x, y) _stricmp(x, y)
 #else
-# define SKP_STR_CASEINSENSITIVE_COMPARE(x, y) strcasecmp(x, y)
+# define silk_STR_CASEINSENSITIVE_COMPARE(x, y) strcasecmp(x, y)
 #endif
 
-#define SKP_int64_MAX   ((opus_int64)0x7FFFFFFFFFFFFFFFLL)   /*  2^63 - 1 */
-#define SKP_int64_MIN   ((opus_int64)0x8000000000000000LL)   /* -2^63 */
-#define SKP_int32_MAX   0x7FFFFFFF                           /*  2^31 - 1 =  2147483647 */
-#define SKP_int32_MIN   ((opus_int32)0x80000000)             /* -2^31     = -2147483648 */
-#define SKP_int16_MAX   0x7FFF                               /*  2^15 - 1 =  32767 */
-#define SKP_int16_MIN   ((opus_int16)0x8000)                 /* -2^15     = -32768 */
-#define SKP_int8_MAX    0x7F                                 /*  2^7 - 1  =  127 */
-#define SKP_int8_MIN    ((opus_int8)0x80)                    /* -2^7      = -128 */
+#define silk_int64_MAX   ((opus_int64)0x7FFFFFFFFFFFFFFFLL)   /*  2^63 - 1 */
+#define silk_int64_MIN   ((opus_int64)0x8000000000000000LL)   /* -2^63 */
+#define silk_int32_MAX   0x7FFFFFFF                           /*  2^31 - 1 =  2147483647 */
+#define silk_int32_MIN   ((opus_int32)0x80000000)             /* -2^31     = -2147483648 */
+#define silk_int16_MAX   0x7FFF                               /*  2^15 - 1 =  32767 */
+#define silk_int16_MIN   ((opus_int16)0x8000)                 /* -2^15     = -32768 */
+#define silk_int8_MAX    0x7F                                 /*  2^7 - 1  =  127 */
+#define silk_int8_MIN    ((opus_int8)0x80)                    /* -2^7      = -128 */
 
-#define SKP_uint32_MAX  0xFFFFFFFF  /* 2^32 - 1 = 4294967295 */
-#define SKP_uint32_MIN  0x00000000
-#define SKP_uint16_MAX  0xFFFF      /* 2^16 - 1 = 65535 */
-#define SKP_uint16_MIN  0x0000
-#define SKP_uint8_MAX   0xFF        /*  2^8 - 1 = 255 */
-#define SKP_uint8_MIN   0x00
+#define silk_uint32_MAX  0xFFFFFFFF  /* 2^32 - 1 = 4294967295 */
+#define silk_uint32_MIN  0x00000000
+#define silk_uint16_MAX  0xFFFF      /* 2^16 - 1 = 65535 */
+#define silk_uint16_MIN  0x0000
+#define silk_uint8_MAX   0xFF        /*  2^8 - 1 = 255 */
+#define silk_uint8_MIN   0x00
 
-#define SKP_TRUE        1
-#define SKP_FALSE       0
+#define silk_TRUE        1
+#define silk_FALSE       0
 
 /* assertions */
 #if (defined _WIN32 && !defined _WINCE && !defined(__GNUC__) && !defined(NO_ASSERTS))
-# ifndef SKP_assert
+# ifndef silk_assert
 #  include <crtdbg.h>      /* ASSERTE() */
-#  define SKP_assert(COND)   _ASSERTE(COND)
+#  define silk_assert(COND)   _ASSERTE(COND)
 # endif
 #else
-# define SKP_assert(COND)
+# define silk_assert(COND)
 #endif
 
 #endif
diff --git a/src/opus_decoder.c b/src/opus_decoder.c
index 254548b..5b4d2cf 100644
--- a/src/opus_decoder.c
+++ b/src/opus_decoder.c
@@ -277,7 +277,7 @@
                 DecControl.internalSampleRate = 16000;
             } else {
             	DecControl.internalSampleRate = 16000;
-                SKP_assert( 0 );
+                silk_assert( 0 );
             }
         } else {
             /* Hybrid mode */
diff --git a/src/opus_encoder.c b/src/opus_encoder.c
index 72eedf6..0a47440 100644
--- a/src/opus_encoder.c
+++ b/src/opus_encoder.c
@@ -213,7 +213,7 @@
        st->delay_compensation += 2;
 
     st->hybrid_stereo_width_Q14             = 1 << 14;
-    st->variable_HP_smth2_Q15 = SKP_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 );
+    st->variable_HP_smth2_Q15 = silk_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 );
     st->first = 1;
     st->mode = MODE_HYBRID;
     st->bandwidth = OPUS_BANDWIDTH_FULLBAND;
@@ -298,22 +298,22 @@
    opus_int32 B_Q28[ 3 ], A_Q28[ 2 ];
    opus_int32 Fc_Q19, r_Q28, r_Q22;
 
-   SKP_assert( cutoff_Hz <= SKP_int32_MAX / SILK_FIX_CONST( 1.5 * 3.14159 / 1000, 19 ) );
-   Fc_Q19 = SKP_DIV32_16( SKP_SMULBB( SILK_FIX_CONST( 1.5 * 3.14159 / 1000, 19 ), cutoff_Hz ), Fs/1000 );
-   SKP_assert( Fc_Q19 > 0 && Fc_Q19 < 32768 );
+   silk_assert( cutoff_Hz <= silk_int32_MAX / SILK_FIX_CONST( 1.5 * 3.14159 / 1000, 19 ) );
+   Fc_Q19 = silk_DIV32_16( silk_SMULBB( SILK_FIX_CONST( 1.5 * 3.14159 / 1000, 19 ), cutoff_Hz ), Fs/1000 );
+   silk_assert( Fc_Q19 > 0 && Fc_Q19 < 32768 );
 
-   r_Q28 = SILK_FIX_CONST( 1.0, 28 ) - SKP_MUL( SILK_FIX_CONST( 0.92, 9 ), Fc_Q19 );
+   r_Q28 = SILK_FIX_CONST( 1.0, 28 ) - silk_MUL( SILK_FIX_CONST( 0.92, 9 ), Fc_Q19 );
 
    /* b = r * [ 1; -2; 1 ]; */
    /* a = [ 1; -2 * r * ( 1 - 0.5 * Fc^2 ); r^2 ]; */
    B_Q28[ 0 ] = r_Q28;
-   B_Q28[ 1 ] = SKP_LSHIFT( -r_Q28, 1 );
+   B_Q28[ 1 ] = silk_LSHIFT( -r_Q28, 1 );
    B_Q28[ 2 ] = r_Q28;
 
    /* -r * ( 2 - Fc * Fc ); */
-   r_Q22  = SKP_RSHIFT( r_Q28, 6 );
-   A_Q28[ 0 ] = SKP_SMULWW( r_Q22, SKP_SMULWW( Fc_Q19, Fc_Q19 ) - SILK_FIX_CONST( 2.0,  22 ) );
-   A_Q28[ 1 ] = SKP_SMULWW( r_Q22, r_Q22 );
+   r_Q22  = silk_RSHIFT( r_Q28, 6 );
+   A_Q28[ 0 ] = silk_SMULWW( r_Q22, silk_SMULWW( Fc_Q19, Fc_Q19 ) - SILK_FIX_CONST( 2.0,  22 ) );
+   A_Q28[ 1 ] = silk_SMULWW( r_Q22, r_Q22 );
 
 #ifdef FIXED_POINT
    silk_biquad_alt( in, B_Q28, A_Q28, hp_mem, out, len, channels );
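
The hp_cutoff() hunk designs a second-order high-pass on the fly: a double zero at DC scaled by r and a pole pair of radius r = 1 - 0.92*Fc, with Fc = 1.5*pi*cutoff_Hz/Fs; the Q28/Q22/Q19 arithmetic just evaluates the closed-form coefficients given in its own comments. A floating-point reference of the same design (function name illustrative), using the same truncated 3.14159 constant as the patch:

/* b = r * [ 1, -2, 1 ];   a = [ 1, -2*r*(1 - Fc*Fc/2), r*r ] */
static void hp_cutoff_ref(double cutoff_hz, double fs_hz, double b[3], double a[3])
{
    double Fc = 1.5 * 3.14159 * cutoff_hz / fs_hz;
    double r  = 1.0 - 0.92 * Fc;
    b[0] = r;    b[1] = -2.0 * r;  b[2] = r;
    a[0] = 1.0;  a[1] = -2.0 * r * (1.0 - 0.5 * Fc * Fc);  a[2] = r * r;
}
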
@@ -635,15 +635,15 @@
        pcm_buf[i] = st->delay_buffer[(st->encoder_buffer-delay_compensation)*st->channels+i];
 
     if (st->mode == MODE_CELT_ONLY)
-       hp_freq_smth1 = SKP_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 );
+       hp_freq_smth1 = silk_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 );
     else
        hp_freq_smth1 = ((silk_encoder*)silk_enc)->state_Fxx[0].sCmn.variable_HP_smth1_Q15;
 
-    st->variable_HP_smth2_Q15 = SKP_SMLAWB( st->variable_HP_smth2_Q15,
+    st->variable_HP_smth2_Q15 = silk_SMLAWB( st->variable_HP_smth2_Q15,
           hp_freq_smth1 - st->variable_HP_smth2_Q15, SILK_FIX_CONST( VARIABLE_HP_SMTH_COEF2, 16 ) );
 
     /* convert from log scale to Hertz */
-    cutoff_Hz = silk_log2lin( SKP_RSHIFT( st->variable_HP_smth2_Q15, 8 ) );
+    cutoff_Hz = silk_log2lin( silk_RSHIFT( st->variable_HP_smth2_Q15, 8 ) );
 
     if (st->application == OPUS_APPLICATION_VOIP)
     {
@@ -700,7 +700,7 @@
         } else if (st->bandwidth == OPUS_BANDWIDTH_MEDIUMBAND) {
             st->silk_mode.desiredInternalSampleRate = 12000;
         } else {
-            SKP_assert( st->mode == MODE_HYBRID || st->bandwidth == OPUS_BANDWIDTH_WIDEBAND );
+            silk_assert( st->mode == MODE_HYBRID || st->bandwidth == OPUS_BANDWIDTH_WIDEBAND );
             st->silk_mode.desiredInternalSampleRate = 16000;
         }
         if( st->mode == MODE_HYBRID ) {
@@ -753,7 +753,7 @@
                 silk_internal_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
             }
         } else {
-            SKP_assert( st->silk_mode.internalSampleRate == 16000 );
+            silk_assert( st->silk_mode.internalSampleRate == 16000 );
         }
     }
 
@@ -1207,7 +1207,7 @@
            st->first = 1;
            st->mode = MODE_HYBRID;
            st->bandwidth = OPUS_BANDWIDTH_FULLBAND;
-           st->variable_HP_smth2_Q15 = SKP_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 );
+           st->variable_HP_smth2_Q15 = silk_LSHIFT( silk_lin2log( VARIABLE_HP_MIN_CUTOFF_HZ ), 8 );
         }
         break;
         case OPUS_SET_FORCE_MODE_REQUEST: