Match existing type usage better.

This makes a variety of small changes to synchronize bits of code using different types, remove useless code or casts, and add explicit casts in some places previously doing implicit ones.  For example:

* Change a few type declarations to better match how the majority of code uses those objects.
* Eliminate "< 0" checks for unsigned values.
* Replace "(float)sin(x)", where |x| is also a float, with "sinf(x)", and similar.
* Add casts to uint32_t in many places where timestamps were used and the existing code stored signed values into the unsigned objects.
* Remove downcasts when the results would be passed to a larger type, e.g. calling "foo((int16_t)x)" with an int |x| when foo() takes an int instead of an int16_t.
* Similarly, add casts when passing a larger type to a function taking a smaller one.
* Add casts to int16_t when doing something like "int16_t = int16_t + int16_t" as the "+" operation would implicitly upconvert to int, and similar.
* Use "false" instead of "0" for setting a bool.
* Shift a few temp types when doing a multi-stage calculation involving typecasts, so as to put the most logical/semantically correct type possible into the temps.  For example, when doing "int foo = int + int; size_t bar = (size_t)foo + size_t;", we might change |foo| to a size_t and move the cast if it makes more sense for |foo| to be represented as a size_t.

BUG=none
R=andrew@webrtc.org, asapersson@webrtc.org, henrika@webrtc.org, juberti@webrtc.org, kwiberg@webrtc.org
TBR=andrew, asapersson, henrika

Review URL: https://codereview.webrtc.org/1168753002

Cr-Commit-Position: refs/heads/master@{#9419}
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule.cc b/talk/app/webrtc/test/fakeaudiocapturemodule.cc
index c6339d3..e52b686 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule.cc
@@ -45,7 +45,7 @@
 // Constants here are derived by running VoE using a real ADM.
 // The constants correspond to 10ms of mono audio at 44kHz.
 static const int kTimePerFrameMs = 10;
-static const int kNumberOfChannels = 1;
+static const uint8_t kNumberOfChannels = 1;
 static const int kSamplesPerSecond = 44000;
 static const int kTotalDelayMs = 0;
 static const int kClockDriftMs = 0;
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
index 8fcbfd7..821b44c 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -66,7 +66,7 @@
                                           const bool keyPressed,
                                           uint32_t& newMicLevel) {
     rec_buffer_bytes_ = nSamples * nBytesPerSample;
-    if ((rec_buffer_bytes_ <= 0) ||
+    if ((rec_buffer_bytes_ == 0) ||
         (rec_buffer_bytes_ > FakeAudioCaptureModule::kNumberSamples *
          FakeAudioCaptureModule::kNumberBytesPerSample)) {
       ADD_FAILURE();
diff --git a/webrtc/common_audio/fft4g.c b/webrtc/common_audio/fft4g.c
index cbc4dc3..ad5f383 100644
--- a/webrtc/common_audio/fft4g.c
+++ b/webrtc/common_audio/fft4g.c
@@ -648,7 +648,7 @@
     ip[1] = 1;
     if (nw > 2) {
         nwh = nw >> 1;
-        delta = (float)atan(1.0f) / nwh;
+        delta = atanf(1.0f) / nwh;
         w[0] = 1;
         w[1] = 0;
         w[nwh] = (float)cos(delta * nwh);
@@ -676,7 +676,7 @@
     ip[1] = nc;
     if (nc > 1) {
         nch = nc >> 1;
-        delta = (float)atan(1.0f) / nch;
+        delta = atanf(1.0f) / nch;
         c[0] = (float)cos(delta * nch);
         c[nch] = 0.5f * c[0];
         for (j = 1; j < nch; j++) {
diff --git a/webrtc/common_audio/lapped_transform_unittest.cc b/webrtc/common_audio/lapped_transform_unittest.cc
index c30651c..3becfe1 100644
--- a/webrtc/common_audio/lapped_transform_unittest.cc
+++ b/webrtc/common_audio/lapped_transform_unittest.cc
@@ -51,11 +51,12 @@
                                  complex<float>* const* out_block) {
     CHECK_EQ(in_channels, out_channels);
 
-    float full_length = (frames - 1) * 2;
+    int full_length = (frames - 1) * 2;
     ++block_num_;
 
     if (block_num_ > 0) {
-      ASSERT_NEAR(in_block[0][0].real(), full_length, 1e-5f);
+      ASSERT_NEAR(in_block[0][0].real(), static_cast<float>(full_length),
+                  1e-5f);
       ASSERT_NEAR(in_block[0][0].imag(), 0.0f, 1e-5f);
       for (int i = 1; i < frames; ++i) {
         ASSERT_NEAR(in_block[0][i].real(), 0.0f, 1e-5f);
diff --git a/webrtc/common_audio/real_fourier.cc b/webrtc/common_audio/real_fourier.cc
index 30c8ee3..cb707e4 100644
--- a/webrtc/common_audio/real_fourier.cc
+++ b/webrtc/common_audio/real_fourier.cc
@@ -31,7 +31,7 @@
 
 int RealFourier::FftOrder(int length) {
   CHECK_GT(length, 0);
-  return WebRtcSpl_GetSizeInBits(length - 1);
+  return WebRtcSpl_GetSizeInBits(static_cast<uint32_t>(length - 1));
 }
 
 int RealFourier::FftLength(int order) {
diff --git a/webrtc/common_audio/signal_processing/auto_correlation.c b/webrtc/common_audio/signal_processing/auto_correlation.c
index fed1312..405a08e 100644
--- a/webrtc/common_audio/signal_processing/auto_correlation.c
+++ b/webrtc/common_audio/signal_processing/auto_correlation.c
@@ -36,7 +36,7 @@
     scaling = 0;
   } else {
     // Number of bits in the sum loop.
-    int nbits = WebRtcSpl_GetSizeInBits(in_vector_length);
+    int nbits = WebRtcSpl_GetSizeInBits((uint32_t)in_vector_length);
     // Number of bits to normalize smax.
     int t = WebRtcSpl_NormW32(WEBRTC_SPL_MUL(smax, smax));
 
diff --git a/webrtc/common_audio/signal_processing/complex_fft.c b/webrtc/common_audio/signal_processing/complex_fft.c
index aaeda52..f21b7d8 100644
--- a/webrtc/common_audio/signal_processing/complex_fft.c
+++ b/webrtc/common_audio/signal_processing/complex_fft.c
@@ -181,7 +181,7 @@
         shift = 0;
         round2 = 8192;
 
-        tmp32 = (int32_t)WebRtcSpl_MaxAbsValueW16(frfi, 2 * n);
+        tmp32 = WebRtcSpl_MaxAbsValueW16(frfi, 2 * n);
         if (tmp32 > 13573)
         {
             shift++;
diff --git a/webrtc/common_audio/signal_processing/get_scaling_square.c b/webrtc/common_audio/signal_processing/get_scaling_square.c
index 9b6049c..3b9171d 100644
--- a/webrtc/common_audio/signal_processing/get_scaling_square.c
+++ b/webrtc/common_audio/signal_processing/get_scaling_square.c
@@ -21,7 +21,7 @@
                                    int in_vector_length,
                                    int times)
 {
-    int16_t nbits = WebRtcSpl_GetSizeInBits(times);
+    int16_t nbits = WebRtcSpl_GetSizeInBits((uint32_t)times);
     int i;
     int16_t smax = -1;
     int16_t sabs;
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index 8135b98..d16dd3b 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -77,7 +77,7 @@
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
     encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
                                  encoded_.size(), &encoded_[0]);
-    timestamp_ += num_audio_samples_10ms_;
+    timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
   }
 
   // Expect |num_calls| calls to the encoder, all successful. The last call
diff --git a/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
index 9862f12..cb7aa45 100644
--- a/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
+++ b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
@@ -370,7 +370,7 @@
     }
     if ((i == 93) && (index == 0))
       index = 94;
-    SIDdata[0] = index;
+    SIDdata[0] = (uint8_t)index;
 
     /* Quantize coefficients with tweak for WebRtc implementation of RFC3389. */
     if (inst->enc_nrOfCoefs == WEBRTC_CNG_MAX_LPC_ORDER) {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
index a775a02..2ee9f6c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
@@ -108,8 +108,8 @@
 
   /* Find the highest absolute value to calculate proper
      vector scale factor (so that it uses 12 bits) */
-  temp1 = WebRtcSpl_MaxAbsValueW16(buf, (int16_t)lMem);
-  temp2 = WebRtcSpl_MaxAbsValueW16(target, (int16_t)lTarget);
+  temp1 = WebRtcSpl_MaxAbsValueW16(buf, lMem);
+  temp2 = WebRtcSpl_MaxAbsValueW16(target, lTarget);
 
   if ((temp1>0)&&(temp2>0)) {
     temp1 = WEBRTC_SPL_MAX(temp1, temp2);
@@ -332,7 +332,8 @@
     /* Subtract the best codebook vector, according
        to measure, from the target vector */
 
-    WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain), (int32_t)8192, (int16_t)14, (int)lTarget);
+    WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain),
+                                      (int32_t)8192, (int16_t)14, lTarget);
 
     /* record quantized gain */
     gains[stage+1] = bestGain;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decode.c b/webrtc/modules/audio_coding/codecs/ilbc/decode.c
index 3a2e5a2..035460b 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decode.c
@@ -206,7 +206,7 @@
     }
 
     /* Store lag (it is needed if next packet is lost) */
-    (*iLBCdec_inst).last_lag = (int)lag;
+    (*iLBCdec_inst).last_lag = lag;
 
     /* copy data and run synthesis filter */
     WEBRTC_SPL_MEMCPY_W16(data, decresidual, iLBCdec_inst->blockl);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c b/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
index c04fd99..de42ea9 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
@@ -66,7 +66,7 @@
 
     /* setup memory */
 
-    WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCdec_inst->state_short_len));
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCdec_inst->state_short_len);
     WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCdec_inst->state_short_len, decresidual+start_pos,
                           iLBCdec_inst->state_short_len);
 
@@ -76,8 +76,7 @@
         &decresidual[start_pos+iLBCdec_inst->state_short_len],
         iLBC_encbits->cb_index, iLBC_encbits->gain_index,
         mem+CB_MEML-ST_MEM_L_TBL,
-        ST_MEM_L_TBL, (int16_t)diff
-                              );
+        ST_MEM_L_TBL, diff);
 
   }
   else {/* put adaptive part in the beginning */
@@ -87,7 +86,7 @@
     meml_gotten = iLBCdec_inst->state_short_len;
     WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
                                   decresidual+start_pos, meml_gotten);
-    WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
 
     /* construct decoded vector */
 
@@ -153,7 +152,7 @@
 
     WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
                                   decresidual+(iLBC_encbits->startIdx-1)*SUBL, meml_gotten);
-    WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
 
     /* loop over subframes to decode */
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/encode.c b/webrtc/modules/audio_coding/codecs/ilbc/encode.c
index 3de8425..114ce1f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/encode.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/encode.c
@@ -193,7 +193,7 @@
 
       /* setup memory */
 
-      WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCenc_inst->state_short_len));
+      WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
       WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCenc_inst->state_short_len,
                             decresidual+start_pos, iLBCenc_inst->state_short_len);
 
@@ -224,7 +224,7 @@
 
       meml_gotten = iLBCenc_inst->state_short_len;
       WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[start_pos], meml_gotten);
-      WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-iLBCenc_inst->state_short_len));
+      WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
 
       /* encode subframes */
       WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
@@ -397,7 +397,7 @@
     }
 
     WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[Nback*SUBL], meml_gotten);
-    WebRtcSpl_MemSetW16(mem, 0, (int16_t)(CB_MEML-meml_gotten));
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
 
 #ifdef SPLIT_10MS
     if (iLBCenc_inst->Nback_flag > 0)
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
index f282432..262a564 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
@@ -96,11 +96,11 @@
   memmove(enh_period, &enh_period[new_blocks],
           (ENH_NBLOCKS_TOT - new_blocks) * sizeof(*enh_period));
 
-  k=WebRtcSpl_DownsampleFast(
+  k = WebRtcSpl_DownsampleFast(
       enh_buf+ENH_BUFL-inLen,    /* Input samples */
-      (int16_t)(inLen+ENH_BUFL_FILTEROVERHEAD),
+      inLen + ENH_BUFL_FILTEROVERHEAD,
       downsampled,
-      (int16_t)(inLen / 2),
+      inLen / 2,
       (int16_t*)WebRtcIlbcfix_kLpFiltCoefs,  /* Coefficients in Q12 */
       FILTERORDER_DS_PLUS1,    /* Length of filter (order-1) */
       FACTOR_DS,
@@ -114,8 +114,7 @@
     regressor = target - 10;
 
     /* scaling */
-    max16=WebRtcSpl_MaxAbsValueW16(&regressor[-50],
-                                   (int16_t)(ENH_BLOCKL_HALF+50-1));
+    max16 = WebRtcSpl_MaxAbsValueW16(&regressor[-50], ENH_BLOCKL_HALF + 50 - 1);
     shifts = WebRtcSpl_GetSizeInBits((uint32_t)(max16 * max16)) - 25;
     shifts = WEBRTC_SPL_MAX(0, shifts);
 
@@ -199,7 +198,7 @@
     regressor=in+tlag-1;
 
     /* scaling */
-    max16=WebRtcSpl_MaxAbsValueW16(regressor, (int16_t)(plc_blockl+3-1));
+    max16 = WebRtcSpl_MaxAbsValueW16(regressor, plc_blockl + 3 - 1);
     if (max16>5000)
       shifts=2;
     else
@@ -338,7 +337,7 @@
           synt,
           &iLBCdec_inst->old_syntdenum[
                                        (iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
-                                       LPC_FILTERORDER+1, (int16_t)lag);
+                                       LPC_FILTERORDER+1, lag);
 
       WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], &synt[lag-LPC_FILTERORDER],
                             LPC_FILTERORDER);
@@ -349,7 +348,7 @@
           enh_bufPtr1, synt,
           &iLBCdec_inst->old_syntdenum[
                                        (iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
-                                       LPC_FILTERORDER+1, (int16_t)lag);
+                                       LPC_FILTERORDER+1, lag);
 
       WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &synt[lag-LPC_FILTERORDER],
                             LPC_FILTERORDER);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
index d124b6b..6a68dec 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
@@ -62,7 +62,7 @@
   }
 
   /* Scale to maximum 20 bits in order to allow for the 11 bit window */
-  maxW32 = WebRtcSpl_MaxValueW32(ssqEn, (int16_t)(iLBCenc_inst->nsub-1));
+  maxW32 = WebRtcSpl_MaxValueW32(ssqEn, iLBCenc_inst->nsub - 1);
   scale = WebRtcSpl_GetSizeInBits(maxW32) - 20;
   scale1 = WEBRTC_SPL_MAX(0, scale);
 
@@ -82,7 +82,7 @@
   }
 
   /* Extract the best choise of start state */
-  pos = WebRtcSpl_MaxIndexW32(ssqEn, (int16_t)(iLBCenc_inst->nsub-1)) + 1;
+  pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
 
   return(pos);
 }
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
index 048745a..ec3cf20 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
@@ -45,7 +45,7 @@
   loops=dim1-dim2+1;
 
   /* Calculate the cross correlations */
-  WebRtcSpl_CrossCorrelation(corr, (int16_t*)seq2, seq1, dim2, loops, scale, 1);
+  WebRtcSpl_CrossCorrelation(corr, seq2, seq1, dim2, loops, scale, 1);
 
   return;
 }
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
index 6329908..30c7a03 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
@@ -42,5 +42,5 @@
   }
 
   /* Find the minimum square distance */
-  *index=WebRtcSpl_MinIndexW32(crit, (int16_t)arlength);
+  *index=WebRtcSpl_MinIndexW32(crit, arlength);
 }
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/refiner.c b/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
index ca99b3a..2fff362 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
@@ -75,7 +75,7 @@
 
   /* Calculate the rescaling factor for the correlation in order to
      put the correlation in a int16_t vector instead */
-  maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, (int16_t)corrdim);
+  maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
 
   scalefact=WebRtcSpl_GetSizeInBits(maxtemp)-15;
 
@@ -97,7 +97,7 @@
   WebRtcIlbcfix_EnhUpsample(corrVecUps,corrVec);
 
   /* Find maximum */
-  tloc=WebRtcSpl_MaxIndexW32(corrVecUps, (int16_t) (ENH_UPS0*corrdim));
+  tloc=WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
 
   /* make vector can be upsampled without ever running outside
      bounds */
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
index 80b3e1b..324b670 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
@@ -100,7 +100,7 @@
   WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
   WebRtcSpl_FilterARFastQ12(
       sampleMa, sampleAr,
-      syntDenum, LPC_FILTERORDER+1, (int16_t)(2*len));
+      syntDenum, LPC_FILTERORDER+1, 2 * len);
 
   tmp1 = &sampleAr[len-1];
   tmp2 = &sampleAr[2*len-1];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/state_search.c b/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
index 5d85a84..b2214c7 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
@@ -71,7 +71,7 @@
 
   WebRtcSpl_FilterARFastQ12(
       sampleMa, sampleAr,
-      syntDenum, LPC_FILTERORDER+1, (int16_t)(2*iLBCenc_inst->state_short_len));
+      syntDenum, LPC_FILTERORDER+1, 2 * iLBCenc_inst->state_short_len);
 
   for(k=0;k<iLBCenc_inst->state_short_len;k++){
     sampleAr[k] += sampleAr[k+iLBCenc_inst->state_short_len];
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
index 3490461..328a5fe 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
@@ -55,11 +55,11 @@
 
   /* Find scale value and start position */
   if (step==1) {
-    max=WebRtcSpl_MaxAbsValueW16(regressor, (int16_t)(subl+searchLen-1));
+    max=WebRtcSpl_MaxAbsValueW16(regressor, subl + searchLen - 1);
     rp_beg = regressor;
     rp_end = &regressor[subl];
   } else { /* step==-1 */
-    max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], (int16_t)(subl+searchLen-1));
+    max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], subl + searchLen - 1);
     rp_beg = &regressor[-1];
     rp_end = &regressor[subl-1];
   }
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
index 16befba..4a4cddc 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
@@ -374,7 +374,7 @@
         /* compute inverse receiving rate for last packet, in Q19 */
         numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
             524288 + ((pksize + HEADER_SIZE) >> 1),
-            pksize + HEADER_SIZE);
+            (int16_t)(pksize + HEADER_SIZE));
 
         /* 8389 is  ~ 1/128000 in Q30 */
         byteSecondsPerBit = (uint32_t)(arrTimeDiff * 8389);
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
index d2cfb3a..1a7ff92 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
@@ -447,7 +447,7 @@
   /* inverse pitch filter */
 
   pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
-      ((ISACdec_obj->plcstr_obj).stretchLag<<7);
+      (int16_t)((ISACdec_obj->plcstr_obj).stretchLag<<7);
   pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
   pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
   pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
@@ -749,7 +749,8 @@
     k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
   }
 
-  (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 = (ISACdec_obj->plcstr_obj).stretchLag << 7;
+  (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 =
+      (int16_t)((ISACdec_obj->plcstr_obj).stretchLag << 7);
 
 
   /* --- Inverse Pitch Filter --- */
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/encode.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/encode.c
index 1a6372a..757c0b8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/encode.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/encode.c
@@ -498,7 +498,7 @@
 {
   int ii;
   int status;
-  int16_t BWno = BWnumber;
+  int16_t BWno = (int16_t)BWnumber;
   int stream_length = 0;
 
   int16_t model;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index f8abc8a..03bceec 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -425,7 +425,8 @@
     return -1;
   }
 
-  write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, stream_len, encoded);
+  write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, (size_t)stream_len,
+             encoded);
   return stream_len;
 
 }
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
index a7a80ab..7f4272b 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
@@ -62,7 +62,8 @@
   /* everything in samples */
   BN_data->sample_count = BN_data->sample_count + current_framesamples;
 
-  BN_data->arrival_time += ((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate);
+  BN_data->arrival_time += static_cast<uint32_t>(
+      ((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate));
   BN_data->send_time += current_framesamples;
 
   if (BN_data->arrival_time < BN_data->sample_count)
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
index b4c0ee4..e2a778a 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
@@ -68,8 +68,8 @@
   /* everything in samples */
   BN_data->sample_count = BN_data->sample_count + current_framesamples;
 
-  BN_data->arrival_time +=
-      ((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate);
+  BN_data->arrival_time += (uint32_t)
+      (((packet_size + HeaderSize) * 8 * FS) / (bottleneck + HeaderRate));
   BN_data->send_time += current_framesamples;
 
   if (BN_data->arrival_time < BN_data->sample_count)
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
index db78e6d..3492bfa 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -504,7 +504,7 @@
   int16_t streamLenUB = 0;
   int16_t streamLen = 0;
   int16_t k = 0;
-  int garbageLen = 0;
+  uint8_t garbageLen = 0;
   int32_t bottleneck = 0;
   int16_t bottleneckIdx = 0;
   int16_t jitterInfo = 0;
@@ -645,7 +645,7 @@
     memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
     streamLen = streamLenLB;
     if (streamLenUB > 0) {
-      encoded[streamLenLB] = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+      encoded[streamLenLB] = (uint8_t)(streamLenUB + 1 + LEN_CHECK_SUM_WORD8);
       memcpy(&encoded[streamLenLB + 1],
              instUB->ISACencUB_obj.bitstr_obj.stream,
              streamLenUB);
@@ -703,7 +703,7 @@
     }
     minBytes = (minBytes > limit) ? limit : minBytes;
 
-    garbageLen = (minBytes > streamLen) ? (minBytes - streamLen) : 0;
+    garbageLen = (minBytes > streamLen) ? (uint8_t)(minBytes - streamLen) : 0;
 
     /* Save data for creation of multiple bit-streams. */
     /* If bit-stream too short then add garbage at the end. */
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
index c564991..0574047 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -52,7 +52,8 @@
   double starttime, runtime, length_file;
 
   int16_t stream_len = 0;
-  int16_t declen = 0, lostFrame = 0, declenTC = 0;
+  int16_t declen = 0, declenTC = 0;
+  bool lostFrame = false;
 
   int16_t shortdata[SWBFRAMESAMPLES_10ms];
   int16_t vaddata[SWBFRAMESAMPLES_10ms * 3];
@@ -696,7 +697,7 @@
     if (!lostFrame) {
       lostFrame = ((rand() % 100) < packetLossPercent);
     } else {
-      lostFrame = 0;
+      lostFrame = false;
     }
 
     // RED.
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
index 8f5b4cf..7ea8bae 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
@@ -98,7 +98,7 @@
   char histFileName[500];
   char averageFileName[500];
   unsigned int hist[600];
-  unsigned int tmpSumStreamLen = 0;
+  double tmpSumStreamLen = 0;
   unsigned int packetCntr = 0;
   unsigned int lostPacketCntr = 0;
   uint8_t payload[1200];
@@ -374,7 +374,7 @@
       if (packetCntr == 100) {
         // kbps
         fprintf(averageFile, "%8.3f ",
-                (double)tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
+                tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
         packetCntr = 0;
         tmpSumStreamLen = 0;
       }
@@ -493,7 +493,7 @@
   if (averageFile != NULL) {
     if (packetCntr > 0) {
       fprintf(averageFile, "%8.3f ",
-              (double)tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
+              tmpSumStreamLen * 8.0 / (30.0 * packetCntr));
     }
     fprintf(averageFile, "\n");
     fclose(averageFile);
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index c05d773..e69b0c8 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -115,9 +115,9 @@
   // Calculate the number of bytes we expect the encoder to produce,
   // then multiply by two to give a wide margin for error.
   int frame_size_ms = num_10ms_frames_per_packet_ * 10;
-  int bytes_per_millisecond = bitrate_bps_ / (1000 * 8) + 1;
-  size_t approx_encoded_bytes =
-      static_cast<size_t>(frame_size_ms * bytes_per_millisecond);
+  size_t bytes_per_millisecond =
+      static_cast<size_t>(bitrate_bps_ / (1000 * 8) + 1);
+  size_t approx_encoded_bytes = frame_size_ms * bytes_per_millisecond;
   return 2 * approx_encoded_bytes;
 }
 
@@ -206,7 +206,7 @@
   CHECK_GE(status, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
   EncodedInfo info;
-  info.encoded_bytes = status;
+  info.encoded_bytes = static_cast<size_t>(status);
   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = payload_type_;
   info.send_even_if_empty = true;  // Allows Opus to send empty packets.
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
index a30b1cb..aaaced1 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -196,7 +196,7 @@
       EncodeABlock();
 
       // Check if payload has FEC.
-      int16_t fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
+      int fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
 
       // If FEC is disabled or the target packet loss rate is set to 0, there
       // should be no FEC in the bit stream.
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
index f9cf89a..ae5a04f 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
@@ -461,8 +461,8 @@
   // |audio_frame|.
   uint32_t playout_timestamp = 0;
   if (GetPlayoutTimestamp(&playout_timestamp)) {
-    audio_frame->timestamp_ =
-        playout_timestamp - audio_frame->samples_per_channel_;
+    audio_frame->timestamp_ = playout_timestamp -
+        static_cast<uint32_t>(audio_frame->samples_per_channel_);
   } else {
     // Remain 0 until we have a valid |playout_timestamp|.
     audio_frame->timestamp_ = 0;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
index 56830a4..b96db6b 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -79,7 +79,7 @@
     }
     int32_t encoded_bytes = acm_->Add10MsAudio(input_frame_);
     EXPECT_GE(encoded_bytes, 0);
-    input_frame_.timestamp_ += input_block_size_samples_;
+    input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
     if (encoded_bytes > 0) {
       // Encoded packet received.
       return CreatePacket();
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
index d0c031e..1819d59 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -92,7 +92,7 @@
     }
     data_to_send_ = false;
     CHECK_GE(acm_->Add10MsData(input_frame_), 0);
-    input_frame_.timestamp_ += input_block_size_samples_;
+    input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
     if (data_to_send_) {
       // Encoded packet received.
       return CreatePacket();
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index b659649..ce98636 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -431,8 +431,8 @@
 
   if (!down_mix && !resample) {
     // No pre-processing is required.
-    expected_in_ts_ += in_frame.samples_per_channel_;
-    expected_codec_ts_ += in_frame.samples_per_channel_;
+    expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+    expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
     *ptr_out = &in_frame;
     return 0;
   }
@@ -477,8 +477,9 @@
         codec_manager_.CurrentEncoder()->SampleRateHz();
   }
 
-  expected_codec_ts_ += preprocess_frame_.samples_per_channel_;
-  expected_in_ts_ += in_frame.samples_per_channel_;
+  expected_codec_ts_ +=
+      static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
+  expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
 
   return 0;
 }
diff --git a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
index 3d0d312..ffbbc8c 100644
--- a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
+++ b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
@@ -144,7 +144,7 @@
     acm_b_->SetInitialPlayoutDelay(initial_delay_ms);
     while (rms < kAmp / 2) {
       in_audio_frame.timestamp_ = timestamp;
-      timestamp += in_audio_frame.samples_per_channel_;
+      timestamp += static_cast<uint32_t>(in_audio_frame.samples_per_channel_);
       ASSERT_GE(acm_a_->Add10MsData(in_audio_frame), 0);
       ASSERT_EQ(0, acm_b_->PlayoutData10Ms(codec.plfreq, &out_audio_frame));
       rms = FrameRms(out_audio_frame);
diff --git a/webrtc/modules/audio_coding/neteq/background_noise.cc b/webrtc/modules/audio_coding/neteq/background_noise.cc
index 4fbc84c..a59f444 100644
--- a/webrtc/modules/audio_coding/neteq/background_noise.cc
+++ b/webrtc/modules/audio_coding/neteq/background_noise.cc
@@ -239,7 +239,7 @@
   parameters.low_energy_update_threshold = 0;
 
   // Normalize residual_energy to 29 or 30 bits before sqrt.
-  int norm_shift = WebRtcSpl_NormW32(residual_energy) - 1;
+  int16_t norm_shift = WebRtcSpl_NormW32(residual_energy) - 1;
   if (norm_shift & 0x1) {
     norm_shift -= 1;  // Even number of shifts required.
   }
@@ -251,7 +251,8 @@
   // Add 13 to the |scale_shift_|, since the random numbers table is in
   // Q13.
   // TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
-  parameters.scale_shift = 13 + ((kLogResidualLength + norm_shift) / 2);
+  parameters.scale_shift =
+      static_cast<int16_t>(13 + ((kLogResidualLength + norm_shift) / 2));
 
   initialized_ = true;
 }
diff --git a/webrtc/modules/audio_coding/neteq/background_noise.h b/webrtc/modules/audio_coding/neteq/background_noise.h
index fd4e6a5..baf1818 100644
--- a/webrtc/modules/audio_coding/neteq/background_noise.h
+++ b/webrtc/modules/audio_coding/neteq/background_noise.h
@@ -79,7 +79,7 @@
   static const int kVecLen = 256;
   static const int kLogVecLen = 8;  // log2(kVecLen).
   static const int kResidualLength = 64;
-  static const int kLogResidualLength = 6;  // log2(kResidualLength)
+  static const int16_t kLogResidualLength = 6;  // log2(kResidualLength)
 
   struct ChannelParameters {
     // Constructor.
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
index 89fdb51..e985ee0 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -67,7 +67,8 @@
     return kNormal;
   }
 
-  const uint32_t five_seconds_samples = 5 * 8000 * fs_mult_;
+  const uint32_t five_seconds_samples =
+      static_cast<uint32_t>(5 * 8000 * fs_mult_);
   // Check if the required packet is available.
   if (target_timestamp == available_timestamp) {
     return ExpectedPacketAvailable(prev_mode, play_dtmf);
@@ -87,10 +88,11 @@
                                              uint32_t target_timestamp,
                                              uint32_t available_timestamp) {
   // Signed difference between target and available timestamp.
-  int32_t timestamp_diff = (generated_noise_samples_ + target_timestamp) -
-      available_timestamp;
-  int32_t optimal_level_samp =
-      (delay_manager_->TargetLevel() * packet_length_samples_) >> 8;
+  int32_t timestamp_diff = static_cast<int32_t>(
+      static_cast<uint32_t>(generated_noise_samples_ + target_timestamp) -
+      available_timestamp);
+  int32_t optimal_level_samp = static_cast<int32_t>(
+      (delay_manager_->TargetLevel() * packet_length_samples_) >> 8);
   int32_t excess_waiting_time_samp = -timestamp_diff - optimal_level_samp;
 
   if (excess_waiting_time_samp > optimal_level_samp / 2) {
@@ -182,11 +184,11 @@
     // safety precaution), but make sure that the number of samples in buffer
     // is no higher than 4 times the optimal level. (Note that TargetLevel()
     // is in Q8.)
-    int32_t timestamp_diff = (generated_noise_samples_ + target_timestamp) -
-        available_timestamp;
-    if (timestamp_diff >= 0 ||
+    if (static_cast<uint32_t>(generated_noise_samples_ + target_timestamp) >=
+            available_timestamp ||
         cur_size_samples >
-        4 * ((delay_manager_->TargetLevel() * packet_length_samples_) >> 8)) {
+            ((delay_manager_->TargetLevel() * packet_length_samples_) >> 8) *
+            4) {
       // Time to play this new packet.
       return kNormal;
     } else {
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index 1378241..d5f0f9c 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -227,7 +227,7 @@
       if (mix_factor_increment != 0) {
         parameters.current_voice_mix_factor = parameters.voice_mix_factor;
       }
-      int temp_scale = 16384 - parameters.current_voice_mix_factor;
+      int16_t temp_scale = 16384 - parameters.current_voice_mix_factor;
       WebRtcSpl_ScaleAndAddVectorsWithRound(
           voiced_vector + temp_lenght, parameters.current_voice_mix_factor,
           unvoiced_vector + temp_lenght, temp_scale, 14,
@@ -669,7 +669,8 @@
     // even, which is suitable for the sqrt.
     unvoiced_scale += ((unvoiced_scale & 0x1) ^ 0x1);
     unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
-    int32_t unvoiced_gain = WebRtcSpl_SqrtFloor(unvoiced_energy);
+    int16_t unvoiced_gain =
+        static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
     parameters.ar_gain_scale = 13
         + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
     parameters.ar_gain = unvoiced_gain;
@@ -709,8 +710,9 @@
       // the division.
       // Shift the denominator from Q13 to Q5 before the division. The result of
       // the division will then be in Q20.
-      int16_t temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12,
-                                               (distortion_lag * slope) >> 8);
+      int16_t temp_ratio = WebRtcSpl_DivW32W16(
+          (slope - 8192) << 12,
+          static_cast<int16_t>((distortion_lag * slope) >> 8));
       if (slope > 14746) {
         // slope > 1.8.
         // Divide by 2, with proper rounding.
@@ -723,8 +725,8 @@
     } else {
       // Calculate (1 - slope) / distortion_lag.
       // Shift |slope| by 7 to Q20 before the division. The result is in Q20.
-      parameters.mute_slope = WebRtcSpl_DivW32W16((8192 - slope) << 7,
-                                                   distortion_lag);
+      parameters.mute_slope = WebRtcSpl_DivW32W16(
+          (8192 - slope) << 7, static_cast<int16_t>(distortion_lag));
       if (parameters.voice_mix_factor <= 13107) {
         // Make sure the mute factor decreases from 1.0 to 0.9 in no more than
         // 6.25 ms.
@@ -810,7 +812,8 @@
   // Normalize and move data from 32-bit to 16-bit vector.
   int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
                                                      kNumCorrelationLags);
-  int16_t norm_shift2 = std::max(18 - WebRtcSpl_NormW32(max_correlation), 0);
+  int16_t norm_shift2 = static_cast<int16_t>(
+      std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
   WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
                                    norm_shift2);
   // Total scale factor (right shifts) of correlation value.
@@ -928,7 +931,7 @@
   }
 }
 
-void Expand::GenerateRandomVector(int seed_increment,
+void Expand::GenerateRandomVector(int16_t seed_increment,
                                   size_t length,
                                   int16_t* random_vector) {
   // TODO(turajs): According to hlundin The loop should not be needed. Should be
diff --git a/webrtc/modules/audio_coding/neteq/expand.h b/webrtc/modules/audio_coding/neteq/expand.h
index 5679ec1..0000642 100644
--- a/webrtc/modules/audio_coding/neteq/expand.h
+++ b/webrtc/modules/audio_coding/neteq/expand.h
@@ -66,7 +66,7 @@
 
  protected:
   static const int kMaxConsecutiveExpands = 200;
-  void GenerateRandomVector(int seed_increment,
+  void GenerateRandomVector(int16_t seed_increment,
                             size_t length,
                             int16_t* random_vector);
 
diff --git a/webrtc/modules/audio_coding/neteq/merge.cc b/webrtc/modules/audio_coding/neteq/merge.cc
index 44fc511..8399a78 100644
--- a/webrtc/modules/audio_coding/neteq/merge.cc
+++ b/webrtc/modules/audio_coding/neteq/merge.cc
@@ -108,10 +108,11 @@
       // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
       // and so on.
       int increment = 4194 / fs_mult_;
-      *external_mute_factor = DspHelper::RampSignal(input_channel,
-                                                    interpolation_length,
-                                                    *external_mute_factor,
-                                                    increment);
+      *external_mute_factor =
+          static_cast<int16_t>(DspHelper::RampSignal(input_channel,
+                                                     interpolation_length,
+                                                     *external_mute_factor,
+                                                     increment));
       DspHelper::UnmuteSignal(&input_channel[interpolation_length],
                               input_length_per_channel - interpolation_length,
                               external_mute_factor, increment,
@@ -125,7 +126,8 @@
     }
 
     // Do overlap and mix linearly.
-    int increment = 16384 / (interpolation_length + 1);  // In Q14.
+    int16_t increment =
+        static_cast<int16_t>(16384 / (interpolation_length + 1));  // In Q14.
     int16_t mute_factor = 16384 - increment;
     memmove(temp_data, expanded_channel,
             sizeof(int16_t) * best_correlation_index);
@@ -246,7 +248,8 @@
     // energy_expanded / energy_input is in Q14.
     energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
     // Calculate sqrt(energy_expanded / energy_input) in Q14.
-    mute_factor = WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14);
+    mute_factor = static_cast<int16_t>(
+        WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
   } else {
     // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
     mute_factor = 16384;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 6512515..3a3ad98 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -788,7 +788,8 @@
     }
     case kAudioRepetitionIncreaseTimestamp: {
       // TODO(hlundin): Write test for this.
-      sync_buffer_->IncreaseEndTimestamp(output_size_samples_);
+      sync_buffer_->IncreaseEndTimestamp(
+          static_cast<uint32_t>(output_size_samples_));
       // Skipping break on purpose. Execution should move on into the
       // next case.
       FALLTHROUGH();
@@ -881,7 +882,7 @@
     }
   } else {
     // Use dead reckoning to estimate the |playout_timestamp_|.
-    playout_timestamp_ += output_size_samples_;
+    playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
   }
 
   if (decode_return_value) return decode_return_value;
@@ -940,9 +941,10 @@
   }
 
   // Check if it is time to play a DTMF event.
-  if (dtmf_buffer_->GetEvent(end_timestamp +
-                             decision_logic_->generated_noise_samples(),
-                             dtmf_event)) {
+  if (dtmf_buffer_->GetEvent(
+      static_cast<uint32_t>(
+          end_timestamp + decision_logic_->generated_noise_samples()),
+      dtmf_event)) {
     *play_dtmf = true;
   }
 
@@ -1030,7 +1032,8 @@
       if (decision_logic_->generated_noise_samples() > 0 &&
           last_mode_ != kModeDtmf) {
         // Make a jump in timestamp due to the recently played comfort noise.
-        uint32_t timestamp_jump = decision_logic_->generated_noise_samples();
+        uint32_t timestamp_jump =
+            static_cast<uint32_t>(decision_logic_->generated_noise_samples());
         sync_buffer_->IncreaseEndTimestamp(timestamp_jump);
         timestamp_ += timestamp_jump;
       }
@@ -1224,7 +1227,8 @@
   if (*decoded_length < 0) {
     // Error returned from the decoder.
     *decoded_length = 0;
-    sync_buffer_->IncreaseEndTimestamp(decoder_frame_length_);
+    sync_buffer_->IncreaseEndTimestamp(
+        static_cast<uint32_t>(decoder_frame_length_));
     int error_code = 0;
     if (decoder)
       error_code = decoder->ErrorCode();
@@ -1719,7 +1723,8 @@
   //    algorithm_buffer_->PopFront(sync_buffer_->FutureLength());
   //  }
 
-  sync_buffer_->IncreaseEndTimestamp(output_size_samples_);
+  sync_buffer_->IncreaseEndTimestamp(
+      static_cast<uint32_t>(output_size_samples_));
   expand_->Reset();
   last_mode_ = kModeDtmf;
 
@@ -1749,7 +1754,7 @@
     stats_.AddZeros(length);
   }
   if (increase_timestamp) {
-    sync_buffer_->IncreaseEndTimestamp(length);
+    sync_buffer_->IncreaseEndTimestamp(static_cast<uint32_t>(length));
   }
   expand_->Reset();
 }
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 3bdaa69b..8a66262 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -343,7 +343,8 @@
       ASSERT_EQ(0, neteq_->InsertPacket(
                        rtp_header, packet_->payload(),
                        packet_->payload_length_bytes(),
-                       packet_->time_ms() * (output_sample_rate_ / 1000)));
+                       static_cast<uint32_t>(
+                           packet_->time_ms() * (output_sample_rate_ / 1000))));
     }
     // Get next packet.
     packet_.reset(rtp_source_->NextPacket());
diff --git a/webrtc/modules/audio_coding/neteq/normal.cc b/webrtc/modules/audio_coding/neteq/normal.cc
index 18ba79b..a0e5d2d 100644
--- a/webrtc/modules/audio_coding/neteq/normal.cc
+++ b/webrtc/modules/audio_coding/neteq/normal.cc
@@ -50,7 +50,7 @@
   // fs_shift = log2(fs_mult), rounded down.
   // Note that |fs_shift| is not "exact" for 48 kHz.
   // TODO(hlundin): Investigate this further.
-  const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
+  const int fs_shift = 30 - WebRtcSpl_NormW32(static_cast<int32_t>(fs_mult));
 
   // Check if last RecOut call resulted in an Expand. If so, we have to take
   // care of some cross-fading and unmuting.
@@ -99,14 +99,15 @@
         // We want background_noise_.energy() / energy in Q14.
         int32_t bgn_energy =
             background_noise_.Energy(channel_ix) << (scaling+14);
-        int16_t energy_scaled = energy << scaling;
-        int16_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled);
-        mute_factor = WebRtcSpl_SqrtFloor(static_cast<int32_t>(ratio) << 14);
+        int16_t energy_scaled = static_cast<int16_t>(energy << scaling);
+        int32_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled);
+        mute_factor = WebRtcSpl_SqrtFloor(ratio << 14);
       } else {
         mute_factor = 16384;  // 1.0 in Q14.
       }
       if (mute_factor > external_mute_factor_array[channel_ix]) {
-        external_mute_factor_array[channel_ix] = std::min(mute_factor, 16384);
+        external_mute_factor_array[channel_ix] =
+            static_cast<int16_t>(std::min(mute_factor, 16384));
       }
 
       // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
@@ -118,10 +119,11 @@
         int32_t scaled_signal = (*output)[channel_ix][i] *
             external_mute_factor_array[channel_ix];
         // Shift 14 with proper rounding.
-        (*output)[channel_ix][i] = (scaled_signal + 8192) >> 14;
+        (*output)[channel_ix][i] =
+            static_cast<int16_t>((scaled_signal + 8192) >> 14);
         // Increase mute_factor towards 16384.
-        external_mute_factor_array[channel_ix] =
-            std::min(external_mute_factor_array[channel_ix] + increment, 16384);
+        external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min(
+            external_mute_factor_array[channel_ix] + increment, 16384));
       }
 
       // Interpolate the expanded data into the new vector.
@@ -135,8 +137,8 @@
         assert(channel_ix < output->Channels());
         assert(i < output->Size());
         (*output)[channel_ix][i] =
-            (fraction * (*output)[channel_ix][i] +
-                (32 - fraction) * expanded[channel_ix][i] + 8) >> 5;
+            static_cast<int16_t>((fraction * (*output)[channel_ix][i] +
+                (32 - fraction) * expanded[channel_ix][i] + 8) >> 5);
         fraction += increment;
       }
     }
@@ -187,10 +189,11 @@
         int32_t scaled_signal = (*output)[channel_ix][i] *
             external_mute_factor_array[channel_ix];
         // Shift 14 with proper rounding.
-        (*output)[channel_ix][i] = (scaled_signal + 8192) >> 14;
+        (*output)[channel_ix][i] =
+            static_cast<int16_t>((scaled_signal + 8192) >> 14);
         // Increase mute_factor towards 16384.
-        external_mute_factor_array[channel_ix] =
-            std::min(16384, external_mute_factor_array[channel_ix] + increment);
+        external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min(
+            16384, external_mute_factor_array[channel_ix] + increment));
       }
     }
   }
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index 14e9385..f637eb8 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -83,7 +83,7 @@
 }
 
 void StatisticsCalculator::IncreaseCounter(int num_samples, int fs_hz) {
-  timestamps_since_last_report_ += num_samples;
+  timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
   if (timestamps_since_last_report_ >
       static_cast<uint32_t>(fs_hz * kMaxReportPeriod)) {
     lost_timestamps_ = 0;
@@ -121,7 +121,8 @@
   }
 
   stats->added_zero_samples = added_zero_samples_;
-  stats->current_buffer_size_ms = num_samples_in_buffers * 1000 / fs_hz;
+  stats->current_buffer_size_ms =
+      static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
   const int ms_per_packet = decision_logic.packet_length_samples() /
       (fs_hz / 1000);
   stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
@@ -167,14 +168,14 @@
   ResetWaitingTimeStatistics();
 }
 
-int StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
-                                            uint32_t denominator) {
+uint16_t StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
+                                                 uint32_t denominator) {
   if (numerator == 0) {
     return 0;
   } else if (numerator < denominator) {
     // Ratio must be smaller than 1 in Q14.
     assert((numerator << 14) / denominator < (1 << 14));
-    return (numerator << 14) / denominator;
+    return static_cast<uint16_t>((numerator << 14) / denominator);
   } else {
     // Will not produce a ratio larger than 1, since this is probably an error.
     return 1 << 14;
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.h b/webrtc/modules/audio_coding/neteq/statistics_calculator.h
index cd4d867..a2cd9be 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.h
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.h
@@ -91,7 +91,7 @@
   static const int kLenWaitingTimes = 100;
 
   // Calculates numerator / denominator, and returns the value in Q14.
-  static int CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
+  static uint16_t CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
 
   uint32_t preemptive_samples_;
   uint32_t accelerate_samples_;
diff --git a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
index c097f5f..192d374 100644
--- a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
@@ -621,8 +621,8 @@
       }
 
       /* write RTP packet to file */
-      length = htons(12 + enc_len + 8);
-      plen = htons(12 + enc_len);
+      length = htons(static_cast<unsigned short>(12 + enc_len + 8));
+      plen = htons(static_cast<unsigned short>(12 + enc_len));
       offset = (uint32_t)sendtime;  //(timestamp/(fs/1000));
       offset = htonl(offset);
       if (fwrite(&length, 2, 1, out_file) != 1) {
@@ -673,7 +673,7 @@
           memmove(&rtp_data[RTPheaderLen + red_len[0]], &rtp_data[12], enc_len);
           memcpy(&rtp_data[RTPheaderLen], red_data, red_len[0]);
 
-          red_len[1] = enc_len;
+          red_len[1] = static_cast<uint16_t>(enc_len);
           red_TS[1] = timestamp;
           if (vad)
             red_PT[1] = payloadType;
@@ -689,7 +689,7 @@
           memmove(&rtp_data[RTPheaderLen - 4], &rtp_data[12], enc_len);
           // memcpy(&rtp_data[RTPheaderLen], red_data, red_len[0]);
 
-          red_len[1] = enc_len;
+          red_len[1] = static_cast<uint16_t>(enc_len);
           red_TS[1] = timestamp;
           if (vad)
             red_PT[1] = payloadType;
@@ -714,8 +714,8 @@
       do {
 #endif  // MULTIPLE_SAME_TIMESTAMP
         /* write RTP packet to file */
-        length = htons(12 + enc_len + 8);
-        plen = htons(12 + enc_len);
+        length = htons(static_cast<unsigned short>(12 + enc_len + 8));
+        plen = htons(static_cast<unsigned short>(12 + enc_len));
         offset = (uint32_t)sendtime;
         //(timestamp/(fs/1000));
         offset = htonl(offset);
diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
index febacbc..18a242f 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -563,7 +563,7 @@
         }
     }
 
-    return nSamplesOut;
+    return static_cast<int32_t>(nSamplesOut);
 }
 
 // ----------------------------------------------------------------------------
@@ -590,7 +590,7 @@
         _playFile.Write(&_playBuffer[0], _playSize);
     }
 
-    return _playSamples;
+    return static_cast<int32_t>(_playSamples);
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.cc b/webrtc/modules/audio_device/dummy/file_audio_device.cc
index 82569e8..3de5344 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.cc
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -172,7 +172,7 @@
     return -1;
   }
 
-  _recordingFramesIn10MS = kRecordingFixedSampleRate/100;
+  _recordingFramesIn10MS = static_cast<uint32_t>(kRecordingFixedSampleRate/100);
 
   if (_ptrAudioBuffer) {
     _ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate);
@@ -190,7 +190,7 @@
       return 0;
   }
 
-  _playoutFramesIn10MS = kPlayoutFixedSampleRate/100;
+  _playoutFramesIn10MS = static_cast<uint32_t>(kPlayoutFixedSampleRate/100);
   _playing = true;
   _playoutFramesLeft = 0;
 
diff --git a/webrtc/modules/audio_processing/ns/ns_core.c b/webrtc/modules/audio_processing/ns/ns_core.c
index 9e230dd..1bd7af4 100644
--- a/webrtc/modules/audio_processing/ns/ns_core.c
+++ b/webrtc/modules/audio_processing/ns/ns_core.c
@@ -898,10 +898,10 @@
 
   imag[0] = 0;
   real[0] = time_data[0];
-  magn[0] = fabs(real[0]) + 1.f;
+  magn[0] = fabsf(real[0]) + 1.f;
   imag[magnitude_length - 1] = 0;
   real[magnitude_length - 1] = time_data[1];
-  magn[magnitude_length - 1] = fabs(real[magnitude_length - 1]) + 1.f;
+  magn[magnitude_length - 1] = fabsf(real[magnitude_length - 1]) + 1.f;
   for (i = 1; i < magnitude_length - 1; ++i) {
     real[i] = time_data[2 * i];
     imag[i] = time_data[2 * i + 1];
@@ -1090,10 +1090,10 @@
     sumMagn += magn[i];
     if (self->blockInd < END_STARTUP_SHORT) {
       if (i >= kStartBand) {
-        tmpFloat2 = log((float)i);
+        tmpFloat2 = logf((float)i);
         sum_log_i += tmpFloat2;
         sum_log_i_square += tmpFloat2 * tmpFloat2;
-        tmpFloat1 = log(magn[i]);
+        tmpFloat1 = logf(magn[i]);
         sum_log_magn += tmpFloat1;
         sum_log_i_log_magn += tmpFloat2 * tmpFloat1;
       }
@@ -1136,7 +1136,7 @@
     if (self->pinkNoiseExp > 0.f) {
       // Use pink noise estimate.
       parametric_num =
-          exp(self->pinkNoiseNumerator / (float)(self->blockInd + 1));
+          expf(self->pinkNoiseNumerator / (float)(self->blockInd + 1));
       parametric_num *= (float)(self->blockInd + 1);
       parametric_exp = self->pinkNoiseExp / (float)(self->blockInd + 1);
     }
@@ -1150,7 +1150,7 @@
         // Use pink noise estimate.
         float use_band = (float)(i < kStartBand ? kStartBand : i);
         self->parametricNoise[i] =
-            parametric_num / pow(use_band, parametric_exp);
+            parametric_num / powf(use_band, parametric_exp);
       }
       // Weight quantile noise with modeled noise.
       noise[i] *= (self->blockInd);
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_mips.c b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
index be65c25..0e4b28f 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_mips.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
@@ -758,7 +758,7 @@
   int16_t *imag = inst->imag;
   int32_t loop_count = 2;
   int16_t tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6;
-  int16_t tmp16 = (inst->anaLen << 1) - 4;
+  int16_t tmp16 = (int16_t)(inst->anaLen << 1) - 4;
   int16_t* freq_buf_f = freq_buf;
   int16_t* freq_buf_s = &freq_buf[tmp16];
 
diff --git a/webrtc/modules/utility/source/coder.cc b/webrtc/modules/utility/source/coder.cc
index dc0799a..1baeaef 100644
--- a/webrtc/modules/utility/source/coder.cc
+++ b/webrtc/modules/utility/source/coder.cc
@@ -85,7 +85,7 @@
     AudioFrame audioFrame;
     audioFrame.CopyFrom(audio);
     audioFrame.timestamp_ = _encodeTimestamp;
-    _encodeTimestamp += audioFrame.samples_per_channel_;
+    _encodeTimestamp += static_cast<uint32_t>(audioFrame.samples_per_channel_);
 
     // For any codec with a frame size that is longer than 10 ms the encoded
     // length in bytes should be zero until a a full frame has been encoded.
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 063ffa6..6dd64c7 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -1097,7 +1097,7 @@
 Channel::UpdateLocalTimeStamp()
 {
 
-    _timeStamp += _audioFrame.samples_per_channel_;
+    _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
     return 0;
 }
 
@@ -3454,7 +3454,7 @@
         return 0xFFFFFFFF;
     }
 
-    _timeStamp += _audioFrame.samples_per_channel_;
+    _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
     return 0;
 }
 
diff --git a/webrtc/voice_engine/utility_unittest.cc b/webrtc/voice_engine/utility_unittest.cc
index 8f7efa8..a5dd70b 100644
--- a/webrtc/voice_engine/utility_unittest.cc
+++ b/webrtc/voice_engine/utility_unittest.cc
@@ -54,7 +54,7 @@
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
   for (int i = 0; i < frame->samples_per_channel_; i++) {
-    frame->data_[i] = data * i;
+    frame->data_[i] = static_cast<int16_t>(data * i);
   }
 }
 
@@ -72,8 +72,8 @@
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
   for (int i = 0; i < frame->samples_per_channel_; i++) {
-    frame->data_[i * 2] = left * i;
-    frame->data_[i * 2 + 1] = right * i;
+    frame->data_[i * 2] = static_cast<int16_t>(left * i);
+    frame->data_[i * 2 + 1] = static_cast<int16_t>(right * i);
   }
 }