1//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements extra semantic analysis beyond what is enforced
11// by the C type system.
12//
13//===----------------------------------------------------------------------===//
14
15#include "Sema.h"
16#include "clang/Analysis/CFG.h"
17#include "clang/Analysis/AnalysisContext.h"
18#include "clang/Analysis/Analyses/PrintfFormatString.h"
19#include "clang/AST/ASTContext.h"
20#include "clang/AST/CharUnits.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/ExprObjC.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/StmtCXX.h"
26#include "clang/AST/StmtObjC.h"
27#include "clang/Lex/LiteralSupport.h"
28#include "clang/Lex/Preprocessor.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/STLExtras.h"
31#include <limits>
32#include <queue>
33using namespace clang;
34
35/// getLocationOfStringLiteralByte - Return a source location that points to the
36/// specified byte of the specified string literal.
37///
38/// Strings are amazingly complex. They can be formed from multiple tokens and
39/// can have escape sequences in them in addition to the usual trigraph and
40/// escaped newline business. This routine handles this complexity.
41///
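/// For example, given the concatenated literal "ab" "cd", byte 3 (the 'd')
/// lives in the second token, so the returned location points into that
/// token's spelling rather than the first one's.
///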
42SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
43 unsigned ByteNo) const {
44 assert(!SL->isWide() && "This doesn't work for wide strings yet");
45
46 // Loop over all of the tokens in this string until we find the one that
47 // contains the byte we're looking for.
48 unsigned TokNo = 0;
49 while (1) {
50 assert(TokNo < SL->getNumConcatenated() && "Invalid byte number!");
51 SourceLocation StrTokLoc = SL->getStrTokenLoc(TokNo);
52
53 // Get the spelling of the string so that we can get the data that makes up
54 // the string literal, not the identifier for the macro it is potentially
55 // expanded through.
56 SourceLocation StrTokSpellingLoc = SourceMgr.getSpellingLoc(StrTokLoc);
57
58 // Re-lex the token to get its length and original spelling.
59 std::pair<FileID, unsigned> LocInfo =
60 SourceMgr.getDecomposedLoc(StrTokSpellingLoc);
61 std::pair<const char *,const char *> Buffer =
62 SourceMgr.getBufferData(LocInfo.first);
63 const char *StrData = Buffer.first+LocInfo.second;
64
65 // Create a langops struct and enable trigraphs. This is sufficient for
66 // relexing tokens.
67 LangOptions LangOpts;
68 LangOpts.Trigraphs = true;
69
70 // Create a lexer starting at the beginning of this token.
71 Lexer TheLexer(StrTokSpellingLoc, LangOpts, Buffer.first, StrData,
72 Buffer.second);
73 Token TheTok;
74 TheLexer.LexFromRawLexer(TheTok);
75
76 // Use the StringLiteralParser to compute the length of the string in bytes.
77 StringLiteralParser SLP(&TheTok, 1, PP);
78 unsigned TokNumBytes = SLP.GetStringLength();
79
80 // If the byte is in this token, return the location of the byte.
81 if (ByteNo < TokNumBytes ||
82 (ByteNo == TokNumBytes && TokNo == SL->getNumConcatenated())) {
83 unsigned Offset =
84 StringLiteralParser::getOffsetOfStringByte(TheTok, ByteNo, PP);
85
86 // Now that we know the offset of the token in the spelling, use the
87 // preprocessor to get the offset in the original source.
88 return PP.AdvanceToTokenCharacter(StrTokLoc, Offset);
89 }
90
91 // Move to the next string token.
92 ++TokNo;
93 ByteNo -= TokNumBytes;
94 }
95}
96
97/// CheckablePrintfAttr - does a function call have a "printf" attribute
98/// and arguments that merit checking?
99bool Sema::CheckablePrintfAttr(const FormatAttr *Format, CallExpr *TheCall) {
100 if (Format->getType() == "printf") return true;
101 if (Format->getType() == "printf0") {
102 // printf0 allows null "format" string; if so don't check format/args
103 unsigned format_idx = Format->getFormatIdx() - 1;
104 // Does the index refer to the implicit object argument?
105 if (isa<CXXMemberCallExpr>(TheCall)) {
106 if (format_idx == 0)
107 return false;
108 --format_idx;
109 }
110 if (format_idx < TheCall->getNumArgs()) {
111 Expr *Format = TheCall->getArg(format_idx)->IgnoreParenCasts();
112 if (!Format->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
113 return true;
114 }
115 }
116 return false;
117}
118
119Action::OwningExprResult
120Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
121 OwningExprResult TheCallResult(Owned(TheCall));
122
123 switch (BuiltinID) {
124 case Builtin::BI__builtin___CFStringMakeConstantString:
125 assert(TheCall->getNumArgs() == 1 &&
126 "Wrong # arguments to builtin CFStringMakeConstantString");
127 if (CheckObjCString(TheCall->getArg(0)))
128 return ExprError();
129 break;
130 case Builtin::BI__builtin_stdarg_start:
131 case Builtin::BI__builtin_va_start:
132 if (SemaBuiltinVAStart(TheCall))
133 return ExprError();
134 break;
135 case Builtin::BI__builtin_isgreater:
136 case Builtin::BI__builtin_isgreaterequal:
137 case Builtin::BI__builtin_isless:
138 case Builtin::BI__builtin_islessequal:
139 case Builtin::BI__builtin_islessgreater:
140 case Builtin::BI__builtin_isunordered:
141 if (SemaBuiltinUnorderedCompare(TheCall))
142 return ExprError();
143 break;
144 case Builtin::BI__builtin_isfinite:
145 case Builtin::BI__builtin_isinf:
146 case Builtin::BI__builtin_isinf_sign:
147 case Builtin::BI__builtin_isnan:
148 case Builtin::BI__builtin_isnormal:
149 if (SemaBuiltinUnaryFP(TheCall))
150 return ExprError();
151 break;
152 case Builtin::BI__builtin_return_address:
153 case Builtin::BI__builtin_frame_address:
154 if (SemaBuiltinStackAddress(TheCall))
155 return ExprError();
156 break;
157 case Builtin::BI__builtin_eh_return_data_regno:
158 if (SemaBuiltinEHReturnDataRegNo(TheCall))
159 return ExprError();
160 break;
161 case Builtin::BI__builtin_shufflevector:
162 return SemaBuiltinShuffleVector(TheCall);
163 // TheCall will be freed by the smart pointer here, but that's fine, since
164 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
165 case Builtin::BI__builtin_prefetch:
166 if (SemaBuiltinPrefetch(TheCall))
167 return ExprError();
168 break;
169 case Builtin::BI__builtin_object_size:
170 if (SemaBuiltinObjectSize(TheCall))
171 return ExprError();
172 break;
173 case Builtin::BI__builtin_longjmp:
174 if (SemaBuiltinLongjmp(TheCall))
175 return ExprError();
176 break;
177 case Builtin::BI__sync_fetch_and_add:
178 case Builtin::BI__sync_fetch_and_sub:
179 case Builtin::BI__sync_fetch_and_or:
180 case Builtin::BI__sync_fetch_and_and:
181 case Builtin::BI__sync_fetch_and_xor:
182 case Builtin::BI__sync_fetch_and_nand:
183 case Builtin::BI__sync_add_and_fetch:
184 case Builtin::BI__sync_sub_and_fetch:
185 case Builtin::BI__sync_and_and_fetch:
186 case Builtin::BI__sync_or_and_fetch:
187 case Builtin::BI__sync_xor_and_fetch:
188 case Builtin::BI__sync_nand_and_fetch:
189 case Builtin::BI__sync_val_compare_and_swap:
190 case Builtin::BI__sync_bool_compare_and_swap:
191 case Builtin::BI__sync_lock_test_and_set:
192 case Builtin::BI__sync_lock_release:
193 if (SemaBuiltinAtomicOverloaded(TheCall))
194 return ExprError();
195 break;
196 }
197
198 return move(TheCallResult);
199}
200
201/// CheckFunctionCall - Check a direct function call for various correctness
202/// and safety properties not strictly enforced by the C type system.
203bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
204 // Get the IdentifierInfo* for the called function.
205 IdentifierInfo *FnInfo = FDecl->getIdentifier();
206
207 // None of the checks below are needed for functions that don't have
208 // simple names (e.g., C++ conversion functions).
209 if (!FnInfo)
210 return false;
211
212 // FIXME: This mechanism should be abstracted to be less fragile and
213 // more efficient. For example, just map function ids to custom
214 // handlers.
215
216 // Printf checking.
217 if (const FormatAttr *Format = FDecl->getAttr<FormatAttr>()) {
218 if (CheckablePrintfAttr(Format, TheCall)) {
219 bool HasVAListArg = Format->getFirstArg() == 0;
220 if (!HasVAListArg) {
221 if (const FunctionProtoType *Proto
222 = FDecl->getType()->getAs<FunctionProtoType>())
223 HasVAListArg = !Proto->isVariadic();
224 }
225 CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1,
226 HasVAListArg ? 0 : Format->getFirstArg() - 1);
227 }
228 }
229
230 for (const NonNullAttr *NonNull = FDecl->getAttr<NonNullAttr>(); NonNull;
231 NonNull = NonNull->getNext<NonNullAttr>())
232 CheckNonNullArguments(NonNull, TheCall);
233
234 return false;
235}
236
237bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
238 // Printf checking.
239 const FormatAttr *Format = NDecl->getAttr<FormatAttr>();
240 if (!Format)
241 return false;
242
243 const VarDecl *V = dyn_cast<VarDecl>(NDecl);
244 if (!V)
245 return false;
246
247 QualType Ty = V->getType();
248 if (!Ty->isBlockPointerType())
249 return false;
250
251 if (!CheckablePrintfAttr(Format, TheCall))
252 return false;
253
254 bool HasVAListArg = Format->getFirstArg() == 0;
255 if (!HasVAListArg) {
256 const FunctionType *FT =
257 Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
258 if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
259 HasVAListArg = !Proto->isVariadic();
260 }
261 CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1,
262 HasVAListArg ? 0 : Format->getFirstArg() - 1);
263
264 return false;
265}
266
267/// SemaBuiltinAtomicOverloaded - We have a call to a function like
268/// __sync_fetch_and_add, which is an overloaded function based on the pointer
269/// type of its first argument. The main ActOnCallExpr routines have already
270/// promoted the types of arguments because all of these calls are prototyped as
271/// void(...).
272///
273/// This function goes through and does final semantic checking for these
274/// builtins.
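///
/// For example (a sketch, assuming 'int' is 4 bytes on the target):
///   int x;
///   __sync_fetch_and_add(&x, 1);
/// is checked here and rewritten to call the size-specific builtin
/// __sync_fetch_and_add_4, with the remaining arguments cast to that
/// builtin's concrete parameter types.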
275bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
276 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
277 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
278
279 // Ensure that we have at least one argument to do type inference from.
280 if (TheCall->getNumArgs() < 1)
281 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
282 << 0 << TheCall->getCallee()->getSourceRange();
283
284 // Inspect the first argument of the atomic builtin. This should always be
285  // a pointer type whose pointee type is an integral scalar or pointer type.
286 // Because it is a pointer type, we don't have to worry about any implicit
287 // casts here.
288 Expr *FirstArg = TheCall->getArg(0);
289 if (!FirstArg->getType()->isPointerType())
290 return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
291 << FirstArg->getType() << FirstArg->getSourceRange();
292
293 QualType ValType = FirstArg->getType()->getAs<PointerType>()->getPointeeType();
294 if (!ValType->isIntegerType() && !ValType->isPointerType() &&
295 !ValType->isBlockPointerType())
296 return Diag(DRE->getLocStart(),
297 diag::err_atomic_builtin_must_be_pointer_intptr)
298 << FirstArg->getType() << FirstArg->getSourceRange();
299
300 // We need to figure out which concrete builtin this maps onto. For example,
301 // __sync_fetch_and_add with a 2 byte object turns into
302 // __sync_fetch_and_add_2.
303#define BUILTIN_ROW(x) \
304 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
305 Builtin::BI##x##_8, Builtin::BI##x##_16 }
306
307 static const unsigned BuiltinIndices[][5] = {
308 BUILTIN_ROW(__sync_fetch_and_add),
309 BUILTIN_ROW(__sync_fetch_and_sub),
310 BUILTIN_ROW(__sync_fetch_and_or),
311 BUILTIN_ROW(__sync_fetch_and_and),
312 BUILTIN_ROW(__sync_fetch_and_xor),
313 BUILTIN_ROW(__sync_fetch_and_nand),
314
315 BUILTIN_ROW(__sync_add_and_fetch),
316 BUILTIN_ROW(__sync_sub_and_fetch),
317 BUILTIN_ROW(__sync_and_and_fetch),
318 BUILTIN_ROW(__sync_or_and_fetch),
319 BUILTIN_ROW(__sync_xor_and_fetch),
320 BUILTIN_ROW(__sync_nand_and_fetch),
321
322 BUILTIN_ROW(__sync_val_compare_and_swap),
323 BUILTIN_ROW(__sync_bool_compare_and_swap),
324 BUILTIN_ROW(__sync_lock_test_and_set),
325 BUILTIN_ROW(__sync_lock_release)
326 };
327#undef BUILTIN_ROW
328
329 // Determine the index of the size.
330 unsigned SizeIndex;
331 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
332 case 1: SizeIndex = 0; break;
333 case 2: SizeIndex = 1; break;
334 case 4: SizeIndex = 2; break;
335 case 8: SizeIndex = 3; break;
336 case 16: SizeIndex = 4; break;
337 default:
338 return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
339 << FirstArg->getType() << FirstArg->getSourceRange();
340 }
341
342 // Each of these builtins has one pointer argument, followed by some number of
343  // values (0, 1 or 2), followed by a potentially empty varargs list of stuff
344 // that we ignore. Find out which row of BuiltinIndices to read from as well
345 // as the number of fixed args.
346 unsigned BuiltinID = FDecl->getBuiltinID();
347 unsigned BuiltinIndex, NumFixed = 1;
348 switch (BuiltinID) {
349 default: assert(0 && "Unknown overloaded atomic builtin!");
350 case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break;
351 case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break;
352 case Builtin::BI__sync_fetch_and_or: BuiltinIndex = 2; break;
353 case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break;
354 case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break;
355 case Builtin::BI__sync_fetch_and_nand:BuiltinIndex = 5; break;
356
357 case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 6; break;
358 case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 7; break;
359 case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 8; break;
360 case Builtin::BI__sync_or_and_fetch: BuiltinIndex = 9; break;
361 case Builtin::BI__sync_xor_and_fetch: BuiltinIndex =10; break;
362 case Builtin::BI__sync_nand_and_fetch:BuiltinIndex =11; break;
363
364 case Builtin::BI__sync_val_compare_and_swap:
365 BuiltinIndex = 12;
366 NumFixed = 2;
367 break;
368 case Builtin::BI__sync_bool_compare_and_swap:
369 BuiltinIndex = 13;
370 NumFixed = 2;
371 break;
372 case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 14; break;
373 case Builtin::BI__sync_lock_release:
374 BuiltinIndex = 15;
375 NumFixed = 0;
376 break;
377 }
378
379 // Now that we know how many fixed arguments we expect, first check that we
380 // have at least that many.
381 if (TheCall->getNumArgs() < 1+NumFixed)
382 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
383 << 0 << TheCall->getCallee()->getSourceRange();
384
385
386  // Get the decl for the concrete builtin; from this we can tell what the
387 // concrete integer type we should convert to is.
388 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
389 const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
390 IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
391 FunctionDecl *NewBuiltinDecl =
392 cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
393 TUScope, false, DRE->getLocStart()));
394 const FunctionProtoType *BuiltinFT =
395 NewBuiltinDecl->getType()->getAs<FunctionProtoType>();
396 ValType = BuiltinFT->getArgType(0)->getAs<PointerType>()->getPointeeType();
397
398 // If the first type needs to be converted (e.g. void** -> int*), do it now.
399 if (BuiltinFT->getArgType(0) != FirstArg->getType()) {
400 ImpCastExprToType(FirstArg, BuiltinFT->getArgType(0), CastExpr::CK_BitCast);
401 TheCall->setArg(0, FirstArg);
402 }
403
404 // Next, walk the valid ones promoting to the right type.
405 for (unsigned i = 0; i != NumFixed; ++i) {
406 Expr *Arg = TheCall->getArg(i+1);
407
408 // If the argument is an implicit cast, then there was a promotion due to
409 // "...", just remove it now.
410 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
411 Arg = ICE->getSubExpr();
412 ICE->setSubExpr(0);
413 ICE->Destroy(Context);
414 TheCall->setArg(i+1, Arg);
415 }
416
417 // GCC does an implicit conversion to the pointer or integer ValType. This
418 // can fail in some cases (1i -> int**), check for this error case now.
419 CastExpr::CastKind Kind = CastExpr::CK_Unknown;
420 CXXMethodDecl *ConversionDecl = 0;
421 if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind,
422 ConversionDecl))
423 return true;
424
425 // Okay, we have something that *can* be converted to the right type. Check
426 // to see if there is a potentially weird extension going on here. This can
427    // happen when you do an atomic operation on something like a char* and
428 // pass in 42. The 42 gets converted to char. This is even more strange
429 // for things like 45.123 -> char, etc.
430 // FIXME: Do this check.
431 ImpCastExprToType(Arg, ValType, Kind, /*isLvalue=*/false);
432 TheCall->setArg(i+1, Arg);
433 }
434
435 // Switch the DeclRefExpr to refer to the new decl.
436 DRE->setDecl(NewBuiltinDecl);
437 DRE->setType(NewBuiltinDecl->getType());
438
439 // Set the callee in the CallExpr.
440 // FIXME: This leaks the original parens and implicit casts.
441 Expr *PromotedCall = DRE;
442 UsualUnaryConversions(PromotedCall);
443 TheCall->setCallee(PromotedCall);
444
445
446 // Change the result type of the call to match the result type of the decl.
447 TheCall->setType(NewBuiltinDecl->getResultType());
448 return false;
449}
450
451
452/// CheckObjCString - Checks that the argument to the builtin
453/// CFString constructor is correct.
454/// FIXME: GCC currently emits the following warning:
455/// "warning: input conversion stopped due to an input byte that does not
456/// belong to the input codeset UTF-8"
457/// Note: It might also make sense to do the UTF-16 conversion here (would
458/// simplify the backend).
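/// For example, __builtin___CFStringMakeConstantString("foo\0bar") draws the
/// embedded-NUL warning below, and a wide-string or non-literal argument is
/// rejected outright.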
459bool Sema::CheckObjCString(Expr *Arg) {
460 Arg = Arg->IgnoreParenCasts();
461 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
462
463 if (!Literal || Literal->isWide()) {
464 Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
465 << Arg->getSourceRange();
466 return true;
467 }
468
469 const char *Data = Literal->getStrData();
470 unsigned Length = Literal->getByteLength();
471
472 for (unsigned i = 0; i < Length; ++i) {
473 if (!Data[i]) {
474 Diag(getLocationOfStringLiteralByte(Literal, i),
475 diag::warn_cfstring_literal_contains_nul_character)
476 << Arg->getSourceRange();
477 break;
478 }
479 }
480
481 return false;
482}
483
484/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity.
485/// Emit an error and return true on failure, return false on success.
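/// For example, in
///   void f(int last, ...) { va_list ap; __builtin_va_start(ap, last); }
/// the enclosing function must be variadic and the second argument must name
/// 'last', the last named parameter.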
486bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
487 Expr *Fn = TheCall->getCallee();
488 if (TheCall->getNumArgs() > 2) {
489 Diag(TheCall->getArg(2)->getLocStart(),
490 diag::err_typecheck_call_too_many_args)
491 << 0 /*function call*/ << Fn->getSourceRange()
492 << SourceRange(TheCall->getArg(2)->getLocStart(),
493 (*(TheCall->arg_end()-1))->getLocEnd());
494 return true;
495 }
496
497 if (TheCall->getNumArgs() < 2) {
498 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
499 << 0 /*function call*/;
500 }
501
502 // Determine whether the current function is variadic or not.
503 bool isVariadic;
504 if (CurBlock)
505 isVariadic = CurBlock->isVariadic;
506 else if (getCurFunctionDecl()) {
507 if (FunctionProtoType* FTP =
508 dyn_cast<FunctionProtoType>(getCurFunctionDecl()->getType()))
509 isVariadic = FTP->isVariadic();
510 else
511 isVariadic = false;
512 } else {
513 isVariadic = getCurMethodDecl()->isVariadic();
514 }
515
516 if (!isVariadic) {
517 Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
518 return true;
519 }
520
521 // Verify that the second argument to the builtin is the last argument of the
522 // current function or method.
523 bool SecondArgIsLastNamedArgument = false;
524 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
525
526 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
527 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
528 // FIXME: This isn't correct for methods (results in bogus warning).
529 // Get the last formal in the current function.
530 const ParmVarDecl *LastArg;
531 if (CurBlock)
532 LastArg = *(CurBlock->TheDecl->param_end()-1);
533 else if (FunctionDecl *FD = getCurFunctionDecl())
534 LastArg = *(FD->param_end()-1);
535 else
536 LastArg = *(getCurMethodDecl()->param_end()-1);
537 SecondArgIsLastNamedArgument = PV == LastArg;
538 }
539 }
540
541 if (!SecondArgIsLastNamedArgument)
542 Diag(TheCall->getArg(1)->getLocStart(),
543 diag::warn_second_parameter_of_va_start_not_last_named_argument);
544 return false;
545}
546
547/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
548/// friends. This is declared to take (...), so we have to check everything.
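/// For example, __builtin_isgreater(1.0, 2.0f) is fine (the usual arithmetic
/// conversions yield 'double'), while purely integer operands are rejected
/// because the common type is not a real floating type.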
549bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
550 if (TheCall->getNumArgs() < 2)
551 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
552 << 0 /*function call*/;
553 if (TheCall->getNumArgs() > 2)
554 return Diag(TheCall->getArg(2)->getLocStart(),
555 diag::err_typecheck_call_too_many_args)
556 << 0 /*function call*/
557 << SourceRange(TheCall->getArg(2)->getLocStart(),
558 (*(TheCall->arg_end()-1))->getLocEnd());
559
560 Expr *OrigArg0 = TheCall->getArg(0);
561 Expr *OrigArg1 = TheCall->getArg(1);
562
563 // Do standard promotions between the two arguments, returning their common
564 // type.
565 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
566
567 // Make sure any conversions are pushed back into the call; this is
568 // type safe since unordered compare builtins are declared as "_Bool
569 // foo(...)".
570 TheCall->setArg(0, OrigArg0);
571 TheCall->setArg(1, OrigArg1);
572
573 if (OrigArg0->isTypeDependent() || OrigArg1->isTypeDependent())
574 return false;
575
576 // If the common type isn't a real floating type, then the arguments were
577 // invalid for this operation.
578 if (!Res->isRealFloatingType())
579 return Diag(OrigArg0->getLocStart(),
580 diag::err_typecheck_call_invalid_ordered_compare)
581 << OrigArg0->getType() << OrigArg1->getType()
582 << SourceRange(OrigArg0->getLocStart(), OrigArg1->getLocEnd());
583
584 return false;
585}
586
587/// SemaBuiltinUnaryFP - Handle functions like __builtin_isnan and
588/// friends. This is declared to take (...), so we have to check everything.
589bool Sema::SemaBuiltinUnaryFP(CallExpr *TheCall) {
590 if (TheCall->getNumArgs() < 1)
591 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
592 << 0 /*function call*/;
593 if (TheCall->getNumArgs() > 1)
594 return Diag(TheCall->getArg(1)->getLocStart(),
595 diag::err_typecheck_call_too_many_args)
596 << 0 /*function call*/
597 << SourceRange(TheCall->getArg(1)->getLocStart(),
598 (*(TheCall->arg_end()-1))->getLocEnd());
599
600 Expr *OrigArg = TheCall->getArg(0);
601
602 if (OrigArg->isTypeDependent())
603 return false;
604
605 // This operation requires a floating-point number
606 if (!OrigArg->getType()->isRealFloatingType())
607 return Diag(OrigArg->getLocStart(),
608 diag::err_typecheck_call_invalid_unary_fp)
609 << OrigArg->getType() << OrigArg->getSourceRange();
610
611 return false;
612}
613
614bool Sema::SemaBuiltinStackAddress(CallExpr *TheCall) {
615 // The signature for these builtins is exact; the only thing we need
616 // to check is that the argument is a constant.
617 SourceLocation Loc;
618 if (!TheCall->getArg(0)->isTypeDependent() &&
619 !TheCall->getArg(0)->isValueDependent() &&
620 !TheCall->getArg(0)->isIntegerConstantExpr(Context, &Loc))
621 return Diag(Loc, diag::err_stack_const_level) << TheCall->getSourceRange();
622
623 return false;
624}
625
626/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
627// This is declared to take (...), so we have to check everything.
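// For example, with 4-element vector operands a and b,
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)
// must supply exactly one constant index per result element, each index being
// a constant in the range [0, 2*numElements).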
628Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
629 if (TheCall->getNumArgs() < 3)
630 return ExprError(Diag(TheCall->getLocEnd(),
631 diag::err_typecheck_call_too_few_args)
632 << 0 /*function call*/ << TheCall->getSourceRange());
633
634 unsigned numElements = std::numeric_limits<unsigned>::max();
635 if (!TheCall->getArg(0)->isTypeDependent() &&
636 !TheCall->getArg(1)->isTypeDependent()) {
637 QualType FAType = TheCall->getArg(0)->getType();
638 QualType SAType = TheCall->getArg(1)->getType();
639
640 if (!FAType->isVectorType() || !SAType->isVectorType()) {
641 Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector)
642 << SourceRange(TheCall->getArg(0)->getLocStart(),
643 TheCall->getArg(1)->getLocEnd());
644 return ExprError();
645 }
646
647 if (!Context.hasSameUnqualifiedType(FAType, SAType)) {
648 Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector)
649 << SourceRange(TheCall->getArg(0)->getLocStart(),
650 TheCall->getArg(1)->getLocEnd());
651 return ExprError();
652 }
653
654 numElements = FAType->getAs<VectorType>()->getNumElements();
655 if (TheCall->getNumArgs() != numElements+2) {
656 if (TheCall->getNumArgs() < numElements+2)
657 return ExprError(Diag(TheCall->getLocEnd(),
658 diag::err_typecheck_call_too_few_args)
659 << 0 /*function call*/ << TheCall->getSourceRange());
660 return ExprError(Diag(TheCall->getLocEnd(),
661 diag::err_typecheck_call_too_many_args)
662 << 0 /*function call*/ << TheCall->getSourceRange());
663 }
664 }
665
666 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
667 if (TheCall->getArg(i)->isTypeDependent() ||
668 TheCall->getArg(i)->isValueDependent())
669 continue;
670
671 llvm::APSInt Result(32);
672 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
673 return ExprError(Diag(TheCall->getLocStart(),
674 diag::err_shufflevector_nonconstant_argument)
675 << TheCall->getArg(i)->getSourceRange());
676
677 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
678 return ExprError(Diag(TheCall->getLocStart(),
679 diag::err_shufflevector_argument_too_large)
680 << TheCall->getArg(i)->getSourceRange());
681 }
682
683 llvm::SmallVector<Expr*, 32> exprs;
684
685 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
686 exprs.push_back(TheCall->getArg(i));
687 TheCall->setArg(i, 0);
688 }
689
690 return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(),
691 exprs.size(), exprs[0]->getType(),
692 TheCall->getCallee()->getLocStart(),
693 TheCall->getRParenLoc()));
694}
695
696/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
697// This is declared to take (const void*, ...) and can take two
698// optional constant int args.
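// For example, __builtin_prefetch(p, 1, 3) is valid: the optional second
// argument (read/write) must be a constant 0 or 1, and the optional third
// (locality) a constant in 0-3.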
699bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
700 unsigned NumArgs = TheCall->getNumArgs();
701
702 if (NumArgs > 3)
703 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_many_args)
704 << 0 /*function call*/ << TheCall->getSourceRange();
705
706 // Argument 0 is checked for us and the remaining arguments must be
707 // constant integers.
708 for (unsigned i = 1; i != NumArgs; ++i) {
709 Expr *Arg = TheCall->getArg(i);
710 if (Arg->isTypeDependent())
711 continue;
712
713 if (!Arg->getType()->isIntegralType())
714 return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_arg_type)
715 << Arg->getSourceRange();
716
717 ImpCastExprToType(Arg, Context.IntTy, CastExpr::CK_IntegralCast);
718 TheCall->setArg(i, Arg);
719
720 if (Arg->isValueDependent())
721 continue;
722
723 llvm::APSInt Result;
724 if (!Arg->isIntegerConstantExpr(Result, Context))
725 return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_arg_ice)
726 << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
727
728    // FIXME: gcc issues a warning and rewrites these to 0. This
729 // seems especially odd for the third argument since the default
730 // is 3.
731 if (i == 1) {
732 if (Result.getLimitedValue() > 1)
733 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
734 << "0" << "1" << Arg->getSourceRange();
735 } else {
736 if (Result.getLimitedValue() > 3)
737 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
738 << "0" << "3" << Arg->getSourceRange();
739 }
740 }
741
742 return false;
743}
744
745/// SemaBuiltinEHReturnDataRegNo - Handle __builtin_eh_return_data_regno, the
746/// operand must be an integer constant.
747bool Sema::SemaBuiltinEHReturnDataRegNo(CallExpr *TheCall) {
748 llvm::APSInt Result;
749 if (!TheCall->getArg(0)->isIntegerConstantExpr(Result, Context))
750 return Diag(TheCall->getLocStart(), diag::err_expr_not_ice)
751 << TheCall->getArg(0)->getSourceRange();
752
753 return false;
754}
755
756
757/// SemaBuiltinObjectSize - Handle __builtin_object_size(void *ptr,
758/// int type). This simply type checks that type is one of the defined
759/// constants (0-3).
760// For compatibility we check 0-3; LLVM only handles 0 and 2.
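// For example, __builtin_object_size(p, 0) is valid; the second argument must
// be an 'int' constant in the range 0-3.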
761bool Sema::SemaBuiltinObjectSize(CallExpr *TheCall) {
762 Expr *Arg = TheCall->getArg(1);
763 if (Arg->isTypeDependent())
764 return false;
765
766 QualType ArgType = Arg->getType();
767 const BuiltinType *BT = ArgType->getAs<BuiltinType>();
768 llvm::APSInt Result(32);
769 if (!BT || BT->getKind() != BuiltinType::Int)
770 return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument)
771 << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
772
773 if (Arg->isValueDependent())
774 return false;
775
776 if (!Arg->isIntegerConstantExpr(Result, Context)) {
777 return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument)
778 << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
779 }
780
781 if (Result.getSExtValue() < 0 || Result.getSExtValue() > 3) {
782 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
783 << "0" << "3" << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
784 }
785
786 return false;
787}
788
789/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
790/// This checks that val is a constant 1.
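/// For example, __builtin_longjmp(env, 1) is accepted; any other value for
/// 'val' is diagnosed below.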
791bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
792 Expr *Arg = TheCall->getArg(1);
793 if (Arg->isTypeDependent() || Arg->isValueDependent())
794 return false;
795
796 llvm::APSInt Result(32);
797 if (!Arg->isIntegerConstantExpr(Result, Context) || Result != 1)
798 return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
799 << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
800
801 return false;
802}
803
804// Handle i > 1 ? "x" : "y", recursively.
805bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
806 bool HasVAListArg,
807 unsigned format_idx, unsigned firstDataArg) {
808 if (E->isTypeDependent() || E->isValueDependent())
809 return false;
810
811 switch (E->getStmtClass()) {
812 case Stmt::ConditionalOperatorClass: {
813 const ConditionalOperator *C = cast<ConditionalOperator>(E);
814 return SemaCheckStringLiteral(C->getTrueExpr(), TheCall,
815 HasVAListArg, format_idx, firstDataArg)
816 && SemaCheckStringLiteral(C->getRHS(), TheCall,
817 HasVAListArg, format_idx, firstDataArg);
818 }
819
820 case Stmt::ImplicitCastExprClass: {
821 const ImplicitCastExpr *Expr = cast<ImplicitCastExpr>(E);
822 return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
823 format_idx, firstDataArg);
824 }
825
826 case Stmt::ParenExprClass: {
827 const ParenExpr *Expr = cast<ParenExpr>(E);
828 return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
829 format_idx, firstDataArg);
830 }
831
832 case Stmt::DeclRefExprClass: {
833 const DeclRefExpr *DR = cast<DeclRefExpr>(E);
834
835 // As an exception, do not flag errors for variables binding to
836 // const string literals.
837 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
838 bool isConstant = false;
839 QualType T = DR->getType();
840
841 if (const ArrayType *AT = Context.getAsArrayType(T)) {
842 isConstant = AT->getElementType().isConstant(Context);
843 } else if (const PointerType *PT = T->getAs<PointerType>()) {
844 isConstant = T.isConstant(Context) &&
845 PT->getPointeeType().isConstant(Context);
846 }
847
848 if (isConstant) {
849 if (const Expr *Init = VD->getAnyInitializer())
850 return SemaCheckStringLiteral(Init, TheCall,
851 HasVAListArg, format_idx, firstDataArg);
852 }
853
854 // For vprintf* functions (i.e., HasVAListArg==true), we add a
855 // special check to see if the format string is a function parameter
856 // of the function calling the printf function. If the function
857 // has an attribute indicating it is a printf-like function, then we
858 // should suppress warnings concerning non-literals being used in a call
859 // to a vprintf function. For example:
860 //
861 // void
862      //   logmessage(char const *fmt __attribute__((format(printf, 1, 2))), ...) {
863 // va_list ap;
864 // va_start(ap, fmt);
865 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
866 // ...
867 //
868 //
869 // FIXME: We don't have full attribute support yet, so just check to see
870 // if the argument is a DeclRefExpr that references a parameter. We'll
871 // add proper support for checking the attribute later.
872 if (HasVAListArg)
873 if (isa<ParmVarDecl>(VD))
874 return true;
875 }
876
877 return false;
878 }
879
880 case Stmt::CallExprClass: {
881 const CallExpr *CE = cast<CallExpr>(E);
882 if (const ImplicitCastExpr *ICE
883 = dyn_cast<ImplicitCastExpr>(CE->getCallee())) {
884 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
885 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
886 if (const FormatArgAttr *FA = FD->getAttr<FormatArgAttr>()) {
887 unsigned ArgIndex = FA->getFormatIdx();
888 const Expr *Arg = CE->getArg(ArgIndex - 1);
889
890 return SemaCheckStringLiteral(Arg, TheCall, HasVAListArg,
891 format_idx, firstDataArg);
892 }
893 }
894 }
895 }
896
897 return false;
898 }
899 case Stmt::ObjCStringLiteralClass:
900 case Stmt::StringLiteralClass: {
901 const StringLiteral *StrE = NULL;
902
903 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
904 StrE = ObjCFExpr->getString();
905 else
906 StrE = cast<StringLiteral>(E);
907
908 if (StrE) {
909 CheckPrintfString(StrE, E, TheCall, HasVAListArg, format_idx,
910 firstDataArg);
911 return true;
912 }
913
914 return false;
915 }
916
917 default:
918 return false;
919 }
920}
921
922void
923Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
924 const CallExpr *TheCall) {
925 for (NonNullAttr::iterator i = NonNull->begin(), e = NonNull->end();
926 i != e; ++i) {
927 const Expr *ArgExpr = TheCall->getArg(*i);
928 if (ArgExpr->isNullPointerConstant(Context,
929 Expr::NPC_ValueDependentIsNotNull))
930 Diag(TheCall->getCallee()->getLocStart(), diag::warn_null_arg)
931 << ArgExpr->getSourceRange();
932 }
933}
934
935/// CheckPrintfArguments - Check calls to printf (and similar functions) for
936/// correct use of format strings.
937///
938/// HasVAListArg - A predicate indicating whether the printf-like
939/// function is passed an explicit va_list argument (e.g., vprintf)
940///
941/// format_idx - The index into Args for the format string.
942///
943/// Improper format strings to functions in the printf family can be
944/// the source of bizarre bugs and very serious security holes. A
945/// good source of information is available in the following paper
946/// (which includes additional references):
947///
948/// FormatGuard: Automatic Protection From printf Format String
949/// Vulnerabilities, Proceedings of the 10th USENIX Security Symposium, 2001.
950///
951/// Functionality implemented:
952///
953/// We can statically check the following properties for string
954/// literal format strings for non v.*printf functions (where the
955/// arguments are passed directly):
956///
957/// (1) Is the number of format conversions equal to the number of
958/// data arguments?
959///
960/// (2) Does each format conversion correctly match the type of the
961/// corresponding data argument? (TODO)
962///
963/// Moreover, for all printf functions we can:
964///
965/// (3) Check for a missing format string (when not caught by type checking).
966///
967/// (4) Check for no-operation flags; e.g. using "#" with format
968/// conversion 'c' (TODO)
969///
970/// (5) Check the use of '%n', a major source of security holes.
971///
972/// (6) Check for malformed format conversions that don't specify anything.
973///
974/// (7) Check for empty format strings. e.g: printf("");
975///
976/// (8) Check whether the format string is a wide literal.
977///
978/// All of these checks can be done by parsing the format string.
979///
980/// For now, we ONLY do (1), (3), (5), (6), (7), and (8).
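///
/// For example, printf("%d") trips check (1) (no data argument for the
/// conversion) and printf("%n", &x) trips check (5).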
981void
982Sema::CheckPrintfArguments(const CallExpr *TheCall, bool HasVAListArg,
983 unsigned format_idx, unsigned firstDataArg) {
984 const Expr *Fn = TheCall->getCallee();
985
986 // The way the format attribute works in GCC, the implicit this argument
987 // of member functions is counted. However, it doesn't appear in our own
988 // lists, so decrement format_idx in that case.
989 if (isa<CXXMemberCallExpr>(TheCall)) {
990 // Catch a format attribute mistakenly referring to the object argument.
991 if (format_idx == 0)
992 return;
993 --format_idx;
994    if (firstDataArg != 0)
995 --firstDataArg;
996 }
997
998 // CHECK: printf-like function is called with no format string.
999 if (format_idx >= TheCall->getNumArgs()) {
1000 Diag(TheCall->getRParenLoc(), diag::warn_printf_missing_format_string)
1001 << Fn->getSourceRange();
1002 return;
1003 }
1004
1005 const Expr *OrigFormatExpr = TheCall->getArg(format_idx)->IgnoreParenCasts();
1006
1007 // CHECK: format string is not a string literal.
1008 //
1009 // Dynamically generated format strings are difficult to
1010 // automatically vet at compile time. Requiring that format strings
1011 // are string literals: (1) permits the checking of format strings by
1012 // the compiler and thereby (2) can practically remove the source of
1013 // many format string exploits.
1014
1015 // Format string can be either ObjC string (e.g. @"%d") or
1016 // C string (e.g. "%d")
1017 // ObjC string uses the same format specifiers as C string, so we can use
1018 // the same format string checking logic for both ObjC and C strings.
1019 if (SemaCheckStringLiteral(OrigFormatExpr, TheCall, HasVAListArg, format_idx,
1020 firstDataArg))
1021 return; // Literal format string found, check done!
1022
1023 // If there are no arguments specified, warn with -Wformat-security, otherwise
1024 // warn only with -Wformat-nonliteral.
1025 if (TheCall->getNumArgs() == format_idx+1)
1026 Diag(TheCall->getArg(format_idx)->getLocStart(),
1027 diag::warn_printf_nonliteral_noargs)
1028 << OrigFormatExpr->getSourceRange();
1029 else
1030 Diag(TheCall->getArg(format_idx)->getLocStart(),
1031 diag::warn_printf_nonliteral)
1032 << OrigFormatExpr->getSourceRange();
1033}
1034
1035namespace {
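// CheckPrintfHandler implements the analyze_printf::FormatStringHandler
// callbacks: ParseFormatString walks the format literal and invokes these
// methods, which diagnose each problem against the call's data arguments.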
1036class CheckPrintfHandler : public analyze_printf::FormatStringHandler {
1037 Sema &S;
1038 const StringLiteral *FExpr;
1039 const Expr *OrigFormatExpr;
1040 unsigned NumConversions;
1041 const unsigned NumDataArgs;
1042 const bool IsObjCLiteral;
1043 const char *Beg; // Start of format string.
1044 const bool HasVAListArg;
1045 const CallExpr *TheCall;
1046 unsigned FormatIdx;
1047public:
1048 CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
1049 const Expr *origFormatExpr,
1050 unsigned numDataArgs, bool isObjCLiteral,
1051 const char *beg, bool hasVAListArg,
1052 const CallExpr *theCall, unsigned formatIdx)
1053 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
1054 NumConversions(0), NumDataArgs(numDataArgs),
1055 IsObjCLiteral(isObjCLiteral), Beg(beg),
1056 HasVAListArg(hasVAListArg),
1057 TheCall(theCall), FormatIdx(formatIdx) {}
1058
1059 void DoneProcessing();
1060
1061 void HandleIncompleteFormatSpecifier(const char *startSpecifier,
1062 unsigned specifierLen);
1063
1064 void
1065 HandleInvalidConversionSpecifier(const analyze_printf::FormatSpecifier &FS,
1066 const char *startSpecifier,
1067 unsigned specifierLen);
1068
1069 void HandleNullChar(const char *nullCharacter);
1070
1071 bool HandleFormatSpecifier(const analyze_printf::FormatSpecifier &FS,
1072 const char *startSpecifier,
1073 unsigned specifierLen);
1074private:
1075 SourceRange getFormatStringRange();
1076 SourceRange getFormatSpecifierRange(const char *startSpecifier,
1077 unsigned specifierLen);
1078 SourceLocation getLocationOfByte(const char *x);
1079
1080 bool HandleAmount(const analyze_printf::OptionalAmount &Amt,
1081 unsigned MissingArgDiag, unsigned BadTypeDiag,
1082 const char *startSpecifier, unsigned specifierLen);
1083
1084 bool MatchType(QualType A, QualType B, bool ignoreSign);
1085
1086 const Expr *getDataArg(unsigned i) const;
1087};
1088}
1089
1090SourceRange CheckPrintfHandler::getFormatStringRange() {
1091 return OrigFormatExpr->getSourceRange();
1092}
1093
1094SourceRange CheckPrintfHandler::
1095getFormatSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
1096 return SourceRange(getLocationOfByte(startSpecifier),
1097 getLocationOfByte(startSpecifier+specifierLen-1));
1098}
1099
1100SourceLocation CheckPrintfHandler::getLocationOfByte(const char *x) {
1101 return S.getLocationOfStringLiteralByte(FExpr, x - Beg);
1102}
1103
1104void CheckPrintfHandler::
1105HandleIncompleteFormatSpecifier(const char *startSpecifier,
1106 unsigned specifierLen) {
1107 SourceLocation Loc = getLocationOfByte(startSpecifier);
1108 S.Diag(Loc, diag::warn_printf_incomplete_specifier)
1109 << getFormatSpecifierRange(startSpecifier, specifierLen);
1110}
1111
1112void CheckPrintfHandler::
1113HandleInvalidConversionSpecifier(const analyze_printf::FormatSpecifier &FS,
1114 const char *startSpecifier,
1115 unsigned specifierLen) {
1116
1117 ++NumConversions;
1118 const analyze_printf::ConversionSpecifier &CS =
1119 FS.getConversionSpecifier();
1120 SourceLocation Loc = getLocationOfByte(CS.getStart());
1121 S.Diag(Loc, diag::warn_printf_invalid_conversion)
1122 << llvm::StringRef(CS.getStart(), CS.getLength())
1123 << getFormatSpecifierRange(startSpecifier, specifierLen);
1124}
1125
1126void CheckPrintfHandler::HandleNullChar(const char *nullCharacter) {
1127 // The presence of a null character is likely an error.
1128 S.Diag(getLocationOfByte(nullCharacter),
1129 diag::warn_printf_format_string_contains_null_char)
1130 << getFormatStringRange();
1131}
1132
1133const Expr *CheckPrintfHandler::getDataArg(unsigned i) const {
1134 return TheCall->getArg(FormatIdx + i);
1135}
1136
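// MatchType - Compare two canonical, unqualified types. When ignoreSign is
// set, a signed/unsigned pair of the same width (e.g. 'int' vs
// 'unsigned int') is also treated as a match.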
1137bool CheckPrintfHandler::MatchType(QualType A, QualType B, bool ignoreSign) {
1138 A = S.Context.getCanonicalType(A).getUnqualifiedType();
1139 B = S.Context.getCanonicalType(B).getUnqualifiedType();
1140
1141 if (A == B)
1142 return true;
1143
1144 if (ignoreSign) {
1145 if (const BuiltinType *BT = B->getAs<BuiltinType>()) {
1146 switch (BT->getKind()) {
1147 default:
1148 return false;
1149 case BuiltinType::Char_S:
1150 case BuiltinType::SChar:
1151 return A == S.Context.UnsignedCharTy;
1152 case BuiltinType::Char_U:
1153 case BuiltinType::UChar:
1154 return A == S.Context.SignedCharTy;
1155 case BuiltinType::Short:
1156 return A == S.Context.UnsignedShortTy;
1157 case BuiltinType::UShort:
1158 return A == S.Context.ShortTy;
1159 case BuiltinType::Int:
1160 return A == S.Context.UnsignedIntTy;
1161 case BuiltinType::UInt:
1162 return A == S.Context.IntTy;
1163 case BuiltinType::Long:
1164 return A == S.Context.UnsignedLongTy;
1165 case BuiltinType::ULong:
1166 return A == S.Context.LongTy;
1167 case BuiltinType::LongLong:
1168 return A == S.Context.UnsignedLongLongTy;
1169 case BuiltinType::ULongLong:
1170 return A == S.Context.LongLongTy;
1171 }
1172 return A == B;
1173 }
1174 }
1175 return false;
1176}
1177
1178bool
1179CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt,
1180 unsigned MissingArgDiag,
1181 unsigned BadTypeDiag,
1182 const char *startSpecifier,
1183 unsigned specifierLen) {
1184
1185 if (Amt.hasDataArgument()) {
1186 ++NumConversions;
1187 if (!HasVAListArg) {
1188 if (NumConversions > NumDataArgs) {
1189 S.Diag(getLocationOfByte(Amt.getStart()), MissingArgDiag)
1190 << getFormatSpecifierRange(startSpecifier, specifierLen);
1191 // Don't do any more checking. We will just emit
1192 // spurious errors.
1193 return false;
1194 }
1195
1196 // Type check the data argument. It should be an 'int'.
1197 // Although not in conformance with C99, we also allow the argument to be
1198 // an 'unsigned int' as that is a reasonably safe case. GCC also
1199 // doesn't emit a warning for that case.
1200 const Expr *Arg = getDataArg(NumConversions);
1201 QualType T = Arg->getType();
1202 if (!MatchType(T, S.Context.IntTy, true)) {
1203 S.Diag(getLocationOfByte(Amt.getStart()), BadTypeDiag)
1204 << S.Context.IntTy << T
1205 << getFormatSpecifierRange(startSpecifier, specifierLen)
1206 << Arg->getSourceRange();
1207 // Don't do any more checking. We will just emit
1208 // spurious errors.
1209 return false;
1210 }
1211 }
1212 }
1213 return true;
1214}
1215
1216bool
1217CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier &FS,
1218 const char *startSpecifier,
1219 unsigned specifierLen) {
1220
1221 using namespace analyze_printf;
1222 const ConversionSpecifier &CS = FS.getConversionSpecifier();
1223
1224 // First check if the field width, precision, and conversion specifier
1225 // have matching data arguments.
1226 if (!HandleAmount(FS.getFieldWidth(),
1227 diag::warn_printf_asterisk_width_missing_arg,
1228 diag::warn_printf_asterisk_width_wrong_type,
1229 startSpecifier, specifierLen)) {
1230 return false;
1231 }
1232
1233 if (!HandleAmount(FS.getPrecision(),
1234 diag::warn_printf_asterisk_precision_missing_arg,
1235 diag::warn_printf_asterisk_precision_wrong_type,
1236 startSpecifier, specifierLen)) {
1237 return false;
1238 }
1239
1240 // Check for using an Objective-C specific conversion specifier
1241 // in a non-ObjC literal.
1242 if (!IsObjCLiteral && CS.isObjCArg()) {
1243 HandleInvalidConversionSpecifier(FS, startSpecifier, specifierLen);
1244
1245 // Continue checking the other format specifiers.
1246 return true;
1247 }
1248
1249 if (!CS.consumesDataArgument()) {
1250 // FIXME: Technically specifying a precision or field width here
1251 // makes no sense. Worth issuing a warning at some point.
1252 return true;
1253 }
1254
1255 ++NumConversions;
1256
1257 // Are we using '%n'? Issue a warning about this being
1258 // a possible security issue.
1259 if (CS.getKind() == ConversionSpecifier::OutIntPtrArg) {
1260 S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_write_back)
1261 << getFormatSpecifierRange(startSpecifier, specifierLen);
1262 // Continue checking the other format specifiers.
1263 return true;
1264 }
1265
1266
1267 // The remaining checks depend on the data arguments.
1268 if (HasVAListArg)
1269 return true;
1270
1271 if (NumConversions > NumDataArgs) {
1272 S.Diag(getLocationOfByte(CS.getStart()),
1273 diag::warn_printf_insufficient_data_args)
1274 << getFormatSpecifierRange(startSpecifier, specifierLen);
1275 // Don't do any more checking.
1276 return false;
1277 }
1278
1279 // Now type check the data expression that matches the
1280 // format specifier.
1281 const Expr *Ex = getDataArg(NumConversions);
1282 const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context);
1283
1284 if (const QualType *T = ATR.getSpecificType()) {
1285 if (!MatchType(*T, Ex->getType(), true)) {
1286 // Check if we didn't match because of an implicit cast from a 'char'
1287 // or 'short' to an 'int'. This is done because printf is a varargs
1288 // function.
1289 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Ex))
1290 if (ICE->getType() == S.Context.IntTy)
1291 if (MatchType(*T, ICE->getSubExpr()->getType(), true))
1292 return true;
1293
1294 S.Diag(getLocationOfByte(CS.getStart()),
1295 diag::warn_printf_conversion_argument_type_mismatch)
1296 << *T << Ex->getType();
1297// << getFormatSpecifierRange(startSpecifier, specifierLen)
1298// << Ex->getSourceRange();
1299 }
1300 return true;
1301 }
1302
1303 return true;
1304}
1305
1306void CheckPrintfHandler::DoneProcessing() {
1307 // Does the number of data arguments exceed the number of
1308 // format conversions in the format string?
1309 if (!HasVAListArg && NumConversions < NumDataArgs)
1310 S.Diag(getDataArg(NumConversions+1)->getLocStart(),
1311 diag::warn_printf_too_many_data_args)
1312 << getFormatStringRange();
1313}
1314
1315void Sema::CheckPrintfString(const StringLiteral *FExpr,
1316 const Expr *OrigFormatExpr,
1317 const CallExpr *TheCall, bool HasVAListArg,
1318 unsigned format_idx, unsigned firstDataArg) {
1319
1320 // CHECK: is the format string a wide literal?
1321 if (FExpr->isWide()) {
1322 Diag(FExpr->getLocStart(),
1323 diag::warn_printf_format_string_is_wide_literal)
1324 << OrigFormatExpr->getSourceRange();
1325 return;
1326 }
1327
1328 // Str - The format string. NOTE: this is NOT null-terminated!
1329 const char *Str = FExpr->getStrData();
1330
1331 // CHECK: empty format string?
1332 unsigned StrLen = FExpr->getByteLength();
1333
1334 if (StrLen == 0) {
1335 Diag(FExpr->getLocStart(), diag::warn_printf_empty_format_string)
1336 << OrigFormatExpr->getSourceRange();
1337 return;
1338 }
1339
1340 CheckPrintfHandler H(*this, FExpr, OrigFormatExpr,
1341 TheCall->getNumArgs() - firstDataArg,
1342 isa<ObjCStringLiteral>(OrigFormatExpr), Str,
1343 HasVAListArg, TheCall, format_idx);
1344
1345 if (!analyze_printf::ParseFormatString(H, Str, Str + StrLen))
1346 H.DoneProcessing();
1347}
1348
1349//===--- CHECK: Return Address of Stack Variable --------------------------===//
1350
1351static DeclRefExpr* EvalVal(Expr *E);
1352static DeclRefExpr* EvalAddr(Expr* E);
1353
1354/// CheckReturnStackAddr - Check if a return statement returns the address
1355/// of a stack variable.
1356void
1357Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
1358 SourceLocation ReturnLoc) {
1359
1360 // Perform checking for returned stack addresses.
1361 if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
1362 if (DeclRefExpr *DR = EvalAddr(RetValExp))
1363 Diag(DR->getLocStart(), diag::warn_ret_stack_addr)
1364 << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
1365
1366 // Skip over implicit cast expressions when checking for block expressions.
1367 RetValExp = RetValExp->IgnoreParenCasts();
1368
1369 if (BlockExpr *C = dyn_cast<BlockExpr>(RetValExp))
1370 if (C->hasBlockDeclRefExprs())
1371 Diag(C->getLocStart(), diag::err_ret_local_block)
1372 << C->getSourceRange();
1373
1374 if (AddrLabelExpr *ALE = dyn_cast<AddrLabelExpr>(RetValExp))
1375 Diag(ALE->getLocStart(), diag::warn_ret_addr_label)
1376 << ALE->getSourceRange();
1377
1378 } else if (lhsType->isReferenceType()) {
1379 // Perform checking for stack values returned by reference.
1380 // Check for a reference to the stack
1381 if (DeclRefExpr *DR = EvalVal(RetValExp))
1382 Diag(DR->getLocStart(), diag::warn_ret_stack_ref)
1383 << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
1384 }
1385}
1386
1387/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
1388/// check if the expression in a return statement evaluates to an address
1389/// to a location on the stack. The recursion is used to traverse the
1390/// AST of the return expression, with recursion backtracking when we
1391/// encounter a subexpression that (1) clearly does not lead to the address
1392/// of a stack variable or (2) is something we cannot determine leads to
1393/// the address of a stack variable based on such local checking.
1394///
1395/// EvalAddr processes expressions that are pointers that are used as
1396/// references (and not L-values). EvalVal handles all other values.
1397/// At the base case of the recursion is a check for a DeclRefExpr* that
1398/// refers to a stack variable.
1399///
1400/// This implementation handles:
1401///
1402/// * pointer-to-pointer casts
1403/// * implicit conversions from array references to pointers
1404/// * taking the address of fields
1405/// * arbitrary interplay between "&" and "*" operators
1406/// * pointer arithmetic from an address of a stack variable
1407/// * taking the address of an array element where the array is on the stack
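///
/// For example, for 'return &arr[0];' where 'arr' is a local array, the
/// EvalAddr/EvalVal recursion bottoms out at the DeclRefExpr for 'arr' and
/// CheckReturnStackAddr emits warn_ret_stack_addr.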
1408static DeclRefExpr* EvalAddr(Expr *E) {
1409 // We should only be called for evaluating pointer expressions.
1410 assert((E->getType()->isAnyPointerType() ||
1411 E->getType()->isBlockPointerType() ||
1412 E->getType()->isObjCQualifiedIdType()) &&
1413 "EvalAddr only works on pointers");
1414
1415 // Our "symbolic interpreter" is just a dispatch off the currently
1416 // viewed AST node. We then recursively traverse the AST by calling
1417 // EvalAddr and EvalVal appropriately.
1418 switch (E->getStmtClass()) {
1419 case Stmt::ParenExprClass:
1420 // Ignore parentheses.
1421 return EvalAddr(cast<ParenExpr>(E)->getSubExpr());
1422
1423 case Stmt::UnaryOperatorClass: {
1424    // The only unary operator that makes sense to handle here
1425 // is AddrOf. All others don't make sense as pointers.
1426 UnaryOperator *U = cast<UnaryOperator>(E);
1427
1428 if (U->getOpcode() == UnaryOperator::AddrOf)
1429 return EvalVal(U->getSubExpr());
1430 else
1431 return NULL;
1432 }
1433
1434 case Stmt::BinaryOperatorClass: {
1435 // Handle pointer arithmetic. All other binary operators are not valid
1436 // in this context.
1437 BinaryOperator *B = cast<BinaryOperator>(E);
1438 BinaryOperator::Opcode op = B->getOpcode();
1439
1440 if (op != BinaryOperator::Add && op != BinaryOperator::Sub)
1441 return NULL;
1442
1443 Expr *Base = B->getLHS();
1444
1445 // Determine which argument is the real pointer base. It could be
1446 // the RHS argument instead of the LHS.
1447 if (!Base->getType()->isPointerType()) Base = B->getRHS();
1448
1449 assert (Base->getType()->isPointerType());
1450 return EvalAddr(Base);
1451 }
1452
1453 // For conditional operators we need to see if either the LHS or RHS are
1454 // valid DeclRefExpr*s. If one of them is valid, we return it.
1455 case Stmt::ConditionalOperatorClass: {
1456 ConditionalOperator *C = cast<ConditionalOperator>(E);
1457
1458 // Handle the GNU extension for missing LHS.
1459 if (Expr *lhsExpr = C->getLHS())
1460 if (DeclRefExpr* LHS = EvalAddr(lhsExpr))
1461 return LHS;
1462
1463 return EvalAddr(C->getRHS());
1464 }
1465
1466 // For casts, we need to handle conversions from arrays to
1467 // pointer values, and pointer-to-pointer conversions.
1468 case Stmt::ImplicitCastExprClass:
1469 case Stmt::CStyleCastExprClass:
1470 case Stmt::CXXFunctionalCastExprClass: {
1471 Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
1472 QualType T = SubExpr->getType();
1473
1474 if (SubExpr->getType()->isPointerType() ||
1475 SubExpr->getType()->isBlockPointerType() ||
1476 SubExpr->getType()->isObjCQualifiedIdType())
1477 return EvalAddr(SubExpr);
1478 else if (T->isArrayType())
1479 return EvalVal(SubExpr);
1480 else
1481 return 0;
1482 }
1483
1484 // C++ casts. For dynamic casts, static casts, and const casts, we
1485 // are always converting from a pointer-to-pointer, so we just blow
1486 // through the cast. In the case the dynamic cast doesn't fail (and
1487 // return NULL), we take the conservative route and report cases
1488  // where we return the address of a stack variable. For reinterpret_cast we do the same.
1489  // FIXME: The comment above is wrong; we're not always converting
1490 // from pointer to pointer. I'm guessing that this code should also
1491 // handle references to objects.
1492 case Stmt::CXXStaticCastExprClass:
1493 case Stmt::CXXDynamicCastExprClass:
1494 case Stmt::CXXConstCastExprClass:
1495 case Stmt::CXXReinterpretCastExprClass: {
1496 Expr *S = cast<CXXNamedCastExpr>(E)->getSubExpr();
1497 if (S->getType()->isPointerType() || S->getType()->isBlockPointerType())
1498 return EvalAddr(S);
1499 else
1500 return NULL;
1501 }
1502
1503 // Everything else: we simply don't reason about them.
1504 default:
1505 return NULL;
1506 }
1507}
1508
1509
1510/// EvalVal - This function complements EvalAddr in the mutual recursion.
1511/// See the comments for EvalAddr for more details.
1512static DeclRefExpr* EvalVal(Expr *E) {
1513
1514 // We should only be called for evaluating non-pointer expressions, or
1515 // expressions with a pointer type that are not used as references but instead
1516 // are l-values (e.g., DeclRefExpr with a pointer type).
1517
1518 // Our "symbolic interpreter" is just a dispatch off the currently
1519 // viewed AST node. We then recursively traverse the AST by calling
1520 // EvalAddr and EvalVal appropriately.
1521 switch (E->getStmtClass()) {
1522 case Stmt::DeclRefExprClass: {
1523 // DeclRefExpr: the base case. When we hit a DeclRefExpr we are looking
1524 // at code that refers to a variable's name. We check if it has local
1525 // storage within the function, and if so, return the expression.
1526 DeclRefExpr *DR = cast<DeclRefExpr>(E);
1527
1528 if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
1529 if (V->hasLocalStorage() && !V->getType()->isReferenceType()) return DR;
1530
1531 return NULL;
1532 }
1533
1534 case Stmt::ParenExprClass:
1535 // Ignore parentheses.
1536 return EvalVal(cast<ParenExpr>(E)->getSubExpr());
1537
1538 case Stmt::UnaryOperatorClass: {
1539 // The only unary operator that makes sense to handle here
1540 // is Deref. All others don't resolve to a "name." This includes
1541 // handling all sorts of rvalues passed to a unary operator.
1542 UnaryOperator *U = cast<UnaryOperator>(E);
1543
1544 if (U->getOpcode() == UnaryOperator::Deref)
1545 return EvalAddr(U->getSubExpr());
1546
1547 return NULL;
1548 }
1549
1550 case Stmt::ArraySubscriptExprClass: {
1551 // Array subscripts are potential references to data on the stack. We
1552 // retrieve the DeclRefExpr* for the array variable if it indeed
1553 // has local storage.
1554 return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase());
1555 }
1556
1557 case Stmt::ConditionalOperatorClass: {
1558 // For conditional operators we need to see if either the LHS or RHS are
1559 // non-NULL DeclRefExpr's. If one is non-NULL, we return it.
1560 ConditionalOperator *C = cast<ConditionalOperator>(E);
1561
1562 // Handle the GNU extension for missing LHS.
1563 if (Expr *lhsExpr = C->getLHS())
1564 if (DeclRefExpr *LHS = EvalVal(lhsExpr))
1565 return LHS;
1566
1567 return EvalVal(C->getRHS());
1568 }
1569
1570 // Accesses to members are potential references to data on the stack.
1571 case Stmt::MemberExprClass: {
1572 MemberExpr *M = cast<MemberExpr>(E);
1573
1574 // Check for indirect access. We only want direct field accesses.
1575 if (!M->isArrow())
1576 return EvalVal(M->getBase());
1577 else
1578 return NULL;
1579 }
1580
1581 // Everything else: we simply don't reason about them.
1582 default:
1583 return NULL;
1584 }
1585}
1586
1587//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
1588
1589/// Check for comparisons of floating point operands using != and ==.
1590/// Issue a warning if these are not self-comparisons, as they are not likely
1591/// to do what the programmer intended.
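/// For example, given 'double d', the comparison 'd == 0.1' is flagged, while
/// the self-comparison 'd == d' and a comparison against an exactly
/// representable literal such as 'd == 1.0' are left alone.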
1592void Sema::CheckFloatComparison(SourceLocation loc, Expr* lex, Expr *rex) {
1593 bool EmitWarning = true;
1594
1595 Expr* LeftExprSansParen = lex->IgnoreParens();
1596 Expr* RightExprSansParen = rex->IgnoreParens();
1597
1598 // Special case: check for x == x (which is OK).
1599 // Do not emit warnings for such cases.
1600 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
1601 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
1602 if (DRL->getDecl() == DRR->getDecl())
1603 EmitWarning = false;
1604
1605
1606 // Special case: check for comparisons against literals that can be exactly
1607 // represented by APFloat. In such cases, do not emit a warning. This
1608 // is a heuristic: comparisons against such literals are often used to
1609 // detect if a value in a variable has not changed. This clearly can
1610 // lead to false negatives.
1611 if (EmitWarning) {
1612 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
1613 if (FLL->isExact())
1614 EmitWarning = false;
1615 } else
1616 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)){
1617 if (FLR->isExact())
1618 EmitWarning = false;
1619 }
1620 }
1621
1622 // Check for comparisons with builtin types.
1623 if (EmitWarning)
1624 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
1625 if (CL->isBuiltinCall(Context))
1626 EmitWarning = false;
1627
1628 if (EmitWarning)
1629 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
1630 if (CR->isBuiltinCall(Context))
1631 EmitWarning = false;
1632
1633 // Emit the diagnostic.
1634 if (EmitWarning)
1635 Diag(loc, diag::warn_floatingpoint_eq)
1636 << lex->getSourceRange() << rex->getSourceRange();
1637}
1638
1639//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
1640//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
1641
1642namespace {
1643
1644/// Structure recording the 'active' range of an integer-valued
1645/// expression.
1646struct IntRange {
1647 /// The number of bits active in the int.
1648 unsigned Width;
1649
1650 /// True if the int is known not to have negative values.
1651 bool NonNegative;
1652
1653 IntRange() {}
1654 IntRange(unsigned Width, bool NonNegative)
1655 : Width(Width), NonNegative(NonNegative)
1656 {}
1657
1658 // Returns the range of the bool type.
1659 static IntRange forBoolType() {
1660 return IntRange(1, true);
1661 }
1662
1663 // Returns the range of an integral type.
1664 static IntRange forType(ASTContext &C, QualType T) {
1665 return forCanonicalType(C, T->getCanonicalTypeInternal().getTypePtr());
1666 }
1667
1668 // Returns the range of an integral type based on its canonical
1669 // representation.
1670 static IntRange forCanonicalType(ASTContext &C, const Type *T) {
1671 assert(T->isCanonicalUnqualified());
1672
1673 if (const VectorType *VT = dyn_cast<VectorType>(T))
1674 T = VT->getElementType().getTypePtr();
1675 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
1676 T = CT->getElementType().getTypePtr();
1677 if (const EnumType *ET = dyn_cast<EnumType>(T))
1678 T = ET->getDecl()->getIntegerType().getTypePtr();
1679
1680 const BuiltinType *BT = cast<BuiltinType>(T);
1681 assert(BT->isInteger());
1682
1683 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
1684 }
1685
1686 // Returns the supremum of two ranges: i.e. their conservative merge.
1687 static IntRange join(const IntRange &L, const IntRange &R) {
1688 return IntRange(std::max(L.Width, R.Width),
1689 L.NonNegative && R.NonNegative);
1690 }
1691
1692 // Returns the infimum of two ranges: i.e. their aggressive merge.
1693 static IntRange meet(const IntRange &L, const IntRange &R) {
1694 return IntRange(std::min(L.Width, R.Width),
1695 L.NonNegative || R.NonNegative);
1696 }
1697};
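// For illustration: IntRange(8, true) describes 'unsigned char',
// IntRange(32, false) a 32-bit 'int', and IntRange(1, true) is what
// forBoolType() returns.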
1698
1699IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, unsigned MaxWidth) {
1700 if (value.isSigned() && value.isNegative())
1701 return IntRange(value.getMinSignedBits(), false);
1702
1703 if (value.getBitWidth() > MaxWidth)
1704 value.trunc(MaxWidth);
1705
1706 // isNonNegative() just checks the sign bit without considering
1707 // signedness.
1708 return IntRange(value.getActiveBits(), true);
1709}
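// For example, the value 7 yields IntRange(3, true), while -5 yields
// IntRange(4, false) since four bits are needed for its two's-complement
// representation.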
1710
1711IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
1712 unsigned MaxWidth) {
1713 if (result.isInt())
1714 return GetValueRange(C, result.getInt(), MaxWidth);
1715
1716 if (result.isVector()) {
1717 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
1718 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
1719 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
1720 R = IntRange::join(R, El);
1721 }
1722 return R;
1723 }
1724
1725 if (result.isComplexInt()) {
1726 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
1727 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
1728 return IntRange::join(R, I);
1729 }
1730
1731 // This can happen with lossless casts to intptr_t of "based" lvalues.
1732 // Assume it might use arbitrary bits.
1733 // FIXME: The only reason we need to pass the type in here is to get
1734 // the sign right on this one case. It would be nice if APValue
1735 // preserved this.
1736 assert(result.isLValue());
1737 return IntRange(MaxWidth, Ty->isUnsignedIntegerType());
1738}
1739
1740/// Pseudo-evaluate the given integer expression, estimating the
1741/// range of values it might take.
1742///
1743/// \param MaxWidth - the width to which the value will be truncated
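/// For example, given a non-constant 'unsigned x', 'x & 0x0F' is estimated
/// as a 4-bit non-negative range via IntRange::meet, and the constant
/// expression '3 + 4' collapses to a 3-bit non-negative range because it
/// fully evaluates to 7.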
1744IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
1745 E = E->IgnoreParens();
1746
1747 // Try a full evaluation first.
1748 Expr::EvalResult result;
1749 if (E->Evaluate(result, C))
1750 return GetValueRange(C, result.Val, E->getType(), MaxWidth);
1751
1752 // I think we only want to look through implicit casts here; if the
1753 // user has an explicit widening cast, we should treat the value as
1754 // being of the new, wider type.
1755 if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
1756 if (CE->getCastKind() == CastExpr::CK_NoOp)
1757 return GetExprRange(C, CE->getSubExpr(), MaxWidth);
1758
1759 IntRange OutputTypeRange = IntRange::forType(C, CE->getType());
1760
1761 bool isIntegerCast = (CE->getCastKind() == CastExpr::CK_IntegralCast);
1762 if (!isIntegerCast && CE->getCastKind() == CastExpr::CK_Unknown)
1763 isIntegerCast = CE->getSubExpr()->getType()->isIntegerType();
1764
1765 // Assume that non-integer casts can span the full range of the type.
1766 if (!isIntegerCast)
1767 return OutputTypeRange;
1768
1769 IntRange SubRange
1770 = GetExprRange(C, CE->getSubExpr(),
1771 std::min(MaxWidth, OutputTypeRange.Width));
1772
1773 // Bail out if the subexpr's range is as wide as the cast type.
1774 if (SubRange.Width >= OutputTypeRange.Width)
1775 return OutputTypeRange;
1776
1777 // Otherwise, we take the smaller width, and we're non-negative if
1778 // either the output type or the subexpr is.
1779 return IntRange(SubRange.Width,
1780 SubRange.NonNegative || OutputTypeRange.NonNegative);
1781 }
1782
1783 if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
1784 // If we can fold the condition, just take that operand.
1785 bool CondResult;
1786 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
1787 return GetExprRange(C, CondResult ? CO->getTrueExpr()
1788 : CO->getFalseExpr(),
1789 MaxWidth);
1790
1791 // Otherwise, conservatively merge.
1792 IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
1793 IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
1794 return IntRange::join(L, R);
1795 }
1796
1797 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
1798 switch (BO->getOpcode()) {
1799
1800 // Boolean-valued operations are single-bit and positive.
1801 case BinaryOperator::LAnd:
1802 case BinaryOperator::LOr:
1803 case BinaryOperator::LT:
1804 case BinaryOperator::GT:
1805 case BinaryOperator::LE:
1806 case BinaryOperator::GE:
1807 case BinaryOperator::EQ:
1808 case BinaryOperator::NE:
1809 return IntRange::forBoolType();
1810
1811 // Operations with opaque sources are black-listed.
1812 case BinaryOperator::PtrMemD:
1813 case BinaryOperator::PtrMemI:
1814 return IntRange::forType(C, E->getType());
1815
1816 // Bitwise-and uses the *infimum* of the two source ranges.
1817 case BinaryOperator::And:
1818 return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
1819 GetExprRange(C, BO->getRHS(), MaxWidth));
1820
1821 // Left shift gets black-listed based on a judgement call.
1822 case BinaryOperator::Shl:
1823 return IntRange::forType(C, E->getType());
1824
1825 // Right shift by a constant can narrow its left argument.
1826 case BinaryOperator::Shr: {
1827 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
1828
1829 // If the shift amount is a positive constant, drop the width by
1830 // that much.
1831 llvm::APSInt shift;
1832 if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
1833 shift.isNonNegative()) {
1834 unsigned zext = shift.getZExtValue();
1835 if (zext >= L.Width)
1836 L.Width = (L.NonNegative ? 0 : 1);
1837 else
1838 L.Width -= zext;
1839 }
1840
1841 return L;
1842 }
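// For example, with a 32-bit unsigned 'x', 'x >> 24' is estimated as an
// 8-bit non-negative range rather than the full 32-bit range of its type.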
1843
1844 // Comma acts as its right operand.
1845 case BinaryOperator::Comma:
1846 return GetExprRange(C, BO->getRHS(), MaxWidth);
1847
1848 // Black-list pointer subtractions.
1849 case BinaryOperator::Sub:
1850 if (BO->getLHS()->getType()->isPointerType())
1851 return IntRange::forType(C, E->getType());
1852 // fallthrough
1853
1854 default:
1855 break;
1856 }
1857
1858 // Treat every other operator as if it were closed on the
1859 // narrowest type that encompasses both operands.
1860 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
1861 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
1862 return IntRange::join(L, R);
1863 }
1864
1865 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1866 switch (UO->getOpcode()) {
1867 // Boolean-valued operations are white-listed.
1868 case UnaryOperator::LNot:
1869 return IntRange::forBoolType();
1870
1871 // Operations with opaque sources are black-listed.
1872 case UnaryOperator::Deref:
1873 case UnaryOperator::AddrOf: // should be impossible
1874 case UnaryOperator::OffsetOf:
1875 return IntRange::forType(C, E->getType());
1876
1877 default:
1878 return GetExprRange(C, UO->getSubExpr(), MaxWidth);
1879 }
1880 }
1881
1882 FieldDecl *BitField = E->getBitField();
1883 if (BitField) {
1884 llvm::APSInt BitWidthAP = BitField->getBitWidth()->EvaluateAsInt(C);
1885 unsigned BitWidth = BitWidthAP.getZExtValue();
1886
1887 return IntRange(BitWidth, BitField->getType()->isUnsignedIntegerType());
1888 }
1889
1890 return IntRange::forType(C, E->getType());
1891}
1892
1893/// Checks whether the given value, which currently has the given
1894/// source semantics, has the same value when coerced through the
1895/// target semantics.
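/// For example, when a 'double' constant is implicitly converted to 'float',
/// 2.5 compares bitwise-equal after the round trip, while the closest double
/// to 0.1 does not, because low-order significand bits are lost.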
1896bool IsSameFloatAfterCast(const llvm::APFloat &value,
1897 const llvm::fltSemantics &Src,
1898 const llvm::fltSemantics &Tgt) {
1899 llvm::APFloat truncated = value;
1900
1901 bool ignored;
1902 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
1903 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
1904
1905 return truncated.bitwiseIsEqual(value);
1906}
1907
1908/// Checks whether the given value, which currently has the given
1909/// source semantics, has the same value when coerced through the
1910/// target semantics.
1911///
1912/// The value might be a vector of floats (or a complex number).
1913bool IsSameFloatAfterCast(const APValue &value,
1914 const llvm::fltSemantics &Src,
1915 const llvm::fltSemantics &Tgt) {
1916 if (value.isFloat())
1917 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
1918
1919 if (value.isVector()) {
1920 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
1921 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
1922 return false;
1923 return true;
1924 }
1925
1926 assert(value.isComplexFloat());
1927 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
1928 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
1929}
1930
1931} // end anonymous namespace
1932
1933/// \brief Implements -Wsign-compare.
1934///
1935/// \param lex the left-hand expression
1936/// \param rex the right-hand expression
1937/// \param OpLoc the location of the joining operator
1938/// \param Equality whether this is an "equality-like" join, which
1939/// suppresses the warning in some cases
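/// For example, with 'int i' and 'unsigned u', 'i < u' is diagnosed because
/// 'i' is converted to unsigned and a negative value would change the result,
/// while 'i == 10u' is not: the constant 10 cannot collide with any
/// wrapped-around value of 'i'.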
1940void Sema::CheckSignCompare(Expr *lex, Expr *rex, SourceLocation OpLoc,
1941 const PartialDiagnostic &PD, bool Equality) {
1942 // Don't warn if we're in an unevaluated context.
1943 if (ExprEvalContexts.back().Context == Unevaluated)
1944 return;
1945
1946 // If either expression is value-dependent, don't warn. We'll get another
1947 // chance at instantiation time.
1948 if (lex->isValueDependent() || rex->isValueDependent())
1949 return;
1950
1951 QualType lt = lex->getType(), rt = rex->getType();
1952
1953 // Only warn if both operands are integral.
1954 if (!lt->isIntegerType() || !rt->isIntegerType())
1955 return;
1956
1957 // In C, the width of a bitfield determines its type, and the
1958 // declared type only contributes the signedness. This duplicates
1959 // the work that will later be done by UsualUnaryConversions.
1960 // Eventually, this check will be reorganized in a way that avoids
1961 // this duplication.
1962 if (!getLangOptions().CPlusPlus) {
1963 QualType tmp;
1964 tmp = Context.isPromotableBitField(lex);
1965 if (!tmp.isNull()) lt = tmp;
1966 tmp = Context.isPromotableBitField(rex);
1967 if (!tmp.isNull()) rt = tmp;
1968 }
1969
1970 // The rule is that the signed operand becomes unsigned, so isolate the
1971 // signed operand.
1972 Expr *signedOperand = lex, *unsignedOperand = rex;
1973 QualType signedType = lt, unsignedType = rt;
1974 if (lt->isSignedIntegerType()) {
1975 if (rt->isSignedIntegerType()) return;
1976 } else {
1977 if (!rt->isSignedIntegerType()) return;
1978 std::swap(signedOperand, unsignedOperand);
1979 std::swap(signedType, unsignedType);
1980 }
1981
1982 unsigned unsignedWidth = Context.getIntWidth(unsignedType);
1983 unsigned signedWidth = Context.getIntWidth(signedType);
1984
1985 // If the unsigned type is strictly smaller than the signed type,
1986 // then (1) the result type will be signed and (2) the unsigned
1987 // value will fit fully within the signed type, and thus the result
1988 // of the comparison will be exact.
1989 if (signedWidth > unsignedWidth)
1990 return;
1991
1992 // Otherwise, calculate the effective ranges.
1993 IntRange signedRange = GetExprRange(Context, signedOperand, signedWidth);
1994 IntRange unsignedRange = GetExprRange(Context, unsignedOperand, unsignedWidth);
1995
1996 // We should never be unable to prove that the unsigned operand is
1997 // non-negative.
1998 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
1999
2000 // If the signed operand is non-negative, then the signed->unsigned
2001 // conversion won't change it.
2002 if (signedRange.NonNegative)
2003 return;
2004
2005 // For (in)equality comparisons, if the unsigned operand is a
2006 // constant which cannot collide with an overflowed signed operand,
2007 // then reinterpreting the signed operand as unsigned will not
2008 // change the result of the comparison.
2009 if (Equality && unsignedRange.Width < unsignedWidth)
2010 return;
2011
2012 Diag(OpLoc, PD)
2013 << lt << rt << lex->getSourceRange() << rex->getSourceRange();
2014}
2015
2016/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
2017static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, unsigned diag) {
2018 S.Diag(E->getExprLoc(), diag) << E->getType() << T << E->getSourceRange();
2019}
2020
2021/// Implements -Wconversion.
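/// For example, given 'int i' and 'double d', 'short s = i;' is diagnosed for
/// possible loss of integer precision and 'float f = d;' for loss of
/// floating-point precision, while 'bool b = i;' and 'float g = 2.5;' are
/// not.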
2022void Sema::CheckImplicitConversion(Expr *E, QualType T) {
2023 // Don't diagnose in unevaluated contexts.
2024 if (ExprEvalContexts.back().Context == Sema::Unevaluated)
2025 return;
2026
2027 // Don't diagnose for value-dependent expressions.
2028 if (E->isValueDependent())
2029 return;
2030
2031 const Type *Source = Context.getCanonicalType(E->getType()).getTypePtr();
2032 const Type *Target = Context.getCanonicalType(T).getTypePtr();
2033
2034 // Never diagnose implicit casts to bool.
2035 if (Target->isSpecificBuiltinType(BuiltinType::Bool))
2036 return;
2037
2038 // Strip vector types.
2039 if (isa<VectorType>(Source)) {
2040 if (!isa<VectorType>(Target))
2041 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_vector_scalar);
2042
2043 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
2044 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
2045 }
2046
2047 // Strip complex types.
2048 if (isa<ComplexType>(Source)) {
2049 if (!isa<ComplexType>(Target))
2050 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_complex_scalar);
2051
2052 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
2053 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
2054 }
2055
2056 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
2057 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
2058
2059 // If the source is floating point...
2060 if (SourceBT && SourceBT->isFloatingPoint()) {
2061 // ...and the target is floating point...
2062 if (TargetBT && TargetBT->isFloatingPoint()) {
2063 // ...then warn if we're dropping FP rank.
2064
2065 // Builtin FP kinds are ordered by increasing FP rank.
2066 if (SourceBT->getKind() > TargetBT->getKind()) {
2067 // Don't warn about float constants that are precisely
2068 // representable in the target type.
2069 Expr::EvalResult result;
2070 if (E->Evaluate(result, Context)) {
2071 // Value might be a float, a float vector, or a float complex.
2072 if (IsSameFloatAfterCast(result.Val,
2073 Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
2074 Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
2075 return;
2076 }
2077
2078 DiagnoseImpCast(*this, E, T, diag::warn_impcast_float_precision);
2079 }
2080 return;
2081 }
2082
2083 // If the target is integral, always warn.
2084 if ((TargetBT && TargetBT->isInteger()))
2085 // TODO: don't warn for integer values?
2086 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_float_integer);
2087
2088 return;
2089 }
2090
2091 if (!Source->isIntegerType() || !Target->isIntegerType())
2092 return;
2093
2094 IntRange SourceRange = GetExprRange(Context, E, Context.getIntWidth(E->getType()));
2095 IntRange TargetRange = IntRange::forCanonicalType(Context, Target);
2096
2097 // FIXME: also signed<->unsigned?
2098
2099 if (SourceRange.Width > TargetRange.Width) {
2100 // People want to build with -Wshorten-64-to-32 and not -Wconversion
2101 // and by god we'll let them.
2102 if (SourceRange.Width == 64 && TargetRange.Width == 32)
2103 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_integer_64_32);
2104 return DiagnoseImpCast(*this, E, T, diag::warn_impcast_integer_precision);
2105 }
2106
2107 return;
2108}
2109
2110// MarkLive - Mark all the blocks reachable from e as live. Returns the total
2111// number of blocks just marked live.
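// This is a plain breadth-first walk over successor edges; 'live' doubles as
// the visited set, so the caller can keep using it as a reachability bitmap.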
2112static unsigned MarkLive(CFGBlock *e, llvm::BitVector &live) {
2113 unsigned count = 0;
2114 std::queue<CFGBlock*> workq;
2115 // Prep work queue
2116 live.set(e->getBlockID());
2117 ++count;
2118 workq.push(e);
2119 // Solve
2120 while (!workq.empty()) {
2121 CFGBlock *item = workq.front();
2122 workq.pop();
2123 for (CFGBlock::succ_iterator I=item->succ_begin(),
2124 E=item->succ_end();
2125 I != E;
2126 ++I) {
2127 if ((*I) && !live[(*I)->getBlockID()]) {
2128 live.set((*I)->getBlockID());
2129 ++count;
2130 workq.push(*I);
2131 }
2132 }
2133 }
2134 return count;
2135}
2136
2137static SourceLocation GetUnreachableLoc(CFGBlock &b, SourceRange &R1,
2138 SourceRange &R2) {
2139 Stmt *S;
2140 unsigned sn = 0;
2141 R1 = R2 = SourceRange();
2142
2143 top:
2144 if (sn < b.size())
2145 S = b[sn].getStmt();
2146 else if (b.getTerminator())
2147 S = b.getTerminator();
2148 else
2149 return SourceLocation();
2150
2151 switch (S->getStmtClass()) {
2152 case Expr::BinaryOperatorClass: {
2153 BinaryOperator *BO = cast<BinaryOperator>(S);
2154 if (BO->getOpcode() == BinaryOperator::Comma) {
2155 if (sn+1 < b.size())
2156 return b[sn+1].getStmt()->getLocStart();
2157 CFGBlock *n = &b;
2158 while (1) {
2159 if (n->getTerminator())
2160 return n->getTerminator()->getLocStart();
2161 if (n->succ_size() != 1)
2162 return SourceLocation();
2163 n = n[0].succ_begin()[0];
2164 if (n->pred_size() != 1)
2165 return SourceLocation();
2166 if (!n->empty())
2167 return n[0][0].getStmt()->getLocStart();
2168 }
2169 }
2170 R1 = BO->getLHS()->getSourceRange();
2171 R2 = BO->getRHS()->getSourceRange();
2172 return BO->getOperatorLoc();
2173 }
2174 case Expr::UnaryOperatorClass: {
2175 const UnaryOperator *UO = cast<UnaryOperator>(S);
2176 R1 = UO->getSubExpr()->getSourceRange();
2177 return UO->getOperatorLoc();
2178 }
2179 case Expr::CompoundAssignOperatorClass: {
2180 const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
2181 R1 = CAO->getLHS()->getSourceRange();
2182 R2 = CAO->getRHS()->getSourceRange();
2183 return CAO->getOperatorLoc();
2184 }
2185 case Expr::ConditionalOperatorClass: {
2186 const ConditionalOperator *CO = cast<ConditionalOperator>(S);
2187 return CO->getQuestionLoc();
2188 }
2189 case Expr::MemberExprClass: {
2190 const MemberExpr *ME = cast<MemberExpr>(S);
2191 R1 = ME->getSourceRange();
2192 return ME->getMemberLoc();
2193 }
2194 case Expr::ArraySubscriptExprClass: {
2195 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
2196 R1 = ASE->getLHS()->getSourceRange();
2197 R2 = ASE->getRHS()->getSourceRange();
2198 return ASE->getRBracketLoc();
2199 }
2200 case Expr::CStyleCastExprClass: {
2201 const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
2202 R1 = CSC->getSubExpr()->getSourceRange();
2203 return CSC->getLParenLoc();
2204 }
2205 case Expr::CXXFunctionalCastExprClass: {
2206 const CXXFunctionalCastExpr *CE = cast<CXXFunctionalCastExpr>(S);
2207 R1 = CE->getSubExpr()->getSourceRange();
2208 return CE->getTypeBeginLoc();
2209 }
2210 case Expr::ImplicitCastExprClass:
2211 ++sn;
2212 goto top;
2213 case Stmt::CXXTryStmtClass: {
2214 return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
2215 }
2216 default: ;
2217 }
2218 R1 = S->getSourceRange();
2219 return S->getLocStart();
2220}
2221
2222static SourceLocation MarkLiveTop(CFGBlock *e, llvm::BitVector &live,
2223 SourceManager &SM) {
2224 std::queue<CFGBlock*> workq;
2225 // Prep work queue
2226 workq.push(e);
2227 SourceRange R1, R2;
2228 SourceLocation top = GetUnreachableLoc(*e, R1, R2);
2229 bool FromMainFile = false;
2230 bool FromSystemHeader = false;
2231 bool TopValid = false;
2232 if (top.isValid()) {
2233 FromMainFile = SM.isFromMainFile(top);
2234 FromSystemHeader = SM.isInSystemHeader(top);
2235 TopValid = true;
2236 }
2237 // Solve
2238 while (!workq.empty()) {
2239 CFGBlock *item = workq.front();
2240 workq.pop();
2241 SourceLocation c = GetUnreachableLoc(*item, R1, R2);
2242 if (c.isValid()
2243 && (!TopValid
2244 || (SM.isFromMainFile(c) && !FromMainFile)
2245 || (FromSystemHeader && !SM.isInSystemHeader(c))
2246 || SM.isBeforeInTranslationUnit(c, top))) {
2247 top = c;
2248 FromMainFile = SM.isFromMainFile(top);
2249 FromSystemHeader = SM.isInSystemHeader(top);
2250 }
2251 live.set(item->getBlockID());
2252 for (CFGBlock::succ_iterator I=item->succ_begin(),
2253 E=item->succ_end();
2254 I != E;
2255 ++I) {
2256 if ((*I) && !live[(*I)->getBlockID()]) {
2257 live.set((*I)->getBlockID());
2258 workq.push(*I);
2259 }
2260 }
2261 }
2262 return top;
2263}
2264
2265static int LineCmp(const void *p1, const void *p2) {
2266 SourceLocation *Line1 = (SourceLocation *)p1;
2267 SourceLocation *Line2 = (SourceLocation *)p2;
2268 return !(*Line1 < *Line2);
2269}
2270
2271namespace {
2272 struct ErrLoc {
2273 SourceLocation Loc;
2274 SourceRange R1;
2275 SourceRange R2;
2276 ErrLoc(SourceLocation l, SourceRange r1, SourceRange r2)
2277 : Loc(l), R1(r1), R2(r2) { }
2278 };
2279}
2280
2281/// CheckUnreachable - Check for unreachable code.
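/// For example, in 'int f(int x) { return x; x++; }' the 'x++' statement can
/// never execute and is reported with warn_unreachable (when that diagnostic
/// is enabled).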
2282void Sema::CheckUnreachable(AnalysisContext &AC) {
2283 unsigned count;
2284 // We avoid checking when there are errors, as the CFG won't faithfully match
2285 // the user's code.
2286 if (getDiagnostics().hasErrorOccurred())
2287 return;
2288 if (Diags.getDiagnosticLevel(diag::warn_unreachable) == Diagnostic::Ignored)
2289 return;
2290
2291 CFG *cfg = AC.getCFG();
2292 if (cfg == 0)
2293 return;
2294
2295 llvm::BitVector live(cfg->getNumBlockIDs());
2296 // Mark all live things first.
2297 count = MarkLive(&cfg->getEntry(), live);
2298
2299 if (count == cfg->getNumBlockIDs())
2300 // If there are no dead blocks, we're done.
2301 return;
2302
2303 SourceRange R1, R2;
2304
2305 llvm::SmallVector<ErrLoc, 24> lines;
2306 bool AddEHEdges = AC.getAddEHEdges();
2307 // First, give warnings for blocks with no predecessors, as they
2308 // can't be part of a loop.
2309 for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
2310 CFGBlock &b = **I;
2311 if (!live[b.getBlockID()]) {
2312 if (b.pred_begin() == b.pred_end()) {
2313 if (!AddEHEdges && b.getTerminator()
2314 && isa<CXXTryStmt>(b.getTerminator())) {
2315 // When not adding EH edges from calls, catch clauses
2316 // can otherwise seem dead. Avoid noting them as dead.
2317 count += MarkLive(&b, live);
2318 continue;
2319 }
2320 SourceLocation c = GetUnreachableLoc(b, R1, R2);
2321 if (!c.isValid()) {
2322 // Blocks without a location can't produce a warning, so don't mark
2323 // reachable blocks from here as live.
2324 live.set(b.getBlockID());
2325 ++count;
2326 continue;
2327 }
2328 lines.push_back(ErrLoc(c, R1, R2));
2329 // Avoid excessive errors by marking everything reachable from here
2330 count += MarkLive(&b, live);
2331 }
2332 }
2333 }
2334
2335 if (count < cfg->getNumBlockIDs()) {
2336 // And then give warnings for the tops of loops.
2337 for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
2338 CFGBlock &b = **I;
2339 if (!live[b.getBlockID()])
2340 // Avoid excessive errors by marking everything reachable from here
2341 lines.push_back(ErrLoc(MarkLiveTop(&b, live,
2342 Context.getSourceManager()),
2343 SourceRange(), SourceRange()));
2344 }
2345 }
2346
2347 llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);
2348 for (llvm::SmallVector<ErrLoc, 24>::iterator I = lines.begin(),
2349 E = lines.end();
2350 I != E;
2351 ++I)
2352 if (I->Loc.isValid())
2353 Diag(I->Loc, diag::warn_unreachable) << I->R1 << I->R2;
2354}
2355
2356/// CheckFallThrough - Check that we don't fall off the end of a
2357/// Statement that should return a value.
2358///
2359/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
2360/// MaybeFallThrough iff we might or might not fall off the end,
2361/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
2362/// return. We assume NeverFallThrough iff we never fall off the end of the
2363/// statement but we may return. We assume that functions not marked noreturn
2364/// will return.
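/// For example, 'int f() { return 0; }' yields NeverFallThrough,
/// 'int g() {}' yields AlwaysFallThrough, and
/// 'int h(bool b) { if (b) return 0; }' yields MaybeFallThrough.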
2365Sema::ControlFlowKind Sema::CheckFallThrough(AnalysisContext &AC) {
2366 CFG *cfg = AC.getCFG();
2367 if (cfg == 0)
2368 // FIXME: This should be NeverFallThrough
2369 return NeverFallThroughOrReturn;
2370
2371 // The CFG leaves in dead things, and we don't want the dead code paths to
2372 // confuse us, so we mark all live things first.
2373 std::queue<CFGBlock*> workq;
2374 llvm::BitVector live(cfg->getNumBlockIDs());
2375 unsigned count = MarkLive(&cfg->getEntry(), live);
2376
2377 bool AddEHEdges = AC.getAddEHEdges();
2378 if (!AddEHEdges && count != cfg->getNumBlockIDs())
2379 // When there are things remaining dead, and we didn't add EH edges
2380 // from CallExprs to the catch clauses, we have to go back and
2381 // mark them as live.
2382 for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
2383 CFGBlock &b = **I;
2384 if (!live[b.getBlockID()]) {
2385 if (b.pred_begin() == b.pred_end()) {
2386 if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
2387 // When not adding EH edges from calls, catch clauses
2388 // can otherwise seem dead. Avoid noting them as dead.
2389 count += MarkLive(&b, live);
2390 continue;
2391 }
2392 }
2393 }
2394
2395 // Now that we know what is live, we check the live predecessors of the exit block
2396 // and look for fall through paths, being careful to ignore normal returns,
2397 // and exceptional paths.
2398 bool HasLiveReturn = false;
2399 bool HasFakeEdge = false;
2400 bool HasPlainEdge = false;
2401 bool HasAbnormalEdge = false;
2402 for (CFGBlock::pred_iterator I=cfg->getExit().pred_begin(),
2403 E = cfg->getExit().pred_end();
2404 I != E;
2405 ++I) {
2406 CFGBlock& B = **I;
2407 if (!live[B.getBlockID()])
2408 continue;
2409 if (B.size() == 0) {
2410 if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
2411 HasAbnormalEdge = true;
2412 continue;
2413 }
2414
2415 // A labeled empty statement, or the entry block...
2416 HasPlainEdge = true;
2417 continue;
2418 }
2419 Stmt *S = B[B.size()-1];
2420 if (isa<ReturnStmt>(S)) {
2421 HasLiveReturn = true;
2422 continue;
2423 }
2424 if (isa<ObjCAtThrowStmt>(S)) {
2425 HasFakeEdge = true;
2426 continue;
2427 }
2428 if (isa<CXXThrowExpr>(S)) {
2429 HasFakeEdge = true;
2430 continue;
2431 }
2432 if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
2433 if (AS->isMSAsm()) {
2434 HasFakeEdge = true;
2435 HasLiveReturn = true;
2436 continue;
2437 }
2438 }
2439 if (isa<CXXTryStmt>(S)) {
2440 HasAbnormalEdge = true;
2441 continue;
2442 }
2443
2444 bool NoReturnEdge = false;
2445 if (CallExpr *C = dyn_cast<CallExpr>(S)) {
2446 if (B.succ_begin()[0] != &cfg->getExit()) {
2447 HasAbnormalEdge = true;
2448 continue;
2449 }
2450 Expr *CEE = C->getCallee()->IgnoreParenCasts();
2451 if (CEE->getType().getNoReturnAttr()) {
2452 NoReturnEdge = true;
2453 HasFakeEdge = true;
2454 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
2455 ValueDecl *VD = DRE->getDecl();
2456 if (VD->hasAttr<NoReturnAttr>()) {
2457 NoReturnEdge = true;
2458 HasFakeEdge = true;
2459 }
2460 }
2461 }
2462 // FIXME: Add noreturn message sends.
2463 if (NoReturnEdge == false)
2464 HasPlainEdge = true;
2465 }
2466 if (!HasPlainEdge) {
2467 if (HasLiveReturn)
2468 return NeverFallThrough;
2469 return NeverFallThroughOrReturn;
2470 }
2471 if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
2472 return MaybeFallThrough;
2473 // This says AlwaysFallThrough for calls to functions that do not return but
2474 // are not marked noreturn. If people would like this warning to be more
2475 // accurate, such functions should be marked as noreturn.
2476 return AlwaysFallThrough;
2477}
2478
2479/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
2480/// function that should return a value. Check that we don't fall off the end
2481/// of a noreturn function. We assume that functions and blocks not marked
2482/// noreturn will return.
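/// For example, 'int f(bool b) { if (b) return 1; }' draws
/// warn_maybe_falloff_nonvoid_function, while a function marked noreturn whose
/// body can run to completion draws warn_falloff_noreturn_function.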
2483void Sema::CheckFallThroughForFunctionDef(Decl *D, Stmt *Body,
2484 AnalysisContext &AC) {
2485 // FIXME: Would be nice if we had a better way to control cascading errors,
2486 // but for now, avoid them. The problem is that when Parse sees:
2487 // int foo() { return a; }
2488 // The return is eaten and the Sema code sees just:
2489 // int foo() { }
2490 // which this code would then warn about.
2491 if (getDiagnostics().hasErrorOccurred())
2492 return;
2493
2494 bool ReturnsVoid = false;
2495 bool HasNoReturn = false;
2496 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
2497 // For function templates, class templates and member function templates
2498 // we'll do the analysis at instantiation time.
2499 if (FD->isDependentContext())
2500 return;
2501
2502 if (FD->getResultType()->isVoidType())
2503 ReturnsVoid = true;
2504 if (FD->hasAttr<NoReturnAttr>() ||
2505 FD->getType()->getAs<FunctionType>()->getNoReturnAttr())
2506 HasNoReturn = true;
2507 } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
2508 if (MD->getResultType()->isVoidType())
2509 ReturnsVoid = true;
2510 if (MD->hasAttr<NoReturnAttr>())
2511 HasNoReturn = true;
2512 }
2513
2514 // Short circuit for compilation speed.
2515 if ((Diags.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function)
2516 == Diagnostic::Ignored || ReturnsVoid)
2517 && (Diags.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr)
2518 == Diagnostic::Ignored || !HasNoReturn)
2519 && (Diags.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
2520 == Diagnostic::Ignored || !ReturnsVoid))
2521 return;
2522 // FIXME: Function try block
2523 if (CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
2524 switch (CheckFallThrough(AC)) {
2525 case MaybeFallThrough:
2526 if (HasNoReturn)
2527 Diag(Compound->getRBracLoc(), diag::warn_falloff_noreturn_function);
2528 else if (!ReturnsVoid)
2529 Diag(Compound->getRBracLoc(),diag::warn_maybe_falloff_nonvoid_function);
2530 break;
2531 case AlwaysFallThrough:
2532 if (HasNoReturn)
2533 Diag(Compound->getRBracLoc(), diag::warn_falloff_noreturn_function);
2534 else if (!ReturnsVoid)
2535 Diag(Compound->getRBracLoc(), diag::warn_falloff_nonvoid_function);
2536 break;
2537 case NeverFallThroughOrReturn:
2538 if (ReturnsVoid && !HasNoReturn)
2539 Diag(Compound->getLBracLoc(), diag::warn_suggest_noreturn_function);
2540 break;
2541 case NeverFallThrough:
2542 break;
2543 }
2544 }
2545}
2546
2547/// CheckFallThroughForBlock - Check that we don't fall off the end of a block
2548/// that should return a value. Check that we don't fall off the end of a
2549/// noreturn block. We assume that functions and blocks not marked noreturn
2550/// will return.
2551void Sema::CheckFallThroughForBlock(QualType BlockTy, Stmt *Body,
2552 AnalysisContext &AC) {
2553 // FIXME: Would be nice if we had a better way to control cascading errors,
2554 // but for now, avoid them. The problem is that when Parse sees:
2555 // int foo() { return a; }
2556 // The return is eaten and the Sema code sees just:
2557 // int foo() { }
2558 // which this code would then warn about.
2559 if (getDiagnostics().hasErrorOccurred())
2560 return;
2561 bool ReturnsVoid = false;
2562 bool HasNoReturn = false;
2563 if (const FunctionType *FT = BlockTy->getPointeeType()->getAs<FunctionType>()){
2564 if (FT->getResultType()->isVoidType())
2565 ReturnsVoid = true;
2566 if (FT->getNoReturnAttr())
2567 HasNoReturn = true;
2568 }
2569
2570 // Short circuit for compilation speed.
2571 if (ReturnsVoid
2572 && !HasNoReturn
2573 && (Diags.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
2574 == Diagnostic::Ignored || !ReturnsVoid))
2575 return;
2576 // FIXME: Function try block
2577 if (CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
2578 switch (CheckFallThrough(AC)) {
2579 case MaybeFallThrough:
2580 if (HasNoReturn)
2581 Diag(Compound->getRBracLoc(), diag::err_noreturn_block_has_return_expr);
2582 else if (!ReturnsVoid)
2583 Diag(Compound->getRBracLoc(), diag::err_maybe_falloff_nonvoid_block);
2584 break;
2585 case AlwaysFallThrough:
2586 if (HasNoReturn)
2587 Diag(Compound->getRBracLoc(), diag::err_noreturn_block_has_return_expr);
2588 else if (!ReturnsVoid)
2589 Diag(Compound->getRBracLoc(), diag::err_falloff_nonvoid_block);
2590 break;
2591 case NeverFallThroughOrReturn:
2592 if (ReturnsVoid)
2593 Diag(Compound->getLBracLoc(), diag::warn_suggest_noreturn_block);
2594 break;
2595 case NeverFallThrough:
2596 break;
2597 }
2598 }
2599}
2600
2601/// CheckParmsForFunctionDef - Check that the parameters of the given
2602/// function are appropriate for the definition of a function. This
2603/// takes care of any checks that cannot be performed on the
2604/// declaration itself, e.g., that the types of each of the function
2605/// parameters are complete.
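/// For example, defining 'void f(struct S s) {}' while 'struct S' is still
/// incomplete triggers err_typecheck_decl_incomplete_type, and in C the
/// definition 'void g(int) {}' with an unnamed parameter triggers
/// err_parameter_name_omitted.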
2606bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) {
2607 bool HasInvalidParm = false;
2608 for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
2609 ParmVarDecl *Param = FD->getParamDecl(p);
2610
2611 // C99 6.7.5.3p4: the parameters in a parameter type list in a
2612 // function declarator that is part of a function definition of
2613 // that function shall not have incomplete type.
2614 //
2615 // This is also C++ [dcl.fct]p6.
2616 if (!Param->isInvalidDecl() &&
2617 RequireCompleteType(Param->getLocation(), Param->getType(),
2618 diag::err_typecheck_decl_incomplete_type)) {
2619 Param->setInvalidDecl();
2620 HasInvalidParm = true;
2621 }
2622
2623 // C99 6.9.1p5: If the declarator includes a parameter type list, the
2624 // declaration of each parameter shall include an identifier.
2625 if (Param->getIdentifier() == 0 &&
2626 !Param->isImplicit() &&
2627 !getLangOptions().CPlusPlus)
2628 Diag(Param->getLocation(), diag::err_parameter_name_omitted);
2629
2630 // C99 6.7.5.3p12:
2631 // If the function declarator is not part of a definition of that
2632 // function, parameters may have incomplete type and may use the [*]
2633 // notation in their sequences of declarator specifiers to specify
2634 // variable length array types.
2635 QualType PType = Param->getOriginalType();
2636 if (const ArrayType *AT = Context.getAsArrayType(PType)) {
2637 if (AT->getSizeModifier() == ArrayType::Star) {
2638 // FIXME: This diagnostic should point to the '[*]' if source-location
2639 // information is added for it.
2640 Diag(Param->getLocation(), diag::err_array_star_in_function_definition);
2641 }
2642 }
2643
2644 if (getLangOptions().CPlusPlus)
2645 if (const RecordType *RT = Param->getType()->getAs<RecordType>())
2646 FinalizeVarWithDestructor(Param, RT);
2647 }
2648
2649 return HasInvalidParm;
2650}