// RUN: %clang_cc1 -x c++ -emit-llvm -fshort-wchar %s -o - | FileCheck %s
// Runs in c++ mode so that wchar_t is available.

// Exercises IR generation for string and character literals with universal
// character names: narrow string literals are encoded as UTF-8, wide
// strings/characters as UTF-16 (wchar_t is 2 bytes under -fshort-wchar),
// and multi-character literals keep only the last character.
int main() {
  // This should convert to utf8.
  // CHECK: internal constant [10 x i8] c"\E1\84\A0\C8\A0\F4\82\80\B0\00", align 1
  char b[10] = "\u1120\u0220\U00102030";

  // Wide string: each wchar_t is 2 bytes, little-endian, NUL-terminated.
  // CHECK: private constant [6 x i8] c"A\00B\00\00\00"
  const wchar_t *foo = L"AB";

  // This should convert to utf16.
  // U+102030 is outside the BMP, so it becomes the surrogate pair dbc8 dcb0.
  // CHECK: private constant [10 x i8] c" \11 \02\C8\DB0\DC\00\00"
  const wchar_t *bar = L"\u1120\u0220\U00102030";



  // Should pick second character.
  // CHECK: store i8 98
  char c = 'ab';

  // CHECK: store i16 97
  wchar_t wa = L'a';

  // Should pick second character.
  // CHECK: store i16 98
  wchar_t wb = L'ab';

  // -4085 == 0xf00b
  // CHECK: store i16 -4085
  wchar_t wc = L'\uF00B';

  // Should take lower word of the 4byte UNC sequence. This does not match
  // gcc. I don't understand what gcc does (it looks like it converts to utf16,
  // then takes the second (!) utf16 word, swaps the lower two nibbles, and
  // stores that?).
  // CHECK: store i16 -4085
  wchar_t wd = L'\U0010F00B'; // has utf16 encoding dbc8 dcb0

  // Should pick second character. (gcc: -9205)
  // CHECK: store i16 -4085
  wchar_t we = L'\u1234\U0010F00B';
}