// RUN: %clang_cc1 -std=c++11 -triple x86_64-apple-darwin %s -emit-llvm -o - | FileCheck -check-prefixes=X64,CHECK %s
// RUN: %clang_cc1 -std=c++11 -triple amdgcn %s -emit-llvm -o - | FileCheck -check-prefixes=AMDGCN,CHECK %s
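
// This test exercises IR generation for C++ variable-length arrays (a GNU
// extension): on x86_64 VLA locals are ordinary allocas, while on AMDGCN the
// allocas live in the private address space (addrspace(5)) and are
// addrspacecast to generic pointers before use.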

template<typename T>
struct S {
  static int n;
};
template<typename T> int S<T>::n = 5;

int f() {
  // Make sure that the reference here is enough to trigger the instantiation of
  // the static data member.
  // CHECK: @_ZN1SIiE1nE = linkonce_odr{{.*}} global i32 5
  int a[S<int>::n];
  return sizeof a;
}
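
// Because S<int>::n is not a constant expression, 'a' above is a VLA and
// 'sizeof a' is evaluated at run time. A rough sketch of what f() computes
// (illustrative only, not the exact IR):
//   size_t bound = S<int>::n;    // 5 once the definition is instantiated
//   return bound * sizeof(int);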

// rdar://problem/9506377
void test0(void *array, int n) {
  // CHECK-LABEL: define void @_Z5test0Pvi(
  // X64: [[ARRAY:%.*]] = alloca i8*, align 8
  // AMDGCN: [[ARRAY0:%.*]] = alloca i8*, align 8, addrspace(5)
  // AMDGCN-NEXT: [[ARRAY:%.*]] = addrspacecast i8* addrspace(5)* [[ARRAY0]] to i8**
  // X64-NEXT: [[N:%.*]] = alloca i32, align 4
  // AMDGCN: [[N0:%.*]] = alloca i32, align 4, addrspace(5)
  // AMDGCN-NEXT: [[N:%.*]] = addrspacecast i32 addrspace(5)* [[N0]] to i32*
  // X64-NEXT: [[REF:%.*]] = alloca i16*, align 8
  // AMDGCN: [[REF0:%.*]] = alloca i16*, align 8, addrspace(5)
  // AMDGCN-NEXT: [[REF:%.*]] = addrspacecast i16* addrspace(5)* [[REF0]] to i16**
  // X64-NEXT: [[S:%.*]] = alloca i16, align 2
  // AMDGCN: [[S0:%.*]] = alloca i16, align 2, addrspace(5)
  // AMDGCN-NEXT: [[S:%.*]] = addrspacecast i16 addrspace(5)* [[S0]] to i16*
  // CHECK-NEXT: store i8*
  // CHECK-NEXT: store i32

  // Capture the bounds.
  // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4
  // CHECK-NEXT: [[DIM0:%.*]] = zext i32 [[T0]] to i64
  // CHECK-NEXT: [[T0:%.*]] = load i32, i32* [[N]], align 4
  // CHECK-NEXT: [[T1:%.*]] = add nsw i32 [[T0]], 1
  // CHECK-NEXT: [[DIM1:%.*]] = zext i32 [[T1]] to i64
  typedef short array_t[n][n+1];
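  // The typedef itself evaluates the array bounds: n and n+1 are loaded once,
  // widened to i64, and captured as DIM0 and DIM1; DIM1 (the inner bound) is
  // the stride reused by each indexing of array_t below. Roughly (illustrative
  // only):
  //   uint64_t dim0 = (uint64_t)n;
  //   uint64_t dim1 = (uint64_t)(n + 1);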

  // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[ARRAY]], align 8
  // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to i16*
  // CHECK-NEXT: store i16* [[T1]], i16** [[REF]], align 8
  array_t &ref = *(array_t*) array;
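  // Binding the reference is just a pointer reinterpretation; no bound is
  // stored with it, which is why the indexing below reuses the DIM1 value
  // captured above.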

  // CHECK-NEXT: [[T0:%.*]] = load i16*, i16** [[REF]]
  // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 1, [[DIM1]]
  // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16, i16* [[T0]], i64 [[T1]]
  // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16, i16* [[T2]], i64 2
  // CHECK-NEXT: store i16 3, i16* [[T3]]
  ref[1][2] = 3;
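  // Row-major addressing with the captured bound: &ref[i][j] is
  // base + i*DIM1 + j in i16 elements, so the store above targets
  // base + 1*(n+1) + 2. The load of ref[4][5] below follows the same pattern.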

  // CHECK-NEXT: [[T0:%.*]] = load i16*, i16** [[REF]]
  // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 4, [[DIM1]]
  // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16, i16* [[T0]], i64 [[T1]]
  // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16, i16* [[T2]], i64 5
  // CHECK-NEXT: [[T4:%.*]] = load i16, i16* [[T3]]
  // CHECK-NEXT: store i16 [[T4]], i16* [[S]], align 2
  short s = ref[4][5];

  // CHECK-NEXT: ret void
}


void test2(int b) {
  // CHECK-LABEL: define void {{.*}}test2{{.*}}(i32 %b)
  int varr[b];
  // AMDGCN: %__end1 = alloca i32*, align 8, addrspace(5)
  // AMDGCN: [[END:%.*]] = addrspacecast i32* addrspace(5)* %__end1 to i32**
  // Get the address of %b from the first store into it.
  // CHECK: store i32 %b, i32* [[PTR_B:%.*]]

  // Capture the VLA element count from the first load of PTR_B.
  // CHECK: [[VLA_NUM_ELEMENTS_PREZEXT:%.*]] = load i32, i32* [[PTR_B]]
  // CHECK-NEXT: [[VLA_NUM_ELEMENTS_PRE:%.*]] = zext i32 [[VLA_NUM_ELEMENTS_PREZEXT]]

  b = 15;
  // CHECK: store i32 15, i32* [[PTR_B]]

  // Compute sizeof(varr), then divide by the element size to get the element
  // count and form the end pointer for the loop below.
  // CHECK: [[VLA_SIZEOF:%.*]] = mul nuw i64 4, [[VLA_NUM_ELEMENTS_PRE]]
  // CHECK-NEXT: [[VLA_NUM_ELEMENTS_POST:%.*]] = udiv i64 [[VLA_SIZEOF]], 4
  // CHECK-NEXT: [[VLA_END_PTR:%.*]] = getelementptr inbounds i32, i32* {{%.*}}, i64 [[VLA_NUM_ELEMENTS_POST]]
  // X64-NEXT: store i32* [[VLA_END_PTR]], i32** %__end1
  // AMDGCN-NEXT: store i32* [[VLA_END_PTR]], i32** [[END]]
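
  // The range-based for over the VLA computes its end pointer from the element
  // count captured when varr was declared, so the 'b = 15' above does not
  // change the trip count. A rough equivalent of the loop below (illustrative
  // only; num_elements is a hypothetical name for that captured count):
  //   int *__begin1 = varr;
  //   int *__end1 = varr + num_elements;
  //   for (; __begin1 != __end1; ++__begin1) { int d = *__begin1; (void)d; }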
  for (int d : varr) 0;
}

void test3(int b, int c) {
  // CHECK-LABEL: define void {{.*}}test3{{.*}}(i32 %b, i32 %c)
  int varr[b][c];
  // AMDGCN: %__end1 = alloca i32*, align 8, addrspace(5)
  // AMDGCN: [[END:%.*]] = addrspacecast i32* addrspace(5)* %__end1 to i32**
  // Get the addresses of %b and %c from the first stores into them.
  // CHECK: store i32 %b, i32* [[PTR_B:%.*]]
  // CHECK-NEXT: store i32 %c, i32* [[PTR_C:%.*]]

  // Capture the two VLA dimensions from the first loads of PTR_B and PTR_C.
  // CHECK: [[VLA_DIM1_PREZEXT:%.*]] = load i32, i32* [[PTR_B]]
  // CHECK-NEXT: [[VLA_DIM1_PRE:%.*]] = zext i32 [[VLA_DIM1_PREZEXT]]
  // CHECK: [[VLA_DIM2_PREZEXT:%.*]] = load i32, i32* [[PTR_C]]
  // CHECK-NEXT: [[VLA_DIM2_PRE:%.*]] = zext i32 [[VLA_DIM2_PREZEXT]]

  b = 15;
  c = 15;
  // CHECK: store i32 15, i32* [[PTR_B]]
  // CHECK: store i32 15, i32* [[PTR_C]]

  // Multiply the two dimensions, then by the element size, to get sizeof(varr);
  // divide that by the size of one row (dim2 elements) to recover the row
  // count, and scale by dim2 again to form the end pointer for the loop below.
  // CHECK: [[VLA_DIM1_X_DIM2:%.*]] = mul nuw i64 [[VLA_DIM1_PRE]], [[VLA_DIM2_PRE]]
  // CHECK-NEXT: [[VLA_SIZEOF:%.*]] = mul nuw i64 4, [[VLA_DIM1_X_DIM2]]
  // CHECK-NEXT: [[VLA_SIZEOF_DIM2:%.*]] = mul nuw i64 4, [[VLA_DIM2_PRE]]
  // CHECK-NEXT: [[VLA_NUM_ELEMENTS:%.*]] = udiv i64 [[VLA_SIZEOF]], [[VLA_SIZEOF_DIM2]]
  // CHECK-NEXT: [[VLA_END_INDEX:%.*]] = mul nsw i64 [[VLA_NUM_ELEMENTS]], [[VLA_DIM2_PRE]]
  // CHECK-NEXT: [[VLA_END_PTR:%.*]] = getelementptr inbounds i32, i32* {{%.*}}, i64 [[VLA_END_INDEX]]
  // X64-NEXT: store i32* [[VLA_END_PTR]], i32** %__end1
  // AMDGCN-NEXT: store i32* [[VLA_END_PTR]], i32** [[END]]
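
  // For the two-dimensional VLA the loop visits rows: the udiv above yields the
  // row count (total bytes over bytes per row), and the end pointer is
  // begin + row_count*dim2 in i32 elements. Roughly (illustrative only; dim2
  // and row_count are hypothetical names for the captured values):
  //   int (*__begin1)[dim2] = varr;
  //   int (*__end1)[dim2] = varr + row_count;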

  for (auto &d : varr) 0;
}