// RUN: %clang_cc1 -target-feature +altivec -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s

#include <stdarg.h>

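// These tests cover two things for naturally- and over-aligned aggregates on
// powerpc64 Linux: how they are passed as named arguments (coerced to integer
// types or passed byval), and how va_arg advances and realigns the argument
// pointer when fetching them from a va_list.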
struct test1 { int x; int y; };
struct test2 { int x; int y; } __attribute__((aligned (16)));
struct test3 { int x; int y; } __attribute__((aligned (32)));
struct test4 { int x; int y; int z; };
struct test5 { int x[17]; };
struct test6 { int x[17]; } __attribute__((aligned (16)));
struct test7 { int x[17]; } __attribute__((aligned (32)));

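// Named-argument passing: small aggregates are coerced to integer types whose
// size and alignment reflect the struct's alignment, so test1 becomes a single
// i64 while the 16- and 32-byte-aligned variants become arrays of i128.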
// CHECK: define void @test1(i32 signext %x, i64 %y.coerce)
void test1 (int x, struct test1 y)
{
}

// CHECK: define void @test2(i32 signext %x, [1 x i128] %y.coerce)
void test2 (int x, struct test2 y)
{
}

// CHECK: define void @test3(i32 signext %x, [2 x i128] %y.coerce)
void test3 (int x, struct test3 y)
{
}

// CHECK: define void @test4(i32 signext %x, [2 x i64] %y.coerce)
void test4 (int x, struct test4 y)
{
}

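// Aggregates too large to be coerced are passed indirectly with the byval
// attribute. The byval alignment is capped at 16 bytes, which is why the
// 32-byte-aligned test7 below needs explicit realignment in the callee.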
// CHECK: define void @test5(i32 signext %x, %struct.test5* byval align 8 %y)
void test5 (int x, struct test5 y)
{
}

// CHECK: define void @test6(i32 signext %x, %struct.test6* byval align 16 %y)
void test6 (int x, struct test6 y)
{
}

// This case requires run-time realignment of the incoming struct
// CHECK-LABEL: define void @test7(i32 signext %x, %struct.test7* byval align 16)
// CHECK: %y = alloca %struct.test7, align 32
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
void test7 (int x, struct test7 y)
{
}

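// The va_arg tests below check the expanded va_list handling: how far the
// argument pointer is advanced for each aggregate, and whether it is first
// rounded up to a 16-byte boundary for overaligned types.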
// CHECK: define void @test1va(%struct.test1* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test1*
// CHECK: [[DEST:%.*]] = bitcast %struct.test1* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test1* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 8 [[SRC]], i64 8, i1 false)
struct test1 test1va (int x, ...)
{
  struct test1 y;
  va_list ap;
  va_start(ap, x);
  y = va_arg (ap, struct test1);
  va_end(ap);
  return y;
}

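// For the 16-byte-aligned struct test2, va_arg rounds the argument pointer up
// to a 16-byte boundary (add 15, mask with -16) before loading the value and
// advancing by 16 bytes.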
// CHECK: define void @test2va(%struct.test2* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test2*
// CHECK: [[DEST:%.*]] = bitcast %struct.test2* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test2* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 16 [[SRC]], i64 16, i1 false)
struct test2 test2va (int x, ...)
{
  struct test2 y;
  va_list ap;
  va_start(ap, x);
  y = va_arg (ap, struct test2);
  va_end(ap);
  return y;
}

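// struct test3 requests 32-byte alignment, but the va_list pointer is still
// only realigned to 16 bytes; the copy out of the argument area therefore uses
// a source alignment of 16 even though the destination is 32-byte aligned.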
// CHECK: define void @test3va(%struct.test3* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 32
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test3*
// CHECK: [[DEST:%.*]] = bitcast %struct.test3* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test3* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[DEST]], i8* align 16 [[SRC]], i64 32, i1 false)
struct test3 test3va (int x, ...)
{
  struct test3 y;
  va_list ap;
  va_start(ap, x);
  y = va_arg (ap, struct test3);
  va_end(ap);
  return y;
}

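// The 12-byte struct test4 occupies two doublewords in the argument area, so
// the pointer advances by 16 bytes with no extra realignment.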
// CHECK: define void @test4va(%struct.test4* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test4*
// CHECK: [[DEST:%.*]] = bitcast %struct.test4* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test4* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 8 [[SRC]], i64 12, i1 false)
struct test4 test4va (int x, ...)
{
  struct test4 y;
  va_list ap;
  va_start(ap, x);
  y = va_arg (ap, struct test4);
  va_end(ap);
  return y;
}

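// A struct wrapping a 128-bit long double is 16 bytes but needs no
// realignment of the va_list pointer; it is copied out with the default
// 8-byte source alignment.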
// CHECK: define void @testva_longdouble(%struct.test_longdouble* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test_longdouble*
// CHECK: [[DEST:%.*]] = bitcast %struct.test_longdouble* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test_longdouble* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 8 [[SRC]], i64 16, i1 false)
struct test_longdouble { long double x; };
struct test_longdouble testva_longdouble (int x, ...)
{
  struct test_longdouble y;
  va_list ap;
  va_start(ap, x);
  y = va_arg (ap, struct test_longdouble);
  va_end(ap);
  return y;
}

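// A struct wrapping an Altivec vector is realigned to 16 bytes, matching the
// treatment of the 16-byte-aligned struct test2 above.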
// CHECK: define void @testva_vector(%struct.test_vector* noalias sret %[[AGG_RESULT:.*]], i32 signext %x, ...)
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test_vector*
// CHECK: [[DEST:%.*]] = bitcast %struct.test_vector* %[[AGG_RESULT]] to i8*
// CHECK: [[SRC:%.*]] = bitcast %struct.test_vector* [[T0]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 16 [[SRC]], i64 16, i1 false)
struct test_vector { vector int x; };
struct test_vector testva_vector (int x, ...)
{
  struct test_vector y;
  va_list ap;
  va_start(ap, x);
  y = va_arg (ap, struct test_vector);
  va_end(ap);
  return y;
}