; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple aarch64 -mattr=+sve -aarch64-enable-gisel-sve=1 | FileCheck %s
; RUN: llc < %s -mtriple aarch64 -mattr=+sve -global-isel -aarch64-enable-gisel-sve=1 | FileCheck %s

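;; Unpredicated SVE integer binary ops on scalable vectors. Both RUN lines
;; share the default CHECK prefix, so GlobalISel (second RUN line) must
;; select the same add/sub and and/orr/eor instructions as SelectionDAG
;; (first RUN line).
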
;; add
define <vscale x 2 x i64> @addnxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: addnxv2i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = add <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %c
}

define <vscale x 4 x i32> @addnxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: addnxv4i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = add <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %c
}

define <vscale x 8 x i16> @addnxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: addnxv8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = add <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %c
}

define <vscale x 16 x i8> @addnxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: addnxv16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = add <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %c
}

;; sub
define <vscale x 2 x i64> @subnxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subnxv2i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = sub <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %c
}

define <vscale x 4 x i32> @subnxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subnxv4i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = sub <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %c
}

define <vscale x 8 x i16> @subnxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subnxv8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = sub <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %c
}

define <vscale x 16 x i8> @subnxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: subnxv16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = sub <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %c
}

;; and
define <vscale x 2 x i64> @andnxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: andnxv2i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = and <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %c
}

define <vscale x 4 x i32> @andnxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: andnxv4i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = and <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %c
}

define <vscale x 8 x i16> @andnxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: andnxv8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = and <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %c
}

define <vscale x 16 x i8> @andnxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: andnxv16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = and <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %c
}

;; or
define <vscale x 2 x i64> @ornxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ornxv2i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = or <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %c
}

define <vscale x 4 x i32> @ornxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ornxv4i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = or <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %c
}

define <vscale x 8 x i16> @ornxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ornxv8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = or <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %c
}

define <vscale x 16 x i8> @ornxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: ornxv16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = or <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %c
}

;; xor
define <vscale x 2 x i64> @xornxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: xornxv2i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = xor <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %c
}

define <vscale x 4 x i32> @xornxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: xornxv4i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = xor <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %c
}

define <vscale x 8 x i16> @xornxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: xornxv8i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = xor <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %c
}

define <vscale x 16 x i8> @xornxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: xornxv16i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = xor <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %c
}