@@ -570,6 +570,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        try self.dbgSetEpilogueBegin();
                    }
                },
+               .arm => {
+                   const cc = self.fn_type.fnCallingConvention();
+                   if (cc != .Naked) {
+                       // push {fp, lr}
+                       // mov fp, sp
+                       // sub sp, sp, #reloc
+                       mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.push(.al, .{ .fp, .lr }).toU32());
+                       mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .fp, Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none)).toU32());
+                       // TODO: prepare stack for local variables
+                       // const backpatch_reloc = try self.code.addManyAsArray(4);
+
+                       try self.dbgSetPrologueEnd();
+
+                       try self.genBody(self.mod_fn.analysis.success);
+
+                       // Backpatch stack offset
+                       // const stack_end = self.max_end_stack;
+                       // const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
+                       // mem.writeIntLittle(u32, backpatch_reloc, Instruction.sub(.al, .sp, .sp, Instruction.Operand.imm()));
+
+                       try self.dbgSetEpilogueBegin();
+
+                       // mov sp, fp
+                       // pop {fp, pc}
+                       // TODO: return by jumping to this code, use relocations
+                       // mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .sp, Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none)).toU32());
+                       // mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32());
+                   } else {
+                       try self.dbgSetPrologueEnd();
+                       try self.genBody(self.mod_fn.analysis.success);
+                       try self.dbgSetEpilogueBegin();
+                   }
+               },
                else => {
                    try self.dbgSetPrologueEnd();
                    try self.genBody(self.mod_fn.analysis.success);
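For reference, the two prologue instructions emitted above assemble to fixed A32 words. A minimal sketch of the expected little-endian encodings (standard ARM encodings given for verification, not code from this commit):

    const arm_prologue = [_]u32{
        0xe92d4800, // push {fp, lr}  (stmdb sp!, {r11, r14})
        0xe1a0b00d, // mov fp, sp
    };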
@@ -1461,7 +1494,35 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                    }
                },
                .arm => {
-                   if (info.args.len > 0) return self.fail(inst.base.src, "TODO implement fn args for {}", .{self.target.cpu.arch});
+                   for (info.args) |mc_arg, arg_i| {
+                       const arg = inst.args[arg_i];
+                       const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+
+                       switch (mc_arg) {
+                           .none => continue,
+                           .undef => unreachable,
+                           .immediate => unreachable,
+                           .unreach => unreachable,
+                           .dead => unreachable,
+                           .embedded_in_code => unreachable,
+                           .memory => unreachable,
+                           .compare_flags_signed => unreachable,
+                           .compare_flags_unsigned => unreachable,
+                           .register => |reg| {
+                               try self.genSetReg(arg.src, reg, arg_mcv);
+                               // TODO interact with the register allocator to mark the instruction as moved.
+                           },
+                           .stack_offset => {
+                               return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
+                           },
+                           .ptr_stack_offset => {
+                               return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+                           },
+                           .ptr_embedded_in_code => {
+                               return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+                           },
+                       }
+                   }

                    if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
                        if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
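Note that this loop only materializes argument values; the classification into .register or .stack_offset comes from resolveCallingConventionValues (extended near the end of this diff). For up to four word-sized arguments it therefore reduces to one genSetReg per argument, e.g. mov r0, #1 and mov r1, #2 for a two-argument call with small constant operands.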
@@ -1476,13 +1537,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                            else
                                unreachable;

-                           // TODO only works with leaf functions
-                           // at the moment, which works fine for
-                           // Hello World, but not for real code
-                           // of course. Add pushing lr to stack
-                           // and popping after call
                            try self.genSetReg(inst.base.src, .lr, .{ .memory = got_addr });
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blx(.al, .lr).toU32());
+
+                           // TODO: add Instruction.supportedOn
+                           // function for ARM
+                           if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
+                               mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blx(.al, .lr).toU32());
+                           } else {
+                               mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .lr, Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none)).toU32());
+                               mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
+                           }
                        } else {
                            return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
                        }
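blx <register> only exists from ARMv5T onward, hence the feature gate. The fallback leans on A32 pc semantics: reading pc yields the address of the current instruction plus 8, so mov lr, pc stores the address of the instruction following the bx as the return address. The resulting words, assuming standard encodings (shown for reference, not part of the commit):

    const call_v5t = [_]u32{
        0xe12fff3e, // blx lr
    };
    const call_pre_v5t = [_]u32{
        0xe1a0e00f, // mov lr, pc
        0xe12fff1e, // bx lr
    };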
@@ -1602,7 +1666,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.zero, 0, .ra).toU32());
                },
                .arm => {
-                   mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
+                   mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .sp, Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none)).toU32());
+                   mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32());
+                   // TODO: jump to the end with relocation
+                   // // Just add space for an instruction, patch this later
+                   // try self.code.resize(self.code.items.len + 4);
+                   // try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
                },
                else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}),
            }
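The epilogue restores sp from fp and then pops the saved frame pointer and return address, loading the saved lr value directly into pc so that the pop doubles as the return. Expected encodings, again for reference only:

    const arm_epilogue = [_]u32{
        0xe1a0d00b, // mov sp, fp
        0xe8bd8800, // pop {fp, pc}  (ldmia sp!, {r11, r15})
    };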
@@ -2214,14 +2283,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                        // least amount of necessary instructions (use
                        // more intelligent rotating)
                        if (x <= math.maxInt(u8)) {
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, 0, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
                            return;
                        } else if (x <= math.maxInt(u16)) {
                            // TODO Use movw Note: Not supported on
                            // all ARM targets!

-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, 0, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
                        } else if (x <= math.maxInt(u32)) {
                            // TODO Use movw and movt Note: Not
                            // supported on all ARM targets! Also TODO
@@ -2233,20 +2302,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                            // orr reg, reg, #0xbb, 24
                            // orr reg, reg, #0xcc, 16
                            // orr reg, reg, #0xdd, 8
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, 0, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 16), 8)).toU32());
-                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 24), 4)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 16), 8)).toU32());
+                           mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 24), 4)).toU32());
                            return;
                        } else {
                            return self.fail(src, "ARM registers are 32-bit wide", .{});
                        }
                    },
+                   .register => |src_reg| {
+                       // If the registers are the same, nothing to do.
+                       if (src_reg.id() == reg.id())
+                           return;
+
+                       // mov reg, src_reg
+                       mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none)).toU32());
+                   },
                    .memory => |addr| {
                        // The value is in memory at a hard-coded address.
                        // If the type is a pointer, it means the pointer address is at this memory location.
                        try self.genSetReg(src, reg, .{ .immediate = addr });
-                       mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, Instruction.Offset.none).toU32());
+                       mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32());
                    },
                    else => return self.fail(src, "TODO implement getSetReg for arm {}", .{mcv}),
                },
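The Instruction.Operand.imm(value, rotate) operands above follow the standard ARM data-processing immediate scheme: an 8-bit value rotated right by twice the 4-bit rotate field. A self-contained sketch (hypothetical helper, not the commit's Instruction API) showing why the mov/orr sequence with rotates 0, 12, 8, and 4 reassembles a full 32-bit constant:

    const std = @import("std");

    // An ARM data-processing immediate: an 8-bit value rotated right
    // by twice the 4-bit rotate field.
    fn rotatedImm(value: u8, rot: u4) u32 {
        return std.math.rotr(u32, @as(u32, value), @as(u32, rot) * 2);
    }

    test "mov/orr sequence materializes a 32-bit constant" {
        const x: u32 = 0xaabbccdd;
        var result: u32 = rotatedImm(@truncate(u8, x), 0); // mov reg, #0xdd
        result |= rotatedImm(@truncate(u8, x >> 8), 12); // ror 24 == shl 8  -> 0x0000cc00
        result |= rotatedImm(@truncate(u8, x >> 16), 8); // ror 16 == shl 16 -> 0x00bb0000
        result |= rotatedImm(@truncate(u8, x >> 24), 4); // ror 8  == shl 24 -> 0xaa000000
        std.debug.assert(result == x);
    }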
@@ -2702,6 +2779,55 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                    else => return self.fail(src, "TODO implement function parameters for {} on x86_64", .{cc}),
                }
            },
+           .arm => {
+               switch (cc) {
+                   .Naked => {
+                       assert(result.args.len == 0);
+                       result.return_value = .{ .unreach = {} };
+                       result.stack_byte_count = 0;
+                       result.stack_align = 1;
+                       return result;
+                   },
+                   .Unspecified, .C => {
+                       // ARM Procedure Call Standard, Chapter 6.5
+                       var ncrn: usize = 0; // Next Core Register Number
+                       var nsaa: u32 = 0; // Next stacked argument address
+
+                       for (param_types) |ty, i| {
+                           if (ty.abiAlignment(self.target.*) == 8) {
+                               // Round up NCRN to the next even number
+                               ncrn += ncrn % 2;
+                           }
+
+                           const param_size = @intCast(u32, ty.abiSize(self.target.*));
+                           if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
+                               if (param_size <= 4) {
+                                   result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
+                                   ncrn += 1;
+                               } else {
+                                   return self.fail(src, "TODO MCValues with multiple registers", .{});
+                               }
+                           } else if (ncrn < 4 and nsaa == 0) {
+                               return self.fail(src, "TODO MCValues split between registers and stack", .{});
+                           } else {
+                               ncrn = 4;
+                               if (ty.abiAlignment(self.target.*) == 8) {
+                                   if (nsaa % 8 != 0) {
+                                       nsaa += 8 - (nsaa % 8);
+                                   }
+                               }
+
+                               result.args[i] = .{ .stack_offset = nsaa };
+                               nsaa += param_size;
+                           }
+                       }
+
+                       result.stack_byte_count = nsaa;
+                       result.stack_align = 4;
+                   },
+                   else => return self.fail(src, "TODO implement function parameters for {} on arm", .{cc}),
+               }
+           },
            else => if (param_types.len != 0)
                return self.fail(src, "TODO implement codegen parameters for {}", .{self.target.cpu.arch}),
        }
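The ncrn/nsaa bookkeeping above implements AAPCS argument allocation: word-sized arguments consume r0-r3 in order, 8-byte-aligned arguments start at an even register number, and once the core registers are exhausted arguments go to the stack at suitably aligned offsets. A stripped-down sketch of the same algorithm with sizes and alignments passed in directly (hypothetical helper, not the commit's code; the two TODO branches return null):

    const std = @import("std");

    const ArgLoc = union(enum) {
        register: u32, // index into r0-r3
        stack_offset: u32,
    };

    fn allocArg(ncrn: *u32, nsaa: *u32, size: u32, alignment: u32) ?ArgLoc {
        if (alignment == 8) ncrn.* += ncrn.* % 2; // round NCRN up to even
        if ((size + 3) / 4 <= 4 - ncrn.*) {
            if (size > 4) return null; // TODO: values spanning multiple registers
            const loc = ArgLoc{ .register = ncrn.* };
            ncrn.* += 1;
            return loc;
        } else if (ncrn.* < 4 and nsaa.* == 0) {
            return null; // TODO: values split between registers and stack
        } else {
            ncrn.* = 4;
            if (alignment == 8 and nsaa.* % 8 != 0) nsaa.* += 8 - (nsaa.* % 8);
            const loc = ArgLoc{ .stack_offset = nsaa.* };
            nsaa.* += size;
            return loc;
        }
    }

    test "u32 goes to r0, a following 8-byte argument hits the TODO path" {
        var ncrn: u32 = 0;
        var nsaa: u32 = 0;
        std.debug.assert(allocArg(&ncrn, &nsaa, 4, 4).?.register == 0);
        // the 8-byte argument skips r1 and would need r2+r3
        std.debug.assert(allocArg(&ncrn, &nsaa, 8, 8) == null);
    }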
@@ -2720,6 +2846,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                },
                else => return self.fail(src, "TODO implement function return values for {}", .{cc}),
            },
+           .arm => switch (cc) {
+               .Naked => unreachable,
+               .Unspecified, .C => {
+                   const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                   if (ret_ty_size <= 4) {
+                       result.return_value = .{ .register = c_abi_int_return_regs[0] };
+                   } else {
+                       return self.fail(src, "TODO support more return types for ARM backend", .{});
+                   }
+               },
+               else => return self.fail(src, "TODO implement function return values for {}", .{cc}),
+           },
            else => return self.fail(src, "TODO implement codegen return values for {}", .{self.target.cpu.arch}),
        }
        return result;
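Return values follow the same convention: integer results up to one word come back in r0 (c_abi_int_return_regs[0] here), and anything wider is left as a TODO. As a concrete illustration of the convention this commit wires up, a function such as

    fn add(a: u32, b: u32) u32 {
        return a + b;
    }

would receive a in r0 and b in r1 and leave its result in r0.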