@@ -288,59 +288,62 @@ pub const LLVMIRModule = struct {
     }
 
     fn gen(self: *LLVMIRModule, module: *Module, typed_value: TypedValue, src: usize) !void {
-        switch (typed_value.ty.zigTypeTag()) {
-            .Fn => {
-                const func = typed_value.val.castTag(.function).?.data;
+        if (typed_value.val.castTag(.function)) |func_inst| {
+            const func = func_inst.data;
 
-                const llvm_func = try self.resolveLLVMFunction(func, src);
+            const llvm_func = try self.resolveLLVMFunction(func, src);
 
-                // This gets the LLVM values from the function and stores them in `self.args`.
-                const fn_param_len = func.owner_decl.typed_value.most_recent.typed_value.ty.fnParamLen();
-                var args = try self.gpa.alloc(*const llvm.ValueRef, fn_param_len);
-                defer self.gpa.free(args);
+            // This gets the LLVM values from the function and stores them in `self.args`.
+            const fn_param_len = func.owner_decl.typed_value.most_recent.typed_value.ty.fnParamLen();
+            var args = try self.gpa.alloc(*const llvm.ValueRef, fn_param_len);
+            defer self.gpa.free(args);
 
-                for (args) |*arg, i| {
-                    arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
-                }
-                self.args = args;
-                self.arg_index = 0;
+            for (args) |*arg, i| {
+                arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
+            }
+            self.args = args;
+            self.arg_index = 0;
 
-                // Make sure no other LLVM values from other functions can be referenced
-                self.func_inst_table.clearRetainingCapacity();
+            // Make sure no other LLVM values from other functions can be referenced
+            self.func_inst_table.clearRetainingCapacity();
 
-                // We remove all the basic blocks of a function to support incremental
-                // compilation!
-                // TODO: remove all basic blocks if functions can have more than one
-                if (llvm_func.getFirstBasicBlock()) |bb| {
-                    bb.deleteBasicBlock();
-                }
+            // We remove all the basic blocks of a function to support incremental
+            // compilation!
+            // TODO: remove all basic blocks if functions can have more than one
+            if (llvm_func.getFirstBasicBlock()) |bb| {
+                bb.deleteBasicBlock();
+            }
 
-                const entry_block = llvm_func.appendBasicBlock("Entry");
-                self.builder.positionBuilderAtEnd(entry_block);
-
-                const instructions = func.body.instructions;
-                for (instructions) |inst| {
-                    const opt_llvm_val: ?*const llvm.ValueRef = switch (inst.tag) {
-                        .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
-                        .call => try self.genCall(inst.castTag(.call).?),
-                        .unreach => self.genUnreach(inst.castTag(.unreach).?),
-                        .retvoid => self.genRetVoid(inst.castTag(.retvoid).?),
-                        .arg => try self.genArg(inst.castTag(.arg).?),
-                        .alloc => try self.genAlloc(inst.castTag(.alloc).?),
-                        .store => try self.genStore(inst.castTag(.store).?),
-                        .load => try self.genLoad(inst.castTag(.load).?),
-                        .ret => try self.genRet(inst.castTag(.ret).?),
-                        .not => try self.genNot(inst.castTag(.not).?),
-                        .dbg_stmt => blk: {
-                            // TODO: implement debug info
-                            break :blk null;
-                        },
-                        else => |tag| return self.fail(src, "TODO implement LLVM codegen for Zir instruction: {}", .{tag}),
-                    };
-                    if (opt_llvm_val) |llvm_val| try self.func_inst_table.put(self.gpa, inst, llvm_val);
-                }
-            },
-            else => |ty| return self.fail(src, "TODO implement LLVM codegen for top-level decl type: {}", .{ty}),
+            const entry_block = llvm_func.appendBasicBlock("Entry");
+            self.builder.positionBuilderAtEnd(entry_block);
+
+            const instructions = func.body.instructions;
+            for (instructions) |inst| {
+                const opt_llvm_val: ?*const llvm.ValueRef = switch (inst.tag) {
+                    .add => try self.genAdd(inst.castTag(.add).?),
+                    .alloc => try self.genAlloc(inst.castTag(.alloc).?),
+                    .arg => try self.genArg(inst.castTag(.arg).?),
+                    .bitcast => try self.genBitCast(inst.castTag(.bitcast).?),
+                    .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
+                    .call => try self.genCall(inst.castTag(.call).?),
+                    .intcast => try self.genIntCast(inst.castTag(.intcast).?),
+                    .load => try self.genLoad(inst.castTag(.load).?),
+                    .not => try self.genNot(inst.castTag(.not).?),
+                    .ret => try self.genRet(inst.castTag(.ret).?),
+                    .retvoid => self.genRetVoid(inst.castTag(.retvoid).?),
+                    .store => try self.genStore(inst.castTag(.store).?),
+                    .sub => try self.genSub(inst.castTag(.sub).?),
+                    .unreach => self.genUnreach(inst.castTag(.unreach).?),
+                    .dbg_stmt => blk: {
+                        // TODO: implement debug info
+                        break :blk null;
+                    },
+                    else => |tag| return self.fail(src, "TODO implement LLVM codegen for Zir instruction: {}", .{tag}),
+                };
+                if (opt_llvm_val) |llvm_val| try self.func_inst_table.putNoClobber(self.gpa, inst, llvm_val);
+            }
+        } else {
+            return self.fail(src, "TODO implement LLVM codegen for top-level decl type: {}", .{typed_value.ty});
         }
     }
@@ -369,13 +372,13 @@ pub const LLVMIRModule = struct {
             "",
         );
 
-        const return_type = zig_fn_type.fnReturnType().zigTypeTag();
-        if (return_type == .NoReturn) {
+        const return_type = zig_fn_type.fnReturnType();
+        if (return_type.tag() == .noreturn) {
             _ = self.builder.buildUnreachable();
         }
 
         // No need to store the LLVM value if the return type is void or noreturn
-        if (return_type == .NoReturn or return_type == .Void) return null;
+        if (!return_type.hasCodeGenBits()) return null;
 
         return call;
     }
@@ -402,6 +405,48 @@ pub const LLVMIRModule = struct {
         return null;
     }
 
+    fn genAdd(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.ValueRef {
+        const lhs = try self.resolveInst(inst.lhs);
+        const rhs = try self.resolveInst(inst.rhs);
+
+        if (!inst.base.ty.isInt())
+            return self.fail(inst.base.src, "TODO implement 'genAdd' for type {}", .{inst.base.ty});
+
+        return if (inst.base.ty.isSignedInt())
+            self.builder.buildNSWAdd(lhs, rhs, "")
+        else
+            self.builder.buildNUWAdd(lhs, rhs, "");
+    }
+
+    fn genSub(self: *LLVMIRModule, inst: *Inst.BinOp) !?*const llvm.ValueRef {
+        const lhs = try self.resolveInst(inst.lhs);
+        const rhs = try self.resolveInst(inst.rhs);
+
+        if (!inst.base.ty.isInt())
+            return self.fail(inst.base.src, "TODO implement 'genSub' for type {}", .{inst.base.ty});
+
+        return if (inst.base.ty.isSignedInt())
+            self.builder.buildNSWSub(lhs, rhs, "")
+        else
+            self.builder.buildNUWSub(lhs, rhs, "");
+    }
+
+    fn genIntCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.ValueRef {
+        const val = try self.resolveInst(inst.operand);
+
+        const signed = inst.base.ty.isSignedInt();
+        // TODO: Should we use intcast here or just a simple bitcast?
+        //       LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
+        return self.builder.buildIntCast2(val, try self.getLLVMType(inst.base.ty, inst.base.src), signed, "");
+    }
+
+    fn genBitCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.ValueRef {
+        const val = try self.resolveInst(inst.operand);
+        const dest_type = try self.getLLVMType(inst.base.ty, inst.base.src);
+
+        return self.builder.buildBitCast(val, dest_type, "");
+    }
+
     fn genArg(self: *LLVMIRModule, inst: *Inst.Arg) !?*const llvm.ValueRef {
         const arg_val = self.args[self.arg_index];
         self.arg_index += 1;
@@ -449,23 +494,38 @@ pub const LLVMIRModule = struct {
     }
 
     fn resolveInst(self: *LLVMIRModule, inst: *ir.Inst) !*const llvm.ValueRef {
-        if (inst.castTag(.constant)) |const_inst| {
-            return self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
+        if (inst.value()) |val| {
+            return self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = val });
         }
         if (self.func_inst_table.get(inst)) |value| return value;
 
         return self.fail(inst.src, "TODO implement global llvm values (or the value is not in the func_inst_table table)", .{});
     }
 
-    fn genTypedValue(self: *LLVMIRModule, src: usize, typed_value: TypedValue) !*const llvm.ValueRef {
-        const llvm_type = try self.getLLVMType(typed_value.ty, src);
+    fn genTypedValue(self: *LLVMIRModule, src: usize, tv: TypedValue) !*const llvm.ValueRef {
+        const llvm_type = try self.getLLVMType(tv.ty, src);
 
-        if (typed_value.val.isUndef())
+        if (tv.val.isUndef())
             return llvm_type.getUndef();
 
-        switch (typed_value.ty.zigTypeTag()) {
-            .Bool => return if (typed_value.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
-            else => return self.fail(src, "TODO implement const of type '{}'", .{typed_value.ty}),
+        switch (tv.ty.zigTypeTag()) {
+            .Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
+            .Int => {
+                var bigint_space: Value.BigIntSpace = undefined;
+                const bigint = tv.val.toBigInt(&bigint_space);
+
+                if (bigint.eqZero()) return llvm_type.constNull();
+
+                if (bigint.limbs.len != 1) {
+                    return self.fail(src, "TODO implement bigger bigint", .{});
+                }
+                const llvm_int = llvm_type.constInt(bigint.limbs[0], false);
+                if (!bigint.positive) {
+                    return llvm.constNeg(llvm_int);
+                }
+                return llvm_int;
+            },
+            else => return self.fail(src, "TODO implement const of type '{}'", .{tv.ty}),
         }
     }
 
@@ -498,14 +558,14 @@ pub const LLVMIRModule = struct {
         );
         const llvm_fn = self.llvm_module.addFunction(func.owner_decl.name, fn_type);
 
-        if (return_type.zigTypeTag() == .NoReturn) {
+        if (return_type.tag() == .noreturn) {
             llvm_fn.addFnAttr("noreturn");
         }
 
         return llvm_fn;
     }
 
-    fn getLLVMType(self: *LLVMIRModule, t: Type, src: usize) !*const llvm.TypeRef {
+    fn getLLVMType(self: *LLVMIRModule, t: Type, src: usize) error{ OutOfMemory, CodegenFail }!*const llvm.TypeRef {
         switch (t.zigTypeTag()) {
             .Void => return llvm.voidType(),
             .NoReturn => return llvm.voidType(),
@@ -514,6 +574,11 @@ pub const LLVMIRModule = struct {
                 return llvm.intType(info.bits);
             },
             .Bool => return llvm.intType(1),
+            .Pointer => {
+                const pointer = t.castPointer().?;
+                const elem_type = try self.getLLVMType(pointer.data, src);
+                return elem_type.pointerType(0);
+            },
             else => return self.fail(src, "TODO implement getLLVMType for type '{}'", .{t}),
         }
     }