diff --git a/src/Sema.zig b/src/Sema.zig
index 8368ca35157b..40550027e431 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -14200,7 +14200,7 @@ fn zirShl(
                 var i: usize = 0;
                 while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                     const rhs_elem = try rhs_val.elemValue(pt, i);
-                    if (rhs_elem.compareHetero(.gte, bit_value, zcu)) {
+                    if (!rhs_elem.isUndef(zcu) and rhs_elem.compareHetero(.gte, bit_value, zcu)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                             rhs_elem.fmtValueSema(pt, sema),
                             i,
@@ -14208,7 +14208,7 @@ fn zirShl(
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, zcu)) {
+            } else if (!rhs_val.isUndef(zcu) and rhs_val.compareHetero(.gte, bit_value, zcu)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                     rhs_val.fmtValueSema(pt, sema),
                     scalar_ty.fmt(pt),
@@ -14219,14 +14219,14 @@ fn zirShl(
            var i: usize = 0;
            while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                const rhs_elem = try rhs_val.elemValue(pt, i);
-                if (rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), zcu)) {
+                if (!rhs_elem.isUndef(zcu) and rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), zcu)) {
                    return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                        rhs_elem.fmtValueSema(pt, sema),
                        i,
                    });
                }
            }
-        } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), zcu)) {
+        } else if (!rhs_val.isUndef(zcu) and rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), zcu)) {
            return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                rhs_val.fmtValueSema(pt, sema),
            });
@@ -14246,10 +14246,25 @@ fn zirShl(
        else switch (air_tag) {
            .shl_exact => val: {
                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, pt);
-                if (shifted.overflow_bit.compareAllWithZero(.eq, zcu)) {
+                if (lhs_ty.zigTypeTag(zcu) == .vector) {
+                    const elems = zcu.intern_pool.indexToKey(shifted.overflow_bit.toIntern()).aggregate.storage.values();
+                    for (elems) |elem| {
+                        const needed_value = Value.fromInterned(elem);
+                        const not_undefined = !needed_value.isUndef(zcu);
+                        if (not_undefined) {
+                            const not_equal_to_zero = (try needed_value.orderAgainstZeroSema(pt)) != .eq;
+                            if (not_equal_to_zero) {
+                                return sema.fail(block, src, "operation caused overflow", .{});
+                            }
+                        }
+                    }
                    break :val shifted.wrapped_result;
+                } else {
+                    if (shifted.overflow_bit.compareAllWithZero(.eq, zcu)) {
+                        break :val shifted.wrapped_result;
+                    }
+                    return sema.fail(block, src, "operation caused overflow", .{});
                }
-                return sema.fail(block, src, "operation caused overflow", .{});
            },
            .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, pt),
            .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, pt),
@@ -14376,7 +14391,7 @@ fn zirShr(
                 var i: usize = 0;
                 while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                     const rhs_elem = try rhs_val.elemValue(pt, i);
-                    if (rhs_elem.compareHetero(.gte, bit_value, zcu)) {
+                    if (!rhs_elem.isUndef(zcu) and rhs_elem.compareHetero(.gte, bit_value, zcu)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                             rhs_elem.fmtValueSema(pt, sema),
                             i,
@@ -14395,7 +14410,7 @@ fn zirShr(
            var i: usize = 0;
            while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
                const rhs_elem = try rhs_val.elemValue(pt, i);
-                if (rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(zcu), 0), zcu)) {
+                if (!rhs_elem.isUndef(zcu) and rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(zcu), 0), zcu)) {
                    return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                        rhs_elem.fmtValueSema(pt, sema),
                        i,
@@ -14414,7 +14429,19 @@ fn zirShr(
        if (air_tag == .shr_exact) {
            // Detect if any ones would be shifted out.
            const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, pt);
-            if (!(try truncated.compareAllWithZeroSema(.eq, pt))) {
+            if (lhs_ty.zigTypeTag(zcu) == .vector and !truncated.isUndef(zcu)) {
+                const elems = zcu.intern_pool.indexToKey(truncated.toIntern()).aggregate.storage.values();
+                for (elems) |elem| {
+                    const needed_value = Value.fromInterned(elem);
+                    const not_undefined = !needed_value.isUndef(zcu);
+                    if (not_undefined) {
+                        const not_equal_to_zero = (try needed_value.orderAgainstZeroSema(pt)) != .eq;
+                        if (not_equal_to_zero) {
+                            return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
+                        }
+                    }
+                }
+            } else if (!((try truncated.compareAllWithZeroSema(.eq, pt)) or truncated.isUndef(zcu))) {
                return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
            }
        }
@@ -16189,6 +16216,12 @@ fn intRem(
 
 fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
     const pt = sema.pt;
+    const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(scalar_ty);
+    }
+
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
@@ -24154,8 +24187,12 @@ fn zirBitCount(
            const scalar_ty = operand_ty.scalarType(zcu);
            for (elems, 0..) |*elem, i| {
                const elem_val = try val.elemValue(pt, i);
-                const count = comptimeOp(elem_val, scalar_ty, zcu);
-                elem.* = (try pt.intValue(result_scalar_ty, count)).toIntern();
+                if (elem_val.isUndef(zcu)) {
+                    elem.* = (try pt.undefValue(result_scalar_ty)).toIntern();
+                } else {
+                    const count = comptimeOp(elem_val, scalar_ty, zcu);
+                    elem.* = (try pt.intValue(result_scalar_ty, count)).toIntern();
+                }
            }
            return Air.internedToRef((try pt.intern(.{ .aggregate = .{
                .ty = result_ty.toIntern(),
diff --git a/src/Value.zig b/src/Value.zig
index 40e5331c4ef6..9ae97676692d 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -935,6 +935,11 @@ pub fn popCount(val: Value, ty: Type, zcu: *Zcu) u64 {
 
 pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     const info = ty.intInfo(zcu);
 
     var buffer: Value.BigIntSpace = undefined;
@@ -952,6 +957,11 @@ pub fn bitReverse(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Va
 
 pub fn byteSwap(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) !Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     const info = ty.intInfo(zcu);
 
     // Bit count must be evenly divisible by 8
@@ -2175,6 +2185,11 @@ pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt:
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
@@ -2224,6 +2239,11 @@ pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator,
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
@@ -2267,6 +2287,11 @@ pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt:
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
@@ -2371,6 +2396,11 @@ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt:
 
 pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThread) !Value {
     const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @mod(lhs.toFloat(f16, zcu), rhs.toFloat(f16, zcu)) },
@@ -2493,6 +2523,9 @@ pub fn intTruncBitsAsValue(
     for (result_data, 0..) |*scalar, i| {
         const elem_val = try val.elemValue(pt, i);
         const bits_elem = try bits.elemValue(pt, i);
+        if (bits_elem.isUndef(zcu)) {
+            return try pt.undefValue(ty);
+        }
         scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(zcu)), pt)).toIntern();
     }
     return Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -2514,7 +2547,9 @@ pub fn intTruncScalar(
     const zcu = pt.zcu;
     if (bits == 0) return pt.intValue(ty, 0);
 
-    if (val.isUndef(zcu)) return pt.undefValue(ty);
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
 
     var val_space: Value.BigIntSpace = undefined;
     const val_bigint = val.toBigInt(&val_space, zcu);
@@ -2551,6 +2586,11 @@ pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     var lhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
     const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
@@ -2614,6 +2654,13 @@ pub fn shlWithOverflowScalar(
 ) !OverflowArithmeticResult {
     const zcu = pt.zcu;
     const info = ty.intInfo(zcu);
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu))
+        return OverflowArithmeticResult{
+            .overflow_bit = try pt.undefValue(Type.u1),
+            .wrapped_result = try pt.undefValue(ty),
+        };
+
     var lhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
     const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
@@ -2672,6 +2719,9 @@ pub fn shlSatScalar(
     const zcu = pt.zcu;
     const info = ty.intInfo(zcu);
 
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu))
+        return try pt.undefValue(ty);
+
     var lhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
     const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
@@ -2745,6 +2795,11 @@ pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     const zcu = pt.zcu;
+
+    if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     var lhs_space: Value.BigIntSpace = undefined;
     const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
     const shift: usize = @intCast(rhs.toUnsignedInt(zcu));
@@ -3103,6 +3158,11 @@ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 
 pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @sqrt(val.toFloat(f16, zcu)) },
@@ -3137,6 +3197,11 @@ pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 
 pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @sin(val.toFloat(f16, zcu)) },
@@ -3171,6 +3236,11 @@ pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 
 pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @cos(val.toFloat(f16, zcu)) },
@@ -3205,6 +3275,11 @@ pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 
 pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @tan(val.toFloat(f16, zcu)) },
@@ -3239,6 +3314,11 @@ pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 
 pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @exp(val.toFloat(f16, zcu)) },
@@ -3273,6 +3353,11 @@ pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 
 pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @exp2(val.toFloat(f16, zcu)) },
@@ -3307,6 +3392,11 @@ pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
 
 pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @log(val.toFloat(f16, zcu)) },
@@ -3341,6 +3431,11 @@ pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 
 pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @log2(val.toFloat(f16, zcu)) },
@@ -3375,6 +3470,11 @@ pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 
 pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @log10(val.toFloat(f16, zcu)) },
@@ -3409,6 +3509,11 @@ pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
 
 pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(ty);
+    }
+
     switch (ty.zigTypeTag(zcu)) {
         .int => {
             var buffer: Value.BigIntSpace = undefined;
@@ -3462,6 +3567,11 @@ pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 
 pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @floor(val.toFloat(f16, zcu)) },
@@ -3496,6 +3606,11 @@ pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
 
 pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @ceil(val.toFloat(f16, zcu)) },
@@ -3530,6 +3645,11 @@ pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 
 pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @round(val.toFloat(f16, zcu)) },
@@ -3564,6 +3684,11 @@ pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
 
 pub fn truncScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (val.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @trunc(val.toFloat(f16, zcu)) },
@@ -3613,6 +3738,11 @@ pub fn mulAddScalar(
     pt: Zcu.PerThread,
 ) Allocator.Error!Value {
     const zcu = pt.zcu;
+
+    if (mulend1.isUndef(zcu) or mulend2.isUndef(zcu) or addend.isUndef(zcu)) {
+        return try pt.undefValue(float_type);
+    }
+
     const target = zcu.getTarget();
     const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
         16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, zcu), mulend2.toFloat(f16, zcu), addend.toFloat(f16, zcu)) },
diff --git a/test/cases/compile_errors/arithmetic_operations_with_undefined.zig b/test/cases/compile_errors/arithmetic_operations_with_undefined.zig
new file mode 100644
index 000000000000..7fcf362c0199
--- /dev/null
+++ b/test/cases/compile_errors/arithmetic_operations_with_undefined.zig
@@ -0,0 +1,82 @@
+const U8Vec = @Vector(3, u8);
+const FloatVec = @Vector(1, f32);
+
+comptime {
+    const a: U8Vec = .{ undefined, 0, undefined };
+    const b: U8Vec = @splat(1);
+    const c: FloatVec = .{undefined};
+    @compileLog(a >> b);
+    @compileLog(b >> a);
+    @compileLog(a << b);
+    @compileLog(b << a);
+    @compileLog(@shlExact(a, b));
+    @compileLog(@shlExact(b, a));
+    @compileLog(@shlWithOverflow(a, b));
+    @compileLog(@shlWithOverflow(b, a));
+    @compileLog(@shrExact(a, b));
+    @compileLog(@shrExact(b, a));
+    @compileLog(@subWithOverflow(a, b));
+    @compileLog(@sin(c));
+    @compileLog(@cos(c));
+    @compileLog(@tan(c));
+    @compileLog(@exp(c));
+    @compileLog(@exp2(c));
+    @compileLog(@log(c));
+    @compileLog(@log2(c));
+    @compileLog(@log10(c));
+    @compileLog(@abs(a));
+    @compileLog(@abs(c));
+    @compileLog(@floor(c));
+    @compileLog(@ceil(c));
+    @compileLog(@round(c));
+    @compileLog(@trunc(c));
+    @compileLog(@mod(a, b));
+    @compileLog(@rem(a, b));
+    @compileLog(@mulAdd(FloatVec, c, c, c));
+    @compileLog(@byteSwap(a));
+    @compileLog(@bitReverse(a));
+    @compileLog(@clz(a));
+    @compileLog(@ctz(a));
+    @compileLog(@popCount(a));
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :8:5: error: found compile log statement
+//
+// Compile Log Output:
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u8), .{ undefined, 1, undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u8), .{ undefined, 1, undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u8), .{ undefined, 1, undefined })
+// @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ undefined, 0, undefined }, .{ undefined, 0, undefined } })
+// @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ undefined, 1, undefined }, .{ undefined, 0, undefined } })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u8), .{ undefined, 1, undefined })
+// @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ undefined, 255, undefined }, .{ undefined, 1, undefined } })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(1, f32), .{ undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u8), .{ undefined, 0, undefined })
+// @as(@Vector(3, u4), .{ undefined, 8, undefined })
+// @as(@Vector(3, u4), .{ undefined, 8, undefined })
+// @as(@Vector(3, u4), .{ undefined, 0, undefined })
diff --git a/test/cases/compile_errors/exact division failure.zig b/test/cases/compile_errors/exact division failure.zig
index fee8219a7b05..b2485f7e35ca 100644
--- a/test/cases/compile_errors/exact division failure.zig
+++ b/test/cases/compile_errors/exact division failure.zig
@@ -1,6 +1,6 @@
 comptime {
     const x = @divExact(10, 3);
-    _ = x;
+    _ = &x;
 }
 
 // error
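
For illustration, a minimal sketch of the comptime behavior this diff targets (not part of the patch itself; it assumes a compiler built with these changes, and the names `Vec`, `amt`, `one`, and `r` are arbitrary). With the guards added in `zirShl`/`zirShr`, undefined lanes of a vector shift amount are no longer fed into the range checks, and with the `isUndef` early-outs in `Value.zig`, those lanes propagate `undefined` into the result instead of aborting the operation:

    const Vec = @Vector(3, u8);

    comptime {
        // Lanes 0 and 2 of the shift amount are undefined; lane 1 is 2.
        const amt: Vec = .{ undefined, 2, undefined };
        const one: Vec = @splat(1);
        // The undefined lanes are skipped by the "shift amount too large" and
        // "shift by negative amount" checks, and the corresponding result
        // lanes become undefined (mirroring the `b << a` case in the new test).
        const r = one << amt;
        // The defined lane is still computed normally: 1 << 2 == 4.
        if (r[1] != 4) @compileError("unexpected lane value");
    }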