 #include "clang/AST/GlobalDecl.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
-#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -127,7 +126,6 @@ static Address buildPointerWithAlignment(const Expr *E,
         if (PtrTy->getPointeeType()->isVoidType())
           break;
         assert(!UnimplementedFeature::tbaa());
-
         LValueBaseInfo InnerBaseInfo;
         Address Addr = CGF.buildPointerWithAlignment(
             CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull);
@@ -211,78 +209,13 @@ static Address buildPointerWithAlignment(const Expr *E,
   return Address(CGF.buildScalarExpr(E), Align);
 }
 
-/// Helper method to check if the underlying ABI is AAPCS
-static bool isAAPCS(const TargetInfo &TargetInfo) {
-  return TargetInfo.getABI().startswith("aapcs");
-}
-
-Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field,
-                                       unsigned index) {
-  if (index == 0)
-    return base.getAddress();
-
-  auto loc = getLoc(field->getLocation());
-  auto fieldType = convertType(field->getType());
-  auto fieldPtr =
-      mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
-  auto sea = getBuilder().createGetMember(
-      loc, fieldPtr, base.getPointer(), field->getName(), index);
-
-  return Address(sea, CharUnits::One());
-}
-
-static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
-                                   const CIRGenBitFieldInfo &info,
-                                   const FieldDecl *field) {
-  return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
-         info.VolatileStorageSize != 0 &&
-         field->getType()
-             .withCVRQualifiers(base.getVRQualifiers())
-             .isVolatileQualified();
-}
-
-LValue CIRGenFunction::buildLValueForBitField(LValue base,
-                                              const FieldDecl *field) {
-
-  LValueBaseInfo BaseInfo = base.getBaseInfo();
-  const RecordDecl *rec = field->getParent();
-  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
-  auto &info = layout.getBitFieldInfo(field);
-  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
-  unsigned Idx = layout.getCIRFieldNo(field);
-
-  if (useVolatile ||
-      (IsInPreservedAIRegion ||
-       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
-    llvm_unreachable("NYI");
-  }
-
-  Address Addr = getAddrOfField(base, field, Idx);
-
-  const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;
-
-  // Get the access type.
-  mlir::Type FieldIntTy = builder.getUIntNTy(SS);
-
-  auto loc = getLoc(field->getLocation());
-  if (Addr.getElementType() != FieldIntTy)
-    Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);
-
-  QualType fieldType =
-      field->getType().withCVRQualifiers(base.getVRQualifiers());
-
-  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
-  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
-  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
-}
-
 LValue CIRGenFunction::buildLValueForField(LValue base,
                                            const FieldDecl *field) {
-
   LValueBaseInfo BaseInfo = base.getBaseInfo();
 
-  if (field->isBitField())
-    return buildLValueForBitField(base, field);
+  if (field->isBitField()) {
+    llvm_unreachable("NYI");
+  }
 
   // Fields of may-alias structures are may-alias themselves.
   // FIXME: this should get propagated down through anonymous structs and unions.
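
Note on the hunk above: a bit-field lvalue is not a plain field address. The removed buildLValueForBitField recast the address of the field's storage unit to an unsigned integer type of StorageSize bits and paired it with the CIRGenBitFieldInfo descriptor (Offset, Size, StorageSize, IsSigned, plus the AAPCS volatile variants). A minimal standalone sketch of that pairing, assuming a typical little-endian Itanium-style layout; the struct and helper below are illustrative, not ClangIR API:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Mirrors the fields of CIRGenBitFieldInfo that the removed code consulted.
struct BitFieldDescriptor {
  unsigned Offset;      // first bit of the field within its storage unit
  unsigned Size;        // width of the field in bits
  unsigned StorageSize; // width of the storage unit that is loaded/stored
  bool IsSigned;        // whether a load must sign-extend
};

struct S {
  unsigned a : 3;
  int b : 5;
};

// Assumption: both fields share one 32-bit storage unit, `a` at bits [0,3)
// and `b` at bits [3,8). This layout is target/ABI dependent.
static_assert(sizeof(S) == sizeof(uint32_t), "assumed layout");

int main() {
  BitFieldDescriptor a{/*Offset=*/0, /*Size=*/3, /*StorageSize=*/32, false};
  BitFieldDescriptor b{/*Offset=*/3, /*Size=*/5, /*StorageSize=*/32, true};

  S s{};
  s.a = 5;
  s.b = -3;

  // The "address" of a bit-field is really the address of its storage unit,
  // viewed here as a raw 32-bit integer (typically prints 0x000000ED).
  uint32_t storage;
  std::memcpy(&storage, &s, sizeof(storage));
  std::printf("storage = 0x%08X, a:[%u,%u) b:[%u,%u)\n", storage, a.Offset,
              a.Offset + a.Size, b.Offset, b.Offset + b.Size);
  return 0;
}
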
@@ -585,55 +518,12 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
 /// method emits the address of the lvalue, then loads the result as an rvalue,
 /// returning the rvalue.
 RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
+  assert(LV.isSimple() && "not implemented");
   assert(!LV.getType()->isFunctionType());
   assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");
 
-  if (LV.isBitField())
-    return buildLoadOfBitfieldLValue(LV, Loc);
-
-  if (LV.isSimple())
-    return RValue::get(buildLoadOfScalar(LV, Loc));
-  llvm_unreachable("NYI");
-}
-
-RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
-                                                 SourceLocation Loc) {
-  const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();
-
-  // Get the output type.
-  mlir::Type ResLTy = convertType(LV.getType());
-  Address Ptr = LV.getBitFieldAddress();
-  mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
-  auto ValWidth = Val.getType().cast<IntType>().getWidth();
-
-  bool UseVolatile = LV.isVolatileQualified() &&
-                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
-  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
-  const unsigned StorageSize =
-      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
-
-  if (Info.IsSigned) {
-    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
-
-    mlir::Type typ = builder.getSIntNTy(ValWidth);
-    Val = builder.createIntCast(Val, typ);
-
-    unsigned HighBits = StorageSize - Offset - Info.Size;
-    if (HighBits)
-      Val = builder.createShiftLeft(Val, HighBits);
-    if (Offset + HighBits)
-      Val = builder.createShiftRight(Val, Offset + HighBits);
-  } else {
-    if (Offset)
-      Val = builder.createShiftRight(Val, Offset);
-
-    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
-      Val = builder.createAnd(Val,
-                              llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
-  }
-  Val = builder.createIntCast(Val, ResLTy);
-  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
-  return RValue::get(Val);
+  // Everything needs a load.
+  return RValue::get(buildLoadOfScalar(LV, Loc));
 }
 
 void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
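
For reference, the load path deleted above reduces to two shift/mask recipes on the storage unit: unsigned fields are shifted down and masked to Size bits, and signed fields are shifted left so their top bit lands in the sign bit and then arithmetic-shifted back down. A standalone C++ sketch of that scheme, assuming a 32-bit storage unit and two's-complement arithmetic shifts; the helper names are illustrative, not ClangIR API:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Unsigned field: shift down to bit 0, then keep only the low `size` bits
// (the removed code used createShiftRight plus createAnd with getLowBitsSet).
uint32_t loadUnsignedField(uint32_t storage, unsigned offset, unsigned size) {
  uint32_t val = storage >> offset;
  if (offset + size < 32)
    val &= (uint32_t{1} << size) - 1;
  return val;
}

// Signed field: left-shift so the field's top bit becomes the sign bit, then
// arithmetic right-shift to sign-extend (the createShiftLeft/createShiftRight
// pair on a signed type in the removed code).
int32_t loadSignedField(uint32_t storage, unsigned offset, unsigned size) {
  assert(size >= 1 && offset + size <= 32);
  unsigned highBits = 32 - offset - size;
  int32_t val = static_cast<int32_t>(storage << highBits);
  return val >> (offset + highBits); // arithmetic shift on common targets
}

int main() {
  // Storage unit holding a = 5 in bits [0,3) and b = -3 in bits [3,8).
  uint32_t storage = (5u & 0x7u) | ((uint32_t(-3) & 0x1Fu) << 3);
  std::printf("a = %u\n", loadUnsignedField(storage, 0, 3)); // a = 5
  std::printf("b = %d\n", loadSignedField(storage, 3, 5));   // b = -3
  return 0;
}
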
@@ -656,81 +546,6 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
   buildStoreOfScalar(Src.getScalarVal(), Dst);
 }
 
-void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
-                                                      mlir::Value &Result) {
-  const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo();
-  mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType());
-  Address Ptr = Dst.getBitFieldAddress();
-
-  // Get the source value, truncated to the width of the bit-field.
-  mlir::Value SrcVal = Src.getScalarVal();
-
-  // Cast the source to the storage type and shift it into place.
-  SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType());
-  auto SrcWidth = SrcVal.getType().cast<IntType>().getWidth();
-  mlir::Value MaskedVal = SrcVal;
-
-  const bool UseVolatile =
-      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
-      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
-  const unsigned StorageSize =
-      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
-  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
-  // See if there are other bits in the bitfield's storage we'll need to load
-  // and mask together with source before storing.
-  if (StorageSize != Info.Size) {
-    assert(StorageSize > Info.Size && "Invalid bitfield size.");
-
-    mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc());
-
-    // Mask the source value as needed.
-    if (!hasBooleanRepresentation(Dst.getType()))
-      SrcVal = builder.createAnd(
-          SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size));
-
-    MaskedVal = SrcVal;
-    if (Offset)
-      SrcVal = builder.createShiftLeft(SrcVal, Offset);
-
-    // Mask out the original value.
-    Val = builder.createAnd(
-        Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size));
-
-    // Or together the unchanged values and the source value.
-    SrcVal = builder.createOr(Val, SrcVal);
-
-  } else {
-    // According to the AAPCS:
-    // When a volatile bit-field is written, and its container does not overlap
-    // with any non-bit-field member, its container must be read exactly once
-    // and written exactly once using the access width appropriate to the type
-    // of the container. The two accesses are not atomic.
-    llvm_unreachable("volatile bit-field is not implemented for the AAPCS");
-  }
-
-  // Write the new value back out.
-  // TODO: constant matrix type, volatile, no init, non temporal, TBAA
-  buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(),
-                     Dst.getBaseInfo(), false, false);
-
-  // Return the new value of the bit-field.
-  mlir::Value ResultVal = MaskedVal;
-  ResultVal = builder.createIntCast(ResultVal, ResLTy);
-
-  // Sign extend the value if needed.
-  if (Info.IsSigned) {
-    assert(Info.Size <= StorageSize);
-    unsigned HighBits = StorageSize - Info.Size;
-
-    if (HighBits) {
-      ResultVal = builder.createShiftLeft(ResultVal, HighBits);
-      ResultVal = builder.createShiftRight(ResultVal, HighBits);
-    }
-  }
-
-  Result = buildFromMemory(ResultVal, Dst.getType());
-}
-
 static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
                                        const VarDecl *VD) {
   QualType T = E->getType();
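
Similarly, the store path deleted above is a read-modify-write on the storage unit whenever the unit holds more than the one field: truncate the source to Size bits, shift it to Offset, clear those bit positions in the previously loaded value, and OR the two halves back together. A standalone sketch under the same assumptions (32-bit storage unit, illustrative names, not ClangIR API):

#include <cstdint>
#include <cstdio>

// Merge `src` into the field at [offset, offset + size) of `storage`, leaving
// the other bits untouched (createAnd with ~getBitsSet followed by createOr
// in the removed code).
uint32_t storeField(uint32_t storage, uint32_t src, unsigned offset,
                    unsigned size) {
  uint32_t lowMask = size < 32 ? (uint32_t{1} << size) - 1 : ~uint32_t{0};
  uint32_t field = (src & lowMask) << offset; // truncated source, in position
  uint32_t keep = ~(lowMask << offset);       // bits belonging to other fields
  return (storage & keep) | field;
}

int main() {
  uint32_t storage = 0xFFFFFFFFu;
  storage = storeField(storage, 9, /*offset=*/3, /*size=*/5);
  std::printf("0x%08X\n", storage); // 0xFFFFFF4F: bits [3,8) now hold 9
  return 0;
}
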
@@ -954,13 +769,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
     LValue LV = buildLValue(E->getLHS());
 
     SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())};
-    if (LV.isBitField()) {
-      mlir::Value result;
-      buildStoreThroughBitfieldLValue(RV, LV, result);
-    } else {
-      buildStoreThroughLValue(RV, LV);
-    }
-
+    buildStoreThroughLValue(RV, LV);
     assert(!getContext().getLangOpts().OpenMP &&
            "last priv cond not implemented");
     return LV;
@@ -2394,13 +2203,6 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
 
 mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
-  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
-                           lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(),
-                           lvalue.isNontemporal());
-}
-
-mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
-                                              mlir::Location Loc) {
   return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                            lvalue.getType(), Loc, lvalue.getBaseInfo(),
                            lvalue.isNontemporal());
@@ -2418,14 +2220,6 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty, SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               bool isNontemporal) {
-  return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo,
-                           isNontemporal);
-}
-
-mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
-                                              QualType Ty, mlir::Location Loc,
-                                              LValueBaseInfo BaseInfo,
-                                              bool isNontemporal) {
   if (!CGM.getCodeGenOpts().PreserveVec3Type) {
     if (Ty->isVectorType()) {
       llvm_unreachable("NYI");
@@ -2439,14 +2233,15 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
   }
 
   mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
-      Loc, Addr.getElementType(), Addr.getPointer());
+      getLoc(Loc), Addr.getElementType(), Addr.getPointer());
 
   if (isNontemporal) {
     llvm_unreachable("NYI");
   }
-
-  assert(!UnimplementedFeature::tbaa() && "NYI");
-  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
+
+  // TODO: TBAA
+
+  // TODO: buildScalarRangeCheck
 
   return buildFromMemory(Load, Ty);
 }