Source: Snektron

A few changes were required for LLVM 11:

* llvm::StringRef no longer converts implicitly to std::string, so an explicit .str() is required
* llvm::CallSite was removed, but the required methods are available on llvm::CallInst
* VectorTyID was split into FixedVectorTyID and ScalableVectorTyID; FixedVectorTyID is used here
* CallInst::getCalledValue() was renamed to CallInst::getCalledOperand()
* The implicit conversion from MaybeAlign to Align was removed, so MaybeAlign::valueOrOne() is used instead

These patterns are summarised in the short sketch below; the full patch follows.

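As a quick reference, here is a minimal sketch of the same migration patterns outside the patch context. It assumes LLVM 11 headers; the identifiers (migrationSketch, inst, load, maybeAlign) are illustrative and do not come from the Beignet sources.

// Hypothetical sketch of the LLVM 11 API migrations used in the patch below.
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
#include <string>

using namespace llvm;

static void migrationSketch(Instruction *inst, LoadInst *load, MaybeAlign maybeAlign) {
  // CallSite is gone: detect calls with dyn_cast<CallInst> and use CallInst directly.
  if (CallInst *call = dyn_cast<CallInst>(inst)) {
    call->setCallingConv(CallingConv::C);

    // getCalledValue() was renamed to getCalledOperand().
    Value *callee = call->getCalledOperand();

    // StringRef no longer converts implicitly to std::string; call .str() explicitly.
    std::string name = callee->stripPointerCasts()->getName().str();
    (void)name;
  }

  // VectorTyID was split; fixed-width vectors now report FixedVectorTyID.
  if (inst->getType()->getTypeID() == Type::FixedVectorTyID) {
    // handle fixed-width vector values here
  }

  // MaybeAlign no longer converts implicitly to Align; use valueOrOne().
  load->setAlignment(maybeAlign.valueOrOne());
}
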
--- a/backend/src/llvm/StripAttributes.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/StripAttributes.cpp 2020-11-09 19:04:19.641897141 +0100
@@ -107,9 +107,8 @@
BB != E; ++BB) {
for (BasicBlock::iterator Inst = BB->begin(), E = BB->end();
Inst != E; ++Inst) {
- CallSite Call(&*Inst);
- if (Call)
- Call.setCallingConv(CallingConv::C);
+ if (CallInst* callInst = dyn_cast<CallInst>(&*Inst))
+ callInst->setCallingConv(CallingConv::C);
}
}

--- a/backend/src/llvm/llvm_bitcode_link.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_bitcode_link.cpp 2020-11-09 17:04:09.633753428 +0100
@@ -98,7 +98,7 @@
if (callFunc && callFunc->getIntrinsicID() != 0)
continue;

- std::string fnName = call->getCalledValue()->stripPointerCasts()->getName();
+ std::string fnName = call->getCalledOperand()->stripPointerCasts()->getName().str();

if (!MFS.insert(fnName).second) {
continue;

--- a/backend/src/llvm/llvm_device_enqueue.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_device_enqueue.cpp 2020-11-09 19:01:49.201915008 +0100
@@ -45,7 +45,7 @@
return NULL;

/* This is a fake, to check the function bitcast is for block or not */
- std::string fnName = Fn->getName();
+ std::string fnName = Fn->getName().str();
if(fnName.find("_invoke") == std::string::npos)
return NULL;

@@ -119,18 +119,18 @@
ParamTys.push_back(ty);
}
FunctionType* NewFT = FunctionType::get(Fn->getReturnType(), ParamTys, false);
- Function* NewFn = Function::Create(NewFT, Function::ExternalLinkage, Fn->getName());
+ Function* NewFn = Function::Create(NewFT, Function::ExternalLinkage, Fn->getName().str());
SmallVector<ReturnInst*, 8> Returns;

Function::arg_iterator NewFnArgIt = NewFn->arg_begin();
for (Function::arg_iterator I = Fn->arg_begin(), E = Fn->arg_end(); I != E; ++I) {
- std::string ArgName = I->getName();
+ std::string ArgName = I->getName().str();
NewFnArgIt->setName(ArgName);
VMap[&*I] = &(*NewFnArgIt++);
}
CloneFunctionInto(NewFn, Fn, VMap, /*ModuleLevelChanges=*/true, Returns);

- Fn->setName("__d" + Fn->getName());
+ Fn->setName("__d" + Fn->getName().str());
mod->getFunctionList().push_back(NewFn);
//mod->getOrInsertFunction(NewFn->getName(), NewFn->getFunctionType(),
// NewFn->getAttributes());
@@ -147,7 +147,7 @@
argTypeNames.push_back(llvm::MDString::get(Context, "char*"));
argBaseTypeNames.push_back(llvm::MDString::get(Context, "char*"));
argTypeQuals.push_back(llvm::MDString::get(Context, ""));
- argNames.push_back(llvm::MDString::get(Context, I->getName()));
+ argNames.push_back(llvm::MDString::get(Context, I->getName().str()));
}

//If run to here, llvm version always > 3.9, add the version check just for build.
@@ -198,7 +198,7 @@
* invoke pointer to store the index in the unit's block functions index.*/
Function *Fn = dyn_cast<Function>(bt->getOperand(0));

- std::string fnName = Fn->getName();
+ std::string fnName = Fn->getName().str();
int index = -1;
for(size_t i=0; i<unit.blockFuncs.size(); i++) {
if(unit.blockFuncs[i] == fnName) {
@@ -241,7 +241,7 @@
//unnamed call function, parse the use to find the define of called function
SmallVector<Value*, 16> args(CI->op_begin(), CI->op_end()-1);

- Value *v = CI->getCalledValue();
+ Value *v = CI->getCalledOperand();
BitCastInst* bt = dyn_cast<BitCastInst>(v);
if(bt == NULL)
continue;
@@ -316,7 +316,7 @@
ConstantExpr *expr = dyn_cast<ConstantExpr>(c->getOperand(3));
BitCastInst *bt = dyn_cast<BitCastInst>(expr->getAsInstruction());
Function* f = dyn_cast<Function>(bt->getOperand(0));
- blocks[v] = f->getName();
+ blocks[v] = f->getName().str();
}
}

@@ -332,7 +332,7 @@
} else {
//handle enqueue_kernel function call
Function *fn = CI->getCalledFunction();
- if (fn->getName().find("enqueue_kernel") == std::string::npos)
+ if (fn->getName().str().find("enqueue_kernel") == std::string::npos)
continue;

//block parameter's index, 3 or 6
@@ -361,7 +361,7 @@
ConstantExpr *expr = dyn_cast<ConstantExpr>(c->getOperand(3));
BitCastInst *bt = dyn_cast<BitCastInst>(expr->getAsInstruction());
Function* f = dyn_cast<Function>(bt->getOperand(0));
- blocks[v] = f->getName();
+ blocks[v] = f->getName().str();
}
}
} else if(isa<AllocaInst>(block)) {

--- a/backend/src/llvm/llvm_gen_backend.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_gen_backend.cpp 2020-11-09 18:33:50.919114327 +0100
@@ -376,7 +376,7 @@
GBE_ASSERT(index == 0);
return this->_newScalar(value, key, type, index, uniform);
break;
- case Type::VectorTyID:
+ case Type::FixedVectorTyID:
{
auto vectorType = cast<VectorType>(type);
auto elementType = vectorType->getElementType();
@@ -743,20 +743,20 @@
#undef DECL_VISIT_FN

// Emit rounding instructions from gen native function
- void emitRoundingCallInst(CallInst &I, CallSite &CS, ir::Opcode opcode);
+ void emitRoundingCallInst(CallInst &I, ir::Opcode opcode);
// Emit unary instructions from gen native function
- void emitUnaryCallInst(CallInst &I, CallSite &CS, ir::Opcode opcode, ir::Type = ir::TYPE_FLOAT);
+ void emitUnaryCallInst(CallInst &I, ir::Opcode opcode, ir::Type = ir::TYPE_FLOAT);
// Emit unary instructions from gen native function
- void emitAtomicInst(CallInst &I, CallSite &CS, ir::AtomicOps opcode);
+ void emitAtomicInst(CallInst &I, ir::AtomicOps opcode);
// Emit workgroup instructions
- void emitWorkGroupInst(CallInst &I, CallSite &CS, ir::WorkGroupOps opcode);
+ void emitWorkGroupInst(CallInst &I, ir::WorkGroupOps opcode);
// Emit subgroup instructions
- void emitSubGroupInst(CallInst &I, CallSite &CS, ir::WorkGroupOps opcode);
+ void emitSubGroupInst(CallInst &I, ir::WorkGroupOps opcode);
// Emit subgroup instructions
- void emitBlockReadWriteMemInst(CallInst &I, CallSite &CS, bool isWrite, uint8_t vec_size, ir::Type = ir::TYPE_U32);
- void emitBlockReadWriteImageInst(CallInst &I, CallSite &CS, bool isWrite, uint8_t vec_size, ir::Type = ir::TYPE_U32);
+ void emitBlockReadWriteMemInst(CallInst &I, bool isWrite, uint8_t vec_size, ir::Type = ir::TYPE_U32);
+ void emitBlockReadWriteImageInst(CallInst &I, bool isWrite, uint8_t vec_size, ir::Type = ir::TYPE_U32);

- uint8_t appendSampler(CallSite::arg_iterator AI);
+ uint8_t appendSampler(User::op_iterator AI);
uint8_t getImageID(CallInst &I);

// These instructions are not supported at all
@@ -1320,13 +1320,13 @@
#endif

if(typeNameNode) {
- llvmInfo.typeName= (cast<MDString>(typeNameNode->getOperand(opID)))->getString();
+ llvmInfo.typeName= (cast<MDString>(typeNameNode->getOperand(opID)))->getString().str();
}
if(typeBaseNameNode) {
- llvmInfo.typeBaseName= (cast<MDString>(typeBaseNameNode->getOperand(opID)))->getString();
+ llvmInfo.typeBaseName= (cast<MDString>(typeBaseNameNode->getOperand(opID)))->getString().str();
}
- llvmInfo.typeName= (cast<MDString>(typeNameNode->getOperand(opID)))->getString();
- llvmInfo.typeQual = (cast<MDString>(typeQualNode->getOperand(opID)))->getString();
+ llvmInfo.typeName= (cast<MDString>(typeNameNode->getOperand(opID)))->getString().str();
+ llvmInfo.typeQual = (cast<MDString>(typeQualNode->getOperand(opID)))->getString().str();
bool isImage = llvmInfo.isImageType();
bool isPipe = llvmInfo.isPipeType();
if (I->getType()->isPointerTy() || isImage || isPipe) {
@@ -1531,7 +1531,7 @@
EltTy = getEltType(EltTy, TypeIndex);
}

- ir::Constant cc = unit.getConstantSet().getConstant(pointer->getName());
+ ir::Constant cc = unit.getConstantSet().getConstant(pointer->getName().str());
unsigned int defOffset = cc.getOffset();
relocs.push_back(ir::RelocEntry(offset, defOffset + constantOffset));

@@ -1546,7 +1546,7 @@
return;
}
if (isa<GlobalVariable>(c)) {
- ir::Constant cc = unit.getConstantSet().getConstant(c->getName());
+ ir::Constant cc = unit.getConstantSet().getConstant(c->getName().str());
unsigned int defOffset = cc.getOffset();

relocs.push_back(ir::RelocEntry(offset, defOffset));
@@ -1609,7 +1609,7 @@
}
break;
}
- case Type::TypeID::VectorTyID:
+ case Type::TypeID::FixedVectorTyID:
{
const ConstantDataSequential *cds = dyn_cast<ConstantDataSequential>(c);
const VectorType *vecTy = cast<VectorType>(type);
@@ -1936,7 +1936,7 @@
case Type::PointerTyID:
regTranslator.newScalar(value, key, 0, uniform);
break;
- case Type::VectorTyID:
+ case Type::FixedVectorTyID:
{
auto vectorType = cast<VectorType>(type);
const uint32_t elemNum = vectorType->getNumElements();
@@ -2395,7 +2395,7 @@
#endif
}
if(typeNameNode) {
- llvmInfo.typeName = (cast<MDString>(typeNameNode->getOperand(opID)))->getString();
+ llvmInfo.typeName = (cast<MDString>(typeNameNode->getOperand(opID)))->getString().str();
//LLVM 3.9 image's type name include access qual, don't match OpenCL spec, erase them.
std::vector<std::string> filters = {"__read_only ", "__write_only "};
for (uint32_t i = 0; i < filters.size(); i++) {
@@ -2406,16 +2406,16 @@
}
}
if(typeBaseNameNode){
- llvmInfo.typeBaseName = (cast<MDString>(typeBaseNameNode->getOperand(opID)))->getString();
+ llvmInfo.typeBaseName = (cast<MDString>(typeBaseNameNode->getOperand(opID)))->getString().str();
}
if(accessQualNode) {
- llvmInfo.accessQual = (cast<MDString>(accessQualNode->getOperand(opID)))->getString();
+ llvmInfo.accessQual = (cast<MDString>(accessQualNode->getOperand(opID)))->getString().str();
}
if(typeQualNode) {
- llvmInfo.typeQual = (cast<MDString>(typeQualNode->getOperand(opID)))->getString();
+ llvmInfo.typeQual = (cast<MDString>(typeQualNode->getOperand(opID)))->getString().str();
}
if(argNameNode){
- llvmInfo.argName = (cast<MDString>(argNameNode->getOperand(opID)))->getString();
+ llvmInfo.argName = (cast<MDString>(argNameNode->getOperand(opID)))->getString().str();
}

// function arguments are uniform values.
@@ -3010,7 +3010,7 @@
} else {
this->newRegister(const_cast<GlobalVariable*>(&v));
ir::Register reg = regTranslator.getScalar(const_cast<GlobalVariable*>(&v), 0);
- ir::Constant &con = unit.getConstantSet().getConstant(v.getName());
+ ir::Constant &con = unit.getConstantSet().getConstant(v.getName().str());
if (!legacyMode) {
ir::Register regload = ctx.reg(getFamily(getType(ctx, v.getType())));
ctx.LOADI(getType(ctx, v.getType()), regload, ctx.newIntegerImmediate(con.getOffset(), getType(ctx, v.getType())));
@@ -3212,7 +3212,7 @@
GBE_ASSERTM(false, "Unsupported calling convention");
}

- ctx.startFunction(F.getName());
+ ctx.startFunction(F.getName().str());

ir::Function &fn = ctx.getFunction();
this->regTranslator.clear();
@@ -3810,9 +3810,9 @@

void GenWriter::regAllocateCallInst(CallInst &I) {
Value *dst = &I;
- Value *Callee = I.getCalledValue();
+ Value *Callee = I.getCalledOperand();
GBE_ASSERT(ctx.getFunction().getProfile() == ir::PROFILE_OCL);
- GBE_ASSERT(isa<InlineAsm>(I.getCalledValue()) == false);
+ GBE_ASSERT(isa<InlineAsm>(I.getCalledOperand()) == false);
if(I.getNumArgOperands()) GBE_ASSERT(I.hasStructRetAttr() == false);

// We only support a small number of intrinsics right now
@@ -3870,7 +3870,7 @@
}
}
// Get the name of the called function and handle it
- const std::string fnName = Callee->stripPointerCasts()->getName();
+ const std::string fnName = Callee->stripPointerCasts()->getName().str();
auto genIntrinsicID = intrinsicMap.find(fnName);
switch (genIntrinsicID) {
case GEN_OCL_GET_GROUP_ID0:
@@ -4166,7 +4166,7 @@
};
}

- void GenWriter::emitRoundingCallInst(CallInst &I, CallSite &CS, ir::Opcode opcode) {
+ void GenWriter::emitRoundingCallInst(CallInst &I, ir::Opcode opcode) {
if (I.getType()->isHalfTy()) {
const ir::Register src = this->getRegister(I.getOperand(0));
const ir::Register srcFloat = ctx.reg(ir::FAMILY_DWORD);
@@ -4177,14 +4177,14 @@
ctx.F32TO16(ir::TYPE_U16, ir::TYPE_FLOAT, dst, dstFloat);
} else {
GBE_ASSERT(I.getType()->isFloatTy());
- this->emitUnaryCallInst(I,CS,opcode);
+ this->emitUnaryCallInst(I,opcode);
}
}

- void GenWriter::emitUnaryCallInst(CallInst &I, CallSite &CS, ir::Opcode opcode, ir::Type type) {
- CallSite::arg_iterator AI = CS.arg_begin();
+ void GenWriter::emitUnaryCallInst(CallInst &I, ir::Opcode opcode, ir::Type type) {
+ User::op_iterator AI = I.arg_begin();
#if GBE_DEBUG
- CallSite::arg_iterator AE = CS.arg_end();
+ User::op_iterator AE = I.arg_end();
#endif /* GBE_DEBUG */
GBE_ASSERT(AI != AE);
const ir::Register src = this->getRegister(*AI);
@@ -4293,9 +4293,9 @@
this->emitAtomicInstHelper(opcode, type, dst, llvmPtr, payloadTuple);
}

- void GenWriter::emitAtomicInst(CallInst &I, CallSite &CS, ir::AtomicOps opcode) {
- CallSite::arg_iterator AI = CS.arg_begin();
- CallSite::arg_iterator AE = CS.arg_end();
+ void GenWriter::emitAtomicInst(CallInst &I, ir::AtomicOps opcode) {
+ User::op_iterator AI = I.arg_begin();
+ User::op_iterator AE = I.arg_end();
GBE_ASSERT(AI != AE);
Value *llvmPtr = *AI;
ir::AddressSpace addrSpace = addressSpaceLLVMToGen(llvmPtr->getType()->getPointerAddressSpace());
@@ -4348,7 +4348,7 @@
}
}

- void GenWriter::emitWorkGroupInst(CallInst &I, CallSite &CS, ir::WorkGroupOps opcode) {
+ void GenWriter::emitWorkGroupInst(CallInst &I, ir::WorkGroupOps opcode) {
ir::Function &f = ctx.getFunction();

if (f.getwgBroadcastSLM() < 0 && opcode == ir::WORKGROUP_OP_BROADCAST) {
@@ -4378,8 +4378,8 @@
GBE_ASSERT(f.gettidMapSLM() >= 0);
}

- CallSite::arg_iterator AI = CS.arg_begin();
- CallSite::arg_iterator AE = CS.arg_end();
+ User::op_iterator AI = I.arg_begin();
+ User::op_iterator AE = I.arg_end();
GBE_ASSERT(AI != AE);

if (opcode == ir::WORKGROUP_OP_ALL || opcode == ir::WORKGROUP_OP_ANY) {
@@ -4391,14 +4391,14 @@
const ir::Tuple srcTuple = ctx.arrayTuple(&src[0], 3);
ctx.WORKGROUP(opcode, (uint32_t)f.gettidMapSLM(), getRegister(&I), srcTuple, 3, ir::TYPE_S32);
} else if (opcode == ir::WORKGROUP_OP_BROADCAST) {
- int argNum = CS.arg_size();
+ int argNum = I.arg_size();
std::vector<ir::Register> src(argNum);
for (int i = 0; i < argNum; i++) {
src[i] = this->getRegister(*(AI++));
}
const ir::Tuple srcTuple = ctx.arrayTuple(&src[0], argNum);
ctx.WORKGROUP(ir::WORKGROUP_OP_BROADCAST, (uint32_t)f.getwgBroadcastSLM(), getRegister(&I), srcTuple, argNum,
- getType(ctx, (*CS.arg_begin())->getType()));
+ getType(ctx, (*I.arg_begin())->getType()));
} else {
ConstantInt *sign = dyn_cast<ConstantInt>(AI);
GBE_ASSERT(sign);
@@ -4423,9 +4423,9 @@
GBE_ASSERT(AI == AE);
}

- void GenWriter::emitSubGroupInst(CallInst &I, CallSite &CS, ir::WorkGroupOps opcode) {
- CallSite::arg_iterator AI = CS.arg_begin();
- CallSite::arg_iterator AE = CS.arg_end();
+ void GenWriter::emitSubGroupInst(CallInst &I, ir::WorkGroupOps opcode) {
+ User::op_iterator AI = I.arg_begin();
+ User::op_iterator AE = I.arg_end();
GBE_ASSERT(AI != AE);

if (opcode == ir::WORKGROUP_OP_ALL || opcode == ir::WORKGROUP_OP_ANY) {
@@ -4435,7 +4435,7 @@
const ir::Tuple srcTuple = ctx.arrayTuple(&src[0], 1);
ctx.SUBGROUP(opcode, getRegister(&I), srcTuple, 1, ir::TYPE_S32);
} else if (opcode == ir::WORKGROUP_OP_BROADCAST) {
- int argNum = CS.arg_size();
+ int argNum = I.arg_size();
GBE_ASSERT(argNum == 2);
std::vector<ir::Register> src(argNum);
for (int i = 0; i < argNum; i++) {
@@ -4443,7 +4443,7 @@
}
const ir::Tuple srcTuple = ctx.arrayTuple(&src[0], argNum);
ctx.SUBGROUP(ir::WORKGROUP_OP_BROADCAST, getRegister(&I), srcTuple, argNum,
- getType(ctx, (*CS.arg_begin())->getType()));
+ getType(ctx, (*I.arg_begin())->getType()));
} else {
ConstantInt *sign = dyn_cast<ConstantInt>(AI);
GBE_ASSERT(sign);
@@ -4466,9 +4466,9 @@
GBE_ASSERT(AI == AE);
}

- void GenWriter::emitBlockReadWriteMemInst(CallInst &I, CallSite &CS, bool isWrite, uint8_t vec_size, ir::Type type) {
- CallSite::arg_iterator AI = CS.arg_begin();
- CallSite::arg_iterator AE = CS.arg_end();
+ void GenWriter::emitBlockReadWriteMemInst(CallInst &I, bool isWrite, uint8_t vec_size, ir::Type type) {
+ User::op_iterator AI = I.arg_begin();
+ User::op_iterator AE = I.arg_end();
GBE_ASSERT(AI != AE);

Value *llvmPtr = *(AI++);
@@ -4522,9 +4522,9 @@
GBE_ASSERT(AI == AE);
}

- void GenWriter::emitBlockReadWriteImageInst(CallInst &I, CallSite &CS, bool isWrite, uint8_t vec_size, ir::Type type) {
- CallSite::arg_iterator AI = CS.arg_begin();
- CallSite::arg_iterator AE = CS.arg_end();
+ void GenWriter::emitBlockReadWriteImageInst(CallInst &I, bool isWrite, uint8_t vec_size, ir::Type type) {
+ User::op_iterator AI = I.arg_begin();
+ User::op_iterator AE = I.arg_end();
GBE_ASSERT(AI != AE);

const uint8_t imageID = getImageID(I);
@@ -4557,7 +4557,7 @@

/* append a new sampler. should be called before any reference to
* a sampler_t value. */
- uint8_t GenWriter::appendSampler(CallSite::arg_iterator AI) {
+ uint8_t GenWriter::appendSampler(User::op_iterator AI) {
#if LLVM_VERSION_MAJOR * 10 + LLVM_VERSION_MINOR >= 40
CallInst *TC = dyn_cast<CallInst>(*AI);
Constant *CPV = TC ? dyn_cast<Constant>(TC->getOperand(0)) : NULL;
@@ -4595,10 +4595,9 @@
const ir::Function &fn = ctx.getFunction();

// Get the function arguments
- CallSite CS(&I);
- CallSite::arg_iterator AI = CS.arg_begin();
+ User::op_iterator AI = I.arg_begin();
#if GBE_DEBUG
- CallSite::arg_iterator AE = CS.arg_end();
+ User::op_iterator AE = I.arg_end();
#endif /* GBE_DEBUG */
switch (F->getIntrinsicID()) {
case Intrinsic::stacksave:
@@ -4764,17 +4763,17 @@
ctx.MAD(srcType, dst, src0, src1, src2);
}
break;
- case Intrinsic::sqrt: this->emitUnaryCallInst(I,CS,ir::OP_SQR); break;
- case Intrinsic::ceil: this->emitRoundingCallInst(I,CS,ir::OP_RNDU); break;
- case Intrinsic::trunc: this->emitRoundingCallInst(I,CS,ir::OP_RNDZ); break;
- case Intrinsic::rint: this->emitRoundingCallInst(I,CS,ir::OP_RNDE); break;
- case Intrinsic::floor: this->emitRoundingCallInst(I,CS,ir::OP_RNDD); break;
- case Intrinsic::sin: this->emitUnaryCallInst(I,CS,ir::OP_SIN); break;
- case Intrinsic::cos: this->emitUnaryCallInst(I,CS,ir::OP_COS); break;
- case Intrinsic::log2: this->emitUnaryCallInst(I,CS,ir::OP_LOG); break;
- case Intrinsic::exp2: this->emitUnaryCallInst(I,CS,ir::OP_EXP); break;
+ case Intrinsic::sqrt: this->emitUnaryCallInst(I,ir::OP_SQR); break;
+ case Intrinsic::ceil: this->emitRoundingCallInst(I,ir::OP_RNDU); break;
+ case Intrinsic::trunc: this->emitRoundingCallInst(I,ir::OP_RNDZ); break;
+ case Intrinsic::rint: this->emitRoundingCallInst(I,ir::OP_RNDE); break;
+ case Intrinsic::floor: this->emitRoundingCallInst(I,ir::OP_RNDD); break;
+ case Intrinsic::sin: this->emitUnaryCallInst(I,ir::OP_SIN); break;
+ case Intrinsic::cos: this->emitUnaryCallInst(I,ir::OP_COS); break;
+ case Intrinsic::log2: this->emitUnaryCallInst(I,ir::OP_LOG); break;
+ case Intrinsic::exp2: this->emitUnaryCallInst(I,ir::OP_EXP); break;
case Intrinsic::bswap:
- this->emitUnaryCallInst(I,CS,ir::OP_BSWAP, getUnsignedType(ctx, I.getType())); break;
+ this->emitUnaryCallInst(I,ir::OP_BSWAP, getUnsignedType(ctx, I.getType())); break;
case Intrinsic::pow:
{
const ir::Register src0 = this->getRegister(*AI); ++AI;
@@ -4794,21 +4793,20 @@
}
} else {
// Get the name of the called function and handle it
- Value *Callee = I.getCalledValue();
- const std::string fnName = Callee->stripPointerCasts()->getName();
+ Value *Callee = I.getCalledOperand();
+ const std::string fnName = Callee->stripPointerCasts()->getName().str();
auto genIntrinsicID = intrinsicMap.find(fnName);

// Get the function arguments
- CallSite CS(&I);
- CallSite::arg_iterator AI = CS.arg_begin();
+ User::op_iterator AI = I.arg_begin();
#if GBE_DEBUG
- CallSite::arg_iterator AE = CS.arg_end();
+ User::op_iterator AE = I.arg_end();
#endif /* GBE_DEBUG */

switch (genIntrinsicID) {
- case GEN_OCL_FBH: this->emitUnaryCallInst(I,CS,ir::OP_FBH, ir::TYPE_U32); break;
- case GEN_OCL_FBL: this->emitUnaryCallInst(I,CS,ir::OP_FBL, ir::TYPE_U32); break;
- case GEN_OCL_CBIT: this->emitUnaryCallInst(I,CS,ir::OP_CBIT, getUnsignedType(ctx, (*AI)->getType())); break;
+ case GEN_OCL_FBH: this->emitUnaryCallInst(I,ir::OP_FBH, ir::TYPE_U32); break;
+ case GEN_OCL_FBL: this->emitUnaryCallInst(I,ir::OP_FBL, ir::TYPE_U32); break;
+ case GEN_OCL_CBIT: this->emitUnaryCallInst(I,ir::OP_CBIT, getUnsignedType(ctx, (*AI)->getType())); break;
case GEN_OCL_ABS:
{
const ir::Register src = this->getRegister(*AI);
@@ -4915,8 +4913,8 @@
ctx.REGION(dst, src, x.getIntegerValue());
break;
}
- case GEN_OCL_RSQ: this->emitUnaryCallInst(I,CS,ir::OP_RSQ); break;
- case GEN_OCL_RCP: this->emitUnaryCallInst(I,CS,ir::OP_RCP); break;
+ case GEN_OCL_RSQ: this->emitUnaryCallInst(I,ir::OP_RSQ); break;
+ case GEN_OCL_RCP: this->emitUnaryCallInst(I,ir::OP_RCP); break;
case GEN_OCL_FORCE_SIMD8: ctx.setSimdWidth(8); break;
case GEN_OCL_FORCE_SIMD16: ctx.setSimdWidth(16); break;
case GEN_OCL_LBARRIER: ctx.SYNC(ir::syncLocalBarrier); break;
@@ -4947,31 +4945,31 @@
break;
}
case GEN_OCL_ATOMIC_ADD0:
- case GEN_OCL_ATOMIC_ADD1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_ADD); break;
+ case GEN_OCL_ATOMIC_ADD1: this->emitAtomicInst(I,ir::ATOMIC_OP_ADD); break;
case GEN_OCL_ATOMIC_SUB0:
- case GEN_OCL_ATOMIC_SUB1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_SUB); break;
+ case GEN_OCL_ATOMIC_SUB1: this->emitAtomicInst(I,ir::ATOMIC_OP_SUB); break;
case GEN_OCL_ATOMIC_AND0:
- case GEN_OCL_ATOMIC_AND1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_AND); break;
+ case GEN_OCL_ATOMIC_AND1: this->emitAtomicInst(I,ir::ATOMIC_OP_AND); break;
case GEN_OCL_ATOMIC_OR0:
- case GEN_OCL_ATOMIC_OR1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_OR); break;
+ case GEN_OCL_ATOMIC_OR1: this->emitAtomicInst(I,ir::ATOMIC_OP_OR); break;
case GEN_OCL_ATOMIC_XOR0:
- case GEN_OCL_ATOMIC_XOR1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_XOR); break;
+ case GEN_OCL_ATOMIC_XOR1: this->emitAtomicInst(I,ir::ATOMIC_OP_XOR); break;
case GEN_OCL_ATOMIC_XCHG0:
- case GEN_OCL_ATOMIC_XCHG1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_XCHG); break;
+ case GEN_OCL_ATOMIC_XCHG1: this->emitAtomicInst(I,ir::ATOMIC_OP_XCHG); break;
case GEN_OCL_ATOMIC_INC0:
- case GEN_OCL_ATOMIC_INC1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_INC); break;
+ case GEN_OCL_ATOMIC_INC1: this->emitAtomicInst(I,ir::ATOMIC_OP_INC); break;
case GEN_OCL_ATOMIC_DEC0:
- case GEN_OCL_ATOMIC_DEC1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_DEC); break;
+ case GEN_OCL_ATOMIC_DEC1: this->emitAtomicInst(I,ir::ATOMIC_OP_DEC); break;
case GEN_OCL_ATOMIC_UMIN0:
- case GEN_OCL_ATOMIC_UMIN1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_UMIN); break;
+ case GEN_OCL_ATOMIC_UMIN1: this->emitAtomicInst(I,ir::ATOMIC_OP_UMIN); break;
case GEN_OCL_ATOMIC_UMAX0:
- case GEN_OCL_ATOMIC_UMAX1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_UMAX); break;
+ case GEN_OCL_ATOMIC_UMAX1: this->emitAtomicInst(I,ir::ATOMIC_OP_UMAX); break;
case GEN_OCL_ATOMIC_IMIN0:
- case GEN_OCL_ATOMIC_IMIN1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_IMIN); break;
+ case GEN_OCL_ATOMIC_IMIN1: this->emitAtomicInst(I,ir::ATOMIC_OP_IMIN); break;
case GEN_OCL_ATOMIC_IMAX0:
- case GEN_OCL_ATOMIC_IMAX1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_IMAX); break;
+ case GEN_OCL_ATOMIC_IMAX1: this->emitAtomicInst(I,ir::ATOMIC_OP_IMAX); break;
case GEN_OCL_ATOMIC_CMPXCHG0:
- case GEN_OCL_ATOMIC_CMPXCHG1: this->emitAtomicInst(I,CS,ir::ATOMIC_OP_CMPXCHG); break;
+ case GEN_OCL_ATOMIC_CMPXCHG1: this->emitAtomicInst(I,ir::ATOMIC_OP_CMPXCHG); break;
case GEN_OCL_GET_IMAGE_WIDTH:
case GEN_OCL_GET_IMAGE_HEIGHT:
case GEN_OCL_GET_IMAGE_DEPTH:
@@ -5429,48 +5427,48 @@
ctx.WAIT();
break;
}
- case GEN_OCL_WORK_GROUP_ALL: this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_ALL); break;
- case GEN_OCL_WORK_GROUP_ANY: this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_ANY); break;
+ case GEN_OCL_WORK_GROUP_ALL: this->emitWorkGroupInst(I, ir::WORKGROUP_OP_ALL); break;
+ case GEN_OCL_WORK_GROUP_ANY: this->emitWorkGroupInst(I, ir::WORKGROUP_OP_ANY); break;
case GEN_OCL_WORK_GROUP_BROADCAST:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_BROADCAST); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_BROADCAST); break;
case GEN_OCL_WORK_GROUP_REDUCE_ADD:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_REDUCE_ADD); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_REDUCE_ADD); break;
case GEN_OCL_WORK_GROUP_REDUCE_MAX:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_REDUCE_MAX); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_REDUCE_MAX); break;
case GEN_OCL_WORK_GROUP_REDUCE_MIN:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_REDUCE_MIN); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_REDUCE_MIN); break;
case GEN_OCL_WORK_GROUP_SCAN_EXCLUSIVE_ADD:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_EXCLUSIVE_ADD); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_EXCLUSIVE_ADD); break;
case GEN_OCL_WORK_GROUP_SCAN_EXCLUSIVE_MAX:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_EXCLUSIVE_MAX); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_EXCLUSIVE_MAX); break;
case GEN_OCL_WORK_GROUP_SCAN_EXCLUSIVE_MIN:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_EXCLUSIVE_MIN); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_EXCLUSIVE_MIN); break;
case GEN_OCL_WORK_GROUP_SCAN_INCLUSIVE_ADD:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_INCLUSIVE_ADD); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_INCLUSIVE_ADD); break;
case GEN_OCL_WORK_GROUP_SCAN_INCLUSIVE_MAX:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_INCLUSIVE_MAX); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_INCLUSIVE_MAX); break;
case GEN_OCL_WORK_GROUP_SCAN_INCLUSIVE_MIN:
- this->emitWorkGroupInst(I, CS, ir::WORKGROUP_OP_INCLUSIVE_MIN); break;
+ this->emitWorkGroupInst(I, ir::WORKGROUP_OP_INCLUSIVE_MIN); break;
case GEN_OCL_SUB_GROUP_BROADCAST:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_BROADCAST); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_BROADCAST); break;
case GEN_OCL_SUB_GROUP_REDUCE_ADD:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_REDUCE_ADD); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_REDUCE_ADD); break;
case GEN_OCL_SUB_GROUP_REDUCE_MAX:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_REDUCE_MAX); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_REDUCE_MAX); break;
case GEN_OCL_SUB_GROUP_REDUCE_MIN:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_REDUCE_MIN); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_REDUCE_MIN); break;
case GEN_OCL_SUB_GROUP_SCAN_EXCLUSIVE_ADD:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_EXCLUSIVE_ADD); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_EXCLUSIVE_ADD); break;
case GEN_OCL_SUB_GROUP_SCAN_EXCLUSIVE_MAX:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_EXCLUSIVE_MAX); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_EXCLUSIVE_MAX); break;
case GEN_OCL_SUB_GROUP_SCAN_EXCLUSIVE_MIN:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_EXCLUSIVE_MIN); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_EXCLUSIVE_MIN); break;
case GEN_OCL_SUB_GROUP_SCAN_INCLUSIVE_ADD:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_INCLUSIVE_ADD); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_INCLUSIVE_ADD); break;
case GEN_OCL_SUB_GROUP_SCAN_INCLUSIVE_MAX:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_INCLUSIVE_MAX); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_INCLUSIVE_MAX); break;
case GEN_OCL_SUB_GROUP_SCAN_INCLUSIVE_MIN:
- this->emitSubGroupInst(I, CS, ir::WORKGROUP_OP_INCLUSIVE_MIN); break;
+ this->emitSubGroupInst(I, ir::WORKGROUP_OP_INCLUSIVE_MIN); break;
case GEN_OCL_LRP:
{
const ir::Register dst = this->getRegister(&I);
@@ -5484,69 +5482,69 @@
break;
}
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_MEM:
- this->emitBlockReadWriteMemInst(I, CS, false, 1); break;
+ this->emitBlockReadWriteMemInst(I, false, 1); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_MEM2:
- this->emitBlockReadWriteMemInst(I, CS, false, 2); break;
+ this->emitBlockReadWriteMemInst(I, false, 2); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_MEM4:
- this->emitBlockReadWriteMemInst(I, CS, false, 4); break;
+ this->emitBlockReadWriteMemInst(I, false, 4); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_MEM8:
- this->emitBlockReadWriteMemInst(I, CS, false, 8); break;
+ this->emitBlockReadWriteMemInst(I, false, 8); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_MEM:
- this->emitBlockReadWriteMemInst(I, CS, true, 1); break;
+ this->emitBlockReadWriteMemInst(I, true, 1); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_MEM2:
- this->emitBlockReadWriteMemInst(I, CS, true, 2); break;
+ this->emitBlockReadWriteMemInst(I, true, 2); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_MEM4:
- this->emitBlockReadWriteMemInst(I, CS, true, 4); break;
+ this->emitBlockReadWriteMemInst(I, true, 4); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_MEM8:
- this->emitBlockReadWriteMemInst(I, CS, true, 8); break;
+ this->emitBlockReadWriteMemInst(I, true, 8); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_IMAGE:
- this->emitBlockReadWriteImageInst(I, CS, false, 1); break;
+ this->emitBlockReadWriteImageInst(I, false, 1); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_IMAGE2:
- this->emitBlockReadWriteImageInst(I, CS, false, 2); break;
+ this->emitBlockReadWriteImageInst(I, false, 2); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_IMAGE4:
- this->emitBlockReadWriteImageInst(I, CS, false, 4); break;
+ this->emitBlockReadWriteImageInst(I, false, 4); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_UI_IMAGE8:
- this->emitBlockReadWriteImageInst(I, CS, false, 8); break;
+ this->emitBlockReadWriteImageInst(I, false, 8); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_IMAGE:
- this->emitBlockReadWriteImageInst(I, CS, true, 1); break;
+ this->emitBlockReadWriteImageInst(I, true, 1); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_IMAGE2:
- this->emitBlockReadWriteImageInst(I, CS, true, 2); break;
+ this->emitBlockReadWriteImageInst(I, true, 2); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_IMAGE4:
- this->emitBlockReadWriteImageInst(I, CS, true, 4); break;
+ this->emitBlockReadWriteImageInst(I, true, 4); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_UI_IMAGE8:
- this->emitBlockReadWriteImageInst(I, CS, true, 8); break;
+ this->emitBlockReadWriteImageInst(I, true, 8); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_MEM:
- this->emitBlockReadWriteMemInst(I, CS, false, 1, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, false, 1, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_MEM2:
- this->emitBlockReadWriteMemInst(I, CS, false, 2, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, false, 2, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_MEM4:
- this->emitBlockReadWriteMemInst(I, CS, false, 4, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, false, 4, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_MEM8:
- this->emitBlockReadWriteMemInst(I, CS, false, 8, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, false, 8, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_MEM:
- this->emitBlockReadWriteMemInst(I, CS, true, 1, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, true, 1, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_MEM2:
- this->emitBlockReadWriteMemInst(I, CS, true, 2, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, true, 2, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_MEM4:
- this->emitBlockReadWriteMemInst(I, CS, true, 4, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, true, 4, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_MEM8:
- this->emitBlockReadWriteMemInst(I, CS, true, 8, ir::TYPE_U16); break;
+ this->emitBlockReadWriteMemInst(I, true, 8, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_IMAGE:
- this->emitBlockReadWriteImageInst(I, CS, false, 1, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, false, 1, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_IMAGE2:
- this->emitBlockReadWriteImageInst(I, CS, false, 2, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, false, 2, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_IMAGE4:
- this->emitBlockReadWriteImageInst(I, CS, false, 4, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, false, 4, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_READ_US_IMAGE8:
- this->emitBlockReadWriteImageInst(I, CS, false, 8, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, false, 8, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_IMAGE:
- this->emitBlockReadWriteImageInst(I, CS, true, 1, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, true, 1, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_IMAGE2:
- this->emitBlockReadWriteImageInst(I, CS, true, 2, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, true, 2, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_IMAGE4:
- this->emitBlockReadWriteImageInst(I, CS, true, 4, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, true, 4, ir::TYPE_U16); break;
case GEN_OCL_SUB_GROUP_BLOCK_WRITE_US_IMAGE8:
- this->emitBlockReadWriteImageInst(I, CS, true, 8, ir::TYPE_U16); break;
+ this->emitBlockReadWriteImageInst(I, true, 8, ir::TYPE_U16); break;
case GEN_OCL_GET_PIPE:
case GEN_OCL_MAKE_RID:
case GEN_OCL_GET_RID:

--- a/backend/src/llvm/llvm_includes.hpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_includes.hpp 2020-11-09 19:00:09.955926795 +0100
@@ -103,7 +103,6 @@

#if LLVM_VERSION_MAJOR * 10 + LLVM_VERSION_MINOR >= 35
#include "llvm/IR/Mangler.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IRPrintingPasses.h"

--- a/backend/src/llvm/llvm_loadstore_optimization.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_loadstore_optimization.cpp 2020-11-09 19:10:37.141852308 +0100
@@ -160,7 +160,7 @@
Value *vecPtr = Builder.CreateBitCast(ld->getPointerOperand(),
PointerType::get(vecTy, addrSpace));
LoadInst *vecValue = Builder.CreateLoad(vecPtr);
- vecValue->setAlignment(align);
+ vecValue->setAlignment(align.valueOrOne());

for (unsigned i = 0; i < size; ++i) {
Value *S = Builder.CreateExtractElement(vecValue, Builder.getInt32(i));
@@ -251,7 +251,7 @@
return;
Value *newPtr = Builder.CreateBitCast(stPointer, PointerType::get(vecTy, addrSpace));
StoreInst *newST = Builder.CreateStore(parent, newPtr);
- newST->setAlignment(align);
+ newST->setAlignment(align.valueOrOne());
}

// Find the safe iterator we can point to. If reorder happens, we need to

--- a/backend/src/llvm/llvm_passes.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_passes.cpp 2020-11-09 18:48:55.997006837 +0100
@@ -95,7 +95,7 @@
{
switch (Ty->getTypeID()) {
case Type::VoidTyID: NOT_SUPPORTED;
- case Type::VectorTyID:
+ case Type::FixedVectorTyID:
{
const VectorType* VecTy = cast<VectorType>(Ty);
uint32_t elemNum = VecTy->getNumElements();
@@ -138,7 +138,7 @@
case Type::HalfTyID: return 16;
case Type::FloatTyID: return 32;
case Type::DoubleTyID: return 64;
- case Type::VectorTyID:
+ case Type::FixedVectorTyID:
{
const VectorType* VecTy = cast<VectorType>(Ty);
uint32_t numElem = VecTy->getNumElements();
@@ -184,10 +184,12 @@
Type *elementType = NULL;
if (PointerType* ptrType = dyn_cast<PointerType>(eltTy))
elementType = ptrType->getElementType();
- else if(SequentialType * seqType = dyn_cast<SequentialType>(eltTy))
- elementType = seqType->getElementType();
- else if(CompositeType * compTy= dyn_cast<CompositeType>(eltTy))
- elementType = compTy->getTypeAtIndex(index);
+ else if (ArrayType * arrType = dyn_cast<ArrayType>(eltTy))
+ elementType = arrType->getElementType();
+ else if(VectorType * vecType = dyn_cast<VectorType>(eltTy))
+ elementType = vecType->getElementType();
+ else if(StructType * structType = dyn_cast<StructType>(eltTy))
+ elementType = structType->getTypeAtIndex(index);
GBE_ASSERT(elementType);
return elementType;
}

--- a/backend/src/llvm/llvm_printf_parser.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_printf_parser.cpp 2020-11-09 18:57:52.618943105 +0100
@@ -323,8 +323,7 @@

bool PrintfParser::parseOnePrintfInstruction(CallInst * call)
{
- CallSite CS(call);
- CallSite::arg_iterator CI_FMT = CS.arg_begin();
+ User::op_iterator CI_FMT = call->arg_begin();
int param_num = 0;

llvm::Constant* arg0 = dyn_cast<llvm::ConstantExpr>(*CI_FMT);
@@ -341,7 +340,7 @@
return false;
}

- std::string fmt = fmt_arg->getAsCString();
+ std::string fmt = fmt_arg->getAsCString().str();
if (fmt.size() == 0)
return false;

@@ -437,8 +436,8 @@
if ( callFunc->getIntrinsicID() != 0)
continue;

- Value *Callee = call->getCalledValue();
- const std::string fnName = Callee->getName();
+ Value *Callee = call->getCalledOperand();
+ const std::string fnName = Callee->getName().str();

if (fnName != "__gen_ocl_printf_stub" && fnName != "__gen_ocl_puts_stub")
continue;
@@ -582,7 +581,7 @@
if (!fmt_arg || !fmt_arg->isCString()) {
return false;
}
- slot.state.str = fmt_arg->getAsCString();
+ slot.state.str = fmt_arg->getAsCString().str();
return true;
}
case PRINTF_CONVERSION_P: {
@@ -595,10 +594,10 @@

break;

- case Type::VectorTyID: {
- Type* vect_type = arg->getType();
- Type* elt_type = vect_type->getVectorElementType();
- int vec_num = vect_type->getVectorNumElements();
+ case Type::FixedVectorTyID: {
+ VectorType* vect_type = dyn_cast<VectorType>(arg->getType());
+ Type* elt_type = vect_type->getElementType();
+ int vec_num = vect_type->getNumElements();
bool sign = false;

if (vec_num != slot.state.vector_n) {

--- a/backend/src/llvm/llvm_profiling.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_profiling.cpp 2020-11-09 18:59:50.120929150 +0100
@@ -35,7 +35,6 @@
#include "llvm/IR/IRBuilder.h"

#if LLVM_VERSION_MAJOR * 10 + LLVM_VERSION_MINOR >= 35
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#else
#include "llvm/Support/CallSite.h"

--- a/backend/src/llvm/llvm_sampler_fix.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_sampler_fix.cpp 2020-11-09 17:01:09.407774832 +0100
@@ -45,8 +45,8 @@
bool visitCallInst(CallInst *I) {
if(!I)
return false;
- Value *Callee = I->getCalledValue();
- const std::string fnName = Callee->getName();
+ Value *Callee = I->getCalledOperand();
+ const std::string fnName = Callee->getName().str();
bool changed = false;
Type *boolTy = IntegerType::get(I->getContext(), 1);
Type *i32Ty = IntegerType::get(I->getContext(), 32);
@@ -59,7 +59,7 @@
Value *needFixVal;
#if LLVM_VERSION_MAJOR * 10 + LLVM_VERSION_MINOR >= 40
CallInst *init = dyn_cast<CallInst>(I->getOperand(0));
- if (init && init->getCalledValue()->getName().compare("__translate_sampler_initializer"))
+ if (init && init->getCalledOperand()->getName().compare("__translate_sampler_initializer"))
{
const ConstantInt *ci = dyn_cast<ConstantInt>(init->getOperand(0));
uint32_t samplerInt = ci->getZExtValue();
@@ -113,7 +113,7 @@
Value *needFixVal;
#if LLVM_VERSION_MAJOR * 10 + LLVM_VERSION_MINOR >= 40
CallInst *init = dyn_cast<CallInst>(I->getOperand(0));
- if (init && init->getCalledValue()->getName().compare("__translate_sampler_initializer"))
+ if (init && init->getCalledOperand()->getName().compare("__translate_sampler_initializer"))
{
const ConstantInt *ci = dyn_cast<ConstantInt>(init->getOperand(0));
uint32_t samplerInt = ci->getZExtValue();

--- a/backend/src/llvm/llvm_scalarize.cpp 2020-11-09 16:48:20.823866112 +0100
+++ b/backend/src/llvm/llvm_scalarize.cpp 2020-11-09 18:51:16.888990104 +0100
@@ -178,7 +178,7 @@
if(!type)
return type;
switch(type->getTypeID()) {
- case Type::VectorTyID:
+ case Type::FixedVectorTyID:
case Type::ArrayTyID:
return GetBasicType(type->getContainedType(0));
default:
@@ -188,7 +188,7 @@
}

int GetComponentCount(const Type* type) {
- if (type && type->getTypeID() == Type::VectorTyID)
+ if (type && type->getTypeID() == Type::FixedVectorTyID)
return llvm::dyn_cast<VectorType>(type)->getNumElements();
else
return 1;
@@ -652,13 +652,12 @@
break;
}
} else {
- Value *Callee = call->getCalledValue();
- const std::string fnName = Callee->getName();
+ Value *Callee = call->getCalledOperand();
+ const std::string fnName = Callee->getName().str();
auto genIntrinsicID = intrinsicMap.find(fnName);

// Get the function arguments
- CallSite CS(call);
- CallSite::arg_iterator CI = CS.arg_begin() + 1;
+ User::op_iterator CI = call->arg_begin() + 1;

switch (genIntrinsicID) {
case GEN_OCL_NOT_FOUND:
@@ -729,7 +728,7 @@
extractFromVector(call);
break;
case GEN_OCL_PRINTF:
- for (; CI != CS.arg_end(); ++CI)
+ for (; CI != call->arg_end(); ++CI)
if ((*CI)->getType()->isVectorTy())
*CI = InsertToVector(call, *CI);
break;