32 changes: 14 additions & 18 deletions lib/SILOptimizer/Mandatory/TFDeabstraction.cpp
@@ -69,6 +69,19 @@ static void deleteInstAndAbandonedUses(SILInstruction *inst) {
inst->eraseFromParent();
}

/// Return true if this apply instruction is to a function that can be
/// conditionally hoisted into the graph, but don't check the operands to
/// see if they are actually constants we can handle.
static bool isDecodableApply(ApplyInst *apply) {
auto fn = apply->getCalleeFunction();
if (!fn) return false;

auto name = fn->getName();
return name == "__tf_tensor_from_scalars" ||
name == "__tf_tensor_from_scalars_1d";
}


namespace {
/// This class wraps the state and logic necessary to deabstract code into one
/// specific SIL function, which has been designated as a potential top-level
@@ -510,7 +523,6 @@ static bool explodeAggregateInst(SILInstruction *inst,
return true;
}


/// Identify all of the tensor operations in the current function, and scan them
/// to see if there are any indirect arguments, where the address of a stack
/// allocation is passed to the builtin. These occur when the tensor op was in
@@ -561,7 +573,7 @@ void TFDeabstraction::simplifyTensorOperands() {
// deabstract. This ensures that we deabstract its operands, which makes
// it possible to tell if it is getting a variable or constant value.
if (auto *apply = dyn_cast<ApplyInst>(inst)) {
if (SILTensorOpInfo::isDecodableApply(apply)) {
if (isDecodableApply(apply)) {
logIfFirstChange();
// Remember this for later passes.
tensorOps.push_back(apply);
@@ -1706,22 +1718,6 @@ void TFDeabstraction::checkAttributesAndFormGraphOps() {
}
}
}

if (!TFStrictDeabstraction) {
for (auto &BB : fn) {
for (auto I = BB.begin(), E = BB.end(); I != E; ) {
// Manually move iterator to avoid invalidation if we replace 'inst'.
auto *inst = &*I++;

// If this is a well known function that can be transformed into an op,
// do so first.
// FIXME: This should take into consideration the constants we just
// computed!
if (auto apply = dyn_cast<ApplyInst>(inst))
inst = SILTensorOpInfo::decodeApply(apply);
}
}
}
}

/// If the specified type is a Swift.Array or some element type, then return the
7 changes: 0 additions & 7 deletions lib/SILOptimizer/Mandatory/TFPartition.cpp
@@ -1979,13 +1979,6 @@ bool TFFunctionPartition::markFunction(bool &hasTensorOps) {
continue;
}

// If this is a well known function that can be transformed into an op, do
// so first.
if (auto apply = dyn_cast<ApplyInst>(inst)) {
inst = SILTensorOpInfo::decodeApply(apply);
bbi = SILBasicBlock::iterator(inst);
}

auto opInfo = SILTensorOpInfo::decode(inst);
if (!opInfo)
continue;
188 changes: 0 additions & 188 deletions lib/SILOptimizer/Mandatory/TFUtilities.cpp
@@ -991,194 +991,6 @@ static bool expandArrayAttribute(SILValue arrayVal, StringRef attrName,
return true;
}

/// If all the operands to a call to __tf_tensor_from_scalars are constants, we
/// can promote this to a 'Const' node with an attached TF_Tensor attribute.
///
/// It takes a 1D array of scalars and a shape as a 1D array of integers.
///
SILInstruction *SILTensorOpInfo::decodeTensorFromScalars(ApplyInst *inst) {
assert(inst->getNumOperands() == 3 && isTensorHandle(inst->getType()) &&
"Unexpected type signature for __tf_tensor_from_scalars");

// If we can't analyze the operands as arrays of constants, give up.
auto scalars = getAttrOperand(inst->getOperand(1));
auto shape = getAttrOperand(inst->getOperand(2));
if (!scalars || !shape)
return inst;

// We transform this into a __tfop_Const instruction, where the values are
// part of the 'value' tensor attribute and the shape is specified as a shape
// attribute.
SmallVector<SILValue, 8> operands;
std::string name = "__tfop_Const";

// Try to expand the array and the shape into their scalars.
if (!expandArrayAttribute(scalars, "value", OperandClass::Tensor,
name, operands, inst))
return inst;

unsigned numElements = operands.size()-1;

if (!expandArrayAttribute(shape, "value", OperandClass::Shape,
name, operands, inst))
return inst;

// Verify we have the right number of scalars. If not, emit an error and
// leave the broken code without promoting it to an op.
uint64_t scalarCount = 1;
std::string errorInfo;
for (auto elt : ArrayRef<SILValue>(operands).drop_front(numElements+2)) {
auto *eltCst = cast<IntegerLiteralInst>(elt);
scalarCount *= eltCst->getValue().getLimitedValue();
}
if (scalarCount != numElements && errorInfo.empty()) {
errorInfo = "tensor literal should have " + llvm::utostr(scalarCount) +
" scalars for this shape, but has " + llvm::utostr(numElements);
}

if (!errorInfo.empty()) {
auto loc = getUserSourceLocation(inst);
diagnose(inst->getType().getASTType()->getASTContext(),
loc.getSourceLoc(), diag::tf_op_misuse, errorInfo)
.highlight(loc.getSourceRange());
return inst;
}

// This takes a Tensor and a Shape operand, but needs a DType added. The
// dtype is the type of the Tensor elements, which we conveniently already
// have available as the first operand.
operands.push_back(operands[0]);
name += ",dtype";

auto scalarV = inst->getOperand(1);
auto shapeV = inst->getOperand(2);

SILBuilder B(inst);
// Finally build a new builtin instruction with the simplified operands.
auto newInst =
B.createBuiltin(inst->getLoc(),
B.getASTContext().getIdentifier(name),
inst->getType(), /*no substitutions*/{},
operands);
newInst->setDebugLocation(inst->getDebugLocation());
inst->replaceAllUsesPairwiseWith(newInst);
inst->eraseFromParent();


B.setInsertionPoint(newInst);
// We are dropping a reference to the element and shape array initializers, so
// we need to remove the arrays themselves or at least release them.
removeOrDestroyArrayValue(scalarV, newInst->getLoc(), B);
removeOrDestroyArrayValue(shapeV, newInst->getLoc(), B);
return newInst;
}
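
For context on the validation in the function above, here is a rough Swift-level sketch of the kind of call that lowers to __tf_tensor_from_scalars. The Tensor(shape:scalars:) initializer spelling is an assumption for illustration and is not taken from this diff; the point is only that the product of the shape's dimensions must match the number of scalars.

import TensorFlow

// Shape [2, 3] implies 2 * 3 == 6 scalars, so this literal can be promoted
// to a single 'Const' node by the decoding logic above.
let ok = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])

// Supplying only five scalars for the same shape would instead produce the
// "tensor literal should have 6 scalars for this shape, but has 5" diagnostic.
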

/// If all the operands to a call to __tf_tensor_from_scalars_1d are constants,
/// we can promote this to a 'Const' node with an attached TF_Tensor attribute.
/// This is a specialized form of __tf_tensor_from_scalars, because the latter is
/// defined in terms of a shape of "[scalars.count]" but the performance
/// optimizer is not reliably constant propagating this. When we have a
/// reliable deabstraction pass we can re-evaluate this and hopefully eliminate
/// it in favor of library code in the TensorFlow module.
///
SILInstruction *SILTensorOpInfo::decodeTensorFromScalars1D(ApplyInst *inst) {
assert(inst->getNumOperands() == 2 && isTensorHandle(inst->getType()) &&
"Unexpected type signature for __tf_tensor_from_scalars_1d");

// If we can't analyze the operands as arrays of constants, give up.
auto scalars = getAttrOperand(inst->getOperand(1));
if (!scalars)
return inst;

// We transform this into a __tfop_Const instruction, where the values are
// part of the 'value' tensor attribute and the shape is hard coded.
SmallVector<SILValue, 8> operands;
std::string name = "__tfop_Const";

// Try to expand the array into its scalars.
if (!expandArrayAttribute(scalars, "value", OperandClass::Tensor,
name, operands, inst))
return inst;

SILBuilder B(inst);

// This takes a Tensor operand, but needs a Shape and a DType added. At
// this point, the operands list will have a metatype for the tensor as
// the first operand then all the elements.
uint64_t scalarCount = operands.size()-1;

// The shape needs a metatype to be well formed, but nothing actually
// cares what it is. Just re-push the metatype for the tensor elements,
// even though it might be floating point or something else weird.
operands.push_back(operands[0]);
name += ",shape";
name += getOperandClassSuffix(OperandClass::Shape);

// The shape of a 1d tensor is just the count of elements.
auto &ctx = inst->getFunction()->getASTContext();
auto scalarCountVal =
B.createIntegerLiteral(inst->getLoc(),
SILType::getBuiltinIntegerType(64, ctx),
scalarCount);
operands.push_back(scalarCountVal);
name += ",";
name += getOperandClassSuffix(OperandClass::ArrayElement);

// The dtype is the type of the Tensor elements, which we conveniently
// already have available as the first operand.
operands.push_back(operands[0]);
name += ",dtype";

auto arrayValue = inst->getOperand(1);

// Finally build a new builtin instruction with the simplified operands.
auto newInst =
B.createBuiltin(inst->getLoc(),
B.getASTContext().getIdentifier(name),
inst->getType(), /*no substitutions*/{},
operands);
newInst->setDebugLocation(inst->getDebugLocation());
inst->replaceAllUsesPairwiseWith(newInst);
inst->eraseFromParent();

// We dropped a reference to the element initializer, so we need to
// remove the array itself or at least release it. This happens after
// creating the replacement builtin, so that element initializers aren't
// dropped.
B.setInsertionPoint(newInst);
removeOrDestroyArrayValue(arrayValue, newInst->getLoc(), B);

return newInst;
}
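
Similarly, a rough sketch of the 1-D case handled by decodeTensorFromScalars1D, continuing the assumptions of the previous snippet: the caller passes no shape, so the decoder synthesizes it from the element count, which is why a single integer-literal operand is pushed for the shape attribute above.

// A 1-D literal: the shape is implicitly [scalars.count], i.e. [4] here.
let v = Tensor<Float>([1, 2, 3, 4])
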

/// Return true if this apply instruction is to a function that can be
/// conditionally hoisted into the graph, but don't check the operands to
/// see if they are actually constants we can handle.
bool SILTensorOpInfo::isDecodableApply(ApplyInst *apply) {
auto fn = apply->getCalleeFunction();
if (!fn) return false;

auto name = fn->getName();
return name == "__tf_tensor_from_scalars" ||
name == "__tf_tensor_from_scalars_1d";
}

/// If the specified call is to a function that we can promote to an op,
/// rewrite the instruction and return a new one that does so. Otherwise,
/// return the same instruction.
SILInstruction *SILTensorOpInfo::decodeApply(ApplyInst *apply) {
auto fn = apply->getCalleeFunction();
if (!fn) return apply;

auto name = fn->getName();
if (name == "__tf_tensor_from_scalars")
return decodeTensorFromScalars(apply);
if (name == "__tf_tensor_from_scalars_1d")
return decodeTensorFromScalars1D(apply);

return apply;
}

/// Return the string suffix for the specified attribute modifier.
const char *SILTensorOpInfo::
getOperandClassSuffix(OperandClass opClass) {
13 changes: 0 additions & 13 deletions lib/SILOptimizer/Mandatory/TFUtilities.h
@@ -357,16 +357,6 @@ struct GraphGlobalConfiguration {
operandClasses[operandNumber].second == OperandClass::InputElt;
}

/// Return true if this apply instruction is to a function that can be
/// conditionally hoisted into the graph, but don't check the operands to
/// see if they are actually constants we can handle.
static bool isDecodableApply(ApplyInst *apply);

/// If the specified call is to a function that we can promote to an op,
/// rewrite the instruction and return a new one that does so. Otherwise,
/// return the same instruction.
static SILInstruction *decodeApply(ApplyInst *apply);

/// Analyze the specified SIL instruction and return a SILTensorOpInfo
/// result if the instruction is a valid tensor operation. This is the
/// way that SILTensorOpInfo's are created.
@@ -429,9 +419,6 @@
private:
SILTensorOpInfo(BuiltinInst *inst) : inst(inst) {}
bool decodeBuiltin();
static SILInstruction *decodeTensorFromScalars(ApplyInst *inst);
static SILInstruction *decodeTensorFromScalars1D(ApplyInst *inst);
static SILInstruction *decodeTensorFromScalarsND(ApplyInst *inst);
};

/// Holds information about a TensorFlow operation as represented in SIL