FDF互助循環(huán)游戲系統(tǒng)開發(fā)(開發(fā)邏輯)丨智能合約互助循環(huán)游戲系統(tǒng)開發(fā)(案例詳細(xì))
什么是智能合約?智能合約,又稱加密合約,是在一定條件下可以直接控制數(shù)字資產(chǎn)或資產(chǎn)在各方之間轉(zhuǎn)移的一種計(jì)算機(jī)程序。智能合約不僅以與傳統(tǒng)合約相同的方式定義了協(xié)議的規(guī)則和處罰,還可以自動(dòng)強(qiáng)制執(zhí)行這些義務(wù)。它通過接受信息作為輸入,通過規(guī)則為輸入賦值,在合約中列出并執(zhí)行這些合約條款所要求的行為。
void Calibration::_updateScale(){
for(const auto&op:_originaleModel->oplists){
std::vector<std::string>::iterator iter=std::find(_skip_quant_ops.begin(),_skip_quant_ops.end(),op->name);
if(iter!=_skip_quant_ops.end()){
continue;
}
const auto opType=op->type;
if(opType!=MNN::OpType_Convolution&&opType!=MNN::OpType_ConvolutionDepthwise&&
opType!=MNN::OpType_Eltwise){
continue;
}
auto tensorsPair=_opInfo.find(op->name);
if(tensorsPair==_opInfo.end()){
MNN_ERROR("Can't find tensors for%sn",op->name.c_str());
}設(shè)計(jì)案例及源碼I59分析2OO7搭建3O69
if(opType==MNN::OpType_Eltwise){
auto param=op->main.AsEltwise();
//Now only support AddInt8
if(param->type!=MNN::EltwiseType_SUM){
continue;
}
const auto&inputScale0=_scales[tensorsPair->second.first[0]];
const auto&inputScale1=_scales[tensorsPair->second.first[1]];
const auto&outputScale=_scales[tensorsPair->second.second[0]];
const int outputScaleSize=outputScale.size();
std::vector<float>outputInvertScale(outputScaleSize);
Helper::invertData(outputInvertScale.data(),outputScale.data(),outputScaleSize);
op->type=MNN::OpType_EltwiseInt8;
op->main.Reset();
op->main.type=MNN::OpParameter_EltwiseInt8;
auto eltwiseInt8Param=new MNN::EltwiseInt8T;
auto input0ScaleParam=new MNN::QuantizedFloatParamT;
auto input1ScaleParam=new MNN::QuantizedFloatParamT;
auto outputScaleParam=new MNN::QuantizedFloatParamT;
input0ScaleParam->tensorScale=inputScale0;
input1ScaleParam->tensorScale=inputScale1;
outputScaleParam->tensorScale=outputInvertScale;
eltwiseInt8Param->inputQuan0=std::unique_ptr<MNN::QuantizedFloatParamT>(input0ScaleParam);
eltwiseInt8Param->inputQuan1=std::unique_ptr<MNN::QuantizedFloatParamT>(input1ScaleParam);
eltwiseInt8Param->outputQuan=std::unique_ptr<MNN::QuantizedFloatParamT>(outputScaleParam);
op->main.value=eltwiseInt8Param;
continue;
}開發(fā)模式及分析:yy625019
//below is Conv/DepthwiseConv
const auto&inputScale=_scales[tensorsPair->second.first[0]];
const auto&outputScale=_scales[tensorsPair->second.second[0]];
auto param=op->main.AsConvolution2D();
const int channles=param->common->outputCount;
const int weightSize=param->weight.size();
param->symmetricQuan.reset(new MNN::QuantizedFloatParamT);
//quantizedParam是param->symmetricQuan的引用
auto&quantizedParam=param->symmetricQuan;
quantizedParam->scale.resize(channles);
quantizedParam->weight.resize(weightSize);
quantizedParam->bias.resize(channles);
if(opType==MNN::OpType_Convolution){
QuantizeConvPerChannel(param->weight.data(),param->weight.size(),param->bias.data(),
quantizedParam->weight.data(),quantizedParam->bias.data(),
quantizedParam->scale.data(),inputScale,outputScale,_weightQuantizeMethod,_weightClampValue);
op->type=MNN::OpType_ConvInt8;
}else if(opType==MNN::OpType_ConvolutionDepthwise){
QuantizeDepthwiseConv(param->weight.data(),param->weight.size(),param->bias.data(),
quantizedParam->weight.data(),quantizedParam->bias.data(),
quantizedParam->scale.data(),inputScale,outputScale,_weightQuantizeMethod,_weightClampValue);
op->type=MNN::OpType_DepthwiseConvInt8;
}
if(param->common->relu6){
param->common->relu=true;
param->common->relu6=false;
}
//清除原本的權(quán)重和bias
param->weight.clear();
param->bias.clear();
}
}