Matic(馬蹄鏈)智能合約游戲系統(tǒng)開發(fā)案例(詳細(xì)流程及規(guī)則)
什么是DAPP?DAPP是Decentralized Application的縮寫,中文叫分布式應(yīng)用/去中心化應(yīng)用。通常來說,不同的DAPP會(huì)采用不同的底層技術(shù)開發(fā)平臺(tái)和共識(shí)機(jī)制,或者自行發(fā)布代幣。
以太坊中一般會(huì)認(rèn)為智能合約就是DAPP,當(dāng)然更準(zhǔn)確的可以認(rèn)為智能合約相當(dāng)于服務(wù)器后臺(tái),另外要實(shí)現(xiàn)用戶體驗(yàn),還需要UI交互界面,通過RPC與后臺(tái)對(duì)接,那么DAPP系統(tǒng)開發(fā)就是開發(fā)包含完整的智能合約和UI交互界面的DAPP。
int QuantizeConvPerChannel(const float*weight,const int size,const float*bias,int8_t*quantizedWeight,
int32_t*quantizedBias,float*scale,const std::vector<float>&inputScale,
const std::vector<float>&outputScale,std::string method,float weightClampValue,bool mergeChannel){
const int inputChannels=inputScale.size();
const int outputChannels=outputScale.size();
const int icXoc=inputChannels*outputChannels;
DCHECK(size%icXoc==0)<<"Input Data Size Error!";
std::vector<float>quantizedWeightScale(outputChannels);
float inputScalexWeight=1.0f;
if(mergeChannel){
if(method=="MAX_ABS"){
SymmetricQuantizeWeight(weight,size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}開發(fā)流程及功能I59模式2OO7系統(tǒng)3O69
else if(method=="ADMM"){
QuantizeWeightADMM(weight,size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
inputScalexWeight=inputScale[0];
}else{
const int kernelSize=size/icXoc;
const int ocStride=size/outputChannels;
//每個(gè)權(quán)重都乘上對(duì)應(yīng)scale
std::vector<float>weightMultiByInputScale(size);
for(int oc=0;oc<outputChannels;++oc){
for(int ic=0;ic<inputChannels;++ic){
for(int i=0;i<kernelSize;++i){
const int index=oc*ocStride+ic*kernelSize+i;
weightMultiByInputScale[index]=inputScale[ic]*weight[index];
}
}
}開發(fā)需求及分析:yy625019
if(method=="MAX_ABS"){
SymmetricQuantizeWeight(weightMultiByInputScale.data(),size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
else if(method=="ADMM"){
QuantizeWeightADMM(weightMultiByInputScale.data(),size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
}
for(int i=0;i<outputChannels;++i){
if(fabs(outputScale<i>)<=1e-6){
scale<i>=0.0f;
}else{
scale<i>=inputScalexWeight*quantizedWeightScale<i>/outputScale[0];
}
}
if(bias){
for(int i=0;i<outputChannels;++i){
if(fabs(inputScalexWeight)<=1e-6||fabs(quantizedWeightScale<i>)<=1e-6){
quantizedBias<i>=0;
}else{
quantizedBias<i>=static_cast<int32_t>(bias<i>/(inputScalexWeight*quantizedWeightScale<i>));
}
}
}
return 0;
}