Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

init QNNBacked #3165

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 95 additions & 0 deletions codegen/backend/generate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import os
import sys
import argparse
from os.path import dirname
import json
import shutil
from typing import List, Dict
import datetime

# Resolve repository paths relative to this file (3 levels up = MNN root).
# abspath guards against a relative __file__ (e.g. running `python generate.py`
# from inside codegen/backend), where repeated dirname() would otherwise
# collapse to "" and every derived path would silently become relative.
MNN_ROOT = dirname(dirname(dirname(os.path.abspath(__file__))))  # 3 level up
CODEGEN_PATH = os.path.join(MNN_ROOT, "codegen")
CODEGEN_BACKEND_PATH = os.path.join(CODEGEN_PATH, "backend")
BACKEND_PATH = os.path.join(MNN_ROOT, "source", "backend")
print("MNN_ROOT: " + MNN_ROOT)

def parse_str(s, kv):
    # Substitute one "{key}" placeholder in template text s.
    # kv is a (key, value) pair, typically one item from symbol.json.
    key, value = kv
    placeholder = "{" + key + "}"
    return s.replace(placeholder, value)

def get_copyright() -> str:
    """Load the copyright header template and stamp in today's date.

    Returns the template text with {CURRENT_TIME} replaced by YYYY/MM/DD.
    The {THIS_FILE_NAME} placeholder is intentionally left in place for the
    caller to fill via str.format().
    """
    template_path = os.path.join(CODEGEN_BACKEND_PATH, "templates", "copyright.txt")
    # Context manager closes the handle deterministically (the original
    # leaked an open file object).
    with open(template_path, "rt") as f:
        c = f.read()
    return c.replace("{CURRENT_TIME}", datetime.datetime.today().strftime('%Y/%m/%d'))

def parse_copyright(s: str, c: str):
    """Fill the {COPYRIGHT} slot in template text s with rendered header c."""
    filled = s.replace("{COPYRIGHT}", c)
    return filled

def get_extra_includes(xpu_name):
    """Load codegen/<xpu_name>/core/include.json.

    Expected shape (from how parse_includes consumes it): a dict keyed by
    domain ("core") whose values map generated file names to lists of
    include paths — TODO confirm against an actual include.json.
    """
    include_path = os.path.join(CODEGEN_PATH, xpu_name, "core", "include.json")
    # json.load on a managed handle replaces the original
    # json.decoder.JSONDecoder().decode(open(...).read()), which both
    # leaked the file handle and re-implemented the stdlib entry point.
    with open(include_path, "rt") as f:
        return json.load(f)

def parse_includes(s: str, d: Dict[str, str], k: str, domain: str):
    """Expand {EXTRA_INCLUDE_FILES} in s with one #include line per entry.

    d[domain][k] is the list of header paths to include; each becomes a
    line of the form `#include "path"\\n`.
    """
    include_lines = ['#include "{}"\n'.format(header) for header in d[domain][k]]
    return s.replace("{EXTRA_INCLUDE_FILES}", "".join(include_lines))

def get_runtime_params(xpu_name):
    """Load codegen/<xpu_name>/core/runtime_params.json.

    Expected shape (from parse_RUNTIME_PARAMS): a dict mapping C++ type
    name -> member name for extra runtime data members.
    """
    # Renamed misleading local (was `include_path`) and switched to
    # json.load with a context manager — the original leaked the handle
    # and hand-rolled JSONDecoder().decode(...).
    params_path = os.path.join(CODEGEN_PATH, xpu_name, "core", "runtime_params.json")
    with open(params_path, "rt") as f:
        return json.load(f)

def parse_RUNTIME_PARAMS(s: str, d: Dict[str, str]):
    """Expand {RUNTIME_PARAMS} in s into `type name;` member declarations.

    Each (type, name) pair in d emits one declaration followed by a
    newline and tab so the generated members stay indented in the header.
    """
    declarations = "".join(
        "{t} {n};\n\t".format(t=cpp_type, n=member)
        for cpp_type, member in d.items()
    )
    return s.replace("{RUNTIME_PARAMS}", declarations)

def genCore(xpu_name):
    """Generate source/backend/<xpu_name>/core/<XPU>Backend.{hpp,cpp}.

    Reads the core templates, substitutes symbols from symbol.json,
    fills the copyright header, extra includes and runtime parameters,
    writes the generated sources, and copies <XPU>BackendUtil.cpp.
    """
    template_path = os.path.join(CODEGEN_BACKEND_PATH, "templates", "core")
    # Context managers replace the original bare open(...).read() calls,
    # which leaked file handles (and the output handles were never
    # explicitly flushed/closed).
    with open(os.path.join(template_path, "{XPU}Backend.hpp"), "rt") as f:
        XPUBackend_hpp = f.read()
    with open(os.path.join(template_path, "{XPU}Backend.cpp"), "rt") as f:
        XPUBackend_cpp = f.read()
    with open(os.path.join(CODEGEN_PATH, xpu_name, "core", "symbol.json"), "rt") as f:
        parse_dict = json.load(f)
    # parse symbols: substitute every {key} from symbol.json (e.g. {XPU})
    for kv in parse_dict.items():
        XPUBackend_hpp = parse_str(XPUBackend_hpp, kv)
        XPUBackend_cpp = parse_str(XPUBackend_cpp, kv)
    XPUBackend_hpp_name = "{XPU}Backend.hpp".format(XPU=parse_dict["XPU"])
    XPUBackend_cpp_name = "{XPU}Backend.cpp".format(XPU=parse_dict["XPU"])
    # parse copyright (per-output-file name goes into the header)
    copyright = get_copyright()
    XPUBackend_hpp = parse_copyright(XPUBackend_hpp, copyright.format(THIS_FILE_NAME=XPUBackend_hpp_name))
    XPUBackend_cpp = parse_copyright(XPUBackend_cpp, copyright.format(THIS_FILE_NAME=XPUBackend_cpp_name))
    # parse include files
    extra_includes = get_extra_includes(xpu_name)
    XPUBackend_hpp = parse_includes(XPUBackend_hpp, extra_includes, XPUBackend_hpp_name, "core")
    XPUBackend_cpp = parse_includes(XPUBackend_cpp, extra_includes, XPUBackend_cpp_name, "core")
    # parse Runtime Params (extra data members for the Runtime class)
    runtime_params = get_runtime_params(xpu_name)
    XPUBackend_hpp = parse_RUNTIME_PARAMS(XPUBackend_hpp, runtime_params)
    XPUBackend_cpp = parse_RUNTIME_PARAMS(XPUBackend_cpp, runtime_params)
    # write generated sources; print() keeps the original trailing newline
    out_dir = os.path.join(BACKEND_PATH, xpu_name, "core")
    with open(os.path.join(out_dir, XPUBackend_hpp_name), "wt") as f:
        print(XPUBackend_hpp, file=f)
    with open(os.path.join(out_dir, XPUBackend_cpp_name), "wt") as f:
        print(XPUBackend_cpp, file=f)
    # hand-written util file is copied through unchanged
    util_name = "{XPU}BackendUtil.cpp".format(XPU=parse_dict["XPU"])
    shutil.copy(src=os.path.join(CODEGEN_PATH, xpu_name, "core", util_name),
                dst=os.path.join(out_dir, util_name))


def genBackend(xpu_name):
    """Create the backend directory skeleton and generate its core sources."""
    backend_root = os.path.join(BACKEND_PATH, xpu_name)
    for subdir in ("", "core", "execution"):
        os.makedirs(os.path.join(backend_root, subdir), exist_ok=True)

    genCore(xpu_name)

    # genCMakeLists

if __name__ == "__main__":
    # CLI: python generate.py --name <xpu-name>
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", type=str)
    args = parser.parse_args()
    xpu_name = args.name
    # PEP 8: compare against None with `is`, not `==`.
    if xpu_name is None:
        print("Format: python generate.py --name [xpu-name]")
        # sys.exit over the builtin exit(): the builtin is a site-module
        # convenience intended for the interactive interpreter.
        sys.exit(1)

    genBackend(xpu_name)
1 change: 1 addition & 0 deletions codegen/backend/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
pyjson
1 change: 1 addition & 0 deletions codegen/backend/templates/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3rd_party/
Empty file.
7 changes: 7 additions & 0 deletions codegen/backend/templates/copyright.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
//
// {THIS_FILE_NAME}
// MNN
//
// Created by MNN on {CURRENT_TIME}.
// Copyright © 2018, Alibaba Group Holding Limited
//
170 changes: 170 additions & 0 deletions codegen/backend/templates/core/{XPU}Backend.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,170 @@
{COPYRIGHT}


{EXTRA_INCLUDE_FILES}
// MNN headers
#include "{XPU}Backend.hpp"
#include <core/Macro.h>
#include <core/TensorUtils.hpp>
#include <stdlib.h>
//#define MNN_OPEN_TIME_TRACE
#include <MNN/AutoTime.hpp>

namespace MNN {

// Runtime Part
// <begin Constructor and Destructor
// Records the requested power/memory/precision modes (user config wins
// over the Normal defaults), then brings up the device and its resource
// pool. Setup failures are logged via MNN_ERROR but do not abort
// construction — NOTE(review): callers get a constructed-but-broken
// runtime in that case; confirm this is the intended contract.
{XPU}Runtime::{XPU}Runtime(const Backend::Info& info) {
    // 0. initialize all parameters
    mInfo = info;
    mPower = BackendConfig::Power_Normal;
    mMemory = BackendConfig::Memory_Normal;
    mPrecision = BackendConfig::Precision_Normal;
    if (info.user != nullptr) {
        // user-supplied BackendConfig overrides the defaults
        mPrecision = info.user->precision;
        mPower = info.user->power;
        mMemory = info.user->memory;
    }
    // 1. Device & Library setup
    auto err = initDevice();
    if (err != ErrorCode::NO_ERROR) {
        MNN_ERROR("[{XPU}]: initDevice failed!\n");
    }
    // 2. Resource (Buffer) Pool setup
    err = setupResourcePool();
    if (err != ErrorCode::NO_ERROR) {
        MNN_ERROR("[{XPU}]: setupResourcePool failed!\n");
    }
}
// Tear down in reverse: release everything the resource pool owns.
{XPU}Runtime::~{XPU}Runtime() {
    // release ResourcePool
    releaseResourcePool();
}

// Create a Backend instance backed by this Runtime; an optional
// per-session BackendConfig overrides the runtime-level modes.
// FIX: the original did `new {XPU}Backend(this)`, but the only declared
// constructor is {XPU}Backend(MNNForwardType, {XPU}Runtime*) — wrong
// arity, and `this` is const-qualified inside a const method. Pass the
// backend's forward-type macro and cast away const to match the ctor.
Backend* {XPU}Runtime::onCreate(const BackendConfig* config, Backend* origin) const {
    if (config != nullptr) {
        // NOTE(review): writing members inside a const method only
        // compiles if mPrecision/mPower/mMemory are declared mutable in
        // {XPU}Backend.hpp — confirm.
        mPrecision = config->precision;
        mPower = config->power;
        mMemory = config->memory;
    }
    return new {XPU}Backend(MNN_FORWARD_{XPU_FOWARD_TYPE}, const_cast<{XPU}Runtime*>(this));
}
// Which graph compiler MNN should use for this backend.
// NOTE(review): Compiler_Origin presumably keeps the original op graph —
// confirm against the Runtime::CompilerType documentation.
Runtime::CompilerType {XPU}Runtime::onGetCompilerType() const {
    return Compiler_Origin;
}
// Free cached resources under memory pressure; `level` is forwarded to
// the pool. ("Gabage" matches the spelling of the MNN Runtime interface.)
void {XPU}Runtime::onGabageCollect(int level) {
    // release ResourcePool
    releaseResourcePool(level);
}

// Backend Part
// <begin Constructor and Destructor
// Snapshots the mode settings from the owning runtime, runs backend
// parameter init, then the optional JIT pre-build / pre-tuning hooks.
{XPU}Backend::{XPU}Backend(MNNForwardType type, {XPU}Runtime* rt) : Backend(type) {
    // 0. initialize all parameters
    mRuntime = rt;
    mPower = mRuntime->mPower;
    mMemory = mRuntime->mMemory;
    mPrecision = mRuntime->mPrecision;
    initParam();
    // 1. possible jit pre-build & pre-tuning setup
    jitPreBuild();
    jitPreTuning();
}
// NOTE(review): body is empty — any temporary resources acquired by the
// backend must be released here once the stubs are filled in.
{XPU}Backend::~{XPU}Backend() {
    // release all temporary resources
}
// end Constructor and Destructor>

// <begin Execution Registration & Creation
// static OpType -> Execution Creator map
// Lazily-created registry; std::call_once makes first-use construction
// thread-safe. The map is heap-allocated and intentionally never freed —
// it must outlive all static registrations.
static inline std::map<OpType, {XPU}Backend::Creator*>* getCreatorMap() {
    static std::once_flag of;
    static std::map<OpType, {XPU}Backend::Creator*>* ret = nullptr;
    std::call_once(of, [&]() { ret = new std::map<OpType, {XPU}Backend::Creator*>; });
    return ret;
}
// Register a creator for op type t. Duplicate registrations are rejected
// (logged, returns false); the map takes the pointer without owning it.
bool {XPU}Backend::addCreator(OpType t, Creator* c) {
    auto map = getCreatorMap();
    if (map->find(t) != map->end()) {
        MNN_PRINT("Error: %d type has be multi-added\n", t);
        return false;
    }
    map->insert(std::make_pair(t, c));
    return true;
}
// Look up the registered Creator for op->type() and build an Execution.
// Returns nullptr when the op type has no creator or the creator declines.
// FIX: the original dereferenced op->type() first and only tested
// `op != nullptr` afterwards inside the error branches — a dead guard
// that could never prevent the crash it was written for. Check up front.
Execution* {XPU}Backend::onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op) {
    if (op == nullptr) {
        MNN_ERROR("[{XPU}] null op passed to onCreate\n");
        return nullptr;
    }

    auto map = getCreatorMap();
    auto iter = map->find(op->type());

    // error handle 1: no creator registered for this op type
    if (iter == map->end()) {
        MNN_ERROR("map not find !!! \n");
        if (op->name() != nullptr) {
            MNN_PRINT("[{XPU}] Don't support type %d, %s\n", op->type(), op->name()->c_str());
        }
        return nullptr;
    }

    // Create
    auto exe = iter->second->onCreate(inputs, outputs, op, this);

    // error handle 2: the creator refused to build an execution
    if (nullptr == exe) {
        MNN_ERROR("nullptr == exe !!! \n");
        if (op->name() != nullptr) {
            MNN_PRINT("[{XPU}] The Creator Don't support type %d, %s\n", op->type(), op->name()->c_str());
        }
        return nullptr;
    }

    return exe;
}
// end Execution Registration & Creation>

// 2. Pipeline Functions
// Hooks bracketing graph execution and tensor resize. All are stubs in
// this template — TODO: add command submission / synchronization when the
// target XPU requires it.
void {XPU}Backend::onExecuteBegin() const {

}
void {XPU}Backend::onExecuteEnd() const {

}
void {XPU}Backend::onResizeBegin() {

}
// Stub: unconditionally reports success.
ErrorCode {XPU}Backend::onResizeEnd() {
    return NO_ERROR;
}

// 3. Buffer Management
// NOTE(review): stub — returning nullptr presumably signals allocation
// failure to the caller; a real backend must return a MemObj owning
// device memory for `tensor`. Confirm against Backend::onAcquire docs.
MemObj* {XPU}Backend::onAcquire(const Tensor* tensor, StorageType storageType) {
    return nullptr;
}
// Stub: reports success without freeing anything (nothing is allocated yet).
bool {XPU}Backend::onClearBuffer() {
    return true;
}
// Stub: host<->device tensor copy not implemented yet.
void {XPU}Backend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const {

}


// <begin insert new runtime to creator
// Factory hooked into MNN's runtime registry; builds a {XPU}Runtime on
// demand for the registered forward type.
class {XPU}RuntimeCreator : public RuntimeCreator {
    // FIX: the original wrote `std::static_cast<Runtime*>(...)`, which is
    // not valid C++ — static_cast is a keyword, not a name in namespace
    // std. The derived-to-base pointer conversion is implicit anyway, so
    // no cast is needed.
    Runtime* onCreate(const Backend::Info &info) const {
        return new {XPU}Runtime(info);
    }
    // Accept every Backend::Info unconditionally (template default).
    bool onValid(Backend::Info& info) const {
        return true;
    }
};

// Self-registration: the immediately-invoked lambda runs during static
// initialization and inserts this backend's runtime creator into MNN's
// global registry under MNN_FORWARD_{XPU_FOWARD_TYPE}.
static const auto __{XPU_LOWER}_global_initializer = []() {
    MNNInsertExtraRuntimeCreator(MNN_FORWARD_{XPU_FOWARD_TYPE}, new {XPU}RuntimeCreator, true);
    return true;
}();
// insertion end>
} // namespace MNN
110 changes: 110 additions & 0 deletions codegen/backend/templates/core/{XPU}Backend.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
{COPYRIGHT}

#ifndef MNN_{XPU}BACKEND_Hpp
#define MNN_{XPU}BACKEND_Hpp


{EXTRA_INCLUDE_FILES}
// MNN headers
#include <MNN/ErrorCode.hpp>
#include <core/Backend.hpp>
#include <core/Execution.hpp>
#include "MNN_generated.h"

// stl libs
#include <map>


namespace MNN {

// {XPU}Runtime: long-lived per-device state (device setup, resource pool)
// from which {XPU}Backend instances are created.
class {XPU}Runtime : public Runtime {
public:
    // 0. Constructor and Destructor
    {XPU}Runtime(const Backend::Info& info);
    virtual ~{XPU}Runtime();
    virtual CompilerType onGetCompilerType() const override;

    // 1. Backend Creation
    virtual Backend* onCreate(const BackendConfig* conf, Backend* origin) const override;

    // 2. gc function
    virtual void onGabageCollect(int level) override;

    // 3. jit compilation functions

    // 4. jit compilation/execution info cache
    // NOTE(review): declared here but not defined in the .cpp template —
    // the generated backend will fail to link unless each XPU provides
    // these (e.g. in {XPU}BackendUtil.cpp). Confirm.
    virtual bool onSetCache(const void* buffer, size_t size) override;
    virtual std::pair<const void*, size_t> onGetCache() override;

private:
    ErrorCode initDevice();
    ErrorCode setupResourcePool();
    void releaseResourcePool(int level = 100);

private:
    Backend::Info mInfo;
    // FIX: mutable — the .cpp template's onCreate() is a const override
    // yet assigns these three members to apply a per-backend
    // BackendConfig; without mutable that assignment cannot compile.
    mutable BackendConfig::PowerMode mPower;
    mutable BackendConfig::MemoryMode mMemory;
    mutable BackendConfig::PrecisionMode mPrecision;
    {RUNTIME_PARAMS}

    // friend class declaration
    friend class {XPU}Backend;
};

// < {XPU}Backend begin
// Per-session backend: creates Executions for ops, manages tensor
// buffers, and brackets pipeline execution.
class {XPU}Backend : public Backend {
public:
    // 0. Constructor and Destructor
    {XPU}Backend(MNNForwardType type, {XPU}Runtime* rt);
    virtual ~{XPU}Backend();

    // 1. Execution Registration & Creation
    virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op) override;
    // Factory interface each op implementation registers via addCreator().
    class Creator {
    public:
        // FIX: a polymorphic base deleted through a base pointer needs a
        // virtual destructor; without it, deleting a registered creator
        // through Creator* is undefined behavior.
        virtual ~Creator() = default;
        virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                    const MNN::Op* op, Backend* backend) const = 0;
    };
    static bool addCreator(OpType t, Creator* c);

    // 2. Pipeline Functions
    virtual void onExecuteBegin() const override;
    virtual void onExecuteEnd() const override;
    virtual void onResizeBegin() override; // If inherit default, do nothing
    virtual ErrorCode onResizeEnd() override;

    // 3. Buffer Management
    virtual MemObj* onAcquire(const Tensor* tensor, StorageType storageType) override;
    // MNN_PUBLIC bool onAcquireBuffer(const Tensor* tensor, StorageType storageType);
    // MNN_PUBLIC bool onReleaseBuffer(const Tensor* tensor, StorageType storageType);
    virtual bool onClearBuffer() override;
    virtual void onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const override;

private:
    void initParam();
    ErrorCode jitPreBuild();
    ErrorCode jitPreTuning();

private:
    // NOTE(review): non-owning; assumes the runtime outlives every
    // backend it creates — confirm against MNN's Runtime lifetime rules.
    const {XPU}Runtime* mRuntime;
    BackendConfig::PowerMode mPower;
    BackendConfig::MemoryMode mMemory;
    BackendConfig::PrecisionMode mPrecision;
};

// 1. Execution Creator Register
// Helper: instantiate (typically as a static variable in an op's .cpp)
// to register creator T for the given op type at static-init time. The
// heap-allocated creator is handed to addCreator and never freed — it
// lives in the registry for the whole process.
template <class T>
class {XPU}CreatorRegister {
public:
    {XPU}CreatorRegister(OpType type) {
        T *t = new T; {XPU}Backend::addCreator(type, t);
    }
    ~{XPU}CreatorRegister() = default;
};

// {XPU}Backend end>

} // MNN

#endif // MNN_{XPU}BACKEND_Hpp
2 changes: 2 additions & 0 deletions codegen/qnn/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
install_Hexagon.json
install_qnn_ai.json
Loading
Loading