[ExecuTorch] Model Coverage, Partitioner, Quantizer, Debug Handle #2178

Merged · 4 commits · Apr 1, 2024
12 changes: 6 additions & 6 deletions .gitlab-ci.yml
@@ -114,7 +114,7 @@ test_py39_tf2_intel-1:
PYTHON: "3.9"
TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow
WHEEL_PATH: build/dist/*cp39*10_15*
REQUIREMENTS: reqs/test.pip
REQUIREMENTS: reqs/test_tf2.pip

test_py39_tf2_intel-2:
<<: *test_macos_pkg
@@ -126,7 +126,7 @@ test_py39_tf2_intel-2:
PYTHON: "3.9"
TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow2
WHEEL_PATH: build/dist/*cp39*10_15*
REQUIREMENTS: reqs/test.pip
REQUIREMENTS: reqs/test_tf2.pip

test_py39_mil_intel:
<<: *test_macos_pkg
@@ -174,7 +174,7 @@ test_py39_milproto_intel:
WHEEL_PATH: build/dist/*cp39*10_15*
TEST_PACKAGE: coremltools.converters.mil.frontend.milproto
PYTHON: "3.9"
REQUIREMENTS: reqs/test.pip
REQUIREMENTS: reqs/test_tf2.pip



@@ -212,7 +212,7 @@ test_py310_tf2-1:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow
WHEEL_PATH: build/dist/*cp310*11*
REQUIREMENTS: reqs/test.pip
REQUIREMENTS: reqs/test_tf2.pip

test_py310_tf2-2:
<<: *test_macos_pkg
@@ -224,7 +224,7 @@ test_py310_tf2-2:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow2
WHEEL_PATH: build/dist/*cp310*11*
REQUIREMENTS: reqs/test.pip
REQUIREMENTS: reqs/test_tf2.pip

test_py310_mil:
<<: *test_macos_pkg
@@ -272,7 +272,7 @@ test_py310_milproto:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.milproto
WHEEL_PATH: build/dist/*cp310*11*
REQUIREMENTS: reqs/test.pip
REQUIREMENTS: reqs/test_tf2.pip



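These CI edits only swap the requirements file for the TF2-related jobs from reqs/test.pip to reqs/test_tf2.pip. As a rough illustration of what each of these jobs amounts to, here is a hedged Python sketch; `run_test_job` and the exact install/pytest sequence are assumptions for illustration, not the repository's actual CI scripts (those live behind the `<<: *test_macos_pkg` template).

```python
# Hypothetical sketch only: the real job logic lives in the CI templates/scripts,
# not in .gitlab-ci.yml itself.
import glob
import subprocess
import sys

def run_test_job(wheel_glob: str, test_package: str, requirements: str) -> None:
    wheel = glob.glob(wheel_glob)[0]  # e.g. build/dist/*cp39*10_15*
    subprocess.check_call([sys.executable, "-m", "pip", "install", wheel])
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", requirements])
    # Run the tests for the selected subpackage.
    subprocess.check_call([sys.executable, "-m", "pytest", "--pyargs", test_package])

run_test_job(
    "build/dist/*cp39*10_15*",
    "coremltools.converters.mil.frontend.tensorflow",
    "reqs/test_tf2.pip",
)
```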
4 changes: 4 additions & 0 deletions coremlpython/CoreMLPython.h
@@ -3,6 +3,8 @@
// Use of this source code is governed by a BSD-3-clause license that can be
// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause


// Disable a few warnings and include pybind first, then re-enable warnings
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wexit-time-destructors"
#pragma clang diagnostic ignored "-Wdocumentation"
@@ -28,11 +30,13 @@ namespace CoreML {
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
static py::str compileModel(const std::string& urlStr);
static int32_t maximumSupportedSpecificationVersion();
static void setComputeUnit(MLModelConfiguration *configuration, const std::string& computeUnits);

Model(const Model&) = delete;
Model& operator=(const Model&) = delete;
~Model();
explicit Model(const std::string& urlStr, const std::string& computeUnits);
explicit Model(MLModel* m_model, NSURL* compiledUrl, bool deleteCompiledModelOnExit);

py::dict predict(const py::dict& input) const;
py::list batchPredict(const py::list& batch) const;
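The header now declares a static `Model::setComputeUnit` helper and an extra constructor that wraps an existing `MLModel*`. From Python, these bindings are reached through the `compute_units` argument when loading a model. A minimal sketch, assuming a placeholder model path and input name:

```python
import coremltools as ct

# The ComputeUnit enum values correspond to the strings ("ALL", "CPU_ONLY",
# "CPU_AND_GPU", "CPU_AND_NE") that Model::setComputeUnit switches on.
model = ct.models.MLModel(
    "MyModel.mlpackage",                      # placeholder path
    compute_units=ct.ComputeUnit.CPU_AND_NE,  # per this diff, requires macOS 13+
)
prediction = model.predict({"input": [1.0, 2.0, 3.0]})  # input names are model-specific
```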
63 changes: 33 additions & 30 deletions coremlpython/CoreMLPython.mm
@@ -33,18 +33,6 @@ bool usingMacOS13OrHigher() {
return (NSProtocolFromString(@"MLProgram") != nil);
}

bool isCompiledModelPath(const std::string& path) {
const std::string fileExtension = ".mlmodelc";

size_t start = path.length() - fileExtension.length();
if (path.back() == '/') {
start--;
}
const std::string match = path.substr(start, fileExtension.length());

return (match == fileExtension);
}

Model::~Model() {
@autoreleasepool {
NSFileManager *fileManager = [NSFileManager defaultManager];
@@ -58,7 +46,7 @@ bool isCompiledModelPath(const std::string& path) {
@autoreleasepool {
NSError *error = nil;

if (! isCompiledModelPath(urlStr)) {
if (! Utils::isCompiledModelPath(urlStr)) {
// Compile the model
NSURL *specUrl = Utils::stringToNSURL(urlStr);

@@ -89,31 +77,23 @@ bool isCompiledModelPath(const std::string& path) {
compiledUrl = Utils::stringToNSURL(urlStr);
}

// Set compute unit
MLModelConfiguration *configuration = [MLModelConfiguration new];
if (computeUnits == "CPU_ONLY") {
configuration.computeUnits = MLComputeUnitsCPUOnly;
} else if (computeUnits == "CPU_AND_GPU") {
configuration.computeUnits = MLComputeUnitsCPUAndGPU;
} else if (computeUnits == "CPU_AND_NE") {
if (usingMacOS13OrHigher()) {
#if BUILT_WITH_MACOS13_SDK
configuration.computeUnits = MLComputeUnitsCPUAndNeuralEngine;
#endif // BUILT_WITH_MACOS13_SDK
} else {
throw std::runtime_error("CPU_AND_NE is only available on macOS >= 13.0");
}
} else {
assert(computeUnits == "ALL");
configuration.computeUnits = MLComputeUnitsAll;
}
setComputeUnit(configuration, computeUnits);

// Create MLModel
m_model = [MLModel modelWithContentsOfURL:compiledUrl configuration:configuration error:&error];
Utils::handleError(error);
}
}


Model::Model(MLModel* mlModel, NSURL* compiledUrl, bool deleteCompiledModelOnExit)
: m_model(mlModel),
compiledUrl(compiledUrl),
m_deleteCompiledModelOnExit(deleteCompiledModelOnExit)
{
}

py::dict Model::predict(const py::dict& input) const {
@autoreleasepool {
NSError *error = nil;
@@ -127,6 +107,26 @@ bool isCompiledModelPath(const std::string& path) {
}


void Model::setComputeUnit(MLModelConfiguration *configuration, const std::string& computeUnits) {
if (computeUnits == "CPU_ONLY") {
configuration.computeUnits = MLComputeUnitsCPUOnly;
} else if (computeUnits == "CPU_AND_GPU") {
configuration.computeUnits = MLComputeUnitsCPUAndGPU;
} else if (computeUnits == "CPU_AND_NE") {
if (usingMacOS13OrHigher()) {
#if BUILT_WITH_MACOS13_SDK
configuration.computeUnits = MLComputeUnitsCPUAndNeuralEngine;
#endif // BUILT_WITH_MACOS13_SDK
} else {
throw std::runtime_error("CPU_AND_NE is only available on macOS >= 13.0");
}
} else {
assert(computeUnits == "ALL");
configuration.computeUnits = MLComputeUnitsAll;
}
}


py::list Model::batchPredict(const py::list& batch) const {
@autoreleasepool {
NSError* error = nil;
@@ -156,6 +156,9 @@ bool isCompiledModelPath(const std::string& path) {


py::str Model::getCompiledModelPath() const {
if (this->compiledUrl == nil) {
return nil;
}
return [this->compiledUrl.path UTF8String];
}

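Two behavioral points in this file: compute-unit selection is factored out into `Model::setComputeUnit`, and `getCompiledModelPath` now returns nil when there is no compiled model URL to report instead of reading `compiledUrl.path` unconditionally. A hedged Python sketch of the corresponding user-facing flow; `get_compiled_model_path` and `models.CompiledMLModel` are assumed to be available in the installed coremltools version:

```python
import coremltools as ct

model = ct.models.MLModel("MyModel.mlpackage")    # placeholder path
compiled_path = model.get_compiled_model_path()   # may be None when no compiled URL exists
if compiled_path is not None:
    # Reload the already-compiled .mlmodelc directly, skipping recompilation.
    compiled = ct.models.CompiledMLModel(compiled_path, compute_units=ct.ComputeUnit.ALL)
```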
1 change: 1 addition & 0 deletions coremlpython/CoreMLPythonUtils.h
@@ -23,6 +23,7 @@ namespace CoreML {
namespace Python {
namespace Utils {

bool isCompiledModelPath(const std::string& path);
NSURL * stringToNSURL(const std::string& str);
void handleError(NSError *error);

12 changes: 12 additions & 0 deletions coremlpython/CoreMLPythonUtils.mm
@@ -29,6 +29,18 @@

using namespace CoreML::Python;

bool Utils::isCompiledModelPath(const std::string& path) {
const std::string fileExtension = ".mlmodelc";

size_t start = path.length() - fileExtension.length();
if (path.back() == '/') {
start--;
}
const std::string match = path.substr(start, fileExtension.length());

return (match == fileExtension);
}

NSURL * Utils::stringToNSURL(const std::string& str) {
NSString *nsstr = [NSString stringWithUTF8String:str.c_str()];
return [NSURL fileURLWithPath:nsstr];
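`Utils::isCompiledModelPath` is the helper moved here from CoreMLPython.mm: a path counts as an already-compiled model if it ends in `.mlmodelc`, with or without a trailing slash. A simplified Python restatement for clarity (it strips any number of trailing slashes, whereas the Objective-C++ version accounts for exactly one):

```python
def is_compiled_model_path(path: str) -> bool:
    """Return True if the path points at a compiled Core ML model bundle."""
    # ".mlmodelc" bundles may be referenced with or without a trailing slash.
    return path.rstrip("/").endswith(".mlmodelc")

assert is_compiled_model_path("model.mlmodelc")
assert is_compiled_model_path("/tmp/model.mlmodelc/")
assert not is_compiled_model_path("model.mlpackage")
```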
4 changes: 2 additions & 2 deletions coremltools/__init__.py
@@ -21,6 +21,7 @@

For more information: http://developer.apple.com/documentation/coreml
"""

from enum import Enum as _Enum
from logging import getLogger as _getLogger

@@ -90,15 +91,14 @@ class ComputeUnit(_Enum):

# expose sub packages as directories
from . import converters, models, optimize, proto

# expose unified converter in coremltools package level
from .converters import ClassifierConfig
from .converters import ColorLayout as colorlayout
from .converters import EnumeratedShapes, ImageType, RangeDim, Shape, TensorType, convert
from .converters.mil._deployment_compatibility import AvailableTarget as target
from .converters.mil.mil.passes.defs import quantization as transform
from .converters.mil.mil.passes.pass_pipeline import PassPipeline
from .converters.mil.mil.passes.defs.quantization import ComputePrecision as precision
from .converters.mil.mil.passes.pass_pipeline import PassPipeline
from .models import utils
from .models.ml_program import compression_utils

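Aside from reordering the `PassPipeline` import, this file is where the package-level names (`precision`, `PassPipeline`, `ComputeUnit`, `convert`, the input types) come from. A minimal usage sketch, assuming PyTorch is installed; the traced ReLU is only a placeholder model:

```python
import coremltools as ct
import torch

# Trace a trivial model purely to have something to convert.
traced = torch.jit.trace(torch.nn.ReLU().eval(), torch.rand(1, 3))

mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="x", shape=(1, 3))],
    convert_to="mlprogram",
    compute_precision=ct.precision.FLOAT16,  # re-exported ComputePrecision
    pass_pipeline=ct.PassPipeline.DEFAULT,   # re-exported PassPipeline
    compute_units=ct.ComputeUnit.CPU_ONLY,   # enum defined in this module
)
```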
2 changes: 1 addition & 1 deletion coremltools/_deps/__init__.py
@@ -154,7 +154,7 @@ def __get_sklearn_version(version):

# ---------------------------------------------------------------------------------------
_HAS_TORCH = True
_TORCH_MAX_VERSION = "2.1.0"
_TORCH_MAX_VERSION = "2.2.0"
_HAS_TORCH_EXPORT_API = False
try:
import torch
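The bump of `_TORCH_MAX_VERSION` to "2.2.0" only widens the range of torch versions that coremltools treats as tested. A hedged sketch of how such a cap is typically enforced; the real check lives elsewhere in `coremltools._deps` and may differ in detail:

```python
import warnings
from packaging.version import Version
import torch

_TORCH_MAX_VERSION = "2.2.0"

installed = Version(torch.__version__.split("+")[0])  # drop local tags like "+cpu"
if installed > Version(_TORCH_MAX_VERSION):
    warnings.warn(
        f"torch {installed} has not been tested with this coremltools release; "
        f"the most recent tested version is {_TORCH_MAX_VERSION}."
    )
```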