#66 Change the file path of the output path to the folder path (#76)
Only-bottle authored Jan 9, 2024
1 parent 9b1ba13 commit 93b9cb4
Showing 14 changed files with 218 additions and 47 deletions.
2 changes: 1 addition & 1 deletion examples/compressor/automatic_compression.py
@@ -10,7 +10,7 @@
FRAMEWORK = Framework.TENSORFLOW_KERAS
INPUT_SHAPES = [{"batch": 1, "channel": 3, "dimension": [32, 32]}]
INPUT_MODEL_PATH = "./examples/sample_models/mobilenetv1.h5"
OUTPUT_MODEL_PATH = "./outputs/compressed/mobilenetv1_cifar100_automatic.h5"
OUTPUT_MODEL_PATH = "./outputs/compressed/mobilenetv1_cifar100_automatic"
COMPRESSION_RATIO = 0.5

compressed_model = compressor.automatic_compression(
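The pattern above repeats across the example scripts: the output argument is now a folder-style path without a file extension, and the SDK derives the file name and extension from the model's framework. A minimal sketch of the new call based on the file above; the ModelCompressor constructor, the import locations, and the exact parameter names are assumptions inferred from the diff, not confirmed by it.

from netspresso.compressor import ModelCompressor, Task, Framework

# Assumed constructor; credentials handling is not part of this diff.
compressor = ModelCompressor(email="YOUR_EMAIL", password="YOUR_PASSWORD")

compressed_model = compressor.automatic_compression(
    model_name="mobilenetv1_cifar100",
    task=Task.IMAGE_CLASSIFICATION,
    framework=Framework.TENSORFLOW_KERAS,
    input_shapes=[{"batch": 1, "channel": 3, "dimension": [32, 32]}],
    input_path="./examples/sample_models/mobilenetv1.h5",
    # Before this commit: a file path such as ".../mobilenetv1_cifar100_automatic.h5".
    # After this commit: a folder-style path; the SDK appends the framework's extension.
    output_path="./outputs/compressed/mobilenetv1_cifar100_automatic",
    compression_ratio=0.5,
)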
2 changes: 1 addition & 1 deletion examples/compressor/manual_compression.py
@@ -52,7 +52,7 @@

# Compress Model
COMPRESSED_MODEL_NAME = "test_l2norm"
OUTPUT_PATH = "./outputs/compressed/graphmodule_manual.pt"
OUTPUT_PATH = "./outputs/compressed/graphmodule_manual"
compressed_model = compressor.compress_model(
compression=compression_1,
model_name=COMPRESSED_MODEL_NAME,
2 changes: 1 addition & 1 deletion examples/compressor/recommendation_compression.py
@@ -16,7 +16,7 @@
TASK = Task.IMAGE_CLASSIFICATION
FRAMEWORK = Framework.PYTORCH
INPUT_MODEL_PATH = "./examples/sample_models/graphmodule.pt"
OUTPUT_MODEL_PATH = "./outputs/compressed/graphmodule_recommend.pt"
OUTPUT_MODEL_PATH = "./outputs/compressed/graphmodule_recommend"
INPUT_SHAPES = [{"batch": 1, "channel": 3, "dimension": [224, 224]}]
COMPRESSION_METHOD = CompressionMethod.PR_L2
RECOMMENDATION_METHOD = RecommendationMethod.SLAMP
6 changes: 4 additions & 2 deletions examples/launcher/keras2tflite.py
@@ -1,5 +1,5 @@
from loguru import logger
from netspresso.client import SessionClient
from netspresso.clients.auth import SessionClient
from netspresso.launcher import (
ModelConverter,
ModelBenchmarker,
@@ -14,7 +14,7 @@
EMAIL = "YOUR_EMAIL"
PASSWORD = "YOUR_PASSWORD"
MODEL_PATH = "./examples/sample_models/mobilenetv1.h5"
CONVERTED_MODEL_PATH = "./outputs/converted/converted_model.tflite"
CONVERTED_MODEL_PATH = "./outputs/converted/keras2tflite"
session = SessionClient(email=EMAIL, password=PASSWORD)
converter = ModelConverter(user_session=session)

@@ -69,6 +69,7 @@
benchmarker = ModelBenchmarker(user_session=session)
benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
model_path=CONVERTED_MODEL_PATH,
target_framework=ModelFramework.TENSORFLOW_LITE,
target_device_name=DeviceName.RASPBERRY_PI_4B,
)
########################
@@ -77,6 +78,7 @@
########################
# benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
# model_path=CONVERTED_MODEL_PATH,
# target_framework=ModelFramework.TENSORFLOW_LITE,
# target_device_name=DeviceName.RASPBERRY_PI_4B,
# wait_until_done=False,
# )
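Beyond moving the import of SessionClient and switching CONVERTED_MODEL_PATH to a folder-style path, each launcher example above now passes the converted model's framework to the benchmarker explicitly. A condensed sketch of the updated benchmark step; the converter step is omitted, and the assumption that ModelFramework and DeviceName come from netspresso.launcher is based on the truncated import list in the hunk.

from netspresso.clients.auth import SessionClient
from netspresso.launcher import ModelBenchmarker, ModelFramework, DeviceName

session = SessionClient(email="YOUR_EMAIL", password="YOUR_PASSWORD")

# Folder-style path produced by the converter step; the file name and
# extension are now resolved by the SDK rather than by the caller.
CONVERTED_MODEL_PATH = "./outputs/converted/keras2tflite"

benchmarker = ModelBenchmarker(user_session=session)
benchmark_task = benchmarker.benchmark_model(
    model_path=CONVERTED_MODEL_PATH,
    # New in this commit: the target framework is passed explicitly, presumably
    # because the extension-less path no longer identifies the model format.
    target_framework=ModelFramework.TENSORFLOW_LITE,
    target_device_name=DeviceName.RASPBERRY_PI_4B,
)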
6 changes: 4 additions & 2 deletions examples/launcher/onnx2drpai.py
@@ -1,5 +1,5 @@
from loguru import logger
from netspresso.client import SessionClient
from netspresso.clients.auth import SessionClient
from netspresso.launcher import ModelConverter, ModelBenchmarker
from netspresso.launcher import (
ModelConverter,
@@ -15,7 +15,7 @@
EMAIL = "YOUR_EMAIL"
PASSWORD = "YOUR_PASSWORD"
MODEL_PATH = "./examples/sample_models/test.onnx"
CONVERTED_MODEL_PATH = "./outputs/converted/converted_drpai.zip"
CONVERTED_MODEL_PATH = "./outputs/converted/onnx2drpai"
session = SessionClient(email=EMAIL, password=PASSWORD)
converter = ModelConverter(user_session=session)

@@ -64,6 +64,7 @@
benchmarker = ModelBenchmarker(user_session=session)
benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
model_path=CONVERTED_MODEL_PATH,
target_framework=ModelFramework.DRPAI,
target_device_name=DeviceName.RENESAS_RZ_V2L,
)
########################
@@ -72,6 +73,7 @@
########################
# benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
# model_path=CONVERTED_MODEL_PATH,
# target_framework=ModelFramework.DRPAI,
# target_device_name=DeviceName.RENESAS_RZ_V2L,
# wait_until_done=False,
# )
6 changes: 4 additions & 2 deletions examples/launcher/onnx2openvino.py
@@ -1,5 +1,5 @@
from loguru import logger
from netspresso.client import SessionClient
from netspresso.clients.auth import SessionClient
from netspresso.launcher import (
ModelConverter,
ModelBenchmarker,
@@ -14,7 +14,7 @@
EMAIL = "YOUR_EMAIL"
PASSWORD = "YOUR_PASSWORD"
MODEL_PATH = "./examples/sample_models/test.onnx"
CONVERTED_MODEL_PATH = "./outputs/converted/converted_openvino.zip"
CONVERTED_MODEL_PATH = "./outputs/converted/onnx2openvino"
session = SessionClient(email=EMAIL, password=PASSWORD)
converter = ModelConverter(user_session=session)

@@ -62,6 +62,7 @@
benchmarker = ModelBenchmarker(user_session=session)
benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
model_path=CONVERTED_MODEL_PATH,
target_framework=ModelFramework.OPENVINO,
target_device_name=DeviceName.Intel_XEON_W_2233,
)
########################
@@ -70,6 +71,7 @@
########################
# benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
# model_path=CONVERTED_MODEL_PATH,
# target_framework=ModelFramework.OPENVINO,
# target_device_name=DeviceName.Intel_XEON_W_2233,
# wait_until_done=False,
# )
6 changes: 4 additions & 2 deletions examples/launcher/onnx2tensorrt.py
@@ -1,5 +1,5 @@
from loguru import logger
from netspresso.client import SessionClient
from netspresso.clients.auth import SessionClient
from netspresso.launcher import (
ModelConverter,
ModelBenchmarker,
@@ -15,7 +15,7 @@
EMAIL = "YOUR_EMAIL"
PASSWORD = "YOUR_PASSWORD"
MODEL_PATH = "./examples/sample_models/test.onnx"
CONVERTED_MODEL_PATH = "./outputs/converted/converted_model.trt"
CONVERTED_MODEL_PATH = "./outputs/converted/onnx2tensorrt"
session = SessionClient(email=EMAIL, password=PASSWORD)
converter = ModelConverter(user_session=session)

@@ -86,6 +86,7 @@
benchmarker = ModelBenchmarker(user_session=session)
benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
model_path=CONVERTED_MODEL_PATH,
target_framework=ModelFramework.TENSORRT,
target_device_name=DeviceName.JETSON_AGX_ORIN,
target_software_version=SoftwareVersion.JETPACK_5_0_1,
)
@@ -95,6 +96,7 @@
########################
# benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
# model_path=CONVERTED_MODEL_PATH,
# target_framework=ModelFramework.TENSORRT,
# target_device_name=DeviceName.JETSON_AGX_ORIN,
# target_software_version=SoftwareVersion.JETPACK_5_0_1,
# wait_until_done=False,
6 changes: 4 additions & 2 deletions examples/launcher/onnx2tflite.py
@@ -1,5 +1,5 @@
from loguru import logger
from netspresso.client import SessionClient
from netspresso.clients.auth import SessionClient
from netspresso.launcher import (
ModelConverter,
ModelBenchmarker,
@@ -14,7 +14,7 @@
EMAIL = "YOUR_EMAIL"
PASSWORD = "YOUR_PASSWORD"
MODEL_PATH = "./examples/sample_models/test.onnx"
CONVERTED_MODEL_PATH = "./outputs/converted/converted_model.tflite"
CONVERTED_MODEL_PATH = "./outputs/converted/onnx2tflite"
session = SessionClient(email=EMAIL, password=PASSWORD)
converter = ModelConverter(user_session=session)

@@ -73,6 +73,7 @@

benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
model_path=CONVERTED_MODEL_PATH,
target_framework=ModelFramework.TENSORFLOW_LITE,
target_device_name=DeviceName.RASPBERRY_PI_4B,
)
########################
@@ -81,6 +82,7 @@
########################
# benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
# model_path=CONVERTED_MODEL_PATH,
# target_framework=ModelFramework.TENSORFLOW_LITE,
# target_device_name=DeviceName.RASPBERRY_PI_4B,
# wait_until_done=False,
# )
5 changes: 3 additions & 2 deletions examples/launcher/onnx2tflite_int8.py
@@ -1,5 +1,5 @@
from loguru import logger
from netspresso.client import SessionClient
from netspresso.clients.auth import SessionClient
from netspresso.launcher import (
ModelConverter,
ModelBenchmarker,
@@ -16,7 +16,7 @@
EMAIL = "YOUR_EMAIL"
PASSWORD = "YOUR_PASSWORD"
MODEL_PATH = "./examples/sample_models/yolox_auto_compress_0.7.onnx"
CONVERTED_MODEL_PATH = "./outputs/converted/converted_model.tflite"
CONVERTED_MODEL_PATH = "./outputs/converted/onnx2tflite_int8"
session = SessionClient(email=EMAIL, password=PASSWORD)
converter = ModelConverter(user_session=session)

@@ -54,6 +54,7 @@
benchmarker = ModelBenchmarker(user_session=session)
benchmark_task: BenchmarkTask = benchmarker.benchmark_model(
model_path=CONVERTED_MODEL_PATH,
target_framework=ModelFramework.TENSORFLOW_LITE,
target_device_name=TARGET_DEVICE_NAME,
data_type=DATA_TYPE,
hardware_type=HardwareType.HELIUM,
75 changes: 53 additions & 22 deletions netspresso/compressor/__init__.py
@@ -1,3 +1,4 @@
import sys
from pathlib import Path
from typing import Dict, List, Union
from urllib import request
@@ -37,7 +38,7 @@
)
from netspresso.enums import ServiceCredit

from ..utils.credit import check_credit_balance
from ..utils import FileManager, check_credit_balance
from .utils.onnx import export_onnx


@@ -254,11 +255,6 @@ def download_model(self, model_id: str, local_path: str) -> None:
download_link = self.client.get_download_model_link(
model_id=model_id, access_token=self.user_session.access_token
)
if not Path(local_path).parent.exists():
logger.info(
f"The specified folder does not exist. Local Path: {Path(local_path).parent}"
)
Path(local_path).parent.mkdir(parents=True, exist_ok=True)
request.urlretrieve(download_link.url, local_path)
logger.info(f"Model downloaded at {Path(local_path)}")

@@ -441,11 +437,19 @@ def compress_model(
"""
try:
logger.info("Compressing model...")

model_info = self.get_model(compression.original_model_id)

default_model_path, extension = FileManager.prepare_model_path(
folder_path=output_path, framework=model_info.framework
)

current_credit = self.user_session.get_credit()
check_credit_balance(
user_credit=current_credit,
service_credit=ServiceCredit.ADVANCED_COMPRESSION,
)

data = CreateCompressionRequest(
model_id=compression.original_model_id,
model_name=model_name,
@@ -497,12 +501,16 @@ def compress_model(
self.client.compress_model(
data=data, access_token=self.user_session.access_token
)

self.download_model(
model_id=compression_info.new_model_id, local_path=output_path
model_id=compression_info.new_model_id,
local_path=default_model_path.with_suffix(extension),
)
compressed_model = self.get_model(model_id=compression_info.new_model_id)

if compressed_model.framework in [Framework.PYTORCH, Framework.ONNX]:
export_onnx(output_path, compressed_model.input_shapes)
export_onnx(default_model_path, compressed_model.input_shapes)

logger.info(
f"Compress model successfully. Compressed Model ID: {compressed_model.model_id}"
)
@@ -556,21 +564,19 @@ def recommendation_compression(

try:
logger.info("Compressing recommendation-based model...")

default_model_path, extension = FileManager.prepare_model_path(
folder_path=output_path, framework=framework
)

current_credit = self.user_session.get_credit()
check_credit_balance(
user_credit=current_credit,
service_credit=ServiceCredit.ADVANCED_COMPRESSION,
)
model = self.upload_model(
model_name=model_name,
task=task,
framework=framework,
file_path=input_path,
input_shapes=input_shapes,
)

if (
model.framework == Framework.PYTORCH
framework == Framework.PYTORCH
and compression_method == CompressionMethod.PR_NN
):
raise Exception(
@@ -604,6 +610,14 @@
f"The {compression_method} compression method is only available the VBMF recommendation method."
)

model = self.upload_model(
model_name=model_name,
task=task,
framework=framework,
file_path=input_path,
input_shapes=input_shapes,
)

data = CreateCompressionRequest(
model_id=model.model_id,
model_name=model_name,
@@ -648,12 +662,16 @@ def recommendation_compression(
self.client.compress_model(
data=data, access_token=self.user_session.access_token
)

self.download_model(
model_id=compression_info.new_model_id, local_path=output_path
model_id=compression_info.new_model_id,
local_path=default_model_path.with_suffix(extension),
)
compressed_model = self.get_model(model_id=compression_info.new_model_id)

if compressed_model.framework in [Framework.PYTORCH, Framework.ONNX]:
export_onnx(output_path, compressed_model.input_shapes)
export_onnx(default_model_path, compressed_model.input_shapes)

logger.info(
f"Recommendation compression successfully. Compressed Model ID: {compressed_model.model_id}"
)
@@ -699,11 +717,17 @@ def automatic_compression(

try:
logger.info("Compressing automatic-based model...")

default_model_path, extension = FileManager.prepare_model_path(
folder_path=output_path, framework=framework
)

current_credit = self.user_session.get_credit()
check_credit_balance(
user_credit=current_credit,
service_credit=ServiceCredit.AUTOMATIC_COMPRESSION,
)

model = self.upload_model(
model_name=model_name,
task=task,
@@ -712,22 +736,29 @@ def automatic_compression(
input_shapes=input_shapes,
)

compressed_model_name = f"{model_name}_automatic_{compression_ratio}"
data = AutoCompressionRequest(
model_id=model.model_id,
model_name=f"{model_name}_automatic_{compression_ratio}",
model_name=compressed_model_name,
recommendation_ratio=compression_ratio,
save_path=output_path,
)
logger.info("Compressing model...")
model_info = self.client.auto_compression(
data=data, access_token=self.user_session.access_token
)
self.download_model(model_id=model_info.model_id, local_path=output_path)

self.download_model(
model_id=model_info.model_id,
local_path=default_model_path.with_suffix(extension),
)
compressed_model = self.model_factory.create_compressed_model(
model_info=model_info
)
) # TODO: delete

if compressed_model.framework in [Framework.PYTORCH, Framework.ONNX]:
export_onnx(output_path, compressed_model.input_shapes)
export_onnx(default_model_path, compressed_model.input_shapes)

logger.info(
f"Automatic compression successfully. Compressed Model ID: {compressed_model.model_id}"
)
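The new path handling above is delegated to FileManager.prepare_model_path, whose implementation lives in netspresso/utils and is not rendered on this page. Judging from how its return value is used (default_model_path.with_suffix(extension), plus the removal of the old parent-directory check in download_model), it presumably creates the output folder and returns an extension-less default file path together with the framework-appropriate suffix. The sketch below is purely hypothetical: the helper name is real, but the default file name, the extension mapping, and the string-keyed framework argument are assumptions, not the actual implementation.

from pathlib import Path
from typing import Tuple

# Hypothetical extension mapping; the real code presumably keys off the
# Framework enum rather than plain strings.
EXTENSIONS = {"tensorflow_keras": ".h5", "pytorch": ".pt", "onnx": ".onnx"}

def prepare_model_path(
    folder_path: str, framework: str, default_name: str = "compressed_model"
) -> Tuple[Path, str]:
    """Create the output folder and return (extension-less model path, suffix)."""
    folder = Path(folder_path)
    folder.mkdir(parents=True, exist_ok=True)  # replaces the old per-download parent check
    return folder / default_name, EXTENSIONS.get(framework, ".pt")

# Call sites then download to default_model_path.with_suffix(extension) and,
# for PyTorCH/ONNX models, pass the extension-less path on to export_onnx.
default_model_path, extension = prepare_model_path(
    "./outputs/compressed/graphmodule_manual", "pytorch"
)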
2 changes: 1 addition & 1 deletion netspresso/compressor/utils/onnx.py
@@ -31,7 +31,7 @@ def _export_onnx(

def export_onnx(file_path: str, input_shapes: List[int]):
file_path = Path(file_path)
model = torch.load(file_path)
model = torch.load(file_path.with_suffix(".pt"))

input_shape = input_shapes[0]
sample_input = torch.randn((1, input_shape.channel, *input_shape.dimension))
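With the stem-plus-extension convention, export_onnx can no longer assume its file_path argument ends in .pt, so torch.load is pointed at file_path.with_suffix(".pt"). A small illustration of that suffix handling; the stem value and the .onnx output name are assumptions for illustration only.

from pathlib import Path

stem = Path("./outputs/compressed/graphmodule_recommend/compressed_model")  # hypothetical extension-less stem
pt_file = stem.with_suffix(".pt")      # what torch.load reads after this commit
onnx_file = stem.with_suffix(".onnx")  # the presumed ONNX export target next to it
print(pt_file, onnx_file)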