From cc6b1510e8fb350a58400f3ba4faf24f2eb9f768 Mon Sep 17 00:00:00 2001
From: Arjun Suresh <arjunsuresh1987@gmail.com>
Date: Thu, 20 Jun 2024 18:29:04 +0530
Subject: [PATCH 1/5] Added onnx dependency for 3dunet, fixes #78

---
 script/app-mlperf-inference-nvidia/_cm.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml
index 9820921f6e..e6c38b5d91 100644
--- a/script/app-mlperf-inference-nvidia/_cm.yaml
+++ b/script/app-mlperf-inference-nvidia/_cm.yaml
@@ -416,6 +416,8 @@ variations:
     - tags: get,generic-python-lib,_package.nibabel
     - tags: get,generic-python-lib,_pandas
       version_max: "1.5.3"
+    - tags: get,generic-python-lib,_package.onnx
+      version: 1.13.1
 
   3d-unet-99:
     group: model

From 22dd822294e155fb7e7041630e63bd8bff099109 Mon Sep 17 00:00:00 2001
From: Arjun Suresh <arjunsuresh1987@gmail.com>
Date: Thu, 20 Jun 2024 18:51:48 +0530
Subject: [PATCH 2/5] Fix numpy version to 1.26.4 for onnxruntime and pytorch

---
 .../app-mlperf-inference-mlcommons-python/_cm.yaml   | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
index 754b712ab3..0b92244768 100644
--- a/script/app-mlperf-inference-mlcommons-python/_cm.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
@@ -612,6 +612,9 @@ variations:
         tags: _NCHW
       ml-model:
         tags: raw,_onnx
+      numpy:
+        version_max: "1.26.4"
+        version_max_usable: "1.26.4"
     env:
       CM_MLPERF_BACKEND: onnxruntime
 
@@ -633,6 +636,9 @@ variations:
         tags: _NCHW
       ml-model:
         tags: raw,_pytorch
+      numpy:
+        version_max: "1.26.4"
+        version_max_usable: "1.26.4"
     env:
       CM_MLPERF_BACKEND: pytorch
       CM_MLPERF_BACKEND_VERSION: <<<CM_TORCH_VERSION>>>
@@ -980,6 +986,8 @@ variations:
     - tags: get,generic-python-lib,_tqdm
     - tags: get,generic-python-lib,_onnx
     - tags: get,generic-python-lib,_numpy
+      names:
+        - numpy
     - tags: get,generic-python-lib,_package.torchrec
     - tags: get,generic-python-lib,_package.pyre-extensions
     - tags: get,generic-python-lib,_package.torchsnapshot
@@ -1008,6 +1016,8 @@ variations:
     deps:
     - tags: get,generic-python-lib,_opencv-python
     - tags: get,generic-python-lib,_numpy
+      names:
+        - numpy
     - tags: get,generic-python-lib,_pycocotools
 
     env:
@@ -1024,6 +1034,8 @@ variations:
     deps:
     - tags: get,generic-python-lib,_opencv-python
     - tags: get,generic-python-lib,_numpy
+      names:
+        - numpy
     - tags: get,generic-python-lib,_pycocotools
     prehook_deps:
     - tags: get,generic-python-lib,_protobuf

From afab6877753b67108d57cd065875aa65fce00858 Mon Sep 17 00:00:00 2001
From: Arjun Suresh <arjunsuresh1987@gmail.com>
Date: Thu, 20 Jun 2024 19:23:32 +0530
Subject: [PATCH 3/5] Preparing intel-mlperf-inference-resnet50

---
 script/app-mlperf-inference-intel/_cm.yaml | 76 +++++++++++++++++++++-
 script/app-mlperf-inference/_cm.yaml       |  3 -
 2 files changed, 75 insertions(+), 4 deletions(-)

diff --git a/script/app-mlperf-inference-intel/_cm.yaml b/script/app-mlperf-inference-intel/_cm.yaml
index 7860e1123d..eb4c863837 100644
--- a/script/app-mlperf-inference-intel/_cm.yaml
+++ b/script/app-mlperf-inference-intel/_cm.yaml
@@ -97,7 +97,7 @@ deps:
     names:
       - resnet50-model
       - ml-model
-    tags: get,ml-model,resnet50,_fp32,_onnx,_from-tf
+    tags: get,ml-model,resnet50,_fp32,_pytorch
 
   - enable_if_env:
       CM_MODEL:
@@ -310,6 +310,76 @@ variations:
           - pip-package
           - optimum
 
+  resnet50,pytorch:
+    adr:
+      conda-package:
+        tags: _name.resnet50-pt
+    deps:
+      - tags: get,conda,_name.resnet50-pt
+      - tags: get,python,_conda.resnet50-pt
+        adr:
+          conda-python:
+            version: "3.9"
+      - names:
+        - conda-package
+        - mkl
+        tags: get,generic,conda-package,_package.mkl,_source.conda-forge
+        enable_if_env:
+          CM_MLPERF_INFERENCE_CODE_VERSION:
+          -  v4.0
+      - names:
+        - conda-package
+        - mkl-include
+        tags: get,generic,conda-package,_package.mkl-include,_source.intel
+      - names:
+        - conda-package
+        - llvm-openmp
+        tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge
+      - names:
+        - conda-package
+        - ncurses
+        tags: get,generic,conda-package,_package.ncurses,_source.conda-forge
+      - tags: get,generic-sys-util,_numactl
+      - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge
+        names:
+        - conda-package
+        - jemalloc
+      - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-resnet50
+        names:
+          - ipex-from-src
+      - tags: get,generic,conda-package,_package.ninja
+        names:
+        - conda-package
+        - ninja
+      - tags: get,mlcommons,inference,src
+        names:
+        - inference-src
+      - tags: get,mlcommons,inference,loadgen,_custom-python
+        names:
+        - inference-loadgen
+        env:
+          CM_PYTHON_BIN_WITH_PATH: "<<<CM_CONDA_BIN_PATH>>>/python3"
+      - tags: get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd
+        enable_if_env:
+          CM_MLPERF_INFERENCE_CODE_VERSION:
+          -  v3.1
+        env:
+          CM_PYTHON_BIN_WITH_PATH: "<<<CM_CONDA_BIN_PATH>>>/python3"
+          "+ CXXFLAGS":
+            - "-Wno-nonnull"
+            - "-Wno-maybe-uninitialized"
+            - "-Wno-uninitialized"
+            - "-Wno-free-nonheap-object"
+      - tags: get,generic-python-lib,_custom-python,_package.torch
+        env:
+          CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
+        enable_if_env:
+          CM_MLPERF_INFERENCE_CODE_VERSION:
+            - 'v4.0'
+      - tags: install,intel-neural-speed,_for-intel-mlperf-inference-v4.0-gptj,_branch.mlperf-v4-0
+        enable_if_env:
+          CM_MLPERF_INFERENCE_CODE_VERSION:
+            - 'v4.0'
   gptj_,pytorch:
     adr:
       conda-package:
diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml
index e1bb963617..56578b746c 100644
--- a/script/app-mlperf-inference/_cm.yaml
+++ b/script/app-mlperf-inference/_cm.yaml
@@ -1225,9 +1225,6 @@ variations:
 
 
 invalid_variation_combinations:
-  -
-    - resnet50
-    - pytorch
   -
     - retinanet
     - tf

From cd8cf93bb322099fc3651930cd7835e1931313ca Mon Sep 17 00:00:00 2001
From: Arjun Suresh <arjunsuresh1987@gmail.com>
Date: Thu, 20 Jun 2024 22:23:00 +0530
Subject: [PATCH 4/5] Added onnxgraphsurgeon dependency for
 mlperf-inference-nvidia

---
 script/app-mlperf-inference-nvidia/_cm.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml
index e6c38b5d91..2fb226719b 100644
--- a/script/app-mlperf-inference-nvidia/_cm.yaml
+++ b/script/app-mlperf-inference-nvidia/_cm.yaml
@@ -418,6 +418,7 @@ variations:
       version_max: "1.5.3"
     - tags: get,generic-python-lib,_package.onnx
       version: 1.13.1
+    - tags: get,generic-python-lib,_onnx-graphsurgeon
 
   3d-unet-99:
     group: model

From 45ea7b8d4c08e3fb0d0a93b0b57d257d1111dd9e Mon Sep 17 00:00:00 2001
From: Arjun Suresh <arjunsuresh1987@gmail.com>
Date: Fri, 21 Jun 2024 01:49:31 +0530
Subject: [PATCH 5/5] Improve the loadgen installation script

---
 script/get-mlperf-inference-loadgen/run.sh | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/script/get-mlperf-inference-loadgen/run.sh b/script/get-mlperf-inference-loadgen/run.sh
index 372fddfb74..fd478ddeb2 100644
--- a/script/get-mlperf-inference-loadgen/run.sh
+++ b/script/get-mlperf-inference-loadgen/run.sh
@@ -44,12 +44,14 @@ PYTHON_MINOR_VERSION=${PYTHON_SHORT_VERSION#*.}
 MLPERF_INFERENCE_PYTHON_SITE_BASE=${INSTALL_DIR}"/python"
 
 cd "${CM_MLPERF_INFERENCE_SOURCE}/loadgen"
-CFLAGS="-std=c++14 -O3" ${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel
-${CM_PYTHON_BIN_WITH_PATH} -m pip install --force-reinstall `ls dist/mlperf_loadgen-*cp3${PYTHON_MINOR_VERSION}*.whl` --target="${MLPERF_INFERENCE_PYTHON_SITE_BASE}"
+#CFLAGS="-std=c++14 -O3" ${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel
+#${CM_PYTHON_BIN_WITH_PATH} -m pip install --force-reinstall `ls dist/mlperf_loadgen-*cp3${PYTHON_MINOR_VERSION}*.whl` --target="${MLPERF_INFERENCE_PYTHON_SITE_BASE}"
+${CM_PYTHON_BIN_WITH_PATH} -m pip install .
+
 if [ "${?}" != "0" ]; then exit 1; fi
 
 # Clean the built wheel
-find . -name 'mlperf_loadgen*.whl' | xargs rm
+find . -name 'mlcommons_loadgen*.whl' -delete
 
 echo "******************************************************"
 echo "Loadgen is built and installed to ${INSTALL_DIR} ..."