Commit 910cc70

Merge branch 'main' into pr_model_improve
2 parents 8d69c9c + 01f5599 commit 910cc70

File tree: 466 files changed (+10384, -4462 lines)

.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-295f2ed4d103017f7e19a7b8263ece606cd629db
+7ae0ce6360b6e4f944906502d20da24c04debee5

.ci/scripts/build_android_instrumentation.sh

Lines changed: 6 additions & 25 deletions

@@ -12,29 +12,10 @@ if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
 fi
 which "${PYTHON_EXECUTABLE}"
 
-build_android_test() {
-  pushd extension/android_test
-  ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew testDebugUnitTest
-  ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew build assembleAndroidTest
-  popd
-}
+mkdir -p "${BUILD_AAR_DIR}"/executorch_android/src/androidTest/resources
+cp extension/module/test/resources/add.pte "${BUILD_AAR_DIR}"/executorch_android/src/androidTest/resources
 
-collect_artifacts_to_be_uploaded() {
-  ARTIFACTS_DIR_NAME="$1"
-  # Collect Java library test
-  JAVA_LIBRARY_TEST_DIR="${ARTIFACTS_DIR_NAME}/library_test_dir"
-  mkdir -p "${JAVA_LIBRARY_TEST_DIR}"
-  cp extension/android_test/build/outputs/apk/debug/*.apk "${JAVA_LIBRARY_TEST_DIR}"
-  cp extension/android_test/build/outputs/apk/androidTest/debug/*.apk "${JAVA_LIBRARY_TEST_DIR}"
-}
-
-main() {
-  build_android_test
-  if [ -n "$ARTIFACTS_DIR_NAME" ]; then
-    collect_artifacts_to_be_uploaded ${ARTIFACTS_DIR_NAME}
-  fi
-}
-
-if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
-  main "$@"
-fi
+pushd "${BUILD_AAR_DIR}"
+ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:testDebugUnitTest
+ANDROID_HOME="${ANDROID_SDK:-/opt/android/sdk}" ./gradlew :executorch_android:assembleAndroidTest
+popd

.ci/scripts/gather_test_models.py

Lines changed: 3 additions & 3 deletions

@@ -14,7 +14,7 @@
 from typing import Any
 
 from examples.models import MODEL_NAME_TO_MODEL
-from examples.xnnpack import MODEL_NAME_TO_OPTIONS
+from examples.xnnpack import MODEL_NAME_TO_OPTIONS, QuantType
 
 DEFAULT_RUNNERS = {
     "linux": "linux.2xlarge",
@@ -33,7 +33,7 @@
         "dl3": "linux.4xlarge.memory",
         "emformer_join": "linux.4xlarge.memory",
         "emformer_predict": "linux.4xlarge.memory",
-        "phi-4-mini": "linux.4xlarge.memory",
+        "phi_4_mini": "linux.4xlarge.memory",
     }
 }
 
@@ -154,7 +154,7 @@ def export_models_for_ci() -> dict[str, dict]:
         if backend == "xnnpack":
             if name not in MODEL_NAME_TO_OPTIONS:
                 continue
-            if MODEL_NAME_TO_OPTIONS[name].quantization:
+            if MODEL_NAME_TO_OPTIONS[name].quantization != QuantType.NONE:
                 backend += "-quantization"
 
             if MODEL_NAME_TO_OPTIONS[name].delegation:
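
Note: the sketch below is editorial and not part of the commit; it illustrates why the new explicit comparison against QuantType.NONE is safer than the old truthiness check once the quantization field holds an enum. Python Enum members are truthy by default, so "if options.quantization:" would also fire for NONE. The enum members and the options holder shown here are assumed for illustration; only MODEL_NAME_TO_OPTIONS and QuantType appear in the diff above.

from dataclasses import dataclass
from enum import Enum

class QuantType(Enum):
    # Assumed member names; the real enum is imported from examples.xnnpack.
    NONE = "none"
    DYNAMIC_PER_CHANNEL = "dynamic_per_channel"

@dataclass
class XnnpackOptions:
    # Hypothetical stand-in for the entries of MODEL_NAME_TO_OPTIONS.
    quantization: QuantType = QuantType.NONE
    delegation: bool = True

opts = XnnpackOptions()
print(bool(opts.quantization))              # True: enum members are truthy, even NONE
print(opts.quantization != QuantType.NONE)  # False: correctly treated as not quantized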

.ci/scripts/test_llama_torchao_lowbit.sh

Lines changed: 0 additions & 1 deletion

@@ -78,7 +78,6 @@ ${PYTHON_EXECUTABLE} -m examples.models.llama.export_llama \
   -qmode "torchao:8da${QLINEAR_BITWIDTH}w" \
   --group_size ${QLINEAR_GROUP_SIZE} \
   -E "torchao:${QEMBEDDING_BITWIDTH},${QEMBEDDING_GROUP_SIZE}" \
-  --disable_dynamic_shape \
   -d fp32
 
 # Test run

.ci/scripts/test_model.sh

Lines changed: 9 additions & 6 deletions

@@ -96,15 +96,15 @@ test_model() {
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
     # Use Llama random checkpoint with Qwen 2.5 1.5b model configuration.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/qwen2_5/1_5b_config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -p examples/models/qwen2_5/1_5b_config.json
     rm "./${MODEL_NAME}.pte"
     return # Skip running with portable executor runnner since portable doesn't support Qwen's biased linears.
   fi
-  if [[ "${MODEL_NAME}" == "phi-4-mini" ]]; then
+  if [[ "${MODEL_NAME}" == "phi_4_mini" ]]; then
     # Install requirements for export_llama
     bash examples/models/llama/install_requirements.sh
     # Test export_llama script: python3 -m examples.models.llama.export_llama.
-    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/phi-4-mini/config.json
+    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -p examples/models/phi_4_mini/config.json
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
     return
@@ -224,19 +224,22 @@ test_model_with_coreml() {
 
   "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}"
   EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
-  # TODO:
+
   if [ -n "$EXPORTED_MODEL" ]; then
     EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
     mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
     EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
-    echo "Renamed file path: $EXPORTED_MODEL"
+    echo "OK exported model: $EXPORTED_MODEL"
   else
-    echo "No .pte file found"
+    echo "[error] failed to export model: no .pte file found"
     exit 1
   fi
 
   # Run the model
   if [ "${should_test}" = true ]; then
+    echo "Installing requirements needed to build coreml_executor_runner..."
+    backends/apple/coreml/scripts/install_requirements.sh
+
     echo "Testing exported model with coreml_executor_runner..."
     local out_dir=$(mktemp -d)
     COREML_EXECUTOR_RUNNER_OUT_DIR="${out_dir}" examples/apple/coreml/scripts/build_executor_runner.sh

.ci/scripts/unittest-linux.sh

Lines changed: 1 addition & 2 deletions

@@ -21,8 +21,7 @@ if [[ "$BUILD_TOOL" == "cmake" ]]; then
   source .ci/scripts/setup-vulkan-linux-deps.sh
 
   PYTHON_EXECUTABLE=python \
-  EXECUTORCH_BUILD_PYBIND=ON \
-  CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+  CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
   .ci/scripts/setup-linux.sh "$@"
 
   # Install llama3_2_vision dependencies.

.ci/scripts/unittest-macos-buck2.sh

File mode changed from 100644 to 100755.

.ci/scripts/unittest-macos.sh

Lines changed: 6 additions & 7 deletions

@@ -19,14 +19,13 @@ export TMP_DIR=$(mktemp -d)
 export PATH="${TMP_DIR}:$PATH"
 trap 'rm -rfv ${TMP_DIR}' EXIT
 
-if [[ "$BUILD_TOOL" == "cmake" ]]; then
-  # Setup MacOS dependencies as there is no Docker support on MacOS atm
-  PYTHON_EXECUTABLE=python \
-  EXECUTORCH_BUILD_PYBIND=ON \
-  CMAKE_ARGS="-DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
-  ${CONDA_RUN} --no-capture-output \
-  .ci/scripts/setup-macos.sh "$@"
+# Setup MacOS dependencies as there is no Docker support on MacOS atm
+PYTHON_EXECUTABLE=python \
+CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
+${CONDA_RUN} --no-capture-output \
+.ci/scripts/setup-macos.sh "$@"
 
+if [[ "$BUILD_TOOL" == "cmake" ]]; then
   # Install llama3_2_vision dependencies.
   PYTHON_EXECUTABLE=python \
   ${CONDA_RUN} --no-capture-output \

.ci/scripts/utils.sh

Lines changed: 39 additions & 5 deletions

@@ -60,12 +60,46 @@ install_pytorch_and_domains() {
   # Fetch the target commit
   pushd pytorch || return
   git checkout "${TORCH_VERSION}"
-  git submodule update --init --recursive
 
-  export USE_DISTRIBUTED=1
-  # Then build and install PyTorch
-  python setup.py bdist_wheel
-  pip install "$(echo dist/*.whl)"
+  local system_name=$(uname)
+  if [[ "${system_name}" == "Darwin" ]]; then
+    local platform=$(python -c 'import sysconfig; import platform; v=platform.mac_ver()[0].split(".")[0]; platform=sysconfig.get_platform().split("-"); platform[1]=f"{v}_0"; print("_".join(platform))')
+  fi
+  local python_version=$(python -c 'import platform; v=platform.python_version_tuple(); print(f"{v[0]}{v[1]}")')
+  local torch_release=$(cat version.txt)
+  local torch_short_hash=${TORCH_VERSION:0:7}
+  local torch_wheel_path="cached_artifacts/pytorch/executorch/pytorch_wheels/${system_name}/${python_version}"
+  local torch_wheel_name="torch-${torch_release}%2Bgit${torch_short_hash}-cp${python_version}-cp${python_version}-${platform:-}.whl"
+
+  local cached_torch_wheel="https://gha-artifacts.s3.us-east-1.amazonaws.com/${torch_wheel_path}/${torch_wheel_name}"
+  # Cache PyTorch wheel is only needed on MacOS, Linux CI already has this as part
+  # of the Docker image
+  local torch_wheel_not_found=0
+  if [[ "${system_name}" == "Darwin" ]]; then
+    pip install "${cached_torch_wheel}" || torch_wheel_not_found=1
+  else
+    torch_wheel_not_found=1
+  fi
+
+  # Found no such wheel, we will build it from source then
+  if [[ "${torch_wheel_not_found}" == "1" ]]; then
+    echo "No cached wheel found, continue with building PyTorch at ${TORCH_VERSION}"
+
+    git submodule update --init --recursive
+    USE_DISTRIBUTED=1 python setup.py bdist_wheel
+    pip install "$(echo dist/*.whl)"
+
+    # Only AWS runners have access to S3
+    if command -v aws && [[ -z "${GITHUB_RUNNER:-}" ]]; then
+      for wheel_path in dist/*.whl; do
+        local wheel_name=$(basename "${wheel_path}")
+        echo "Caching ${wheel_name}"
+        aws s3 cp "${wheel_path}" "s3://gha-artifacts/${torch_wheel_path}/${wheel_name}"
+      done
+    fi
+  else
+    echo "Use cached wheel at ${cached_torch_wheel}"
+  fi
 
   # Grab the pinned audio and vision commits from PyTorch
   TORCHAUDIO_VERSION=$(cat .github/ci_commit_pins/audio.txt)
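
Note: the snippet below is editorial and not part of the commit; it is an expanded, commented reading of the "python -c" one-liner above that derives the macOS platform tag used in the cached wheel name (the %2B in torch_wheel_name is simply the URL-encoded "+" of the wheel's local version segment). Variable names are illustrative.

import platform
import sysconfig

# platform.mac_ver()[0] is e.g. "14.4.1"; keep only the major version, "14".
macos_major = platform.mac_ver()[0].split(".")[0]

# sysconfig.get_platform() is e.g. "macosx-11.0-arm64"; split it into parts.
parts = sysconfig.get_platform().split("-")

# Replace the minor version with 0, yielding a wheel-style tag such as
# "macosx_14_0_arm64", which is what ${platform} expands to in the script.
parts[1] = f"{macos_major}_0"
print("_".join(parts))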

.ci/scripts/wheel/envvar_base.sh

Lines changed: 1 addition & 4 deletions

@@ -8,13 +8,10 @@
 # should typically only contain shell variable assignments. Be sure to export
 # any variables so that subprocesses will see them.
 
-# Enable pybindings so that users can execute ExecuTorch programs from python.
-export EXECUTORCH_BUILD_PYBIND=1
-
 # Ensure that CMAKE_ARGS is defined before referencing it. Defaults to empty
 # if not defined.
 export CMAKE_ARGS="${CMAKE_ARGS:-}"
 
 # Link the XNNPACK backend into the pybindings runtime so that users can execute
 # ExecuTorch programs that delegate to it.
-CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_XNNPACK=ON"
+CMAKE_ARGS="${CMAKE_ARGS} -DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON"

.ci/scripts/wheel/test_macos.py

Lines changed: 4 additions & 6 deletions

@@ -15,11 +15,9 @@
         model=Model.Mv3,
         backend=Backend.XnnpackQuantizationDelegation,
     ),
-    # Enable this once CoreML is suppported out-of-the-box
-    # https://github.com/pytorch/executorch/issues/9019
-    # test_base.ModelTest(
-    #     model=Model.Mv3,
-    #     backend=Backend.CoreMlTest,
-    # )
+    test_base.ModelTest(
+        model=Model.Mv3,
+        backend=Backend.CoreMlTest,
+    ),
     ]
 )

.github/workflows/_android.yml

Lines changed: 11 additions & 5 deletions

@@ -27,13 +27,20 @@ jobs:
       conda activate "${CONDA_ENV}"
       PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool buck2
       export ARTIFACTS_DIR_NAME=artifacts-to-be-uploaded
+      mkdir -p ${ARTIFACTS_DIR_NAME}/
 
       # Build LLM Demo for Android
       export BUILD_AAR_DIR=aar-out
-      bash build/build_android_library.sh ${ARTIFACTS_DIR_NAME}
-      bash .ci/scripts/build_android_instrumentation.sh ${ARTIFACTS_DIR_NAME}
+      mkdir -p $BUILD_AAR_DIR
+      bash scripts/build_android_library.sh
+      cp ${BUILD_AAR_DIR}/executorch.aar $ARTIFACTS_DIR_NAME
+
+      mkdir -p ${ARTIFACTS_DIR_NAME}/library_test_dir
+      bash .ci/scripts/build_android_instrumentation.sh
+      cp ${BUILD_AAR_DIR}/executorch_android/build/outputs/apk/androidTest/debug/executorch_android-debug-androidTest.apk "${ARTIFACTS_DIR_NAME}/library_test_dir"
 
       mkdir -p ${ARTIFACTS_DIR_NAME}/fp32-xnnpack-custom
+      bash examples/models/llama/install_requirements.sh
       bash ".ci/scripts/test_llama.sh" -model stories110M -build_tool cmake -dtype fp16 -mode portable -upload ${ARTIFACTS_DIR_NAME}/fp32-xnnpack-custom
 
       mkdir -p examples/demo-apps/android/LlamaDemo/app/libs
@@ -94,8 +101,7 @@ jobs:
       curl -O https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/llm_demo/app-debug.apk
       curl -O https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/llm_demo/app-debug-androidTest.apk
       curl -O https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/fp32-xnnpack-custom/model.zip
-      curl -o android-test-debug.apk https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/library_test_dir/executorch-debug.apk
-      curl -o android-test-debug-androidTest.apk https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/library_test_dir/executorch-debug-androidTest.apk
+      curl -o android-test-debug-androidTest.apk https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/library_test_dir/executorch_android-debug-androidTest.apk
       unzip model.zip
       mv *.pte model.pte
 
@@ -119,7 +125,7 @@ jobs:
     with:
       api-level: ${{ env.API_LEVEL }}
      arch: x86_64
-      script: ./build/run_android_emulator.sh
+      script: ./scripts/run_android_emulator.sh
      # NB: This is to boot the emulator faster following the instructions on
      # https://github.com/ReactiveCircus/android-emulator-runner. The max number
      # of cores we can set is 6, any higher number will be reduced to 6.

.github/workflows/_unittest.yml

Lines changed: 2 additions & 0 deletions

@@ -49,4 +49,6 @@ jobs:
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
     script: |
       set -eux
+      # This is needed to get the prebuilt PyTorch wheel from S3
+      ${CONDA_RUN} --no-capture-output pip install awscli==1.37.21
       .ci/scripts/unittest-macos.sh --build-tool "${{ inputs.build-tool }}" --build-mode "${{ inputs.build-mode }}" --editable "${{ inputs.editable }}"

.github/workflows/android-perf.yml

Lines changed: 1 addition & 1 deletion

@@ -363,7 +363,7 @@ jobs:
       PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
 
       mkdir -p aar-out
-      PYTHON_EXECUTABLE=python ANDROID_ABIS="arm64-v8a" BUILD_AAR_DIR=aar-out EXECUTORCH_BUILD_QNN=ON QNN_SDK_ROOT=/tmp/qnn/2.28.0.241029 bash build/build_android_library.sh
+      PYTHON_EXECUTABLE=python ANDROID_ABIS="arm64-v8a" BUILD_AAR_DIR=aar-out EXECUTORCH_BUILD_QNN=ON QNN_SDK_ROOT=/tmp/qnn/2.28.0.241029 bash scripts/build_android_library.sh
       mkdir -p extension/benchmark/android/benchmark/app/libs
       cp aar-out/executorch.aar extension/benchmark/android/benchmark/app/libs
       pushd extension/benchmark/android/benchmark

.github/workflows/android-release-artifacts.yml

Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@ jobs:
       # Build AAR Package
       mkdir aar-out
       export BUILD_AAR_DIR=aar-out
-      bash build/build_android_library.sh
+      bash scripts/build_android_library.sh
       mkdir -p "${ARTIFACTS_DIR_NAME}"
       cp aar-out/executorch.aar "${ARTIFACTS_DIR_NAME}/executorch.aar"
 

.github/workflows/apple-perf.yml

Lines changed: 2 additions & 2 deletions

@@ -387,7 +387,7 @@ jobs:
 
       echo "::group::Build ExecuTorch iOS frameworks"
       PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
-        build/build_apple_frameworks.sh --Release --Debug --coreml --custom --mps --optimized --portable --quantized --xnnpack
+        scripts/build_apple_frameworks.sh --Release --Debug --coreml --custom --mps --optimized --portable --quantized --xnnpack
       echo "::endgroup::"
 
       # NB: Although exported models can be copied to this directory and bundled together with the
@@ -396,7 +396,7 @@ jobs:
       # create the directory here to pass the build
       mkdir -p extension/benchmark/apple/Benchmark/Models
       ${CONDA_RUN} --no-capture-output \
-        build/build_apple_llm_demo.sh ${ARTIFACTS_DIR_NAME}
+        scripts/build_apple_llm_demo.sh ${ARTIFACTS_DIR_NAME}
 
   upload-benchmark-app:
     needs: build-benchmark-app

.github/workflows/apple.yml

Lines changed: 6 additions & 6 deletions

@@ -11,9 +11,9 @@ on:
       - .github/workflows/apple.yml
       - install_executorch.sh
       - backends/apple/**
-      - build/build_apple_frameworks.sh
-      - build/build_apple_llm_demo.sh
-      - build/create_frameworks.sh
+      - scripts/build_apple_frameworks.sh
+      - scripts/build_apple_llm_demo.sh
+      - scripts/create_frameworks.sh
       - .ci/scripts/test_ios_ci.sh
       - examples/demo-apps/apple_ios/**
       - extension/apple/**
@@ -172,7 +172,7 @@ jobs:
 
       # Build iOS Frameworks
       PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
-        build/build_apple_frameworks.sh --Release --Debug --coreml --custom --mps --optimized --portable --quantized --xnnpack
+        scripts/build_apple_frameworks.sh --Release --Debug --coreml --custom --mps --optimized --portable --quantized --xnnpack
 
       # Bundle iOS Frameworks
       for FRAMEWORK in "${FRAMEWORKS[@]}"; do (
@@ -313,11 +313,11 @@ jobs:
 
       echo "::group::Build ExecuTorch iOS frameworks"
       PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output \
-        build/build_apple_frameworks.sh --Release --Debug --coreml --custom --mps --optimized --portable --quantized --xnnpack
+        scripts/build_apple_frameworks.sh --Release --Debug --coreml --custom --mps --optimized --portable --quantized --xnnpack
       echo "::endgroup::"
 
       echo "::group::Build ExecuTorch benchmark app"
       mkdir -p extension/benchmark/apple/Benchmark/Models
       ${CONDA_RUN} --no-capture-output \
-        build/build_apple_llm_demo.sh "${ARTIFACTS_DIR_NAME}"
+        scripts/build_apple_llm_demo.sh "${ARTIFACTS_DIR_NAME}"
       echo "::endgroup::"

.github/workflows/build-wheels-linux.yml

Lines changed: 3 additions & 0 deletions

@@ -6,6 +6,9 @@ on:
     paths:
       - .ci/**/*
       - .github/workflows/build-wheels-linux.yml
+      - examples/**/*
+      - pyproject.toml
+      - setup.py
   push:
     branches:
       - nightly

.github/workflows/build-wheels-macos.yml

Lines changed: 6 additions & 1 deletion

@@ -6,6 +6,9 @@ on:
     paths:
       - .ci/**/*
       - .github/workflows/build-wheels-macos.yml
+      - examples/**/*
+      - pyproject.toml
+      - setup.py
   push:
     branches:
       - nightly
@@ -57,6 +60,8 @@ jobs:
       pre-script: ${{ matrix.pre-script }}
       post-script: ${{ matrix.post-script }}
       package-name: ${{ matrix.package-name }}
-      runner-type: macos-m1-stable
+      # Meta's macOS runners do not have Xcode, so use GitHub's runners.
+      runner-type: macos-latest-xlarge
+      setup-miniconda: true
       smoke-test-script: ${{ matrix.smoke-test-script }}
       trigger-event: ${{ github.event_name }}
