
Commit cf723c2

Merge branch 'main' into pr_model_improve
2 parents: 470645d + 82f3381


368 files changed (+8,331 / -3,558 lines)


.buckconfig

Lines changed: 3 additions & 0 deletions

```diff
@@ -39,3 +39,6 @@
 
 [buck2]
 restarter=true
+
+[oss]
+folly_cxx_tests = False
```
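The unittest-buck2.sh change further down refers to shim code that reads this flag, so it can help to confirm what value buck2 actually resolves. A minimal sketch, assuming buck2's `audit config` subcommand accepts a `section.key` argument and is available in this OSS setup (run from the repo root):

```sh
# Print the effective value of the new [oss] setting; the exact output
# formatting depends on the buck2 version, so treat this as illustrative.
buck2 audit config oss.folly_cxx_tests
```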

.ci/docker/build.sh

Lines changed: 4 additions & 0 deletions

```diff
@@ -29,6 +29,10 @@ case "${IMAGE_NAME}" in
     LINTRUNNER=""
     CLANG_VERSION=12
     ;;
+  executorch-ubuntu-22.04-gcc11-aarch64)
+    LINTRUNNER=""
+    GCC_VERSION=11
+    ;;
   executorch-ubuntu-22.04-linter)
     LINTRUNNER=yes
     CLANG_VERSION=12
```

.ci/docker/common/install_conda.sh

Lines changed: 4 additions & 1 deletion

```diff
@@ -13,6 +13,9 @@ source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
 install_miniconda() {
   BASE_URL="https://repo.anaconda.com/miniconda"
   CONDA_FILE="Miniconda3-py${PYTHON_VERSION//./}_${MINICONDA_VERSION}-Linux-x86_64.sh"
+  if [[ $(uname -m) == "aarch64" ]]; then
+    CONDA_FILE="Miniconda3-py${PYTHON_VERSION//./}_${MINICONDA_VERSION}-Linux-aarch64.sh"
+  fi
 
   mkdir -p /opt/conda
   chown ci-user:ci-user /opt/conda
@@ -36,7 +39,7 @@ install_python() {
 
   # From https://github.com/pytorch/pytorch/blob/main/.ci/docker/common/install_conda.sh
   if [[ $(uname -m) == "aarch64" ]]; then
-    conda_install "openblas==0.3.28=*openmp*"
+    conda_install "openblas==0.3.29=*openmp*" -c conda-forge
   else
    conda_install mkl=2022.1.0 mkl-include=2022.1.0
  fi
```
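To make the parameter expansion concrete, here is a minimal sketch of how the installer filename resolves on an aarch64 host; the version numbers below are hypothetical placeholders, not the values pinned in CI.

```sh
#!/usr/bin/env bash
# Hypothetical versions, for illustration only.
PYTHON_VERSION=3.10
MINICONDA_VERSION=24.1.2-0

# Same selection logic as the diff above: default to the x86_64 installer,
# switch to the aarch64 one when the host architecture is aarch64.
CONDA_FILE="Miniconda3-py${PYTHON_VERSION//./}_${MINICONDA_VERSION}-Linux-x86_64.sh"
if [[ $(uname -m) == "aarch64" ]]; then
  CONDA_FILE="Miniconda3-py${PYTHON_VERSION//./}_${MINICONDA_VERSION}-Linux-aarch64.sh"
fi

echo "$CONDA_FILE"
# On an aarch64 host this prints: Miniconda3-py310_24.1.2-0-Linux-aarch64.sh
```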

.ci/scripts/build_llama_android.sh

Lines changed: 4 additions & 0 deletions

```diff
@@ -42,6 +42,10 @@ install_executorch_and_backend_lib() {
 
 build_llama_runner() {
   echo "Building llama runner for Android..."
+  pushd extension/llm/tokenizers
+  echo "Updating tokenizers submodule"
+  git submodule update --init
+  popd
   ANDROID_ABI=arm64-v8a
   cmake -DBUCK2="${BUCK2}" \
     -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK"/build/cmake/android.toolchain.cmake \
```

.ci/scripts/gather_test_models.py

Lines changed: 4 additions & 0 deletions

```diff
@@ -5,6 +5,9 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+# WARNING: The CI runner logic should directly be in the corresponding yml files
+# This file will be deleted once the reference in periodic.yml is deleted.
+
 import itertools
 import json
 import os
@@ -30,6 +33,7 @@
         "dl3": "linux.4xlarge.memory",
         "emformer_join": "linux.4xlarge.memory",
         "emformer_predict": "linux.4xlarge.memory",
+        "phi-4-mini": "linux.4xlarge.memory",
     }
 }
 
```

.ci/scripts/test_ane_static_llama.sh

Lines changed: 6 additions & 0 deletions

```diff
@@ -17,6 +17,12 @@ fi
 
 which "${PYTHON_EXECUTABLE}"
 
+# Update tokenizers submodule
+pushd $EXECUTORCH_ROOT/extension/llm/tokenizers
+echo "Update tokenizers submodule"
+git submodule update --init
+popd
+
 pushd $EXECUTORCH_ROOT/examples/apple/coreml/llama
 
 # Download stories llama110m artifacts
```

.ci/scripts/test_llama.sh

Lines changed: 4 additions & 0 deletions

```diff
@@ -173,6 +173,10 @@ cmake_install_executorch_libraries() {
 
 cmake_build_llama_runner() {
   echo "Building llama runner"
+  pushd extension/llm/tokenizers
+  echo "Updating tokenizers submodule"
+  git submodule update --init
+  popd
   dir="examples/models/llama"
   retry cmake \
     -DCMAKE_INSTALL_PREFIX=cmake-out \
```

.ci/scripts/unittest-buck2.sh

Lines changed: 15 additions & 4 deletions

```diff
@@ -8,11 +8,22 @@ set -eux
 
 # TODO: expand this to //...
 # TODO: can't query cadence & vulkan backends
+# TODO: can't query //kernels/prim_ops because of a cpp_unittest and
+# broken code in shim to read oss.folly_cxx_tests. Sending fix but it
+# needs to propagate and we need a submodule update.
 buck2 query "//backends/apple/... + //backends/example/... + \
 //backends/mediatek/... + //backends/test/... + //backends/transforms/... + \
-//backends/xnnpack/... + //configurations/... + //kernels/portable/cpu/... + \
-//runtime/... + //schema/... + //test/... + //util/..."
+//backends/xnnpack/... + //configurations/... + //kernels/aten/... + \
+//kernels/optimized/... + //kernels/portable/... + //kernels/quantized/... + \
+//kernels/test/... + //runtime/... + //schema/... + //test/... + //util/..."
 
+UNBUILDABLE_OPTIMIZED_OPS_REGEX="gelu|fft_r2c|log_softmax"
+BUILDABLE_OPTIMIZED_OPS=$(buck2 query //kernels/optimized/cpu/... | grep -E -v $UNBUILDABLE_OPTIMIZED_OPS_REGEX)
+
+BUILDABLE_KERNELS_PRIM_OPS_TARGETS=$(buck2 query //kernels/prim_ops/... | grep -v prim_ops_test_py)
 # TODO: expand the covered scope of Buck targets.
-buck2 build //runtime/core/portable_type/...
-buck2 test //runtime/core/portable_type/...
+# //runtime/kernel/... is failing because //third-party:torchgen_files's shell script can't find python on PATH.
+# //runtime/test/... requires Python torch, which we don't have in our OSS buck setup.
+buck2 test $BUILDABLE_OPTIMIZED_OPS //kernels/portable/... \
+  $BUILDABLE_KERNELS_PRIM_OPS_TARGETS //runtime/backend/... //runtime/core/... \
+  //runtime/executor: //runtime/kernel/... //runtime/platform/...
```
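The pattern introduced here is query, filter, then feed the survivors back into buck2. A minimal sketch of the same idea with a hypothetical exclusion list (the real script excludes gelu|fft_r2c|log_softmax):

```sh
#!/usr/bin/env bash
set -eux

# Hypothetical exclusion list, for illustration only.
EXCLUDE_REGEX="some_broken_op|another_broken_op"

# buck2 query emits one fully qualified target per line; grep -E -v drops the
# lines matching the exclusion regex.
TARGETS=$(buck2 query //kernels/optimized/cpu/... | grep -E -v "$EXCLUDE_REGEX")

# Word splitting on the unquoted variable passes each remaining line as a
# separate target argument, which is why the script leaves it unquoted.
buck2 test $TARGETS
```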

.ci/scripts/unittest-macos-buck2.sh

Lines changed: 9 additions & 0 deletions

```diff
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+set -eux
+
+buck2 test //extension/apple:ExecuTorch
```

.ci/scripts/unittest-macos.sh

Lines changed: 1 addition & 0 deletions

```diff
@@ -35,6 +35,7 @@ if [[ "$BUILD_TOOL" == "cmake" ]]; then
   .ci/scripts/unittest-macos-cmake.sh
 elif [[ "$BUILD_TOOL" == "buck2" ]]; then
   .ci/scripts/unittest-buck2.sh
+  # .ci/scripts/unittest-macos-buck2.sh
 else
   echo "Unknown build tool $BUILD_TOOL"
   exit 1
```

.ci/scripts/utils.sh

Lines changed: 8 additions & 0 deletions

```diff
@@ -20,6 +20,14 @@ clean_executorch_install_folders() {
   ./install_executorch.sh --clean
 }
 
+update_tokenizers_git_submodule() {
+  echo "Updating tokenizers git submodule..."
+  git submodule update --init
+  pushd extension/llm/tokenizers
+  git submodule update --init
+  popd
+}
+
 install_executorch() {
   which pip
   # Install executorch, this assumes that Executorch is checked out in the
```
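Several of the scripts above repeat the same pushd/popd submodule dance; this helper centralizes it. A minimal usage sketch, assuming it is run from the ExecuTorch repo root and that utils.sh is sourced the way the other CI scripts source it:

```sh
#!/usr/bin/env bash
set -eu

# Assumes the current directory is the ExecuTorch repo root.
source .ci/scripts/utils.sh

# Initializes the top-level submodules, then the submodules nested inside
# extension/llm/tokenizers, matching the pushd/popd pattern in the diffs above.
update_tokenizers_git_submodule

# Roughly equivalent one-liners without the helper:
#   git submodule update --init
#   git -C extension/llm/tokenizers submodule update --init
```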

.github/scripts/extract_benchmark_results.py

Lines changed: 48 additions & 135 deletions

```diff
@@ -86,36 +86,6 @@ def parse_args() -> Any:
         action=ValidateDir,
         help="the directory to keep the benchmark results",
     )
-    parser.add_argument(
-        "--repo",
-        type=str,
-        required=True,
-        help="which GitHub repo this workflow run belongs to",
-    )
-    parser.add_argument(
-        "--head-branch",
-        type=str,
-        required=True,
-        help="the head branch that runs",
-    )
-    parser.add_argument(
-        "--workflow-name",
-        type=str,
-        required=True,
-        help="the name of the benchmark workflow",
-    )
-    parser.add_argument(
-        "--workflow-run-id",
-        type=int,
-        required=True,
-        help="the id of the benchmark workflow",
-    )
-    parser.add_argument(
-        "--workflow-run-attempt",
-        type=int,
-        required=True,
-        help="which retry of the workflow this is",
-    )
     parser.add_argument(
         "--benchmark-configs",
         type=str,
@@ -153,9 +123,10 @@ def extract_android_benchmark_results(
         # This is to handle the case where there is no benchmark results
         warning(f"Fail to load the benchmark results from {artifact_s3_url}")
         return []
+    return []
 
 
-def initialize_ios_metadata(test_name: str) -> Dict[str, any]:
+def initialize_ios_metadata(test_name: str) -> Dict[str, Any]:
     """
     Extract the benchmark metadata from the test name, for example:
         test_forward_llama2_pte_iOS_17_2_1_iPhone15_4
@@ -364,14 +335,7 @@ def transform(
     app_type: str,
     benchmark_results: List,
     benchmark_config: Dict[str, str],
-    repo: str,
-    head_branch: str,
-    workflow_name: str,
-    workflow_run_id: int,
-    workflow_run_attempt: int,
     job_name: str,
-    job_id: int,
-    schema_version: str,
 ) -> List:
     """
     Transform the benchmark results into the format writable into the benchmark database
@@ -381,87 +345,51 @@
     for r in benchmark_results:
         r["deviceInfo"]["device"] = job_name
 
-    if schema_version == "v2":
-        # TODO (huydhn): Clean up this branch after ExecuTorch dashboard migrates to v3
-        return [
-            {
-                # GH-info to identify where the benchmark is run
-                "repo": repo,
-                "head_branch": head_branch,
-                "workflow_id": workflow_run_id,
-                "run_attempt": workflow_run_attempt,
-                "job_id": job_id,
-                # The model
-                "name": f"{r['benchmarkModel']['name']} {r['benchmarkModel'].get('backend', '')}".strip(),
-                "dtype": (
-                    r["benchmarkModel"]["quantization"]
-                    if r["benchmarkModel"]["quantization"]
-                    else "unknown"
-                ),
-                # The metric value
-                "metric": r["metric"],
-                "actual": r["actualValue"],
-                "target": r["targetValue"],
-                # The device
-                "device": r["deviceInfo"]["device"],
-                "arch": r["deviceInfo"].get("os", ""),
-                # Not used here, just set it to something unique here
-                "filename": workflow_name,
-                "test_name": app_type,
-                "runner": job_name,
-            }
-            for r in benchmark_results
-        ]
-    elif schema_version == "v3":
-        v3_benchmark_results = []
-        # From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
-        return [
-            {
-                "benchmark": {
-                    "name": "ExecuTorch",
-                    "mode": "inference",
-                    "extra_info": {
-                        "app_type": app_type,
-                        # Just keep a copy of the benchmark config here
-                        "benchmark_config": json.dumps(benchmark_config),
-                    },
-                },
-                "model": {
-                    "name": benchmark_config.get("model", r["benchmarkModel"]["name"]),
-                    "type": "OSS model",
-                    "backend": benchmark_config.get(
-                        "config", r["benchmarkModel"].get("backend", "")
-                    ),
+    # From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
+    return [
+        {
+            "benchmark": {
+                "name": "ExecuTorch",
+                "mode": "inference",
+                "extra_info": {
+                    "app_type": app_type,
+                    # Just keep a copy of the benchmark config here
+                    "benchmark_config": json.dumps(benchmark_config),
                 },
-                "metric": {
-                    "name": r["metric"],
-                    "benchmark_values": [r["actualValue"]],
-                    "target_value": r["targetValue"],
-                    "extra_info": {
-                        "method": r.get("method", ""),
-                    },
+            },
+            "model": {
+                "name": benchmark_config.get("model", r["benchmarkModel"]["name"]),
+                "type": "OSS model",
+                "backend": benchmark_config.get(
+                    "config", r["benchmarkModel"].get("backend", "")
+                ),
+            },
+            "metric": {
+                "name": r["metric"],
+                "benchmark_values": [r["actualValue"]],
+                "target_value": r["targetValue"],
+                "extra_info": {
+                    "method": r.get("method", ""),
                 },
-                "runners": [
-                    {
-                        "name": r["deviceInfo"]["device"],
-                        "type": r["deviceInfo"]["os"],
-                        "avail_mem_in_gb": r["deviceInfo"].get("availMem", ""),
-                        "total_mem_in_gb": r["deviceInfo"].get("totalMem", ""),
-                    }
-                ],
-            }
-            for r in benchmark_results
-        ]
+            },
+            "runners": [
+                {
+                    "name": r["deviceInfo"]["device"],
+                    "type": r["deviceInfo"]["os"],
+                    "avail_mem_in_gb": r["deviceInfo"].get("availMem", ""),
+                    "total_mem_in_gb": r["deviceInfo"].get("totalMem", ""),
+                }
+            ],
+        }
+        for r in benchmark_results
+    ]
 
 
 def main() -> None:
     args = parse_args()
 
     # Across all devices, keeping both schemas for now until ExecuTorch dashboard migrates to v3
-    all_benchmark_results = {
-        "v2": [],
-        "v3": [],
-    }
+    all_benchmark_results = []
     benchmark_config = {}
 
     with open(args.artifacts) as f:
@@ -482,7 +410,7 @@ def main() -> None:
             benchmark_config = read_benchmark_config(
                 artifact_s3_url, args.benchmark_configs
             )
-
+            benchmark_results = []
             if app_type == "ANDROID_APP":
                 benchmark_results = extract_android_benchmark_results(
                     job_name, artifact_type, artifact_s3_url
@@ -494,32 +422,17 @@ def main() -> None:
             )
 
         if benchmark_results:
-            for schema in all_benchmark_results.keys():
-                results = transform(
-                    app_type,
-                    benchmark_results,
-                    benchmark_config,
-                    args.repo,
-                    args.head_branch,
-                    args.workflow_name,
-                    args.workflow_run_id,
-                    args.workflow_run_attempt,
-                    job_name,
-                    extract_job_id(args.artifacts),
-                    schema,
-                )
-                all_benchmark_results[schema].extend(results)
-
-    for schema in all_benchmark_results.keys():
-        if not all_benchmark_results.get(schema):
-            continue
-
-        output_dir = os.path.join(args.output_dir, schema)
-        os.makedirs(output_dir, exist_ok=True)
+            results = transform(
+                app_type, benchmark_results, benchmark_config, job_name
+            )
+            all_benchmark_results.extend(results)
 
+    # add v3 in case we have higher version of schema
+    output_dir = os.path.join(args.output_dir, "v3")
+    os.makedirs(output_dir, exist_ok=True)
     output_file = os.path.basename(args.artifacts)
     with open(f"{output_dir}/{output_file}", "w") as f:
-        json.dump(all_benchmark_results[schema], f)
+        json.dump(all_benchmark_results, f)
 
 
 if __name__ == "__main__":
```
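For orientation, here is a hand-written sketch of one record in the v3 shape that transform() now emits; every value below is hypothetical and only mirrors the structure built in the diff above, not data from a real benchmark run.

```python
# Hypothetical example of a single v3 record; field values are made up.
example_v3_record = {
    "benchmark": {
        "name": "ExecuTorch",
        "mode": "inference",
        "extra_info": {
            "app_type": "ANDROID_APP",
            "benchmark_config": "{}",  # JSON-encoded copy of the benchmark config
        },
    },
    "model": {
        "name": "llama2",
        "type": "OSS model",
        "backend": "xnnpack",
    },
    "metric": {
        "name": "token_per_sec",
        "benchmark_values": [12.3],
        "target_value": 0,
        "extra_info": {"method": ""},
    },
    "runners": [
        {
            "name": "Samsung Galaxy S22",
            "type": "Android 14",
            "avail_mem_in_gb": "",
            "total_mem_in_gb": "",
        }
    ],
}
```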
