Commit ae2c541

Add examples for JS (#3)

* Add examples for JS
* add copyright banner
* update consume package version
* add link for react_native
* add link in main README

1 parent ca25bb6 · commit ae2c541

25 files changed: +648 −0

Diff for: README.md (+1)

```diff
@@ -13,6 +13,7 @@ Outline the examples in the repository.

 | Example | Description |
 |-------------------|--------------------------------------------|
 |[Android Image Classifier](mobile/examples/image_classifications/android)| An example application for ONNX Runtime on Android. The example app uses image classification, continuously classifying the objects it sees from the device's camera in real time and displaying the most probable inference result on the screen. |
+|[JavaScript API examples](js)| Examples that demonstrate how to use the JavaScript API for ONNX Runtime. |

 ## Contributing
```

Diff for: js/.gitignore (+5)

```
dist
node_modules

!**/*.onnx
package-lock.json
```

Diff for: js/README.md (+27)

```md
# ONNX Runtime JavaScript examples

## Summary

This folder contains several JavaScript examples. Unless explicitly noted otherwise, the examples are available in all of the NPM packages described below:

- [onnxruntime-node](https://github.com/microsoft/onnxruntime/tree/master/js/node): Node.js binding for ONNX Runtime. Can be used in Node.js applications and Node.js-compatible environments (e.g. Electron.js).
- [onnxruntime-web](https://github.com/microsoft/onnxruntime/tree/master/js/web): ONNX Runtime for browsers.
- [onnxruntime-react-native](https://github.com/microsoft/onnxruntime/tree/master/js/react_native): ONNX Runtime for React Native applications on Android and iOS.

## Usage

Follow the links below for the README of each example.

### Quick Start

* [Quick Start - Node.js Binding](quick-start_onnxruntime-node) - a demonstration of basic usage of the ONNX Runtime Node.js binding.

* [Quick Start - Web (using script tag)](quick-start_onnxruntime-web-script-tag) - a demonstration of basic usage of ONNX Runtime Web using a script tag.

* [Quick Start - Web (using bundler)](quick-start_onnxruntime-web-bundler) - a demonstration of basic usage of ONNX Runtime Web using a bundler.

### API usage

* [API usage - Tensor](api-usage_tensor) - a demonstration of basic usage of `Tensor`.

* [API usage - InferenceSession](api-usage_inference-session) - a demonstration of basic usage of `InferenceSession`.
```
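As the summary above notes, the examples are meant to be package-portable. A minimal hypothetical sketch (not part of this commit) of what that portability looks like in practice; the model path and the input name `a` are assumptions:

```js
// Typically only the import line differs between packages.
const ort = require('onnxruntime-node'); // or: import * as ort from 'onnxruntime-web';

async function main() {
    // assumes a model with a single float32 input named 'a' of shape [3, 4]
    const session = await ort.InferenceSession.create('./model.onnx');
    const a = new ort.Tensor('float32', new Float32Array(12), [3, 4]);
    const results = await session.run({ a });
    console.log(results);
}

main();
```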

Diff for: js/api-usage_inference-session/README.md (+20)

````md
# API usage - InferenceSession

## Summary

This example is a demonstration of basic usage of `InferenceSession`.

- `inference-session-create.js`: In this example, we create an `InferenceSession` in different ways.
- `inference-session-properties.js`: In this example, we get the input/output names from an `InferenceSession` object.
- `inference-session-run.js`: In this example, we run model inference in different ways.

For more information about `SessionOptions` and `RunOptions`, please refer to the other examples.

## Usage

```sh
npm install
node ./inference-session-create.js
node ./inference-session-properties.js
node ./inference-session-run.js
```
````
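The `SessionOptions` and `RunOptions` examples referenced above are not included in this commit; the following is a minimal hypothetical sketch of what such options objects can look like (all field values here are illustrative assumptions):

```js
// Hypothetical sketch of options objects; values are illustrative only.
const sessionOptions = {
    executionProviders: ['cpu'],   // ordered list of execution providers to try
    graphOptimizationLevel: 'all', // 'disabled' | 'basic' | 'extended' | 'all'
    intraOpNumThreads: 1           // number of intra-operator threads
};

const runOptions = {
    logSeverityLevel: 2, // 0: verbose, 1: info, 2: warning, 3: error, 4: fatal
    tag: 'my-run'        // tag used to identify this run in logs
};
```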
Diff for: js/api-usage_inference-session/inference-session-create.js (+74)

```js
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.

const fs = require('fs');
const util = require('util');
const ort = require('onnxruntime-node');

// the following code also works for onnxruntime-web.

const InferenceSession = ort.InferenceSession;

// use an async context to call onnxruntime functions.
async function main() {
    try {
        // create session options object
        const options = createMySessionOptions();

        //
        // create inference session from an ONNX model file path or URL
        //
        const session01 = await InferenceSession.create('./model.onnx');
        const session01_B = await InferenceSession.create('./model.onnx', options); // specify options

        //
        // create inference session from a Node.js Buffer (Uint8Array)
        //
        const buffer02 = await readMyModelDataFile('./model.onnx'); // buffer is Uint8Array
        const session02 = await InferenceSession.create(buffer02);
        const session02_B = await InferenceSession.create(buffer02, options); // specify options

        //
        // create inference session from an ArrayBuffer
        //
        const arrayBuffer03 = buffer02.buffer;
        const offset03 = buffer02.byteOffset;
        const length03 = buffer02.byteLength;
        const session03 = await InferenceSession.create(arrayBuffer03, offset03, length03);
        const session03_B = await InferenceSession.create(arrayBuffer03, offset03, length03, options); // specify options

        // example for browser
        //const arrayBuffer03_C = await fetchMyModel('./model.onnx');
        //const session03_C = await InferenceSession.create(arrayBuffer03_C);
    } catch (e) {
        console.error(`failed to create inference session: ${e}`);
    }
}

main();

function createMySessionOptions() {
    // session options: please refer to the other example for detailed usage of session options

    // example of a session options object in Node.js:
    // set the number of intra-operator threads to 1 and disable the CPU memory arena
    return { intraOpNumThreads: 1, enableCpuMemArena: false };

    // example of a session options object in the browser:
    // specify the WebAssembly execution provider
    //return { executionProviders: ['wasm'] };
}

async function readMyModelDataFile(filepathOrUri) {
    // read model file content (Node.js) as Buffer (Uint8Array)
    return await util.promisify(fs.readFile)(filepathOrUri);
}

async function fetchMyModel(filepathOrUri) {
    // use fetch to read the model file (browser) as an ArrayBuffer
    if (typeof fetch !== 'undefined') {
        const response = await fetch(filepathOrUri);
        return await response.arrayBuffer();
    }
}
```
Diff for: js/api-usage_inference-session/inference-session-properties.js (+27)

```js
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.

const ort = require('onnxruntime-node');

// the following code also works for onnxruntime-web.

const InferenceSession = ort.InferenceSession;

// use an async context to call onnxruntime functions.
async function main() {
    try {
        // create session and load model.onnx
        const session = await InferenceSession.create('./model.onnx');

        //
        // get input/output names from the inference session object
        //
        const inputNames = session.inputNames;
        const outputNames = session.outputNames;

    } catch (e) {
        console.error(`failed to create inference session: ${e}`);
    }
}

main();
```
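A hypothetical follow-on sketch (not part of this commit) showing one use of these name lists: building a feeds object generically when input names are not hard-coded. The tensor type and shape below are assumptions:

```js
const ort = require('onnxruntime-node');

// hypothetical helper: build dummy feeds from session.inputNames.
// assumes every input is a float32 tensor of shape [1]; adjust per model.
function buildDummyFeeds(session) {
    const feeds = {};
    for (const name of session.inputNames) {
        feeds[name] = new ort.Tensor('float32', new Float32Array(1), [1]);
    }
    return feeds;
}
```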
Diff for: js/api-usage_inference-session/inference-session-run.js (+76)

```js
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.

const ort = require('onnxruntime-node');

// the following code also works for onnxruntime-web.

const InferenceSession = ort.InferenceSession;
const Tensor = ort.Tensor;

// use an async context to call onnxruntime functions.
async function main() {
    try {
        // create session and load model.onnx
        const session = await InferenceSession.create('./model.onnx');

        // prepare inputs
        const dataA = prepareDataA(); // Float32Array(12)
        const dataB = prepareDataB(); // Float32Array(12)
        const tensorA = new Tensor('float32', dataA, [3, 4]);
        const tensorB = new Tensor('float32', dataB, [4, 3]);

        // prepare feeds. use model input names as keys.
        const feeds = { a: tensorA, b: tensorB };

        // run options
        const option = createRunOptions();

        //
        // feed inputs and run
        //
        const results_02 = await session.run(feeds);
        const results_02_B = await session.run(feeds, option); // specify options

        //
        // run with specified names of fetches (outputs)
        //
        const results_03 = await session.run(feeds, ['c']);
        const results_03_B = await session.run(feeds, ['c'], option); // specify options

        //
        // run with fetches (outputs) as a nullable map
        //
        const results_04 = await session.run(feeds, { c: null });
        const results_04_B = await session.run(feeds, { c: null }, option); // specify options

        //
        // run with fetches (outputs) as a nullable map, with a pre-allocated tensor as value
        //
        const preAllocatedTensorC = new Tensor(new Float32Array(9), [3, 3]);
        const results_05 = await session.run(feeds, { c: preAllocatedTensorC });
        const results_05_B = await session.run(feeds, { c: preAllocatedTensorC }, option); // specify options

    } catch (e) {
        console.error(`failed to inference ONNX model: ${e}.`);
    }
}

main();

function prepareDataA() {
    return Float32Array.from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
}
function prepareDataB() {
    return Float32Array.from([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]);
}

function createRunOptions() {
    // run options: please refer to the other example for detailed usage of run options

    // set log severity level to verbose (0) for this inference run
    return { logSeverityLevel: 0 };
}
```
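A hypothetical follow-on (not part of this commit) showing how the results map returned by `run()` can be consumed; `c` is the output name of the bundled MatMul model:

```js
const ort = require('onnxruntime-node');

async function readOutputs() {
    const session = await ort.InferenceSession.create('./model.onnx');
    const a = new ort.Tensor('float32', Float32Array.from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), [3, 4]);
    const b = new ort.Tensor('float32', Float32Array.from([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]), [4, 3]);

    const results = await session.run({ a, b });
    const c = results.c; // output tensors are keyed by output name
    console.log(c.dims); // [3, 3]
    console.log(c.data); // Float32Array(9) holding the MatMul result
}

readOutputs();
```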

Diff for: js/api-usage_inference-session/model.onnx (+16)

(Binary ONNX model, not human-readable. The readable fragments indicate a single `MatMul` node — graph `test_matmul_2d` — taking inputs `a` and `b` and producing output `c`.)
Diff for: js/api-usage_inference-session/package.json (+10)

```json
{
  "name": "api-usage_inference-session",
  "private": true,
  "version": "1.0.0",
  "description": "This example is a demonstration of basic usage of InferenceSession.",
  "main": "index.js",
  "dependencies": {
    "onnxruntime-node": "^1.8.0"
  }
}
```

Diff for: js/api-usage_tensor/README.md (+16)

````md
# API usage - Create Tensor

## Summary

This example is a demonstration of basic usage of `Tensor`.

- `tensor-create.js`: In this example, we create tensors in different ways.
- `tensor-properties.js`: In this example, we get tensor properties from a `Tensor` object.

## Usage

```sh
npm install
node ./tensor-create.js
node ./tensor-properties.js
```
````
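`tensor-properties.js` itself does not appear in this excerpt; a minimal hypothetical sketch of the kind of properties it refers to (the tensor type and shape here are assumptions):

```js
const ort = require('onnxruntime-node');

// hypothetical sketch: reading basic properties of a Tensor.
const t = new ort.Tensor('float32', new Float32Array(6), [2, 3]);
console.log(t.type);        // 'float32'
console.log(t.dims);        // [2, 3]
console.log(t.data);        // Float32Array(6)
console.log(t.data.length); // 6 elements
```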

Diff for: js/api-usage_tensor/package.json (+9)

```json
{
  "name": "api-usage_create-tensor",
  "private": true,
  "version": "1.0.0",
  "description": "This example is a demonstration of basic usage of Tensor.",
  "dependencies": {
    "onnxruntime-node": "^1.8.0"
  }
}
```

Diff for: js/api-usage_tensor/tensor-create.js (+68)

```js
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.

const ort = require('onnxruntime-node');

// the following code also works for onnxruntime-web.

const Tensor = ort.Tensor;

//
// create a [2x3x4] float tensor
//
const buffer01 = new Float32Array(24);
buffer01[0] = 0.1; // fill buffer data
const tensor01 = new Tensor('float32', buffer01, [2, 3, 4]);
// type 'float32' can be omitted and the type is inferred from the data
const tensor01_B = new Tensor(buffer01, [2, 3, 4]);

//
// create a [1x2] boolean tensor
//
const buffer02 = new Uint8Array(2);
buffer02[0] = 1; // true
buffer02[1] = 0; // false
const tensor02 = new Tensor('bool', buffer02, [1, 2]); // type 'bool' cannot be omitted, as both 'bool' and 'uint8' use Uint8Array.

//
// create a scalar float64 tensor
//
const tensor03 = new Tensor(new Float64Array(1), []);
tensor03.data[0] = 1.0; // setting data after the tensor is created is allowed

//
// create a one-dimensional tensor
//
const tensor04 = new Tensor(new Float32Array(100), [100]);
const tensor04_B = new Tensor(new Float32Array(100)); // dims can be omitted for a 1-D tensor. tensor04_B.dims = [100]

//
// create a [1x2] string tensor
//
const tensor05 = new Tensor('string', ['a', 'b'], [1, 2]);
const tensor05_B = new Tensor(['a', 'b'], [1, 2]); // type 'string' can be omitted

//
// !!! BAD USAGES !!!
// the following are bad usages that may cause an error to be thrown. try not to make these mistakes.
//

// create from a mismatched TypedArray
try {
    const tensor = new Tensor('float64', new Float32Array(100)); // 'float64' must be used with Float64Array as data.
} catch { }

// bad dimension (negative value)
try {
    const tensor = new Tensor(new Float32Array(100), [1, 2, -1]); // negative dims are not allowed.
} catch { }

// size mismatch (scalar size should be 1)
try {
    const tensor = new Tensor(new Float32Array(0), []);
} catch { }

// size mismatch (5 * 6 != 40)
try {
    const tensor = new Tensor(new Float32Array(40), [5, 6]);
} catch { }
```