Skip to content

Commit 0d12ae8

Browse files
JustinBeckwith authored and alexander-fenster committed
Enable prefer-const in the eslint config (#201)
1 parent f789569 commit 0d12ae8

File tree

5 files changed

+46
-46
lines changed

5 files changed

+46
-46
lines changed

vision/samples/detect.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -821,7 +821,7 @@ function detectPdfText(bucketName, fileName) {
821821
return operation.promise();
822822
})
823823
.then(filesResponse => {
824-
let destinationUri =
824+
const destinationUri =
825825
filesResponse[0].responses[0].outputConfig.gcsDestination.uri;
826826
console.log('Json saved to: ' + destinationUri);
827827
})

vision/samples/faceDetection.js

+12-12
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,13 @@
2020
// specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable and use
2121
// the project specified by the GCLOUD_PROJECT environment variable. See
2222
// https://googlecloudplatform.github.io/gcloud-node/#/docs/google-cloud/latest/guides/authentication
23-
let vision = require('@google-cloud/vision');
23+
const vision = require('@google-cloud/vision');
2424
// [END vision_face_detection_tutorial_imports]
2525
// [START vision_face_detection_tutorial_client]
2626
// Creates a client
27-
let client = new vision.ImageAnnotatorClient();
27+
const client = new vision.ImageAnnotatorClient();
2828

29-
let fs = require('fs');
29+
const fs = require('fs');
3030
// [END vision_face_detection_tutorial_client]
3131

3232
/**
@@ -40,7 +40,7 @@ function detectFaces(inputFile, callback) {
4040
.faceDetection(request)
4141
.then(results => {
4242
const faces = results[0].faceAnnotations;
43-
let numFaces = faces.length;
43+
const numFaces = faces.length;
4444
console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));
4545
callback(null, faces);
4646
})
@@ -61,12 +61,12 @@ function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
6161
return callback(err);
6262
}
6363

64-
let Image = Canvas.Image;
64+
const Image = Canvas.Image;
6565
// Open the original image into a canvas
66-
let img = new Image();
66+
const img = new Image();
6767
img.src = image;
68-
let canvas = new Canvas(img.width, img.height);
69-
let context = canvas.getContext('2d');
68+
const canvas = new Canvas(img.width, img.height);
69+
const context = canvas.getContext('2d');
7070
context.drawImage(img, 0, 0, img.width, img.height);
7171

7272
// Now draw boxes around all the faces
@@ -90,8 +90,8 @@ function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
9090

9191
// Write the result to a file
9292
console.log('Writing to file ' + outputFile);
93-
let writeStream = fs.createWriteStream(outputFile);
94-
let pngStream = canvas.pngStream();
93+
const writeStream = fs.createWriteStream(outputFile);
94+
const pngStream = canvas.pngStream();
9595

9696
pngStream.on('data', chunk => {
9797
writeStream.write(chunk);
@@ -131,7 +131,7 @@ if (module === require.main) {
131131
// eslint-disable-next-line no-process-exit
132132
process.exit(1);
133133
}
134-
let inputFile = process.argv[2];
135-
let outputFile = process.argv[3];
134+
const inputFile = process.argv[2];
135+
const outputFile = process.argv[3];
136136
exports.main(inputFile, outputFile, require('canvas'), console.log);
137137
}

vision/samples/productSearch/importProductSets.v1p3beta1.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ function importProductSets(projectId, location, gcsUri) {
5353
console.log('Processing done.');
5454
console.log('Results of the processing:');
5555

56-
for (let i in responses[0].statuses) {
56+
for (const i in responses[0].statuses) {
5757
console.log(
5858
'Status of processing ',
5959
i,

vision/samples/system-test/faceDetection.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ test.after.always(tools.restoreConsole);
5959

6060
test.cb(`should detect faces`, t => {
6161
let done = false;
62-
let timeout = setTimeout(() => {
62+
const timeout = setTimeout(() => {
6363
if (!done) {
6464
console.warn('Face detection timed out!');
6565
t.end();

vision/samples/textDetection.js

+31-31
Original file line numberDiff line numberDiff line change
@@ -15,27 +15,27 @@
1515

1616
'use strict';
1717

18-
let async = require('async');
19-
let fs = require('fs');
20-
let path = require('path');
18+
const async = require('async');
19+
const fs = require('fs');
20+
const path = require('path');
2121

2222
// By default, the client will authenticate using the service account file
2323
// specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable and use
2424
// the project specified by the GCLOUD_PROJECT environment variable. See
2525
// https://googlecloudplatform.github.io/gcloud-node/#/docs/google-cloud/latest/guides/authentication
26-
let vision = require('@google-cloud/vision');
27-
let natural = require('natural');
28-
let redis = require('redis');
26+
const vision = require('@google-cloud/vision');
27+
const natural = require('natural');
28+
const redis = require('redis');
2929

3030
// Instantiate a vision client
31-
let client = new vision.ImageAnnotatorClient();
31+
const client = new vision.ImageAnnotatorClient();
3232

3333
function Index() {
3434
// Connect to a redis server.
35-
let TOKEN_DB = 0;
36-
let DOCS_DB = 1;
37-
let PORT = process.env.REDIS_PORT || '6379';
38-
let HOST = process.env.REDIS_HOST || '127.0.0.1';
35+
const TOKEN_DB = 0;
36+
const DOCS_DB = 1;
37+
const PORT = process.env.REDIS_PORT || '6379';
38+
const HOST = process.env.REDIS_HOST || '127.0.0.1';
3939

4040
this.tokenClient = redis
4141
.createClient(PORT, HOST, {
@@ -59,12 +59,12 @@ Index.prototype.quit = function() {
5959
};
6060

6161
Index.prototype.add = function(filename, document, callback) {
62-
let self = this;
63-
let PUNCTUATION = ['.', ',', ':', ''];
64-
let tokenizer = new natural.WordTokenizer();
65-
let tokens = tokenizer.tokenize(document);
62+
const self = this;
63+
const PUNCTUATION = ['.', ',', ':', ''];
64+
const tokenizer = new natural.WordTokenizer();
65+
const tokens = tokenizer.tokenize(document);
6666

67-
let tasks = tokens
67+
const tasks = tokens
6868
.filter(function(token) {
6969
return PUNCTUATION.indexOf(token) === -1;
7070
})
@@ -82,8 +82,8 @@ Index.prototype.add = function(filename, document, callback) {
8282
};
8383

8484
Index.prototype.lookup = function(words, callback) {
85-
let self = this;
86-
let tasks = words.map(function(word) {
85+
const self = this;
86+
const tasks = words.map(function(word) {
8787
word = word.toLowerCase();
8888
return function(cb) {
8989
self.tokenClient.smembers(word, cb);
@@ -114,7 +114,7 @@ Index.prototype.setContainsNoText = function(filename, callback) {
114114
};
115115

116116
function lookup(words, callback) {
117-
let index = new Index();
117+
const index = new Index();
118118
index.lookup(words, function(err, hits) {
119119
index.quit();
120120
if (err) {
@@ -146,9 +146,9 @@ function extractDescriptions(filename, index, response, callback) {
146146

147147
function getTextFromFiles(index, inputFiles, callback) {
148148
// Make a call to the Vision API to detect text
149-
let requests = [];
149+
const requests = [];
150150
inputFiles.forEach(filename => {
151-
let request = {
151+
const request = {
152152
image: {content: fs.readFileSync(filename).toString('base64')},
153153
features: [{type: 'TEXT_DETECTION'}],
154154
};
@@ -157,11 +157,11 @@ function getTextFromFiles(index, inputFiles, callback) {
157157
client
158158
.batchAnnotateImages({requests: requests})
159159
.then(results => {
160-
let detections = results[0].responses;
161-
let textResponse = {};
162-
let tasks = [];
160+
const detections = results[0].responses;
161+
const textResponse = {};
162+
const tasks = [];
163163
inputFiles.forEach(function(filename, i) {
164-
let response = detections[i];
164+
const response = detections[i];
165165
if (response.error) {
166166
console.log('API Error for ' + filename, response.error);
167167
return;
@@ -186,7 +186,7 @@ function getTextFromFiles(index, inputFiles, callback) {
186186

187187
// Run the example
188188
function main(inputDir, callback) {
189-
let index = new Index();
189+
const index = new Index();
190190

191191
async.waterfall(
192192
[
@@ -198,7 +198,7 @@ function main(inputDir, callback) {
198198
function(files, cb) {
199199
async.parallel(
200200
files.map(function(file) {
201-
let filename = path.join(inputDir, file);
201+
const filename = path.join(inputDir, file);
202202
return function(cb) {
203203
fs.stat(filename, function(err, stats) {
204204
if (err) {
@@ -216,7 +216,7 @@ function main(inputDir, callback) {
216216
},
217217
// Figure out which files have already been processed
218218
function(allImageFiles, cb) {
219-
let tasks = allImageFiles
219+
const tasks = allImageFiles
220220
.filter(function(filename) {
221221
return filename;
222222
})
@@ -256,16 +256,16 @@ function main(inputDir, callback) {
256256
}
257257

258258
if (module === require.main) {
259-
let generalError =
259+
const generalError =
260260
'Usage: node textDetection <command> <arg> ...\n\n' +
261261
'\tCommands: analyze, lookup';
262262
if (process.argv.length < 3) {
263263
console.log(generalError);
264264
// eslint-disable-next-line no-process-exit
265265
process.exit(1);
266266
}
267-
let args = process.argv.slice(2);
268-
let command = args.shift();
267+
const args = process.argv.slice(2);
268+
const command = args.shift();
269269
if (command === 'analyze') {
270270
if (!args.length) {
271271
console.log('Usage: node textDetection analyze <dir>');

0 commit comments

Comments (0)