@@ -56,7 +56,7 @@ static int
 tok_readline_raw(struct tok_state *tok)
 {
     do {
-        if (!tok_reserve_buf(tok, BUFSIZ)) {
+        if (!_PyLexer_tok_reserve_buf(tok, BUFSIZ)) {
             return 0;
         }
         int n_chars = (int)(tok->end - tok->inp);
@@ -86,7 +86,7 @@ tok_readline_recode(struct tok_state *tok) {
     if (line == NULL) {
         line = PyObject_CallNoArgs(tok->decoding_readline);
         if (line == NULL) {
-            error_ret(tok);
+            _PyTokenizer_error_ret(tok);
            goto error;
        }
    }
@@ -95,14 +95,14 @@ tok_readline_recode(struct tok_state *tok) {
     }
     buf = PyUnicode_AsUTF8AndSize(line, &buflen);
     if (buf == NULL) {
-        error_ret(tok);
+        _PyTokenizer_error_ret(tok);
         goto error;
     }
     // Make room for the null terminator *and* potentially
     // an extra newline character that we may need to artificially
     // add.
     size_t buffer_size = buflen + 2;
-    if (!tok_reserve_buf(tok, buffer_size)) {
+    if (!_PyLexer_tok_reserve_buf(tok, buffer_size)) {
         goto error;
     }
     memcpy(tok->inp, buf, buflen);
@@ -132,7 +132,7 @@ static void fp_ungetc(int c, struct tok_state *tok) {
 /* Set the readline function for TOK to a StreamReader's
    readline function. The StreamReader is named ENC.

-   This function is called from check_bom and check_coding_spec.
+   This function is called from _PyTokenizer_check_bom and _PyTokenizer_check_coding_spec.

    ENC is usually identical to the future value of tok->encoding,
    except for the (currently unsupported) case of UTF-16.
@@ -195,7 +195,7 @@ tok_underflow_interactive(struct tok_state *tok) {
     }
     char *newtok = PyOS_Readline(tok->fp ? tok->fp : stdin, stdout, tok->prompt);
     if (newtok != NULL) {
-        char *translated = translate_newlines(newtok, 0, 0, tok);
+        char *translated = _PyTokenizer_translate_newlines(newtok, 0, 0, tok);
         PyMem_Free(newtok);
         if (translated == NULL) {
             return 0;
@@ -206,7 +206,7 @@ tok_underflow_interactive(struct tok_state *tok) {
         /* Recode to UTF-8 */
         Py_ssize_t buflen;
         const char* buf;
-        PyObject *u = translate_into_utf8(newtok, tok->encoding);
+        PyObject *u = _PyTokenizer_translate_into_utf8(newtok, tok->encoding);
         PyMem_Free(newtok);
         if (u == NULL) {
             tok->done = E_DECODE;
@@ -240,10 +240,10 @@ tok_underflow_interactive(struct tok_state *tok) {
     }
     else if (tok->start != NULL) {
         Py_ssize_t cur_multi_line_start = tok->multi_line_start - tok->buf;
-        remember_fstring_buffers(tok);
+        _PyLexer_remember_fstring_buffers(tok);
         size_t size = strlen(newtok);
         ADVANCE_LINENO();
-        if (!tok_reserve_buf(tok, size + 1)) {
+        if (!_PyLexer_tok_reserve_buf(tok, size + 1)) {
             PyMem_Free(tok->buf);
             tok->buf = NULL;
             PyMem_Free(newtok);
@@ -253,18 +253,18 @@ tok_underflow_interactive(struct tok_state *tok) {
         PyMem_Free(newtok);
         tok->inp += size;
         tok->multi_line_start = tok->buf + cur_multi_line_start;
-        restore_fstring_buffers(tok);
+        _PyLexer_restore_fstring_buffers(tok);
     }
     else {
-        remember_fstring_buffers(tok);
+        _PyLexer_remember_fstring_buffers(tok);
         ADVANCE_LINENO();
         PyMem_Free(tok->buf);
         tok->buf = newtok;
         tok->cur = tok->buf;
         tok->line_start = tok->buf;
         tok->inp = strchr(tok->buf, '\0');
         tok->end = tok->inp + 1;
-        restore_fstring_buffers(tok);
+        _PyLexer_restore_fstring_buffers(tok);
     }
     if (tok->done != E_OK) {
         if (tok->prompt != NULL) {
@@ -273,7 +273,7 @@ tok_underflow_interactive(struct tok_state *tok) {
         return 0;
     }

-    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
+    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
         return 0;
     }
     return 1;
@@ -288,8 +288,8 @@ tok_underflow_file(struct tok_state *tok) {
         /* We have not yet determined the encoding.
            If an encoding is found, use the file-pointer
            reader functions from now on. */
-        if (!check_bom(fp_getc, fp_ungetc, fp_setreadl, tok)) {
-            error_ret(tok);
+        if (!_PyTokenizer_check_bom(fp_getc, fp_ungetc, fp_setreadl, tok)) {
+            _PyTokenizer_error_ret(tok);
             return 0;
         }
         assert(tok->decoding_state != STATE_INIT);
@@ -320,7 +320,7 @@ tok_underflow_file(struct tok_state *tok) {
         tok->implicit_newline = 1;
     }

-    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
+    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
         return 0;
     }

@@ -329,16 +329,16 @@ tok_underflow_file(struct tok_state *tok) {
         if (tok->lineno > 2) {
             tok->decoding_state = STATE_NORMAL;
         }
-        else if (!check_coding_spec(tok->cur, strlen(tok->cur),
+        else if (!_PyTokenizer_check_coding_spec(tok->cur, strlen(tok->cur),
                                     tok, fp_setreadl))
         {
             return 0;
         }
     }
     /* The default encoding is UTF-8, so make sure we don't have any
        non-UTF-8 sequences in it. */
-    if (!tok->encoding && !ensure_utf8(tok->cur, tok)) {
-        error_ret(tok);
+    if (!tok->encoding && !_PyTokenizer_ensure_utf8(tok->cur, tok)) {
+        _PyTokenizer_error_ret(tok);
         return 0;
     }
     assert(tok->done == E_OK);
@@ -350,7 +350,7 @@ struct tok_state *
 _PyTokenizer_FromFile(FILE *fp, const char* enc,
                       const char *ps1, const char *ps2)
 {
-    struct tok_state *tok = tok_new();
+    struct tok_state *tok = _PyTokenizer_tok_new();
     if (tok == NULL)
         return NULL;
     if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
@@ -370,7 +370,7 @@ _PyTokenizer_FromFile(FILE *fp, const char* enc,
     if (enc != NULL) {
         /* Must copy encoding declaration since it
            gets copied into the parse tree. */
-        tok->encoding = new_string(enc, strlen(enc), tok);
+        tok->encoding = _PyTokenizer_new_string(enc, strlen(enc), tok);
         if (!tok->encoding) {
             _PyTokenizer_Free(tok);
             return NULL;
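Every call site in this file switches from a file-local static helper to an exported, prefixed name: `_PyLexer_*` for the buffer and f-string helpers shared with the lexer, `_PyTokenizer_*` for the general tokenizer helpers. That implies matching extern declarations in the shared headers of the split sources. The sketch below shows the kind of prototypes the new names suggest; it is not copied from the real CPython headers, and the parameter types and grouping are assumptions inferred only from the call sites in this diff.

/* Sketch only: prototypes inferred from the call sites above; the actual
   headers may differ in types, grouping, and file placement. */
#include <Python.h>              /* Py_ssize_t (assumption) */

struct tok_state;                /* full definition lives in the tokenizer's state header */

/* Buffer and f-string helpers shared with the lexer (assumed signatures). */
int  _PyLexer_tok_reserve_buf(struct tok_state *tok, Py_ssize_t size);
void _PyLexer_remember_fstring_buffers(struct tok_state *tok);
void _PyLexer_restore_fstring_buffers(struct tok_state *tok);
int  _PyLexer_update_fstring_expr(struct tok_state *tok, char cur);

/* Tokenizer-level helpers (assumed signatures). */
struct tok_state *_PyTokenizer_tok_new(void);
char *_PyTokenizer_error_ret(struct tok_state *tok);
char *_PyTokenizer_new_string(const char *s, Py_ssize_t len, struct tok_state *tok);
char *_PyTokenizer_translate_newlines(const char *s, int exec_input,
                                      int preserve_crlf, struct tok_state *tok);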