tokenize.py
  1. """Tokenization help for Python programs.
  2. tokenize(readline) is a generator that breaks a stream of bytes into
  3. Python tokens. It decodes the bytes according to PEP-0263 for
  4. determining source file encoding.
  5. It accepts a readline-like method which is called repeatedly to get the
  6. next line of input (or b"" for EOF). It generates 5-tuples with these
  7. members:
  8. the token type (see token.py)
  9. the token (a string)
  10. the starting (row, column) indices of the token (a 2-tuple of ints)
  11. the ending (row, column) indices of the token (a 2-tuple of ints)
  12. the original line (string)
  13. It is designed to match the working of the Python tokenizer exactly, except
  14. that it produces COMMENT tokens for comments and gives type OP for all
  15. operators. Additionally, all token lists start with an ENCODING token
  16. which tells you which encoding was used to decode the bytes stream.
  17. """
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["tokenize", "detect_encoding",
                           "untokenize", "TokenInfo"]
del token
EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '...': ELLIPSIS,
    '->':  RARROW,
    '@':   AT,
    '@=':  ATEQUAL,
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
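
# --- Added example (not part of the original module): a minimal sketch of
# how .type and .exact_type differ.  tokenize() labels every operator with
# the generic OP type; .exact_type maps the token string back through
# EXACT_TOKEN_TYPES to recover the specific code.
def _demo_exact_type():
    from io import BytesIO
    toks = list(tokenize(BytesIO(b"1 + 2\n").readline))
    plus = next(t for t in toks if t.string == '+')
    assert plus.type == OP
    assert plus.exact_type == PLUS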
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
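
# --- Added example (not part of the original module): a sketch showing that
# the Number pattern accepts PEP 515 underscore separators in each literal
# kind.  fullmatch() is used purely for demonstration; the tokenizer itself
# uses match().
def _demo_number_pattern():
    for literal in ('0x_FF', '1_000_000', '3.14_15e1_0', '1_0j'):
        assert re.fullmatch(Number, literal)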
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. Only contain the lower case versions,
    #  and don't contain any permutations (include 'fr', but not
    #  'rf'). The various permutations will be generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result
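
# --- Added example (not part of the original module): every case and order
# permutation of the listed prefixes is generated, so 'Rb', 'bR', and 'FR'
# are all accepted even though only 'br' and 'fr' appear in the list above.
def _demo_string_prefixes():
    prefixes = _all_string_prefixes()
    assert {'', 'b', 'rb', 'Rb', 'bR', 'FR'} <= prefixes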
def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
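
# --- Added example (not part of the original module): a sketch of the
# longest-first ordering described above.  Because the '**=?' alternative
# precedes the generic single-character one, '**=' is matched as a single
# operator token rather than as '*' followed by '*' followed by '='.
def _demo_operator_ordering():
    assert _compile(Operator).match('**=').group(0) == '**='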
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
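
# --- Added example (not part of the original module): the sets hold the
# prefix plus the opening quote(s), which is how the tokenizer recognizes
# the start of a string from the first one to three characters of a token.
def _demo_quote_sets():
    assert "rb'" in single_quoted
    assert 'f"""' in triple_quoted
    assert "'''" in triple_quoted    # the empty prefix is included too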
tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
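
# --- Added example (not part of the original module): the limited-input
# round trip from the docstring above, made concrete on a small buffer.
def _demo_untokenize_roundtrip():
    from io import BytesIO
    t1 = [tok[:2] for tok in tokenize(BytesIO(b"x = 1\n").readline)]
    newcode = untokenize(t1)
    t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
    assert t1 == t2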
def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
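
# --- Added example (not part of the original module): detect_encoding() on
# a buffer carrying a PEP 263 cookie.  It reads at most two lines and hands
# back the ones it consumed so the caller can replay them.
def _demo_detect_encoding():
    from io import BytesIO
    buf = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
    encoding, lines = detect_encoding(buf.readline)
    assert encoding == 'iso-8859-1'
    assert lines == [b"# -*- coding: latin-1 -*-\n"]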
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise
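
# --- Added example (not part of the original module; the temporary file is
# purely illustrative): open() detects the cookie before wrapping the raw
# stream, so the returned text file decodes with the declared encoding.
def _demo_open():
    import os, tempfile
    with tempfile.NamedTemporaryFile('wb', suffix='.py', delete=False) as f:
        f.write(b"# -*- coding: latin-1 -*-\ns = '\xe9'\n")
        path = f.name
    try:
        with open(path) as src:     # this module's open(), not the builtin
            assert src.encoding == 'iso-8859-1'
            src.read()
    finally:
        os.unlink(path)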
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
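
# --- Added example (not part of the original module): tokenizing a small
# in-memory buffer.  The first token is always ENCODING, as promised above.
def _demo_tokenize():
    from io import BytesIO
    toks = list(tokenize(BytesIO(b"def f():\n    pass\n").readline))
    assert toks[0].type == ENCODING and toks[0].string == 'utf-8'
    assert [t.type for t in toks[1:4]] == [NAME, NAME, OP]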
def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                                (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                        (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                #  they're in the single_quoted set. If so, they start
                #  a string.
                # We're using the first 3, because we're looking for
                #  "rb'" (for example) at the start of the token. If
                #  we switch to longer prefixes, this needs to be
                #  adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                #  triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        #  token. This is looking for the matching end
                        #  regex for the correct type of quote
                        #  character. So it's really looking for
                        #  endpats["'"] or endpats['"'], by trying to
                        #  skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
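
# --- Added example (not part of the original module): generate_tokens()
# accepts a str-based readline, so io.StringIO works directly; no ENCODING
# token is emitted in this mode.
def _demo_generate_tokens():
    import io
    toks = list(generate_tokens(io.StringIO("x = 1\n").readline))
    assert toks[0].type == NAME and toks[0].string == 'x'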
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()