"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(': LPAR, ')': RPAR, '[': LSQB, ']': RSQB, ':': COLON, ',': COMMA,
    ';': SEMI, '+': PLUS, '-': MINUS, '*': STAR, '/': SLASH, '|': VBAR,
    '&': AMPER, '<': LESS, '>': GREATER, '=': EQUAL, '.': DOT, '%': PERCENT,
    '{': LBRACE, '}': RBRACE, '==': EQEQUAL, '!=': NOTEQUAL, '<=': LESSEQUAL,
    '>=': GREATEREQUAL, '~': TILDE, '^': CIRCUMFLEX, '<<': LEFTSHIFT,
    '>>': RIGHTSHIFT, '**': DOUBLESTAR, '+=': PLUSEQUAL, '-=': MINEQUAL,
    '*=': STAREQUAL, '/=': SLASHEQUAL, '%=': PERCENTEQUAL, '&=': AMPEREQUAL,
    '|=': VBAREQUAL, '^=': CIRCUMFLEXEQUAL, '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL, '**=': DOUBLESTAREQUAL, '//': DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL, '@': AT, '@=': ATEQUAL,
}


class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type


def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Whitespace, comments and names.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

# Numeric literals (with PEP 515 underscore separators).
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
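
# Illustrative sketch (not part of the original module): the pattern fragments
# above are plain strings meant to be compiled with re.  A quick check that the
# assembled Number pattern matches the usual literal forms.  The helper name
# _number_regex_demo is hypothetical and exists only for this example.
def _number_regex_demo():
    number_re = re.compile(Number)
    for literal in ('0x_FF', '0b1010', '0o777', '1_000', '3.14e-2', '.5', '2j'):
        match = number_re.match(literal)
        # The whole literal should be consumed by the match.
        assert match is not None and match.end() == len(literal), literal
    return True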
def _all_string_prefixes():
    # The valid string prefixes.  Only contains the lower case versions and
    # no permutations ('fr' but not 'rf'); the permutations are generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    result = set([''])
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # Combine upper and lower case versions of each character.
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result

def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Since _all_string_prefixes includes the empty string, StringPrefix can be
# the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Longest operators first so that leftmost-then-longest matching picks up
# '==' rather than two '=' tokens, and so on.
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex matching
# the remainder of that string.  _prefix can be the empty string.
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# All single- and triple-quoted string prefixes, including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)

tabsize = 8


class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        # Degraded mode used when only (type, string) pairs are available.
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            # Insert a space between two consecutive strings.
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.

    It returns a bytes object, encoded using the ENCODING token, which
    is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence with
    at least two elements, a token number and token value.  If only two
    tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only the first 12 characters matter.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
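
# Illustrative sketch (not part of the original module): exercises the
# "limited input" round-trip invariant quoted in untokenize()'s docstring.
# The helper name is hypothetical; `tokenize` is defined further below in
# this module and is only looked up when the demo is called, and the demo
# assumes the complete tokenizer implementation.
def _untokenize_roundtrip_demo(source=b"x = 3 + 4\n"):
    from io import BytesIO
    t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
    newcode = untokenize(t1)
    t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
    assert t1 == t2
    return newcode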
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Either the line is an encoding declaration (pure ASCII) or it
            # must be UTF-8 per the default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter.
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename, encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter.
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token which tells you
    which encoding was used to decode the bytes stream.
    """
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)


def _tokenize(readline, encoding):
    # The tokenizer loop proper: yields an ENCODING token first, then walks
    # the input line by line, tracking indentation (INDENT/DEDENT), paren
    # nesting, continuation lines, single- and triple-quoted strings and the
    # async/await keywords, and finally yields the closing NL/DEDENT and
    # ENDMARKER tokens.  It raises TokenError("EOF in multi-line string") or
    # ("EOF in multi-line statement") for unterminated constructs and
    # IndentationError("unindent does not match any outer indentation level")
    # for bad dedents.
    # (full body not recoverable from the garbled dump)
    ...
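
# Illustrative sketch (not part of the original module): feeding tokenize()
# and detect_encoding() from an in-memory byte stream.  The helper name is
# hypothetical, and it assumes the complete _tokenize() implementation whose
# body is elided above.
def _tokenize_bytes_demo():
    from io import BytesIO
    source = b"# -*- coding: utf-8 -*-\nvalue = 1 + 2\n"
    encoding, first_lines = detect_encoding(BytesIO(source).readline)
    assert encoding == 'utf-8'
    for tok in tokenize(BytesIO(source).readline):
        # tok is a TokenInfo 5-tuple; exact_type refines generic OP tokens.
        print(tok_name[tok.exact_type], tok.string)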
def generate_tokens(readline):
    return _tokenize(readline, None)

def main():
    import argparse

    # Helper error handling routines.
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options.
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input.
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization.
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()
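
# Illustrative sketch (not part of the original module): generate_tokens() is
# the str-based twin of tokenize(); it takes a readline returning str and emits
# no ENCODING token.  The helper name is hypothetical and assumes the complete
# _tokenize() implementation.  The same listing is available from the command
# line via `python -m tokenize [-e|--exact] filename.py`, as wired up in main().
def _generate_tokens_demo():
    from io import StringIO
    source = "def add(a, b):\n    return a + b\n"
    for tok in generate_tokens(StringIO(source).readline):
        print("%-10s %r" % (tok_name[tok.exact_type], tok.string))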