"""Lexical analysis of formal languages (i.e. code) using Pygments."""

from __future__ import annotations

__docformat__ = 'reStructuredText'

try:
    import pygments
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters.html import _get_ttype_class
    with_pygments = True
except ImportError:
    with_pygments = False

from docutils import ApplicationError

# Token-type names filtered from the list of class arguments:
unstyled_tokens = ['token',  # Token (base token type)
                   'text',   # Token.Text
                   '']       # short name for Token and Text


class LexerError(ApplicationError):
    pass


class Lexer:
    """Parse `code` lines and yield "classified" tokens.

    Arguments

      code       -- string of source code to parse,
      language   -- formal language the code is written in,
      tokennames -- either 'long', 'short', or 'none' (see below).

    Merge subsequent tokens of the same token-type.

    Iterating over an instance yields the tokens as ``(tokentype, value)``
    tuples. The value of `tokennames` configures the naming of the tokentype:

      'long':  downcased full token type name,
      'short': short name defined by pygments.token.STANDARD_TYPES
               (= class argument used in pygments html output),
      'none':  skip lexical analysis.
    """

    def __init__(self, code, language, tokennames='short') -> None:
        """
        Set up a lexical analyzer for `code` in `language`.
        """
        self.code = code
        self.language = language
        self.tokennames = tokennames
        self.lexer = None
        # Get a lexical analyzer for `language` (unless analysis is skipped):
        if language in ('', 'text') or tokennames == 'none':
            return
        if not with_pygments:
            raise LexerError('Cannot analyze code. '
                             'Pygments package not found.')
        try:
            self.lexer = get_lexer_by_name(self.language)
        except pygments.util.ClassNotFound:
            raise LexerError('Cannot analyze code. '
                             'No Pygments lexer found for "%s".' % language)

    def merge(self, tokens):
        """Merge subsequent tokens of same token-type.

           Also strip the final newline (added by pygments).
        """
        tokens = iter(tokens)
        (lasttype, lastval) = next(tokens)
        for ttype, value in tokens:
            if ttype is lasttype:
                lastval += value
            else:
                yield lasttype, lastval
                (lasttype, lastval) = (ttype, value)
        lastval = lastval.removesuffix('\n')
        if lastval:
            yield lasttype, lastval

    def __iter__(self):
        """Parse self.code and yield "classified" tokens.
        """
        if self.lexer is None:
            yield [], self.code
            return
        tokens = pygments.lex(self.code, self.lexer)
        for tokentype, value in self.merge(tokens):
            if self.tokennames == 'long':
                # long CSS class arguments: downcased full token type name
                classes = str(tokentype).lower().split('.')
            else:
                # short CSS class arguments (as in pygments HTML output)
                classes = [_get_ttype_class(tokentype)]
            classes = [cls for cls in classes if cls not in unstyled_tokens]
            yield classes, value
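
# Usage sketch (illustrative, assuming Pygments is installed; the sample
# snippet is hypothetical):
#
#     for classes, value in Lexer('print(1)\n', 'python', 'short'):
#         ...  # wrap `value` in an element carrying `classes`
#
# With ``tokennames='none'`` (or `language` set to '' or 'text') lexical
# analysis is skipped and the complete code string is yielded once, with an
# empty class list.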


class NumberLines:
    """Insert linenumber-tokens at the start of every code line.

    Arguments

       tokens    -- iterable of ``(classes, value)`` tuples
       startline -- first line number
       endline   -- last line number

    Iterating over an instance yields the tokens with a
    ``(['ln'], '<the line number>')`` token added for every code line.
    Multi-line tokens are split."""

    def __init__(self, tokens, startline, endline) -> None:
        self.tokens = tokens
        self.startline = startline
        # Pad line numbers to the width of `endline`,
        # e.g. endline == 100 gives the format string '%3d '.
        self.fmt_str = f'%{len(str(endline))}d '

    def __iter__(self):
        lineno = self.startline
        yield ['ln'], self.fmt_str % lineno
        for ttype, value in self.tokens:
            lines = value.split('\n')
            for line in lines[:-1]:
                yield ttype, line + '\n'
                lineno += 1
                yield ['ln'], self.fmt_str % lineno
            yield ttype, lines[-1]
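

# Minimal demonstration sketch (hypothetical; docutils does not ship this
# entry point): print the classified tokens for a small code sample, with
# line numbers added by NumberLines. Falls back to unclassified text if
# Pygments is unavailable.
if __name__ == '__main__':
    sample = 'def add(a, b):\n    return a + b\n'
    try:
        tokens = Lexer(sample, 'python', 'short')
    except LexerError as error:  # e.g. Pygments package not found
        print(error)
        tokens = Lexer(sample, 'python', 'none')  # skip lexical analysis
    for classes, value in NumberLines(tokens, 1, sample.count('\n')):
        print(classes, repr(value))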