
SkidSec WebShell

Server Address : 172.31.38.4

Web Server : Apache/2.4.58 (Ubuntu)

Uname : Linux ip-172-31-38-4 6.14.0-1017-aws #17~24.04.1-Ubuntu SMP Wed Nov 5 10:48:17 UTC 2025 x86_64

PHP Version : 7.4.33

Current Path : /lib/python3/dist-packages/pygments/lexers/

Current File : /lib/python3/dist-packages/pygments/lexers/robotframework.py
"""
    pygments.lexers.robotframework
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexer for Robot Framework.

    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

#  Copyright 2012 Nokia Siemens Networks Oyj
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import re

from pygments.lexer import Lexer
from pygments.token import Token

__all__ = ['RobotFrameworkLexer']


HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error


def normalize(string, remove=''):
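    """Lower-case ``string`` and drop spaces plus any characters in
    ``remove``, e.g. normalize('*** Test Cases ***', '*') == 'testcases'."""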
    string = string.lower()
    for char in remove + ' ':
        if char in string:
            string = string.replace(char, '')
    return string


class RobotFrameworkLexer(Lexer):
    """
    For Robot Framework test data.

    Supports both space- and pipe-separated plain text formats.

    .. versionadded:: 1.6
    """
    name = 'RobotFramework'
    url = 'http://robotframework.org'
    aliases = ['robotframework']
    filenames = ['*.robot', '*.resource']
    mimetypes = ['text/x-robotframework']

    def __init__(self, **options):
        options['tabsize'] = 2
        options['encoding'] = 'UTF-8'
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        row_tokenizer = RowTokenizer()
        var_tokenizer = VariableTokenizer()
        index = 0
        for row in text.splitlines():
            for value, token in row_tokenizer.tokenize(row):
                for value, token in var_tokenizer.tokenize(value, token):
                    if value:
                        yield index, token, str(value)
                        index += len(value)

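# A minimal usage sketch (hedged: assumes Pygments is installed and this
# module is importable as usual), showing the (index, token, value) triples
# the lexer yields for a small space-separated test file:
#
#     from pygments.lexers.robotframework import RobotFrameworkLexer
#
#     SAMPLE = ('*** Test Cases ***\n'
#               'Example\n'
#               '    Log    Hello, world!\n')
#     for index, token, value in RobotFrameworkLexer().get_tokens_unprocessed(SAMPLE):
#         print(index, token, repr(value))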

class VariableTokenizer:
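    """Re-tokenizes values that contain variable markers such as ``${var}``,
    emitting SYNTAX/VARIABLE tokens for the variable parts."""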

    def tokenize(self, string, token):
        var = VariableSplitter(string, identifiers='$@%&')
        if var.start < 0 or token in (COMMENT, ERROR):
            yield string, token
            return
        for value, token in self._tokenize(var, string, token):
            if value:
                yield value, token

    def _tokenize(self, var, string, orig_token):
        before = string[:var.start]
        yield before, orig_token
        yield var.identifier + '{', SYNTAX
        yield from self.tokenize(var.base, VARIABLE)
        yield '}', SYNTAX
        if var.index is not None:
            yield '[', SYNTAX
            yield from self.tokenize(var.index, VARIABLE)
            yield ']', SYNTAX
        yield from self.tokenize(string[var.end:], orig_token)


class RowTokenizer:
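    """Tokenizes rows one at a time, switching the active table when a
    ``*** Section ***`` header row is seen."""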

    def __init__(self):
        self._table = UnknownTable()
        self._splitter = RowSplitter()
        testcases = TestCaseTable()
        settings = SettingTable(testcases.set_default_template)
        variables = VariableTable()
        keywords = KeywordTable()
        self._tables = {'settings': settings, 'setting': settings,
                        'metadata': settings,
                        'variables': variables, 'variable': variables,
                        'testcases': testcases, 'testcase': testcases,
                        'tasks': testcases, 'task': testcases,
                        'keywords': keywords, 'keyword': keywords,
                        'userkeywords': keywords, 'userkeyword': keywords}

    def tokenize(self, row):
        commented = False
        heading = False
        for index, value in enumerate(self._splitter.split(row)):
            # First value, and every second after that, is a separator.
            index, separator = divmod(index-1, 2)
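            # divmod maps splitter output positions 1, 3, 5, ... to cell
            # indexes 0, 1, 2, ...; a remainder of 1 marks a separator.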
            if value.startswith('#'):
                commented = True
            elif index == 0 and value.startswith('*'):
                self._table = self._start_table(value)
                heading = True
            yield from self._tokenize(value, index, commented,
                                      separator, heading)
        self._table.end_row()

    def _start_table(self, header):
        name = normalize(header, remove='*')
        return self._tables.get(name, UnknownTable())

    def _tokenize(self, value, index, commented, separator, heading):
        if commented:
            yield value, COMMENT
        elif separator:
            yield value, SEPARATOR
        elif heading:
            yield value, HEADING
        else:
            yield from self._table.tokenize(value, index)


class RowSplitter:
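    """Splits a row into alternating cell and separator strings, using either
    runs of two or more spaces or pipe characters as separators."""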
    _space_splitter = re.compile('( {2,})')
    _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')

    def split(self, row):
        splitter = (self._split_from_pipes if row.startswith('| ')
                    else self._split_from_spaces)
        yield from splitter(row)
        yield '\n'

    def _split_from_spaces(self, row):
        yield ''  # Start with a pseudo-separator, mirroring the pipe format.
        yield from self._space_splitter.split(row)

    def _split_from_pipes(self, row):
        _, separator, rest = self._pipe_splitter.split(row, 1)
        yield separator
        while self._pipe_splitter.search(rest):
            cell, separator, rest = self._pipe_splitter.split(rest, 1)
            yield cell
            yield separator
        yield rest

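# Illustrative splits (hedged; worked out from the regexes above):
#
#     list(RowSplitter().split('Log    Hello'))
#     # -> ['', 'Log', '    ', 'Hello', '\n']
#     list(RowSplitter().split('| Log | Hello |'))
#     # -> ['| ', 'Log', ' | ', 'Hello', ' |', '', '\n']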

class Tokenizer:
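    """Base tokenizer mapping the Nth cell on a row to the Nth entry of
    ``_tokens`` (the last entry repeats for any remaining cells)."""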
    _tokens = None

    def __init__(self):
        self._index = 0

    def tokenize(self, value):
        values_and_tokens = self._tokenize(value, self._index)
        self._index += 1
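        # _tokenize may return either a bare token type or a list of
        # (value, token) pairs; a bare _TokenType means the whole value
        # gets that single token.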
        if isinstance(values_and_tokens, type(Token)):
            values_and_tokens = [(value, values_and_tokens)]
        return values_and_tokens

    def _tokenize(self, value, index):
        index = min(index, len(self._tokens) - 1)
        return self._tokens[index]

    def _is_assign(self, value):
        if value.endswith('='):
            value = value[:-1].strip()
        var = VariableSplitter(value, identifiers='$@&')
        return var.start == 0 and var.end == len(value)


class Comment(Tokenizer):
    _tokens = (COMMENT,)


class Setting(Tokenizer):
    _tokens = (SETTING, ARGUMENT)
    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
                         'suitepostcondition', 'testsetup', 'tasksetup',
                         'testprecondition', 'testteardown', 'taskteardown',
                         'testpostcondition', 'testtemplate', 'tasktemplate')
    _import_settings = ('library', 'resource', 'variables')
    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
                       'testtimeout', 'tasktimeout')
    _custom_tokenizer = None

    def __init__(self, template_setter=None):
        Tokenizer.__init__(self)
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 1 and self._template_setter:
            self._template_setter(value)
        if index == 0:
            normalized = normalize(value)
            if normalized in self._keyword_settings:
                self._custom_tokenizer = KeywordCall(support_assign=False)
            elif normalized in self._import_settings:
                self._custom_tokenizer = ImportSetting()
            elif normalized not in self._other_settings:
                return ERROR
        elif self._custom_tokenizer:
            return self._custom_tokenizer.tokenize(value)
        return Tokenizer._tokenize(self, value, index)


class ImportSetting(Tokenizer):
    _tokens = (IMPORT, ARGUMENT)


class TestCaseSetting(Setting):
    _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
                         'template')
    _import_settings = ()
    _other_settings = ('documentation', 'tags', 'timeout')

    def _tokenize(self, value, index):
        if index == 0:
            token = Setting._tokenize(self, value[1:-1], index)
            return [('[', SYNTAX), (value[1:-1], token), (']', SYNTAX)]
        return Setting._tokenize(self, value, index)


class KeywordSetting(TestCaseSetting):
    _keyword_settings = ('teardown',)
    _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')


class Variable(Tokenizer):
    _tokens = (SYNTAX, ARGUMENT)

    def _tokenize(self, value, index):
        if index == 0 and not self._is_assign(value):
            return ERROR
        return Tokenizer._tokenize(self, value, index)


class KeywordCall(Tokenizer):
    _tokens = (KEYWORD, ARGUMENT)

    def __init__(self, support_assign=True):
        Tokenizer.__init__(self)
        self._keyword_found = not support_assign
        self._assigns = 0

    def _tokenize(self, value, index):
        if not self._keyword_found and self._is_assign(value):
            self._assigns += 1
            return SYNTAX  # VariableTokenizer tokenizes this later.
        if self._keyword_found:
            return Tokenizer._tokenize(self, value, index - self._assigns)
        self._keyword_found = True
        return GherkinTokenizer().tokenize(value, KEYWORD)


class GherkinTokenizer:
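    """Splits off a leading Given/When/Then/And/But prefix so it can be
    emphasized separately from the keyword name."""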
    _gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE)

    def tokenize(self, value, token):
        match = self._gherkin_prefix.match(value)
        if not match:
            return [(value, token)]
        end = match.end()
        return [(value[:end], GHERKIN), (value[end:], token)]


class TemplatedKeywordCall(Tokenizer):
    _tokens = (ARGUMENT,)


class ForLoop(Tokenizer):

    def __init__(self):
        Tokenizer.__init__(self)
        self._in_arguments = False

    def _tokenize(self, value, index):
        token = ARGUMENT if self._in_arguments else SYNTAX
        if value.upper() in ('IN', 'IN RANGE'):
            self._in_arguments = True
        return token


class _Table:
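    """Base class for table tokenizers; a lone ``...`` cell continues the
    previous row using that row's tokenizer."""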
    _tokenizer_class = None

    def __init__(self, prev_tokenizer=None):
        self._tokenizer = self._tokenizer_class()
        self._prev_tokenizer = prev_tokenizer
        self._prev_values_on_row = []

    def tokenize(self, value, index):
        if self._continues(value, index):
            self._tokenizer = self._prev_tokenizer
            yield value, SYNTAX
        else:
            yield from self._tokenize(value, index)
        self._prev_values_on_row.append(value)

    def _continues(self, value, index):
        return value == '...' and all(self._is_empty(t)
                                      for t in self._prev_values_on_row)

    def _is_empty(self, value):
        return value in ('', '\\')

    def _tokenize(self, value, index):
        return self._tokenizer.tokenize(value)

    def end_row(self):
        self.__init__(prev_tokenizer=self._tokenizer)


class UnknownTable(_Table):
    _tokenizer_class = Comment

    def _continues(self, value, index):
        return False


class VariableTable(_Table):
    _tokenizer_class = Variable


class SettingTable(_Table):
    _tokenizer_class = Setting

    def __init__(self, template_setter, prev_tokenizer=None):
        _Table.__init__(self, prev_tokenizer)
        self._template_setter = template_setter

    def _tokenize(self, value, index):
        if index == 0 and normalize(value) == 'testtemplate':
            self._tokenizer = Setting(self._template_setter)
        return _Table._tokenize(self, value, index)

    def end_row(self):
        self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)


class TestCaseTable(_Table):
    _setting_class = TestCaseSetting
    _test_template = None
    _default_template = None

    @property
    def _tokenizer_class(self):
        if self._test_template or (self._default_template and
                                   self._test_template is not False):
            return TemplatedKeywordCall
        return KeywordCall

    def _continues(self, value, index):
        return index > 0 and _Table._continues(self, value, index)

    def _tokenize(self, value, index):
        if index == 0:
            if value:
                self._test_template = None
            return GherkinTokenizer().tokenize(value, TC_KW_NAME)
        if index == 1 and self._is_setting(value):
            if self._is_template(value):
                self._test_template = False
                self._tokenizer = self._setting_class(self.set_test_template)
            else:
                self._tokenizer = self._setting_class()
        if index == 1 and self._is_for_loop(value):
            self._tokenizer = ForLoop()
        if index == 1 and self._is_empty(value):
            return [(value, SYNTAX)]
        return _Table._tokenize(self, value, index)

    def _is_setting(self, value):
        return value.startswith('[') and value.endswith(']')

    def _is_template(self, value):
        return normalize(value) == '[template]'

    def _is_for_loop(self, value):
        return value.startswith(':') and normalize(value, remove=':') == 'for'

    def set_test_template(self, template):
        self._test_template = self._is_template_set(template)

    def set_default_template(self, template):
        self._default_template = self._is_template_set(template)

    def _is_template_set(self, template):
        return normalize(template) not in ('', '\\', 'none', '${empty}')


class KeywordTable(TestCaseTable):
    _tokenizer_class = KeywordCall
    _setting_class = KeywordSetting

    def _is_template(self, value):
        return False


# The following code is copied directly from Robot Framework 2.7.5.

class VariableSplitter:
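    """Finds the first variable reference in ``string`` (e.g. ``${var}`` or
    ``@{list}[0]``) using a character-by-character state machine."""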

    def __init__(self, string, identifiers):
        self.identifier = None
        self.base = None
        self.index = None
        self.start = -1
        self.end = -1
        self._identifiers = identifiers
        self._may_have_internal_variables = False
        try:
            self._split(string)
        except ValueError:
            pass
        else:
            self._finalize()

    def get_replaced_base(self, variables):
        if self._may_have_internal_variables:
            return variables.replace_string(self.base)
        return self.base

    def _finalize(self):
        self.identifier = self._variable_chars[0]
        self.base = ''.join(self._variable_chars[2:-1])
        self.end = self.start + len(self._variable_chars)
        if self._has_list_or_dict_variable_index():
            self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
            self.end += len(self._list_and_dict_variable_index_chars)

    def _has_list_or_dict_variable_index(self):
        return (self._list_and_dict_variable_index_chars
                and self._list_and_dict_variable_index_chars[-1] == ']')

    def _split(self, string):
        start_index, max_index = self._find_variable(string)
        self.start = start_index
        self._open_curly = 1
        self._state = self._variable_state
        self._variable_chars = [string[start_index], '{']
        self._list_and_dict_variable_index_chars = []
        self._string = string
        start_index += 2
        for index, char in enumerate(string[start_index:], start=start_index):
            try:
                self._state(char, index)
            except StopIteration:
                return
            if index == max_index and not self._scanning_list_variable_index():
                return

    def _scanning_list_variable_index(self):
        return self._state in [self._waiting_list_variable_index_state,
                               self._list_variable_index_state]

    def _find_variable(self, string):
        max_end_index = string.rfind('}')
        if max_end_index == -1:
            raise ValueError('No variable end found')
        if self._is_escaped(string, max_end_index):
            return self._find_variable(string[:max_end_index])
        start_index = self._find_start_index(string, 1, max_end_index)
        if start_index == -1:
            raise ValueError('No variable start found')
        return start_index, max_end_index

    def _find_start_index(self, string, start, end):
        index = string.find('{', start, end) - 1
        if index < 0:
            return -1
        if self._start_index_is_ok(string, index):
            return index
        return self._find_start_index(string, index+2, end)

    def _start_index_is_ok(self, string, index):
        return (string[index] in self._identifiers
                and not self._is_escaped(string, index))

    def _is_escaped(self, string, index):
        escaped = False
        while index > 0 and string[index-1] == '\\':
            index -= 1
            escaped = not escaped
        return escaped

    def _variable_state(self, char, index):
        self._variable_chars.append(char)
        if char == '}' and not self._is_escaped(self._string, index):
            self._open_curly -= 1
            if self._open_curly == 0:
                if not self._is_list_or_dict_variable():
                    raise StopIteration
                self._state = self._waiting_list_variable_index_state
        elif char in self._identifiers:
            self._state = self._internal_variable_start_state

    def _is_list_or_dict_variable(self):
        return self._variable_chars[0] in ('@', '&')

    def _internal_variable_start_state(self, char, index):
        self._state = self._variable_state
        if char == '{':
            self._variable_chars.append(char)
            self._open_curly += 1
            self._may_have_internal_variables = True
        else:
            self._variable_state(char, index)

    def _waiting_list_variable_index_state(self, char, index):
        if char != '[':
            raise StopIteration
        self._list_and_dict_variable_index_chars.append(char)
        self._state = self._list_variable_index_state

    def _list_variable_index_state(self, char, index):
        self._list_and_dict_variable_index_chars.append(char)
        if char == ']':
            raise StopIteration