# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------

__version__ = '3.11'
__tabversion__ = '3.10'

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')


# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s


# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    def __str__(self):
        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)

    def __repr__(self):
        return str(self)


# This object is a stand-in for a logging object created by the
# logging module.

class PlyLogger(object):
    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self.f.write((msg % args) + '\n')

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: ' + (msg % args) + '\n')

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: ' + (msg % args) + '\n')

    info = critical
    debug = critical


# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self


# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
#    input()  - Store a new string in the lexer
#    token()  - Get the next token
#    clone()  - Clone the lexer
#
#    lineno   - Current line number
#    lexpos   - Current position in the input string
# -----------------------------------------------------------------------------
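#
# A minimal usage sketch (illustrative only; it assumes a module that has
# already defined its token rules and built a lexer with lex(), as in the
# example following the lex() function below):
#
#     lexer.input('x = 3 + 4')        # Store a string to tokenize
#     while True:
#         tok = lexer.token()         # Returns LexToken objects, None at end of input
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)
#
# Lexer objects are also iterable, so 'for tok in lexer:' works as well.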

 

class Lexer:
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode

    def clone(self, object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion = %s\n' % repr(__tabversion__))
            tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
            tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags)))
            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))

            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore  # This is here in case there was a state change
                    break

                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next

# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
    return getattr(func, 'regex', func.__doc__)

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    f = sys._getframe(levels)
    ldict = f.f_globals.copy()
    if f.f_globals != f.f_locals:
        ldict.update(f.f_locals)
    return ldict

# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
    result = []
    for f, name in zip(funclist, namelist):
        if f and f[0]:
            result.append((name, f[1]))
        else:
            result.append(f)
    return result

# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
    result = []
    for n in namelist:
        if n and n[0]:
            result.append((fdict[n[0]], n[1]))
        else:
            result.append(n)
    return result

# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
    if not relist:
        return []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]

        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find('ignore_') > 0:
                    lexindexfunc[i] = (None, None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        m = int(len(relist)/2)
        if m == 0:
            m = 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)

# -----------------------------------------------------------------------------
# def _statetoken(s, names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states, tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo', 'bar'), 'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
    parts = s.split('_')
    for i, part in enumerate(parts[1:], 1):
        if part not in names and part != 'ANY':
            break

    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)

    if 'ANY' in states:
        states = tuple(names)

    tokenname = '_'.join(parts[i:])
    return (states, tokenname)

# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True

        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype
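
    # Example (illustrative sketch, not part of this module): a rule file can
    # declare additional lexing states like this, each tagged 'inclusive' or
    # 'exclusive':
    #
    #     states = (
    #         ('ccode', 'exclusive'),
    #         ('macro', 'inclusive'),
    #     )
    #
    # Rules are then attached to a state through their names, e.g. t_ccode_STRING,
    # which _statetoken() above splits into (('ccode',), 'STRING').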

 

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)

    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}  # Mapping of symbols to token names
        self.funcsym = {}   # Symbols defined as functions
        self.strsym = {}    # Symbols defined as strings
        self.ignore = {}    # Ignore strings by state
        self.errorf = {}    # Error functions by state
        self.eoff = {}      # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                        if '\\' in t:
                            self.log.warning("%s contains a literal backslash '\\'", f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                    if c.match(''):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file. This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------

    def validate_module(self, module):
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            return

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1

# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):

    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object:
        module = object

    # Build the dictionary of symbols defined in the module
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is part of a package.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions

    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
            if lextab in sys.modules:
                del sys.modules[lextab]
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
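
# -----------------------------------------------------------------------------
# Example (an illustrative sketch, not part of this module): a specification
# module that lex() can build from. The token names and patterns below are
# assumptions chosen for illustration.
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS', 'MINUS')
#     literals = '()'
#
#     t_PLUS = r'\+'                  # Simple tokens defined as strings
#     t_MINUS = r'-'
#     t_ignore = ' \t'                # Characters skipped between tokens
#
#     def t_NUMBER(t):                # Function rules take the regex from the docstring
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_newline(t):               # Track line numbers; returns no token
#         r'\n+'
#         t.lexer.lineno += len(t.value)
#
#     def t_error(t):                 # Called on illegal characters
#         print("Illegal character %r" % t.value[0])
#         t.lexer.skip(1)
#
#     lexer = lex.lex()
# -----------------------------------------------------------------------------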

 

# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------

def runmain(lexer=None, data=None):
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------

def TOKEN(r):
    def set_regex(f):
        if hasattr(r, '__call__'):
            f.regex = _get_regex(r)
        else:
            f.regex = r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
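
# Example (illustrative sketch): @TOKEN lets a rule's regex be built
# dynamically instead of living in the docstring. The 'digit' and 'number'
# names below are assumptions for illustration.
#
#     digit = r'[0-9]'
#     number = digit + r'+'
#
#     @TOKEN(number)
#     def t_NUMBER(t):
#         t.value = int(t.value)
#         return t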