
Source Code for Module PyFoam.RunDictionary.ParsedParameterFile

#  ICE Revision: $Id$
"""Parameter file is read into memory and modified there"""

from PyFoam.RunDictionary.FileBasis import FileBasisBackup
from PyFoam.Basics.PlyParser import PlyParser
from PyFoam.Basics.FoamFileGenerator import FoamFileGenerator

from PyFoam.Basics.DataStructures import Vector,Field,Dimension,DictProxy,TupleProxy,Tensor,SymmTensor,Unparsed,UnparsedList,Codestream,DictRedirection,BinaryBlob,BinaryList

from PyFoam.Error import error,warning,FatalErrorPyFoamException

from os import path
from copy import deepcopy
import sys

from PyFoam.ThirdParty.six import print_,integer_types,iteritems

class ParsedParameterFile(FileBasisBackup):
    """Parameter file whose complete representation is read into
    memory, can be manipulated and afterwards written to disk"""

    def __init__(self,
                 name,
                 backup=False,
                 debug=False,
                 boundaryDict=False,
                 listDict=False,
                 listDictWithHeader=False,
                 listLengthUnparsed=None,
                 preserveComments=True,
                 noHeader=False,
                 binaryMode=False,
                 treatBinaryAsASCII=False,
                 noBody=False,
                 doMacroExpansion=False,
                 dontRead=False,
                 noVectorOrTensor=False,
                 dictStack=None,
                 createZipped=True,
                 longListOutputThreshold=20):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file
        @param boundaryDict: the file to parse is a boundary file
        @param listDict: the file only contains a list
        @param listDictWithHeader: the file only contains a list and a header
        @param listLengthUnparsed: Lists longer than that length are not parsed
        @param binaryMode: Parse long lists in binary mode (to be overridden by
        the settings in the header).
        @param treatBinaryAsASCII: even if the header says that this is a
        binary file treat it like an ASCII-file
        @param noHeader: don't expect a header
        @param noBody: don't read the body of the file (only the header)
        @param doMacroExpansion: expand #include and $var
        @param noVectorOrTensor: short lists of length 3, 6 and 9 are NOT
        interpreted as vectors or tensors
        @param dontRead: Do not read the file during construction
        @param longListOutputThreshold: Lists that are longer than this are
        prefixed with a length
        @param dictStack: dictionary stack for lookup (only used for include)
        """

        self.noHeader=noHeader
        self.noBody=noBody
        FileBasisBackup.__init__(self,
                                 name,
                                 backup=backup,
                                 createZipped=createZipped)
        self.debug=debug
        self.boundaryDict=boundaryDict
        self.listDict=listDict
        self.listDictWithHeader=listDictWithHeader
        self.listLengthUnparsed=listLengthUnparsed
        self.doMacros=doMacroExpansion
        self.preserveComments=preserveComments
        self.noVectorOrTensor=noVectorOrTensor
        self.header=None
        self.content=None
        self.longListOutputThreshold=longListOutputThreshold
        self.binaryMode=binaryMode
        self.treatBinaryAsASCII=treatBinaryAsASCII
        self.lastDecoration=""
        self.dictStack=dictStack

        if not dontRead:
            self.readFile()

    def parse(self,content):
        """Constructs a representation of the file"""
        try:
            parser=FoamFileParser(content,
                                  debug=self.debug,
                                  fName=self.name,
                                  boundaryDict=self.boundaryDict,
                                  listDict=self.listDict,
                                  listDictWithHeader=self.listDictWithHeader,
                                  listLengthUnparsed=self.listLengthUnparsed,
                                  noHeader=self.noHeader,
                                  noBody=self.noBody,
                                  preserveComments=self.preserveComments,
                                  binaryMode=self.binaryMode,
                                  treatBinaryAsASCII=self.treatBinaryAsASCII,
                                  noVectorOrTensor=self.noVectorOrTensor,
                                  dictStack=self.dictStack,
                                  doMacroExpansion=self.doMacros)
        except BinaryParserError:
            e = sys.exc_info()[1]  # Needed because python 2.5 does not support 'as e'
            if not self.treatBinaryAsASCII:
                # Retrying in ASCII-mode although the file thinks it is binary
                parser=FoamFileParser(content,
                                      debug=self.debug,
                                      fName=self.name,
                                      boundaryDict=self.boundaryDict,
                                      listDict=self.listDict,
                                      listDictWithHeader=self.listDictWithHeader,
                                      listLengthUnparsed=self.listLengthUnparsed,
                                      noHeader=self.noHeader,
                                      noBody=self.noBody,
                                      preserveComments=self.preserveComments,
                                      binaryMode=self.binaryMode,
                                      treatBinaryAsASCII=True,
                                      noVectorOrTensor=self.noVectorOrTensor,
                                      dictStack=self.dictStack,
                                      doMacroExpansion=self.doMacros)
            else:
                raise e

        self.content=parser.getData()
        self.header=parser.getHeader()
        self.lastDecoration=parser._decorationBuffer

        return self.content

    def __contains__(self,key):
        return key in self.content

    def __getitem__(self,key):
        return self.content[key]

    def __setitem__(self,key,value):
        self.content[key]=value

    def __delitem__(self,key):
        del self.content[key]

    def __len__(self):
        return len(self.content)

    def __iter__(self):
        for key in self.content:
            yield key

    def __str__(self):
        """Generates a string from the contents in memory.
        Used to be called makeString"""

        string="// -*- C++ -*-\n// File generated by PyFoam - sorry for the ugliness\n\n"

        generator=FoamFileGenerator(self.content,
                                    header=self.header,
                                    longListThreshold=self.longListOutputThreshold)
        string+=generator.makeString(firstLevel=True)

        if len(self.lastDecoration)>0:
            string+="\n\n"+self.lastDecoration

        return string

    def getValueDict(self):
        """Get a dictionary with the values with the decorators removed"""
        result={}
        if self.content:
            for k in self.content:
                if type(k) not in integer_types:
                    result[k]=self.content[k]
        return result

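# Illustrative usage sketch, not part of the original module: it assumes an
# OpenFOAM case with a "system/controlDict" file and uses only the dict-style
# access defined above plus writeFile() from the FileBasis machinery this
# class builds on.
#
#     from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
#
#     ctrl = ParsedParameterFile("system/controlDict", backup=True)
#     ctrl["endTime"] = 1000          # entries behave like dictionary items
#     ctrl["writeInterval"] = 50
#     ctrl.writeFile()                # serialize via __str__ and write back
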
class WriteParameterFile(ParsedParameterFile):
    """A specialization that is used to only write to the file"""
    def __init__(self,
                 name,
                 backup=False,
                 className="dictionary",
                 objectName=None,
                 createZipped=False):
        ParsedParameterFile.__init__(self,
                                     name,
                                     backup=backup,
                                     dontRead=True,
                                     createZipped=createZipped)

        if objectName==None:
            objectName=path.basename(name)

        self.content=DictProxy()
        self.header={"version":"2.0",
                     "format":"ascii",
                     "class":className,
                     "object":objectName}

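# Illustrative sketch, not part of the original source: because reading is
# skipped, WriteParameterFile can create a dictionary file from scratch; the
# header fields set in __init__ above are written together with the content.
#
#     out = WriteParameterFile("testDict", className="dictionary")
#     out["solver"] = "PCG"
#     out["tolerance"] = 1e-6
#     out.writeFile()
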
class Enumerate(object):
    def __init__(self, names):
        for number, name in enumerate(names):
            setattr(self, name, number)

inputModes=Enumerate(["merge","error","warn","protect","overwrite","default"])

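# Note added for clarity (not in the original source): Enumerate simply maps
# the given names to consecutive integers, so inputModes.merge == 0,
# inputModes.error == 1, ... inputModes.default == 5. p_inputMode further
# below stores one of these values in self.inputMode when an inputMode
# directive is parsed.
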
class FoamFileParser(PlyParser):
    """Class that parses a string that contains the contents of an
    OpenFOAM-file and builds a nested structure of dictionaries and
    lists from it"""

    def __init__(self,
                 content,
                 fName=None,
                 debug=False,
                 noHeader=False,
                 noBody=False,
                 doMacroExpansion=False,
                 boundaryDict=False,
                 preserveComments=True,
                 preserveNewlines=True,
                 listDict=False,
                 listDictWithHeader=False,
                 listLengthUnparsed=None,
                 binaryMode=False,
                 treatBinaryAsASCII=False,
                 duplicateCheck=False,
                 noVectorOrTensor=False,
                 dictStack=None,
                 duplicateFail=True):
        """@param content: the string to be parsed
        @param fName: Name of the actual file (if any)
        @param debug: output debug information during parsing
        @param noHeader: switch that turns off the parsing of the header
        @param duplicateCheck: Check for duplicates in dictionaries
        @param duplicateFail: Fail if a duplicate is discovered"""

        self.binaryMode=binaryMode
        self.treatBinaryAsASCII=treatBinaryAsASCII
        self.fName=fName
        self.data=None
        self.header=None
        self.debug=debug
        self.listLengthUnparsed=listLengthUnparsed
        self.doMacros=doMacroExpansion
        self.preserveComments=preserveComments
        self.preserveNewLines=preserveNewlines
        self.duplicateCheck=duplicateCheck
        self.duplicateFail=duplicateFail
        self.noVectorOrTensor=noVectorOrTensor
        self.inHeader=True
        self.inBinary=False

        # Make sure that the first comment is discarded
        self.collectDecorations=False
        self.inputMode=inputModes.merge

        self._decorationBuffer=""

        startCnt=0

        self.dictStack=dictStack
        if self.dictStack==None:
            self.dictStack=[DictProxy()]

        if noBody:
            self.start='noBody'
            startCnt+=1

        if noHeader:
            self.inHeader=False
            self.start='noHeader'
            startCnt+=1
            self.collectDecorations=True

        if listDict:
            self.inHeader=False
            self.start='pureList'
            startCnt+=1
            self.dictStack=[]
            self.collectDecorations=True

        if listDictWithHeader:
            self.start='pureListWithHeader'
            startCnt+=1

        if boundaryDict:
            self.start='boundaryDict'
            startCnt+=1

        if startCnt>1:
            error("Only one start symbol can be specified.",startCnt,"are specified")

        PlyParser.__init__(self,debug=debug)

        #sys.setrecursionlimit(50000)
        #print sys.getrecursionlimit()

        self.emptyCnt=0

        self.header,self.data=self.parse(content)

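    # Illustrative sketch (added comment, not part of the original source):
    # FoamFileParser can be fed a string directly; with noHeader=True no
    # FoamFile header is expected.
    #
    #     p = FoamFileParser("a 1;\nb (1 2 3);\n", noHeader=True)
    #     p.getData()   # roughly {'a': 1, 'b': Vector(1,2,3)} (a DictProxy in practice)
    #     p["a"]        # dict-style access via __getitem__ defined below
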
    def __contains__(self,key):
        return key in self.data

    def __getitem__(self,key):
        return self.data[key]

    def __setitem__(self,key,value):
        self.data[key]=value

    def __delitem__(self,key):
        del self.data[key]

    def __iter__(self):
        for key in self.data:
            yield key

    ## def __len__(self):
    ##     if self.data==None:
    ##         return 0
    ##     else:
    ##         return len(self.data)

    def resetDecoration(self):
        self._decorationBuffer=""

    def addToDecoration(self,text):
        if self.collectDecorations:
            self._decorationBuffer+=text

    def addCommentToDecorations(self,text):
        if self.preserveComments:
            self.addToDecoration(text)

    def addNewlinesToDecorations(self,text):
        if self.preserveNewLines:
            self.addToDecoration(text)

    def getDecoration(self):
        tmp=self._decorationBuffer
        self.resetDecoration()
        if len(tmp)>0:
            if tmp[-1]=='\n':
                tmp=tmp[:-1]
        return tmp

    def directory(self):
        if self.fName==None:
            return path.curdir
        else:
            return path.dirname(self.fName)

    def getData(self):
        """Get the data structure"""
        return self.data

    def getHeader(self):
        """Get the OpenFOAM-header"""
        return self.header

    def printContext(self,c,ind):
        """Prints the context of the current index"""
        print_("------")
        print_(c[max(0,ind-100):max(0,ind-1)])
        print_("------")
        print_(">",c[ind-1],"<")
        print_("------")
        print_(c[min(len(c),ind):min(len(c),ind+100)])
        print_("------")

    def parserError(self,text,c,ind):
        """Prints the error message of the parser and exits"""
        print_("PARSER ERROR:",text)
        print_("On index",ind)
        self.printContext(c,ind)
        raise PyFoamParserError("Unspecified")

    def condenseAllPreFixLists(self,orig):
        """Checks whether this list is a list that consists only of prefix-lists"""
        isAllPreList=False
        if (len(orig) % 2)==0:
            isAllPreList=True
            for i in range(0,len(orig),2):
                if type(orig[i])==int and (type(orig[i+1]) in [list,Vector,Tensor,SymmTensor]):
                    if len(orig[i+1])!=orig[i]:
                        isAllPreList=False
                        break
                else:
                    isAllPreList=False
                    break

        if isAllPreList:
            return orig[1::2]
        else:
            return orig
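    # Example (added comment, not in the original source): a "prefix list" is a
    # length followed by a list of that length, e.g. the parsed sequence
    # [2, [1, 2], 2, [3, 4]]. condenseAllPreFixLists() drops the redundant
    # length prefixes and returns [[1, 2], [3, 4]]; any other list is returned
    # unchanged.
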
    tokens = (
        'NAME',
        'ICONST',
        'FCONST',
        'SCONST',
        'FOAMFILE',
        'UNIFORM',
        'NONUNIFORM',
        'UNPARSEDCHUNK',
        'CODESTREAMCHUNK',
        'REACTION',
        'SUBSTITUTION',
        'MERGE',
        'OVERWRITE',
        'ERROR',
        'WARN',
        'PROTECT',
        'DEFAULT',
        'INCLUDE',
        'INCLUDEIFPRESENT',
        'REMOVE',
        'INPUTMODE',
        'KANALGITTER',
        'CODESTART',
        'CODEEND',
        'BINARYBLOB',
    )

    reserved = {
        'FoamFile'        : 'FOAMFILE',
        'uniform'         : 'UNIFORM',
        'nonuniform'      : 'NONUNIFORM',
        'include'         : 'INCLUDE',
        'includeIfPresent': 'INCLUDEIFPRESENT',
        'remove'          : 'REMOVE',
        'inputMode'       : 'INPUTMODE',
        'merge'           : 'MERGE',
        'overwrite'       : 'OVERWRITE',
        'error'           : 'ERROR',
        'warn'            : 'WARN',
        'protect'         : 'PROTECT',
        'default'         : 'DEFAULT',
    }

    states = (
        ('unparsed', 'exclusive'),
        ('codestream', 'exclusive'),
        ('mlcomment', 'exclusive'),
        ('binaryblob', 'exclusive'),
    )

    def t_unparsed_left(self,t):
        r'\('
        t.lexer.level+=1
        # print "left",t.lexer.level,

    def t_unparsed_right(self,t):
        r'\)'
        t.lexer.level-=1
        # print "right",t.lexer.level,
        if t.lexer.level < 0 :
            t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-1]
            # print t.value
            t.lexer.lexpos-=1
            t.type = "UNPARSEDCHUNK"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.begin('INITIAL')
            return t

    t_unparsed_ignore = ' \t\n0123456789.-+e'

    def t_unparsed_error(self,t):
        print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    t_binaryblob_ignore = ''

    def t_binaryblob_close(self,t):
        r"\)"
        size=t.lexer.lexpos-t.lexer.binary_start-1
        # print size,ord(t.lexer.lexdata[t.lexer.lexpos-1]),ord(t.lexer.lexdata[t.lexer.lexpos]),ord(t.lexer.lexdata[t.lexer.lexpos+1])
        # print size,ord(t.lexer.lexdata[t.lexer.binary_start-1]),ord(t.lexer.lexdata[t.lexer.binary_start])
        # print size % (t.lexer.binary_listlen), len(t.lexer.lexdata)
        if (size % t.lexer.binary_listlen)==0:
            # length of blob is a multiple of the list length
            nextChar=t.lexer.lexdata[t.lexer.lexpos]
            nextNextChar=t.lexer.lexdata[t.lexer.lexpos+1]
            if (nextChar in [';','\n'] and nextNextChar=='\n'):
                t.value = t.lexer.lexdata[t.lexer.binary_start:t.lexer.lexpos-1]
                assert(len(t.value)%t.lexer.binary_listlen == 0)
                t.lexer.lexpos-=1
                t.type = "BINARYBLOB"
                t.lexer.lineno += t.value.count('\n')
                t.lexer.begin('INITIAL')
                self.inBinary=False
                return t

    def t_binaryblob_throwaway(self,t):
        r'[^\)]'
        pass

    def t_binaryblob_error(self,t):
        print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    def t_codestream_end(self,t):
        r"\#\}"
        t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-2]
        t.lexer.lexpos-=2
        t.type = "CODESTREAMCHUNK"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t

    t_codestream_ignore = ''

    def t_codestream_throwaway(self,t):
        r'[^#]'
        pass

    def t_codestream_error(self,t):
        print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    def t_NAME(self,t):
        r'[a-zA-Z_][+\-<>(),.\*|a-zA-Z_0-9&%:]*'
        t.type=self.reserved.get(t.value,'NAME')
        if t.value[-1]==")":
            if t.value.count(")")>t.value.count("("):
                # Give back the last ) because it probably belongs to a list
                t.value=t.value[:-1]
                t.lexer.lexpos-=1

        return t

    def t_SUBSTITUITION(self,t):
        r'\$[a-zA-Z_.:{][+\-<>(),.\*|a-zA-Z_0-9&%:${}]*'
        t.type=self.reserved.get(t.value,'SUBSTITUTION')
        if t.value[-1]==")":
            if t.value.count(")")>t.value.count("("):
                # Give back the last ) because it probably belongs to a list
                t.value=t.value[:-1]
                t.lexer.lexpos-=1

        return t

    t_CODESTART = r'\#\{'

    t_CODEEND = r'\#\}'

    t_KANALGITTER = r'\#'

    t_ICONST = r'(-|)\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

    t_FCONST = r'(-|)((\d+)(\.\d*)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

    t_SCONST = r'\"([^\\\n]|(\\.))*?\"'

    literals = "(){};[]"

    t_ignore=" \t\r"

    # Define a rule so we can track line numbers
    def t_newline(self,t):
        r'\n+'
        t.lexer.lineno += len(t.value)
        now=t.lexer.lexpos
        next=t.lexer.lexdata.find('\n',now)
        if next>=0:
            line=t.lexer.lexdata[now:next]
            pos=line.find("=")

            if pos>=0 and not self.binaryMode:
                if ((line.find("//")>=0 and line.find("//")<pos)) or (line.find("/*")>=0 and line.find("/*")<pos) or (line.find('"')>=0 and line.find('"')<pos):
                    return
                t.value = line
                t.type = "REACTION"
                t.lexer.lineno += 1
                t.lexer.lexpos = next
                return t
        # self.addNewlinesToDecorations(t.value)

    # C++ comment (ignore)
    def t_ccode_comment(self,t):
        r'//.*'
        t.lexer.lineno += t.value.count('\n')
        self.addCommentToDecorations(t.value)

    def t_startmlcomment(self,t):
        r'/\*'
        t.lexer.begin('mlcomment')
        self.mllevel=1
        self.mlcomment_start = t.lexer.lexpos-2

    def t_mlcomment_newlevel(self,t):
        r'/\*'
        self.mllevel+=1

    def t_mlcomment_endcomment(self,t):
        r'\*/'
        self.mllevel-=1
        if self.mllevel<=0:
            t.lexer.begin('INITIAL')
            mlcomment=t.lexer.lexdata[self.mlcomment_start:t.lexer.lexpos]
            t.lexer.lineno += mlcomment.count('\n')
            self.addCommentToDecorations(mlcomment)

    def t_mlcomment_throwaway(self,t):
        r'[^\*]'
        pass

    t_mlcomment_ignore = ''

    def t_mlcomment_error(self,t):
        if t.lexer.lexdata[t.lexer.lexpos]!="*":
            print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    # Error handling rule
    def t_error(self,t):
        msg="Illegal character '%s' in line %d (pos: %d)" % (
            t.value[0],
            t.lexer.lineno,
            t.lexer.lexpos)
        raise PyFoamParserError(msg)
        # t.lexer.skip(1) # the old days when illegal characters were accepted

    def p_global(self,p):
        'global : header dictbody'
        p[0] = ( p[1] , p[2] )

    def p_gotHeader(self,p):
        'gotHeader :'
        p.lexer.lexpos=len(p.lexer.lexdata)
        self.inHeader=False

    def p_noBody(self,p):
        ''' noBody : FOAMFILE '{' dictbody gotHeader '}' '''
        p[0] = ( p[3] , {} )

    def p_noHeader(self,p):
        'noHeader : dictbody'
        p[0] = ( None , p[1] )

    def p_pureList(self,p):
        'pureList : onlyListOrPList'
        p[0] = ( None , p[1] )

    def p_onlyListOrPList(self,p):
        '''onlyListOrPList : list
                           | prelist '''
        p[0]=p[1]

    def p_pureListWithHeader(self,p):
        '''pureListWithHeader : header onlyListOrPList'''
        p[0] = ( p[1] , p[2] )

    def p_afterHeader(self,p):
        'afterHeader :'
        pass

    def p_boundaryDict(self,p):
        '''boundaryDict : header list
                        | header prelist '''
        # p[0] = ( p[1] , dict(zip(p[2][::2],p[2][1::2])) )
        p[0] = ( p[1] , p[2] )

    def p_header(self,p):
        'header : FOAMFILE dictionary'
        self.inHeader=False
        p[0] = p[2]

        # if p[0]["format"]=="binary":
        #     if not self.treatBinaryAsASCII:
        #         self.binaryMode=True
        #     else:
        #         self.binaryMode=False
        # elif p[0]["format"]=="ascii":
        #     self.binaryMode=False
        # else:
        #     raise FatalErrorPyFoamException("Don't know how to parse file format",p[0]["format"])

        self.collectDecorations=True

    def p_macro(self,p):
        '''macro : KANALGITTER include
                 | KANALGITTER inputMode
                 | KANALGITTER remove'''
        p[0] = p[1]+p[2]+"\n"
        if self.doMacros:
            p[0]="// "+p[0]

    def p_include(self,p):
        '''include : INCLUDE SCONST
                   | INCLUDEIFPRESENT SCONST'''
        if self.doMacros:
            fName=path.join(self.directory(),p[2][1:-1])
            read=True
            if p[1]=="includeIfPresent" and not path.exists(fName):
                read=False
            if read and not path.exists(fName):
                raise PyFoamParserError("The included file "+fName+" does not exist")
            if read:
                data=ParsedParameterFile(fName,
                                         noHeader=True,
                                         dictStack=self.dictStack,
                                         doMacroExpansion=self.doMacros)
                into=self.dictStack[-1]
                for k in data:
                    into[k]=data[k]

        p[0] = p[1] + " " + p[2]

    def p_inputMode(self,p):
        '''inputMode : INPUTMODE ERROR
                     | INPUTMODE WARN
                     | INPUTMODE PROTECT
                     | INPUTMODE DEFAULT
                     | INPUTMODE MERGE
                     | INPUTMODE OVERWRITE'''
        p[0] = p[1] + " " + p[2]
        self.inputMode=getattr(inputModes,p[2])

    def p_remove(self,p):
        '''remove : REMOVE word
                  | REMOVE wlist'''
        p[0] = p[1] + " "
        if type(p[2])==str:
            p[0]+=p[2]
        else:
            p[0]+="( "
            for w in p[2]:
                p[0]+=w+" "
            p[0]+=")"

    def p_integer(self,p):
        '''integer : ICONST'''
        p[0] = int(p[1])

    def p_float(self,p):
        '''integer : FCONST'''
        p[0] = float(p[1])

    def p_enter_dict(self,p):
        '''enter_dict :'''
        self.dictStack.append(DictProxy())

    def p_exit_dict(self,p):
        '''exit_dict :'''
        p[0]=self.dictStack.pop()

    def p_dictionary(self,p):
        '''dictionary : '{' enter_dict dictbody '}' exit_dict
                      | '{' '}' '''
        if len(p)==6:
            p[0] = p[5]
        else:
            p[0] = DictProxy()

    def p_dictbody(self,p):
        '''dictbody : dictbody dictline
                    | dictline
                    | empty'''

        if len(p)==3:
            p[0]=p[1]
            if self.duplicateCheck:
                if p[2][0] in p[0]:
                    if self.duplicateFail:
                        error("Key",p[2][0],"already defined")
                    else:
                        warning("Key",p[2][0],"already defined")
            if type(p[2][0])==DictRedirection and p[2][1]=='':
                p[0].addRedirection(p[2][0])
            else:
                if type(p[2][1])==DictRedirection:
                    p[0][p[2][0]]=p[2][1].getContent()
                else:
                    p[0][p[2][0]]=p[2][1]
                p[0].addDecoration(p[2][0],self.getDecoration())
        else:
            p[0]=self.dictStack[-1]

            if p[1]:
                if type(p[1][0])==DictRedirection and p[1][1]=='':
                    p[0].addRedirection(p[1][0])
                else:
                    if type(p[1][1])==DictRedirection:
                        p[0][p[1][0]]=p[1][1].getContent()
                    else:
                        p[0][p[1][0]]=p[1][1]

    def p_list(self,p):
        '''list : '(' itemlist ')' '''
        p[0] = self.condenseAllPreFixLists(p[2])
        if not self.noVectorOrTensor and (
                len(p[2])==3 or len(p[2])==9 or len(p[2])==6):
            isVector=True
            for i in p[2]:
                try:
                    float(i)
                except:
                    isVector=False
            if isVector:
                if len(p[2])==3:
                    p[0]=Vector(*p[2])
                elif len(p[2])==9:
                    p[0]=Tensor(*p[2])
                else:
                    p[0]=SymmTensor(*p[2])

    def p_wlist(self,p):
        '''wlist : '(' wordlist ')' '''
        p[0] = p[2]

    def p_unparsed(self,p):
        '''unparsed : UNPARSEDCHUNK'''
        p[0] = Unparsed(p[1])

    def p_binaryblob(self,p):
        '''binaryblob : BINARYBLOB'''
        p[0] = BinaryBlob(p[1])

    def p_prelist_seen(self,p):
        '''prelist_seen : '''
        if self.binaryMode:
            p.lexer.begin('binaryblob')
            p.lexer.binary_start = p.lexer.lexpos
            p.lexer.binary_listlen = p[-1]
            self.inBinary=True
        elif self.listLengthUnparsed!=None:
            if int(p[-1])>=self.listLengthUnparsed:
                p.lexer.begin('unparsed')
                p.lexer.level=0
                p.lexer.code_start = p.lexer.lexpos

    def p_codestream(self,p):
        '''codestream : codeSeen CODESTART CODESTREAMCHUNK CODEEND '''
        p[0] = Codestream(p[3])

    def p_codeSeen(self,p):
        '''codeSeen : '''
        p.lexer.begin('codestream')
        p.lexer.level=0
        p.lexer.code_start = p.lexer.lexpos

    def p_prelist(self,p):
        '''prelist : integer prelist_seen '(' itemlist ')'
                   | integer prelist_seen '(' binaryblob ')'
                   | integer prelist_seen '(' unparsed ')' '''
        if type(p[4])==Unparsed:
            p[0] = UnparsedList(int(p[1]),p[4].data)
        elif type(p[4])==BinaryBlob:
            p[0] = BinaryList(int(p[1]),p[4].data)
        else:
            p[0] = self.condenseAllPreFixLists(p[4])

    def p_itemlist(self,p):
        '''itemlist : itemlist item
                    | itemlist ';'
                    | item '''
        if len(p)==2:
            if p[1]==None:
                p[0]=[]
            else:
                p[0]=[ p[1] ]
        else:
            p[0]=p[1]
            if p[2]!=';':
                p[0].append(p[2])

    def p_wordlist(self,p):
        '''wordlist : wordlist word
                    | word '''
        if len(p)==2:
            if p[1]==None:
                p[0]=[]
            else:
                p[0]=[ p[1] ]
        else:
            p[0]=p[1]
            p[0].append(p[2])

    def p_word(self,p):
        '''word : NAME
                | UNIFORM
                | NONUNIFORM
                | MERGE
                | OVERWRITE
                | DEFAULT
                | WARN
                | PROTECT
                | ERROR'''
        p[0]=p[1]

    def parseSubst_root(self,nm,stck):
        if nm[0]==":":
            stck=[self.dictStack[0]]
            nm=nm[1:]
        elif nm[0]=='.':
            nm=nm[1:]
            off=0
            while nm[0]=='.':
                nm=nm[1:]
                off+=1
            if off>0:
                stck=stck[:-off]
        elif nm[0]=="{":
            inner=nm[1:nm.rfind("}")].strip()
            if inner[0]=="$":
                nm=self.parseSubst_root(inner[1:],stck)()
            else:
                nm=inner
        rest=None
        if nm.find(".")>0:
            rest=nm[nm.find(".")+1:]
            nm=nm[:nm.find(".")]
        for i,di in enumerate(reversed(stck)):
            if nm in di:
                if rest==None:
                    v=DictRedirection(deepcopy(di[nm]),
                                      di[nm],
                                      nm)
                    return v
                else:
                    newStck=stck[:i]
                    newStck.append(di[nm])
                    return self.parseSubst_root(rest,newStck)
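    # Added comment (not in the original source), summarizing the substitution
    # forms this helper resolves when macro expansion is switched on:
    #
    #     $var          looked up innermost-scope-first in the dictionary stack
    #     $:var         looked up only in the top-level dictionary
    #     $..var        looks one scope further out; each extra dot goes up a level
    #     ${$indirect}  the inner substitution is resolved first
    #     $dict.entry   descends into sub-dictionaries after the first lookup
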
    def p_substitution(self,p):
        '''substitution : SUBSTITUTION'''
        if self.doMacros:
            nm=p[1][1:]
            p[0]="<Symbol '"+nm+"' not found>"
            stck=self.dictStack
            p[0]=self.parseSubst_root(nm,stck)
        else:
            p[0]=p[1]

    def p_dictkey(self,p):
        '''dictkey : word
                   | SCONST'''
        p[0]=p[1]

    def p_dictline(self,p):
        '''dictline : dictkey dictitem ';'
                    | dictkey list ';'
                    | dictkey prelist ';'
                    | dictkey fieldvalue ';'
                    | macro
                    | substitution ';'
                    | dictkey codestream ';'
                    | dictkey dictionary'''
        if len(p)==4 and self.inHeader and p[1]=="format" and type(p[2])==str:
            if p[2]=="binary":
                if not self.treatBinaryAsASCII:
                    self.binaryMode=True
                else:
                    self.binaryMode=False
            elif p[2]=="ascii":
                self.binaryMode=False
            else:
                raise FatalErrorPyFoamException("Don't know how to parse file format",p[0]["format"])

        if len(p)==4 and type(p[2])==list:
            # remove the prefix from long lists (if present)
            doAgain=True
            tmp=p[2]
            while doAgain:
                doAgain=False
                for i in range(len(tmp)-1):
                    if type(tmp[i])==int and type(tmp[i+1]) in [list]:
                        if tmp[i]==len(tmp[i+1]):
                            nix=tmp[:i]+tmp[i+1:]
                            for i in range(len(tmp)):
                                tmp.pop()
                            tmp.extend(nix)
                            doAgain=True
                            break
        if len(p)==4:
            p[0] = ( p[1] , p[2] )
        elif len(p)==3:
            if p[2]==';':
                p[0]= (p[1],'')
            else:
                p[0] = ( p[1] , p[2] )
        else:
            p[0] = ( self.emptyCnt , p[1] )
            self.emptyCnt+=1

    def p_number(self,p):
        '''number : integer
                  | FCONST'''
        p[0] = p[1]

    def p_dimension(self,p):
        '''dimension : '[' number number number number number number number ']'
                     | '[' number number number number number ']' '''
        result=p[2:-1]
        if len(result)==5:
            result+=[0,0]

        p[0]=Dimension(*result)

    def p_vector(self,p):
        '''vector : '(' number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:5]
        else:
            p[0]=Vector(*p[2:5])

    def p_tensor(self,p):
        '''tensor : '(' number number number number number number number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:11]
        else:
            p[0]=Tensor(*p[2:11])

    def p_symmtensor(self,p):
        '''symmtensor : '(' number number number number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:8]
        else:
            p[0]=SymmTensor(*p[2:8])

    def p_fieldvalue_uniform(self,p):
        '''fieldvalue : UNIFORM number
                      | UNIFORM vector
                      | UNIFORM tensor
                      | UNIFORM symmtensor'''
        p[0] = Field(p[2])

    def p_fieldvalue_nonuniform(self,p):
        '''fieldvalue : NONUNIFORM NAME list
                      | NONUNIFORM prelist
                      | NONUNIFORM NAME prelist'''
        if len(p)==4:
            p[0] = Field(p[3],name=p[2])
        else:
            p[0] = Field(p[2])

    def p_dictitem(self,p):
        '''dictitem : longitem
                    | pitem'''
        if type(p[1])==tuple:
            if len(p[1])==2 and p[1][0]=="uniform":
                p[0]=Field(p[1][1])
            elif len(p[1])==3 and p[1][0]=="nonuniform":
                p[0]=Field(p[1][2],name=p[1][1])
            else:
                p[0]=TupleProxy(p[1])
        else:
            p[0] = p[1]

    def p_longitem(self,p):
        '''longitem : pitemlist pitem'''
        p[0] = p[1]+(p[2],)

    def p_pitemlist(self,p):
        '''pitemlist : pitemlist pitem
                     | pitem '''
        if len(p)==2:
            p[0]=(p[1],)
        else:
            ## print type(p[1][-1])
            ## if type(p[1][-1])==int and type(p[2])==tuple:
            ##     print "Hepp",p[2]
            p[0]=p[1]+(p[2],)

    def p_pitem(self,p):
        '''pitem : word
                 | SCONST
                 | number
                 | dictionary
                 | list
                 | dimension
                 | substitution
                 | empty'''
        p[0] = p[1]

    def p_item(self,p):
        '''item : pitem
                | REACTION
                | list
                | dictionary'''
        p[0] = p[1]

    def p_empty(self,p):
        'empty :'
        pass

    def p_error(self,p):
        if self.inBinary:
            raise BinaryParserError("Problem reading binary", p)  # .type, p.lineno
        else:
            raise PyFoamParserError("Syntax error at token", p)  # .type, p.lineno
        # Just discard the token and tell the parser it's okay.
        # self.yacc.errok()

class PyFoamParserError(FatalErrorPyFoamException):
    def __init__(self,descr,data=None):
        FatalErrorPyFoamException.__init__(self,"Parser Error:",descr)
        self.descr=descr
        self.data=data

    def __str__(self):
        result="Error in PyFoamParser: '"+self.descr+"'"
        if self.data!=None:
            val=self.data.value
            if len(val)>100:
                val=val[:40]+" .... "+val[-40:]

            result+=" @ %r (Type: %s ) in line %d at position %d" % (val,
                                                                     self.data.type,
                                                                     self.data.lineno,
                                                                     self.data.lexpos)
        else:
            result+=" NONE"

        return result

    def __repr__(self):
        return str(self)

class BinaryParserError(PyFoamParserError):
    def __init__(self,descr,data=None):
        PyFoamParserError.__init__(self,descr,data)

class FoamStringParser(FoamFileParser):
    """Convenience class that parses only a headerless OpenFOAM dictionary"""

    def __init__(self,
                 content,
                 debug=False,
                 noVectorOrTensor=False,
                 duplicateCheck=False,
                 listDict=False,
                 doMacroExpansion=False,
                 duplicateFail=False):
        """@param content: the string to be parsed
        @param debug: output debug information during parsing"""

        FoamFileParser.__init__(self,
                                content,
                                debug=debug,
                                noHeader=not listDict,
                                boundaryDict=False,
                                listDict=listDict,
                                noVectorOrTensor=noVectorOrTensor,
                                duplicateCheck=duplicateCheck,
                                doMacroExpansion=doMacroExpansion,
                                duplicateFail=duplicateFail)

    def __str__(self):
        return str(FoamFileGenerator(self.data))

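# Illustrative sketch, not part of the original source: FoamStringParser is the
# quickest way to turn a snippet of OpenFOAM dictionary syntax into a Python
# structure and back again.
#
#     d = FoamStringParser("a 1; b (1 2 3); c {d on;}")
#     d["c"]["d"]        # -> 'on'
#     str(d)             # regenerates OpenFOAM-formatted text via FoamFileGenerator
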
class ParsedBoundaryDict(ParsedParameterFile):
    """Convenience class that parses only an OpenFOAM polyMesh-boundaries file"""

    def __init__(self,
                 name,
                 treatBinaryAsASCII=False,
                 backup=False,
                 debug=False):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file"""

        ParsedParameterFile.__init__(self,
                                     name,
                                     backup=backup,
                                     treatBinaryAsASCII=treatBinaryAsASCII,
                                     debug=debug,
                                     boundaryDict=True)

    def parse(self,content):
        """Constructs a representation of the file"""
        temp=ParsedParameterFile.parse(self,content)
        self.content=DictProxy()
        for i in range(0,len(temp),2):
            self.content[temp[i]]=temp[i+1]
        return self.content

    def __str__(self):
        string="// File generated by PyFoam - sorry for the ugliness\n\n"
        temp=[]
        for k,v in iteritems(self.content):
            temp.append((k,v))

        temp.sort(lambda x,y:cmp(int(x[1]["startFace"]),int(y[1]["startFace"])))

        temp2=[]

        for b in temp:
            temp2.append(b[0])
            temp2.append(b[1])

        generator=FoamFileGenerator(temp2,header=self.header)
        string+=str(generator)

        return string

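# Illustrative sketch, not part of the original source (file path and patch
# name assumed): the boundary file is a list of (name, dictionary) pairs,
# which parse() above folds into a dictionary keyed by patch name.
#
#     bnd = ParsedBoundaryDict("constant/polyMesh/boundary")
#     bnd["inlet"]["type"]       # e.g. 'patch'
#     bnd["inlet"]["nFaces"]
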
class ParsedFileHeader(ParsedParameterFile):
    """Only parse the header of a file"""

    def __init__(self,name):
        ParsedParameterFile.__init__(self,name,backup=False,noBody=True)

    def __getitem__(self,name):
        return self.header[name]

    def __contains__(self,name):
        return name in self.header

    def __len__(self):
        return len(self.header)

# Should work with Python3 and Python2
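# Illustrative sketch, not part of the original source: ParsedFileHeader reads
# just the FoamFile header, which is cheap even for large field files.
#
#     hdr = ParsedFileHeader("0/U")
#     hdr["class"]       # e.g. 'volVectorField'
#     "object" in hdr    # True if the header has an 'object' entry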