
Source Code for Module PyFoam.RunDictionary.ParsedParameterFile

#  ICE Revision: $Id$
"""Parameter file is read into memory and modified there"""

from PyFoam.RunDictionary.FileBasis import FileBasisBackup
from PyFoam.Basics.PlyParser import PlyParser
from PyFoam.Basics.FoamFileGenerator import FoamFileGenerator

from PyFoam.Basics.DataStructures import Vector,Field,Dimension,DictProxy,TupleProxy,Tensor,SymmTensor,Unparsed,UnparsedList,Codestream,DictRedirection,BinaryBlob,BinaryList,BoolProxy

from PyFoam.Error import error,warning,FatalErrorPyFoamException

from os import path
from copy import deepcopy
import sys

from PyFoam.ThirdParty.six import print_,integer_types,iteritems

class ParsedParameterFile(FileBasisBackup):
    """Parameter file whose complete representation is read into
    memory, can be manipulated and afterwards written to disk"""

    def __init__(self,
                 name,
                 backup=False,
                 debug=False,
                 boundaryDict=False,
                 listDict=False,
                 listDictWithHeader=False,
                 listLengthUnparsed=None,
                 preserveComments=True,
                 noHeader=False,
                 binaryMode=False,
                 treatBinaryAsASCII=False,
                 noBody=False,
                 doMacroExpansion=False,
                 dontRead=False,
                 noVectorOrTensor=False,
                 dictStack=None,
                 createZipped=True,
                 longListOutputThreshold=20):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file
        @param boundaryDict: the file to parse is a boundary file
        @param listDict: the file only contains a list
        @param listDictWithHeader: the file only contains a list and a header
        @param listLengthUnparsed: Lists longer than this length are not parsed
        @param binaryMode: Parse long lists in binary mode (to be overridden by
        the settings in the header)
        @param treatBinaryAsASCII: even if the header says that this is a
        binary file treat it like an ASCII-file
        @param noHeader: don't expect a header
        @param noBody: don't read the body of the file (only the header)
        @param doMacroExpansion: expand #include and $var
        @param noVectorOrTensor: short lists of length 3, 6 and 9 are NOT
        interpreted as vectors or tensors
        @param dontRead: Do not read the file during construction
        @param longListOutputThreshold: Lists that are longer than this are
        prefixed with their length
        @param dictStack: dictionary stack for lookups (only used for #include)
        """

        self.noHeader=noHeader
        self.noBody=noBody
        FileBasisBackup.__init__(self,
                                 name,
                                 backup=backup,
                                 createZipped=createZipped)
        self.debug=debug
        self.boundaryDict=boundaryDict
        self.listDict=listDict
        self.listDictWithHeader=listDictWithHeader
        self.listLengthUnparsed=listLengthUnparsed
        self.doMacros=doMacroExpansion
        self.preserveComments=preserveComments
        self.noVectorOrTensor=noVectorOrTensor
        self.header=None
        self.content=None
        self.longListOutputThreshold=longListOutputThreshold
        self.binaryMode=binaryMode
        self.treatBinaryAsASCII=treatBinaryAsASCII
        self.lastDecoration=""
        self.dictStack=dictStack

        if not dontRead:
            self.readFile()

    def parse(self,content):
        """Constructs a representation of the file"""
        try:
            parser=FoamFileParser(content,
                                  debug=self.debug,
                                  fName=self.name,
                                  boundaryDict=self.boundaryDict,
                                  listDict=self.listDict,
                                  listDictWithHeader=self.listDictWithHeader,
                                  listLengthUnparsed=self.listLengthUnparsed,
                                  noHeader=self.noHeader,
                                  noBody=self.noBody,
                                  preserveComments=self.preserveComments,
                                  binaryMode=self.binaryMode,
                                  treatBinaryAsASCII=self.treatBinaryAsASCII,
                                  noVectorOrTensor=self.noVectorOrTensor,
                                  dictStack=self.dictStack,
                                  doMacroExpansion=self.doMacros)
        except BinaryParserError:
            e = sys.exc_info()[1]  # Needed because Python 2.5 does not support 'as e'
            if not self.treatBinaryAsASCII:
                # Retrying in ASCII-mode although the file thinks it is binary
                parser=FoamFileParser(content,
                                      debug=self.debug,
                                      fName=self.name,
                                      boundaryDict=self.boundaryDict,
                                      listDict=self.listDict,
                                      listDictWithHeader=self.listDictWithHeader,
                                      listLengthUnparsed=self.listLengthUnparsed,
                                      noHeader=self.noHeader,
                                      noBody=self.noBody,
                                      preserveComments=self.preserveComments,
                                      binaryMode=self.binaryMode,
                                      treatBinaryAsASCII=True,
                                      noVectorOrTensor=self.noVectorOrTensor,
                                      dictStack=self.dictStack,
                                      doMacroExpansion=self.doMacros)
            else:
                raise e

        self.content=parser.getData()
        self.header=parser.getHeader()
        self.lastDecoration=parser._decorationBuffer

        return self.content

    def __contains__(self,key):
        return key in self.content

    def __getitem__(self,key):
        return self.content[key]

    def __setitem__(self,key,value):
        self.content[key]=value

    def __delitem__(self,key):
        del self.content[key]

    def __len__(self):
        return len(self.content)

    def __iter__(self):
        for key in self.content:
            yield key

    def __str__(self):
        """Generates a string from the contents in memory.
        Used to be called makeString"""

        string="// -*- C++ -*-\n// File generated by PyFoam - sorry for the ugliness\n\n"

        generator=FoamFileGenerator(self.content,
                                    header=self.header if not self.noHeader else None,
                                    longListThreshold=self.longListOutputThreshold)
        string+=generator.makeString(firstLevel=True)

        if len(self.lastDecoration)>0:
            string+="\n\n"+self.lastDecoration

        return string

    def getValueDict(self):
        """Get a dictionary with the values, with the decorators removed"""
        result={}
        if self.content:
            for k in self.content:
                if type(k) not in integer_types:
                    result[k]=self.content[k]
        return result
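
# --- Annotation (not part of the original module): a minimal usage sketch for
# --- ParsedParameterFile, assuming an OpenFOAM case at the hypothetical path
# --- "myCase". writeFile() is assumed to be provided by the FileBasis base class.
def _exampleParsedParameterFile():
    control=ParsedParameterFile("myCase/system/controlDict")
    control["endTime"]=100              # modify a value in memory
    control["writeInterval"]=10         # add or overwrite another entry
    if "purgeWrite" in control:         # dictionary-style membership test
        del control["purgeWrite"]
    print_(str(control))                # render the file as it would be written
    control.writeFile()                 # write the modified dictionary back to disk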

class WriteParameterFile(ParsedParameterFile):
    """A specialization that is used to only write to the file"""

    def __init__(self,
                 name,
                 backup=False,
                 className="dictionary",
                 objectName=None,
                 createZipped=False,
                 **kwargs):
        ParsedParameterFile.__init__(self,
                                     name,
                                     backup=backup,
                                     dontRead=True,
                                     createZipped=createZipped,
                                     **kwargs)

        if objectName==None:
            objectName=path.basename(name)

        self.content=DictProxy()
        self.header={"version":"2.0",
                     "format":"ascii",
                     "class":className,
                     "object":objectName}
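
# --- Annotation (not part of the original module): a minimal sketch for creating a
# --- new dictionary file from scratch with WriteParameterFile. The target path and
# --- entries are hypothetical; writeFile() is assumed to come from FileBasis.
def _exampleWriteParameterFile():
    decomp=WriteParameterFile("myCase/system/decomposeParDict")
    decomp["numberOfSubdomains"]=4
    decomp["method"]="simple"
    coeffs=DictProxy()                  # sub-dictionaries are stored as DictProxy
    coeffs["n"]=Vector(2,2,1)
    coeffs["delta"]=0.001
    decomp["simpleCoeffs"]=coeffs
    decomp.writeFile()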

class Enumerate(object):
    def __init__(self, names):
        for number, name in enumerate(names):
            setattr(self, name, number)

inputModes=Enumerate(["merge","error","warn","protect","overwrite","default"])

class FoamFileParser(PlyParser):
    """Class that parses a string that contains the contents of an
    OpenFOAM-file and builds a nested structure of dictionaries and
    lists from it"""

    def __init__(self,
                 content,
                 fName=None,
                 debug=False,
                 noHeader=False,
                 noBody=False,
                 doMacroExpansion=False,
                 boundaryDict=False,
                 preserveComments=True,
                 preserveNewlines=True,
                 listDict=False,
                 listDictWithHeader=False,
                 listLengthUnparsed=None,
                 binaryMode=False,
                 treatBinaryAsASCII=False,
                 duplicateCheck=False,
                 noVectorOrTensor=False,
                 dictStack=None,
                 duplicateFail=True):
        """@param content: the string to be parsed
        @param fName: Name of the actual file (if any)
        @param debug: output debug information during parsing
        @param noHeader: switch that turns off the parsing of the header
        @param duplicateCheck: Check for duplicates in dictionaries
        @param duplicateFail: Fail if a duplicate is discovered"""

        self.binaryMode=binaryMode
        self.treatBinaryAsASCII=treatBinaryAsASCII
        self.fName=fName
        self.data=None
        self.header=None
        self.debug=debug
        self.listLengthUnparsed=listLengthUnparsed
        self.doMacros=doMacroExpansion
        self.preserveComments=preserveComments
        self.preserveNewLines=preserveNewlines
        self.duplicateCheck=duplicateCheck
        self.duplicateFail=duplicateFail
        self.noVectorOrTensor=noVectorOrTensor
        self.inHeader=True
        self.inBinary=False

        # Make sure that the first comment is discarded
        self.collectDecorations=False
        self.inputMode=inputModes.merge

        self._decorationBuffer=""

        startCnt=0

        self.dictStack=dictStack
        if self.dictStack==None:
            self.dictStack=[DictProxy()]

        if noBody:
            self.start='noBody'
            startCnt+=1

        if noHeader:
            self.inHeader=False
            self.start='noHeader'
            startCnt+=1
            self.collectDecorations=True

        if listDict:
            self.inHeader=False
            self.start='pureList'
            startCnt+=1
            self.dictStack=[]
            self.collectDecorations=True

        if listDictWithHeader:
            self.start='pureListWithHeader'
            startCnt+=1

        if boundaryDict:
            self.start='boundaryDict'
            startCnt+=1

        if startCnt>1:
            error("Only one start symbol can be specified.",startCnt,"are specified")

        PlyParser.__init__(self,debug=debug)

        #sys.setrecursionlimit(50000)
        #print sys.getrecursionlimit()

        self.emptyCnt=0

        self.header,self.data=self.parse(content)
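
    # --- Annotation (not part of the original module): a rough usage sketch for
    # --- FoamFileParser on a string that already contains a FoamFile header.
    # --- The content below is illustrative, not a real case file.
    #
    #   content='''FoamFile { version 2.0; format ascii; class dictionary; object demo; }
    #   startTime 0;
    #   endTime 100;
    #   '''
    #   parser=FoamFileParser(content)
    #   parser.getHeader()      # expected: a dictionary with 'version', 'format', ...
    #   parser.getData()        # expected: {'startTime': 0, 'endTime': 100}
    #   parser["endTime"]       # dictionary-style access to the parsed data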

    def __contains__(self,key):
        return key in self.data

    def __getitem__(self,key):
        return self.data[key]

    def __setitem__(self,key,value):
        self.data[key]=value

    def __delitem__(self,key):
        del self.data[key]

    def __iter__(self):
        for key in self.data:
            yield key

##    def __len__(self):
##        if self.data==None:
##            return 0
##        else:
##            return len(self.data)

    def resetDecoration(self):
        self._decorationBuffer=""

    def addToDecoration(self,text):
        if self.collectDecorations:
            self._decorationBuffer+=text

    def addCommentToDecorations(self,text):
        if self.preserveComments:
            self.addToDecoration(text)

    def addNewlinesToDecorations(self,text):
        if self.preserveNewLines:
            self.addToDecoration(text)

    def getDecoration(self):
        tmp=self._decorationBuffer
        self.resetDecoration()
        if len(tmp)>0:
            if tmp[-1]=='\n':
                tmp=tmp[:-1]
        return tmp

    def directory(self):
        if self.fName==None:
            return path.curdir
        else:
            return path.dirname(self.fName)

    def getData(self):
        """Get the data structure"""
        return self.data

    def getHeader(self):
        """Get the OpenFOAM-header"""
        return self.header

    def printContext(self,c,ind):
        """Prints the context of the current index"""
        print_("------")
        print_(c[max(0,ind-100):max(0,ind-1)])
        print_("------")
        print_(">",c[ind-1],"<")
        print_("------")
        print_(c[min(len(c),ind):min(len(c),ind+100)])
        print_("------")

    def parserError(self,text,c,ind):
        """Prints the error message of the parser and exits"""
        print_("PARSER ERROR:",text)
        print_("On index",ind)
        self.printContext(c,ind)
        raise PyFoamParserError("Unspecified")

    def condenseAllPreFixLists(self,orig):
        """Checks whether this list consists only of length-prefixed lists and,
        if so, strips the length prefixes"""
        isAllPreList=False
        if (len(orig) % 2)==0:
            isAllPreList=True
            for i in range(0,len(orig),2):
                if type(orig[i])==int and (type(orig[i+1]) in [list,Vector,Tensor,SymmTensor]):
                    if len(orig[i+1])!=orig[i]:
                        isAllPreList=False
                        break
                else:
                    isAllPreList=False
                    break

        if isAllPreList:
            return orig[1::2]
        else:
            return orig

    tokens = (
        'NAME', 'ICONST', 'FCONST', 'SCONST', 'FOAMFILE', 'UNIFORM',
        'NONUNIFORM', 'UNPARSEDCHUNK', 'CODESTREAMCHUNK', 'REACTION',
        'SUBSTITUTION', 'MERGE', 'OVERWRITE', 'ERROR', 'WARN', 'PROTECT',
        'DEFAULT', 'INCLUDE', 'INCLUDEIFPRESENT', 'REMOVE', 'INPUTMODE',
        'KANALGITTER', 'CODESTART', 'CODEEND', 'BINARYBLOB',
    )

    reserved = {
        'FoamFile': 'FOAMFILE',
        'uniform': 'UNIFORM',
        'nonuniform': 'NONUNIFORM',
        'include': 'INCLUDE',
        'includeIfPresent': 'INCLUDEIFPRESENT',
        'remove': 'REMOVE',
        'inputMode': 'INPUTMODE',
        'merge': 'MERGE',
        'overwrite': 'OVERWRITE',
        'error': 'ERROR',
        'warn': 'WARN',
        'protect': 'PROTECT',
        'default': 'DEFAULT',
    }

    states = (
        ('unparsed', 'exclusive'),
        ('codestream', 'exclusive'),
        ('mlcomment', 'exclusive'),
        ('binaryblob', 'exclusive'),
    )

    def t_unparsed_left(self,t):
        r'\('
        t.lexer.level+=1
        # print "left",t.lexer.level,

    def t_unparsed_right(self,t):
        r'\)'
        t.lexer.level-=1
        # print "right",t.lexer.level,
        if t.lexer.level < 0 :
            t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-1]
            # print t.value
            t.lexer.lexpos-=1
            t.type = "UNPARSEDCHUNK"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.begin('INITIAL')
            return t

    t_unparsed_ignore = ' \t\n0123456789.-+e'

    def t_unparsed_error(self,t):
        print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    t_binaryblob_ignore = ''

    def t_binaryblob_close(self,t):
        r"\)"
        size=t.lexer.lexpos-t.lexer.binary_start-1
        # print size,ord(t.lexer.lexdata[t.lexer.lexpos-1]),ord(t.lexer.lexdata[t.lexer.lexpos]),ord(t.lexer.lexdata[t.lexer.lexpos+1])
        # print size,ord(t.lexer.lexdata[t.lexer.binary_start-1]),ord(t.lexer.lexdata[t.lexer.binary_start])
        # print size % (t.lexer.binary_listlen), len(t.lexer.lexdata)
        if (size % t.lexer.binary_listlen)==0:
            # length of blob is multiple of the listlength
            nextChar=t.lexer.lexdata[t.lexer.lexpos]
            nextNextChar=t.lexer.lexdata[t.lexer.lexpos+1]
            if (nextChar in [';','\n'] and nextNextChar=='\n'):
                t.value = t.lexer.lexdata[t.lexer.binary_start:t.lexer.lexpos-1]
                assert(len(t.value)%t.lexer.binary_listlen == 0)
                t.lexer.lexpos-=1
                t.type = "BINARYBLOB"
                t.lexer.lineno += t.value.count('\n')
                t.lexer.begin('INITIAL')
                self.inBinary=False
                return t

    def t_binaryblob_throwaway(self,t):
        r'[^\)]'
        pass

    def t_binaryblob_error(self,t):
        print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    def t_codestream_end(self,t):
        r"\#\}"
        t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-2]
        t.lexer.lexpos-=2
        t.type = "CODESTREAMCHUNK"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t

    t_codestream_ignore = ''

    def t_codestream_throwaway(self,t):
        r'[^#]'
        pass

    def t_codestream_error(self,t):
        print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    def t_NAME(self,t):
        r'[a-zA-Z_][+\-<>(),.\*|a-zA-Z_0-9&%:]*'
        t.type=self.reserved.get(t.value,'NAME')
        if t.value[-1]==")":
            if t.value.count(")")>t.value.count("("):
                # Give back the last ) because it probably belongs to a list
                t.value=t.value[:-1]
                t.lexer.lexpos-=1

        return t

    def t_SUBSTITUTION(self,t):
        r'\$[a-zA-Z_.:{][+\-<>(),.\*|a-zA-Z_0-9&%:${}]*'
        t.type=self.reserved.get(t.value,'SUBSTITUTION')
        if t.value[-1]==")":
            if t.value.count(")")>t.value.count("("):
                # Give back the last ) because it probably belongs to a list
                t.value=t.value[:-1]
                t.lexer.lexpos-=1

        return t

    t_CODESTART = r'\#\{'

    t_CODEEND = r'\#\}'

    t_KANALGITTER = r'\#'

    t_ICONST = r'(-|)\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

    t_FCONST = r'(-|)((\d+)(\.\d*)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

    t_SCONST = r'\"([^\\\n]|(\\.))*?\"'

    literals = "(){};[]"

    t_ignore=" \t\r"

    # Define a rule so we can track line numbers
    def t_newline(self,t):
        r'\n+'
        t.lexer.lineno += len(t.value)
        now=t.lexer.lexpos
        next=t.lexer.lexdata.find('\n',now)
        if next>=0:
            line=t.lexer.lexdata[now:next]
            pos=line.find("=")

            if pos>=0 and not self.binaryMode:
                if ((line.find("//")>=0 and line.find("//")<pos)) or (line.find("/*")>=0 and line.find("/*")<pos) or (line.find('"')>=0 and line.find('"')<pos):
                    return
                t.value = line
                t.type = "REACTION"
                t.lexer.lineno += 1
                t.lexer.lexpos = next
                return t
        # self.addNewlinesToDecorations(t.value)

    # C++ comment (ignore)
    def t_ccode_comment(self,t):
        r'//.*'
        t.lexer.lineno += t.value.count('\n')
        self.addCommentToDecorations(t.value)

    def t_startmlcomment(self,t):
        r'/\*'
        t.lexer.begin('mlcomment')
        self.mllevel=1
        self.mlcomment_start = t.lexer.lexpos-2

    def t_mlcomment_newlevel(self,t):
        r'/\*'
        self.mllevel+=1

    def t_mlcomment_endcomment(self,t):
        r'\*/'
        self.mllevel-=1
        if self.mllevel<=0:
            t.lexer.begin('INITIAL')
            mlcomment=t.lexer.lexdata[self.mlcomment_start:t.lexer.lexpos]
            t.lexer.lineno += mlcomment.count('\n')
            self.addCommentToDecorations(mlcomment)

    def t_mlcomment_throwaway(self,t):
        r'[^\*]'
        pass

    t_mlcomment_ignore = ''

    def t_mlcomment_error(self,t):
        if t.lexer.lexdata[t.lexer.lexpos]!="*":
            print_("Error",t.lexer.lexdata[t.lexer.lexpos])
        t.lexer.skip(1)

    # Error handling rule
    def t_error(self,t):
        msg="Illegal character '%s' in line %d (pos: %d)" % (
            t.value[0],
            t.lexer.lineno,
            t.lexer.lexpos)
        raise PyFoamParserError(msg)
        # t.lexer.skip(1) # the old days when illegal characters were accepted

    def p_global(self,p):
        'global : header dictbody'
        p[0] = ( p[1] , p[2] )

    def p_gotHeader(self,p):
        'gotHeader :'
        p.lexer.lexpos=len(p.lexer.lexdata)
        self.inHeader=False

    def p_noBody(self,p):
        ''' noBody : FOAMFILE '{' dictbody gotHeader '}' '''
        p[0] = ( p[3] , {} )

    def p_noHeader(self,p):
        'noHeader : dictbody'
        p[0] = ( None , p[1] )

    def p_pureList(self,p):
        'pureList : onlyListOrPList'
        p[0] = ( None , p[1] )

    def p_onlyListOrPList(self,p):
        '''onlyListOrPList : list
                           | prelist '''
        p[0]=p[1]

    def p_pureListWithHeader(self,p):
        '''pureListWithHeader : header onlyListOrPList'''
        p[0] = ( p[1] , p[2] )

    def p_afterHeader(self,p):
        'afterHeader :'
        pass

    def p_boundaryDict(self,p):
        '''boundaryDict : header list
                        | header prelist '''
        # p[0] = ( p[1] , dict(zip(p[2][::2],p[2][1::2])) )
        p[0] = ( p[1] , p[2] )

    def p_header(self,p):
        'header : FOAMFILE dictionary'
        self.inHeader=False
        p[0] = p[2]

        # if p[0]["format"]=="binary":
        #     if not self.treatBinaryAsASCII:
        #         self.binaryMode=True
        #     else:
        #         self.binaryMode=False
        # elif p[0]["format"]=="ascii":
        #     self.binaryMode=False
        # else:
        #     raise FatalErrorPyFoamException("Don't know how to parse file format",p[0]["format"])

        self.collectDecorations=True

    def p_macro(self,p):
        '''macro : KANALGITTER include
                 | KANALGITTER inputMode
                 | KANALGITTER remove'''
        p[0] = p[1]+p[2]+"\n"
        if self.doMacros:
            p[0]="// "+p[0]

    def p_include(self,p):
        '''include : INCLUDE SCONST
                   | INCLUDEIFPRESENT SCONST'''
        if self.doMacros:
            fName=path.join(self.directory(),p[2][1:-1])
            read=True
            if p[1]=="includeIfPresent" and not path.exists(fName):
                read=False
            if read and not path.exists(fName):
                raise PyFoamParserError("The included file "+fName+" does not exist")
            if read:
                data=ParsedParameterFile(fName,
                                         noHeader=True,
                                         dictStack=self.dictStack,
                                         doMacroExpansion=self.doMacros)
                into=self.dictStack[-1]
                for k in data:
                    into[k]=data[k]

        p[0] = p[1] + " " + p[2]

    def p_inputMode(self,p):
        '''inputMode : INPUTMODE ERROR
                     | INPUTMODE WARN
                     | INPUTMODE PROTECT
                     | INPUTMODE DEFAULT
                     | INPUTMODE MERGE
                     | INPUTMODE OVERWRITE'''
        p[0] = p[1] + " " + p[2]
        self.inputMode=getattr(inputModes,p[2])

    def p_remove(self,p):
        '''remove : REMOVE word
                  | REMOVE wlist'''
        p[0] = p[1] + " "
        if type(p[2])==str:
            p[0]+=p[2]
        else:
            p[0]+="( "
            for w in p[2]:
                p[0]+=w+" "
            p[0]+=")"

    def p_integer(self,p):
        '''integer : ICONST'''
        p[0] = int(p[1])

    def p_float(self,p):
        '''integer : FCONST'''
        p[0] = float(p[1])

    def p_enter_dict(self,p):
        '''enter_dict :'''
        self.dictStack.append(DictProxy())

    def p_exit_dict(self,p):
        '''exit_dict :'''
        p[0]=self.dictStack.pop()

    def p_dictionary(self,p):
        '''dictionary : '{' enter_dict dictbody '}' exit_dict
                      | '{' '}' '''
        if len(p)==6:
            p[0] = p[5]
        else:
            p[0] = DictProxy()

    def p_dictbody(self,p):
        '''dictbody : dictbody dictline
                    | dictline
                    | empty'''

        if len(p)==3:
            p[0]=p[1]
            if self.duplicateCheck:
                if p[2][0] in p[0]:
                    if self.duplicateFail:
                        error("Key",p[2][0],"already defined")
                    else:
                        warning("Key",p[2][0],"already defined")
            if type(p[2][0])==DictRedirection and p[2][1]=='':
                p[0].addRedirection(p[2][0])
            else:
                if type(p[2][1])==DictRedirection:
                    p[0][p[2][0]]=p[2][1].getContent()
                else:
                    p[0][p[2][0]]=p[2][1]
            p[0].addDecoration(p[2][0],self.getDecoration())
        else:
            p[0]=self.dictStack[-1]

            if p[1]:
                if type(p[1][0])==DictRedirection and p[1][1]=='':
                    p[0].addRedirection(p[1][0])
                else:
                    if type(p[1][1])==DictRedirection:
                        p[0][p[1][0]]=p[1][1].getContent()
                    else:
                        p[0][p[1][0]]=p[1][1]

    def p_list(self,p):
        '''list : '(' itemlist ')' '''
        p[0] = self.condenseAllPreFixLists(p[2])
        if not self.noVectorOrTensor and (
                len(p[2])==3 or len(p[2])==9 or len(p[2])==6):
            isVector=True
            for i in p[2]:
                try:
                    float(i)
                except:
                    isVector=False
            if isVector:
                if len(p[2])==3:
                    p[0]=Vector(*p[2])
                elif len(p[2])==9:
                    p[0]=Tensor(*p[2])
                else:
                    p[0]=SymmTensor(*p[2])

    def p_wlist(self,p):
        '''wlist : '(' wordlist ')' '''
        p[0] = p[2]

    def p_unparsed(self,p):
        '''unparsed : UNPARSEDCHUNK'''
        p[0] = Unparsed(p[1])

    def p_binaryblob(self,p):
        '''binaryblob : BINARYBLOB'''
        p[0] = BinaryBlob(p[1])

    def p_prelist_seen(self,p):
        '''prelist_seen : '''
        if self.binaryMode:
            p.lexer.begin('binaryblob')
            p.lexer.binary_start = p.lexer.lexpos
            p.lexer.binary_listlen = p[-1]
            self.inBinary=True
        elif self.listLengthUnparsed!=None:
            if int(p[-1])>=self.listLengthUnparsed:
                p.lexer.begin('unparsed')
                p.lexer.level=0
                p.lexer.code_start = p.lexer.lexpos

    def p_codestream(self,p):
        '''codestream : codeSeen CODESTART CODESTREAMCHUNK CODEEND '''
        p[0] = Codestream(p[3])

    def p_codeSeen(self,p):
        '''codeSeen : '''
        p.lexer.begin('codestream')
        p.lexer.level=0
        p.lexer.code_start = p.lexer.lexpos

    def p_prelist(self,p):
        '''prelist : integer prelist_seen '(' itemlist ')'
                   | integer prelist_seen '(' binaryblob ')'
                   | integer prelist_seen '(' unparsed ')' '''
        if type(p[4])==Unparsed:
            p[0] = UnparsedList(int(p[1]),p[4].data)
        elif type(p[4])==BinaryBlob:
            p[0] = BinaryList(int(p[1]),p[4].data)
        else:
            p[0] = self.condenseAllPreFixLists(p[4])

    def p_itemlist(self,p):
        '''itemlist : itemlist item
                    | itemlist ';'
                    | item '''
        if len(p)==2:
            if p[1]==None:
                p[0]=[]
            else:
                p[0]=[ p[1] ]
        else:
            p[0]=p[1]
            if p[2]!=';':
                p[0].append(p[2])

    def p_wordlist(self,p):
        '''wordlist : wordlist word
                    | word '''
        if len(p)==2:
            if p[1]==None:
                p[0]=[]
            else:
                p[0]=[ p[1] ]
        else:
            p[0]=p[1]
            p[0].append(p[2])

    def p_word(self,p):
        '''word : NAME
                | UNIFORM
                | NONUNIFORM
                | MERGE
                | OVERWRITE
                | DEFAULT
                | WARN
                | PROTECT
                | ERROR'''
        if p[1] in BoolProxy.TrueStrings+BoolProxy.FalseStrings:
            p[0]=BoolProxy(textual=p[1])
        else:
            p[0]=p[1]

    def parseSubst_root(self,nm,stck):
        if nm[0]==":":
            stck=[self.dictStack[0]]
            nm=nm[1:]
        elif nm[0]=='.':
            nm=nm[1:]
            off=0
            while nm[0]=='.':
                nm=nm[1:]
                off+=1
            if off>0:
                stck=stck[:-off]
        elif nm[0]=="{":
            inner=nm[1:nm.rfind("}")].strip()
            if inner[0]=="$":
                nm=self.parseSubst_root(inner[1:],stck)()
            else:
                nm=inner
        rest=None
        if nm.find(".")>0:
            rest=nm[nm.find(".")+1:]
            nm=nm[:nm.find(".")]
        for i,di in enumerate(reversed(stck)):
            if nm in di:
                if rest==None:
                    v=DictRedirection(deepcopy(di[nm]),
                                      di[nm],
                                      nm)
                    return v
                else:
                    newStck=stck[:i]
                    newStck.append(di[nm])
                    return self.parseSubst_root(rest,newStck)

    def p_substitution(self,p):
        '''substitution : SUBSTITUTION'''
        if self.doMacros:
            nm=p[1][1:]
            p[0]="<Symbol '"+nm+"' not found>"
            stck=self.dictStack
            p[0]=self.parseSubst_root(nm,stck)
        else:
            p[0]=p[1]
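
    # --- Annotation (not part of the original module): a rough sketch of how $-variable
    # --- substitution behaves when macro expansion is switched on (using the
    # --- FoamStringParser convenience class defined further down in this module).
    #
    #   p=FoamStringParser("a 10; b $a;",doMacroExpansion=True)
    #   p["b"]      # expected to resolve to the value of 'a', i.e. 10
    #   p=FoamStringParser("a 10; b $a;")
    #   p["b"]      # without expansion the raw string '$a' is kept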

    def p_dictkey(self,p):
        '''dictkey : word
                   | SCONST'''
        if type(p[1])==BoolProxy:
            p[0]=str(p[1])
        else:
            p[0]=p[1]

    def p_dictline(self,p):
        '''dictline : dictkey dictitem ';'
                    | dictkey list ';'
                    | dictkey prelist ';'
                    | dictkey fieldvalue ';'
                    | macro
                    | substitution ';'
                    | dictkey codestream ';'
                    | dictkey dictionary'''
        if len(p)==4 and self.inHeader and p[1]=="format" and type(p[2])==str:
            if p[2]=="binary":
                if not self.treatBinaryAsASCII:
                    self.binaryMode=True
                else:
                    self.binaryMode=False
            elif p[2]=="ascii":
                self.binaryMode=False
            else:
                raise FatalErrorPyFoamException("Don't know how to parse file format",p[2])

        if len(p)==4 and type(p[2])==list:
            # remove the prefix from long lists (if present)
            doAgain=True
            tmp=p[2]
            while doAgain:
                doAgain=False
                for i in range(len(tmp)-1):
                    if type(tmp[i])==int and type(tmp[i+1]) in [list]:
                        if tmp[i]==len(tmp[i+1]):
                            nix=tmp[:i]+tmp[i+1:]
                            for i in range(len(tmp)):
                                tmp.pop()
                            tmp.extend(nix)
                            doAgain=True
                            break
        if len(p)==4:
            p[0] = ( p[1] , p[2] )
        elif len(p)==3:
            if p[2]==';':
                p[0]= (p[1],'')
            else:
                p[0] = ( p[1] , p[2] )
        else:
            p[0] = ( self.emptyCnt , p[1] )
            self.emptyCnt+=1

    def p_number(self,p):
        '''number : integer
                  | FCONST'''
        p[0] = p[1]

    def p_dimension(self,p):
        '''dimension : '[' number number number number number number number ']'
                     | '[' number number number number number ']' '''
        result=p[2:-1]
        if len(result)==5:
            result+=[0,0]

        p[0]=Dimension(*result)

    def p_vector(self,p):
        '''vector : '(' number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:5]
        else:
            p[0]=Vector(*p[2:5])

    def p_tensor(self,p):
        '''tensor : '(' number number number number number number number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:11]
        else:
            p[0]=Tensor(*p[2:11])

    def p_symmtensor(self,p):
        '''symmtensor : '(' number number number number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:8]
        else:
            p[0]=SymmTensor(*p[2:8])

    def p_fieldvalue_uniform(self,p):
        '''fieldvalue : UNIFORM number
                      | UNIFORM vector
                      | UNIFORM tensor
                      | UNIFORM symmtensor'''
        p[0] = Field(p[2])

    def p_fieldvalue_nonuniform(self,p):
        '''fieldvalue : NONUNIFORM NAME list
                      | NONUNIFORM prelist
                      | NONUNIFORM NAME prelist'''
        if len(p)==4:
            p[0] = Field(p[3],name=p[2])
        else:
            p[0] = Field(p[2])
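
    # --- Annotation (not part of the original module): a rough sketch of how field
    # --- values and short lists are represented after parsing (again using the
    # --- FoamStringParser convenience class defined below).
    #
    #   p=FoamStringParser("value uniform (1 2 3);")
    #   p["value"]      # expected: Field(Vector(1,2,3))
    #   p=FoamStringParser("pts ((0 0 0) (1 0 0));")
    #   p["pts"]        # expected: a list of two Vector objects
    #   p=FoamStringParser("pts ((0 0 0) (1 0 0));",noVectorOrTensor=True)
    #   p["pts"]        # expected: plain nested lists instead of Vector objects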

    def p_dictitem(self,p):
        '''dictitem : longitem
                    | pitem'''
        if type(p[1])==tuple:
            if len(p[1])==2 and p[1][0]=="uniform":
                p[0]=Field(p[1][1])
            elif len(p[1])==3 and p[1][0]=="nonuniform":
                p[0]=Field(p[1][2],name=p[1][1])
            else:
                p[0]=TupleProxy(p[1])
        else:
            p[0] = p[1]

    def p_longitem(self,p):
        '''longitem : pitemlist pitem'''
        p[0] = p[1]+(p[2],)

    def p_pitemlist(self,p):
        '''pitemlist : pitemlist pitem
                     | pitem '''
        if len(p)==2:
            p[0]=(p[1],)
        else:
            ## print type(p[1][-1])
            ## if type(p[1][-1])==int and type(p[2])==tuple:
            ##     print "Hepp",p[2]
            p[0]=p[1]+(p[2],)

    def p_pitem(self,p):
        '''pitem : word
                 | SCONST
                 | number
                 | dictionary
                 | list
                 | dimension
                 | substitution
                 | empty'''
        p[0] = p[1]

    def p_item(self,p):
        '''item : pitem
                | REACTION
                | list
                | dictionary'''
        p[0] = p[1]

    def p_empty(self,p):
        'empty :'
        pass

    def p_error(self,p):
        if self.inBinary:
            raise BinaryParserError("Problem reading binary", p)  # .type, p.lineno
        else:
            raise PyFoamParserError("Syntax error in file %s at token" % self.fName, p)  # .type, p.lineno
        # Just discard the token and tell the parser it's okay.
        # self.yacc.errok()

class PyFoamParserError(FatalErrorPyFoamException):
    def __init__(self,descr,data=None):
        FatalErrorPyFoamException.__init__(self,"Parser Error:",descr)
        self.descr=descr
        self.data=data

    def __str__(self):
        result="Error in PyFoamParser: '"+self.descr+"'"
        if self.data!=None:
            val=self.data.value
            if len(val)>100:
                val=val[:40]+" .... "+val[-40:]

            result+=" @ %r (Type: %s ) in line %d at position %d" % (val,
                                                                     self.data.type,
                                                                     self.data.lineno,
                                                                     self.data.lexpos)
        else:
            result+=" NONE"

        return result

    def __repr__(self):
        return str(self)

class BinaryParserError(PyFoamParserError):
    def __init__(self,descr,data=None):
        PyFoamParserError.__init__(self,descr,data)
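
# --- Annotation (not part of the original module): a minimal sketch of catching
# --- parser errors; malformed input is expected to raise PyFoamParserError.
def _examplePyFoamParserError():
    try:
        FoamStringParser("a 1")         # missing ';' should trigger a syntax error
    except PyFoamParserError:
        e = sys.exc_info()[1]           # same Python-2.5-compatible idiom as above
        print_("Parsing failed:",e)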

class FoamStringParser(FoamFileParser):
    """Convenience class that parses only a headerless OpenFOAM dictionary"""

    def __init__(self,
                 content,
                 debug=False,
                 noVectorOrTensor=False,
                 duplicateCheck=False,
                 listDict=False,
                 doMacroExpansion=False,
                 duplicateFail=False):
        """@param content: the string to be parsed
        @param debug: output debug information during parsing"""

        FoamFileParser.__init__(self,
                                content,
                                debug=debug,
                                noHeader=not listDict,
                                boundaryDict=False,
                                listDict=listDict,
                                noVectorOrTensor=noVectorOrTensor,
                                duplicateCheck=duplicateCheck,
                                doMacroExpansion=doMacroExpansion,
                                duplicateFail=duplicateFail)

    def __str__(self):
        return str(FoamFileGenerator(self.data))
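
# --- Annotation (not part of the original module): a minimal sketch of parsing a
# --- dictionary given as a plain string with FoamStringParser.
def _exampleFoamStringParser():
    p=FoamStringParser("nu nu [0 2 -1 0 0 0 0] 1e-05; flux (1 2 3);")
    print_(p["flux"])       # short lists of 3 numbers are returned as Vector objects
    print_(p["nu"])         # a dimensioned value is returned as a TupleProxy
    print_(str(p))          # re-generate an OpenFOAM-formatted string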

class ParsedBoundaryDict(ParsedParameterFile):
    """Convenience class that parses only an OpenFOAM polyMesh-boundaries file"""

    def __init__(self,
                 name,
                 treatBinaryAsASCII=False,
                 backup=False,
                 debug=False):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file"""

        ParsedParameterFile.__init__(self,
                                     name,
                                     backup=backup,
                                     treatBinaryAsASCII=treatBinaryAsASCII,
                                     debug=debug,
                                     boundaryDict=True)

    def parse(self,content):
        """Constructs a representation of the file"""
        temp=ParsedParameterFile.parse(self,content)
        self.content=DictProxy()
        for i in range(0,len(temp),2):
            self.content[temp[i]]=temp[i+1]
        return self.content

    def __str__(self):
        string="// File generated by PyFoam - sorry for the ugliness\n\n"
        temp=[]
        for k,v in iteritems(self.content):
            temp.append((k,v))

        temp.sort(key=lambda x:int(x[1]["startFace"]))

        temp2=[]

        for b in temp:
            temp2.append(b[0])
            temp2.append(b[1])

        generator=FoamFileGenerator(temp2,header=self.header)
        string+=str(generator)

        return string
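
# --- Annotation (not part of the original module): a minimal sketch for inspecting
# --- and changing a boundary file; case path and patch name are hypothetical and
# --- writeFile() is assumed to come from the FileBasis base class.
def _exampleParsedBoundaryDict():
    bnd=ParsedBoundaryDict("myCase/constant/polyMesh/boundary")
    for patch in bnd:                   # iterate over the patch names
        print_(patch,bnd[patch]["type"])
    bnd["movingWall"]["type"]="wall"    # change the type of one patch
    bnd.writeFile()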

class ParsedFileHeader(ParsedParameterFile):
    """Only parse the header of a file"""

    def __init__(self,name):
        ParsedParameterFile.__init__(self,name,backup=False,noBody=True)

    def __getitem__(self,name):
        return self.header[name]

    def __contains__(self,name):
        return name in self.header

    def __len__(self):
        return len(self.header)

# Should work with Python3 and Python2
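
# --- Annotation (not part of the original module): a minimal sketch of reading just
# --- the header of a file, e.g. to find out which field class it holds; the path is
# --- hypothetical.
def _exampleParsedFileHeader():
    hdr=ParsedFileHeader("myCase/0/U")
    print_(hdr["class"])        # e.g. 'volVectorField'
    print_("location" in hdr)   # header entries support 'in' and len()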