Package PyFoam :: Package RunDictionary :: Module ParsedParameterFile
[hide private]
[frames] | [no frames]

Source Code for Module PyFoam.RunDictionary.ParsedParameterFile

   1  #  ICE Revision: $Id: /local/openfoam/Python/PyFoam/PyFoam/RunDictionary/ParsedParameterFile.py 7947 2012-03-29T21:47:16.093247Z bgschaid  $  
   2  """Parameter file is read into memory and modified there""" 
   3   
   4  from FileBasis import FileBasisBackup 
   5  from PyFoam.Basics.PlyParser import PlyParser 
   6  from PyFoam.Basics.FoamFileGenerator import FoamFileGenerator 
   7   
   8  from PyFoam.Basics.DataStructures import Vector,Field,Dimension,DictProxy,TupleProxy,Tensor,SymmTensor,Unparsed,UnparsedList,Codestream,DictRedirection 
   9   
  10  from PyFoam.Error import error,warning,FatalErrorPyFoamException 
  11   
  12  from os import path 
  13  from copy import deepcopy 
  14   
class ParsedParameterFile(FileBasisBackup):
    """Parameterfile whose complete representation is read into
    memory, can be manipulated and afterwards written to disk"""

    def __init__(self,
                 name,
                 backup=False,
                 debug=False,
                 boundaryDict=False,
                 listDict=False,
                 listDictWithHeader=False,
                 listLengthUnparsed=None,
                 preserveComments=True,
                 noHeader=False,
                 binaryMode=False,
                 noBody=False,
                 doMacroExpansion=False,
                 dontRead=False,
                 noVectorOrTensor=False,
                 createZipped=True,
                 longListOutputThreshold=20):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file
        @param boundaryDict: the file to parse is a boundary file
        @param listDict: the file only contains a list
        @param listDictWithHeader: the file only contains a list and a header
        @param listLengthUnparsed: Lists longer than that length are not parsed
        @param binaryMode: Parse long lists in binary mode (to be overridden by
        the settings in the header)
        @param noHeader: don't expect a header
        @param noBody: don't read the body of the file (only the header)
        @param doMacroExpansion: expand #include and $var
        @param noVectorOrTensor: short lists of length 3, 6 and 9 are NOT
        interpreted as vectors or tensors
        @param dontRead: Do not read the file during construction
        @param longListOutputThreshold: Lists that are longer than this are
        prefixed with a length
        """

        # set before the base-class initialization so they are available
        # to readFile() below
        self.noHeader=noHeader
        self.noBody=noBody
        FileBasisBackup.__init__(self,
                                 name,
                                 backup=backup,
                                 createZipped=createZipped)
        self.debug=debug
        self.boundaryDict=boundaryDict
        self.listDict=listDict
        self.listDictWithHeader=listDictWithHeader
        self.listLengthUnparsed=listLengthUnparsed
        self.doMacros=doMacroExpansion
        self.preserveComments=preserveComments
        self.noVectorOrTensor=noVectorOrTensor
        # header and content are filled in by parse()
        self.header=None
        self.content=None
        self.longListOutputThreshold=longListOutputThreshold
        self.binaryMode=binaryMode

        if not dontRead:
            self.readFile()

    def parse(self,content):
        """Constructs a representation of the file

        @param content: the file contents as one string
        @return: the parsed data structure (also stored in self.content)"""
        parser=FoamFileParser(content,
                              debug=self.debug,
                              fName=self.name,
                              boundaryDict=self.boundaryDict,
                              listDict=self.listDict,
                              listDictWithHeader=self.listDictWithHeader,
                              listLengthUnparsed=self.listLengthUnparsed,
                              noHeader=self.noHeader,
                              noBody=self.noBody,
                              preserveComments=self.preserveComments,
                              binaryMode=self.binaryMode,
                              noVectorOrTensor=self.noVectorOrTensor,
                              doMacroExpansion=self.doMacros)

        self.content=parser.getData()
        self.header=parser.getHeader()
        return self.content

    # dictionary-like access is delegated to the parsed content
    def __contains__(self,key):
        return key in self.content

    def __getitem__(self,key):
        return self.content[key]

    def __setitem__(self,key,value):
        self.content[key]=value

    def __delitem__(self,key):
        del self.content[key]

    def __len__(self):
        return len(self.content)

    def __iter__(self):
        for key in self.content:
            yield key

    def __str__(self):
        """Generates a string from the contents in memory
        Used to be called makeString"""

        string="// -*- C++ -*-\n// File generated by PyFoam - sorry for the ugliness\n\n"

        generator=FoamFileGenerator(self.content,
                                    header=self.header,
                                    longListThreshold=self.longListOutputThreshold)
        string+=generator.makeString(firstLevel=True)

        return string
class WriteParameterFile(ParsedParameterFile):
    """A specialization that is used to only write to the file"""

    def __init__(self,
                 name,
                 backup=False,
                 className="dictionary",
                 objectName=None,
                 createZipped=False):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file
        @param className: value of the 'class' entry in the FoamFile header
        @param objectName: value of the 'object' entry in the FoamFile header;
        defaults to the basename of the file
        @param createZipped: create the file in zipped form"""
        # dontRead=True: the file may not exist yet, we only want to write it
        ParsedParameterFile.__init__(self,
                                     name,
                                     backup=backup,
                                     dontRead=True,
                                     createZipped=createZipped)

        # idiom fix: compare with None using 'is', not '=='
        if objectName is None:
            objectName=path.basename(name)

        # start with empty contents and a minimal valid OpenFOAM header
        self.content={}
        self.header={"version":"2.0",
                     "format":"ascii",
                     "class":className,
                     "object":objectName}
150
class Enumerate(object):
    """Tiny helper: turns a list of names into an object whose attributes
    map each name to its index in the list (a poor man's enum)."""

    def __init__(self, names):
        idx=0
        for nm in names:
            setattr(self, nm, idx)
            idx+=1

# the possible values of the OpenFOAM '#inputMode' directive
inputModes=Enumerate(["merge","error","warn","protect","overwrite","default"])
class FoamFileParser(PlyParser):
    """Class that parses a string that contains the contents of an
    OpenFOAM-file and builds a nested structure of dictionaries and
    lists from it

    NOTE: this is a PLY lexer/parser.  The docstrings of the t_* methods
    are the token regular expressions and the docstrings of the p_*
    methods are the grammar productions - they are functional and must
    not be edited as documentation."""

    def __init__(self,
                 content,
                 fName=None,
                 debug=False,
                 noHeader=False,
                 noBody=False,
                 doMacroExpansion=False,
                 boundaryDict=False,
                 preserveComments=True,
                 preserveNewlines=True,
                 listDict=False,
                 listDictWithHeader=False,
                 listLengthUnparsed=None,
                 binaryMode=False,
                 duplicateCheck=False,
                 noVectorOrTensor=False,
                 duplicateFail=True):
        """@param content: the string to be parsed
        @param fName: Name of the actual file (if any)
        @param debug: output debug information during parsing
        @param noHeader: switch that turns off the parsing of the header
        @param duplicateCheck: Check for duplicates in dictionaries
        @param duplicateFail: Fail if a duplicate is discovered"""

        self.binaryMode=binaryMode
        self.fName=fName
        self.data=None
        self.header=None
        self.debug=debug
        self.listLengthUnparsed=listLengthUnparsed
        self.doMacros=doMacroExpansion
        self.preserveComments=preserveComments
        self.preserveNewLines=preserveNewlines
        self.duplicateCheck=duplicateCheck
        self.duplicateFail=duplicateFail
        self.noVectorOrTensor=noVectorOrTensor

        # Make sure that the first comment is discarded
        self.collectDecorations=False
        self.inputMode=inputModes.merge

        # buffer for comments/newlines that 'decorate' the next entry
        self._decorationBuffer=""

        startCnt=0

        # stack of dictionaries currently being filled ($var lookups
        # during macro expansion walk this stack from the innermost out)
        self.dictStack=[DictProxy()]

        # select the grammar start symbol; only one of these switches
        # may be active at a time
        if noBody:
            self.start='noBody'
            startCnt+=1

        if noHeader:
            self.start='noHeader'
            startCnt+=1
            self.collectDecorations=True

        if listDict:
            self.start='pureList'
            startCnt+=1
            self.dictStack=[]
            self.collectDecorations=True

        if listDictWithHeader:
            self.start='pureListWithHeader'
            startCnt+=1

        if boundaryDict:
            self.start='boundaryDict'
            startCnt+=1

        if startCnt>1:
            error("Only one start symbol can be specified.",startCnt,"are specified")

        PlyParser.__init__(self,debug=debug)

        #sys.setrecursionlimit(50000)
        #print sys.getrecursionlimit()

        # counter used to generate unique keys for bare macro lines
        self.emptyCnt=0

        self.header,self.data=self.parse(content)

    # dictionary-like access is delegated to the parsed data
    def __contains__(self,key):
        return key in self.data

    def __getitem__(self,key):
        return self.data[key]

    def __setitem__(self,key,value):
        self.data[key]=value

    def __delitem__(self,key):
        del self.data[key]

    def __iter__(self):
        for key in self.data:
            yield key

##    def __len__(self):
##        if self.data==None:
##            return 0
##        else:
##            return len(self.data)

    def resetDecoration(self):
        """Discard the currently buffered decoration text"""
        self._decorationBuffer=""

    def addToDecoration(self,text):
        """Append text to the decoration buffer (if collecting is enabled)"""
        if self.collectDecorations:
            self._decorationBuffer+=text

    def addCommentToDecorations(self,text):
        """Buffer a comment, unless comment preservation is switched off"""
        if self.preserveComments:
            self.addToDecoration(text)

    def addNewlinesToDecorations(self,text):
        """Buffer newlines, unless newline preservation is switched off"""
        if self.preserveNewLines:
            self.addToDecoration(text)

    def getDecoration(self):
        """Return the buffered decoration (without a trailing newline)
        and reset the buffer"""
        tmp=self._decorationBuffer
        self.resetDecoration()
        if len(tmp)>0:
            if tmp[-1]=='\n':
                tmp=tmp[:-1]
        return tmp

    def directory(self):
        """Directory of the parsed file (used to resolve #include paths)"""
        if self.fName==None:
            return path.curdir
        else:
            return path.dirname(self.fName)

    def getData(self):
        """ Get the data structure"""
        return self.data

    def getHeader(self):
        """ Get the OpenFOAM-header"""
        return self.header

    def printContext(self,c,ind):
        """Prints the context of the current index"""
        print "------"
        print c[max(0,ind-100):max(0,ind-1)]
        print "------"
        print ">",c[ind-1],"<"
        print "------"
        print c[min(len(c),ind):min(len(c),ind+100)]
        print "------"

    def parserError(self,text,c,ind):
        """Prints the error message of the parser and exit"""
        print "PARSER ERROR:",text
        print "On index",ind
        self.printContext(c,ind)
        raise PyFoamParserError("Unspecified")

    def condenseAllPreFixLists(self,orig):
        """Checks whether this list is a list that consists only of prefix-Lists
        (pairs of <length> <list-of-that-length>); if so, the length prefixes
        are dropped and only the lists are returned"""
        isAllPreList=False
        if (len(orig) % 2)==0:
            isAllPreList=True
            for i in range(0,len(orig),2):
                if type(orig[i])==int and (type(orig[i+1]) in [list,Vector,Tensor,SymmTensor]):
                    if len(orig[i+1])!=orig[i]:
                        isAllPreList=False
                        break
                else:
                    isAllPreList=False
                    break

        if isAllPreList:
            return orig[1::2]
        else:
            return orig

    tokens = (
        'NAME',
        'ICONST',
        'FCONST',
        'SCONST',
        'FOAMFILE',
        'UNIFORM',
        'NONUNIFORM',
        'UNPARSEDCHUNK',
        'CODESTREAMCHUNK',
        'REACTION',
        'SUBSTITUTION',
        'MERGE',
        'OVERWRITE',
        'ERROR',
        'WARN',
        'PROTECT',
        'DEFAULT',
        'INCLUDE',
        'INCLUDEIFPRESENT',
        'REMOVE',
        'INPUTMODE',
        'KANALGITTER',
        'CODESTART',
        'CODEEND',
    )

    # keywords that get their own token type instead of NAME
    reserved = {
        'FoamFile'        : 'FOAMFILE',
        'uniform'         : 'UNIFORM',
        'nonuniform'      : 'NONUNIFORM',
        'include'         : 'INCLUDE',
        'includeIfPresent': 'INCLUDEIFPRESENT',
        'remove'          : 'REMOVE',
        'inputMode'       : 'INPUTMODE',
        'merge'           : 'MERGE',
        'overwrite'       : 'OVERWRITE',
        'error'           : 'ERROR',
        'warn'            : 'WARN',
        'protect'         : 'PROTECT',
        'default'         : 'DEFAULT',
    }

    # exclusive lexer states: 'unparsed' swallows long lists verbatim,
    # 'codestream' swallows #{ ... #} blocks, 'mlcomment' /* ... */ comments
    states = (
        ('unparsed', 'exclusive'),
        ('codestream', 'exclusive'),
        ('mlcomment', 'exclusive'),
    )

    # track parenthesis nesting while skipping an unparsed list
    def t_unparsed_left(self,t):
        r'\('
        t.lexer.level+=1
        # print "left",t.lexer.level,

    def t_unparsed_right(self,t):
        r'\)'
        t.lexer.level-=1
        # print "right",t.lexer.level,
        if t.lexer.level < 0 :
            # closing paren of the surrounding list: emit everything
            # collected so far as one UNPARSEDCHUNK and give the ')' back
            t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-1]
            # print t.value
            t.lexer.lexpos-=1
            t.type = "UNPARSEDCHUNK"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.begin('INITIAL')
            return t

    # characters that may appear inside an unparsed numeric list
    t_unparsed_ignore = ' \t\n0123456789.-+e'

    def t_unparsed_error(self,t):
        print "Error",t.lexer.lexdata[t.lexer.lexpos]
        t.lexer.skip(1)

    def t_codestream_end(self,t):
        r"\#\}"
        # emit the collected code as one chunk and give '#}' back
        t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-2]
        t.lexer.lexpos-=2
        t.type = "CODESTREAMCHUNK"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t

    t_codestream_ignore = ''

    def t_codestream_throwaway(self,t):
        r'[^#]'
        pass

    def t_codestream_error(self,t):
        print "Error",t.lexer.lexdata[t.lexer.lexpos]
        t.lexer.skip(1)

    def t_NAME(self,t):
        r'[a-zA-Z_][+\-<>(),.\*|a-zA-Z_0-9&%:]*'
        t.type=self.reserved.get(t.value,'NAME')
        if t.value[-1]==")":
            if t.value.count(")")>t.value.count("("):
                # Give back the last ) because it probably belongs to a list
                t.value=t.value[:-1]
                t.lexer.lexpos-=1

        return t

    # NOTE(review): method name says SUBSTITUITION but the token emitted is
    # 'SUBSTITUTION' - looks like a long-standing typo in the rule name;
    # renaming would change the token PLY derives from it, so left as-is
    def t_SUBSTITUITION(self,t):
        r'\$[a-zA-Z_][+\-<>(),.\*|a-zA-Z_0-9&%:]*'
        t.type=self.reserved.get(t.value,'SUBSTITUTION')
        if t.value[-1]==")":
            if t.value.count(")")>t.value.count("("):
                # Give back the last ) because it probably belongs to a list
                t.value=t.value[:-1]
                t.lexer.lexpos-=1

        return t

    t_CODESTART = r'\#\{'

    t_CODEEND = r'\#\}'

    # the '#' that introduces OpenFOAM directives ("Kanalgitter" is
    # German for the hash sign)
    t_KANALGITTER = r'\#'

    t_ICONST = r'(-|)\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

    t_FCONST = r'(-|)((\d+)(\.\d*)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

    t_SCONST = r'\"([^\\\n]|(\\.))*?\"'

    literals = "(){};[]"

    t_ignore=" \t\r"

    # Define a rule so we can track line numbers
    def t_newline(self,t):
        r'\n+'
        t.lexer.lineno += len(t.value)
        # peek at the next line: a '=' outside comments/strings is taken
        # to be a chemical REACTION line and is consumed whole
        now=t.lexer.lexpos
        next=t.lexer.lexdata.find('\n',now)
        if next>=0:
            line=t.lexer.lexdata[now:next]
            pos=line.find("=")
            if pos>=0:
                if ((line.find("//")>=0 and line.find("//")<pos)) or (line.find("/*")>=0 and line.find("/*")<pos) or (line.find('"')>=0 and line.find('"')<pos):
                    return
                t.value = line
                t.type = "REACTION"
                t.lexer.lineno += 1
                t.lexer.lexpos = next
                return t
        # self.addNewlinesToDecorations(t.value)

    # C++ comment (ignore)
    def t_ccode_comment(self,t):
        r'//.*'
        t.lexer.lineno += t.value.count('\n')
        self.addCommentToDecorations(t.value)

    def t_startmlcomment(self,t):
        r'/\*'
        # enter the multi-line-comment state; comments may nest
        t.lexer.begin('mlcomment')
        self.mllevel=1
        self.mlcomment_start = t.lexer.lexpos-2

    def t_mlcomment_newlevel(self,t):
        r'/\*'
        self.mllevel+=1

    def t_mlcomment_endcomment(self,t):
        r'\*/'
        self.mllevel-=1
        if self.mllevel<=0:
            # outermost comment closed: buffer the whole comment text
            t.lexer.begin('INITIAL')
            mlcomment=t.lexer.lexdata[self.mlcomment_start:t.lexer.lexpos]
            t.lexer.lineno += mlcomment.count('\n')
            self.addCommentToDecorations(mlcomment)

    def t_mlcomment_throwaway(self,t):
        r'[^\*]'
        pass

    t_mlcomment_ignore = ''

    def t_mlcomment_error(self,t):
        if t.lexer.lexdata[t.lexer.lexpos]!="*":
            print "Error",t.lexer.lexdata[t.lexer.lexpos]
        t.lexer.skip(1)

    # Error handling rule
    def t_error(self,t):
        raise PyFoamParserError("Illegal character '%s'" % t.value[0])
        # t.lexer.skip(1) # the old days when illegal characters were accepted

    def p_global(self,p):
        'global : header dictbody'
        p[0] = ( p[1] , p[2] )

    def p_gotHeader(self,p):
        'gotHeader :'
        # header fully read in noBody-mode: skip the rest of the input
        p.lexer.lexpos=len(p.lexer.lexdata)

    def p_noBody(self,p):
        ''' noBody : FOAMFILE '{' dictbody gotHeader '}' '''
        p[0] = ( p[3] , {} )

    def p_noHeader(self,p):
        'noHeader : dictbody'
        p[0] = ( None , p[1] )

    def p_pureList(self,p):
        'pureList : list'
        p[0] = ( None , p[1] )

    def p_pureListWithHeader(self,p):
        '''pureListWithHeader : header list
                              | header prelist '''
        p[0] = ( p[1] , p[2] )

    def p_boundaryDict(self,p):
        '''boundaryDict : header list
                        | header prelist '''
        #        p[0] = ( p[1] , dict(zip(p[2][::2],p[2][1::2])) )
        p[0] = ( p[1] , p[2] )

    def p_header(self,p):
        'header : FOAMFILE dictionary'
        p[0] = p[2]
        # the 'format' entry of the header decides how the body is parsed
        if p[0]["format"]=="binary":
            self.binaryMode=True
            raise FatalErrorPyFoamException("Can not parse binary files. It is not implemented")
        elif p[0]["format"]=="ascii":
            self.binaryMode=False
        else:
            raise FatalErrorPyFoamException("Don't know how to parse file format",p[0]["format"])
        self.collectDecorations=True

    def p_macro(self,p):
        '''macro : KANALGITTER include
                 | KANALGITTER inputMode
                 | KANALGITTER remove'''
        p[0] = p[1]+p[2]+"\n"
        # if the macro was expanded, keep only a comment as a reminder
        if self.doMacros:
            p[0]="// "+p[0]

    def p_include(self,p):
        '''include : INCLUDE SCONST
                   | INCLUDEIFPRESENT SCONST'''
        if self.doMacros:
            # expand the include by parsing the referenced file and
            # merging its entries into the current dictionary
            fName=path.join(self.directory(),p[2][1:-1])
            read=True
            if p[1]=="includeIfPresent" and not path.exists(fName):
                read=False
            if read and not path.exists(fName):
                raise PyFoamParserError("The included file "+fName+" does not exist")
            if read:
                data=ParsedParameterFile(fName,noHeader=True)
                into=self.dictStack[-1]
                for k in data:
                    into[k]=data[k]

        p[0] = p[1] + " " + p[2]

    def p_inputMode(self,p):
        '''inputMode : INPUTMODE ERROR
                     | INPUTMODE WARN
                     | INPUTMODE PROTECT
                     | INPUTMODE DEFAULT
                     | INPUTMODE MERGE
                     | INPUTMODE OVERWRITE'''
        p[0] = p[1] + " " + p[2]
        self.inputMode=getattr(inputModes,p[2])

    def p_remove(self,p):
        '''remove : REMOVE word
                  | REMOVE wlist'''
        p[0] = p[1] + " "
        if type(p[2])==str:
            p[0]+=p[2]
        else:
            p[0]+="( "
            for w in p[2]:
                p[0]+=w+" "
            p[0]+=")"

    def p_integer(self,p):
        '''integer : ICONST'''
        p[0] = int(p[1])

    def p_float(self,p):
        '''integer : FCONST'''
        p[0] = float(p[1])

    def p_enter_dict(self,p):
        '''enter_dict :'''
        # push a fresh dictionary for '{' (used by $var lookups)
        self.dictStack.append(DictProxy())

    def p_exit_dict(self,p):
        '''exit_dict :'''
        # pop at '}' and hand the finished dictionary to the grammar
        p[0]=self.dictStack.pop()

    def p_dictionary(self,p):
        '''dictionary : '{' enter_dict dictbody '}' exit_dict
                      | '{' '}' '''
        if len(p)==6:
            p[0] = p[5]
        else:
            p[0] = DictProxy()

    def p_dictbody(self,p):
        '''dictbody : dictbody dictline
                    | dictline
                    | empty'''

        if len(p)==3:
            p[0]=p[1]
            if self.duplicateCheck:
                if p[2][0] in p[0]:
                    if self.duplicateFail:
                        error("Key",p[2][0],"already defined")
                    else:
                        warning("Key",p[2][0],"already defined")
            if type(p[2][0])==DictRedirection and p[2][1]=='':
                p[0].addRedirection(p[2][0])
            else:
                if type(p[2][1])==DictRedirection:
                    p[0][p[2][0]]=p[2][1].getContent()
                else:
                    p[0][p[2][0]]=p[2][1]
                p[0].addDecoration(p[2][0],self.getDecoration())
        else:
            p[0]=self.dictStack[-1]

            if p[1]:
                if type(p[1][0])==DictRedirection and p[1][1]=='':
                    p[0].addRedirection(p[1][0])
                else:
                    if type(p[1][1])==DictRedirection:
                        p[0][p[1][0]]=p[1][1].getContent()
                    else:
                        p[0][p[1][0]]=p[1][1]

    def p_list(self,p):
        '''list : '(' itemlist ')' '''
        p[0] = self.condenseAllPreFixLists(p[2])
        # lists of 3/6/9 numbers become Vector/SymmTensor/Tensor
        # (unless noVectorOrTensor is set)
        if not self.noVectorOrTensor and (
            len(p[2])==3 or len(p[2])==9 or len(p[2])==6):
            isVector=True
            for i in p[2]:
                try:
                    float(i)
                except:
                    isVector=False
            if isVector:
                if len(p[2])==3:
                    p[0]=apply(Vector,p[2])
                elif len(p[2])==9:
                    p[0]=apply(Tensor,p[2])
                else:
                    p[0]=apply(SymmTensor,p[2])

    def p_wlist(self,p):
        '''wlist : '(' wordlist ')' '''
        p[0] = p[2]

    def p_unparsed(self,p):
        '''unparsed : UNPARSEDCHUNK'''
        p[0] = Unparsed(p[1])

    def p_prelist_seen(self,p):
        '''prelist_seen : '''
        # if the announced list length exceeds the threshold, switch the
        # lexer to the 'unparsed' state so the list is kept as raw text
        if self.listLengthUnparsed!=None:
            if int(p[-1])>=self.listLengthUnparsed:
                p.lexer.begin('unparsed')
                p.lexer.level=0
                p.lexer.code_start = p.lexer.lexpos

    def p_codestream(self,p):
        '''codestream : codeSeen CODESTART CODESTREAMCHUNK CODEEND '''
        p[0] = Codestream(p[3])

    def p_codeSeen(self,p):
        '''codeSeen : '''
        # switch the lexer into raw mode for the #{ ... #} block
        p.lexer.begin('codestream')
        p.lexer.level=0
        p.lexer.code_start = p.lexer.lexpos

    def p_prelist(self,p):
        '''prelist : integer prelist_seen '(' itemlist ')'
                   | integer prelist_seen '(' unparsed ')' '''
        if type(p[4])==Unparsed:
            p[0] = UnparsedList(int(p[1]),p[4].data)
        else:
            p[0] = self.condenseAllPreFixLists(p[4])

    def p_itemlist(self,p):
        '''itemlist : itemlist item
                    | itemlist ';'
                    | item '''
        if len(p)==2:
            if p[1]==None:
                p[0]=[]
            else:
                p[0]=[ p[1] ]
        else:
            p[0]=p[1]
            if p[2]!=';':
                p[0].append(p[2])

    def p_wordlist(self,p):
        '''wordlist : wordlist word
                    | word '''
        if len(p)==2:
            if p[1]==None:
                p[0]=[]
            else:
                p[0]=[ p[1] ]
        else:
            p[0]=p[1]
            p[0].append(p[2])

    def p_word(self,p):
        '''word : NAME
                | UNIFORM
                | NONUNIFORM
                | MERGE
                | OVERWRITE
                | DEFAULT
                | WARN
                | PROTECT
                | ERROR'''
        p[0]=p[1]

    def p_substitution(self,p):
        '''substitution : SUBSTITUTION'''
        if self.doMacros:
            # look the symbol up in the enclosing dictionaries
            # (innermost first)
            nm=p[1][1:]
            p[0]="<Symbol '"+nm+"' not found>"
            for di in reversed(self.dictStack):
                if nm in di:
                    p[0]=DictRedirection(deepcopy(di[nm]),
                                         di[nm],
                                         nm)
                    return
        else:
            p[0]=p[1]

    def p_dictkey(self,p):
        '''dictkey : word
                   | SCONST'''
        p[0]=p[1]

    def p_dictline(self,p):
        '''dictline : dictkey dictitem ';'
                    | dictkey list ';'
                    | dictkey prelist ';'
                    | dictkey fieldvalue ';'
                    | macro
                    | substitution ';'
                    | dictkey codestream ';'
                    | dictkey dictionary'''
        if len(p)==4 and type(p[2])==list:
            # remove the prefix from long lists (if present)
            doAgain=True
            tmp=p[2]
            while doAgain:
                doAgain=False
                for i in range(len(tmp)-1):
                    if type(tmp[i])==int and type(tmp[i+1]) in [list]:
                        if tmp[i]==len(tmp[i+1]):
                            nix=tmp[:i]+tmp[i+1:]
                            for i in range(len(tmp)):
                                tmp.pop()
                            tmp.extend(nix)
                            doAgain=True
                            break
        if len(p)==4:
            p[0] = ( p[1] , p[2] )
        elif len(p)==3:
            if p[2]==';':
                p[0]= (p[1],'')
            else:
                p[0] = ( p[1] , p[2] )
        else:
            # bare macro line: store under a synthetic numeric key
            p[0] = ( self.emptyCnt , p[1] )
            self.emptyCnt+=1

    def p_number(self,p):
        '''number : integer
                  | FCONST'''
        p[0] = p[1]

    def p_dimension(self,p):
        '''dimension : '[' number number number number number number number ']'
                     | '[' number number number number number ']' '''
        result=p[2:-1]
        # the short (5-entry) form is padded with zeros
        if len(result)==5:
            result+=[0,0]

        p[0]=apply(Dimension,result)

    def p_vector(self,p):
        '''vector : '(' number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:5]
        else:
            p[0]=apply(Vector,p[2:5])

    def p_tensor(self,p):
        '''tensor : '(' number number number number number number number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:11]
        else:
            p[0]=apply(Tensor,p[2:11])

    def p_symmtensor(self,p):
        '''symmtensor : '(' number number number number number number ')' '''
        if self.noVectorOrTensor:
            p[0]=p[2:8]
        else:
            p[0]=apply(SymmTensor,p[2:8])

    def p_fieldvalue_uniform(self,p):
        '''fieldvalue : UNIFORM number
                      | UNIFORM vector
                      | UNIFORM tensor
                      | UNIFORM symmtensor'''
        p[0] = Field(p[2])

    def p_fieldvalue_nonuniform(self,p):
        '''fieldvalue : NONUNIFORM NAME list
                      | NONUNIFORM NAME prelist'''
        p[0] = Field(p[3],name=p[2])

    def p_dictitem(self,p):
        '''dictitem : longitem
                    | pitem'''
        if type(p[1])==tuple:
            # tuples of items may actually be uniform/nonuniform fields
            if len(p[1])==2 and p[1][0]=="uniform":
                p[0]=Field(p[1][1])
            elif len(p[1])==3 and p[1][0]=="nonuniform":
                p[0]=Field(p[1][2],name=p[1][1])
            else:
                p[0]=TupleProxy(p[1])
        else:
            p[0] = p[1]

    def p_longitem(self,p):
        '''longitem : pitemlist pitem'''
        p[0] = p[1]+(p[2],)

    def p_pitemlist(self,p):
        '''pitemlist : pitemlist pitem
                     | pitem '''
        if len(p)==2:
            p[0]=(p[1],)
        else:
            ##            print type(p[1][-1])
            ##            if type(p[1][-1])==int and type(p[2])==tuple:
            ##                print "Hepp",p[2]
            p[0]=p[1]+(p[2],)

    def p_pitem(self,p):
        '''pitem : word
                 | SCONST
                 | number
                 | dictionary
                 | list
                 | dimension
                 | substitution
                 | empty'''
        p[0] = p[1]

    def p_item(self,p):
        '''item : pitem
                | REACTION
                | list
                | dictionary'''
        p[0] = p[1]

    def p_empty(self,p):
        'empty :'
        pass

    def p_error(self,p):
        raise PyFoamParserError("Syntax error at token", p) # .type, p.lineno
        # Just discard the token and tell the parser it's okay.
        # self.yacc.errok()
class PyFoamParserError(FatalErrorPyFoamException):
    """Exception raised by the parser; optionally carries the offending
    lexer token in 'data' so that the error location can be reported."""

    def __init__(self,descr,data=None):
        FatalErrorPyFoamException.__init__(self,"Parser Error:",descr)
        self.descr=descr
        self.data=data

    def __str__(self):
        msg="Error in PyFoamParser: '"+self.descr+"'"
        tok=self.data
        if tok!=None:
            # shorten very long token values for readability
            val=tok.value
            if len(val)>100:
                val=val[:40]+" .... "+val[-40:]
            msg=msg+" @ %r (Type: %s ) in line %d at position %d" % (val,
                                                                    tok.type,
                                                                    tok.lineno,
                                                                    tok.lexpos)
        return msg

    def __repr__(self):
        return self.__str__()
948
class FoamStringParser(FoamFileParser):
    """Convenience class that parses only a headerless OpenFOAM dictionary"""

    def __init__(self,
                 content,
                 debug=False,
                 noVectorOrTensor=False,
                 duplicateCheck=False,
                 listDict=False,
                 doMacroExpansion=False,
                 duplicateFail=False):
        """@param content: the string to be parsed
        @param debug: output debug information during parsing"""
        # delegate to the full parser: no header unless a bare list is
        # expected, never a boundary file
        FoamFileParser.__init__(
            self,
            content,
            debug=debug,
            noHeader=not listDict,
            boundaryDict=False,
            listDict=listDict,
            noVectorOrTensor=noVectorOrTensor,
            duplicateCheck=duplicateCheck,
            doMacroExpansion=doMacroExpansion,
            duplicateFail=duplicateFail)

    def __str__(self):
        """Render the parsed data back into OpenFOAM-notation"""
        return str(FoamFileGenerator(self.data))
976
class ParsedBoundaryDict(ParsedParameterFile):
    """Convenience class that parses only a OpenFOAM polyMesh-boundaries file"""

    def __init__(self,name,backup=False,debug=False):
        """@param name: The name of the parameter file
        @param backup: create a backup-copy of the file"""

        ParsedParameterFile.__init__(self,name,backup=backup,debug=debug,boundaryDict=True)

    def parse(self,content):
        """Constructs a representation of the file

        The boundary file body is a flat list 'name1 dict1 name2 dict2 ...';
        it is converted into a mapping from patch name to patch dictionary"""
        temp=ParsedParameterFile.parse(self,content)
        self.content={}
        for i in range(0,len(temp),2):
            self.content[temp[i]]=temp[i+1]
        return self.content

    def __str__(self):
        """Write the patches ordered by their startFace"""
        string="// File generated by PyFoam - sorry for the ugliness\n\n"

        # idiom fix: 'key=' sort instead of the removed-in-Py3 cmp-comparator
        # (and items() instead of Py2-only iteritems()); ordering is identical
        temp=sorted(self.content.items(),
                    key=lambda item:int(item[1]["startFace"]))

        # flatten back into the 'name dict name dict ...' form the
        # generator expects
        temp2=[]
        for name,patch in temp:
            temp2.append(name)
            temp2.append(patch)

        generator=FoamFileGenerator(temp2,header=self.header)
        string+=str(generator)

        return string
1012
class ParsedFileHeader(ParsedParameterFile):
    """Only parse the header of a file"""

    def __init__(self,name):
        """@param name: The name of the file whose header is read"""
        # noBody=True makes the parser stop after the FoamFile dictionary
        ParsedParameterFile.__init__(self,name,backup=False,noBody=True)

    # mapping-access goes to the header instead of the (empty) content
    def __getitem__(self,name):
        return self.header[name]

    def __contains__(self,name):
        return name in self.header

    def __len__(self):
        return len(self.header)
1027