1	package antlr;
2	
3	/**
4	 * <b>SOFTWARE RIGHTS</b>
5	 * <p>
6	 * ANTLR 2.5.0 MageLang Institute, 1998
7	 * <p>
8	 * We reserve no legal rights to ANTLR--it is fully in the
9	 * public domain. An individual or company may do whatever
10	 * they wish with source code distributed with ANTLR or the
11	 * code generated by ANTLR, including the incorporation of
12	 * ANTLR, or its output, into commercial software.
13	 * <p>
14	 * We encourage users to develop software with ANTLR. However,
15	 * we do ask that credit is given to us for developing
16	 * ANTLR. By "credit", we mean that if you use ANTLR or
17	 * incorporate any source code into one of your programs
18	 * (commercial product, research project, or otherwise) that
19	 * you acknowledge this fact somewhere in the documentation,
20	 * research report, etc... If you like ANTLR and have
21	 * developed a nice tool with the output, please mention that
22	 * you developed it using ANTLR. In addition, we ask that the
23	 * headers remain intact in our source code. As long as these
24	 * guidelines are kept, we expect to continue enhancing this
25	 * system and expect to make other tools available as they are
26	 * completed.
27	 * <p>
28	 * The ANTLR gang:
29	 * @version ANTLR 2.5.0 MageLang Institute, 1998
30	 * @author Terence Parr, <a href=http://www.MageLang.com>MageLang Institute</a>
31	 * @author <br>John Lilley, <a href=http://www.Empathy.com>Empathy Software</a>
32	 */
33	import java.util.Enumeration;
34	import java.util.Hashtable;
35	import antlr.collections.impl.BitSet;
36	import antlr.collections.impl.Vector;
37	import java.io.PrintWriter; //SAS: changed for proper text file io
38	import java.io.IOException;
39	import java.io.FileWriter;
40	//import java.io.ObjectOutput;
41	//import java.io.ObjectOutputStream;
42	
43	/** Generate MyParser.java, MyLexer.java, and MyParserTokenTypes.java */
44	public class JavaCodeGenerator extends CodeGenerator {
45		// non-zero if inside syntactic predicate generation
46		protected int syntacticPredLevel = 0;
47		
48		// Are we generating ASTs (for parsers and tree parsers) right now?
49		protected boolean genAST = false;
50	
51		// Are we saving the text consumed (for lexers) right now?
52		protected boolean saveText = false;
53	
54		// Grammar parameters set up to handle different grammar classes.
55		// These are used to get instanceof tests out of code generation
56		String labeledElementType;
57		String labeledElementASTType;
58		String labeledElementInit;
59		String commonExtraArgs;
60		String commonExtraParams;
61		String commonLocalVars;
62		String lt1Value;
63		String exceptionThrown;
64		String throwNoViable;
65	
66		// Tracks the rule being generated.  Used for mapTreeId
67		RuleBlock currentRule;
68		// Tracks the rule or labeled subrule being generated.  Used for AST generation.
69		String currentASTResult;
70		// Mapping between the ids used in the current alt, and the
71		// names of variables used to represent their AST values.
72		Hashtable treeVariableMap = new Hashtable();
73		// Count of unnamed generated variables
74		int astVarNumber = 1;
75	// Special value used to mark duplicates in treeVariableMap
76		protected static final String NONUNIQUE = new String();
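	// (a fresh String instance is used here, presumably so the sentinel can be
	// distinguished from any user-supplied name by reference identity)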
77	
78		private Vector semPreds;
79	
80	
81	/** Create a Java code-generator.
82		 * The caller must still call setTool, setBehavior, and setAnalyzer
83		 * before generating code.
84		 */
85		public JavaCodeGenerator() {
86			super();
87			charFormatter = new JavaCharFormatter();
88		}
89		/** Adds a semantic predicate string to the sem pred vector
90		    These strings will be used to build an array of sem pred names
91		    when building a debugging parser.  This method should only be
92		    called when the debug option is specified
93		 */
94		protected int addSemPred(String predicate) {
95			semPreds.appendElement(predicate);
96			return semPreds.size()-1;
97		}
98		public void exitIfError() {
99			if (tool.hasError) {
100				System.out.println("Exiting due to errors.");
101				System.exit(1);
102			}
103		}
104	/** Generate the parser, lexer, tree parser, and token types in Java */
105		public void gen() {
106			// Do the code generation
107			try {
108				// Loop over all grammars
109				Enumeration grammarIter = behavior.grammars.elements();
110				while (grammarIter.hasMoreElements()) {
111					Grammar g = (Grammar)grammarIter.nextElement();
112					// Connect all the components to each other
113					g.setGrammarAnalyzer(analyzer);
114					g.setCodeGenerator(this);
115					analyzer.setGrammar(g);
116				// To get the right overloading behavior across heterogeneous grammars
117					setupGrammarParameters(g);
118					g.generate();
119					exitIfError();
120				}
121	
122				// Loop over all token managers (some of which are lexers)
123				Enumeration tmIter = behavior.tokenManagers.elements();
124				while (tmIter.hasMoreElements()) {
125					TokenManager tm = (TokenManager)tmIter.nextElement();
126					if (!tm.isReadOnly()) {
127						// Write the token manager tokens as Java
128						// this must appear before genTokenInterchange so that
129						// labels are set on string literals
130						genTokenTypes(tm);
131						// Write the token manager tokens as plain text
132						genTokenInterchange(tm);
133					}
134					exitIfError();
135				}
136			}
137			catch (IOException e) {
138				System.out.println(e.getMessage());
139			}
140		}
141		/** Generate code for the given grammar element.
142	 * @param action The {...} action to generate
143		 */
144		public void gen(ActionElement action) {
145			if ( DEBUG_CODE_GENERATOR ) System.out.println("genAction("+action+")");
146			if ( action.isSemPred ) {
147				genSemPred(action.actionText);
148			}
149			else {
150				if ( grammar.hasSyntacticPredicate ) {
151					println("if ( guessing==0 ) {");
152					tabs++;
153				}
154	
155				ActionTransInfo tInfo = new ActionTransInfo();
156				String actionStr = processActionForTreeSpecifiers(action.actionText, action.getLine(), currentRule, tInfo);
157				
158				if ( tInfo.refRuleRoot!=null ) {
159					// Somebody referenced "#rule", make sure translated var is valid
160					// assignment to #rule is left as a ref also, meaning that assignments
161				// with no other refs like "#rule = foo();" still force this code to be
162					// generated (unnecessarily).
163					println(tInfo.refRuleRoot + " = ("+labeledElementASTType+")currentAST.root;");
164				}
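			// (e.g., an action that both reads and assigns "#rule" exercises the
			// root-variable setup above and the currentAST re-sync below)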
165				
166				// dump the translated action
167				printAction(actionStr);
168				
169				if ( tInfo.assignToRoot ) {
170					// Somebody did a "#rule=", reset internal currentAST.root
171					println("currentAST.root = "+tInfo.refRuleRoot+";");
172					// reset the child pointer too to be last sibling in sibling list
173				println("currentAST.child = "+tInfo.refRuleRoot+"!=null &&"+tInfo.refRuleRoot+".getFirstChild()!=null ?");
174					tabs++;
175					println(tInfo.refRuleRoot+".getFirstChild() : "+tInfo.refRuleRoot+";");				
176					tabs--;
177					println("currentAST.advanceChildToEnd();");
178				}
179				
180				if ( grammar.hasSyntacticPredicate ) {
181					tabs--;
182					println("}");
183				}
184			}
185		}
186		/** Generate code for the given grammar element.
187		 * @param blk The "x|y|z|..." block to generate
188		 */
189		public void gen(AlternativeBlock blk) {
190			if ( DEBUG_CODE_GENERATOR ) System.out.println("gen("+blk+")");
191			println("{");
192			genBlockPreamble(blk);
193	
194			// Tell AST generation to build subrule result
195			String saveCurrentASTResult = currentASTResult;
196			if (blk.getLabel() != null) {
197				currentASTResult = blk.getLabel();
198			}
199	
200			boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
201			
202			JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
203			genBlockFinish(howToFinish, throwNoViable);
204	
205			println("}");
206	
207			// Restore previous AST generation
208			currentASTResult = saveCurrentASTResult;
209		}
210		/** Generate code for the given grammar element.
211	 * @param end The block-end element to generate.  Block-end
212		 * elements are synthesized by the grammar parser to represent
213		 * the end of a block.
214		 */
215		public void gen(BlockEndElement end) {
216			if ( DEBUG_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")");
217		}
218		/** Generate code for the given grammar element.
219	 * @param atom The character literal reference to generate
220		 */
221		public void gen(CharLiteralElement atom) {
222			if ( DEBUG_CODE_GENERATOR ) System.out.println("genChar("+atom+")");
223			
224			if ( atom.getLabel()!=null ) {
225				println(atom.getLabel() + " = " + lt1Value + ";");
226			}
227			
228			boolean oldsaveText = saveText;
229			saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
230			genMatch(atom);
231			saveText = oldsaveText;
232		}
233		/** Generate code for the given grammar element.
234	 * @param r The character-range reference to generate
235		 */
236		public void gen(CharRangeElement r) {
237			if ( r.getLabel()!=null  && syntacticPredLevel == 0) {
238				println(r.getLabel() + " = " + lt1Value + ";");
239			}
240			println("matchRange("+r.beginText+","+r.endText+");");
241		}
242		/** Generate the lexer Java file */
243		public  void gen(LexerGrammar g) throws IOException {
244			// If debugging, create a new sempred vector for this grammar
245			if (g.debuggingOutput)
246				semPreds = new Vector();
247				
248			setGrammar(g);
249			if (!(grammar instanceof LexerGrammar)) {
250				tool.panic("Internal error generating lexer");
251			}
252	
253			// SAS: moved output creation to method so a subclass can change
254			//      how the output is generated (for VAJ interface)
255			setupOutput(grammar.getClassName());
256	
257			genAST = false;	// no way to gen trees.
258			saveText = true;	// save consumed characters.
259	
260			tabs=0;
261	
262			// Generate header common to all Java output files
263			genHeader();
264			// Do not use printAction because we assume tabs==0
265			println(behavior.headerAction);
266	
267			// Generate header specific to lexer Java file
268			// println("import java.io.FileInputStream;");
269			println("import java.io.InputStream;");
270			println("import java.io.Reader;");
271			println("import java.io.IOException;");
272			println("import java.util.Hashtable;");
273			println("import antlr." + grammar.getSuperClass() + ";");
274			println("import antlr.InputBuffer;");
275			println("import antlr.ByteBuffer;");
276			println("import antlr.CharBuffer;");
277			println("import antlr.Token;");
278			println("import antlr.CommonToken;");
279			println("import antlr.ScannerException;");
280			println("import antlr.Tokenizer;");
281			println("import antlr.ANTLRHashString;");
282			println("import antlr.collections.impl.BitSet;");
283	
284			// Generate user-defined lexer file preamble
285			println(grammar.preambleAction);
286	
287			// Generate lexer class definition
288			String sup=null;
289			if ( grammar.superClass!=null ) {
290				sup = grammar.superClass;
291			}
292			else {
293				sup = "antlr." + grammar.getSuperClass();
294			}	
295	
296			// print javadoc comment if any
297			if ( grammar.comment!=null ) {
298				_println(grammar.comment);
299			}
300			
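		// The emitted class header looks roughly like (the lexer name is
		// illustrative; the superclass comes from grammar.getSuperClass() or an
		// explicit grammar option):
		//     public class MyLexer extends antlr.CharScanner
		//      implements MyLexerTokenTypes, Tokenizer {
		// with any classHeaderSuffix option appended as an additional interface.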
301			print("public class " + grammar.getClassName() + " extends "+sup);
302			println(" implements " + grammar.tokenManager.getName() + "TokenTypes, Tokenizer");
303			Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
304			if ( tsuffix != null ) {
305				String suffix = Tool.stripFrontBack(tsuffix.getText(),"\"","\"");
306				if ( suffix != null ) {
307					print(", "+suffix);	// must be an interface name for Java
308				}
309			}
310			println(" {");
311	
312			// Generate user-defined lexer class members
313			print(
314				processActionForTreeSpecifiers(grammar.classMemberAction, 0, currentRule, null)
315			);
316	
317			//
318			// Generate the constructor from InputStream, which in turn
319			// calls the ByteBuffer constructor
320			//
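		// For a lexer class named (say) "MyLexer", the emitted code is roughly:
		//     public MyLexer(InputStream in) {
		//         this(new ByteBuffer(in));
		//     }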
321			println("public " + grammar.getClassName() + "(InputStream in) {");
322			tabs++;
323			println("this(new ByteBuffer(in));");
324			tabs--;
325			println("}");
326	
327			//
328			// Generate the constructor from Reader, which in turn
329			// calls the CharBuffer constructor
330			//
331			println("public " + grammar.getClassName() + "(Reader in) {");
332			tabs++;
333			println("this(new CharBuffer(in));");
334			tabs--;
335			println("}");
336	
337			//
338			// Generate the constructor from InputBuffer (char or byte)
339			//
340			println("public " + grammar.getClassName() + "(InputBuffer ib) {");
341			tabs++;
342			// if debugging, wrap the input buffer in a debugger
343			if (grammar.debuggingOutput)
344				println("super(new antlr.debug.DebuggingInputBuffer(ib));");
345			else
346				println("super(ib);");
347	
348			// if debugging, set up array variables and call user-overridable
349			//   debugging setup method
350			if ( grammar.debuggingOutput ) {
351				println("  ruleNames  = _ruleNames;");
352				println("  semPredNames = _semPredNames;");
353				println("  setupDebugging();");
354			}	
355	
356			// Generate the initialization of a hashtable
357			// containing the string literals used in the lexer
358			// The literals variable itself is in CharScanner
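		// Each string literal symbol produces a line of the form
		//     literals.put(new ANTLRHashString("begin", this), new Integer(4));
		// (the literal text and token type shown are illustrative only)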
359			println("literals = new Hashtable();");
360			Enumeration ids = grammar.tokenManager.getTokenSymbolElements();
361			while ( ids.hasMoreElements() ) {
362				TokenSymbol sym = (TokenSymbol)ids.nextElement();
363				if ( sym instanceof StringLiteralSymbol ) {
364					StringLiteralSymbol s = (StringLiteralSymbol)sym;
365					println("literals.put(new ANTLRHashString(" + s.getId() + ", this), new Integer(" + s.getTokenType() + "));");
366				}
367			}
368		// Generate the setting of various generated options.
369		println("caseSensitiveLiterals = " + g.caseSensitiveLiterals + ";");
370		println("setCaseSensitive("+g.caseSensitive+");");
371		tabs--;
372		println("}");
373	
374			// generate the rule name array for debugging
375			if (grammar.debuggingOutput) {
376				println("private static final String _ruleNames[] = {");
377	
378				ids = grammar.rules.elements();
379				int ruleNum=0;
380				while ( ids.hasMoreElements() ) {
381					GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
382					if ( sym instanceof RuleSymbol)
383						println("  \""+((RuleSymbol)sym).getId()+"\",");
384				}
385				println("};");
386			}		
387	
388			// Generate nextToken() rule.
389			// nextToken() is a synthetic lexer rule that is the implicit OR of all
390			// user-defined lexer rules.
391			genNextToken();
392	
393			// Generate code for each rule in the lexer
394			ids = grammar.rules.elements();
395			int ruleNum=0;
396			while ( ids.hasMoreElements() ) {
397				RuleSymbol sym = (RuleSymbol) ids.nextElement();
398				// Don't generate the synthetic rules
399				if (!sym.getId().equals("mnextToken")) {
400					genRule(sym, false, ruleNum++);
401				}
402				exitIfError();
403			}
404	
405			// Generate the semantic predicate map for debugging
406			if (grammar.debuggingOutput)
407				genSemPredMap();
408	
409			// Generate the bitsets used throughout the lexer
410			genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());
411	
412			println("");
413			println("}");
414			
415			// Close the lexer output stream
416			currentOutput.close();
417			currentOutput = null;
418		}
419		/** Generate code for the given grammar element.
420		 * @param blk The (...)+ block to generate
421		 */
422		public void gen(OneOrMoreBlock blk) {
423			if ( DEBUG_CODE_GENERATOR ) System.out.println("gen+("+blk+")");
424			String label;
425			String cnt;
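		// The emitted skeleton for a (...)+ subrule is roughly (N is the
		// subrule's ID, or its label if it has one):
		//     int _cntN=0;
		//     _loopN:
		//     do {
		//         ... alternatives ...
		//         _cntN++;
		//     } while (true);
		// with the block-finish code breaking out of _loopN once _cntN>=1 and
		// no alternative predicts.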
426			println("{");
427			genBlockPreamble(blk);
428			if ( blk.getLabel() != null ) {
429				cnt = "_cnt_"+blk.getLabel();
430			}
431			else {
432				cnt = "_cnt" + blk.ID;
433			}
434			println("int "+cnt+"=0;");
435			if ( blk.getLabel() != null ) {
436				label = blk.getLabel();
437			}
438			else {
439				label = "_loop" + blk.ID;
440			}
441			println(label+":");
442			println("do {");
443			tabs++;
444			
445			// Tell AST generation to build subrule result
446			String saveCurrentASTResult = currentASTResult;
447			if (blk.getLabel() != null) {
448				currentASTResult = blk.getLabel();
449			}
450	
451			boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
452			JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
453			genBlockFinish(
454				howToFinish, 
455				"if ( "+cnt+">=1 ) { break "+label+"; } else {" + throwNoViable + "}"
456			);
457	
458			println(cnt+"++;");
459			tabs--;
460			println("} while (true);");
461			println("}");
462	
463			// Restore previous AST generation
464			currentASTResult = saveCurrentASTResult;
465		}
466		/** Generate the parser Java file */
467		public void gen(ParserGrammar g) throws IOException {
468	
469			// if debugging, set up a new vector to keep track of sempred
470			//   strings for this grammar
471			if (g.debuggingOutput)
472				semPreds = new Vector();
473	
474			setGrammar(g);
475			if (!(grammar instanceof ParserGrammar)) {
476				tool.panic("Internal error generating parser");
477			}
478	
479			// Open the output stream for the parser and set the currentOutput
480			// SAS: moved file setup so subclass could do it (for VAJ interface)
481			setupOutput(grammar.getClassName());
482	
483			genAST = grammar.buildAST;
484	
485			tabs = 0;
486	
487			// Generate the header common to all output files.
488			genHeader();
489			// Do not use printAction because we assume tabs==0
490			println(behavior.headerAction);
491			
492			// Generate header for the parser
493			println("import java.io.IOException;");
494			println("import antlr.Tokenizer;");
495			println("import antlr.TokenBuffer;");
496			println("import antlr." + grammar.getSuperClass() + ";");
497			println("import antlr.Token;");
498			println("import antlr.ParserException;");
499			println("import antlr.NoViableAltException;");
500			println("import antlr.MismatchedTokenException;");
501			println("import antlr.SemanticException;");
502			println("import antlr.collections.impl.BitSet;");
503			if (grammar.buildAST) {
504				println("import antlr.collections.AST;");
505				println("import antlr.ASTPair;");
506				println("import antlr.collections.impl.ASTArray;");
507			}
508			
509			// Output the user-defined parser preamble
510			println(grammar.preambleAction);
511	
512			// Generate parser class definition
513			String sup=null;
514			if ( grammar.superClass != null )
515				sup = grammar.superClass;
516			else
517				sup = "antlr." + grammar.getSuperClass();
518	
519			// print javadoc comment if any
520			if ( grammar.comment!=null ) {
521				_println(grammar.comment);
522			}
523			
524			println("public class " + grammar.getClassName() + " extends "+sup);
525			println("       implements " + grammar.tokenManager.getName() + "TokenTypes");
526	
527			Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
528			if ( tsuffix != null ) {
529				String suffix = Tool.stripFrontBack(tsuffix.getText(),"\"","\"");
530				if ( suffix != null )
531					print(", "+suffix);	// must be an interface name for Java
532			}
533			println(" {");
534	
535			// set up an array of all the rule names so the debugger can
536			// keep track of them only by number -- less to store in tree...
537			if (grammar.debuggingOutput) {
538				println("private static final String _ruleNames[] = {");
539	
540				Enumeration ids = grammar.rules.elements();
541				int ruleNum=0;
542				while ( ids.hasMoreElements() ) {
543					GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
544					if ( sym instanceof RuleSymbol)
545						println("  \""+((RuleSymbol)sym).getId()+"\",");
546				}
547				println("};");
548			}
549			
550			// Generate user-defined parser class members
551			print(
552				processActionForTreeSpecifiers(grammar.classMemberAction, 0, currentRule, null)
553			);
554	
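		// For a parser class named (say) "MyParser" with maxk==2, the emitted
		// constructors look roughly like:
		//     protected MyParser(TokenBuffer tokenBuf, int k) { super(tokenBuf,k); tokenNames = _tokenNames; }
		//     public MyParser(TokenBuffer tokenBuf) { this(tokenBuf,2); }
		//     protected MyParser(Tokenizer lexer, int k) { super(lexer,k); tokenNames = _tokenNames; }
		//     public MyParser(Tokenizer lexer) { this(lexer,2); }
		// (plus debugging setup when debuggingOutput is on)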
555			// Generate parser class constructor from TokenBuffer
556			println("");
557			println("protected " + grammar.getClassName() + "(TokenBuffer tokenBuf, int k) {");
558			println("  super(tokenBuf,k);");
559			println("  tokenNames = _tokenNames;");
560			// if debugging, set up arrays and call the user-overridable
561			//   debugging setup method
562			if ( grammar.debuggingOutput ) {
563				println("  ruleNames  = _ruleNames;");
564				println("  semPredNames = _semPredNames;");
565				println("  setupDebugging(null);");
566			}	
567			println("}");
568			println("");
569	
570			println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) {");
571			println("  this(tokenBuf," + grammar.maxk + ");");
572			println("}");
573			println("");
574	
575			// Generate parser class constructor from Tokenizer (lexer)
576			println("protected " + grammar.getClassName()+"(Tokenizer lexer, int k) {");
577			println("  super(lexer,k);");
578			println("  tokenNames = _tokenNames;");
579	
580			// if debugging, set up arrays and call the user-overridable
581			//   debugging setup method
582			if ( grammar.debuggingOutput ) {
583				println("  ruleNames  = _ruleNames;");
584				println("  semPredNames = _semPredNames;");
585				println("  setupDebugging(lexer);");
586			}
587			println("}");
588			println("");
589	
590			println("public " + grammar.getClassName()+"(Tokenizer lexer) {");
591			println("  this(lexer," + grammar.maxk + ");");
592			println("}");
593			println("");
594	
595			// Generate code for each rule in the grammar
596			Enumeration ids = grammar.rules.elements();
597			int ruleNum=0;
598			while ( ids.hasMoreElements() ) {
599				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
600				if ( sym instanceof RuleSymbol) {
601					RuleSymbol rs = (RuleSymbol)sym;
602					genRule(rs, rs.references.size()==0, ruleNum++);
603				}
604				exitIfError();
605			}
606	
607			// Generate the token names
608			genTokenStrings();
609	
610			// Generate the bitsets used throughout the grammar
611			genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
612	
613			// Generate the semantic predicate map for debugging
614			if (grammar.debuggingOutput)
615				genSemPredMap();
616	
617			// Close class definition
618			println("");
619			println("}");
620	
621			// Close the parser output stream
622			currentOutput.close();
623			currentOutput = null;
624		}
625		/** Generate code for the given grammar element.
626	 * @param rr The rule-reference to generate
627		 */
628		public void gen(RuleRefElement rr) {
629			if ( DEBUG_CODE_GENERATOR ) System.out.println("genRR("+rr+")");
630		Object sym = grammar.getSymbol(rr.targetRule);
631		// check before casting so a bad reference cannot cause a ClassCastException
632		if (!(sym instanceof RuleSymbol))
633		{
634			tool.error("'" + rr.targetRule + "' does not name a grammar rule", rr.getLine());
635			return;
636		}
637		RuleSymbol rs = (RuleSymbol)sym;
638		if (!rs.isDefined())
639		{
640			tool.error("Rule '" + rr.targetRule + "' is not defined", rr.getLine());
641			return;
642		}
643	
644			genErrorTryForElement(rr);
645	
646			// AST value for labeled rule refs in tree walker.
647			// This is not AST construction;  it is just the input tree node value.
648			if ( grammar instanceof TreeWalkerGrammar &&
649				rr.getLabel() != null && 
650				syntacticPredLevel == 0 )
651			{
652				println(rr.getLabel() + " = _t==ASTNULL ? null : "+lt1Value+";");
653			}
654			
655			// if in lexer and ! on rule ref or alt or rule, save buffer index to kill later
656			if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
657				println("_saveIndex=text.length();");
658			}
659			
660			// Process return value assignment if any
661			printTabs();
662			if (rr.idAssign != null)
663			{
664				// Warn if the rule has no return type
665				if (rs.block.returnAction == null)
666				{
667					tool.warning("Rule '" + rr.targetRule + "' has no return type", rr.getLine());
668				}
669				_print(rr.idAssign + "=");
670			} else {
671				// Warn about return value if any, but not inside syntactic predicate
672				if ( !(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
673				{
674					tool.warning("Rule '" + rr.targetRule + "' returns a value", rr.getLine());
675				}
676			}
677	
678			// Call the rule
679			GenRuleInvocation(rr);
680	
681		// if in lexer and ! on element or alt or rule, kill the text added since the saved buffer index
682			if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
683				println("text.setLength(_saveIndex);");
684			}
685	
686			// if not in a syntactic predicate
687			if (syntacticPredLevel == 0) {
688				boolean doNoGuessTest = (
689					grammar.hasSyntacticPredicate &&
690					(
691						grammar.buildAST && rr.getLabel() != null || 
692						(genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
693					)
694				);
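			// i.e., wrap the post-invocation AST/label bookkeeping in a
			// "guessing==0" test only when syntactic predicates exist and there
			// is actually something to guard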
695				if (doNoGuessTest) {
696					println("if (guessing==0) {"); 
697					tabs++;
698				}
699	
700				if (grammar.buildAST && rr.getLabel() != null) {
701					// always gen variable for rule return on labeled rules
702					println(rr.getLabel() + "_AST = ("+labeledElementASTType+")returnAST;");
703				}
704				if (genAST) {
705					switch (rr.getAutoGenType()) {
706					case GrammarElement.AUTO_GEN_NONE:
707						// println("theASTFactory.addASTChild(currentAST, returnAST);");
708						println("astFactory.addASTChild(currentAST, returnAST);");
709						break;
710					case GrammarElement.AUTO_GEN_CARET:
711						tool.error("Internal: encountered ^ after rule reference");
712						break;
713					default:
714						break;
715					}
716				}
717	
718			// if a lexer and the rule ref is labeled, the Token label is declared at the rule level; just set it here
719				if ( grammar instanceof LexerGrammar && rr.getLabel() != null ) {
720					println(rr.getLabel()+"=_returnToken;");
721				}	
722	
723				if (doNoGuessTest) {
724					tabs--;
725					println("}"); 
726				}
727			}
728			genErrorCatchForElement(rr);
729		}
730		/** Generate code for the given grammar element.
731	 * @param atom The string-literal reference to generate
732		 */
733		public void gen(StringLiteralElement atom) {
734			if ( DEBUG_CODE_GENERATOR ) System.out.println("genString("+atom+")");
735	
736			// Variable declarations for labeled elements
737			if (atom.getLabel()!=null && syntacticPredLevel == 0) {
738				println(atom.getLabel() + " = " + lt1Value + ";");
739			}
740	
741			// AST
742			genElementAST(atom);
743	
744			// is there a bang on the literal?
745			boolean oldsaveText = saveText;
746			saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
747	
748			// matching
749			genMatch(atom);
750			
751			saveText = oldsaveText;
752	
753			// tack on tree cursor motion if doing a tree walker
754			if (grammar instanceof TreeWalkerGrammar) {
755				println("_t = _t.getNextSibling();");
756			}
757		}
758		/** Generate code for the given grammar element.
759	 * @param r The token-range reference to generate
760		 */
761		public void gen(TokenRangeElement r) {
762			genErrorTryForElement(r);
763			if ( r.getLabel()!=null  && syntacticPredLevel == 0) {
764				println(r.getLabel() + " = " + lt1Value + ";");
765			}
766	
767			// AST
768			genElementAST(r);
769	
770			// match
771			println("matchRange("+r.beginText+","+r.endText+");");
772			genErrorCatchForElement(r);
773		}
774		/** Generate code for the given grammar element.
775	 * @param atom The token-reference to generate
776		 */
777		public void gen(TokenRefElement atom) {
778			if ( DEBUG_CODE_GENERATOR ) System.out.println("genTokenRef("+atom+")");
779			if ( grammar instanceof LexerGrammar ) {
780				tool.panic("Token reference found in lexer");
781			}
782			genErrorTryForElement(atom);
783			// Assign Token value to token label variable
784			if ( atom.getLabel()!=null && syntacticPredLevel == 0) {
785				println(atom.getLabel() + " = " + lt1Value + ";");
786			}
787	
788			// AST
789			genElementAST(atom);
790			// matching
791			genMatch(atom);
792			genErrorCatchForElement(atom);
793			
794			// tack on tree cursor motion if doing a tree walker
795			if (grammar instanceof TreeWalkerGrammar) {
796				println("_t = _t.getNextSibling();");
797			}
798		}
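	/** Generate code for the given grammar element.
	 * @param t The tree-pattern element, i.e. a #(...) construct, to generate
	 */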
799		public void gen(TreeElement t) {
800			// save AST cursor
801			println("AST __t" + t.ID + " = _t;");
802	
803			// If there is a label on the root, then assign that to the variable
804			if (t.root.getLabel() != null) {
805				println(t.root.getLabel() + " = _t==ASTNULL ? null :("+labeledElementASTType +")_t;");
806			}
807	
808			// Generate AST variables
809			genElementAST(t.root);
810			if (grammar.buildAST) {
811				// Save the AST construction state
812				println("ASTPair __currentAST" + t.ID + " = currentAST.copy();");
813				// Make the next item added a child of the TreeElement root
814				println("currentAST.root = currentAST.child;");
815				println("currentAST.child = null;");
816			}
817	
818			// match root
819			genMatch(t.root);
820			// move to list of children
821			println("_t = _t.getFirstChild();"); 
822			
823			// walk list of children, generating code for each
824			for (int i=0; i<t.getAlternatives().size(); i++) {
825				Alternative a = t.getAlternativeAt(i);
826				AlternativeElement e = a.head;
827				while ( e != null ) {
828					e.generate();
829					e = e.next;
830				}
831			}
832	
833			if (grammar.buildAST) {
834				// restore the AST construction state to that just after the
835				// tree root was added
836				println("currentAST = __currentAST" + t.ID + ";");
837			}
838			// restore AST cursor
839			println("_t = __t" + t.ID + ";");
840			// move cursor to sibling of tree just parsed
841			println("_t = _t.getNextSibling();");
842		}
843		/** Generate the tree-parser Java file */
844		public void gen(TreeWalkerGrammar g) throws IOException {
845			// SAS: debugging stuff removed for now...
846			setGrammar(g);
847			if (!(grammar instanceof TreeWalkerGrammar)) {
848				tool.panic("Internal error generating tree-walker");
849			}
850			// Open the output stream for the parser and set the currentOutput
851			// SAS: move file open to method so subclass can override it
852			//      (mainly for VAJ interface)
853			setupOutput(grammar.getClassName());
854	
855			genAST = grammar.buildAST;
856			tabs = 0;
857	
858			// Generate the header common to all output files.
859			genHeader();
860			// Do not use printAction because we assume tabs==0
861			println(behavior.headerAction);
862			
863			// Generate header for the parser
864			println("import antlr." + grammar.getSuperClass() + ";");
865			println("import antlr.Token;");
866			println("import antlr.collections.AST;");
867			println("import antlr.ParserException;");
868			println("import antlr.NoViableAltException;");
869			println("import antlr.MismatchedTokenException;");
870			println("import antlr.SemanticException;");
871			println("import antlr.collections.impl.BitSet;");
872			if (grammar.buildAST) {
873				println("import antlr.ASTPair;");
874				println("import antlr.collections.impl.ASTArray;");
875			}
876		
877		// Output the user-defined parser preamble
878			println(grammar.preambleAction);
879	
880			// Generate parser class definition
881			String sup=null;
882			if ( grammar.superClass!=null ) {
883				sup = grammar.superClass;
884			}
885			else {
886				sup = "antlr." + grammar.getSuperClass();
887			}	
888			println("");
889	
890			// print javadoc comment if any
891			if ( grammar.comment!=null ) {
892				_println(grammar.comment);
893			}
894			
895			println("public class " + grammar.getClassName() + " extends "+sup);
896			println("       implements " + grammar.tokenManager.getName() + "TokenTypes");
897			Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
898			if ( tsuffix != null ) {
899				String suffix = Tool.stripFrontBack(tsuffix.getText(),"\"","\"");
900				if ( suffix != null ) {
901					print(", "+suffix);	// must be an interface name for Java
902				}
903			}
904			println(" {");
905	
906			// Generate user-defined parser class members
907			print(
908				processActionForTreeSpecifiers(grammar.classMemberAction, 0, currentRule, null)
909			);
910	
911			// Generate default parser class constructor
912			println("public " + grammar.getClassName() + "() {");
913			tabs++;
914			println("tokenNames = _tokenNames;");
915			tabs--;
916			println("}");
917			println("");
918	
919			// Generate code for each rule in the grammar
920			Enumeration ids = grammar.rules.elements();
921			int ruleNum=0;
922			String ruleNameInits = "";
923			while ( ids.hasMoreElements() ) {
924				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
925				if ( sym instanceof RuleSymbol) {
926					RuleSymbol rs = (RuleSymbol)sym;
927					genRule(rs, rs.references.size()==0, ruleNum++);
928				}
929				exitIfError();
930			}
931	
932			// Generate the token names
933			genTokenStrings();
934	
935			// Generate the bitsets used throughout the grammar
936			genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
937	
938			// Close class definition
939			println("}");
940			println("");
941	
942			// Close the parser output stream
943			currentOutput.close();
944			currentOutput = null;
945		}
946		/** Generate code for the given grammar element.
947		 * @param wc The wildcard element to generate
948		 */
949		public void gen(WildcardElement wc) {
950			// Variable assignment for labeled elements
951			if (wc.getLabel()!=null && syntacticPredLevel == 0) {
952				println(wc.getLabel() + " = " + lt1Value + ";");
953			}
954	
955			// AST
956			genElementAST(wc);
957			// Match anything but EOF
958			if (grammar instanceof TreeWalkerGrammar) {
959				println("if ( _t==null ) throw new MismatchedTokenException();");
960			}
961			else if (grammar instanceof LexerGrammar) {
962				println("matchNot(EOF_CHAR);");
963			}
964			else {
965				println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
966			}
967			
968			// tack on tree cursor motion if doing a tree walker
969			if (grammar instanceof TreeWalkerGrammar) {
970				println("_t = _t.getNextSibling();");
971			}
972		}
973		/** Generate code for the given grammar element.
974		 * @param blk The (...)* block to generate
975		 */
976		public void gen(ZeroOrMoreBlock blk) {
977			if ( DEBUG_CODE_GENERATOR ) System.out.println("gen*("+blk+")");
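		// Like the (...)+ case above, but the emitted do-loop is simply exited
		// with a labeled "break" when no alternative predicts, so no counter is
		// needed.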
978			println("{");
979			genBlockPreamble(blk);
980			String label;
981			if ( blk.getLabel() != null ) {
982				label = blk.getLabel();
983			}
984			else {
985				label = "_loop" + blk.ID;
986			}
987			println(label+":");
988			println("do {");
989			tabs++;
990		
991			// Tell AST generation to build subrule result
992			String saveCurrentASTResult = currentASTResult;
993			if (blk.getLabel() != null) {
994				currentASTResult = blk.getLabel();
995			}
996	
997			boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
998	
999			JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
1000			genBlockFinish(howToFinish, "break " + label + ";");
1001			
1002			tabs--;
1003			println("} while (true);");
1004			println("}");
1005	
1006			// Restore previous AST generation
1007			currentASTResult = saveCurrentASTResult;
1008		}
1009		/** Generate an alternative.
1010		  * @param alt  The alternative to generate
1011		  * @param blk The block to which the alternative belongs
1012		  */
1013		protected void genAlt(Alternative alt, AlternativeBlock blk) {
1014			// Save the AST generation state, and set it to that of the alt
1015			boolean savegenAST = genAST;
1016			genAST = genAST && alt.getAutoGen();
1017	
1018			boolean oldsaveTest = saveText;
1019			saveText = saveText && alt.getAutoGen();
1020	
1021			// Reset the variable name map for the alternative
1022			Hashtable saveMap = treeVariableMap;
1023			treeVariableMap = new Hashtable();
1024	
1025			// Generate try block around the alt for  error handling
1026			if (alt.exceptionSpec != null) {
1027				println("try {      // for error handling");
1028				tabs++;
1029			}
1030	
1031			AlternativeElement elem = alt.head;
1032			while ( !(elem instanceof BlockEndElement) ) {
1033				elem.generate(); // alt can begin with anything. Ask target to gen.
1034				elem = elem.next;
1035			}
1036	
1037			if ( genAST) {
1038				if (blk instanceof RuleBlock) {
1039					// Set the AST return value for the rule
1040					RuleBlock rblk = (RuleBlock)blk;
1041					println(rblk.getRuleName() + "_AST = ("+labeledElementASTType+")currentAST.root;");
1042				} 
1043				else if (blk.getLabel() != null) {
1044					// ### future: also set AST value for labeled subrules.
1045					// println(blk.getLabel() + "_AST = ("+labeledElementASTType+")currentAST.root;");
1046				}
1047			}
1048	
1049			if (alt.exceptionSpec != null) {
1050				// close try block
1051				tabs--;
1052				println("}");
1053				genErrorHandler(alt.exceptionSpec);
1054			}
1055	
1056			genAST = savegenAST;
1057			saveText = oldsaveTest;
1058	
1059			treeVariableMap = saveMap;
1060		}
1061		/** Generate all the bitsets to be used in the parser or lexer
1062		 * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
1063		 * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
1064		 * Note that most languages do not support object initialization inside a
1065		 * class definition, so other code-generators may have to separate the
1066		 * bitset declarations from the initializations (e.g., put the initializations
1067		 * in the generated constructor instead).
1068		 * @param bitsetList The list of bitsets to generate.
1069		 * @param maxVocabulary Ensure that each generated bitset can contain at least this value.
1070		 */
1071		protected void genBitsets(
1072			Vector bitsetList,
1073			int maxVocabulary
1074		) {
1075			println("");
1076			for (int i = 0; i < bitsetList.size(); i++)
1077			{
1078				BitSet p = (BitSet)bitsetList.elementAt(i);
1079				// Ensure that generated BitSet is large enough for vocabulary
1080				p.growToInclude(maxVocabulary);
1081				// initialization data
1082				println(
1083					"private static final long " + getBitsetName(i) + "_data_" + "[] = { " +
1084					p.toStringOfWords() + 
1085					" };"
1086				);
1087				// BitSet object
1088				println(
1089					"public static final BitSet " + getBitsetName(i) + " = new BitSet(" +
1090					getBitsetName(i) + "_data_" + 
1091					");"
1092				);
1093			}
1094		}
1095		/** Generate the finish of a block, using a combination of the info
1096		 * returned from genCommonBlock() and the action to perform when
1097		 * no alts were taken
1098		 * @param howToFinish The return of genCommonBlock()
1099		 * @param noViableAction What to generate when no alt is taken
1100		 */
1101		private void genBlockFinish(JavaBlockFinishingInfo howToFinish, String noViableAction)
1102		{
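		// Typical output when an if-chain was generated and an error clause is
		// still needed:
		//     else {
		//         <noViableAction>
		//     }
		// followed by any postscript (e.g. the "}" that closes an LL(1) switch).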
1103			if (howToFinish.needAnErrorClause &&
1104				 (howToFinish.generatedAnIf || howToFinish.generatedSwitch)) {
1105				if ( howToFinish.generatedAnIf ) {
1106					println("else {");
1107				}
1108				else {
1109					println("{");
1110				}
1111				tabs++;
1112				println(noViableAction);
1113				tabs--;
1114				println("}");
1115			}
1116	
1117			if ( howToFinish.postscript!=null ) {
1118				println(howToFinish.postscript);
1119			}
1120		}
1121		/** Generate the header for a block, which may be a RuleBlock or a
1122	 * plain AlternativeBlock.  This generates any variable declarations,
1123	 * init-actions, and syntactic-predicate-testing variables.
1124	 * @param blk The block for which the preamble is to be generated.
1125		 */
1126		protected void genBlockPreamble(AlternativeBlock blk) {
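		// For a labeled token or char reference such as "id:ID" this emits roughly:
		//     <labeledElementType> id = <labeledElementInit>;
		//     <labeledElementASTType> id_AST = null;   // only when building ASTs
		// while labeled rule refs and subrules get the _AST variable (plus a Token
		// variable in lexers or an input-node variable in tree walkers).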
1127			// define labels for rule blocks.
1128			if ( blk instanceof RuleBlock ) {
1129				RuleBlock rblk = (RuleBlock)blk;
1130				if ( rblk.labeledElements!=null ) {
1131					for (int i=0; i<rblk.labeledElements.size(); i++) {
1132	
1133						AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
1134						//System.out.println("looking at labeled element: "+a);
1135						// Variables for labeled rule refs and subrules are different than
1136						// variables for grammar atoms.  This test is a little tricky because
1137						// we want to get all rule refs and ebnf, but not rule blocks or
1138						// syntactic predicates
1139						if (
1140							a instanceof RuleRefElement ||
1141							a instanceof AlternativeBlock &&
1142							!(a instanceof RuleBlock) &&
1143							!(a instanceof SynPredBlock)
1144						) {
1145	
1146							if (
1147								!(a instanceof RuleRefElement) && 
1148								((AlternativeBlock)a).not &&
1149								analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
1150							) {
1151								// Special case for inverted subrules that will be inlined.
1152								// Treat these like token or char literal references
1153								println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1154								if (grammar.buildAST) {
1155									println(labeledElementASTType+" " + a.getLabel() + "_AST = null;");
1156								}
1157							}
1158							else {
1159								if (grammar.buildAST) {
1160									// Always gen AST variables for labeled elements, even if the
1161									// element itself is marked with !
1162									println(labeledElementASTType+" " + a.getLabel() + "_AST = null;");
1163								}
1164								if ( grammar instanceof LexerGrammar ) {
1165									println("Token "+a.getLabel()+"=null;");
1166								}	
1167								if (grammar instanceof TreeWalkerGrammar) {
1168									// always generate rule-ref variables for tree walker
1169									println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1170								}
1171							}
1172						}
1173						else {
1174							// It is a token or literal reference.  Generate the
1175							// correct variable type for this grammar
1176							println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
1177							// In addition, generate *_AST variables if building ASTs
1178							if (grammar.buildAST) {
1179								println(labeledElementASTType+" " + a.getLabel() + "_AST = null;");
1180							}
1181						}
1182					}
1183				}
1184			}
1185	
1186			// dump out init action
1187			if ( blk.initAction!=null ) {
1188				printAction(
1189					processActionForTreeSpecifiers(blk.initAction, 0, currentRule, null)
1190				);
1191			}
1192		}
1193		/** Generate a series of case statements that implement a BitSet test.
1194	 * @param p The BitSet for which cases are to be generated
1195		 */
1196		protected void genCases(BitSet p) {
1197			if ( DEBUG_CODE_GENERATOR ) System.out.println("genCases("+p+")");
1198			int[] elems;
1199	
1200			elems = p.toArray();
1201			// Wrap cases four-per-line for lexer, one-per-line for parser
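		// e.g. lexer output:   case 'a':  case 'b':  case 'c':  case 'd':
		//      parser output:  case ID:
		// (case values come from getValueString(); names shown are illustrative)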
1202			int wrap = (grammar instanceof LexerGrammar) ? 4 : 1;
1203			int j=1;
1204			boolean startOfLine = true;
1205			for (int i = 0; i < elems.length; i++) {
1206				if (j==1) {
1207					print("");
1208				} else {
1209					_print("  ");
1210				}
1211				_print("case " + getValueString(elems[i]) + ":");
1212	
1213				if (j==wrap) {
1214					_println(""); 
1215					startOfLine = true;
1216					j=1;
1217				}
1218				else {
1219					j++;
1220					startOfLine = false;
1221				}
1222			}
1223			if (!startOfLine) {
1224				_println("");
1225			}
1226		}
1227	/** Generate common code for a block of alternatives; return a postscript
1228	 * that needs to be generated at the end of the block.  Other routines
1229	 * may append else-clauses and such for error checking before the postscript
1230	 * is generated.
1231		 * If the grammar is a lexer, then generate alternatives in an order where 
1232		 * alternatives requiring deeper lookahead are generated first, and 
1233		 * EOF in the lookahead set reduces the depth of the lookahead.
1234		 * @param blk The block to generate
1235		 * @param noTestForSingle If true, then it does not generate a test for a single alternative.
1236		 */
1237		public JavaBlockFinishingInfo genCommonBlock(
1238			AlternativeBlock blk, 
1239			boolean noTestForSingle)
1240		{
1241			int nIF=0;
1242			boolean createdLL1Switch = false;
1243			int closingBracesOfIFSequence = 0;
1244			JavaBlockFinishingInfo finishingInfo = new JavaBlockFinishingInfo();
1245			if ( DEBUG_CODE_GENERATOR ) System.out.println("genAltBlk("+blk+")");
1246	
1247			// Save the AST generation state, and set it to that of the block
1248			boolean savegenAST = genAST;
1249			genAST = genAST && blk.getAutoGen();
1250			
1251			boolean oldsaveTest = saveText;
1252			saveText = saveText && blk.getAutoGen();
1253	
1254			// Is this block inverted?  If so, generate special-case code
1255			if (
1256				blk.not &&
1257				analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar)
1258			) {
1259				Lookahead p = analyzer.look(1, blk);
1260				// Variable assignment for labeled elements
1261				if (blk.getLabel() != null && syntacticPredLevel == 0) {
1262					println(blk.getLabel() + " = " + lt1Value + ";");
1263				}
1264	
1265				// AST
1266				genElementAST(blk);
1267	
1268				String astArgs="";
1269			if (grammar instanceof TreeWalkerGrammar) {
1270				astArgs="_t,";
1271			}
1272	
1273			// match the bitset for the alternative
1274			println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
1275	
1276				// tack on tree cursor motion if doing a tree walker
1277				if (grammar instanceof TreeWalkerGrammar) {
1278					println("_t = _t.getNextSibling();");
1279				}
1280				return finishingInfo;
1281			}
1282	
1283			// Special handling for single alt
1284			if (blk.getAlternatives().size() == 1) {
1285				Alternative alt = blk.getAlternativeAt(0);
1286				// Generate a warning if there is a synPred for single alt.
1287				if (alt.synPred != null)
1288				{
1289					tool.warning(
1290						"Syntactic predicate superfluous for single alternative", 
1291						blk.getAlternativeAt(0).synPred.getLine()
1292					);
1293				}
1294				if (noTestForSingle) {
1295					if (alt.semPred != null) {
1296						// Generate validating predicate
1297						genSemPred(alt.semPred);
1298					}
1299					genAlt(alt, blk);
1300					return finishingInfo;
1301				}
1302			}
1303	
1304			// count number of simple LL(1) cases; only do switch for
1305			// many LL(1) cases (no preds, no end of token refs)
1306			// We don't care about exit paths for (...)*, (...)+
1307			// because we don't explicitly have a test for them
1308			// as an alt in the loop.
1309			int nLL1 = 0;
1310			for (int i=0; i<blk.getAlternatives().size(); i++) {
1311				Alternative a = blk.getAlternativeAt(i);
1312				if ( a.lookaheadDepth == 1 && a.semPred == null &&
1313					  !a.cache[1].containsEpsilon()) {
1314					nLL1++;
1315				}
1316			}
1317	
1318			// do LL(1) cases
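		// The LL(1) portion becomes a switch on the k=1 lookahead, roughly:
		//     switch ( LA(1) ) {
		//     case ID: { ...alt... break; }
		//     ...
		//     default:
		//         ...non-LL(1) alts or the error clause...
		//     }
		// (the actual test expression comes from lookaheadString(1); token names
		// here are illustrative)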
1319			if ( nLL1 >= makeSwitchThreshold) {
1320				// Determine the name of the item to be compared
1321				String testExpr = lookaheadString(1);
1322				createdLL1Switch = true;
1323				// when parsing trees, convert null to valid tree node with NULL lookahead
1324				if ( grammar instanceof TreeWalkerGrammar ) {
1325					println("if (_t==null) _t=ASTNULL;");
1326				}
1327				println("switch ( "+testExpr+") {");
1328				for (int i=0; i<blk.alternatives.size(); i++) {
1329					Alternative alt = blk.getAlternativeAt(i);
1330					// ignore any non-LL(1) alts, predicated alts or end-of-token alts
1331					if ( alt.lookaheadDepth!=1 || alt.semPred != null ||
1332						  alt.cache[1].containsEpsilon() ) {
1333						continue;
1334					}
1335					Lookahead p = alt.cache[1];
1336					if (p.fset.degree() == 0 && !p.containsEpsilon()) {
1337						tool.warning("Alternate omitted due to empty prediction set",
1338							alt.head.getLine());
1339					}
1340					else {
1341						genCases(p.fset);
1342						println("{");
1343						tabs++;
1344						genAlt(alt, blk);
1345						println("break;");
1346						tabs--;
1347						println("}");
1348					}
1349				}
1350				println("default:");
1351				tabs++;
1352			}
1353	
1354			// do non-LL(1) and nondeterministic cases
1355			// This is tricky in the lexer, because of cases like:
1356			//     STAR : '*' ;
1357			//     ASSIGN_STAR : "*=";
1358			// Since nextToken is generated without a loop, then the STAR will
1359		// have end-of-token as its lookahead set for LA(2).  So, we must generate the
1360			// alternatives containing trailing end-of-token in their lookahead sets *after*
1361			// the alternatives without end-of-token.  This implements the usual
1362			// lexer convention that longer matches come before shorter ones, e.g.
1363			// "*=" matches ASSIGN_STAR not STAR
1364			//
1365			// For non-lexer grammars, this does not sort the alternates by depth
1366			// Note that alts whose lookahead is purely end-of-token at k=1 end up
1367			// as default or else clauses.
1368			int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
1369			for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
1370				if ( DEBUG_CODE_GENERATOR ) System.out.println("checking depth "+altDepth);
1371				for (int i=0; i<blk.alternatives.size(); i++) {
1372					Alternative alt = blk.getAlternativeAt(i);
1373					if ( DEBUG_CODE_GENERATOR ) System.out.println("genAlt: "+i);
1374					// if we made a switch above, ignore what we already took care
1375					// of.  Specifically, LL(1) alts with no preds
1376					// that do not have end-of-token in their prediction set
1377					if ( createdLL1Switch &&
1378						  (alt.lookaheadDepth==1 && alt.semPred == null &&
1379						   !alt.cache[1].containsEpsilon()) ) {
1380						if ( DEBUG_CODE_GENERATOR ) System.out.println("ignoring alt because it was in the switch");
1381						continue;
1382					}
1383					String e;
1384	
1385					boolean unpredicted = false;
1386	
1387					if (grammar instanceof LexerGrammar) {
1388						// Calculate the "effective depth" of the alt, which is the max
1389						// depth at which cache[depth]!=end-of-token
1390						int effectiveDepth = alt.lookaheadDepth;
1391						if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC) {
1392							// use maximum lookahead
1393							effectiveDepth = grammar.maxk;
1394						}
1395						while (
1396							effectiveDepth >= 1 &&
1397							alt.cache[effectiveDepth].containsEpsilon()
1398						) 
1399						{
1400							effectiveDepth--;
1401						}
1402						// Ignore alts whose effective depth is other than the ones we
1403						// are generating for this iteration.
1404						if (effectiveDepth != altDepth) {
1405							if ( DEBUG_CODE_GENERATOR )
1406								System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
1407							continue;
1408						}
1409						unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
1410						e = getLookaheadTestExpression(alt, effectiveDepth);
1411					} else {
1412						unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
1413						e = getLookaheadTestExpression(alt, grammar.maxk);
1414					}
1415	
1416					if (unpredicted && alt.semPred==null && alt.synPred==null) {
1417						// The alt has empty prediction set and no predicate to help out.
1418						// if we have not generated
1419						// a previous if, just put {...} around the end-of-token clause
1420						if ( nIF==0 ) {
1421							println("{");
1422						}
1423						else {
1424							println("else {");
1425						}			
1426						finishingInfo.needAnErrorClause = false;
1427						// continue;
1428					}
1429					else { // check for sem and syn preds
1430	
1431						// Add any semantic predicate expression to the lookahead test
1432						if ( alt.semPred != null ) {
1433							// if debugging, wrap the evaluation of the predicate in a method
1434							// call that will inform SemanticPredicateListeners of the result
1435							if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) && grammar.debuggingOutput)
1436								e = "("+e+"&& fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.PREDICTING,"+addSemPred(charFormatter.escapeString(alt.semPred))+","+alt.semPred+"))";
1437							else
1438								e = "("+e+"&&("+alt.semPred +"))";
1439						}
1440	
1441						// Generate any syntactic predicates
1442						if ( nIF>0 ) {
1443							if ( alt.synPred != null ) {
1444								println("else {");
1445								tabs++;
1446								genSynPred( alt.synPred, e );
1447								closingBracesOfIFSequence++;
1448							}
1449							else {
1450								println("else if " + e + " {");
1451							}
1452						}
1453						else {
1454							if ( alt.synPred != null ) {
1455								genSynPred( alt.synPred, e );
1456							}
1457							else {
1458								// when parsing trees, convert null to valid tree node
1459								// with NULL lookahead.
1460								if ( grammar instanceof TreeWalkerGrammar ) {
1461									println("if (_t==null) _t=ASTNULL;");
1462								}
1463								println("if " + e + " {");
1464							}
1465						}
1466						
1467					}	
1468	
1469					nIF++;
1470					tabs++;
1471					genAlt(alt, blk);
1472					tabs--;
1473					println("}");
1474				}
1475			}
1476			String ps = "";
1477			for (int i=1; i<=closingBracesOfIFSequence; i++) {
1478				ps+="}";
1479			}
1480	
1481			// Restore the AST generation state
1482			genAST = savegenAST;
1483			
1484			// restore save text state
1485			saveText=oldsaveTest;
1486	
1487			// Return the finishing info.
1488			if ( createdLL1Switch ) {
1489				tabs--;
1490				finishingInfo.postscript = ps+"}";
1491				finishingInfo.generatedSwitch = true;
1492				finishingInfo.generatedAnIf = nIF>0;
1493				//return new JavaBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
1494				
1495			}
1496			else {
1497				finishingInfo.postscript = ps;
1498				finishingInfo.generatedSwitch = false;
1499				finishingInfo.generatedAnIf = nIF>0;
1500				// return new JavaBlockFinishingInfo(ps, false,nIF>0);
1501			}	
1502			return finishingInfo;
1503		}
1504		/** Generate code to link an element reference into the AST */
1505		private void genElementAST(AlternativeElement el) {
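		// For an unlabeled element this emits roughly:
		//     <labeledElementASTType> tmpN_AST = null;
		//     tmpN_AST = <getASTCreateString(elementRef)>;
		//     astFactory.addASTChild(currentAST, tmpN_AST);   // or makeASTRoot for ^
		// with the create/add statements wrapped in "if (guessing==0)" when the
		// grammar has syntactic predicates and the element is not !-suppressed.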
1506			if (grammar.buildAST && syntacticPredLevel == 0) {
1507				boolean doNoGuessTest = (
1508					grammar.hasSyntacticPredicate &&
1509					(
1510						el.getLabel() != null ||
1511						el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG
1512					)
1513				);
1514	
1515				String elementRef;
1516				String astName;
1517	
1518				// Generate names and declarations of the AST variable(s)
1519				if (el.getLabel() != null) {
1520					elementRef = el.getLabel();
1521					astName = el.getLabel() + "_AST";
1522				} else {
1523					elementRef = lt1Value;
1524					// Generate AST variables for unlabeled stuff
1525					astName = "tmp" + astVarNumber + "_AST";
1526					astVarNumber++;
1527					// Generate the declaration
1528					println(labeledElementASTType+" " + astName + " = null;");
1529					// Map the generated AST variable in the alternate
1530					mapTreeVariable(el, astName);
1531					if (grammar instanceof TreeWalkerGrammar) {
1532						// Generate an "input" AST variable also
1533						println(labeledElementASTType+" " + astName + "_in = null;");
1534					}
1535				}
1536	
1537				// Enclose actions with !guessing
1538				if (doNoGuessTest) {
1539					println("if (guessing==0) {"); 
1540					tabs++;
1541				}
1542	
1543				if (el.getLabel() != null) {
1544					println(astName + " = "+getASTCreateString(elementRef) + ";");
1545				} else {
1546					elementRef = lt1Value;
1547					println(astName + " = "+getASTCreateString(elementRef) + ";");
1548					// Map the generated AST variable in the alternate
1549					if (grammar instanceof TreeWalkerGrammar) {
1550						// set "input" AST variable also
1551						println(astName + "_in = " + elementRef + ";");
1552					}
1553				}
1554	
1555				if (genAST) {
1556					switch (el.getAutoGenType()) {
1557					case GrammarElement.AUTO_GEN_NONE:
1558						println("astFactory.addASTChild(currentAST, " + astName + ");");
1559						break;
1560					case GrammarElement.AUTO_GEN_CARET:
1561						println("astFactory.makeASTRoot(currentAST, " + astName + ");");
1562						break;
1563					default:
1564						break;
1565					}
1566				}
1567				if (doNoGuessTest) {
1568					tabs--;
1569					println("}");
1570				}
1571			}
1572		}
1573		/** Close the try block and generate catch phrases 
1574		 * if the element has a labeled handler in the rule 
1575		 */
1576		private void genErrorCatchForElement(AlternativeElement el) {
1577			if (el.getLabel() == null) return;
1578			String r = el.enclosingRuleName;
1579			if ( grammar instanceof LexerGrammar ) {
1580				r = CodeGenerator.lexerRuleName(el.enclosingRuleName);
1581			}
1582			RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
1583			if (rs == null) {
1584				tool.panic("Enclosing rule not found!");
1585			}
1586			ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
1587			if (ex != null) {
1588				tabs--;
1589				println("}");
1590				genErrorHandler(ex);
1591			}
1592		}
1593		/** Generate the catch phrases for a user-specified error handler */
1594		private void genErrorHandler(ExceptionSpec ex) {
1595			// Each ExceptionHandler in the ExceptionSpec is a separate catch
1596			for (int i = 0; i < ex.handlers.size(); i++)
1597			{
1598				ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
1599				// Generate catch phrase
1600				println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
1601				tabs++;
1602				if (grammar.hasSyntacticPredicate) {
1603					println("if (guessing==0) {");
1604					tabs++;
1605				}
1606				
1607				// When not guessing, execute user handler action
1608				printAction(
1609					processActionForTreeSpecifiers(handler.action, 0, currentRule, null)
1610				);
1611					
1612				if (grammar.hasSyntacticPredicate) {
1613					tabs--;
1614					println("} else {");
1615					tabs++;
1616					// When guessing, rethrow exception
1617					println(
1618						"throw " + 
1619						extractIdOfAction(handler.exceptionTypeAndName) + 
1620						";"
1621					);
1622					tabs--;
1623					println("}");
1624				}
1625				// Close catch phrase
1626				tabs--;
1627				println("}");
1628			}
1629		}
1630		/** Generate a try { opening if the element has a labeled handler in the rule */
1631		private void genErrorTryForElement(AlternativeElement el) {
1632			if (el.getLabel() == null) return;
1633			String r = el.enclosingRuleName;
1634			if ( grammar instanceof LexerGrammar ) {
1635				r = CodeGenerator.lexerRuleName(el.enclosingRuleName);
1636			}
1637			RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
1638			if (rs == null) {
1639				tool.panic("Enclosing rule not found!");
1640			}
1641			ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
1642			if (ex != null) {
1643				println("try { // for error handling");
1644				tabs++;
1645			}
1646		}
1647		/** Generate a header that is common to all Java files */
1648		protected void genHeader() 
1649		{
1650			println("/*");
1651			println(" * ANTLR-generated file resulting from grammar " + tool.grammarFile);
1652			println(" * ");
1653			println(" * Terence Parr, MageLang Institute");
1654			println(" * with John Lilley, Empathy Software");
1655			println(" * ANTLR Version " + ANTLRParser.version + "; 1996-1998");
1656			println(" */");
1657		}
1658		private void genLiteralsTest() {
1659			println("_ttype = testLiteralsTable(_ttype);");
1660		}
1661		protected void genMatch(BitSet b) {
1662		}
1663		protected void genMatch(GrammarAtom atom) {
1664			if ( atom instanceof StringLiteralElement ) {
1665				if ( grammar instanceof LexerGrammar ) {
1666					genMatchUsingAtomText(atom);
1667				}
1668				else {
1669					genMatchUsingAtomTokenType(atom);
1670				}
1671			}
1672			else if ( atom instanceof CharLiteralElement ) {
1673				if ( grammar instanceof LexerGrammar ) {
1674					genMatchUsingAtomText(atom);
1675				}
1676				else {
1677					tool.error("cannot ref character literals in grammar: "+atom);				
1678				}
1679			}
1680			else if ( atom instanceof TokenRefElement ) {
1681				genMatchUsingAtomText(atom);
1682			}
1683		}
1684		protected void genMatchUsingAtomText(GrammarAtom atom) {
1685			// match() for trees needs the _t cursor
1686			String astArgs="";
1687			if (grammar instanceof TreeWalkerGrammar) {
1688				astArgs="_t,";
1689			}
1690			
1691			// if in lexer and ! on element, save buffer index to kill later
1692			if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
1693				println("_saveIndex=text.length();");
1694			}
1695			
1696			print(atom.not ? "matchNot(" : "match(");
1697			_print(astArgs);
1698			
1699			// print out what to match
1700			if (atom.atomText.equals("EOF")) {
1701				// horrible hack to handle EOF case
1702				_print("Token.EOF_TYPE");
1703			} 
1704			else {
1705				_print(atom.atomText);
1706			}
1707			_println(");");
1708	
1709			if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
1710				println("text.setLength(_saveIndex);");		// kill text atom put in buffer
1711			}
1712		}
1713		protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
1714			// match() for trees needs the _t cursor
1715			String astArgs="";
1716			if (grammar instanceof TreeWalkerGrammar) {
1717				astArgs="_t,";
1718			}
1719	
		// getValueString() substitutes the symbolic constant (or mangled literal
		// name) for the token type where possible
		String s = astArgs + getValueString(atom.tokenType);
1723	
1724			// matching
1725			println( (atom.not ? "matchNot(" : "match(") + s + ");");
1726		}
	/** Generate the nextToken() rule.
	 * nextToken() is a synthetic lexer rule that is the implicit OR of all
	 * user-defined lexer rules.  See the illustrative skeleton following
	 * this method.
	 */
1732		public void genNextToken() {
1733			// Are there any public rules?  If not, then just generate a
1734			// fake nextToken().
1735			boolean hasPublicRules = false;
1736			for (int i = 0; i < grammar.rules.size(); i++) {
1737				RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
1738				if ( rs.isDefined() && rs.access.equals("public") ) {
1739					hasPublicRules = true;
1740					break;
1741				}
1742			}
1743			if (!hasPublicRules) {
1744				println("");
1745				println("public Token nextToken() throws IOException { return new CommonToken(Token.EOF_TYPE, \"\"); }");
1746				println("");
1747				return;
1748			}
1749	
1750			// Create the synthesized nextToken() rule
1751			RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
1752			// Define the nextToken rule symbol
1753			RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
1754			nextTokenRs.setDefined();
1755			nextTokenRs.setBlock(nextTokenBlk);
1756			nextTokenRs.access = "private";
1757			grammar.define(nextTokenRs);
1758			// Analyze the nextToken rule
1759			boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
1760	
1761			// Generate the next token rule
1762			String filterRule=null;
1763			if ( ((LexerGrammar)grammar).filterMode ) {
1764				filterRule = ((LexerGrammar)grammar).filterRule;
1765			}
1766			
1767			println("");
1768			println("public Token nextToken() throws IOException {");
1769			tabs++;
1770			println("Token _rettoken=null;");
1771			_println("tryAgain:");
1772			println("for (;;) {");
1773			tabs++;
1774			println("Token _token = null;");
1775			println("int _ttype = Token.INVALID_TYPE;");
1776			if ( ((LexerGrammar)grammar).filterMode ) {
1777				println("setCommitToPath(false);");
1778				if ( filterRule!=null ) {
1779					// Here's a good place to ensure that the filter rule actually exists
1780					if ( !grammar.isDefined(CodeGenerator.lexerRuleName(filterRule)) ) {
1781						grammar.tool.error("Filter rule "+filterRule+" does not exist in this lexer");
1782					}
1783					else {
1784						RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.lexerRuleName(filterRule));
1785						if ( !rs.isDefined() ) {
1786							grammar.tool.error("Filter rule "+filterRule+" does not exist in this lexer");
1787						}
1788						else if ( rs.access.equals("public") ) {
1789							grammar.tool.error("Filter rule "+filterRule+" must be protected");
1790						}
1791					}
1792					println("int _m;");
1793					println("_m = mark();");
1794				}
1795			}
1796			println("resetText();");
1797	
1798			// Generate try around whole thing to trap scanner errors
1799			println("try {   // for error handling");
1800			tabs++;
1801	
1802			// Test for public lexical rules with empty paths
1803			for (int i=0; i<nextTokenBlk.getAlternatives().size(); i++) {
1804				Alternative a = nextTokenBlk.getAlternativeAt(i);
1805				if ( a.cache[1].containsEpsilon() ) {
1806					tool.warning("found optional path in nextToken()");
1807				}
1808			}
1809	
1810			// Generate the block
1811			String newline = System.getProperty("line.separator");
1812			JavaBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
1813			String errFinish = "if (LA(1)==EOF_CHAR) {_returnToken = makeToken(Token.EOF_TYPE);}";
1814			errFinish += newline+"\t\t\t\t";
1815			if ( ((LexerGrammar)grammar).filterMode ) {
1816				if ( filterRule==null ) {
1817					errFinish += "else {consume(); continue tryAgain;}";
1818				}
1819				else {
1820					errFinish += "else {"+newline+
1821							"\t\t\t\t\tcommit();"+newline+
1822							"\t\t\t\t\ttry {m"+filterRule+"(false);}"+newline+
1823							"\t\t\t\t\tcatch(ScannerException e) {"+newline+
1824							"\t\t\t\t\t	// catastrophic failure"+newline+
1825							"\t\t\t\t\t	reportError(e);"+newline+
1826							"\t\t\t\t\t	consume();"+newline+
1827							"\t\t\t\t\t}"+newline+
1828	 						"\t\t\t\t\tcontinue tryAgain;"+newline+
1829	 						"\t\t\t\t}";
1830				}
1831			}
1832			else {
1833				errFinish += "else {"+throwNoViable+"}";
1834			}
1835			genBlockFinish(howToFinish, errFinish);
1836	
1837			// at this point a valid token has been matched, undo "mark" that was done
1838			if ( ((LexerGrammar)grammar).filterMode && filterRule!=null ) {
1839				println("commit();");
1840			}
1841			
		// Generate literals test if desired
		// make sure _ttype is set first; _returnToken can still be null
		// here if the matched rule produced a SKIP token.
1845			println("if ( _returnToken==null ) continue tryAgain; // found SKIP token");
1846			println("_ttype = _returnToken.getType();");
1847			if ( ((LexerGrammar)grammar).getTestLiterals()) {
1848				genLiteralsTest();
1849			}
1850	
1851			// return token created by rule reference in switch
1852			println("_returnToken.setType(_ttype);");
1853			println("return _returnToken;");
1854			
1855			// Close try block
1856			tabs--;
1857			println("}");
1858			println("catch (ScannerException e) {");
1859			tabs++;
1860			if ( ((LexerGrammar)grammar).filterMode ) {
1861				if ( filterRule==null ) {
1862					println("if ( !getCommitToPath() ) {consume(); continue tryAgain;}");
1863				}
1864				else {
1865					println("if ( !getCommitToPath() ) {");
1866					tabs++;
1867					println("rewind(_m);");
1868					println("resetText();");
1869					println("try {m"+filterRule+"(false);}");
1870					println("catch(ScannerException ee) {");
1871					println("	// horrendous failure: error in filter rule");
1872					println("	reportError(ee);");
1873					println("	consume();");
1874					println("}");
1875					println("continue tryAgain;");
1876					tabs--;
1877					println("}");
1878				}
1879			}
1880			println("reportError(e);");
1881			println("consume();");
1882			tabs--;
1883			println("}");
1884	
1885			// close for-loop
1886			tabs--;
1887			println("}");
1888	
1889			// close method nextToken
1890			tabs--;
1891			println("}");
1892			println("");
1893		}
	/** Generate a named rule block.
	 * ASTs are generated for each element of an alternative unless
	 * the rule or the alternative has a '!' modifier.
	 *
	 * If an alternative defeats the default tree construction, it
	 * must set &lt;rule&gt;_AST to the root of the returned AST.
	 *
	 * Each alternative that does automatic tree construction builds
	 * up root and child list pointers in an ASTPair structure.
	 *
	 * A rule finishes by setting the returnAST variable from the
	 * ASTPair.  See the illustrative skeleton following this method.
	 *
	 * @param s The RuleSymbol describing the rule to generate
	 * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
	 * @param ruleNum The rule's index, used when generating debugging events
	*/
1910		public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum) {
1911			tabs=1;
1912			if ( DEBUG_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")");
1913			if ( !s.isDefined() ) {
1914				tool.error("undefined rule: "+ s.getId());
1915				return;
1916			}
1917	
1918			// Generate rule return type, name, arguments
1919			RuleBlock rblk = s.getBlock();
1920			currentRule = rblk;
1921			currentASTResult = s.getId();
1922	
1923			// Save the AST generation state, and set it to that of the rule
1924			boolean savegenAST = genAST;
1925			genAST = genAST && rblk.getAutoGen();
1926			
1927			// boolean oldsaveTest = saveText;
1928			saveText = rblk.getAutoGen();
1929	
1930			// print javadoc comment if any
1931			if ( s.comment!=null ) {
1932				_println(s.comment);
1933			}
1934			
1935			// Gen method access and final qualifier
1936			print(s.access + " final ");
1937	
1938			// Gen method return type (note lexer return action set at rule creation)
1939			if (rblk.returnAction != null)
1940			{
1941				// Has specified return value
1942				_print(extractTypeOfAction(rblk.returnAction, rblk.getLine()) + " ");
1943			} else {
1944				// No specified return value
1945				_print("void ");
1946			}
1947	
1948			// Gen method name
1949			_print(s.getId() + "(");
1950	
1951			// Additional rule parameters common to all rules for this grammar
1952			_print(commonExtraParams);
1953			if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
1954				_print(",");
1955			}
1956	
1957			// Gen arguments
1958			if (rblk.argAction != null) 
1959			{
1960				// Has specified arguments
1961				_println("");
1962				tabs++;
1963				println(rblk.argAction);
1964				tabs--;
1965				print(")");
1966			} else {
1967				// No specified arguments
1968				_print(")");
1969			}
1970	
1971			// Gen throws clause and open curly
1972			_print(" throws " + exceptionThrown);
1973			if ( !(grammar instanceof TreeWalkerGrammar) ) {
1974				_print(", IOException");
1975			}
1976			_println(" {");
1977			tabs++;
1978	
1979			// Convert return action to variable declaration
1980			if (rblk.returnAction != null)
1981				println(rblk.returnAction + ";");
1982			
1983			// print out definitions needed by rules for various grammar types
1984			println(commonLocalVars);
1985			
1986			if (grammar.traceRules) {
1987				if ( grammar instanceof TreeWalkerGrammar ) {
1988					println("traceIn(\""+ s.getId() +"\",_t);");
1989				}
1990				else {
1991					println("traceIn(\""+ s.getId() +"\");");
1992				}
1993			}
1994	
1995			if ( grammar instanceof LexerGrammar ) {
1996				// lexer rule default return value is the rule's token name
1997				// This is a horrible hack to support the built-in EOF lexer rule.
1998				if (s.getId().equals("mEOF"))
1999					println("_ttype = Token.EOF_TYPE;");
2000				else
2001					println("_ttype = "+ s.getId().substring(1)+";");
2002				println("int _saveIndex;");		// used for element! (so we can kill text matched for element)
2003	/*
2004				println("boolean old_saveConsumedInput=saveConsumedInput;");
2005				if ( !rblk.getAutoGen() ) {		// turn off "save input" if ! on rule
2006					println("saveConsumedInput=false;");
2007				}
2008	*/
2009			}
2010	
		// if debugging, write code to mark entry to the rule
		if ( grammar.debuggingOutput) {
			if (grammar instanceof ParserGrammar)
				println("fireEnterRule(" + ruleNum + ",0);");
			else if (grammar instanceof LexerGrammar)
				println("fireEnterRule(" + ruleNum + ",_ttype);");
		}

		// Generate trace code if desired
		if ( grammar.debuggingOutput || grammar.traceRules) {
			println("try { // debugging");
			tabs++;
		}
2024			
2025			// Initialize AST variables
2026			if (grammar.buildAST) {
2027				// Parser member used to pass AST returns from rule invocations
2028				println("returnAST = null;");
2029				// Tracks AST construction
2030				println("ASTPair currentAST = new ASTPair();");
2031				// User-settable return value for rule.
2032				println(labeledElementASTType+" " + s.getId() + "_AST = null;");
2033				if (grammar instanceof TreeWalkerGrammar) {
2034					// "Input" value for rule
2035					println(labeledElementASTType+" " + s.getId() + "_AST_in = ("+labeledElementASTType+")_t;");
2036				}
2037			}
2038	
2039			genBlockPreamble(rblk);
2040			println("");
2041	
2042			// Search for an unlabeled exception specification attached to the rule
2043			ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
2044	
2045			// Generate try block around the entire rule for  error handling
2046			if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
2047				println("try {      // for error handling");
2048				tabs++;
2049			}
2050	
2051			// Generate the alternatives
2052			if ( rblk.alternatives.size()==1 ) {
2053				// One alternative -- use simple form
2054				Alternative alt = rblk.getAlternativeAt(0);
2055				String pred = alt.semPred;
2056				if ( pred!=null )
2057					genSemPred(pred);
2058				if (alt.synPred != null) {
2059					tool.warning(
2060						"Syntactic predicate ignored for single alternative", 
2061						alt.synPred.getLine()
2062					);
2063				}
2064				genAlt(alt, rblk);
2065			}
2066			else {
2067				// Multiple alternatives -- generate complex form
2068				boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
2069			
2070				JavaBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
2071				genBlockFinish(howToFinish, throwNoViable);
2072			}
2073	
2074			// Generate catch phrase for error handling
2075			if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
2076				// Close the try block
2077				tabs--;
2078				println("}");
2079			}
2080	
2081			// Generate user-defined or default catch phrases
2082			if (unlabeledUserSpec != null) {
2083				genErrorHandler(unlabeledUserSpec);
2084			}
2085			else if (rblk.getDefaultErrorHandler()) {
2086				// Generate default catch phrase
2087				println("catch (" + exceptionThrown + " ex) {");
2088				tabs++;
2089				// Generate code to handle error if not guessing
2090				if (grammar.hasSyntacticPredicate) {
2091					println("if (guessing==0) {");
2092					tabs++;
2093				}
2094				println("reportError(ex);");
2095				if ( !(grammar instanceof TreeWalkerGrammar) ) {
2096					// Generate code to consume until token in k==1 follow set
2097					Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
2098					String followSetName = getBitsetName(markBitsetForGen(follow.fset));
2099					println("consume();");
2100					println("consumeUntil(" + followSetName + ");");
2101				} else {
2102					// Just consume one token
2103					println("if (_t!=null) {_t = _t.getNextSibling();}");
2104				}
2105				if (grammar.hasSyntacticPredicate) {
2106					tabs--;
2107					// When guessing, rethrow exception
2108					println("} else {");
2109					println("  throw ex;");
2110					println("}");
2111				}
2112				// Close catch phrase
2113				tabs--;
2114				println("}");
2115			}
2116	
2117			// Squirrel away the AST "return" value
2118			if (grammar.buildAST) {
2119				println("returnAST = " + s.getId() + "_AST;");
2120			}
2121	
2122			// Set return tree value for tree walkers
2123			if ( grammar instanceof TreeWalkerGrammar ) {
2124				println("_retTree = _t;");
2125			}
2126	
2127			// Generate literals test for lexer rules so marked
2128			if (rblk.getTestLiterals()) {
2129				genLiteralsTest();
2130			}
2131	
2132			// if doing a lexer rule, dump code to create token if necessary
2133			if ( grammar instanceof LexerGrammar ) {
2134				println("if ( _createToken && _token==null && _ttype!=Token.SKIP ) {");
2135				println("	_token = makeToken(_ttype);");
2136				println("	_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));");
2137				println("}");
2138				println("_returnToken = _token;");
2139			}	
2140	
2141			// Gen the return statement if there is one (lexer has hard-wired return action)
2142			if (rblk.returnAction != null) {
2143				println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine()) + ";");
2144			}
2145			
		if ( grammar.debuggingOutput || grammar.traceRules) {
			tabs--;
			println("} finally { // debugging");
			tabs++;

			// If debugging, generate calls to mark exit of rule
			if ( grammar.debuggingOutput) {
				if (grammar instanceof ParserGrammar)
					println("fireExitRule(" + ruleNum + ",0);");
				else if (grammar instanceof LexerGrammar)
					println("fireExitRule(" + ruleNum + ",_ttype);");
			}

2158				if (grammar.traceRules) {
2159					if ( grammar instanceof TreeWalkerGrammar ) {
2160						println("traceOut(\""+ s.getId() +"\",_t);");
2161					}
2162					else {
2163						println("traceOut(\""+ s.getId() +"\");");
2164					}
2165				}
2166	
2167				tabs--;
2168				println("}");
2169			}
2170	
2171			tabs--;
2172			println("}");
2173			println("");
2174			
2175			// Restore the AST generation state
2176			genAST = savegenAST;
2177			
2178			// restore char save state
2179			// saveText = oldsaveTest;
2180		}
2181		private void GenRuleInvocation(RuleRefElement rr) {	
2182			// dump rule name
2183			_print(rr.targetRule + "(");
2184				
2185			// lexers must tell rule if it should set _returnToken
2186			if ( grammar instanceof LexerGrammar ) {
2187				// if labeled, could access Token, so tell rule to create
2188				if ( rr.getLabel() != null ) {
2189					_print("true");
2190				}
2191				else {
2192					_print("false");
2193				}		
2194				if (commonExtraArgs.length() != 0 || rr.args!=null ) {
2195					_print(",");
2196				}
2197			}	
2198	
2199			// Extra arguments common to all rules for this grammar
2200			_print(commonExtraArgs);
2201			if (commonExtraArgs.length() != 0 && rr.args!=null ) {
2202				_print(",");
2203			}
2204			
2205			// Process arguments to method, if any
2206			RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
2207			if (rr.args != null)
2208			{
2209				// When not guessing, execute user arg action
2210				ActionTransInfo tInfo = new ActionTransInfo();
2211				String args = processActionForTreeSpecifiers(rr.args, 0, currentRule, tInfo);
2212				if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null ) {
2213					tool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #"+
2214						currentRule.getRuleName()+" on line "+rr.getLine());
2215				}
2216				_print(args);
2217	
2218				// Warn if the rule accepts no arguments
2219				if (rs.block.argAction == null)
2220				{
2221					tool.warning("Rule '" + rr.targetRule + "' accepts no arguments", rr.getLine());
2222				}
2223			} else {
2224				// No warning if rule has parameters, because there may be default
2225				// values for all of the parameters
2226			}
2227			_println(");");
2228			
2229			// move down to the first child while parsing
2230			if ( grammar instanceof TreeWalkerGrammar ) {
2231				println("_t = _retTree;");
2232			}
2233		}
2234		protected void genSemPred(String pred) {
2235			String escapedPred = charFormatter.escapeString(pred);
2236			// if debugging, wrap the semantic predicate evaluation in a method
2237			// that can tell SemanticPredicateListeners the result
2238			if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
2239			     (grammar instanceof LexerGrammar)))
2240				pred = "fireSemanticPredicateEvaluated(antlr.debug.SemanticPredicateEvent.VALIDATING,"+addSemPred(escapedPred)+","+pred+")";
2241			println("if (!("+pred+"))");
2242			println("  throw new SemanticException(\"" + escapedPred +"\");");
2243		}
2244		/** Write an array of Strings which are the semantic predicate
2245		 *  expressions.  The debugger will reference them by number only
2246		 */
2247		protected void genSemPredMap() {
2248			Enumeration e = semPreds.elements();
2249			println("private String _semPredNames[] = {");
2250			while(e.hasMoreElements())
2251				println("\""+e.nextElement()+"\",");
2252			println("};");			
2253		}
2254		protected void genSynPred(SynPredBlock blk, String lookaheadExpr) {
2255			if ( DEBUG_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");
2256	
2257			// Dump synpred result variable
2258			println("boolean synPredMatched" + blk.ID + " = false;");
2259			// Gen normal lookahead test
2260			println("if (" + lookaheadExpr + ") {");
2261			tabs++;
2262	
2263			// Save input state
2264			if ( grammar instanceof TreeWalkerGrammar ) {
2265				println("AST __t" + blk.ID + " = _t;");
2266			}
2267			else {
2268				println("int _m" + blk.ID + " = mark();");
2269			}
2270	
2271			// Once inside the try, assume synpred works unless exception caught
2272			println("synPredMatched" + blk.ID + " = true;");
2273			println("guessing++;");
2274	
2275			// if debugging, tell listeners that a synpred has started
2276			if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
2277				 (grammar instanceof LexerGrammar))) {
2278				println("fireSyntacticPredicateStarted();");
2279			}	
2280	
2281			syntacticPredLevel++;
2282			println("try {");
2283			tabs++;
2284			gen((AlternativeBlock)blk);		// gen code to test predicate
2285			tabs--;
2286			//println("System.out.println(\"pred "+blk+" succeeded\");");
2287			println("}");
2288			println("catch (" + exceptionThrown + " pe) {");
2289			tabs++;
2290			println("synPredMatched"+blk.ID+" = false;");
2291			//println("System.out.println(\"pred "+blk+" failed\");");
2292			tabs--;
2293			println("}");
2294	
2295			// Restore input state
2296			if ( grammar instanceof TreeWalkerGrammar ) {
2297				println("_t = __t"+blk.ID+";");
2298			}
2299			else {
2300				println("rewind(_m"+blk.ID+");");
2301			}
2302	
2303			println("guessing--;");
2304	
2305			// if debugging, tell listeners how the synpred turned out
		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
		     (grammar instanceof LexerGrammar))) {
2308				println("if (synPredMatched" + blk.ID +")");
2309				println("  fireSyntacticPredicateSucceeded();");
2310				println("else");
2311				println("  fireSyntacticPredicateFailed();");
2312			}	
2313	
2314			syntacticPredLevel--;
2315			tabs--;
2316			
2317			// Close lookahead test
2318			println("}");
2319	
		// Test synpred result
2321			println("if ( synPredMatched"+blk.ID+" ) {");
2322		}
2323		/** Generate a static array containing the names of the tokens,
2324		 * indexed by the token type values.  This static array is used
	 * to format error messages so that the token identifiers or literal
2326		 * strings are displayed instead of the token numbers.
2327		 *
2328		 * If a lexical rule has a paraphrase, use it rather than the
2329		 * token label.
2330		 */
2331		public void genTokenStrings() {
2332			// Generate a string for each token.  This creates a static
2333			// array of Strings indexed by token type.
2334			println("");
2335			println("public static final String[] _tokenNames = {");
2336			tabs++;
2337	
2338			// Walk the token vocabulary and generate a Vector of strings
2339			// from the tokens.
2340			Vector v = grammar.tokenManager.getVocabulary();
2341			for (int i = 0; i < v.size(); i++)
2342			{
2343				String s = (String)v.elementAt(i);
2344				if (s == null)
2345				{
2346					s = "<"+String.valueOf(i)+">";
2347				}
2348				if ( !s.startsWith("\"") && !s.startsWith("<") ) {
2349					TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
2350					if ( ts!=null && ts.getParaphrase()!=null ) {
2351						s = antlr.Tool.stripFrontBack(ts.getParaphrase(), "\"", "\"");
2352					}
2353				}	
2354				print(charFormatter.literalString(s));
2355				if (i != v.size()-1) {
2356					_print(",");
2357				}
2358				_println("");
2359			}
2360	
		// Close the string array initializer
2362			tabs--;
2363			println("};");
2364		}
2365		/** Generate the token types Java file */
2366		protected void genTokenTypes(TokenManager tm) throws IOException {
2367			// Open the token output Java file and set the currentOutput stream
2368			// SAS: file open was moved to a method so a subclass can override
2369			//      This was mainly for the VAJ interface
2370			setupOutput(tm.getName() + "TokenTypes");
2371	
2372			tabs = 0;
2373	
2374			// Generate the header common to all Java files
2375			genHeader();
2376			// Do not use printAction because we assume tabs==0
2377			println(behavior.headerAction);
2378	
2379			// Encapsulate the definitions in an interface.  This can be done
2380			// because they are all constants.
2381			println("public interface " + tm.getName() + "TokenTypes {");
2382			tabs++;
2383	
2384			
2385			// Generate a definition for each token type
2386			Vector v = tm.getVocabulary();
2387			
2388			// Do special tokens manually
2389			println("public static final int EOF = " + Token.EOF_TYPE + ";");
2390			println("public static final int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";");
2391			
2392			for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
2393				String s = (String)v.elementAt(i);
2394				if (s != null) {
2395					if ( s.startsWith("\"") ) {
2396						// a string literal
2397						StringLiteralSymbol sl = (StringLiteralSymbol)grammar.tokenManager.getTokenSymbol(s);
2398						if ( sl==null ) {
2399							antlr.Tool.panic("String literal "+s+" not in symbol table");
2400						}
2401						else if ( sl.label != null ) {
2402							println("public static final int " + sl.label + " = " + i + ";");
2403						}
2404						else {	
2405							String mangledName = mangleLiteral(s);
2406							if (mangledName != null) {
2407								// We were able to create a meaningful mangled token name
2408								println("public static final int " + mangledName + " = " + i + ";");
2409								// if no label specified, make the label equal to the mangled name
2410								sl.label = mangledName;
2411							}
2412							else {
2413								println("// " + s + " = " + i);
2414							}
2415						}	
2416					}
2417					else if ( !s.startsWith("<") ) {
2418						println("public static final int " + s + " = " + i + ";");
2419					}
2420				}
2421			}
2422	
2423			// Close the interface
2424			tabs--;
2425			println("}");
2426	
2427			// Close the tokens output file
2428			currentOutput.close();
2429			currentOutput = null;
2430			exitIfError();
2431		}
2432		/** Get a string for an expression to generate creation of an AST subtree.
2433		  * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
2434		  */
2435		public String getASTCreateString(Vector v) {
2436			if (v.size() == 0) {
2437				return "";
2438			}
2439			StringBuffer buf = new StringBuffer();
2440			buf.append("("+labeledElementASTType+")astFactory.make( (new ASTArray(" + v.size() + "))");
2441			for (int i = 0; i < v.size(); i++) {
2442				buf.append(".add(" + v.elementAt(i) + ")");
2443			}
2444			buf.append(")");
2445			return buf.toString();
2446		}
	/** Get a string for an expression to generate creation of an AST node
2448		  * @param str The arguments to the AST constructor
2449		  */
2450		public String getASTCreateString(String str) {
2451			return "("+labeledElementASTType+")astFactory.create(" + str + ")";
2452		}
	/**Generate a lookahead test expression for an alternative.  This
2454		 * will be a series of tests joined by '&&' and enclosed by '()',
2455		 * the number of such tests being determined by the depth of the lookahead.
2456		 */
2457		protected String getLookaheadTestExpression(Alternative alt, int maxDepth) {
2458			StringBuffer e = new StringBuffer("(");
2459					
2460			int depth = alt.lookaheadDepth;
2461			if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
2462				// if the decision is nondeterministic, do the best we can: LL(k)
2463				// any predicates that are around will be generated later.
2464				depth = grammar.maxk;
2465			}
2466	
2467			if ( maxDepth==0 ) {
2468				// empty lookahead can result from alt with sem pred
2469				// that can see end of token.  E.g., A : {pred}? ('a')? ;
2470				return "true";
2471			}
2472						
2473			boolean first = true;
2474			for (int i=1; i<=depth && i<=maxDepth; i++) {
2475				BitSet p = alt.cache[i].fset;
2476				if (!first) {
2477					e.append(") && (");
2478				}
2479				first = false;
2480				
2481				// Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
2482				// There is no way to predict what that token would be.  Just
2483				// allow anything instead.
2484				if ( alt.cache[i].containsEpsilon() ) {
2485					e.append("true");
2486				}
2487				else {
2488					e.append(getLookaheadTestTerm(i, p));
2489				}	
2490			}
2491	
2492			e.append(")");
2493	
2494			return "(" + e.toString() + ")";
2495		}
2496		/**Generate a depth==1 lookahead test expression given the BitSet.
2497		 * This may be one of:
2498		 * 1) a series of 'x==X||' tests
2499		 * 2) a range test using >= && <= where possible,
2500		 * 3) a bitset membership test for complex comparisons
2501		 * @param k The lookahead level
2502		 * @param p The lookahead set for level k
2503		 */
2504		protected String getLookaheadTestTerm(int k, BitSet p) {
2505			// Determine the name of the item to be compared
2506			String ts = lookaheadString(k);
2507	
2508			// Generate a range expression if possible
2509			int[] elems = p.toArray();
2510			if (elementsAreRange(elems)) {
2511				return getRangeExpression(k, elems);
2512			}
2513	
2514			// Generate a bitset membership test if possible
2515			StringBuffer e;
2516			if (p.degree() >= bitsetTestThreshold) {
2517				int bitsetIdx = markBitsetForGen(p);
2518				return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
2519			}
2520	
2521			// Otherwise, generate the long-winded series of "x==X||" tests
2522			e = new StringBuffer();
2523			for (int i = 0; i < elems.length; i++) {
2524				// Get the compared-to item (token or character value)
2525				String cs = getValueString(elems[i]);
2526	
2527				// Generate the element comparison
2528				if ( i>0 ) e.append("||");
2529				e.append(ts);
2530				e.append("==");
2531				e.append(cs);
2532			}
2533			return e.toString();
2534		}
	/** Return an expression for testing a contiguous range of elements
2536		 * @param k The lookahead level
2537		 * @param elems The elements representing the set, usually from BitSet.toArray().
2538		 * @return String containing test expression.
2539		 */
2540		public String getRangeExpression(int k, int[] elems) {
2541			if (!elementsAreRange(elems)) {
2542				tool.panic("getRangeExpression called with non-range");
2543			}
2544			int begin = elems[0];
2545			int end = elems[elems.length-1];
2546			return 
2547				"(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " + 
2548				lookaheadString(k) + " <= " + getValueString(end) + ")";
2549		}
2550		/** getValueString: get a string representation of a token or char value
2551		 * @param value The token or char value
2552		 */
2553		private String getValueString(int value) {
2554			String cs;
2555			if ( grammar instanceof LexerGrammar ) {
2556				cs = charFormatter.literalChar(value);
2557			}
2558			else {
2559				String t = (String)grammar.tokenManager.getTokenStringAt(value);
2560				if ( t == null ) {
2561					tool.panic("vocabulary for token type " + value + " is null");
2562				}
2563	/*			if ( t.equals("<eof>") ) {
2564					cs = "Token.EOF_TYPE";
2565				}
2566				else
2567	*/
2568				if ( t.startsWith("\"") ) {
2569					cs = mangleLiteral(t);
2570					if (cs == null) {
2571						cs = String.valueOf(value);
2572					}
2573				}
2574				else {
2575					cs = t;
2576				}
2577			}
2578			return cs;
2579		}
2580		/**Is the lookahead for this alt empty? */
2581		protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
2582			int depth = alt.lookaheadDepth;
2583			if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
2584				depth = grammar.maxk;
2585			}
2586			for (int i=1; i<=depth && i<=maxDepth; i++) {
2587				BitSet p = alt.cache[i].fset;
2588				if (p.degree() != 0) {
2589					return false;
2590				}
2591			}
2592			return true;
2593		}
2594		private String lookaheadString(int k) {
2595			if (grammar instanceof TreeWalkerGrammar) {
2596				return "_t.getType()";
2597			} 
2598			return "LA(" + k + ")";
2599		}
	/** Mangle a string literal into a meaningful token name.  This is
	  * only possible for literals consisting entirely of letters and
	  * underscores.  The resulting mangled literal name is literalsPrefix
	  * with the text of the literal appended.
	  * @return A string representing the mangled literal, or null if not possible.
	  */
2606		private String mangleLiteral(String s) {
2607			String mangled = antlr.Tool.literalsPrefix;
2608			for (int i = 1; i < s.length()-1; i++) {
2609				if (!Character.isLetter(s.charAt(i)) &&
2610					 s.charAt(i) != '_') {
2611					return null;
2612				}
2613				mangled += s.charAt(i);
2614			}
2615			if ( antlr.Tool.upperCaseMangledLiterals ) {
2616				mangled = mangled.toUpperCase();
2617			}	
2618			return mangled;
2619		}
	/** Map an identifier to its corresponding tree-node variable.
2621		  * This is context-sensitive, depending on the rule and alternative
2622		  * being generated
2623		  * @param idParam The identifier name to map
2624		  * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
2625		  */
2626		public String mapTreeId(String idParam, ActionTransInfo transInfo) {
2627			// if not in an action of a rule, nothing to map.
2628			if ( currentRule==null ) return idParam;
2629	
2630			boolean in_var = false;
2631			String id = idParam;
2632			if (grammar instanceof TreeWalkerGrammar) {
2633				// If the id ends with "_in", then map it to the input variable
2634				if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3) {
2635					// Strip off the "_in"
2636					id = id.substring(0, id.length()-3);
2637					in_var = true;
2638				}
2639			}
2640	
2641			// Check the rule labels.  If id is a label, then the output
2642			// variable is label_AST, and the input variable is plain label.
2643			for (int i = 0; i < currentRule.labeledElements.size(); i++) {
2644				AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
2645				if (elt.getLabel().equals(id)) {
2646					return in_var ? id : id + "_AST";
2647				}
2648			}
2649	
2650			// Failing that, check the id-to-variable map for the alternative.
2651			// If the id is in the map, then output variable is the name in the
2652			// map, and input variable is name_in
2653			String s = (String)treeVariableMap.get(id);
2654			if (s != null) {
2655				if (s == NONUNIQUE) {
2656					// There is more than one element with this id
2657					return null;
2658				} else if (s.equals(currentRule.getRuleName())) {
2659					// a recursive call to the enclosing rule is 
2660					// ambiguous with the rule itself.
2661					return null;
2662				} else {
2663					return in_var ? s + "_in" : s;
2664				}
2665			}
2666	
2667			// Failing that, check the rule name itself.  Output variable
2668			// is rule_AST; input variable is rule_AST_in (treeparsers).
2669			if (id.equals(currentRule.getRuleName())) {
2670				String r = in_var ? id + "_AST_in" : id + "_AST";
2671				if ( transInfo!=null ) {
2672					if ( !in_var ) {
2673						transInfo.refRuleRoot = r;
2674					}	
2675				}	
2676				return r;
2677			} else {
2678				// id does not map to anything -- return itself.
2679				return id;
2680			}
2681		}
2682		/** Given an element and the name of an associated AST variable,
2683		  * create a mapping between the element "name" and the variable name.
2684		  */
2685		private void mapTreeVariable(AlternativeElement e, String name)
2686		{
2687			// For tree elements, defer to the root
2688			if (e instanceof TreeElement) {
2689				mapTreeVariable( ((TreeElement)e).root, name);
2690				return;
2691			}
2692	
2693			// Determine the name of the element, if any, for mapping purposes
2694			String elName = null;
2695	
2696			// Don't map labeled items
2697			if (e.getLabel() == null) {
2698				if (e instanceof TokenRefElement) {
2699					// use the token id
2700					elName = ((TokenRefElement)e).atomText;
2701				}
2702				else if (e instanceof RuleRefElement) {
2703					// use the rule name
2704					elName = ((RuleRefElement)e).targetRule;
2705				}
2706			}
2707			// Add the element to the tree variable map if it has a name
2708			if (elName != null) {
2709				if (treeVariableMap.get(elName) != null) {
2710					// Name is already in the map -- mark it as duplicate
2711					treeVariableMap.remove(elName);
2712					treeVariableMap.put(elName, NONUNIQUE);
2713				}
2714				else {
2715					treeVariableMap.put(elName, name);
2716				}
2717			}
2718		}
2719		private void setupGrammarParameters(Grammar g) {
2720			if (g instanceof ParserGrammar) {
2721				labeledElementASTType = "AST";
2722				if ( g.hasOption("ASTLabelType") ) {
2723					Token tsuffix = g.getOption("ASTLabelType");
2724					if ( tsuffix != null ) {
2725						String suffix = Tool.stripFrontBack(tsuffix.getText(),"\"","\"");
2726						if ( suffix != null ) {
2727							labeledElementASTType = suffix;
2728						}
2729					}		
2730				}
2731				labeledElementType = "Token ";
2732				labeledElementInit = "null";
2733				commonExtraArgs = "";
2734				commonExtraParams = "";
2735				commonLocalVars = "";
2736				lt1Value = "LT(1)";
2737				exceptionThrown = "ParserException";
2738				throwNoViable = "throw new NoViableAltException(LT(1));";
2739			}
2740			else if (g instanceof LexerGrammar) {
2741				labeledElementType = "char ";
2742				labeledElementInit = "'\\0'";
2743				commonExtraArgs = "";
2744				commonExtraParams = "boolean _createToken";
2745				commonLocalVars = "int _ttype; Token _token=null; int _begin=text.length();";
2746				lt1Value = "LA(1)";
2747				exceptionThrown = "ScannerException";
2748				throwNoViable = "throw new ScannerException(\"no viable alt for char: \"+(char)LA(1),getLine());";
2749			}
2750			else if (g instanceof TreeWalkerGrammar) {
2751				labeledElementASTType = "AST";
2752				labeledElementType = "AST";
2753				if ( g.hasOption("ASTLabelType") ) {
2754					Token tsuffix = g.getOption("ASTLabelType");
2755					if ( tsuffix != null ) {
2756						String suffix = Tool.stripFrontBack(tsuffix.getText(),"\"","\"");
2757						if ( suffix != null ) {
2758							labeledElementASTType = suffix;
2759							labeledElementType = suffix;
2760						}
2761					}		
2762				}
2763				if ( !g.hasOption("ASTLabelType") ) {
2764					g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,"AST"));
2765				}	
2766				labeledElementInit = "null";
2767				commonExtraArgs = "_t";
2768				commonExtraParams = "AST _t";
2769				commonLocalVars = "";
2770				lt1Value = "("+labeledElementASTType+")_t";
2771				exceptionThrown = "ParserException";
2772				throwNoViable = "throw new NoViableAltException(_t);";
2773			}
2774			else {
2775				tool.panic("Unknown grammar type");
2776			}
2777		}
2778		/** This method exists so a subclass, namely VAJCodeGenerator,
2779		 *  can open the file in its own evil way.  JavaCodeGenerator
2780		 *  simply opens a text file...
2781		 */
2782		public void setupOutput(String className) throws IOException {
2783			currentOutput = antlr.Tool.openOutputFile(className + ".java");
2784		}
2785	}
2786