1	package antlr;
2	
3	/**
4	 * <b>SOFTWARE RIGHTS</b>
5	 * <p>
6	 * ANTLR 2.5.0 MageLang Institute, 1998
7	 * <p>
8	 * We reserve no legal rights to the ANTLR--it is fully in the
9	 * public domain. An individual or company may do whatever
10	 * they wish with source code distributed with ANTLR or the
11	 * code generated by ANTLR, including the incorporation of
12	 * ANTLR, or its output, into commerical software.
13	 * <p>
14	 * We encourage users to develop software with ANTLR. However,
15	 * we do ask that credit is given to us for developing
16	 * ANTLR. By "credit", we mean that if you use ANTLR or
17	 * incorporate any source code into one of your programs
18	 * (commercial product, research project, or otherwise) that
19	 * you acknowledge this fact somewhere in the documentation,
20	 * research report, etc... If you like ANTLR and have
21	 * developed a nice tool with the output, please mention that
22	 * you developed it using ANTLR. In addition, we ask that the
23	 * headers remain intact in our source code. As long as these
24	 * guidelines are kept, we expect to continue enhancing this
25	 * system and expect to make other tools available as they are
26	 * completed.
27	 * <p>
28	 * The ANTLR gang:
29	 * @version ANTLR 2.5.0 MageLang Institute, 1998
30	 * @author Terence Parr, <a href=http://www.MageLang.com>MageLang Institute</a>
31	 * @author <br>John Lilley, <a href=http://www.Empathy.com>Empathy Software</a>
32	 */
33	import java.util.Enumeration;
34	import antlr.collections.impl.BitSet;
35	import antlr.collections.impl.Vector;
36	import java.io.PrintWriter; //SAS: changed for proper text file io
37	import java.io.IOException;
38	import java.io.FileWriter;
39	
40	/**Generate MyParser.txt, MyLexer.txt and MyParserTokenTypes.txt */
41	public class DiagnosticCodeGenerator extends CodeGenerator {
	/** Non-zero if inside syntactic predicate generation.  Used to
	 *  suppress rule-return-value warnings while tracing the contents
	 *  of a syntactic predicate (see gen(RuleRefElement)). */
	protected int syntacticPredLevel = 0;

	/** true during lexer generation, false during parser generation;
	 *  selects character vs. token formatting in printSet(). */
	protected boolean doingLexRules = false;
47	
48	
	/** Create a Diagnostic code-generator using the given Grammar
	 * The caller must still call setTool, setBehavior, and setAnalyzer
	 * before generating code.
	 */
	public DiagnosticCodeGenerator() {
		super();
		// Character/string literals in the diagnostic output are rendered
		// with Java escaping conventions.
		charFormatter = new JavaCharFormatter();
	}
	/**Generate the parser, lexer, and token types documentation.
	 * Drives generation for every grammar known to the behavior, then
	 * writes one TokenTypes.txt file per writable token manager.
	 * Exits the JVM with status 1 if any grammar reported errors.
	 */
	public void gen() {

		// Do the code generation
		try {
			// Loop over all grammars
			Enumeration grammarIter = behavior.grammars.elements();
			while (grammarIter.hasMoreElements()) {
				Grammar g = (Grammar)grammarIter.nextElement();

				// Connect all the components to each other
				g.setGrammarAnalyzer(analyzer);
				g.setCodeGenerator(this);
				analyzer.setGrammar(g);

				// To get right overloading behavior across heterogeneous grammars
				// (lexer/parser/tree-walker), let the grammar dispatch back into
				// the appropriate gen(...) overload of this generator.
				g.generate();
	
				if (tool.hasError) {
					System.out.println("Exiting due to errors.");
					System.exit(1);
				}

			}

			// Loop over all token managers (some of which are lexers)
			Enumeration tmIter = behavior.tokenManagers.elements();
			while (tmIter.hasMoreElements()) {
				TokenManager tm = (TokenManager)tmIter.nextElement();
				if (!tm.isReadOnly()) {
					// Write the token manager tokens as text
					genTokenTypes(tm);
				}
			}
		}
		catch (IOException e) {
			// Best-effort: report the I/O failure and return without a stack trace.
			System.out.println(e.getMessage());
		}
	}
96		/** Generate code for the given grammar element.
97		 * @param blk The {...} action to generate
98		 */
99		public void gen(ActionElement action) {
100			if (action.isSemPred) {
101				// handled elsewhere
102			}
103			else {
104				print("ACTION: ");
105				_printAction(action.actionText);
106			}
107		}
108		/** Generate code for the given grammar element.
109		 * @param blk The "x|y|z|..." block to generate
110		 */
111		public void gen(AlternativeBlock blk) {
112			println("Start of alternative block.");
113			tabs++;
114			genBlockPreamble(blk);
115	
116			boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
117			if (!ok) {
118				println("Warning: This alternative block is non-deterministic");
119			}
120			genCommonBlock(blk);
121			tabs--;
122		}
	/** Generate code for the given grammar element.
	 * Block-end elements are synthesized by the grammar parser to represent
	 * the end of a block; they carry no content, so nothing is printed.
	 * @param end The block-end element to generate.
	 */
	public void gen(BlockEndElement end) {
		// no-op
	}
131		/** Generate code for the given grammar element.
132		 * @param blk The character literal reference to generate
133		 */
134		public void gen(CharLiteralElement atom) {
135			print("Match character ");
136			if (atom.not) {
137				_print("NOT ");
138			}
139			_print(atom.atomText);
140			if (atom.label != null) {
141				_print(", label=" + atom.label);
142			}
143			_println("");
144		}
145		/** Generate code for the given grammar element.
146		 * @param blk The character-range reference to generate
147		 */
148		public void gen(CharRangeElement r) {
149			print("Match character range: " + r.beginText + ".." + r.endText);
150			if ( r.label!=null ) {
151				_print(", label = " + r.label);
152			}
153			_println("");
154		}
	/** Generate the lexer TXT file.
	 * Writes <ClassName>.txt describing the lexer: preamble, class members,
	 * parser string literals, the synthesized nextToken rule, and every
	 * user-defined lexer rule.  Sets doingLexRules for the duration.
	 * @param g the lexer grammar to describe
	 * @throws IOException if the output file cannot be opened/written
	 */
	public void gen(LexerGrammar g) throws IOException {
		setGrammar(g);
		System.out.println("Generating " + grammar.getClassName() + ".txt");
		currentOutput = antlr.Tool.openOutputFile(grammar.getClassName() + ".txt");
		//SAS: changed for proper text file io
		
		tabs=0;
		doingLexRules = true;

		// Generate header common to all TXT output files
		genHeader();

		// Output the user-defined lexer preamble
		println("");
		println("*** Lexer Preamble Action.");
		println("This action will appear before the declaration of your lexer class:");
		tabs++;
		println(grammar.preambleAction);
		tabs--;
		println("*** End of Lexer Preamble Action");

		// Generate lexer class definition
		println("");
		println("*** Your lexer class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

		// Generate user-defined lexer class members
		println("");
		println("*** User-defined lexer  class members:");
		println("These are the member declarations that you defined for your class:");
		tabs++;
		printAction(grammar.classMemberAction);
		tabs--;
		println("*** End of user-defined lexer class members");

		// Generate string literals
		println("");
		println("*** String literals used in the parser");
		println("The following string literals were used in the parser.");
		println("An actual code generator would arrange to place these literals");
		println("into a table in the generated lexer, so that actions in the");
		println("generated lexer could match token text against the literals.");
		println("String literals used in the lexer are not listed here, as they");
		println("are incorporated into the mainstream lexer processing.");
		tabs++;
		// Enumerate all of the symbols and look for string literal symbols
		Enumeration ids = grammar.getSymbols();
		while ( ids.hasMoreElements() ) {
			GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
			// Only processing string literals -- reject other symbol entries
			if ( sym instanceof StringLiteralSymbol ) {
				StringLiteralSymbol s = (StringLiteralSymbol)sym;
				println(s.getId() + " = " + s.getTokenType());
			}
		}
		tabs--;
		println("*** End of string literals used by the parser");

		// Generate nextToken() rule.
		// nextToken() is a synthetic lexer rule that is the implicit OR of all
		// user-defined lexer rules.
		genNextToken();

		// Generate code for each rule in the lexer
		println("");
		println("*** User-defined Lexer rules:");
		tabs++;
		
		// Skip "mnextToken": that symbol is the synthesized rule defined
		// by genNextToken() above, not a user rule.
		ids = grammar.rules.elements();
		while ( ids.hasMoreElements() ) {
			RuleSymbol rs = (RuleSymbol)ids.nextElement();
			if (!rs.id.equals("mnextToken")) {
				genRule(rs);
			}
		}

		tabs--;
		println("");
		println("*** End User-defined Lexer rules:");

		// Close the lexer output file
		currentOutput.close();
		currentOutput = null;
		doingLexRules = false;
	}
240		/** Generate code for the given grammar element.
241		 * @param blk The (...)+ block to generate
242		 */
243		public void gen(OneOrMoreBlock blk) {
244			println("Start ONE-OR-MORE (...)+ block:");
245			tabs++;
246			genBlockPreamble(blk);
247			boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
248			if (!ok) {
249				println("Warning: This one-or-more block is non-deterministic");
250			}
251			genCommonBlock(blk);
252			tabs--;
253			println("End ONE-OR-MORE block.");
254		}
	/** Generate the parser TXT file.
	 * Writes <ClassName>.txt describing the parser: preamble, class
	 * members, and every parser rule.
	 * @param g the parser grammar to describe
	 * @throws IOException if the output file cannot be opened/written
	 */
	public void gen(ParserGrammar g) throws IOException {
		setGrammar(g);
		// Open the output stream for the parser and set the currentOutput
		System.out.println("Generating " + grammar.getClassName() + ".txt");
		currentOutput = antlr.Tool.openOutputFile(grammar.getClassName()+".txt");
		//SAS: changed for proper text file io
		
		tabs = 0;

		// Generate the header common to all output files.
		genHeader();
		
		// Output the user-defined parser preamble
		println("");
		println("*** Parser Preamble Action.");
		println("This action will appear before the declaration of your parser class:");
		tabs++;
		println(grammar.preambleAction);
		tabs--;
		println("*** End of Parser Preamble Action");

		// Generate parser class definition
		println("");
		println("*** Your parser class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

		// Generate user-defined parser class members
		println("");
		println("*** User-defined parser class members:");
		println("These are the member declarations that you defined for your class:");
		tabs++;
		printAction(grammar.classMemberAction);
		tabs--;
		println("*** End of user-defined parser class members");

		// Generate code for each rule in the grammar
		println("");
		println("*** Parser rules:");
		tabs++;

		// Enumerate the parser rules
		Enumeration rules = grammar.rules.elements();
		while ( rules.hasMoreElements() ) {
			println("");
			// Get the rules from the list and downcast it to proper type
			GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
			// Only process parser rules (skip literals and token symbols)
			if ( sym instanceof RuleSymbol) {
				genRule((RuleSymbol)sym);
			}
		}
		tabs--;
		println("");
		println("*** End of parser rules");

		println("");
		println("*** End of parser");

		// Close the parser output stream
		currentOutput.close();
		currentOutput = null;
	}
317		/** Generate code for the given grammar element.
318		 * @param blk The rule-reference to generate
319		 */
320		public void gen(RuleRefElement rr) {
321			RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
322	
323			// Generate the actual rule description
324			print("Rule Reference: " + rr.targetRule);
325			if (rr.idAssign != null) {
326				_print(", assigned to '" + rr.idAssign + "'");
327			}
328			if (rr.args != null) {
329				_print(", arguments = " + rr.args);
330			}
331			_println("");
332	
333			// Perform diagnostics
334			if (rs == null || !rs.isDefined())
335			{
336				println("Rule '" + rr.targetRule + "' is referenced, but that rule is not defined.");
337				println("\tPerhaps the rule is misspelled, or you forgot to define it.");
338				return;
339			}
340			if (!(rs instanceof RuleSymbol))
341			{
342				// Should this ever happen??
343				println("Rule '" + rr.targetRule + "' is referenced, but that is not a grammar rule.");
344				return;
345			}
346			if (rr.idAssign != null)
347			{
348				// Warn if the rule has no return type
349				if (rs.block.returnAction == null)
350				{
351					println("Error: You assigned from Rule '" + rr.targetRule + "', but that rule has no return type.");
352				}
353			} else {
354				// Warn about return value if any, but not inside syntactic predicate
355				if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
356				{
357					println("Warning: Rule '" + rr.targetRule + "' returns a value");
358				}
359			}
360			if (rr.args != null && rs.block.argAction == null) {
361				println("Error: Rule '" + rr.targetRule + "' accepts no arguments.");
362			}
363		}
364		/** Generate code for the given grammar element.
365		 * @param blk The string-literal reference to generate
366		 */
367		public void gen(StringLiteralElement atom) {
368			print("Match string literal ");
369			_print(atom.atomText);
370			if (atom.label != null) {
371				_print(", label=" + atom.label);
372			}
373			_println("");
374		}
375		/** Generate code for the given grammar element.
376		 * @param blk The token-range reference to generate
377		 */
378		public void gen(TokenRangeElement r) {
379			print("Match token range: " + r.beginText + ".." + r.endText);
380			if ( r.label!=null ) {
381				_print(", label = " + r.label);
382			}
383			_println("");
384		}
385		/** Generate code for the given grammar element.
386		 * @param blk The token-reference to generate
387		 */
388		public void gen(TokenRefElement atom) {
389			print("Match token ");
390			if (atom.not) {
391				_print("NOT ");
392			}
393			_print(atom.atomText);
394			if (atom.label != null) {
395				_print(", label=" + atom.label);
396			}
397			_println("");
398		}
	/** Generate code for the given grammar element.
	 * @param t The tree pattern reference to describe
	 * NOTE(review): unlike the other atom generators this uses print()
	 * with no trailing _println(""), so the following output continues on
	 * the same line -- confirm this is intended.
	 */
	public void gen(TreeElement t) {
		print("Tree reference: "+t);
	}
	/** Generate the tree-walker TXT file.
	 * Writes <ClassName>.txt describing the tree-walker: preamble, class
	 * members, and every tree-walker rule.
	 * @param g the tree-walker grammar to describe
	 * @throws IOException if the output file cannot be opened/written
	 */
	public  void gen(TreeWalkerGrammar g) throws IOException {
		setGrammar(g);
		// Open the output stream for the parser and set the currentOutput
		System.out.println("Generating " + grammar.getClassName() + ".txt");
		currentOutput = antlr.Tool.openOutputFile(grammar.getClassName()+".txt");
		//SAS: changed for proper text file io
		
		tabs = 0;

		// Generate the header common to all output files.
		genHeader();
		
		// Output the user-defined tree-walker preamble
		println("");
		println("*** Tree-walker Preamble Action.");
		println("This action will appear before the declaration of your tree-walker class:");
		tabs++;
		println(grammar.preambleAction);
		tabs--;
		println("*** End of tree-walker Preamble Action");

		// Generate tree-walker class definition
		println("");
		println("*** Your tree-walker class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

		// Generate user-defined tree-walker class members
		println("");
		println("*** User-defined tree-walker class members:");
		println("These are the member declarations that you defined for your class:");
		tabs++;
		printAction(grammar.classMemberAction);
		tabs--;
		println("*** End of user-defined tree-walker class members");

		// Generate code for each rule in the grammar
		println("");
		println("*** tree-walker rules:");
		tabs++;

		// Enumerate the tree-walker rules
		Enumeration rules = grammar.rules.elements();
		while ( rules.hasMoreElements() ) {
			println("");
			// Get the rules from the list and downcast it to proper type
			GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
			// Only process tree-walker rules (skip literals and token symbols)
			if ( sym instanceof RuleSymbol) {
				genRule((RuleSymbol)sym);
			}
		}
		tabs--;
		println("");
		println("*** End of tree-walker rules");

		println("");
		println("*** End of tree-walker");

		// Close the tree-walker output stream
		currentOutput.close();
		currentOutput = null;
	}
464		/** Generate a wildcard element */
465		public void gen(WildcardElement wc) {
466			print("Match wildcard");
467			if ( wc.getLabel()!=null ) {
468				_print(", label = " + wc.getLabel());
469			}
470			_println("");
471		}
472		/** Generate code for the given grammar element.
473		 * @param blk The (...)* block to generate
474		 */
475		public void gen(ZeroOrMoreBlock blk) {
476			println("Start ZERO-OR-MORE (...)+ block:");
477			tabs++;
478			genBlockPreamble(blk);
479			boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
480			if (!ok) {
481				println("Warning: This zero-or-more block is non-deterministic");
482			}
483			genCommonBlock(blk);
484			tabs--;
485			println("End ZERO-OR-MORE block.");
486		}
487		protected void genAlt(Alternative alt) {
488			for (
489				AlternativeElement elem = alt.head;
490				!(elem instanceof BlockEndElement);
491				elem = elem.next
492			)
493			{
494				elem.generate();
495			}
496			if (alt.getTreeSpecifier() != null) 
497			{
498				println("AST will be built as: " + alt.getTreeSpecifier().getText());
499			}
500		}
501		/** Generate the header for a block, which may be a RuleBlock or a
502		 * plain AlternativeBLock.  This generates any variable declarations,
503		 * init-actions, and syntactic-predicate-testing variables.
504		 * @blk The block for which the preamble is to be generated.
505		 */
506		protected void genBlockPreamble(AlternativeBlock blk) {
507			// dump out init action
508			if ( blk.initAction!=null ) {
509				printAction("Init action: " + blk.initAction);
510			}
511		}
512		/**Generate common code for a block of alternatives; return a postscript
513		 * that needs to be generated at the end of the block.  Other routines
514		 * may append else-clauses and such for error checking before the postfix
515		 * is generated.
516		 */
517		public void genCommonBlock(AlternativeBlock blk) {
518			boolean singleAlt = (blk.alternatives.size() == 1);
519	
520			println("Start of an alternative block.");
521			tabs++;
522			println("The lookahead set for this block is:");
523			tabs++;
524			genLookaheadSetForBlock(blk);
525			tabs--;
526	
527			if (singleAlt) {
528				println("This block has a single alternative");
529				if (blk.getAlternativeAt(0).synPred != null)
530				{
531					// Generate a warning if there is one alt and it has a synPred
532					println("Warning: you specified a syntactic predicate for this alternative,");
533					println("and it is the only alternative of a block and will be ignored.");
534				}
535			}
536			else {
537				println("This block has multiple alternatives:");
538				tabs++;
539			}
540	
541			for (int i=0; i<blk.alternatives.size(); i++) {
542				Alternative alt = blk.getAlternativeAt(i);
543				AlternativeElement elem = alt.head;
544	
545				// Print lookahead set for alternate
546				println("");
547				if (i != 0) {
548					print("Otherwise, ");
549				} else {
550					print("");
551				}
552				_println("Alternate(" + (i+1) + ") will be taken IF:");
553				println("The lookahead set: ");
554				tabs++;
555				genLookaheadSetForAlt(alt);
556				tabs--;
557				if ( alt.semPred != null || alt.synPred != null ) {
558					print("is matched, AND ");
559				} else {
560					println("is matched.");
561				}
562	
563				// Dump semantic predicates
564				if ( alt.semPred != null ) {
565					_println("the semantic predicate:");
566					tabs++;
567					println(alt.semPred);
568					if ( alt.synPred != null ) {
569						print("is true, AND ");
570					} else {
571						println("is true.");
572					}
573				}
574	
575				// Dump syntactic predicate
576				if ( alt.synPred != null ) {
577					_println("the syntactic predicate:");
578					tabs++;
579					genSynPred( alt.synPred );
580					tabs--;
581					println("is matched.");
582				}
583	
584				// Dump the alternative
585				genAlt(alt);
586			}
587			println("");
588			println("OTHERWISE, a NoViableAlt exception will be thrown");
589			println("");
590	
591			if (!singleAlt) {
592				tabs--;
593				println("End of alternatives");
594			}
595			tabs--;
596			println("End of alternative block.");
597		}
	/** Generate a textual representation of the follow set
	 * for a block.
	 * @param blk  The rule block of interest
	 */
	public void genFollowSetForRuleBlock(RuleBlock blk)
	{
		// FOLLOW(1) of the rule's end node: the tokens that may appear
		// immediately after any invocation of this rule.
		Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
		printSet(grammar.maxk, 1, follow);
	}
	/** Generate a header that is common to all TXT files:
	 * tool banner plus the grammar file's user-defined header action. */
	protected void genHeader() 
	{
		println("ANTLR-generated file resulting from grammar " + tool.grammarFile);
		println("Diagnostic output");
		println("");
		println("Terence Parr, MageLang Institute");
		println("with John Lilley, Empathy Software");
		println("ANTLR Version "+ANTLRParser.version+"; 1996,1997");
		println("");
		println("*** Header Action.");
		println("This action will appear at the top of all generated files.");
		tabs++;
		printAction(behavior.headerAction);
		tabs--;
		println("*** End of Header Action");
		println("");
	}
	/**Generate the lookahead set for an alternate. */
	protected void genLookaheadSetForAlt(Alternative alt) {
		// In the lexer, epsilon in the depth-1 cache means this alternative
		// can match anything, so a set listing would be meaningless.
		if ( doingLexRules && alt.cache[1].containsEpsilon() ) {
			println("MATCHES ALL");
			return;
		}
		int depth = alt.lookaheadDepth;
		if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
			// if the decision is nondeterministic, do the best we can: LL(k)
			// any predicates that are around will be generated later.
			depth = grammar.maxk;
		}
		// Print the cached lookahead set at each depth 1..depth
		for (int i = 1; i <= depth; i++)
		{
			Lookahead lookahead = alt.cache[i];
			printSet(depth, i, lookahead);
		}
	}
643		/** Generate a textual representation of the lookahead set
644		 * for a block.
645		 * @param blk  The block of interest
646		 */
647		public void genLookaheadSetForBlock(AlternativeBlock blk)
648		{
649			// Find the maximal lookahead depth over all alternatives
650			int depth = 0;
651			for (int i=0; i<blk.alternatives.size(); i++) {
652				Alternative alt = blk.getAlternativeAt(i);
653				if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
654					depth = grammar.maxk;
655					break;
656				} 
657				else if (depth < alt.lookaheadDepth) {
658					depth = alt.lookaheadDepth;
659				}
660			}
661	
662			for (int i = 1; i <= depth; i++)
663			{
664				Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
665				printSet(depth, i, lookahead);
666			}
667		}
	/** Generate the nextToken rule.
	 * nextToken is a synthetic lexer rule that is the implicit OR of all
	 * user-defined lexer rules.
	 * Side effect: defines the synthesized rule symbol "mnextToken" in the
	 * grammar (callers later skip that id when listing user rules).
	 */
	public void genNextToken() {
		println("");
		println("*** Lexer nextToken rule:");
		println("The lexer nextToken rule is synthesized from all of the user-defined");
		println("lexer rules.  It logically consists of one big alternative block with");
		println("each user-defined rule being an alternative.");
		println("");

		// Create the synthesized rule block for nextToken consisting
		// of an alternate block containing all the user-defined lexer rules.
		RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");

		// Define the nextToken rule symbol
		RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
		nextTokenRs.setDefined();
		nextTokenRs.setBlock(blk);
		nextTokenRs.access = "private";
		grammar.define(nextTokenRs);

		// Analyze the synthesized block
		if (!grammar.theLLkAnalyzer.deterministic(blk))
		{
			println("The grammar analyzer has determined that the synthesized");
			println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
			println("This means that there is some overlap of the character");
			println("lookahead for two or more of your lexer rules.");
		}

		genCommonBlock(blk);

		println("*** End of nextToken lexer rule.");
	}
	/** Generate code for a named rule block.
	 * Prints the rule banner, access modifier, return/argument info,
	 * init-action, the alternatives, error handlers, and (for parser
	 * rules) the follow set.
	 * @param s The RuleSymbol describing the rule to generate
	*/
	public void genRule(RuleSymbol s) {
		println("");
		String ruleType = (doingLexRules ? "Lexer" : "Parser");
		println("*** " + ruleType + " Rule: " + s.getId());
		if (!s.isDefined() ) {
			// Referenced but never defined: report and bail out.
			println("This rule is undefined.");
			println("This means that the rule was referenced somewhere in the grammar,");
			println("but a definition for the rule was not encountered.");
			println("It is also possible that syntax errors during the parse of");
			println("your grammar file prevented correct processing of the rule.");
			println("*** End " + ruleType + " Rule: " + s.getId());
			return;
		}
		tabs++;

		if (s.access.length() != 0) {
			println("Access: " + s.access);
		}

		// Get rule return type and arguments
		RuleBlock rblk = s.getBlock();

		// Gen method return value(s)
		if (rblk.returnAction != null) {
			println("Return value(s): " + rblk.returnAction);
			if ( doingLexRules ) {
				println("Error: you specified return value(s) for a lexical rule.");
				println("\tLexical rules have an implicit return type of 'int'.");
			}
		} else {
			if ( doingLexRules ) {
				println("Return value: lexical rule returns an implicit token type");
			} else {
				println("Return value: none");
			}
		}

		// Gen arguments
		if (rblk.argAction != null) 
		{
			println("Arguments: " + rblk.argAction);
		}

		// Dump any init-action
		genBlockPreamble(rblk);

		// Analyze the rule
		boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
		if (!ok) {
			println("Error: This rule is non-deterministic");
		}
	
		// Dump the alternates of the rule
		genCommonBlock(rblk);

		// Search for an unlabeled exception specification attached to the rule
		ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");

		// Generate user-defined or default catch phrases
		if (unlabeledUserSpec != null) {
			println("You specified error-handler(s) for this rule:");
			tabs++;
			for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++)
			{
				if (i != 0) {
					println("");
				}

				ExceptionHandler handler = (ExceptionHandler)unlabeledUserSpec.handlers.elementAt(i);
				println("Error-handler(" + (i+1) + ") catches [" + handler.exceptionTypeAndName.getText() + "] and executes:");
				printAction(handler.action);
			}
			tabs--;
			println("End error-handlers.");
		}
		else if (!doingLexRules) {
			println("Default error-handling will be generated, which catches all");
			println("parser exceptions and consumes tokens until the follow-set is seen.");
		}


		// Dump the follow set
		// Doesn't seem to work for lexical rules...
		if (!doingLexRules) {
			println("The follow set for this rule is:");
			tabs++;
			genFollowSetForRuleBlock(rblk);
			tabs--;
		}

		tabs--;
		println("*** End " + ruleType + " Rule: " + s.getId());
	}
	/** Generate the syntactic predicate.  This basically generates
	 * the alternative block, but tracks that we are inside a synPred
	 * via syntacticPredLevel so other diagnostics (e.g. the ignored
	 * rule-return-value warning) can be suppressed.
	 * @param blk  The syntactic predicate block
	 */
	protected void genSynPred(SynPredBlock blk) {
		syntacticPredLevel++;
		gen((AlternativeBlock)blk);
		syntacticPredLevel--;
	}
	/** Generate the token types TXT file.
	 * Writes <Name>TokenTypes.txt listing each token identifier and its
	 * numeric type, starting at Token.MIN_USER_TYPE.
	 * @param tm the token manager whose vocabulary is listed
	 * @throws IOException if the output file cannot be opened/written
	 */
	protected void genTokenTypes(TokenManager tm) throws IOException {
		// Open the token output TXT file and set the currentOutput stream
		System.out.println("Generating " + tm.getName() + "TokenTypes.txt");
		currentOutput = antlr.Tool.openOutputFile(tm.getName() + "TokenTypes.txt");
		//SAS: changed for proper text file io
		tabs = 0;
	
		// Generate the header common to all diagnostic files
		genHeader();

		// Generate a string for each token.  This creates a static
		// array of Strings indexed by token type.
		println("");
		println("*** Tokens used by the parser");
		println("This is a list of the token numeric values and the corresponding");
		println("token identifiers.  Some tokens are literals, and because of that");
		println("they have no identifiers.  Literals are double-quoted.");
		tabs++;

		// Enumerate all the valid token types; null slots are unused types.
		Vector v = tm.getVocabulary();
		for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
			String s = (String)v.elementAt(i);
			if (s != null) {
				println(s + " = " + i);
			}
		}

		// Close the interface
		tabs--;
		println("*** End of tokens used by the parser");

		// Close the tokens output file
		currentOutput.close();
		currentOutput = null;
	}
846		/** Get a string for an expression to generate creation of an AST subtree.
847		  * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
848		  */
849		public String getASTCreateString(Vector v) {
850			return "***Create an AST from a vector here***"+System.getProperty("line.separator");
851		}
852		/** Get a string for an expression to generate creating of an AST node
853		  * @param str The arguments to the AST constructor
854		  */
855		public String getASTCreateString(String str) {
856			return "[" + str + "]";
857		}
	/** Map an identifier to its corresponding tree-node variable.
	  * This is context-sensitive, depending on the rule and alternative
	  * being generated; the diagnostic generator performs no mapping.
	  * @param id The identifier name to map
	  * @param tInfo Action translation info (unused here)
	  * @return the identifier unchanged
	  */
	public String mapTreeId(String id, ActionTransInfo tInfo) {
		return id;
	}
867		/** Format a lookahead or follow set.
868		 * @param depth The depth of the entire lookahead/follow
869		 * @param k The lookahead level to print
870		 * @param lookahead  The lookahead/follow set to print
871		 */
872		public void printSet(int depth, int k, Lookahead lookahead) {
873			int numCols = 5;
874	
875			int[] elems = lookahead.fset.toArray();
876	
877			if (depth != 1) {
878				print("k==" + k + ": {");
879			} else {
880				print("{ ");
881			}
882			if (elems.length > numCols) {
883				_println("");
884				tabs++;
885				print("");
886			}
887	
888			int column = 0;
889			for (int i = 0; i < elems.length; i++)
890			{
891				column++;
892				if (column > numCols) {
893					_println("");
894					print("");
895					column = 0;
896				}
897				if (doingLexRules) {
898					_print(charFormatter.literalChar(elems[i]));
899				} else {
900					_print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
901				}
902				if (i != elems.length-1) {
903					_print(", ");
904				}
905			}
906	
907			if (elems.length > numCols) {
908				_println("");
909				tabs--;
910				print("");
911			}
912			_println(" }");
913		}
914	}
915