package antlr;

/**
 * <b>SOFTWARE RIGHTS</b>
 * <p>
 * ANTLR 2.5.0 MageLang Institute, 1998
 * <p>
 * We reserve no legal rights to the ANTLR--it is fully in the
 * public domain. An individual or company may do whatever
 * they wish with source code distributed with ANTLR or the
 * code generated by ANTLR, including the incorporation of
 * ANTLR, or its output, into commercial software.
 * <p>
 * We encourage users to develop software with ANTLR. However,
 * we do ask that credit is given to us for developing
 * ANTLR. By "credit", we mean that if you use ANTLR or
 * incorporate any source code into one of your programs
 * (commercial product, research project, or otherwise) that
 * you acknowledge this fact somewhere in the documentation,
 * research report, etc... If you like ANTLR and have
 * developed a nice tool with the output, please mention that
 * you developed it using ANTLR. In addition, we ask that the
 * headers remain intact in our source code. As long as these
 * guidelines are kept, we expect to continue enhancing this
 * system and expect to make other tools available as they are
 * completed.
 * <p>
 * The ANTLR gang:
 * @version ANTLR 2.5.0 MageLang Institute, 1998
 * @author Terence Parr, <a href=http://www.MageLang.com>MageLang Institute</a>
 * @author <br>John Lilley, <a href=http://www.Empathy.com>Empathy Software</a>
 */
import java.util.Enumeration;
import antlr.collections.impl.BitSet;
import antlr.collections.impl.Vector;
import java.io.PrintWriter;
import java.io.IOException;
import java.io.FileWriter;

public class DiagnosticCodeGenerator extends CodeGenerator {

    // Non-zero while code is being generated inside a syntactic predicate
    protected int syntacticPredLevel = 0;

    // True while lexer rules are being generated; false otherwise
    protected boolean doingLexRules = false;

    /** Create a Diagnostic code-generator.
     * The caller must still call setTool, setBehavior, and setAnalyzer
     * before generating code.
     */
    public DiagnosticCodeGenerator() {
        super();
        charFormatter = new JavaCharFormatter();
    }
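
    /* Usage sketch (illustrative; not part of the original ANTLR source).
     * The constructor comment above says the caller must supply the tool,
     * behavior, and analyzer before generating; the concrete types of those
     * three objects are assumptions here, so the wiring is shown only as an
     * outline in comment form:
     *
     *     DiagnosticCodeGenerator diag = new DiagnosticCodeGenerator();
     *     diag.setTool(tool);          // the antlr.Tool driving this run
     *     diag.setBehavior(behavior);  // supplies behavior.grammars and behavior.tokenManagers
     *     diag.setAnalyzer(analyzer);  // grammar analyzer used for lookahead computations
     *     diag.gen();                  // writes the diagnostic .txt files
     */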

    /** Generate the parser, lexer, and token-type documentation files. */
    public void gen() {

        try {
            // Loop over all grammars
            Enumeration grammarIter = behavior.grammars.elements();
            while (grammarIter.hasMoreElements()) {
                Grammar g = (Grammar)grammarIter.nextElement();

                // Wire the grammar, analyzer, and code generator together
                g.setGrammarAnalyzer(analyzer);
                g.setCodeGenerator(this);
                analyzer.setGrammar(g);

                g.generate();

                if (tool.hasError) {
                    System.out.println("Exiting due to errors.");
                    System.exit(1);
                }

            }

            // Loop over all token managers (some of which are lexers)
            Enumeration tmIter = behavior.tokenManagers.elements();
            while (tmIter.hasMoreElements()) {
                TokenManager tm = (TokenManager)tmIter.nextElement();
                if (!tm.isReadOnly()) {
                    genTokenTypes(tm);
                }
            }
        }
        catch (IOException e) {
            System.out.println(e.getMessage());
        }
    }
    /** Generate code for the given grammar element.
     * @param action The {...} action to generate
     */
    public void gen(ActionElement action) {
        if (action.isSemPred) {
            // no diagnostic output for semantic predicates here
        }
        else {
            print("ACTION: ");
            _printAction(action.actionText);
        }
    }
    /** Generate code for the given grammar element.
     * @param blk The "x|y|z|..." block to generate
     */
    public void gen(AlternativeBlock blk) {
        println("Start of alternative block.");
        tabs++;
        genBlockPreamble(blk);

        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
        if (!ok) {
            println("Warning: This alternative block is non-deterministic");
        }
        genCommonBlock(blk);
        tabs--;
    }
    /** Generate code for the given grammar element.
     * @param end The block-end element to generate. Block-end
     * elements are synthesized by the grammar parser to represent
     * the end of a block.
     */
    public void gen(BlockEndElement end) {
        // no diagnostic output is needed for a block end
    }
    /** Generate code for the given grammar element.
     * @param atom The character literal reference to generate
     */
    public void gen(CharLiteralElement atom) {
        print("Match character ");
        if (atom.not) {
            _print("NOT ");
        }
        _print(atom.atomText);
        if (atom.label != null) {
            _print(", label=" + atom.label);
        }
        _println("");
    }
    /** Generate code for the given grammar element.
     * @param r The character-range reference to generate
     */
    public void gen(CharRangeElement r) {
        print("Match character range: " + r.beginText + ".." + r.endText);
        if (r.label != null) {
            _print(", label = " + r.label);
        }
        _println("");
    }

    /** Generate the diagnostic output for a lexer grammar. */
    public void gen(LexerGrammar g) throws IOException {
        setGrammar(g);
        System.out.println("Generating " + grammar.getClassName() + ".txt");
        currentOutput = antlr.Tool.openOutputFile(grammar.getClassName() + ".txt");

        tabs = 0;
        doingLexRules = true;

        genHeader();

        println("");
        println("*** Lexer Preamble Action.");
        println("This action will appear before the declaration of your lexer class:");
        tabs++;
        println(grammar.preambleAction);
        tabs--;
        println("*** End of Lexer Preamble Action");

        println("");
        println("*** Your lexer class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

        println("");
        println("*** User-defined lexer class members:");
        println("These are the member declarations that you defined for your class:");
        tabs++;
        printAction(grammar.classMemberAction);
        tabs--;
        println("*** End of user-defined lexer class members");

        println("");
        println("*** String literals used in the parser");
        println("The following string literals were used in the parser.");
        println("An actual code generator would arrange to place these literals");
        println("into a table in the generated lexer, so that actions in the");
        println("generated lexer could match token text against the literals.");
        println("String literals used in the lexer are not listed here, as they");
        println("are incorporated into the mainstream lexer processing.");
        tabs++;
        Enumeration ids = grammar.getSymbols();
        while (ids.hasMoreElements()) {
            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
            if (sym instanceof StringLiteralSymbol) {
                StringLiteralSymbol s = (StringLiteralSymbol)sym;
                println(s.getId() + " = " + s.getTokenType());
            }
        }
        tabs--;
        println("*** End of string literals used by the parser");

        // Generate the synthesized nextToken rule
        genNextToken();

        println("");
        println("*** User-defined Lexer rules:");
        tabs++;

        // Generate each user-defined rule, skipping the synthesized mnextToken rule
        ids = grammar.rules.elements();
        while (ids.hasMoreElements()) {
            RuleSymbol rs = (RuleSymbol)ids.nextElement();
            if (!rs.id.equals("mnextToken")) {
                genRule(rs);
            }
        }

        tabs--;
        println("");
        println("*** End User-defined Lexer rules:");

        currentOutput.close();
        currentOutput = null;
        doingLexRules = false;
    }
    /** Generate code for the given grammar element.
     * @param blk The (...)+ block to generate
     */
    public void gen(OneOrMoreBlock blk) {
        println("Start ONE-OR-MORE (...)+ block:");
        tabs++;
        genBlockPreamble(blk);
        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
        if (!ok) {
            println("Warning: This one-or-more block is non-deterministic");
        }
        genCommonBlock(blk);
        tabs--;
        println("End ONE-OR-MORE block.");
    }

    /** Generate the diagnostic output for a parser grammar. */
    public void gen(ParserGrammar g) throws IOException {
        setGrammar(g);
        System.out.println("Generating " + grammar.getClassName() + ".txt");
        currentOutput = antlr.Tool.openOutputFile(grammar.getClassName() + ".txt");

        tabs = 0;

        genHeader();

        println("");
        println("*** Parser Preamble Action.");
        println("This action will appear before the declaration of your parser class:");
        tabs++;
        println(grammar.preambleAction);
        tabs--;
        println("*** End of Parser Preamble Action");

        println("");
        println("*** Your parser class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

        println("");
        println("*** User-defined parser class members:");
        println("These are the member declarations that you defined for your class:");
        tabs++;
        printAction(grammar.classMemberAction);
        tabs--;
        println("*** End of user-defined parser class members");

        println("");
        println("*** Parser rules:");
        tabs++;

        // Generate code for each rule in the grammar
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol)sym);
            }
        }
        tabs--;
        println("");
        println("*** End of parser rules");

        println("");
        println("*** End of parser");

        currentOutput.close();
        currentOutput = null;
    }
    /** Generate code for the given grammar element.
     * @param rr The rule-reference to generate
     */
    public void gen(RuleRefElement rr) {
        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);

        // Print the rule reference along with any assignment or arguments
        print("Rule Reference: " + rr.targetRule);
        if (rr.idAssign != null) {
            _print(", assigned to '" + rr.idAssign + "'");
        }
        if (rr.args != null) {
            _print(", arguments = " + rr.args);
        }
        _println("");

        // Perform diagnostics on the referenced rule
        if (rs == null || !rs.isDefined())
        {
            println("Rule '" + rr.targetRule + "' is referenced, but that rule is not defined.");
            println("\tPerhaps the rule is misspelled, or you forgot to define it.");
            return;
        }
        if (!(rs instanceof RuleSymbol))
        {
            println("Rule '" + rr.targetRule + "' is referenced, but that is not a grammar rule.");
            return;
        }
        if (rr.idAssign != null)
        {
            // A value was assigned from this rule reference, so the rule must return one
            if (rs.block.returnAction == null)
            {
                println("Error: You assigned from Rule '" + rr.targetRule + "', but that rule has no return type.");
            }
        } else {
            // Warn if a non-lexer rule that returns a value is referenced without an assignment
            if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
            {
                println("Warning: Rule '" + rr.targetRule + "' returns a value");
            }
        }
        if (rr.args != null && rs.block.argAction == null) {
            println("Error: Rule '" + rr.targetRule + "' accepts no arguments.");
        }
    }
    /** Generate code for the given grammar element.
     * @param atom The string-literal reference to generate
     */
    public void gen(StringLiteralElement atom) {
        print("Match string literal ");
        _print(atom.atomText);
        if (atom.label != null) {
            _print(", label=" + atom.label);
        }
        _println("");
    }
    /** Generate code for the given grammar element.
     * @param r The token-range reference to generate
     */
    public void gen(TokenRangeElement r) {
        print("Match token range: " + r.beginText + ".." + r.endText);
        if (r.label != null) {
            _print(", label = " + r.label);
        }
        _println("");
    }
    /** Generate code for the given grammar element.
     * @param atom The token-reference to generate
     */
    public void gen(TokenRefElement atom) {
        print("Match token ");
        if (atom.not) {
            _print("NOT ");
        }
        _print(atom.atomText);
        if (atom.label != null) {
            _print(", label=" + atom.label);
        }
        _println("");
    }
    /** Generate code for the given grammar element.
     * @param t The tree element to generate
     */
    public void gen(TreeElement t) {
        print("Tree reference: " + t);
    }

    /** Generate the diagnostic output for a tree-walker grammar. */
    public void gen(TreeWalkerGrammar g) throws IOException {
        setGrammar(g);
        System.out.println("Generating " + grammar.getClassName() + ".txt");
        currentOutput = antlr.Tool.openOutputFile(grammar.getClassName() + ".txt");

        tabs = 0;

        genHeader();

        println("");
        println("*** Tree-walker Preamble Action.");
        println("This action will appear before the declaration of your tree-walker class:");
        tabs++;
        println(grammar.preambleAction);
        tabs--;
        println("*** End of tree-walker Preamble Action");

        println("");
        println("*** Your tree-walker class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

        println("");
        println("*** User-defined tree-walker class members:");
        println("These are the member declarations that you defined for your class:");
        tabs++;
        printAction(grammar.classMemberAction);
        tabs--;
        println("*** End of user-defined tree-walker class members");

        println("");
        println("*** tree-walker rules:");
        tabs++;

        // Generate code for each rule in the grammar
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol)sym);
            }
        }
        tabs--;
        println("");
        println("*** End of tree-walker rules");

        println("");
        println("*** End of tree-walker");

        currentOutput.close();
        currentOutput = null;
    }

    /** Generate code for the given grammar element.
     * @param wc The wildcard element to generate
     */
    public void gen(WildcardElement wc) {
        print("Match wildcard");
        if (wc.getLabel() != null) {
            _print(", label = " + wc.getLabel());
        }
        _println("");
    }
    /** Generate code for the given grammar element.
     * @param blk The (...)* block to generate
     */
    public void gen(ZeroOrMoreBlock blk) {
        println("Start ZERO-OR-MORE (...)* block:");
        tabs++;
        genBlockPreamble(blk);
        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
        if (!ok) {
            println("Warning: This zero-or-more block is non-deterministic");
        }
        genCommonBlock(blk);
        tabs--;
        println("End ZERO-OR-MORE block.");
    }
    protected void genAlt(Alternative alt) {
        for (
            AlternativeElement elem = alt.head;
            !(elem instanceof BlockEndElement);
            elem = elem.next
        )
        {
            elem.generate();
        }
        if (alt.getTreeSpecifier() != null)
        {
            println("AST will be built as: " + alt.getTreeSpecifier().getText());
        }
    }
    /** Generate the header for a block, which may be a RuleBlock or a
     * plain AlternativeBlock. This generates any variable declarations,
     * init-actions, and syntactic-predicate-testing variables.
     * @param blk The block for which the preamble is to be generated.
     */
    protected void genBlockPreamble(AlternativeBlock blk) {
        if (blk.initAction != null) {
            printAction("Init action: " + blk.initAction);
        }
    }
    /** Generate common code for a block of alternatives; return a postscript
     * that needs to be generated at the end of the block. Other routines
     * may append else-clauses and such for error checking before the postfix
     * is generated.
     */
    public void genCommonBlock(AlternativeBlock blk) {
        boolean singleAlt = (blk.alternatives.size() == 1);

        println("Start of an alternative block.");
        tabs++;
        println("The lookahead set for this block is:");
        tabs++;
        genLookaheadSetForBlock(blk);
        tabs--;

        if (singleAlt) {
            println("This block has a single alternative");
            if (blk.getAlternativeAt(0).synPred != null)
            {
                println("Warning: you specified a syntactic predicate for this alternative,");
                println("and it is the only alternative of a block and will be ignored.");
            }
        }
        else {
            println("This block has multiple alternatives:");
            tabs++;
        }

        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);
            AlternativeElement elem = alt.head;

            println("");
            if (i != 0) {
                print("Otherwise, ");
            } else {
                print("");
            }
            _println("Alternate(" + (i+1) + ") will be taken IF:");
            println("The lookahead set: ");
            tabs++;
            genLookaheadSetForAlt(alt);
            tabs--;
            if (alt.semPred != null || alt.synPred != null) {
                print("is matched, AND ");
            } else {
                println("is matched.");
            }

            // Print the semantic predicate, if there is one
            if (alt.semPred != null) {
                _println("the semantic predicate:");
                tabs++;
                println(alt.semPred);
                tabs--;  // balance the tabs++ above so later output is not over-indented
                if (alt.synPred != null) {
                    print("is true, AND ");
                } else {
                    println("is true.");
                }
            }

            if (alt.synPred != null) {
                _println("the syntactic predicate:");
                tabs++;
                genSynPred(alt.synPred);
                tabs--;
                println("is matched.");
            }

            genAlt(alt);
        }
        println("");
        println("OTHERWISE, a NoViableAlt exception will be thrown");
        println("");

        if (!singleAlt) {
            tabs--;
            println("End of alternatives");
        }
        tabs--;
        println("End of alternative block.");
    }
    /** Generate a textual representation of the follow set
     * for a block.
     * @param blk The rule block of interest
     */
    public void genFollowSetForRuleBlock(RuleBlock blk)
    {
        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
        printSet(grammar.maxk, 1, follow);
    }

    /** Generate a header that is common to all of the diagnostic output files. */
    protected void genHeader()
    {
        println("ANTLR-generated file resulting from grammar " + tool.grammarFile);
        println("Diagnostic output");
        println("");
        println("Terence Parr, MageLang Institute");
        println("with John Lilley, Empathy Software");
        println("ANTLR Version " + ANTLRParser.version + "; 1996,1997");
        println("");
        println("*** Header Action.");
        println("This action will appear at the top of all generated files.");
        tabs++;
        printAction(behavior.headerAction);
        tabs--;
        println("*** End of Header Action");
        println("");
    }

    /** Generate the lookahead set for an alternative. */
    protected void genLookaheadSetForAlt(Alternative alt) {
        if (doingLexRules && alt.cache[1].containsEpsilon()) {
            println("MATCHES ALL");
            return;
        }
        int depth = alt.lookaheadDepth;
        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
            // for a non-deterministic decision, fall back to the maximum fixed lookahead
            depth = grammar.maxk;
        }
        for (int i = 1; i <= depth; i++)
        {
            Lookahead lookahead = alt.cache[i];
            printSet(depth, i, lookahead);
        }
    }
    /** Generate a textual representation of the lookahead set
     * for a block.
     * @param blk The block of interest
     */
    public void genLookaheadSetForBlock(AlternativeBlock blk)
    {
        // Find the maximal lookahead depth over all of the block's alternatives
        int depth = 0;
        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);
            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
                depth = grammar.maxk;
                break;
            }
            else if (depth < alt.lookaheadDepth) {
                depth = alt.lookaheadDepth;
            }
        }

        for (int i = 1; i <= depth; i++)
        {
            Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
            printSet(depth, i, lookahead);
        }
    }
    /** Generate the nextToken rule.
     * nextToken is a synthetic lexer rule that is the implicit OR of all
     * user-defined lexer rules.
     */
    public void genNextToken() {
        println("");
        println("*** Lexer nextToken rule:");
        println("The lexer nextToken rule is synthesized from all of the user-defined");
        println("lexer rules. It logically consists of one big alternative block with");
        println("each user-defined rule being an alternative.");
        println("");

        // Create the synthesized rule block for nextToken from all of the lexer rules
        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");

        // Define the nextToken rule symbol
        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
        nextTokenRs.setDefined();
        nextTokenRs.setBlock(blk);
        nextTokenRs.access = "private";
        grammar.define(nextTokenRs);

        // Check the synthesized block for determinism
        if (!grammar.theLLkAnalyzer.deterministic(blk))
        {
            println("The grammar analyzer has determined that the synthesized");
            println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
            println("This means that there is some overlap of the character");
            println("lookahead for two or more of your lexer rules.");
        }

        genCommonBlock(blk);

        println("*** End of nextToken lexer rule.");
    }
    /** Generate code for a named rule block.
     * @param s The RuleSymbol describing the rule to generate
     */
    public void genRule(RuleSymbol s) {
        println("");
        String ruleType = (doingLexRules ? "Lexer" : "Parser");
        println("*** " + ruleType + " Rule: " + s.getId());
        if (!s.isDefined()) {
            println("This rule is undefined.");
            println("This means that the rule was referenced somewhere in the grammar,");
            println("but a definition for the rule was not encountered.");
            println("It is also possible that syntax errors during the parse of");
            println("your grammar file prevented correct processing of the rule.");
            println("*** End " + ruleType + " Rule: " + s.getId());
            return;
        }
        tabs++;

        if (s.access.length() != 0) {
            println("Access: " + s.access);
        }

        RuleBlock rblk = s.getBlock();

        // Report the rule's return value(s), if any
        if (rblk.returnAction != null) {
            println("Return value(s): " + rblk.returnAction);
            if (doingLexRules) {
                println("Error: you specified return value(s) for a lexical rule.");
                println("\tLexical rules have an implicit return type of 'int'.");
            }
        } else {
            if (doingLexRules) {
                println("Return value: lexical rule returns an implicit token type");
            } else {
                println("Return value: none");
            }
        }

        // Report the rule's arguments, if any
        if (rblk.argAction != null)
        {
            println("Arguments: " + rblk.argAction);
        }

        // Dump any init-action
        genBlockPreamble(rblk);

        // Check the rule for determinism
        boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
        if (!ok) {
            println("Error: This rule is non-deterministic");
        }

        genCommonBlock(rblk);

        // Look for an unlabeled exception specification attached to the rule
        ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");

        if (unlabeledUserSpec != null) {
            println("You specified error-handler(s) for this rule:");
            tabs++;
            for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++)
            {
                if (i != 0) {
                    println("");
                }

                ExceptionHandler handler = (ExceptionHandler)unlabeledUserSpec.handlers.elementAt(i);
                println("Error-handler(" + (i+1) + ") catches [" + handler.exceptionTypeAndName.getText() + "] and executes:");
                printAction(handler.action);
            }
            tabs--;
            println("End error-handlers.");
        }
        else if (!doingLexRules) {
            println("Default error-handling will be generated, which catches all");
            println("parser exceptions and consumes tokens until the follow-set is seen.");
        }

        // The follow set does not apply to lexer rules
        if (!doingLexRules) {
            println("The follow set for this rule is:");
            tabs++;
            genFollowSetForRuleBlock(rblk);
            tabs--;
        }

        tabs--;
        println("*** End " + ruleType + " Rule: " + s.getId());
    }
    /** Generate the syntactic predicate. This basically generates
     * the alternative block, but tracks whether we are inside a synPred.
     * @param blk The syntactic predicate block
     */
    protected void genSynPred(SynPredBlock blk) {
        syntacticPredLevel++;
        gen((AlternativeBlock)blk);
        syntacticPredLevel--;
    }

    /** Generate the token-type documentation file for a token manager. */
    protected void genTokenTypes(TokenManager tm) throws IOException {
        System.out.println("Generating " + tm.getName() + "TokenTypes.txt");
        currentOutput = antlr.Tool.openOutputFile(tm.getName() + "TokenTypes.txt");
        tabs = 0;

        genHeader();

        println("");
        println("*** Tokens used by the parser");
        println("This is a list of the token numeric values and the corresponding");
        println("token identifiers. Some tokens are literals, and because of that");
        println("they have no identifiers. Literals are double-quoted.");
        tabs++;

        // Print each token name and its numeric type
        Vector v = tm.getVocabulary();
        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
            String s = (String)v.elementAt(i);
            if (s != null) {
                println(s + " = " + i);
            }
        }

        tabs--;
        println("*** End of tokens used by the parser");

        currentOutput.close();
        currentOutput = null;
    }
    /** Get a string for an expression to generate creation of an AST subtree.
     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
     */
    public String getASTCreateString(Vector v) {
        return "***Create an AST from a vector here***" + System.getProperty("line.separator");
    }
    /** Get a string for an expression to generate creation of an AST node.
     * @param str The arguments to the AST constructor
     */
    public String getASTCreateString(String str) {
        return "[" + str + "]";
    }
    /** Map an identifier to its corresponding tree-node variable.
     * This is context-sensitive, depending on the rule and alternative
     * being generated.
     * @param id The identifier name to map
     * @param tInfo The action translation information; not used by the diagnostic generator.
     */
    public String mapTreeId(String id, ActionTransInfo tInfo) {
        return id;
    }
    /** Print the lookahead/follow set for a given lookahead depth.
     * @param depth The depth of the entire lookahead/follow
     * @param k The lookahead level to print
     * @param lookahead The lookahead/follow set to print
     */
    public void printSet(int depth, int k, Lookahead lookahead) {
        int numCols = 5;

        int[] elems = lookahead.fset.toArray();

        if (depth != 1) {
            print("k==" + k + ": {");
        } else {
            print("{ ");
        }
        if (elems.length > numCols) {
            _println("");
            tabs++;
            print("");
        }

        int column = 0;
        for (int i = 0; i < elems.length; i++)
        {
            column++;
            if (column > numCols) {
                _println("");
                print("");
                column = 0;
            }
            if (doingLexRules) {
                _print(charFormatter.literalChar(elems[i]));
            } else {
                _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
            }
            if (i != elems.length-1) {
                _print(", ");
            }
        }

        if (elems.length > numCols) {
            _println("");
            tabs--;
            print("");
        }
        _println(" }");
    }
}