package antlr;

/**
 * <b>SOFTWARE RIGHTS</b>
 * <p>
 * ANTLR 2.5.0 MageLang Institute, 1998
 * <p>
 * We reserve no legal rights to the ANTLR--it is fully in the
 * public domain. An individual or company may do whatever
 * they wish with source code distributed with ANTLR or the
 * code generated by ANTLR, including the incorporation of
 * ANTLR, or its output, into commercial software.
 * <p>
 * We encourage users to develop software with ANTLR. However,
 * we do ask that credit is given to us for developing
 * ANTLR. By "credit", we mean that if you use ANTLR or
 * incorporate any source code into one of your programs
 * (commercial product, research project, or otherwise) that
 * you acknowledge this fact somewhere in the documentation,
 * research report, etc... If you like ANTLR and have
 * developed a nice tool with the output, please mention that
 * you developed it using ANTLR. In addition, we ask that the
 * headers remain intact in our source code. As long as these
 * guidelines are kept, we expect to continue enhancing this
 * system and expect to make other tools available as they are
 * completed.
 * <p>
 * The ANTLR gang:
 * @version ANTLR 2.5.0 MageLang Institute, 1998
 * @author Terence Parr, <a href=http://www.MageLang.com>MageLang Institute</a>
 * @author <br>John Lilley, <a href=http://www.Empathy.com>Empathy Software</a>
 */
import java.util.Hashtable;
import antlr.collections.impl.BitSet;

/** DefineGrammarSymbols is a parser behavior that adds all
 * the token and rule symbols to the grammar symbol table.
 *
 * Token types are also assigned to token symbols in this class,
 * in the order in which the tokens are first seen (lexically).
 */
public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior {
    // All defined parser, lexer, and tree-walker grammars, indexed by class name
    protected Hashtable grammars = new Hashtable();
    // All token managers created so far, indexed by name
    protected Hashtable tokenManagers = new Hashtable();
    // The grammar currently being processed
    protected Grammar grammar;
    // The tool under which this behavior was invoked
    protected Tool tool;
    // The grammar analyzer shared by all grammars
    LLkAnalyzer analyzer;
    // The command-line arguments; each grammar processes these as it is created
    String[] args;
    // Name under which the shared default token manager is registered
    static final String DEFAULT_TOKENMANAGER_NAME = "*default";
    // Text of the most recently seen header action
    protected String headerAction = null;
    // Text of the preamble action to attach to the next grammar
    String thePreambleAction = null;
    // Target language for code generation; "Java" by default
    String language = "Java";

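    /** Create a new behavior, remembering the tool, command-line arguments,
     *  and grammar analyzer that the created grammars will share. */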
    public DefineGrammarSymbols(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
        tool = tool_;
        args = args_;
        analyzer = analyzer_;
    }

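    /** Abandon the grammar currently being processed and remove it
     *  from the set of defined grammars. */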
    public void abortGrammar() {
        if (grammar != null && grammar.getClassName() != null) {
            grammars.remove(grammar.getClassName());
        }
        grammar = null;
    }
    public void beginAlt(boolean doAST_) {
    }
    public void beginChildList() {
    }
    public void beginExceptionGroup() {}
    public void beginExceptionSpec(Token label) {}
    public void beginSubRule(Token label, int line, boolean not) {
    }
    public void beginTree(int line) throws SemanticException {
    }

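    /** Define a rule in the current grammar.  Lexer rules (those whose names
     *  begin with an upper-case letter) are also entered into the token
     *  manager and assigned the next available token type. */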
    public void defineRuleName(Token r, String access, boolean ruleAutoGen, String docComment) throws SemanticException {
        String id = r.getText();

        if ( Character.isUpperCase(id.charAt(0)) ) {
            // a lexer rule; mangle the name used in the symbol table
            id = CodeGenerator.lexerRuleName(id);
            // make sure the rule is also defined as a token
            if ( !grammar.tokenManager.tokenDefined(r.getText()) ) {
                int tt = grammar.tokenManager.nextTokenType();
                if (tt != 0) {
                    TokenSymbol ts = new TokenSymbol(r.getText());
                    ts.setTokenType(tt);
                    grammar.tokenManager.define(ts);
                } else {
                    tool.error("You cannot define new tokens when using tokdef", r.getLine());
                }
            }
        }

        RuleSymbol rs;
        if ( grammar.isDefined(id) ) {
            // symbol seen before: either a forward reference or a redefinition
            rs = (RuleSymbol) grammar.getSymbol(id);
            if ( rs.isDefined() ) {
                tool.error("redefinition of rule " + id, r.getLine());
            }
        }
        else {
            rs = new RuleSymbol(id);
            grammar.define(rs);
        }
        rs.setDefined();
        rs.access = access;
        rs.comment = docComment;
    }
    public void endAlt() {
    }
    public void endChildList() {
    }
    public void endExceptionGroup() {}
    public void endExceptionSpec() {}
    public void endGrammar() {
    }
    /** Called after the optional options section to supply defaults for
     * options that may not have been set.
     */
    public void endOptions() {
        if (grammar.tokenManager == null) {
            // no token manager was specified; share the default one
            if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                TokenManager tm = (TokenManager)tokenManagers.get(DEFAULT_TOKENMANAGER_NAME);
                grammar.setTokenManager(tm);
            } else {
                // create and register the default token manager
                TokenManager tm = new SimpleTokenManager(grammar.getClassName(), tool);
                grammar.setTokenManager(tm);
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, grammar.tokenManager);
            }
        }
    }
    public void endRule(String r) {
    }
    public void endSubRule() {
    }
    public void endTree() {
    }
    public void hasError() {
    }
    public void noASTSubRule() {
    }
    public void oneOrMoreSubRule() {
    }
    public void optionalSubRule() {
    }
    public void refAction(Token action) {
    }
    public void refArgAction(Token action) {
    }
    public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
    }
    public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
    }
    public void refExceptionHandler(Token exTypeAndName, String action) {}
    public void refHeaderAction(Token act) {
        headerAction = act.getText();
    }
    public void refInitAction(Token action) {
    }
    public void refMemberAction(Token act) {
    }
    public void refPreambleAction(Token act) {
        thePreambleAction = act.getText();
    }
    public void refReturnAction(Token returnAction) {
    }
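    /** Reference to a rule within a rule.  If the referenced rule has not
     *  been seen yet, a placeholder RuleSymbol is entered so that later
     *  definitions and references resolve to the same symbol. */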
    public void refRule(Token idAssign, Token r, Token label, Token args, int autoGenType) {
        String id = r.getText();
        if ( Character.isUpperCase(id.charAt(0)) ) {
            // lexer rule reference; use the mangled rule name
            id = CodeGenerator.lexerRuleName(id);
        }
        if ( !grammar.isDefined(id) ) {
            grammar.define(new RuleSymbol(id));
        }
    }
    public void refSemPred(Token pred) {
    }
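    /** Reference to a string literal in a parser or tree-walker rule.
     *  A literal that has not been seen before is assigned the next
     *  available token type and entered into the token manager. */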
    public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
        if (!(grammar instanceof LexerGrammar)) {
            String str = lit.getText();
            if ( grammar.tokenManager.getTokenSymbol(str) != null ) {
                // the literal is already defined; nothing to do
                return;
            }
            StringLiteralSymbol sl = new StringLiteralSymbol(str);
            int tt = grammar.tokenManager.nextTokenType();
            if (tt != 0) {
                sl.setTokenType(tt);
                grammar.tokenManager.define(sl);
            } else {
                tool.error("You cannot define new string literals when using tokdef", lit.getLine());
            }
        }
    }

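    /** Reference to a token in a rule.  A token that has not been seen
     *  before is assigned the next available token type and entered
     *  into the token manager. */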
    public void refToken(Token assignId, Token t, Token label, Token args,
                         boolean inverted, int autoGenType, boolean lastInRule) {
        String id = t.getText();
        if ( !grammar.tokenManager.tokenDefined(id) ) {
            int tt = grammar.tokenManager.nextTokenType();
            if (tt != 0) {
                TokenSymbol ts = new TokenSymbol(id);
                ts.setTokenType(tt);
                grammar.tokenManager.define(ts);
            } else {
                tool.error("You cannot define new tokens when using tokdef", t.getLine());
            }
        }
    }
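    /** Reference to a token range.  Each endpoint is treated as an ordinary
     *  token or string-literal reference so that it receives a token type. */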
    public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
        if ( t1.getText().charAt(0) == '"' ) {
            refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
        }
        else {
            refToken(null, t1, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
        }
        if ( t2.getText().charAt(0) == '"' ) {
            refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
        }
        else {
            refToken(null, t2, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
        }
    }
    public void refTreeSpecifier(Token treeSpec) {}
    public void refWildcard(Token t, Token label, int autoGenType) {
    }

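    /** Forget the current grammar so the next grammar definition starts fresh. */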
    public void reset() {
        grammar = null;
    }
    public void setArgOfRuleRef(Token argaction) {
    }

    public void setCharVocabulary(BitSet b) {
        ((LexerGrammar)grammar).setCharVocabulary(b);
    }
    /** Set a file-level option.
     * This applies to options for an entire grammar file.
     * @param key The token containing the option name
     * @param value The token containing the option value.
     */
    public void setFileOption(Token key, Token value)
    {
        if (key.getText().equals("language")) {
            if (value.getType() == ANTLRParser.STRING_LITERAL) {
                language = Tool.stripBack(Tool.stripFront(value.getText(), '"'), '"');
            }
            else if (value.getType() == ANTLRParser.TOKEN_REF || value.getType() == ANTLRParser.RULE_REF) {
                language = value.getText();
            }
            else {
                tool.error("language option must be string or identifier", value.getLine());
            }
        }
        else if (key.getText().equals("mangleLiteralPrefix")) {
            if (value.getType() == ANTLRParser.STRING_LITERAL) {
                tool.literalsPrefix = tool.stripFrontBack(value.getText(), "\"", "\"");
            }
            else {
                tool.error("mangleLiteralPrefix option must be string", value.getLine());
            }
        }
        else if (key.getText().equals("upperCaseMangledLiterals")) {
            if (value.getText().equals("true")) {
                tool.upperCaseMangledLiterals = true;
            } else if (value.getText().equals("false")) {
                tool.upperCaseMangledLiterals = false;
            } else {
                grammar.tool.error("Value for upperCaseMangledLiterals must be true or false", key.getLine());
            }
        }
        else {
            tool.error("Invalid file-level option: " + key.getText(), key.getLine());
        }
    }
    /** Set an option associated with the current grammar.
     * This function forwards to Grammar.setOption for some options.
     * @param key The token containing the option name
     * @param value The token containing the option value.
     */
    public void setGrammarOption(Token key, Token value)
    {
        if (key.getText().equals("tokenVocabulary")) {
            if (grammar.tokenManager != null) {
                tool.error("Only one tokdef or tokenVocabulary option may be specified", value.getLine());
            }
            else {
                if (tokenManagers.containsKey(value.getText())) {
                    // share the previously created token manager of this name
                    TokenManager tm = (TokenManager)tokenManagers.get(value.getText());
                    if (tm instanceof SimpleTokenManager) {
                        grammar.setTokenManager(tm);
                    }
                    else {
                        tool.error("'" + value.getText() + "' is already defined as something else", value.getLine());
                    }
                }
                else {
                    if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
                        // create and register a token manager under this name
                        SimpleTokenManager tm = new SimpleTokenManager(value.getText(), tool);
                        tokenManagers.put(tm.getName(), tm);
                        grammar.setTokenManager(tm);
                    }
                    else {
                        tool.error("tokenVocabulary must be an identifier", value.getLine());
                    }
                }
            }
        }
        else if (key.getText().equals("tokdef")) {
            if (grammar instanceof LexerGrammar) {
                tool.error("tokdef= option cannot be used with a lexer", key.getLine());
            }
            else if (grammar.tokenManager != null) {
                tool.error("Only one tokdef or tokenVocabulary option may be specified", value.getLine());
            }
            else {
                if (tokenManagers.containsKey(value.getText())) {
                    // share the previously created token manager of this name
                    TokenManager tm = (TokenManager)tokenManagers.get(value.getText());
                    if (tm instanceof TokdefTokenManager) {
                        grammar.setTokenManager(tm);
                    }
                    else {
                        tool.error("'" + value.getText() + "' is already defined as something else", value.getLine());
                    }
                }
                else {
                    if (value.getType() == ANTLRParser.STRING_LITERAL) {
                        // strip the quotes from the filename
                        String filename = value.getText();
                        filename = filename.substring(1, filename.length() - 1);
                        TokdefTokenManager tm = new TokdefTokenManager(grammar, filename, tool);
                        tokenManagers.put(tm.getName(), tm);
                        grammar.setTokenManager(tm);
                    }
                    else {
                        tool.error("tokdef filename must be double-quoted", value.getLine());
                    }
                }
            }
        }
        else {
            // all other options are handled by the grammar itself
            grammar.setOption(key.getText(), value);
        }
    }
    public void setRuleOption(Token key, Token value) {
    }
    public void setSubruleOption(Token key, Token value) {
    }

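    /** Start a lexer grammar.  Panics if a grammar of this name is already
     *  defined; otherwise creates the LexerGrammar, lets it process the
     *  command-line arguments, and registers it. */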
    public void startLexer(Token name, String superClass, String doc)
    {
        reset();
        Grammar g = (Grammar)grammars.get(name);
        if (g != null) {
            if (!(g instanceof LexerGrammar)) {
                tool.panic("'" + name.getText() + "' is already defined as a non-lexer");
            } else {
                tool.panic("Lexer '" + name.getText() + "' is already defined");
            }
        }
        else {
            LexerGrammar lg = new LexerGrammar(name.getText(), tool, superClass);
            lg.comment = doc;
            lg.processArguments(args);
            grammars.put(lg.getClassName(), lg);
            lg.preambleAction = thePreambleAction;
            thePreambleAction = null;
            grammar = lg;
        }
    }

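    /** Start a parser grammar.  Panics if a grammar of this name is already
     *  defined; otherwise creates and registers the ParserGrammar. */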
    public void startParser(Token name, String superClass, String doc)
    {
        reset();
        Grammar g = (Grammar)grammars.get(name);
        if (g != null) {
            if (!(g instanceof ParserGrammar)) {
                tool.panic("'" + name.getText() + "' is already defined as a non-parser");
            } else {
                tool.panic("Parser '" + name.getText() + "' is already defined");
            }
        }
        else {
            grammar = new ParserGrammar(name.getText(), tool, superClass);
            grammar.comment = doc;
            grammar.processArguments(args);
            grammars.put(grammar.getClassName(), grammar);
            grammar.preambleAction = thePreambleAction;
            thePreambleAction = null;
        }
    }

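    /** Start a tree-walker grammar.  Panics if a grammar of this name is
     *  already defined; otherwise creates and registers the TreeWalkerGrammar. */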
    public void startTreeWalker(Token name, String superClass, String doc)
    {
        reset();
        Grammar g = (Grammar)grammars.get(name);
        if (g != null) {
            if (!(g instanceof TreeWalkerGrammar)) {
                tool.panic("'" + name.getText() + "' is already defined as a non-tree-walker");
            } else {
                tool.panic("Tree-walker '" + name.getText() + "' is already defined");
            }
        }
        else {
            grammar = new TreeWalkerGrammar(name.getText(), tool, superClass);
            grammar.comment = doc;
            grammar.processArguments(args);
            grammars.put(grammar.getClassName(), grammar);
            grammar.preambleAction = thePreambleAction;
            thePreambleAction = null;
        }
    }
    public void synPred() {
    }
    public void zeroOrMoreSubRule() {
    }
}