2 # Scanner produces tokens of the following types:
5 # DIRECTIVE(name, value)
22 # SCALAR(value, plain, style)
24 # Read comments in the Scanner code for more details.
27 __all__ = ['Scanner', 'ScannerError']
29 from .error import MarkedYAMLError
32 class ScannerError(MarkedYAMLError):
36 # See below simple keys treatment.
38 def __init__(self, token_number, required, index, line, column, mark):
39 self.token_number = token_number
40 self.required = required
49 """Initialize the scanner."""
50 # It is assumed that Scanner and Reader will have a common descendant.
51 # Reader does the dirty work of checking for BOM and converting the
52 # input data to Unicode. It also adds NUL to the end.
54 # Reader supports the following methods
55 # self.peek(i=0) # peek the next i-th character
56 # self.prefix(l=1) # peek the next l characters
57 # self.forward(l=1) # read the next l characters and move the pointer.
59 # Have we reached the end of the stream?
62 # The number of unclosed '{' and '['. `flow_level == 0` means block
66 # List of processed tokens that are not yet emitted.
69 # Add the STREAM-START token.
70 self.fetch_stream_start()
72 # Number of tokens that were emitted through the `get_token` method.
75 # The current indentation level.
78 # Past indentation levels.
81 # Variables related to simple keys treatment.
83 # A simple key is a key that is not denoted by the '?' indicator.
84 # Example of simple keys:
86 # block simple key: value
88 # : { flow simple key: value }
89 # We emit the KEY token before all keys, so when we find a potential
90 # simple key, we try to locate the corresponding ':' indicator.
91 # Simple keys should be limited to a single line and 1024 characters.
93 # Can a simple key start at the current position? A simple key may
95 # - at the beginning of the line, not counting indentation spaces
97 # - after '{', '[', ',' (in the flow context),
98 # - after '?', ':', '-' (in the block context).
99 # In the block context, this flag also signifies if a block collection
100 # may start at the current position.
101 self.allow_simple_key = True
103 # Keep track of possible simple keys. This is a dictionary. The key
104 # is `flow_level`; there can be no more than one possible simple key
105 # for each level. The value is a SimpleKey record:
106 # (token_number, required, index, line, column, mark)
107 # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
108 # '[', or '{' tokens.
109 self.possible_simple_keys = {}
113 def check_token(self, *choices):
114 # Check if the next token is one of the given types.
115 while self.need_more_tokens():
116 self.fetch_more_tokens()
120 for choice in choices:
121 if isinstance(self.tokens[0], choice):
125 def peek_token(self):
126 # Return the next token, but do not delete it from the queue.
127 while self.need_more_tokens():
128 self.fetch_more_tokens()
130 return self.tokens[0]
133 # Return the next token.
134 while self.need_more_tokens():
135 self.fetch_more_tokens()
137 self.tokens_taken += 1
138 return self.tokens.pop(0)
142 def need_more_tokens(self):
147 # The current token may be a potential simple key, so we
148 # need to look further.
149 self.stale_possible_simple_keys()
150 if self.next_possible_simple_key() == self.tokens_taken:
153 def fetch_more_tokens(self):
155 # Eat whitespaces and comments until we reach the next token.
156 self.scan_to_next_token()
158 # Remove obsolete possible simple keys.
159 self.stale_possible_simple_keys()
161 # Compare the current indentation and column. It may add some tokens
162 # and decrease the current indentation level.
163 self.unwind_indent(self.column)
165 # Peek the next character.
168 # Is it the end of stream?
170 return self.fetch_stream_end()
173 if ch == '%' and self.check_directive():
174 return self.fetch_directive()
176 # Is it the document start?
177 if ch == '-' and self.check_document_start():
178 return self.fetch_document_start()
180 # Is it the document end?
181 if ch == '.' and self.check_document_end():
182 return self.fetch_document_end()
184 # TODO: support for BOM within a stream.
186 # return self.fetch_bom() <-- issue BOMToken
188 # Note: the order of the following checks is NOT significant.
190 # Is it the flow sequence start indicator?
192 return self.fetch_flow_sequence_start()
194 # Is it the flow mapping start indicator?
196 return self.fetch_flow_mapping_start()
198 # Is it the flow sequence end indicator?
200 return self.fetch_flow_sequence_end()
202 # Is it the flow mapping end indicator?
204 return self.fetch_flow_mapping_end()
206 # Is it the flow entry indicator?
208 return self.fetch_flow_entry()
210 # Is it the block entry indicator?
211 if ch == '-' and self.check_block_entry():
212 return self.fetch_block_entry()
214 # Is it the key indicator?
215 if ch == '?' and self.check_key():
216 return self.fetch_key()
218 # Is it the value indicator?
219 if ch == ':' and self.check_value():
220 return self.fetch_value()
224 return self.fetch_alias()
228 return self.fetch_anchor()
232 return self.fetch_tag()
234 # Is it a literal scalar?
235 if ch == '|' and not self.flow_level:
236 return self.fetch_literal()
238 # Is it a folded scalar?
239 if ch == '>' and not self.flow_level:
240 return self.fetch_folded()
242 # Is it a single quoted scalar?
244 return self.fetch_single()
246 # Is it a double quoted scalar?
248 return self.fetch_double()
250 # It must be a plain scalar then.
251 if self.check_plain():
252 return self.fetch_plain()
254 # No? It's an error. Let's produce a nice error message.
255 raise ScannerError("while scanning for the next token", None,
256 "found character %r that cannot start any token" % ch,
259 # Simple keys treatment.
def next_possible_simple_key(self):
    """Return the token number of the nearest pending simple key, or None.

    There is at most one simple-key candidate per flow level; the nearest
    one is the candidate with the smallest token number.
    """
    candidates = self.possible_simple_keys.values()
    if not candidates:
        return None
    return min(key.token_number for key in candidates)
276 def stale_possible_simple_keys(self):
277 # Remove entries that are no longer possible simple keys. According to
278 # the YAML specification, simple keys
279 # - should be limited to a single line,
280 # - should be no longer than 1024 characters.
281 # Disabling this procedure will allow simple keys of any length and
282 # height (may cause problems if indentation is broken though).
283 for level in list(self.possible_simple_keys):
284 key = self.possible_simple_keys[level]
285 if key.line != self.line \
286 or self.index-key.index > 1024:
288 raise ScannerError("while scanning a simple key", key.mark,
289 "could not found expected ':'", self.get_mark())
290 del self.possible_simple_keys[level]
def save_possible_simple_key(self):
    """Record the current position as a potential simple-key start.

    Called just before fetching ALIAS, ANCHOR, TAG, SCALAR(flow),
    '[' and '{' — the tokens that may open a simple key.
    """
    # A simple key is *required* only when it is the first token on a
    # line in the block context.
    required = not self.flow_level and self.indent == self.column

    # A required key position must also be an allowed one, since it is
    # always the first token on its line.
    assert self.allow_simple_key or not required

    if self.allow_simple_key:
        # Replace any stale candidate at this flow level with a fresh
        # record of the upcoming token's position.
        self.remove_possible_simple_key()
        token_number = self.tokens_taken + len(self.tokens)
        self.possible_simple_keys[self.flow_level] = SimpleKey(
            token_number, required,
            self.index, self.line, self.column, self.get_mark())
313 def remove_possible_simple_key(self):
314 # Remove the saved possible key position at the current flow level.
315 if self.flow_level in self.possible_simple_keys:
316 key = self.possible_simple_keys[self.flow_level]
319 raise ScannerError("while scanning a simple key", key.mark,
320 "could not found expected ':'", self.get_mark())
322 del self.possible_simple_keys[self.flow_level]
324 # Indentation functions.
326 def unwind_indent(self, column):
328 ## In flow context, tokens should respect indentation.
329 ## Actually the condition should be `self.indent >= column` according to
330 ## the spec. But this condition will prohibit intuitively correct
331 ## constructions such as
334 #if self.flow_level and self.indent > column:
335 # raise ScannerError(None, None,
336 # "invalid indentation or unclosed '[' or '{'",
339 # In the flow context, indentation is ignored. We make the scanner less
340 # restrictive than the specification requires.
344 # In block context, we may need to issue the BLOCK-END tokens.
345 while self.indent > column:
346 mark = self.get_mark()
347 self.indent = self.indents.pop()
348 self.tokens.append(BlockEndToken(mark, mark))
350 def add_indent(self, column):
351 # Check if we need to increase indentation.
352 if self.indent < column:
353 self.indents.append(self.indent)
def fetch_stream_start(self):
    """Emit the STREAM-START token that opens every token stream."""
    # STREAM-START carries the encoding detected by the reader; its
    # start and end marks coincide since it consumes no characters.
    mark = self.get_mark()
    self.tokens.append(
        StreamStartToken(mark, mark, encoding=self.encoding))
372 def fetch_stream_end(self):
374 # Set the current indentation to -1.
375 self.unwind_indent(-1)
378 self.remove_possible_simple_key()
379 self.allow_simple_key = False
380 self.possible_simple_keys = {}
383 mark = self.get_mark()
386 self.tokens.append(StreamEndToken(mark, mark))
388 # The stream is finished.
def fetch_directive(self):
    """Fetch a %YAML or %TAG directive token."""
    # A directive terminates any open block collection: unwind to the
    # sentinel indentation level.
    self.unwind_indent(-1)

    # No simple key may start at or after a directive.
    self.remove_possible_simple_key()
    self.allow_simple_key = False

    self.tokens.append(self.scan_directive())
def fetch_document_start(self):
    """Handle the '---' marker that opens a document."""
    self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
    """Handle the '...' marker that closes a document."""
    self.fetch_document_indicator(DocumentEndToken)
409 def fetch_document_indicator(self, TokenClass):
411 # Set the current indentation to -1.
412 self.unwind_indent(-1)
414 # Reset simple keys. Note that there could not be a block collection
416 self.remove_possible_simple_key()
417 self.allow_simple_key = False
419 # Add DOCUMENT-START or DOCUMENT-END.
420 start_mark = self.get_mark()
422 end_mark = self.get_mark()
423 self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
    """Handle '[': open a flow sequence."""
    self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
    """Handle '{': open a flow mapping."""
    self.fetch_flow_collection_start(FlowMappingStartToken)
431 def fetch_flow_collection_start(self, TokenClass):
433 # '[' and '{' may start a simple key.
434 self.save_possible_simple_key()
436 # Increase the flow level.
439 # Simple keys are allowed after '[' and '{'.
440 self.allow_simple_key = True
442 # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
443 start_mark = self.get_mark()
445 end_mark = self.get_mark()
446 self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
    """Handle ']': close a flow sequence."""
    self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
    """Handle '}': close a flow mapping."""
    self.fetch_flow_collection_end(FlowMappingEndToken)
454 def fetch_flow_collection_end(self, TokenClass):
456 # Reset possible simple key on the current level.
457 self.remove_possible_simple_key()
459 # Decrease the flow level.
462 # No simple keys after ']' or '}'.
463 self.allow_simple_key = False
465 # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
466 start_mark = self.get_mark()
468 end_mark = self.get_mark()
469 self.tokens.append(TokenClass(start_mark, end_mark))
471 def fetch_flow_entry(self):
473 # Simple keys are allowed after ','.
474 self.allow_simple_key = True
476 # Reset possible simple key on the current level.
477 self.remove_possible_simple_key()
480 start_mark = self.get_mark()
482 end_mark = self.get_mark()
483 self.tokens.append(FlowEntryToken(start_mark, end_mark))
485 def fetch_block_entry(self):
487 # Block context needs additional checks.
488 if not self.flow_level:
490 # Are we allowed to start a new entry?
491 if not self.allow_simple_key:
492 raise ScannerError(None, None,
493 "sequence entries are not allowed here",
496 # We may need to add BLOCK-SEQUENCE-START.
497 if self.add_indent(self.column):
498 mark = self.get_mark()
499 self.tokens.append(BlockSequenceStartToken(mark, mark))
501 # It's an error for the block entry to occur in the flow context,
502 # but we let the parser detect this.
506 # Simple keys are allowed after '-'.
507 self.allow_simple_key = True
509 # Reset possible simple key on the current level.
510 self.remove_possible_simple_key()
513 start_mark = self.get_mark()
515 end_mark = self.get_mark()
516 self.tokens.append(BlockEntryToken(start_mark, end_mark))
520 # Block context needs additional checks.
521 if not self.flow_level:
523 # Are we allowed to start a key (not necessarily a simple)?
524 if not self.allow_simple_key:
525 raise ScannerError(None, None,
526 "mapping keys are not allowed here",
529 # We may need to add BLOCK-MAPPING-START.
530 if self.add_indent(self.column):
531 mark = self.get_mark()
532 self.tokens.append(BlockMappingStartToken(mark, mark))
534 # Simple keys are allowed after '?' in the block context.
535 self.allow_simple_key = not self.flow_level
537 # Reset possible simple key on the current level.
538 self.remove_possible_simple_key()
541 start_mark = self.get_mark()
543 end_mark = self.get_mark()
544 self.tokens.append(KeyToken(start_mark, end_mark))
546 def fetch_value(self):
548 # Do we determine a simple key?
549 if self.flow_level in self.possible_simple_keys:
552 key = self.possible_simple_keys[self.flow_level]
553 del self.possible_simple_keys[self.flow_level]
554 self.tokens.insert(key.token_number-self.tokens_taken,
555 KeyToken(key.mark, key.mark))
557 # If this key starts a new block mapping, we need to add
558 # BLOCK-MAPPING-START.
559 if not self.flow_level:
560 if self.add_indent(key.column):
561 self.tokens.insert(key.token_number-self.tokens_taken,
562 BlockMappingStartToken(key.mark, key.mark))
564 # There cannot be two simple keys one after another.
565 self.allow_simple_key = False
567 # It must be a part of a complex key.
570 # Block context needs additional checks.
571 # (Do we really need them? They will be caught by the parser
573 if not self.flow_level:
575 # We are allowed to start a complex value if and only if
576 # we can start a simple key.
577 if not self.allow_simple_key:
578 raise ScannerError(None, None,
579 "mapping values are not allowed here",
582 # If this value starts a new block mapping, we need to add
583 # BLOCK-MAPPING-START. It will be detected as an error later by
585 if not self.flow_level:
586 if self.add_indent(self.column):
587 mark = self.get_mark()
588 self.tokens.append(BlockMappingStartToken(mark, mark))
590 # Simple keys are allowed after ':' in the block context.
591 self.allow_simple_key = not self.flow_level
593 # Reset possible simple key on the current level.
594 self.remove_possible_simple_key()
597 start_mark = self.get_mark()
599 end_mark = self.get_mark()
600 self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
    """Fetch an alias node ('*name')."""
    # An alias may itself serve as a simple key...
    self.save_possible_simple_key()

    # ...but nothing immediately after it may start one.
    self.allow_simple_key = False

    self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
    """Fetch an anchor ('&name')."""
    # An anchor may open a simple key...
    self.save_possible_simple_key()

    # ...but nothing immediately after it may start one.
    self.allow_simple_key = False

    self.tokens.append(self.scan_anchor(AnchorToken))
626 # TAG could start a simple key.
627 self.save_possible_simple_key()
629 # No simple keys after TAG.
630 self.allow_simple_key = False
633 self.tokens.append(self.scan_tag())
def fetch_literal(self):
    """Fetch a literal block scalar ('|')."""
    self.fetch_block_scalar('|')
def fetch_folded(self):
    """Fetch a folded block scalar ('>')."""
    self.fetch_block_scalar('>')
def fetch_block_scalar(self, style):
    """Fetch a block scalar; *style* is '|' (literal) or '>' (folded)."""
    # A simple key may start on the line following a block scalar.
    self.allow_simple_key = True

    # Any pending key candidate is no longer valid.
    self.remove_possible_simple_key()

    self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
    """Fetch a single-quoted flow scalar."""
    self.fetch_flow_scalar("'")
def fetch_double(self):
    """Fetch a double-quoted flow scalar."""
    self.fetch_flow_scalar('"')
def fetch_flow_scalar(self, style):
    """Fetch a quoted scalar; *style* is the quote character."""
    # The quoted scalar itself may be a simple key...
    self.save_possible_simple_key()

    # ...but no simple key may start right after it.
    self.allow_simple_key = False

    self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
    """Fetch a plain (unquoted) scalar."""
    # A plain scalar may be a simple key.
    self.save_possible_simple_key()

    # No simple key may follow it; note that scan_plain() re-enables
    # the flag when the scan stops at the start of a fresh line.
    self.allow_simple_key = False

    self.tokens.append(self.scan_plain())
684 def check_directive(self):
686 # DIRECTIVE: ^ '%' ...
687 # The '%' indicator is already checked.
691 def check_document_start(self):
693 # DOCUMENT-START: ^ '---' (' '|'\n')
695 if self.prefix(3) == '---' \
696 and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
699 def check_document_end(self):
701 # DOCUMENT-END: ^ '...' (' '|'\n')
703 if self.prefix(3) == '...' \
704 and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
def check_block_entry(self):
    """Return True if '-' at the cursor starts a block entry.

    BLOCK-ENTRY is '-' followed by whitespace, a line break, or the
    end-of-stream NUL.
    """
    return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
714 # KEY(flow context): '?'
718 # KEY(block context): '?' (' '|'\n')
720 return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
722 def check_value(self):
724 # VALUE(flow context): ':'
728 # VALUE(block context): ':' (' '|'\n')
730 return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
732 def check_plain(self):
734 # A plain scalar may start with any non-space character except:
735 # '-', '?', ':', ',', '[', ']', '{', '}',
736 # '#', '&', '*', '!', '|', '>', '\'', '\"',
739 # It may also start with
741 # if it is followed by a non-space character.
743 # Note that we limit the last rule to the block context (except the
744 # '-' character) because we want the flow context to be space
747 return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
748 or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
749 and (ch == '-' or (not self.flow_level and ch in '?:')))
753 def scan_to_next_token(self):
754 # We ignore spaces, line breaks and comments.
755 # If we find a line break in the block context, we set the flag
756 # `allow_simple_key` on.
757 # The byte order mark is stripped if it's the first character in the
758 # stream. We do not yet support BOM inside the stream as the
759 # specification requires. Any such mark will be considered as a part
762 # TODO: We need to make tab handling rules more sane. A good rule is
763 # Tabs cannot precede tokens
764 # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
765 # KEY(block), VALUE(block), BLOCK-ENTRY
766 # So the checking code is
768 # self.allow_simple_keys = False
769 # We also need to add the check for `allow_simple_keys == True` to
770 # `unwind_indent` before issuing BLOCK-END.
771 # Scanners for block, flow, and plain scalars need to be modified.
773 if self.index == 0 and self.peek() == '\uFEFF':
777 while self.peek() == ' ':
779 if self.peek() == '#':
780 while self.peek() not in '\0\r\n\x85\u2028\u2029':
782 if self.scan_line_break():
783 if not self.flow_level:
784 self.allow_simple_key = True
788 def scan_directive(self):
789 # See the specification for details.
790 start_mark = self.get_mark()
792 name = self.scan_directive_name(start_mark)
795 value = self.scan_yaml_directive_value(start_mark)
796 end_mark = self.get_mark()
798 value = self.scan_tag_directive_value(start_mark)
799 end_mark = self.get_mark()
801 end_mark = self.get_mark()
802 while self.peek() not in '\0\r\n\x85\u2028\u2029':
804 self.scan_directive_ignored_line(start_mark)
805 return DirectiveToken(name, value, start_mark, end_mark)
807 def scan_directive_name(self, start_mark):
808 # See the specification for details.
810 ch = self.peek(length)
811 while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
814 ch = self.peek(length)
816 raise ScannerError("while scanning a directive", start_mark,
817 "expected alphabetic or numeric character, but found %r"
818 % ch, self.get_mark())
819 value = self.prefix(length)
822 if ch not in '\0 \r\n\x85\u2028\u2029':
823 raise ScannerError("while scanning a directive", start_mark,
824 "expected alphabetic or numeric character, but found %r"
825 % ch, self.get_mark())
828 def scan_yaml_directive_value(self, start_mark):
829 # See the specification for details.
830 while self.peek() == ' ':
832 major = self.scan_yaml_directive_number(start_mark)
833 if self.peek() != '.':
834 raise ScannerError("while scanning a directive", start_mark,
835 "expected a digit or '.', but found %r" % self.peek(),
838 minor = self.scan_yaml_directive_number(start_mark)
839 if self.peek() not in '\0 \r\n\x85\u2028\u2029':
840 raise ScannerError("while scanning a directive", start_mark,
841 "expected a digit or ' ', but found %r" % self.peek(),
843 return (major, minor)
845 def scan_yaml_directive_number(self, start_mark):
846 # See the specification for details.
848 if not ('0' <= ch <= '9'):
849 raise ScannerError("while scanning a directive", start_mark,
850 "expected a digit, but found %r" % ch, self.get_mark())
852 while '0' <= self.peek(length) <= '9':
854 value = int(self.prefix(length))
858 def scan_tag_directive_value(self, start_mark):
859 # See the specification for details.
860 while self.peek() == ' ':
862 handle = self.scan_tag_directive_handle(start_mark)
863 while self.peek() == ' ':
865 prefix = self.scan_tag_directive_prefix(start_mark)
866 return (handle, prefix)
868 def scan_tag_directive_handle(self, start_mark):
869 # See the specification for details.
870 value = self.scan_tag_handle('directive', start_mark)
873 raise ScannerError("while scanning a directive", start_mark,
874 "expected ' ', but found %r" % ch, self.get_mark())
877 def scan_tag_directive_prefix(self, start_mark):
878 # See the specification for details.
879 value = self.scan_tag_uri('directive', start_mark)
881 if ch not in '\0 \r\n\x85\u2028\u2029':
882 raise ScannerError("while scanning a directive", start_mark,
883 "expected ' ', but found %r" % ch, self.get_mark())
886 def scan_directive_ignored_line(self, start_mark):
887 # See the specification for details.
888 while self.peek() == ' ':
890 if self.peek() == '#':
891 while self.peek() not in '\0\r\n\x85\u2028\u2029':
894 if ch not in '\0\r\n\x85\u2028\u2029':
895 raise ScannerError("while scanning a directive", start_mark,
896 "expected a comment or a line break, but found %r"
897 % ch, self.get_mark())
898 self.scan_line_break()
900 def scan_anchor(self, TokenClass):
901 # The specification does not restrict characters for anchors and
902 # aliases. This may lead to problems, for instance, the document:
904 # can be interpreted in two ways, as
907 # [ *alias , "value" ]
908 # Therefore we restrict aliases to numbers and ASCII letters.
909 start_mark = self.get_mark()
910 indicator = self.peek()
917 ch = self.peek(length)
918 while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
921 ch = self.peek(length)
923 raise ScannerError("while scanning an %s" % name, start_mark,
924 "expected alphabetic or numeric character, but found %r"
925 % ch, self.get_mark())
926 value = self.prefix(length)
929 if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
930 raise ScannerError("while scanning an %s" % name, start_mark,
931 "expected alphabetic or numeric character, but found %r"
932 % ch, self.get_mark())
933 end_mark = self.get_mark()
934 return TokenClass(value, start_mark, end_mark)
937 # See the specification for details.
938 start_mark = self.get_mark()
943 suffix = self.scan_tag_uri('tag', start_mark)
944 if self.peek() != '>':
945 raise ScannerError("while parsing a tag", start_mark,
946 "expected '>', but found %r" % self.peek(),
949 elif ch in '\0 \t\r\n\x85\u2028\u2029':
956 while ch not in '\0 \r\n\x85\u2028\u2029':
961 ch = self.peek(length)
964 handle = self.scan_tag_handle('tag', start_mark)
968 suffix = self.scan_tag_uri('tag', start_mark)
970 if ch not in '\0 \r\n\x85\u2028\u2029':
971 raise ScannerError("while scanning a tag", start_mark,
972 "expected ' ', but found %r" % ch, self.get_mark())
973 value = (handle, suffix)
974 end_mark = self.get_mark()
975 return TagToken(value, start_mark, end_mark)
977 def scan_block_scalar(self, style):
978 # See the specification for details.
986 start_mark = self.get_mark()
990 chomping, increment = self.scan_block_scalar_indicators(start_mark)
991 self.scan_block_scalar_ignored_line(start_mark)
993 # Determine the indentation level and go to the first non-empty line.
994 min_indent = self.indent+1
997 if increment is None:
998 breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
999 indent = max(min_indent, max_indent)
1001 indent = min_indent+increment-1
1002 breaks, end_mark = self.scan_block_scalar_breaks(indent)
1005 # Scan the inner part of the block scalar.
1006 while self.column == indent and self.peek() != '\0':
1007 chunks.extend(breaks)
1008 leading_non_space = self.peek() not in ' \t'
1010 while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
1012 chunks.append(self.prefix(length))
1013 self.forward(length)
1014 line_break = self.scan_line_break()
1015 breaks, end_mark = self.scan_block_scalar_breaks(indent)
1016 if self.column == indent and self.peek() != '\0':
1018 # Unfortunately, folding rules are ambiguous.
1020 # This is the folding according to the specification:
1022 if folded and line_break == '\n' \
1023 and leading_non_space and self.peek() not in ' \t':
1027 chunks.append(line_break)
1029 # This is Clark Evans's interpretation (also in the spec
1032 #if folded and line_break == '\n':
1034 # if self.peek() not in ' \t':
1035 # chunks.append(' ')
1037 # chunks.append(line_break)
1039 # chunks.append(line_break)
1044 if chomping is not False:
1045 chunks.append(line_break)
1046 if chomping is True:
1047 chunks.extend(breaks)
1050 return ScalarToken(''.join(chunks), False, start_mark, end_mark,
1053 def scan_block_scalar_indicators(self, start_mark):
1054 # See the specification for details.
1065 if ch in '0123456789':
1068 raise ScannerError("while scanning a block scalar", start_mark,
1069 "expected indentation indicator in the range 1-9, but found 0",
1072 elif ch in '0123456789':
1075 raise ScannerError("while scanning a block scalar", start_mark,
1076 "expected indentation indicator in the range 1-9, but found 0",
1087 if ch not in '\0 \r\n\x85\u2028\u2029':
1088 raise ScannerError("while scanning a block scalar", start_mark,
1089 "expected chomping or indentation indicators, but found %r"
1090 % ch, self.get_mark())
1091 return chomping, increment
1093 def scan_block_scalar_ignored_line(self, start_mark):
1094 # See the specification for details.
1095 while self.peek() == ' ':
1097 if self.peek() == '#':
1098 while self.peek() not in '\0\r\n\x85\u2028\u2029':
1101 if ch not in '\0\r\n\x85\u2028\u2029':
1102 raise ScannerError("while scanning a block scalar", start_mark,
1103 "expected a comment or a line break, but found %r" % ch,
1105 self.scan_line_break()
1107 def scan_block_scalar_indentation(self):
1108 # See the specification for details.
1111 end_mark = self.get_mark()
1112 while self.peek() in ' \r\n\x85\u2028\u2029':
1113 if self.peek() != ' ':
1114 chunks.append(self.scan_line_break())
1115 end_mark = self.get_mark()
1118 if self.column > max_indent:
1119 max_indent = self.column
1120 return chunks, max_indent, end_mark
1122 def scan_block_scalar_breaks(self, indent):
1123 # See the specification for details.
1125 end_mark = self.get_mark()
1126 while self.column < indent and self.peek() == ' ':
1128 while self.peek() in '\r\n\x85\u2028\u2029':
1129 chunks.append(self.scan_line_break())
1130 end_mark = self.get_mark()
1131 while self.column < indent and self.peek() == ' ':
1133 return chunks, end_mark
1135 def scan_flow_scalar(self, style):
1136 # See the specification for details.
1137 # Note that we loosen indentation rules for quoted scalars. Quoted
1138 # scalars don't need to adhere indentation because " and ' clearly
1139 # mark the beginning and the end of them. Therefore we are less
1140 # restrictive than the specification requires. We only need to check
1141 # that document separators are not included in scalars.
1147 start_mark = self.get_mark()
1150 chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
1151 while self.peek() != quote:
1152 chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
1153 chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
1155 end_mark = self.get_mark()
1156 return ScalarToken(''.join(chunks), False, start_mark, end_mark,
1159 ESCAPE_REPLACEMENTS = {
1185 def scan_flow_scalar_non_spaces(self, double, start_mark):
1186 # See the specification for details.
1190 while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
1193 chunks.append(self.prefix(length))
1194 self.forward(length)
1196 if not double and ch == '\'' and self.peek(1) == '\'':
1199 elif (double and ch == '\'') or (not double and ch in '\"\\'):
1202 elif double and ch == '\\':
1205 if ch in self.ESCAPE_REPLACEMENTS:
1206 chunks.append(self.ESCAPE_REPLACEMENTS[ch])
1208 elif ch in self.ESCAPE_CODES:
1209 length = self.ESCAPE_CODES[ch]
1211 for k in range(length):
1212 if self.peek(k) not in '0123456789ABCDEFabcdef':
1213 raise ScannerError("while scanning a double-quoted scalar", start_mark,
1214 "expected escape sequence of %d hexdecimal numbers, but found %r" %
1215 (length, self.peek(k)), self.get_mark())
1216 code = int(self.prefix(length), 16)
1217 chunks.append(chr(code))
1218 self.forward(length)
1219 elif ch in '\r\n\x85\u2028\u2029':
1220 self.scan_line_break()
1221 chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
1223 raise ScannerError("while scanning a double-quoted scalar", start_mark,
1224 "found unknown escape character %r" % ch, self.get_mark())
1228 def scan_flow_scalar_spaces(self, double, start_mark):
1229 # See the specification for details.
1232 while self.peek(length) in ' \t':
1234 whitespaces = self.prefix(length)
1235 self.forward(length)
1238 raise ScannerError("while scanning a quoted scalar", start_mark,
1239 "found unexpected end of stream", self.get_mark())
1240 elif ch in '\r\n\x85\u2028\u2029':
1241 line_break = self.scan_line_break()
1242 breaks = self.scan_flow_scalar_breaks(double, start_mark)
1243 if line_break != '\n':
1244 chunks.append(line_break)
1247 chunks.extend(breaks)
1249 chunks.append(whitespaces)
1252 def scan_flow_scalar_breaks(self, double, start_mark):
1253 # See the specification for details.
1256 # Instead of checking indentation, we check for document
1258 prefix = self.prefix(3)
1259 if (prefix == '---' or prefix == '...') \
1260 and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
1261 raise ScannerError("while scanning a quoted scalar", start_mark,
1262 "found unexpected document separator", self.get_mark())
1263 while self.peek() in ' \t':
1265 if self.peek() in '\r\n\x85\u2028\u2029':
1266 chunks.append(self.scan_line_break())
1270 def scan_plain(self):
1271 # See the specification for details.
1272 # We add an additional restriction for the flow context:
1273 # plain scalars in the flow context cannot contain ',', ':' and '?'.
1274 # We also keep track of the `allow_simple_key` flag here.
1275 # Indentation rules are loosened for the flow context.
1277 start_mark = self.get_mark()
1278 end_mark = start_mark
1279 indent = self.indent+1
1280 # We allow zero indentation for scalars, but then we need to check for
1281 # document separators at the beginning of the line.
1287 if self.peek() == '#':
1290 ch = self.peek(length)
1291 if ch in '\0 \t\r\n\x85\u2028\u2029' \
1292 or (not self.flow_level and ch == ':' and
1293 self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
1294 or (self.flow_level and ch in ',:?[]{}'):
1297 # It's not clear what we should do with ':' in the flow context.
1298 if (self.flow_level and ch == ':'
1299 and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
1300 self.forward(length)
1301 raise ScannerError("while scanning a plain scalar", start_mark,
1302 "found unexpected ':'", self.get_mark(),
1303 "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
1306 self.allow_simple_key = False
1307 chunks.extend(spaces)
1308 chunks.append(self.prefix(length))
1309 self.forward(length)
1310 end_mark = self.get_mark()
1311 spaces = self.scan_plain_spaces(indent, start_mark)
1312 if not spaces or self.peek() == '#' \
1313 or (not self.flow_level and self.column < indent):
1315 return ScalarToken(''.join(chunks), True, start_mark, end_mark)
# Consume spaces and line breaks between chunks of a plain scalar,
# returning the folded whitespace chunks (or nothing, ending the scalar).
# NOTE(review): several original lines are missing from this extract
# (e.g. the early returns after the document-separator checks).
1317 def scan_plain_spaces(self, indent, start_mark):
1318 # See the specification for details.
1319 # The specification is really confusing about tabs in plain scalars.
1320 # We just forbid them completely. Do not use tabs in YAML!
# Note: only ' ' is accepted here — tabs deliberately end the run.
1323 while self.peek(length) in ' ':
1325 whitespaces = self.prefix(length)
1326 self.forward(length)
1328 if ch in '\r\n\x85\u2028\u2029':
1329 line_break = self.scan_line_break()
# After a line break a simple key may start again.
1330 self.allow_simple_key = True
# A document separator ('---'/'...' before blank/break/NUL) terminates
# the plain scalar.
1331 prefix = self.prefix(3)
1332 if (prefix == '---' or prefix == '...') \
1333 and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
# Gather any further blank lines, re-checking for a document separator
# after each collected break.
1336 while self.peek() in ' \r\n\x85\u2028\u2029':
1337 if self.peek() == ' ':
1340 breaks.append(self.scan_line_break())
1341 prefix = self.prefix(3)
1342 if (prefix == '---' or prefix == '...') \
1343 and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
# Non-'\n' breaks are preserved literally (same folding rule as in
# scan_flow_scalar_spaces).
1345 if line_break != '\n':
1346 chunks.append(line_break)
1349 chunks.extend(breaks)
# No line break followed: plain run of spaces is kept as-is.
1351 chunks.append(whitespaces)
# Scan a tag handle of the form '!', '!!' or '!word!' and return it.
# `name` names the enclosing construct ('tag' or 'directive') for errors.
# NOTE(review): some original lines are missing from this extract.
1354 def scan_tag_handle(self, name, start_mark):
1355 # See the specification for details.
1356 # For some strange reasons, the specification does not allow '_' in
1357 # tag handles. I have allowed it anyway.
# A handle must begin with '!'.
1360 raise ScannerError("while scanning a %s" % name, start_mark,
1361 "expected '!', but found %r" % ch, self.get_mark())
1363 ch = self.peek(length)
# Accept an alphanumeric word (plus '-'/'_', per the comment above) after
# the leading '!'.
1365 while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
1368 ch = self.peek(length)
# A named handle ('!word') must be closed by a second '!'.
1370 self.forward(length)
1371 raise ScannerError("while scanning a %s" % name, start_mark,
1372 "expected '!', but found %r" % ch, self.get_mark())
1374 value = self.prefix(length)
1375 self.forward(length)
# Scan the URI part of a tag and return it with %xx escapes decoded.
# NOTE(review): some original lines are missing from this extract.
1378 def scan_tag_uri(self, name, start_mark):
1379 # See the specification for details.
1380 # Note: we do not check if URI is well-formed.
1383 ch = self.peek(length)
# Accept the alphanumerics plus the URI punctuation set; '%' introduces
# an escape sequence handled by scan_uri_escapes below.
1384 while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
1385 or ch in '-;/?:@&=+$,_.!~*\'()[]%':
# Flush the literal run collected so far before decoding escapes.
1387 chunks.append(self.prefix(length))
1388 self.forward(length)
1390 chunks.append(self.scan_uri_escapes(name, start_mark))
1393 ch = self.peek(length)
# Flush any trailing literal run.
1395 chunks.append(self.prefix(length))
1396 self.forward(length)
# An empty URI is an error.
# NOTE(review): message says "while parsing" while sibling errors say
# "while scanning" — inconsistent, but changing it would alter behavior.
1399 raise ScannerError("while parsing a %s" % name, start_mark,
1400 "expected URI, but found %r" % ch, self.get_mark())
1401 return ''.join(chunks)
# Decode a run of %xx escape sequences in a tag URI into a UTF-8 string.
# NOTE(review): some original lines are missing from this extract
# (e.g. the forward() calls and the try: header).
1403 def scan_uri_escapes(self, name, start_mark):
1404 # See the specification for details.
# Remember where the escape run started for the decode-error message.
1406 mark = self.get_mark()
1407 while self.peek() == '%':
# Each '%' must be followed by exactly two hex digits.
# NOTE(review): "hexdecimal" is a typo for "hexadecimal" in the runtime
# error message; left unchanged here as it is program output.
1410 if self.peek(k) not in '0123456789ABCDEFabcdef':
1411 raise ScannerError("while scanning a %s" % name, start_mark,
1412 "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
1413 % self.peek(k), self.get_mark())
1414 codes.append(int(self.prefix(2), 16))
# The collected bytes must form valid UTF-8.
1417 value = bytes(codes).decode('utf-8')
1418 except UnicodeDecodeError as exc:
1419 raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
# Consume one line break and return its normalized form:
# '\r\n', '\r', '\n' and '\x85' all normalize to '\n'; the Unicode
# separators '\u2028'/'\u2029' are returned as themselves.
# NOTE(review): some original lines are missing from this extract.
1422 def scan_line_break(self):
1428 # '\u2028' : '\u2028'
1429 # '\u2029' : '\u2029'
1432 if ch in '\r\n\x85':
# '\r\n' is a single (two-character) break — consume both characters.
1433 if self.prefix(2) == '\r\n':
1438 elif ch in '\u2028\u2029':
1445 # psyco.bind(Scanner)
1446 #except ImportError: