2007-07-05 00:36:59 +02:00
|
|
|
require 'html5/constants'
|
|
|
|
require 'html5/inputstream'
|
2007-05-26 03:52:27 +02:00
|
|
|
|
2007-07-05 00:36:59 +02:00
|
|
|
module HTML5
|
2007-05-26 03:52:27 +02:00
|
|
|
|
2007-05-30 17:45:52 +02:00
|
|
|
# This class takes care of tokenizing HTML.
|
|
|
|
#
|
2007-08-30 19:19:10 +02:00
|
|
|
# * @current_token
|
2007-05-30 17:45:52 +02:00
|
|
|
# Holds the token that is currently being processed.
|
|
|
|
#
|
|
|
|
# * @state
|
|
|
|
# Holds a reference to the method to be invoked... XXX
|
|
|
|
#
|
|
|
|
# * @states
|
|
|
|
# Holds a mapping between states and methods that implement the state.
|
|
|
|
#
|
|
|
|
# * @stream
|
|
|
|
# Points to HTMLInputStream object.
|
|
|
|
|
|
|
|
class HTMLTokenizer
|
2007-08-30 19:19:10 +02:00
|
|
|
attr_accessor :content_model_flag, :current_token
|
2007-05-26 03:52:27 +02:00
|
|
|
attr_reader :stream
|
|
|
|
|
|
|
|
# Build a tokenizer around +stream+ (anything HTMLInputStream accepts).
# +options+ is forwarded to HTMLInputStream; the tokenizer itself honours:
#
# * :lowercase_element_name (default true) -- downcase tag names on emit
# * :lowercase_attr_name    (default true) -- downcase attribute names
def initialize(stream, options = {})
  @stream = HTMLInputStream.new(stream, options)

  # Setup the initial tokenizer state
  @content_model_flag = :PCDATA   # content model: :PCDATA, :RCDATA or :CDATA
  @state = :data_state            # name of the method implementing the current state
  @escapeFlag = false             # true while inside a <!-- ... --> escape in (R)CDATA
  @lastFourChars = []             # sliding window used to detect "<!--" / "-->"

  # The current token being created
  @current_token = nil

  # Tokens to be processed.
  @token_queue = []
  # "!= false" makes both flags default to true when the option is absent.
  @lowercase_element_name = options[:lowercase_element_name] != false
  @lowercase_attr_name = options[:lowercase_attr_name] != false
end
|
|
|
|
|
|
|
|
# This is where the magic happens.
#
# We do our usual processing through the states and when we have a token
# to return we yield the token which pauses processing until the next token
# is requested.
def each
  @token_queue = []
  # Start processing. When EOF is reached @state will return false
  # instead of true and the loop will terminate.
  while send @state
    # Stream-level errors are reported first, as ParseError tokens ...
    yield :type => :ParseError, :data => @stream.errors.shift until @stream.errors.empty?
    # ... then whatever tokens the state method queued, oldest first.
    yield @token_queue.shift until @token_queue.empty?
  end
end
|
|
|
|
|
|
|
|
# Below are various helper functions the tokenizer states use worked out.
|
2007-05-30 17:45:52 +02:00
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# A "/" was seen inside a tag. If the character that follows it is a
# ">" and we are building a StartTag, the token becomes an EmptyTag;
# any other placement of the solidus is a parse error. The peeked
# character is pushed back onto the stream either way.
def process_solidus_in_tag
  # Peek one character to see whether the solidus is directly before ">".
  peeked = @stream.char

  if peeked == ">" && @current_token[:type] == :StartTag
    @current_token[:type] = :EmptyTag
  else
    @token_queue.push({:type => :ParseError, :data => "incorrectly-placed-solidus"})
  end

  # Return the peeked character so it is not lost.
  @stream.unget(peeked)
end
|
|
|
|
|
|
|
|
# This function returns either U+FFFD or the character based on the
# decimal or hexadecimal representation. It also discards ";" if present;
# if not present a :ParseError token is queued.
#
# isHex selects base 16 (&#x...;) over base 10 (&#...;).  The "#" (and
# the "x") have already been consumed by consume_entity.
def consume_number_entity(isHex)
  # XXX More need to be done here. For instance, #13 should prolly be
  # converted to #10 so we don't get \r (#13 is \r right?) in the DOM and
  # such. Thoughts on this appreciated.
  allowed = DIGITS
  radix = 10
  if isHex
    allowed = HEX_DIGITS
    radix = 16
  end

  char_stack = []

  # Consume all the characters that are in range while making sure we
  # don't hit an EOF.
  c = @stream.char
  while allowed.include?(c) and c != :EOF
    char_stack.push(c)
    c = @stream.char
  end

  # Convert the set of characters consumed to an int.
  charAsInt = char_stack.join('').to_i(radix)

  if charAsInt == 13
    # &#13; (CR) is normalised to LF, with a parse error.
    @token_queue << {:type => :ParseError, :data => "incorrect-cr-newline-entity"}
    charAsInt = 10
  elsif (128..159).include? charAsInt
    # If the integer is between 127 and 160 (so 128 and bigger and 159
    # and smaller) we need to do the "windows trick": remap the C1
    # control range through the Windows-1252 table.
    @token_queue << {:type => :ParseError, :data => "illegal-windows-1252-entity"}
    charAsInt = ENTITIES_WINDOWS1252[charAsInt - 128]
  end

  # Valid scalar values are 1..0x10FFFF (1114111) excluding the UTF-16
  # surrogate range U+D800..U+DFFF (55296..57343).
  if 0 < charAsInt and charAsInt <= 1114111 and not (55296 <= charAsInt and charAsInt <= 57343)
    if String.method_defined? :force_encoding
      # Ruby 1.9+: Integer#chr accepts a target encoding.
      char = charAsInt.chr('utf-8')
    else
      # Ruby 1.8: build the UTF-8 byte sequence with pack('U').
      char = [charAsInt].pack('U')
    end
  else
    # Out-of-range code point: substitute U+FFFD REPLACEMENT CHARACTER.
    char = [0xFFFD].pack('U')
    @token_queue << {:type => :ParseError, :data => "cant-convert-numeric-entity", :datavars => {"charAsInt" => charAsInt}}
  end

  # Discard the ; if present. Otherwise, put it back on the queue and
  # queue a parse error.
  if c != ";"
    @token_queue << {:type => :ParseError, :data => "numeric-entity-without-semicolon"}
    @stream.unget(c)
  end

  return char
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Consume an entity reference; the "&" itself has already been eaten.
# Returns the replacement text, or nil when no entity could be consumed
# (the caller then treats the "&" as a literal character).
# from_attribute enables the rule that e.g. "&noti" inside an attribute
# value is NOT an entity when the match is followed by a letter or digit.
def consume_entity(from_attribute=false)
  char = nil
  char_stack = [@stream.char]
  if SPACE_CHARACTERS.include?(char_stack[0]) or [:EOF, '<', '&'].include?(char_stack[0])
    # "&" followed by whitespace, EOF, "<" or "&" is not an entity at all.
    @stream.unget(char_stack)
  elsif char_stack[0] == '#'
    # We might have a number entity here.
    char_stack += [@stream.char, @stream.char]
    if char_stack[0 .. 1].include? :EOF
      # If we reach the end of the file put everything up to :EOF
      # back in the queue
      char_stack = char_stack[0...char_stack.index(:EOF)]
      @stream.unget(char_stack)
      @token_queue << {:type => :ParseError, :data => "expected-numeric-entity-but-got-eof"}
    else
      if char_stack[1].downcase == "x" and HEX_DIGITS.include? char_stack[2]
        # Hexadecimal entity detected.
        @stream.unget(char_stack[2])
        char = consume_number_entity(true)
      elsif DIGITS.include? char_stack[1]
        # Decimal entity detected.
        @stream.unget(char_stack[1..-1])
        char = consume_number_entity(false)
      else
        # No number entity detected.
        @stream.unget(char_stack)
        @token_queue << {:type => :ParseError, :data => "expected-numeric-entity"}
      end
    end
  else
    # At this point in the process might have named entity. Entities
    # are stored in the global variable "entities".
    #
    # Consume characters and compare to these to a substring of the
    # entity names in the list until the substring no longer matches.
    filteredEntityList = ENTITIES.keys
    filteredEntityList.reject! {|e| e[0].chr != char_stack[0]}
    entityName = nil

    # Try to find the longest entity the string will match to take care
    # of &noti for instance.
    #
    # NOTE(review): the ";" tests in this method mix ?;, ';' and ";".
    # On Ruby 1.8 String#[-1] returns a Fixnum, so comparisons against a
    # *string* semicolon never match there -- confirm which Ruby
    # versions this must support and unify the comparisons.
    while char_stack.last != :EOF
      name = char_stack.join('')
      if filteredEntityList.any? {|e| e[0...name.length] == name}
        filteredEntityList.reject! {|e| e[0...name.length] != name}
        char_stack.push(@stream.char)
      else
        break
      end

      if ENTITIES.include? name
        entityName = name
        break if entityName[-1] == ';'
      end
    end

    if entityName != nil
      char = ENTITIES[entityName]

      # Check whether or not the last character returned can be
      # discarded or needs to be put back.
      if entityName[-1] != ?;
        @token_queue << {:type => :ParseError, :data => "named-entity-without-semicolon"}
      end

      # In attribute values, a semicolon-less match followed by a letter
      # or digit is taken literally instead.
      if entityName[-1] != ";" and from_attribute and
        (ASCII_LETTERS.include?(char_stack[entityName.length]) or
         DIGITS.include?(char_stack[entityName.length]))
        @stream.unget(char_stack)
        char = '&'
      else
        # Push back whatever was read past the matched entity name.
        @stream.unget(char_stack[entityName.length..-1])
      end
    else
      @token_queue << {:type => :ParseError, :data => "expected-named-entity"}
      @stream.unget(char_stack)
    end
  end
  return char
end
|
|
|
|
|
|
|
|
# This method replaces the need for "entityInAttributeValueState".
# Consume an entity and append its replacement text -- or a literal "&"
# when nothing could be consumed -- to the value of the attribute
# currently being built (the last [name, value] pair in :data).
def process_entity_in_attribute
  replacement = consume_entity
  @current_token[:data].last[1] += (replacement || "&")
end
|
|
|
|
|
|
|
|
# This method is a generic handler for emitting the tags. It also sets
# the state back to "data" because that's what's needed after a token
# has been emitted.  Tokens that are not tags are left untouched here;
# the states that build them queue them directly.
def emit_current_token
  token = @current_token
  case token[:type]
  when :StartTag, :EndTag, :EmptyTag
    token[:name] = token[:name].downcase if @lowercase_element_name
    # Queue the finished tag and fall back to the data state.
    @token_queue << token
    @state = :data_state
  end
end
|
|
|
|
|
|
|
|
# Below are the various tokenizer states worked out.
|
|
|
|
|
|
|
|
# XXX AT Perhaps we should have Hixie run some evaluation on billions of
# documents to figure out what the order of the various if and elsif
# statements should be.
#
# The "data" state: ordinary character data.  Dispatches to the entity
# and tag-open states, tracks <!-- ... --> escapes inside CDATA/RCDATA
# content, and returns false on :EOF (which ends the #each loop).
def data_state
  data = @stream.char

  # The four-character window is only needed to spot "<!--" / "-->",
  # which only matters in CDATA/RCDATA content.
  if @content_model_flag == :CDATA or @content_model_flag == :RCDATA
    @lastFourChars << data
    @lastFourChars.shift if @lastFourChars.length > 4
  end

  if data == "&" and [:PCDATA,:RCDATA].include?(@content_model_flag) and !@escapeFlag
    @state = :entity_data_state
  elsif data == "-" && [:CDATA, :RCDATA].include?(@content_model_flag) && !@escapeFlag && @lastFourChars.join('') == "<!--"
    # "<!--" seen in (R)CDATA: enter the escaped section.
    @escapeFlag = true
    @token_queue << {:type => :Characters, :data => data}
  elsif data == "<" and !@escapeFlag and
    [:PCDATA,:CDATA,:RCDATA].include?(@content_model_flag)
    @state = :tag_open_state
  elsif data == ">" and @escapeFlag and
    [:CDATA,:RCDATA].include?(@content_model_flag) and
    @lastFourChars[1..-1].join('') == "-->"
    # "-->" seen: leave the escaped section.
    @escapeFlag = false
    @token_queue << {:type => :Characters, :data => data}
  elsif data == :EOF
    # Tokenization ends.
    return false
  elsif SPACE_CHARACTERS.include? data
    # Directly after emitting a token you switch back to the "data
    # state". At that point SPACE_CHARACTERS are important so they are
    # emitted separately.
    # XXX need to check if we don't need a special "spaces" flag on
    # characters.
    @token_queue << {:type => :SpaceCharacters, :data => data + @stream.chars_until(SPACE_CHARACTERS, true)}
  else
    # Gobble a whole run of ordinary characters into one token; stop at
    # anything that could start a different construct.
    @token_queue << {:type => :Characters, :data => data + @stream.chars_until(%w[& < > -])}
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# An "&" was seen in PCDATA/RCDATA content.  Emit the entity's
# replacement text as a Characters token -- or a literal "&" when no
# entity could be consumed -- then drop back to the data state.
def entity_data_state
  text = consume_entity || "&"
  @token_queue << {:type => :Characters, :data => text}
  @state = :data_state
  true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# A "<" was just consumed.  In PCDATA this may open a start tag, end
# tag, comment/doctype or bogus comment; in RCDATA/CDATA only "</" is
# special and anything else is literal text.
def tag_open_state
  data = @stream.char
  if @content_model_flag == :PCDATA
    if data == "!"
      @state = :markup_declaration_open_state
    elsif data == "/"
      @state = :close_tag_open_state
    elsif data != :EOF and ASCII_LETTERS.include? data
      # First letter of a start tag name.
      @current_token = {:type => :StartTag, :name => data, :data => []}
      @state = :tag_name_state
    elsif data == ">"
      # XXX In theory it could be something besides a tag name. But
      # do we really care?
      @token_queue << {:type => :ParseError, :data => "expected-tag-name-but-got-right-bracket"}
      @token_queue << {:type => :Characters, :data => "<>"}
      @state = :data_state
    elsif data == "?"
      # XXX In theory it could be something besides a tag name. But
      # do we really care?
      @token_queue.push({:type => :ParseError, :data => "expected-tag-name-but-got-question-mark"})
      @stream.unget(data)
      @state = :bogus_comment_state
    else
      # Not a tag after all: emit the "<" literally and reprocess data.
      @token_queue << {:type => :ParseError, :data => "expected-tag-name"}
      @token_queue << {:type => :Characters, :data => "<"}
      @stream.unget(data)
      @state = :data_state
    end
  else
    # We know the content model flag is set to either RCDATA or CDATA
    # now because this state can never be entered with the PLAINTEXT
    # flag.
    if data == "/"
      @state = :close_tag_open_state
    else
      @token_queue << {:type => :Characters, :data => "<"}
      @stream.unget(data)
      @state = :data_state
    end
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# "</" has been consumed.  In RCDATA/CDATA the end tag is only real if
# it matches the name of the last start tag (@current_token); otherwise
# "</" is literal text.  The check is done with lookahead + unget so the
# regular end-tag path below can reprocess the same characters.
def close_tag_open_state
  if (@content_model_flag == :RCDATA or @content_model_flag == :CDATA)
    if @current_token
      char_stack = []

      # So far we know that "</" has been consumed. We now need to know
      # whether the next few characters match the name of last emitted
      # start tag which also happens to be the current_token. We also need
      # to have the character directly after the characters that could
      # match the start tag name.
      (@current_token[:name].length + 1).times do
        char_stack.push(@stream.char)
        # Make sure we don't get hit by :EOF
        break if char_stack[-1] == :EOF
      end

      # Since this is just for checking. We put the characters back on
      # the stack.
      @stream.unget(char_stack)
    end

    # Case-insensitive name match, and the following character must end
    # the tag name (whitespace, ">", "/", "<" or EOF).
    if @current_token and
      @current_token[:name].downcase ==
      char_stack[0...-1].join('').downcase and
      (SPACE_CHARACTERS + [">", "/", "<", :EOF]).include? char_stack[-1]
      # Because the characters are correct we can safely switch to
      # PCDATA mode now. This also means we don't have to do it when
      # emitting the end tag token.
      @content_model_flag = :PCDATA
    else
      # Not a matching end tag: "</" is just text.
      @token_queue << {:type => :Characters, :data => "</"}
      @state = :data_state

      # Need to return here since we don't want the rest of the
      # method to be walked through.
      return true
    end
  end

  data = @stream.char
  if data == :EOF
    @token_queue << {:type => :ParseError, :data => "expected-closing-tag-but-got-eof"}
    @token_queue << {:type => :Characters, :data => "</"}
    @state = :data_state
  elsif ASCII_LETTERS.include? data
    @current_token = {:type => :EndTag, :name => data, :data => []}
    @state = :tag_name_state
  elsif data == ">"
    @token_queue << {:type => :ParseError, :data => "expected-closing-tag-but-got-right-bracket"}
    @state = :data_state
  else
    # XXX data can be _'_...
    @token_queue << {:type => :ParseError, :data => "expected-closing-tag-but-got-char", :datavars => {:data => data}}
    @stream.unget(data)
    @state = :bogus_comment_state
  end

  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Accumulate the tag name of @current_token.  Whitespace moves on to the
# attribute states, ">" (or EOF, with an error) emits the tag, and "/"
# may turn a start tag into an EmptyTag.
def tag_name_state
  data = @stream.char
  case
  when SPACE_CHARACTERS.include?(data)
    @state = :before_attribute_name_state
  when data == :EOF
    @token_queue << {:type => :ParseError, :data => "eof-in-tag-name"}
    emit_current_token
  when ASCII_LETTERS.include?(data)
    # Grab a whole run of letters at once rather than one per call.
    @current_token[:name] += data + @stream.chars_until(ASCII_LETTERS, true)
  when data == ">"
    emit_current_token
  when data == "/"
    process_solidus_in_tag
    @state = :before_attribute_name_state
  else
    @current_token[:name] += data
  end
  true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Between a tag name (or a finished attribute) and the next attribute
# name.  Skips whitespace, starts a fresh [name, value] pair on any
# other character, and handles ">", "/" and EOF.
def before_attribute_name_state
  data = @stream.char
  if SPACE_CHARACTERS.include? data
    # Skip the whole whitespace run in one go.
    @stream.chars_until(SPACE_CHARACTERS, true)
  elsif data == :EOF
    @token_queue << {:type => :ParseError, :data => "expected-attribute-name-but-got-eof"}
    emit_current_token
  elsif ASCII_LETTERS.include? data
    @current_token[:data].push([data, ""])
    @state = :attribute_name_state
  elsif data == ">"
    emit_current_token
  elsif data == "/"
    process_solidus_in_tag
  else
    # Any other character also begins an attribute name.
    @current_token[:data].push([data, ""])
    @state = :attribute_name_state
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Accumulate the name of the attribute currently being built (the last
# [name, value] pair in @current_token[:data]).  When the name is
# complete (leavingThisState) it is optionally downcased and checked
# for duplicates; emitToken defers the actual emit until after that
# bookkeeping has run.
def attribute_name_state
  data = @stream.char
  leavingThisState = true
  emitToken = false
  if data == "="
    @state = :before_attribute_value_state
  elsif data == :EOF
    @token_queue << {:type => :ParseError, :data => "eof-in-attribute-name"}
    @state = :data_state
    emitToken = true
  elsif ASCII_LETTERS.include? data
    # Grab the whole run of letters at once.
    @current_token[:data][-1][0] += data + @stream.chars_until(ASCII_LETTERS, true)
    leavingThisState = false
  elsif data == ">"
    # XXX If we emit here the attributes are converted to a dict
    # without being checked and when the code below runs we error
    # because data is a dict not a list
    emitToken = true
  elsif SPACE_CHARACTERS.include? data
    @state = :after_attribute_name_state
  elsif data == "/"
    process_solidus_in_tag
    @state = :before_attribute_name_state
  else
    @current_token[:data][-1][0] += data
    leavingThisState = false
  end

  if leavingThisState
    # Attributes are not dropped at this stage. That happens when the
    # start tag token is emitted so values can still be safely appended
    # to attributes, but we do want to report the parse error in time.
    if @lowercase_attr_name
      @current_token[:data][-1][0] = @current_token[:data].last.first.downcase
    end
    # Compare the finished name against all earlier attribute names.
    @current_token[:data][0...-1].each {|name,value|
      if @current_token[:data].last.first == name
        @token_queue << {:type => :ParseError, :data => "duplicate-attribute"}
        break # don't report an error more than once
      end
    }
    # XXX Fix for above XXX
    emit_current_token if emitToken
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# After a complete attribute name and some whitespace: either the "="
# of a value, the end of the tag, or the start of another attribute.
def after_attribute_name_state
  data = @stream.char
  if SPACE_CHARACTERS.include? data
    # Skip the whole whitespace run.
    @stream.chars_until(SPACE_CHARACTERS, true)
  elsif data == "="
    @state = :before_attribute_value_state
  elsif data == ">"
    emit_current_token
  elsif data == :EOF
    @token_queue << {:type => :ParseError, :data => "expected-end-of-tag-but-got-eof"}
    emit_current_token
  elsif ASCII_LETTERS.include? data
    # Previous attribute had no value; this starts a new one.
    @current_token[:data].push([data, ""])
    @state = :attribute_name_state
  elsif data == "/"
    process_solidus_in_tag
    @state = :before_attribute_name_state
  else
    # Any other character also begins a new attribute name.
    @current_token[:data].push([data, ""])
    @state = :attribute_name_state
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# After "attr=": decide between double-quoted, single-quoted and
# unquoted value parsing.
def before_attribute_value_state
  data = @stream.char
  if SPACE_CHARACTERS.include? data
    # Skip the whole whitespace run.
    @stream.chars_until(SPACE_CHARACTERS, true)
  elsif data == "\""
    @state = :attribute_value_double_quoted_state
  elsif data == "&"
    # Push the "&" back so the unquoted state handles the entity.
    @state = :attribute_value_unquoted_state
    @stream.unget(data);
  elsif data == "'"
    @state = :attribute_value_single_quoted_state
  elsif data == ">"
    emit_current_token
  elsif data == :EOF
    @token_queue << {:type => :ParseError, :data => "expected-attribute-value-but-got-eof"}
    emit_current_token
  else
    # First character of an unquoted value.
    @current_token[:data][-1][1] += data
    @state = :attribute_value_unquoted_state
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Inside a double-quoted attribute value.  The closing '"' returns to
# the attribute-name states, "&" is handled as an entity, EOF is a
# parse error; everything else is appended to the value in bulk.
def attribute_value_double_quoted_state
  data = @stream.char
  case data
  when '"'
    @state = :before_attribute_name_state
  when "&"
    process_entity_in_attribute
  when :EOF
    @token_queue << {:type => :ParseError, :data => "eof-in-attribute-value-double-quote"}
    emit_current_token
  else
    @current_token[:data].last[1] += data + @stream.chars_until(["\"", "&"])
  end
  true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Inside a single-quoted attribute value; mirrors the double-quoted
# state with "'" as the terminator.
def attribute_value_single_quoted_state
  data = @stream.char
  case data
  when "'"
    @state = :before_attribute_name_state
  when "&"
    process_entity_in_attribute
  when :EOF
    @token_queue << {:type => :ParseError, :data => "eof-in-attribute-value-single-quote"}
    emit_current_token
  else
    @current_token[:data].last[1] += data + @stream.chars_until(["'", "&"])
  end
  true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Inside an unquoted attribute value: whitespace ends the value, ">"
# ends the tag, "&" is an entity; other characters are appended in bulk
# (stopping before "&", ">", "<" and whitespace).
def attribute_value_unquoted_state
  data = @stream.char
  if SPACE_CHARACTERS.include? data
    @state = :before_attribute_name_state
  elsif data == "&"
    process_entity_in_attribute
  elsif data == ">"
    emit_current_token
  elsif data == :EOF
    @token_queue << {:type => :ParseError, :data => "eof-in-attribute-value-no-quotes"}
    emit_current_token
  else
    @current_token[:data][-1][1] += data + @stream.chars_until(["&", ">","<"] + SPACE_CHARACTERS)
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# Emit everything up to the next ">" (or :EOF -- chars_until checks for
# :EOF automatically) as a Comment token, swallow the terminator, and
# return to the data state.
def bogus_comment_state
  comment_text = @stream.chars_until((">"))
  @token_queue << {:type => :Comment, :data => comment_text}

  # Eat the character directly after the bogus comment, which is either
  # the ">" or the :EOF.
  @stream.char
  @state = :data_state
  true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
# "<!" has been consumed.  "--" opens a comment, a 7-character
# case-insensitive "DOCTYPE" opens a doctype, anything else is a bogus
# comment (with all the lookahead pushed back first).
def markup_declaration_open_state
  char_stack = [@stream.char, @stream.char]
  if char_stack == ["-", "-"]
    @current_token = {:type => :Comment, :data => ""}
    @state = :comment_start_state
  else
    # Read the remaining five characters of a potential "DOCTYPE".
    5.times { char_stack.push(@stream.char) }
    # Put in explicit :EOF check
    if !char_stack.include?(:EOF) && char_stack.join("").upcase == "DOCTYPE"
      @current_token = {:type => :Doctype, :name => "", :publicId => nil, :systemId => nil, :correct => true}
      @state = :doctype_state
    else
      @token_queue << {:type => :ParseError, :data => "expected-dashes-or-doctype"}
      @stream.unget(char_stack)
      @state = :bogus_comment_state
    end
  end
  return true
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def comment_start_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "-"
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :comment_start_dash_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == ">"
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "incorrect-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:data] += data + @stream.chars_until("-")
|
|
|
|
@state = :comment_state
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def comment_start_dash_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "-"
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :comment_end_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == ">"
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "incorrect-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:data] += '-' + data + @stream.chars_until("-")
|
|
|
|
@state = :comment_state
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def comment_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "-"
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :comment_end_dash_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:data] += data + @stream.chars_until("-")
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def comment_end_dash_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "-"
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :comment_end_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-comment-end-dash"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:data] += "-" + data +\
|
2007-05-30 17:45:52 +02:00
|
|
|
@stream.chars_until("-")
|
|
|
|
# Consume the next character which is either a "-" or an :EOF as
|
|
|
|
# well so if there's a "-" directly after the "-" we go nicely to
|
|
|
|
# the "comment end state" without emitting a ParseError there.
|
|
|
|
@stream.char
|
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def comment_end_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == ">"
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == "-"
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "unexpected-dash-after-double-dash-in-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:data] += data
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-comment-double-dash"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
else
|
|
|
|
# XXX
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "unexpected-char-in-comment"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:data] += "--" + data
|
|
|
|
@state = :comment_state
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def doctype_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include? data
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :before_doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
else
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "need-space-after-doctype"}
|
2007-07-05 00:36:59 +02:00
|
|
|
@stream.unget(data)
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :before_doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def before_doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include? data
|
|
|
|
elsif data == ">"
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "expected-doctype-name-but-got-right-bracket"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "expected-doctype-name-but-got-eof"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:name] = data
|
|
|
|
@state = :doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include? data
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :after_doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == ">"
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype-name"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:name] += data
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def after_doctype_name_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include? data
|
|
|
|
elsif data == ">"
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
2007-07-05 00:36:59 +02:00
|
|
|
@stream.unget(data)
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
|
|
|
else
|
|
|
|
char_stack = [data]
|
|
|
|
5.times { char_stack << stream.char }
|
|
|
|
token = char_stack.join('').tr(ASCII_UPPERCASE,ASCII_LOWERCASE)
|
|
|
|
if token == "public" and !char_stack.include?(:EOF)
|
|
|
|
@state = :before_doctype_public_identifier_state
|
|
|
|
elsif token == "system" and !char_stack.include?(:EOF)
|
|
|
|
@state = :before_doctype_system_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@stream.unget(char_stack)
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "expected-space-or-right-bracket-in-doctype", "datavars" => {"data" => data}}
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :bogus_doctype_state
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def before_doctype_public_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
|
|
|
|
if SPACE_CHARACTERS.include?(data)
|
|
|
|
elsif data == "\""
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:publicId] = ""
|
|
|
|
@state = :doctype_public_identifier_double_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == "'"
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:publicId] = ""
|
|
|
|
@state = :doctype_public_identifier_single_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == ">"
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "unexpected-end-of-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "unexpected-char-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :bogus_doctype_state
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def doctype_public_identifier_double_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "\""
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :after_doctype_public_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:publicId] += data
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def doctype_public_identifier_single_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "'"
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :after_doctype_public_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:publicId] += data
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def after_doctype_public_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include?(data)
|
|
|
|
elsif data == "\""
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:systemId] = ""
|
|
|
|
@state = :doctype_system_identifier_double_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == "'"
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:systemId] = ""
|
|
|
|
@state = :doctype_system_identifier_single_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == ">"
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :bogus_doctype_state
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def before_doctype_system_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include?(data)
|
|
|
|
elsif data == "\""
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:systemId] = ""
|
|
|
|
@state = :doctype_system_identifier_double_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == "'"
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:systemId] = ""
|
|
|
|
@state = :doctype_system_identifier_single_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == ">"
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "unexpected-char-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "unexpected-char-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :bogus_doctype_state
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def doctype_system_identifier_double_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "\""
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :after_doctype_system_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:systemId] += data
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def doctype_system_identifier_single_quoted_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if data == "'"
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :after_doctype_system_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:systemId] += data
|
2007-06-22 10:12:08 +02:00
|
|
|
end
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def after_doctype_system_identifier_state
|
2007-06-22 10:12:08 +02:00
|
|
|
data = @stream.char
|
|
|
|
if SPACE_CHARACTERS.include?(data)
|
|
|
|
elsif data == ">"
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
elsif data == :EOF
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-06-22 10:12:08 +02:00
|
|
|
else
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@state = :bogus_doctype_state
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-08-30 19:19:10 +02:00
|
|
|
def bogus_doctype_state
|
2007-05-30 17:45:52 +02:00
|
|
|
data = @stream.char
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
2007-05-30 17:45:52 +02:00
|
|
|
if data == ">"
|
2007-08-30 19:19:10 +02:00
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
elsif data == :EOF
|
|
|
|
# XXX EMIT
|
2007-07-05 00:36:59 +02:00
|
|
|
@stream.unget(data)
|
2007-09-10 05:26:19 +02:00
|
|
|
@token_queue << {:type => :ParseError, :data => "eof-in-doctype"}
|
2007-08-30 19:19:10 +02:00
|
|
|
@current_token[:correct] = false
|
|
|
|
@token_queue << @current_token
|
|
|
|
@state = :data_state
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
|
|
|
return true
|
2007-05-26 03:52:27 +02:00
|
|
|
end
|
|
|
|
|
2007-05-30 17:45:52 +02:00
|
|
|
end
|
2007-05-26 03:52:27 +02:00
|
|
|
|
|
|
|
end
|