require 'chunks/chunk'
require 'stringsupport'

# Contains all the methods for finding and replacing wiki-related links.
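#
# A rough usage sketch (the +rendering+ variable is illustrative -- any object
# that responds to +as_utf8+, +add_chunk+ and +page_link+ should do):
#
#   WikiChunk::Link.apply_to(rendering)   # bracketed [[...]] links first
#   WikiChunk::Word.apply_to(rendering)   # then bare WikiWords (see NOTE on Link)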
module WikiChunk
  include Chunk

  # A wiki reference is the top-level class for anything that refers to
  # another wiki page.
  class WikiReference < Chunk::Abstract

    # Name of the referenced page
    attr_reader :page_name

    # Name of the referenced web
    attr_reader :web_name

    # The page this reference resolves to in the content's web
    def refpage
      @content.web.page(@page_name)
    end

  end

  # A wiki link is the top-level class for links that refer to
  # another wiki page.
  class WikiLink < WikiReference

    attr_reader :link_text, :link_type

    def initialize(match_data, content)
      super
      @link_type = :show
    end
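
    # Substitutes every match of this chunk's pattern in +content+: each match is
    # registered with content.add_chunk and replaced by the chunk's mask string,
    # except matches that are really Textile URLs, which are left untouched.
    # A rough usage sketch (assuming +content+ behaves like Instiki's rendering
    # object, i.e. responds to +as_utf8+, +add_chunk+ and +page_link+):
    #
    #   WikiChunk::Link.apply_to(content)
    #   # every [[...]] is now an opaque mask token, expanded back when unmasked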
    def self.apply_to(content)
      content.as_utf8.gsub!( self.pattern ) do |matched_text|
        chunk = self.new($~, content)
        if chunk.textile_url?
          # do not substitute
          matched_text
        else
          content.add_chunk(chunk)
          chunk.mask
        end
      end
    end

    # True when the match was preceded by '":', i.e. it is the target of a
    # Textile link ("text":WikiWord) and should not be turned into a wiki link.
    def textile_url?
      not @textile_link_suffix.nil?
    end
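
    # True when @web_name names an existing web (by name or by address), i.e. the
    # link points into another web of this wiki, e.g. [[Another Web:HomePage]].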
    def interweb_link?
      not @web_name.nil? and
        (Web.find_by_name(@web_name) or Web.find_by_address(@web_name))
    end

    # replace any sequence of whitespace characters with a single space
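    #
    # For instance:
    #
    #   normalize_whitespace("My   Wiki\t Page")   # => "My Wiki Page"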
    def normalize_whitespace(line)
      line.gsub(/\s+/, ' ')
    end

  end

  # This chunk matches a WikiWord. WikiWords can be escaped
  # by prepending a '\'. When this is the case, the +escaped_text+
  # method will return the WikiWord instead of the usual +nil+.
  # The +page_name+ method returns the matched WikiWord.
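  #
  # For example (the rendered form of the link depends on the content object's
  # +page_link+ implementation):
  #
  #   HomePage    # matched: page_name is 'HomePage', link text 'Home Page'
  #   \HomePage   # escaped: escaped_text is 'HomePage', rendered as plain text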
  class Word < WikiLink

    attr_reader :escaped_text

    # Optional '":' Textile prefix, optional '\' escape, then the WikiWord itself.
    # On Ruby 1.9+ (String#force_encoding exists) the pattern string carries its own
    # encoding; on Ruby 1.8 the regexp must be built with the 'u' (UTF-8) option.
    unless defined? WIKI_WORD
      WIKI_WORD = ''.respond_to?(:force_encoding) ?
        Regexp.new('(":)?(\\\\)?(' + WikiWords::WIKI_WORD_PATTERN + ')\b', 0) :
        Regexp.new('(":)?(\\\\)?(' + WikiWords::WIKI_WORD_PATTERN + ')\b', 0, 'u')
    end

    def self.pattern
      WIKI_WORD
    end

    def initialize(match_data, content)
      super
      # match groups: optional '":' Textile prefix, optional '\' escape, the WikiWord itself
      @textile_link_suffix, @escape, @page_name = match_data[1..3]
      if @escape
        @unmask_mode = :escape
        @escaped_text = @page_name
      else
        @escaped_text = nil
      end
      @link_text = WikiWords.separate(@page_name)
      @unmask_text = (@escaped_text || @content.page_link(@web_name, @page_name, @link_text, @link_type))
    end

  end

  # This chunk handles [[bracketed wiki words]] and
  # [[AliasedWords|aliased wiki words]]. The first part of an
  # aliased wiki word must be a WikiWord. If the WikiWord
  # is aliased, the +link_text+ field will contain the
  # alias; otherwise +link_text+ will contain the entire
  # contents within the double brackets.
  #
  # NOTE: This chunk must be tested before WikiWord, since
  # a WikiWord can be a substring of a WikiLink.
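  #
  # Examples of the forms this chunk recognizes (illustrative; the HTML that a
  # link expands to depends on the content object's +page_link+):
  #
  #   [[HomePage]]                  # plain bracketed page name
  #   [[HomePage|the front page]]   # aliased: link_text is 'the front page'
  #   [[diagram.png:pic]]           # typed link, here an inline picture
  #   [[Another Web:HomePage]]      # interweb link to a page in another web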
  class Link < WikiLink

    unless defined? WIKI_LINK
      WIKI_LINK = /(":)?\[\[\s*([^\]\s][^\]]*?)\s*\]\]/
      LINK_TYPE_SEPARATION = Regexp.new('^(.+):((file)|(pic)|(video)|(audio)|(delete))$', 0)
      ALIAS_SEPARATION = Regexp.new('^(.+)\|(.+)$', 0)
      WEB_SEPARATION = Regexp.new('^(.+):(.+)$', 0)
    end

    def self.pattern() WIKI_LINK end

    def initialize(match_data, content)
      super
      @textile_link_suffix = match_data[1]
      @link_text = @page_name = normalize_whitespace(match_data[2])
      separate_link_type
      separate_alias
      separate_web
      @unmask_text = @content.page_link(@web_name, @page_name, @link_text, @link_type)
    end

    private

    # If the link within the brackets has the form [[filename:file]] or [[filename:pic]],
    # it is a link to a file or a picture rather than to a wiki page.
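    #
    # For instance (the file names are illustrative):
    #
    #   [[report.pdf:file]]   # page_name and link_text become 'report.pdf', link_type :file
    #   [[logo.png:pic]]      # link_type becomes :pic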
    def separate_link_type
      link_type_match = LINK_TYPE_SEPARATION.match(@page_name)
      if link_type_match
        @link_text = @page_name = link_type_match[1]
        @link_type = link_type_match[2..3].compact[0].to_sym
      end
    end

    # The link text may differ from the page name; such links look like [[actual page|link text]].
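    #
    # For instance:
    #
    #   [[HomePage|the home page]]   # page_name 'HomePage', link_text 'the home page'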
    def separate_alias
      alias_match = ALIAS_SEPARATION.match(@page_name)
      if alias_match
        @page_name = normalize_whitespace(alias_match[1])
        @link_text = alias_match[2]
      end
      # note that [[filename|link text:file]] is also supported
    end

    # Interweb links have the form [[Web Name:Page Name]] or
    # [[address:PageName]]. Alternate text links of the form
    # [[address:PageName|Other text]] are also supported.
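    #
    # For instance:
    #
    #   [[Another Web:HomePage]]   # web_name 'Another Web', page_name 'HomePage'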
    def separate_web
      web_match = WEB_SEPARATION.match(@page_name)
      if web_match
        @web_name = normalize_whitespace(web_match[1])
        @page_name = web_match[2]
        # unless an alias already changed it, display just the page name rather than 'web:page'
        @link_text = @page_name if @link_text == web_match[0]
      end
    end

  end

end