removed old models

This commit is contained in:
Rick Okin 2005-08-09 02:20:50 +00:00
parent 26c046cdfa
commit 6832b2edf9
19 changed files with 0 additions and 1683 deletions

View file

@ -1,4 +0,0 @@
class Author < String
attr_accessor :ip
def initialize(name, ip) @ip = ip; super(name) end
end
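
A quick sketch of how the Author value above behaves (hypothetical values):
author = Author.new('AnonymousCoward', '127.0.0.1')
author == 'AnonymousCoward'   # => true, because Author subclasses String
author.ip                     # => "127.0.0.1"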

View file

@ -1,33 +0,0 @@
require 'chunks/chunk'
# The category chunk looks for "category: news" on a line by
# itself and parses the terms after the ':' as categories.
# Other classes can search for Category chunks within
# rendered content to find out what categories this page
# should be in.
#
# Category lines can be hidden using ':category: news', for example
class Category < Chunk::Abstract
CATEGORY_PATTERN = /^(:)?category\s*:(.*)$/i
def self.pattern() CATEGORY_PATTERN end
attr_reader :hidden, :list
def initialize(match_data, content)
super(match_data, content)
@hidden = match_data[1]
@list = match_data[2].split(',').map { |c| c.strip }
@unmask_text = ''
if @hidden
@unmask_text = ''
else
category_urls = @list.map { |category| url(category) }.join(', ')
@unmask_text = '<div class="property"> category: ' + category_urls + '</div>'
end
end
# TODO move presentation of page metadata to controller/view
def url(category)
%{<a class="category_link" href="../list/?category=#{category}">#{category}</a>}
end
end
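
A minimal sketch of what CATEGORY_PATTERN captures (hypothetical input):
md = Category::CATEGORY_PATTERN.match('category: news, sports')
md[1]                                  # => nil, so the line is not hidden
md[2].split(',').map { |c| c.strip }   # => ["news", "sports"]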

View file

@ -1,86 +0,0 @@
require 'uri/common'
# A chunk is a pattern of text that can be protected
# and interrogated by a renderer. Each Chunk class has a
# +pattern+ that states what sort of text it matches.
# Chunks are initialized by passing in the result of a
# match by its pattern.
module Chunk
class Abstract
# automatically construct the array of derivatives of Chunk::Abstract
@derivatives = []
class << self
attr_reader :derivatives
end
def self::inherited( klass )
Abstract::derivatives << klass
end
# the class name part of the mask strings
def self.mask_string
self.to_s.delete(':').downcase
end
# a regexp that matches all chunk_types masks
def Abstract::mask_re(chunk_types)
tmp = chunk_types.map{|klass| klass.mask_string}.join("|")
Regexp.new("chunk([0-9a-f]+n\\d+)(#{tmp})chunk")
end
attr_reader :text, :unmask_text, :unmask_mode
def initialize(match_data, content)
@text = match_data[0]
@content = content
@unmask_mode = :normal
end
# Find all the chunks of the given type in content
# Each time the pattern is matched, create a new
# chunk for it, and replace the occurrence of the chunk
# in this content with its mask.
def self.apply_to(content)
content.gsub!( self.pattern ) do |match|
new_chunk = self.new($~, content)
content.add_chunk(new_chunk)
new_chunk.mask
end
end
# should contain only [a-z0-9]
def mask
@mask ||="chunk#{@id}#{self.class.mask_string}chunk"
end
# We should not use object_id because object_id is not guaranteed
# to be unique when we restart the wiki (new object ids can equal old ones
# that were restored from Madeleine storage)
def id
@id ||= "#{@content.page_id}n#{@content.chunk_id}"
end
def unmask
@content.sub!(mask, @unmask_text)
end
def rendered?
@unmask_mode == :normal
end
def escaped?
@unmask_mode == :escape
end
def revert
@content.sub!(mask, @text)
# unregister
@content.delete_chunk(self)
end
end
end
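
To illustrate the masking machinery above, assuming the Category chunk from chunks/category.rb is loaded:
Category.mask_string                        # => "category"
mask_re = Chunk::Abstract.mask_re([Category])
mask_re =~ 'chunk6e657773n0categorychunk'   # => 0; $1 == "6e657773n0", $2 == "category"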

View file

@ -1,61 +0,0 @@
$: << File.dirname(__FILE__) + "/../../lib"
require 'redcloth'
require 'bluecloth_tweaked'
require 'rdocsupport'
require 'chunks/chunk'
# The markup engines are Chunks that call one of RedCloth,
# BlueCloth or RDoc to convert text. This markup occurs when the chunk is required
# to mask itself.
module Engines
class AbstractEngine < Chunk::Abstract
# Create a new chunk for the whole content and replace it with its mask.
def self.apply_to(content)
new_chunk = self.new(content)
content.replace(new_chunk.mask)
end
private
# Never create engines by constructor - use apply_to instead
def initialize(content)
@content = content
end
end
class Textile < AbstractEngine
def mask
redcloth = RedCloth.new(@content, [:hard_breaks] + @content.options[:engine_opts])
redcloth.filter_html = false
redcloth.no_span_caps = false
redcloth.to_html(:textile)
end
end
class Markdown < AbstractEngine
def mask
BlueCloth.new(@content, @content.options[:engine_opts]).to_html
end
end
class Mixed < AbstractEngine
def mask
redcloth = RedCloth.new(@content, @content.options[:engine_opts])
redcloth.filter_html = false
redcloth.no_span_caps = false
redcloth.to_html
end
end
class RDoc < AbstractEngine
def mask
RDocSupport::RDocFormatter.new(@content).to_html
end
end
MAP = { :textile => Textile, :markdown => Markdown, :mixed => Mixed, :rdoc => RDoc }
MAP.default = Textile
end
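
A small sketch of the engine lookup; MAP.default means unknown markup symbols fall back to Textile:
Engines::MAP[:markdown]        # => Engines::Markdown
Engines::MAP[:rdoc]            # => Engines::RDoc
Engines::MAP[:something_else]  # => Engines::Textile (the default)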

View file

@ -1,41 +0,0 @@
require 'chunks/wiki'
# Includes the contents of another page for rendering.
# The include command looks like this: "[[!include PageName]]".
# It is a WikiReference since it refers to another page (PageName)
# and the wiki content using this command must be notified
# of changes to that page.
# If the included page could not be found, a warning is displayed.
class Include < WikiChunk::WikiReference
INCLUDE_PATTERN = /\[\[!include\s+(.*?)\]\]\s*/i
def self.pattern() INCLUDE_PATTERN end
def initialize(match_data, content)
super
@page_name = match_data[1].strip
@unmask_text = get_unmask_text_avoiding_recursion_loops
end
private
def get_unmask_text_avoiding_recursion_loops
if refpage then
refpage.clear_display_cache
if refpage.wiki_includes.include?(@content.page_name)
# this will break the recursion
@content.delete_chunk(self)
return "<em>Recursive include detected; #{@page_name} --> #{@content.page_name} " +
"--> #{@page_name}</em>\n"
else
@content.merge_chunks(refpage.display_content)
return refpage.display_content.pre_rendered
end
else
return "<em>Could not include #{@page_name}</em>\n"
end
end
end
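
A minimal sketch of the include syntax matched by INCLUDE_PATTERN (hypothetical page name):
md = Include::INCLUDE_PATTERN.match('[[!include HomePage]]')
md[1].strip   # => "HomePage"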

View file

@ -1,31 +0,0 @@
require 'chunks/chunk'
# These are basic chunks that have a pattern and can be protected.
# They are used by the rendering process to prevent wiki rendering
# from occurring within literal areas such as <code> and <pre> blocks
# and within HTML tags.
module Literal
class AbstractLiteral < Chunk::Abstract
def initialize(match_data, content)
super
@unmask_text = @text
end
end
# A literal chunk that protects 'code' and 'pre' tags from wiki rendering.
class Pre < AbstractLiteral
PRE_BLOCKS = "a|pre|code"
PRE_PATTERN = Regexp.new('<('+PRE_BLOCKS+')\b[^>]*?>.*?</\1>', Regexp::MULTILINE)
def self.pattern() PRE_PATTERN end
end
# A literal chunk that protects HTML tags from wiki rendering.
class Tags < AbstractLiteral
TAGS = "a|img|em|strong|div|span|table|td|th|ul|ol|li|dl|dt|dd"
TAGS_PATTERN = Regexp.new('<(?:'+TAGS+')[^>]*?>', Regexp::MULTILINE)
def self.pattern() TAGS_PATTERN end
end
end
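
A short sketch of the areas these patterns protect (hypothetical markup):
Literal::Pre::PRE_PATTERN =~ '<pre>a *literal* WikiWord</pre>'   # => 0, the whole block is protected
Literal::Tags::TAGS_PATTERN =~ '<img src="pic.png" />'           # => 0, the tag itself is protected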

View file

@ -1,28 +0,0 @@
require 'chunks/chunk'
# This chunk allows certain parts of a wiki page to be hidden from the
# rest of the rendering pipeline. It should be run at the beginning
# of the pipeline in `wiki_content.rb`.
#
# An example use of this chunk is to mark up double brackets or
# auto URI links:
# <nowiki>Here are [[double brackets]] and a URI: www.uri.org</nowiki>
#
# The contents of the chunks will not be processed by any other chunk
# so the `www.uri.org` and the double brackets will appear verbatim.
#
# Author: Mark Reid <mark at threewordslong dot com>
# Created: 8th June 2004
class NoWiki < Chunk::Abstract
NOWIKI_PATTERN = Regexp.new('<nowiki>(.*?)</nowiki>', Regexp::MULTILINE)
def self.pattern() NOWIKI_PATTERN end
attr_reader :plain_text
def initialize(match_data, content)
super
@plain_text = @unmask_text = match_data[1]
end
end
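
A minimal sketch of the nowiki escape (hypothetical content):
md = NoWiki::NOWIKI_PATTERN.match('<nowiki>Here are [[double brackets]]</nowiki>')
md[1]   # => "Here are [[double brackets]]"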

View file

@ -1,18 +0,0 @@
require 'test/unit'
class ChunkTest < Test::Unit::TestCase
# Asserts a number of tests for the given type and text.
def match(type, test_text, expected)
pattern = type.pattern
assert_match(pattern, test_text)
pattern =~ test_text # Previous assertion guarantees match
chunk = type.new($~)
# Test if requested parts are correct.
for method_sym, value in expected do
assert_respond_to(chunk, method_sym)
assert_equal(value, chunk.method(method_sym).call, "Checking value of '#{method_sym}'")
end
end
end

View file

@ -1,182 +0,0 @@
require 'chunks/chunk'
# This wiki chunk matches arbitrary URIs, using patterns from the Ruby URI modules.
# It parses out a variety of fields that could be used by renderers to format
# the links in various ways (shortening domain names, hiding email addresses)
# It matches email addresses and host.com.au domains without schemes (http://)
# but adds these on as required.
#
# The heuristic used to match a URI is designed to err on the side of caution.
# That is, it is more likely to not autolink a URI than it is to accidentally
# autolink something that is not a URI. The reason is that it is easier
# to force a URI link by prefixing 'http://' to it than it is to escape an
# incorrectly marked-up non-URI.
#
# I'm using a part of the [ISO 3166-1 Standard][iso3166] for country name suffixes.
# The generic names are from www.bnoack.com/data/countrycode2.html
# [iso3166]: http://geotags.com/iso3166/
class URIChunk < Chunk::Abstract
include URI::REGEXP::PATTERN
# this condition is to get rid of pesky warnings in tests
unless defined? URIChunk::INTERNET_URI_REGEXP
GENERIC = 'aero|biz|com|coop|edu|gov|info|int|mil|museum|name|net|org'
COUNTRY = 'ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|az|ba|bb|bd|be|' +
'bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cf|cd|cg|ch|ci|ck|cl|' +
'cm|cn|co|cr|cs|cu|cv|cx|cy|cz|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|fi|' +
'fj|fk|fm|fo|fr|fx|ga|gb|gd|ge|gf|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|' +
'hk|hm|hn|hr|ht|hu|id|ie|il|in|io|iq|ir|is|it|jm|jo|jp|ke|kg|kh|ki|km|kn|' +
'kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|mg|mh|mk|ml|mm|' +
'mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nt|' +
'nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|pt|pw|py|qa|re|ro|ru|rw|sa|sb|sc|' +
'sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|' +
'tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|um|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|' +
'ws|ye|yt|yu|za|zm|zr|zw'
# These are needed otherwise HOST will match almost anything
TLDS = "(?:#{GENERIC}|#{COUNTRY})"
# Redefine USERINFO so that it must have non-zero length
USERINFO = "(?:[#{UNRESERVED};:&=+$,]|#{ESCAPED})+"
# unreserved_no_ending = alphanum | mark, but URI_ENDING [)!] excluded
UNRESERVED_NO_ENDING = "-_.~*'(#{ALNUM}"
# this ensures that query or fragment do not end with URI_ENDING
# and enables us to use a much simpler self.pattern Regexp
# uric_no_ending = reserved | unreserved_no_ending | escaped
URIC_NO_ENDING = "(?:[#{UNRESERVED_NO_ENDING}#{RESERVED}]|#{ESCAPED})"
# query = *uric
QUERY = "#{URIC_NO_ENDING}*"
# fragment = *uric
FRAGMENT = "#{URIC_NO_ENDING}*"
# DOMLABEL is defined in the ruby uri library, TLDS is defined above
INTERNET_HOSTNAME = "(?:#{DOMLABEL}\\.)+#{TLDS}"
# Correct a typo bug in ruby 1.8.x lib/uri/common.rb
PORT = '\\d*'
INTERNET_URI =
"(?:(#{SCHEME}):/{0,2})?" + # Optional scheme: (\1)
"(?:(#{USERINFO})@)?" + # Optional userinfo@ (\2)
"(#{INTERNET_HOSTNAME})" + # Mandatory hostname (\3)
"(?::(#{PORT}))?" + # Optional :port (\4)
"(#{ABS_PATH})?" + # Optional absolute path (\5)
"(?:\\?(#{QUERY}))?" + # Optional ?query (\6)
"(?:\\#(#{FRAGMENT}))?" + # Optional #fragment (\7)
'(?=\.?(?:\s|\)|\z))' # ends only with optional dot + space or ")"
# or end of the string
SUSPICIOUS_PRECEDING_CHARACTER = '(!|\"\:|\"|\\\'|\]\()?' # any of !, ":, ", ', ](
INTERNET_URI_REGEXP =
Regexp.new(SUSPICIOUS_PRECEDING_CHARACTER + INTERNET_URI, Regexp::EXTENDED, 'N')
end
def URIChunk.pattern
INTERNET_URI_REGEXP
end
attr_reader :user, :host, :port, :path, :query, :fragment, :link_text
def self.apply_to(content)
content.gsub!( self.pattern ) do |matched_text|
chunk = self.new($~, content)
if chunk.avoid_autolinking?
# do not substitute nor register the chunk
matched_text
else
content.add_chunk(chunk)
chunk.mask
end
end
end
def initialize(match_data, content)
super
@link_text = match_data[0]
@suspicious_preceding_character = match_data[1]
@original_scheme, @user, @host, @port, @path, @query, @fragment = match_data[2..-1]
treat_trailing_character
@unmask_text = "<a href=\"#{uri}\">#{link_text}</a>"
end
def avoid_autolinking?
not @suspicious_preceding_character.nil?
end
def treat_trailing_character
# If the last character matched by the URI pattern is ! or ), it may be part of the markup,
# not the URI. We should handle it as such. It is possible to do this with a regexp, but
# much easier to do programmatically
last_char = @link_text[-1..-1]
if last_char == ')' or last_char == '!'
@trailing_punctuation = last_char
@link_text.chop!
[@original_scheme, @user, @host, @port, @path, @query, @fragment].compact.last.chop!
else
@trailing_punctuation = nil
end
end
def scheme
@original_scheme or (@user ? 'mailto' : 'http')
end
def scheme_delimiter
scheme == 'mailto' ? ':' : '://'
end
def user_delimiter
'@' unless @user.nil?
end
def port_delimiter
':' unless @port.nil?
end
def query_delimiter
'?' unless @query.nil?
end
def uri
[scheme, scheme_delimiter, user, user_delimiter, host, port_delimiter, port, path,
query_delimiter, query].compact.join
end
end
# URI with a mandatory scheme but a less restrictive hostname, like
# http://localhost:2500/blah.html
class LocalURIChunk < URIChunk
unless defined? LocalURIChunk::LOCAL_URI_REGEXP
# hostname can be just a simple word like 'localhost'
ANY_HOSTNAME = "(?:#{DOMLABEL}\\.)*#{TOPLABEL}\\.?"
# The basic URI expression as a string
# Scheme and hostname are mandatory
LOCAL_URI =
"(?:(#{SCHEME})://)+" + # Mandatory scheme:// (\1)
"(?:(#{USERINFO})@)?" + # Optional userinfo@ (\2)
"(#{ANY_HOSTNAME})" + # Mandatory hostname (\3)
"(?::(#{PORT}))?" + # Optional :port (\4)
"(#{ABS_PATH})?" + # Optional absolute path (\5)
"(?:\\?(#{QUERY}))?" + # Optional ?query (\6)
"(?:\\#(#{FRAGMENT}))?" + # Optional #fragment (\7)
'(?=\.?(?:\s|\)|\z))' # ends only with optional dot + space or ")"
# or end of the string
LOCAL_URI_REGEXP = Regexp.new(SUSPICIOUS_PRECEDING_CHARACTER + LOCAL_URI, Regexp::EXTENDED, 'N')
end
def LocalURIChunk.pattern
LOCAL_URI_REGEXP
end
end
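
A rough sketch of the autolinking heuristic: a bare hostname is picked up because 'com' is in the TLDS list, while an unknown suffix is left alone (hypothetical text):
URIChunk.pattern.match('see www.example.com for details')[0]    # => "www.example.com"
URIChunk.pattern.match('see www.example.notatld for details')   # => nil, not autolinked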

View file

@ -1,141 +0,0 @@
require 'wiki_words'
require 'chunks/chunk'
require 'chunks/wiki'
require 'cgi'
# Contains all the methods for finding and replacing wiki related links.
module WikiChunk
include Chunk
# A wiki reference is the top-level class for anything that refers to
# another wiki page.
class WikiReference < Chunk::Abstract
# Name of the referenced page
attr_reader :page_name
# the referenced page
def refpage
@content.web.pages[@page_name]
end
end
# A wiki link is the top-level class for links that refers to
# another wiki page.
class WikiLink < WikiReference
attr_reader :link_text, :link_type
def initialize(match_data, content)
super
@link_type = :show
end
def self.apply_to(content)
content.gsub!( self.pattern ) do |matched_text|
chunk = self.new($~, content)
if chunk.textile_url?
# do not substitute
matched_text
else
content.add_chunk(chunk)
chunk.mask
end
end
end
# the referenced page
def refpage
@content.web.pages[@page_name]
end
def textile_url?
not @textile_link_suffix.nil?
end
end
# This chunk matches a WikiWord. WikiWords can be escaped
# by prepending a '\'. When this is the case, the +escaped_text+
# method will return the WikiWord instead of the usual +nil+.
# The +page_name+ method returns the matched WikiWord.
class Word < WikiLink
attr_reader :escaped_text
unless defined? WIKI_WORD
WIKI_WORD = Regexp.new('(":)?(\\\\)?(' + WikiWords::WIKI_WORD_PATTERN + ')\b', 0, "utf-8")
end
def self.pattern
WIKI_WORD
end
def initialize(match_data, content)
super
@textile_link_suffix, @escape, @page_name = match_data[1..3]
if @escape
@unmask_mode = :escape
@escaped_text = @page_name
else
@escaped_text = nil
end
@link_text = WikiWords.separate(@page_name)
@unmask_text = (@escaped_text || @content.page_link(@page_name, @link_text, @link_type))
end
end
# This chunk handles [[bracketed wiki words]] and
# [[AliasedWords|aliased wiki words]]. The first part of an
# aliased wiki word must be a WikiWord. If the WikiWord
# is aliased, the +link_text+ field will contain the
# alias, otherwise +link_text+ will contain the entire
# contents within the double brackets.
#
# NOTE: This chunk must be tested before WikiWord since
# a WikiWord can be a substring of a WikiLink.
class Link < WikiLink
unless defined? WIKI_LINK
WIKI_LINK = /(":)?\[\[\s*([^\]\s][^\]]+?)\s*\]\]/
LINK_TYPE_SEPARATION = Regexp.new('^(.+):((file)|(pic))$', 0, 'utf-8')
ALIAS_SEPARATION = Regexp.new('^(.+)\|(.+)$', 0, 'utf-8')
end
def self.pattern() WIKI_LINK end
def initialize(match_data, content)
super
@textile_link_suffix, @page_name = match_data[1..2]
@link_text = @page_name
separate_link_type
separate_alias
@unmask_text = @content.page_link(@page_name, @link_text, @link_type)
end
private
# If the link within the brackets has the form [[filename:file]] or [[filename:pic]],
# it is a link to a file or a picture
def separate_link_type
link_type_match = LINK_TYPE_SEPARATION.match(@page_name)
if link_type_match
@link_text = @page_name = link_type_match[1]
@link_type = link_type_match[2..3].compact[0].to_sym
end
end
# The link text may differ from the page name; such a link looks like [[actual page|link text]]
def separate_alias
alias_match = ALIAS_SEPARATION.match(@page_name)
if alias_match
@page_name, @link_text = alias_match[1..2]
end
# note that [[filename|link text:file]] is also supported
end
end
end
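
A small sketch of the two patterns defined above (hypothetical page names):
WikiChunk::Word.pattern =~ '\EscapedWikiWord'   # matches; $2 is the backslash, so the word is escaped
md = WikiChunk::Link.pattern.match('[[HomePage|the front page]]')
md[2]   # => "HomePage|the front page", split later by ALIAS_SEPARATION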

View file

@ -1,58 +0,0 @@
require 'fileutils'
require 'instiki_errors'
class FileYard
attr_reader :files_path
def initialize(files_path, max_upload_size)
@files_path, @max_upload_size = files_path, max_upload_size
FileUtils.mkdir_p(@files_path) unless File.exist?(@files_path)
@files = Dir["#{@files_path}/*"].collect{|path| File.basename(path) if File.file?(path) }.compact
end
def upload_file(name, io)
sanitize_file_name(name)
if io.kind_of?(Tempfile)
io.close
check_upload_size(io.size)
File.chmod(0600, file_path(name)) if File.exists? file_path(name)
FileUtils.mv(io.path, file_path(name))
else
content = io.read
check_upload_size(content.length)
File.open(file_path(name), 'wb') { |f| f.write(content) }
end
# just in case, restrict read access and prohibit write access to the uploaded file
FileUtils.chmod(0440, file_path(name))
end
def files
Dir["#{files_path}/*"].collect{|path| File.basename(path) if File.file?(path)}.compact
end
def has_file?(name)
files.include?(name)
end
def file_path(name)
"#{files_path}/#{name}"
end
SANE_FILE_NAME = /\A[a-zA-Z0-9\-_\. ]{1,255}\z/
def sanitize_file_name(name)
unless name =~ SANE_FILE_NAME and name != '.' and name != '..'
raise Instiki::ValidationError.new("Invalid file name: '#{name}'.\n" +
"Only latin characters, digits, dots, underscores, dashes and spaces are accepted.")
end
end
def check_upload_size(actual_upload_size)
if actual_upload_size > @max_upload_size.kilobytes
raise Instiki::ValidationError.new("Uploaded file size (#{actual_upload_size / 1024} " +
"kbytes) exceeds the maximum (#{@max_upload_size} kbytes) set for this wiki")
end
end
end
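
A rough usage sketch, assuming a hypothetical storage directory and a 100 KB limit (the kilobytes helper used by check_upload_size comes from ActiveSupport, loaded by the Rails app):
yard = FileYard.new('/tmp/wiki_files', 100)
yard.file_path('notes.txt')          # => "/tmp/wiki_files/notes.txt"
yard.has_file?('notes.txt')          # => false until something is uploaded
yard.check_upload_size(500 * 1024)   # raises Instiki::ValidationError, over the 100 KB limit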

View file

@ -1,120 +0,0 @@
require 'date'
require 'page_lock'
require 'revision'
require 'wiki_words'
require 'chunks/wiki'
class Page
include PageLock
attr_reader :name, :web
attr_accessor :revisions
def initialize(web, name)
raise 'nil web' if web.nil?
raise 'nil name' if name.nil?
@web, @name, @revisions = web, name, []
end
def revise(content, created_at, author)
if not @revisions.empty? and content == @revisions.last.content
raise Instiki::ValidationError.new(
"You have tried to save page '#{name}' without changing its content")
end
# Try to render the content to make sure that the markup engine can take it,
# before adding a revision to the page
Revision.new(self, @revisions.length, content, created_at, author).force_rendering
# A user may change a page, look at it and make some more changes - several times.
# To avoid recording every such iteration as a new revision: if the previous revision
# was made by the same author not more than 30 minutes ago, update the last revision
# instead of creating a new one
if !@revisions.empty? && continous_revision?(created_at, author)
@revisions.last.created_at = created_at
@revisions.last.content = content
@revisions.last.clear_display_cache
else
@revisions << Revision.new(self, @revisions.length, content, created_at, author)
end
self.revisions.last.force_rendering
# at this point the page may not be inserted in the web yet, and therefore
# references to the page itself are rendered as "unresolved". Clearing the cache allows
# the page to re-render itself once again, hopefully _after_ it is inserted in the web
self.revisions.last.clear_display_cache
@web.refresh_pages_with_references(@name) if @revisions.length == 1
self
end
def rollback(revision_number, created_at, author_ip = nil)
roll_back_revision = @revisions[revision_number].dup
revise(roll_back_revision.content, created_at, Author.new(roll_back_revision.author, author_ip))
end
def revisions?
revisions.length > 1
end
def revised_on
created_on
end
def in_category?(cat)
cat.nil? || cat.empty? || categories.include?(cat)
end
def categories
display_content.find_chunks(Category).map { |cat| cat.list }.flatten
end
def authors
revisions.collect { |rev| rev.author }
end
def references
@web.select.pages_that_reference(name)
end
def linked_from
@web.select.pages_that_link_to(name)
end
def included_from
@web.select.pages_that_include(name)
end
# Returns the original wiki-word name as separate words, so "MyPage" becomes "My Page".
def plain_name
@web.brackets_only ? name : WikiWords.separate(name)
end
# used to build chunk ids.
def id
@id ||= name.unpack('H*').first
end
def link(options = {})
@web.make_link(name, nil, options)
end
def author_link(options = {})
@web.make_link(author, nil, options)
end
private
def continous_revision?(created_at, author)
@revisions.last.author == author && @revisions.last.created_at + 30.minutes > created_at
end
# Forward method calls to the current revision, so the page responds to all revision calls
def method_missing(method_symbol)
revisions.last.send(method_symbol)
end
end
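
A rough sketch of the 30-minute collapsing behaviour described above; it assumes web is an existing Web instance (revise renders the content, so the chunk/engine pipeline and ActiveSupport's 30.minutes helper must be available):
page = Page.new(web, 'SandBox')
page.revise('first draft',  Time.now,           Author.new('Joe', '1.2.3.4'))
page.revise('second draft', Time.now + 10 * 60, Author.new('Joe', '1.2.3.4'))
page.revisions.length   # => 1; the quick follow-up edit updated the existing revision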

View file

@ -1,23 +0,0 @@
# Contains all the lock methods to be mixed in with the page
module PageLock
LOCKING_PERIOD = 30 * 60 # 30 minutes
attr_reader :locked_by
def lock(time, locked_by)
@locked_at, @locked_by = time, locked_by
end
def lock_duration(time)
((time - @locked_at) / 60).to_i unless @locked_at.nil?
end
def unlock
@locked_at = nil
end
def locked?(comparison_time)
@locked_at + LOCKING_PERIOD > comparison_time unless @locked_at.nil?
end
end
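
A small sketch of the locking window, assuming page is a Page instance (PageLock is mixed into Page):
page.lock(Time.now, 'DavidHeinemeierHansson')
page.locked_by                           # => "DavidHeinemeierHansson"
page.locked?(Time.now + 10 * 60)         # => true, still inside the 30-minute window
page.lock_duration(Time.now + 10 * 60)   # => 10 (minutes since the lock was taken)
page.locked?(Time.now + 40 * 60)         # => false, the lock has expired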

View file

@ -1,89 +0,0 @@
# Container for a set of pages with methods for manipulation.
class PageSet < Array
attr_reader :web
def initialize(web, pages = nil, condition = nil)
@web = web
# if pages is not specified, make a list of all pages in the web
if pages.nil?
super(web.pages.values)
# otherwise use specified pages and condition to produce a set of pages
elsif condition.nil?
super(pages)
else
super(pages.select { |page| condition[page] })
end
end
def most_recent_revision
self.map { |page| page.created_at }.max || Time.at(0)
end
def by_name
PageSet.new(@web, sort_by { |page| page.name })
end
alias :sort :by_name
def by_revision
PageSet.new(@web, sort_by { |page| page.created_at }).reverse
end
def pages_that_reference(page_name)
self.select { |page| page.wiki_references.include?(page_name) }
end
def pages_that_link_to(page_name)
self.select { |page| page.wiki_words.include?(page_name) }
end
def pages_that_include(page_name)
self.select { |page| page.wiki_includes.include?(page_name) }
end
def pages_authored_by(author)
self.select { |page| page.authors.include?(author) }
end
def characters
self.inject(0) { |chars,page| chars += page.content.size }
end
# Returns all the orphaned pages in this page set. That is,
# pages in this set for which there is no reference in the web.
# The HomePage and author pages are always assumed to have
# references and so cannot be orphans.
# Pages that refer only to themselves and have no links from outside are orphans.
def orphaned_pages
never_orphans = web.select.authors + ['HomePage']
self.select { |page|
if never_orphans.include? page.name
false
else
references = pages_that_reference(page.name)
references.empty? or references == [page]
end
}
end
# Returns all the wiki words in this page set for which
# there are no pages in this page set's web
def wanted_pages
wiki_words - web.select.names
end
def names
self.map { |page| page.name }
end
def wiki_words
self.inject([]) { |wiki_words, page| wiki_words << page.wiki_words }.flatten.uniq
end
def authors
self.inject([]) { |authors, page| authors << page.authors }.flatten.uniq.sort
end
end
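
A short sketch of typical queries, assuming web is a Web instance with some pages:
all_pages = web.select                              # a PageSet over every page in the web
all_pages.pages_that_reference('HomePage').size     # how many pages refer to HomePage
all_pages.orphaned_pages.map { |page| page.name }   # orphans, by name
all_pages.by_revision.first                         # the most recently revised page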

View file

@ -1,127 +0,0 @@
require 'diff'
require 'wiki_content'
require 'chunks/wiki'
require 'date'
require 'author'
require 'page'
class Revision
attr_accessor :page, :number, :content, :created_at, :author
def initialize(page, number, content, created_at, author)
@page, @number, @created_at, @author = page, number, created_at, author
self.content = content
@display_cache = nil
end
def created_on
Date.new(@created_at.year, @created_at.mon, @created_at.day)
end
def pretty_created_at
# Must use DateTime because Time doesn't support %e on at least some platforms
DateTime.new(
@created_at.year, @created_at.mon, @created_at.day, @created_at.hour, @created_at.min
).strftime "%B %e, %Y %H:%M"
end
# TODO: drop next_revision, previous_revision and number from here - unused code
def next_revision
page.revisions[number + 1]
end
def previous_revision
number > 0 ? page.revisions[number - 1] : nil
end
# Returns an array of all the WikiIncludes present in the content of this revision.
def wiki_includes
unless @wiki_includes_cache
chunks = display_content.find_chunks(Include)
@wiki_includes_cache = chunks.map { |c| ( c.escaped? ? nil : c.page_name ) }.compact.uniq
end
@wiki_includes_cache
end
# Returns an array of all the WikiReferences present in the content of this revision.
def wiki_references
unless @wiki_references_cache
chunks = display_content.find_chunks(WikiChunk::WikiReference)
@wiki_references_cache = chunks.map { |c| ( c.escaped? ? nil : c.page_name ) }.compact.uniq
end
@wiki_references_cache
end
# Returns an array of all the WikiWords present in the content of this revision.
def wiki_words
unless @wiki_words_cache
wiki_chunks = display_content.find_chunks(WikiChunk::WikiLink)
@wiki_words_cache = wiki_chunks.map { |c| ( c.escaped? ? nil : c.page_name ) }.compact.uniq
end
@wiki_words_cache
end
# Returns an array of all the WikiWords present in the content of this revision
# that already exist as pages in the web.
def existing_pages
wiki_words.select { |wiki_word| page.web.pages[wiki_word] }
end
# Returns an array of all the WikiWords present in the content of this revision
# that *don't* already exist as pages in the web.
def unexisting_pages
wiki_words - existing_pages
end
# Explicit check for new type of display cache with chunks_by_type method.
# Ensures new version works with older snapshots.
def display_content
unless @display_cache && @display_cache.respond_to?(:chunks_by_type)
@display_cache = WikiContent.new(self)
@display_cache.render!
end
@display_cache
end
def display_diff
previous_revision ? HTMLDiff.diff(previous_revision.display_content, display_content) : display_content
end
def clear_display_cache
@wiki_words_cache = @published_cache = @display_cache = @wiki_includes_cache =
@wiki_references_cache = nil
end
def display_published
unless @published_cache && @published_cache.respond_to?(:chunks_by_type)
@published_cache = WikiContent.new(self, {:mode => :publish})
@published_cache.render!
end
@published_cache
end
def display_content_for_export
WikiContent.new(self, {:mode => :export} ).render!
end
def force_rendering
begin
display_content.render!
rescue => e
ApplicationController.logger.error "Failed rendering page #{@name}"
ApplicationController.logger.error e
message = e.message
# substitute content with an error message
self.content = <<-EOL
<p>Markup engine has failed to render this page, raising the following error:</p>
<p>#{message}</p>
<pre>#{self.content}</pre>
EOL
clear_display_cache
raise e
end
end
end

View file

@ -1,184 +0,0 @@
require 'cgi'
require 'page'
require 'page_set'
require 'wiki_words'
require 'zip/zip'
class Web
attr_accessor :name, :password, :safe_mode, :pages
attr_accessor :additional_style, :allow_uploads, :published
attr_reader :address
# there are getters for all these attributes, too
attr_writer :markup, :color, :brackets_only, :count_pages, :max_upload_size
def initialize(parent_wiki, name, address, password = nil)
self.address = address
@wiki, @name, @password = parent_wiki, name, password
set_compatible_defaults
@pages = {}
@allow_uploads = true
@additional_style = nil
@published = false
@count_pages = false
end
# Explicitly sets value of some web attributes to defaults, unless they are already set
def set_compatible_defaults
@markup = markup()
@color = color()
@safe_mode = safe_mode()
@brackets_only = brackets_only()
@max_upload_size = max_upload_size()
@wiki = wiki
end
# All below getters know their default values. This is necessary to ensure compatibility with
# 0.9 storages, where they were not defined.
def brackets_only() @brackets_only || false end
def color() @color ||= '008B26' end
def count_pages() @count_pages || false end
def markup() @markup ||= :textile end
def max_upload_size() @max_upload_size || 100; end
def wiki() @wiki ||= WikiService.instance; end
def add_page(name, content, created_at, author)
page = Page.new(self, name)
page.revise(content, created_at, author)
@pages[page.name] = page
end
def address=(the_address)
if the_address != CGI.escape(the_address)
raise Instiki::ValidationError.new('Web name should contain only valid URI characters')
end
@address = the_address
end
def authors
select.authors
end
def categories
select.map { |page| page.categories }.flatten.uniq.sort
end
def has_page?(name)
pages[name]
end
def has_file?(name)
wiki.file_yard(self).has_file?(name)
end
def make_file_link(mode, name, text, base_url)
link = CGI.escape(name)
case mode
when :export
if has_file?(name) then "<a class=\"existingWikiWord\" href=\"#{link}.html\">#{text}</a>"
else "<span class=\"newWikiWord\">#{text}</span>" end
when :publish
if has_file?(name) then "<a class=\"existingWikiWord\" href=\"#{base_url}/published/#{link}\">#{text}</a>"
else "<span class=\"newWikiWord\">#{text}</span>" end
else
if has_file?(name)
"<a class=\"existingWikiWord\" href=\"#{base_url}/file/#{link}\">#{text}</a>"
else
"<span class=\"newWikiWord\">#{text}<a href=\"#{base_url}/file/#{link}\">?</a></span>"
end
end
end
# Create a link for the given page name and link text based
# on the render mode in options and whether the page exists
# in this web.
# The links are relative, and will work only if displayed on another WikiPage.
# It should not be used in menus, templates and such; use the link_to_page helper instead
def make_link(name, text = nil, options = {})
text = CGI.escapeHTML(text || WikiWords.separate(name))
mode = options[:mode] || :show
base_url = options[:base_url] || '..'
link_type = options[:link_type] || :show
case link_type.to_sym
when :show
make_page_link(mode, name, text, base_url)
when :file
make_file_link(mode, name, text, base_url)
when :pic
make_pic_link(mode, name, text, base_url)
else
raise "Unknown link type: #{link_type}"
end
end
def make_page_link(mode, name, text, base_url)
link = CGI.escape(name)
case mode.to_sym
when :export
if has_page?(name) then %{<a class="existingWikiWord" href="#{link}.html">#{text}</a>}
else %{<span class="newWikiWord">#{text}</span>} end
when :publish
if has_page?(name) then %{<a class="existingWikiWord" href="#{base_url}/published/#{link}">#{text}</a>}
else %{<span class="newWikiWord">#{text}</span>} end
else
if has_page?(name)
%{<a class="existingWikiWord" href="#{base_url}/show/#{link}">#{text}</a>}
else
%{<span class="newWikiWord">#{text}<a href="#{base_url}/show/#{link}">?</a></span>}
end
end
end
def make_pic_link(mode, name, text, base_url)
link = CGI.escape(name)
case mode.to_sym
when :export
if has_file?(name) then %{<img alt="#{text}" src="#{link}" />}
else %{<img alt="#{text}" src="no image" />} end
when :publish
if has_file?(name) then %{<img alt="#{text}" src="#{link}" />}
else %{<span class="newWikiWord">#{text}</span>} end
else
if has_file?(name) then %{<img alt="#{text}" src="#{base_url}/pic/#{link}" />}
else %{<span class="newWikiWord">#{text}<a href="#{base_url}/pic/#{link}">?</a></span>} end
end
end
# Clears the display cache for all the pages that reference the given page
def refresh_pages_with_references(page_name)
select.pages_that_reference(page_name).each { |page|
page.revisions.each { |revision| revision.clear_display_cache }
}
end
def refresh_revisions
select.each { |page| page.revisions.each { |revision| revision.clear_display_cache } }
end
def remove_pages(pages_to_be_removed)
pages.delete_if { |page_name, page| pages_to_be_removed.include?(page) }
end
def revised_on
select.most_recent_revision
end
def select(&condition)
PageSet.new(self, @pages.values, condition)
end
private
# Returns an array of all the wiki words in any current revision
def wiki_words
pages.values.inject([]) { |wiki_words, page| wiki_words << page.wiki_words }.flatten.uniq
end
# Returns an array of all the page names on this web
def page_names
pages.keys
end
end
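
A sketch of the link modes handled above, assuming web already contains a HomePage:
web.make_link('HomePage')
# => <a class="existingWikiWord" href="../show/HomePage">Home Page</a>
web.make_link('NoSuchPage')
# => <span class="newWikiWord">No Such Page<a href="../show/NoSuchPage">?</a></span>
web.make_link('HomePage', nil, :mode => :publish, :base_url => '/wiki')
# => <a class="existingWikiWord" href="/wiki/published/HomePage">Home Page</a>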

View file

@ -1,205 +0,0 @@
require 'cgi'
require 'chunks/engines'
require 'chunks/category'
require 'chunks/include'
require 'chunks/wiki'
require 'chunks/literal'
require 'chunks/uri'
require 'chunks/nowiki'
# Wiki content is just a string that can process itself with a chain of
# actions. The actions can modify wiki content so that certain parts of
# it are protected from being rendered by later actions.
#
# When wiki content is rendered, it can be interrogated to find out
# which chunks were rendered. This means things like categories and wiki
# links can be determined.
#
# Exactly how wiki content is rendered is determined by a number of
# settings that are optionally passed in to a constructor. The current
# options are:
# * :engine
# => The structural markup engine to use (Textile, Markdown, RDoc)
# * :engine_opts
# => A list of options to pass to the markup engines (safe modes, etc)
# * :pre_engine_actions
# => A list of render actions or chunks to be processed before the
# markup engine is applied. By default this is:
# Category, Include, URIChunk, WikiChunk::Link, WikiChunk::Word
# * :post_engine_actions
# => A list of render actions or chunks to apply after the markup
# engine. By default these are:
# Literal::Pre, Literal::Tags
# * :mode
# => How should the content be rendered? For normal display (:show),
# publishing (:publish) or export (:export)?
module ChunkManager
attr_reader :chunks_by_type, :chunks_by_id, :chunks, :chunk_id
ACTIVE_CHUNKS = [ NoWiki, Category, WikiChunk::Link, URIChunk, LocalURIChunk,
WikiChunk::Word ]
HIDE_CHUNKS = [ Literal::Pre, Literal::Tags ]
MASK_RE = {
ACTIVE_CHUNKS => Chunk::Abstract.mask_re(ACTIVE_CHUNKS),
HIDE_CHUNKS => Chunk::Abstract.mask_re(HIDE_CHUNKS)
}
def init_chunk_manager
@chunks_by_type = Hash.new
Chunk::Abstract::derivatives.each{|chunk_type|
@chunks_by_type[chunk_type] = Array.new
}
@chunks_by_id = Hash.new
@chunks = []
@chunk_id = 0
end
def add_chunk(c)
@chunks_by_type[c.class] << c
@chunks_by_id[c.id] = c
@chunks << c
@chunk_id += 1
end
def delete_chunk(c)
@chunks_by_type[c.class].delete(c)
@chunks_by_id.delete(c.id)
@chunks.delete(c)
end
def merge_chunks(other)
other.chunks.each{|c| add_chunk(c)}
end
def scan_chunkid(text)
text.scan(MASK_RE[ACTIVE_CHUNKS]){|a| yield a[0] }
end
def find_chunks(chunk_type)
@chunks.select { |chunk| chunk.kind_of?(chunk_type) and chunk.rendered? }
end
# for testing and WikiContentStub; we need a page_id even if we have no page
def page_id
0
end
end
# A simplified version of WikiContent. Useful to avoid recursion problems in
# WikiContent.new
class WikiContentStub < String
attr_reader :options
include ChunkManager
def initialize(content, options)
super(content)
@options = options
init_chunk_manager
end
# Detects the mask strings contained in the text of chunks of type chunk_types
# and yields the corresponding chunk ids
# example: content = "chunk123categorychunk <pre>chunk456categorychunk</pre>"
# inside_chunks(Literal::Pre) ==> yield 456
def inside_chunks(chunk_types)
chunk_types.each{|chunk_type| chunk_type.apply_to(self) }
chunk_types.each{|chunk_type| @chunks_by_type[chunk_type].each{|hide_chunk|
scan_chunkid(hide_chunk.text){|id| yield id }
}
}
end
end
class WikiContent < String
include ChunkManager
DEFAULT_OPTS = {
:active_chunks => ACTIVE_CHUNKS,
:engine => Engines::Textile,
:engine_opts => [],
:mode => :show
}.freeze
attr_reader :web, :options, :revision, :not_rendered, :pre_rendered
# Create a new wiki content string from the given revision.
# The options are explained at the top of this file.
def initialize(revision, options = {})
@revision = revision
@web = @revision.page.web
@options = DEFAULT_OPTS.dup.merge(options)
@options[:engine] = Engines::MAP[@web.markup]
@options[:engine_opts] = [:filter_html, :filter_styles] if @web.safe_mode
@options[:active_chunks] = (ACTIVE_CHUNKS - [WikiChunk::Word] ) if @web.brackets_only
@not_rendered = @pre_rendered = nil
super(@revision.content)
init_chunk_manager
build_chunks
@not_rendered = String.new(self)
end
# Call @web.make_link using current options.
def page_link(name, text, link_type)
@options[:link_type] = (link_type || :show)
@web.make_link(name, text, @options)
end
def build_chunks
# create and mask Includes and "active_chunks" chunks
Include.apply_to(self)
@options[:active_chunks].each{|chunk_type| chunk_type.apply_to(self)}
# Handle hiding contexts like "pre" and "code", etc.
# The markup (Textile, RDoc, etc.) can produce such contexts with its own syntax.
# To detect them, we work on a copy of the content.
# The copy is rendered and used to detect the chunks that are inside a protecting context.
# These chunks are then reverted on the original content string.
copy = WikiContentStub.new(self, @options)
@options[:engine].apply_to(copy)
copy.inside_chunks(HIDE_CHUNKS) do |id|
@chunks_by_id[id].revert
end
end
def pre_render!
unless @pre_rendered
@chunks_by_type[Include].each{|chunk| chunk.unmask }
@pre_rendered = String.new(self)
end
@pre_rendered
end
def render!
pre_render!
@options[:engine].apply_to(self)
# unmask in one go. $~[1] is the chunk id
gsub!(MASK_RE[ACTIVE_CHUNKS]){
if chunk = @chunks_by_id[$~[1]]
chunk.unmask_text
# if we match a chunkmask that existed in the original content string
# just keep it as it is
else
$~[0]
end}
self
end
def page_name
@revision.page.name
end
def page_id
@revision.page.id
end
end
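
A rough rendering sketch, assuming page is a Page that already has at least one revision:
content = WikiContent.new(page.revisions.last)             # defaults to :mode => :show
html    = content.render!                                  # the rendered HTML (WikiContent is a String)
content.find_chunks(Category).map { |c| c.list }.flatten   # categories discovered while rendering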

View file

@ -1,229 +0,0 @@
require 'open-uri'
require 'yaml'
require 'madeleine'
require 'madeleine/automatic'
require 'madeleine/zmarshal'
require 'web'
require 'page'
require 'author'
require 'file_yard'
require 'instiki_errors'
module AbstractWikiService
attr_reader :webs, :system
def authenticate(password)
# system['password'] variant is for compatibility with storages from older versions
password == (@system[:password] || @system['password'] || 'instiki')
end
def create_web(name, address, password = nil)
@webs[address] = Web.new(self, name, address, password) unless @webs[address]
end
def delete_web(address)
@webs[address] = nil
end
def file_yard(web)
raise "Web #{@web.name} does not belong to this wiki service" unless @webs.values.include?(web)
# TODO cache FileYards
FileYard.new("#{self.storage_path}/#{web.address}", web.max_upload_size)
end
def init_wiki_service
@webs = {}
@system = {}
end
def edit_web(old_address, new_address, name, markup, color, additional_style, safe_mode = false,
password = nil, published = false, brackets_only = false, count_pages = false,
allow_uploads = true, max_upload_size = nil)
if not @webs.key? old_address
raise Instiki::ValidationError.new("Web with address '#{old_address}' does not exist")
end
if old_address != new_address
if @webs.key? new_address
raise Instiki::ValidationError.new("There is already a web with address '#{new_address}'")
end
@webs[new_address] = @webs[old_address]
@webs.delete(old_address)
@webs[new_address].address = new_address
end
web = @webs[new_address]
web.refresh_revisions if settings_changed?(web, markup, safe_mode, brackets_only)
web.name, web.markup, web.color, web.additional_style, web.safe_mode =
name, markup, color, additional_style, safe_mode
web.password, web.published, web.brackets_only, web.count_pages =
password, published, brackets_only, count_pages
web.allow_uploads, web.max_upload_size = allow_uploads, max_upload_size.to_i
end
def read_page(web_address, page_name)
ApplicationController.logger.debug "Reading page '#{page_name}' from web '#{web_address}'"
web = @webs[web_address]
if web.nil?
ApplicationController.logger.debug "Web '#{web_address}' not found"
return nil
else
page = web.pages[page_name]
ApplicationController.logger.debug "Page '#{page_name}' #{page.nil? ? 'not' : ''} found"
return page
end
end
def remove_orphaned_pages(web_address)
@webs[web_address].remove_pages(@webs[web_address].select.orphaned_pages)
end
def revise_page(web_address, page_name, content, revised_on, author)
page = read_page(web_address, page_name)
page.revise(content, revised_on, author)
end
def rollback_page(web_address, page_name, revision_number, created_at, author_id = nil)
page = read_page(web_address, page_name)
page.rollback(revision_number, created_at, author_id)
end
def setup(password, web_name, web_address)
@system[:password] = password
create_web(web_name, web_address)
end
def setup?
not (@webs.empty?)
end
def storage_path
self.class.storage_path
end
def write_page(web_address, page_name, content, written_on, author)
@webs[web_address].add_page(page_name, content, written_on, author)
end
private
def settings_changed?(web, markup, safe_mode, brackets_only)
web.markup != markup ||
web.safe_mode != safe_mode ||
web.brackets_only != brackets_only
end
end
class WikiService
include AbstractWikiService
include Madeleine::Automatic::Interceptor
# These methods do not change the state of persistent objects, and
# should not be logged by Madeleine
automatic_read_only :authenticate, :read_page, :setup?, :webs, :storage_path, :file_yard
@@storage_path = './storage/'
class << self
def storage_path=(storage_path)
@@storage_path = storage_path
end
def storage_path
@@storage_path
end
def clean_storage
MadeleineServer.clean_storage(self)
end
def instance
@madeleine ||= MadeleineServer.new(self)
@system = @madeleine.system
return @system
end
def snapshot
@madeleine.snapshot
end
end
def initialize
init_wiki_service
end
end
class MadeleineServer
attr_reader :storage_path
# Clears all the command_log and snapshot files located in the storage directory, so the
# database is essentially dropped and recreated as blank
def self.clean_storage(service)
begin
Dir.foreach(service.storage_path) do |file|
if file =~ /(command_log|snapshot)$/
File.delete(File.join(service.storage_path, file))
end
end
rescue
Dir.mkdir(service.storage_path)
end
end
def initialize(service)
@storage_path = service.storage_path
@server = Madeleine::Automatic::AutomaticSnapshotMadeleine.new(service.storage_path,
Madeleine::ZMarshal.new) {
service.new
}
start_snapshot_thread
end
def command_log_present?
not Dir[storage_path + '/*.command_log'].empty?
end
def snapshot
@server.take_snapshot
end
def start_snapshot_thread
Thread.new(@server) {
hours_since_last_snapshot = 0
while true
begin
hours_since_last_snapshot += 1
# Take a snapshot if there is a command log, or 24 hours
# have passed since the last snapshot
if command_log_present? or hours_since_last_snapshot >= 24
ActionController::Base.logger.info "[#{Time.now.strftime('%Y-%m-%d %H:%M:%S')}] " +
'Taking a Madeleine snapshot'
snapshot
hours_since_last_snapshot = 0
end
sleep(1.hour)
rescue => e
ActionController::Base.logger.error(e)
# wait for a minute (so as not to flood the log with the same error)
# and go back into the loop, to keep trying
sleep(1.minute)
ActionController::Base.logger.info("Retrying to save a snapshot")
end
end
}
end
def system
@server.system
end
end
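
A rough first-run sketch against the Madeleine-backed singleton, with hypothetical names and password (it assumes the surrounding Rails app, since read_page and write_page log through ApplicationController):
WikiService.storage_path = './storage/'
wiki = WikiService.instance
wiki.setup('a-password', 'My Wiki', 'mywiki') unless wiki.setup?
wiki.write_page('mywiki', 'HomePage', 'Welcome to My Wiki.', Time.now, Author.new('Admin', '127.0.0.1'))
wiki.read_page('mywiki', 'HomePage').revisions.length   # => 1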

View file

@ -1,23 +0,0 @@
# Contains all the methods for finding and replacing wiki words
module WikiWords
# In order of appearance: Latin, Greek, Cyrillic, Armenian
I18N_HIGHER_CASE_LETTERS =
"ÀÁÂÃÄÅĀĄĂÆÇĆČĈĊĎĐÈÉÊËĒĘĚĔĖĜĞĠĢĤĦÌÍÎÏĪĨĬĮİIJĴĶŁĽĹĻĿÑŃŇŅŊÒÓÔÕÖØŌŐŎŒŔŘŖŚŠŞŜȘŤŢŦȚÙÚÛÜŪŮŰŬŨŲŴÝŶŸŹŽŻ" +
"ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ" +
"ΆΈΉΊΌΎΏѠѢѤѦѨѪѬѮѰѲѴѶѸѺѼѾҀҊҌҎҐҒҔҖҘҚҜҞҠҢҤҦҨҪҬҮҰҲҴҶҸҺҼҾӁӃӅӇӉӋӍӐӒӔӖӘӚӜӞӠӢӤӦӨӪӬӮӰӲӴӸЖ" +
"ԱԲԳԴԵԶԷԸԹԺԻԼԽԾԿՀՁՂՃՄՅՆՇՈՉՊՋՌՍՎՏՐՑՒՓՔՕՖ"
I18N_LOWER_CASE_LETTERS =
"àáâãäåāąăæçćčĉċďđèéêëēęěĕėƒĝğġģĥħìíîïīĩĭįıijĵķĸłľĺļŀñńňņʼnŋòóôõöøōőœŕřŗśšşŝșťţŧțùúûüūůűŭũųŵýÿŷžżźÞþßſ" +
"άέήίΰαβγδεζηθικλμνξοπρστυφχψωϊϋόύώ" +
"абвгдежзийклмнопрстуфхцчшщъыьэюяёђѓєѕіїјљћќѝўџѡѣѥѧѩѫѭѯѱѳѵѷѹѻѽѿҁҋҍҏґғҕҗҙқҝҟҡңҥҧҩҫҭүұҳҵҷҹһҽҿӀӂӄӆӈӊӌӎӑӓӕӗәӛӝӟӡӣӥӧөӫӭӯӱӳӵӹ" +
"աբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆև"
WIKI_WORD_PATTERN = '[A-Z' + I18N_HIGHER_CASE_LETTERS + '][a-z' + I18N_LOWER_CASE_LETTERS + ']+[A-Z' + I18N_HIGHER_CASE_LETTERS + ']\w+'
CAMEL_CASED_WORD_BORDER = /([a-z#{I18N_LOWER_CASE_LETTERS}])([A-Z#{I18N_HIGHER_CASE_LETTERS}])/u
def self.separate(wiki_word)
wiki_word.gsub(CAMEL_CASED_WORD_BORDER, '\1 \2')
end
end
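
A minimal illustration of the camel-case splitting above:
WikiWords.separate('MyWikiPage')   # => "My Wiki Page"
WikiWords.separate('HomePage')     # => "Home Page"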