# couchrest_model/lib/couchrest/core/database.rb
require 'cgi'
require "base64"
module CouchRest
  class Database
attr_reader :server, :host, :name, :root
attr_accessor :bulk_save_cache_limit
2008-09-08 00:28:20 +02:00
2008-10-14 10:07:48 +02:00
# Create a CouchRest::Database adapter for the supplied CouchRest::Server
# and database name.
#
2008-09-30 08:39:57 +02:00
# ==== Parameters
# server<CouchRest::Server>:: database host
# name<String>:: database name
#
def initialize server, name
2008-03-18 19:37:10 +01:00
@name = name
@server = server
@host = server.uri
2008-03-19 16:57:20 +01:00
@root = "#{host}/#{name}"
2008-10-14 01:46:48 +02:00
@streamer = Streamer.new(self)
@bulk_save_cache = []
@bulk_save_cache_limit = 50
2008-03-18 19:37:10 +01:00
end
2008-09-30 08:39:57 +02:00
# returns the database's uri
2008-08-03 21:51:17 +02:00
def to_s
@root
end
2008-09-30 08:26:34 +02:00
# GET the database info from CouchDB
def info
CouchRest.get @root
end
2008-09-30 08:26:34 +02:00
# Query the <tt>_all_docs</tt> view. Accepts all the same arguments as view.
def documents params = {}
keys = params.delete(:keys)
2008-05-25 02:01:28 +02:00
url = CouchRest.paramify_url "#{@root}/_all_docs", params
if keys
CouchRest.post(url, {:keys => keys})
else
CouchRest.get url
end
2008-03-19 16:57:20 +01:00
end
2008-10-14 10:07:48 +02:00
# POST a temporary view function to CouchDB for querying. This is not
# recommended, as you don't get any performance benefit from CouchDB's
# materialized views. Can be quite slow on large databases.
def slow_view funcs, params = {}
keys = params.delete(:keys)
funcs = funcs.merge({:keys => keys}) if keys
url = CouchRest.paramify_url "#{@root}/_slow_view", params
JSON.parse(RestClient.post(url, funcs.to_json, {"Content-Type" => 'application/json'}))
2008-03-20 02:10:16 +01:00
end
# backwards compatibility is a plus
alias :temp_view :slow_view
2008-03-20 02:10:16 +01:00
2008-10-14 10:07:48 +02:00
# Query a CouchDB view as defined by a <tt>_design</tt> document. Accepts
# paramaters as described in http://wiki.apache.org/couchdb/HttpViewApi
2008-10-14 01:46:48 +02:00
def view name, params = {}, &block
keys = params.delete(:keys)
url = CouchRest.paramify_url "#{@root}/_view/#{name}", params
if keys
CouchRest.post(url, {:keys => keys})
else
2008-10-14 01:46:48 +02:00
if block_given?
@streamer.view(name, params, &block)
else
CouchRest.get url
end
end
2008-03-19 16:57:20 +01:00
end
2008-07-05 01:56:09 +02:00
2008-09-30 08:26:34 +02:00
# GET a document from CouchDB, by id. Returns a Ruby Hash.
2008-03-19 18:17:25 +01:00
def get id
2009-01-13 04:50:00 +01:00
slug = /^_design\/(.*)/ =~ id ? "_design/#{CGI.escape($1)}" : CGI.escape(id)
2008-11-09 01:28:58 +01:00
hash = CouchRest.get("#{@root}/#{slug}")
doc = if /^_design/ =~ hash["_id"]
Design.new(hash)
else
Document.new(hash)
end
doc.database = self
doc
2008-03-19 18:17:25 +01:00
end
2008-03-19 16:57:20 +01:00
2008-09-30 08:26:34 +02:00
# GET an attachment directly from CouchDB
2008-06-07 17:32:51 +02:00
def fetch_attachment doc, name
doc = CGI.escape(doc)
name = CGI.escape(name)
RestClient.get "#{@root}/#{doc}/#{name}"
end
2008-10-01 02:22:54 +02:00
# PUT an attachment directly to CouchDB
def put_attachment doc, name, file, options = {}
docid = CGI.escape(doc['_id'])
name = CGI.escape(name)
uri = if doc['_rev']
"#{@root}/#{docid}/#{name}?rev=#{doc['_rev']}"
else
"#{@root}/#{docid}/#{name}"
end
JSON.parse(RestClient.put(uri, file, options))
end
2008-10-14 10:07:48 +02:00
# Save a document to CouchDB. This will use the <tt>_id</tt> field from
# the document as the id for PUT, or request a new UUID from CouchDB, if
# no <tt>_id</tt> is present on the document. IDs are attached to
# documents on the client side because POST has the curious property of
# being automatically retried by proxies in the event of network
# segmentation and lost responses.
#
# If <tt>bulk</tt> is true (false by default) the document is cached for bulk-saving later.
# Bulk saving happens automatically when #bulk_save_cache limit is exceded, or on the next non bulk save.
def save (doc, bulk = false)
2008-06-07 17:32:51 +02:00
if doc['_attachments']
doc['_attachments'] = encode_attachments(doc['_attachments'])
end
if bulk
@bulk_save_cache << doc
return bulk_save if @bulk_save_cache.length >= @bulk_save_cache_limit
return {"ok" => true} # Compatibility with Document#save
elsif !bulk && @bulk_save_cache.length > 0
bulk_save
end
2008-11-09 01:28:58 +01:00
result = if doc['_id']
slug = CGI.escape(doc['_id'])
CouchRest.put "#{@root}/#{slug}", doc
2008-03-19 16:57:20 +01:00
else
begin
slug = doc['_id'] = @server.next_uuid
CouchRest.put "#{@root}/#{slug}", doc
rescue #old version of couchdb
CouchRest.post @root, doc
end
2008-03-19 16:57:20 +01:00
end
2008-11-09 01:28:58 +01:00
if result['ok']
doc['_id'] = result['id']
doc['_rev'] = result['rev']
doc.database = self if doc.respond_to?(:database=)
end
result
2008-03-19 16:57:20 +01:00
end
2008-10-14 10:07:48 +02:00
# POST an array of documents to CouchDB. If any of the documents are
# missing ids, supply one from the uuid cache.
#
# If called with no arguments, bulk saves the cache of documents to be bulk saved.
def bulk_save (docs = nil)
if docs.nil?
docs = @bulk_save_cache
@bulk_save_cache = []
end
ids, noids = docs.partition{|d|d['_id']}
uuid_count = [noids.length, @server.uuid_batch_count].max
noids.each do |doc|
nextid = @server.next_uuid(uuid_count) rescue nil
doc['_id'] = nextid if nextid
end
CouchRest.post "#{@root}/_bulk_docs", {:docs => docs}
2008-03-20 00:38:07 +01:00
end
2008-10-14 10:07:48 +02:00
# DELETE the document from CouchDB that has the given <tt>_id</tt> and
# <tt>_rev</tt>.
def delete doc
2008-11-22 01:21:20 +01:00
raise ArgumentError, "_id and _rev required for deleting" unless doc['_id'] && doc['_rev']
slug = CGI.escape(doc['_id'])
CouchRest.delete "#{@root}/#{slug}?rev=#{doc['_rev']}"
end
# COPY an existing document to a new id. If the destination id currently exists, a rev must be provided.
# <tt>dest</tt> can take one of two forms if overwriting: "id_to_overwrite?rev=revision" or the actual doc
# hash with a '_rev' key
def copy doc, dest
raise ArgumentError, "_id is required for copying" unless doc['_id']
slug = CGI.escape(doc['_id'])
destination = if dest.respond_to?(:has_key?) && dest['_id'] && dest['_rev']
"#{dest['_id']}?rev=#{dest['_rev']}"
else
dest
end
CouchRest.copy "#{@root}/#{slug}", destination
end
# MOVE an existing document to a new id. If the destination id currently exists, a rev must be provided.
# <tt>dest</tt> can take one of two forms if overwriting: "id_to_overwrite?rev=revision" or the actual doc
# hash with a '_rev' key
def move doc, dest
raise ArgumentError, "_id and _rev are required for moving" unless doc['_id'] && doc['_rev']
slug = CGI.escape(doc['_id'])
destination = if dest.respond_to?(:has_key?) && dest['_id'] && dest['_rev']
"#{dest['_id']}?rev=#{dest['_rev']}"
else
dest
end
CouchRest.move "#{@root}/#{slug}?rev=#{doc['_rev']}", destination
end
2008-12-15 00:29:15 +01:00
# Compact the database, removing old document revisions and optimizing space use.
def compact!
CouchRest.post "#{@root}/_compact"
end
2008-10-14 10:07:48 +02:00
# DELETE the database itself. This is not undoable and could be rather
# catastrophic. Use with care!
2008-03-18 19:37:10 +01:00
def delete!
2008-03-19 16:57:20 +01:00
CouchRest.delete @root
2008-03-18 19:37:10 +01:00
end
2008-06-07 17:32:51 +02:00
private
2009-01-13 04:50:00 +01:00
def escape_docid id
end
2008-06-07 17:32:51 +02:00
def encode_attachments attachments
attachments.each do |k,v|
2008-06-12 17:40:52 +02:00
next if v['stub']
2008-06-07 18:05:29 +02:00
v['data'] = base64(v['data'])
2008-06-07 17:32:51 +02:00
end
2008-06-07 18:05:29 +02:00
attachments
2008-06-07 17:32:51 +02:00
end
2008-06-07 17:32:51 +02:00
def base64 data
Base64.encode64(data).gsub(/\s/,'')
end
2008-03-18 19:37:10 +01:00
  end
end