Merge pull request #31 from nvt/storage-comments

Improved the comments in Antelope and Coffee.
Adam Dunkels 2012-11-19 04:42:24 -08:00
commit 01a28e47d5
6 changed files with 89 additions and 28 deletions


@@ -130,7 +130,7 @@ db_print_tuple(db_handle_t *handle)
output("Unable to get the value for row %lu, column %u: %s\n",
(unsigned long)handle->current_row, column,
db_get_result_message(result));
break;
return result;
}
switch(value.domain) {

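The change above stops masking per-row failures: instead of breaking out of its loop, db_print_tuple() now hands the error code back to the caller. A minimal caller sketch under that contract (print_all_rows() is hypothetical, and db_processing() is assumed to test whether the handle still has rows; db_result_t, DB_ERROR() and DB_FINISHED are Antelope's own):

static db_result_t
print_all_rows(db_handle_t *handle)
{
  db_result_t result;

  while(db_processing(handle)) {     /* assumed handle-state test */
    result = db_print_tuple(handle);
    if(DB_ERROR(result)) {
      return result;                 /* propagate, don't swallow */
    }
  }
  return DB_FINISHED;
}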

@@ -555,7 +555,7 @@ PARSER(remove_from)
/* Use a temporary persistent relation to assign the query result to. */
AQL_SET_FLAG(adt, AQL_FLAG_ASSIGN);
AQL_ADD_RELATION(adt, TEMP_RELATION);
AQL_ADD_RELATION(adt, REMOVE_RELATION);
CONSUME(IDENTIFIER);
AQL_ADD_RELATION(adt, VALUE);

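For context, the fix above stages the tuples matched by a REMOVE query in the dedicated REMOVE_RELATION ("db-remove") rather than in the generic temporary relation. A hypothetical usage sketch (the relation and attribute names are made up; db_query() and db_get_result_message() are the Antelope calls shown elsewhere in this commit):

static void
remove_humid_samples(void)
{
  db_handle_t handle;
  db_result_t result;

  result = db_query(&handle, "REMOVE FROM samples WHERE humidity > 90;");
  if(DB_ERROR(result)) {
    printf("REMOVE failed: %s\n", db_get_result_message(result));
  }
}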

@@ -39,59 +39,82 @@
#include "contiki-conf.h"
/* Features. Include only what is needed in order to save space. */
/*----------------------------------------------------------------------------*/
/* Optional Antelope features. Include only what is needed
in order to save space. */
/* Support join operations on relations. */
#ifndef DB_FEATURE_JOIN
#define DB_FEATURE_JOIN 1
#endif /* DB_FEATURE_JOIN */
/* Support tuple removals. */
#ifndef DB_FEATURE_REMOVE
#define DB_FEATURE_REMOVE 1
#endif /* DB_FEATURE_REMOVE */
/* Support floating-point values in attributes. */
#ifndef DB_FEATURE_FLOATS
#define DB_FEATURE_FLOATS 0
#endif /* DB_FEATURE_FLOATS */
/* Optimize storage access for the Coffee file system. */
#ifndef DB_FEATURE_COFFEE
#define DB_FEATURE_COFFEE 1
#endif /* DB_FEATURE_COFFEE */
/* Enable basic data integrity checks. */
#ifndef DB_FEATURE_INTEGRITY
#define DB_FEATURE_INTEGRITY 0
#endif /* DB_FEATURE_INTEGRITY */
/*----------------------------------------------------------------------------*/
/* Configuration parameters that may be trimmed to save space. */
/* The size of the error message buffer used by the parser. */
#ifndef DB_ERROR_BUF_SIZE
#define DB_ERROR_BUF_SIZE 50
#endif /* DB_ERROR_BUF_SIZE */
/* The maximum number of indexes in use by all relations loaded in memory. */
#ifndef DB_INDEX_POOL_SIZE
#define DB_INDEX_POOL_SIZE 3
#endif /* DB_INDEX_POOL_SIZE */
/* The maximum number of relations loaded in memory. */
#ifndef DB_RELATION_POOL_SIZE
#define DB_RELATION_POOL_SIZE 5
#endif /* DB_RELATION_POOL_SIZE */
/* The maximum number of attributes loaded in memory. */
#ifndef DB_ATTRIBUTE_POOL_SIZE
#define DB_ATTRIBUTE_POOL_SIZE 16
#endif /* DB_ATTRIBUTE_POOL_SIZE */
/* The maximum number of attributes in a relation. */
#ifndef DB_MAX_ATTRIBUTES_PER_RELATION
#define DB_MAX_ATTRIBUTES_PER_RELATION 6
#endif /* DB_MAX_ATTRIBUTES_PER_RELATION */
/* The maximum physical storage size of an attribute value. */
#ifndef DB_MAX_ELEMENT_SIZE
#define DB_MAX_ELEMENT_SIZE 16
#endif /* DB_MAX_ELEMENT_SIZE */
/* The maximum size of the LVM bytecode compiled from a
single database query. */
#ifndef DB_VM_BYTECODE_SIZE
#define DB_VM_BYTECODE_SIZE 128
#endif /* DB_VM_BYTECODE_SIZE */
/*----------------------------------------------------------------------------*/
/* Language options. */
/* The maximum length of a database query in AQL text format. */
#ifndef AQL_MAX_QUERY_LENGTH
#define AQL_MAX_QUERY_LENGTH 128
#endif /* AQL_MAX_QUERY_LENGTH */
@@ -100,78 +123,109 @@
#define AQL_MAX_VALUE_LENGTH DB_MAX_ELEMENT_SIZE
#endif /* AQL_MAX_VALUE_LENGTH */
/* The maximum number of relations used in a single query. */
#ifndef AQL_RELATION_LIMIT
#define AQL_RELATION_LIMIT 3
#endif /* AQL_RELATION_LIMIT */
/* The maximum number of attributes used in a single query. */
#ifndef AQL_ATTRIBUTE_LIMIT
#define AQL_ATTRIBUTE_LIMIT 5
#endif /* AQL_ATTRIBUTE_LIMIT */
/*----------------------------------------------------------------------------*/
/* Physical storage options. Changing these may cause compatibility problems. */
/*
* Physical storage options. Changing these options might cause
* compatibility problems if the database files are moved between
* different installations of Antelope.
*/
/* The default relation file size to reserve when using Coffee. */
#ifndef DB_COFFEE_RESERVE_SIZE
#define DB_COFFEE_RESERVE_SIZE (128 * 1024UL)
#endif /* DB_COFFEE_RESERVE_SIZE */
/* The maximum size of the physical storage of a tuple (labelled a "row"
in Antelope's terminology. */
#ifndef DB_MAX_CHAR_SIZE_PER_ROW
#define DB_MAX_CHAR_SIZE_PER_ROW 64
#endif /* DB_MAX_CHAR_SIZE_PER_ROW */
/* The maximum file name length to use for creating various database files. */
#ifndef DB_MAX_FILENAME_LENGTH
#define DB_MAX_FILENAME_LENGTH 16
#endif /* DB_MAX_FILENAME_LENGTH */
/* The maximum length of an attribute name. */
#ifndef ATTRIBUTE_NAME_LENGTH
#define ATTRIBUTE_NAME_LENGTH 12
#endif /* ATTRIBUTE_NAME_LENGTH */
/* The maximum length of a relation name. */
#ifndef RELATION_NAME_LENGTH
#define RELATION_NAME_LENGTH 10
#endif /* RELATION_NAME_LENGTH */
/* The name of the intermediate "result" relation file, which is used
for presenting the result of a query to a user. */
#ifndef RESULT_RELATION
#define RESULT_RELATION "db-result"
#endif /* RESULT_RELATION */
#ifndef TEMP_RELATION
#define TEMP_RELATION "db-temp"
#endif /* TEMP_RELATION */
/* The name of the relation used for processing a REMOVE query. */
#ifndef REMOVE_RELATION
#define REMOVE_RELATION "db-remove"
#endif /* REMOVE_RELATION */
/*----------------------------------------------------------------------------*/
/* Index options. */
#ifndef DB_INDEX_COST
#define DB_INDEX_COST 64
#endif /* DB_INDEX_COST */
/* The maximum number of hash table indexes. */
#ifndef DB_MEMHASH_INDEX_LIMIT
#define DB_MEMHASH_INDEX_LIMIT 1
#endif /* DB_MEMHASH_INDEX_LIMIT */
/* The default hash table index size. */
#ifndef DB_MEMHASH_TABLE_SIZE
#define DB_MEMHASH_TABLE_SIZE 61
#endif /* DB_MEMHASH_TABLE_SIZE */
/* The maximum number of MaxHeap indexes. */
#ifndef DB_HEAP_INDEX_LIMIT
#define DB_HEAP_INDEX_LIMIT 1
#endif /* DB_HEAP_INDEX_LIMIT */
/* The maximum number of buckets cached in the MaxHeap index. */
#ifndef DB_HEAP_CACHE_LIMIT
#define DB_HEAP_CACHE_LIMIT 1
#endif /* DB_HEAP_CACHE_LIMIT */
/*----------------------------------------------------------------------------*/
/* Propositional Logic Engine options. */
#ifndef PLE_MAX_NAME_LENGTH
#define PLE_MAX_NAME_LENGTH ATTRIBUTE_NAME_LENGTH
#endif /* PLE_MAX_NAME_LENGTH */
/* LVM options. */
#ifndef PLE_MAX_VARIABLE_ID
#define PLE_MAX_VARIABLE_ID AQL_ATTRIBUTE_LIMIT - 1
#endif /* PLE_MAX_VARIABLE_ID */
/* The maximum length of a variable in LVM. This value should preferably
be identical to the maximum attribute name length. */
#ifndef LVM_MAX_NAME_LENGTH
#define LVM_MAX_NAME_LENGTH ATTRIBUTE_NAME_LENGTH
#endif /* LVM_MAX_NAME_LENGTH */
#ifndef PLE_USE_FLOATS
#define PLE_USE_FLOATS DB_FEATURE_FLOATS
#endif /* PLE_USE_FLOATS */
/* The maximum variable identifier number in the LVM. The default
value corresponds to the highest attribute ID. */
#ifndef LVM_MAX_VARIABLE_ID
#define LVM_MAX_VARIABLE_ID AQL_ATTRIBUTE_LIMIT - 1
#endif /* LVM_MAX_VARIABLE_ID */
/* Specify whether floats should be used or not inside the LVM. */
#ifndef LVM_USE_FLOATS
#define LVM_USE_FLOATS DB_FEATURE_FLOATS
#endif /* LVM_USE_FLOATS */
#endif /* !DB_OPTIONS_H */

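Every option in this header follows the same #ifndef pattern, so a platform or application can override any default before the header is read; the contiki-conf.h included at the top is the usual place. A minimal sketch with illustrative values only:

/* Hypothetical project configuration, e.g. in contiki-conf.h: */
#define DB_FEATURE_JOIN 0                     /* drop joins to save ROM */
#define DB_FEATURE_FLOATS 1                   /* allow float attributes */
#define DB_COFFEE_RESERVE_SIZE (64 * 1024UL)  /* smaller relation files */
#define DB_MAX_ATTRIBUTES_PER_RELATION 4      /* narrower relations */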

@@ -29,9 +29,9 @@
/**
* \file
* An binary maximum heap for data indexing over flash memory.
* MaxHeap - A binary maximum heap index for flash memory.
*
* The idea behind this method is to write entries sequentially
* The idea behind the MaxHeap index is to write entries sequentially
* into small buckets, which are indexed in a binary maximum heap.
* Although sequential writes make the entries unsorted within a
* bucket, the time to load and scan a single bucket is small. The
@@ -114,6 +114,7 @@ struct bucket_cache {
bucket_t bucket;
};
/* Keep a cache of buckets read from storage. */
static struct bucket_cache bucket_cache[DB_HEAP_CACHE_LIMIT];
MEMB(heaps, heap_t, DB_HEAP_INDEX_LIMIT);
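A hypothetical sketch of the layout that the comments above describe (all names and sizes here are made up; the real definitions live in this file): each heap node covers a key range and owns a small bucket whose pairs are written sequentially, and a few recently read buckets are kept in a RAM cache.

#define SKETCH_BUCKET_SIZE 16

struct sketch_pair {
  long key;
  unsigned long tuple_id;
};

struct sketch_bucket {
  /* Appended in arrival order, hence unsorted; cheap to scan
     because the bucket is small. */
  struct sketch_pair pairs[SKETCH_BUCKET_SIZE];
};

struct sketch_heap_node {
  long min_key, max_key;        /* range narrows toward the bottom */
  unsigned long bucket_offset;  /* bucket location in the file */
};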
@@ -504,7 +505,7 @@ create(index_t *index)
bucket_filename[0] = '\0';
/* Generate the heap file, which is the main index file that is
inserted into the metadata of the relation. */
referenced from the metadata of the relation. */
filename = storage_generate_file("heap",
(unsigned long)NODE_LIMIT * sizeof(heap_node_t));
if(filename == NULL) {
@@ -693,7 +694,8 @@ get_next(index_iterator_t *iterator)
cache.found_items = cache.start = 0;
cache.index_iterator = iterator;
/* Find a path of heap nodes which can contain the key. */
/* Find the downward path through the heap consisting of all nodes
that could possibly contain the key. */
for(i = tmp_heap_iterator = 0; i < NODE_DEPTH; i++) {
cache.visited_buckets[i] = heap_find(heap, key, &tmp_heap_iterator);
if(cache.visited_buckets[i] < 0) {
@@ -706,7 +708,8 @@ get_next(index_iterator_t *iterator)
/*
* Search for the key in each heap node, starting from the bottom
* of the heap. There is a much higher chance that the key will be
* of the heap. Because the bottom nodes contain a very narrow
* range of keys, there is a much higher chance that the key will be
* there rather than at the top.
*/
for(; cache.heap_iterator >= 0; cache.heap_iterator--) {
@@ -719,8 +722,8 @@ get_next(index_iterator_t *iterator)
return INVALID_TUPLE;
}
/* Compare the key against the bucket_ids in the bucket sequentially because
they are placed in arbitrary order. */
/* Because keys are stored in an unsorted order in the bucket, we
* need to search the bucket sequentially. */
next_free_slot = heap->next_free_slot[bucket_id];
for(i = cache.start; i < next_free_slot; i++) {
if(bcache->bucket.pairs[i].key == key) {


@@ -140,6 +140,8 @@ index_create(index_type_t index_type, relation_t *rel, attribute_t *attr)
index->flags = INDEX_LOAD_NEEDED;
process_post(&db_indexer, load_request_event, NULL);
} else {
/* Inline indexes (i.e., those using the existing storage of the relation)
do not need to be reloaded after restarting the system. */
PRINTF("DB: Index created for attribute %s\n", attr->name);
index->flags |= INDEX_READY;
}
@@ -266,7 +268,7 @@ index_get_iterator(index_iterator_t *iterator, index_t *index,
* value in the search range. If the search range is sparse, this
* iteration will incur a considerable overhead per found key.
*
* Hence, The emulation is preferable when an external module wants
* Hence, the emulation is preferable when an external module wants
* to iterate over a narrow range of keys, for which the total
* search cost is smaller than that of an iteration over all tuples
* in the relation.
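The trade-off above can be stated concretely: emulation pays off when probing each candidate key in the range costs less in total than scanning every tuple in the relation. A hypothetical helper expressing that rule (the parameters are illustrative abstractions, not Antelope API):

static int
sketch_prefer_emulation(unsigned long keys_in_range,
                        unsigned long cost_per_search,
                        unsigned long tuples_in_relation,
                        unsigned long cost_per_tuple)
{
  return keys_in_range * cost_per_search <
         tuples_in_relation * cost_per_tuple;
}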
@@ -373,6 +375,8 @@ PROCESS_THREAD(db_indexer, ev, data)
PRINTF("DB: Loading the index for %s.%s...\n",
index->rel->name, index->attr->name);
/* Project the values of the indexed attribute from all tuples in
the relation, and insert them into the index again. */
if(DB_ERROR(db_query(&handle, "SELECT %s FROM %s;", index->attr->name, index->rel->name))) {
index->flags |= INDEX_LOAD_ERROR;
index->flags &= ~INDEX_LOAD_NEEDED;


@@ -257,7 +257,7 @@ get_sector_status(uint16_t sector, struct sector_status *stats)
/*
* get_sector_status() is an iterative function using local static
* state. It therefore requires the the caller loops starts from
* state. It therefore requires that the caller starts iterating from
* sector 0 in order to reset the internal state.
*/
if(sector == 0) {
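Because of that contract, get_sector_status() cannot be queried for an arbitrary sector in isolation. A hypothetical caller sketch (the loop bound and the use of the stats fields are illustrative; the struct carries per-sector page counts such as the "free" count assigned below):

static void
sketch_scan_sectors(void)
{
  struct sector_status stats;
  uint16_t sector;

  /* Start from sector 0 so that the static state is reset. */
  for(sector = 0; sector < COFFEE_SIZE / COFFEE_SECTOR_SIZE; sector++) {
    get_sector_status(sector, &stats);
    /* ... pick sectors worth erasing based on stats ... */
  }
}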
@@ -332,7 +332,7 @@ get_sector_status(uint16_t sector, struct sector_status *stats)
stats->free = free;
/*
* To avoid unnecessary page isolation, we notify the callee that
* To avoid unnecessary page isolation, we notify the caller that
* "skip_pages" pages should be isolated only if the current file extent
* ends in the next sector. If the file extent ends in a more distant
* sector, however, the garbage collection can free the next sector
@@ -815,7 +815,7 @@ merge_log(coffee_page_t file_page, int extend)
/*
* The reservation function adds extra space for the header, which has
* already been calculated with in the previous reservation.
* already been accounted for in the previous reservation.
*/
max_pages = hdr.max_pages << extend;
new_file = reserve(hdr.name, max_pages, 1, 0);
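The shift above is what grows a file when the log is merged with extension: with extend == 1 the merged file reserves twice as many pages, and since hdr.max_pages already includes the header overhead added by the earlier reservation, the doubling does not count it twice. A small numeric sketch with illustrative values:

coffee_page_t old_pages = 16;             /* hdr.max_pages, header included */
coffee_page_t new_pages = old_pages << 1; /* extend == 1: 32 pages reserved */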