| author | mpcomplete@google.com <mpcomplete@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-01-06 22:39:41 +0000 |
|---|---|---|
| committer | mpcomplete@google.com <mpcomplete@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-01-06 22:39:41 +0000 |
| commit | 586381f8db3497c24c11f96234f1879b34e74bc7 (patch) | |
| tree | 99f7d18350289b135ef6dd5c161baba8bce668a3 /third_party/sqlite/ext | |
| parent | 6e3b12ff2cbbe8c481f986c8f0dd230bb50add2a (diff) | |
Upgrade our sqlite to 3.6.1, with the local changes made by Gears. I'm
checking in the full sqlite tree to make upstream merges easier. This means
we'll have generated sources split out from the originals.
One important change this makes is that "BEGIN" now defaults to "BEGIN
IMMEDIATE" rather than "BEGIN DEFERRED". This doesn't affect us because we
don't use unqualified BEGIN statements.
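(Editor's illustration, not part of this change: how the two defaults differ
through the C API, assuming an open sqlite3 handle "db".)

    /* BEGIN DEFERRED takes no lock until the first read or write.
    ** BEGIN IMMEDIATE takes the RESERVED lock up front, failing fast
    ** with SQLITE_BUSY if another connection is already writing. */
    sqlite3_exec(db, "BEGIN DEFERRED", 0, 0, 0);   /* old default for BEGIN */
    sqlite3_exec(db, "COMMIT", 0, 0, 0);
    sqlite3_exec(db, "BEGIN IMMEDIATE", 0, 0, 0);  /* new default for BEGIN */
    sqlite3_exec(db, "COMMIT", 0, 0, 0);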
The full CL is too big for Rietveld. I'm splitting it into 2. This one is
reviewable. The other CL is just a fresh drop of:
//depot/googleclient/gears/opensource/third_party/sqlite_google
Review URL: http://codereview.chromium.org/15067
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@7623 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'third_party/sqlite/ext')
52 files changed, 31126 insertions, 0 deletions
diff --git a/third_party/sqlite/ext/README.txt b/third_party/sqlite/ext/README.txt
new file mode 100755
index 0000000..009495f
--- /dev/null
+++ b/third_party/sqlite/ext/README.txt
@@ -0,0 +1,2 @@
+Version loadable extensions to SQLite are found in subfolders
+of this folder.
diff --git a/third_party/sqlite/ext/fts1/README.txt b/third_party/sqlite/ext/fts1/README.txt
new file mode 100755
index 0000000..292b7da
--- /dev/null
+++ b/third_party/sqlite/ext/fts1/README.txt
@@ -0,0 +1,2 @@
+This folder contains source code to the first full-text search
+extension for SQLite.
diff --git a/third_party/sqlite/ext/fts1/ft_hash.c b/third_party/sqlite/ext/fts1/ft_hash.c
new file mode 100755
index 0000000..8b3a706
--- /dev/null
+++ b/third_party/sqlite/ext/fts1/ft_hash.c
@@ -0,0 +1,404 @@
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of generic hash-tables used in SQLite.
+** We've modified it slightly to serve as a standalone hash table
+** implementation for the full-text indexing module.
+*/
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "ft_hash.h"
+
+void *malloc_and_zero(int n){
+  void *p = malloc(n);
+  if( p ){
+    memset(p, 0, n);
+  }
+  return p;
+}
+
+/* Turn bulk memory into a hash table object by initializing the
+** fields of the Hash structure.
+**
+** "pNew" is a pointer to the hash table that is to be initialized.
+** keyClass is one of the constants HASH_INT, HASH_POINTER,
+** HASH_BINARY, or HASH_STRING.  The value of keyClass
+** determines what kind of key the hash table will use.  "copyKey" is
+** true if the hash table should make its own private copy of keys and
+** false if it should just use the supplied pointer.  CopyKey only makes
+** sense for HASH_STRING and HASH_BINARY and is ignored
+** for other key classes.
+*/
+void HashInit(Hash *pNew, int keyClass, int copyKey){
+  assert( pNew!=0 );
+  assert( keyClass>=HASH_STRING && keyClass<=HASH_BINARY );
+  pNew->keyClass = keyClass;
+#if 0
+  if( keyClass==HASH_POINTER || keyClass==HASH_INT ) copyKey = 0;
+#endif
+  pNew->copyKey = copyKey;
+  pNew->first = 0;
+  pNew->count = 0;
+  pNew->htsize = 0;
+  pNew->ht = 0;
+  pNew->xMalloc = malloc_and_zero;
+  pNew->xFree = free;
+}
+
+/* Remove all entries from a hash table.  Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
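+**
+** Editor's illustration (not part of the original patch): typical use of
+** this API with string keys that the table copies for itself (nKey counts
+** the terminating NUL, as HASH_STRING allows):
+**
+**     Hash h;
+**     HashInit(&h, HASH_STRING, 1);
+**     HashInsert(&h, "one", 4, pData);    store caller pointer pData
+**     pData = HashFind(&h, "one", 4);     look the pointer back up
+**     HashClear(&h);                      release the table and keys
+**
+** where pData is any caller-owned pointer (hypothetical here).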
+*/
+void HashClear(Hash *pH){
+  HashElem *elem;         /* For looping over all elements of the table */
+
+  assert( pH!=0 );
+  elem = pH->first;
+  pH->first = 0;
+  if( pH->ht ) pH->xFree(pH->ht);
+  pH->ht = 0;
+  pH->htsize = 0;
+  while( elem ){
+    HashElem *next_elem = elem->next;
+    if( pH->copyKey && elem->pKey ){
+      pH->xFree(elem->pKey);
+    }
+    pH->xFree(elem);
+    elem = next_elem;
+  }
+  pH->count = 0;
+}
+
+#if 0 /* NOT USED */
+/*
+** Hash and comparison functions when the mode is HASH_INT
+*/
+static int intHash(const void *pKey, int nKey){
+  return nKey ^ (nKey<<8) ^ (nKey>>8);
+}
+static int intCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  return n2 - n1;
+}
+#endif
+
+#if 0 /* NOT USED */
+/*
+** Hash and comparison functions when the mode is HASH_POINTER
+*/
+static int ptrHash(const void *pKey, int nKey){
+  uptr x = Addr(pKey);
+  return x ^ (x<<8) ^ (x>>8);
+}
+static int ptrCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( pKey1==pKey2 ) return 0;
+  if( pKey1<pKey2 ) return -1;
+  return 1;
+}
+#endif
+
+/*
+** Hash and comparison functions when the mode is HASH_STRING
+*/
+static int strHash(const void *pKey, int nKey){
+  const char *z = (const char *)pKey;
+  int h = 0;
+  if( nKey<=0 ) nKey = (int) strlen(z);
+  while( nKey > 0 ){
+    h = (h<<3) ^ h ^ *z++;
+    nKey--;
+  }
+  return h & 0x7fffffff;
+}
+static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( n1!=n2 ) return 1;
+  return strncmp((const char*)pKey1,(const char*)pKey2,n1);
+}
+
+/*
+** Hash and comparison functions when the mode is HASH_BINARY
+*/
+static int binHash(const void *pKey, int nKey){
+  int h = 0;
+  const char *z = (const char *)pKey;
+  while( nKey-- > 0 ){
+    h = (h<<3) ^ h ^ *(z++);
+  }
+  return h & 0x7fffffff;
+}
+static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( n1!=n2 ) return 1;
+  return memcmp(pKey1,pKey2,n1);
+}
+
+/*
+** Return a pointer to the appropriate hash function given the key class.
+**
+** The C syntax in this function definition may be unfamiliar to some
+** programmers, so we provide the following additional explanation:
+**
+** The name of the function is "hashFunction".  The function takes a
+** single parameter "keyClass".  The return value of hashFunction()
+** is a pointer to another function.  Specifically, the return value
+** of hashFunction() is a pointer to a function that takes two parameters
+** with types "const void*" and "int" and returns an "int".
+*/
+static int (*hashFunction(int keyClass))(const void*,int){
+#if 0 /* HASH_INT and HASH_POINTER are never used */
+  switch( keyClass ){
+    case HASH_INT:     return &intHash;
+    case HASH_POINTER: return &ptrHash;
+    case HASH_STRING:  return &strHash;
+    case HASH_BINARY:  return &binHash;
+    default: break;
+  }
+  return 0;
+#else
+  if( keyClass==HASH_STRING ){
+    return &strHash;
+  }else{
+    assert( keyClass==HASH_BINARY );
+    return &binHash;
+  }
+#endif
+}
+
+/*
+** Return a pointer to the appropriate comparison function given the
+** key class.
+**
+** For help in interpreting the obscure C code in the function definition,
+** see the header comment on the previous function.
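+**
+** Editor's note (not part of the original patch): the same declaration
+** can be written with a typedef, which some readers find clearer:
+**
+**     typedef int (*HashFunc)(const void*, int);
+**     static HashFunc hashFunction(int keyClass);
+**
+** and likewise for this comparison-function getter.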
+*/
+static int (*compareFunction(int keyClass))(const void*,int,const void*,int){
+#if 0 /* HASH_INT and HASH_POINTER are never used */
+  switch( keyClass ){
+    case HASH_INT:     return &intCompare;
+    case HASH_POINTER: return &ptrCompare;
+    case HASH_STRING:  return &strCompare;
+    case HASH_BINARY:  return &binCompare;
+    default: break;
+  }
+  return 0;
+#else
+  if( keyClass==HASH_STRING ){
+    return &strCompare;
+  }else{
+    assert( keyClass==HASH_BINARY );
+    return &binCompare;
+  }
+#endif
+}
+
+/* Link an element into the hash table
+*/
+static void insertElement(
+  Hash *pH,            /* The complete hash table */
+  struct _ht *pEntry,  /* The entry into which pNew is inserted */
+  HashElem *pNew       /* The element to be inserted */
+){
+  HashElem *pHead;     /* First element already in pEntry */
+  pHead = pEntry->chain;
+  if( pHead ){
+    pNew->next = pHead;
+    pNew->prev = pHead->prev;
+    if( pHead->prev ){ pHead->prev->next = pNew; }
+    else             { pH->first = pNew; }
+    pHead->prev = pNew;
+  }else{
+    pNew->next = pH->first;
+    if( pH->first ){ pH->first->prev = pNew; }
+    pNew->prev = 0;
+    pH->first = pNew;
+  }
+  pEntry->count++;
+  pEntry->chain = pNew;
+}
+
+
+/* Resize the hash table so that it contains "new_size" buckets.
+** "new_size" must be a power of 2.  The hash table might fail
+** to resize if sqliteMalloc() fails.
+*/
+static void rehash(Hash *pH, int new_size){
+  struct _ht *new_ht;             /* The new hash table */
+  HashElem *elem, *next_elem;     /* For looping over existing elements */
+  int (*xHash)(const void*,int);  /* The hash function */
+
+  assert( (new_size & (new_size-1))==0 );
+  new_ht = (struct _ht *)pH->xMalloc( new_size*sizeof(struct _ht) );
+  if( new_ht==0 ) return;
+  if( pH->ht ) pH->xFree(pH->ht);
+  pH->ht = new_ht;
+  pH->htsize = new_size;
+  xHash = hashFunction(pH->keyClass);
+  for(elem=pH->first, pH->first=0; elem; elem = next_elem){
+    int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1);
+    next_elem = elem->next;
+    insertElement(pH, &new_ht[h], elem);
+  }
+}
+
+/* This function (for internal use only) locates an element in a
+** hash table that matches the given key.  The hash for this key has
+** already been computed and is passed as the 4th parameter.
+*/
+static HashElem *findElementGivenHash(
+  const Hash *pH,    /* The pH to be searched */
+  const void *pKey,  /* The key we are searching for */
+  int nKey,
+  int h              /* The hash for this key. */
+){
+  HashElem *elem;    /* Used to loop thru the element list */
+  int count;         /* Number of elements left to test */
+  int (*xCompare)(const void*,int,const void*,int);  /* comparison function */
+
+  if( pH->ht ){
+    struct _ht *pEntry = &pH->ht[h];
+    elem = pEntry->chain;
+    count = pEntry->count;
+    xCompare = compareFunction(pH->keyClass);
+    while( count-- && elem ){
+      if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){
+        return elem;
+      }
+      elem = elem->next;
+    }
+  }
+  return 0;
+}
+
+/* Remove a single entry from the hash table given a pointer to that
+** element and a hash on the element's key.
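+**
+** Editor's note (not part of the original patch): rehash() and the
+** lookup routines rely on htsize being a power of two, so that
+** "h & (htsize-1)" equals "h % htsize" without a division.  A small
+** worked example with htsize==8: for h==29, 29 % 8 == 5, and
+** 29 & 7 == 11101b & 00111b == 101b == 5.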
+*/
+static void removeElementGivenHash(
+  Hash *pH,        /* The pH containing "elem" */
+  HashElem* elem,  /* The element to be removed from the pH */
+  int h            /* Hash value for the element */
+){
+  struct _ht *pEntry;
+  if( elem->prev ){
+    elem->prev->next = elem->next;
+  }else{
+    pH->first = elem->next;
+  }
+  if( elem->next ){
+    elem->next->prev = elem->prev;
+  }
+  pEntry = &pH->ht[h];
+  if( pEntry->chain==elem ){
+    pEntry->chain = elem->next;
+  }
+  pEntry->count--;
+  if( pEntry->count<=0 ){
+    pEntry->chain = 0;
+  }
+  if( pH->copyKey && elem->pKey ){
+    pH->xFree(elem->pKey);
+  }
+  pH->xFree( elem );
+  pH->count--;
+  if( pH->count<=0 ){
+    assert( pH->first==0 );
+    assert( pH->count==0 );
+    HashClear(pH);
+  }
+}
+
+/* Attempt to locate an element of the hash table pH with a key
+** that matches pKey,nKey.  Return the data for this element if it is
+** found, or NULL if there is no match.
+*/
+void *HashFind(const Hash *pH, const void *pKey, int nKey){
+  int h;           /* A hash on key */
+  HashElem *elem;  /* The element that matches key */
+  int (*xHash)(const void*,int);  /* The hash function */
+
+  if( pH==0 || pH->ht==0 ) return 0;
+  xHash = hashFunction(pH->keyClass);
+  assert( xHash!=0 );
+  h = (*xHash)(pKey,nKey);
+  assert( (pH->htsize & (pH->htsize-1))==0 );
+  elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1));
+  return elem ? elem->data : 0;
+}
+
+/* Insert an element into the hash table pH.  The key is pKey,nKey
+** and the data is "data".
+**
+** If no element exists with a matching key, then a new
+** element is created.  A copy of the key is made if the copyKey
+** flag is set.  NULL is returned.
+**
+** If another element already exists with the same key, then the
+** new data replaces the old data and the old data is returned.
+** The key is not copied in this instance.  If a malloc fails, then
+** the new data is returned and the hash table is unchanged.
+**
+** If the "data" parameter to this function is NULL, then the
+** element corresponding to "key" is removed from the hash table.
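+**
+** Editor's illustration (not part of the original patch) of the three
+** return-value cases above, with pOne and pTwo standing in for
+** arbitrary caller data pointers:
+**
+**     r = HashInsert(&h, "k", 2, pOne);   returns NULL   (new entry)
+**     r = HashInsert(&h, "k", 2, pTwo);   returns pOne   (data replaced)
+**     r = HashInsert(&h, "k", 2, 0);      returns pTwo   (entry removed)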
+*/
+void *HashInsert(Hash *pH, const void *pKey, int nKey, void *data){
+  int hraw;            /* Raw hash value of the key */
+  int h;               /* the hash of the key modulo hash table size */
+  HashElem *elem;      /* Used to loop thru the element list */
+  HashElem *new_elem;  /* New element added to the pH */
+  int (*xHash)(const void*,int);  /* The hash function */
+
+  assert( pH!=0 );
+  xHash = hashFunction(pH->keyClass);
+  assert( xHash!=0 );
+  hraw = (*xHash)(pKey, nKey);
+  assert( (pH->htsize & (pH->htsize-1))==0 );
+  h = hraw & (pH->htsize-1);
+  elem = findElementGivenHash(pH,pKey,nKey,h);
+  if( elem ){
+    void *old_data = elem->data;
+    if( data==0 ){
+      removeElementGivenHash(pH,elem,h);
+    }else{
+      elem->data = data;
+    }
+    return old_data;
+  }
+  if( data==0 ) return 0;
+  new_elem = (HashElem*)pH->xMalloc( sizeof(HashElem) );
+  if( new_elem==0 ) return data;
+  if( pH->copyKey && pKey!=0 ){
+    new_elem->pKey = pH->xMalloc( nKey );
+    if( new_elem->pKey==0 ){
+      pH->xFree(new_elem);
+      return data;
+    }
+    memcpy((void*)new_elem->pKey, pKey, nKey);
+  }else{
+    new_elem->pKey = (void*)pKey;
+  }
+  new_elem->nKey = nKey;
+  pH->count++;
+  if( pH->htsize==0 ){
+    rehash(pH,8);
+    if( pH->htsize==0 ){
+      pH->count = 0;
+      pH->xFree(new_elem);
+      return data;
+    }
+  }
+  if( pH->count > pH->htsize ){
+    rehash(pH,pH->htsize*2);
+  }
+  assert( pH->htsize>0 );
+  assert( (pH->htsize & (pH->htsize-1))==0 );
+  h = hraw & (pH->htsize-1);
+  insertElement(pH, &pH->ht[h], new_elem);
+  new_elem->data = data;
+  return 0;
+}
diff --git a/third_party/sqlite/ext/fts1/ft_hash.h b/third_party/sqlite/ext/fts1/ft_hash.h
new file mode 100755
index 0000000..93b6dcf
--- /dev/null
+++ b/third_party/sqlite/ext/fts1/ft_hash.h
@@ -0,0 +1,111 @@
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for the generic hash-table implementation
+** used in SQLite.  We've modified it slightly to serve as a standalone
+** hash table implementation for the full-text indexing module.
+**
+*/
+#ifndef _HASH_H_
+#define _HASH_H_
+
+/* Forward declarations of structures. */
+typedef struct Hash Hash;
+typedef struct HashElem HashElem;
+
+/* A complete hash table is an instance of the following structure.
+** The internals of this structure are intended to be opaque -- client
+** code should not attempt to access or modify the fields of this structure
+** directly.  Change this structure only by using the routines below.
+** However, many of the "procedures" and "functions" for modifying and
+** accessing this structure are really macros, so we can't really make
+** this structure opaque.
+*/
+struct Hash {
+  char keyClass;          /* HASH_INT, _POINTER, _STRING, _BINARY */
+  char copyKey;           /* True if copy of key made on insert */
+  int count;              /* Number of entries in this table */
+  HashElem *first;        /* The first element of the array */
+  void *(*xMalloc)(int);  /* malloc() function to use */
+  void (*xFree)(void *);  /* free() function to use */
+  int htsize;             /* Number of buckets in the hash table */
+  struct _ht {            /* the hash table */
+    int count;               /* Number of entries with this hash */
+    HashElem *chain;         /* Pointer to first entry with this hash */
+  } *ht;
+};
+
+/* Each element in the hash table is an instance of the following
+** structure.  All elements are stored on a single doubly-linked list.
+**
+** Again, this structure is intended to be opaque, but it can't really
+** be opaque because it is used by macros.
+*/
+struct HashElem {
+  HashElem *next, *prev;  /* Next and previous elements in the table */
+  void *data;             /* Data associated with this element */
+  void *pKey; int nKey;   /* Key associated with this element */
+};
+
+/*
+** There are 4 different modes of operation for a hash table:
+**
+**   HASH_INT         nKey is used as the key and pKey is ignored.
+**
+**   HASH_POINTER     pKey is used as the key and nKey is ignored.
+**
+**   HASH_STRING      pKey points to a string that is nKey bytes long
+**                    (including the null-terminator, if any).  Case
+**                    is respected in comparisons.
+**
+**   HASH_BINARY      pKey points to binary data nKey bytes long.
+**                    memcmp() is used to compare keys.
+**
+** A copy of the key is made for HASH_STRING and HASH_BINARY
+** if the copyKey parameter to HashInit is 1.
+*/
+/* #define HASH_INT       1 // NOT USED */
+/* #define HASH_POINTER   2 // NOT USED */
+#define HASH_STRING    3
+#define HASH_BINARY    4
+
+/*
+** Access routines.  To delete, insert a NULL pointer.
+*/
+void HashInit(Hash*, int keytype, int copyKey);
+void *HashInsert(Hash*, const void *pKey, int nKey, void *pData);
+void *HashFind(const Hash*, const void *pKey, int nKey);
+void HashClear(Hash*);
+
+/*
+** Macros for looping over all elements of a hash table.  The idiom is
+** like this:
+**
+**   Hash h;
+**   HashElem *p;
+**   ...
+**   for(p=HashFirst(&h); p; p=HashNext(p)){
+**     SomeStructure *pData = HashData(p);
+**     // do something with pData
+**   }
+*/
+#define HashFirst(H)     ((H)->first)
+#define HashNext(E)      ((E)->next)
+#define HashData(E)      ((E)->data)
+#define HashKey(E)       ((E)->pKey)
+#define HashKeysize(E)   ((E)->nKey)
+
+/*
+** Number of entries in a hash table
+*/
+#define HashCount(H)  ((H)->count)
+
+#endif /* _HASH_H_ */
diff --git a/third_party/sqlite/ext/fts1/fts1.c b/third_party/sqlite/ext/fts1/fts1.c
new file mode 100755
index 0000000..f067c55
--- /dev/null
+++ b/third_party/sqlite/ext/fts1/fts1.c
@@ -0,0 +1,3341 @@
+/* fts1 has a design flaw which can lead to database corruption (see
+** below).  It is recommended not to use it any longer, instead use
+** fts3 (or higher).  If you believe that your use of fts1 is safe,
+** add -DSQLITE_ENABLE_BROKEN_FTS1=1 to your CFLAGS.
+*/
+#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1)) \
+        && !defined(SQLITE_ENABLE_BROKEN_FTS1)
+#error fts1 has a design flaw and has been deprecated.
+#endif
+/* The flaw is that fts1 uses the content table's unaliased rowid as
+** the unique docid.  fts1 embeds the rowid in the index it builds,
+** and expects the rowid to not change.  The SQLite VACUUM operation
+** will renumber such rowids, thereby breaking fts1.  If you are using
+** fts1 in a system which has disabled VACUUM, then you can continue
+** to use it safely.
Note that PRAGMA auto_vacuum does NOT disable +** VACUUM, though systems using auto_vacuum are unlikely to invoke +** VACUUM. +** +** fts1 should be safe even across VACUUM if you only insert documents +** and never delete. +*/ + +/* The author disclaims copyright to this source code. + * + * This is an SQLite module implementing full-text search. + */ + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + +#if defined(SQLITE_ENABLE_FTS1) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "fts1.h" +#include "fts1_hash.h" +#include "fts1_tokenizer.h" +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + + +#if 0 +# define TRACE(A) printf A; fflush(stdout) +#else +# define TRACE(A) +#endif + +/* utility functions */ + +typedef struct StringBuffer { + int len; /* length, not including null terminator */ + int alloced; /* Space allocated for s[] */ + char *s; /* Content of the string */ +} StringBuffer; + +static void initStringBuffer(StringBuffer *sb){ + sb->len = 0; + sb->alloced = 100; + sb->s = malloc(100); + sb->s[0] = '\0'; +} + +static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ + if( sb->len + nFrom >= sb->alloced ){ + sb->alloced = sb->len + nFrom + 100; + sb->s = realloc(sb->s, sb->alloced+1); + if( sb->s==0 ){ + initStringBuffer(sb); + return; + } + } + memcpy(sb->s + sb->len, zFrom, nFrom); + sb->len += nFrom; + sb->s[sb->len] = 0; +} +static void append(StringBuffer *sb, const char *zFrom){ + nappend(sb, zFrom, strlen(zFrom)); +} + +/* We encode variable-length integers in little-endian order using seven bits + * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +*/ + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. */ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. 
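+ *
+ * Editor's illustration (not part of the original patch): encoding 300.
+ * 300 is 100101100 in binary; the low seven bits (0101100 = 0x2c) go out
+ * first with the continuation bit set, then the remaining bits (10 = 0x02)
+ * with the high bit clear:
+ *
+ *     putVarint(p, 300)  writes  p[0]=0xac, p[1]=0x02  and returns 2
+ *     getVarint(p, &v)   sets    v==300  and returns 2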
*/ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*** Document lists *** + * + * A document list holds a sorted list of varint-encoded document IDs. + * + * A doclist with type DL_POSITIONS_OFFSETS is stored like this: + * + * array { + * varint docid; + * array { + * varint position; (delta from previous position plus POS_BASE) + * varint startOffset; (delta from previous startOffset) + * varint endOffset; (delta from startOffset) + * } + * } + * + * Here, array { X } means zero or more occurrences of X, adjacent in memory. + * + * A position list may hold positions for text in multiple columns. A position + * POS_COLUMN is followed by a varint containing the index of the column for + * following positions in the list. Any positions appearing before any + * occurrences of POS_COLUMN are for column 0. + * + * A doclist with type DL_POSITIONS is like the above, but holds only docids + * and positions without offset information. + * + * A doclist with type DL_DOCIDS is like the above, but holds only docids + * without positions or offset information. + * + * On disk, every document list has positions and offsets, so we don't bother + * to serialize a doclist's type. + * + * We don't yet delta-encode document IDs; doing so will probably be a + * modest win. + * + * NOTE(shess) I've thought of a slightly (1%) better offset encoding. + * After the first offset, estimate the next offset by using the + * current token position and the previous token position and offset, + * offset to handle some variance. So the estimate would be + * (iPosition*w->iStartOffset/w->iPosition-64), which is delta-encoded + * as normal. Offsets more than 64 chars from the estimate are + * encoded as the delta to the previous start offset + 128. An + * additional tiny increment can be gained by using the end offset of + * the previous token to make the estimate a tiny bit more precise. +*/ + +/* It is not safe to call isspace(), tolower(), or isalnum() on +** hi-bit-set characters. This is the same solution used in the +** tokenizer. +*/ +/* TODO(shess) The snippet-generation code should be using the +** tokenizer-generated tokens rather than doing its own local +** tokenization. +*/ +/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ +static int safe_isspace(char c){ + return (c&0x80)==0 ? isspace(c) : 0; +} +static int safe_tolower(char c){ + return (c&0x80)==0 ? tolower(c) : c; +} +static int safe_isalnum(char c){ + return (c&0x80)==0 ? isalnum(c) : 0; +} + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +/* +** By default, only positions and not offsets are stored in the doclists. 
+** To change this so that offsets are stored too, compile with +** +** -DDL_DEFAULT=DL_POSITIONS_OFFSETS +** +*/ +#ifndef DL_DEFAULT +# define DL_DEFAULT DL_POSITIONS +#endif + +typedef struct DocList { + char *pData; + int nData; + DocListType iType; + int iLastColumn; /* the last column written */ + int iLastPos; /* the last position written */ + int iLastOffset; /* the last start offset written */ +} DocList; + +enum { + POS_END = 0, /* end of this position list */ + POS_COLUMN, /* followed by new column number */ + POS_BASE +}; + +/* Initialize a new DocList to hold the given data. */ +static void docListInit(DocList *d, DocListType iType, + const char *pData, int nData){ + d->nData = nData; + if( nData>0 ){ + d->pData = malloc(nData); + memcpy(d->pData, pData, nData); + } else { + d->pData = NULL; + } + d->iType = iType; + d->iLastColumn = 0; + d->iLastPos = d->iLastOffset = 0; +} + +/* Create a new dynamically-allocated DocList. */ +static DocList *docListNew(DocListType iType){ + DocList *d = (DocList *) malloc(sizeof(DocList)); + docListInit(d, iType, 0, 0); + return d; +} + +static void docListDestroy(DocList *d){ + free(d->pData); +#ifndef NDEBUG + memset(d, 0x55, sizeof(*d)); +#endif +} + +static void docListDelete(DocList *d){ + docListDestroy(d); + free(d); +} + +static char *docListEnd(DocList *d){ + return d->pData + d->nData; +} + +/* Append a varint to a DocList's data. */ +static void appendVarint(DocList *d, sqlite_int64 i){ + char c[VARINT_MAX]; + int n = putVarint(c, i); + d->pData = realloc(d->pData, d->nData + n); + memcpy(d->pData + d->nData, c, n); + d->nData += n; +} + +static void docListAddDocid(DocList *d, sqlite_int64 iDocid){ + appendVarint(d, iDocid); + if( d->iType>=DL_POSITIONS ){ + appendVarint(d, POS_END); /* initially empty position list */ + d->iLastColumn = 0; + d->iLastPos = d->iLastOffset = 0; + } +} + +/* helper function for docListAddPos and docListAddPosOffset */ +static void addPos(DocList *d, int iColumn, int iPos){ + assert( d->nData>0 ); + --d->nData; /* remove previous terminator */ + if( iColumn!=d->iLastColumn ){ + assert( iColumn>d->iLastColumn ); + appendVarint(d, POS_COLUMN); + appendVarint(d, iColumn); + d->iLastColumn = iColumn; + d->iLastPos = d->iLastOffset = 0; + } + assert( iPos>=d->iLastPos ); + appendVarint(d, iPos-d->iLastPos+POS_BASE); + d->iLastPos = iPos; +} + +/* Add a position to the last position list in a doclist. */ +static void docListAddPos(DocList *d, int iColumn, int iPos){ + assert( d->iType==DL_POSITIONS ); + addPos(d, iColumn, iPos); + appendVarint(d, POS_END); /* add new terminator */ +} + +/* +** Add a position and starting and ending offsets to a doclist. +** +** If the doclist is setup to handle only positions, then insert +** the position only and ignore the offsets. +*/ +static void docListAddPosOffset( + DocList *d, /* Doclist under construction */ + int iColumn, /* Column the inserted term is part of */ + int iPos, /* Position of the inserted term */ + int iStartOffset, /* Starting offset of inserted term */ + int iEndOffset /* Ending offset of inserted term */ +){ + assert( d->iType>=DL_POSITIONS ); + addPos(d, iColumn, iPos); + if( d->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=d->iLastOffset ); + appendVarint(d, iStartOffset-d->iLastOffset); + d->iLastOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + appendVarint(d, iEndOffset-iStartOffset); + } + appendVarint(d, POS_END); /* add new terminator */ +} + +/* +** A DocListReader object is a cursor into a doclist. 
Initialize +** the cursor to the beginning of the doclist by calling readerInit(). +** Then use routines +** +** peekDocid() +** readDocid() +** readPosition() +** skipPositionList() +** and so forth... +** +** to read information out of the doclist. When we reach the end +** of the doclist, atEnd() returns TRUE. +*/ +typedef struct DocListReader { + DocList *pDoclist; /* The document list we are stepping through */ + char *p; /* Pointer to next unread byte in the doclist */ + int iLastColumn; + int iLastPos; /* the last position read, or -1 when not in a position list */ +} DocListReader; + +/* +** Initialize the DocListReader r to point to the beginning of pDoclist. +*/ +static void readerInit(DocListReader *r, DocList *pDoclist){ + r->pDoclist = pDoclist; + if( pDoclist!=NULL ){ + r->p = pDoclist->pData; + } + r->iLastColumn = -1; + r->iLastPos = -1; +} + +/* +** Return TRUE if we have reached then end of pReader and there is +** nothing else left to read. +*/ +static int atEnd(DocListReader *pReader){ + return pReader->pDoclist==0 || (pReader->p >= docListEnd(pReader->pDoclist)); +} + +/* Peek at the next docid without advancing the read pointer. +*/ +static sqlite_int64 peekDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !atEnd(pReader) ); + assert( pReader->iLastPos==-1 ); + getVarint(pReader->p, &ret); + return ret; +} + +/* Read the next docid. See also nextDocid(). +*/ +static sqlite_int64 readDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !atEnd(pReader) ); + assert( pReader->iLastPos==-1 ); + pReader->p += getVarint(pReader->p, &ret); + if( pReader->pDoclist->iType>=DL_POSITIONS ){ + pReader->iLastColumn = 0; + pReader->iLastPos = 0; + } + return ret; +} + +/* Read the next position and column index from a position list. + * Returns the position, or -1 at the end of the list. */ +static int readPosition(DocListReader *pReader, int *iColumn){ + int i; + int iType = pReader->pDoclist->iType; + + if( pReader->iLastPos==-1 ){ + return -1; + } + assert( !atEnd(pReader) ); + + if( iType<DL_POSITIONS ){ + return -1; + } + pReader->p += getVarint32(pReader->p, &i); + if( i==POS_END ){ + pReader->iLastColumn = pReader->iLastPos = -1; + *iColumn = -1; + return -1; + } + if( i==POS_COLUMN ){ + pReader->p += getVarint32(pReader->p, &pReader->iLastColumn); + pReader->iLastPos = 0; + pReader->p += getVarint32(pReader->p, &i); + assert( i>=POS_BASE ); + } + pReader->iLastPos += ((int) i)-POS_BASE; + if( iType>=DL_POSITIONS_OFFSETS ){ + /* Skip over offsets, ignoring them for now. */ + int iStart, iEnd; + pReader->p += getVarint32(pReader->p, &iStart); + pReader->p += getVarint32(pReader->p, &iEnd); + } + *iColumn = pReader->iLastColumn; + return pReader->iLastPos; +} + +/* Skip past the end of a position list. */ +static void skipPositionList(DocListReader *pReader){ + DocList *p = pReader->pDoclist; + if( p && p->iType>=DL_POSITIONS ){ + int iColumn; + while( readPosition(pReader, &iColumn)!=-1 ){} + } +} + +/* Skip over a docid, including its position list if the doclist has + * positions. */ +static void skipDocument(DocListReader *pReader){ + readDocid(pReader); + skipPositionList(pReader); +} + +/* Skip past all docids which are less than [iDocid]. Returns 1 if a docid + * matching [iDocid] was found. 
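+ *
+ * Editor's illustration (not part of the original patch): a concrete
+ * DL_POSITIONS doclist for the reader routines above, holding docid 5
+ * with positions 1 and 4 in column 0.  Positions are stored as deltas
+ * from the previous position plus POS_BASE (2), terminated by POS_END (0):
+ *
+ *     05   varint docid 5
+ *     03   1-0+POS_BASE
+ *     05   4-1+POS_BASE
+ *     00   POS_END
+ *
+ * readDocid() returns 5; readPosition() then yields 1, 4, and -1.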
*/ +static int skipToDocid(DocListReader *pReader, sqlite_int64 iDocid){ + sqlite_int64 d = 0; + while( !atEnd(pReader) && (d=peekDocid(pReader))<iDocid ){ + skipDocument(pReader); + } + return !atEnd(pReader) && d==iDocid; +} + +/* Return the first document in a document list. +*/ +static sqlite_int64 firstDocid(DocList *d){ + DocListReader r; + readerInit(&r, d); + return readDocid(&r); +} + +#ifdef SQLITE_DEBUG +/* +** This routine is used for debugging purpose only. +** +** Write the content of a doclist to standard output. +*/ +static void printDoclist(DocList *p){ + DocListReader r; + const char *zSep = ""; + + readerInit(&r, p); + while( !atEnd(&r) ){ + sqlite_int64 docid = readDocid(&r); + if( docid==0 ){ + skipPositionList(&r); + continue; + } + printf("%s%lld", zSep, docid); + zSep = ","; + if( p->iType>=DL_POSITIONS ){ + int iPos, iCol; + const char *zDiv = ""; + printf("("); + while( (iPos = readPosition(&r, &iCol))>=0 ){ + printf("%s%d:%d", zDiv, iCol, iPos); + zDiv = ":"; + } + printf(")"); + } + } + printf("\n"); + fflush(stdout); +} +#endif /* SQLITE_DEBUG */ + +/* Trim the given doclist to contain only positions in column + * [iRestrictColumn]. */ +static void docListRestrictColumn(DocList *in, int iRestrictColumn){ + DocListReader r; + DocList out; + + assert( in->iType>=DL_POSITIONS ); + readerInit(&r, in); + docListInit(&out, DL_POSITIONS, NULL, 0); + + while( !atEnd(&r) ){ + sqlite_int64 iDocid = readDocid(&r); + int iPos, iColumn; + + docListAddDocid(&out, iDocid); + while( (iPos = readPosition(&r, &iColumn)) != -1 ){ + if( iColumn==iRestrictColumn ){ + docListAddPos(&out, iColumn, iPos); + } + } + } + + docListDestroy(in); + *in = out; +} + +/* Trim the given doclist by discarding any docids without any remaining + * positions. */ +static void docListDiscardEmpty(DocList *in) { + DocListReader r; + DocList out; + + /* TODO: It would be nice to implement this operation in place; that + * could save a significant amount of memory in queries with long doclists. */ + assert( in->iType>=DL_POSITIONS ); + readerInit(&r, in); + docListInit(&out, DL_POSITIONS, NULL, 0); + + while( !atEnd(&r) ){ + sqlite_int64 iDocid = readDocid(&r); + int match = 0; + int iPos, iColumn; + while( (iPos = readPosition(&r, &iColumn)) != -1 ){ + if( !match ){ + docListAddDocid(&out, iDocid); + match = 1; + } + docListAddPos(&out, iColumn, iPos); + } + } + + docListDestroy(in); + *in = out; +} + +/* Helper function for docListUpdate() and docListAccumulate(). +** Splices a doclist element into the doclist represented by r, +** leaving r pointing after the newly spliced element. +*/ +static void docListSpliceElement(DocListReader *r, sqlite_int64 iDocid, + const char *pSource, int nSource){ + DocList *d = r->pDoclist; + char *pTarget; + int nTarget, found; + + found = skipToDocid(r, iDocid); + + /* Describe slice in d to place pSource/nSource. */ + pTarget = r->p; + if( found ){ + skipDocument(r); + nTarget = r->p-pTarget; + }else{ + nTarget = 0; + } + + /* The sense of the following is that there are three possibilities. + ** If nTarget==nSource, we should not move any memory nor realloc. + ** If nTarget>nSource, trim target and realloc. + ** If nTarget<nSource, realloc then expand target. 
+ */ + if( nTarget>nSource ){ + memmove(pTarget+nSource, pTarget+nTarget, docListEnd(d)-(pTarget+nTarget)); + } + if( nTarget!=nSource ){ + int iDoclist = pTarget-d->pData; + d->pData = realloc(d->pData, d->nData+nSource-nTarget); + pTarget = d->pData+iDoclist; + } + if( nTarget<nSource ){ + memmove(pTarget+nSource, pTarget+nTarget, docListEnd(d)-(pTarget+nTarget)); + } + + memcpy(pTarget, pSource, nSource); + d->nData += nSource-nTarget; + r->p = pTarget+nSource; +} + +/* Insert/update pUpdate into the doclist. */ +static void docListUpdate(DocList *d, DocList *pUpdate){ + DocListReader reader; + + assert( d!=NULL && pUpdate!=NULL ); + assert( d->iType==pUpdate->iType); + + readerInit(&reader, d); + docListSpliceElement(&reader, firstDocid(pUpdate), + pUpdate->pData, pUpdate->nData); +} + +/* Propagate elements from pUpdate to pAcc, overwriting elements with +** matching docids. +*/ +static void docListAccumulate(DocList *pAcc, DocList *pUpdate){ + DocListReader accReader, updateReader; + + /* Handle edge cases where one doclist is empty. */ + assert( pAcc!=NULL ); + if( pUpdate==NULL || pUpdate->nData==0 ) return; + if( pAcc->nData==0 ){ + pAcc->pData = malloc(pUpdate->nData); + memcpy(pAcc->pData, pUpdate->pData, pUpdate->nData); + pAcc->nData = pUpdate->nData; + return; + } + + readerInit(&accReader, pAcc); + readerInit(&updateReader, pUpdate); + + while( !atEnd(&updateReader) ){ + char *pSource = updateReader.p; + sqlite_int64 iDocid = readDocid(&updateReader); + skipPositionList(&updateReader); + docListSpliceElement(&accReader, iDocid, pSource, updateReader.p-pSource); + } +} + +/* +** Read the next docid off of pIn. Return 0 if we reach the end. +* +* TODO: This assumes that docids are never 0, but they may actually be 0 since +* users can choose docids when inserting into a full-text table. Fix this. +*/ +static sqlite_int64 nextDocid(DocListReader *pIn){ + skipPositionList(pIn); + return atEnd(pIn) ? 0 : readDocid(pIn); +} + +/* +** pLeft and pRight are two DocListReaders that are pointing to +** positions lists of the same document: iDocid. +** +** If there are no instances in pLeft or pRight where the position +** of pLeft is one less than the position of pRight, then this +** routine adds nothing to pOut. +** +** If there are one or more instances where positions from pLeft +** are exactly one less than positions from pRight, then add a new +** document record to pOut. If pOut wants to hold positions, then +** include the positions from pRight that are one more than a +** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. +** +** pLeft and pRight are left pointing at the next document record. +*/ +static void mergePosList( + DocListReader *pLeft, /* Left position list */ + DocListReader *pRight, /* Right position list */ + sqlite_int64 iDocid, /* The docid from pLeft and pRight */ + DocList *pOut /* Write the merged document record here */ +){ + int iLeftCol, iLeftPos = readPosition(pLeft, &iLeftCol); + int iRightCol, iRightPos = readPosition(pRight, &iRightCol); + int match = 0; + + /* Loop until we've reached the end of both position lists. 
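+  **
+  ** Editor's illustration (not part of the original patch): with left
+  ** positions {3, 7} and right positions {4, 10} in the same column,
+  ** only the pair (3, 4) satisfies left+1 == right, so the docid is
+  ** emitted once and, when pOut keeps positions, position 4 is added.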
*/ + while( iLeftPos!=-1 && iRightPos!=-1 ){ + if( iLeftCol==iRightCol && iLeftPos+1==iRightPos ){ + if( !match ){ + docListAddDocid(pOut, iDocid); + match = 1; + } + if( pOut->iType>=DL_POSITIONS ){ + docListAddPos(pOut, iRightCol, iRightPos); + } + iLeftPos = readPosition(pLeft, &iLeftCol); + iRightPos = readPosition(pRight, &iRightCol); + }else if( iRightCol<iLeftCol || + (iRightCol==iLeftCol && iRightPos<iLeftPos+1) ){ + iRightPos = readPosition(pRight, &iRightCol); + }else{ + iLeftPos = readPosition(pLeft, &iLeftCol); + } + } + if( iLeftPos>=0 ) skipPositionList(pLeft); + if( iRightPos>=0 ) skipPositionList(pRight); +} + +/* We have two doclists: pLeft and pRight. +** Write the phrase intersection of these two doclists into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** The output pOut may or may not contain positions. If pOut +** does contain positions, they are the positions of pRight. +*/ +static void docListPhraseMerge( + DocList *pLeft, /* Doclist resulting from the words on the left */ + DocList *pRight, /* Doclist for the next word to the right */ + DocList *pOut /* Write the combined doclist here */ +){ + DocListReader left, right; + sqlite_int64 docidLeft, docidRight; + + readerInit(&left, pLeft); + readerInit(&right, pRight); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + + while( docidLeft>0 && docidRight>0 ){ + if( docidLeft<docidRight ){ + docidLeft = nextDocid(&left); + }else if( docidRight<docidLeft ){ + docidRight = nextDocid(&right); + }else{ + mergePosList(&left, &right, docidLeft, pOut); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + } + } +} + +/* We have two doclists: pLeft and pRight. +** Write the intersection of these two doclists into pOut. +** Only docids are matched. Position information is ignored. +** +** The output pOut never holds positions. +*/ +static void docListAndMerge( + DocList *pLeft, /* Doclist resulting from the words on the left */ + DocList *pRight, /* Doclist for the next word to the right */ + DocList *pOut /* Write the combined doclist here */ +){ + DocListReader left, right; + sqlite_int64 docidLeft, docidRight; + + assert( pOut->iType<DL_POSITIONS ); + + readerInit(&left, pLeft); + readerInit(&right, pRight); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + + while( docidLeft>0 && docidRight>0 ){ + if( docidLeft<docidRight ){ + docidLeft = nextDocid(&left); + }else if( docidRight<docidLeft ){ + docidRight = nextDocid(&right); + }else{ + docListAddDocid(pOut, docidLeft); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + } + } +} + +/* We have two doclists: pLeft and pRight. +** Write the union of these two doclists into pOut. +** Only docids are matched. Position information is ignored. +** +** The output pOut never holds positions. 
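+**
+** Editor's illustration (not part of the original patch): merging
+** left {1, 3, 5} with right {2, 3, 6} writes {1, 2, 3, 5, 6}; when the
+** heads are equal, both readers advance, so docid 3 appears only once.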
+*/ +static void docListOrMerge( + DocList *pLeft, /* Doclist resulting from the words on the left */ + DocList *pRight, /* Doclist for the next word to the right */ + DocList *pOut /* Write the combined doclist here */ +){ + DocListReader left, right; + sqlite_int64 docidLeft, docidRight, priorLeft; + + readerInit(&left, pLeft); + readerInit(&right, pRight); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + + while( docidLeft>0 && docidRight>0 ){ + if( docidLeft<=docidRight ){ + docListAddDocid(pOut, docidLeft); + }else{ + docListAddDocid(pOut, docidRight); + } + priorLeft = docidLeft; + if( docidLeft<=docidRight ){ + docidLeft = nextDocid(&left); + } + if( docidRight>0 && docidRight<=priorLeft ){ + docidRight = nextDocid(&right); + } + } + while( docidLeft>0 ){ + docListAddDocid(pOut, docidLeft); + docidLeft = nextDocid(&left); + } + while( docidRight>0 ){ + docListAddDocid(pOut, docidRight); + docidRight = nextDocid(&right); + } +} + +/* We have two doclists: pLeft and pRight. +** Write into pOut all documents that occur in pLeft but not +** in pRight. +** +** Only docids are matched. Position information is ignored. +** +** The output pOut never holds positions. +*/ +static void docListExceptMerge( + DocList *pLeft, /* Doclist resulting from the words on the left */ + DocList *pRight, /* Doclist for the next word to the right */ + DocList *pOut /* Write the combined doclist here */ +){ + DocListReader left, right; + sqlite_int64 docidLeft, docidRight, priorLeft; + + readerInit(&left, pLeft); + readerInit(&right, pRight); + docidLeft = nextDocid(&left); + docidRight = nextDocid(&right); + + while( docidLeft>0 && docidRight>0 ){ + priorLeft = docidLeft; + if( docidLeft<docidRight ){ + docListAddDocid(pOut, docidLeft); + } + if( docidLeft<=docidRight ){ + docidLeft = nextDocid(&left); + } + if( docidRight>0 && docidRight<=priorLeft ){ + docidRight = nextDocid(&right); + } + } + while( docidLeft>0 ){ + docListAddDocid(pOut, docidLeft); + docidLeft = nextDocid(&left); + } +} + +static char *string_dup_n(const char *s, int n){ + char *str = malloc(n + 1); + memcpy(str, s, n); + str[n] = '\0'; + return str; +} + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it is not part of the standard C library and + * may not be available everywhere.) */ +static char *string_dup(const char *s){ + return string_dup_n(s, strlen(s)); +} + +/* Format a string, replacing each occurrence of the % character with + * zDb.zName. This may be more convenient than sqlite_mprintf() + * when one string is used repeatedly in a format string. + * The caller must free() the returned string. */ +static char *string_format(const char *zFormat, + const char *zDb, const char *zName){ + const char *p; + size_t len = 0; + size_t nDb = strlen(zDb); + size_t nName = strlen(zName); + size_t nFullTableName = nDb+1+nName; + char *result; + char *r; + + /* first compute length needed */ + for(p = zFormat ; *p ; ++p){ + len += (*p=='%' ? 
nFullTableName : 1); + } + len += 1; /* for null terminator */ + + r = result = malloc(len); + for(p = zFormat; *p; ++p){ + if( *p=='%' ){ + memcpy(r, zDb, nDb); + r += nDb; + *r++ = '.'; + memcpy(r, zName, nName); + r += nName; + } else { + *r++ = *p; + } + } + *r++ = '\0'; + assert( r == result + len ); + return result; +} + +static int sql_exec(sqlite3 *db, const char *zDb, const char *zName, + const char *zFormat){ + char *zCommand = string_format(zFormat, zDb, zName); + int rc; + TRACE(("FTS1 sql: %s\n", zCommand)); + rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); + free(zCommand); + return rc; +} + +static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName, + sqlite3_stmt **ppStmt, const char *zFormat){ + char *zCommand = string_format(zFormat, zDb, zName); + int rc; + TRACE(("FTS1 prepare: %s\n", zCommand)); + rc = sqlite3_prepare(db, zCommand, -1, ppStmt, NULL); + free(zCommand); + return rc; +} + +/* end utility functions */ + +/* Forward reference */ +typedef struct fulltext_vtab fulltext_vtab; + +/* A single term in a query is represented by an instances of +** the following structure. +*/ +typedef struct QueryTerm { + short int nPhrase; /* How many following terms are part of the same phrase */ + short int iPhrase; /* This is the i-th term of a phrase. */ + short int iColumn; /* Column of the index that must match this term */ + signed char isOr; /* this term is preceded by "OR" */ + signed char isNot; /* this term is preceded by "-" */ + char *pTerm; /* text of the term. '\000' terminated. malloced */ + int nTerm; /* Number of bytes in pTerm[] */ +} QueryTerm; + + +/* A query string is parsed into a Query structure. + * + * We could, in theory, allow query strings to be complicated + * nested expressions with precedence determined by parentheses. + * But none of the major search engines do this. (Perhaps the + * feeling is that an parenthesized expression is two complex of + * an idea for the average user to grasp.) Taking our lead from + * the major search engines, we will allow queries to be a list + * of terms (with an implied AND operator) or phrases in double-quotes, + * with a single optional "-" before each non-phrase term to designate + * negation and an optional OR connector. + * + * OR binds more tightly than the implied AND, which is what the + * major search engines seem to do. So, for example: + * + * [one two OR three] ==> one AND (two OR three) + * [one OR two three] ==> (one OR two) AND three + * + * A "-" before a term matches all entries that lack that term. + * The "-" must occur immediately before the term with in intervening + * space. This is how the search engines do it. + * + * A NOT term cannot be the right-hand operand of an OR. If this + * occurs in the query string, the NOT is ignored: + * + * [one OR -two] ==> one OR two + * + */ +typedef struct Query { + fulltext_vtab *pFts; /* The full text index */ + int nTerms; /* Number of terms in the query */ + QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ + int nextIsOr; /* Set the isOr flag on the next inserted term */ + int nextColumn; /* Next word parsed must be in this column */ + int dfltColumn; /* The default column */ +} Query; + + +/* +** An instance of the following structure keeps track of generated +** matching-word offset information and snippets. 
+*/ +typedef struct Snippet { + int nMatch; /* Total number of matches */ + int nAlloc; /* Space allocated for aMatch[] */ + struct snippetMatch { /* One entry for each matching term */ + char snStatus; /* Status flag for use while constructing snippets */ + short int iCol; /* The column that contains the match */ + short int iTerm; /* The index in Query.pTerms[] of the matching term */ + short int nByte; /* Number of bytes in the term */ + int iStart; /* The offset to the first character of the term */ + } *aMatch; /* Points to space obtained from malloc */ + char *zOffset; /* Text rendering of aMatch[] */ + int nOffset; /* strlen(zOffset) */ + char *zSnippet; /* Snippet text */ + int nSnippet; /* strlen(zSnippet) */ +} Snippet; + + +typedef enum QueryType { + QUERY_GENERIC, /* table scan */ + QUERY_ROWID, /* lookup by rowid */ + QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ +} QueryType; + +/* TODO(shess) CHUNK_MAX controls how much data we allow in segment 0 +** before we start aggregating into larger segments. Lower CHUNK_MAX +** means that for a given input we have more individual segments per +** term, which means more rows in the table and a bigger index (due to +** both more rows and bigger rowids). But it also reduces the average +** cost of adding new elements to the segment 0 doclist, and it seems +** to reduce the number of pages read and written during inserts. 256 +** was chosen by measuring insertion times for a certain input (first +** 10k documents of Enron corpus), though including query performance +** in the decision may argue for a larger value. +*/ +#define CHUNK_MAX 256 + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_UPDATE_STMT, + CONTENT_DELETE_STMT, + + TERM_SELECT_STMT, + TERM_SELECT_ALL_STMT, + TERM_INSERT_STMT, + TERM_UPDATE_STMT, + TERM_DELETE_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(adam): Is there some risk that a statement (in particular, +** pTermSelectStmt) will be used in two cursors at once, e.g. if a +** query joins a virtual table to itself? If so perhaps we should +** move some of these to the cursor object. +*/ +static const char *const fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ + /* CONTENT_SELECT */ "select * from %_content where rowid = ?", + /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ + /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + + /* TERM_SELECT */ + "select rowid, doclist from %_term where term = ? and segment = ?", + /* TERM_SELECT_ALL */ + "select doclist from %_term where term = ? order by segment", + /* TERM_INSERT */ + "insert into %_term (rowid, term, segment, doclist) values (?, ?, ?, ?)", + /* TERM_UPDATE */ "update %_term set doclist = ? where rowid = ?", + /* TERM_DELETE */ "delete from %_term where rowid = ?", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. +** All other methods receive a pointer to the structure as one of their +** arguments. 
+*/ +struct fulltext_vtab { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of columns in virtual table */ + char **azColumn; /* column names. malloced */ + char **azContentColumn; /* column names in content table; malloced */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; +}; + +/* +** When the core wants to do a query, it create a cursor using a +** call to xOpen. This structure is an instance of a cursor. It +** is destroyed by xClose. +*/ +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ + sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ + int eof; /* True if at End Of Results */ + Query q; /* Parsed query string */ + Snippet snippet; /* Cached snippet for the current row */ + int iColumn; /* Column being searched */ + DocListReader result; /* used when iCursorType == QUERY_FULLTEXT */ +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static const sqlite3_module fulltextModule; /* forward declaration */ + +/* Append a list of strings separated by commas to a StringBuffer. */ +static void appendList(StringBuffer *sb, int nString, char **azString){ + int i; + for(i=0; i<nString; ++i){ + if( i>0 ) append(sb, ", "); + append(sb, azString[i]); + } +} + +/* Return a dynamically generated statement of the form + * insert into %_content (rowid, ...) values (?, ...) + */ +static const char *contentInsertStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "insert into %_content (rowid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, ") values (?"); + for(i=0; i<v->nColumn; ++i) + append(&sb, ", ?"); + append(&sb, ")"); + return sb.s; +} + +/* Return a dynamically generated statement of the form + * update %_content set [col_0] = ?, [col_1] = ?, ... + * where rowid = ? + */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; i<v->nColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where rowid = ?"); + return sb.s; +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. 
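+**
+** Editor's illustration (not part of the original patch): the calling
+** pattern used throughout this file, here sketched for TERM_DELETE_STMT:
+**
+**     sqlite3_stmt *s;
+**     int rc = sql_get_statement(v, TERM_DELETE_STMT, &s);
+**     if( rc!=SQLITE_OK ) return rc;
+**     rc = sqlite3_bind_int64(s, 1, rowid);
+**     if( rc!=SQLITE_OK ) return rc;
+**     return sql_single_step_statement(v, TERM_DELETE_STMT, &s);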
+*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmt<MAX_STMT ); + if( v->pFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Step the indicated statement, handling errors SQLITE_BUSY (by +** retrying) and SQLITE_SCHEMA (by re-preparing and transferring +** bindings to the new statement). +** TODO(adam): We should extend this function so that it can work with +** statements declared locally, not only globally cached statements. +*/ +static int sql_step_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc; + sqlite3_stmt *s = *ppStmt; + assert( iStmt<MAX_STMT ); + assert( s==v->pFulltextStatements[iStmt] ); + + while( (rc=sqlite3_step(s))!=SQLITE_DONE && rc!=SQLITE_ROW ){ + if( rc==SQLITE_BUSY ) continue; + if( rc!=SQLITE_ERROR ) return rc; + + /* If an SQLITE_SCHEMA error has occured, then finalizing this + * statement is going to delete the fulltext_vtab structure. If + * the statement just executed is in the pFulltextStatements[] + * array, it will be finalized twice. So remove it before + * calling sqlite3_finalize(). + */ + v->pFulltextStatements[iStmt] = NULL; + rc = sqlite3_finalize(s); + break; + } + return rc; +} + +/* Like sql_step_statement(), but convert SQLITE_DONE to SQLITE_OK. +** Useful for statements like UPDATE, where we expect no results. +*/ +static int sql_single_step_statement(fulltext_vtab *v, + fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc = sql_step_statement(v, iStmt, ppStmt); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; i<v->nColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step_statement(v, CONTENT_INSERT_STMT, &s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
+ * where rowid = [iRowid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iRowid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; i<v->nColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_UPDATE_STMT, &s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) free((void *) pString[i]); + } + free((void *) pString); +} + +/* select * from %_content where rowid = [iRow] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. + * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iRow, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) malloc(v->nColumn * sizeof(const char *)); + for(i=0; i<v->nColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where rowid = [iRow ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_DELETE_STMT, &s); +} + +/* select rowid, doclist from %_term + * where term = [pTerm] and segment = [iSegment] + * If found, returns SQLITE_ROW; the caller must free the + * returned doclist. If no rows found, returns SQLITE_DONE. */ +static int term_select(fulltext_vtab *v, const char *pTerm, int nTerm, + int iSegment, + sqlite_int64 *rowid, DocList *out){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, pTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, iSegment); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc; + + *rowid = sqlite3_column_int64(s, 0); + docListInit(out, DL_DEFAULT, + sqlite3_column_blob(s, 1), sqlite3_column_bytes(s, 1)); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + return rc==SQLITE_DONE ? SQLITE_ROW : rc; +} + +/* Load the segment doclists for term pTerm and merge them in +** appropriate order into out. Returns SQLITE_OK if successful. 
If +** there are no segments for pTerm, successfully returns an empty +** doclist in out. +** +** Each document consists of 1 or more "columns". The number of +** columns is v->nColumn. If iColumn==v->nColumn, then return +** position information about all columns. If iColumn<v->nColumn, +** then only return position information about the iColumn-th column +** (where the first column is 0). +*/ +static int term_select_all( + fulltext_vtab *v, /* The fulltext index we are querying against */ + int iColumn, /* If <nColumn, only look at the iColumn-th column */ + const char *pTerm, /* The term whose posting lists we want */ + int nTerm, /* Number of bytes in pTerm */ + DocList *out /* Write the resulting doclist here */ +){ + DocList doclist; + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, pTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + docListInit(&doclist, DL_DEFAULT, 0, 0); + + /* TODO(shess) Handle schema and busy errors. */ + while( (rc=sql_step_statement(v, TERM_SELECT_ALL_STMT, &s))==SQLITE_ROW ){ + DocList old; + + /* TODO(shess) If we processed doclists from oldest to newest, we + ** could skip the malloc() involved with the following call. For + ** now, I'd rather keep this logic similar to index_insert_term(). + ** We could additionally drop elements when we see deletes, but + ** that would require a distinct version of docListAccumulate(). + */ + docListInit(&old, DL_DEFAULT, + sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0)); + + if( iColumn<v->nColumn ){ /* querying a single column */ + docListRestrictColumn(&old, iColumn); + } + + /* doclist contains the newer data, so write it over old. Then + ** steal accumulated result for doclist. + */ + docListAccumulate(&old, &doclist); + docListDestroy(&doclist); + doclist = old; + } + if( rc!=SQLITE_DONE ){ + docListDestroy(&doclist); + return rc; + } + + docListDiscardEmpty(&doclist); + *out = doclist; + return SQLITE_OK; +} + +/* insert into %_term (rowid, term, segment, doclist) + values ([piRowid], [pTerm], [iSegment], [doclist]) +** Lets sqlite select rowid if piRowid is NULL, else uses *piRowid. +** +** NOTE(shess) piRowid is IN, with values of "space of int64" plus +** null, it is not used to pass data back to the caller. 
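+**
+** (Editorial sketch, not part of the original source) How a caller
+** might exercise term_select_all() above; exampleLoadDoclist is a
+** hypothetical name, while DocList, docListDestroy() and
+** term_select_all() come from earlier in this file.
+*/
+#if 0
+static int exampleLoadDoclist(fulltext_vtab *v, const char *pTerm,
+                              int nTerm){
+  DocList doclist;
+  /* iColumn==v->nColumn requests position data for every column. */
+  int rc = term_select_all(v, v->nColumn, pTerm, nTerm, &doclist);
+  if( rc!=SQLITE_OK ) return rc;
+  /* ... consume doclist.pData / doclist.nData here ... */
+  docListDestroy(&doclist);
+  return SQLITE_OK;
+}
+#endif
+/* (End of editorial sketch.)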
+*/ +static int term_insert(fulltext_vtab *v, sqlite_int64 *piRowid, + const char *pTerm, int nTerm, + int iSegment, DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + if( piRowid==NULL ){ + rc = sqlite3_bind_null(s, 1); + }else{ + rc = sqlite3_bind_int64(s, 1, *piRowid); + } + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 2, pTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 3, iSegment); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 4, doclist->pData, doclist->nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_INSERT_STMT, &s); +} + +/* update %_term set doclist = [doclist] where rowid = [rowid] */ +static int term_update(fulltext_vtab *v, sqlite_int64 rowid, + DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, doclist->pData, doclist->nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_UPDATE_STMT, &s); +} + +static int term_delete(fulltext_vtab *v, sqlite_int64 rowid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_DELETE_STMT, &s); +} + +/* +** Free the memory used to contain a fulltext_vtab structure. +*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + TRACE(("FTS1 Destroy %p\n", v)); + for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ + if( v->pFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + free(v->azContentColumn); + free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. +*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** IdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** sqlite3IsIdChar[X] must be 1. +** +** Ticket #1066. the SQL standard does not allow '$' in the +** middle of identfiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. 
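+**
+** (Editorial examples) Per the table below: IdChar('a'), IdChar('9'),
+** IdChar('_') and IdChar('$') are all true, while IdChar(' ') and
+** IdChar('-') are false, so "tax$id" scans as a single identifier
+** token but "tax-id" scans as three tokens.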
+*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning. +*/ +static int getToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !IdChar(*z) ){ + break; + } + for(i=1; IdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct Token { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} Token; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. +** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. +*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + Token *aToken = malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = getToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i<nToken; i++){ + azToken[i] = zCopy; + n = aToken[i].n; + memcpy(zCopy, aToken[i].z, n); + zCopy[n] = 0; + zCopy += n+1; + } + azToken[nToken] = 0; + free(aToken); + *pnToken = nToken; + return azToken; +} + +/* +** Convert an SQL-style quoted string into a normal string by removing +** the quote characters. The conversion is done in-place. If the +** input does not begin with a quote character, then this routine +** is a no-op. 
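+**
+** (Editorial aside, not part of the original source: a sketch of
+** tokenizeString() above in action; exampleTokenizeString is a
+** hypothetical name.)
+*/
+#if 0
+static void exampleTokenizeString(void){
+  int nToken = 0;
+  char **azToken = tokenizeString("tokenize simple", &nToken);
+  /* nToken==2, azToken[0]=="tokenize", azToken[1]=="simple",
+  ** azToken[2]==NULL.  A single free() releases the array and the
+  ** string copies, which share one allocation. */
+  free(azToken);
+}
+#endif
+/*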
+** +** Examples: +** +** "abc" becomes abc +** 'xyz' becomes xyz +** [pqr] becomes pqr +** `mno` becomes mno +*/ +static void dequoteString(char *z){ + int quote; + int i, j; + if( z==0 ) return; + quote = z[0]; + switch( quote ){ + case '\'': break; + case '"': break; + case '`': break; /* For MySQL compatibility */ + case '[': quote = ']'; break; /* For MS SqlServer compatibility */ + default: return; + } + for(i=1, j=0; z[i]; i++){ + if( z[i]==quote ){ + if( z[i+1]==quote ){ + z[j++] = quote; + i++; + }else{ + z[j++] = 0; + break; + } + }else{ + z[j++] = z[i]; + } + } +} + +/* +** The input azIn is a NULL-terminated list of tokens. Remove the first +** token and all punctuation tokens. Remove the quotes from +** around string literal tokens. +** +** Example: +** +** input: tokenize chinese ( 'simplifed' , 'mixed' ) +** output: chinese simplifed mixed +** +** Another example: +** +** input: delimiters ( '[' , ']' , '...' ) +** output: [ ] ... +*/ +static void tokenListToIdList(char **azIn){ + int i, j; + if( azIn ){ + for(i=0, j=-1; azIn[i]; i++){ + if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){ + dequoteString(azIn[i]); + if( j>=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token. Remove any quotation marks. And return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = getToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is not a alphanumeric +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and use by fulltextConnect and fulltextCreate. +*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + free(p->azColumn); + free(p->azContentColumn); + free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts1(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. + * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] 
- columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. + */ + memset(pSpec, 0, sizeof(*pSpec)); + for(i=n=0; i<argc; i++){ + n += strlen(argv[i]) + 1; + } + azArg = malloc( sizeof(char*)*argc + n ); + if( azArg==0 ){ + return SQLITE_NOMEM; + } + z = (char*)&azArg[argc]; + for(i=0; i<argc; i++){ + azArg[i] = z; + strcpy(z, argv[i]); + z += strlen(z)+1; + } + + /* Identify the column names and the tokenizer and delimiter arguments + ** in the argv[][] array. + */ + pSpec->zDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; i<argc; ++i){ + if( startsWith(azArg[i],"tokenize") ){ + zTokenizer = azArg[i]; + }else{ + z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; i<pSpec->nColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). +*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; i<nColumn; i++){ + zNext = sqlite3_mprintf("%s%s%Q", zSchema, zSep, azColumn[i]); + sqlite3_free(zSchema); + zSchema = zNext; + zSep = ","; + } + zNext = sqlite3_mprintf("%s,%Q)", zSchema, zTableName); + sqlite3_free(zSchema); + return zNext; +} + +/* +** Build a new sqlite3_vtab structure that will describe the +** fulltext index defined by spec. 
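+**
+** (Editorial sketch, not part of the original source) What
+** fulltextSchema() above yields for a two-column table named "email";
+** exampleSchema is a hypothetical name.
+*/
+#if 0
+static char *exampleSchema(void){
+  static const char *azCol[] = { "subject", "body" };
+  /* Returns "CREATE TABLE x('subject','body','email')".  The final
+  ** magic column, named after the table, is how snippet() and
+  ** offsets() receive the cursor (see fulltextColumn() below).
+  ** Free the result with sqlite3_free(). */
+  return fulltextSchema(2, azCol, "email");
+}
+#endif
+/* (End of editorial sketch.)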
+*/ +static int constructVtab( + sqlite3 *db, /* The SQLite database connection */ + TableSpec *spec, /* Parsed spec information from parseSpec() */ + sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ + char **pzErr /* Write any error message here */ +){ + int rc; + int n; + fulltext_vtab *v = 0; + const sqlite3_tokenizer_module *m = NULL; + char *schema; + + v = (fulltext_vtab *) malloc(sizeof(fulltext_vtab)); + if( v==0 ) return SQLITE_NOMEM; + memset(v, 0, sizeof(*v)); + /* sqlite will initialize v->base */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + /* TODO(shess) For now, add new tokenizers as else if clauses. */ + if( spec->azTokenizer[0]==0 || startsWith(spec->azTokenizer[0], "simple") ){ + sqlite3Fts1SimpleTokenizerModule(&m); + }else if( startsWith(spec->azTokenizer[0], "porter") ){ + sqlite3Fts1PorterTokenizerModule(&m); + }else{ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + *ppVTab = &v->base; + TRACE(("FTS1 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + + /* The %_content table holds the text of each document, with + ** the rowid used as the docid. + ** + ** The %_term table maps each term to a document list blob + ** containing elements sorted by ascending docid, each element + ** encoded as: + ** + ** docid varint-encoded + ** token elements: + ** position+1 varint-encoded as delta from previous position + ** start offset varint-encoded as delta from previous start offset + ** end offset varint-encoded as delta from start offset + ** + ** The sentinel position of 0 indicates the end of the token list. + ** + ** Additionally, doclist blobs are chunked into multiple segments, + ** using segment to order the segments. New elements are added to + ** the segment at segment 0, until it exceeds CHUNK_MAX. Then + ** segment 0 is deleted, and the doclist is inserted at segment 1. + ** If there is already a doclist at segment 1, the segment 0 doclist + ** is merged with it, the segment 1 doclist is deleted, and the + ** merged doclist is inserted at segment 2, repeating those + ** operations until an insert succeeds. 
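+ **
+ ** (Editorial worked example, a rough analogy) For a given term the
+ ** cascade behaves like a binary counter in which segment k holds at
+ ** most one doclist: after five overflows of segment 0 the merged
+ ** doclists sit at segments 1 and 3 (five is binary 101), and the
+ ** sixth overflow finds segment 1 occupied, merges with it, and
+ ** lands at segment 2, leaving segments 2 and 3 (binary 110).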
+ ** + ** Since this structure doesn't allow us to update elements in place + ** in case of deletion or update, these are simply written to + ** segment 0 (with an empty token list in case of deletion), with + ** docListAccumulate() taking care to retain lower-segment + ** information in preference to higher-segment information. + */ + /* TODO(shess) Provide a VACUUM type operation which both removes + ** deleted elements which are no longer necessary, and duplicated + ** elements. I suspect this will probably not be necessary in + ** practice, though. + */ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + TRACE(("FTS1 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, schema.s); + free(schema.s); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_term(term text, segment integer, doclist blob, " + "primary key(term, segment));"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. */ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; + TRACE(("FTS1 BestIndex\n")); + + for(i=0; i<pInfo->nConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( pConstraint->iColumn==-1 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ + TRACE(("FTS1 QUERY_ROWID\n")); + } else if( pConstraint->iColumn>=0 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + TRACE(("FTS1 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps rowid matches should be considered cheaper than + * full-text searches. 
*/ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + TRACE(("FTS1 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + TRACE(("FTS1 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_term;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + TRACE(("FTS1 Open %p: %p\n", pVTab, c)); + + return SQLITE_OK; +} + + +/* Free all of the dynamically allocated memory held by *q +*/ +static void queryClear(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + free(q->pTerms[i].pTerm); + } + free(q->pTerms); + memset(q, 0, sizeof(*q)); +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + free(p->aMatch); + free(p->zOffset); + free(p->zSnippet); + memset(p, 0, sizeof(*p)); +} +/* +** Append a single entry to the p->aMatch[] log. +*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS1_ROTOR_SZ (32) +#define FTS1_ROTOR_MASK (FTS1_ROTOR_SZ-1) + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. 
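+**
+** (Editorial note) Phrase matching below works with bitmasks: bit i
+** of "match" is set when query term i matches the current token, and
+** prevMatch is the previous token's mask shifted left by one, so a
+** term at phrase position >1 only matches when its predecessor
+** matched the preceding token.  The iRotor circular buffer keeps the
+** offsets of the last few tokens so the whole phrase can be reported
+** once its final term is seen.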
+*/ +static void snippetOffsetsOfColumn( + Query *pQuery, + Snippet *pSnippet, + int iColumn, + const char *zDoc, + int nDoc +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + const QueryTerm *aTerm; /* Query string terms */ + int nTerm; /* Number of query string terms */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS1_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS1_ROTOR_SZ]; /* Length of token */ + + pVtab = pQuery->pFts; + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + aTerm = pQuery->pTerms; + nTerm = pQuery->nTerms; + if( nTerm>=FTS1_ROTOR_SZ ){ + nTerm = FTS1_ROTOR_SZ - 1; + } + prevMatch = 0; + while(1){ + rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc ) break; + iRotorBegin[iRotor&FTS1_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS1_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i<nTerm; i++){ + int iCol; + iCol = aTerm[i].iColumn; + if( iCol>=0 && iCol<nColumn && iCol!=iColumn ) continue; + if( aTerm[i].nTerm!=nToken ) continue; + if( memcmp(aTerm[i].pTerm, zToken, nToken) ) continue; + if( aTerm[i].iPhrase>1 && (prevMatch & (1<<i))==0 ) continue; + match |= 1<<i; + if( i==nTerm-1 || aTerm[i+1].iPhrase==1 ){ + for(j=aTerm[i].iPhrase-1; j>=0; j--){ + int k = (iRotor-j) & FTS1_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. +*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + fulltext_vtab *pFts; + + if( p->snippet.nMatch ) return; + if( p->q.nTerms==0 ) return; + pFts = p->q.pFts; + nColumn = pFts->nColumn; + iColumn = p->iCursorType - QUERY_FULLTEXT; + if( iColumn<0 || iColumn>=nColumn ){ + iFirst = 0; + iLast = nColumn-1; + }else{ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); + } +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. 
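+**
+** (Editorial example) Each match contributes a space-separated
+** "column term start size" quadruple, so two matches might render as
+**
+**     0 0 6 5 1 0 27 5
+**
+** meaning byte offsets 6 and 27, five bytes each, for term 0 in
+** columns 0 and 1.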
+*/
+static void snippetOffsetText(Snippet *p){
+  int i;
+  int cnt = 0;
+  StringBuffer sb;
+  char zBuf[200];
+  if( p->zOffset ) return;
+  initStringBuffer(&sb);
+  for(i=0; i<p->nMatch; i++){
+    struct snippetMatch *pMatch = &p->aMatch[i];
+    zBuf[0] = ' ';
+    sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d",
+        pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte);
+    append(&sb, zBuf);
+    cnt++;
+  }
+  p->zOffset = sb.s;
+  p->nOffset = sb.len;
+}
+
+/*
+** zDoc[0..nDoc-1] is a phrase of text.  aMatch[0..nMatch-1] are a set
+** of matching words some of which might be in zDoc.  zDoc is column
+** number iCol.
+**
+** iBreak is the suggested spot in zDoc where we could begin or end an
+** excerpt.  Return a value similar to iBreak but possibly adjusted
+** to be a little left or right so that the break point is better.
+*/
+static int wordBoundary(
+  int iBreak,                   /* The suggested break point */
+  const char *zDoc,             /* Document text */
+  int nDoc,                     /* Number of bytes in zDoc[] */
+  struct snippetMatch *aMatch,  /* Matching words */
+  int nMatch,                   /* Number of entries in aMatch[] */
+  int iCol                      /* The column number for zDoc[] */
+){
+  int i;
+  if( iBreak<=10 ){
+    return 0;
+  }
+  if( iBreak>=nDoc-10 ){
+    return nDoc;
+  }
+  for(i=0; i<nMatch && aMatch[i].iCol<iCol; i++){}
+  while( i<nMatch && aMatch[i].iStart+aMatch[i].nByte<iBreak ){ i++; }
+  if( i<nMatch ){
+    if( aMatch[i].iStart<iBreak+10 ){
+      return aMatch[i].iStart;
+    }
+    if( i>0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){
+      return aMatch[i-1].iStart;
+    }
+  }
+  for(i=1; i<=10; i++){
+    if( safe_isspace(zDoc[iBreak-i]) ){
+      return iBreak - i + 1;
+    }
+    if( safe_isspace(zDoc[iBreak+i]) ){
+      return iBreak + i + 1;
+    }
+  }
+  return iBreak;
+}
+
+/*
+** If the StringBuffer does not end in white space, add a single
+** space character to the end.
+*/
+static void appendWhiteSpace(StringBuffer *p){
+  if( p->len==0 ) return;
+  if( safe_isspace(p->s[p->len-1]) ) return;
+  append(p, " ");
+}
+
+/*
+** Remove white space from the end of the StringBuffer
+*/
+static void trimWhiteSpace(StringBuffer *p){
+  while( p->len>0 && safe_isspace(p->s[p->len-1]) ){
+    p->len--;
+  }
+}
+
+
+
+/*
+** Allowed values for Snippet.aMatch[].snStatus
+*/
+#define SNIPPET_IGNORE  0   /* It is ok to omit this match from the snippet */
+#define SNIPPET_DESIRED 1   /* We want to include this match in the snippet */
+
+/*
+** Generate the text of a snippet.
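+**
+** (Editorial example) With the default markup used by snippetFunc()
+** below, a hit for the query 'database' might come out as:
+**
+**     ...an embeddable <b>database</b> engine...
+**
+** Matches are wrapped in zStartMark/zEndMark, and zEllipsis joins
+** discontiguous excerpts and marks a truncated tail.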
+*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; i<nMatch; i++){ + aMatch[i].snStatus = SNIPPET_IGNORE; + } + nDesired = 0; + for(i=0; i<pCursor->q.nTerms; i++){ + for(j=0; j<nMatch; j++){ + if( aMatch[j].iTerm==i ){ + aMatch[j].snStatus = SNIPPET_DESIRED; + nDesired++; + break; + } + } + } + + iMatch = 0; + tailCol = -1; + tailOffset = 0; + for(i=0; i<nMatch && nDesired>0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatch<nMatch && aMatch[iMatch].iCol<iCol ){ iMatch++; } + while( iStart<iEnd ){ + while( iMatch<nMatch && aMatch[iMatch].iStart<iStart + && aMatch[iMatch].iCol<=iCol ){ + iMatch++; + } + if( iMatch<nMatch && aMatch[iMatch].iStart<iEnd + && aMatch[iMatch].iCol==iCol ){ + nappend(&sb, &zDoc[iStart], aMatch[iMatch].iStart - iStart); + iStart = aMatch[iMatch].iStart; + append(&sb, zStartMark); + nappend(&sb, &zDoc[iStart], aMatch[iMatch].nByte); + append(&sb, zEndMark); + iStart += aMatch[iMatch].nByte; + for(j=iMatch+1; j<nMatch; j++){ + if( aMatch[j].iTerm==aMatch[iMatch].iTerm + && aMatch[j].snStatus==SNIPPET_DESIRED ){ + nDesired--; + aMatch[j].snStatus = SNIPPET_IGNORE; + } + } + }else{ + nappend(&sb, &zDoc[iStart], iEnd - iStart); + iStart = iEnd; + } + } + tailCol = iCol; + tailOffset = iEnd; + } + trimWhiteSpace(&sb); + if( tailEllipsis ){ + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + } + pCursor->snippet.zSnippet = sb.s; + pCursor->snippet.nSnippet = sb.len; +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. 
+*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + TRACE(("FTS1 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + queryClear(&c->q); + snippetClear(&c->snippet); + if( c->result.pDoclist!=NULL ){ + docListDelete(c->result.pDoclist); + } + free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + sqlite_int64 iDocid; + int rc; + + TRACE(("FTS1 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + iDocid = nextDocid(&c->result); + if( iDocid==0 ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + } +} + + +/* Return a DocList corresponding to the query term *pTerm. If *pTerm +** is the first term of a phrase query, go ahead and evaluate the phrase +** query and return the doclist for the entire phrase query. +** +** The result is stored in pTerm->doclist. +*/ +static int docListOfTerm( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* column to restrict to. No restrition if >=nColumn */ + QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ + DocList **ppResult /* Write the result here */ +){ + DocList *pLeft, *pRight, *pNew; + int i, rc; + + pLeft = docListNew(DL_POSITIONS); + rc = term_select_all(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pLeft); + if( rc ){ + docListDelete(pLeft); + return rc; + } + for(i=1; i<=pQTerm->nPhrase; i++){ + pRight = docListNew(DL_POSITIONS); + rc = term_select_all(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, pRight); + if( rc ){ + docListDelete(pLeft); + return rc; + } + pNew = docListNew(i<pQTerm->nPhrase ? DL_POSITIONS : DL_DOCIDS); + docListPhraseMerge(pLeft, pRight, pNew); + docListDelete(pLeft); + docListDelete(pRight); + pLeft = pNew; + } + *ppResult = pLeft; + return SQLITE_OK; +} + +/* Add a new term pTerm[0..nTerm-1] to the query *q. +*/ +static void queryAdd(Query *q, const char *pTerm, int nTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerms = realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + if( q->pTerms==0 ){ + q->nTerms = 0; + return; + } + t = &q->pTerms[q->nTerms - 1]; + memset(t, 0, sizeof(*t)); + t->pTerm = malloc(nTerm+1); + memcpy(t->pTerm, pTerm, nTerm); + t->pTerm[nTerm] = 0; + t->nTerm = nTerm; + t->isOr = q->nextIsOr; + q->nextIsOr = 0; + t->iColumn = q->nextColumn; + q->nextColumn = q->dfltColumn; +} + +/* +** Check to see if the string zToken[0...nToken-1] matches any +** column name in the virtual table. If it does, +** return the zero-indexed column number. If not, return -1. 
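+**
+** (Editorial example) For a table created as fts1(subject, body),
+** checkColumnSpecifier(v, "body", 4) returns 1 and
+** checkColumnSpecifier(v, "sender", 6) returns -1.  tokenizeSegment()
+** below uses this to honor the "column:term" query syntax.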
+*/
+static int checkColumnSpecifier(
+  fulltext_vtab *pVtab,    /* The virtual table */
+  const char *zToken,      /* Text of the token */
+  int nToken               /* Number of characters in the token */
+){
+  int i;
+  for(i=0; i<pVtab->nColumn; i++){
+    if( memcmp(pVtab->azColumn[i], zToken, nToken)==0
+        && pVtab->azColumn[i][nToken]==0 ){
+      return i;
+    }
+  }
+  return -1;
+}
+
+/*
+** Parse the text at pSegment[0..nSegment-1].  Add additional terms
+** to the query being assembled in pQuery.
+**
+** inPhrase is true if pSegment[0..nSegment-1] is contained within
+** double-quotes.  If inPhrase is true, then the first term
+** is marked with the number of terms in the phrase less one and
+** OR and "-" syntax is ignored.  If inPhrase is false, then every
+** term found is marked with nPhrase=0 and OR and "-" syntax is significant.
+*/
+static int tokenizeSegment(
+  sqlite3_tokenizer *pTokenizer,          /* The tokenizer to use */
+  const char *pSegment, int nSegment,     /* Query expression being parsed */
+  int inPhrase,                           /* True if within "..." */
+  Query *pQuery                           /* Append results here */
+){
+  const sqlite3_tokenizer_module *pModule = pTokenizer->pModule;
+  sqlite3_tokenizer_cursor *pCursor;
+  int firstIndex = pQuery->nTerms;
+  int iCol;
+  int nTerm = 1;
+
+  int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor);
+  if( rc!=SQLITE_OK ) return rc;
+  pCursor->pTokenizer = pTokenizer;
+
+  while( 1 ){
+    const char *pToken;
+    int nToken, iBegin, iEnd, iPos;
+
+    rc = pModule->xNext(pCursor,
+                        &pToken, &nToken,
+                        &iBegin, &iEnd, &iPos);
+    if( rc!=SQLITE_OK ) break;
+    if( !inPhrase &&
+        pSegment[iEnd]==':' &&
+        (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){
+      pQuery->nextColumn = iCol;
+      continue;
+    }
+    if( !inPhrase && pQuery->nTerms>0 && nToken==2
+        && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){
+      pQuery->nextIsOr = 1;
+      continue;
+    }
+    queryAdd(pQuery, pToken, nToken);
+    if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){
+      pQuery->pTerms[pQuery->nTerms-1].isNot = 1;
+    }
+    pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm;
+    if( inPhrase ){
+      nTerm++;
+    }
+  }
+
+  if( inPhrase && pQuery->nTerms>firstIndex ){
+    pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1;
+  }
+
+  return pModule->xClose(pCursor);
+}
+
+/* Parse a query string, yielding a Query object pQuery.
+**
+** The calling function will need to call queryClear() to clean up
+** the dynamically allocated memory held by pQuery.
+*/
+static int parseQuery(
+  fulltext_vtab *v,        /* The fulltext index */
+  const char *zInput,      /* Input text of the query string */
+  int nInput,              /* Size of the input text */
+  int dfltColumn,          /* Default column of the index to match against */
+  Query *pQuery            /* Write the parse results here. */
+){
+  int iInput, inPhrase = 0;
+
+  if( zInput==0 ) nInput = 0;
+  if( nInput<0 ) nInput = strlen(zInput);
+  pQuery->nTerms = 0;
+  pQuery->pTerms = NULL;
+  pQuery->nextIsOr = 0;
+  pQuery->nextColumn = dfltColumn;
+  pQuery->dfltColumn = dfltColumn;
+  pQuery->pFts = v;
+
+  for(iInput=0; iInput<nInput; ++iInput){
+    int i;
+    for(i=iInput; i<nInput && zInput[i]!='"'; ++i){}
+    if( i>iInput ){
+      tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase,
+                      pQuery);
+    }
+    iInput = i;
+    if( i<nInput ){
+      assert( zInput[i]=='"' );
+      inPhrase = !inPhrase;
+    }
+  }
+
+  if( inPhrase ){
+    /* unmatched quote */
+    queryClear(pQuery);
+    return SQLITE_ERROR;
+  }
+  return SQLITE_OK;
+}
+
+/* Perform a full-text query using the search expression in
+** zInput[0..nInput-1].
Return a list of matching documents +** in pResult. +** +** Queries must match column iColumn. Or if iColumn>=nColumn +** they are allowed to match against any column. +*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DocList **pResult, /* Write the result doclist here */ + Query *pQuery /* Put parsed query string here */ +){ + int i, iNext, rc; + DocList *pLeft = NULL; + DocList *pRight, *pNew, *pOr; + int nNot = 0; + QueryTerm *aTerm; + + rc = parseQuery(v, zInput, nInput, iColumn, pQuery); + if( rc!=SQLITE_OK ) return rc; + + /* Merge AND terms. */ + aTerm = pQuery->pTerms; + for(i = 0; i<pQuery->nTerms; i=iNext){ + if( aTerm[i].isNot ){ + /* Handle all NOT terms in a separate pass */ + nNot++; + iNext = i + aTerm[i].nPhrase+1; + continue; + } + iNext = i + aTerm[i].nPhrase + 1; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &pRight); + if( rc ){ + queryClear(pQuery); + return rc; + } + while( iNext<pQuery->nTerms && aTerm[iNext].isOr ){ + rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &pOr); + iNext += aTerm[iNext].nPhrase + 1; + if( rc ){ + queryClear(pQuery); + return rc; + } + pNew = docListNew(DL_DOCIDS); + docListOrMerge(pRight, pOr, pNew); + docListDelete(pRight); + docListDelete(pOr); + pRight = pNew; + } + if( pLeft==0 ){ + pLeft = pRight; + }else{ + pNew = docListNew(DL_DOCIDS); + docListAndMerge(pLeft, pRight, pNew); + docListDelete(pRight); + docListDelete(pLeft); + pLeft = pNew; + } + } + + if( nNot && pLeft==0 ){ + /* We do not yet know how to handle a query of only NOT terms */ + return SQLITE_ERROR; + } + + /* Do the EXCEPT terms */ + for(i=0; i<pQuery->nTerms; i += aTerm[i].nPhrase + 1){ + if( !aTerm[i].isNot ) continue; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &pRight); + if( rc ){ + queryClear(pQuery); + docListDelete(pLeft); + return rc; + } + pNew = docListNew(DL_DOCIDS); + docListExceptMerge(pLeft, pRight, pNew); + docListDelete(pRight); + docListDelete(pLeft); + pLeft = pNew; + } + + *pResult = pLeft; + return rc; +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts2 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + char *zSql; + + TRACE(("FTS1 Filter %p\n",pCursor)); + + zSql = sqlite3_mprintf("select rowid, * from %%_content %s", + idxNum==QUERY_GENERIC ? 
"" : "where rowid=?"); + sqlite3_finalize(c->pStmt); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) return rc; + + c->iCursorType = idxNum; + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_ROWID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + DocList *pResult; + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + queryClear(&c->q); + rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &pResult, &c->q); + if( rc!=SQLITE_OK ) return rc; + if( c->result.pDoclist!=NULL ) docListDelete(c->result.pDoclist); + readerInit(&c->result, pResult); + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's results set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxCol<v->nColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrive the rowid for the current row of the result set. The +** rowid should be written to *pRowid. +*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to the given hash table. If [iColumn] > 0, + * we also store positions and offsets in the hash table using the given + * column number. */ +static int buildTerms(fulltext_vtab *v, fts1Hash *terms, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition) ){ + DocList *p; + + /* Positions can't be negative; we use -1 as a terminator internally. 
*/ + if( iPosition<0 ){ + pTokenizer->pModule->xClose(pCursor); + return SQLITE_ERROR; + } + + p = fts1HashFind(terms, pToken, nTokenBytes); + if( p==NULL ){ + p = docListNew(DL_DEFAULT); + docListAddDocid(p, iDocid); + fts1HashInsert(terms, pToken, nTokenBytes, p); + } + if( iColumn>=0 ){ + docListAddPosOffset(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + return rc; +} + +/* Update the %_terms table to map the term [pTerm] to the given rowid. */ +static int index_insert_term(fulltext_vtab *v, const char *pTerm, int nTerm, + DocList *d){ + sqlite_int64 iIndexRow; + DocList doclist; + int iSegment = 0, rc; + + rc = term_select(v, pTerm, nTerm, iSegment, &iIndexRow, &doclist); + if( rc==SQLITE_DONE ){ + docListInit(&doclist, DL_DEFAULT, 0, 0); + docListUpdate(&doclist, d); + /* TODO(shess) Consider length(doclist)>CHUNK_MAX? */ + rc = term_insert(v, NULL, pTerm, nTerm, iSegment, &doclist); + goto err; + } + if( rc!=SQLITE_ROW ) return SQLITE_ERROR; + + docListUpdate(&doclist, d); + if( doclist.nData<=CHUNK_MAX ){ + rc = term_update(v, iIndexRow, &doclist); + goto err; + } + + /* Doclist doesn't fit, delete what's there, and accumulate + ** forward. + */ + rc = term_delete(v, iIndexRow); + if( rc!=SQLITE_OK ) goto err; + + /* Try to insert the doclist into a higher segment bucket. On + ** failure, accumulate existing doclist with the doclist from that + ** bucket, and put results in the next bucket. + */ + iSegment++; + while( (rc=term_insert(v, &iIndexRow, pTerm, nTerm, iSegment, + &doclist))!=SQLITE_OK ){ + sqlite_int64 iSegmentRow; + DocList old; + int rc2; + + /* Retain old error in case the term_insert() error was really an + ** error rather than a bounced insert. + */ + rc2 = term_select(v, pTerm, nTerm, iSegment, &iSegmentRow, &old); + if( rc2!=SQLITE_ROW ) goto err; + + rc = term_delete(v, iSegmentRow); + if( rc!=SQLITE_OK ) goto err; + + /* Reusing lowest-number deleted row keeps the index smaller. */ + if( iSegmentRow<iIndexRow ) iIndexRow = iSegmentRow; + + /* doclist contains the newer data, so accumulate it over old. + ** Then steal accumulated data for doclist. + */ + docListAccumulate(&old, &doclist); + docListDestroy(&doclist); + doclist = old; + + iSegment++; + } + + err: + docListDestroy(&doclist); + return rc; +} + +/* Add doclists for all terms in [pValues] to the hash table [terms]. */ +static int insertTerms(fulltext_vtab *v, fts1Hash *terms, sqlite_int64 iRowid, + sqlite3_value **pValues){ + int i; + for(i = 0; i < v->nColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, terms, iRowid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to the hash + * table [pTerms]. */ +static int deleteTerms(fulltext_vtab *v, fts1Hash *pTerms, sqlite_int64 iRowid){ + const char **pValues; + int i; + + int rc = content_select(v, iRowid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, pTerms, iRowid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* Insert a row into the %_content table; set *piRowid to be the ID of the + * new row. 
Fill [pTerms] with new doclists for the %_term table. */ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid, + sqlite3_value **pValues, + sqlite_int64 *piRowid, fts1Hash *pTerms){ + int rc; + + rc = content_insert(v, pRequestRowid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + *piRowid = sqlite3_last_insert_rowid(v->db); + return insertTerms(v, pTerms, *piRowid, pValues); +} + +/* Delete a row from the %_content table; fill [pTerms] with empty doclists + * to be written to the %_term table. */ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow, fts1Hash *pTerms){ + int rc = deleteTerms(v, pTerms, iRow); + if( rc!=SQLITE_OK ) return rc; + return content_delete(v, iRow); /* execute an SQL DELETE */ +} + +/* Update a row in the %_content table; fill [pTerms] with new doclists for the + * %_term table. */ +static int index_update(fulltext_vtab *v, sqlite_int64 iRow, + sqlite3_value **pValues, fts1Hash *pTerms){ + /* Generate an empty doclist for each term that previously appeared in this + * row. */ + int rc = deleteTerms(v, pTerms, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ + if( rc!=SQLITE_OK ) return rc; + + /* Now add positions for terms which appear in the updated row. */ + return insertTerms(v, pTerms, iRow, pValues); +} + +/* This function implements the xUpdate callback; it is the top-level entry + * point for inserting, deleting or updating a row in a full-text table. */ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + fts1Hash terms; /* maps term string -> PosList */ + int rc; + fts1HashElem *e; + + TRACE(("FTS1 Update %p\n", pVtab)); + + fts1HashInit(&terms, FTS1_HASH_STRING, 1); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0]), &terms); + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + } else { + assert( nArg==2+v->nColumn+1); + rc = index_update(v, rowid, &ppArg[2], &terms); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + assert( nArg==2+v->nColumn+1); + rc = index_insert(v, ppArg[1], &ppArg[2], pRowid, &terms); + } + + if( rc==SQLITE_OK ){ + /* Write updated doclists to disk. 
*/ + for(e=fts1HashFirst(&terms); e; e=fts1HashNext(e)){ + DocList *p = fts1HashData(e); + rc = index_insert_term(v, fts1HashKey(e), fts1HashKeysize(e), p); + if( rc!=SQLITE_OK ) break; + } + } + + /* clean up */ + for(e=fts1HashFirst(&terms); e; e=fts1HashNext(e)){ + DocList *p = fts1HashData(e); + docListDelete(p); + } + fts1HashClear(&terms); + + return rc; +} + +/* +** Implementation of the snippet() function for FTS1 +*/ +static void snippetFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); + }else{ + const char *zStart = "<b>"; + const char *zEnd = "</b>"; + const char *zEllipsis = "<b>...</b>"; + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + if( argc>=2 ){ + zStart = (const char*)sqlite3_value_text(argv[1]); + if( argc>=3 ){ + zEnd = (const char*)sqlite3_value_text(argv[2]); + if( argc>=4 ){ + zEllipsis = (const char*)sqlite3_value_text(argv[3]); + } + } + } + snippetAllOffsets(pCursor); + snippetText(pCursor, zStart, zEnd, zEllipsis); + sqlite3_result_text(pContext, pCursor->snippet.zSnippet, + pCursor->snippet.nSnippet, SQLITE_STATIC); + } +} + +/* +** Implementation of the offsets() function for FTS1 +*/ +static void snippetOffsetsFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to offsets",-1); + }else{ + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + snippetAllOffsets(pCursor); + snippetOffsetText(&pCursor->snippet); + sqlite3_result_text(pContext, + pCursor->snippet.zOffset, pCursor->snippet.nOffset, + SQLITE_STATIC); + } +} + +/* +** This routine implements the xFindFunction method for the FTS1 +** virtual table. +*/ +static int fulltextFindFunction( + sqlite3_vtab *pVtab, + int nArg, + const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg +){ + if( strcmp(zName,"snippet")==0 ){ + *pxFunc = snippetFunc; + return 1; + }else if( strcmp(zName,"offsets")==0 ){ + *pxFunc = snippetOffsetsFunc; + return 1; + } + return 0; +} + +/* +** Rename an fts1 table. 
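+**
+** (Editorial example) Running
+**
+**     ALTER TABLE email RENAME TO msgs;
+**
+** invokes this method, which renames the backing stores
+** email_content and email_term to msgs_content and msgs_term.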
+*/ +static int fulltextRename( + sqlite3_vtab *pVtab, + const char *zName +){ + fulltext_vtab *p = (fulltext_vtab *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" + "ALTER TABLE %Q.'%q_term' RENAME TO '%q_term';" + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + ); + if( zSql ){ + rc = sqlite3_exec(p->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static const sqlite3_module fulltextModule = { + /* iVersion */ 0, + /* xCreate */ fulltextCreate, + /* xConnect */ fulltextConnect, + /* xBestIndex */ fulltextBestIndex, + /* xDisconnect */ fulltextDisconnect, + /* xDestroy */ fulltextDestroy, + /* xOpen */ fulltextOpen, + /* xClose */ fulltextClose, + /* xFilter */ fulltextFilter, + /* xNext */ fulltextNext, + /* xEof */ fulltextEof, + /* xColumn */ fulltextColumn, + /* xRowid */ fulltextRowid, + /* xUpdate */ fulltextUpdate, + /* xBegin */ 0, + /* xSync */ 0, + /* xCommit */ 0, + /* xRollback */ 0, + /* xFindFunction */ fulltextFindFunction, + /* xRename */ fulltextRename, +}; + +int sqlite3Fts1Init(sqlite3 *db){ + sqlite3_overload_function(db, "snippet", -1); + sqlite3_overload_function(db, "offsets", -1); + return sqlite3_create_module(db, "fts1", &fulltextModule, 0); +} + +#if !SQLITE_CORE +int sqlite3_extension_init(sqlite3 *db, char **pzErrMsg, + const sqlite3_api_routines *pApi){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3Fts1Init(db); +} +#endif + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/third_party/sqlite/ext/fts1/fts1.h b/third_party/sqlite/ext/fts1/fts1.h new file mode 100755 index 0000000..d55e689 --- /dev/null +++ b/third_party/sqlite/ext/fts1/fts1.h @@ -0,0 +1,11 @@ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3Fts1Init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/third_party/sqlite/ext/fts1/fts1_hash.c b/third_party/sqlite/ext/fts1/fts1_hash.c new file mode 100755 index 0000000..463a52b --- /dev/null +++ b/third_party/sqlite/ext/fts1/fts1_hash.c @@ -0,0 +1,369 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ +#include <assert.h> +#include <stdlib.h> +#include <string.h> + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + + +#include "fts1_hash.h" + +static void *malloc_and_zero(int n){ + void *p = malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants +** FTS1_HASH_BINARY or FTS1_HASH_STRING. 
The value of keyClass
+** determines what kind of key the hash table will use. "copyKey" is
+** true if the hash table should make its own private copy of keys and
+** false if it should just use the supplied pointer.
+*/
+void sqlite3Fts1HashInit(fts1Hash *pNew, int keyClass, int copyKey){
+  assert( pNew!=0 );
+  assert( keyClass>=FTS1_HASH_STRING && keyClass<=FTS1_HASH_BINARY );
+  pNew->keyClass = keyClass;
+  pNew->copyKey = copyKey;
+  pNew->first = 0;
+  pNew->count = 0;
+  pNew->htsize = 0;
+  pNew->ht = 0;
+  pNew->xMalloc = malloc_and_zero;
+  pNew->xFree = free;
+}
+
+/* Remove all entries from a hash table. Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
+*/
+void sqlite3Fts1HashClear(fts1Hash *pH){
+  fts1HashElem *elem;        /* For looping over all elements of the table */
+
+  assert( pH!=0 );
+  elem = pH->first;
+  pH->first = 0;
+  if( pH->ht ) pH->xFree(pH->ht);
+  pH->ht = 0;
+  pH->htsize = 0;
+  while( elem ){
+    fts1HashElem *next_elem = elem->next;
+    if( pH->copyKey && elem->pKey ){
+      pH->xFree(elem->pKey);
+    }
+    pH->xFree(elem);
+    elem = next_elem;
+  }
+  pH->count = 0;
+}
+
+/*
+** Hash and comparison functions when the mode is FTS1_HASH_STRING
+*/
+static int strHash(const void *pKey, int nKey){
+  const char *z = (const char *)pKey;
+  int h = 0;
+  if( nKey<=0 ) nKey = (int) strlen(z);
+  while( nKey > 0 ){
+    h = (h<<3) ^ h ^ *z++;
+    nKey--;
+  }
+  return h & 0x7fffffff;
+}
+static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( n1!=n2 ) return 1;
+  return strncmp((const char*)pKey1,(const char*)pKey2,n1);
+}
+
+/*
+** Hash and comparison functions when the mode is FTS1_HASH_BINARY
+*/
+static int binHash(const void *pKey, int nKey){
+  int h = 0;
+  const char *z = (const char *)pKey;
+  while( nKey-- > 0 ){
+    h = (h<<3) ^ h ^ *(z++);
+  }
+  return h & 0x7fffffff;
+}
+static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( n1!=n2 ) return 1;
+  return memcmp(pKey1,pKey2,n1);
+}
+
+/*
+** Return a pointer to the appropriate hash function given the key class.
+**
+** The C syntax in this function definition may be unfamiliar to some
+** programmers, so we provide the following additional explanation:
+**
+** The name of the function is "hashFunction". The function takes a
+** single parameter "keyClass". The return value of hashFunction()
+** is a pointer to another function. Specifically, the return value
+** of hashFunction() is a pointer to a function that takes two parameters
+** with types "const void*" and "int" and returns an "int".
+*/
+static int (*hashFunction(int keyClass))(const void*,int){
+  if( keyClass==FTS1_HASH_STRING ){
+    return &strHash;
+  }else{
+    assert( keyClass==FTS1_HASH_BINARY );
+    return &binHash;
+  }
+}
+
+/*
+** Return a pointer to the appropriate comparison function given the
+** key class.
+**
+** For help in interpreting the obscure C code in the function definition,
+** see the header comment on the previous function.
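+**
+** [Editorial aside, not part of the original patch: an equivalent and
+** arguably clearer spelling of these declarations uses a typedef for
+** the function-pointer type; "fts1CompareFn" below is an illustrative
+** name only and does not appear in this patch:
+**
+**   typedef int (*fts1CompareFn)(const void*, int, const void*, int);
+**   static fts1CompareFn compareFunction(int keyClass);
+** ]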
+*/
+static int (*compareFunction(int keyClass))(const void*,int,const void*,int){
+  if( keyClass==FTS1_HASH_STRING ){
+    return &strCompare;
+  }else{
+    assert( keyClass==FTS1_HASH_BINARY );
+    return &binCompare;
+  }
+}
+
+/* Link an element into the hash table
+*/
+static void insertElement(
+  fts1Hash *pH,            /* The complete hash table */
+  struct _fts1ht *pEntry,  /* The entry into which pNew is inserted */
+  fts1HashElem *pNew       /* The element to be inserted */
+){
+  fts1HashElem *pHead;     /* First element already in pEntry */
+  pHead = pEntry->chain;
+  if( pHead ){
+    pNew->next = pHead;
+    pNew->prev = pHead->prev;
+    if( pHead->prev ){ pHead->prev->next = pNew; }
+    else { pH->first = pNew; }
+    pHead->prev = pNew;
+  }else{
+    pNew->next = pH->first;
+    if( pH->first ){ pH->first->prev = pNew; }
+    pNew->prev = 0;
+    pH->first = pNew;
+  }
+  pEntry->count++;
+  pEntry->chain = pNew;
+}
+
+
+/* Resize the hash table so that it contains "new_size" buckets.
+** "new_size" must be a power of 2. The hash table might fail
+** to resize if sqliteMalloc() fails.
+*/
+static void rehash(fts1Hash *pH, int new_size){
+  struct _fts1ht *new_ht;          /* The new hash table */
+  fts1HashElem *elem, *next_elem;  /* For looping over existing elements */
+  int (*xHash)(const void*,int);   /* The hash function */
+
+  assert( (new_size & (new_size-1))==0 );
+  new_ht = (struct _fts1ht *)pH->xMalloc( new_size*sizeof(struct _fts1ht) );
+  if( new_ht==0 ) return;
+  if( pH->ht ) pH->xFree(pH->ht);
+  pH->ht = new_ht;
+  pH->htsize = new_size;
+  xHash = hashFunction(pH->keyClass);
+  for(elem=pH->first, pH->first=0; elem; elem = next_elem){
+    int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1);
+    next_elem = elem->next;
+    insertElement(pH, &new_ht[h], elem);
+  }
+}
+
+/* This function (for internal use only) locates an element in a
+** hash table that matches the given key. The hash for this key has
+** already been computed and is passed as the 4th parameter.
+*/
+static fts1HashElem *findElementGivenHash(
+  const fts1Hash *pH,     /* The pH to be searched */
+  const void *pKey,       /* The key we are searching for */
+  int nKey,
+  int h                   /* The hash for this key. */
+){
+  fts1HashElem *elem;     /* Used to loop thru the element list */
+  int count;              /* Number of elements left to test */
+  int (*xCompare)(const void*,int,const void*,int);  /* comparison function */
+
+  if( pH->ht ){
+    struct _fts1ht *pEntry = &pH->ht[h];
+    elem = pEntry->chain;
+    count = pEntry->count;
+    xCompare = compareFunction(pH->keyClass);
+    while( count-- && elem ){
+      if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){
+        return elem;
+      }
+      elem = elem->next;
+    }
+  }
+  return 0;
+}
+
+/* Remove a single entry from the hash table given a pointer to that
+** element and a hash on the element's key.
+*/ +static void removeElementGivenHash( + fts1Hash *pH, /* The pH containing "elem" */ + fts1HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts1ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + pH->xFree(elem->pKey); + } + pH->xFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts1HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts1HashFind(const fts1Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts1HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
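+**
+** [Editorial usage sketch, not part of the original patch; it relies
+** only on the shorthand macros declared in fts1_hash.h below, with
+** pData standing for any non-NULL payload pointer:
+**
+**   fts1Hash h;
+**   fts1HashInit(&h, FTS1_HASH_STRING, 1);   copy keys on insert
+**   fts1HashInsert(&h, "alpha", 6, pData);   nKey 6 counts the '\0'
+**   pData = fts1HashFind(&h, "alpha", 6);    lookup returns the payload
+**   fts1HashInsert(&h, "alpha", 6, 0);       NULL data deletes the entry
+**   fts1HashClear(&h);                       release all memory
+** ]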
+*/
+void *sqlite3Fts1HashInsert(
+  fts1Hash *pH,        /* The hash table to insert into */
+  const void *pKey,    /* The key */
+  int nKey,            /* Number of bytes in the key */
+  void *data           /* The data */
+){
+  int hraw;                       /* Raw hash value of the key */
+  int h;                          /* the hash of the key modulo hash table size */
+  fts1HashElem *elem;             /* Used to loop thru the element list */
+  fts1HashElem *new_elem;         /* New element added to the pH */
+  int (*xHash)(const void*,int);  /* The hash function */
+
+  assert( pH!=0 );
+  xHash = hashFunction(pH->keyClass);
+  assert( xHash!=0 );
+  hraw = (*xHash)(pKey, nKey);
+  assert( (pH->htsize & (pH->htsize-1))==0 );
+  h = hraw & (pH->htsize-1);
+  elem = findElementGivenHash(pH,pKey,nKey,h);
+  if( elem ){
+    void *old_data = elem->data;
+    if( data==0 ){
+      removeElementGivenHash(pH,elem,h);
+    }else{
+      elem->data = data;
+    }
+    return old_data;
+  }
+  if( data==0 ) return 0;
+  new_elem = (fts1HashElem*)pH->xMalloc( sizeof(fts1HashElem) );
+  if( new_elem==0 ) return data;
+  if( pH->copyKey && pKey!=0 ){
+    new_elem->pKey = pH->xMalloc( nKey );
+    if( new_elem->pKey==0 ){
+      pH->xFree(new_elem);
+      return data;
+    }
+    memcpy((void*)new_elem->pKey, pKey, nKey);
+  }else{
+    new_elem->pKey = (void*)pKey;
+  }
+  new_elem->nKey = nKey;
+  pH->count++;
+  if( pH->htsize==0 ){
+    rehash(pH,8);
+    if( pH->htsize==0 ){
+      pH->count = 0;
+      pH->xFree(new_elem);
+      return data;
+    }
+  }
+  if( pH->count > pH->htsize ){
+    rehash(pH,pH->htsize*2);
+  }
+  assert( pH->htsize>0 );
+  assert( (pH->htsize & (pH->htsize-1))==0 );
+  h = hraw & (pH->htsize-1);
+  insertElement(pH, &pH->ht[h], new_elem);
+  new_elem->data = data;
+  return 0;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */
diff --git a/third_party/sqlite/ext/fts1/fts1_hash.h b/third_party/sqlite/ext/fts1/fts1_hash.h
new file mode 100755
index 0000000..c31c430
--- /dev/null
+++ b/third_party/sqlite/ext/fts1/fts1_hash.h
@@ -0,0 +1,112 @@
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for the generic hash-table implementation
+** used in SQLite. We've modified it slightly to serve as a standalone
+** hash table implementation for the full-text indexing module.
+**
+*/
+#ifndef _FTS1_HASH_H_
+#define _FTS1_HASH_H_
+
+/* Forward declarations of structures. */
+typedef struct fts1Hash fts1Hash;
+typedef struct fts1HashElem fts1HashElem;
+
+/* A complete hash table is an instance of the following structure.
+** The internals of this structure are intended to be opaque -- client
+** code should not attempt to access or modify the fields of this structure
+** directly. Change this structure only by using the routines below.
+** However, many of the "procedures" and "functions" for modifying and
+** accessing this structure are really macros, so we can't really make
+** this structure opaque.
+*/
+struct fts1Hash {
+  char keyClass;        /* FTS1_HASH_STRING or FTS1_HASH_BINARY */
+  char copyKey;         /* True if copy of key made on insert */
+  int count;            /* Number of entries in this table */
+  fts1HashElem *first;  /* The first element of the array */
+  void *(*xMalloc)(int);   /* malloc() function to use */
+  void (*xFree)(void *);   /* free() function to use */
+  int htsize;           /* Number of buckets in the hash table */
+  struct _fts1ht {      /* the hash table */
+    int count;             /* Number of entries with this hash */
+    fts1HashElem *chain;   /* Pointer to first entry with this hash */
+  } *ht;
+};
+
+/* Each element in the hash table is an instance of the following
+** structure. All elements are stored on a single doubly-linked list.
+**
+** Again, this structure is intended to be opaque, but it can't really
+** be opaque because it is used by macros.
+*/
+struct fts1HashElem {
+  fts1HashElem *next, *prev;  /* Next and previous elements in the table */
+  void *data;                 /* Data associated with this element */
+  void *pKey; int nKey;       /* Key associated with this element */
+};
+
+/*
+** There are 2 different modes of operation for a hash table:
+**
+**   FTS1_HASH_STRING  pKey points to a string that is nKey bytes long
+**                     (including the null-terminator, if any). Case
+**                     is respected in comparisons.
+**
+**   FTS1_HASH_BINARY  pKey points to binary data nKey bytes long.
+**                     memcmp() is used to compare keys.
+**
+** A copy of the key is made if the copyKey parameter to fts1HashInit is 1.
+*/
+#define FTS1_HASH_STRING    1
+#define FTS1_HASH_BINARY    2
+
+/*
+** Access routines. To delete, insert a NULL pointer.
+*/
+void sqlite3Fts1HashInit(fts1Hash*, int keytype, int copyKey);
+void *sqlite3Fts1HashInsert(fts1Hash*, const void *pKey, int nKey, void *pData);
+void *sqlite3Fts1HashFind(const fts1Hash*, const void *pKey, int nKey);
+void sqlite3Fts1HashClear(fts1Hash*);
+
+/*
+** Shorthand for the functions above
+*/
+#define fts1HashInit   sqlite3Fts1HashInit
+#define fts1HashInsert sqlite3Fts1HashInsert
+#define fts1HashFind   sqlite3Fts1HashFind
+#define fts1HashClear  sqlite3Fts1HashClear
+
+/*
+** Macros for looping over all elements of a hash table. The idiom is
+** like this:
+**
+**   fts1Hash h;
+**   fts1HashElem *p;
+**   ...
+**   for(p=fts1HashFirst(&h); p; p=fts1HashNext(p)){
+**     SomeStructure *pData = fts1HashData(p);
+**     // do something with pData
+**   }
+*/
+#define fts1HashFirst(H)    ((H)->first)
+#define fts1HashNext(E)     ((E)->next)
+#define fts1HashData(E)     ((E)->data)
+#define fts1HashKey(E)      ((E)->pKey)
+#define fts1HashKeysize(E)  ((E)->nKey)
+
+/*
+** Number of entries in a hash table
+*/
+#define fts1HashCount(H)    ((H)->count)
+
+#endif /* _FTS1_HASH_H_ */
diff --git a/third_party/sqlite/ext/fts1/fts1_porter.c b/third_party/sqlite/ext/fts1/fts1_porter.c
new file mode 100755
index 0000000..1d26236
--- /dev/null
+++ b/third_party/sqlite/ext/fts1/fts1_porter.c
@@ -0,0 +1,643 @@
+/*
+** 2006 September 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Implementation of the full-text-search tokenizer that implements
+** a Porter stemmer.
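+**
+** [Editorial note, not part of the original patch: the module defined
+** in this file is obtained through sqlite3Fts1PorterTokenizerModule(),
+** declared in fts1_tokenizer.h:
+**
+**   const sqlite3_tokenizer_module *pModule;
+**   sqlite3Fts1PorterTokenizerModule(&pModule);
+**
+** after which tokenization proceeds through the xCreate/xOpen/xNext/
+** xClose methods described in fts1_tokenizer.h.]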
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+**     * The FTS1 module is being built as an extension
+**       (in which case SQLITE_CORE is not defined), or
+**
+**     * The FTS1 module is being built into the core of
+**       SQLite (in which case SQLITE_ENABLE_FTS1 is defined).
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1)
+
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "fts1_tokenizer.h"
+
+/*
+** Class derived from sqlite3_tokenizer
+*/
+typedef struct porter_tokenizer {
+  sqlite3_tokenizer base;      /* Base class */
+} porter_tokenizer;
+
+/*
+** Class derived from sqlite3_tokenizer_cursor
+*/
+typedef struct porter_tokenizer_cursor {
+  sqlite3_tokenizer_cursor base;
+  const char *zInput;          /* input we are tokenizing */
+  int nInput;                  /* size of the input */
+  int iOffset;                 /* current position in zInput */
+  int iToken;                  /* index of next token to be returned */
+  char *zToken;                /* storage for current token */
+  int nAllocated;              /* space allocated to zToken buffer */
+} porter_tokenizer_cursor;
+
+
+/* Forward declaration */
+static const sqlite3_tokenizer_module porterTokenizerModule;
+
+
+/*
+** Create a new tokenizer instance.
+*/
+static int porterCreate(
+  int argc, const char * const *argv,
+  sqlite3_tokenizer **ppTokenizer
+){
+  porter_tokenizer *t;
+  t = (porter_tokenizer *) calloc(sizeof(*t), 1);
+  if( t==NULL ) return SQLITE_NOMEM;
+
+  *ppTokenizer = &t->base;
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int porterDestroy(sqlite3_tokenizer *pTokenizer){
+  free(pTokenizer);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is zInput[0..nInput-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int porterOpen(
+  sqlite3_tokenizer *pTokenizer,         /* The tokenizer */
+  const char *zInput, int nInput,        /* String to be tokenized */
+  sqlite3_tokenizer_cursor **ppCursor    /* OUT: Tokenization cursor */
+){
+  porter_tokenizer_cursor *c;
+
+  c = (porter_tokenizer_cursor *) malloc(sizeof(*c));
+  if( c==NULL ) return SQLITE_NOMEM;
+
+  c->zInput = zInput;
+  if( zInput==0 ){
+    c->nInput = 0;
+  }else if( nInput<0 ){
+    c->nInput = (int)strlen(zInput);
+  }else{
+    c->nInput = nInput;
+  }
+  c->iOffset = 0;                 /* start tokenizing at the beginning */
+  c->iToken = 0;
+  c->zToken = NULL;               /* no space allocated, yet. */
+  c->nAllocated = 0;
+
+  *ppCursor = &c->base;
+  return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** porterOpen() above.
+*/
+static int porterClose(sqlite3_tokenizer_cursor *pCursor){
+  porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor;
+  free(c->zToken);
+  free(c);
+  return SQLITE_OK;
+}
+/*
+** Vowel or consonant
+*/
+static const char cType[] = {
+   0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0,
+   1, 1, 1, 2, 1
+};
+
+/*
+** isConsonant() and isVowel() determine if their first character in
+** the string they point to is a consonant or a vowel, according
+** to Porter rules.
+**
+** A consonant is any letter other than 'a', 'e', 'i', 'o', or 'u'.
+** 'Y' is a consonant unless it follows another consonant,
+** in which case it is a vowel.
+**
+** In these routines, the letters are in reverse order. So the 'y' rule
+** is that 'y' is a consonant unless it is followed by another
+** consonant.
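+**
+** [Editorial example, not part of the original patch: in "toy",
+** reversed "yot", the 'y' is followed by the vowel 'o' and therefore
+** counts as a consonant; in "sky", reversed "yks", the 'y' is followed
+** by the consonant 'k' and therefore counts as a vowel.]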
+*/
+static int isVowel(const char*);
+static int isConsonant(const char *z){
+  int j;
+  char x = *z;
+  if( x==0 ) return 0;
+  assert( x>='a' && x<='z' );
+  j = cType[x-'a'];
+  if( j<2 ) return j;
+  return z[1]==0 || isVowel(z + 1);
+}
+static int isVowel(const char *z){
+  int j;
+  char x = *z;
+  if( x==0 ) return 0;
+  assert( x>='a' && x<='z' );
+  j = cType[x-'a'];
+  if( j<2 ) return 1-j;
+  return isConsonant(z + 1);
+}
+
+/*
+** Let any sequence of one or more vowels be represented by V and let
+** C be sequence of one or more consonants. Then every word can be
+** represented as:
+**
+**           [C] (VC){m} [V]
+**
+** In prose: A word is an optional consonant followed by zero or more
+** vowel-consonant pairs followed by an optional vowel. "m" is the
+** number of vowel consonant pairs. This routine computes the value
+** of m for the first i bytes of a word.
+**
+** Return true if the m-value for z is 1 or more. In other words,
+** return true if z contains at least one vowel that is followed
+** by a consonant.
+**
+** In this routine z[] is in reverse order. So we are really looking
+** for an instance of a consonant followed by a vowel.
+*/
+static int m_gt_0(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m which is
+** exactly 1
+*/
+static int m_eq_1(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 1;
+  while( isConsonant(z) ){ z++; }
+  return *z==0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m>1 instead
+** of m>0
+*/
+static int m_gt_1(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/*
+** Return TRUE if there is a vowel anywhere within z[0..n-1]
+*/
+static int hasVowel(const char *z){
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/*
+** Return TRUE if the word ends in a double consonant.
+**
+** The text is reversed here. So we are really looking at
+** the first two characters of z[].
+*/
+static int doubleConsonant(const char *z){
+  return isConsonant(z) && z[0]==z[1] && isConsonant(z+1);
+}
+
+/*
+** Return TRUE if the word ends with three letters which
+** are consonant-vowel-consonant and where the final consonant
+** is not 'w', 'x', or 'y'.
+**
+** The word is reversed here. So we are really checking the
+** first three letters and the first one cannot be in [wxy].
+*/
+static int star_oh(const char *z){
+  return
+    z[0]!=0 && isConsonant(z) &&
+    z[0]!='w' && z[0]!='x' && z[0]!='y' &&
+    z[1]!=0 && isVowel(z+1) &&
+    z[2]!=0 && isConsonant(z+2);
+}
+
+/*
+** If the word ends with zFrom and xCond() is true for the stem
+** of the word that precedes the zFrom ending, then change the
+** ending to zTo.
+**
+** The input word *pz and zFrom are both in reverse order. zTo
+** is in normal order.
+**
+** Return TRUE if zFrom matches. Return FALSE if zFrom does not
+** match. Note that TRUE is returned even if xCond() fails and
+** no substitution occurs.
+*/
+static int stem(
+  char **pz,             /* The word being stemmed (Reversed) */
+  const char *zFrom,     /* If the ending matches this... (Reversed) */
+  const char *zTo,       /* ... change the ending to this (not reversed) */
+  int (*xCond)(const char*)   /* Condition that must be true */
+){
+  char *z = *pz;
+  while( *zFrom && *zFrom==*z ){ z++; zFrom++; }
+  if( *zFrom!=0 ) return 0;
+  if( xCond && !xCond(z) ) return 1;
+  while( *zTo ){
+    *(--z) = *(zTo++);
+  }
+  *pz = z;
+  return 1;
+}
+
+/*
+** This is the fallback stemmer used when the porter stemmer is
+** inappropriate. The input word is copied into the output with
+** US-ASCII case folding. If the input word is too long (more
+** than 20 bytes if it contains no digits or more than 6 bytes if
+** it contains digits) then the word is truncated to 20 or 6 bytes
+** by taking 10 or 3 bytes from the beginning and end.
+*/
+static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+  int i, mx, j;
+  int hasDigit = 0;
+  for(i=0; i<nIn; i++){
+    int c = zIn[i];
+    if( c>='A' && c<='Z' ){
+      zOut[i] = c - 'A' + 'a';
+    }else{
+      if( c>='0' && c<='9' ) hasDigit = 1;
+      zOut[i] = c;
+    }
+  }
+  mx = hasDigit ? 3 : 10;
+  if( nIn>mx*2 ){
+    for(j=mx, i=nIn-mx; i<nIn; i++, j++){
+      zOut[j] = zOut[i];
+    }
+    i = j;
+  }
+  zOut[i] = 0;
+  *pnOut = i;
+}
+
+
+/*
+** Stem the input word zIn[0..nIn-1]. Store the output in zOut.
+** zOut is at least big enough to hold nIn bytes. Write the actual
+** size of the output word (exclusive of the '\0' terminator) into *pnOut.
+**
+** Any upper-case characters in the US-ASCII character set ([A-Z])
+** are converted to lower case. Upper-case UTF characters are
+** unchanged.
+**
+** Words that are longer than about 20 bytes are stemmed by retaining
+** a few bytes from the beginning and the end of the word. If the
+** word contains digits, 3 bytes are taken from the beginning and
+** 3 bytes from the end. For long words without digits, 10 bytes
+** are taken from each end. US-ASCII case folding still applies.
+**
+** If the input word contains no digits but does contain characters not
+** in [a-zA-Z] then no stemming is attempted and this routine just
+** copies the input into the output with US-ASCII
+** case folding.
+**
+** Stemming never increases the length of the word. So there is
+** no chance of overflowing the zOut buffer.
+*/
+static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+  int i, j, c;
+  char zReverse[28];
+  char *z, *z2;
+  if( nIn<3 || nIn>=sizeof(zReverse)-7 ){
+    /* The word is too big or too small for the porter stemmer.
+    ** Fall back to the copy stemmer */
+    copy_stemmer(zIn, nIn, zOut, pnOut);
+    return;
+  }
+  for(i=0, j=sizeof(zReverse)-6; i<nIn; i++, j--){
+    c = zIn[i];
+    if( c>='A' && c<='Z' ){
+      zReverse[j] = c + 'a' - 'A';
+    }else if( c>='a' && c<='z' ){
+      zReverse[j] = c;
+    }else{
+      /* The use of a character not in [a-zA-Z] means that we fall back
+      ** to the copy stemmer */
+      copy_stemmer(zIn, nIn, zOut, pnOut);
+      return;
+    }
+  }
+  memset(&zReverse[sizeof(zReverse)-5], 0, 5);
+  z = &zReverse[j+1];
+
+
+  /* Step 1a */
+  if( z[0]=='s' ){
+    if(
+     !stem(&z, "sess", "ss", 0) &&
+     !stem(&z, "sei", "i", 0) &&
+     !stem(&z, "ss", "ss", 0)
+    ){
+      z++;
+    }
+  }
+
+  /* Step 1b */
+  z2 = z;
+  if( stem(&z, "dee", "ee", m_gt_0) ){
+    /* Do nothing. The work was all in the test */
+  }else if(
+     (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel))
+      && z!=z2
+  ){
+     if( stem(&z, "ta", "ate", 0) ||
+         stem(&z, "lb", "ble", 0) ||
+         stem(&z, "zi", "ize", 0) ){
+       /* Do nothing.
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
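+**
+** [Editorial note, not part of the original patch: the table below only
+** covers the codes 0x30..0x7f, so for example idChar('a') and idChar('7')
+** are true, idChar('-') and idChar(' ') are false, and every byte with
+** the high bit set (any byte of a multi-byte UTF-8 sequence) is treated
+** as a token character.]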
+*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define idChar(C) (((ch=C)&0x80)!=0 || (ch>0x2f && isIdChar[ch-0x30])) +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !isIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffset<c->nInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffset<c->nInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffset<c->nInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts1PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/third_party/sqlite/ext/fts1/fts1_tokenizer.h b/third_party/sqlite/ext/fts1/fts1_tokenizer.h new file mode 100755 index 0000000..a48cb74 --- /dev/null +++ b/third_party/sqlite/ext/fts1/fts1_tokenizer.h @@ -0,0 +1,90 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS1_TOKENIZER_H_ +#define _FTS1_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. 
+** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. +*/ +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; + +struct sqlite3_tokenizer_module { + int iVersion; /* currently 0 */ + + /* + ** Create and destroy a tokenizer. argc/argv are passed down from + ** the fulltext virtual table creation to allow customization. + */ + int (*xCreate)(int argc, const char *const*argv, + sqlite3_tokenizer **ppTokenizer); + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Tokenize a particular input. Call xOpen() to prepare to + ** tokenize, xNext() repeatedly until it returns SQLITE_DONE, then + ** xClose() to free any internal state. The pInput passed to + ** xOpen() must exist until the cursor is closed. The ppToken + ** result from xNext() is only valid until the next call to xNext() + ** or until xClose() is called. + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xOpen)(sqlite3_tokenizer *pTokenizer, + const char *pInput, int nBytes, + sqlite3_tokenizer_cursor **ppCursor); + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + int (*xNext)(sqlite3_tokenizer_cursor *pCursor, + const char **ppToken, int *pnBytes, + int *piStartOffset, int *piEndOffset, int *piPosition); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ + /* Tokenizer implementations will typically add additional fields */ +}; + +/* +** Get the module for a tokenizer which generates tokens based on a +** set of non-token characters. The default is to break tokens at any +** non-alnum character, though the set of delimiters can also be +** specified by the first argv argument to xCreate(). +*/ +/* TODO(shess) This doesn't belong here. Need some sort of +** registration process. +*/ +void sqlite3Fts1SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts1PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +#endif /* _FTS1_TOKENIZER_H_ */ diff --git a/third_party/sqlite/ext/fts1/fts1_tokenizer1.c b/third_party/sqlite/ext/fts1/fts1_tokenizer1.c new file mode 100755 index 0000000..f58fba8 --- /dev/null +++ b/third_party/sqlite/ext/fts1/fts1_tokenizer1.c @@ -0,0 +1,221 @@ +/* +** The author disclaims copyright to this source code. +** +************************************************************************* +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS1 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS1 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). 
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) + + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "fts1_tokenizer.h" + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module simpleTokenizerModule; + +static int isDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) calloc(sizeof(*t), 1); + if( t==NULL ) return SQLITE_NOMEM; + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + int i, n = strlen(argv[1]); + for(i=0; i<n; i++){ + unsigned char ch = argv[1][i]; + /* We explicitly don't support UTF-8 delimiters for now. */ + if( ch>=0x80 ){ + free(t); + return SQLITE_ERROR; + } + t->delim[ch] = 1; + } + } else { + /* Mark non-alphanumeric ASCII characters as delimiters */ + int i; + for(i=1; i<0x80; i++){ + t->delim[i] = !isalnum(i); + } + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + free(c->pToken); + free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). 
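+**
+** [Editorial sketch of the calling pattern, not part of the original
+** patch; pTok and zText are assumed to come from simpleCreate() and the
+** caller, and error handling is omitted:
+**
+**   sqlite3_tokenizer_cursor *pCur;
+**   const char *z; int n, iStart, iEnd, iPos;
+**   pModule->xOpen(pTok, zText, -1, &pCur);
+**   pCur->pTokenizer = pTok;       the fulltext module sets this field
+**   while( pModule->xNext(pCur, &z, &n, &iStart, &iEnd, &iPos)==SQLITE_OK ){
+**     ...each token is z[0..n-1], already lowercased...
+**   }
+**   pModule->xClose(pCur);
+** ]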
+*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffset<c->nBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffset<c->nBytes && isDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffset<c->nBytes && !isDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; i<n; i++){ + /* TODO(shess) This needs expansion to handle UTF-8 + ** case-insensitivity. + */ + unsigned char ch = p[iStartOffset+i]; + c->pToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts1SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/third_party/sqlite/ext/fts1/fulltext.c b/third_party/sqlite/ext/fts1/fulltext.c new file mode 100755 index 0000000..e6034ba --- /dev/null +++ b/third_party/sqlite/ext/fts1/fulltext.c @@ -0,0 +1,1496 @@ +/* The author disclaims copyright to this source code. + * + * This is an SQLite module implementing full-text search. + */ + +#include <assert.h> +#if !defined(__APPLE__) +#include <malloc.h> +#else +#include <stdlib.h> +#endif +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "fulltext.h" +#include "ft_hash.h" +#include "tokenizer.h" +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + +/* utility functions */ + +/* We encode variable-length integers in little-endian order using seven bits + * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +*/ + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. 
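+ *
+ * [Editorial worked example, not part of the original patch: the value
+ * 300 (binary 100101100) splits into 7-bit groups from the low end,
+ * 0101100 then 0000010; every byte except the last keeps the 0x80 flag
+ * bit, so putVarint() emits the two bytes 0xAC 0x02.]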
*/ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*** Document lists *** + * + * A document list holds a sorted list of varint-encoded document IDs. + * + * A doclist with type DL_POSITIONS_OFFSETS is stored like this: + * + * array { + * varint docid; + * array { + * varint position; (delta from previous position plus 1, or 0 for end) + * varint startOffset; (delta from previous startOffset) + * varint endOffset; (delta from startOffset) + * } + * } + * + * Here, array { X } means zero or more occurrences of X, adjacent in memory. + * + * A doclist with type DL_POSITIONS is like the above, but holds only docids + * and positions without offset information. + * + * A doclist with type DL_DOCIDS is like the above, but holds only docids + * without positions or offset information. + * + * On disk, every document list has positions and offsets, so we don't bother + * to serialize a doclist's type. + * + * We don't yet delta-encode document IDs; doing so will probably be a + * modest win. + * + * NOTE(shess) I've thought of a slightly (1%) better offset encoding. + * After the first offset, estimate the next offset by using the + * current token position and the previous token position and offset, + * offset to handle some variance. So the estimate would be + * (iPosition*w->iStartOffset/w->iPosition-64), which is delta-encoded + * as normal. Offsets more than 64 chars from the estimate are + * encoded as the delta to the previous start offset + 128. An + * additional tiny increment can be gained by using the end offset of + * the previous token to make the estimate a tiny bit more precise. +*/ + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +typedef struct DocList { + char *pData; + int nData; + DocListType iType; + int iLastPos; /* the last position written */ + int iLastOffset; /* the last start offset written */ +} DocList; + +/* Initialize a new DocList to hold the given data. */ +static void docListInit(DocList *d, DocListType iType, + const char *pData, int nData){ + d->nData = nData; + if( nData>0 ){ + d->pData = malloc(nData); + memcpy(d->pData, pData, nData); + } else { + d->pData = NULL; + } + d->iType = iType; + d->iLastPos = 0; + d->iLastOffset = 0; +} + +/* Create a new dynamically-allocated DocList. 
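+ *
+ * [Editorial sketch of typical construction, not part of the original
+ * patch; every helper used is defined below:
+ *
+ *   DocList *d = docListNew(DL_POSITIONS_OFFSETS);
+ *   docListAddDocid(d, 42);
+ *   docListAddPosOffset(d, 0, 0, 5);    token 0 at bytes [0,5)
+ *   docListAddEndPos(d);                terminate the position list
+ *   ...
+ *   docListDelete(d);
+ * ]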
*/ +static DocList *docListNew(DocListType iType){ + DocList *d = (DocList *) malloc(sizeof(DocList)); + docListInit(d, iType, 0, 0); + return d; +} + +static void docListDestroy(DocList *d){ + free(d->pData); +#ifndef NDEBUG + memset(d, 0x55, sizeof(*d)); +#endif +} + +static void docListDelete(DocList *d){ + docListDestroy(d); + free(d); +} + +static char *docListEnd(DocList *d){ + return d->pData + d->nData; +} + +/* Append a varint to a DocList's data. */ +static void appendVarint(DocList *d, sqlite_int64 i){ + char c[VARINT_MAX]; + int n = putVarint(c, i); + d->pData = realloc(d->pData, d->nData + n); + memcpy(d->pData + d->nData, c, n); + d->nData += n; +} + +static void docListAddDocid(DocList *d, sqlite_int64 iDocid){ + appendVarint(d, iDocid); + d->iLastPos = 0; +} + +/* Add a position to the last position list in a doclist. */ +static void docListAddPos(DocList *d, int iPos){ + assert( d->iType>=DL_POSITIONS ); + appendVarint(d, iPos-d->iLastPos+1); + d->iLastPos = iPos; +} + +static void docListAddPosOffset(DocList *d, int iPos, + int iStartOffset, int iEndOffset){ + assert( d->iType==DL_POSITIONS_OFFSETS ); + docListAddPos(d, iPos); + appendVarint(d, iStartOffset-d->iLastOffset); + d->iLastOffset = iStartOffset; + appendVarint(d, iEndOffset-iStartOffset); +} + +/* Terminate the last position list in the given doclist. */ +static void docListAddEndPos(DocList *d){ + appendVarint(d, 0); +} + +typedef struct DocListReader { + DocList *pDoclist; + char *p; + int iLastPos; /* the last position read */ +} DocListReader; + +static void readerInit(DocListReader *r, DocList *pDoclist){ + r->pDoclist = pDoclist; + if( pDoclist!=NULL ){ + r->p = pDoclist->pData; + } + r->iLastPos = 0; +} + +static int readerAtEnd(DocListReader *pReader){ + return pReader->p >= docListEnd(pReader->pDoclist); +} + +/* Peek at the next docid without advancing the read pointer. */ +static sqlite_int64 peekDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !readerAtEnd(pReader) ); + getVarint(pReader->p, &ret); + return ret; +} + +/* Read the next docid. */ +static sqlite_int64 readDocid(DocListReader *pReader){ + sqlite_int64 ret; + assert( !readerAtEnd(pReader) ); + pReader->p += getVarint(pReader->p, &ret); + pReader->iLastPos = 0; + return ret; +} + +/* Read the next position from a position list. + * Returns the position, or -1 at the end of the list. */ +static int readPosition(DocListReader *pReader){ + int i; + int iType = pReader->pDoclist->iType; + assert( iType>=DL_POSITIONS ); + assert( !readerAtEnd(pReader) ); + + pReader->p += getVarint32(pReader->p, &i); + if( i==0 ){ + pReader->iLastPos = -1; + return -1; + } + pReader->iLastPos += ((int) i)-1; + if( iType>=DL_POSITIONS_OFFSETS ){ + /* Skip over offsets, ignoring them for now. */ + int iStart, iEnd; + pReader->p += getVarint32(pReader->p, &iStart); + pReader->p += getVarint32(pReader->p, &iEnd); + } + return pReader->iLastPos; +} + +/* Skip past the end of a position list. */ +static void skipPositionList(DocListReader *pReader){ + while( readPosition(pReader)!=-1 ) + ; +} + +/* Skip over a docid, including its position list if the doclist has + * positions. */ +static void skipDocument(DocListReader *pReader){ + readDocid(pReader); + if( pReader->pDoclist->iType >= DL_POSITIONS ){ + skipPositionList(pReader); + } +} + +static sqlite_int64 firstDocid(DocList *d){ + DocListReader r; + readerInit(&r, d); + return readDocid(&r); +} + +/* Doclist multi-tool. 
Pass pUpdate==NULL to delete the indicated docid; + * otherwise pUpdate, which must contain only the single docid [iDocid], is + * inserted (if not present) or updated (if already present). */ +static int docListUpdate(DocList *d, sqlite_int64 iDocid, DocList *pUpdate){ + int modified = 0; + DocListReader reader; + char *p; + + if( pUpdate!=NULL ){ + assert( d->iType==pUpdate->iType); + assert( iDocid==firstDocid(pUpdate) ); + } + + readerInit(&reader, d); + while( !readerAtEnd(&reader) && peekDocid(&reader)<iDocid ){ + skipDocument(&reader); + } + + p = reader.p; + /* Delete if there is a matching element. */ + if( !readerAtEnd(&reader) && iDocid==peekDocid(&reader) ){ + skipDocument(&reader); + memmove(p, reader.p, docListEnd(d) - reader.p); + d->nData -= (reader.p - p); + modified = 1; + } + + /* Insert if indicated. */ + if( pUpdate!=NULL ){ + int iDoclist = p-d->pData; + docListAddEndPos(pUpdate); + + d->pData = realloc(d->pData, d->nData+pUpdate->nData); + p = d->pData + iDoclist; + + memmove(p+pUpdate->nData, p, docListEnd(d) - p); + memcpy(p, pUpdate->pData, pUpdate->nData); + d->nData += pUpdate->nData; + modified = 1; + } + + return modified; +} + +/* Split the second half of doclist d into a separate doclist d2. Returns 1 + * if successful, or 0 if d contains a single document and hence can't be + * split. */ +static int docListSplit(DocList *d, DocList *d2){ + const char *pSplitPoint = d->pData + d->nData / 2; + DocListReader reader; + + readerInit(&reader, d); + while( reader.p<pSplitPoint ){ + skipDocument(&reader); + } + if( readerAtEnd(&reader) ) return 0; + docListInit(d2, d->iType, reader.p, docListEnd(d) - reader.p); + d->nData = reader.p - d->pData; + d->pData = realloc(d->pData, d->nData); + return 1; +} + +/* A DocListMerge computes the AND of an in-memory DocList [in] and a chunked + * on-disk doclist, resulting in another in-memory DocList [out]. [in] + * and [out] may or may not store position information according to the + * caller's wishes. The on-disk doclist always comes with positions. + * + * The caller must read each chunk of the on-disk doclist in succession and + * pass it to mergeBlock(). + * + * If [in] has positions, then the merge output contains only documents with + * matching positions in the two input doclists. If [in] does not have + * positions, then the merge output contains all documents common to the two + * input doclists. + * + * If [in] is NULL, then the on-disk doclist is copied to [out] directly. + * + * A merge is performed using an integer [iOffset] provided by the caller. + * [iOffset] is subtracted from each position in the on-disk doclist for the + * purpose of position comparison; this is helpful in implementing phrase + * searches. + * + * A DocListMerge is not yet able to propagate offsets through query + * processing; we should add that capability soon. +*/ +typedef struct DocListMerge { + DocListReader in; + DocList *pOut; + int iOffset; +} DocListMerge; + +static void mergeInit(DocListMerge *m, + DocList *pIn, int iOffset, DocList *pOut){ + readerInit(&m->in, pIn); + m->pOut = pOut; + m->iOffset = iOffset; + + /* can't handle offsets yet */ + assert( pIn==NULL || pIn->iType <= DL_POSITIONS ); + assert( pOut->iType <= DL_POSITIONS ); +} + +/* A helper function for mergeBlock(), below. Merge the position lists + * pointed to by m->in and pBlockReader. + * If the merge matches, write [iDocid] to m->pOut; if m->pOut + * has positions then write all matching positions as well. 
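+ *
+ * [Editorial illustration, not part of the original patch: for a phrase
+ * query "a b", the merge for term "b" runs with iOffset==1, so an on-disk
+ * position p matches an in-memory position q only when p-1==q, i.e. when
+ * "b" appears immediately after "a" in the document.]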
*/ +static void mergePosList(DocListMerge *m, sqlite_int64 iDocid, + DocListReader *pBlockReader){ + int block_pos = readPosition(pBlockReader); + int in_pos = readPosition(&m->in); + int match = 0; + while( block_pos!=-1 || in_pos!=-1 ){ + if( block_pos-m->iOffset==in_pos ){ + if( !match ){ + docListAddDocid(m->pOut, iDocid); + match = 1; + } + if( m->pOut->iType >= DL_POSITIONS ){ + docListAddPos(m->pOut, in_pos); + } + block_pos = readPosition(pBlockReader); + in_pos = readPosition(&m->in); + } else if( in_pos==-1 || (block_pos!=-1 && block_pos-m->iOffset<in_pos) ){ + block_pos = readPosition(pBlockReader); + } else { + in_pos = readPosition(&m->in); + } + } + if( m->pOut->iType >= DL_POSITIONS && match ){ + docListAddEndPos(m->pOut); + } +} + +/* Merge one block of an on-disk doclist into a DocListMerge. */ +static void mergeBlock(DocListMerge *m, DocList *pBlock){ + DocListReader blockReader; + assert( pBlock->iType >= DL_POSITIONS ); + readerInit(&blockReader, pBlock); + while( !readerAtEnd(&blockReader) ){ + sqlite_int64 iDocid = readDocid(&blockReader); + if( m->in.pDoclist!=NULL ){ + while( 1 ){ + if( readerAtEnd(&m->in) ) return; /* nothing more to merge */ + if( peekDocid(&m->in)>=iDocid ) break; + skipDocument(&m->in); + } + if( peekDocid(&m->in)>iDocid ){ /* [pIn] has no match with iDocid */ + skipPositionList(&blockReader); /* skip this docid in the block */ + continue; + } + readDocid(&m->in); + } + /* We have a document match. */ + if( m->in.pDoclist==NULL || m->in.pDoclist->iType < DL_POSITIONS ){ + /* We don't need to do a poslist merge. */ + docListAddDocid(m->pOut, iDocid); + if( m->pOut->iType >= DL_POSITIONS ){ + /* Copy all positions to the output doclist. */ + while( 1 ){ + int pos = readPosition(&blockReader); + if( pos==-1 ) break; + docListAddPos(m->pOut, pos); + } + docListAddEndPos(m->pOut); + } else skipPositionList(&blockReader); + continue; + } + mergePosList(m, iDocid, &blockReader); + } +} + +static char *string_dup_n(const char *s, int n){ + char *str = malloc(n + 1); + memcpy(str, s, n); + str[n] = '\0'; + return str; +} + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it's not part of the standard C library and + * may not be available everywhere.) */ +static char *string_dup(const char *s){ + return string_dup_n(s, strlen(s)); +} + +/* Format a string, replacing each occurrence of the % character with + * zName. This may be more convenient than sqlite_mprintf() + * when one string is used repeatedly in a format string. + * The caller must free() the returned string. */ +static char *string_format(const char *zFormat, const char *zName){ + const char *p; + size_t len = 0; + size_t nName = strlen(zName); + char *result; + char *r; + + /* first compute length needed */ + for(p = zFormat ; *p ; ++p){ + len += (*p=='%' ? 
nName : 1); + } + len += 1; /* for null terminator */ + + r = result = malloc(len); + for(p = zFormat; *p; ++p){ + if( *p=='%' ){ + memcpy(r, zName, nName); + r += nName; + } else { + *r++ = *p; + } + } + *r++ = '\0'; + assert( r == result + len ); + return result; +} + +static int sql_exec(sqlite3 *db, const char *zName, const char *zFormat){ + char *zCommand = string_format(zFormat, zName); + int rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); + free(zCommand); + return rc; +} + +static int sql_prepare(sqlite3 *db, const char *zName, sqlite3_stmt **ppStmt, + const char *zFormat){ + char *zCommand = string_format(zFormat, zName); + int rc = sqlite3_prepare(db, zCommand, -1, ppStmt, NULL); + free(zCommand); + return rc; +} + +/* end utility functions */ + +#define QUERY_GENERIC 0 +#define QUERY_FULLTEXT 1 + +#define CHUNK_MAX 1024 + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_DELETE_STMT, + + TERM_SELECT_STMT, + TERM_CHUNK_SELECT_STMT, + TERM_INSERT_STMT, + TERM_UPDATE_STMT, + TERM_DELETE_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(adam): Is there some risk that a statement (in particular, +** pTermSelectStmt) will be used in two cursors at once, e.g. if a +** query joins a virtual table to itself? If so perhaps we should +** move some of these to the cursor object. +*/ +static const char *fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ "insert into %_content (rowid, content) values (?, ?)", + /* CONTENT_SELECT */ "select content from %_content where rowid = ?", + /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + + /* TERM_SELECT */ + "select rowid, doclist from %_term where term = ? and first = ?", + /* TERM_CHUNK_SELECT */ + "select max(first) from %_term where term = ? and first <= ?", + /* TERM_INSERT */ + "insert into %_term (term, first, doclist) values (?, ?, ?)", + /* TERM_UPDATE */ "update %_term set doclist = ? where rowid = ?", + /* TERM_DELETE */ "delete from %_term where rowid = ?", +}; + +typedef struct fulltext_vtab { + sqlite3_vtab base; + sqlite3 *db; + const char *zName; /* virtual table name */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; +} fulltext_vtab; + +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; + int iCursorType; /* QUERY_GENERIC or QUERY_FULLTEXT */ + + sqlite3_stmt *pStmt; + + int eof; + + /* The following is used only when iCursorType == QUERY_FULLTEXT. */ + DocListReader result; +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static sqlite3_module fulltextModule; /* forward declaration */ + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. 
+*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmt<MAX_STMT ); + if( v->pFulltextStatements[iStmt]==NULL ){ + int rc = sql_prepare(v->db, v->zName, &v->pFulltextStatements[iStmt], + fulltext_zStatement[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Step the indicated statement, handling errors SQLITE_BUSY (by +** retrying) and SQLITE_SCHEMA (by re-preparing and transferring +** bindings to the new statement). +** TODO(adam): We should extend this function so that it can work with +** statements declared locally, not only globally cached statements. +*/ +static int sql_step_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc; + sqlite3_stmt *s = *ppStmt; + assert( iStmt<MAX_STMT ); + assert( s==v->pFulltextStatements[iStmt] ); + + while( (rc=sqlite3_step(s))!=SQLITE_DONE && rc!=SQLITE_ROW ){ + sqlite3_stmt *pNewStmt; + + if( rc==SQLITE_BUSY ) continue; + if( rc!=SQLITE_ERROR ) return rc; + + rc = sqlite3_reset(s); + if( rc!=SQLITE_SCHEMA ) return SQLITE_ERROR; + + v->pFulltextStatements[iStmt] = NULL; /* Still in s */ + rc = sql_get_statement(v, iStmt, &pNewStmt); + if( rc!=SQLITE_OK ) goto err; + *ppStmt = pNewStmt; + + rc = sqlite3_transfer_bindings(s, pNewStmt); + if( rc!=SQLITE_OK ) goto err; + + rc = sqlite3_finalize(s); + if( rc!=SQLITE_OK ) return rc; + s = pNewStmt; + } + return rc; + + err: + sqlite3_finalize(s); + return rc; +} + +/* Like sql_step_statement(), but convert SQLITE_DONE to SQLITE_OK. +** Useful for statements like UPDATE, where we expect no results. +*/ +static int sql_single_step_statement(fulltext_vtab *v, + fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + int rc = sql_step_statement(v, iStmt, ppStmt); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* insert into %_content (rowid, content) values ([rowid], [zContent]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, + const char *zContent, int nContent){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 2, zContent, nContent, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_INSERT_STMT, &s); +} + +/* select content from %_content where rowid = [iRow] + * The caller must delete the returned string. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iRow, + char **pzContent){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc; + + *pzContent = string_dup((const char *)sqlite3_column_text(s, 0)); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
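+   * (Either stepping to SQLITE_DONE or calling sqlite3_reset() releases
+   * the statement's read lock; stepping also surfaces any error from
+   * the final step here rather than later.)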
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_OK; + + free(*pzContent); + return rc; +} + +/* delete from %_content where rowid = [iRow ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, CONTENT_DELETE_STMT, &s); +} + +/* select rowid, doclist from %_term where term = [zTerm] and first = [iFirst] + * If found, returns SQLITE_OK; the caller must free the returned doclist. + * If no rows found, returns SQLITE_ERROR. */ +static int term_select(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iFirst, + sqlite_int64 *rowid, + DocList *out){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_TRANSIENT); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iFirst); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, TERM_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + + *rowid = sqlite3_column_int64(s, 0); + docListInit(out, DL_POSITIONS_OFFSETS, + sqlite3_column_blob(s, 1), sqlite3_column_bytes(s, 1)); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + return rc==SQLITE_DONE ? SQLITE_OK : rc; +} + +/* select max(first) from %_term where term = [zTerm] and first <= [iFirst] + * If found, returns SQLITE_ROW and result in *piResult; if the query returns + * NULL (meaning no row found) returns SQLITE_DONE. + */ +static int term_chunk_select(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iFirst, sqlite_int64 *piResult){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_CHUNK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iFirst); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_step_statement(v, TERM_CHUNK_SELECT_STMT, &s); + if( rc!=SQLITE_ROW ) return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + + switch( sqlite3_column_type(s, 0) ){ + case SQLITE_NULL: + rc = SQLITE_DONE; + break; + case SQLITE_INTEGER: + *piResult = sqlite3_column_int64(s, 0); + break; + default: + return SQLITE_ERROR; + } + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + if( sqlite3_step(s) != SQLITE_DONE ) return SQLITE_ERROR; + return rc; +} + +/* insert into %_term (term, first, doclist) + values ([zTerm], [iFirst], [doclist]) */ +static int term_insert(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iFirst, DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iFirst); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 3, doclist->pData, doclist->nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_INSERT_STMT, &s); +} + +/* update %_term set doclist = [doclist] where rowid = [rowid] */ +static int term_update(fulltext_vtab *v, sqlite_int64 rowid, + DocList *doclist){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, doclist->pData, doclist->nData, + SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_UPDATE_STMT, &s); +} + +static int term_delete(fulltext_vtab *v, sqlite_int64 rowid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, TERM_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step_statement(v, TERM_DELETE_STMT, &s); +} + +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt; + + for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ + if( v->pFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + free((void *) v->zName); + free(v); +} + +/* Current interface: +** argv[0] - module name +** argv[1] - database name +** argv[2] - table name +** argv[3] - tokenizer name (optional, a sensible default is provided) +** argv[4..] - passed to tokenizer (optional based on tokenizer) +**/ +static int fulltextConnect(sqlite3 *db, void *pAux, int argc, char **argv, + sqlite3_vtab **ppVTab){ + int rc; + fulltext_vtab *v; + sqlite3_tokenizer_module *m = NULL; + + assert( argc>=3 ); + v = (fulltext_vtab *) malloc(sizeof(fulltext_vtab)); + /* sqlite will initialize v->base */ + v->db = db; + v->zName = string_dup(argv[2]); + v->pTokenizer = NULL; + + if( argc==3 ){ + get_simple_tokenizer_module(&m); + } else { + /* TODO(shess) For now, add new tokenizers as else if clauses. */ + if( !strcmp(argv[3], "simple") ){ + get_simple_tokenizer_module(&m); + } else { + assert( "unrecognized tokenizer"==NULL ); + } + } + + /* TODO(shess) Since tokenization impacts the index, the parameters + ** to the tokenizer need to be identical when a persistent virtual + ** table is re-created. One solution would be a meta-table to track + ** such information in the database. Then we could verify that the + ** information is identical on subsequent creates. + */ + /* TODO(shess) Why isn't argv already (const char **)? 
*/ + rc = m->xCreate(argc-3, (const char **) (argv+3), &v->pTokenizer); + if( rc!=SQLITE_OK ) return rc; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + rc = sqlite3_declare_vtab(db, "create table x(content text)"); + if( rc!=SQLITE_OK ) return rc; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + *ppVTab = &v->base; + return SQLITE_OK; +} + +static int fulltextCreate(sqlite3 *db, void *pAux, int argc, char **argv, + sqlite3_vtab **ppVTab){ + int rc; + assert( argc>=3 ); + + /* The %_content table holds the text of each full-text item, with + ** the rowid used as the docid. + ** + ** The %_term table maps each term to a document list blob + ** containing elements sorted by ascending docid, each element + ** encoded as: + ** + ** docid varint-encoded + ** token count varint-encoded + ** "count" token elements (poslist): + ** position varint-encoded as delta from previous position + ** start offset varint-encoded as delta from previous start offset + ** end offset varint-encoded as delta from start offset + ** + ** Additionally, doclist blobs can be chunked into multiple rows, + ** using "first" to order the blobs. "first" is simply the first + ** docid in the blob. + */ + /* + ** NOTE(shess) That last sentence is incorrect in the face of + ** deletion, which can leave a doclist that doesn't contain the + ** first from that row. I _believe_ this does not matter to the + ** operation of the system, but it might be reasonable to update + ** appropriately in case this assumption becomes more important. + */ + rc = sql_exec(db, argv[2], + "create table %_content(content text);" + "create table %_term(term text, first integer, doclist blob);" + "create index %_index on %_term(term, first)"); + if( rc!=SQLITE_OK ) return rc; + + return fulltextConnect(db, pAux, argc, argv, ppVTab); +} + +/* Decide how to handle an SQL query. + * At the moment, MATCH queries can include implicit boolean ANDs; we + * haven't implemented phrase searches or OR yet. 
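+ *
+ * For example (hypothetical table t1 with column [content]), the query
+ *
+ *   SELECT rowid FROM t1 WHERE content MATCH 'sqlite index';
+ *
+ * is planned below as QUERY_FULLTEXT and matches documents containing
+ * both terms, while a query with no usable MATCH constraint falls back
+ * to QUERY_GENERIC, a plain scan of the %_content table.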
*/ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; + + for(i=0; i<pInfo->nConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->iColumn==0 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH && + pConstraint->usable ){ /* a full-text search */ + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + pInfo->idxNum = QUERY_FULLTEXT; + pInfo->estimatedCost = 1.0; /* an arbitrary value for now */ + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + + int rc = sql_exec(v->db, v->zName, + "drop table %_content; drop table %_term"); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + + return SQLITE_OK; +} + +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + sqlite3_finalize(c->pStmt); + if( c->result.pDoclist!=NULL ){ + docListDelete(c->result.pDoclist); + } + free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + sqlite_int64 iDocid; + int rc; + + switch( c->iCursorType ){ + case QUERY_GENERIC: + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + case QUERY_FULLTEXT: + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( readerAtEnd(&c->result)){ + c->eof = 1; + return SQLITE_OK; + } + iDocid = readDocid(&c->result); + rc = sqlite3_bind_int64(c->pStmt, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + default: + assert( 0 ); + return SQLITE_ERROR; /* not reached */ + } +} + +static int term_select_doclist(fulltext_vtab *v, const char *pTerm, int nTerm, + sqlite3_stmt **ppStmt){ + int rc; + if( *ppStmt ){ + rc = sqlite3_reset(*ppStmt); + } else { + rc = sql_prepare(v->db, v->zName, ppStmt, + "select doclist from %_term where term = ? order by first"); + } + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_text(*ppStmt, 1, pTerm, nTerm, SQLITE_TRANSIENT); + if( rc!=SQLITE_OK ) return rc; + + return sqlite3_step(*ppStmt); /* TODO(adamd): handle schema error */ +} + +/* Read the posting list for [zTerm]; AND it with the doclist [in] to + * produce the doclist [out], using the given offset [iOffset] for phrase + * matching. + * (*pSelect) is used to hold an SQLite statement used inside this function; + * the caller should initialize *pSelect to NULL before the first call. 
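+ *
+ * Sketch of the intended call pattern (cf. fulltext_query() below):
+ *
+ *   sqlite3_stmt *pSelect = NULL;
+ *   rc = query_merge(v, &pSelect, zTerm, pIn, iOffset, pOut);
+ *   ...
+ *   sqlite3_finalize(pSelect);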
+ */ +static int query_merge(fulltext_vtab *v, sqlite3_stmt **pSelect, + const char *zTerm, + DocList *pIn, int iOffset, DocList *out){ + int rc; + DocListMerge merge; + + if( pIn!=NULL && !pIn->nData ){ + /* If [pIn] is already empty, there's no point in reading the + * posting list to AND it in; return immediately. */ + return SQLITE_OK; + } + + rc = term_select_doclist(v, zTerm, -1, pSelect); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + mergeInit(&merge, pIn, iOffset, out); + while( rc==SQLITE_ROW ){ + DocList block; + docListInit(&block, DL_POSITIONS_OFFSETS, + sqlite3_column_blob(*pSelect, 0), + sqlite3_column_bytes(*pSelect, 0)); + mergeBlock(&merge, &block); + docListDestroy(&block); + + rc = sqlite3_step(*pSelect); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ + return rc; + } + } + + return SQLITE_OK; +} + +typedef struct QueryTerm { + int is_phrase; /* true if this term begins a new phrase */ + const char *zTerm; +} QueryTerm; + +/* A parsed query. + * + * As an example, parsing the query ["four score" years "new nation"] will + * yield a Query with 5 terms: + * "four", is_phrase = 1 + * "score", is_phrase = 0 + * "years", is_phrase = 1 + * "new", is_phrase = 1 + * "nation", is_phrase = 0 + */ +typedef struct Query { + int nTerms; + QueryTerm *pTerm; +} Query; + +static void query_add(Query *q, int is_phrase, const char *zTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerm = realloc(q->pTerm, q->nTerms * sizeof(q->pTerm[0])); + t = &q->pTerm[q->nTerms - 1]; + t->is_phrase = is_phrase; + t->zTerm = zTerm; +} + +static void query_free(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + free((void *) q->pTerm[i].zTerm); + } + free(q->pTerm); +} + +static int tokenize_segment(sqlite3_tokenizer *pTokenizer, + const char *zQuery, int in_phrase, + Query *pQuery){ + sqlite3_tokenizer_module *pModule = pTokenizer->pModule; + sqlite3_tokenizer_cursor *pCursor; + int is_first = 1; + + int rc = pModule->xOpen(pTokenizer, zQuery, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + pCursor->pTokenizer = pTokenizer; + + while( 1 ){ + const char *zToken; + int nToken, iStartOffset, iEndOffset, dummy_pos; + + rc = pModule->xNext(pCursor, + &zToken, &nToken, + &iStartOffset, &iEndOffset, + &dummy_pos); + if( rc!=SQLITE_OK ) break; + query_add(pQuery, !in_phrase || is_first, string_dup_n(zToken, nToken)); + is_first = 0; + } + + return pModule->xClose(pCursor); +} + +/* Parse a query string, yielding a Query object. */ +static int parse_query(fulltext_vtab *v, const char *zQuery, Query *pQuery){ + char *zQuery1 = string_dup(zQuery); + int in_phrase = 0; + char *s = zQuery1; + pQuery->nTerms = 0; + pQuery->pTerm = NULL; + + while( *s ){ + char *t = s; + while( *t ){ + if( *t=='"' ){ + *t++ = '\0'; + break; + } + ++t; + } + if( *s ){ + tokenize_segment(v->pTokenizer, s, in_phrase, pQuery); + } + s = t; + in_phrase = !in_phrase; + } + + free(zQuery1); + return SQLITE_OK; +} + +/* Perform a full-text query; return a list of documents in [pResult]. */ +static int fulltext_query(fulltext_vtab *v, const char *zQuery, + DocList **pResult){ + Query q; + int phrase_start = -1; + int i; + sqlite3_stmt *pSelect = NULL; + DocList *d = NULL; + + int rc = parse_query(v, zQuery, &q); + if( rc!=SQLITE_OK ) return rc; + + /* Merge terms. */ + for(i = 0 ; i < q.nTerms ; ++i){ + /* In each merge step, we need to generate positions whenever we're + * processing a phrase which hasn't ended yet. */ + int need_positions = i<q.nTerms-1 && !q.pTerm[i+1].is_phrase; + DocList *next = docListNew(need_positions ? 
DL_POSITIONS : DL_DOCIDS); + if( q.pTerm[i].is_phrase ){ + phrase_start = i; + } + rc = query_merge(v, &pSelect, q.pTerm[i].zTerm, d, i - phrase_start, next); + if( rc!=SQLITE_OK ) break; + if( d!=NULL ){ + docListDelete(d); + } + d = next; + } + + sqlite3_finalize(pSelect); + query_free(&q); + *pResult = d; + return rc; +} + +static int fulltextFilter(sqlite3_vtab_cursor *pCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + const char *zStatement; + + c->iCursorType = idxNum; + switch( idxNum ){ + case QUERY_GENERIC: + zStatement = "select rowid, content from %_content"; + break; + + case QUERY_FULLTEXT: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + DocList *pResult; + assert( argc==1 ); + rc = fulltext_query(v, zQuery, &pResult); + if( rc!=SQLITE_OK ) return rc; + readerInit(&c->result, pResult); + zStatement = "select rowid, content from %_content where rowid = ?"; + break; + } + + default: + assert( 0 ); + } + + rc = sql_prepare(v->db, v->zName, &c->pStmt, zStatement); + if( rc!=SQLITE_OK ) return rc; + + return fulltextNext(pCursor); +} + +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + const char *s; + + assert( idxCol==0 ); + s = (const char *) sqlite3_column_text(c->pStmt, 1); + sqlite3_result_text(pContext, s, -1, SQLITE_TRANSIENT); + + return SQLITE_OK; +} + +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Build a hash table containing all terms in zText. */ +static int build_terms(Hash *terms, sqlite3_tokenizer *pTokenizer, + const char *zText, sqlite_int64 iDocid){ + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + + int rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + HashInit(terms, HASH_STRING, 1); + while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition) ){ + DocList *p; + + /* Positions can't be negative; we use -1 as a terminator internally. */ + if( iPosition<0 ) { + rc = SQLITE_ERROR; + goto err; + } + + p = HashFind(terms, pToken, nTokenBytes); + if( p==NULL ){ + p = docListNew(DL_POSITIONS_OFFSETS); + docListAddDocid(p, iDocid); + HashInsert(terms, pToken, nTokenBytes, p); + } + docListAddPosOffset(p, iPosition, iStartOffset, iEndOffset); + } + +err: + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + return rc; +} +/* Update the %_terms table to map the term [zTerm] to the given rowid. 
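+ * A term's doclist may be chunked across several %_term rows:
+ * term_chunk_select() locates the row whose [first] docid covers
+ * iDocid, docListUpdate() merges the new positions into that row's
+ * blob, and the blob is split in half once it grows past CHUNK_MAX.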
*/ +static int index_insert_term(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iDocid, DocList *p){ + sqlite_int64 iFirst; + sqlite_int64 iIndexRow; + DocList doclist; + + int rc = term_chunk_select(v, zTerm, nTerm, iDocid, &iFirst); + if( rc==SQLITE_DONE ){ + docListInit(&doclist, DL_POSITIONS_OFFSETS, 0, 0); + if( docListUpdate(&doclist, iDocid, p) ){ + rc = term_insert(v, zTerm, nTerm, iDocid, &doclist); + docListDestroy(&doclist); + return rc; + } + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return SQLITE_ERROR; + + /* This word is in the index; add this document ID to its blob. */ + + rc = term_select(v, zTerm, nTerm, iFirst, &iIndexRow, &doclist); + if( rc!=SQLITE_OK ) return rc; + + if( docListUpdate(&doclist, iDocid, p) ){ + /* If the blob is too big, split it in half. */ + if( doclist.nData>CHUNK_MAX ){ + DocList half; + if( docListSplit(&doclist, &half) ){ + rc = term_insert(v, zTerm, nTerm, firstDocid(&half), &half); + docListDestroy(&half); + if( rc!=SQLITE_OK ) goto err; + } + } + rc = term_update(v, iIndexRow, &doclist); + } + +err: + docListDestroy(&doclist); + return rc; +} + +/* Insert a row into the full-text index; set *piRowid to be the ID of the + * new row. */ +static int index_insert(fulltext_vtab *v, + sqlite3_value *pRequestRowid, const char *zText, + sqlite_int64 *piRowid){ + Hash terms; /* maps term string -> PosList */ + HashElem *e; + + int rc = content_insert(v, pRequestRowid, zText, -1); + if( rc!=SQLITE_OK ) return rc; + *piRowid = sqlite3_last_insert_rowid(v->db); + + if( !zText ) return SQLITE_OK; /* nothing to index */ + + rc = build_terms(&terms, v->pTokenizer, zText, *piRowid); + if( rc!=SQLITE_OK ) return rc; + + for(e=HashFirst(&terms); e; e=HashNext(e)){ + DocList *p = HashData(e); + rc = index_insert_term(v, HashKey(e), HashKeysize(e), *piRowid, p); + if( rc!=SQLITE_OK ) break; + } + + for(e=HashFirst(&terms); e; e=HashNext(e)){ + DocList *p = HashData(e); + docListDelete(p); + } + HashClear(&terms); + return rc; +} + +static int index_delete_term(fulltext_vtab *v, const char *zTerm, int nTerm, + sqlite_int64 iDocid){ + sqlite_int64 iFirst; + sqlite_int64 iIndexRow; + DocList doclist; + + int rc = term_chunk_select(v, zTerm, nTerm, iDocid, &iFirst); + if( rc!=SQLITE_ROW ) return SQLITE_ERROR; + + rc = term_select(v, zTerm, nTerm, iFirst, &iIndexRow, &doclist); + if( rc!=SQLITE_OK ) return rc; + + if( docListUpdate(&doclist, iDocid, NULL) ){ + if( doclist.nData>0 ){ + rc = term_update(v, iIndexRow, &doclist); + } else { /* empty posting list */ + rc = term_delete(v, iIndexRow); + } + } + docListDestroy(&doclist); + return rc; +} + +/* Delete a row from the full-text index. 
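+ * The row's content is re-tokenized (via build_terms()) to recover the
+ * terms whose doclists reference iRow; index_delete_term() then drops
+ * that docid from each doclist before the content row itself is deleted.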
*/ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ + char *zText; + Hash terms; + HashElem *e; + + int rc = content_select(v, iRow, &zText); + if( rc!=SQLITE_OK ) return rc; + + rc = build_terms(&terms, v->pTokenizer, zText, iRow); + free(zText); + if( rc!=SQLITE_OK ) return rc; + + for(e=HashFirst(&terms); e; e=HashNext(e)){ + rc = index_delete_term(v, HashKey(e), HashKeysize(e), iRow); + if( rc!=SQLITE_OK ) break; + } + for(e=HashFirst(&terms); e; e=HashNext(e)){ + DocList *p = HashData(e); + docListDelete(p); + } + HashClear(&terms); + + return content_delete(v, iRow); +} + +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + + if( nArg<2 ){ + return index_delete(v, sqlite3_value_int64(ppArg[0])); + } + + if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + return SQLITE_ERROR; /* an update; not yet supported */ + } + + assert( nArg==3 ); /* ppArg[1] = rowid, ppArg[2] = content */ + return index_insert(v, ppArg[1], + (const char *)sqlite3_value_text(ppArg[2]), pRowid); +} + +static sqlite3_module fulltextModule = { + 0, + fulltextCreate, + fulltextConnect, + fulltextBestIndex, + fulltextDisconnect, + fulltextDestroy, + fulltextOpen, + fulltextClose, + fulltextFilter, + fulltextNext, + fulltextEof, + fulltextColumn, + fulltextRowid, + fulltextUpdate +}; + +int fulltext_init(sqlite3 *db){ + return sqlite3_create_module(db, "fulltext", &fulltextModule, 0); +} + +#if !SQLITE_CORE +int sqlite3_extension_init(sqlite3 *db, char **pzErrMsg, + const sqlite3_api_routines *pApi){ + SQLITE_EXTENSION_INIT2(pApi) + return fulltext_init(db); +} +#endif diff --git a/third_party/sqlite/ext/fts1/fulltext.h b/third_party/sqlite/ext/fts1/fulltext.h new file mode 100755 index 0000000..477dcab --- /dev/null +++ b/third_party/sqlite/ext/fts1/fulltext.h @@ -0,0 +1,11 @@ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int fulltext_init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/third_party/sqlite/ext/fts1/simple_tokenizer.c b/third_party/sqlite/ext/fts1/simple_tokenizer.c new file mode 100755 index 0000000..d00a770 --- /dev/null +++ b/third_party/sqlite/ext/fts1/simple_tokenizer.c @@ -0,0 +1,174 @@ +/* +** The author disclaims copyright to this source code. +** +************************************************************************* +** Implementation of the "simple" full-text-search tokenizer. +*/ + +#include <assert.h> +#if !defined(__APPLE__) +#include <malloc.h> +#else +#include <stdlib.h> +#endif +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "tokenizer.h" + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it's not part of the standard C library and + * may not be available everywhere.) */ +/* TODO(shess) Copied from fulltext.c, consider util.c for such +** things. 
*/ +static char *string_dup(const char *s){ + char *str = malloc(strlen(s) + 1); + strcpy(str, s); + return str; +} + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + const char *zDelim; /* token delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + const char *pCurrent; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nTokenBytes; /* actual size of current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + +static sqlite3_tokenizer_module simpleTokenizerModule;/* forward declaration */ + +static int simpleCreate( + int argc, const char **argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) malloc(sizeof(simple_tokenizer)); + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + t->zDelim = string_dup(argv[1]); + } else { + /* Build a string excluding alphanumeric ASCII characters */ + char zDelim[0x80]; /* nul-terminated, so nul not a member */ + int i, j; + for(i=1, j=0; i<0x80; i++){ + if( !isalnum(i) ){ + zDelim[j++] = i; + } + } + zDelim[j++] = '\0'; + assert( j<=sizeof(zDelim) ); + t->zDelim = string_dup(zDelim); + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + simple_tokenizer *t = (simple_tokenizer *) pTokenizer; + + free((void *) t->zDelim); + free(t); + + return SQLITE_OK; +} + +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, + const char *pInput, int nBytes, + sqlite3_tokenizer_cursor **ppCursor +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) malloc(sizeof(simple_tokenizer_cursor)); + c->pInput = pInput; + c->nBytes = nBytes<0 ? (int) strlen(pInput) : nBytes; + c->pCurrent = c->pInput; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nTokenBytes = 0; + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + + if( NULL!=c->zToken ){ + free(c->zToken); + } + free(c); + + return SQLITE_OK; +} + +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, + const char **ppToken, int *pnBytes, + int *piStartOffset, int *piEndOffset, int *piPosition +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + int ii; + + while( c->pCurrent-c->pInput<c->nBytes ){ + int n = (int) strcspn(c->pCurrent, t->zDelim); + if( n>0 ){ + if( n+1>c->nTokenAllocated ){ + c->zToken = realloc(c->zToken, n+1); + } + for(ii=0; ii<n; ii++){ + /* TODO(shess) This needs expansion to handle UTF-8 + ** case-insensitivity. + */ + char ch = c->pCurrent[ii]; + c->zToken[ii] = (unsigned char)ch<0x80 ? 
tolower(ch) : ch; + } + c->zToken[n] = '\0'; + *ppToken = c->zToken; + *pnBytes = n; + *piStartOffset = (int) (c->pCurrent-c->pInput); + *piEndOffset = *piStartOffset+n; + *piPosition = c->iToken++; + c->pCurrent += n + 1; + + return SQLITE_OK; + } + c->pCurrent += n + 1; + /* TODO(shess) could strspn() to skip delimiters en masse. Needs + ** to happen in two places, though, which is annoying. + */ + } + return SQLITE_DONE; +} + +static sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +void get_simple_tokenizer_module( + sqlite3_tokenizer_module **ppModule +){ + *ppModule = &simpleTokenizerModule; +} diff --git a/third_party/sqlite/ext/fts1/tokenizer.h b/third_party/sqlite/ext/fts1/tokenizer.h new file mode 100755 index 0000000..1d7bd1f --- /dev/null +++ b/third_party/sqlite/ext/fts1/tokenizer.h @@ -0,0 +1,89 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _TOKENIZER_H_ +#define _TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. +*/ +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; + +struct sqlite3_tokenizer_module { + int iVersion; /* currently 0 */ + + /* + ** Create and destroy a tokenizer. argc/argv are passed down from + ** the fulltext virtual table creation to allow customization. + */ + int (*xCreate)(int argc, const char **argv, + sqlite3_tokenizer **ppTokenizer); + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Tokenize a particular input. Call xOpen() to prepare to + ** tokenize, xNext() repeatedly until it returns SQLITE_DONE, then + ** xClose() to free any internal state. The pInput passed to + ** xOpen() must exist until the cursor is closed. The ppToken + ** result from xNext() is only valid until the next call to xNext() + ** or until xClose() is called. + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. 
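+  **
+  ** A driver loop might look like this sketch (cf. build_terms() in
+  ** fulltext.c; variable declarations elided):
+  **
+  **   rc = pModule->xOpen(pTokenizer, zText, -1, &pCursor);
+  **   pCursor->pTokenizer = pTokenizer;
+  **   while( pModule->xNext(pCursor, &zToken, &nBytes,
+  **                         &iStart, &iEnd, &iPos)==SQLITE_OK ){
+  **     ... consume zToken[0..nBytes-1] ...
+  **   }
+  **   pModule->xClose(pCursor);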
+ */ + int (*xOpen)(sqlite3_tokenizer *pTokenizer, + const char *pInput, int nBytes, + sqlite3_tokenizer_cursor **ppCursor); + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + int (*xNext)(sqlite3_tokenizer_cursor *pCursor, + const char **ppToken, int *pnBytes, + int *piStartOffset, int *piEndOffset, int *piPosition); +}; + +struct sqlite3_tokenizer { + sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ + /* Tokenizer implementations will typically add additional fields */ +}; + +/* +** Get the module for a tokenizer which generates tokens based on a +** set of non-token characters. The default is to break tokens at any +** non-alnum character, though the set of delimiters can also be +** specified by the first argv argument to xCreate(). +*/ +/* TODO(shess) This doesn't belong here. Need some sort of +** registration process. +*/ +void get_simple_tokenizer_module(sqlite3_tokenizer_module **ppModule); + +#endif /* _TOKENIZER_H_ */ diff --git a/third_party/sqlite/ext/fts2/README.tokenizers b/third_party/sqlite/ext/fts2/README.tokenizers new file mode 100755 index 0000000..98d2021 --- /dev/null +++ b/third_party/sqlite/ext/fts2/README.tokenizers @@ -0,0 +1,133 @@ + +1. FTS2 Tokenizers + + When creating a new full-text table, FTS2 allows the user to select + the text tokenizer implementation to be used when indexing text + by specifying a "tokenizer" clause as part of the CREATE VIRTUAL TABLE + statement: + + CREATE VIRTUAL TABLE <table-name> USING fts2( + <columns ...> [, tokenizer <tokenizer-name> [<tokenizer-args>]] + ); + + The built-in tokenizers (valid values to pass as <tokenizer name>) are + "simple" and "porter". + + <tokenizer-args> should consist of zero or more white-space separated + arguments to pass to the selected tokenizer implementation. The + interpretation of the arguments, if any, depends on the individual + tokenizer. + +2. Custom Tokenizers + + FTS2 allows users to provide custom tokenizer implementations. The + interface used to create a new tokenizer is defined and described in + the fts2_tokenizer.h source file. + + Registering a new FTS2 tokenizer is similar to registering a new + virtual table module with SQLite. The user passes a pointer to a + structure containing pointers to various callback functions that + make up the implementation of the new tokenizer type. For tokenizers, + the structure (defined in fts2_tokenizer.h) is called + "sqlite3_tokenizer_module". + + FTS2 does not expose a C-function that users call to register new + tokenizer types with a database handle. Instead, the pointer must + be encoded as an SQL blob value and passed to FTS2 through the SQL + engine by evaluating a special scalar function, "fts2_tokenizer()". + The fts2_tokenizer() function may be called with one or two arguments, + as follows: + + SELECT fts2_tokenizer(<tokenizer-name>); + SELECT fts2_tokenizer(<tokenizer-name>, <sqlite3_tokenizer_module ptr>); + + Where <tokenizer-name> is a string identifying the tokenizer and + <sqlite3_tokenizer_module ptr> is a pointer to an sqlite3_tokenizer_module + structure encoded as an SQL blob. If the second argument is present, + it is registered as tokenizer <tokenizer-name> and a copy of it + returned. 
If only one argument is passed, a pointer to the tokenizer + implementation currently registered as <tokenizer-name> is returned, + encoded as a blob. Or, if no such tokenizer exists, an SQL exception + (error) is raised. + + SECURITY: If the fts2 extension is used in an environment where potentially + malicious users may execute arbitrary SQL (i.e. gears), they should be + prevented from invoking the fts2_tokenizer() function, possibly using the + authorisation callback. + + See "Sample code" below for an example of calling the fts2_tokenizer() + function from C code. + +3. ICU Library Tokenizers + + If this extension is compiled with the SQLITE_ENABLE_ICU pre-processor + symbol defined, then there exists a built-in tokenizer named "icu" + implemented using the ICU library. The first argument passed to the + xCreate() method (see fts2_tokenizer.h) of this tokenizer may be + an ICU locale identifier. For example "tr_TR" for Turkish as used + in Turkey, or "en_AU" for English as used in Australia. For example: + + "CREATE VIRTUAL TABLE thai_text USING fts2(text, tokenizer icu th_TH)" + + The ICU tokenizer implementation is very simple. It splits the input + text according to the ICU rules for finding word boundaries and discards + any tokens that consist entirely of white-space. This may be suitable + for some applications in some locales, but not all. If more complex + processing is required, for example to implement stemming or + discard punctuation, this can be done by creating a tokenizer + implementation that uses the ICU tokenizer as part of its implementation. + + When using the ICU tokenizer this way, it is safe to overwrite the + contents of the strings returned by the xNext() method (see + fts2_tokenizer.h). + +4. Sample code. + + The following two code samples illustrate the way C code should invoke + the fts2_tokenizer() scalar function: + + int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts2_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); + } + + int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts2_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); + } diff --git a/third_party/sqlite/ext/fts2/README.txt b/third_party/sqlite/ext/fts2/README.txt new file mode 100755 index 0000000..517a2a0 --- /dev/null +++ b/third_party/sqlite/ext/fts2/README.txt @@ -0,0 +1,4 @@ +This folder contains source code to the second full-text search +extension for SQLite. While the API is the same, this version uses a +substantially different storage schema from fts1, so tables will need +to be rebuilt. 
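+
+One possible rebuild recipe (hypothetical table name; any equivalent
+dump-and-reload works):
+
+  CREATE TABLE t1_backup AS SELECT rowid AS id, content FROM t1;
+  DROP TABLE t1;
+  CREATE VIRTUAL TABLE t1 USING fts2(content);
+  INSERT INTO t1(rowid, content) SELECT id, content FROM t1_backup;
+  DROP TABLE t1_backup;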
diff --git a/third_party/sqlite/ext/fts2/fts2.c b/third_party/sqlite/ext/fts2/fts2.c new file mode 100755 index 0000000..35ab87c --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2.c @@ -0,0 +1,7004 @@ +/* fts2 has a design flaw which can lead to database corruption (see +** below). It is recommended not to use it any longer, instead use +** fts3 (or higher). If you believe that your use of fts2 is safe, +** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS. +*/ +#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)) \ + && !defined(SQLITE_ENABLE_BROKEN_FTS2) +#error fts2 has a design flaw and has been deprecated. +#endif +/* The flaw is that fts2 uses the content table's unaliased rowid as +** the unique docid. fts2 embeds the rowid in the index it builds, +** and expects the rowid to not change. The SQLite VACUUM operation +** will renumber such rowids, thereby breaking fts2. If you are using +** fts2 in a system which has disabled VACUUM, then you can continue +** to use it safely. Note that PRAGMA auto_vacuum does NOT disable +** VACUUM, though systems using auto_vacuum are unlikely to invoke +** VACUUM. +** +** Unlike fts1, which is safe across VACUUM if you never delete +** documents, fts2 has a second exposure to this flaw, in the segments +** table. So fts2 should be considered unsafe across VACUUM in all +** cases. +*/ + +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is an SQLite module implementing full-text search. +*/ + +/* TODO(shess): To make it easier to spot changes without groveling +** through changelogs, I've defined GEARS_FTS2_CHANGES to call them +** out, and I will document them here. On imports, these changes +** should be reviewed to make sure they are still present, or are +** dropped as appropriate. +** +** SQLite core adds the custom function fts2_tokenizer() to be used +** for defining new tokenizers. The second parameter is a vtable +** pointer encoded as a blob. Obviously this cannot be exposed to +** Gears callers for security reasons. It could be suppressed in the +** authorizer, but for now I have simply commented the definition out. +*/ +#define GEARS_FTS2_CHANGES 1 + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). +*/ + +/* TODO(shess) Consider exporting this comment to an HTML file or the +** wiki. +*/ +/* The full-text index is stored in a series of b+tree (-like) +** structures called segments which map terms to doclists. The +** structures are like b+trees in layout, but are constructed from the +** bottom up in optimal fashion and are not updatable. Since trees +** are built from the bottom up, things will be described from the +** bottom up. +** +** +**** Varints **** +** The basic unit of encoding is a variable-length integer called a +** varint. 
We encode variable-length integers in little-endian order +** using seven bits * per byte as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** and so on. +** +** This is identical to how sqlite encodes varints (see util.c). +** +** +**** Document lists **** +** A doclist (document list) holds a docid-sorted list of hits for a +** given term. Doclists hold docids, and can optionally associate +** token positions and offsets with docids. +** +** A DL_POSITIONS_OFFSETS doclist is stored like this: +** +** array { +** varint docid; +** array { (position list for column 0) +** varint position; (delta from previous position plus POS_BASE) +** varint startOffset; (delta from previous startOffset) +** varint endOffset; (delta from startOffset) +** } +** array { +** varint POS_COLUMN; (marks start of position list for new column) +** varint column; (index of new column) +** array { +** varint position; (delta from previous position plus POS_BASE) +** varint startOffset;(delta from previous startOffset) +** varint endOffset; (delta from startOffset) +** } +** } +** varint POS_END; (marks end of positions for this document. +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. A "position" is an index of a token in the token stream +** generated by the tokenizer, while an "offset" is a byte offset, +** both based at 0. Note that POS_END and POS_COLUMN occur in the +** same logical place as the position element, and act as sentinals +** ending a position list array. +** +** A DL_POSITIONS doclist omits the startOffset and endOffset +** information. A DL_DOCIDS doclist omits both the position and +** offset information, becoming an array of varint-encoded docids. +** +** On-disk data is stored as type DL_DEFAULT, so we don't serialize +** the type. Due to how deletion is implemented in the segmentation +** system, on-disk doclists MUST store at least positions. +** +** +**** Segment leaf nodes **** +** Segment leaf nodes store terms and doclists, ordered by term. Leaf +** nodes are written using LeafWriter, and read using LeafReader (to +** iterate through a single leaf node's data) and LeavesReader (to +** iterate through a segment's entire leaf layer). Leaf nodes have +** the format: +** +** varint iHeight; (height from leaf level, always 0) +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of prefix shared with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix];(unshared suffix of next term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. +** +** Leaf nodes are broken into blocks which are stored contiguously in +** the %_segments table in sorted order. This means that when the end +** of a node is reached, the next term is in the node with the next +** greater node id. +** +** New data is spilled to a new leaf node when the current node +** exceeds LEAF_MAX bytes (default 2048). 
New data which itself is +** larger than STANDALONE_MIN (default 1024) is placed in a standalone +** node (a leaf node with a single term and doclist). The goal of +** these settings is to pack together groups of small doclists while +** making it efficient to directly access large doclists. The +** assumption is that large doclists represent terms which are more +** likely to be query targets. +** +** TODO(shess) It may be useful for blocking decisions to be more +** dynamic. For instance, it may make more sense to have a 2.5k leaf +** node rather than splitting into 2k and .5k nodes. My intuition is +** that this might extend through 2x or 4x the pagesize. +** +** +**** Segment interior nodes **** +** Segment interior nodes store blockids for subtree nodes and terms +** to describe what data is stored by the each subtree. Interior +** nodes are written using InteriorWriter, and read using +** InteriorReader. InteriorWriters are created as needed when +** SegmentWriter creates new leaf nodes, or when an interior node +** itself grows too big and must be split. The format of interior +** nodes: +** +** varint iHeight; (height from leaf level, always >0) +** varint iBlockid; (block id of node's leftmost subtree) +** optional { +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of shared prefix with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix]; (unshared suffix of next term) +** } +** } +** +** Here, optional { X } means an optional element, while array { X } +** means zero or more occurrences of X, adjacent in memory. +** +** An interior node encodes n terms separating n+1 subtrees. The +** subtree blocks are contiguous, so only the first subtree's blockid +** is encoded. The subtree at iBlockid will contain all terms less +** than the first term encoded (or all terms if no term is encoded). +** Otherwise, for terms greater than or equal to pTerm[i] but less +** than pTerm[i+1], the subtree for that term will be rooted at +** iBlockid+i. Interior nodes only store enough term data to +** distinguish adjacent children (if the rightmost term of the left +** child is "something", and the leftmost term of the right child is +** "wicked", only "w" is stored). +** +** New data is spilled to a new interior node at the same height when +** the current node exceeds INTERIOR_MAX bytes (default 2048). +** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing +** interior nodes and making the tree too skinny. The interior nodes +** at a given height are naturally tracked by interior nodes at +** height+1, and so on. +** +** +**** Segment directory **** +** The segment directory in table %_segdir stores meta-information for +** merging and deleting segments, and also the root node of the +** segment's tree. +** +** The root node is the top node of the segment's tree after encoding +** the entire segment, restricted to ROOT_MAX bytes (default 1024). +** This could be either a leaf node or an interior node. If the top +** node requires more than ROOT_MAX bytes, it is flushed to %_segments +** and a new root interior node is generated (which should always fit +** within ROOT_MAX because it only needs space for 2 varints, the +** height and the blockid of the previous root). 
+** +** The meta-information in the segment directory is: +** level - segment level (see below) +** idx - index within level +** - (level,idx uniquely identify a segment) +** start_block - first leaf node +** leaves_end_block - last leaf node +** end_block - last block (including interior nodes) +** root - contents of root node +** +** If the root node is a leaf node, then start_block, +** leaves_end_block, and end_block are all 0. +** +** +**** Segment merging **** +** To amortize update costs, segments are groups into levels and +** merged in matches. Each increase in level represents exponentially +** more documents. +** +** New documents (actually, document updates) are tokenized and +** written individually (using LeafWriter) to a level 0 segment, with +** incrementing idx. When idx reaches MERGE_COUNT (default 16), all +** level 0 segments are merged into a single level 1 segment. Level 1 +** is populated like level 0, and eventually MERGE_COUNT level 1 +** segments are merged to a single level 2 segment (representing +** MERGE_COUNT^2 updates), and so on. +** +** A segment merge traverses all segments at a given level in +** parallel, performing a straightforward sorted merge. Since segment +** leaf nodes are written in to the %_segments table in order, this +** merge traverses the underlying sqlite disk structures efficiently. +** After the merge, all segment blocks from the merged level are +** deleted. +** +** MERGE_COUNT controls how often we merge segments. 16 seems to be +** somewhat of a sweet spot for insertion performance. 32 and 64 show +** very similar performance numbers to 16 on insertion, though they're +** a tiny bit slower (perhaps due to more overhead in merge-time +** sorting). 8 is about 20% slower than 16, 4 about 50% slower than +** 16, 2 about 66% slower than 16. +** +** At query time, high MERGE_COUNT increases the number of segments +** which need to be scanned and merged. For instance, with 100k docs +** inserted: +** +** MERGE_COUNT segments +** 16 25 +** 8 12 +** 4 10 +** 2 6 +** +** This appears to have only a moderate impact on queries for very +** frequent terms (which are somewhat dominated by segment merge +** costs), and infrequent and non-existent terms still seem to be fast +** even with many segments. +** +** TODO(shess) That said, it would be nice to have a better query-side +** argument for MERGE_COUNT of 16. Also, it is possible/likely that +** optimizations to things like doclist merging will swing the sweet +** spot around. +** +** +** +**** Handling of deletions and updates **** +** Since we're using a segmented structure, with no docid-oriented +** index into the term index, we clearly cannot simply update the term +** index when a document is deleted or updated. For deletions, we +** write an empty doclist (varint(docid) varint(POS_END)), for updates +** we simply write the new doclist. Segment merges overwrite older +** data for a particular docid with newer data, so deletes or updates +** will eventually overtake the earlier data and knock it out. The +** query logic likewise merges doclists so that newer data knocks out +** older data. +** +** TODO(shess) Provide a VACUUM type operation to clear out all +** deletions and duplications. This would basically be a forced merge +** into a single segment. 
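+**
+** As a concrete example of the deletion encoding: deleting docid 42
+** writes the two-byte doclist varint(42) varint(POS_END), i.e. bytes
+** 0x2A 0x00, to a level-0 segment; when segments are merged (or
+** doclists are merged at query time), that empty doclist knocks out
+** any earlier entry for docid 42.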
+*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) + +#if defined(SQLITE_ENABLE_FTS2) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "fts2.h" +#include "fts2_hash.h" +#include "fts2_tokenizer.h" +#include "sqlite3.h" +#include "sqlite3ext.h" +SQLITE_EXTENSION_INIT1 + + +/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it +** would be nice to order the file better, perhaps something along the +** lines of: +** +** - utility functions +** - table setup functions +** - table update functions +** - table query functions +** +** Put the query functions last because they're likely to reference +** typedefs or functions from the table update section. +*/ + +#if 0 +# define TRACE(A) printf A; fflush(stdout) +#else +# define TRACE(A) +#endif + +#if 0 +/* Useful to set breakpoints. See main.c sqlite3Corrupt(). */ +static int fts2Corrupt(void){ + return SQLITE_CORRUPT; +} +# define SQLITE_CORRUPT_BKPT fts2Corrupt() +#else +# define SQLITE_CORRUPT_BKPT SQLITE_CORRUPT +#endif + +/* It is not safe to call isspace(), tolower(), or isalnum() on +** hi-bit-set characters. This is the same solution used in the +** tokenizer. +*/ +/* TODO(shess) The snippet-generation code should be using the +** tokenizer-generated tokens rather than doing its own local +** tokenization. +*/ +/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ +static int safe_isspace(char c){ + return (c&0x80)==0 ? isspace(c) : 0; +} +static int safe_tolower(char c){ + return (c&0x80)==0 ? tolower(c) : c; +} +static int safe_isalnum(char c){ + return (c&0x80)==0 ? isalnum(c) : 0; +} + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +/* +** By default, only positions and not offsets are stored in the doclists. +** To change this so that offsets are stored too, compile with +** +** -DDL_DEFAULT=DL_POSITIONS_OFFSETS +** +** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted +** into (no deletes or updates). +*/ +#ifndef DL_DEFAULT +# define DL_DEFAULT DL_POSITIONS +#endif + +enum { + POS_END = 0, /* end of this position list */ + POS_COLUMN, /* followed by new column number */ + POS_BASE +}; + +/* MERGE_COUNT controls how often we merge segments (see comment at +** top of file). +*/ +#define MERGE_COUNT 16 + +/* utility functions */ + +/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single +** record to prevent errors of the form: +** +** my_function(SomeType *b){ +** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) +** } +*/ +/* TODO(shess) Obvious candidates for a header file. */ +#define CLEAR(b) memset(b, '\0', sizeof(*(b))) + +#ifndef NDEBUG +# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) +#else +# define SCRAMBLE(b) +#endif + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. 
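+ * For example, putVarint(p, 300) writes 0xAC 0x02 and returns 2: the
+ * low seven bits (0x2C) go out first with the continuation bit set,
+ * then the remaining bits (0x02) in a final byte whose high bit is
+ * clear.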
*/ +static int putVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. + * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int getVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int getVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = getVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*******************************************************************/ +/* DataBuffer is used to collect data into a buffer in piecemeal +** fashion. It implements the usual distinction between amount of +** data currently stored (nData) and buffer capacity (nCapacity). +** +** dataBufferInit - create a buffer with given initial capacity. +** dataBufferReset - forget buffer's data, retaining capacity. +** dataBufferDestroy - free buffer's data. +** dataBufferSwap - swap contents of two buffers. +** dataBufferExpand - expand capacity without adding data. +** dataBufferAppend - append data. +** dataBufferAppend2 - append two pieces of data at once. +** dataBufferReplace - replace buffer's data. +*/ +typedef struct DataBuffer { + char *pData; /* Pointer to malloc'ed buffer. */ + int nCapacity; /* Size of pData buffer. */ + int nData; /* End of data loaded into pData. */ +} DataBuffer; + +static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ + assert( nCapacity>=0 ); + pBuffer->nData = 0; + pBuffer->nCapacity = nCapacity; + pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); +} +static void dataBufferReset(DataBuffer *pBuffer){ + pBuffer->nData = 0; +} +static void dataBufferDestroy(DataBuffer *pBuffer){ + if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); + SCRAMBLE(pBuffer); +} +static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ + DataBuffer tmp = *pBuffer1; + *pBuffer1 = *pBuffer2; + *pBuffer2 = tmp; +} +static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ + assert( nAddCapacity>0 ); + /* TODO(shess) Consider expanding more aggressively. Note that the + ** underlying malloc implementation may take care of such things for + ** us already. 
+ */
+ if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){
+ pBuffer->nCapacity = pBuffer->nData+nAddCapacity;
+ pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity);
+ }
+}
+static void dataBufferAppend(DataBuffer *pBuffer,
+ const char *pSource, int nSource){
+ assert( nSource>0 && pSource!=NULL );
+ dataBufferExpand(pBuffer, nSource);
+ memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource);
+ pBuffer->nData += nSource;
+}
+static void dataBufferAppend2(DataBuffer *pBuffer,
+ const char *pSource1, int nSource1,
+ const char *pSource2, int nSource2){
+ assert( nSource1>0 && pSource1!=NULL );
+ assert( nSource2>0 && pSource2!=NULL );
+ dataBufferExpand(pBuffer, nSource1+nSource2);
+ memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1);
+ memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2);
+ pBuffer->nData += nSource1+nSource2;
+}
+static void dataBufferReplace(DataBuffer *pBuffer,
+ const char *pSource, int nSource){
+ dataBufferReset(pBuffer);
+ dataBufferAppend(pBuffer, pSource, nSource);
+}
+
+/* StringBuffer is a null-terminated version of DataBuffer. */
+typedef struct StringBuffer {
+ DataBuffer b; /* Includes null terminator. */
+} StringBuffer;
+
+static void initStringBuffer(StringBuffer *sb){
+ dataBufferInit(&sb->b, 100);
+ dataBufferReplace(&sb->b, "", 1);
+}
+static int stringBufferLength(StringBuffer *sb){
+ return sb->b.nData-1;
+}
+static char *stringBufferData(StringBuffer *sb){
+ return sb->b.pData;
+}
+static void stringBufferDestroy(StringBuffer *sb){
+ dataBufferDestroy(&sb->b);
+}
+
+static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){
+ assert( sb->b.nData>0 );
+ if( nFrom>0 ){
+ sb->b.nData--;
+ dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1);
+ }
+}
+static void append(StringBuffer *sb, const char *zFrom){
+ nappend(sb, zFrom, strlen(zFrom));
+}
+
+/* Append a list of strings separated by commas. */
+static void appendList(StringBuffer *sb, int nString, char **azString){
+ int i;
+ for(i=0; i<nString; ++i){
+ if( i>0 ) append(sb, ", ");
+ append(sb, azString[i]);
+ }
+}
+
+static int endsInWhiteSpace(StringBuffer *p){
+ return stringBufferLength(p)>0 &&
+ safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]);
+}
+
+/* If the StringBuffer ends in something other than white space, add a
+** single space character to the end.
+*/
+static void appendWhiteSpace(StringBuffer *p){
+ if( stringBufferLength(p)==0 ) return;
+ if( !endsInWhiteSpace(p) ) append(p, " ");
+}
+
+/* Remove white space from the end of the StringBuffer */
+static void trimWhiteSpace(StringBuffer *p){
+ while( endsInWhiteSpace(p) ){
+ p->b.pData[--p->b.nData-1] = '\0';
+ }
+}
+
+/*******************************************************************/
+/* DLReader is used to read document elements from a doclist. The
+** current docid is cached, so dlrDocid() is fast. DLReader does not
+** own the doclist buffer.
+**
+** dlrAtEnd - true if there's no more data to read.
+** dlrDocid - docid of current document.
+** dlrDocData - doclist data for current document (including docid).
+** dlrDocDataBytes - length of same.
+** dlrAllDataBytes - length of all remaining data.
+** dlrPosData - position data for current document.
+** dlrPosDataLen - length of pos data for current document (incl POS_END).
+** dlrStep - step to the next document.
+** dlrInit - initialize for a doclist of the given type over the given data.
+** dlrDestroy - clean up.
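+**
+** As a sketch of the underlying format (inferred from dlrStep() and
+** plrStep(), not part of the original comment), a DL_POSITIONS
+** doclist is a sequence of entries of the form:
+**
+**   varint docid (delta-encoded from the previous entry's docid)
+**   zero or more position groups:
+**     optional varint POS_COLUMN, varint column number
+**     varint position delta plus POS_BASE
+**   varint POS_END
+**
+** DL_POSITIONS_OFFSETS adds two offset varints after each position;
+** DL_DOCIDS stores the delta-encoded docids alone.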
+** +** Expected usage is something like: +** +** DLReader reader; +** dlrInit(&reader, pData, nData); +** while( !dlrAtEnd(&reader) ){ +** // calls to dlrDocid() and kin. +** dlrStep(&reader); +** } +** dlrDestroy(&reader); +*/ +typedef struct DLReader { + DocListType iType; + const char *pData; + int nData; + + sqlite_int64 iDocid; + int nElement; +} DLReader; + +static int dlrAtEnd(DLReader *pReader){ + assert( pReader->nData>=0 ); + return pReader->nData==0; +} +static sqlite_int64 dlrDocid(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->iDocid; +} +static const char *dlrDocData(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->pData; +} +static int dlrDocDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nElement; +} +static int dlrAllDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nData; +} +/* TODO(shess) Consider adding a field to track iDocid varint length +** to make these two functions faster. This might matter (a tiny bit) +** for queries. +*/ +static const char *dlrPosData(DLReader *pReader){ + sqlite_int64 iDummy; + int n = getVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->pData+n; +} +static int dlrPosDataLen(DLReader *pReader){ + sqlite_int64 iDummy; + int n = getVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->nElement-n; +} +static void dlrStep(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + + /* Skip past current doclist element. */ + assert( pReader->nElement<=pReader->nData ); + pReader->pData += pReader->nElement; + pReader->nData -= pReader->nElement; + + /* If there is more data, read the next doclist element. */ + if( pReader->nData!=0 ){ + sqlite_int64 iDocidDelta; + int iDummy, n = getVarint(pReader->pData, &iDocidDelta); + pReader->iDocid += iDocidDelta; + if( pReader->iType>=DL_POSITIONS ){ + assert( n<pReader->nData ); + while( 1 ){ + n += getVarint32(pReader->pData+n, &iDummy); + assert( n<=pReader->nData ); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += getVarint32(pReader->pData+n, &iDummy); + assert( n<pReader->nData ); + }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += getVarint32(pReader->pData+n, &iDummy); + n += getVarint32(pReader->pData+n, &iDummy); + assert( n<pReader->nData ); + } + } + } + pReader->nElement = n; + assert( pReader->nElement<=pReader->nData ); + } +} +static void dlrInit(DLReader *pReader, DocListType iType, + const char *pData, int nData){ + assert( pData!=NULL && nData!=0 ); + pReader->iType = iType; + pReader->pData = pData; + pReader->nData = nData; + pReader->nElement = 0; + pReader->iDocid = 0; + + /* Load the first element's data. There must be a first element. */ + dlrStep(pReader); +} +static void dlrDestroy(DLReader *pReader){ + SCRAMBLE(pReader); +} + +#ifndef NDEBUG +/* Verify that the doclist can be validly decoded. Also returns the +** last docid found because it is convenient in other assertions for +** DLWriter. 
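+** This function is only compiled when NDEBUG is not defined; release
+** builds get the no-op ASSERT_VALID_DOCLIST() definition below.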
+*/
+static void docListValidate(DocListType iType, const char *pData, int nData,
+ sqlite_int64 *pLastDocid){
+ sqlite_int64 iPrevDocid = 0;
+ assert( nData>0 );
+ assert( pData!=0 );
+ assert( pData+nData>pData );
+ while( nData!=0 ){
+ sqlite_int64 iDocidDelta;
+ int n = getVarint(pData, &iDocidDelta);
+ iPrevDocid += iDocidDelta;
+ if( iType>DL_DOCIDS ){
+ int iDummy;
+ while( 1 ){
+ n += getVarint32(pData+n, &iDummy);
+ if( iDummy==POS_END ) break;
+ if( iDummy==POS_COLUMN ){
+ n += getVarint32(pData+n, &iDummy);
+ }else if( iType>DL_POSITIONS ){
+ n += getVarint32(pData+n, &iDummy);
+ n += getVarint32(pData+n, &iDummy);
+ }
+ assert( n<=nData );
+ }
+ }
+ assert( n<=nData );
+ pData += n;
+ nData -= n;
+ }
+ if( pLastDocid ) *pLastDocid = iPrevDocid;
+}
+#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o)
+#else
+#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 )
+#endif
+
+/*******************************************************************/
+/* DLWriter is used to write doclist data to a DataBuffer. DLWriter
+** always appends to the buffer and does not own it.
+**
+** dlwInit - initialize to write a doclist of a given type to a buffer.
+** dlwDestroy - clear the writer's memory. Does not free buffer.
+** dlwAppend - append raw doclist data to buffer.
+** dlwCopy - copy next doclist from reader to writer.
+** dlwAdd - construct doclist element and append to buffer.
+** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter).
+*/
+typedef struct DLWriter {
+ DocListType iType;
+ DataBuffer *b;
+ sqlite_int64 iPrevDocid;
+#ifndef NDEBUG
+ int has_iPrevDocid;
+#endif
+} DLWriter;
+
+static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){
+ pWriter->b = b;
+ pWriter->iType = iType;
+ pWriter->iPrevDocid = 0;
+#ifndef NDEBUG
+ pWriter->has_iPrevDocid = 0;
+#endif
+}
+static void dlwDestroy(DLWriter *pWriter){
+ SCRAMBLE(pWriter);
+}
+/* iFirstDocid is the first docid in the doclist in pData. It is
+** needed because pData may point within a larger doclist, in which
+** case the first item would be delta-encoded.
+**
+** iLastDocid is the final docid in the doclist in pData. It is
+** needed to create the new iPrevDocid for future delta-encoding. The
+** code could decode the passed doclist to recreate iLastDocid, but
+** the only current user (docListMerge) already has decoded this
+** information.
+*/
+/* TODO(shess) This has become just a helper for docListMerge.
+** Consider a refactor to make this cleaner.
+*/
+static void dlwAppend(DLWriter *pWriter,
+ const char *pData, int nData,
+ sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){
+ sqlite_int64 iDocid = 0;
+ char c[VARINT_MAX];
+ int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */
+#ifndef NDEBUG
+ sqlite_int64 iLastDocidDelta;
+#endif
+
+ /* Recode the initial docid as delta from iPrevDocid. */
+ nFirstOld = getVarint(pData, &iDocid);
+ assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) );
+ nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid);
+
+ /* Verify that the incoming doclist is valid AND that it ends with
+ ** the expected docid. This is essential because we'll trust this
+ ** docid in future delta-encoding.
+ */
+ ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta);
+ assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta );
+
+ /* Append recoded initial docid and everything else. Rest of docids
+ ** should have been delta-encoded from previous initial docid.
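+ ** For example (illustrative values): if iPrevDocid is 40 and the
+ ** incoming list starts at docid 42, the leading varint is rewritten
+ ** as the delta 2, while deltas between later docids in pData are
+ ** left untouched.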
+ */ + if( nFirstOld<nData ){ + dataBufferAppend2(pWriter->b, c, nFirstNew, + pData+nFirstOld, nData-nFirstOld); + }else{ + dataBufferAppend(pWriter->b, c, nFirstNew); + } + pWriter->iPrevDocid = iLastDocid; +} +static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ + dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), + dlrDocid(pReader), dlrDocid(pReader)); +} +static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n = putVarint(c, iDocid-pWriter->iPrevDocid); + + /* Docids must ascend. */ + assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); + assert( pWriter->iType==DL_DOCIDS ); + + dataBufferAppend(pWriter->b, c, n); + pWriter->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 1; +#endif +} + +/*******************************************************************/ +/* PLReader is used to read data from a document's position list. As +** the caller steps through the list, data is cached so that varints +** only need to be decoded once. +** +** plrInit, plrDestroy - create/destroy a reader. +** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors +** plrAtEnd - at end of stream, only call plrDestroy once true. +** plrStep - step to the next element. +*/ +typedef struct PLReader { + /* These refer to the next position's data. nData will reach 0 when + ** reading the last position, so plrStep() signals EOF by setting + ** pData to NULL. + */ + const char *pData; + int nData; + + DocListType iType; + int iColumn; /* the last column read */ + int iPosition; /* the last position read */ + int iStartOffset; /* the last start offset read */ + int iEndOffset; /* the last end offset read */ +} PLReader; + +static int plrAtEnd(PLReader *pReader){ + return pReader->pData==NULL; +} +static int plrColumn(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iColumn; +} +static int plrPosition(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iPosition; +} +static int plrStartOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iStartOffset; +} +static int plrEndOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iEndOffset; +} +static void plrStep(PLReader *pReader){ + int i, n; + + assert( !plrAtEnd(pReader) ); + + if( pReader->nData==0 ){ + pReader->pData = NULL; + return; + } + + n = getVarint32(pReader->pData, &i); + if( i==POS_COLUMN ){ + n += getVarint32(pReader->pData+n, &pReader->iColumn); + pReader->iPosition = 0; + pReader->iStartOffset = 0; + n += getVarint32(pReader->pData+n, &i); + } + /* Should never see adjacent column changes. 
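+ ** (plwAdd() always writes at least one position after a POS_COLUMN
+ ** marker, so, if that invariant holds, two markers can never be
+ ** adjacent.)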
*/ + assert( i!=POS_COLUMN ); + + if( i==POS_END ){ + pReader->nData = 0; + pReader->pData = NULL; + return; + } + + pReader->iPosition += i-POS_BASE; + if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += getVarint32(pReader->pData+n, &i); + pReader->iStartOffset += i; + n += getVarint32(pReader->pData+n, &i); + pReader->iEndOffset = pReader->iStartOffset+i; + } + assert( n<=pReader->nData ); + pReader->pData += n; + pReader->nData -= n; +} + +static void plrInit(PLReader *pReader, DLReader *pDLReader){ + pReader->pData = dlrPosData(pDLReader); + pReader->nData = dlrPosDataLen(pDLReader); + pReader->iType = pDLReader->iType; + pReader->iColumn = 0; + pReader->iPosition = 0; + pReader->iStartOffset = 0; + pReader->iEndOffset = 0; + plrStep(pReader); +} +static void plrDestroy(PLReader *pReader){ + SCRAMBLE(pReader); +} + +/*******************************************************************/ +/* PLWriter is used in constructing a document's position list. As a +** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. +** PLWriter writes to the associated DLWriter's buffer. +** +** plwInit - init for writing a document's poslist. +** plwDestroy - clear a writer. +** plwAdd - append position and offset information. +** plwCopy - copy next position's data from reader to writer. +** plwTerminate - add any necessary doclist terminator. +** +** Calling plwAdd() after plwTerminate() may result in a corrupt +** doclist. +*/ +/* TODO(shess) Until we've written the second item, we can cache the +** first item's information. Then we'd have three states: +** +** - initialized with docid, no positions. +** - docid and one position. +** - docid and multiple positions. +** +** Only the last state needs to actually write to dlw->b, which would +** be an improvement in the DLCollector case. +*/ +typedef struct PLWriter { + DLWriter *dlw; + + int iColumn; /* the last column written */ + int iPos; /* the last position written */ + int iOffset; /* the last start offset written */ +} PLWriter; + +/* TODO(shess) In the case where the parent is reading these values +** from a PLReader, we could optimize to a copy if that PLReader has +** the same type as pWriter. +*/ +static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, + ** iStartOffsetDelta, and iEndOffsetDelta. + */ + char c[5*VARINT_MAX]; + int n = 0; + + /* Ban plwAdd() after plwTerminate(). */ + assert( pWriter->iPos!=-1 ); + + if( pWriter->dlw->iType==DL_DOCIDS ) return; + + if( iColumn!=pWriter->iColumn ){ + n += putVarint(c+n, POS_COLUMN); + n += putVarint(c+n, iColumn); + pWriter->iColumn = iColumn; + pWriter->iPos = 0; + pWriter->iOffset = 0; + } + assert( iPos>=pWriter->iPos ); + n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); + pWriter->iPos = iPos; + if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=pWriter->iOffset ); + n += putVarint(c+n, iStartOffset-pWriter->iOffset); + pWriter->iOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + n += putVarint(c+n, iEndOffset-iStartOffset); + } + dataBufferAppend(pWriter->dlw->b, c, n); +} +static void plwCopy(PLWriter *pWriter, PLReader *pReader){ + plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), + plrStartOffset(pReader), plrEndOffset(pReader)); +} +static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n; + + pWriter->dlw = dlw; + + /* Docids must ascend. 
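+ ** (The docid is delta-encoded against dlw->iPrevDocid; e.g. with
+ ** iPrevDocid 7 and iDocid 10 the varint written below encodes 3.
+ ** Illustrative values, not from the original comment.)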
*/ + assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); + n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid); + dataBufferAppend(pWriter->dlw->b, c, n); + pWriter->dlw->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->dlw->has_iPrevDocid = 1; +#endif + + pWriter->iColumn = 0; + pWriter->iPos = 0; + pWriter->iOffset = 0; +} +/* TODO(shess) Should plwDestroy() also terminate the doclist? But +** then plwDestroy() would no longer be just a destructor, it would +** also be doing work, which isn't consistent with the overall idiom. +** Another option would be for plwAdd() to always append any necessary +** terminator, so that the output is always correct. But that would +** add incremental work to the common case with the only benefit being +** API elegance. Punt for now. +*/ +static void plwTerminate(PLWriter *pWriter){ + if( pWriter->dlw->iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = putVarint(c, POS_END); + dataBufferAppend(pWriter->dlw->b, c, n); + } +#ifndef NDEBUG + /* Mark as terminated for assert in plwAdd(). */ + pWriter->iPos = -1; +#endif +} +static void plwDestroy(PLWriter *pWriter){ + SCRAMBLE(pWriter); +} + +/*******************************************************************/ +/* DLCollector wraps PLWriter and DLWriter to provide a +** dynamically-allocated doclist area to use during tokenization. +** +** dlcNew - malloc up and initialize a collector. +** dlcDelete - destroy a collector and all contained items. +** dlcAddPos - append position and offset information. +** dlcAddDoclist - add the collected doclist to the given buffer. +** dlcNext - terminate the current document and open another. +*/ +typedef struct DLCollector { + DataBuffer b; + DLWriter dlw; + PLWriter plw; +} DLCollector; + +/* TODO(shess) This could also be done by calling plwTerminate() and +** dataBufferAppend(). I tried that, expecting nominal performance +** differences, but it seemed to pretty reliably be worth 1% to code +** it this way. I suspect it is the incremental malloc overhead (some +** percentage of the plwTerminate() calls will cause a realloc), so +** this might be worth revisiting if the DataBuffer implementation +** changes. +*/ +static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ + if( pCollector->dlw.iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = putVarint(c, POS_END); + dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); + }else{ + dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); + } +} +static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ + plwTerminate(&pCollector->plw); + plwDestroy(&pCollector->plw); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); +} +static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); +} + +static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ + DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); + dataBufferInit(&pCollector->b, 0); + dlwInit(&pCollector->dlw, iType, &pCollector->b); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); + return pCollector; +} +static void dlcDelete(DLCollector *pCollector){ + plwDestroy(&pCollector->plw); + dlwDestroy(&pCollector->dlw); + dataBufferDestroy(&pCollector->b); + SCRAMBLE(pCollector); + sqlite3_free(pCollector); +} + + +/* Copy the doclist data of iType in pData/nData into *out, trimming +** unnecessary data as we go. 
Only columns matching iColumn are +** copied, all columns copied if iColumn is -1. Elements with no +** matching columns are dropped. The output is an iOutType doclist. +*/ +/* NOTE(shess) This code is only valid after all doclists are merged. +** If this is run before merges, then doclist items which represent +** deletion will be trimmed, and will thus not effect a deletion +** during the merge. +*/ +static void docListTrim(DocListType iType, const char *pData, int nData, + int iColumn, DocListType iOutType, DataBuffer *out){ + DLReader dlReader; + DLWriter dlWriter; + + assert( iOutType<=iType ); + + dlrInit(&dlReader, iType, pData, nData); + dlwInit(&dlWriter, iOutType, out); + + while( !dlrAtEnd(&dlReader) ){ + PLReader plReader; + PLWriter plWriter; + int match = 0; + + plrInit(&plReader, &dlReader); + + while( !plrAtEnd(&plReader) ){ + if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ + if( !match ){ + plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); + match = 1; + } + plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + } + plrStep(&plReader); + } + if( match ){ + plwTerminate(&plWriter); + plwDestroy(&plWriter); + } + + plrDestroy(&plReader); + dlrStep(&dlReader); + } + dlwDestroy(&dlWriter); + dlrDestroy(&dlReader); +} + +/* Used by docListMerge() to keep doclists in the ascending order by +** docid, then ascending order by age (so the newest comes first). +*/ +typedef struct OrderedDLReader { + DLReader *pReader; + + /* TODO(shess) If we assume that docListMerge pReaders is ordered by + ** age (which we do), then we could use pReader comparisons to break + ** ties. + */ + int idx; +} OrderedDLReader; + +/* Order eof to end, then by docid asc, idx desc. */ +static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ + if( dlrAtEnd(r1->pReader) ){ + if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ + return 1; /* Only r1 atEnd(). */ + } + if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ + + if( dlrDocid(r1->pReader)<dlrDocid(r2->pReader) ) return -1; + if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; + + /* Descending on idx. */ + return r2->idx-r1->idx; +} + +/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that +** p[1..n-1] is already sorted. +*/ +/* TODO(shess) Is this frequent enough to warrant a binary search? +** Before implementing that, instrument the code to check. In most +** current usage, I expect that p[0] will be less than p[1] a very +** high proportion of the time. +*/ +static void orderedDLReaderReorder(OrderedDLReader *p, int n){ + while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ + OrderedDLReader tmp = p[0]; + p[0] = p[1]; + p[1] = tmp; + n--; + p++; + } +} + +/* Given an array of doclist readers, merge their doclist elements +** into out in sorted order (by docid), dropping elements from older +** readers when there is a duplicate docid. pReaders is assumed to be +** ordered by age, oldest first. +*/ +/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably +** be fixed. 
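+** Callers appear to respect the bound today, since a level merge
+** combines at most MERGE_COUNT segments, but only the assert below
+** enforces it.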
+*/
+static void docListMerge(DataBuffer *out,
+ DLReader *pReaders, int nReaders){
+ OrderedDLReader readers[MERGE_COUNT];
+ DLWriter writer;
+ int i, n;
+ const char *pStart = 0;
+ int nStart = 0;
+ sqlite_int64 iFirstDocid = 0, iLastDocid = 0;
+
+ assert( nReaders>0 );
+ if( nReaders==1 ){
+ dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders));
+ return;
+ }
+
+ assert( nReaders<=MERGE_COUNT );
+ n = 0;
+ for(i=0; i<nReaders; i++){
+ assert( pReaders[i].iType==pReaders[0].iType );
+ readers[i].pReader = pReaders+i;
+ readers[i].idx = i;
+ n += dlrAllDataBytes(&pReaders[i]);
+ }
+ /* Conservatively size output to sum of inputs. Output should end
+ ** up strictly smaller than input.
+ */
+ dataBufferExpand(out, n);
+
+ /* Get the readers into sorted order. */
+ while( i-->0 ){
+ orderedDLReaderReorder(readers+i, nReaders-i);
+ }
+
+ dlwInit(&writer, pReaders[0].iType, out);
+ while( !dlrAtEnd(readers[0].pReader) ){
+ sqlite_int64 iDocid = dlrDocid(readers[0].pReader);
+
+ /* If this is a continuation of the current buffer to copy, extend
+ ** that buffer. memcpy() seems to be more efficient if it has
+ ** lots of data to copy.
+ */
+ if( dlrDocData(readers[0].pReader)==pStart+nStart ){
+ nStart += dlrDocDataBytes(readers[0].pReader);
+ }else{
+ if( pStart!=0 ){
+ dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid);
+ }
+ pStart = dlrDocData(readers[0].pReader);
+ nStart = dlrDocDataBytes(readers[0].pReader);
+ iFirstDocid = iDocid;
+ }
+ iLastDocid = iDocid;
+ dlrStep(readers[0].pReader);
+
+ /* Drop all of the older elements with the same docid. */
+ for(i=1; i<nReaders &&
+ !dlrAtEnd(readers[i].pReader) &&
+ dlrDocid(readers[i].pReader)==iDocid; i++){
+ dlrStep(readers[i].pReader);
+ }
+
+ /* Get the readers back into order. */
+ while( i-->0 ){
+ orderedDLReaderReorder(readers+i, nReaders-i);
+ }
+ }
+
+ /* Copy over any remaining elements. */
+ if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid);
+ dlwDestroy(&writer);
+}
+
+/* Helper function for posListUnion(). Compares the current position
+** between left and right, returning, per the standard C idiom, <0 if
+** left<right, >0 if left>right, and 0 if left==right. "End" always
+** compares greater.
+*/
+static int posListCmp(PLReader *pLeft, PLReader *pRight){
+ assert( pLeft->iType==pRight->iType );
+ if( pLeft->iType==DL_DOCIDS ) return 0;
+
+ if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1;
+ if( plrAtEnd(pRight) ) return -1;
+
+ if( plrColumn(pLeft)<plrColumn(pRight) ) return -1;
+ if( plrColumn(pLeft)>plrColumn(pRight) ) return 1;
+
+ if( plrPosition(pLeft)<plrPosition(pRight) ) return -1;
+ if( plrPosition(pLeft)>plrPosition(pRight) ) return 1;
+ if( pLeft->iType==DL_POSITIONS ) return 0;
+
+ if( plrStartOffset(pLeft)<plrStartOffset(pRight) ) return -1;
+ if( plrStartOffset(pLeft)>plrStartOffset(pRight) ) return 1;
+
+ if( plrEndOffset(pLeft)<plrEndOffset(pRight) ) return -1;
+ if( plrEndOffset(pLeft)>plrEndOffset(pRight) ) return 1;
+
+ return 0;
+}
+
+/* Write the union of position lists in pLeft and pRight to pOut.
+** "Union" here means "all unique position tuples". Should
+** work with any doclist type, though both inputs and the output
+** should be the same type.
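+**
+** For example (illustrative): for a shared docid, if pLeft holds
+** positions {1, 5} and pRight holds {5, 9} in the same column, the
+** output position list is {1, 5, 9}.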
+*/ +static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pLeft->iType==pRight->iType ); + assert( pLeft->iType==pOut->iType ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + plwInit(&writer, pOut, dlrDocid(pLeft)); + + while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ + int c = posListCmp(&left, &right); + if( c<0 ){ + plwCopy(&writer, &left); + plrStep(&left); + }else if( c>0 ){ + plwCopy(&writer, &right); + plrStep(&right); + }else{ + plwCopy(&writer, &left); + plrStep(&left); + plrStep(&right); + } + } + + plwTerminate(&writer); + plwDestroy(&writer); + plrDestroy(&left); + plrDestroy(&right); +} + +/* Write the union of doclists in pLeft and pRight to pOut. For +** docids in common between the inputs, the union of the position +** lists is written. Inputs and outputs are always type DL_DEFAULT. +*/ +static void docListUnion( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ){ + if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); + return; + } + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DEFAULT, pLeft, nLeft); + dlrInit(&right, DL_DEFAULT, pRight, nRight); + dlwInit(&writer, DL_DEFAULT, pOut); + + while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ + if( dlrAtEnd(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrAtEnd(&left) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else if( dlrDocid(&left)<dlrDocid(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrDocid(&left)>dlrDocid(&right) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else{ + posListUnion(&left, &right, &writer); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* pLeft and pRight are DLReaders positioned to the same docid. +** +** If there are no instances in pLeft or pRight where the position +** of pLeft is one less than the position of pRight, then this +** routine adds nothing to pOut. +** +** If there are one or more instances where positions from pLeft +** are exactly one less than positions from pRight, then add a new +** document record to pOut. If pOut wants to hold positions, then +** include the positions from pRight that are one more than a +** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. 
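+**
+** For example (illustrative): with left positions {3, 17} and right
+** positions {4, 10} in one column, 3+1==4 matches, so the docid is
+** written, along with position 4 when positions are kept.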
+*/ +static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight, + DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + int match = 0; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pOut->iType!=DL_POSITIONS_OFFSETS ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + + while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ + if( plrColumn(&left)<plrColumn(&right) ){ + plrStep(&left); + }else if( plrColumn(&left)>plrColumn(&right) ){ + plrStep(&right); + }else if( plrPosition(&left)+1<plrPosition(&right) ){ + plrStep(&left); + }else if( plrPosition(&left)+1>plrPosition(&right) ){ + plrStep(&right); + }else{ + if( !match ){ + plwInit(&writer, pOut, dlrDocid(pLeft)); + match = 1; + } + plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); + plrStep(&left); + plrStep(&right); + } + } + + if( match ){ + plwTerminate(&writer); + plwDestroy(&writer); + } + + plrDestroy(&left); + plrDestroy(&right); +} + +/* We have two doclists with positions: pLeft and pRight. +** Write the phrase intersection of these two doclists into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** iType controls the type of data written to pOut. If iType is +** DL_POSITIONS, the positions are those from pRight. +*/ +static void docListPhraseMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DocListType iType, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 || nRight==0 ) return; + + assert( iType!=DL_POSITIONS_OFFSETS ); + + dlrInit(&left, DL_POSITIONS, pLeft, nLeft); + dlrInit(&right, DL_POSITIONS, pRight, nRight); + dlwInit(&writer, iType, pOut); + + while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ + if( dlrDocid(&left)<dlrDocid(&right) ){ + dlrStep(&left); + }else if( dlrDocid(&right)<dlrDocid(&left) ){ + dlrStep(&right); + }else{ + posListPhraseMerge(&left, &right, &writer); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* We have two DL_DOCIDS doclists: pLeft and pRight. +** Write the intersection of these two doclists into pOut as a +** DL_DOCIDS doclist. +*/ +static void docListAndMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 || nRight==0 ) return; + + dlrInit(&left, DL_DOCIDS, pLeft, nLeft); + dlrInit(&right, DL_DOCIDS, pRight, nRight); + dlwInit(&writer, DL_DOCIDS, pOut); + + while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ + if( dlrDocid(&left)<dlrDocid(&right) ){ + dlrStep(&left); + }else if( dlrDocid(&right)<dlrDocid(&left) ){ + dlrStep(&right); + }else{ + dlwAdd(&writer, dlrDocid(&left)); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* We have two DL_DOCIDS doclists: pLeft and pRight. +** Write the union of these two doclists into pOut as a +** DL_DOCIDS doclist. 
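+** For example (illustrative): docids {1, 3} unioned with {2, 3}
+** produce {1, 2, 3}.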
+*/
+static void docListOrMerge(
+ const char *pLeft, int nLeft,
+ const char *pRight, int nRight,
+ DataBuffer *pOut /* Write the combined doclist here */
+){
+ DLReader left, right;
+ DLWriter writer;
+
+ if( nLeft==0 ){
+ if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight);
+ return;
+ }
+ if( nRight==0 ){
+ dataBufferAppend(pOut, pLeft, nLeft);
+ return;
+ }
+
+ dlrInit(&left, DL_DOCIDS, pLeft, nLeft);
+ dlrInit(&right, DL_DOCIDS, pRight, nRight);
+ dlwInit(&writer, DL_DOCIDS, pOut);
+
+ while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){
+ if( dlrAtEnd(&right) ){
+ dlwAdd(&writer, dlrDocid(&left));
+ dlrStep(&left);
+ }else if( dlrAtEnd(&left) ){
+ dlwAdd(&writer, dlrDocid(&right));
+ dlrStep(&right);
+ }else if( dlrDocid(&left)<dlrDocid(&right) ){
+ dlwAdd(&writer, dlrDocid(&left));
+ dlrStep(&left);
+ }else if( dlrDocid(&right)<dlrDocid(&left) ){
+ dlwAdd(&writer, dlrDocid(&right));
+ dlrStep(&right);
+ }else{
+ dlwAdd(&writer, dlrDocid(&left));
+ dlrStep(&left);
+ dlrStep(&right);
+ }
+ }
+
+ dlrDestroy(&left);
+ dlrDestroy(&right);
+ dlwDestroy(&writer);
+}
+
+/* We have two DL_DOCIDS doclists: pLeft and pRight.
+** Write into pOut a DL_DOCIDS doclist containing all documents that
+** occur in pLeft but not in pRight.
+*/
+static void docListExceptMerge(
+ const char *pLeft, int nLeft,
+ const char *pRight, int nRight,
+ DataBuffer *pOut /* Write the combined doclist here */
+){
+ DLReader left, right;
+ DLWriter writer;
+
+ if( nLeft==0 ) return;
+ if( nRight==0 ){
+ dataBufferAppend(pOut, pLeft, nLeft);
+ return;
+ }
+
+ dlrInit(&left, DL_DOCIDS, pLeft, nLeft);
+ dlrInit(&right, DL_DOCIDS, pRight, nRight);
+ dlwInit(&writer, DL_DOCIDS, pOut);
+
+ while( !dlrAtEnd(&left) ){
+ while( !dlrAtEnd(&right) && dlrDocid(&right)<dlrDocid(&left) ){
+ dlrStep(&right);
+ }
+ if( dlrAtEnd(&right) || dlrDocid(&left)<dlrDocid(&right) ){
+ dlwAdd(&writer, dlrDocid(&left));
+ }
+ dlrStep(&left);
+ }
+
+ dlrDestroy(&left);
+ dlrDestroy(&right);
+ dlwDestroy(&writer);
+}
+
+static char *string_dup_n(const char *s, int n){
+ char *str = sqlite3_malloc(n + 1);
+ memcpy(str, s, n);
+ str[n] = '\0';
+ return str;
+}
+
+/* Duplicate a string; the caller must sqlite3_free() the returned string.
+ * (We don't use strdup() since it is not part of the standard C library and
+ * may not be available everywhere.) */
+static char *string_dup(const char *s){
+ return string_dup_n(s, strlen(s));
+}
+
+/* Format a string, replacing each occurrence of the % character with
+ * zDb.zName. This may be more convenient than sqlite3_mprintf()
+ * when one string is used repeatedly in a format string.
+ * The caller must sqlite3_free() the returned string. */
+static char *string_format(const char *zFormat,
+ const char *zDb, const char *zName){
+ const char *p;
+ size_t len = 0;
+ size_t nDb = strlen(zDb);
+ size_t nName = strlen(zName);
+ size_t nFullTableName = nDb+1+nName;
+ char *result;
+ char *r;
+
+ /* first compute length needed */
+ for(p = zFormat ; *p ; ++p){
+ len += (*p=='%' ?
nFullTableName : 1);
+ }
+ len += 1; /* for null terminator */
+
+ r = result = sqlite3_malloc(len);
+ for(p = zFormat; *p; ++p){
+ if( *p=='%' ){
+ memcpy(r, zDb, nDb);
+ r += nDb;
+ *r++ = '.';
+ memcpy(r, zName, nName);
+ r += nName;
+ } else {
+ *r++ = *p;
+ }
+ }
+ *r++ = '\0';
+ assert( r == result + len );
+ return result;
+}
+
+static int sql_exec(sqlite3 *db, const char *zDb, const char *zName,
+ const char *zFormat){
+ char *zCommand = string_format(zFormat, zDb, zName);
+ int rc;
+ TRACE(("FTS2 sql: %s\n", zCommand));
+ rc = sqlite3_exec(db, zCommand, NULL, 0, NULL);
+ sqlite3_free(zCommand);
+ return rc;
+}
+
+static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName,
+ sqlite3_stmt **ppStmt, const char *zFormat){
+ char *zCommand = string_format(zFormat, zDb, zName);
+ int rc;
+ TRACE(("FTS2 prepare: %s\n", zCommand));
+ rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL);
+ sqlite3_free(zCommand);
+ return rc;
+}
+
+/* end utility functions */
+
+/* Forward reference */
+typedef struct fulltext_vtab fulltext_vtab;
+
+/* A single term in a query is represented by an instance of
+** the following structure.
+*/
+typedef struct QueryTerm {
+ short int nPhrase; /* How many following terms are part of the same phrase */
+ short int iPhrase; /* This is the i-th term of a phrase. */
+ short int iColumn; /* Column of the index that must match this term */
+ signed char isOr; /* this term is preceded by "OR" */
+ signed char isNot; /* this term is preceded by "-" */
+ signed char isPrefix; /* this term is followed by "*" */
+ char *pTerm; /* text of the term. '\000' terminated. malloced */
+ int nTerm; /* Number of bytes in pTerm[] */
+} QueryTerm;
+
+
+/* A query string is parsed into a Query structure.
+ *
+ * We could, in theory, allow query strings to be complicated
+ * nested expressions with precedence determined by parentheses.
+ * But none of the major search engines do this. (Perhaps the
+ * feeling is that a parenthesized expression is too complex an
+ * idea for the average user to grasp.) Taking our lead from
+ * the major search engines, we will allow queries to be a list
+ * of terms (with an implied AND operator) or phrases in double-quotes,
+ * with a single optional "-" before each non-phrase term to designate
+ * negation and an optional OR connector.
+ *
+ * OR binds more tightly than the implied AND, which is what the
+ * major search engines seem to do. So, for example:
+ *
+ * [one two OR three] ==> one AND (two OR three)
+ * [one OR two three] ==> (one OR two) AND three
+ *
+ * A "-" before a term matches all entries that lack that term.
+ * The "-" must occur immediately before the term with no intervening
+ * space. This is how the search engines do it.
+ *
+ * A NOT term cannot be the right-hand operand of an OR. If this
+ * occurs in the query string, the NOT is ignored:
+ *
+ * [one OR -two] ==> one OR two
+ *
+ */
+typedef struct Query {
+ fulltext_vtab *pFts; /* The full text index */
+ int nTerms; /* Number of terms in the query */
+ QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */
+ int nextIsOr; /* Set the isOr flag on the next inserted term */
+ int nextColumn; /* Next word parsed must be in this column */
+ int dfltColumn; /* The default column */
+} Query;
+
+
+/*
+** An instance of the following structure keeps track of generated
+** matching-word offset information and snippets.
+*/ +typedef struct Snippet { + int nMatch; /* Total number of matches */ + int nAlloc; /* Space allocated for aMatch[] */ + struct snippetMatch { /* One entry for each matching term */ + char snStatus; /* Status flag for use while constructing snippets */ + short int iCol; /* The column that contains the match */ + short int iTerm; /* The index in Query.pTerms[] of the matching term */ + short int nByte; /* Number of bytes in the term */ + int iStart; /* The offset to the first character of the term */ + } *aMatch; /* Points to space obtained from malloc */ + char *zOffset; /* Text rendering of aMatch[] */ + int nOffset; /* strlen(zOffset) */ + char *zSnippet; /* Snippet text */ + int nSnippet; /* strlen(zSnippet) */ +} Snippet; + + +typedef enum QueryType { + QUERY_GENERIC, /* table scan */ + QUERY_ROWID, /* lookup by rowid */ + QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ +} QueryType; + +typedef enum fulltext_statement { + CONTENT_INSERT_STMT, + CONTENT_SELECT_STMT, + CONTENT_UPDATE_STMT, + CONTENT_DELETE_STMT, + CONTENT_EXISTS_STMT, + + BLOCK_INSERT_STMT, + BLOCK_SELECT_STMT, + BLOCK_DELETE_STMT, + BLOCK_DELETE_ALL_STMT, + + SEGDIR_MAX_INDEX_STMT, + SEGDIR_SET_STMT, + SEGDIR_SELECT_LEVEL_STMT, + SEGDIR_SPAN_STMT, + SEGDIR_DELETE_STMT, + SEGDIR_SELECT_SEGMENT_STMT, + SEGDIR_SELECT_ALL_STMT, + SEGDIR_DELETE_ALL_STMT, + SEGDIR_COUNT_STMT, + + MAX_STMT /* Always at end! */ +} fulltext_statement; + +/* These must exactly match the enum above. */ +/* TODO(shess): Is there some risk that a statement will be used in two +** cursors at once, e.g. if a query joins a virtual table to itself? +** If so perhaps we should move some of these to the cursor object. +*/ +static const char *const fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ + /* CONTENT_SELECT */ "select * from %_content where rowid = ?", + /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ + /* CONTENT_DELETE */ "delete from %_content where rowid = ?", + /* CONTENT_EXISTS */ "select rowid from %_content limit 1", + + /* BLOCK_INSERT */ "insert into %_segments values (?)", + /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", + /* BLOCK_DELETE */ "delete from %_segments where rowid between ? and ?", + /* BLOCK_DELETE_ALL */ "delete from %_segments", + + /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", + /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", + /* SEGDIR_SELECT_LEVEL */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? order by idx", + /* SEGDIR_SPAN */ + "select min(start_block), max(end_block) from %_segdir " + " where level = ? and start_block <> 0", + /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", + + /* NOTE(shess): The first three results of the following two + ** statements must match. + */ + /* SEGDIR_SELECT_SEGMENT */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? and idx = ?", + /* SEGDIR_SELECT_ALL */ + "select start_block, leaves_end_block, root from %_segdir " + " order by level desc, idx asc", + /* SEGDIR_DELETE_ALL */ "delete from %_segdir", + /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. 
+** All other methods receive a pointer to the structure as one of their
+** arguments.
+*/
+struct fulltext_vtab {
+ sqlite3_vtab base; /* Base class used by SQLite core */
+ sqlite3 *db; /* The database connection */
+ const char *zDb; /* logical database name */
+ const char *zName; /* virtual table name */
+ int nColumn; /* number of columns in virtual table */
+ char **azColumn; /* column names. malloced */
+ char **azContentColumn; /* column names in content table; malloced */
+ sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */
+
+ /* Precompiled statements which we keep as long as the table is
+ ** open.
+ */
+ sqlite3_stmt *pFulltextStatements[MAX_STMT];
+
+ /* Precompiled statements used for segment merges. We run a
+ ** separate select across the leaf level of each tree being merged.
+ */
+ sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT];
+ /* The statement used to prepare pLeafSelectStmts. */
+#define LEAF_SELECT \
+ "select block from %_segments where rowid between ? and ? order by rowid"
+
+ /* These buffer pending index updates during transactions.
+ ** nPendingData estimates the memory size of the pending data. It
+ ** doesn't include the hash-bucket overhead, nor any malloc
+ ** overhead. When nPendingData exceeds kPendingThreshold, the
+ ** buffer is flushed even before the transaction closes.
+ ** pendingTerms stores the data, and is only valid when nPendingData
+ ** is >=0 (nPendingData<0 means pendingTerms has not been
+ ** initialized). iPrevDocid is the last docid written, used to make
+ ** certain we're inserting in sorted order.
+ */
+ int nPendingData;
+#define kPendingThreshold (1*1024*1024)
+ sqlite_int64 iPrevDocid;
+ fts2Hash pendingTerms;
+};
+
+/*
+** When the core wants to do a query, it creates a cursor using a
+** call to xOpen. This structure is an instance of a cursor. It
+** is destroyed by xClose.
+*/
+typedef struct fulltext_cursor {
+ sqlite3_vtab_cursor base; /* Base class used by SQLite core */
+ QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */
+ sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */
+ int eof; /* True if at End Of Results */
+ Query q; /* Parsed query string */
+ Snippet snippet; /* Cached snippet for the current row */
+ int iColumn; /* Column being searched */
+ DataBuffer result; /* Doclist results from fulltextQuery */
+ DLReader reader; /* Result reader if result not empty */
+} fulltext_cursor;
+
+static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){
+ return (fulltext_vtab *) c->base.pVtab;
+}
+
+static const sqlite3_module fts2Module; /* forward declaration */
+
+/* Return a dynamically generated statement of the form
+ * insert into %_content (rowid, ...) values (?, ...)
+ */
+static const char *contentInsertStatement(fulltext_vtab *v){
+ StringBuffer sb;
+ int i;
+
+ initStringBuffer(&sb);
+ append(&sb, "insert into %_content (rowid, ");
+ appendList(&sb, v->nColumn, v->azContentColumn);
+ append(&sb, ") values (?");
+ for(i=0; i<v->nColumn; ++i)
+ append(&sb, ", ?");
+ append(&sb, ")");
+ return stringBufferData(&sb);
+}
+
+/* Return a dynamically generated statement of the form
+ * update %_content set [col_0] = ?, [col_1] = ?, ...
+ * where rowid = ?
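+ * (Bind-parameter order matters to content_update() below: the new
+ * column values bind at indices 1..nColumn and the rowid binds at
+ * nColumn+1.)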
+ */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; i<v->nColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where rowid = ?"); + return stringBufferData(&sb); +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. +*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmt<MAX_STMT ); + if( v->pFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and +** SQLITE_ROW to SQLITE_ERROR. Useful for statements like UPDATE, +** where we expect no results. +*/ +static int sql_single_step(sqlite3_stmt *s){ + int rc = sqlite3_step(s); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* Like sql_get_statement(), but for special replicated LEAF_SELECT +** statements. idx -1 is a special case for an uncached version of +** the statement (used in the optimize implementation). +*/ +/* TODO(shess) Write version for generic statements and then share +** that between the cached-statement functions. +*/ +static int sql_get_leaf_statement(fulltext_vtab *v, int idx, + sqlite3_stmt **ppStmt){ + assert( idx>=-1 && idx<MERGE_COUNT ); + if( idx==-1 ){ + return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); + }else if( v->pLeafSelectStmts[idx]==NULL ){ + int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], + LEAF_SELECT); + if( rc!=SQLITE_OK ) return rc; + }else{ + int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pLeafSelectStmts[idx]; + return SQLITE_OK; +} + +/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ +static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, rowid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; i<v->nColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step(s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
+ * where rowid = [iRowid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iRowid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; i<v->nColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); + } + sqlite3_free((void *) pString); +} + +/* select * from %_content where rowid = [iRow] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. + * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iRow, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); + for(i=0; i<v->nColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where rowid = [iRow ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iRow); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if +** no rows exist, and any error in case of failure. +*/ +static int content_exists(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
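+ * A second SQLITE_ROW would mean the "limit 1" query returned two
+ * rows, which should be impossible, so it is mapped to SQLITE_ERROR.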
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ROW; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + +/* insert into %_segments values ([pData]) +** returns assigned rowid in *piBlockid +*/ +static int block_insert(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 *piBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + *piBlockid = sqlite3_last_insert_rowid(v->db); + return SQLITE_OK; +} + +/* delete from %_segments +** where rowid between [iStartBlockid] and [iEndBlockid] +** +** Deletes the range of blocks, inclusive, used to delete the blocks +** which form a segment. +*/ +static int block_delete(fulltext_vtab *v, + sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found +** at iLevel. Returns SQLITE_DONE if there are no segments at +** iLevel. Otherwise returns an error. +*/ +static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* Should always get at least one row due to how max() works. */ + if( rc==SQLITE_DONE ) return SQLITE_DONE; + if( rc!=SQLITE_ROW ) return rc; + + /* NULL means that there were no inputs to max(). */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; + } + + *pidx = sqlite3_column_int(s, 0); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* insert into %_segdir values ( +** [iLevel], [idx], +** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], +** [pRootData] +** ) +*/ +static int segdir_set(fulltext_vtab *v, int iLevel, int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iLeavesEndBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, idx); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 3, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 5, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Queries %_segdir for the block span of the segments in level +** iLevel. 
Returns SQLITE_DONE if there are no blocks for iLevel, +** SQLITE_ROW if there are blocks, else an error. +*/ +static int segdir_span(fulltext_vtab *v, int iLevel, + sqlite_int64 *piStartBlockid, + sqlite_int64 *piEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ + if( rc!=SQLITE_ROW ) return rc; + + /* This happens if all segments at this level are entirely inline. */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + int rc2 = sqlite3_step(s); + if( rc2==SQLITE_ROW ) return SQLITE_ERROR; + return rc2; + } + + *piStartBlockid = sqlite3_column_int64(s, 0); + *piEndBlockid = sqlite3_column_int64(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* Delete the segment blocks and segment directory records for all +** segments at iLevel. +*/ +static int segdir_delete(fulltext_vtab *v, int iLevel){ + sqlite3_stmt *s; + sqlite_int64 iStartBlockid, iEndBlockid; + int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + if( rc==SQLITE_ROW ){ + rc = block_delete(v, iStartBlockid, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + } + + /* Delete the segment directory itself. */ + rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Delete entire fts index, SQLITE_OK on success, relevant error on +** failure. +*/ +static int segdir_delete_all(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_single_step(s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_OK with *pnSegments set to the number of entries in +** %_segdir and *piMaxLevel set to the highest level which has a +** segment. Otherwise returns the SQLite error which caused failure. +*/ +static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* TODO(shess): This case should not be possible? Should stronger + ** measures be taken if it happens? + */ + if( rc==SQLITE_DONE ){ + *pnSegments = 0; + *piMaxLevel = 0; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + *pnSegments = sqlite3_column_int(s, 0); + *piMaxLevel = sqlite3_column_int(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_OK; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + +/* TODO(shess) clearPendingTerms() is far down the file because +** writeZeroSegment() is far down the file because LeafWriter is far +** down the file. Consider refactoring the code to move the non-vtab +** code above the vtab code so that we don't need this forward +** reference. +*/ +static int clearPendingTerms(fulltext_vtab *v); + +/* +** Free the memory used to contain a fulltext_vtab structure. +*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + TRACE(("FTS2 Destroy %p\n", v)); + for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ + if( v->pFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + for( i=0; i<MERGE_COUNT; i++ ){ + if( v->pLeafSelectStmts[i]!=NULL ){ + sqlite3_finalize(v->pLeafSelectStmts[i]); + v->pLeafSelectStmts[i] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + clearPendingTerms(v); + + sqlite3_free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + sqlite3_free(v->azContentColumn); + sqlite3_free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. +*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** IdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** isIdChar[X-0x20] must be 1. +** +** Ticket #1066. The SQL standard does not allow '$' in the +** middle of identifiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. +*/ +static const char isIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning.
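+** For example, scanning "tokenize simple" yields successive returns +** of 8 (TOKEN_ID "tokenize"), 1 (TOKEN_SPACE), 6 (TOKEN_ID "simple"), +** and finally 0 (TOKEN_EOF).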
+*/ +static int getToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !IdChar(*z) ){ + break; + } + for(i=1; IdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct Token { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} Token; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. +** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. +*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + Token *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = getToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i<nToken; i++){ + azToken[i] = zCopy; + n = aToken[i].n; + memcpy(zCopy, aToken[i].z, n); + zCopy[n] = 0; + zCopy += n+1; + } + azToken[nToken] = 0; + sqlite3_free(aToken); + *pnToken = nToken; + return azToken; +} + +/* +** Convert an SQL-style quoted string into a normal string by removing +** the quote characters. The conversion is done in-place. If the +** input does not begin with a quote character, then this routine +** is a no-op. +** +** Examples: +** +** "abc" becomes abc +** 'xyz' becomes xyz +** [pqr] becomes pqr +** `mno` becomes mno +*/ +static void dequoteString(char *z){ + int quote; + int i, j; + if( z==0 ) return; + quote = z[0]; + switch( quote ){ + case '\'': break; + case '"': break; + case '`': break; /* For MySQL compatibility */ + case '[': quote = ']'; break; /* For MS SqlServer compatibility */ + default: return; + } + for(i=1, j=0; z[i]; i++){ + if( z[i]==quote ){ + if( z[i+1]==quote ){ + z[j++] = quote; + i++; + }else{ + z[j++] = 0; + break; + } + }else{ + z[j++] = z[i]; + } + } +} + +/* +** The input azIn is a NULL-terminated list of tokens. Remove the first +** token and all punctuation tokens. Remove the quotes from +** around string literal tokens. +** +** Example: +** +** input: tokenize chinese ( 'simplifed' , 'mixed' ) +** output: chinese simplifed mixed +** +** Another example: +** +** input: delimiters ( '[' , ']' , '...' ) +** output: [ ] ... 
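+** (Note that j starts at -1 in the loop below, which is how the +** leading keyword token -- "tokenize" or "delimiters" in the examples +** above -- gets dropped: the first kept token is never copied into an +** output slot.)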
+*/ +static void tokenListToIdList(char **azIn){ + int i, j; + if( azIn ){ + for(i=0, j=-1; azIn[i]; i++){ + if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){ + dequoteString(azIn[i]); + if( j>=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token, remove any quotation marks, and return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = getToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is neither alphanumeric nor '_' +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and used by fulltextConnect and fulltextCreate. +*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + sqlite3_free(p->azColumn); + sqlite3_free(p->azContentColumn); + sqlite3_free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts2(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. + * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] - columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. + */ + CLEAR(pSpec); + for(i=n=0; i<argc; i++){ + n += strlen(argv[i]) + 1; + } + azArg = sqlite3_malloc( sizeof(char*)*argc + n ); + if( azArg==0 ){ + return SQLITE_NOMEM; + } + z = (char*)&azArg[argc]; + for(i=0; i<argc; i++){ + azArg[i] = z; + strcpy(z, argv[i]); + z += strlen(z)+1; + } + + /* Identify the column names and the tokenizer and delimiter arguments ** in the argv[][] array.
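+ ** For example, given + **   create virtual table email using fts2(subject, body, tokenize simple) + ** the loop below ends with pSpec->nColumn==2, azColumn[] holding + ** "subject" and "body", and zTokenizer=="tokenize simple".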
+ */ + pSpec->zDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; i<argc; ++i){ + if( startsWith(azArg[i],"tokenize") ){ + zTokenizer = azArg[i]; + }else{ + z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; i<pSpec->nColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). +*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; i<nColumn; i++){ + zNext = sqlite3_mprintf("%s%s%Q", zSchema, zSep, azColumn[i]); + sqlite3_free(zSchema); + zSchema = zNext; + zSep = ","; + } + zNext = sqlite3_mprintf("%s,%Q)", zSchema, zTableName); + sqlite3_free(zSchema); + return zNext; +} + +/* +** Build a new sqlite3_vtab structure that will describe the +** fulltext index defined by spec. 
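+** Continuing the example above, the string handed to +** sqlite3_declare_vtab() comes out as +**   CREATE TABLE x('subject','body','email') +** where the extra column is named after the table itself; a MATCH +** against that column searches all indexed columns.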
+*/ +static int constructVtab( + sqlite3 *db, /* The SQLite database connection */ + fts2Hash *pHash, /* Hash table containing tokenizers */ + TableSpec *spec, /* Parsed spec information from parseSpec() */ + sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ + char **pzErr /* Write any error message here */ +){ + int rc; + int n; + fulltext_vtab *v = 0; + const sqlite3_tokenizer_module *m = NULL; + char *schema; + + char const *zTok; /* Name of tokenizer to use for this fts table */ + int nTok; /* Length of zTok, including nul terminator */ + + v = (fulltext_vtab *) sqlite3_malloc(sizeof(fulltext_vtab)); + if( v==0 ) return SQLITE_NOMEM; + CLEAR(v); + /* sqlite will initialize v->base */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + + zTok = spec->azTokenizer[0]; + if( !zTok ){ + zTok = "simple"; + } + nTok = strlen(zTok)+1; + + m = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zTok, nTok); + if( !m ){ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + /* Indicate that the buffer is not live. */ + v->nPendingData = -1; + + *ppVTab = &v->base; + TRACE(("FTS2 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + +/* The %_content table holds the text of each document, with +** the rowid used as the docid. +*/ +/* TODO(shess) This comment needs elaboration to match the updated +** code. Work it into the top-of-file comment at that time. 
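+** (For the running example the first statement below produces +** CREATE TABLE %_content(c0subject, c1body), using the sanitized +** cNNAAAA names constructed in parseSpec.)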
+*/ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + TRACE(("FTS2 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); + stringBufferDestroy(&schema); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segments(block blob);"); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segdir(" + " level integer," + " idx integer," + " start_block integer," + " leaves_end_block integer," + " end_block integer," + " root blob," + " primary key(level, idx)" + ");"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. */ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + int i; + TRACE(("FTS2 BestIndex\n")); + + for(i=0; i<pInfo->nConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( pConstraint->iColumn==-1 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ + TRACE(("FTS2 QUERY_ROWID\n")); + } else if( pConstraint->iColumn>=0 && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + TRACE(("FTS2 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps rowid matches should be considered cheaper than + * full-text searches. 
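+ * (As an illustration of the mapping above: a usable "rowid = ?" + * constraint selects QUERY_ROWID, while "body MATCH ?" against + * column 1 selects idxNum QUERY_FULLTEXT+1.)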
*/ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + TRACE(("FTS2 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + TRACE(("FTS2 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_segments;" + "drop table if exists %_segdir;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); + if( c ){ + memset(c, 0, sizeof(fulltext_cursor)); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + TRACE(("FTS2 Open %p: %p\n", pVTab, c)); + return SQLITE_OK; + }else{ + return SQLITE_NOMEM; + } +} + + +/* Free all of the dynamically allocated memory held by *q +*/ +static void queryClear(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + sqlite3_free(q->pTerms[i].pTerm); + } + sqlite3_free(q->pTerms); + CLEAR(q); +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + sqlite3_free(p->aMatch); + sqlite3_free(p->zOffset); + sqlite3_free(p->zSnippet); + CLEAR(p); +} +/* +** Append a single entry to the p->aMatch[] log. +*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS2_ROTOR_SZ (32) +#define FTS2_ROTOR_MASK (FTS2_ROTOR_SZ-1) + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. 
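+** (Phrase matching sketch: for the two-term phrase "full text" the +** bit for "text" is only admitted to the match mask when prevMatch +** still carries the bit that "full" set on the previous token, since +** prevMatch = match<<1 at the bottom of the scan loop.)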
+*/ +static void snippetOffsetsOfColumn( + Query *pQuery, + Snippet *pSnippet, + int iColumn, + const char *zDoc, + int nDoc +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + const QueryTerm *aTerm; /* Query string terms */ + int nTerm; /* Number of query string terms */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS2_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS2_ROTOR_SZ]; /* Length of token */ + + pVtab = pQuery->pFts; + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + aTerm = pQuery->pTerms; + nTerm = pQuery->nTerms; + if( nTerm>=FTS2_ROTOR_SZ ){ + nTerm = FTS2_ROTOR_SZ - 1; + } + prevMatch = 0; + while(1){ + rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc ) break; + iRotorBegin[iRotor&FTS2_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS2_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i<nTerm; i++){ + int iCol; + iCol = aTerm[i].iColumn; + if( iCol>=0 && iCol<nColumn && iCol!=iColumn ) continue; + if( aTerm[i].nTerm>nToken ) continue; + if( !aTerm[i].isPrefix && aTerm[i].nTerm<nToken ) continue; + assert( aTerm[i].nTerm<=nToken ); + if( memcmp(aTerm[i].pTerm, zToken, aTerm[i].nTerm) ) continue; + if( aTerm[i].iPhrase>1 && (prevMatch & (1<<i))==0 ) continue; + match |= 1<<i; + if( i==nTerm-1 || aTerm[i+1].iPhrase==1 ){ + for(j=aTerm[i].iPhrase-1; j>=0; j--){ + int k = (iRotor-j) & FTS2_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. +*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + fulltext_vtab *pFts; + + if( p->snippet.nMatch ) return; + if( p->q.nTerms==0 ) return; + pFts = p->q.pFts; + nColumn = pFts->nColumn; + iColumn = (p->iCursorType - QUERY_FULLTEXT); + if( iColumn<0 || iColumn>=nColumn ){ + iFirst = 0; + iLast = nColumn-1; + }else{ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); + } +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. 
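+** The result is a space-separated list of quadruples, one per match, +** each reading "column term start bytes"; two matches might encode +** as "0 0 12 6 1 0 4 6".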
+*/ +static void snippetOffsetText(Snippet *p){ + int i; + int cnt = 0; + StringBuffer sb; + char zBuf[200]; + if( p->zOffset ) return; + initStringBuffer(&sb); + for(i=0; i<p->nMatch; i++){ + struct snippetMatch *pMatch = &p->aMatch[i]; + zBuf[0] = ' '; + sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", + pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); + append(&sb, zBuf); + cnt++; + } + p->zOffset = stringBufferData(&sb); + p->nOffset = stringBufferLength(&sb); +} + +/* +** zDoc[0..nDoc-1] is a phrase of text. aMatch[0..nMatch-1] are a set +** of matching words, some of which might be in zDoc. zDoc is column +** number iCol. +** +** iBreak is a suggested spot in zDoc where we could begin or end an +** excerpt. Return a value similar to iBreak but possibly adjusted +** to be a little left or right so that the break point is better. +*/ +static int wordBoundary( + int iBreak, /* The suggested break point */ + const char *zDoc, /* Document text */ + int nDoc, /* Number of bytes in zDoc[] */ + struct snippetMatch *aMatch, /* Matching words */ + int nMatch, /* Number of entries in aMatch[] */ + int iCol /* The column number for zDoc[] */ +){ + int i; + if( iBreak<=10 ){ + return 0; + } + if( iBreak>=nDoc-10 ){ + return nDoc; + } + for(i=0; i<nMatch && aMatch[i].iCol<iCol; i++){} + while( i<nMatch && aMatch[i].iStart+aMatch[i].nByte<iBreak ){ i++; } + if( i<nMatch ){ + if( aMatch[i].iStart<iBreak+10 ){ + return aMatch[i].iStart; + } + if( i>0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ + return aMatch[i-1].iStart; + } + } + for(i=1; i<=10; i++){ + if( safe_isspace(zDoc[iBreak-i]) ){ + return iBreak - i + 1; + } + if( safe_isspace(zDoc[iBreak+i]) ){ + return iBreak + i + 1; + } + } + return iBreak; +} + + + +/* +** Allowed values for Snippet.aMatch[].snStatus +*/ +#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ +#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ + +/* +** Generate the text of a snippet.
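+** For example, with zStartMark="<b>", zEndMark="</b>" and +** zEllipsis="...", a hit on "sqlite" might render as +** "...the <b>sqlite</b> library..." with roughly 40 bytes of +** document context kept around each desired match.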
+*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + sqlite3_free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; i<nMatch; i++){ + aMatch[i].snStatus = SNIPPET_IGNORE; + } + nDesired = 0; + for(i=0; i<pCursor->q.nTerms; i++){ + for(j=0; j<nMatch; j++){ + if( aMatch[j].iTerm==i ){ + aMatch[j].snStatus = SNIPPET_DESIRED; + nDesired++; + break; + } + } + } + + iMatch = 0; + tailCol = -1; + tailOffset = 0; + for(i=0; i<nMatch && nDesired>0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatch<nMatch && aMatch[iMatch].iCol<iCol ){ iMatch++; } + while( iStart<iEnd ){ + while( iMatch<nMatch && aMatch[iMatch].iStart<iStart + && aMatch[iMatch].iCol<=iCol ){ + iMatch++; + } + if( iMatch<nMatch && aMatch[iMatch].iStart<iEnd + && aMatch[iMatch].iCol==iCol ){ + nappend(&sb, &zDoc[iStart], aMatch[iMatch].iStart - iStart); + iStart = aMatch[iMatch].iStart; + append(&sb, zStartMark); + nappend(&sb, &zDoc[iStart], aMatch[iMatch].nByte); + append(&sb, zEndMark); + iStart += aMatch[iMatch].nByte; + for(j=iMatch+1; j<nMatch; j++){ + if( aMatch[j].iTerm==aMatch[iMatch].iTerm + && aMatch[j].snStatus==SNIPPET_DESIRED ){ + nDesired--; + aMatch[j].snStatus = SNIPPET_IGNORE; + } + } + }else{ + nappend(&sb, &zDoc[iStart], iEnd - iStart); + iStart = iEnd; + } + } + tailCol = iCol; + tailOffset = iEnd; + } + trimWhiteSpace(&sb); + if( tailEllipsis ){ + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + } + pCursor->snippet.zSnippet = stringBufferData(&sb); + pCursor->snippet.nSnippet = stringBufferLength(&sb); +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. 
+*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + TRACE(("FTS2 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + queryClear(&c->q); + snippetClear(&c->snippet); + if( c->result.nData!=0 ) dlrDestroy(&c->reader); + dataBufferDestroy(&c->result); + sqlite3_free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + int rc; + + TRACE(("FTS2 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); + dlrStep(&c->reader); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + + /* Corrupt if the index refers to missing document. */ + if( rc==SQLITE_DONE ) return SQLITE_CORRUPT_BKPT; + + return rc; + } +} + + +/* TODO(shess) If we pushed LeafReader to the top of the file, or to +** another file, term_select() could be pushed above +** docListOfTerm(). +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out); + +/* Return a DocList corresponding to the query term *pTerm. If *pTerm +** is the first term of a phrase query, go ahead and evaluate the phrase +** query and return the doclist for the entire phrase query. +** +** The resulting DL_DOCIDS doclist is stored in pResult, which is +** overwritten. +*/ +static int docListOfTerm( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* column to restrict to. No restriction if >=nColumn */ + QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ + DataBuffer *pResult /* Write the result here */ +){ + DataBuffer left, right, new; + int i, rc; + + /* No phrase search if no position info. */ + assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS ); + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&left, 0); + rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix, + 0<pQTerm->nPhrase ? DL_POSITIONS : DL_DOCIDS, &left); + if( rc ) return rc; + for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){ + dataBufferInit(&right, 0); + rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, + pQTerm[i].isPrefix, DL_POSITIONS, &right); + if( rc ){ + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListPhraseMerge(left.pData, left.nData, right.pData, right.nData, + i<pQTerm->nPhrase ? DL_POSITIONS : DL_DOCIDS, &new); + dataBufferDestroy(&left); + dataBufferDestroy(&right); + left = new; + } + *pResult = left; + return SQLITE_OK; +} + +/* Add a new term pTerm[0..nTerm-1] to the query *q. 
+*/ +static void queryAdd(Query *q, const char *pTerm, int nTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerms = sqlite3_realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + if( q->pTerms==0 ){ + q->nTerms = 0; + return; + } + t = &q->pTerms[q->nTerms - 1]; + CLEAR(t); + t->pTerm = sqlite3_malloc(nTerm+1); + memcpy(t->pTerm, pTerm, nTerm); + t->pTerm[nTerm] = 0; + t->nTerm = nTerm; + t->isOr = q->nextIsOr; + t->isPrefix = 0; + q->nextIsOr = 0; + t->iColumn = q->nextColumn; + q->nextColumn = q->dfltColumn; +} + +/* +** Check to see if the string zToken[0...nToken-1] matches any +** column name in the virtual table. If it does, +** return the zero-indexed column number. If not, return -1. +*/ +static int checkColumnSpecifier( + fulltext_vtab *pVtab, /* The virtual table */ + const char *zToken, /* Text of the token */ + int nToken /* Number of characters in the token */ +){ + int i; + for(i=0; i<pVtab->nColumn; i++){ + if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 + && pVtab->azColumn[i][nToken]==0 ){ + return i; + } + } + return -1; +} + +/* +** Parse the text at pSegment[0..nSegment-1]. Add additional terms +** to the query being assembled in pQuery. +** +** inPhrase is true if pSegment[0..nSegment-1] is contained within +** double-quotes. If inPhrase is true, then the first term +** is marked with the number of terms in the phrase less one and +** OR and "-" syntax is ignored. If inPhrase is false, then every +** term found is marked with nPhrase=0 and OR and "-" syntax is significant. +*/ +static int tokenizeSegment( + sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ + const char *pSegment, int nSegment, /* Query expression being parsed */ + int inPhrase, /* True if within "..." */ + Query *pQuery /* Append results here */ +){ + const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; + sqlite3_tokenizer_cursor *pCursor; + int firstIndex = pQuery->nTerms; + int iCol; + int nTerm = 1; + int iEndLast = -1; + + int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); + if( rc!=SQLITE_OK ) return rc; + pCursor->pTokenizer = pTokenizer; + + while( 1 ){ + const char *pToken; + int nToken, iBegin, iEnd, iPos; + + rc = pModule->xNext(pCursor, + &pToken, &nToken, + &iBegin, &iEnd, &iPos); + if( rc!=SQLITE_OK ) break; + if( !inPhrase && + pSegment[iEnd]==':' && + (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ + pQuery->nextColumn = iCol; + continue; + } + if( !inPhrase && pQuery->nTerms>0 && nToken==2 + && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ + pQuery->nextIsOr = 1; + continue; + } + + /* + * The ICU tokenizer considers '*' a break character, so the code below + * sets isPrefix correctly, but since that code doesn't eat the '*', the + * ICU tokenizer returns it as the next token. So eat it here until a + * better solution presents itself.
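+ * With the simple tokenizer, "dat*" comes back as the single + * token "dat" with pSegment[iEnd]=='*', so the isPrefix check + * further down suffices; under ICU the '*' arrives as its own + * length-1 token, which the test below swallows instead.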
+ */ + if( pQuery->nTerms>0 && nToken==1 && pSegment[iBegin]=='*' && + iEndLast==iBegin){ + pQuery->pTerms[pQuery->nTerms-1].isPrefix = 1; + continue; + } + iEndLast = iEnd; + + queryAdd(pQuery, pToken, nToken); + if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ + pQuery->pTerms[pQuery->nTerms-1].isNot = 1; + } + if( iEnd<nSegment && pSegment[iEnd]=='*' ){ + pQuery->pTerms[pQuery->nTerms-1].isPrefix = 1; + } + pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; + if( inPhrase ){ + nTerm++; + } + } + + if( inPhrase && pQuery->nTerms>firstIndex ){ + pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; + } + + return pModule->xClose(pCursor); +} + +/* Parse a query string, yielding a Query object pQuery. +** +** The calling function will need to queryClear() to clean up +** the dynamically allocated memory held by pQuery. +*/ +static int parseQuery( + fulltext_vtab *v, /* The fulltext index */ + const char *zInput, /* Input text of the query string */ + int nInput, /* Size of the input text */ + int dfltColumn, /* Default column of the index to match against */ + Query *pQuery /* Write the parse results here. */ +){ + int iInput, inPhrase = 0; + + if( zInput==0 ) nInput = 0; + if( nInput<0 ) nInput = strlen(zInput); + pQuery->nTerms = 0; + pQuery->pTerms = NULL; + pQuery->nextIsOr = 0; + pQuery->nextColumn = dfltColumn; + pQuery->dfltColumn = dfltColumn; + pQuery->pFts = v; + + for(iInput=0; iInput<nInput; ++iInput){ + int i; + for(i=iInput; i<nInput && zInput[i]!='"'; ++i){} + if( i>iInput ){ + tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, + pQuery); + } + iInput = i; + if( i<nInput ){ + assert( zInput[i]=='"' ); + inPhrase = !inPhrase; + } + } + + if( inPhrase ){ + /* unmatched quote */ + queryClear(pQuery); + return SQLITE_ERROR; + } + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int flushPendingTerms(fulltext_vtab *v); + +/* Perform a full-text query using the search expression in +** zInput[0..nInput-1]. Return a list of matching documents +** in pResult. +** +** Queries must match column iColumn. Or if iColumn>=nColumn +** they are allowed to match against any column. +*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DataBuffer *pResult, /* Write the result doclist here */ + Query *pQuery /* Put parsed query string here */ +){ + int i, iNext, rc; + DataBuffer left, right, or, new; + int nNot = 0; + QueryTerm *aTerm; + + /* TODO(shess) Instead of flushing pendingTerms, we could query for + ** the relevant term and merge the doclist into what we receive from + ** the database. Wait and see if this is a common issue, first. + ** + ** A good reason not to flush is to not generate update-related + ** error codes from here. + */ + + /* Flush any buffered updates before executing the query. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + + /* TODO(shess) I think that the queryClear() calls below are not + ** necessary, because fulltextClose() already clears the query. + */ + rc = parseQuery(v, zInput, nInput, iColumn, pQuery); + if( rc!=SQLITE_OK ) return rc; + + /* Empty or NULL queries return no results. */ + if( pQuery->nTerms==0 ){ + dataBufferInit(pResult, 0); + return SQLITE_OK; + } + + /* Merge AND terms. */ + /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). 
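+** (Illustration of the merge order: for the query +** sqlite OR fts -broken, the loop below computes docs(sqlite) union +** docs(fts) as the AND-group result, and the EXCEPT pass afterwards +** subtracts docs(broken).)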
*/ + aTerm = pQuery->pTerms; + for(i = 0; i<pQuery->nTerms; i=iNext){ + if( aTerm[i].isNot ){ + /* Handle all NOT terms in a separate pass */ + nNot++; + iNext = i + aTerm[i].nPhrase+1; + continue; + } + iNext = i + aTerm[i].nPhrase + 1; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + queryClear(pQuery); + return rc; + } + while( iNext<pQuery->nTerms && aTerm[iNext].isOr ){ + rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or); + iNext += aTerm[iNext].nPhrase + 1; + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + dataBufferDestroy(&right); + queryClear(pQuery); + return rc; + } + dataBufferInit(&new, 0); + docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&or); + right = new; + } + if( i==nNot ){ /* first term processed. */ + left = right; + }else{ + dataBufferInit(&new, 0); + docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + } + + if( nNot==pQuery->nTerms ){ + /* We do not yet know how to handle a query of only NOT terms */ + return SQLITE_ERROR; + } + + /* Do the EXCEPT terms */ + for(i=0; i<pQuery->nTerms; i += aTerm[i].nPhrase + 1){ + if( !aTerm[i].isNot ) continue; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + queryClear(pQuery); + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + + *pResult = left; + return rc; +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts2 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + + TRACE(("FTS2 Filter %p\n",pCursor)); + + /* If the cursor has a statement that was not prepared according to + ** idxNum, clear it. I believe all calls to fulltextFilter with a + ** given cursor will have the same idxNum , but in this case it's + ** easy to be safe. + */ + if( c->pStmt && c->iCursorType!=idxNum ){ + sqlite3_finalize(c->pStmt); + c->pStmt = NULL; + } + + /* Get a fresh statement appropriate to idxNum. */ + /* TODO(shess): Add a prepared-statement cache in the vt structure. + ** The cache must handle multiple open cursors. Easier to cache the + ** statement variants at the vt to reduce malloc/realloc/free here. 
+ ** Or we could have a StringBuffer variant which allowed stack + ** construction for small values. + */ + if( !c->pStmt ){ + char *zSql = sqlite3_mprintf("select rowid, * from %%_content %s", + idxNum==QUERY_GENERIC ? "" : "where rowid=?"); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); + sqlite3_free(zSql); + if( rc!=SQLITE_OK ) return rc; + c->iCursorType = idxNum; + }else{ + sqlite3_reset(c->pStmt); + assert( c->iCursorType==idxNum ); + } + + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_ROWID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + queryClear(&c->q); + if( c->result.nData!=0 ){ + /* This case happens if the same cursor is used repeatedly. */ + dlrDestroy(&c->reader); + dataBufferReset(&c->result); + }else{ + dataBufferInit(&c->result, 0); + } + rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q); + if( rc!=SQLITE_OK ) return rc; + if( c->result.nData!=0 ){ + dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); + } + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's result set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxCol<v->nColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrieve the rowid for the current row of the result set. The +** rowid should be written to *pRowid. +*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to pendingTerms table. If [iColumn] >= 0, +** we also store positions and offsets in the hash table using that +** column number.
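+** For instance, indexing the text "cat nap cat" as docid 7 in column +** 0 leaves pendingTerms with an entry for "cat" (positions 0 and 2) +** and one for "nap" (position 1), each under docid 7.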
+*/ +static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition)) ){ + DLCollector *p; + int nData; /* Size of doclist before our update. */ + + /* Positions can't be negative; we use -1 as a terminator + * internally. Token can't be NULL or empty. */ + if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){ + rc = SQLITE_ERROR; + break; + } + + p = fts2HashFind(&v->pendingTerms, pToken, nTokenBytes); + if( p==NULL ){ + nData = 0; + p = dlcNew(iDocid, DL_DEFAULT); + fts2HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); + + /* Overhead for our hash table entry, the key, and the value. */ + v->nPendingData += sizeof(struct fts2HashElem)+sizeof(*p)+nTokenBytes; + }else{ + nData = p->b.nData; + if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); + } + if( iColumn>=0 ){ + dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + + /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ + v->nPendingData += p->b.nData-nData; + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + if( SQLITE_DONE == rc ) return SQLITE_OK; + return rc; +} + +/* Add doclists for all terms in [pValues] to pendingTerms table. */ +static int insertTerms(fulltext_vtab *v, sqlite_int64 iRowid, + sqlite3_value **pValues){ + int i; + for(i = 0; i < v->nColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, iRowid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to +** pendingTerms. +*/ +static int deleteTerms(fulltext_vtab *v, sqlite_int64 iRowid){ + const char **pValues; + int i, rc; + + /* TODO(shess) Should we allow such tables at all? */ + if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; + + rc = content_select(v, iRowid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, iRowid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); + +/* Insert a row into the %_content table; set *piRowid to be the ID of the +** new row. Add doclists for terms to pendingTerms. +*/ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid, + sqlite3_value **pValues, sqlite_int64 *piRowid){ + int rc; + + rc = content_insert(v, pRequestRowid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + + *piRowid = sqlite3_last_insert_rowid(v->db); + rc = initPendingTerms(v, *piRowid); + if( rc!=SQLITE_OK ) return rc; + + return insertTerms(v, *piRowid, pValues); +} + +/* Delete a row from the %_content table; add empty doclists for terms +** to pendingTerms. 
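+** (The empty doclists act as tombstones: when segments are merged, +** the newest entry for a docid wins, so a docid whose newest entry +** carries no data drops out of the merged result.)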
+*/ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + return content_delete(v, iRow); /* execute an SQL DELETE */ +} + +/* Update a row in the %_content table; add delete doclists to +** pendingTerms for old terms not in the new data, add insert doclists +** to pendingTerms for terms in the new data. +*/ +static int index_update(fulltext_vtab *v, sqlite_int64 iRow, + sqlite3_value **pValues){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + /* Generate an empty doclist for each term that previously appeared in this + * row. */ + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ + if( rc!=SQLITE_OK ) return rc; + + /* Now add positions for terms which appear in the updated row. */ + return insertTerms(v, iRow, pValues); +} + +/*******************************************************************/ +/* InteriorWriter is used to collect terms and block references into +** interior nodes in %_segments. See commentary at top of file for +** format. +*/ + +/* How large interior nodes can grow. */ +#define INTERIOR_MAX 2048 + +/* Minimum number of terms per interior node (except the root). This +** prevents large terms from making the tree too skinny - must be >0 +** so that the tree always makes progress. Note that the min tree +** fanout will be INTERIOR_MIN_TERMS+1. +*/ +#define INTERIOR_MIN_TERMS 7 +#if INTERIOR_MIN_TERMS<1 +# error INTERIOR_MIN_TERMS must be greater than 0. +#endif + +/* ROOT_MAX controls how much data is stored inline in the segment +** directory. +*/ +/* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's +** only here so that interiorWriterRootInfo() and leafWriterRootInfo() +** can both see it, but if the caller passed it in, we wouldn't even +** need a define. +*/ +#define ROOT_MAX 1024 +#if ROOT_MAX<VARINT_MAX*2 +# error ROOT_MAX must have enough space for a header. +#endif + +/* InteriorBlock stores a linked-list of interior blocks while a lower +** layer is being constructed. +*/ +typedef struct InteriorBlock { + DataBuffer term; /* Leftmost term in block's subtree. */ + DataBuffer data; /* Accumulated data for the block. */ + struct InteriorBlock *next; +} InteriorBlock; + +static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock, + const char *pTerm, int nTerm){ + InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock)); + char c[VARINT_MAX+VARINT_MAX]; + int n; + + if( block ){ + memset(block, 0, sizeof(*block)); + dataBufferInit(&block->term, 0); + dataBufferReplace(&block->term, pTerm, nTerm); + + n = putVarint(c, iHeight); + n += putVarint(c+n, iChildBlock); + dataBufferInit(&block->data, INTERIOR_MAX); + dataBufferReplace(&block->data, c, n); + } + return block; +} + +#ifndef NDEBUG +/* Verify that the data is readable as an interior node. */ +static void interiorBlockValidate(InteriorBlock *pBlock){ + const char *pData = pBlock->data.pData; + int nData = pBlock->data.nData; + int n, iDummy; + sqlite_int64 iBlockid; + + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with height of node as a varint(n), n>0 */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n<nData ); + pData += n; + nData -= n; + + /* Must contain iBlockid. 
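+  ** (The full layout this function checks: varint height, varint +  ** leftmost child blockid, the first term as varint length plus +  ** bytes, then each later term as varint shared-prefix length, +  ** varint suffix length, and suffix bytes.)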
*/ + n = getVarint(pData, &iBlockid); + assert( n>0 ); + assert( n<=nData ); + pData += n; + nData -= n; + + /* Zero or more terms of positive length */ + if( nData!=0 ){ + /* First term is not delta-encoded. */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + + /* Following terms delta-encoded. */ + while( nData!=0 ){ + /* Length of shared prefix. */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n<nData ); + pData += n; + nData -= n; + + /* Length and data of distinct suffix. */ + n = getVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + } + } +} +#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) +#else +#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) +#endif + +typedef struct InteriorWriter { + int iHeight; /* from 0 at leaves. */ + InteriorBlock *first, *last; + struct InteriorWriter *parentWriter; + + DataBuffer term; /* Last term written to block "last". */ + sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ +#ifndef NDEBUG + sqlite_int64 iLastChildBlock; /* for consistency checks. */ +#endif +} InteriorWriter; + +/* Initialize an interior node where pTerm[nTerm] marks the leftmost +** term in the tree. iChildBlock is the leftmost child block at the +** next level down the tree. +*/ +static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, + sqlite_int64 iChildBlock, + InteriorWriter *pWriter){ + InteriorBlock *block; + assert( iHeight>0 ); + CLEAR(pWriter); + + pWriter->iHeight = iHeight; + pWriter->iOpeningChildBlock = iChildBlock; +#ifndef NDEBUG + pWriter->iLastChildBlock = iChildBlock; +#endif + block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); + pWriter->last = pWriter->first = block; + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + dataBufferInit(&pWriter->term, 0); +} + +/* Append the child node rooted at iChildBlock to the interior node, +** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. +*/ +static void interiorWriterAppend(InteriorWriter *pWriter, + const char *pTerm, int nTerm, + sqlite_int64 iChildBlock){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + + /* The first term written into an interior node is actually + ** associated with the second child added (the first child was added + ** in interiorWriterInit, or in the if clause at the bottom of this + ** function). That term gets encoded straight up, with nPrefix left + ** at 0. + */ + if( pWriter->term.nData==0 ){ + n = putVarint(c, nTerm); + }else{ + while( nPrefix<pWriter->term.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + } + + n = putVarint(c, nPrefix); + n += putVarint(c+n, nTerm-nPrefix); + } + +#ifndef NDEBUG + pWriter->iLastChildBlock++; +#endif + assert( pWriter->iLastChildBlock==iChildBlock ); + + /* Overflow to a new block if the new term makes the current block + ** too big, and the current block already has enough terms. 
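+  ** For example, with INTERIOR_MAX of 2048, a block already holding +  ** 2040 bytes and spanning more than INTERIOR_MIN_TERMS children is +  ** closed out here and a fresh block started rather than extended.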
+ */ + if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX && + iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){ + pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock, + pTerm, nTerm); + pWriter->last = pWriter->last->next; + pWriter->iOpeningChildBlock = iChildBlock; + dataBufferReset(&pWriter->term); + }else{ + dataBufferAppend2(&pWriter->last->data, c, n, + pTerm+nPrefix, nTerm-nPrefix); + dataBufferReplace(&pWriter->term, pTerm, nTerm); + } + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); +} + +/* Free the space used by pWriter, including the linked-list of +** InteriorBlocks, and parentWriter, if present. +*/ +static int interiorWriterDestroy(InteriorWriter *pWriter){ + InteriorBlock *block = pWriter->first; + + while( block!=NULL ){ + InteriorBlock *b = block; + block = block->next; + dataBufferDestroy(&b->term); + dataBufferDestroy(&b->data); + sqlite3_free(b); + } + if( pWriter->parentWriter!=NULL ){ + interiorWriterDestroy(pWriter->parentWriter); + sqlite3_free(pWriter->parentWriter); + } + dataBufferDestroy(&pWriter->term); + SCRAMBLE(pWriter); + return SQLITE_OK; +} + +/* If pWriter can fit entirely in ROOT_MAX, return it as the root info +** directly, leaving *piEndBlockid unchanged. Otherwise, flush +** pWriter to %_segments, building a new layer of interior nodes, and +** recursively ask for their root info. +*/ +static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter, + char **ppRootInfo, int *pnRootInfo, + sqlite_int64 *piEndBlockid){ + InteriorBlock *block = pWriter->first; + sqlite_int64 iBlockid = 0; + int rc; + + /* If we can fit the segment inline */ + if( block==pWriter->last && block->data.nData<ROOT_MAX ){ + *ppRootInfo = block->data.pData; + *pnRootInfo = block->data.nData; + return SQLITE_OK; + } + + /* Flush the first block to %_segments, and create a new level of + ** interior node. + */ + ASSERT_VALID_INTERIOR_BLOCK(block); + rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + *piEndBlockid = iBlockid; + + pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter)); + interiorWriterInit(pWriter->iHeight+1, + block->term.pData, block->term.nData, + iBlockid, pWriter->parentWriter); + + /* Flush additional blocks and append to the higher interior + ** node. + */ + for(block=block->next; block!=NULL; block=block->next){ + ASSERT_VALID_INTERIOR_BLOCK(block); + rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); + if( rc!=SQLITE_OK ) return rc; + *piEndBlockid = iBlockid; + + interiorWriterAppend(pWriter->parentWriter, + block->term.pData, block->term.nData, iBlockid); + } + + /* Parent node gets the chance to be the root. */ + return interiorWriterRootInfo(v, pWriter->parentWriter, + ppRootInfo, pnRootInfo, piEndBlockid); +} + +/****************************************************************/ +/* InteriorReader is used to read off the data from an interior node +** (see comment at top of file for the format). +*/ +typedef struct InteriorReader { + const char *pData; + int nData; + + DataBuffer term; /* previous term, for decoding term delta. */ + + sqlite_int64 iBlockid; +} InteriorReader; + +static void interiorReaderDestroy(InteriorReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +/* TODO(shess) The assertions are great, but what if we're in NDEBUG +** and the blob is empty or otherwise contains suspect data?
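+** One option would be to give interiorReaderInit an error return and +** to mirror the asserts below with real bounds checks when reading a +** node whose provenance is untrusted.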
+*/ +static void interiorReaderInit(const char *pData, int nData, + InteriorReader *pReader){ + int n, nTerm; + + /* Require at least the leading flag byte */ + assert( nData>0 ); + assert( pData[0]!='\0' ); + + CLEAR(pReader); + + /* Decode the base blockid, and set the cursor to the first term. */ + n = getVarint(pData+1, &pReader->iBlockid); + assert( 1+n<=nData ); + pReader->pData = pData+1+n; + pReader->nData = nData-(1+n); + + /* A single-child interior node (such as when a leaf node was too + ** large for the segment directory) won't have any terms. + ** Otherwise, decode the first term. + */ + if( pReader->nData==0 ){ + dataBufferInit(&pReader->term, 0); + }else{ + n = getVarint32(pReader->pData, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); + assert( n+nTerm<=pReader->nData ); + pReader->pData += n+nTerm; + pReader->nData -= n+nTerm; + } +} + +static int interiorReaderAtEnd(InteriorReader *pReader){ + return pReader->term.nData==0; +} + +static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ + return pReader->iBlockid; +} + +static int interiorReaderTermBytes(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.nData; +} +static const char *interiorReaderTerm(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.pData; +} + +/* Step forward to the next term in the node. */ +static void interiorReaderStep(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + + /* If the last term has been read, signal eof, else construct the + ** next term. + */ + if( pReader->nData==0 ){ + dataBufferReset(&pReader->term); + }else{ + int n, nPrefix, nSuffix; + + n = getVarint32(pReader->pData, &nPrefix); + n += getVarint32(pReader->pData+n, &nSuffix); + + /* Truncate the current term and append suffix data. */ + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + assert( n+nSuffix<=pReader->nData ); + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } + pReader->iBlockid++; +} + +/* Compare the current term to pTerm[nTerm], returning strcmp-style +** results. If isPrefix, equality means equal through nTerm bytes. +*/ +static int interiorReaderTermCmp(InteriorReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + const char *pReaderTerm = interiorReaderTerm(pReader); + int nReaderTerm = interiorReaderTermBytes(pReader); + int c, n = nReaderTerm<nTerm ? nReaderTerm : nTerm; + + if( n==0 ){ + if( nReaderTerm>0 ) return -1; + if( nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReaderTerm, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return nReaderTerm - nTerm; +} + +/****************************************************************/ +/* LeafWriter is used to collect terms and associated doclist data +** into leaf blocks in %_segments (see top of file for format info). +** Expected usage is: +** +** LeafWriter writer; +** leafWriterInit(0, 0, &writer); +** while( sorted_terms_left_to_process ){ +** // data is doclist data for that term. +** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); +** if( rc!=SQLITE_OK ) goto err; +** } +** rc = leafWriterFinalize(v, &writer); +**err: +** leafWriterDestroy(&writer); +** return rc; +** +** leafWriterStep() may write a collected leaf out to %_segments. +** leafWriterFinalize() finishes writing any buffered data and stores +** a root node in %_segdir. 
leafWriterDestroy() frees all buffers and
+** InteriorWriters allocated as part of writing this segment.
+**
+** TODO(shess) Document leafWriterStepMerge().
+*/
+
+/* Put terms with data this big in their own block. */
+#define STANDALONE_MIN 1024
+
+/* Keep leaf blocks below this size. */
+#define LEAF_MAX 2048
+
+typedef struct LeafWriter {
+  int iLevel;
+  int idx;
+  sqlite_int64 iStartBlockid;     /* needed to create the root info */
+  sqlite_int64 iEndBlockid;       /* when we're done writing. */
+
+  DataBuffer term;                /* previous encoded term */
+  DataBuffer data;                /* encoding buffer */
+
+  /* bytes of first term in the current node which distinguishes that
+  ** term from the last term of the previous node.
+  */
+  int nTermDistinct;
+
+  InteriorWriter parentWriter;    /* if we overflow */
+  int has_parent;
+} LeafWriter;
+
+static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){
+  CLEAR(pWriter);
+  pWriter->iLevel = iLevel;
+  pWriter->idx = idx;
+
+  dataBufferInit(&pWriter->term, 32);
+
+  /* Start out with a reasonably sized block, though it can grow. */
+  dataBufferInit(&pWriter->data, LEAF_MAX);
+}
+
+#ifndef NDEBUG
+/* Verify that the data is readable as a leaf node. */
+static void leafNodeValidate(const char *pData, int nData){
+  int n, iDummy;
+
+  if( nData==0 ) return;
+  assert( nData>0 );
+  assert( pData!=0 );
+  assert( pData+nData>pData );
+
+  /* Must lead with a varint(0) */
+  n = getVarint32(pData, &iDummy);
+  assert( iDummy==0 );
+  assert( n>0 );
+  assert( n<nData );
+  pData += n;
+  nData -= n;
+
+  /* Leading term length and data must fit in buffer. */
+  n = getVarint32(pData, &iDummy);
+  assert( n>0 );
+  assert( iDummy>0 );
+  assert( n+iDummy>0 );
+  assert( n+iDummy<nData );
+  pData += n+iDummy;
+  nData -= n+iDummy;
+
+  /* Leading term's doclist length and data must fit. */
+  n = getVarint32(pData, &iDummy);
+  assert( n>0 );
+  assert( iDummy>0 );
+  assert( n+iDummy>0 );
+  assert( n+iDummy<=nData );
+  ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL);
+  pData += n+iDummy;
+  nData -= n+iDummy;
+
+  /* Verify that trailing terms and doclists also are readable. */
+  while( nData!=0 ){
+    n = getVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>=0 );
+    assert( n<nData );
+    pData += n;
+    nData -= n;
+    n = getVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>0 );
+    assert( n+iDummy>0 );
+    assert( n+iDummy<nData );
+    pData += n+iDummy;
+    nData -= n+iDummy;
+
+    n = getVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>0 );
+    assert( n+iDummy>0 );
+    assert( n+iDummy<=nData );
+    ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL);
+    pData += n+iDummy;
+    nData -= n+iDummy;
+  }
+}
+#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n)
+#else
+#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 )
+#endif
+
+/* Flush the current leaf node to %_segments, and add the resulting
+** blockid and the starting term to the interior node which will
+** contain it.
+*/
+static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter,
+                                   int iData, int nData){
+  sqlite_int64 iBlockid = 0;
+  const char *pStartingTerm;
+  int nStartingTerm, rc, n;
+
+  /* Must have the leading varint(0) flag, plus at least some
+  ** valid-looking data.
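+  **
+  ** That minimum is three bytes: one byte for the varint(0) leaf
+  ** flag, at least one byte for the leading term's length varint,
+  ** and at least one byte of term data; hence the nData>2 below.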
+  */
+  assert( nData>2 );
+  assert( iData>=0 );
+  assert( iData+nData<=pWriter->data.nData );
+  ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData);
+
+  rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid);
+  if( rc!=SQLITE_OK ) return rc;
+  assert( iBlockid!=0 );
+
+  /* Reconstruct the first term in the leaf for purposes of building
+  ** the interior node.
+  */
+  n = getVarint32(pWriter->data.pData+iData+1, &nStartingTerm);
+  pStartingTerm = pWriter->data.pData+iData+1+n;
+  assert( pWriter->data.nData>iData+1+n+nStartingTerm );
+  assert( pWriter->nTermDistinct>0 );
+  assert( pWriter->nTermDistinct<=nStartingTerm );
+  nStartingTerm = pWriter->nTermDistinct;
+
+  if( pWriter->has_parent ){
+    interiorWriterAppend(&pWriter->parentWriter,
+                         pStartingTerm, nStartingTerm, iBlockid);
+  }else{
+    interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid,
+                       &pWriter->parentWriter);
+    pWriter->has_parent = 1;
+  }
+
+  /* Track the span of this segment's leaf nodes. */
+  if( pWriter->iEndBlockid==0 ){
+    pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid;
+  }else{
+    pWriter->iEndBlockid++;
+    assert( iBlockid==pWriter->iEndBlockid );
+  }
+
+  return SQLITE_OK;
+}
+static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){
+  int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Re-initialize the output buffer. */
+  dataBufferReset(&pWriter->data);
+
+  return SQLITE_OK;
+}
+
+/* Fetch the root info for the segment.  If the entire leaf fits
+** within ROOT_MAX, then it will be returned directly, otherwise it
+** will be flushed and the root info will be returned from the
+** interior node.  *piEndBlockid is set to the blockid of the last
+** interior or leaf node written to disk (0 if none are written at
+** all).
+*/
+static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter,
+                              char **ppRootInfo, int *pnRootInfo,
+                              sqlite_int64 *piEndBlockid){
+  /* we can fit the segment entirely inline */
+  if( !pWriter->has_parent && pWriter->data.nData<ROOT_MAX ){
+    *ppRootInfo = pWriter->data.pData;
+    *pnRootInfo = pWriter->data.nData;
+    *piEndBlockid = 0;
+    return SQLITE_OK;
+  }
+
+  /* Flush remaining leaf data. */
+  if( pWriter->data.nData>0 ){
+    int rc = leafWriterFlush(v, pWriter);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+
+  /* We must have flushed a leaf at some point. */
+  assert( pWriter->has_parent );
+
+  /* Tentatively set the end leaf blockid as the end blockid.  If the
+  ** interior node can be returned inline, this will be the final
+  ** blockid, otherwise it will be overwritten by
+  ** interiorWriterRootInfo().
+  */
+  *piEndBlockid = pWriter->iEndBlockid;
+
+  return interiorWriterRootInfo(v, &pWriter->parentWriter,
+                                ppRootInfo, pnRootInfo, piEndBlockid);
+}
+
+/* Collect the rootInfo data and store it into the segment directory.
+** This has the effect of flushing the segment's leaf data to
+** %_segments, and also flushing any interior nodes to %_segments.
+*/
+static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){
+  sqlite_int64 iEndBlockid;
+  char *pRootInfo;
+  int rc, nRootInfo;
+
+  rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Don't bother storing an entirely empty segment.
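+  **
+  ** This arises when no terms were ever stepped into the writer:
+  ** nothing was flushed to %_segments (so iEndBlockid is 0) and the
+  ** root buffer is empty, leaving no segdir row worth creating.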
*/ + if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; + + return segdir_set(v, pWriter->iLevel, pWriter->idx, + pWriter->iStartBlockid, pWriter->iEndBlockid, + iEndBlockid, pRootInfo, nRootInfo); +} + +static void leafWriterDestroy(LeafWriter *pWriter){ + if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); + dataBufferDestroy(&pWriter->term); + dataBufferDestroy(&pWriter->data); +} + +/* Encode a term into the leafWriter, delta-encoding as appropriate. +** Returns the length of the new term which distinguishes it from the +** previous term, which can be used to set nTermDistinct when a node +** boundary is crossed. +*/ +static int leafWriterEncodeTerm(LeafWriter *pWriter, + const char *pTerm, int nTerm){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + assert( nTerm>0 ); + while( nPrefix<pWriter->term.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + /* Failing this implies that the terms weren't in order. */ + assert( nPrefix<nTerm ); + } + + if( pWriter->data.nData==0 ){ + /* Encode the node header and leading term as: + ** varint(0) + ** varint(nTerm) + ** char pTerm[nTerm] + */ + n = putVarint(c, '\0'); + n += putVarint(c+n, nTerm); + dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); + }else{ + /* Delta-encode the term as: + ** varint(nPrefix) + ** varint(nSuffix) + ** char pTermSuffix[nSuffix] + */ + n = putVarint(c, nPrefix); + n += putVarint(c+n, nTerm-nPrefix); + dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); + } + dataBufferReplace(&pWriter->term, pTerm, nTerm); + + return nPrefix+1; +} + +/* Used to avoid a memmove when a large amount of doclist data is in +** the buffer. This constructs a node and term header before +** iDoclistData and flushes the resulting complete node using +** leafWriterInternalFlush(). +*/ +static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + int iDoclistData){ + char c[VARINT_MAX+VARINT_MAX]; + int iData, n = putVarint(c, 0); + n += putVarint(c+n, nTerm); + + /* There should always be room for the header. Even if pTerm shared + ** a substantial prefix with the previous term, the entire prefix + ** could be constructed from earlier data in the doclist, so there + ** should be room. + */ + assert( iDoclistData>=n+nTerm ); + + iData = iDoclistData-(n+nTerm); + memcpy(pWriter->data.pData+iData, c, n); + memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); + + return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + DLReader *pReaders, int nReaders){ + char c[VARINT_MAX+VARINT_MAX]; + int iTermData = pWriter->data.nData, iDoclistData; + int i, nData, n, nActualData, nActual, rc, nTermDistinct; + + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); + + /* Remember nTermDistinct if opening a new node. */ + if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; + + iDoclistData = pWriter->data.nData; + + /* Estimate the length of the merged doclist so we can leave space + ** to encode it. 
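+  **
+  ** The estimate is the sum of the input doclists' sizes, which is
+  ** an upper bound on the merged result.  If the merge comes in
+  ** smaller, or its length fits in a shorter varint (say an estimate
+  ** of 200 needing a two-byte varint versus an actual 120 needing
+  ** one byte), the code below patches the encoding up afterwards.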
+  */
+  for(i=0, nData=0; i<nReaders; i++){
+    nData += dlrAllDataBytes(&pReaders[i]);
+  }
+  n = putVarint(c, nData);
+  dataBufferAppend(&pWriter->data, c, n);
+
+  docListMerge(&pWriter->data, pReaders, nReaders);
+  ASSERT_VALID_DOCLIST(DL_DEFAULT,
+                       pWriter->data.pData+iDoclistData+n,
+                       pWriter->data.nData-iDoclistData-n, NULL);
+
+  /* The actual amount of doclist data at this point could be smaller
+  ** than the length we encoded.  Additionally, the space required to
+  ** encode this length could be smaller.  For small doclists, this is
+  ** not a big deal, we can just use memmove() to adjust things.
+  */
+  nActualData = pWriter->data.nData-(iDoclistData+n);
+  nActual = putVarint(c, nActualData);
+  assert( nActualData<=nData );
+  assert( nActual<=n );
+
+  /* If the new doclist is big enough to force a standalone leaf
+  ** node, we can immediately flush it inline without doing the
+  ** memmove().
+  */
+  /* TODO(shess) This test matches leafWriterStep(), which does this
+  ** test before it knows the cost to varint-encode the term and
+  ** doclist lengths.  At some point, change to
+  ** pWriter->data.nData-iTermData>STANDALONE_MIN.
+  */
+  if( nTerm+nActualData>STANDALONE_MIN ){
+    /* Push leaf node from before this term. */
+    if( iTermData>0 ){
+      rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
+      if( rc!=SQLITE_OK ) return rc;
+
+      pWriter->nTermDistinct = nTermDistinct;
+    }
+
+    /* Fix the encoded doclist length. */
+    iDoclistData += n - nActual;
+    memcpy(pWriter->data.pData+iDoclistData, c, nActual);
+
+    /* Push the standalone leaf node. */
+    rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData);
+    if( rc!=SQLITE_OK ) return rc;
+
+    /* Leave the node empty. */
+    dataBufferReset(&pWriter->data);
+
+    return rc;
+  }
+
+  /* At this point, we know that the doclist was small, so do the
+  ** memmove if indicated.
+  */
+  if( nActual<n ){
+    memmove(pWriter->data.pData+iDoclistData+nActual,
+            pWriter->data.pData+iDoclistData+n,
+            pWriter->data.nData-(iDoclistData+n));
+    pWriter->data.nData -= n-nActual;
+  }
+
+  /* Replace written length with actual length. */
+  memcpy(pWriter->data.pData+iDoclistData, c, nActual);
+
+  /* If the node is too large, break things up. */
+  /* TODO(shess) This test matches leafWriterStep(), which does this
+  ** test before it knows the cost to varint-encode the term and
+  ** doclist lengths.  At some point, change to
+  ** pWriter->data.nData>LEAF_MAX.
+  */
+  if( iTermData+nTerm+nActualData>LEAF_MAX ){
+    /* Flush out the leading data as a node */
+    rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
+    if( rc!=SQLITE_OK ) return rc;
+
+    pWriter->nTermDistinct = nTermDistinct;
+
+    /* Rebuild header using the current term */
+    n = putVarint(pWriter->data.pData, 0);
+    n += putVarint(pWriter->data.pData+n, nTerm);
+    memcpy(pWriter->data.pData+n, pTerm, nTerm);
+    n += nTerm;
+
+    /* There should always be room, because the previous encoding
+    ** included all data necessary to construct the term.
+    */
+    assert( n<iDoclistData );
+    /* So long as STANDALONE_MIN is half or less of LEAF_MAX, the
+    ** following memcpy() is safe (as opposed to needing a memmove).
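+    **
+    ** Sketch of the argument: everything kept here is this term's
+    ** rebuilt header plus a doclist below STANDALONE_MIN (larger
+    ** doclists were flushed standalone above), while iDoclistData
+    ** lies beyond the just-flushed prefix, so the destination range
+    ** ends before the source range begins.  The second assert below
+    ** checks exactly that.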
+ */ + assert( 2*STANDALONE_MIN<=LEAF_MAX ); + assert( n+pWriter->data.nData-iDoclistData<iDoclistData ); + memcpy(pWriter->data.pData+n, + pWriter->data.pData+iDoclistData, + pWriter->data.nData-iDoclistData); + pWriter->data.nData -= iDoclistData-n; + } + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + + return SQLITE_OK; +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +/* TODO(shess) Revise writeZeroSegment() so that doclists are +** constructed directly in pWriter->data. +*/ +static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + const char *pData, int nData){ + int rc; + DLReader reader; + + dlrInit(&reader, DL_DEFAULT, pData, nData); + rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); + dlrDestroy(&reader); + + return rc; +} + + +/****************************************************************/ +/* LeafReader is used to iterate over an individual leaf node. */ +typedef struct LeafReader { + DataBuffer term; /* copy of current term. */ + + const char *pData; /* data for current term. */ + int nData; +} LeafReader; + +static void leafReaderDestroy(LeafReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +static int leafReaderAtEnd(LeafReader *pReader){ + return pReader->nData<=0; +} + +/* Access the current term. */ +static int leafReaderTermBytes(LeafReader *pReader){ + return pReader->term.nData; +} +static const char *leafReaderTerm(LeafReader *pReader){ + assert( pReader->term.nData>0 ); + return pReader->term.pData; +} + +/* Access the doclist data for the current term. */ +static int leafReaderDataBytes(LeafReader *pReader){ + int nData; + assert( pReader->term.nData>0 ); + getVarint32(pReader->pData, &nData); + return nData; +} +static const char *leafReaderData(LeafReader *pReader){ + int n, nData; + assert( pReader->term.nData>0 ); + n = getVarint32(pReader->pData, &nData); + return pReader->pData+n; +} + +static void leafReaderInit(const char *pData, int nData, + LeafReader *pReader){ + int nTerm, n; + + assert( nData>0 ); + assert( pData[0]=='\0' ); + + CLEAR(pReader); + + /* Read the first term, skipping the header byte. */ + n = getVarint32(pData+1, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pData+1+n, nTerm); + + /* Position after the first term. */ + assert( 1+n+nTerm<nData ); + pReader->pData = pData+1+n+nTerm; + pReader->nData = nData-1-n-nTerm; +} + +/* Step the reader forward to the next term. */ +static void leafReaderStep(LeafReader *pReader){ + int n, nData, nPrefix, nSuffix; + assert( !leafReaderAtEnd(pReader) ); + + /* Skip previous entry's data block. */ + n = getVarint32(pReader->pData, &nData); + assert( n+nData<=pReader->nData ); + pReader->pData += n+nData; + pReader->nData -= n+nData; + + if( !leafReaderAtEnd(pReader) ){ + /* Construct the new term using a prefix from the old term plus a + ** suffix from the leaf data. + */ + n = getVarint32(pReader->pData, &nPrefix); + n += getVarint32(pReader->pData+n, &nSuffix); + assert( n+nSuffix<pReader->nData ); + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } +} + +/* strcmp-style comparison of pReader's current term against pTerm. +** If isPrefix, equality means equal through nTerm bytes. 
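+**
+** For example, with the reader positioned at "linux": against
+** pTerm="lin", nTerm=3 this returns 0 when isPrefix is set (the
+** first three bytes match), but a positive value when it is not,
+** since "linux" sorts after "lin".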
+*/
+static int leafReaderTermCmp(LeafReader *pReader,
+                             const char *pTerm, int nTerm, int isPrefix){
+  int c, n = pReader->term.nData<nTerm ? pReader->term.nData : nTerm;
+  if( n==0 ){
+    if( pReader->term.nData>0 ) return -1;
+    if( nTerm>0 ) return 1;
+    return 0;
+  }
+
+  c = memcmp(pReader->term.pData, pTerm, n);
+  if( c!=0 ) return c;
+  if( isPrefix && n==nTerm ) return 0;
+  return pReader->term.nData - nTerm;
+}
+
+
+/****************************************************************/
+/* LeavesReader wraps LeafReader to allow iterating over the entire
+** leaf layer of the tree.
+*/
+typedef struct LeavesReader {
+  int idx;                  /* Index within the segment. */
+
+  sqlite3_stmt *pStmt;      /* Statement we're streaming leaves from. */
+  int eof;                  /* we've seen SQLITE_DONE from pStmt. */
+
+  LeafReader leafReader;    /* reader for the current leaf. */
+  DataBuffer rootData;      /* root data for inline. */
+} LeavesReader;
+
+/* Access the current term. */
+static int leavesReaderTermBytes(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderTermBytes(&pReader->leafReader);
+}
+static const char *leavesReaderTerm(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderTerm(&pReader->leafReader);
+}
+
+/* Access the doclist data for the current term. */
+static int leavesReaderDataBytes(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderDataBytes(&pReader->leafReader);
+}
+static const char *leavesReaderData(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderData(&pReader->leafReader);
+}
+
+static int leavesReaderAtEnd(LeavesReader *pReader){
+  return pReader->eof;
+}
+
+/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus
+** leaving the statement handle open, which locks the table.
+*/
+/* TODO(shess) This "solution" is not satisfactory.  Really, there
+** should be a check-in function for all statement handles which
+** arranges to call sqlite3_reset().  This most likely will require
+** modification to control flow all over the place, though, so for now
+** just punt.
+**
+** Note that the current system assumes that segment merges will run
+** to completion, which is why this particular problem hasn't arisen
+** in this case.  Probably a brittle assumption.
+*/
+static int leavesReaderReset(LeavesReader *pReader){
+  return sqlite3_reset(pReader->pStmt);
+}
+
+static void leavesReaderDestroy(LeavesReader *pReader){
+  /* If idx is -1, that means we're using a non-cached statement
+  ** handle in the optimize() case, so we need to release it.
+  */
+  if( pReader->pStmt!=NULL && pReader->idx==-1 ){
+    sqlite3_finalize(pReader->pStmt);
+  }
+  leafReaderDestroy(&pReader->leafReader);
+  dataBufferDestroy(&pReader->rootData);
+  SCRAMBLE(pReader);
+}
+
+/* Initialize pReader with the given root data (if iStartBlockid==0
+** the leaf data was entirely contained in the root), or from the
+** stream of blocks between iStartBlockid and iEndBlockid, inclusive.
+*/
+/* TODO(shess): Figure out a means of indicating how many leaves are
+** expected, for purposes of detecting corruption.
+*/
+static int leavesReaderInit(fulltext_vtab *v,
+                            int idx,
+                            sqlite_int64 iStartBlockid,
+                            sqlite_int64 iEndBlockid,
+                            const char *pRootData, int nRootData,
+                            LeavesReader *pReader){
+  CLEAR(pReader);
+  pReader->idx = idx;
+
+  dataBufferInit(&pReader->rootData, 0);
+  if( iStartBlockid==0 ){
+    /* Corrupt if this can't be a leaf node.
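+    ** (A leaf node always begins with a NUL flag byte; an interior
+    ** node begins with its nonzero height, as interiorReaderInit()
+    ** asserts.)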
*/ + if( pRootData==NULL || nRootData<1 || pRootData[0]!='\0' ){ + return SQLITE_CORRUPT_BKPT; + } + /* Entire leaf level fit in root data. */ + dataBufferReplace(&pReader->rootData, pRootData, nRootData); + leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, + &pReader->leafReader); + }else{ + sqlite3_stmt *s; + int rc = sql_get_leaf_statement(v, idx, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) goto err; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) goto err; + + rc = sqlite3_step(s); + + /* Corrupt if interior node referenced missing leaf node. */ + if( rc==SQLITE_DONE ){ + rc = SQLITE_CORRUPT_BKPT; + goto err; + } + + if( rc!=SQLITE_ROW ) goto err; + rc = SQLITE_OK; + + /* Corrupt if leaf data isn't a blob. */ + if( sqlite3_column_type(s, 0)!=SQLITE_BLOB ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + const char *pLeafData = sqlite3_column_blob(s, 0); + int nLeafData = sqlite3_column_bytes(s, 0); + + /* Corrupt if this can't be a leaf node. */ + if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + leafReaderInit(pLeafData, nLeafData, &pReader->leafReader); + } + } + + err: + if( rc!=SQLITE_OK ){ + if( idx==-1 ){ + sqlite3_finalize(s); + }else{ + sqlite3_reset(s); + } + return rc; + } + + pReader->pStmt = s; + } + return SQLITE_OK; +} + +/* Step the current leaf forward to the next term. If we reach the +** end of the current leaf, step forward to the next leaf block. +*/ +static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ + assert( !leavesReaderAtEnd(pReader) ); + leafReaderStep(&pReader->leafReader); + + if( leafReaderAtEnd(&pReader->leafReader) ){ + int rc; + if( pReader->rootData.pData ){ + pReader->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_step(pReader->pStmt); + if( rc!=SQLITE_ROW ){ + pReader->eof = 1; + return rc==SQLITE_DONE ? SQLITE_OK : rc; + } + + /* Corrupt if leaf data isn't a blob. */ + if( sqlite3_column_type(pReader->pStmt, 0)!=SQLITE_BLOB ){ + return SQLITE_CORRUPT_BKPT; + }else{ + const char *pLeafData = sqlite3_column_blob(pReader->pStmt, 0); + int nLeafData = sqlite3_column_bytes(pReader->pStmt, 0); + + /* Corrupt if this can't be a leaf node. */ + if( pLeafData==NULL || nLeafData<1 || pLeafData[0]!='\0' ){ + return SQLITE_CORRUPT_BKPT; + } + + leafReaderDestroy(&pReader->leafReader); + leafReaderInit(pLeafData, nLeafData, &pReader->leafReader); + } + } + return SQLITE_OK; +} + +/* Order LeavesReaders by their term, ignoring idx. Readers at eof +** always sort to the end. +*/ +static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ + if( leavesReaderAtEnd(lr1) ){ + if( leavesReaderAtEnd(lr2) ) return 0; + return 1; + } + if( leavesReaderAtEnd(lr2) ) return -1; + + return leafReaderTermCmp(&lr1->leafReader, + leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), + 0); +} + +/* Similar to leavesReaderTermCmp(), with additional ordering by idx +** so that older segments sort before newer segments. +*/ +static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ + int c = leavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->idx-lr2->idx; +} + +/* Assume that pLr[1]..pLr[nLr] are sorted. Bubble pLr[0] into its +** sorted position. 
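+** (Strictly, the sorted tail is pLr[1]..pLr[nLr-1].)  For example,
+** readers positioned on ["cat","ant","bee"] become ["ant","cat","bee"]
+** after the first swap and ["ant","bee","cat"] after the second.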
+*/ +static void leavesReaderReorder(LeavesReader *pLr, int nLr){ + while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ + LeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* Initializes pReaders with the segments from level iLevel, returning +** the number of segments in *piReaders. Leaves pReaders in sorted +** order. +*/ +static int leavesReadersInit(fulltext_vtab *v, int iLevel, + LeavesReader *pReaders, int *piReaders){ + sqlite3_stmt *s; + int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + /* Corrupt if we get back different types than we stored. */ + if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER || + sqlite3_column_type(s, 1)!=SQLITE_INTEGER || + sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ + rc = SQLITE_CORRUPT_BKPT; + break; + } + + assert( i<MERGE_COUNT ); + rc = leavesReaderInit(v, i, iStart, iEnd, pRootData, nRootData, + &pReaders[i]); + if( rc!=SQLITE_OK ) break; + + i++; + } + if( rc!=SQLITE_DONE ){ + while( i-->0 ){ + leavesReaderDestroy(&pReaders[i]); + } + sqlite3_reset(s); /* So we don't leave a lock. */ + return rc; + } + + *piReaders = i; + + /* Leave our results sorted by term, then age. */ + while( i-- ){ + leavesReaderReorder(pReaders+i, *piReaders-i); + } + return SQLITE_OK; +} + +/* Merge doclists from pReaders[nReaders] into a single doclist, which +** is written to pWriter. Assumes pReaders is ordered oldest to +** newest. +*/ +/* TODO(shess) Consider putting this inline in segmentMerge(). */ +static int leavesReadersMerge(fulltext_vtab *v, + LeavesReader *pReaders, int nReaders, + LeafWriter *pWriter){ + DLReader dlReaders[MERGE_COUNT]; + const char *pTerm = leavesReaderTerm(pReaders); + int i, nTerm = leavesReaderTermBytes(pReaders); + + assert( nReaders<=MERGE_COUNT ); + + for(i=0; i<nReaders; i++){ + dlrInit(&dlReaders[i], DL_DEFAULT, + leavesReaderData(pReaders+i), + leavesReaderDataBytes(pReaders+i)); + } + + return leafWriterStepMerge(v, pWriter, pTerm, nTerm, dlReaders, nReaders); +} + +/* Forward ref due to mutual recursion with segdirNextIndex(). */ +static int segmentMerge(fulltext_vtab *v, int iLevel); + +/* Put the next available index at iLevel into *pidx. If iLevel +** already has MERGE_COUNT segments, they are merged to a higher +** level to make room. +*/ +static int segdirNextIndex(fulltext_vtab *v, int iLevel, int *pidx){ + int rc = segdir_max_index(v, iLevel, pidx); + if( rc==SQLITE_DONE ){ /* No segments at iLevel. */ + *pidx = 0; + }else if( rc==SQLITE_ROW ){ + if( *pidx==(MERGE_COUNT-1) ){ + rc = segmentMerge(v, iLevel); + if( rc!=SQLITE_OK ) return rc; + *pidx = 0; + }else{ + (*pidx)++; + } + }else{ + return rc; + } + return SQLITE_OK; +} + +/* Merge MERGE_COUNT segments at iLevel into a new segment at +** iLevel+1. If iLevel+1 is already full of segments, those will be +** merged to make room. +*/ +static int segmentMerge(fulltext_vtab *v, int iLevel){ + LeafWriter writer; + LeavesReader lrs[MERGE_COUNT]; + int i, rc, idx = 0; + + /* Determine the next available segment index at the next level, + ** merging as necessary. 
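+  ** Each level holds at most MERGE_COUNT segments; filling a level
+  ** carries its contents into a single segment one level up, much
+  ** like incrementing a base-MERGE_COUNT counter.  The recursion
+  ** through segdirNextIndex() can therefore cascade several levels.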
+ */ + rc = segdirNextIndex(v, iLevel+1, &idx); + if( rc!=SQLITE_OK ) return rc; + + /* TODO(shess) This assumes that we'll always see exactly + ** MERGE_COUNT segments to merge at a given level. That will be + ** broken if we allow the developer to request preemptive or + ** deferred merging. + */ + memset(&lrs, '\0', sizeof(lrs)); + rc = leavesReadersInit(v, iLevel, lrs, &i); + if( rc!=SQLITE_OK ) return rc; + assert( i==MERGE_COUNT ); + + leafWriterInit(iLevel+1, idx, &writer); + + /* Since leavesReaderReorder() pushes readers at eof to the end, + ** when the first reader is empty, all will be empty. + */ + while( !leavesReaderAtEnd(lrs) ){ + /* Figure out how many readers share their next term. */ + for(i=1; i<MERGE_COUNT && !leavesReaderAtEnd(lrs+i); i++){ + if( 0!=leavesReaderTermCmp(lrs, lrs+i) ) break; + } + + rc = leavesReadersMerge(v, lrs, i, &writer); + if( rc!=SQLITE_OK ) goto err; + + /* Step forward those that were merged. */ + while( i-->0 ){ + rc = leavesReaderStep(v, lrs+i); + if( rc!=SQLITE_OK ) goto err; + + /* Reorder by term, then by age. */ + leavesReaderReorder(lrs+i, MERGE_COUNT-i); + } + } + + for(i=0; i<MERGE_COUNT; i++){ + leavesReaderDestroy(&lrs[i]); + } + + rc = leafWriterFinalize(v, &writer); + leafWriterDestroy(&writer); + if( rc!=SQLITE_OK ) return rc; + + /* Delete the merged segment data. */ + return segdir_delete(v, iLevel); + + err: + for(i=0; i<MERGE_COUNT; i++){ + leavesReaderDestroy(&lrs[i]); + } + leafWriterDestroy(&writer); + return rc; +} + +/* Accumulate the union of *acc and *pData into *acc. */ +static void docListAccumulateUnion(DataBuffer *acc, + const char *pData, int nData) { + DataBuffer tmp = *acc; + dataBufferInit(acc, tmp.nData+nData); + docListUnion(tmp.pData, tmp.nData, pData, nData, acc); + dataBufferDestroy(&tmp); +} + +/* TODO(shess) It might be interesting to explore different merge +** strategies, here. For instance, since this is a sorted merge, we +** could easily merge many doclists in parallel. With some +** comprehension of the storage format, we could merge all of the +** doclists within a leaf node directly from the leaf node's storage. +** It may be worthwhile to merge smaller doclists before larger +** doclists, since they can be traversed more quickly - but the +** results may have less overlap, making them more expensive in a +** different way. +*/ + +/* Scan pReader for pTerm/nTerm, and merge the term's doclist over +** *out (any doclists with duplicate docids overwrite those in *out). +** Internal function for loadSegmentLeaf(). +*/ +static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* doclist data is accumulated into pBuffers similar to how one does + ** increment in binary arithmetic. If index 0 is empty, the data is + ** stored there. If there is data there, it is merged and the + ** results carried into position 1, with further merge-and-carry + ** until an empty position is found. + */ + DataBuffer *pBuffers = NULL; + int nBuffers = 0, nMaxBuffers = 0, rc; + + assert( nTerm>0 ); + + for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); + rc=leavesReaderStep(v, pReader)){ + /* TODO(shess) Really want leavesReaderTermCmp(), but that name is + ** already taken to compare the terms of two LeavesReaders. Think + ** on a better name. [Meanwhile, break encapsulation rather than + ** use a confusing name.] 
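+    **
+    ** To illustrate the pBuffers scheme described at the top of this
+    ** function: the first matching doclist lands in slot 0; the
+    ** second merges with it and carries into slot 1, clearing slot 0;
+    ** the third lands in slot 0 again; the fourth carries through
+    ** slots 0 and 1 into slot 2; and so on.  Each doclist thus takes
+    ** part in roughly log(number of matches) merges.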
+ */ + int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); + if( c>0 ) break; /* Past any possible matches. */ + if( c==0 ){ + const char *pData = leavesReaderData(pReader); + int iBuffer, nData = leavesReaderDataBytes(pReader); + + /* Find the first empty buffer. */ + for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ + if( 0==pBuffers[iBuffer].nData ) break; + } + + /* Out of buffers, add an empty one. */ + if( iBuffer==nBuffers ){ + if( nBuffers==nMaxBuffers ){ + DataBuffer *p; + nMaxBuffers += 20; + + /* Manual realloc so we can handle NULL appropriately. */ + p = sqlite3_malloc(nMaxBuffers*sizeof(*pBuffers)); + if( p==NULL ){ + rc = SQLITE_NOMEM; + break; + } + + if( nBuffers>0 ){ + assert(pBuffers!=NULL); + memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); + sqlite3_free(pBuffers); + } + pBuffers = p; + } + dataBufferInit(&(pBuffers[nBuffers]), 0); + nBuffers++; + } + + /* At this point, must have an empty at iBuffer. */ + assert(iBuffer<nBuffers && pBuffers[iBuffer].nData==0); + + /* If empty was first buffer, no need for merge logic. */ + if( iBuffer==0 ){ + dataBufferReplace(&(pBuffers[0]), pData, nData); + }else{ + /* pAcc is the empty buffer the merged data will end up in. */ + DataBuffer *pAcc = &(pBuffers[iBuffer]); + DataBuffer *p = &(pBuffers[0]); + + /* Handle position 0 specially to avoid need to prime pAcc + ** with pData/nData. + */ + dataBufferSwap(p, pAcc); + docListAccumulateUnion(pAcc, pData, nData); + + /* Accumulate remaining doclists into pAcc. */ + for(++p; p<pAcc; ++p){ + docListAccumulateUnion(pAcc, p->pData, p->nData); + + /* dataBufferReset() could allow a large doclist to blow up + ** our memory requirements. + */ + if( p->nCapacity<1024 ){ + dataBufferReset(p); + }else{ + dataBufferDestroy(p); + dataBufferInit(p, 0); + } + } + } + } + } + + /* Union all the doclists together into *out. */ + /* TODO(shess) What if *out is big? Sigh. */ + if( rc==SQLITE_OK && nBuffers>0 ){ + int iBuffer; + for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ + if( pBuffers[iBuffer].nData>0 ){ + if( out->nData==0 ){ + dataBufferSwap(out, &(pBuffers[iBuffer])); + }else{ + docListAccumulateUnion(out, pBuffers[iBuffer].pData, + pBuffers[iBuffer].nData); + } + } + } + } + + while( nBuffers-- ){ + dataBufferDestroy(&(pBuffers[nBuffers])); + } + if( pBuffers!=NULL ) sqlite3_free(pBuffers); + + return rc; +} + +/* Call loadSegmentLeavesInt() with pData/nData as input. */ +static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + LeavesReader reader; + int rc; + + assert( nData>1 ); + assert( *pData=='\0' ); + rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to +** iEndLeaf (inclusive) as input, and merge the resulting doclist into +** out. 
+*/ +static int loadSegmentLeaves(fulltext_vtab *v, + sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + int rc; + LeavesReader reader; + + assert( iStartLeaf<=iEndLeaf ); + rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Taking pData/nData as an interior node, find the sequence of child +** nodes which could include pTerm/nTerm/isPrefix. Note that the +** interior node terms logically come between the blocks, so there is +** one more blockid than there are terms (that block contains terms >= +** the last interior-node term). +*/ +/* TODO(shess) The calling code may already know that the end child is +** not worth calculating, because the end may be in a later sibling +** node. Consider whether breaking symmetry is worthwhile. I suspect +** it is not worthwhile. +*/ +static void getChildrenContaining(const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, + sqlite_int64 *piEndChild){ + InteriorReader reader; + + assert( nData>1 ); + assert( *pData!='\0' ); + interiorReaderInit(pData, nData, &reader); + + /* Scan for the first child which could contain pTerm/nTerm. */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; + interiorReaderStep(&reader); + } + *piStartChild = interiorReaderCurrentBlockid(&reader); + + /* Keep scanning to find a term greater than our term, using prefix + ** comparison if indicated. If isPrefix is false, this will be the + ** same blockid as the starting block. + */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; + interiorReaderStep(&reader); + } + *piEndChild = interiorReaderCurrentBlockid(&reader); + + interiorReaderDestroy(&reader); + + /* Children must ascend, and if !prefix, both must be the same. */ + assert( *piEndChild>=*piStartChild ); + assert( isPrefix || *piStartChild==*piEndChild ); +} + +/* Read block at iBlockid and pass it with other params to +** getChildrenContaining(). +*/ +static int loadAndGetChildrenContaining( + fulltext_vtab *v, + sqlite_int64 iBlockid, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, sqlite_int64 *piEndChild +){ + sqlite3_stmt *s = NULL; + int rc; + + assert( iBlockid!=0 ); + assert( pTerm!=NULL ); + assert( nTerm!=0 ); /* TODO(shess) Why not allow this? */ + assert( piStartChild!=NULL ); + assert( piEndChild!=NULL ); + + rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* Corrupt if interior node references missing child node. */ + if( rc==SQLITE_DONE ) return SQLITE_CORRUPT_BKPT; + if( rc!=SQLITE_ROW ) return rc; + + /* Corrupt if child node isn't a blob. */ + if( sqlite3_column_type(s, 0)!=SQLITE_BLOB ){ + sqlite3_reset(s); /* So we don't leave a lock. */ + return SQLITE_CORRUPT_BKPT; + }else{ + const char *pData = sqlite3_column_blob(s, 0); + int nData = sqlite3_column_bytes(s, 0); + + /* Corrupt if child is not a valid interior node. */ + if( pData==NULL || nData<1 || pData[0]=='\0' ){ + sqlite3_reset(s); /* So we don't leave a lock. 
*/ + return SQLITE_CORRUPT_BKPT; + } + + getChildrenContaining(pData, nData, pTerm, nTerm, + isPrefix, piStartChild, piEndChild); + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain + * locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + return SQLITE_OK; +} + +/* Traverse the tree represented by pData[nData] looking for +** pTerm[nTerm], placing its doclist into *out. This is internal to +** loadSegment() to make error-handling cleaner. +*/ +static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* Special case where root is a leaf. */ + if( *pData=='\0' ){ + return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); + }else{ + int rc; + sqlite_int64 iStartChild, iEndChild; + + /* Process pData as an interior node, then loop down the tree + ** until we find the set of leaf nodes to scan for the term. + */ + getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, + &iStartChild, &iEndChild); + while( iStartChild>iLeavesEnd ){ + sqlite_int64 iNextStart, iNextEnd; + rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, + &iNextStart, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + + /* If we've branched, follow the end branch, too. */ + if( iStartChild!=iEndChild ){ + sqlite_int64 iDummy; + rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, + &iDummy, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + } + + assert( iNextStart<=iNextEnd ); + iStartChild = iNextStart; + iEndChild = iNextEnd; + } + assert( iStartChild<=iLeavesEnd ); + assert( iEndChild<=iLeavesEnd ); + + /* Scan through the leaf segments for doclists. */ + return loadSegmentLeaves(v, iStartChild, iEndChild, + pTerm, nTerm, isPrefix, out); + } +} + +/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then +** merge its doclist over *out (any duplicate doclists read from the +** segment rooted at pData will overwrite those in *out). +*/ +/* TODO(shess) Consider changing this to determine the depth of the +** leaves using either the first characters of interior nodes (when +** ==1, we're one level above the leaves), or the first character of +** the root (which will describe the height of the tree directly). +** Either feels somewhat tricky to me. +*/ +/* TODO(shess) The current merge is likely to be slow for large +** doclists (though it should process from newest/smallest to +** oldest/largest, so it may not be that bad). It might be useful to +** modify things to allow for N-way merging. This could either be +** within a segment, with pairwise merges across segments, or across +** all segments at once. +*/ +static int loadSegment(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + DataBuffer result; + int rc; + + /* Corrupt if segment root can't be valid. */ + if( pData==NULL || nData<1 ) return SQLITE_CORRUPT_BKPT; + + /* This code should never be called with buffered updates. 
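+  ** (nPendingData<0 is the sentinel meaning the pendingTerms hash is
+  ** not in use; callers are expected to flush buffered terms before
+  ** querying segments, since pending doclists are not visible here.)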
*/ + assert( v->nPendingData<0 ); + + dataBufferInit(&result, 0); + rc = loadSegmentInt(v, pData, nData, iLeavesEnd, + pTerm, nTerm, isPrefix, &result); + if( rc==SQLITE_OK && result.nData>0 ){ + if( out->nData==0 ){ + DataBuffer tmp = *out; + *out = result; + result = tmp; + }else{ + DataBuffer merged; + DLReader readers[2]; + + dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); + dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); + dataBufferInit(&merged, out->nData+result.nData); + docListMerge(&merged, readers, 2); + dataBufferDestroy(out); + *out = merged; + dlrDestroy(&readers[0]); + dlrDestroy(&readers[1]); + } + } + dataBufferDestroy(&result); + return rc; +} + +/* Scan the database and merge together the posting lists for the term +** into *out. +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out){ + DataBuffer doclist; + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&doclist, 0); + + /* Traverse the segments from oldest to newest so that newer doclist + ** elements for given docids overwrite older elements. + */ + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + + /* Corrupt if we get back different types than we stored. */ + if( sqlite3_column_type(s, 1)!=SQLITE_INTEGER || + sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ + rc = SQLITE_CORRUPT_BKPT; + goto err; + } + + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, + &doclist); + if( rc!=SQLITE_OK ) goto err; + } + if( rc==SQLITE_DONE ){ + if( doclist.nData!=0 ){ + /* TODO(shess) The old term_select_all() code applied the column + ** restrict as we merged segments, leading to smaller buffers. + ** This is probably worthwhile to bring back, once the new storage + ** system is checked in. + */ + if( iColumn==v->nColumn) iColumn = -1; + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + iColumn, iType, out); + } + rc = SQLITE_OK; + } + + err: + sqlite3_reset(s); /* So we don't leave a lock. */ + dataBufferDestroy(&doclist); + return rc; +} + +/****************************************************************/ +/* Used to hold hashtable data for sorting. */ +typedef struct TermData { + const char *pTerm; + int nTerm; + DLCollector *pCollector; +} TermData; + +/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 +** for equal, >0 for greater-than). +*/ +static int termDataCmp(const void *av, const void *bv){ + const TermData *a = (const TermData *)av; + const TermData *b = (const TermData *)bv; + int n = a->nTerm<b->nTerm ? a->nTerm : b->nTerm; + int c = memcmp(a->pTerm, b->pTerm, n); + if( c!=0 ) return c; + return a->nTerm-b->nTerm; +} + +/* Order pTerms data by term, then write a new level 0 segment using +** LeafWriter. +*/ +static int writeZeroSegment(fulltext_vtab *v, fts2Hash *pTerms){ + fts2HashElem *e; + int idx, rc, i, n; + TermData *pData; + LeafWriter writer; + DataBuffer dl; + + /* Determine the next index at level 0, merging as necessary. 
*/ + rc = segdirNextIndex(v, 0, &idx); + if( rc!=SQLITE_OK ) return rc; + + n = fts2HashCount(pTerms); + pData = sqlite3_malloc(n*sizeof(TermData)); + + for(i = 0, e = fts2HashFirst(pTerms); e; i++, e = fts2HashNext(e)){ + assert( i<n ); + pData[i].pTerm = fts2HashKey(e); + pData[i].nTerm = fts2HashKeysize(e); + pData[i].pCollector = fts2HashData(e); + } + assert( i==n ); + + /* TODO(shess) Should we allow user-defined collation sequences, + ** here? I think we only need that once we support prefix searches. + */ + if( n>1 ) qsort(pData, n, sizeof(*pData), termDataCmp); + + /* TODO(shess) Refactor so that we can write directly to the segment + ** DataBuffer, as happens for segment merges. + */ + leafWriterInit(0, idx, &writer); + dataBufferInit(&dl, 0); + for(i=0; i<n; i++){ + dataBufferReset(&dl); + dlcAddDoclist(pData[i].pCollector, &dl); + rc = leafWriterStep(v, &writer, + pData[i].pTerm, pData[i].nTerm, dl.pData, dl.nData); + if( rc!=SQLITE_OK ) goto err; + } + rc = leafWriterFinalize(v, &writer); + + err: + dataBufferDestroy(&dl); + sqlite3_free(pData); + leafWriterDestroy(&writer); + return rc; +} + +/* If pendingTerms has data, free it. */ +static int clearPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + fts2HashElem *e; + for(e=fts2HashFirst(&v->pendingTerms); e; e=fts2HashNext(e)){ + dlcDelete(fts2HashData(e)); + } + fts2HashClear(&v->pendingTerms); + v->nPendingData = -1; + } + return SQLITE_OK; +} + +/* If pendingTerms has data, flush it to a level-zero segment, and +** free it. +*/ +static int flushPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + int rc = writeZeroSegment(v, &v->pendingTerms); + if( rc==SQLITE_OK ) clearPendingTerms(v); + return rc; + } + return SQLITE_OK; +} + +/* If pendingTerms is "too big", or docid is out of order, flush it. +** Regardless, be certain that pendingTerms is initialized for use. +*/ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + /* TODO(shess) Explore whether partially flushing the buffer on + ** forced-flush would provide better performance. I suspect that if + ** we ordered the doclists by size and flushed the largest until the + ** buffer was half empty, that would let the less frequent terms + ** generate longer doclists. + */ + if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ + int rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + } + if( v->nPendingData<0 ){ + fts2HashInit(&v->pendingTerms, FTS2_HASH_STRING, 1); + v->nPendingData = 0; + } + v->iPrevDocid = iDocid; + return SQLITE_OK; +} + +/* This function implements the xUpdate callback; it is the top-level entry + * point for inserting, deleting or updating a row in a full-text table. */ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + int rc; + + TRACE(("FTS2 Update %p\n", pVtab)); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0])); + if( rc==SQLITE_OK ){ + /* If we just deleted the last row in the table, clear out the + ** index data. + */ + rc = content_exists(v); + if( rc==SQLITE_ROW ){ + rc = SQLITE_OK; + }else if( rc==SQLITE_DONE ){ + /* Clear the pending terms so we don't flush a useless level-0 + ** segment when the transaction closes. 
+ */ + rc = clearPendingTerms(v); + if( rc==SQLITE_OK ){ + rc = segdir_delete_all(v); + } + } + } + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + } else { + assert( nArg==2+v->nColumn+1); + rc = index_update(v, rowid, &ppArg[2]); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + */ + assert( nArg==2+v->nColumn+1); + rc = index_insert(v, ppArg[1], &ppArg[2], pRowid); + } + + return rc; +} + +static int fulltextSync(sqlite3_vtab *pVtab){ + TRACE(("FTS2 xSync()\n")); + return flushPendingTerms((fulltext_vtab *)pVtab); +} + +static int fulltextBegin(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + TRACE(("FTS2 xBegin()\n")); + + /* Any buffered updates should have been cleared by the previous + ** transaction. + */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextCommit(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + TRACE(("FTS2 xCommit()\n")); + + /* Buffered updates should have been cleared by fulltextSync(). */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextRollback(sqlite3_vtab *pVtab){ + TRACE(("FTS2 xRollback()\n")); + return clearPendingTerms((fulltext_vtab *)pVtab); +} + +/* +** Implementation of the snippet() function for FTS2 +*/ +static void snippetFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); + }else{ + const char *zStart = "<b>"; + const char *zEnd = "</b>"; + const char *zEllipsis = "<b>...</b>"; + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + if( argc>=2 ){ + zStart = (const char*)sqlite3_value_text(argv[1]); + if( argc>=3 ){ + zEnd = (const char*)sqlite3_value_text(argv[2]); + if( argc>=4 ){ + zEllipsis = (const char*)sqlite3_value_text(argv[3]); + } + } + } + snippetAllOffsets(pCursor); + snippetText(pCursor, zStart, zEnd, zEllipsis); + sqlite3_result_text(pContext, pCursor->snippet.zSnippet, + pCursor->snippet.nSnippet, SQLITE_STATIC); + } +} + +/* +** Implementation of the offsets() function for FTS2 +*/ +static void snippetOffsetsFunc( + sqlite3_context *pContext, + int argc, + sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc<1 ) return; + if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to offsets",-1); + }else{ + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + snippetAllOffsets(pCursor); + snippetOffsetText(&pCursor->snippet); + sqlite3_result_text(pContext, + pCursor->snippet.zOffset, pCursor->snippet.nOffset, + SQLITE_STATIC); + } +} + +/* OptLeavesReader is nearly identical to LeavesReader, except that +** where LeavesReader is geared towards the merging of complete +** segment levels (with exactly MERGE_COUNT segments), 
OptLeavesReader
+** is geared towards implementation of the optimize() function, and
+** can merge all segments simultaneously.  This version may be
+** somewhat less efficient than LeavesReader because it merges into an
+** accumulator rather than doing an N-way merge, but since segment
+** size grows exponentially (so segment count logarithmically) this is
+** probably not an immediate problem.
+*/
+/* TODO(shess): Prove that assertion, or extend the merge code to
+** merge tree fashion (like the prefix-searching code does).
+*/
+/* TODO(shess): OptLeavesReader and LeavesReader could probably be
+** merged with little or no loss of performance for LeavesReader.  The
+** merged code would need to handle >MERGE_COUNT segments, and would
+** also need to be able to optionally optimize away deletes.
+*/
+typedef struct OptLeavesReader {
+  /* Segment number, to order readers by age. */
+  int segment;
+  LeavesReader reader;
+} OptLeavesReader;
+
+static int optLeavesReaderAtEnd(OptLeavesReader *pReader){
+  return leavesReaderAtEnd(&pReader->reader);
+}
+static int optLeavesReaderTermBytes(OptLeavesReader *pReader){
+  return leavesReaderTermBytes(&pReader->reader);
+}
+static const char *optLeavesReaderData(OptLeavesReader *pReader){
+  return leavesReaderData(&pReader->reader);
+}
+static int optLeavesReaderDataBytes(OptLeavesReader *pReader){
+  return leavesReaderDataBytes(&pReader->reader);
+}
+static const char *optLeavesReaderTerm(OptLeavesReader *pReader){
+  return leavesReaderTerm(&pReader->reader);
+}
+static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){
+  return leavesReaderStep(v, &pReader->reader);
+}
+static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){
+  return leavesReaderTermCmp(&lr1->reader, &lr2->reader);
+}
+/* Order by term ascending, segment ascending (oldest to newest), with
+** exhausted readers to the end.
+*/
+static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){
+  int c = optLeavesReaderTermCmp(lr1, lr2);
+  if( c!=0 ) return c;
+  return lr1->segment-lr2->segment;
+}
+/* Bubble pLr[0] to appropriate place in pLr[1..nLr-1].  Assumes that
+** pLr[1..nLr-1] is already sorted.
+*/
+static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){
+  while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){
+    OptLeavesReader tmp = pLr[0];
+    pLr[0] = pLr[1];
+    pLr[1] = tmp;
+    nLr--;
+    pLr++;
+  }
+}
+
+/* optimize() helper function.  Put the readers in order and iterate
+** through them, merging doclists for matching terms into pWriter.
+** Returns SQLITE_OK on success, or the SQLite error code which
+** prevented success.
+*/
+static int optimizeInternal(fulltext_vtab *v,
+                            OptLeavesReader *readers, int nReaders,
+                            LeafWriter *pWriter){
+  int i, rc = SQLITE_OK;
+  DataBuffer doclist, merged, tmp;
+
+  /* Order the readers. */
+  i = nReaders;
+  while( i-- > 0 ){
+    optLeavesReaderReorder(&readers[i], nReaders-i);
+  }
+
+  dataBufferInit(&doclist, LEAF_MAX);
+  dataBufferInit(&merged, LEAF_MAX);
+
+  /* Exhausted readers bubble to the end, so when the first reader is
+  ** at eof, all are at eof.
+  */
+  while( !optLeavesReaderAtEnd(&readers[0]) ){
+
+    /* Figure out how many readers share the next term. */
+    for(i=1; i<nReaders && !optLeavesReaderAtEnd(&readers[i]); i++){
+      if( 0!=optLeavesReaderTermCmp(&readers[0], &readers[i]) ) break;
+    }
+
+    /* Special-case for no merge. */
+    if( i==1 ){
+      /* Trim deletions from the doclist.
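+      ** Since optimize() rewrites everything into one segment, a
+      ** delete marker (essentially a docid with no position data) has
+      ** nothing older left to shadow and can simply be dropped.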
*/ + dataBufferReset(&merged); + docListTrim(DL_DEFAULT, + optLeavesReaderData(&readers[0]), + optLeavesReaderDataBytes(&readers[0]), + -1, DL_DEFAULT, &merged); + }else{ + DLReader dlReaders[MERGE_COUNT]; + int iReader, nReaders; + + /* Prime the pipeline with the first reader's doclist. After + ** one pass index 0 will reference the accumulated doclist. + */ + dlrInit(&dlReaders[0], DL_DEFAULT, + optLeavesReaderData(&readers[0]), + optLeavesReaderDataBytes(&readers[0])); + iReader = 1; + + assert( iReader<i ); /* Must execute the loop at least once. */ + while( iReader<i ){ + /* Merge 16 inputs per pass. */ + for( nReaders=1; iReader<i && nReaders<MERGE_COUNT; + iReader++, nReaders++ ){ + dlrInit(&dlReaders[nReaders], DL_DEFAULT, + optLeavesReaderData(&readers[iReader]), + optLeavesReaderDataBytes(&readers[iReader])); + } + + /* Merge doclists and swap result into accumulator. */ + dataBufferReset(&merged); + docListMerge(&merged, dlReaders, nReaders); + tmp = merged; + merged = doclist; + doclist = tmp; + + while( nReaders-- > 0 ){ + dlrDestroy(&dlReaders[nReaders]); + } + + /* Accumulated doclist to reader 0 for next pass. */ + dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); + } + + /* Destroy reader that was left in the pipeline. */ + dlrDestroy(&dlReaders[0]); + + /* Trim deletions from the doclist. */ + dataBufferReset(&merged); + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + -1, DL_DEFAULT, &merged); + } + + /* Only pass doclists with hits (skip if all hits deleted). */ + if( merged.nData>0 ){ + rc = leafWriterStep(v, pWriter, + optLeavesReaderTerm(&readers[0]), + optLeavesReaderTermBytes(&readers[0]), + merged.pData, merged.nData); + if( rc!=SQLITE_OK ) goto err; + } + + /* Step merged readers to next term and reorder. */ + while( i-- > 0 ){ + rc = optLeavesReaderStep(v, &readers[i]); + if( rc!=SQLITE_OK ) goto err; + + optLeavesReaderReorder(&readers[i], nReaders-i); + } + } + + err: + dataBufferDestroy(&doclist); + dataBufferDestroy(&merged); + return rc; +} + +/* Implement optimize() function for FTS3. optimize(t) merges all +** segments in the fts index into a single segment. 't' is the magic +** table-named column. +*/ +static void optimizeFunc(sqlite3_context *pContext, + int argc, sqlite3_value **argv){ + fulltext_cursor *pCursor; + if( argc>1 ){ + sqlite3_result_error(pContext, "excess arguments to optimize()",-1); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to optimize",-1); + }else{ + fulltext_vtab *v; + int i, rc, iMaxLevel; + OptLeavesReader *readers; + int nReaders; + LeafWriter writer; + sqlite3_stmt *s; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + /* Flush any buffered updates before optimizing. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) goto err; + + rc = segdir_count(v, &nReaders, &iMaxLevel); + if( rc!=SQLITE_OK ) goto err; + if( nReaders==0 || nReaders==1 ){ + sqlite3_result_text(pContext, "Index already optimal", -1, + SQLITE_STATIC); + return; + } + + rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) goto err; + + readers = sqlite3_malloc(nReaders*sizeof(readers[0])); + if( readers==NULL ) goto err; + + /* Note that there will already be a segment at this position + ** until we call segdir_delete() on iMaxLevel. 
+ */ + leafWriterInit(iMaxLevel, 0, &writer); + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + /* Corrupt if we get back different types than we stored. */ + if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER || + sqlite3_column_type(s, 1)!=SQLITE_INTEGER || + sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ + rc = SQLITE_CORRUPT_BKPT; + break; + } + + assert( i<nReaders ); + rc = leavesReaderInit(v, -1, iStart, iEnd, pRootData, nRootData, + &readers[i].reader); + if( rc!=SQLITE_OK ) break; + + readers[i].segment = i; + i++; + } + + /* If we managed to succesfully read them all, optimize them. */ + if( rc==SQLITE_DONE ){ + assert( i==nReaders ); + rc = optimizeInternal(v, readers, nReaders, &writer); + }else{ + sqlite3_reset(s); /* So we don't leave a lock. */ + } + + while( i-- > 0 ){ + leavesReaderDestroy(&readers[i].reader); + } + sqlite3_free(readers); + + /* If we've successfully gotten to here, delete the old segments + ** and flush the interior structure of the new segment. + */ + if( rc==SQLITE_OK ){ + for( i=0; i<=iMaxLevel; i++ ){ + rc = segdir_delete(v, i); + if( rc!=SQLITE_OK ) break; + } + + if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); + } + + leafWriterDestroy(&writer); + + if( rc!=SQLITE_OK ) goto err; + + sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); + return; + + /* TODO(shess): Error-handling needs to be improved along the + ** lines of the dump_ functions. + */ + err: + { + char buf[512]; + sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", + sqlite3_errmsg(sqlite3_context_db_handle(pContext))); + sqlite3_result_error(pContext, buf, -1); + } + } +} + +#ifdef SQLITE_TEST +/* Generate an error of the form "<prefix>: <msg>". If msg is NULL, +** pull the error from the context's db handle. +*/ +static void generateError(sqlite3_context *pContext, + const char *prefix, const char *msg){ + char buf[512]; + if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); + sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); + sqlite3_result_error(pContext, buf, -1); +} + +/* Helper function to collect the set of terms in the segment into +** pTerms. The segment is defined by the leaf nodes between +** iStartBlockid and iEndBlockid, inclusive, or by the contents of +** pRootData if iStartBlockid is 0 (in which case the entire segment +** fit in a leaf). +*/ +static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, + fts2Hash *pTerms){ + const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); + const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + const int nRootData = sqlite3_column_bytes(s, 2); + int rc; + LeavesReader reader; + + /* Corrupt if we get back different types than we stored. 
*/ + if( sqlite3_column_type(s, 0)!=SQLITE_INTEGER || + sqlite3_column_type(s, 1)!=SQLITE_INTEGER || + sqlite3_column_type(s, 2)!=SQLITE_BLOB ){ + return SQLITE_CORRUPT_BKPT; + } + + rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, + pRootData, nRootData, &reader); + if( rc!=SQLITE_OK ) return rc; + + while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ + const char *pTerm = leavesReaderTerm(&reader); + const int nTerm = leavesReaderTermBytes(&reader); + void *oldValue = sqlite3Fts2HashFind(pTerms, pTerm, nTerm); + void *newValue = (void *)((char *)oldValue+1); + + /* From the comment before sqlite3Fts2HashInsert in fts2_hash.c, + ** the data value passed is returned in case of malloc failure. + */ + if( newValue==sqlite3Fts2HashInsert(pTerms, pTerm, nTerm, newValue) ){ + rc = SQLITE_NOMEM; + }else{ + rc = leavesReaderStep(v, &reader); + } + } + + leavesReaderDestroy(&reader); + return rc; +} + +/* Helper function to build the result string for dump_terms(). */ +static int generateTermsResult(sqlite3_context *pContext, fts2Hash *pTerms){ + int iTerm, nTerms, nResultBytes, iByte; + char *result; + TermData *pData; + fts2HashElem *e; + + /* Iterate pTerms to generate an array of terms in pData for + ** sorting. + */ + nTerms = fts2HashCount(pTerms); + assert( nTerms>0 ); + pData = sqlite3_malloc(nTerms*sizeof(TermData)); + if( pData==NULL ) return SQLITE_NOMEM; + + nResultBytes = 0; + for(iTerm = 0, e = fts2HashFirst(pTerms); e; iTerm++, e = fts2HashNext(e)){ + nResultBytes += fts2HashKeysize(e)+1; /* Term plus trailing space */ + assert( iTerm<nTerms ); + pData[iTerm].pTerm = fts2HashKey(e); + pData[iTerm].nTerm = fts2HashKeysize(e); + pData[iTerm].pCollector = fts2HashData(e); /* unused */ + } + assert( iTerm==nTerms ); + + assert( nResultBytes>0 ); /* nTerms>0, nResultsBytes must be, too. */ + result = sqlite3_malloc(nResultBytes); + if( result==NULL ){ + sqlite3_free(pData); + return SQLITE_NOMEM; + } + + if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); + + /* Read the terms in order to build the result. */ + iByte = 0; + for(iTerm=0; iTerm<nTerms; ++iTerm){ + memcpy(result+iByte, pData[iTerm].pTerm, pData[iTerm].nTerm); + iByte += pData[iTerm].nTerm; + result[iByte++] = ' '; + } + assert( iByte==nResultBytes ); + assert( result[nResultBytes-1]==' ' ); + result[nResultBytes-1] = '\0'; + + /* Passes away ownership of result. */ + sqlite3_result_text(pContext, result, nResultBytes-1, sqlite3_free); + sqlite3_free(pData); + return SQLITE_OK; +} + +/* Implements dump_terms() for use in inspecting the fts2 index from +** tests. TEXT result containing the ordered list of terms joined by +** spaces. dump_terms(t, level, idx) dumps the terms for the segment +** specified by level, idx (in %_segdir), while dump_terms(t) dumps +** all terms in the index. In both cases t is the fts table's magic +** table-named column. +*/ +static void dumpTermsFunc( + sqlite3_context *pContext, + int argc, sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc!=3 && argc!=1 ){ + generateError(pContext, "dump_terms", "incorrect arguments"); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + generateError(pContext, "dump_terms", "illegal first argument"); + }else{ + fulltext_vtab *v; + fts2Hash terms; + sqlite3_stmt *s = NULL; + int rc; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + /* If passed only the cursor column, get all segments. 
Otherwise + ** get the segment described by the following two arguments. + */ + if( argc==1 ){ + rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + }else{ + rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[1])); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[2])); + } + } + } + + if( rc!=SQLITE_OK ){ + generateError(pContext, "dump_terms", NULL); + return; + } + + /* Collect the terms for each segment. */ + sqlite3Fts2HashInit(&terms, FTS2_HASH_STRING, 1); + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + rc = collectSegmentTerms(v, s, &terms); + if( rc!=SQLITE_OK ) break; + } + + if( rc!=SQLITE_DONE ){ + sqlite3_reset(s); + generateError(pContext, "dump_terms", NULL); + }else{ + const int nTerms = fts2HashCount(&terms); + if( nTerms>0 ){ + rc = generateTermsResult(pContext, &terms); + if( rc==SQLITE_NOMEM ){ + generateError(pContext, "dump_terms", "out of memory"); + }else{ + assert( rc==SQLITE_OK ); + } + }else if( argc==3 ){ + /* The specific segment asked for could not be found. */ + generateError(pContext, "dump_terms", "segment not found"); + }else{ + /* No segments found. */ + /* TODO(shess): It should be impossible to reach this. This + ** case can only happen for an empty table, in which case + ** SQLite has no rows to call this function on. + */ + sqlite3_result_null(pContext); + } + } + sqlite3Fts2HashClear(&terms); + } +} + +/* Expand the DL_DEFAULT doclist in pData into a text result in +** pContext. +*/ +static void createDoclistResult(sqlite3_context *pContext, + const char *pData, int nData){ + DataBuffer dump; + DLReader dlReader; + + assert( pData!=NULL && nData>0 ); + + dataBufferInit(&dump, 0); + dlrInit(&dlReader, DL_DEFAULT, pData, nData); + for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){ + char buf[256]; + PLReader plReader; + + plrInit(&plReader, &dlReader); + if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ + sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); + dataBufferAppend(&dump, buf, strlen(buf)); + }else{ + int iColumn = plrColumn(&plReader); + + sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", + dlrDocid(&dlReader), iColumn); + dataBufferAppend(&dump, buf, strlen(buf)); + + for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){ + if( plrColumn(&plReader)!=iColumn ){ + iColumn = plrColumn(&plReader); + sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, buf, strlen(buf)); + } + if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", + plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + }else if( DL_DEFAULT==DL_POSITIONS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); + }else{ + assert( NULL=="Unhandled DL_DEFAULT value"); + } + dataBufferAppend(&dump, buf, strlen(buf)); + } + plrDestroy(&plReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, "]] ", 3); + } + } + dlrDestroy(&dlReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dump.pData[dump.nData] = '\0'; + assert( dump.nData>0 ); + + /* Passes ownership of dump's buffer to pContext. 
*/ + sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); + dump.pData = NULL; + dump.nData = dump.nCapacity = 0; +} + +/* Implements dump_doclist() for use in inspecting the fts2 index from +** tests. TEXT result containing a string representation of the +** doclist for the indicated term. dump_doclist(t, term, level, idx) +** dumps the doclist for term from the segment specified by level, idx +** (in %_segdir), while dump_doclist(t, term) dumps the logical +** doclist for the term across all segments. The per-segment doclist +** can contain deletions, while the full-index doclist will not +** (deletions are omitted). +** +** Result formats differ with the setting of DL_DEFAULTS. Examples: +** +** DL_DOCIDS: [1] [3] [7] +** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] +** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] +** +** In each case the number after the outer '[' is the docid. In the +** latter two cases, the number before the inner '[' is the column +** associated with the values within. For DL_POSITIONS the numbers +** within are the positions, for DL_POSITIONS_OFFSETS they are the +** position, the start offset, and the end offset. +*/ +static void dumpDoclistFunc( + sqlite3_context *pContext, + int argc, sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc!=2 && argc!=4 ){ + generateError(pContext, "dump_doclist", "incorrect arguments"); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + generateError(pContext, "dump_doclist", "illegal first argument"); + }else if( sqlite3_value_text(argv[1])==NULL || + sqlite3_value_text(argv[1])[0]=='\0' ){ + generateError(pContext, "dump_doclist", "empty second argument"); + }else{ + const char *pTerm = (const char *)sqlite3_value_text(argv[1]); + const int nTerm = strlen(pTerm); + fulltext_vtab *v; + int rc; + DataBuffer doclist; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + dataBufferInit(&doclist, 0); + + /* termSelect() yields the same logical doclist that queries are + ** run against. + */ + if( argc==2 ){ + rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); + }else{ + sqlite3_stmt *s = NULL; + + /* Get our specific segment's information. */ + rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); + } + } + + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + if( rc==SQLITE_DONE ){ + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "segment not found"); + return; + } + + /* Found a segment, load it into doclist. */ + if( rc==SQLITE_ROW ){ + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + + /* loadSegment() is used by termSelect() to load each + ** segment's data. + */ + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, + &doclist); + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + /* Should not have more than one matching segment. 
*/ + if( rc!=SQLITE_DONE ){ + sqlite3_reset(s); + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "invalid segdir"); + return; + } + rc = SQLITE_OK; + } + } + } + + sqlite3_reset(s); + } + + if( rc==SQLITE_OK ){ + if( doclist.nData>0 ){ + createDoclistResult(pContext, doclist.pData, doclist.nData); + }else{ + /* TODO(shess): This can happen if the term is not present, or + ** if all instances of the term have been deleted and this is + ** an all-index dump. It may be interesting to distinguish + ** these cases. + */ + sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); + } + }else if( rc==SQLITE_NOMEM ){ + /* Handle out-of-memory cases specially because if they are + ** generated in fts2 code they may not be reflected in the db + ** handle. + */ + /* TODO(shess): Handle this more comprehensively. + ** sqlite3ErrStr() has what I need, but is internal. + */ + generateError(pContext, "dump_doclist", "out of memory"); + }else{ + generateError(pContext, "dump_doclist", NULL); + } + + dataBufferDestroy(&doclist); + } +} +#endif + +/* +** This routine implements the xFindFunction method for the FTS2 +** virtual table. +*/ +static int fulltextFindFunction( + sqlite3_vtab *pVtab, + int nArg, + const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg +){ + if( strcmp(zName,"snippet")==0 ){ + *pxFunc = snippetFunc; + return 1; + }else if( strcmp(zName,"offsets")==0 ){ + *pxFunc = snippetOffsetsFunc; + return 1; + }else if( strcmp(zName,"optimize")==0 ){ + *pxFunc = optimizeFunc; + return 1; +#ifdef SQLITE_TEST + /* NOTE(shess): These functions are present only for testing + ** purposes. No particular effort is made to optimize their + ** execution or how they build their results. + */ + }else if( strcmp(zName,"dump_terms")==0 ){ + /* fprintf(stderr, "Found dump_terms\n"); */ + *pxFunc = dumpTermsFunc; + return 1; + }else if( strcmp(zName,"dump_doclist")==0 ){ + /* fprintf(stderr, "Found dump_doclist\n"); */ + *pxFunc = dumpDoclistFunc; + return 1; +#endif + } + return 0; +} + +/* +** Rename an fts2 table. 
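
The overloads registered by fulltextFindFunction() are reached from ordinary SQL once a row of the fts2 table is in scope. A minimal sketch of driving snippet(), offsets() and optimize() from C; the table name "docs" and its single "content" column are hypothetical, and the dump_* functions exist only in SQLITE_TEST builds:

  #include <stdio.h>
  #include <sqlite3.h>

  /* Sketch: exercise the fts2 function overloads on a throwaway table. */
  static int fts2_function_demo(sqlite3 *db){
    sqlite3_stmt *s;
    int rc = sqlite3_exec(db,
        "CREATE VIRTUAL TABLE docs USING fts2(content);"
        "INSERT INTO docs(content) VALUES('an embedded relational database');",
        0, 0, 0);
    if( rc!=SQLITE_OK ) return rc;

    /* snippet() marks up matches; offsets() reports them numerically. */
    rc = sqlite3_prepare_v2(db,
        "SELECT snippet(docs, '[', ']', '...'), offsets(docs) "
        "FROM docs WHERE docs MATCH 'database'", -1, &s, 0);
    if( rc!=SQLITE_OK ) return rc;
    while( sqlite3_step(s)==SQLITE_ROW ){
      printf("%s | %s\n", (const char *)sqlite3_column_text(s, 0),
                          (const char *)sqlite3_column_text(s, 1));
    }
    sqlite3_finalize(s);

    /* optimize() takes the magic table-named column and merges all
    ** index segments into one (see optimizeFunc() above). */
    return sqlite3_exec(db, "SELECT optimize(docs) FROM docs LIMIT 1", 0, 0, 0);
  }
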
+*/ +static int fulltextRename( + sqlite3_vtab *pVtab, + const char *zName +){ + fulltext_vtab *p = (fulltext_vtab *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" + "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" + "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + , p->zDb, p->zName, zName + ); + if( zSql ){ + rc = sqlite3_exec(p->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static const sqlite3_module fts2Module = { + /* iVersion */ 0, + /* xCreate */ fulltextCreate, + /* xConnect */ fulltextConnect, + /* xBestIndex */ fulltextBestIndex, + /* xDisconnect */ fulltextDisconnect, + /* xDestroy */ fulltextDestroy, + /* xOpen */ fulltextOpen, + /* xClose */ fulltextClose, + /* xFilter */ fulltextFilter, + /* xNext */ fulltextNext, + /* xEof */ fulltextEof, + /* xColumn */ fulltextColumn, + /* xRowid */ fulltextRowid, + /* xUpdate */ fulltextUpdate, + /* xBegin */ fulltextBegin, + /* xSync */ fulltextSync, + /* xCommit */ fulltextCommit, + /* xRollback */ fulltextRollback, + /* xFindFunction */ fulltextFindFunction, + /* xRename */ fulltextRename, +}; + +static void hashDestroy(void *p){ + fts2Hash *pHash = (fts2Hash *)p; + sqlite3Fts2HashClear(pHash); + sqlite3_free(pHash); +} + +/* +** The fts2 built-in tokenizers - "simple" and "porter" - are implemented +** in files fts2_tokenizer1.c and fts2_porter.c respectively. The following +** two forward declarations are for functions declared in these files +** used to retrieve the respective implementations. +** +** Calling sqlite3Fts2SimpleTokenizerModule() sets the value pointed +** to by the argument to point a the "simple" tokenizer implementation. +** Function ...PorterTokenizerModule() sets *pModule to point to the +** porter tokenizer/stemmer implementation. +*/ +void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts2PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); +void sqlite3Fts2IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +int sqlite3Fts2InitHashTable(sqlite3 *, fts2Hash *, const char *); + +/* +** Initialise the fts2 extension. If this extension is built as part +** of the sqlite library, then this function is called directly by +** SQLite. If fts2 is built as a dynamically loadable extension, this +** function is called by the sqlite3_extension_init() entry point. +*/ +int sqlite3Fts2Init(sqlite3 *db){ + int rc = SQLITE_OK; + fts2Hash *pHash = 0; + const sqlite3_tokenizer_module *pSimple = 0; + const sqlite3_tokenizer_module *pPorter = 0; + const sqlite3_tokenizer_module *pIcu = 0; + + sqlite3Fts2SimpleTokenizerModule(&pSimple); + sqlite3Fts2PorterTokenizerModule(&pPorter); +#ifdef SQLITE_ENABLE_ICU + sqlite3Fts2IcuTokenizerModule(&pIcu); +#endif + + /* Allocate and initialise the hash-table used to store tokenizers. */ + pHash = sqlite3_malloc(sizeof(fts2Hash)); + if( !pHash ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1); + } + + /* Load the built-in tokenizers into the hash table */ + if( rc==SQLITE_OK ){ + if( sqlite3Fts2HashInsert(pHash, "simple", 7, (void *)pSimple) + || sqlite3Fts2HashInsert(pHash, "porter", 7, (void *)pPorter) + || (pIcu && sqlite3Fts2HashInsert(pHash, "icu", 4, (void *)pIcu)) + ){ + rc = SQLITE_NOMEM; + } + } + + /* Create the virtual table wrapper around the hash-table and overload + ** the two scalar functions. 
If this is successful, register the + ** module with sqlite. + */ + if( SQLITE_OK==rc +#if GEARS_FTS2_CHANGES && !SQLITE_TEST + /* fts2_tokenizer() disabled for security reasons. */ +#else + && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer")) +#endif + && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1)) +#ifdef SQLITE_TEST + && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1)) +#endif + ){ + return sqlite3_create_module_v2( + db, "fts2", &fts2Module, (void *)pHash, hashDestroy + ); + } + + /* An error has occured. Delete the hash table and return the error code. */ + assert( rc!=SQLITE_OK ); + if( pHash ){ + sqlite3Fts2HashClear(pHash); + sqlite3_free(pHash); + } + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3Fts2Init(db); +} +#endif + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/third_party/sqlite/ext/fts2/fts2.h b/third_party/sqlite/ext/fts2/fts2.h new file mode 100755 index 0000000..4da4c38 --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2.h @@ -0,0 +1,26 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file is used by programs that want to link against the +** FTS2 library. All it does is declare the sqlite3Fts2Init() interface. +*/ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3Fts2Init(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/third_party/sqlite/ext/fts2/fts2_hash.c b/third_party/sqlite/ext/fts2/fts2_hash.c new file mode 100755 index 0000000..f22fcc91 --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2_hash.c @@ -0,0 +1,374 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
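
The fts2.h header above is the whole client-side interface. A minimal sketch of registering the module by hand, assuming fts2 is compiled into the library (SQLITE_CORE plus SQLITE_ENABLE_FTS2) and nothing else calls the init function; each connection needs its own call:

  #include "sqlite3.h"
  #include "fts2.h"

  /* Open a database and make the fts2 module available on the
  ** new connection. */
  int open_with_fts2(const char *zFilename, sqlite3 **pDb){
    int rc = sqlite3_open(zFilename, pDb);
    if( rc==SQLITE_OK ) rc = sqlite3Fts2Init(*pDb);
    return rc;
  }
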
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) + +#include <assert.h> +#include <stdlib.h> +#include <string.h> + +#include "sqlite3.h" +#include "fts2_hash.h" + +/* +** Malloc and Free functions +*/ +static void *fts2HashMalloc(int n){ + void *p = sqlite3_malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} +static void fts2HashFree(void *p){ + sqlite3_free(p); +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants +** FTS2_HASH_BINARY or FTS2_HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. +*/ +void sqlite3Fts2HashInit(fts2Hash *pNew, int keyClass, int copyKey){ + assert( pNew!=0 ); + assert( keyClass>=FTS2_HASH_STRING && keyClass<=FTS2_HASH_BINARY ); + pNew->keyClass = keyClass; + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. +*/ +void sqlite3Fts2HashClear(fts2Hash *pH){ + fts2HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + fts2HashFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + fts2HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + fts2HashFree(elem->pKey); + } + fts2HashFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +/* +** Hash and comparison functions when the mode is FTS2_HASH_STRING +*/ +static int strHash(const void *pKey, int nKey){ + const char *z = (const char *)pKey; + int h = 0; + if( nKey<=0 ) nKey = (int) strlen(z); + while( nKey > 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return h & 0x7fffffff; +} +static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is FTS2_HASH_BINARY +*/ +static int binHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "hashFunction". The function takes a +** single parameter "keyClass". The return value of hashFunction() +** is a pointer to another function. Specifically, the return value +** of hashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*hashFunction(int keyClass))(const void*,int){ + if( keyClass==FTS2_HASH_STRING ){ + return &strHash; + }else{ + assert( keyClass==FTS2_HASH_BINARY ); + return &binHash; + } +} + +/* +** Return a pointer to the appropriate hash function given the key class. 
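
The practical consequence of the FTS2_HASH_STRING functions above: a lookup hits only when both the bytes and the length match, and case is respected. A small sketch using the public wrappers declared in fts2_hash.h:

  #include <assert.h>
  #include "fts2_hash.h"

  static void string_key_demo(void){
    fts2Hash h;
    sqlite3Fts2HashInit(&h, FTS2_HASH_STRING, 1);  /* copyKey=1: keys copied */
    sqlite3Fts2HashInsert(&h, "Term", 4, (void *)1);
    assert( sqlite3Fts2HashFind(&h, "Term", 4)!=0 );
    assert( sqlite3Fts2HashFind(&h, "term", 4)==0 );  /* case respected   */
    assert( sqlite3Fts2HashFind(&h, "Term", 3)==0 );  /* length respected */
    sqlite3Fts2HashClear(&h);
  }
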
+** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. +*/ +static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ + if( keyClass==FTS2_HASH_STRING ){ + return &strCompare; + }else{ + assert( keyClass==FTS2_HASH_BINARY ); + return &binCompare; + } +} + +/* Link an element into the hash table +*/ +static void insertElement( + fts2Hash *pH, /* The complete hash table */ + struct _fts2ht *pEntry, /* The entry into which pNew is inserted */ + fts2HashElem *pNew /* The element to be inserted */ +){ + fts2HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. +*/ +static void rehash(fts2Hash *pH, int new_size){ + struct _fts2ht *new_ht; /* The new hash table */ + fts2HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _fts2ht *)fts2HashMalloc( new_size*sizeof(struct _fts2ht) ); + if( new_ht==0 ) return; + fts2HashFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = hashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + insertElement(pH, &new_ht[h], elem); + } +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static fts2HashElem *findElementGivenHash( + const fts2Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + fts2HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _fts2ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = compareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. 
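
rehash() only ever produces power-of-two bucket counts, which is what lets findElementGivenHash() and the public entry points below reduce a hash value with a mask rather than a modulo. The equivalence, as a sketch:

  #include <assert.h>

  /* For htsize a power of two, masking equals taking the remainder. */
  static unsigned int bucket_of(unsigned int h, unsigned int htsize){
    assert( htsize>0 && (htsize & (htsize-1))==0 );
    return h & (htsize-1);               /* same value as h % htsize */
  }
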
+*/ +static void removeElementGivenHash( + fts2Hash *pH, /* The pH containing "elem" */ + fts2HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts2ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + fts2HashFree(elem->pKey); + } + fts2HashFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts2HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts2HashFind(const fts2Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts2HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
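
Those conventions (replacement returns the old data, NULL data deletes, malloc failure hands the new data back) look like this from the caller's side; a sketch where h is an initialized fts2Hash and pA/pB stand for arbitrary non-NULL payloads:

  void *pOld;
  pOld = sqlite3Fts2HashInsert(&h, "k", 1, pA);  /* new entry:   returns NULL */
  pOld = sqlite3Fts2HashInsert(&h, "k", 1, pB);  /* replacement: returns pA   */
  pOld = sqlite3Fts2HashInsert(&h, "k", 1, 0);   /* deletion:    returns pB   */

  /* A malloc failure hands the new data straight back, so OOM is
  ** detectable by comparing the return value with the data passed in: */
  if( sqlite3Fts2HashInsert(&h, "k", 1, pB)==pB ){
    /* out of memory */
  }
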
+*/ +void *sqlite3Fts2HashInsert( + fts2Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + fts2HashElem *elem; /* Used to loop thru the element list */ + fts2HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = hashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = findElementGivenHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + removeElementGivenHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (fts2HashElem*)fts2HashMalloc( sizeof(fts2HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = fts2HashMalloc( nKey ); + if( new_elem->pKey==0 ){ + fts2HashFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + fts2HashFree(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + insertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/third_party/sqlite/ext/fts2/fts2_hash.h b/third_party/sqlite/ext/fts2/fts2_hash.h new file mode 100755 index 0000000..571aa2c1 --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2_hash.h @@ -0,0 +1,110 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS2_HASH_H_ +#define _FTS2_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct fts2Hash fts2Hash; +typedef struct fts2HashElem fts2HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
+*/ +struct fts2Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + fts2HashElem *first; /* The first element of the array */ + int htsize; /* Number of buckets in the hash table */ + struct _fts2ht { /* the hash table */ + int count; /* Number of entries with this hash */ + fts2HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct fts2HashElem { + fts2HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS2_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS2_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts2HashInit is 1. +*/ +#define FTS2_HASH_STRING 1 +#define FTS2_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3Fts2HashInit(fts2Hash*, int keytype, int copyKey); +void *sqlite3Fts2HashInsert(fts2Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3Fts2HashFind(const fts2Hash*, const void *pKey, int nKey); +void sqlite3Fts2HashClear(fts2Hash*); + +/* +** Shorthand for the functions above +*/ +#define fts2HashInit sqlite3Fts2HashInit +#define fts2HashInsert sqlite3Fts2HashInsert +#define fts2HashFind sqlite3Fts2HashFind +#define fts2HashClear sqlite3Fts2HashClear + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** fts2Hash h; +** fts2HashElem *p; +** ... +** for(p=fts2HashFirst(&h); p; p=fts2HashNext(p)){ +** SomeStructure *pData = fts2HashData(p); +** // do something with pData +** } +*/ +#define fts2HashFirst(H) ((H)->first) +#define fts2HashNext(E) ((E)->next) +#define fts2HashData(E) ((E)->data) +#define fts2HashKey(E) ((E)->pKey) +#define fts2HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts2HashCount(H) ((H)->count) + +#endif /* _FTS2_HASH_H_ */ diff --git a/third_party/sqlite/ext/fts2/fts2_icu.c b/third_party/sqlite/ext/fts2/fts2_icu.c new file mode 100755 index 0000000..917c2ec --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2_icu.c @@ -0,0 +1,258 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a tokenizer for fts2 based on the ICU library. 
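
One idiom worth calling out before moving on: collectSegmentTerms() in fts2.c (earlier in this change) stores a counter directly in the data pointer rather than allocating one, then walks the result with the macros above. A sketch of that pattern:

  #include <stdio.h>
  #include <stdint.h>
  #include "fts2_hash.h"

  /* "Increment" the count stored in the data pointer for zTerm. */
  static void count_term(fts2Hash *pH, const char *zTerm, int nTerm){
    void *pOld = fts2HashFind(pH, zTerm, nTerm);      /* NULL on first hit */
    fts2HashInsert(pH, zTerm, nTerm, (void *)((char *)pOld + 1));
  }

  static void print_counts(fts2Hash *pH){
    fts2HashElem *e;
    for(e=fts2HashFirst(pH); e; e=fts2HashNext(e)){
      printf("%.*s: %d\n", fts2HashKeysize(e), (const char *)fts2HashKey(e),
             (int)(intptr_t)fts2HashData(e));
    }
  }
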
+** +** $Id: fts2_icu.c,v 1.2 2008/07/22 22:20:50 shess Exp $ +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) +#ifdef SQLITE_ENABLE_ICU + +#include <assert.h> +#include <string.h> +#include "fts2_tokenizer.h" + +#include <unicode/ubrk.h> +#include <unicode/ucol.h> +#include <unicode/ustring.h> +#include <unicode/utf16.h> + +typedef struct IcuTokenizer IcuTokenizer; +typedef struct IcuCursor IcuCursor; + +struct IcuTokenizer { + sqlite3_tokenizer base; + char *zLocale; +}; + +struct IcuCursor { + sqlite3_tokenizer_cursor base; + + UBreakIterator *pIter; /* ICU break-iterator object */ + int nChar; /* Number of UChar elements in pInput */ + UChar *aChar; /* Copy of input using utf-16 encoding */ + int *aOffset; /* Offsets of each character in utf-8 input */ + + int nBuffer; + char *zBuffer; + + int iToken; +}; + +/* +** Create a new tokenizer instance. +*/ +static int icuCreate( + int argc, /* Number of entries in argv[] */ + const char * const *argv, /* Tokenizer creation arguments */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ +){ + IcuTokenizer *p; + int n = 0; + + if( argc>0 ){ + n = strlen(argv[0])+1; + } + p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n); + if( !p ){ + return SQLITE_NOMEM; + } + memset(p, 0, sizeof(IcuTokenizer)); + + if( n ){ + p->zLocale = (char *)&p[1]; + memcpy(p->zLocale, argv[0], n); + } + + *ppTokenizer = (sqlite3_tokenizer *)p; + + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int icuDestroy(sqlite3_tokenizer *pTokenizer){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int icuOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, /* Input string */ + int nInput, /* Length of zInput in bytes */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + IcuTokenizer *p = (IcuTokenizer *)pTokenizer; + IcuCursor *pCsr; + + const int32_t opt = U_FOLD_CASE_DEFAULT; + UErrorCode status = U_ZERO_ERROR; + int nChar; + + UChar32 c; + int iInput = 0; + int iOut = 0; + + *ppCursor = 0; + + if( -1 == nInput ) nInput = strlen(zInput); + nChar = nInput+1; + pCsr = (IcuCursor *)sqlite3_malloc( + sizeof(IcuCursor) + /* IcuCursor */ + nChar * sizeof(UChar) + /* IcuCursor.aChar[] */ + (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */ + ); + if( !pCsr ){ + return SQLITE_NOMEM; + } + memset(pCsr, 0, sizeof(IcuCursor)); + pCsr->aChar = (UChar *)&pCsr[1]; + pCsr->aOffset = (int *)&pCsr->aChar[nChar]; + + pCsr->aOffset[iOut] = iInput; + U8_NEXT(zInput, iInput, nInput, c); + while( c>0 ){ + int isError = 0; + c = u_foldCase(c, opt); + U16_APPEND(pCsr->aChar, iOut, nChar, c, isError); + if( isError ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->aOffset[iOut] = iInput; + + if( iInput<nInput ){ + U8_NEXT(zInput, iInput, nInput, c); + }else{ + c = 0; + } + } + + pCsr->pIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status); + if( !U_SUCCESS(status) ){ + sqlite3_free(pCsr); + return SQLITE_ERROR; + } + pCsr->nChar = iOut; + + ubrk_first(pCsr->pIter); + *ppCursor = (sqlite3_tokenizer_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to icuOpen(). 
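
A tokenizer built this way is selected per-table at CREATE time. The exact argument syntax is defined by fts2's tokenizer plumbing rather than this file; as an illustrative sketch (locale string hypothetical, SQLITE_ENABLE_ICU required):

  /* Sketch: create an fts2 table that tokenizes with ICU word-break
  ** rules for a given locale. */
  rc = sqlite3_exec(db,
      "CREATE VIRTUAL TABLE zh_docs USING fts2(content, tokenize icu zh_CN)",
      0, 0, 0);
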
+*/ +static int icuClose(sqlite3_tokenizer_cursor *pCursor){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + ubrk_close(pCsr->pIter); + sqlite3_free(pCsr->zBuffer); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. +*/ +static int icuNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + IcuCursor *pCsr = (IcuCursor *)pCursor; + + int iStart = 0; + int iEnd = 0; + int nByte = 0; + + while( iStart==iEnd ){ + UChar32 c; + + iStart = ubrk_current(pCsr->pIter); + iEnd = ubrk_next(pCsr->pIter); + if( iEnd==UBRK_DONE ){ + return SQLITE_DONE; + } + + while( iStart<iEnd ){ + int iWhite = iStart; + U16_NEXT(pCsr->aChar, iWhite, pCsr->nChar, c); + if( u_isspace(c) ){ + iStart = iWhite; + }else{ + break; + } + } + assert(iStart<=iEnd); + } + + do { + UErrorCode status = U_ZERO_ERROR; + if( nByte ){ + char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); + if( !zNew ){ + return SQLITE_NOMEM; + } + pCsr->zBuffer = zNew; + pCsr->nBuffer = nByte; + } + + u_strToUTF8( + pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ + &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ + &status /* Output success/failure */ + ); + } while( nByte>pCsr->nBuffer ); + + *ppToken = pCsr->zBuffer; + *pnBytes = nByte; + *piStartOffset = pCsr->aOffset[iStart]; + *piEndOffset = pCsr->aOffset[iEnd]; + *piPosition = pCsr->iToken++; + + return SQLITE_OK; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module icuTokenizerModule = { + 0, /* iVersion */ + icuCreate, /* xCreate */ + icuDestroy, /* xCreate */ + icuOpen, /* xOpen */ + icuClose, /* xClose */ + icuNext, /* xNext */ +}; + +/* +** Set *ppModule to point at the implementation of the ICU tokenizer. +*/ +void sqlite3Fts2IcuTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &icuTokenizerModule; +} + +#endif /* defined(SQLITE_ENABLE_ICU) */ +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/third_party/sqlite/ext/fts2/fts2_porter.c b/third_party/sqlite/ext/fts2/fts2_porter.c new file mode 100755 index 0000000..97a95c8 --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2_porter.c @@ -0,0 +1,642 @@ +/* +** 2006 September 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Implementation of the full-text-search tokenizer that implements +** a Porter stemmer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
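
What the stemming buys you, before diving into the implementation: inflected forms in the document and in the query are reduced to the same token, so they match each other. A sketch (table and text hypothetical):

  #include <sqlite3.h>

  /* Sketch: 'connecting', 'connections' and 'connected' all stem to
  ** 'connect', so the query below matches the inserted row. */
  static int porter_demo(sqlite3 *db){
    return sqlite3_exec(db,
        "CREATE VIRTUAL TABLE docs USING fts2(content, tokenize porter);"
        "INSERT INTO docs(content) VALUES('connecting the connections');"
        "SELECT * FROM docs WHERE docs MATCH 'connected';",
        0, 0, 0);
  }
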
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)
+
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "fts2_tokenizer.h"
+
+/*
+** Class derived from sqlite3_tokenizer
+*/
+typedef struct porter_tokenizer {
+  sqlite3_tokenizer base;      /* Base class */
+} porter_tokenizer;
+
+/*
+** Class derived from sqlite3_tokenizer_cursor
+*/
+typedef struct porter_tokenizer_cursor {
+  sqlite3_tokenizer_cursor base;
+  const char *zInput;          /* input we are tokenizing */
+  int nInput;                  /* size of the input */
+  int iOffset;                 /* current position in zInput */
+  int iToken;                  /* index of next token to be returned */
+  char *zToken;                /* storage for current token */
+  int nAllocated;              /* space allocated to zToken buffer */
+} porter_tokenizer_cursor;
+
+
+/* Forward declaration */
+static const sqlite3_tokenizer_module porterTokenizerModule;
+
+
+/*
+** Create a new tokenizer instance.
+*/
+static int porterCreate(
+  int argc, const char * const *argv,
+  sqlite3_tokenizer **ppTokenizer
+){
+  porter_tokenizer *t;
+  t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t));
+  if( t==NULL ) return SQLITE_NOMEM;
+  memset(t, 0, sizeof(*t));
+  *ppTokenizer = &t->base;
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int porterDestroy(sqlite3_tokenizer *pTokenizer){
+  sqlite3_free(pTokenizer);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string.  The input
+** string to be tokenized is zInput[0..nInput-1].  A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int porterOpen(
+  sqlite3_tokenizer *pTokenizer,         /* The tokenizer */
+  const char *zInput, int nInput,        /* String to be tokenized */
+  sqlite3_tokenizer_cursor **ppCursor    /* OUT: Tokenization cursor */
+){
+  porter_tokenizer_cursor *c;
+
+  c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c));
+  if( c==NULL ) return SQLITE_NOMEM;
+
+  c->zInput = zInput;
+  if( zInput==0 ){
+    c->nInput = 0;
+  }else if( nInput<0 ){
+    c->nInput = (int)strlen(zInput);
+  }else{
+    c->nInput = nInput;
+  }
+  c->iOffset = 0;              /* start tokenizing at the beginning */
+  c->iToken = 0;
+  c->zToken = NULL;            /* no space allocated, yet. */
+  c->nAllocated = 0;
+
+  *ppCursor = &c->base;
+  return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** porterOpen() above.
+*/
+static int porterClose(sqlite3_tokenizer_cursor *pCursor){
+  porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor;
+  sqlite3_free(c->zToken);
+  sqlite3_free(c);
+  return SQLITE_OK;
+}
+/*
+** Vowel or consonant
+*/
+static const char cType[] = {
+   0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0,
+   1, 1, 1, 2, 1
+};
+
+/*
+** isConsonant() and isVowel() determine whether the first character
+** in the string they point to is a consonant or a vowel, according
+** to the Porter rules.
+**
+** A consonant is any letter other than 'a', 'e', 'i', 'o', or 'u'.
+** 'Y' is a consonant unless it follows another consonant,
+** in which case it is a vowel.
+**
+** In these routines, the letters are in reverse order.  So the 'y' rule
+** is that 'y' is a consonant unless it is followed by another
+** consonant.
+*/
+static int isVowel(const char*);
+static int isConsonant(const char *z){
+  int j;
+  char x = *z;
+  if( x==0 ) return 0;
+  assert( x>='a' && x<='z' );
+  j = cType[x-'a'];
+  if( j<2 ) return j;
+  return z[1]==0 || isVowel(z + 1);
+}
+static int isVowel(const char *z){
+  int j;
+  char x = *z;
+  if( x==0 ) return 0;
+  assert( x>='a' && x<='z' );
+  j = cType[x-'a'];
+  if( j<2 ) return 1-j;
+  return isConsonant(z + 1);
+}
+
+/*
+** Let any sequence of one or more vowels be represented by V and let
+** C be a sequence of one or more consonants.  Then every word can be
+** represented as:
+**
+**           [C] (VC){m} [V]
+**
+** In prose:  A word is an optional consonant followed by zero or more
+** vowel-consonant pairs followed by an optional vowel.  "m" is the
+** number of vowel-consonant pairs.  This routine computes the value
+** of m for the first i bytes of a word.
+**
+** Return true if the m-value for z is 1 or more.  In other words,
+** return true if z contains at least one vowel that is followed
+** by a consonant.
+**
+** In this routine z[] is in reverse order.  So we are really looking
+** for an instance of a consonant followed by a vowel.
+*/
+static int m_gt_0(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m which is
+** exactly 1
+*/
+static int m_eq_1(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 1;
+  while( isConsonant(z) ){ z++; }
+  return *z==0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m>1 instead
+** of m>0
+*/
+static int m_gt_1(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/*
+** Return TRUE if there is a vowel anywhere within z[0..n-1]
+*/
+static int hasVowel(const char *z){
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/*
+** Return TRUE if the word ends in a double consonant.
+**
+** The text is reversed here. So we are really looking at
+** the first two characters of z[].
+*/
+static int doubleConsonant(const char *z){
+  return isConsonant(z) && z[0]==z[1] && isConsonant(z+1);
+}
+
+/*
+** Return TRUE if the word ends with three letters which
+** are consonant-vowel-consonant and where the final consonant
+** is not 'w', 'x', or 'y'.
+**
+** The word is reversed here.  So we are really checking the
+** first three letters and the first one cannot be in [wxy].
+*/
+static int star_oh(const char *z){
+  return
+    z[0]!=0 && isConsonant(z) &&
+    z[0]!='w' && z[0]!='x' && z[0]!='y' &&
+    z[1]!=0 && isVowel(z+1) &&
+    z[2]!=0 && isConsonant(z+2);
+}
+
+/*
+** If the word ends with zFrom and xCond() is true for the stem
+** of the word that precedes the zFrom ending, then change the
+** ending to zTo.
+**
+** The input word *pz and zFrom are both in reverse order.  zTo
+** is in normal order.
+**
+** Return TRUE if zFrom matches.  Return FALSE if zFrom does not
+** match.  Note that TRUE is returned even if xCond() fails and
+** no substitution occurs.
+*/
+static int stem(
+  char **pz,             /* The word being stemmed (Reversed) */
+  const char *zFrom,     /* If the ending matches this... (Reversed) */
+  const char *zTo,       /* ...
change the ending to this (not reversed) */ + int (*xCond)(const char*) /* Condition that must be true */ +){ + char *z = *pz; + while( *zFrom && *zFrom==*z ){ z++; zFrom++; } + if( *zFrom!=0 ) return 0; + if( xCond && !xCond(z) ) return 1; + while( *zTo ){ + *(--z) = *(zTo++); + } + *pz = z; + return 1; +} + +/* +** This is the fallback stemmer used when the porter stemmer is +** inappropriate. The input word is copied into the output with +** US-ASCII case folding. If the input word is too long (more +** than 20 bytes if it contains no digits or more than 6 bytes if +** it contains digits) then word is truncated to 20 or 6 bytes +** by taking 10 or 3 bytes from the beginning and end. +*/ +static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ + int i, mx, j; + int hasDigit = 0; + for(i=0; i<nIn; i++){ + int c = zIn[i]; + if( c>='A' && c<='Z' ){ + zOut[i] = c - 'A' + 'a'; + }else{ + if( c>='0' && c<='9' ) hasDigit = 1; + zOut[i] = c; + } + } + mx = hasDigit ? 3 : 10; + if( nIn>mx*2 ){ + for(j=mx, i=nIn-mx; i<nIn; i++, j++){ + zOut[j] = zOut[i]; + } + i = j; + } + zOut[i] = 0; + *pnOut = i; +} + + +/* +** Stem the input word zIn[0..nIn-1]. Store the output in zOut. +** zOut is at least big enough to hold nIn bytes. Write the actual +** size of the output word (exclusive of the '\0' terminator) into *pnOut. +** +** Any upper-case characters in the US-ASCII character set ([A-Z]) +** are converted to lower case. Upper-case UTF characters are +** unchanged. +** +** Words that are longer than about 20 bytes are stemmed by retaining +** a few bytes from the beginning and the end of the word. If the +** word contains digits, 3 bytes are taken from the beginning and +** 3 bytes from the end. For long words without digits, 10 bytes +** are taken from each end. US-ASCII case folding still applies. +** +** If the input word contains not digits but does characters not +** in [a-zA-Z] then no stemming is attempted and this routine just +** copies the input into the input into the output with US-ASCII +** case folding. +** +** Stemming never increases the length of the word. So there is +** no chance of overflowing the zOut buffer. +*/ +static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ + int i, j, c; + char zReverse[28]; + char *z, *z2; + if( nIn<3 || nIn>=sizeof(zReverse)-7 ){ + /* The word is too big or too small for the porter stemmer. + ** Fallback to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + for(i=0, j=sizeof(zReverse)-6; i<nIn; i++, j--){ + c = zIn[i]; + if( c>='A' && c<='Z' ){ + zReverse[j] = c + 'a' - 'A'; + }else if( c>='a' && c<='z' ){ + zReverse[j] = c; + }else{ + /* The use of a character not in [a-zA-Z] means that we fallback + ** to the copy stemmer */ + copy_stemmer(zIn, nIn, zOut, pnOut); + return; + } + } + memset(&zReverse[sizeof(zReverse)-5], 0, 5); + z = &zReverse[j+1]; + + + /* Step 1a */ + if( z[0]=='s' ){ + if( + !stem(&z, "sess", "ss", 0) && + !stem(&z, "sei", "i", 0) && + !stem(&z, "ss", "ss", 0) + ){ + z++; + } + } + + /* Step 1b */ + z2 = z; + if( stem(&z, "dee", "ee", m_gt_0) ){ + /* Do nothing. The work was all in the test */ + }else if( + (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) + && z!=z2 + ){ + if( stem(&z, "ta", "ate", 0) || + stem(&z, "lb", "ble", 0) || + stem(&z, "zi", "ize", 0) ){ + /* Do nothing. 
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
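+**
+** Concretely, the porterIdChar[] table below marks the bytes
+** '0'..'9', 'A'..'Z', '_', and 'a'..'z' as token characters, and
+** every other byte below 0x80 as a delimiter.  So, as a small
+** illustrative sketch (input invented for this example), the text
+** "porter-stemmer_2" splits at the '-' and yields the two tokens
+** "porter" and "stemmer_2", which are then passed to the stemmer.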
+*/ +static const char porterIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffset<c->nInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffset<c->nInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffset<c->nInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = sqlite3_realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts2PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/third_party/sqlite/ext/fts2/fts2_tokenizer.c b/third_party/sqlite/ext/fts2/fts2_tokenizer.c new file mode 100755 index 0000000..43eba11 --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2_tokenizer.c @@ -0,0 +1,372 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is part of an SQLite module implementing full-text search. +** This particular file implements the generic tokenizer interface. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS2 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS2 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
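+**
+** (A sketch of the extension case, with an assumed library name: if
+** this module is compiled without SQLITE_CORE into a shared library
+** named "libfts2", it could be loaded at runtime with SQLite's
+** built-in load_extension() function, e.g.
+** "SELECT load_extension('libfts2');", assuming extension loading
+** has been enabled on the connection.)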
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)
+
+
+#include "sqlite3.h"
+#include "sqlite3ext.h"
+SQLITE_EXTENSION_INIT1
+
+#include "fts2_hash.h"
+#include "fts2_tokenizer.h"
+#include <assert.h>
+#include <stddef.h>
+
+/*
+** Implementation of the SQL scalar function for accessing the underlying
+** hash table.  This function may be called as follows:
+**
+**   SELECT <function-name>(<key-name>);
+**   SELECT <function-name>(<key-name>, <pointer>);
+**
+** where <function-name> is the name passed as the second argument
+** to the sqlite3Fts2InitHashTable() function (e.g. 'fts2_tokenizer').
+**
+** If the <pointer> argument is specified, it must be a blob value
+** containing a pointer to be stored as the hash data corresponding
+** to the string <key-name>.  If <pointer> is not specified, then
+** the string <key-name> must already exist in the hash table.  Otherwise,
+** an error is returned.
+**
+** Whether or not the <pointer> argument is specified, the value returned
+** is a blob containing the pointer stored as the hash data corresponding
+** to the string <key-name> (after the hash-table is updated, if applicable).
+*/
+static void scalarFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  fts2Hash *pHash;
+  void *pPtr = 0;
+  const unsigned char *zName;
+  int nName;
+
+  assert( argc==1 || argc==2 );
+
+  pHash = (fts2Hash *)sqlite3_user_data(context);
+
+  zName = sqlite3_value_text(argv[0]);
+  nName = sqlite3_value_bytes(argv[0])+1;
+
+  if( argc==2 ){
+    void *pOld;
+    int n = sqlite3_value_bytes(argv[1]);
+    if( n!=sizeof(pPtr) ){
+      sqlite3_result_error(context, "argument type mismatch", -1);
+      return;
+    }
+    pPtr = *(void **)sqlite3_value_blob(argv[1]);
+    pOld = sqlite3Fts2HashInsert(pHash, (void *)zName, nName, pPtr);
+    if( pOld==pPtr ){
+      sqlite3_result_error(context, "out of memory", -1);
+      return;
+    }
+  }else{
+    pPtr = sqlite3Fts2HashFind(pHash, zName, nName);
+    if( !pPtr ){
+      char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
+      sqlite3_result_error(context, zErr, -1);
+      sqlite3_free(zErr);
+      return;
+    }
+  }
+
+  sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT);
+}
+
+#ifdef SQLITE_TEST
+
+#include <tcl.h>
+#include <string.h>
+
+/*
+** Implementation of a special SQL scalar function for testing tokenizers
+** designed to be used in concert with the Tcl testing framework.  This
+** function must be called with two or three arguments:
+**
+**   SELECT <function-name>(<key-name>, <input-string>);
+**   SELECT <function-name>(<key-name>, <tokenizer-arg>, <input-string>);
+**
+** where <function-name> is the name passed as the second argument
+** to the sqlite3Fts2InitHashTable() function (e.g. 'fts2_tokenizer')
+** concatenated with the string '_test' (e.g. 'fts2_tokenizer_test').
+**
+** The return value is a string that may be interpreted as a Tcl
+** list.  For each token in the <input-string>, three elements are
+** added to the returned list.  The first is the token position, the
+** second is the token text (folded, stemmed, etc.) and the third is the
+** substring of <input-string> associated with the token.
+** For example, using the built-in "simple" tokenizer:
+**
+**   SELECT fts2_tokenizer_test('simple', 'I don''t see how');
+**
+** will return the string:
+**
+**   "{0 i I 1 dont don't 2 see see 3 how how}"
+**
+*/
+static void testFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  fts2Hash *pHash;
+  sqlite3_tokenizer_module *p;
+  sqlite3_tokenizer *pTokenizer = 0;
+  sqlite3_tokenizer_cursor *pCsr = 0;
+
+  const char *zErr = 0;
+
+  const char *zName;
+  int nName;
+  const char *zInput;
+  int nInput;
+
+  const char *zArg = 0;
+
+  const char *zToken;
+  int nToken;
+  int iStart;
+  int iEnd;
+  int iPos;
+
+  Tcl_Obj *pRet;
+
+  assert( argc==2 || argc==3 );
+
+  nName = sqlite3_value_bytes(argv[0]);
+  zName = (const char *)sqlite3_value_text(argv[0]);
+  nInput = sqlite3_value_bytes(argv[argc-1]);
+  zInput = (const char *)sqlite3_value_text(argv[argc-1]);
+
+  if( argc==3 ){
+    zArg = (const char *)sqlite3_value_text(argv[1]);
+  }
+
+  pHash = (fts2Hash *)sqlite3_user_data(context);
+  p = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zName, nName+1);
+
+  if( !p ){
+    char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
+    sqlite3_result_error(context, zErr, -1);
+    sqlite3_free(zErr);
+    return;
+  }
+
+  pRet = Tcl_NewObj();
+  Tcl_IncrRefCount(pRet);
+
+  if( SQLITE_OK!=p->xCreate(zArg ? 1 : 0, &zArg, &pTokenizer) ){
+    zErr = "error in xCreate()";
+    goto finish;
+  }
+  pTokenizer->pModule = p;
+  if( SQLITE_OK!=p->xOpen(pTokenizer, zInput, nInput, &pCsr) ){
+    zErr = "error in xOpen()";
+    goto finish;
+  }
+  pCsr->pTokenizer = pTokenizer;
+
+  while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){
+    Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos));
+    Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken));
+    zToken = &zInput[iStart];
+    nToken = iEnd-iStart;
+    Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken));
+  }
+
+  if( SQLITE_OK!=p->xClose(pCsr) ){
+    zErr = "error in xClose()";
+    goto finish;
+  }
+  if( SQLITE_OK!=p->xDestroy(pTokenizer) ){
+    zErr = "error in xDestroy()";
+    goto finish;
+  }
+
+finish:
+  if( zErr ){
+    sqlite3_result_error(context, zErr, -1);
+  }else{
+    sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT);
+  }
+  Tcl_DecrRefCount(pRet);
+}
+
+static
+int registerTokenizer(
+  sqlite3 *db,
+  char *zName,
+  const sqlite3_tokenizer_module *p
+){
+  int rc;
+  sqlite3_stmt *pStmt;
+  const char zSql[] = "SELECT fts2_tokenizer(?, ?)";
+
+  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC);
+  sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC);
+  sqlite3_step(pStmt);
+
+  return sqlite3_finalize(pStmt);
+}
+
+static
+int queryTokenizer(
+  sqlite3 *db,
+  char *zName,
+  const sqlite3_tokenizer_module **pp
+){
+  int rc;
+  sqlite3_stmt *pStmt;
+  const char zSql[] = "SELECT fts2_tokenizer(?)";
+
+  *pp = 0;
+  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC);
+  if( SQLITE_ROW==sqlite3_step(pStmt) ){
+    if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){
+      memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp));
+    }
+  }
+
+  return sqlite3_finalize(pStmt);
+}
+
+void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+
+/*
+** Implementation of the scalar function fts2_tokenizer_internal_test().
+** This function is used for testing only; it is not included in the
+** build unless SQLITE_TEST is defined.
+**
+** The purpose of this is to test that the fts2_tokenizer() function
+** can be used as designed by the C code in the queryTokenizer() and
+** registerTokenizer() functions above.  These two functions are repeated
+** in the README.tokenizers file as an example, so it is important to
+** test them.
+**
+** To run the tests, evaluate the fts2_tokenizer_internal_test() scalar
+** function with no arguments.  An assert() will fail if a problem is
+** detected, i.e.:
+**
+**   SELECT fts2_tokenizer_internal_test();
+**
+*/
+static void intTestFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int rc;
+  const sqlite3_tokenizer_module *p1;
+  const sqlite3_tokenizer_module *p2;
+  sqlite3 *db = (sqlite3 *)sqlite3_user_data(context);
+
+  /* Test the query function */
+  sqlite3Fts2SimpleTokenizerModule(&p1);
+  rc = queryTokenizer(db, "simple", &p2);
+  assert( rc==SQLITE_OK );
+  assert( p1==p2 );
+  rc = queryTokenizer(db, "nosuchtokenizer", &p2);
+  assert( rc==SQLITE_ERROR );
+  assert( p2==0 );
+  assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") );
+
+  /* Test the storage function */
+  rc = registerTokenizer(db, "nosuchtokenizer", p1);
+  assert( rc==SQLITE_OK );
+  rc = queryTokenizer(db, "nosuchtokenizer", &p2);
+  assert( rc==SQLITE_OK );
+  assert( p2==p1 );
+
+  sqlite3_result_text(context, "ok", -1, SQLITE_STATIC);
+}
+
+#endif
+
+/*
+** Set up SQL objects in database db used to access the contents of
+** the hash table pointed to by argument pHash.  The hash table must
+** have been initialised to use string keys, and to take a private copy
+** of the key when a value is inserted, i.e. by a call similar to:
+**
+**    sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1);
+**
+** This function adds a scalar function (see header comment above
+** scalarFunc() in this file for details) and, if ENABLE_TABLE is
+** defined at compilation time, a temporary virtual table (see header
+** comment above struct HashTableVtab) to the database schema.  Both
+** provide read/write access to the contents of *pHash.
+**
+** The third argument to this function, zName, is used as the name
+** of both the scalar function and, if created, the virtual table.
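+**
+** A minimal setup sketch (the hash table is assumed to outlive the
+** database connection):
+**
+**   static fts2Hash hash;
+**   sqlite3Fts2HashInit(&hash, FTS2_HASH_STRING, 1);
+**   rc = sqlite3Fts2InitHashTable(db, &hash, "fts2_tokenizer");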
+*/ +int sqlite3Fts2InitHashTable( + sqlite3 *db, + fts2Hash *pHash, + const char *zName +){ + int rc = SQLITE_OK; + void *p = (void *)pHash; + const int any = SQLITE_ANY; + char *zTest = 0; + char *zTest2 = 0; + +#ifdef SQLITE_TEST + void *pdb = (void *)db; + zTest = sqlite3_mprintf("%s_test", zName); + zTest2 = sqlite3_mprintf("%s_internal_test", zName); + if( !zTest || !zTest2 ){ + rc = SQLITE_NOMEM; + } +#endif + + if( rc!=SQLITE_OK + || (rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0)) +#ifdef SQLITE_TEST + || (rc = sqlite3_create_function(db, zTest, 2, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest, 3, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0)) +#endif + ); + + sqlite3_free(zTest); + sqlite3_free(zTest2); + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/third_party/sqlite/ext/fts2/fts2_tokenizer.h b/third_party/sqlite/ext/fts2/fts2_tokenizer.h new file mode 100755 index 0000000..8c256b2 --- /dev/null +++ b/third_party/sqlite/ext/fts2/fts2_tokenizer.h @@ -0,0 +1,145 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS2_TOKENIZER_H_ +#define _FTS2_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. When a new tokenizer +** implementation is registered, the caller provides a pointer to +** an sqlite3_tokenizer_module containing pointers to the callback +** functions that make up an implementation. +** +** When an fts2 table is created, it passes any arguments passed to +** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the +** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer +** implementation. The xCreate() function in turn returns an +** sqlite3_tokenizer structure representing the specific tokenizer to +** be used for the fts2 table (customized by the tokenizer clause arguments). +** +** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() +** method is called. It returns an sqlite3_tokenizer_cursor object +** that may be used to tokenize a specific input buffer based on +** the tokenization rules supplied by a specific sqlite3_tokenizer +** object. +*/ +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; + +struct sqlite3_tokenizer_module { + + /* + ** Structure version. Should always be set to 0. + */ + int iVersion; + + /* + ** Create a new tokenizer. 
+  ** The values in the argv[] array are the
+  ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL
+  ** TABLE statement that created the fts2 table.  For example, if
+  ** the following SQL is executed:
+  **
+  **   CREATE .. USING fts2( ... , tokenizer <tokenizer-name> arg1 arg2)
+  **
+  ** then argc is set to 2, and the argv[] array contains pointers
+  ** to the strings "arg1" and "arg2".
+  **
+  ** This method should return either SQLITE_OK (0), or an SQLite error
+  ** code.  If SQLITE_OK is returned, then *ppTokenizer should be set
+  ** to point at the newly created tokenizer structure.  The generic
+  ** sqlite3_tokenizer.pModule variable should not be initialised by
+  ** this callback.  The caller will do so.
+  */
+  int (*xCreate)(
+    int argc,                           /* Size of argv array */
+    const char *const*argv,             /* Tokenizer argument strings */
+    sqlite3_tokenizer **ppTokenizer     /* OUT: Created tokenizer */
+  );
+
+  /*
+  ** Destroy an existing tokenizer.  The fts2 module calls this method
+  ** exactly once for each successful call to xCreate().
+  */
+  int (*xDestroy)(sqlite3_tokenizer *pTokenizer);
+
+  /*
+  ** Create a tokenizer cursor to tokenize an input buffer.  The caller
+  ** is responsible for ensuring that the input buffer remains valid
+  ** until the cursor is closed (using the xClose() method).
+  */
+  int (*xOpen)(
+    sqlite3_tokenizer *pTokenizer,       /* Tokenizer object */
+    const char *pInput, int nBytes,      /* Input buffer */
+    sqlite3_tokenizer_cursor **ppCursor  /* OUT: Created tokenizer cursor */
+  );
+
+  /*
+  ** Destroy an existing tokenizer cursor.  The fts2 module calls this
+  ** method exactly once for each successful call to xOpen().
+  */
+  int (*xClose)(sqlite3_tokenizer_cursor *pCursor);
+
+  /*
+  ** Retrieve the next token from the tokenizer cursor pCursor.  This
+  ** method should either return SQLITE_OK and set the values of the
+  ** "OUT" variables identified below, or SQLITE_DONE to indicate that
+  ** the end of the buffer has been reached, or an SQLite error code.
+  **
+  ** *ppToken should be set to point at a buffer containing the
+  ** normalized version of the token (i.e. after any case-folding and/or
+  ** stemming has been performed).  *pnBytes should be set to the length
+  ** of this buffer in bytes.  The input text that generated the token is
+  ** identified by the byte offsets returned in *piStartOffset and
+  ** *piEndOffset.
+  **
+  ** The buffer that *ppToken is set to point at is managed by the
+  ** tokenizer implementation.  It is only required to be valid until
+  ** the next call to xNext() or xClose().
+  */
+  /* TODO(shess) current implementation requires pInput to be
+  ** nul-terminated.  This should either be fixed, or pInput/nBytes
+  ** should be converted to zInput.
+  */
+  int (*xNext)(
+    sqlite3_tokenizer_cursor *pCursor,   /* Tokenizer cursor */
+    const char **ppToken, int *pnBytes,  /* OUT: Normalized text for token */
+    int *piStartOffset,  /* OUT: Byte offset of token in input buffer */
+    int *piEndOffset,    /* OUT: Byte offset of end of token in input buffer */
+    int *piPosition      /* OUT: Number of tokens returned before this one */
+  );
+};
+
+struct sqlite3_tokenizer {
+  const sqlite3_tokenizer_module *pModule;  /* The module for this tokenizer */
+  /* Tokenizer implementations will typically add additional fields */
+};
+
+struct sqlite3_tokenizer_cursor {
+  sqlite3_tokenizer *pTokenizer;       /* Tokenizer for this cursor. */
+  /* Tokenizer implementations will typically add additional fields */
+};
+
+#endif /* _FTS2_TOKENIZER_H_ */
diff --git a/third_party/sqlite/ext/fts2/fts2_tokenizer1.c b/third_party/sqlite/ext/fts2/fts2_tokenizer1.c
new file mode 100755
index 0000000..f2ba49e
--- /dev/null
+++ b/third_party/sqlite/ext/fts2/fts2_tokenizer1.c
@@ -0,0 +1,230 @@
+/*
+** 2006 Oct 10
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Implementation of the "simple" full-text-search tokenizer.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+**     * The FTS2 module is being built as an extension
+**       (in which case SQLITE_CORE is not defined), or
+**
+**     * The FTS2 module is being built into the core of
+**       SQLite (in which case SQLITE_ENABLE_FTS2 is defined).
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)
+
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "fts2_tokenizer.h"
+
+typedef struct simple_tokenizer {
+  sqlite3_tokenizer base;
+  char delim[128];             /* flag ASCII delimiters */
+} simple_tokenizer;
+
+typedef struct simple_tokenizer_cursor {
+  sqlite3_tokenizer_cursor base;
+  const char *pInput;          /* input we are tokenizing */
+  int nBytes;                  /* size of the input */
+  int iOffset;                 /* current position in pInput */
+  int iToken;                  /* index of next token to be returned */
+  char *pToken;                /* storage for current token */
+  int nTokenAllocated;         /* space allocated to pToken buffer */
+} simple_tokenizer_cursor;
+
+
+/* Forward declaration */
+static const sqlite3_tokenizer_module simpleTokenizerModule;
+
+static int simpleDelim(simple_tokenizer *t, unsigned char c){
+  return c<0x80 && t->delim[c];
+}
+
+/*
+** Create a new tokenizer instance.
+*/
+static int simpleCreate(
+  int argc, const char * const *argv,
+  sqlite3_tokenizer **ppTokenizer
+){
+  simple_tokenizer *t;
+
+  t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t));
+  if( t==NULL ) return SQLITE_NOMEM;
+  memset(t, 0, sizeof(*t));
+
+  /* TODO(shess) Delimiters need to remain the same from run to run,
+  ** else we need to reindex.  One solution would be a meta-table to
+  ** track such information in the database, then we'd only want this
+  ** information on the initial create.
+  */
+  if( argc>1 ){
+    int i, n = strlen(argv[1]);
+    for(i=0; i<n; i++){
+      unsigned char ch = argv[1][i];
+      /* We explicitly don't support UTF-8 delimiters for now. */
+      if( ch>=0x80 ){
+        sqlite3_free(t);
+        return SQLITE_ERROR;
+      }
+      t->delim[ch] = 1;
+    }
+  } else {
+    /* Mark non-alphanumeric ASCII characters as delimiters */
+    int i;
+    for(i=1; i<0x80; i++){
+      t->delim[i] = !isalnum(i);
+    }
+  }
+
+  *ppTokenizer = &t->base;
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int simpleDestroy(sqlite3_tokenizer *pTokenizer){
+  sqlite3_free(pTokenizer);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string.  The input
+** string to be tokenized is pInput[0..nBytes-1].  A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
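+**
+** A driving loop over this interface might look like the following
+** sketch (error handling omitted; the module, tokenizer, and output
+** variables are assumed to be set up as in testFunc() in
+** fts2_tokenizer.c):
+**
+**   pModule->xOpen(pTokenizer, zText, -1, &pCsr);
+**   pCsr->pTokenizer = pTokenizer;
+**   while( pModule->xNext(pCsr, &zToken, &nToken,
+**                         &iStart, &iEnd, &iPos)==SQLITE_OK ){
+**     // zToken[0..nToken-1] is the next normalized token
+**   }
+**   pModule->xClose(pCsr);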
+*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + sqlite3_free(c->pToken); + sqlite3_free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). +*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffset<c->nBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffset<c->nBytes && simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffset<c->nBytes && !simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = sqlite3_realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; i<n; i++){ + /* TODO(shess) This needs expansion to handle UTF-8 + ** case-insensitivity. + */ + unsigned char ch = p[iStartOffset+i]; + c->pToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts2SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/third_party/sqlite/ext/fts2/mkfts2amal.tcl b/third_party/sqlite/ext/fts2/mkfts2amal.tcl new file mode 100755 index 0000000..5c8d1e9 --- /dev/null +++ b/third_party/sqlite/ext/fts2/mkfts2amal.tcl @@ -0,0 +1,116 @@ +#!/usr/bin/tclsh +# +# This script builds a single C code file holding all of FTS2 code. +# The name of the output file is fts2amal.c. 
+# To build this file, first do:
+#
+#      make target_source
+#
+# The make target above moves all of the source code files into
+# a subdirectory named "tsrc".  (This script expects to find the files
+# there and will not work if they are not found.)
+#
+# After the "tsrc" directory has been created and populated, run
+# this script:
+#
+#      tclsh mkfts2amal.tcl
+#
+# The amalgamated FTS2 code will be written into fts2amal.c
+#
+
+# Open the output file and write a header comment at the beginning
+# of the file.
+#
+set out [open fts2amal.c w]
+set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
+puts $out [subst \
+{/******************************************************************************
+** This file is an amalgamation of separate C source files from the SQLite
+** Full Text Search extension 2 (fts2).  By combining all the individual C
+** code files into this single large file, the entire code can be compiled
+** as one translation unit.  This allows many compilers to do optimizations
+** that would not be possible if the files were compiled separately.  It also
+** makes the code easier to import into other projects.
+**
+** This amalgamation was generated on $today.
+*/}]
+
+# These are the header files used by FTS2.  The first time any of these
+# files are seen in a #include statement in the C code, include the complete
+# text of the file in-line.  The file only needs to be included once.
+#
+foreach hdr {
+   fts2.h
+   fts2_hash.h
+   fts2_tokenizer.h
+   sqlite3.h
+   sqlite3ext.h
+} {
+  set available_hdr($hdr) 1
+}
+
+# 78 stars used for comment formatting.
+set s78 \
+{*****************************************************************************}
+
+# Insert a comment into the code
+#
+proc section_comment {text} {
+  global out s78
+  set n [string length $text]
+  set nstar [expr {60 - $n}]
+  set stars [string range $s78 0 $nstar]
+  puts $out "/************** $text $stars/"
+}
+
+# Read the source file named $filename and write it into the
+# fts2amal.c output file.  If any #include statements are seen,
+# process them appropriately.
+#
+proc copy_file {filename} {
+  global seen_hdr available_hdr out
+  set tail [file tail $filename]
+  section_comment "Begin file $tail"
+  set in [open $filename r]
+  while {![eof $in]} {
+    set line [gets $in]
+    if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
+      if {[info exists available_hdr($hdr)]} {
+        if {$available_hdr($hdr)} {
+          section_comment "Include $hdr in the middle of $tail"
+          copy_file tsrc/$hdr
+          section_comment "Continuing where we left off in $tail"
+        }
+      } elseif {![info exists seen_hdr($hdr)]} {
+        set seen_hdr($hdr) 1
+        puts $out $line
+      }
+    } elseif {[regexp {^#ifdef __cplusplus} $line]} {
+      puts $out "#if 0"
+    } elseif {[regexp {^#line} $line]} {
+      # Skip #line directives.
+    } else {
+      puts $out $line
+    }
+  }
+  close $in
+  section_comment "End of $tail"
+}
+
+
+# Process the source files.  Process files containing commonly
+# used subroutines first in order to help the compiler find
+# inlining opportunities.
+#
+foreach file {
+   fts2.c
+   fts2_hash.c
+   fts2_porter.c
+   fts2_tokenizer.c
+   fts2_tokenizer1.c
+   fts2_icu.c
+} {
+  copy_file tsrc/$file
+}
+
+close $out
diff --git a/third_party/sqlite/ext/fts3/README.tokenizers b/third_party/sqlite/ext/fts3/README.tokenizers
new file mode 100755
index 0000000..83d2f9d
--- /dev/null
+++ b/third_party/sqlite/ext/fts3/README.tokenizers
@@ -0,0 +1,133 @@
+
+1. FTS3 Tokenizers
+
+  When creating a new full-text table, FTS3 allows the user to select
+  the text tokenizer implementation to be used when indexing text
+  by specifying a "tokenizer" clause as part of the CREATE VIRTUAL TABLE
+  statement:
+
+    CREATE VIRTUAL TABLE <table-name> USING fts3(
+      <columns ...> [, tokenizer <tokenizer-name> [<tokenizer-args>]]
+    );
+
+  The built-in tokenizers (valid values to pass as <tokenizer-name>) are
+  "simple" and "porter".
+
+  <tokenizer-args> should consist of zero or more white-space separated
+  arguments to pass to the selected tokenizer implementation.  The
+  interpretation of the arguments, if any, depends on the individual
+  tokenizer.
+
+2. Custom Tokenizers
+
+  FTS3 allows users to provide custom tokenizer implementations.  The
+  interface used to create a new tokenizer is defined and described in
+  the fts3_tokenizer.h source file.
+
+  Registering a new FTS3 tokenizer is similar to registering a new
+  virtual table module with SQLite.  The user passes a pointer to a
+  structure containing pointers to various callback functions that
+  make up the implementation of the new tokenizer type.  For tokenizers,
+  the structure (defined in fts3_tokenizer.h) is called
+  "sqlite3_tokenizer_module".
+
+  FTS3 does not expose a C function that users call to register new
+  tokenizer types with a database handle.  Instead, the pointer must
+  be encoded as an SQL blob value and passed to FTS3 through the SQL
+  engine by evaluating a special scalar function, "fts3_tokenizer()".
+  The fts3_tokenizer() function may be called with one or two arguments,
+  as follows:
+
+    SELECT fts3_tokenizer(<tokenizer-name>);
+    SELECT fts3_tokenizer(<tokenizer-name>, <sqlite3_tokenizer_module ptr>);
+
+  Where <tokenizer-name> is a string identifying the tokenizer and
+  <sqlite3_tokenizer_module ptr> is a pointer to an sqlite3_tokenizer_module
+  structure encoded as an SQL blob.  If the second argument is present,
+  it is registered as tokenizer <tokenizer-name> and a copy of it is
+  returned.  If only one argument is passed, a pointer to the tokenizer
+  implementation currently registered as <tokenizer-name> is returned,
+  encoded as a blob.  Or, if no such tokenizer exists, an SQL exception
+  (error) is raised.
+
+  SECURITY: If the fts3 extension is used in an environment where potentially
+  malicious users may execute arbitrary SQL (e.g. Gears), they should be
+  prevented from invoking the fts3_tokenizer() function, possibly using the
+  authorisation callback.
+
+  See "Sample code" below for an example of calling the fts3_tokenizer()
+  function from C code.
+
+3. ICU Library Tokenizers
+
+  If this extension is compiled with the SQLITE_ENABLE_ICU pre-processor
+  symbol defined, then there exists a built-in tokenizer named "icu"
+  implemented using the ICU library.  The first argument passed to the
+  xCreate() method (see fts3_tokenizer.h) of this tokenizer may be
+  an ICU locale identifier.  For example "tr_TR" for Turkish as used
+  in Turkey, or "en_AU" for English as used in Australia.  For instance:
+
+    "CREATE VIRTUAL TABLE thai_text USING fts3(text, tokenizer icu th_TH)"
+
+  The ICU tokenizer implementation is very simple.  It splits the input
+  text according to the ICU rules for finding word boundaries and discards
+  any tokens that consist entirely of white-space.  This may be suitable
+  for some applications in some locales, but not all.
If more complex + processing is required, for example to implement stemming or + discard punctuation, this can be done by creating a tokenizer + implementation that uses the ICU tokenizer as part of its implementation. + + When using the ICU tokenizer this way, it is safe to overwrite the + contents of the strings returned by the xNext() method (see + fts3_tokenizer.h). + +4. Sample code. + + The following two code samples illustrate the way C code should invoke + the fts3_tokenizer() scalar function: + + int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); + } + + int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp + ){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); + } diff --git a/third_party/sqlite/ext/fts3/README.txt b/third_party/sqlite/ext/fts3/README.txt new file mode 100755 index 0000000..517a2a0 --- /dev/null +++ b/third_party/sqlite/ext/fts3/README.txt @@ -0,0 +1,4 @@ +This folder contains source code to the second full-text search +extension for SQLite. While the API is the same, this version uses a +substantially different storage schema from fts1, so tables will need +to be rebuilt. diff --git a/third_party/sqlite/ext/fts3/fts3.c b/third_party/sqlite/ext/fts3/fts3.c new file mode 100755 index 0000000..cc6dcd8 --- /dev/null +++ b/third_party/sqlite/ext/fts3/fts3.c @@ -0,0 +1,7216 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is an SQLite module implementing full-text search. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ + +/* TODO(shess) Consider exporting this comment to an HTML file or the +** wiki. +*/ +/* The full-text index is stored in a series of b+tree (-like) +** structures called segments which map terms to doclists. The +** structures are like b+trees in layout, but are constructed from the +** bottom up in optimal fashion and are not updatable. Since trees +** are built from the bottom up, things will be described from the +** bottom up. +** +** +**** Varints **** +** The basic unit of encoding is a variable-length integer called a +** varint. 
+** We encode variable-length integers in little-endian order
+** using seven bits per byte as follows:
+**
+** KEY:
+**         A = 0xxxxxxx    7 bits of data and one flag bit
+**         B = 1xxxxxxx    7 bits of data and one flag bit
+**
+**  7 bits - A
+** 14 bits - BA
+** 21 bits - BBA
+** and so on.
+**
+** This is identical to how SQLite encodes varints (see util.c).
+**
+**
+**** Document lists ****
+** A doclist (document list) holds a docid-sorted list of hits for a
+** given term.  Doclists hold docids, and can optionally associate
+** token positions and offsets with docids.
+**
+** A DL_POSITIONS_OFFSETS doclist is stored like this:
+**
+** array {
+**   varint docid;
+**   array {                (position list for column 0)
+**     varint position;     (delta from previous position plus POS_BASE)
+**     varint startOffset;  (delta from previous startOffset)
+**     varint endOffset;    (delta from startOffset)
+**   }
+**   array {
+**     varint POS_COLUMN;   (marks start of position list for new column)
+**     varint column;       (index of new column)
+**     array {
+**       varint position;   (delta from previous position plus POS_BASE)
+**       varint startOffset;(delta from previous startOffset)
+**       varint endOffset;  (delta from startOffset)
+**     }
+**   }
+**   varint POS_END;        (marks end of positions for this document.)
+** }
+**
+** Here, array { X } means zero or more occurrences of X, adjacent in
+** memory.  A "position" is an index of a token in the token stream
+** generated by the tokenizer, while an "offset" is a byte offset,
+** both based at 0.  Note that POS_END and POS_COLUMN occur in the
+** same logical place as the position element, and act as sentinels
+** ending a position list array.
+**
+** A DL_POSITIONS doclist omits the startOffset and endOffset
+** information.  A DL_DOCIDS doclist omits both the position and
+** offset information, becoming an array of varint-encoded docids.
+**
+** On-disk data is stored as type DL_DEFAULT, so we don't serialize
+** the type.  Due to how deletion is implemented in the segmentation
+** system, on-disk doclists MUST store at least positions.
+**
+**
+**** Segment leaf nodes ****
+** Segment leaf nodes store terms and doclists, ordered by term.  Leaf
+** nodes are written using LeafWriter, and read using LeafReader (to
+** iterate through a single leaf node's data) and LeavesReader (to
+** iterate through a segment's entire leaf layer).  Leaf nodes have
+** the format:
+**
+** varint iHeight;             (height from leaf level, always 0)
+** varint nTerm;               (length of first term)
+** char pTerm[nTerm];          (content of first term)
+** varint nDoclist;            (length of term's associated doclist)
+** char pDoclist[nDoclist];    (content of doclist)
+** array {
+**                             (further terms are delta-encoded)
+**   varint nPrefix;           (length of prefix shared with previous term)
+**   varint nSuffix;           (length of unshared suffix)
+**   char pTermSuffix[nSuffix];(unshared suffix of next term)
+**   varint nDoclist;          (length of term's associated doclist)
+**   char pDoclist[nDoclist];  (content of doclist)
+** }
+**
+** Here, array { X } means zero or more occurrences of X, adjacent in
+** memory.
+**
+** Leaf nodes are broken into blocks which are stored contiguously in
+** the %_segments table in sorted order.  This means that when the end
+** of a node is reached, the next term is in the node with the next
+** greater node id.
+**
+** New data is spilled to a new leaf node when the current node
+** exceeds LEAF_MAX bytes (default 2048).
+** New data which is itself larger than STANDALONE_MIN (default 1024)
+** is placed in a standalone node (a leaf node with a single term and
+** doclist).  The goal of these settings is to pack together groups of
+** small doclists while making it efficient to directly access large
+** doclists.  The assumption is that large doclists represent terms
+** which are more likely to be query targets.
+**
+** TODO(shess) It may be useful for blocking decisions to be more
+** dynamic.  For instance, it may make more sense to have a 2.5k leaf
+** node rather than splitting into 2k and .5k nodes.  My intuition is
+** that this might extend through 2x or 4x the pagesize.
+**
+**
+**** Segment interior nodes ****
+** Segment interior nodes store blockids for subtree nodes and terms
+** to describe what data is stored by each subtree.  Interior
+** nodes are written using InteriorWriter, and read using
+** InteriorReader.  InteriorWriters are created as needed when
+** SegmentWriter creates new leaf nodes, or when an interior node
+** itself grows too big and must be split.  The format of interior
+** nodes:
+**
+** varint iHeight;           (height from leaf level, always >0)
+** varint iBlockid;          (block id of node's leftmost subtree)
+** optional {
+**   varint nTerm;           (length of first term)
+**   char pTerm[nTerm];      (content of first term)
+**   array {
+**                                (further terms are delta-encoded)
+**     varint nPrefix;            (length of shared prefix with previous term)
+**     varint nSuffix;            (length of unshared suffix)
+**     char pTermSuffix[nSuffix]; (unshared suffix of next term)
+**   }
+** }
+**
+** Here, optional { X } means an optional element, while array { X }
+** means zero or more occurrences of X, adjacent in memory.
+**
+** An interior node encodes n terms separating n+1 subtrees.  The
+** subtree blocks are contiguous, so only the first subtree's blockid
+** is encoded.  The subtree at iBlockid will contain all terms less
+** than the first term encoded (or all terms if no term is encoded).
+** Otherwise, for terms greater than or equal to pTerm[i] but less
+** than pTerm[i+1], the subtree for that term will be rooted at
+** iBlockid+i.  Interior nodes only store enough term data to
+** distinguish adjacent children (if the rightmost term of the left
+** child is "something", and the leftmost term of the right child is
+** "wicked", only "w" is stored).
+**
+** New data is spilled to a new interior node at the same height when
+** the current node exceeds INTERIOR_MAX bytes (default 2048).
+** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing
+** interior nodes and making the tree too skinny.  The interior nodes
+** at a given height are naturally tracked by interior nodes at
+** height+1, and so on.
+**
+**
+**** Segment directory ****
+** The segment directory in table %_segdir stores meta-information for
+** merging and deleting segments, and also the root node of the
+** segment's tree.
+**
+** The root node is the top node of the segment's tree after encoding
+** the entire segment, restricted to ROOT_MAX bytes (default 1024).
+** This could be either a leaf node or an interior node.  If the top
+** node requires more than ROOT_MAX bytes, it is flushed to %_segments
+** and a new root interior node is generated (which should always fit
+** within ROOT_MAX because it only needs space for 2 varints, the
+** height and the blockid of the previous root).
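+**
+** As a small worked example of the shared-prefix encoding used in
+** these nodes (terms invented for illustration): if a node holds
+** "apple" followed by "applesauce", the second term is encoded as
+** nPrefix=5, nSuffix=5, pTermSuffix="sauce", because the two terms
+** share the 5-byte prefix "apple".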
+**
+** The meta-information in the segment directory is:
+**   level               - segment level (see below)
+**   idx                 - index within level
+**                       - (level,idx uniquely identify a segment)
+**   start_block         - first leaf node
+**   leaves_end_block    - last leaf node
+**   end_block           - last block (including interior nodes)
+**   root                - contents of root node
+**
+** If the root node is a leaf node, then start_block,
+** leaves_end_block, and end_block are all 0.
+**
+**
+**** Segment merging ****
+** To amortize update costs, segments are grouped into levels and
+** merged in batches.  Each increase in level represents exponentially
+** more documents.
+**
+** New documents (actually, document updates) are tokenized and
+** written individually (using LeafWriter) to a level 0 segment, with
+** incrementing idx.  When idx reaches MERGE_COUNT (default 16), all
+** level 0 segments are merged into a single level 1 segment.  Level 1
+** is populated like level 0, and eventually MERGE_COUNT level 1
+** segments are merged to a single level 2 segment (representing
+** MERGE_COUNT^2 updates), and so on.
+**
+** A segment merge traverses all segments at a given level in
+** parallel, performing a straightforward sorted merge.  Since segment
+** leaf nodes are written into the %_segments table in order, this
+** merge traverses the underlying SQLite disk structures efficiently.
+** After the merge, all segment blocks from the merged level are
+** deleted.
+**
+** MERGE_COUNT controls how often we merge segments.  16 seems to be
+** somewhat of a sweet spot for insertion performance.  32 and 64 show
+** very similar performance numbers to 16 on insertion, though they're
+** a tiny bit slower (perhaps due to more overhead in merge-time
+** sorting).  8 is about 20% slower than 16, 4 about 50% slower than
+** 16, 2 about 66% slower than 16.
+**
+** At query time, a high MERGE_COUNT increases the number of segments
+** which need to be scanned and merged.  For instance, with 100k docs
+** inserted:
+**
+**    MERGE_COUNT   segments
+**       16           25
+**        8           12
+**        4           10
+**        2            6
+**
+** This appears to have only a moderate impact on queries for very
+** frequent terms (which are somewhat dominated by segment merge
+** costs), and infrequent and non-existent terms still seem to be fast
+** even with many segments.
+**
+** TODO(shess) That said, it would be nice to have a better query-side
+** argument for MERGE_COUNT of 16.  Also, it is possible/likely that
+** optimizations to things like doclist merging will swing the sweet
+** spot around.
+**
+**
+**
+**** Handling of deletions and updates ****
+** Since we're using a segmented structure, with no docid-oriented
+** index into the term index, we clearly cannot simply update the term
+** index when a document is deleted or updated.  For deletions, we
+** write an empty doclist (varint(docid) varint(POS_END)), for updates
+** we simply write the new doclist.  Segment merges overwrite older
+** data for a particular docid with newer data, so deletes or updates
+** will eventually overtake the earlier data and knock it out.  The
+** query logic likewise merges doclists so that newer data knocks out
+** older data.
+**
+** TODO(shess) Provide a VACUUM type operation to clear out all
+** deletions and duplications.  This would basically be a forced merge
+** into a single segment.
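+**
+** To make the deletion encoding concrete (docid invented for
+** illustration): deleting docid 42 writes the empty doclist
+** varint(42) varint(POS_END), which is the two bytes 0x2A 0x00,
+** since values below 128 encode as a single varint byte with the
+** high bit clear.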
+*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "fts3.h" +#include "fts3_hash.h" +#include "fts3_tokenizer.h" +#ifndef SQLITE_CORE +# include "sqlite3ext.h" + SQLITE_EXTENSION_INIT1 +#endif + + +/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it +** would be nice to order the file better, perhaps something along the +** lines of: +** +** - utility functions +** - table setup functions +** - table update functions +** - table query functions +** +** Put the query functions last because they're likely to reference +** typedefs or functions from the table update section. +*/ + +#if 0 +# define FTSTRACE(A) printf A; fflush(stdout) +#else +# define FTSTRACE(A) +#endif + +/* +** Default span for NEAR operators. +*/ +#define SQLITE_FTS3_DEFAULT_NEAR_PARAM 10 + +/* It is not safe to call isspace(), tolower(), or isalnum() on +** hi-bit-set characters. This is the same solution used in the +** tokenizer. +*/ +/* TODO(shess) The snippet-generation code should be using the +** tokenizer-generated tokens rather than doing its own local +** tokenization. +*/ +/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ +static int safe_isspace(char c){ + return (c&0x80)==0 ? isspace(c) : 0; +} +static int safe_tolower(char c){ + return (c&0x80)==0 ? tolower(c) : c; +} +static int safe_isalnum(char c){ + return (c&0x80)==0 ? isalnum(c) : 0; +} + +typedef enum DocListType { + DL_DOCIDS, /* docids only */ + DL_POSITIONS, /* docids + positions */ + DL_POSITIONS_OFFSETS /* docids + positions + offsets */ +} DocListType; + +/* +** By default, only positions and not offsets are stored in the doclists. +** To change this so that offsets are stored too, compile with +** +** -DDL_DEFAULT=DL_POSITIONS_OFFSETS +** +** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted +** into (no deletes or updates). +*/ +#ifndef DL_DEFAULT +# define DL_DEFAULT DL_POSITIONS +#endif + +enum { + POS_END = 0, /* end of this position list */ + POS_COLUMN, /* followed by new column number */ + POS_BASE +}; + +/* MERGE_COUNT controls how often we merge segments (see comment at +** top of file). +*/ +#define MERGE_COUNT 16 + +/* utility functions */ + +/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single +** record to prevent errors of the form: +** +** my_function(SomeType *b){ +** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) +** } +*/ +/* TODO(shess) Obvious candidates for a header file. */ +#define CLEAR(b) memset(b, '\0', sizeof(*(b))) + +#ifndef NDEBUG +# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) +#else +# define SCRAMBLE(b) +#endif + +/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ +#define VARINT_MAX 10 + +/* Write a 64-bit variable-length integer to memory starting at p[0]. + * The length of data written will be between 1 and VARINT_MAX bytes. + * The number of bytes written is returned. */ +static int fts3PutVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +/* Read a 64-bit variable-length integer from memory starting at p[0]. 
+ * Return the number of bytes read, or 0 on error. + * The value is stored in *v. */ +static int fts3GetVarint(const char *p, sqlite_int64 *v){ + const unsigned char *q = (const unsigned char *) p; + sqlite_uint64 x = 0, y = 1; + while( (*q & 0x80) == 0x80 ){ + x += y * (*q++ & 0x7f); + y <<= 7; + if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ + assert( 0 ); + return 0; + } + } + x += y * (*q++); + *v = (sqlite_int64) x; + return (int) (q - (unsigned char *)p); +} + +static int fts3GetVarint32(const char *p, int *pi){ + sqlite_int64 i; + int ret = fts3GetVarint(p, &i); + *pi = (int) i; + assert( *pi==i ); + return ret; +} + +/*******************************************************************/ +/* DataBuffer is used to collect data into a buffer in piecemeal +** fashion. It implements the usual distinction between amount of +** data currently stored (nData) and buffer capacity (nCapacity). +** +** dataBufferInit - create a buffer with given initial capacity. +** dataBufferReset - forget buffer's data, retaining capacity. +** dataBufferDestroy - free buffer's data. +** dataBufferSwap - swap contents of two buffers. +** dataBufferExpand - expand capacity without adding data. +** dataBufferAppend - append data. +** dataBufferAppend2 - append two pieces of data at once. +** dataBufferReplace - replace buffer's data. +*/ +typedef struct DataBuffer { + char *pData; /* Pointer to malloc'ed buffer. */ + int nCapacity; /* Size of pData buffer. */ + int nData; /* End of data loaded into pData. */ +} DataBuffer; + +static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ + assert( nCapacity>=0 ); + pBuffer->nData = 0; + pBuffer->nCapacity = nCapacity; + pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); +} +static void dataBufferReset(DataBuffer *pBuffer){ + pBuffer->nData = 0; +} +static void dataBufferDestroy(DataBuffer *pBuffer){ + if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); + SCRAMBLE(pBuffer); +} +static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ + DataBuffer tmp = *pBuffer1; + *pBuffer1 = *pBuffer2; + *pBuffer2 = tmp; +} +static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ + assert( nAddCapacity>0 ); + /* TODO(shess) Consider expanding more aggressively. Note that the + ** underlying malloc implementation may take care of such things for + ** us already. + */ + if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ + pBuffer->nCapacity = pBuffer->nData+nAddCapacity; + pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity); + } +} +static void dataBufferAppend(DataBuffer *pBuffer, + const char *pSource, int nSource){ + assert( nSource>0 && pSource!=NULL ); + dataBufferExpand(pBuffer, nSource); + memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); + pBuffer->nData += nSource; +} +static void dataBufferAppend2(DataBuffer *pBuffer, + const char *pSource1, int nSource1, + const char *pSource2, int nSource2){ + assert( nSource1>0 && pSource1!=NULL ); + assert( nSource2>0 && pSource2!=NULL ); + dataBufferExpand(pBuffer, nSource1+nSource2); + memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); + memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); + pBuffer->nData += nSource1+nSource2; +} +static void dataBufferReplace(DataBuffer *pBuffer, + const char *pSource, int nSource){ + dataBufferReset(pBuffer); + dataBufferAppend(pBuffer, pSource, nSource); +} + +/* StringBuffer is a null-terminated version of DataBuffer. 
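+**
+** A usage sketch built only from the helpers defined below:
+**
+**   StringBuffer sb;
+**   initStringBuffer(&sb);
+**   append(&sb, "hello");
+**   appendWhiteSpace(&sb);
+**   append(&sb, "world");
+**   // stringBufferData(&sb) now returns the C string "hello world"
+**   stringBufferDestroy(&sb);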
*/ +typedef struct StringBuffer { + DataBuffer b; /* Includes null terminator. */ +} StringBuffer; + +static void initStringBuffer(StringBuffer *sb){ + dataBufferInit(&sb->b, 100); + dataBufferReplace(&sb->b, "", 1); +} +static int stringBufferLength(StringBuffer *sb){ + return sb->b.nData-1; +} +static char *stringBufferData(StringBuffer *sb){ + return sb->b.pData; +} +static void stringBufferDestroy(StringBuffer *sb){ + dataBufferDestroy(&sb->b); +} + +static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ + assert( sb->b.nData>0 ); + if( nFrom>0 ){ + sb->b.nData--; + dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); + } +} +static void append(StringBuffer *sb, const char *zFrom){ + nappend(sb, zFrom, strlen(zFrom)); +} + +/* Append a list of strings separated by commas. */ +static void appendList(StringBuffer *sb, int nString, char **azString){ + int i; + for(i=0; i<nString; ++i){ + if( i>0 ) append(sb, ", "); + append(sb, azString[i]); + } +} + +static int endsInWhiteSpace(StringBuffer *p){ + return stringBufferLength(p)>0 && + safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); +} + +/* If the StringBuffer ends in something other than white space, add a +** single space character to the end. +*/ +static void appendWhiteSpace(StringBuffer *p){ + if( stringBufferLength(p)==0 ) return; + if( !endsInWhiteSpace(p) ) append(p, " "); +} + +/* Remove white space from the end of the StringBuffer */ +static void trimWhiteSpace(StringBuffer *p){ + while( endsInWhiteSpace(p) ){ + p->b.pData[--p->b.nData-1] = '\0'; + } +} + +/*******************************************************************/ +/* DLReader is used to read document elements from a doclist. The +** current docid is cached, so dlrDocid() is fast. DLReader does not +** own the doclist buffer. +** +** dlrAtEnd - true if there's no more data to read. +** dlrDocid - docid of current document. +** dlrDocData - doclist data for current document (including docid). +** dlrDocDataBytes - length of same. +** dlrAllDataBytes - length of all remaining data. +** dlrPosData - position data for current document. +** dlrPosDataLen - length of pos data for current document (incl POS_END). +** dlrStep - step to current document. +** dlrInit - initial for doclist of given type against given data. +** dlrDestroy - clean up. +** +** Expected usage is something like: +** +** DLReader reader; +** dlrInit(&reader, pData, nData); +** while( !dlrAtEnd(&reader) ){ +** // calls to dlrDocid() and kin. +** dlrStep(&reader); +** } +** dlrDestroy(&reader); +*/ +typedef struct DLReader { + DocListType iType; + const char *pData; + int nData; + + sqlite_int64 iDocid; + int nElement; +} DLReader; + +static int dlrAtEnd(DLReader *pReader){ + assert( pReader->nData>=0 ); + return pReader->nData==0; +} +static sqlite_int64 dlrDocid(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->iDocid; +} +static const char *dlrDocData(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->pData; +} +static int dlrDocDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nElement; +} +static int dlrAllDataBytes(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + return pReader->nData; +} +/* TODO(shess) Consider adding a field to track iDocid varint length +** to make these two functions faster. This might matter (a tiny bit) +** for queries. 
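The StringBuffer helpers above exist mainly to assemble SQL text; contentInsertStatement() further below is the real instance of the pattern. A hedged sketch with hypothetical table and column names:

static char *exampleBuildInsertSql(void){
  char *azCols[] = { "a", "b" };        /* hypothetical column names */
  StringBuffer sb;
  initStringBuffer(&sb);
  append(&sb, "insert into t (");
  appendList(&sb, 2, azCols);           /* appends "a, b" */
  append(&sb, ") values (?, ?)");
  return stringBufferData(&sb);         /* caller frees with sqlite3_free() */
}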
+*/ +static const char *dlrPosData(DLReader *pReader){ + sqlite_int64 iDummy; + int n = fts3GetVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->pData+n; +} +static int dlrPosDataLen(DLReader *pReader){ + sqlite_int64 iDummy; + int n = fts3GetVarint(pReader->pData, &iDummy); + assert( !dlrAtEnd(pReader) ); + return pReader->nElement-n; +} +static void dlrStep(DLReader *pReader){ + assert( !dlrAtEnd(pReader) ); + + /* Skip past current doclist element. */ + assert( pReader->nElement<=pReader->nData ); + pReader->pData += pReader->nElement; + pReader->nData -= pReader->nElement; + + /* If there is more data, read the next doclist element. */ + if( pReader->nData!=0 ){ + sqlite_int64 iDocidDelta; + int iDummy, n = fts3GetVarint(pReader->pData, &iDocidDelta); + pReader->iDocid += iDocidDelta; + if( pReader->iType>=DL_POSITIONS ){ + assert( n<pReader->nData ); + while( 1 ){ + n += fts3GetVarint32(pReader->pData+n, &iDummy); + assert( n<=pReader->nData ); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += fts3GetVarint32(pReader->pData+n, &iDummy); + assert( n<pReader->nData ); + }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += fts3GetVarint32(pReader->pData+n, &iDummy); + n += fts3GetVarint32(pReader->pData+n, &iDummy); + assert( n<pReader->nData ); + } + } + } + pReader->nElement = n; + assert( pReader->nElement<=pReader->nData ); + } +} +static void dlrInit(DLReader *pReader, DocListType iType, + const char *pData, int nData){ + assert( pData!=NULL && nData!=0 ); + pReader->iType = iType; + pReader->pData = pData; + pReader->nData = nData; + pReader->nElement = 0; + pReader->iDocid = 0; + + /* Load the first element's data. There must be a first element. */ + dlrStep(pReader); +} +static void dlrDestroy(DLReader *pReader){ + SCRAMBLE(pReader); +} + +#ifndef NDEBUG +/* Verify that the doclist can be validly decoded. Also returns the +** last docid found because it is convenient in other assertions for +** DLWriter. +*/ +static void docListValidate(DocListType iType, const char *pData, int nData, + sqlite_int64 *pLastDocid){ + sqlite_int64 iPrevDocid = 0; + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + while( nData!=0 ){ + sqlite_int64 iDocidDelta; + int n = fts3GetVarint(pData, &iDocidDelta); + iPrevDocid += iDocidDelta; + if( iType>DL_DOCIDS ){ + int iDummy; + while( 1 ){ + n += fts3GetVarint32(pData+n, &iDummy); + if( iDummy==POS_END ) break; + if( iDummy==POS_COLUMN ){ + n += fts3GetVarint32(pData+n, &iDummy); + }else if( iType>DL_POSITIONS ){ + n += fts3GetVarint32(pData+n, &iDummy); + n += fts3GetVarint32(pData+n, &iDummy); + } + assert( n<=nData ); + } + } + assert( n<=nData ); + pData += n; + nData -= n; + } + if( pLastDocid ) *pLastDocid = iPrevDocid; +} +#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) +#else +#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) +#endif + +/*******************************************************************/ +/* DLWriter is used to write doclist data to a DataBuffer. DLWriter +** always appends to the buffer and does not own it. +** +** dlwInit - initialize to write a given type doclistto a buffer. +** dlwDestroy - clear the writer's memory. Does not free buffer. +** dlwAppend - append raw doclist data to buffer. +** dlwCopy - copy next doclist from reader to writer. +** dlwAdd - construct doclist element and append to buffer. +** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). 
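To make the reader/writer pairing concrete, a sketch of a docids-only round trip through the DLWriter defined next and the DLReader above (editorial illustration, not part of the patch):

static void exampleDoclistRoundTrip(void){
  DataBuffer buf;
  DLWriter w;
  DLReader r;
  dataBufferInit(&buf, 0);
  dlwInit(&w, DL_DOCIDS, &buf);
  dlwAdd(&w, 5);                        /* encoded as varint 5 */
  dlwAdd(&w, 7);                        /* delta-encoded as varint 2 */
  dlwDestroy(&w);
  dlrInit(&r, DL_DOCIDS, buf.pData, buf.nData);
  assert( dlrDocid(&r)==5 );
  dlrStep(&r);
  assert( dlrDocid(&r)==7 && dlrAllDataBytes(&r)==1 );
  dlrDestroy(&r);
  dataBufferDestroy(&buf);
}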
+*/ +typedef struct DLWriter { + DocListType iType; + DataBuffer *b; + sqlite_int64 iPrevDocid; +#ifndef NDEBUG + int has_iPrevDocid; +#endif +} DLWriter; + +static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ + pWriter->b = b; + pWriter->iType = iType; + pWriter->iPrevDocid = 0; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 0; +#endif +} +static void dlwDestroy(DLWriter *pWriter){ + SCRAMBLE(pWriter); +} +/* iFirstDocid is the first docid in the doclist in pData. It is +** needed because pData may point within a larger doclist, in which +** case the first item would be delta-encoded. +** +** iLastDocid is the final docid in the doclist in pData. It is +** needed to create the new iPrevDocid for future delta-encoding. The +** code could decode the passed doclist to recreate iLastDocid, but +** the only current user (docListMerge) already has decoded this +** information. +*/ +/* TODO(shess) This has become just a helper for docListMerge. +** Consider a refactor to make this cleaner. +*/ +static void dlwAppend(DLWriter *pWriter, + const char *pData, int nData, + sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ + sqlite_int64 iDocid = 0; + char c[VARINT_MAX]; + int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ +#ifndef NDEBUG + sqlite_int64 iLastDocidDelta; +#endif + + /* Recode the initial docid as delta from iPrevDocid. */ + nFirstOld = fts3GetVarint(pData, &iDocid); + assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) ); + nFirstNew = fts3PutVarint(c, iFirstDocid-pWriter->iPrevDocid); + + /* Verify that the incoming doclist is valid AND that it ends with + ** the expected docid. This is essential because we'll trust this + ** docid in future delta-encoding. + */ + ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); + assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); + + /* Append recoded initial docid and everything else. Rest of docids + ** should have been delta-encoded from previous initial docid. + */ + if( nFirstOld<nData ){ + dataBufferAppend2(pWriter->b, c, nFirstNew, + pData+nFirstOld, nData-nFirstOld); + }else{ + dataBufferAppend(pWriter->b, c, nFirstNew); + } + pWriter->iPrevDocid = iLastDocid; +} +static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ + dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), + dlrDocid(pReader), dlrDocid(pReader)); +} +static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n = fts3PutVarint(c, iDocid-pWriter->iPrevDocid); + + /* Docids must ascend. */ + assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); + assert( pWriter->iType==DL_DOCIDS ); + + dataBufferAppend(pWriter->b, c, n); + pWriter->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->has_iPrevDocid = 1; +#endif +} + +/*******************************************************************/ +/* PLReader is used to read data from a document's position list. As +** the caller steps through the list, data is cached so that varints +** only need to be decoded once. +** +** plrInit, plrDestroy - create/destroy a reader. +** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors +** plrAtEnd - at end of stream, only call plrDestroy once true. +** plrStep - step to the next element. +*/ +typedef struct PLReader { + /* These refer to the next position's data. nData will reach 0 when + ** reading the last position, so plrStep() signals EOF by setting + ** pData to NULL. 
+ */ + const char *pData; + int nData; + + DocListType iType; + int iColumn; /* the last column read */ + int iPosition; /* the last position read */ + int iStartOffset; /* the last start offset read */ + int iEndOffset; /* the last end offset read */ +} PLReader; + +static int plrAtEnd(PLReader *pReader){ + return pReader->pData==NULL; +} +static int plrColumn(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iColumn; +} +static int plrPosition(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iPosition; +} +static int plrStartOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iStartOffset; +} +static int plrEndOffset(PLReader *pReader){ + assert( !plrAtEnd(pReader) ); + return pReader->iEndOffset; +} +static void plrStep(PLReader *pReader){ + int i, n; + + assert( !plrAtEnd(pReader) ); + + if( pReader->nData==0 ){ + pReader->pData = NULL; + return; + } + + n = fts3GetVarint32(pReader->pData, &i); + if( i==POS_COLUMN ){ + n += fts3GetVarint32(pReader->pData+n, &pReader->iColumn); + pReader->iPosition = 0; + pReader->iStartOffset = 0; + n += fts3GetVarint32(pReader->pData+n, &i); + } + /* Should never see adjacent column changes. */ + assert( i!=POS_COLUMN ); + + if( i==POS_END ){ + pReader->nData = 0; + pReader->pData = NULL; + return; + } + + pReader->iPosition += i-POS_BASE; + if( pReader->iType==DL_POSITIONS_OFFSETS ){ + n += fts3GetVarint32(pReader->pData+n, &i); + pReader->iStartOffset += i; + n += fts3GetVarint32(pReader->pData+n, &i); + pReader->iEndOffset = pReader->iStartOffset+i; + } + assert( n<=pReader->nData ); + pReader->pData += n; + pReader->nData -= n; +} + +static void plrInit(PLReader *pReader, DLReader *pDLReader){ + pReader->pData = dlrPosData(pDLReader); + pReader->nData = dlrPosDataLen(pDLReader); + pReader->iType = pDLReader->iType; + pReader->iColumn = 0; + pReader->iPosition = 0; + pReader->iStartOffset = 0; + pReader->iEndOffset = 0; + plrStep(pReader); +} +static void plrDestroy(PLReader *pReader){ + SCRAMBLE(pReader); +} + +/*******************************************************************/ +/* PLWriter is used in constructing a document's position list. As a +** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. +** PLWriter writes to the associated DLWriter's buffer. +** +** plwInit - init for writing a document's poslist. +** plwDestroy - clear a writer. +** plwAdd - append position and offset information. +** plwCopy - copy next position's data from reader to writer. +** plwTerminate - add any necessary doclist terminator. +** +** Calling plwAdd() after plwTerminate() may result in a corrupt +** doclist. +*/ +/* TODO(shess) Until we've written the second item, we can cache the +** first item's information. Then we'd have three states: +** +** - initialized with docid, no positions. +** - docid and one position. +** - docid and multiple positions. +** +** Only the last state needs to actually write to dlw->b, which would +** be an improvement in the DLCollector case. +*/ +typedef struct PLWriter { + DLWriter *dlw; + + int iColumn; /* the last column written */ + int iPos; /* the last position written */ + int iOffset; /* the last start offset written */ +} PLWriter; + +/* TODO(shess) In the case where the parent is reading these values +** from a PLReader, we could optimize to a copy if that PLReader has +** the same type as pWriter. 
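Looking back at PLReader: a sketch of walking the position list of the element a DLReader is currently on, assuming a doclist of type DL_POSITIONS or DL_POSITIONS_OFFSETS (editorial illustration, not part of the patch):

static void examplePrintPositions(DLReader *pDLReader){
  PLReader r;
  plrInit(&r, pDLReader);               /* caches the first position */
  while( !plrAtEnd(&r) ){
    printf("col %d pos %d\n", plrColumn(&r), plrPosition(&r));
    plrStep(&r);
  }
  plrDestroy(&r);
}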
+*/ +static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, + ** iStartOffsetDelta, and iEndOffsetDelta. + */ + char c[5*VARINT_MAX]; + int n = 0; + + /* Ban plwAdd() after plwTerminate(). */ + assert( pWriter->iPos!=-1 ); + + if( pWriter->dlw->iType==DL_DOCIDS ) return; + + if( iColumn!=pWriter->iColumn ){ + n += fts3PutVarint(c+n, POS_COLUMN); + n += fts3PutVarint(c+n, iColumn); + pWriter->iColumn = iColumn; + pWriter->iPos = 0; + pWriter->iOffset = 0; + } + assert( iPos>=pWriter->iPos ); + n += fts3PutVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); + pWriter->iPos = iPos; + if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ + assert( iStartOffset>=pWriter->iOffset ); + n += fts3PutVarint(c+n, iStartOffset-pWriter->iOffset); + pWriter->iOffset = iStartOffset; + assert( iEndOffset>=iStartOffset ); + n += fts3PutVarint(c+n, iEndOffset-iStartOffset); + } + dataBufferAppend(pWriter->dlw->b, c, n); +} +static void plwCopy(PLWriter *pWriter, PLReader *pReader){ + plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), + plrStartOffset(pReader), plrEndOffset(pReader)); +} +static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ + char c[VARINT_MAX]; + int n; + + pWriter->dlw = dlw; + + /* Docids must ascend. */ + assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); + n = fts3PutVarint(c, iDocid-pWriter->dlw->iPrevDocid); + dataBufferAppend(pWriter->dlw->b, c, n); + pWriter->dlw->iPrevDocid = iDocid; +#ifndef NDEBUG + pWriter->dlw->has_iPrevDocid = 1; +#endif + + pWriter->iColumn = 0; + pWriter->iPos = 0; + pWriter->iOffset = 0; +} +/* TODO(shess) Should plwDestroy() also terminate the doclist? But +** then plwDestroy() would no longer be just a destructor, it would +** also be doing work, which isn't consistent with the overall idiom. +** Another option would be for plwAdd() to always append any necessary +** terminator, so that the output is always correct. But that would +** add incremental work to the common case with the only benefit being +** API elegance. Punt for now. +*/ +static void plwTerminate(PLWriter *pWriter){ + if( pWriter->dlw->iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = fts3PutVarint(c, POS_END); + dataBufferAppend(pWriter->dlw->b, c, n); + } +#ifndef NDEBUG + /* Mark as terminated for assert in plwAdd(). */ + pWriter->iPos = -1; +#endif +} +static void plwDestroy(PLWriter *pWriter){ + SCRAMBLE(pWriter); +} + +/*******************************************************************/ +/* DLCollector wraps PLWriter and DLWriter to provide a +** dynamically-allocated doclist area to use during tokenization. +** +** dlcNew - malloc up and initialize a collector. +** dlcDelete - destroy a collector and all contained items. +** dlcAddPos - append position and offset information. +** dlcAddDoclist - add the collected doclist to the given buffer. +** dlcNext - terminate the current document and open another. +*/ +typedef struct DLCollector { + DataBuffer b; + DLWriter dlw; + PLWriter plw; +} DLCollector; + +/* TODO(shess) This could also be done by calling plwTerminate() and +** dataBufferAppend(). I tried that, expecting nominal performance +** differences, but it seemed to pretty reliably be worth 1% to code +** it this way. I suspect it is the incremental malloc overhead (some +** percentage of the plwTerminate() calls will cause a realloc), so +** this might be worth revisiting if the DataBuffer implementation +** changes. 
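Putting plwInit()/plwAdd()/plwTerminate() together: a sketch that emits one DL_POSITIONS doclist element, docid 42 with tokens at positions 1 and 3 of column 0 (editorial illustration, not part of the patch):

static void examplePositionsElement(DataBuffer *pOut){
  DLWriter dlw;
  PLWriter plw;
  dlwInit(&dlw, DL_POSITIONS, pOut);
  plwInit(&plw, &dlw, 42);              /* writes the docid varint */
  plwAdd(&plw, 0, 1, 0, 0);             /* varint(POS_BASE+1); offsets unused */
  plwAdd(&plw, 0, 3, 0, 0);             /* delta: varint(POS_BASE+2) */
  plwTerminate(&plw);                   /* varint(POS_END) */
  plwDestroy(&plw);
  dlwDestroy(&dlw);
}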
+*/ +static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ + if( pCollector->dlw.iType>DL_DOCIDS ){ + char c[VARINT_MAX]; + int n = fts3PutVarint(c, POS_END); + dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); + }else{ + dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); + } +} +static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ + plwTerminate(&pCollector->plw); + plwDestroy(&pCollector->plw); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); +} +static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, + int iStartOffset, int iEndOffset){ + plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); +} + +static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ + DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); + dataBufferInit(&pCollector->b, 0); + dlwInit(&pCollector->dlw, iType, &pCollector->b); + plwInit(&pCollector->plw, &pCollector->dlw, iDocid); + return pCollector; +} +static void dlcDelete(DLCollector *pCollector){ + plwDestroy(&pCollector->plw); + dlwDestroy(&pCollector->dlw); + dataBufferDestroy(&pCollector->b); + SCRAMBLE(pCollector); + sqlite3_free(pCollector); +} + + +/* Copy the doclist data of iType in pData/nData into *out, trimming +** unnecessary data as we go. Only columns matching iColumn are +** copied, all columns copied if iColumn is -1. Elements with no +** matching columns are dropped. The output is an iOutType doclist. +*/ +/* NOTE(shess) This code is only valid after all doclists are merged. +** If this is run before merges, then doclist items which represent +** deletion will be trimmed, and will thus not effect a deletion +** during the merge. +*/ +static void docListTrim(DocListType iType, const char *pData, int nData, + int iColumn, DocListType iOutType, DataBuffer *out){ + DLReader dlReader; + DLWriter dlWriter; + + assert( iOutType<=iType ); + + dlrInit(&dlReader, iType, pData, nData); + dlwInit(&dlWriter, iOutType, out); + + while( !dlrAtEnd(&dlReader) ){ + PLReader plReader; + PLWriter plWriter; + int match = 0; + + plrInit(&plReader, &dlReader); + + while( !plrAtEnd(&plReader) ){ + if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ + if( !match ){ + plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); + match = 1; + } + plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + } + plrStep(&plReader); + } + if( match ){ + plwTerminate(&plWriter); + plwDestroy(&plWriter); + } + + plrDestroy(&plReader); + dlrStep(&dlReader); + } + dlwDestroy(&dlWriter); + dlrDestroy(&dlReader); +} + +/* Used by docListMerge() to keep doclists in the ascending order by +** docid, then ascending order by age (so the newest comes first). +*/ +typedef struct OrderedDLReader { + DLReader *pReader; + + /* TODO(shess) If we assume that docListMerge pReaders is ordered by + ** age (which we do), then we could use pReader comparisons to break + ** ties. + */ + int idx; +} OrderedDLReader; + +/* Order eof to end, then by docid asc, idx desc. */ +static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ + if( dlrAtEnd(r1->pReader) ){ + if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ + return 1; /* Only r1 atEnd(). */ + } + if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ + + if( dlrDocid(r1->pReader)<dlrDocid(r2->pReader) ) return -1; + if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; + + /* Descending on idx. 
*/ + return r2->idx-r1->idx; +} + +/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that +** p[1..n-1] is already sorted. +*/ +/* TODO(shess) Is this frequent enough to warrant a binary search? +** Before implementing that, instrument the code to check. In most +** current usage, I expect that p[0] will be less than p[1] a very +** high proportion of the time. +*/ +static void orderedDLReaderReorder(OrderedDLReader *p, int n){ + while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ + OrderedDLReader tmp = p[0]; + p[0] = p[1]; + p[1] = tmp; + n--; + p++; + } +} + +/* Given an array of doclist readers, merge their doclist elements +** into out in sorted order (by docid), dropping elements from older +** readers when there is a duplicate docid. pReaders is assumed to be +** ordered by age, oldest first. +*/ +/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably +** be fixed. +*/ +static void docListMerge(DataBuffer *out, + DLReader *pReaders, int nReaders){ + OrderedDLReader readers[MERGE_COUNT]; + DLWriter writer; + int i, n; + const char *pStart = 0; + int nStart = 0; + sqlite_int64 iFirstDocid = 0, iLastDocid = 0; + + assert( nReaders>0 ); + if( nReaders==1 ){ + dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); + return; + } + + assert( nReaders<=MERGE_COUNT ); + n = 0; + for(i=0; i<nReaders; i++){ + assert( pReaders[i].iType==pReaders[0].iType ); + readers[i].pReader = pReaders+i; + readers[i].idx = i; + n += dlrAllDataBytes(&pReaders[i]); + } + /* Conservatively size output to sum of inputs. Output should end + ** up strictly smaller than input. + */ + dataBufferExpand(out, n); + + /* Get the readers into sorted order. */ + while( i-->0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + + dlwInit(&writer, pReaders[0].iType, out); + while( !dlrAtEnd(readers[0].pReader) ){ + sqlite_int64 iDocid = dlrDocid(readers[0].pReader); + + /* If this is a continuation of the current buffer to copy, extend + ** that buffer. memcpy() seems to be more efficient if it has a + ** lots of data to copy. + */ + if( dlrDocData(readers[0].pReader)==pStart+nStart ){ + nStart += dlrDocDataBytes(readers[0].pReader); + }else{ + if( pStart!=0 ){ + dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + } + pStart = dlrDocData(readers[0].pReader); + nStart = dlrDocDataBytes(readers[0].pReader); + iFirstDocid = iDocid; + } + iLastDocid = iDocid; + dlrStep(readers[0].pReader); + + /* Drop all of the older elements with the same docid. */ + for(i=1; i<nReaders && + !dlrAtEnd(readers[i].pReader) && + dlrDocid(readers[i].pReader)==iDocid; i++){ + dlrStep(readers[i].pReader); + } + + /* Get the readers back into order. */ + while( i-->0 ){ + orderedDLReaderReorder(readers+i, nReaders-i); + } + } + + /* Copy over any remaining elements. */ + if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); + dlwDestroy(&writer); +} + +/* Helper function for posListUnion(). Compares the current position +** between left and right, returning as standard C idiom of <0 if +** left<right, >0 if left>right, and 0 if left==right. "End" always +** compares greater. +*/ +static int posListCmp(PLReader *pLeft, PLReader *pRight){ + assert( pLeft->iType==pRight->iType ); + if( pLeft->iType==DL_DOCIDS ) return 0; + + if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 
0 : 1; + if( plrAtEnd(pRight) ) return -1; + + if( plrColumn(pLeft)<plrColumn(pRight) ) return -1; + if( plrColumn(pLeft)>plrColumn(pRight) ) return 1; + + if( plrPosition(pLeft)<plrPosition(pRight) ) return -1; + if( plrPosition(pLeft)>plrPosition(pRight) ) return 1; + if( pLeft->iType==DL_POSITIONS ) return 0; + + if( plrStartOffset(pLeft)<plrStartOffset(pRight) ) return -1; + if( plrStartOffset(pLeft)>plrStartOffset(pRight) ) return 1; + + if( plrEndOffset(pLeft)<plrEndOffset(pRight) ) return -1; + if( plrEndOffset(pLeft)>plrEndOffset(pRight) ) return 1; + + return 0; +} + +/* Write the union of position lists in pLeft and pRight to pOut. +** "Union" in this case meaning "All unique position tuples". Should +** work with any doclist type, though both inputs and the output +** should be the same type. +*/ +static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ + PLReader left, right; + PLWriter writer; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pLeft->iType==pRight->iType ); + assert( pLeft->iType==pOut->iType ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + plwInit(&writer, pOut, dlrDocid(pLeft)); + + while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ + int c = posListCmp(&left, &right); + if( c<0 ){ + plwCopy(&writer, &left); + plrStep(&left); + }else if( c>0 ){ + plwCopy(&writer, &right); + plrStep(&right); + }else{ + plwCopy(&writer, &left); + plrStep(&left); + plrStep(&right); + } + } + + plwTerminate(&writer); + plwDestroy(&writer); + plrDestroy(&left); + plrDestroy(&right); +} + +/* Write the union of doclists in pLeft and pRight to pOut. For +** docids in common between the inputs, the union of the position +** lists is written. Inputs and outputs are always type DL_DEFAULT. +*/ +static void docListUnion( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ){ + if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); + return; + } + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DEFAULT, pLeft, nLeft); + dlrInit(&right, DL_DEFAULT, pRight, nRight); + dlwInit(&writer, DL_DEFAULT, pOut); + + while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ + if( dlrAtEnd(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrAtEnd(&left) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else if( dlrDocid(&left)<dlrDocid(&right) ){ + dlwCopy(&writer, &left); + dlrStep(&left); + }else if( dlrDocid(&left)>dlrDocid(&right) ){ + dlwCopy(&writer, &right); + dlrStep(&right); + }else{ + posListUnion(&left, &right, &writer); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* +** This function is used as part of the implementation of phrase and +** NEAR matching. +** +** pLeft and pRight are DLReaders positioned to the same docid in +** lists of type DL_POSITION. This function writes an entry to the +** DLWriter pOut for each position in pRight that is less than +** (nNear+1) greater (but not equal to or smaller) than a position +** in pLeft. For example, if nNear is 0, and the positions contained +** by pLeft and pRight are: +** +** pLeft: 5 10 15 20 +** pRight: 6 9 17 21 +** +** then the docid is added to pOut. If pOut is of type DL_POSITIONS, +** then a positionids "6" and "21" are also added to pOut. 
+** +** If boolean argument isSaveLeft is true, then positionids are copied +** from pLeft instead of pRight. In the example above, the positions "5" +** and "20" would be added instead of "6" and "21". +*/ +static void posListPhraseMerge( + DLReader *pLeft, + DLReader *pRight, + int nNear, + int isSaveLeft, + DLWriter *pOut +){ + PLReader left, right; + PLWriter writer; + int match = 0; + + assert( dlrDocid(pLeft)==dlrDocid(pRight) ); + assert( pOut->iType!=DL_POSITIONS_OFFSETS ); + + plrInit(&left, pLeft); + plrInit(&right, pRight); + + while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ + if( plrColumn(&left)<plrColumn(&right) ){ + plrStep(&left); + }else if( plrColumn(&left)>plrColumn(&right) ){ + plrStep(&right); + }else if( plrPosition(&left)>=plrPosition(&right) ){ + plrStep(&right); + }else{ + if( (plrPosition(&right)-plrPosition(&left))<=(nNear+1) ){ + if( !match ){ + plwInit(&writer, pOut, dlrDocid(pLeft)); + match = 1; + } + if( !isSaveLeft ){ + plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); + }else{ + plwAdd(&writer, plrColumn(&left), plrPosition(&left), 0, 0); + } + plrStep(&right); + }else{ + plrStep(&left); + } + } + } + + if( match ){ + plwTerminate(&writer); + plwDestroy(&writer); + } + + plrDestroy(&left); + plrDestroy(&right); +} + +/* +** Compare the values pointed to by the PLReaders passed as arguments. +** Return -1 if the value pointed to by pLeft is considered less than +** the value pointed to by pRight, +1 if it is considered greater +** than it, or 0 if it is equal. i.e. +** +** (*pLeft - *pRight) +** +** A PLReader that is in the EOF condition is considered greater than +** any other. If neither argument is in EOF state, the return value of +** plrColumn() is used. If the plrColumn() values are equal, the +** comparison is on the basis of plrPosition(). +*/ +static int plrCompare(PLReader *pLeft, PLReader *pRight){ + assert(!plrAtEnd(pLeft) || !plrAtEnd(pRight)); + + if( plrAtEnd(pRight) || plrAtEnd(pLeft) ){ + return (plrAtEnd(pRight) ? -1 : 1); + } + if( plrColumn(pLeft)!=plrColumn(pRight) ){ + return ((plrColumn(pLeft)<plrColumn(pRight)) ? -1 : 1); + } + if( plrPosition(pLeft)!=plrPosition(pRight) ){ + return ((plrPosition(pLeft)<plrPosition(pRight)) ? -1 : 1); + } + return 0; +} + +/* We have two doclists with positions: pLeft and pRight. Depending +** on the value of the nNear parameter, perform either a phrase +** intersection (if nNear==0) or a NEAR intersection (if nNear>0) +** and write the results into pOut. +** +** A phrase intersection means that two documents only match +** if pLeft.iPos+1==pRight.iPos. +** +** A NEAR intersection means that two documents only match if +** (abs(pLeft.iPos-pRight.iPos)<nNear). +** +** If a NEAR intersection is requested, then the nPhrase argument should +** be passed the number of tokens in the two operands to the NEAR operator +** combined. For example: +** +** Query syntax nPhrase +** ------------------------------------ +** "A B C" NEAR "D E" 5 +** A NEAR B 2 +** +** iType controls the type of data written to pOut. If iType is +** DL_POSITIONS, the positions are those from pRight. 
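Concretely, the two calling modes just described, as a query evaluator might invoke them (a hedged sketch; the wrapper function is hypothetical, and DL_POSITIONS is chosen so the result can feed a later merge):

static void examplePhraseAndNear(const char *pOne, int nOne,
                                 const char *pTwo, int nTwo,
                                 DataBuffer *pPhrase, DataBuffer *pNear){
  /* "one two" as an exact phrase: nNear==0, nPhrase is unused. */
  docListPhraseMerge(pOne, nOne, pTwo, nTwo, 0, 0, DL_POSITIONS, pPhrase);

  /* one NEAR/5 two: span 5, two tokens across both operands. */
  docListPhraseMerge(pOne, nOne, pTwo, nTwo, 5, 2, DL_POSITIONS, pNear);
}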
+*/ +static void docListPhraseMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + int nNear, /* 0 for a phrase merge, non-zero for a NEAR merge */ + int nPhrase, /* Number of tokens in left+right operands to NEAR */ + DocListType iType, /* Type of doclist to write to pOut */ + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 || nRight==0 ) return; + + assert( iType!=DL_POSITIONS_OFFSETS ); + + dlrInit(&left, DL_POSITIONS, pLeft, nLeft); + dlrInit(&right, DL_POSITIONS, pRight, nRight); + dlwInit(&writer, iType, pOut); + + while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ + if( dlrDocid(&left)<dlrDocid(&right) ){ + dlrStep(&left); + }else if( dlrDocid(&right)<dlrDocid(&left) ){ + dlrStep(&right); + }else{ + if( nNear==0 ){ + posListPhraseMerge(&left, &right, 0, 0, &writer); + }else{ + /* This case occurs when two terms (simple terms or phrases) are + * connected by a NEAR operator, span (nNear+1). i.e. + * + * '"terrible company" NEAR widget' + */ + DataBuffer one = {0, 0, 0}; + DataBuffer two = {0, 0, 0}; + + DLWriter dlwriter2; + DLReader dr1 = {0, 0, 0, 0, 0}; + DLReader dr2 = {0, 0, 0, 0, 0}; + + dlwInit(&dlwriter2, iType, &one); + posListPhraseMerge(&right, &left, nNear-3+nPhrase, 1, &dlwriter2); + dlwInit(&dlwriter2, iType, &two); + posListPhraseMerge(&left, &right, nNear-1, 0, &dlwriter2); + + if( one.nData) dlrInit(&dr1, iType, one.pData, one.nData); + if( two.nData) dlrInit(&dr2, iType, two.pData, two.nData); + + if( !dlrAtEnd(&dr1) || !dlrAtEnd(&dr2) ){ + PLReader pr1 = {0}; + PLReader pr2 = {0}; + + PLWriter plwriter; + plwInit(&plwriter, &writer, dlrDocid(dlrAtEnd(&dr1)?&dr2:&dr1)); + + if( one.nData ) plrInit(&pr1, &dr1); + if( two.nData ) plrInit(&pr2, &dr2); + while( !plrAtEnd(&pr1) || !plrAtEnd(&pr2) ){ + int iCompare = plrCompare(&pr1, &pr2); + switch( iCompare ){ + case -1: + plwCopy(&plwriter, &pr1); + plrStep(&pr1); + break; + case 1: + plwCopy(&plwriter, &pr2); + plrStep(&pr2); + break; + case 0: + plwCopy(&plwriter, &pr1); + plrStep(&pr1); + plrStep(&pr2); + break; + } + } + plwTerminate(&plwriter); + } + dataBufferDestroy(&one); + dataBufferDestroy(&two); + } + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* We have two DL_DOCIDS doclists: pLeft and pRight. +** Write the intersection of these two doclists into pOut as a +** DL_DOCIDS doclist. +*/ +static void docListAndMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 || nRight==0 ) return; + + dlrInit(&left, DL_DOCIDS, pLeft, nLeft); + dlrInit(&right, DL_DOCIDS, pRight, nRight); + dlwInit(&writer, DL_DOCIDS, pOut); + + while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ + if( dlrDocid(&left)<dlrDocid(&right) ){ + dlrStep(&left); + }else if( dlrDocid(&right)<dlrDocid(&left) ){ + dlrStep(&right); + }else{ + dlwAdd(&writer, dlrDocid(&left)); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* We have two DL_DOCIDS doclists: pLeft and pRight. +** Write the union of these two doclists into pOut as a +** DL_DOCIDS doclist. 
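The docid-only set operations (AND above, OR and EXCEPT below) all share this shape. A sketch of the AND case (editorial illustration, not part of the patch): for inputs {1,3,5} and {3,5,7} the output doclist holds {3,5}.

static void exampleAnd(const char *pA, int nA, const char *pB, int nB,
                       DataBuffer *pOut){
  dataBufferInit(pOut, 0);
  docListAndMerge(pA, nA, pB, nB, pOut);  /* DL_DOCIDS in, DL_DOCIDS out */
}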
+*/ +static void docListOrMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ){ + if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight); + return; + } + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DOCIDS, pLeft, nLeft); + dlrInit(&right, DL_DOCIDS, pRight, nRight); + dlwInit(&writer, DL_DOCIDS, pOut); + + while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ + if( dlrAtEnd(&right) ){ + dlwAdd(&writer, dlrDocid(&left)); + dlrStep(&left); + }else if( dlrAtEnd(&left) ){ + dlwAdd(&writer, dlrDocid(&right)); + dlrStep(&right); + }else if( dlrDocid(&left)<dlrDocid(&right) ){ + dlwAdd(&writer, dlrDocid(&left)); + dlrStep(&left); + }else if( dlrDocid(&right)<dlrDocid(&left) ){ + dlwAdd(&writer, dlrDocid(&right)); + dlrStep(&right); + }else{ + dlwAdd(&writer, dlrDocid(&left)); + dlrStep(&left); + dlrStep(&right); + } + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +/* We have two DL_DOCIDS doclists: pLeft and pRight. +** Write into pOut as DL_DOCIDS doclist containing all documents that +** occur in pLeft but not in pRight. +*/ +static void docListExceptMerge( + const char *pLeft, int nLeft, + const char *pRight, int nRight, + DataBuffer *pOut /* Write the combined doclist here */ +){ + DLReader left, right; + DLWriter writer; + + if( nLeft==0 ) return; + if( nRight==0 ){ + dataBufferAppend(pOut, pLeft, nLeft); + return; + } + + dlrInit(&left, DL_DOCIDS, pLeft, nLeft); + dlrInit(&right, DL_DOCIDS, pRight, nRight); + dlwInit(&writer, DL_DOCIDS, pOut); + + while( !dlrAtEnd(&left) ){ + while( !dlrAtEnd(&right) && dlrDocid(&right)<dlrDocid(&left) ){ + dlrStep(&right); + } + if( dlrAtEnd(&right) || dlrDocid(&left)<dlrDocid(&right) ){ + dlwAdd(&writer, dlrDocid(&left)); + } + dlrStep(&left); + } + + dlrDestroy(&left); + dlrDestroy(&right); + dlwDestroy(&writer); +} + +static char *string_dup_n(const char *s, int n){ + char *str = sqlite3_malloc(n + 1); + memcpy(str, s, n); + str[n] = '\0'; + return str; +} + +/* Duplicate a string; the caller must free() the returned string. + * (We don't use strdup() since it is not part of the standard C library and + * may not be available everywhere.) */ +static char *string_dup(const char *s){ + return string_dup_n(s, strlen(s)); +} + +/* Format a string, replacing each occurrence of the % character with + * zDb.zName. This may be more convenient than sqlite_mprintf() + * when one string is used repeatedly in a format string. + * The caller must free() the returned string. */ +static char *string_format(const char *zFormat, + const char *zDb, const char *zName){ + const char *p; + size_t len = 0; + size_t nDb = strlen(zDb); + size_t nName = strlen(zName); + size_t nFullTableName = nDb+1+nName; + char *result; + char *r; + + /* first compute length needed */ + for(p = zFormat ; *p ; ++p){ + len += (*p=='%' ? 
nFullTableName : 1);
+  }
+  len += 1;  /* for null terminator */
+
+  r = result = sqlite3_malloc(len);
+  for(p = zFormat; *p; ++p){
+    if( *p=='%' ){
+      memcpy(r, zDb, nDb);
+      r += nDb;
+      *r++ = '.';
+      memcpy(r, zName, nName);
+      r += nName;
+    } else {
+      *r++ = *p;
+    }
+  }
+  *r++ = '\0';
+  assert( r == result + len );
+  return result;
+}
+
+static int sql_exec(sqlite3 *db, const char *zDb, const char *zName,
+                    const char *zFormat){
+  char *zCommand = string_format(zFormat, zDb, zName);
+  int rc;
+  FTSTRACE(("FTS3 sql: %s\n", zCommand));
+  rc = sqlite3_exec(db, zCommand, NULL, 0, NULL);
+  sqlite3_free(zCommand);
+  return rc;
+}
+
+static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName,
+                       sqlite3_stmt **ppStmt, const char *zFormat){
+  char *zCommand = string_format(zFormat, zDb, zName);
+  int rc;
+  FTSTRACE(("FTS3 prepare: %s\n", zCommand));
+  rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL);
+  sqlite3_free(zCommand);
+  return rc;
+}
+
+/* end utility functions */
+
+/* Forward reference */
+typedef struct fulltext_vtab fulltext_vtab;
+
+/* A single term in a query is represented by an instance of
+** the following structure. Each word which may match against
+** document content is a term. Operators, like NEAR or OR, are
+** not terms. Query terms are organized as a flat list stored
+** in the Query.pTerms array.
+**
+** If the QueryTerm.nPhrase variable is non-zero, then the QueryTerm
+** is the first in a contiguous string of terms that are either part
+** of the same phrase, or connected by the NEAR operator.
+**
+** If the QueryTerm.nNear variable is non-zero, then the token is followed
+** by a NEAR operator with span set to (nNear-1).
+**
+** The QueryTerm.iPhrase variable stores the index of the token within
+** its phrase, indexed starting at 1, or 1 if the token is not part
+** of any phrase.
+**
+** For example, the data structure used to represent the following query:
+**
+**     ... MATCH 'sqlite NEAR/5 google NEAR/2 "search engine"'
+**
+** is:
+**
+**     {nPhrase=4, iPhrase=1, nNear=6, pTerm="sqlite"},
+**     {nPhrase=0, iPhrase=1, nNear=3, pTerm="google"},
+**     {nPhrase=0, iPhrase=1, nNear=0, pTerm="search"},
+**     {nPhrase=0, iPhrase=2, nNear=0, pTerm="engine"},
+**
+** Compiling the FTS3 syntax to Query structures is done by the parseQuery()
+** function.
+*/
+typedef struct QueryTerm {
+  short int nPhrase; /* How many following terms are part of the same phrase */
+  short int iPhrase; /* This is the i-th term of a phrase. */
+  short int iColumn; /* Column of the index that must match this term */
+  signed char nNear; /* term followed by a NEAR operator with span=(nNear-1) */
+  signed char isOr;  /* this term is preceded by "OR" */
+  signed char isNot; /* this term is preceded by "-" */
+  signed char isPrefix; /* this term is followed by "*" */
+  char *pTerm;       /* text of the term. '\000' terminated. malloced */
+  int nTerm;         /* Number of bytes in pTerm[] */
+} QueryTerm;
+
+
+/* A query string is parsed into a Query structure.
+ *
+ * We could, in theory, allow query strings to be complicated
+ * nested expressions with precedence determined by parentheses.
+ * But none of the major search engines do this. (Perhaps the
+ * feeling is that a parenthesized expression is too complex an
+ * idea for the average user to grasp.) Taking our lead from
+ * the major search engines, we will allow queries to be a list
+ * of terms (with an implied AND operator) or phrases in double-quotes,
+ * with a single optional "-" before each non-phrase term to designate
+ * negation and an optional OR connector.
+ *
+ * OR binds more tightly than the implied AND, which is what the
+ * major search engines seem to do. So, for example:
+ *
+ *  [one two OR three] ==> one AND (two OR three)
+ *  [one OR two three] ==> (one OR two) AND three
+ *
+ * A "-" before a term matches all entries that lack that term.
+ * The "-" must occur immediately before the term with no intervening
+ * space. This is how the search engines do it.
+ *
+ * A NOT term cannot be the right-hand operand of an OR. If this
+ * occurs in the query string, the NOT is ignored:
+ *
+ *   [one OR -two] ==> one OR two
+ *
+ */
+typedef struct Query {
+  fulltext_vtab *pFts;  /* The full text index */
+  int nTerms;           /* Number of terms in the query */
+  QueryTerm *pTerms;    /* Array of terms. Space obtained from malloc() */
+  int nextIsOr;         /* Set the isOr flag on the next inserted term */
+  int nextIsNear;       /* Set the nNear value on the next inserted term */
+  int nextColumn;       /* Next word parsed must be in this column */
+  int dfltColumn;       /* The default column */
+} Query;
+
+
+/*
+** An instance of the following structure keeps track of generated
+** matching-word offset information and snippets.
+*/
+typedef struct Snippet {
+  int nMatch;     /* Total number of matches */
+  int nAlloc;     /* Space allocated for aMatch[] */
+  struct snippetMatch { /* One entry for each matching term */
+    char snStatus;       /* Status flag for use while constructing snippets */
+    short int iCol;      /* The column that contains the match */
+    short int iTerm;     /* The index in Query.pTerms[] of the matching term */
+    int iToken;          /* The index of the matching document token */
+    short int nByte;     /* Number of bytes in the term */
+    int iStart;          /* The offset to the first character of the term */
+  } *aMatch;      /* Points to space obtained from malloc */
+  char *zOffset;  /* Text rendering of aMatch[] */
+  int nOffset;    /* strlen(zOffset) */
+  char *zSnippet; /* Snippet text */
+  int nSnippet;   /* strlen(zSnippet) */
+} Snippet;
+
+
+typedef enum QueryType {
+  QUERY_GENERIC,   /* table scan */
+  QUERY_DOCID,     /* lookup by docid */
+  QUERY_FULLTEXT   /* QUERY_FULLTEXT + [i] is a full-text search for column i */
+} QueryType;
+
+typedef enum fulltext_statement {
+  CONTENT_INSERT_STMT,
+  CONTENT_SELECT_STMT,
+  CONTENT_UPDATE_STMT,
+  CONTENT_DELETE_STMT,
+  CONTENT_EXISTS_STMT,
+
+  BLOCK_INSERT_STMT,
+  BLOCK_SELECT_STMT,
+  BLOCK_DELETE_STMT,
+  BLOCK_DELETE_ALL_STMT,
+
+  SEGDIR_MAX_INDEX_STMT,
+  SEGDIR_SET_STMT,
+  SEGDIR_SELECT_LEVEL_STMT,
+  SEGDIR_SPAN_STMT,
+  SEGDIR_DELETE_STMT,
+  SEGDIR_SELECT_SEGMENT_STMT,
+  SEGDIR_SELECT_ALL_STMT,
+  SEGDIR_DELETE_ALL_STMT,
+  SEGDIR_COUNT_STMT,
+
+  MAX_STMT /* Always at end! */
+} fulltext_statement;
+
+/* These must exactly match the enum above. */
+/* TODO(shess): Is there some risk that a statement will be used in two
+** cursors at once, e.g. if a query joins a virtual table to itself?
+** If so perhaps we should move some of these to the cursor object.
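Returning to the QueryTerm example above: the same four terms written out as a C99 designated initializer (a hedged sketch; the array is hypothetical, unlisted fields default to zero, and string literals stand in for the malloced pTerm of real use):

static const QueryTerm aExampleTerms[] = {
  { .nPhrase=4, .iPhrase=1, .nNear=6, .pTerm="sqlite", .nTerm=6 },
  { .nPhrase=0, .iPhrase=1, .nNear=3, .pTerm="google", .nTerm=6 },
  { .nPhrase=0, .iPhrase=1, .nNear=0, .pTerm="search", .nTerm=6 },
  { .nPhrase=0, .iPhrase=2, .nNear=0, .pTerm="engine", .nTerm=6 },
};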
+*/ +static const char *const fulltext_zStatement[MAX_STMT] = { + /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ + /* CONTENT_SELECT */ NULL, /* generated in contentSelectStatement() */ + /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ + /* CONTENT_DELETE */ "delete from %_content where docid = ?", + /* CONTENT_EXISTS */ "select docid from %_content limit 1", + + /* BLOCK_INSERT */ + "insert into %_segments (blockid, block) values (null, ?)", + /* BLOCK_SELECT */ "select block from %_segments where blockid = ?", + /* BLOCK_DELETE */ "delete from %_segments where blockid between ? and ?", + /* BLOCK_DELETE_ALL */ "delete from %_segments", + + /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", + /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", + /* SEGDIR_SELECT_LEVEL */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? order by idx", + /* SEGDIR_SPAN */ + "select min(start_block), max(end_block) from %_segdir " + " where level = ? and start_block <> 0", + /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", + + /* NOTE(shess): The first three results of the following two + ** statements must match. + */ + /* SEGDIR_SELECT_SEGMENT */ + "select start_block, leaves_end_block, root from %_segdir " + " where level = ? and idx = ?", + /* SEGDIR_SELECT_ALL */ + "select start_block, leaves_end_block, root from %_segdir " + " order by level desc, idx asc", + /* SEGDIR_DELETE_ALL */ "delete from %_segdir", + /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", +}; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. +** All other methods receive a pointer to the structure as one of their +** arguments. +*/ +struct fulltext_vtab { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of columns in virtual table */ + char **azColumn; /* column names. malloced */ + char **azContentColumn; /* column names in content table; malloced */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + + /* Precompiled statements which we keep as long as the table is + ** open. + */ + sqlite3_stmt *pFulltextStatements[MAX_STMT]; + + /* Precompiled statements used for segment merges. We run a + ** separate select across the leaf level of each tree being merged. + */ + sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; + /* The statement used to prepare pLeafSelectStmts. */ +#define LEAF_SELECT \ + "select block from %_segments where blockid between ? and ? order by blockid" + + /* These buffer pending index updates during transactions. + ** nPendingData estimates the memory size of the pending data. It + ** doesn't include the hash-bucket overhead, nor any malloc + ** overhead. When nPendingData exceeds kPendingThreshold, the + ** buffer is flushed even before the transaction closes. + ** pendingTerms stores the data, and is only valid when nPendingData + ** is >=0 (nPendingData<0 means pendingTerms has not been + ** initialized). iPrevDocid is the last docid written, used to make + ** certain we're inserting in sorted order. 
+ */ + int nPendingData; +#define kPendingThreshold (1*1024*1024) + sqlite_int64 iPrevDocid; + fts3Hash pendingTerms; +}; + +/* +** When the core wants to do a query, it create a cursor using a +** call to xOpen. This structure is an instance of a cursor. It +** is destroyed by xClose. +*/ +typedef struct fulltext_cursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ + sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ + int eof; /* True if at End Of Results */ + Query q; /* Parsed query string */ + Snippet snippet; /* Cached snippet for the current row */ + int iColumn; /* Column being searched */ + DataBuffer result; /* Doclist results from fulltextQuery */ + DLReader reader; /* Result reader if result not empty */ +} fulltext_cursor; + +static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ + return (fulltext_vtab *) c->base.pVtab; +} + +static const sqlite3_module fts3Module; /* forward declaration */ + +/* Return a dynamically generated statement of the form + * insert into %_content (docid, ...) values (?, ...) + */ +static const char *contentInsertStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "insert into %_content (docid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, ") values (?"); + for(i=0; i<v->nColumn; ++i) + append(&sb, ", ?"); + append(&sb, ")"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * select <content columns> from %_content where docid = ? + */ +static const char *contentSelectStatement(fulltext_vtab *v){ + StringBuffer sb; + initStringBuffer(&sb); + append(&sb, "SELECT "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, " FROM %_content WHERE docid = ?"); + return stringBufferData(&sb); +} + +/* Return a dynamically generated statement of the form + * update %_content set [col_0] = ?, [col_1] = ?, ... + * where docid = ? + */ +static const char *contentUpdateStatement(fulltext_vtab *v){ + StringBuffer sb; + int i; + + initStringBuffer(&sb); + append(&sb, "update %_content set "); + for(i=0; i<v->nColumn; ++i) { + if( i>0 ){ + append(&sb, ", "); + } + append(&sb, v->azContentColumn[i]); + append(&sb, " = ?"); + } + append(&sb, " where docid = ?"); + return stringBufferData(&sb); +} + +/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. +** If the indicated statement has never been prepared, it is prepared +** and cached, otherwise the cached version is reset. +*/ +static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, + sqlite3_stmt **ppStmt){ + assert( iStmt<MAX_STMT ); + if( v->pFulltextStatements[iStmt]==NULL ){ + const char *zStmt; + int rc; + switch( iStmt ){ + case CONTENT_INSERT_STMT: + zStmt = contentInsertStatement(v); break; + case CONTENT_SELECT_STMT: + zStmt = contentSelectStatement(v); break; + case CONTENT_UPDATE_STMT: + zStmt = contentUpdateStatement(v); break; + default: + zStmt = fulltext_zStatement[iStmt]; + } + rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], + zStmt); + if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); + if( rc!=SQLITE_OK ) return rc; + } else { + int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pFulltextStatements[iStmt]; + return SQLITE_OK; +} + +/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and +** SQLITE_ROW to SQLITE_ERROR. 
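Note that sql_get_statement() runs each SQL template through string_format(), so "%_content" expands to, e.g., "main.t_content" for an FTS3 table t in database main. A sketch of the resulting calling pattern, which content_delete() further below implements for real (editorial illustration; the wrapper name is hypothetical):

static int exampleDeleteDocid(fulltext_vtab *v, sqlite_int64 iDocid){
  sqlite3_stmt *s;  /* cached: prepared on first use, reset on later calls */
  int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s);
  if( rc!=SQLITE_OK ) return rc;
  rc = sqlite3_bind_int64(s, 1, iDocid);
  if( rc!=SQLITE_OK ) return rc;
  return sql_single_step(s);  /* SQLITE_DONE becomes SQLITE_OK */
}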
Useful for statements like UPDATE, +** where we expect no results. +*/ +static int sql_single_step(sqlite3_stmt *s){ + int rc = sqlite3_step(s); + return (rc==SQLITE_DONE) ? SQLITE_OK : rc; +} + +/* Like sql_get_statement(), but for special replicated LEAF_SELECT +** statements. idx -1 is a special case for an uncached version of +** the statement (used in the optimize implementation). +*/ +/* TODO(shess) Write version for generic statements and then share +** that between the cached-statement functions. +*/ +static int sql_get_leaf_statement(fulltext_vtab *v, int idx, + sqlite3_stmt **ppStmt){ + assert( idx>=-1 && idx<MERGE_COUNT ); + if( idx==-1 ){ + return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); + }else if( v->pLeafSelectStmts[idx]==NULL ){ + int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], + LEAF_SELECT); + if( rc!=SQLITE_OK ) return rc; + }else{ + int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); + if( rc!=SQLITE_OK ) return rc; + } + + *ppStmt = v->pLeafSelectStmts[idx]; + return SQLITE_OK; +} + +/* insert into %_content (docid, ...) values ([docid], [pValues]) +** If the docid contains SQL NULL, then a unique docid will be +** generated. +*/ +static int content_insert(fulltext_vtab *v, sqlite3_value *docid, + sqlite3_value **pValues){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_value(s, 1, docid); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; i<v->nColumn; ++i){ + rc = sqlite3_bind_value(s, 2+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + return sql_single_step(s); +} + +/* update %_content set col0 = pValues[0], col1 = pValues[1], ... + * where docid = [iDocid] */ +static int content_update(fulltext_vtab *v, sqlite3_value **pValues, + sqlite_int64 iDocid){ + sqlite3_stmt *s; + int i; + int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + for(i=0; i<v->nColumn; ++i){ + rc = sqlite3_bind_value(s, 1+i, pValues[i]); + if( rc!=SQLITE_OK ) return rc; + } + + rc = sqlite3_bind_int64(s, 1+v->nColumn, iDocid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +static void freeStringArray(int nString, const char **pString){ + int i; + + for (i=0 ; i < nString ; ++i) { + if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); + } + sqlite3_free((void *) pString); +} + +/* select * from %_content where docid = [iDocid] + * The caller must delete the returned array and all strings in it. + * null fields will be NULL in the returned array. + * + * TODO: Perhaps we should return pointer/length strings here for consistency + * with other code which uses pointer/length. */ +static int content_select(fulltext_vtab *v, sqlite_int64 iDocid, + const char ***pValues){ + sqlite3_stmt *s; + const char **values; + int i; + int rc; + + *pValues = NULL; + + rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); + for(i=0; i<v->nColumn; ++i){ + if( sqlite3_column_type(s, i)==SQLITE_NULL ){ + values[i] = NULL; + }else{ + values[i] = string_dup((char*)sqlite3_column_text(s, i)); + } + } + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + *pValues = values; + return SQLITE_OK; + } + + freeStringArray(v->nColumn, values); + return rc; +} + +/* delete from %_content where docid = [iDocid ] */ +static int content_delete(fulltext_vtab *v, sqlite_int64 iDocid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iDocid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if +** no rows exist, and any error in case of failure. +*/ +static int content_exists(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc!=SQLITE_ROW ) return rc; + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ROW; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + +/* insert into %_segments values ([pData]) +** returns assigned blockid in *piBlockid +*/ +static int block_insert(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 *piBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + /* blockid column is an alias for rowid. */ + *piBlockid = sqlite3_last_insert_rowid(v->db); + return SQLITE_OK; +} + +/* delete from %_segments +** where blockid between [iStartBlockid] and [iEndBlockid] +** +** Deletes the range of blocks, inclusive, used to delete the blocks +** which form a segment. +*/ +static int block_delete(fulltext_vtab *v, + sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found +** at iLevel. Returns SQLITE_DONE if there are no segments at +** iLevel. Otherwise returns an error. +*/ +static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* Should always get at least one row due to how max() works. */ + if( rc==SQLITE_DONE ) return SQLITE_DONE; + if( rc!=SQLITE_ROW ) return rc; + + /* NULL means that there were no inputs to max(). */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; + } + + *pidx = sqlite3_column_int(s, 0); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. 
*/ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* insert into %_segdir values ( +** [iLevel], [idx], +** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], +** [pRootData] +** ) +*/ +static int segdir_set(fulltext_vtab *v, int iLevel, int idx, + sqlite_int64 iStartBlockid, + sqlite_int64 iLeavesEndBlockid, + sqlite_int64 iEndBlockid, + const char *pRootData, int nRootData){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 2, idx); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 3, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 5, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Queries %_segdir for the block span of the segments in level +** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, +** SQLITE_ROW if there are blocks, else an error. +*/ +static int segdir_span(fulltext_vtab *v, int iLevel, + sqlite_int64 *piStartBlockid, + sqlite_int64 *piEndBlockid){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ + if( rc!=SQLITE_ROW ) return rc; + + /* This happens if all segments at this level are entirely inline. */ + if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + int rc2 = sqlite3_step(s); + if( rc2==SQLITE_ROW ) return SQLITE_ERROR; + return rc2; + } + + *piStartBlockid = sqlite3_column_int64(s, 0); + *piEndBlockid = sqlite3_column_int64(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + return SQLITE_ROW; +} + +/* Delete the segment blocks and segment directory records for all +** segments at iLevel. +*/ +static int segdir_delete(fulltext_vtab *v, int iLevel){ + sqlite3_stmt *s; + sqlite_int64 iStartBlockid, iEndBlockid; + int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); + if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; + + if( rc==SQLITE_ROW ){ + rc = block_delete(v, iStartBlockid, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + } + + /* Delete the segment directory itself. */ + rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Delete entire fts index, SQLITE_OK on success, relevant error on +** failure. 
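+**
+** Assuming SEGDIR_DELETE_ALL_STMT and BLOCK_DELETE_ALL_STMT expand
+** the way their names suggest, this is roughly equivalent to:
+**
+**   DELETE FROM %_segdir;
+**   DELETE FROM %_segments;
+**
+** executed in that order, so the directory entries disappear before
+** the blocks they reference.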
+*/ +static int segdir_delete_all(fulltext_vtab *v){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_single_step(s); + if( rc!=SQLITE_OK ) return rc; + + rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + return sql_single_step(s); +} + +/* Returns SQLITE_OK with *pnSegments set to the number of entries in +** %_segdir and *piMaxLevel set to the highest level which has a +** segment. Otherwise returns the SQLite error which caused failure. +*/ +static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + /* TODO(shess): This case should not be possible? Should stronger + ** measures be taken if it happens? + */ + if( rc==SQLITE_DONE ){ + *pnSegments = 0; + *piMaxLevel = 0; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + *pnSegments = sqlite3_column_int(s, 0); + *piMaxLevel = sqlite3_column_int(s, 1); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_OK; + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + return rc; +} + +/* TODO(shess) clearPendingTerms() is far down the file because +** writeZeroSegment() is far down the file because LeafWriter is far +** down the file. Consider refactoring the code to move the non-vtab +** code above the vtab code so that we don't need this forward +** reference. +*/ +static int clearPendingTerms(fulltext_vtab *v); + +/* +** Free the memory used to contain a fulltext_vtab structure. +*/ +static void fulltext_vtab_destroy(fulltext_vtab *v){ + int iStmt, i; + + FTSTRACE(("FTS3 Destroy %p\n", v)); + for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ + if( v->pFulltextStatements[iStmt]!=NULL ){ + sqlite3_finalize(v->pFulltextStatements[iStmt]); + v->pFulltextStatements[iStmt] = NULL; + } + } + + for( i=0; i<MERGE_COUNT; i++ ){ + if( v->pLeafSelectStmts[i]!=NULL ){ + sqlite3_finalize(v->pLeafSelectStmts[i]); + v->pLeafSelectStmts[i] = NULL; + } + } + + if( v->pTokenizer!=NULL ){ + v->pTokenizer->pModule->xDestroy(v->pTokenizer); + v->pTokenizer = NULL; + } + + clearPendingTerms(v); + + sqlite3_free(v->azColumn); + for(i = 0; i < v->nColumn; ++i) { + sqlite3_free(v->azContentColumn[i]); + } + sqlite3_free(v->azContentColumn); + sqlite3_free(v); +} + +/* +** Token types for parsing the arguments to xConnect or xCreate. +*/ +#define TOKEN_EOF 0 /* End of file */ +#define TOKEN_SPACE 1 /* Any kind of whitespace */ +#define TOKEN_ID 2 /* An identifier */ +#define TOKEN_STRING 3 /* A string literal */ +#define TOKEN_PUNCT 4 /* A single punctuation character */ + +/* +** If X is a character that can be used in an identifier then +** ftsIdChar(X) will be true. Otherwise it is false. +** +** For ASCII, any character with the high-order bit set is +** allowed in an identifier. For 7-bit characters, +** isFtsIdChar[X] must be 1. +** +** Ticket #1066. the SQL standard does not allow '$' in the +** middle of identfiers. But many SQL implementations do. +** SQLite will allow '$' in identifiers for compatibility. +** But the feature is undocumented. 
+*/ +static const char isFtsIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define ftsIdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isFtsIdChar[c-0x20])) + + +/* +** Return the length of the token that begins at z[0]. +** Store the token type in *tokenType before returning. +*/ +static int ftsGetToken(const char *z, int *tokenType){ + int i, c; + switch( *z ){ + case 0: { + *tokenType = TOKEN_EOF; + return 0; + } + case ' ': case '\t': case '\n': case '\f': case '\r': { + for(i=1; safe_isspace(z[i]); i++){} + *tokenType = TOKEN_SPACE; + return i; + } + case '`': + case '\'': + case '"': { + int delim = z[0]; + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + *tokenType = TOKEN_STRING; + return i + (c!=0); + } + case '[': { + for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} + *tokenType = TOKEN_ID; + return i; + } + default: { + if( !ftsIdChar(*z) ){ + break; + } + for(i=1; ftsIdChar(z[i]); i++){} + *tokenType = TOKEN_ID; + return i; + } + } + *tokenType = TOKEN_PUNCT; + return 1; +} + +/* +** A token extracted from a string is an instance of the following +** structure. +*/ +typedef struct FtsToken { + const char *z; /* Pointer to token text. Not '\000' terminated */ + short int n; /* Length of the token text in bytes. */ +} FtsToken; + +/* +** Given a input string (which is really one of the argv[] parameters +** passed into xConnect or xCreate) split the string up into tokens. +** Return an array of pointers to '\000' terminated strings, one string +** for each non-whitespace token. +** +** The returned array is terminated by a single NULL pointer. +** +** Space to hold the returned array is obtained from a single +** malloc and should be freed by passing the return value to free(). +** The individual strings within the token list are all a part of +** the single memory allocation and will all be freed at once. +*/ +static char **tokenizeString(const char *z, int *pnToken){ + int nToken = 0; + FtsToken *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); + int n = 1; + int e, i; + int totalSize = 0; + char **azToken; + char *zCopy; + while( n>0 ){ + n = ftsGetToken(z, &e); + if( e!=TOKEN_SPACE ){ + aToken[nToken].z = z; + aToken[nToken].n = n; + nToken++; + totalSize += n+1; + } + z += n; + } + azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); + zCopy = (char*)&azToken[nToken]; + nToken--; + for(i=0; i<nToken; i++){ + azToken[i] = zCopy; + n = aToken[i].n; + memcpy(zCopy, aToken[i].z, n); + zCopy[n] = 0; + zCopy += n+1; + } + azToken[nToken] = 0; + sqlite3_free(aToken); + *pnToken = nToken; + return azToken; +} + +/* +** Convert an SQL-style quoted string into a normal string by removing +** the quote characters. The conversion is done in-place. If the +** input does not begin with a quote character, then this routine +** is a no-op. 
+** +** Examples: +** +** "abc" becomes abc +** 'xyz' becomes xyz +** [pqr] becomes pqr +** `mno` becomes mno +*/ +static void dequoteString(char *z){ + int quote; + int i, j; + if( z==0 ) return; + quote = z[0]; + switch( quote ){ + case '\'': break; + case '"': break; + case '`': break; /* For MySQL compatibility */ + case '[': quote = ']'; break; /* For MS SqlServer compatibility */ + default: return; + } + for(i=1, j=0; z[i]; i++){ + if( z[i]==quote ){ + if( z[i+1]==quote ){ + z[j++] = quote; + i++; + }else{ + z[j++] = 0; + break; + } + }else{ + z[j++] = z[i]; + } + } +} + +/* +** The input azIn is a NULL-terminated list of tokens. Remove the first +** token and all punctuation tokens. Remove the quotes from +** around string literal tokens. +** +** Example: +** +** input: tokenize chinese ( 'simplifed' , 'mixed' ) +** output: chinese simplifed mixed +** +** Another example: +** +** input: delimiters ( '[' , ']' , '...' ) +** output: [ ] ... +*/ +static void tokenListToIdList(char **azIn){ + int i, j; + if( azIn ){ + for(i=0, j=-1; azIn[i]; i++){ + if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){ + dequoteString(azIn[i]); + if( j>=0 ){ + azIn[j] = azIn[i]; + } + j++; + } + } + azIn[j] = 0; + } +} + + +/* +** Find the first alphanumeric token in the string zIn. Null-terminate +** this token. Remove any quotation marks. And return a pointer to +** the result. +*/ +static char *firstToken(char *zIn, char **pzTail){ + int n, ttype; + while(1){ + n = ftsGetToken(zIn, &ttype); + if( ttype==TOKEN_SPACE ){ + zIn += n; + }else if( ttype==TOKEN_EOF ){ + *pzTail = zIn; + return 0; + }else{ + zIn[n] = 0; + *pzTail = &zIn[1]; + dequoteString(zIn); + return zIn; + } + } + /*NOTREACHED*/ +} + +/* Return true if... +** +** * s begins with the string t, ignoring case +** * s is longer than t +** * The first character of s beyond t is not a alphanumeric +** +** Ignore leading space in *s. +** +** To put it another way, return true if the first token of +** s[] is t[]. +*/ +static int startsWith(const char *s, const char *t){ + while( safe_isspace(*s) ){ s++; } + while( *t ){ + if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; + } + return *s!='_' && !safe_isalnum(*s); +} + +/* +** An instance of this structure defines the "spec" of a +** full text index. This structure is populated by parseSpec +** and use by fulltextConnect and fulltextCreate. +*/ +typedef struct TableSpec { + const char *zDb; /* Logical database name */ + const char *zName; /* Name of the full-text index */ + int nColumn; /* Number of columns to be indexed */ + char **azColumn; /* Original names of columns to be indexed */ + char **azContentColumn; /* Column names for %_content */ + char **azTokenizer; /* Name of tokenizer and its arguments */ +} TableSpec; + +/* +** Reclaim all of the memory used by a TableSpec +*/ +static void clearTableSpec(TableSpec *p) { + sqlite3_free(p->azColumn); + sqlite3_free(p->azContentColumn); + sqlite3_free(p->azTokenizer); +} + +/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: + * + * CREATE VIRTUAL TABLE email + * USING fts3(subject, body, tokenize mytokenizer(myarg)) + * + * We return parsed information in a TableSpec structure. 
+ * + */ +static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, + char**pzErr){ + int i, n; + char *z, *zDummy; + char **azArg; + const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ + + assert( argc>=3 ); + /* Current interface: + ** argv[0] - module name + ** argv[1] - database name + ** argv[2] - table name + ** argv[3..] - columns, optionally followed by tokenizer specification + ** and snippet delimiters specification. + */ + + /* Make a copy of the complete argv[][] array in a single allocation. + ** The argv[][] array is read-only and transient. We can write to the + ** copy in order to modify things and the copy is persistent. + */ + CLEAR(pSpec); + for(i=n=0; i<argc; i++){ + n += strlen(argv[i]) + 1; + } + azArg = sqlite3_malloc( sizeof(char*)*argc + n ); + if( azArg==0 ){ + return SQLITE_NOMEM; + } + z = (char*)&azArg[argc]; + for(i=0; i<argc; i++){ + azArg[i] = z; + strcpy(z, argv[i]); + z += strlen(z)+1; + } + + /* Identify the column names and the tokenizer and delimiter arguments + ** in the argv[][] array. + */ + pSpec->zDb = azArg[1]; + pSpec->zName = azArg[2]; + pSpec->nColumn = 0; + pSpec->azColumn = azArg; + zTokenizer = "tokenize simple"; + for(i=3; i<argc; ++i){ + if( startsWith(azArg[i],"tokenize") ){ + zTokenizer = azArg[i]; + }else{ + z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); + pSpec->nColumn++; + } + } + if( pSpec->nColumn==0 ){ + azArg[0] = "content"; + pSpec->nColumn = 1; + } + + /* + ** Construct the list of content column names. + ** + ** Each content column name will be of the form cNNAAAA + ** where NN is the column number and AAAA is the sanitized + ** column name. "sanitized" means that special characters are + ** converted to "_". The cNN prefix guarantees that all column + ** names are unique. + ** + ** The AAAA suffix is not strictly necessary. It is included + ** for the convenience of people who might examine the generated + ** %_content table and wonder what the columns are used for. + */ + pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); + if( pSpec->azContentColumn==0 ){ + clearTableSpec(pSpec); + return SQLITE_NOMEM; + } + for(i=0; i<pSpec->nColumn; i++){ + char *p; + pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); + for (p = pSpec->azContentColumn[i]; *p ; ++p) { + if( !safe_isalnum(*p) ) *p = '_'; + } + } + + /* + ** Parse the tokenizer specification string. + */ + pSpec->azTokenizer = tokenizeString(zTokenizer, &n); + tokenListToIdList(pSpec->azTokenizer); + + return SQLITE_OK; +} + +/* +** Generate a CREATE TABLE statement that describes the schema of +** the virtual table. Return a pointer to this schema string. +** +** Space is obtained from sqlite3_mprintf() and should be freed +** using sqlite3_free(). +*/ +static char *fulltextSchema( + int nColumn, /* Number of columns */ + const char *const* azColumn, /* List of columns */ + const char *zTableName /* Name of the table */ +){ + int i; + char *zSchema, *zNext; + const char *zSep = "("; + zSchema = sqlite3_mprintf("CREATE TABLE x"); + for(i=0; i<nColumn; i++){ + zNext = sqlite3_mprintf("%s%s%Q", zSchema, zSep, azColumn[i]); + sqlite3_free(zSchema); + zSchema = zNext; + zSep = ","; + } + zNext = sqlite3_mprintf("%s,%Q HIDDEN", zSchema, zTableName); + sqlite3_free(zSchema); + zSchema = zNext; + zNext = sqlite3_mprintf("%s,docid HIDDEN)", zSchema); + sqlite3_free(zSchema); + return zNext; +} + +/* +** Build a new sqlite3_vtab structure that will describe the +** fulltext index defined by spec. 
+*/ +static int constructVtab( + sqlite3 *db, /* The SQLite database connection */ + fts3Hash *pHash, /* Hash table containing tokenizers */ + TableSpec *spec, /* Parsed spec information from parseSpec() */ + sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ + char **pzErr /* Write any error message here */ +){ + int rc; + int n; + fulltext_vtab *v = 0; + const sqlite3_tokenizer_module *m = NULL; + char *schema; + + char const *zTok; /* Name of tokenizer to use for this fts table */ + int nTok; /* Length of zTok, including nul terminator */ + + v = (fulltext_vtab *) sqlite3_malloc(sizeof(fulltext_vtab)); + if( v==0 ) return SQLITE_NOMEM; + CLEAR(v); + /* sqlite will initialize v->base */ + v->db = db; + v->zDb = spec->zDb; /* Freed when azColumn is freed */ + v->zName = spec->zName; /* Freed when azColumn is freed */ + v->nColumn = spec->nColumn; + v->azContentColumn = spec->azContentColumn; + spec->azContentColumn = 0; + v->azColumn = spec->azColumn; + spec->azColumn = 0; + + if( spec->azTokenizer==0 ){ + return SQLITE_NOMEM; + } + + zTok = spec->azTokenizer[0]; + if( !zTok ){ + zTok = "simple"; + } + nTok = strlen(zTok)+1; + + m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zTok, nTok); + if( !m ){ + *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); + rc = SQLITE_ERROR; + goto err; + } + + for(n=0; spec->azTokenizer[n]; n++){} + if( n ){ + rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], + &v->pTokenizer); + }else{ + rc = m->xCreate(0, 0, &v->pTokenizer); + } + if( rc!=SQLITE_OK ) goto err; + v->pTokenizer->pModule = m; + + /* TODO: verify the existence of backing tables foo_content, foo_term */ + + schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, + spec->zName); + rc = sqlite3_declare_vtab(db, schema); + sqlite3_free(schema); + if( rc!=SQLITE_OK ) goto err; + + memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); + + /* Indicate that the buffer is not live. */ + v->nPendingData = -1; + + *ppVTab = &v->base; + FTSTRACE(("FTS3 Connect %p\n", v)); + + return rc; + +err: + fulltext_vtab_destroy(v); + return rc; +} + +static int fulltextConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, + char **pzErr +){ + TableSpec spec; + int rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); + clearTableSpec(&spec); + return rc; +} + +/* The %_content table holds the text of each document, with +** the docid column exposed as the SQLite rowid for the table. +*/ +/* TODO(shess) This comment needs elaboration to match the updated +** code. Work it into the top-of-file comment at that time. 
+*/ +static int fulltextCreate(sqlite3 *db, void *pAux, + int argc, const char * const *argv, + sqlite3_vtab **ppVTab, char **pzErr){ + int rc; + TableSpec spec; + StringBuffer schema; + FTSTRACE(("FTS3 Create\n")); + + rc = parseSpec(&spec, argc, argv, pzErr); + if( rc!=SQLITE_OK ) return rc; + + initStringBuffer(&schema); + append(&schema, "CREATE TABLE %_content("); + append(&schema, " docid INTEGER PRIMARY KEY,"); + appendList(&schema, spec.nColumn, spec.azContentColumn); + append(&schema, ")"); + rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); + stringBufferDestroy(&schema); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segments(" + " blockid INTEGER PRIMARY KEY," + " block blob" + ");" + ); + if( rc!=SQLITE_OK ) goto out; + + rc = sql_exec(db, spec.zDb, spec.zName, + "create table %_segdir(" + " level integer," + " idx integer," + " start_block integer," + " leaves_end_block integer," + " end_block integer," + " root blob," + " primary key(level, idx)" + ");"); + if( rc!=SQLITE_OK ) goto out; + + rc = constructVtab(db, (fts3Hash *)pAux, &spec, ppVTab, pzErr); + +out: + clearTableSpec(&spec); + return rc; +} + +/* Decide how to handle an SQL query. */ +static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int i; + FTSTRACE(("FTS3 BestIndex\n")); + + for(i=0; i<pInfo->nConstraint; ++i){ + const struct sqlite3_index_constraint *pConstraint; + pConstraint = &pInfo->aConstraint[i]; + if( pConstraint->usable ) { + if( (pConstraint->iColumn==-1 || pConstraint->iColumn==v->nColumn+1) && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + pInfo->idxNum = QUERY_DOCID; /* lookup by docid */ + FTSTRACE(("FTS3 QUERY_DOCID\n")); + } else if( pConstraint->iColumn>=0 && pConstraint->iColumn<=v->nColumn && + pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ + /* full-text search */ + pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; + FTSTRACE(("FTS3 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); + } else continue; + + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + + /* An arbitrary value for now. + * TODO: Perhaps docid matches should be considered cheaper than + * full-text searches. 
*/ + pInfo->estimatedCost = 1.0; + + return SQLITE_OK; + } + } + pInfo->idxNum = QUERY_GENERIC; + return SQLITE_OK; +} + +static int fulltextDisconnect(sqlite3_vtab *pVTab){ + FTSTRACE(("FTS3 Disconnect %p\n", pVTab)); + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextDestroy(sqlite3_vtab *pVTab){ + fulltext_vtab *v = (fulltext_vtab *)pVTab; + int rc; + + FTSTRACE(("FTS3 Destroy %p\n", pVTab)); + rc = sql_exec(v->db, v->zDb, v->zName, + "drop table if exists %_content;" + "drop table if exists %_segments;" + "drop table if exists %_segdir;" + ); + if( rc!=SQLITE_OK ) return rc; + + fulltext_vtab_destroy((fulltext_vtab *)pVTab); + return SQLITE_OK; +} + +static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + fulltext_cursor *c; + + c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); + if( c ){ + memset(c, 0, sizeof(fulltext_cursor)); + /* sqlite will initialize c->base */ + *ppCursor = &c->base; + FTSTRACE(("FTS3 Open %p: %p\n", pVTab, c)); + return SQLITE_OK; + }else{ + return SQLITE_NOMEM; + } +} + + +/* Free all of the dynamically allocated memory held by *q +*/ +static void queryClear(Query *q){ + int i; + for(i = 0; i < q->nTerms; ++i){ + sqlite3_free(q->pTerms[i].pTerm); + } + sqlite3_free(q->pTerms); + CLEAR(q); +} + +/* Free all of the dynamically allocated memory held by the +** Snippet +*/ +static void snippetClear(Snippet *p){ + sqlite3_free(p->aMatch); + sqlite3_free(p->zOffset); + sqlite3_free(p->zSnippet); + CLEAR(p); +} +/* +** Append a single entry to the p->aMatch[] log. +*/ +static void snippetAppendMatch( + Snippet *p, /* Append the entry to this snippet */ + int iCol, int iTerm, /* The column and query term */ + int iToken, /* Matching token in document */ + int iStart, int nByte /* Offset and size of the match */ +){ + int i; + struct snippetMatch *pMatch; + if( p->nMatch+1>=p->nAlloc ){ + p->nAlloc = p->nAlloc*2 + 10; + p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); + if( p->aMatch==0 ){ + p->nMatch = 0; + p->nAlloc = 0; + return; + } + } + i = p->nMatch++; + pMatch = &p->aMatch[i]; + pMatch->iCol = iCol; + pMatch->iTerm = iTerm; + pMatch->iToken = iToken; + pMatch->iStart = iStart; + pMatch->nByte = nByte; +} + +/* +** Sizing information for the circular buffer used in snippetOffsetsOfColumn() +*/ +#define FTS3_ROTOR_SZ (32) +#define FTS3_ROTOR_MASK (FTS3_ROTOR_SZ-1) + +/* +** Add entries to pSnippet->aMatch[] for every match that occurs against +** document zDoc[0..nDoc-1] which is stored in column iColumn. 
+*/ +static void snippetOffsetsOfColumn( + Query *pQuery, + Snippet *pSnippet, + int iColumn, + const char *zDoc, + int nDoc +){ + const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ + sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ + sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ + fulltext_vtab *pVtab; /* The full text index */ + int nColumn; /* Number of columns in the index */ + const QueryTerm *aTerm; /* Query string terms */ + int nTerm; /* Number of query string terms */ + int i, j; /* Loop counters */ + int rc; /* Return code */ + unsigned int match, prevMatch; /* Phrase search bitmasks */ + const char *zToken; /* Next token from the tokenizer */ + int nToken; /* Size of zToken */ + int iBegin, iEnd, iPos; /* Offsets of beginning and end */ + + /* The following variables keep a circular buffer of the last + ** few tokens */ + unsigned int iRotor = 0; /* Index of current token */ + int iRotorBegin[FTS3_ROTOR_SZ]; /* Beginning offset of token */ + int iRotorLen[FTS3_ROTOR_SZ]; /* Length of token */ + + pVtab = pQuery->pFts; + nColumn = pVtab->nColumn; + pTokenizer = pVtab->pTokenizer; + pTModule = pTokenizer->pModule; + rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); + if( rc ) return; + pTCursor->pTokenizer = pTokenizer; + aTerm = pQuery->pTerms; + nTerm = pQuery->nTerms; + if( nTerm>=FTS3_ROTOR_SZ ){ + nTerm = FTS3_ROTOR_SZ - 1; + } + prevMatch = 0; + while(1){ + rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); + if( rc ) break; + iRotorBegin[iRotor&FTS3_ROTOR_MASK] = iBegin; + iRotorLen[iRotor&FTS3_ROTOR_MASK] = iEnd-iBegin; + match = 0; + for(i=0; i<nTerm; i++){ + int iCol; + iCol = aTerm[i].iColumn; + if( iCol>=0 && iCol<nColumn && iCol!=iColumn ) continue; + if( aTerm[i].nTerm>nToken ) continue; + if( !aTerm[i].isPrefix && aTerm[i].nTerm<nToken ) continue; + assert( aTerm[i].nTerm<=nToken ); + if( memcmp(aTerm[i].pTerm, zToken, aTerm[i].nTerm) ) continue; + if( aTerm[i].iPhrase>1 && (prevMatch & (1<<i))==0 ) continue; + match |= 1<<i; + if( i==nTerm-1 || aTerm[i+1].iPhrase==1 ){ + for(j=aTerm[i].iPhrase-1; j>=0; j--){ + int k = (iRotor-j) & FTS3_ROTOR_MASK; + snippetAppendMatch(pSnippet, iColumn, i-j, iPos-j, + iRotorBegin[k], iRotorLen[k]); + } + } + } + prevMatch = match<<1; + iRotor++; + } + pTModule->xClose(pTCursor); +} + +/* +** Remove entries from the pSnippet structure to account for the NEAR +** operator. When this is called, pSnippet contains the list of token +** offsets produced by treating all NEAR operators as AND operators. +** This function removes any entries that should not be present after +** accounting for the NEAR restriction. For example, if the queried +** document is: +** +** "A B C D E A" +** +** and the query is: +** +** A NEAR/0 E +** +** then when this function is called the Snippet contains token offsets +** 0, 4 and 5. This function removes the "0" entry (because the first A +** is not near enough to an E). 
+*/ +static void trimSnippetOffsetsForNear(Query *pQuery, Snippet *pSnippet){ + int ii; + int iDir = 1; + + while(iDir>-2) { + assert( iDir==1 || iDir==-1 ); + for(ii=0; ii<pSnippet->nMatch; ii++){ + int jj; + int nNear; + struct snippetMatch *pMatch = &pSnippet->aMatch[ii]; + QueryTerm *pQueryTerm = &pQuery->pTerms[pMatch->iTerm]; + + if( (pMatch->iTerm+iDir)<0 + || (pMatch->iTerm+iDir)>=pQuery->nTerms + ){ + continue; + } + + nNear = pQueryTerm->nNear; + if( iDir<0 ){ + nNear = pQueryTerm[-1].nNear; + } + + if( pMatch->iTerm>=0 && nNear ){ + int isOk = 0; + int iNextTerm = pMatch->iTerm+iDir; + int iPrevTerm = iNextTerm; + + int iEndToken; + int iStartToken; + + if( iDir<0 ){ + int nPhrase = 1; + iStartToken = pMatch->iToken; + while( (pMatch->iTerm+nPhrase)<pQuery->nTerms + && pQuery->pTerms[pMatch->iTerm+nPhrase].iPhrase>1 + ){ + nPhrase++; + } + iEndToken = iStartToken + nPhrase - 1; + }else{ + iEndToken = pMatch->iToken; + iStartToken = pMatch->iToken+1-pQueryTerm->iPhrase; + } + + while( pQuery->pTerms[iNextTerm].iPhrase>1 ){ + iNextTerm--; + } + while( (iPrevTerm+1)<pQuery->nTerms && + pQuery->pTerms[iPrevTerm+1].iPhrase>1 + ){ + iPrevTerm++; + } + + for(jj=0; isOk==0 && jj<pSnippet->nMatch; jj++){ + struct snippetMatch *p = &pSnippet->aMatch[jj]; + if( p->iCol==pMatch->iCol && (( + p->iTerm==iNextTerm && + p->iToken>iEndToken && + p->iToken<=iEndToken+nNear + ) || ( + p->iTerm==iPrevTerm && + p->iToken<iStartToken && + p->iToken>=iStartToken-nNear + ))){ + isOk = 1; + } + } + if( !isOk ){ + for(jj=1-pQueryTerm->iPhrase; jj<=0; jj++){ + pMatch[jj].iTerm = -1; + } + ii = -1; + iDir = 1; + } + } + } + iDir -= 2; + } +} + +/* +** Compute all offsets for the current row of the query. +** If the offsets have already been computed, this routine is a no-op. +*/ +static void snippetAllOffsets(fulltext_cursor *p){ + int nColumn; + int iColumn, i; + int iFirst, iLast; + fulltext_vtab *pFts; + + if( p->snippet.nMatch ) return; + if( p->q.nTerms==0 ) return; + pFts = p->q.pFts; + nColumn = pFts->nColumn; + iColumn = (p->iCursorType - QUERY_FULLTEXT); + if( iColumn<0 || iColumn>=nColumn ){ + iFirst = 0; + iLast = nColumn-1; + }else{ + iFirst = iColumn; + iLast = iColumn; + } + for(i=iFirst; i<=iLast; i++){ + const char *zDoc; + int nDoc; + zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); + nDoc = sqlite3_column_bytes(p->pStmt, i+1); + snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); + } + + trimSnippetOffsetsForNear(&p->q, &p->snippet); +} + +/* +** Convert the information in the aMatch[] array of the snippet +** into the string zOffset[0..nOffset-1]. +*/ +static void snippetOffsetText(Snippet *p){ + int i; + int cnt = 0; + StringBuffer sb; + char zBuf[200]; + if( p->zOffset ) return; + initStringBuffer(&sb); + for(i=0; i<p->nMatch; i++){ + struct snippetMatch *pMatch = &p->aMatch[i]; + if( pMatch->iTerm>=0 ){ + /* If snippetMatch.iTerm is less than 0, then the match was + ** discarded as part of processing the NEAR operator (see the + ** trimSnippetOffsetsForNear() function for details). Ignore + ** it in this case + */ + zBuf[0] = ' '; + sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", + pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); + append(&sb, zBuf); + cnt++; + } + } + p->zOffset = stringBufferData(&sb); + p->nOffset = stringBufferLength(&sb); +} + +/* +** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set +** of matching words some of which might be in zDoc. zDoc is column +** number iCol. 
+** +** iBreak is suggested spot in zDoc where we could begin or end an +** excerpt. Return a value similar to iBreak but possibly adjusted +** to be a little left or right so that the break point is better. +*/ +static int wordBoundary( + int iBreak, /* The suggested break point */ + const char *zDoc, /* Document text */ + int nDoc, /* Number of bytes in zDoc[] */ + struct snippetMatch *aMatch, /* Matching words */ + int nMatch, /* Number of entries in aMatch[] */ + int iCol /* The column number for zDoc[] */ +){ + int i; + if( iBreak<=10 ){ + return 0; + } + if( iBreak>=nDoc-10 ){ + return nDoc; + } + for(i=0; i<nMatch && aMatch[i].iCol<iCol; i++){} + while( i<nMatch && aMatch[i].iStart+aMatch[i].nByte<iBreak ){ i++; } + if( i<nMatch ){ + if( aMatch[i].iStart<iBreak+10 ){ + return aMatch[i].iStart; + } + if( i>0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ + return aMatch[i-1].iStart; + } + } + for(i=1; i<=10; i++){ + if( safe_isspace(zDoc[iBreak-i]) ){ + return iBreak - i + 1; + } + if( safe_isspace(zDoc[iBreak+i]) ){ + return iBreak + i + 1; + } + } + return iBreak; +} + + + +/* +** Allowed values for Snippet.aMatch[].snStatus +*/ +#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ +#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ + +/* +** Generate the text of a snippet. +*/ +static void snippetText( + fulltext_cursor *pCursor, /* The cursor we need the snippet for */ + const char *zStartMark, /* Markup to appear before each match */ + const char *zEndMark, /* Markup to appear after each match */ + const char *zEllipsis /* Ellipsis mark */ +){ + int i, j; + struct snippetMatch *aMatch; + int nMatch; + int nDesired; + StringBuffer sb; + int tailCol; + int tailOffset; + int iCol; + int nDoc; + const char *zDoc; + int iStart, iEnd; + int tailEllipsis = 0; + int iMatch; + + + sqlite3_free(pCursor->snippet.zSnippet); + pCursor->snippet.zSnippet = 0; + aMatch = pCursor->snippet.aMatch; + nMatch = pCursor->snippet.nMatch; + initStringBuffer(&sb); + + for(i=0; i<nMatch; i++){ + aMatch[i].snStatus = SNIPPET_IGNORE; + } + nDesired = 0; + for(i=0; i<pCursor->q.nTerms; i++){ + for(j=0; j<nMatch; j++){ + if( aMatch[j].iTerm==i ){ + aMatch[j].snStatus = SNIPPET_DESIRED; + nDesired++; + break; + } + } + } + + iMatch = 0; + tailCol = -1; + tailOffset = 0; + for(i=0; i<nMatch && nDesired>0; i++){ + if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; + nDesired--; + iCol = aMatch[i].iCol; + zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); + nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); + iStart = aMatch[i].iStart - 40; + iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); + if( iStart<=10 ){ + iStart = 0; + } + if( iCol==tailCol && iStart<=tailOffset+20 ){ + iStart = tailOffset; + } + if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ + trimWhiteSpace(&sb); + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + appendWhiteSpace(&sb); + } + iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; + iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); + if( iEnd>=nDoc-10 ){ + iEnd = nDoc; + tailEllipsis = 0; + }else{ + tailEllipsis = 1; + } + while( iMatch<nMatch && aMatch[iMatch].iCol<iCol ){ iMatch++; } + while( iStart<iEnd ){ + while( iMatch<nMatch && aMatch[iMatch].iStart<iStart + && aMatch[iMatch].iCol<=iCol ){ + iMatch++; + } + if( iMatch<nMatch && aMatch[iMatch].iStart<iEnd + && aMatch[iMatch].iCol==iCol ){ + nappend(&sb, &zDoc[iStart], aMatch[iMatch].iStart - iStart); + iStart = 
aMatch[iMatch].iStart; + append(&sb, zStartMark); + nappend(&sb, &zDoc[iStart], aMatch[iMatch].nByte); + append(&sb, zEndMark); + iStart += aMatch[iMatch].nByte; + for(j=iMatch+1; j<nMatch; j++){ + if( aMatch[j].iTerm==aMatch[iMatch].iTerm + && aMatch[j].snStatus==SNIPPET_DESIRED ){ + nDesired--; + aMatch[j].snStatus = SNIPPET_IGNORE; + } + } + }else{ + nappend(&sb, &zDoc[iStart], iEnd - iStart); + iStart = iEnd; + } + } + tailCol = iCol; + tailOffset = iEnd; + } + trimWhiteSpace(&sb); + if( tailEllipsis ){ + appendWhiteSpace(&sb); + append(&sb, zEllipsis); + } + pCursor->snippet.zSnippet = stringBufferData(&sb); + pCursor->snippet.nSnippet = stringBufferLength(&sb); +} + + +/* +** Close the cursor. For additional information see the documentation +** on the xClose method of the virtual table interface. +*/ +static int fulltextClose(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + FTSTRACE(("FTS3 Close %p\n", c)); + sqlite3_finalize(c->pStmt); + queryClear(&c->q); + snippetClear(&c->snippet); + if( c->result.nData!=0 ) dlrDestroy(&c->reader); + dataBufferDestroy(&c->result); + sqlite3_free(c); + return SQLITE_OK; +} + +static int fulltextNext(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + int rc; + + FTSTRACE(("FTS3 Next %p\n", pCursor)); + snippetClear(&c->snippet); + if( c->iCursorType < QUERY_FULLTEXT ){ + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + switch( rc ){ + case SQLITE_ROW: + c->eof = 0; + return SQLITE_OK; + case SQLITE_DONE: + c->eof = 1; + return SQLITE_OK; + default: + c->eof = 1; + return rc; + } + } else { /* full-text query */ + rc = sqlite3_reset(c->pStmt); + if( rc!=SQLITE_OK ) return rc; + + if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ + c->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); + dlrStep(&c->reader); + if( rc!=SQLITE_OK ) return rc; + /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ + rc = sqlite3_step(c->pStmt); + if( rc==SQLITE_ROW ){ /* the case we expect */ + c->eof = 0; + return SQLITE_OK; + } + /* an error occurred; abort */ + return rc==SQLITE_DONE ? SQLITE_ERROR : rc; + } +} + + +/* TODO(shess) If we pushed LeafReader to the top of the file, or to +** another file, term_select() could be pushed above +** docListOfTerm(). +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out); + +/* Return a DocList corresponding to the query term *pTerm. If *pTerm +** is the first term of a phrase query, go ahead and evaluate the phrase +** query and return the doclist for the entire phrase query. +** +** The resulting DL_DOCIDS doclist is stored in pResult, which is +** overwritten. +*/ +static int docListOfTerm( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* column to restrict to. No restriction if >=nColumn */ + QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ + DataBuffer *pResult /* Write the result here */ +){ + DataBuffer left, right, new; + int i, rc; + + /* No phrase search if no position info. */ + assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS ); + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&left, 0); + rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix, + (0<pQTerm->nPhrase ? 
DL_POSITIONS : DL_DOCIDS), &left); + if( rc ) return rc; + for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){ + /* If this token is connected to the next by a NEAR operator, and + ** the next token is the start of a phrase, then set nPhraseRight + ** to the number of tokens in the phrase. Otherwise leave it at 1. + */ + int nPhraseRight = 1; + while( (i+nPhraseRight)<=pQTerm->nPhrase + && pQTerm[i+nPhraseRight].nNear==0 + ){ + nPhraseRight++; + } + + dataBufferInit(&right, 0); + rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, + pQTerm[i].isPrefix, DL_POSITIONS, &right); + if( rc ){ + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListPhraseMerge(left.pData, left.nData, right.pData, right.nData, + pQTerm[i-1].nNear, pQTerm[i-1].iPhrase + nPhraseRight, + ((i<pQTerm->nPhrase) ? DL_POSITIONS : DL_DOCIDS), + &new); + dataBufferDestroy(&left); + dataBufferDestroy(&right); + left = new; + } + *pResult = left; + return SQLITE_OK; +} + +/* Add a new term pTerm[0..nTerm-1] to the query *q. +*/ +static void queryAdd(Query *q, const char *pTerm, int nTerm){ + QueryTerm *t; + ++q->nTerms; + q->pTerms = sqlite3_realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); + if( q->pTerms==0 ){ + q->nTerms = 0; + return; + } + t = &q->pTerms[q->nTerms - 1]; + CLEAR(t); + t->pTerm = sqlite3_malloc(nTerm+1); + memcpy(t->pTerm, pTerm, nTerm); + t->pTerm[nTerm] = 0; + t->nTerm = nTerm; + t->isOr = q->nextIsOr; + t->isPrefix = 0; + q->nextIsOr = 0; + t->iColumn = q->nextColumn; + q->nextColumn = q->dfltColumn; +} + +/* +** Check to see if the string zToken[0...nToken-1] matches any +** column name in the virtual table. If it does, +** return the zero-indexed column number. If not, return -1. +*/ +static int checkColumnSpecifier( + fulltext_vtab *pVtab, /* The virtual table */ + const char *zToken, /* Text of the token */ + int nToken /* Number of characters in the token */ +){ + int i; + for(i=0; i<pVtab->nColumn; i++){ + if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 + && pVtab->azColumn[i][nToken]==0 ){ + return i; + } + } + return -1; +} + +/* +** Parse the text at pSegment[0..nSegment-1]. Add additional terms +** to the query being assemblied in pQuery. +** +** inPhrase is true if pSegment[0..nSegement-1] is contained within +** double-quotes. If inPhrase is true, then the first term +** is marked with the number of terms in the phrase less one and +** OR and "-" syntax is ignored. If inPhrase is false, then every +** term found is marked with nPhrase=0 and OR and "-" syntax is significant. +*/ +static int tokenizeSegment( + sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ + const char *pSegment, int nSegment, /* Query expression being parsed */ + int inPhrase, /* True if within "..." 
*/
+  Query *pQuery                        /* Append results here */
+){
+  const sqlite3_tokenizer_module *pModule = pTokenizer->pModule;
+  sqlite3_tokenizer_cursor *pCursor;
+  int firstIndex = pQuery->nTerms;
+  int iCol;
+  int nTerm = 1;
+
+  int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor);
+  if( rc!=SQLITE_OK ) return rc;
+  pCursor->pTokenizer = pTokenizer;
+
+  while( 1 ){
+    const char *pToken;
+    int nToken, iBegin, iEnd, iPos;
+
+    rc = pModule->xNext(pCursor,
+                        &pToken, &nToken,
+                        &iBegin, &iEnd, &iPos);
+    if( rc!=SQLITE_OK ) break;
+    if( !inPhrase &&
+        pSegment[iEnd]==':' &&
+        (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){
+      pQuery->nextColumn = iCol;
+      continue;
+    }
+    if( !inPhrase && pQuery->nTerms>0 && nToken==2
+     && pSegment[iBegin+0]=='O'
+     && pSegment[iBegin+1]=='R'
+    ){
+      pQuery->nextIsOr = 1;
+      continue;
+    }
+    if( !inPhrase && pQuery->nTerms>0 && !pQuery->nextIsOr && nToken==4
+     && pSegment[iBegin+0]=='N'
+     && pSegment[iBegin+1]=='E'
+     && pSegment[iBegin+2]=='A'
+     && pSegment[iBegin+3]=='R'
+    ){
+      QueryTerm *pTerm = &pQuery->pTerms[pQuery->nTerms-1];
+      if( (iBegin+6)<nSegment
+       && pSegment[iBegin+4] == '/'
+       && pSegment[iBegin+5]>='0' && pSegment[iBegin+5]<='9'
+      ){
+        pTerm->nNear = (pSegment[iBegin+5] - '0');
+        nToken += 2;
+        if( pSegment[iBegin+6]>='0' && pSegment[iBegin+6]<='9' ){
+          pTerm->nNear = pTerm->nNear * 10 + (pSegment[iBegin+6] - '0');
+          iEnd++;
+        }
+        pModule->xNext(pCursor, &pToken, &nToken, &iBegin, &iEnd, &iPos);
+      } else {
+        pTerm->nNear = SQLITE_FTS3_DEFAULT_NEAR_PARAM;
+      }
+      pTerm->nNear++;
+      continue;
+    }
+
+    queryAdd(pQuery, pToken, nToken);
+    if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){
+      pQuery->pTerms[pQuery->nTerms-1].isNot = 1;
+    }
+    if( iEnd<nSegment && pSegment[iEnd]=='*' ){
+      pQuery->pTerms[pQuery->nTerms-1].isPrefix = 1;
+    }
+    pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm;
+    if( inPhrase ){
+      nTerm++;
+    }
+  }
+
+  if( inPhrase && pQuery->nTerms>firstIndex ){
+    pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1;
+  }
+
+  return pModule->xClose(pCursor);
+}
+
+/* Parse a query string, yielding a Query object pQuery.
+**
+** The calling function will need to call queryClear() to clean up
+** the dynamically allocated memory held by pQuery.
+*/
+static int parseQuery(
+  fulltext_vtab *v,      /* The fulltext index */
+  const char *zInput,    /* Input text of the query string */
+  int nInput,            /* Size of the input text */
+  int dfltColumn,        /* Default column of the index to match against */
+  Query *pQuery          /* Write the parse results here. */
+){
+  int iInput, inPhrase = 0;
+  int ii;
+  QueryTerm *aTerm;
+
+  if( zInput==0 ) nInput = 0;
+  if( nInput<0 ) nInput = strlen(zInput);
+  pQuery->nTerms = 0;
+  pQuery->pTerms = NULL;
+  pQuery->nextIsOr = 0;
+  pQuery->nextColumn = dfltColumn;
+  pQuery->dfltColumn = dfltColumn;
+  pQuery->pFts = v;
+
+  for(iInput=0; iInput<nInput; ++iInput){
+    int i;
+    for(i=iInput; i<nInput && zInput[i]!='"'; ++i){}
+    if( i>iInput ){
+      tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase,
+                      pQuery);
+    }
+    iInput = i;
+    if( i<nInput ){
+      assert( zInput[i]=='"' );
+      inPhrase = !inPhrase;
+    }
+  }
+
+  if( inPhrase ){
+    /* unmatched quote */
+    queryClear(pQuery);
+    return SQLITE_ERROR;
+  }
+
+  /* Modify the values of the QueryTerm.nPhrase variables to account for
+  ** the NEAR operator. For the purposes of QueryTerm.nPhrase, phrases
+  ** and tokens connected by the NEAR operator are handled as a single
+  ** phrase. See comments above the QueryTerm structure for details.
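+  **
+  ** For example, given the query [one NEAR two], tokenizeSegment()
+  ** has already recorded nNear on the term for "one"; the loop below
+  ** then grows aTerm[0].nPhrase to 1, so "one two" is treated as a
+  ** single two-token unit by the doclist merging code.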
+ */ + aTerm = pQuery->pTerms; + for(ii=0; ii<pQuery->nTerms; ii++){ + if( aTerm[ii].nNear || aTerm[ii].nPhrase ){ + while (aTerm[ii+aTerm[ii].nPhrase].nNear) { + aTerm[ii].nPhrase += (1 + aTerm[ii+aTerm[ii].nPhrase+1].nPhrase); + } + } + } + + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int flushPendingTerms(fulltext_vtab *v); + +/* Perform a full-text query using the search expression in +** zInput[0..nInput-1]. Return a list of matching documents +** in pResult. +** +** Queries must match column iColumn. Or if iColumn>=nColumn +** they are allowed to match against any column. +*/ +static int fulltextQuery( + fulltext_vtab *v, /* The full text index */ + int iColumn, /* Match against this column by default */ + const char *zInput, /* The query string */ + int nInput, /* Number of bytes in zInput[] */ + DataBuffer *pResult, /* Write the result doclist here */ + Query *pQuery /* Put parsed query string here */ +){ + int i, iNext, rc; + DataBuffer left, right, or, new; + int nNot = 0; + QueryTerm *aTerm; + + /* TODO(shess) Instead of flushing pendingTerms, we could query for + ** the relevant term and merge the doclist into what we receive from + ** the database. Wait and see if this is a common issue, first. + ** + ** A good reason not to flush is to not generate update-related + ** error codes from here. + */ + + /* Flush any buffered updates before executing the query. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + + /* TODO(shess) I think that the queryClear() calls below are not + ** necessary, because fulltextClose() already clears the query. + */ + rc = parseQuery(v, zInput, nInput, iColumn, pQuery); + if( rc!=SQLITE_OK ) return rc; + + /* Empty or NULL queries return no results. */ + if( pQuery->nTerms==0 ){ + dataBufferInit(pResult, 0); + return SQLITE_OK; + } + + /* Merge AND terms. */ + /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). */ + aTerm = pQuery->pTerms; + for(i = 0; i<pQuery->nTerms; i=iNext){ + if( aTerm[i].isNot ){ + /* Handle all NOT terms in a separate pass */ + nNot++; + iNext = i + aTerm[i].nPhrase+1; + continue; + } + iNext = i + aTerm[i].nPhrase + 1; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + queryClear(pQuery); + return rc; + } + while( iNext<pQuery->nTerms && aTerm[iNext].isOr ){ + rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or); + iNext += aTerm[iNext].nPhrase + 1; + if( rc ){ + if( i!=nNot ) dataBufferDestroy(&left); + dataBufferDestroy(&right); + queryClear(pQuery); + return rc; + } + dataBufferInit(&new, 0); + docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&or); + right = new; + } + if( i==nNot ){ /* first term processed. 
*/ + left = right; + }else{ + dataBufferInit(&new, 0); + docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + } + + if( nNot==pQuery->nTerms ){ + /* We do not yet know how to handle a query of only NOT terms */ + return SQLITE_ERROR; + } + + /* Do the EXCEPT terms */ + for(i=0; i<pQuery->nTerms; i += aTerm[i].nPhrase + 1){ + if( !aTerm[i].isNot ) continue; + rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); + if( rc ){ + queryClear(pQuery); + dataBufferDestroy(&left); + return rc; + } + dataBufferInit(&new, 0); + docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new); + dataBufferDestroy(&right); + dataBufferDestroy(&left); + left = new; + } + + *pResult = left; + return rc; +} + +/* +** This is the xFilter interface for the virtual table. See +** the virtual table xFilter method documentation for additional +** information. +** +** If idxNum==QUERY_GENERIC then do a full table scan against +** the %_content table. +** +** If idxNum==QUERY_DOCID then do a docid lookup for a single entry +** in the %_content table. +** +** If idxNum>=QUERY_FULLTEXT then use the full text index. The +** column on the left-hand side of the MATCH operator is column +** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand +** side of the MATCH operator. +*/ +/* TODO(shess) Upgrade the cursor initialization and destruction to +** account for fulltextFilter() being called multiple times on the +** same cursor. The current solution is very fragile. Apply fix to +** fts3 as appropriate. +*/ +static int fulltextFilter( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, const char *idxStr, /* Which indexing scheme to use */ + int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ +){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + int rc; + + FTSTRACE(("FTS3 Filter %p\n",pCursor)); + + /* If the cursor has a statement that was not prepared according to + ** idxNum, clear it. I believe all calls to fulltextFilter with a + ** given cursor will have the same idxNum , but in this case it's + ** easy to be safe. + */ + if( c->pStmt && c->iCursorType!=idxNum ){ + sqlite3_finalize(c->pStmt); + c->pStmt = NULL; + } + + /* Get a fresh statement appropriate to idxNum. */ + /* TODO(shess): Add a prepared-statement cache in the vt structure. + ** The cache must handle multiple open cursors. Easier to cache the + ** statement variants at the vt to reduce malloc/realloc/free here. + ** Or we could have a StringBuffer variant which allowed stack + ** construction for small values. 
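+  **
+  ** For a two-column table like the "email" example, the statement
+  ** built below is, schematically,
+  **
+  **   SELECT docid, c0subject, c1body FROM %_content [WHERE docid = ?]
+  **
+  ** with the WHERE clause present for everything except QUERY_GENERIC
+  ** full scans.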
+ */ + if( !c->pStmt ){ + StringBuffer sb; + initStringBuffer(&sb); + append(&sb, "SELECT docid, "); + appendList(&sb, v->nColumn, v->azContentColumn); + append(&sb, " FROM %_content"); + if( idxNum!=QUERY_GENERIC ) append(&sb, " WHERE docid = ?"); + rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, + stringBufferData(&sb)); + stringBufferDestroy(&sb); + if( rc!=SQLITE_OK ) return rc; + c->iCursorType = idxNum; + }else{ + sqlite3_reset(c->pStmt); + assert( c->iCursorType==idxNum ); + } + + switch( idxNum ){ + case QUERY_GENERIC: + break; + + case QUERY_DOCID: + rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); + if( rc!=SQLITE_OK ) return rc; + break; + + default: /* full-text search */ + { + const char *zQuery = (const char *)sqlite3_value_text(argv[0]); + assert( idxNum<=QUERY_FULLTEXT+v->nColumn); + assert( argc==1 ); + queryClear(&c->q); + if( c->result.nData!=0 ){ + /* This case happens if the same cursor is used repeatedly. */ + dlrDestroy(&c->reader); + dataBufferReset(&c->result); + }else{ + dataBufferInit(&c->result, 0); + } + rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q); + if( rc!=SQLITE_OK ) return rc; + if( c->result.nData!=0 ){ + dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); + } + break; + } + } + + return fulltextNext(pCursor); +} + +/* This is the xEof method of the virtual table. The SQLite core +** calls this routine to find out if it has reached the end of +** a query's results set. +*/ +static int fulltextEof(sqlite3_vtab_cursor *pCursor){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + return c->eof; +} + +/* This is the xColumn method of the virtual table. The SQLite +** core calls this method during a query when it needs the value +** of a column from the virtual table. This method needs to use +** one of the sqlite3_result_*() routines to store the requested +** value back in the pContext. +*/ +static int fulltextColumn(sqlite3_vtab_cursor *pCursor, + sqlite3_context *pContext, int idxCol){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + fulltext_vtab *v = cursor_vtab(c); + + if( idxCol<v->nColumn ){ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); + sqlite3_result_value(pContext, pVal); + }else if( idxCol==v->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor + */ + sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); + }else if( idxCol==v->nColumn+1 ){ + /* The docid column, which is an alias for rowid. */ + sqlite3_value *pVal = sqlite3_column_value(c->pStmt, 0); + sqlite3_result_value(pContext, pVal); + } + return SQLITE_OK; +} + +/* This is the xRowid method. The SQLite core calls this routine to +** retrieve the rowid for the current row of the result set. fts3 +** exposes %_content.docid as the rowid for the virtual table. The +** rowid should be written to *pRowid. +*/ +static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + fulltext_cursor *c = (fulltext_cursor *) pCursor; + + *pRowid = sqlite3_column_int64(c->pStmt, 0); + return SQLITE_OK; +} + +/* Add all terms in [zText] to pendingTerms table. If [iColumn] > 0, +** we also store positions and offsets in the hash table using that +** column number. 
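+**
+** (More precisely, positions and offsets are stored whenever
+** iColumn>=0; deleteTerms() passes -1 to suppress them.) As an
+** example, indexing the text "hello world hello" as column 0 of
+** docid 5 leaves pendingTerms with a doclist for "hello" carrying
+** positions 0 and 2 and one for "world" carrying position 1,
+** assuming the default "simple" tokenizer.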
+*/ +static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, + const char *zText, int iColumn){ + sqlite3_tokenizer *pTokenizer = v->pTokenizer; + sqlite3_tokenizer_cursor *pCursor; + const char *pToken; + int nTokenBytes; + int iStartOffset, iEndOffset, iPosition; + int rc; + + rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); + if( rc!=SQLITE_OK ) return rc; + + pCursor->pTokenizer = pTokenizer; + while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor, + &pToken, &nTokenBytes, + &iStartOffset, &iEndOffset, + &iPosition)) ){ + DLCollector *p; + int nData; /* Size of doclist before our update. */ + + /* Positions can't be negative; we use -1 as a terminator + * internally. Token can't be NULL or empty. */ + if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){ + rc = SQLITE_ERROR; + break; + } + + p = fts3HashFind(&v->pendingTerms, pToken, nTokenBytes); + if( p==NULL ){ + nData = 0; + p = dlcNew(iDocid, DL_DEFAULT); + fts3HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); + + /* Overhead for our hash table entry, the key, and the value. */ + v->nPendingData += sizeof(struct fts3HashElem)+sizeof(*p)+nTokenBytes; + }else{ + nData = p->b.nData; + if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); + } + if( iColumn>=0 ){ + dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); + } + + /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ + v->nPendingData += p->b.nData-nData; + } + + /* TODO(shess) Check return? Should this be able to cause errors at + ** this point? Actually, same question about sqlite3_finalize(), + ** though one could argue that failure there means that the data is + ** not durable. *ponder* + */ + pTokenizer->pModule->xClose(pCursor); + if( SQLITE_DONE == rc ) return SQLITE_OK; + return rc; +} + +/* Add doclists for all terms in [pValues] to pendingTerms table. */ +static int insertTerms(fulltext_vtab *v, sqlite_int64 iDocid, + sqlite3_value **pValues){ + int i; + for(i = 0; i < v->nColumn ; ++i){ + char *zText = (char*)sqlite3_value_text(pValues[i]); + int rc = buildTerms(v, iDocid, zText, i); + if( rc!=SQLITE_OK ) return rc; + } + return SQLITE_OK; +} + +/* Add empty doclists for all terms in the given row's content to +** pendingTerms. +*/ +static int deleteTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + const char **pValues; + int i, rc; + + /* TODO(shess) Should we allow such tables at all? */ + if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; + + rc = content_select(v, iDocid, &pValues); + if( rc!=SQLITE_OK ) return rc; + + for(i = 0 ; i < v->nColumn; ++i) { + rc = buildTerms(v, iDocid, pValues[i], -1); + if( rc!=SQLITE_OK ) break; + } + + freeStringArray(v->nColumn, pValues); + return SQLITE_OK; +} + +/* TODO(shess) Refactor the code to remove this forward decl. */ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); + +/* Insert a row into the %_content table; set *piDocid to be the ID of the +** new row. Add doclists for terms to pendingTerms. +*/ +static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestDocid, + sqlite3_value **pValues, sqlite_int64 *piDocid){ + int rc; + + rc = content_insert(v, pRequestDocid, pValues); /* execute an SQL INSERT */ + if( rc!=SQLITE_OK ) return rc; + + /* docid column is an alias for rowid. */ + *piDocid = sqlite3_last_insert_rowid(v->db); + rc = initPendingTerms(v, *piDocid); + if( rc!=SQLITE_OK ) return rc; + + return insertTerms(v, *piDocid, pValues); +} + +/* Delete a row from the %_content table; add empty doclists for terms +** to pendingTerms. 
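+**
+** The empty doclists act as tombstones: when segments are merged, a
+** docid whose newest doclist for a term is empty drops out of the
+** merged output, which is how deletion propagates through the
+** otherwise append-only index.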
+*/ +static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + return content_delete(v, iRow); /* execute an SQL DELETE */ +} + +/* Update a row in the %_content table; add delete doclists to +** pendingTerms for old terms not in the new data, add insert doclists +** to pendingTerms for terms in the new data. +*/ +static int index_update(fulltext_vtab *v, sqlite_int64 iRow, + sqlite3_value **pValues){ + int rc = initPendingTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + /* Generate an empty doclist for each term that previously appeared in this + * row. */ + rc = deleteTerms(v, iRow); + if( rc!=SQLITE_OK ) return rc; + + rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ + if( rc!=SQLITE_OK ) return rc; + + /* Now add positions for terms which appear in the updated row. */ + return insertTerms(v, iRow, pValues); +} + +/*******************************************************************/ +/* InteriorWriter is used to collect terms and block references into +** interior nodes in %_segments. See commentary at top of file for +** format. +*/ + +/* How large interior nodes can grow. */ +#define INTERIOR_MAX 2048 + +/* Minimum number of terms per interior node (except the root). This +** prevents large terms from making the tree too skinny - must be >0 +** so that the tree always makes progress. Note that the min tree +** fanout will be INTERIOR_MIN_TERMS+1. +*/ +#define INTERIOR_MIN_TERMS 7 +#if INTERIOR_MIN_TERMS<1 +# error INTERIOR_MIN_TERMS must be greater than 0. +#endif + +/* ROOT_MAX controls how much data is stored inline in the segment +** directory. +*/ +/* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's +** only here so that interiorWriterRootInfo() and leafWriterRootInfo() +** can both see it, but if the caller passed it in, we wouldn't even +** need a define. +*/ +#define ROOT_MAX 1024 +#if ROOT_MAX<VARINT_MAX*2 +# error ROOT_MAX must have enough space for a header. +#endif + +/* InteriorBlock stores a linked-list of interior blocks while a lower +** layer is being constructed. +*/ +typedef struct InteriorBlock { + DataBuffer term; /* Leftmost term in block's subtree. */ + DataBuffer data; /* Accumulated data for the block. */ + struct InteriorBlock *next; +} InteriorBlock; + +static InteriorBlock *interiorBlockNew(int iHeight, sqlite_int64 iChildBlock, + const char *pTerm, int nTerm){ + InteriorBlock *block = sqlite3_malloc(sizeof(InteriorBlock)); + char c[VARINT_MAX+VARINT_MAX]; + int n; + + if( block ){ + memset(block, 0, sizeof(*block)); + dataBufferInit(&block->term, 0); + dataBufferReplace(&block->term, pTerm, nTerm); + + n = fts3PutVarint(c, iHeight); + n += fts3PutVarint(c+n, iChildBlock); + dataBufferInit(&block->data, INTERIOR_MAX); + dataBufferReplace(&block->data, c, n); + } + return block; +} + +#ifndef NDEBUG +/* Verify that the data is readable as an interior node. */ +static void interiorBlockValidate(InteriorBlock *pBlock){ + const char *pData = pBlock->data.pData; + int nData = pBlock->data.nData; + int n, iDummy; + sqlite_int64 iBlockid; + + assert( nData>0 ); + assert( pData!=0 ); + assert( pData+nData>pData ); + + /* Must lead with height of node as a varint(n), n>0 */ + n = fts3GetVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n<nData ); + pData += n; + nData -= n; + + /* Must contain iBlockid. 
*/ + n = fts3GetVarint(pData, &iBlockid); + assert( n>0 ); + assert( n<=nData ); + pData += n; + nData -= n; + + /* Zero or more terms of positive length */ + if( nData!=0 ){ + /* First term is not delta-encoded. */ + n = fts3GetVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + + /* Following terms delta-encoded. */ + while( nData!=0 ){ + /* Length of shared prefix. */ + n = fts3GetVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>=0 ); + assert( n<nData ); + pData += n; + nData -= n; + + /* Length and data of distinct suffix. */ + n = fts3GetVarint32(pData, &iDummy); + assert( n>0 ); + assert( iDummy>0 ); + assert( n+iDummy>0); + assert( n+iDummy<=nData ); + pData += n+iDummy; + nData -= n+iDummy; + } + } +} +#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) +#else +#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) +#endif + +typedef struct InteriorWriter { + int iHeight; /* from 0 at leaves. */ + InteriorBlock *first, *last; + struct InteriorWriter *parentWriter; + + DataBuffer term; /* Last term written to block "last". */ + sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ +#ifndef NDEBUG + sqlite_int64 iLastChildBlock; /* for consistency checks. */ +#endif +} InteriorWriter; + +/* Initialize an interior node where pTerm[nTerm] marks the leftmost +** term in the tree. iChildBlock is the leftmost child block at the +** next level down the tree. +*/ +static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, + sqlite_int64 iChildBlock, + InteriorWriter *pWriter){ + InteriorBlock *block; + assert( iHeight>0 ); + CLEAR(pWriter); + + pWriter->iHeight = iHeight; + pWriter->iOpeningChildBlock = iChildBlock; +#ifndef NDEBUG + pWriter->iLastChildBlock = iChildBlock; +#endif + block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); + pWriter->last = pWriter->first = block; + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + dataBufferInit(&pWriter->term, 0); +} + +/* Append the child node rooted at iChildBlock to the interior node, +** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. +*/ +static void interiorWriterAppend(InteriorWriter *pWriter, + const char *pTerm, int nTerm, + sqlite_int64 iChildBlock){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); + + /* The first term written into an interior node is actually + ** associated with the second child added (the first child was added + ** in interiorWriterInit, or in the if clause at the bottom of this + ** function). That term gets encoded straight up, with nPrefix left + ** at 0. + */ + if( pWriter->term.nData==0 ){ + n = fts3PutVarint(c, nTerm); + }else{ + while( nPrefix<pWriter->term.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + } + + n = fts3PutVarint(c, nPrefix); + n += fts3PutVarint(c+n, nTerm-nPrefix); + } + +#ifndef NDEBUG + pWriter->iLastChildBlock++; +#endif + assert( pWriter->iLastChildBlock==iChildBlock ); + + /* Overflow to a new block if the new term makes the current block + ** too big, and the current block already has enough terms. 
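+  ** Editor's illustration with hypothetical numbers: with 2040 bytes
+  ** already in the block and a delta-encoded entry needing 16 more,
+  ** 2056 exceeds INTERIOR_MAX (2048), so a fresh block is started,
+  ** provided the current block already holds more than
+  ** INTERIOR_MIN_TERMS children; that proviso keeps a run of very
+  ** large terms from starving the tree's fanout.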
+  */
+  if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX &&
+      iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){
+    pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock,
+                                           pTerm, nTerm);
+    pWriter->last = pWriter->last->next;
+    pWriter->iOpeningChildBlock = iChildBlock;
+    dataBufferReset(&pWriter->term);
+  }else{
+    dataBufferAppend2(&pWriter->last->data, c, n,
+                      pTerm+nPrefix, nTerm-nPrefix);
+    dataBufferReplace(&pWriter->term, pTerm, nTerm);
+  }
+  ASSERT_VALID_INTERIOR_BLOCK(pWriter->last);
+}
+
+/* Free the space used by pWriter, including the linked-list of
+** InteriorBlocks, and parentWriter, if present.
+*/
+static int interiorWriterDestroy(InteriorWriter *pWriter){
+  InteriorBlock *block = pWriter->first;
+
+  while( block!=NULL ){
+    InteriorBlock *b = block;
+    block = block->next;
+    dataBufferDestroy(&b->term);
+    dataBufferDestroy(&b->data);
+    sqlite3_free(b);
+  }
+  if( pWriter->parentWriter!=NULL ){
+    interiorWriterDestroy(pWriter->parentWriter);
+    sqlite3_free(pWriter->parentWriter);
+  }
+  dataBufferDestroy(&pWriter->term);
+  SCRAMBLE(pWriter);
+  return SQLITE_OK;
+}
+
+/* If pWriter can fit entirely in ROOT_MAX, return it as the root info
+** directly, leaving *piEndBlockid unchanged.  Otherwise, flush
+** pWriter to %_segments, building a new layer of interior nodes, and
+** recursively ask for their root info.
+*/
+static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter,
+                                  char **ppRootInfo, int *pnRootInfo,
+                                  sqlite_int64 *piEndBlockid){
+  InteriorBlock *block = pWriter->first;
+  sqlite_int64 iBlockid = 0;
+  int rc;
+
+  /* If we can fit the segment inline */
+  if( block==pWriter->last && block->data.nData<ROOT_MAX ){
+    *ppRootInfo = block->data.pData;
+    *pnRootInfo = block->data.nData;
+    return SQLITE_OK;
+  }
+
+  /* Flush the first block to %_segments, and create a new level of
+  ** interior node.
+  */
+  ASSERT_VALID_INTERIOR_BLOCK(block);
+  rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid);
+  if( rc!=SQLITE_OK ) return rc;
+  *piEndBlockid = iBlockid;
+
+  pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter));
+  interiorWriterInit(pWriter->iHeight+1,
+                     block->term.pData, block->term.nData,
+                     iBlockid, pWriter->parentWriter);
+
+  /* Flush additional blocks and append to the higher interior
+  ** node.
+  */
+  for(block=block->next; block!=NULL; block=block->next){
+    ASSERT_VALID_INTERIOR_BLOCK(block);
+    rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid);
+    if( rc!=SQLITE_OK ) return rc;
+    *piEndBlockid = iBlockid;
+
+    interiorWriterAppend(pWriter->parentWriter,
+                         block->term.pData, block->term.nData, iBlockid);
+  }
+
+  /* Parent node gets the chance to be the root. */
+  return interiorWriterRootInfo(v, pWriter->parentWriter,
+                                ppRootInfo, pnRootInfo, piEndBlockid);
+}
+
+/****************************************************************/
+/* InteriorReader is used to read off the data from an interior node
+** (see comment at top of file for the format).
+*/
+typedef struct InteriorReader {
+  const char *pData;
+  int nData;
+
+  DataBuffer term;          /* previous term, for decoding term delta. */
+
+  sqlite_int64 iBlockid;
+} InteriorReader;
+
+static void interiorReaderDestroy(InteriorReader *pReader){
+  dataBufferDestroy(&pReader->term);
+  SCRAMBLE(pReader);
+}
+
+/* TODO(shess) The assertions are great, but what if we're in NDEBUG
+** and the blob is empty or otherwise contains suspect data?
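+**
+** For orientation, the node decoded below looks like this (editor's
+** sketch with hypothetical values):
+**
+**   varint(iHeight=1) varint(iBlockid=37) varint(5) "apple" ...
+**
+** The height is read as a single leading byte, the base blockid
+** follows, and the cursor is then left on the first (non-delta) term.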
+*/ +static void interiorReaderInit(const char *pData, int nData, + InteriorReader *pReader){ + int n, nTerm; + + /* Require at least the leading flag byte */ + assert( nData>0 ); + assert( pData[0]!='\0' ); + + CLEAR(pReader); + + /* Decode the base blockid, and set the cursor to the first term. */ + n = fts3GetVarint(pData+1, &pReader->iBlockid); + assert( 1+n<=nData ); + pReader->pData = pData+1+n; + pReader->nData = nData-(1+n); + + /* A single-child interior node (such as when a leaf node was too + ** large for the segment directory) won't have any terms. + ** Otherwise, decode the first term. + */ + if( pReader->nData==0 ){ + dataBufferInit(&pReader->term, 0); + }else{ + n = fts3GetVarint32(pReader->pData, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); + assert( n+nTerm<=pReader->nData ); + pReader->pData += n+nTerm; + pReader->nData -= n+nTerm; + } +} + +static int interiorReaderAtEnd(InteriorReader *pReader){ + return pReader->term.nData==0; +} + +static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ + return pReader->iBlockid; +} + +static int interiorReaderTermBytes(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.nData; +} +static const char *interiorReaderTerm(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + return pReader->term.pData; +} + +/* Step forward to the next term in the node. */ +static void interiorReaderStep(InteriorReader *pReader){ + assert( !interiorReaderAtEnd(pReader) ); + + /* If the last term has been read, signal eof, else construct the + ** next term. + */ + if( pReader->nData==0 ){ + dataBufferReset(&pReader->term); + }else{ + int n, nPrefix, nSuffix; + + n = fts3GetVarint32(pReader->pData, &nPrefix); + n += fts3GetVarint32(pReader->pData+n, &nSuffix); + + /* Truncate the current term and append suffix data. */ + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + assert( n+nSuffix<=pReader->nData ); + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } + pReader->iBlockid++; +} + +/* Compare the current term to pTerm[nTerm], returning strcmp-style +** results. If isPrefix, equality means equal through nTerm bytes. +*/ +static int interiorReaderTermCmp(InteriorReader *pReader, + const char *pTerm, int nTerm, int isPrefix){ + const char *pReaderTerm = interiorReaderTerm(pReader); + int nReaderTerm = interiorReaderTermBytes(pReader); + int c, n = nReaderTerm<nTerm ? nReaderTerm : nTerm; + + if( n==0 ){ + if( nReaderTerm>0 ) return -1; + if( nTerm>0 ) return 1; + return 0; + } + + c = memcmp(pReaderTerm, pTerm, n); + if( c!=0 ) return c; + if( isPrefix && n==nTerm ) return 0; + return nReaderTerm - nTerm; +} + +/****************************************************************/ +/* LeafWriter is used to collect terms and associated doclist data +** into leaf blocks in %_segments (see top of file for format info). +** Expected usage is: +** +** LeafWriter writer; +** leafWriterInit(0, 0, &writer); +** while( sorted_terms_left_to_process ){ +** // data is doclist data for that term. +** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); +** if( rc!=SQLITE_OK ) goto err; +** } +** rc = leafWriterFinalize(v, &writer); +**err: +** leafWriterDestroy(&writer); +** return rc; +** +** leafWriterStep() may write a collected leaf out to %_segments. +** leafWriterFinalize() finishes writing any buffered data and stores +** a root node in %_segdir. 
leafWriterDestroy() frees all buffers and
+** InteriorWriters allocated as part of writing this segment.
+**
+** TODO(shess) Document leafWriterStepMerge().
+*/
+
+/* Put terms with data this big in their own block. */
+#define STANDALONE_MIN 1024
+
+/* Keep leaf blocks below this size. */
+#define LEAF_MAX 2048
+
+typedef struct LeafWriter {
+  int iLevel;
+  int idx;
+  sqlite_int64 iStartBlockid;     /* needed to create the root info */
+  sqlite_int64 iEndBlockid;       /* when we're done writing. */
+
+  DataBuffer term;                /* previous encoded term */
+  DataBuffer data;                /* encoding buffer */
+
+  /* bytes of first term in the current node which distinguishes that
+  ** term from the last term of the previous node.
+  */
+  int nTermDistinct;
+
+  InteriorWriter parentWriter;    /* if we overflow */
+  int has_parent;
+} LeafWriter;
+
+static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){
+  CLEAR(pWriter);
+  pWriter->iLevel = iLevel;
+  pWriter->idx = idx;
+
+  dataBufferInit(&pWriter->term, 32);
+
+  /* Start out with a reasonably sized block, though it can grow. */
+  dataBufferInit(&pWriter->data, LEAF_MAX);
+}
+
+#ifndef NDEBUG
+/* Verify that the data is readable as a leaf node. */
+static void leafNodeValidate(const char *pData, int nData){
+  int n, iDummy;
+
+  if( nData==0 ) return;
+  assert( nData>0 );
+  assert( pData!=0 );
+  assert( pData+nData>pData );
+
+  /* Must lead with a varint(0) */
+  n = fts3GetVarint32(pData, &iDummy);
+  assert( iDummy==0 );
+  assert( n>0 );
+  assert( n<nData );
+  pData += n;
+  nData -= n;
+
+  /* Leading term length and data must fit in buffer. */
+  n = fts3GetVarint32(pData, &iDummy);
+  assert( n>0 );
+  assert( iDummy>0 );
+  assert( n+iDummy>0 );
+  assert( n+iDummy<nData );
+  pData += n+iDummy;
+  nData -= n+iDummy;
+
+  /* Leading term's doclist length and data must fit. */
+  n = fts3GetVarint32(pData, &iDummy);
+  assert( n>0 );
+  assert( iDummy>0 );
+  assert( n+iDummy>0 );
+  assert( n+iDummy<=nData );
+  ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL);
+  pData += n+iDummy;
+  nData -= n+iDummy;
+
+  /* Verify that trailing terms and doclists also are readable. */
+  while( nData!=0 ){
+    n = fts3GetVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>=0 );
+    assert( n<nData );
+    pData += n;
+    nData -= n;
+    n = fts3GetVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>0 );
+    assert( n+iDummy>0 );
+    assert( n+iDummy<nData );
+    pData += n+iDummy;
+    nData -= n+iDummy;
+
+    n = fts3GetVarint32(pData, &iDummy);
+    assert( n>0 );
+    assert( iDummy>0 );
+    assert( n+iDummy>0 );
+    assert( n+iDummy<=nData );
+    ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL);
+    pData += n+iDummy;
+    nData -= n+iDummy;
+  }
+}
+#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n)
+#else
+#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 )
+#endif
+
+/* Flush the current leaf node to %_segments, adding the resulting
+** blockid and the starting term to the interior node which will
+** contain it.
+*/
+static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter,
+                                   int iData, int nData){
+  sqlite_int64 iBlockid = 0;
+  const char *pStartingTerm;
+  int nStartingTerm, rc, n;
+
+  /* Must have the leading varint(0) flag, plus at least some
+  ** valid-looking data.
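+  ** For reference, a well-formed leaf begins (editor's sketch with a
+  ** hypothetical term and lengths):
+  **
+  **   varint(0) varint(3) "cat" varint(nDoclist) <doclist bytes> ...
+  **
+  ** so a buffer of two bytes or fewer cannot be a valid node.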
+  */
+  assert( nData>2 );
+  assert( iData>=0 );
+  assert( iData+nData<=pWriter->data.nData );
+  ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData);
+
+  rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid);
+  if( rc!=SQLITE_OK ) return rc;
+  assert( iBlockid!=0 );
+
+  /* Reconstruct the first term in the leaf for purposes of building
+  ** the interior node.
+  */
+  n = fts3GetVarint32(pWriter->data.pData+iData+1, &nStartingTerm);
+  pStartingTerm = pWriter->data.pData+iData+1+n;
+  assert( pWriter->data.nData>iData+1+n+nStartingTerm );
+  assert( pWriter->nTermDistinct>0 );
+  assert( pWriter->nTermDistinct<=nStartingTerm );
+  nStartingTerm = pWriter->nTermDistinct;
+
+  if( pWriter->has_parent ){
+    interiorWriterAppend(&pWriter->parentWriter,
+                         pStartingTerm, nStartingTerm, iBlockid);
+  }else{
+    interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid,
+                       &pWriter->parentWriter);
+    pWriter->has_parent = 1;
+  }
+
+  /* Track the span of this segment's leaf nodes. */
+  if( pWriter->iEndBlockid==0 ){
+    pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid;
+  }else{
+    pWriter->iEndBlockid++;
+    assert( iBlockid==pWriter->iEndBlockid );
+  }
+
+  return SQLITE_OK;
+}
+static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){
+  int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Re-initialize the output buffer. */
+  dataBufferReset(&pWriter->data);
+
+  return SQLITE_OK;
+}
+
+/* Fetch the root info for the segment.  If the entire leaf fits
+** within ROOT_MAX, then it will be returned directly, otherwise it
+** will be flushed and the root info will be returned from the
+** interior node.  *piEndBlockid is set to the blockid of the last
+** interior or leaf node written to disk (0 if none are written at
+** all).
+*/
+static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter,
+                              char **ppRootInfo, int *pnRootInfo,
+                              sqlite_int64 *piEndBlockid){
+  /* we can fit the segment entirely inline */
+  if( !pWriter->has_parent && pWriter->data.nData<ROOT_MAX ){
+    *ppRootInfo = pWriter->data.pData;
+    *pnRootInfo = pWriter->data.nData;
+    *piEndBlockid = 0;
+    return SQLITE_OK;
+  }
+
+  /* Flush remaining leaf data. */
+  if( pWriter->data.nData>0 ){
+    int rc = leafWriterFlush(v, pWriter);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+
+  /* We must have flushed a leaf at some point. */
+  assert( pWriter->has_parent );
+
+  /* Tentatively set the end leaf blockid as the end blockid.  If the
+  ** interior node can be returned inline, this will be the final
+  ** blockid, otherwise it will be overwritten by
+  ** interiorWriterRootInfo().
+  */
+  *piEndBlockid = pWriter->iEndBlockid;
+
+  return interiorWriterRootInfo(v, &pWriter->parentWriter,
+                                ppRootInfo, pnRootInfo, piEndBlockid);
+}
+
+/* Collect the rootInfo data and store it into the segment directory.
+** This has the effect of flushing the segment's leaf data to
+** %_segments, and also flushing any interior nodes to %_segments.
+*/
+static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){
+  sqlite_int64 iEndBlockid;
+  char *pRootInfo;
+  int rc, nRootInfo;
+
+  rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Don't bother storing an entirely empty segment.
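+  ** As the call below spells out, the %_segdir row records the
+  ** segment's level and idx, the [iStartBlockid, iEndBlockid] span of
+  ** its leaf blocks, the blockid of the last node written, and the
+  ** inline root data.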
*/ + if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; + + return segdir_set(v, pWriter->iLevel, pWriter->idx, + pWriter->iStartBlockid, pWriter->iEndBlockid, + iEndBlockid, pRootInfo, nRootInfo); +} + +static void leafWriterDestroy(LeafWriter *pWriter){ + if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); + dataBufferDestroy(&pWriter->term); + dataBufferDestroy(&pWriter->data); +} + +/* Encode a term into the leafWriter, delta-encoding as appropriate. +** Returns the length of the new term which distinguishes it from the +** previous term, which can be used to set nTermDistinct when a node +** boundary is crossed. +*/ +static int leafWriterEncodeTerm(LeafWriter *pWriter, + const char *pTerm, int nTerm){ + char c[VARINT_MAX+VARINT_MAX]; + int n, nPrefix = 0; + + assert( nTerm>0 ); + while( nPrefix<pWriter->term.nData && + pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ + nPrefix++; + /* Failing this implies that the terms weren't in order. */ + assert( nPrefix<nTerm ); + } + + if( pWriter->data.nData==0 ){ + /* Encode the node header and leading term as: + ** varint(0) + ** varint(nTerm) + ** char pTerm[nTerm] + */ + n = fts3PutVarint(c, '\0'); + n += fts3PutVarint(c+n, nTerm); + dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); + }else{ + /* Delta-encode the term as: + ** varint(nPrefix) + ** varint(nSuffix) + ** char pTermSuffix[nSuffix] + */ + n = fts3PutVarint(c, nPrefix); + n += fts3PutVarint(c+n, nTerm-nPrefix); + dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); + } + dataBufferReplace(&pWriter->term, pTerm, nTerm); + + return nPrefix+1; +} + +/* Used to avoid a memmove when a large amount of doclist data is in +** the buffer. This constructs a node and term header before +** iDoclistData and flushes the resulting complete node using +** leafWriterInternalFlush(). +*/ +static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + int iDoclistData){ + char c[VARINT_MAX+VARINT_MAX]; + int iData, n = fts3PutVarint(c, 0); + n += fts3PutVarint(c+n, nTerm); + + /* There should always be room for the header. Even if pTerm shared + ** a substantial prefix with the previous term, the entire prefix + ** could be constructed from earlier data in the doclist, so there + ** should be room. + */ + assert( iDoclistData>=n+nTerm ); + + iData = iDoclistData-(n+nTerm); + memcpy(pWriter->data.pData+iData, c, n); + memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); + + return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + DLReader *pReaders, int nReaders){ + char c[VARINT_MAX+VARINT_MAX]; + int iTermData = pWriter->data.nData, iDoclistData; + int i, nData, n, nActualData, nActual, rc, nTermDistinct; + + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); + + /* Remember nTermDistinct if opening a new node. */ + if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; + + iDoclistData = pWriter->data.nData; + + /* Estimate the length of the merged doclist so we can leave space + ** to encode it. 
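+  ** Editor's illustration with hypothetical sizes: doclists of 100,
+  ** 60 and 40 bytes give an estimate of 200, which needs a two-byte
+  ** varint.  The merged output can only be smaller (entries for
+  ** duplicate docids collapse), so the code below re-encodes the real
+  ** length afterwards and, for small doclists, memmove()s the data
+  ** down over any spare varint bytes.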
+  */
+  for(i=0, nData=0; i<nReaders; i++){
+    nData += dlrAllDataBytes(&pReaders[i]);
+  }
+  n = fts3PutVarint(c, nData);
+  dataBufferAppend(&pWriter->data, c, n);
+
+  docListMerge(&pWriter->data, pReaders, nReaders);
+  ASSERT_VALID_DOCLIST(DL_DEFAULT,
+                       pWriter->data.pData+iDoclistData+n,
+                       pWriter->data.nData-iDoclistData-n, NULL);
+
+  /* The actual amount of doclist data at this point could be smaller
+  ** than the length we encoded.  Additionally, the space required to
+  ** encode this length could be smaller.  For small doclists, this is
+  ** not a big deal, we can just use memmove() to adjust things.
+  */
+  nActualData = pWriter->data.nData-(iDoclistData+n);
+  nActual = fts3PutVarint(c, nActualData);
+  assert( nActualData<=nData );
+  assert( nActual<=n );
+
+  /* If the new doclist is big enough to force a standalone leaf
+  ** node, we can immediately flush it inline without doing the
+  ** memmove().
+  */
+  /* TODO(shess) This test matches leafWriterStep(), which does this
+  ** test before it knows the cost to varint-encode the term and
+  ** doclist lengths.  At some point, change to
+  ** pWriter->data.nData-iTermData>STANDALONE_MIN.
+  */
+  if( nTerm+nActualData>STANDALONE_MIN ){
+    /* Push leaf node from before this term. */
+    if( iTermData>0 ){
+      rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
+      if( rc!=SQLITE_OK ) return rc;
+
+      pWriter->nTermDistinct = nTermDistinct;
+    }
+
+    /* Fix the encoded doclist length. */
+    iDoclistData += n - nActual;
+    memcpy(pWriter->data.pData+iDoclistData, c, nActual);
+
+    /* Push the standalone leaf node. */
+    rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData);
+    if( rc!=SQLITE_OK ) return rc;
+
+    /* Leave the node empty. */
+    dataBufferReset(&pWriter->data);
+
+    return rc;
+  }
+
+  /* At this point, we know that the doclist was small, so do the
+  ** memmove if indicated.
+  */
+  if( nActual<n ){
+    memmove(pWriter->data.pData+iDoclistData+nActual,
+            pWriter->data.pData+iDoclistData+n,
+            pWriter->data.nData-(iDoclistData+n));
+    pWriter->data.nData -= n-nActual;
+  }
+
+  /* Replace written length with actual length. */
+  memcpy(pWriter->data.pData+iDoclistData, c, nActual);
+
+  /* If the node is too large, break things up. */
+  /* TODO(shess) This test matches leafWriterStep(), which does this
+  ** test before it knows the cost to varint-encode the term and
+  ** doclist lengths.  At some point, change to
+  ** pWriter->data.nData>LEAF_MAX.
+  */
+  if( iTermData+nTerm+nActualData>LEAF_MAX ){
+    /* Flush out the leading data as a node */
+    rc = leafWriterInternalFlush(v, pWriter, 0, iTermData);
+    if( rc!=SQLITE_OK ) return rc;
+
+    pWriter->nTermDistinct = nTermDistinct;
+
+    /* Rebuild header using the current term */
+    n = fts3PutVarint(pWriter->data.pData, 0);
+    n += fts3PutVarint(pWriter->data.pData+n, nTerm);
+    memcpy(pWriter->data.pData+n, pTerm, nTerm);
+    n += nTerm;
+
+    /* There should always be room, because the previous encoding
+    ** included all data necessary to construct the term.
+    */
+    assert( n<iDoclistData );
+    /* So long as STANDALONE_MIN is half or less of LEAF_MAX, the
+    ** following memcpy() is safe (as opposed to needing a memmove).
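+    ** Editor's gloss: the tail being copied is a doclist of at most
+    ** STANDALONE_MIN bytes (anything larger took the standalone path
+    ** above), and it lands at offset n, well before iDoclistData, so
+    ** the assert below can insist the two regions are disjoint.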
+ */ + assert( 2*STANDALONE_MIN<=LEAF_MAX ); + assert( n+pWriter->data.nData-iDoclistData<iDoclistData ); + memcpy(pWriter->data.pData+n, + pWriter->data.pData+iDoclistData, + pWriter->data.nData-iDoclistData); + pWriter->data.nData -= iDoclistData-n; + } + ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); + + return SQLITE_OK; +} + +/* Push pTerm[nTerm] along with the doclist data to the leaf layer of +** %_segments. +*/ +/* TODO(shess) Revise writeZeroSegment() so that doclists are +** constructed directly in pWriter->data. +*/ +static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, + const char *pTerm, int nTerm, + const char *pData, int nData){ + int rc; + DLReader reader; + + dlrInit(&reader, DL_DEFAULT, pData, nData); + rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); + dlrDestroy(&reader); + + return rc; +} + + +/****************************************************************/ +/* LeafReader is used to iterate over an individual leaf node. */ +typedef struct LeafReader { + DataBuffer term; /* copy of current term. */ + + const char *pData; /* data for current term. */ + int nData; +} LeafReader; + +static void leafReaderDestroy(LeafReader *pReader){ + dataBufferDestroy(&pReader->term); + SCRAMBLE(pReader); +} + +static int leafReaderAtEnd(LeafReader *pReader){ + return pReader->nData<=0; +} + +/* Access the current term. */ +static int leafReaderTermBytes(LeafReader *pReader){ + return pReader->term.nData; +} +static const char *leafReaderTerm(LeafReader *pReader){ + assert( pReader->term.nData>0 ); + return pReader->term.pData; +} + +/* Access the doclist data for the current term. */ +static int leafReaderDataBytes(LeafReader *pReader){ + int nData; + assert( pReader->term.nData>0 ); + fts3GetVarint32(pReader->pData, &nData); + return nData; +} +static const char *leafReaderData(LeafReader *pReader){ + int n, nData; + assert( pReader->term.nData>0 ); + n = fts3GetVarint32(pReader->pData, &nData); + return pReader->pData+n; +} + +static void leafReaderInit(const char *pData, int nData, + LeafReader *pReader){ + int nTerm, n; + + assert( nData>0 ); + assert( pData[0]=='\0' ); + + CLEAR(pReader); + + /* Read the first term, skipping the header byte. */ + n = fts3GetVarint32(pData+1, &nTerm); + dataBufferInit(&pReader->term, nTerm); + dataBufferReplace(&pReader->term, pData+1+n, nTerm); + + /* Position after the first term. */ + assert( 1+n+nTerm<nData ); + pReader->pData = pData+1+n+nTerm; + pReader->nData = nData-1-n-nTerm; +} + +/* Step the reader forward to the next term. */ +static void leafReaderStep(LeafReader *pReader){ + int n, nData, nPrefix, nSuffix; + assert( !leafReaderAtEnd(pReader) ); + + /* Skip previous entry's data block. */ + n = fts3GetVarint32(pReader->pData, &nData); + assert( n+nData<=pReader->nData ); + pReader->pData += n+nData; + pReader->nData -= n+nData; + + if( !leafReaderAtEnd(pReader) ){ + /* Construct the new term using a prefix from the old term plus a + ** suffix from the leaf data. + */ + n = fts3GetVarint32(pReader->pData, &nPrefix); + n += fts3GetVarint32(pReader->pData+n, &nSuffix); + assert( n+nSuffix<pReader->nData ); + pReader->term.nData = nPrefix; + dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); + + pReader->pData += n+nSuffix; + pReader->nData -= n+nSuffix; + } +} + +/* strcmp-style comparison of pReader's current term against pTerm. +** If isPrefix, equality means equal through nTerm bytes. 
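+**
+** E.g. (editor's examples): with the reader positioned on "apple",
+** a call with pTerm="app", nTerm=3, isPrefix=1 returns 0 (prefix
+** match), while the same call with isPrefix=0 returns a positive
+** value because "apple" is longer than "app".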
+*/
+static int leafReaderTermCmp(LeafReader *pReader,
+                             const char *pTerm, int nTerm, int isPrefix){
+  int c, n = pReader->term.nData<nTerm ? pReader->term.nData : nTerm;
+  if( n==0 ){
+    if( pReader->term.nData>0 ) return -1;
+    if( nTerm>0 ) return 1;
+    return 0;
+  }
+
+  c = memcmp(pReader->term.pData, pTerm, n);
+  if( c!=0 ) return c;
+  if( isPrefix && n==nTerm ) return 0;
+  return pReader->term.nData - nTerm;
+}
+
+
+/****************************************************************/
+/* LeavesReader wraps LeafReader to allow iterating over the entire
+** leaf layer of the tree.
+*/
+typedef struct LeavesReader {
+  int idx;                  /* Index within the segment. */
+
+  sqlite3_stmt *pStmt;      /* Statement we're streaming leaves from. */
+  int eof;                  /* we've seen SQLITE_DONE from pStmt. */
+
+  LeafReader leafReader;    /* reader for the current leaf. */
+  DataBuffer rootData;      /* root data for inline. */
+} LeavesReader;
+
+/* Access the current term. */
+static int leavesReaderTermBytes(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderTermBytes(&pReader->leafReader);
+}
+static const char *leavesReaderTerm(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderTerm(&pReader->leafReader);
+}
+
+/* Access the doclist data for the current term. */
+static int leavesReaderDataBytes(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderDataBytes(&pReader->leafReader);
+}
+static const char *leavesReaderData(LeavesReader *pReader){
+  assert( !pReader->eof );
+  return leafReaderData(&pReader->leafReader);
+}
+
+static int leavesReaderAtEnd(LeavesReader *pReader){
+  return pReader->eof;
+}
+
+/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus
+** leaving the statement handle open, which locks the table.
+*/
+/* TODO(shess) This "solution" is not satisfactory.  Really, there
+** should be a check-in function for all statement handles which
+** arranges to call sqlite3_reset().  This most likely will require
+** modification to control flow all over the place, though, so for now
+** just punt.
+**
+** Note that the current system assumes that segment merges will run
+** to completion, which is why this particular problem hasn't arisen
+** in this case.  Probably a brittle assumption.
+*/
+static int leavesReaderReset(LeavesReader *pReader){
+  return sqlite3_reset(pReader->pStmt);
+}
+
+static void leavesReaderDestroy(LeavesReader *pReader){
+  /* If idx is -1, that means we're using a non-cached statement
+  ** handle in the optimize() case, so we need to release it.
+  */
+  if( pReader->pStmt!=NULL && pReader->idx==-1 ){
+    sqlite3_finalize(pReader->pStmt);
+  }
+  leafReaderDestroy(&pReader->leafReader);
+  dataBufferDestroy(&pReader->rootData);
+  SCRAMBLE(pReader);
+}
+
+/* Initialize pReader with the given root data (if iStartBlockid==0
+** the leaf data was entirely contained in the root), or from the
+** stream of blocks between iStartBlockid and iEndBlockid, inclusive.
+*/
+static int leavesReaderInit(fulltext_vtab *v,
+                            int idx,
+                            sqlite_int64 iStartBlockid,
+                            sqlite_int64 iEndBlockid,
+                            const char *pRootData, int nRootData,
+                            LeavesReader *pReader){
+  CLEAR(pReader);
+  pReader->idx = idx;
+
+  dataBufferInit(&pReader->rootData, 0);
+  if( iStartBlockid==0 ){
+    /* Entire leaf level fit in root data.
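+    ** Editor's note: iStartBlockid==0 is the case where
+    ** leafWriterRootInfo() kept the whole segment inline in %_segdir;
+    ** the root blob is copied into rootData so the LeafReader below
+    ** can hold stable pointers into it.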
*/ + dataBufferReplace(&pReader->rootData, pRootData, nRootData); + leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, + &pReader->leafReader); + }else{ + sqlite3_stmt *s; + int rc = sql_get_leaf_statement(v, idx, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iStartBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 2, iEndBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ){ + pReader->eof = 1; + return SQLITE_OK; + } + if( rc!=SQLITE_ROW ) return rc; + + pReader->pStmt = s; + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Step the current leaf forward to the next term. If we reach the +** end of the current leaf, step forward to the next leaf block. +*/ +static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ + assert( !leavesReaderAtEnd(pReader) ); + leafReaderStep(&pReader->leafReader); + + if( leafReaderAtEnd(&pReader->leafReader) ){ + int rc; + if( pReader->rootData.pData ){ + pReader->eof = 1; + return SQLITE_OK; + } + rc = sqlite3_step(pReader->pStmt); + if( rc!=SQLITE_ROW ){ + pReader->eof = 1; + return rc==SQLITE_DONE ? SQLITE_OK : rc; + } + leafReaderDestroy(&pReader->leafReader); + leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), + sqlite3_column_bytes(pReader->pStmt, 0), + &pReader->leafReader); + } + return SQLITE_OK; +} + +/* Order LeavesReaders by their term, ignoring idx. Readers at eof +** always sort to the end. +*/ +static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ + if( leavesReaderAtEnd(lr1) ){ + if( leavesReaderAtEnd(lr2) ) return 0; + return 1; + } + if( leavesReaderAtEnd(lr2) ) return -1; + + return leafReaderTermCmp(&lr1->leafReader, + leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), + 0); +} + +/* Similar to leavesReaderTermCmp(), with additional ordering by idx +** so that older segments sort before newer segments. +*/ +static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ + int c = leavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->idx-lr2->idx; +} + +/* Assume that pLr[1]..pLr[nLr] are sorted. Bubble pLr[0] into its +** sorted position. +*/ +static void leavesReaderReorder(LeavesReader *pLr, int nLr){ + while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ + LeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* Initializes pReaders with the segments from level iLevel, returning +** the number of segments in *piReaders. Leaves pReaders in sorted +** order. +*/ +static int leavesReadersInit(fulltext_vtab *v, int iLevel, + LeavesReader *pReaders, int *piReaders){ + sqlite3_stmt *s; + int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int(s, 1, iLevel); + if( rc!=SQLITE_OK ) return rc; + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i<MERGE_COUNT ); + rc = leavesReaderInit(v, i, iStart, iEnd, pRootData, nRootData, + &pReaders[i]); + if( rc!=SQLITE_OK ) break; + + i++; + } + if( rc!=SQLITE_DONE ){ + while( i-->0 ){ + leavesReaderDestroy(&pReaders[i]); + } + return rc; + } + + *piReaders = i; + + /* Leave our results sorted by term, then age. 
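+  ** This back-to-front loop is an insertion sort.  Editor's trace
+  ** with hypothetical terms (c, a, b): the passes over the one- and
+  ** two-element tails change nothing, then the final pass bubbles
+  ** "c" right twice, yielding (a, b, c); ties on term fall back to
+  ** segment age via leavesReaderCmp().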
*/ + while( i-- ){ + leavesReaderReorder(pReaders+i, *piReaders-i); + } + return SQLITE_OK; +} + +/* Merge doclists from pReaders[nReaders] into a single doclist, which +** is written to pWriter. Assumes pReaders is ordered oldest to +** newest. +*/ +/* TODO(shess) Consider putting this inline in segmentMerge(). */ +static int leavesReadersMerge(fulltext_vtab *v, + LeavesReader *pReaders, int nReaders, + LeafWriter *pWriter){ + DLReader dlReaders[MERGE_COUNT]; + const char *pTerm = leavesReaderTerm(pReaders); + int i, nTerm = leavesReaderTermBytes(pReaders); + + assert( nReaders<=MERGE_COUNT ); + + for(i=0; i<nReaders; i++){ + dlrInit(&dlReaders[i], DL_DEFAULT, + leavesReaderData(pReaders+i), + leavesReaderDataBytes(pReaders+i)); + } + + return leafWriterStepMerge(v, pWriter, pTerm, nTerm, dlReaders, nReaders); +} + +/* Forward ref due to mutual recursion with segdirNextIndex(). */ +static int segmentMerge(fulltext_vtab *v, int iLevel); + +/* Put the next available index at iLevel into *pidx. If iLevel +** already has MERGE_COUNT segments, they are merged to a higher +** level to make room. +*/ +static int segdirNextIndex(fulltext_vtab *v, int iLevel, int *pidx){ + int rc = segdir_max_index(v, iLevel, pidx); + if( rc==SQLITE_DONE ){ /* No segments at iLevel. */ + *pidx = 0; + }else if( rc==SQLITE_ROW ){ + if( *pidx==(MERGE_COUNT-1) ){ + rc = segmentMerge(v, iLevel); + if( rc!=SQLITE_OK ) return rc; + *pidx = 0; + }else{ + (*pidx)++; + } + }else{ + return rc; + } + return SQLITE_OK; +} + +/* Merge MERGE_COUNT segments at iLevel into a new segment at +** iLevel+1. If iLevel+1 is already full of segments, those will be +** merged to make room. +*/ +static int segmentMerge(fulltext_vtab *v, int iLevel){ + LeafWriter writer; + LeavesReader lrs[MERGE_COUNT]; + int i, rc, idx = 0; + + /* Determine the next available segment index at the next level, + ** merging as necessary. + */ + rc = segdirNextIndex(v, iLevel+1, &idx); + if( rc!=SQLITE_OK ) return rc; + + /* TODO(shess) This assumes that we'll always see exactly + ** MERGE_COUNT segments to merge at a given level. That will be + ** broken if we allow the developer to request preemptive or + ** deferred merging. + */ + memset(&lrs, '\0', sizeof(lrs)); + rc = leavesReadersInit(v, iLevel, lrs, &i); + if( rc!=SQLITE_OK ) return rc; + assert( i==MERGE_COUNT ); + + leafWriterInit(iLevel+1, idx, &writer); + + /* Since leavesReaderReorder() pushes readers at eof to the end, + ** when the first reader is empty, all will be empty. + */ + while( !leavesReaderAtEnd(lrs) ){ + /* Figure out how many readers share their next term. */ + for(i=1; i<MERGE_COUNT && !leavesReaderAtEnd(lrs+i); i++){ + if( 0!=leavesReaderTermCmp(lrs, lrs+i) ) break; + } + + rc = leavesReadersMerge(v, lrs, i, &writer); + if( rc!=SQLITE_OK ) goto err; + + /* Step forward those that were merged. */ + while( i-->0 ){ + rc = leavesReaderStep(v, lrs+i); + if( rc!=SQLITE_OK ) goto err; + + /* Reorder by term, then by age. */ + leavesReaderReorder(lrs+i, MERGE_COUNT-i); + } + } + + for(i=0; i<MERGE_COUNT; i++){ + leavesReaderDestroy(&lrs[i]); + } + + rc = leafWriterFinalize(v, &writer); + leafWriterDestroy(&writer); + if( rc!=SQLITE_OK ) return rc; + + /* Delete the merged segment data. */ + return segdir_delete(v, iLevel); + + err: + for(i=0; i<MERGE_COUNT; i++){ + leavesReaderDestroy(&lrs[i]); + } + leafWriterDestroy(&writer); + return rc; +} + +/* Accumulate the union of *acc and *pData into *acc. 
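+**
+** Editor's note: the result buffer is pre-sized to tmp.nData+nData
+** because a union can never be larger than the sum of its inputs
+** (entries for duplicate docids collapse), and the old buffer is
+** only freed once docListUnion() has finished reading from it.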
*/ +static void docListAccumulateUnion(DataBuffer *acc, + const char *pData, int nData) { + DataBuffer tmp = *acc; + dataBufferInit(acc, tmp.nData+nData); + docListUnion(tmp.pData, tmp.nData, pData, nData, acc); + dataBufferDestroy(&tmp); +} + +/* TODO(shess) It might be interesting to explore different merge +** strategies, here. For instance, since this is a sorted merge, we +** could easily merge many doclists in parallel. With some +** comprehension of the storage format, we could merge all of the +** doclists within a leaf node directly from the leaf node's storage. +** It may be worthwhile to merge smaller doclists before larger +** doclists, since they can be traversed more quickly - but the +** results may have less overlap, making them more expensive in a +** different way. +*/ + +/* Scan pReader for pTerm/nTerm, and merge the term's doclist over +** *out (any doclists with duplicate docids overwrite those in *out). +** Internal function for loadSegmentLeaf(). +*/ +static int loadSegmentLeavesInt(fulltext_vtab *v, LeavesReader *pReader, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* doclist data is accumulated into pBuffers similar to how one does + ** increment in binary arithmetic. If index 0 is empty, the data is + ** stored there. If there is data there, it is merged and the + ** results carried into position 1, with further merge-and-carry + ** until an empty position is found. + */ + DataBuffer *pBuffers = NULL; + int nBuffers = 0, nMaxBuffers = 0, rc; + + assert( nTerm>0 ); + + for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); + rc=leavesReaderStep(v, pReader)){ + /* TODO(shess) Really want leavesReaderTermCmp(), but that name is + ** already taken to compare the terms of two LeavesReaders. Think + ** on a better name. [Meanwhile, break encapsulation rather than + ** use a confusing name.] + */ + int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); + if( c>0 ) break; /* Past any possible matches. */ + if( c==0 ){ + const char *pData = leavesReaderData(pReader); + int iBuffer, nData = leavesReaderDataBytes(pReader); + + /* Find the first empty buffer. */ + for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ + if( 0==pBuffers[iBuffer].nData ) break; + } + + /* Out of buffers, add an empty one. */ + if( iBuffer==nBuffers ){ + if( nBuffers==nMaxBuffers ){ + DataBuffer *p; + nMaxBuffers += 20; + + /* Manual realloc so we can handle NULL appropriately. */ + p = sqlite3_malloc(nMaxBuffers*sizeof(*pBuffers)); + if( p==NULL ){ + rc = SQLITE_NOMEM; + break; + } + + if( nBuffers>0 ){ + assert(pBuffers!=NULL); + memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); + sqlite3_free(pBuffers); + } + pBuffers = p; + } + dataBufferInit(&(pBuffers[nBuffers]), 0); + nBuffers++; + } + + /* At this point, must have an empty at iBuffer. */ + assert(iBuffer<nBuffers && pBuffers[iBuffer].nData==0); + + /* If empty was first buffer, no need for merge logic. */ + if( iBuffer==0 ){ + dataBufferReplace(&(pBuffers[0]), pData, nData); + }else{ + /* pAcc is the empty buffer the merged data will end up in. */ + DataBuffer *pAcc = &(pBuffers[iBuffer]); + DataBuffer *p = &(pBuffers[0]); + + /* Handle position 0 specially to avoid need to prime pAcc + ** with pData/nData. + */ + dataBufferSwap(p, pAcc); + docListAccumulateUnion(pAcc, pData, nData); + + /* Accumulate remaining doclists into pAcc. 
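+        ** Editor's illustration of the merge-and-carry scheme with
+        ** hypothetical fills: like incrementing a binary counter,
+        ** occupied buffers (1,1,0) plus one new doclist become
+        ** (0,0,1); slots 0 and 1 and the incoming data are all
+        ** unioned into slot 2, so each slot holds roughly twice the
+        ** data of the one before it.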
*/ + for(++p; p<pAcc; ++p){ + docListAccumulateUnion(pAcc, p->pData, p->nData); + + /* dataBufferReset() could allow a large doclist to blow up + ** our memory requirements. + */ + if( p->nCapacity<1024 ){ + dataBufferReset(p); + }else{ + dataBufferDestroy(p); + dataBufferInit(p, 0); + } + } + } + } + } + + /* Union all the doclists together into *out. */ + /* TODO(shess) What if *out is big? Sigh. */ + if( rc==SQLITE_OK && nBuffers>0 ){ + int iBuffer; + for(iBuffer=0; iBuffer<nBuffers; ++iBuffer){ + if( pBuffers[iBuffer].nData>0 ){ + if( out->nData==0 ){ + dataBufferSwap(out, &(pBuffers[iBuffer])); + }else{ + docListAccumulateUnion(out, pBuffers[iBuffer].pData, + pBuffers[iBuffer].nData); + } + } + } + } + + while( nBuffers-- ){ + dataBufferDestroy(&(pBuffers[nBuffers])); + } + if( pBuffers!=NULL ) sqlite3_free(pBuffers); + + return rc; +} + +/* Call loadSegmentLeavesInt() with pData/nData as input. */ +static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + LeavesReader reader; + int rc; + + assert( nData>1 ); + assert( *pData=='\0' ); + rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to +** iEndLeaf (inclusive) as input, and merge the resulting doclist into +** out. +*/ +static int loadSegmentLeaves(fulltext_vtab *v, + sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + int rc; + LeavesReader reader; + + assert( iStartLeaf<=iEndLeaf ); + rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); + if( rc!=SQLITE_OK ) return rc; + + rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); + leavesReaderReset(&reader); + leavesReaderDestroy(&reader); + return rc; +} + +/* Taking pData/nData as an interior node, find the sequence of child +** nodes which could include pTerm/nTerm/isPrefix. Note that the +** interior node terms logically come between the blocks, so there is +** one more blockid than there are terms (that block contains terms >= +** the last interior-node term). +*/ +/* TODO(shess) The calling code may already know that the end child is +** not worth calculating, because the end may be in a later sibling +** node. Consider whether breaking symmetry is worthwhile. I suspect +** it is not worthwhile. +*/ +static void getChildrenContaining(const char *pData, int nData, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, + sqlite_int64 *piEndChild){ + InteriorReader reader; + + assert( nData>1 ); + assert( *pData!='\0' ); + interiorReaderInit(pData, nData, &reader); + + /* Scan for the first child which could contain pTerm/nTerm. */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; + interiorReaderStep(&reader); + } + *piStartChild = interiorReaderCurrentBlockid(&reader); + + /* Keep scanning to find a term greater than our term, using prefix + ** comparison if indicated. If isPrefix is false, this will be the + ** same blockid as the starting block. 
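+  ** Editor's illustration (hypothetical node): with interior terms
+  ** ("cat", "dog") separating child blocks B, B+1, B+2, a search for
+  ** "cow" stops at "dog" in both loops, so *piStartChild and
+  ** *piEndChild are both B+1.  A prefix search for "c" instead spans
+  ** B through B+1, since terms at or after "cat" may also begin
+  ** with "c".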
+ */ + while( !interiorReaderAtEnd(&reader) ){ + if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; + interiorReaderStep(&reader); + } + *piEndChild = interiorReaderCurrentBlockid(&reader); + + interiorReaderDestroy(&reader); + + /* Children must ascend, and if !prefix, both must be the same. */ + assert( *piEndChild>=*piStartChild ); + assert( isPrefix || *piStartChild==*piEndChild ); +} + +/* Read block at iBlockid and pass it with other params to +** getChildrenContaining(). +*/ +static int loadAndGetChildrenContaining( + fulltext_vtab *v, + sqlite_int64 iBlockid, + const char *pTerm, int nTerm, int isPrefix, + sqlite_int64 *piStartChild, sqlite_int64 *piEndChild +){ + sqlite3_stmt *s = NULL; + int rc; + + assert( iBlockid!=0 ); + assert( pTerm!=NULL ); + assert( nTerm!=0 ); /* TODO(shess) Why not allow this? */ + assert( piStartChild!=NULL ); + assert( piEndChild!=NULL ); + + rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_bind_int64(s, 1, iBlockid); + if( rc!=SQLITE_OK ) return rc; + + rc = sqlite3_step(s); + if( rc==SQLITE_DONE ) return SQLITE_ERROR; + if( rc!=SQLITE_ROW ) return rc; + + getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0), + pTerm, nTerm, isPrefix, piStartChild, piEndChild); + + /* We expect only one row. We must execute another sqlite3_step() + * to complete the iteration; otherwise the table will remain + * locked. */ + rc = sqlite3_step(s); + if( rc==SQLITE_ROW ) return SQLITE_ERROR; + if( rc!=SQLITE_DONE ) return rc; + + return SQLITE_OK; +} + +/* Traverse the tree represented by pData[nData] looking for +** pTerm[nTerm], placing its doclist into *out. This is internal to +** loadSegment() to make error-handling cleaner. +*/ +static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + /* Special case where root is a leaf. */ + if( *pData=='\0' ){ + return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); + }else{ + int rc; + sqlite_int64 iStartChild, iEndChild; + + /* Process pData as an interior node, then loop down the tree + ** until we find the set of leaf nodes to scan for the term. + */ + getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, + &iStartChild, &iEndChild); + while( iStartChild>iLeavesEnd ){ + sqlite_int64 iNextStart, iNextEnd; + rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, + &iNextStart, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + + /* If we've branched, follow the end branch, too. */ + if( iStartChild!=iEndChild ){ + sqlite_int64 iDummy; + rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, + &iDummy, &iNextEnd); + if( rc!=SQLITE_OK ) return rc; + } + + assert( iNextStart<=iNextEnd ); + iStartChild = iNextStart; + iEndChild = iNextEnd; + } + assert( iStartChild<=iLeavesEnd ); + assert( iEndChild<=iLeavesEnd ); + + /* Scan through the leaf segments for doclists. */ + return loadSegmentLeaves(v, iStartChild, iEndChild, + pTerm, nTerm, isPrefix, out); + } +} + +/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then +** merge its doclist over *out (any duplicate doclists read from the +** segment rooted at pData will overwrite those in *out). 
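+**
+** E.g. (editor's illustration): if *out already holds docid 7 from an
+** older segment and this segment also carries docid 7, the merge
+** keeps this segment's newer entry, which may be an empty doclist
+** that deletes the docid outright.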
+*/ +/* TODO(shess) Consider changing this to determine the depth of the +** leaves using either the first characters of interior nodes (when +** ==1, we're one level above the leaves), or the first character of +** the root (which will describe the height of the tree directly). +** Either feels somewhat tricky to me. +*/ +/* TODO(shess) The current merge is likely to be slow for large +** doclists (though it should process from newest/smallest to +** oldest/largest, so it may not be that bad). It might be useful to +** modify things to allow for N-way merging. This could either be +** within a segment, with pairwise merges across segments, or across +** all segments at once. +*/ +static int loadSegment(fulltext_vtab *v, const char *pData, int nData, + sqlite_int64 iLeavesEnd, + const char *pTerm, int nTerm, int isPrefix, + DataBuffer *out){ + DataBuffer result; + int rc; + + assert( nData>1 ); + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&result, 0); + rc = loadSegmentInt(v, pData, nData, iLeavesEnd, + pTerm, nTerm, isPrefix, &result); + if( rc==SQLITE_OK && result.nData>0 ){ + if( out->nData==0 ){ + DataBuffer tmp = *out; + *out = result; + result = tmp; + }else{ + DataBuffer merged; + DLReader readers[2]; + + dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); + dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); + dataBufferInit(&merged, out->nData+result.nData); + docListMerge(&merged, readers, 2); + dataBufferDestroy(out); + *out = merged; + dlrDestroy(&readers[0]); + dlrDestroy(&readers[1]); + } + } + dataBufferDestroy(&result); + return rc; +} + +/* Scan the database and merge together the posting lists for the term +** into *out. +*/ +static int termSelect(fulltext_vtab *v, int iColumn, + const char *pTerm, int nTerm, int isPrefix, + DocListType iType, DataBuffer *out){ + DataBuffer doclist; + sqlite3_stmt *s; + int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) return rc; + + /* This code should never be called with buffered updates. */ + assert( v->nPendingData<0 ); + + dataBufferInit(&doclist, 0); + + /* Traverse the segments from oldest to newest so that newer doclist + ** elements for given docids overwrite older elements. + */ + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, + &doclist); + if( rc!=SQLITE_OK ) goto err; + } + if( rc==SQLITE_DONE ){ + if( doclist.nData!=0 ){ + /* TODO(shess) The old term_select_all() code applied the column + ** restrict as we merged segments, leading to smaller buffers. + ** This is probably worthwhile to bring back, once the new storage + ** system is checked in. + */ + if( iColumn==v->nColumn) iColumn = -1; + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + iColumn, iType, out); + } + rc = SQLITE_OK; + } + + err: + dataBufferDestroy(&doclist); + return rc; +} + +/****************************************************************/ +/* Used to hold hashtable data for sorting. */ +typedef struct TermData { + const char *pTerm; + int nTerm; + DLCollector *pCollector; +} TermData; + +/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 +** for equal, >0 for greater-than). 
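+**
+** E.g. (editor's examples): "app" sorts before "apple", since the
+** shorter term wins when one is a prefix of the other, and "bat"
+** sorts after "apple" on the first differing byte, which is exactly
+** the ordering qsort() needs in writeZeroSegment().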
+*/ +static int termDataCmp(const void *av, const void *bv){ + const TermData *a = (const TermData *)av; + const TermData *b = (const TermData *)bv; + int n = a->nTerm<b->nTerm ? a->nTerm : b->nTerm; + int c = memcmp(a->pTerm, b->pTerm, n); + if( c!=0 ) return c; + return a->nTerm-b->nTerm; +} + +/* Order pTerms data by term, then write a new level 0 segment using +** LeafWriter. +*/ +static int writeZeroSegment(fulltext_vtab *v, fts3Hash *pTerms){ + fts3HashElem *e; + int idx, rc, i, n; + TermData *pData; + LeafWriter writer; + DataBuffer dl; + + /* Determine the next index at level 0, merging as necessary. */ + rc = segdirNextIndex(v, 0, &idx); + if( rc!=SQLITE_OK ) return rc; + + n = fts3HashCount(pTerms); + pData = sqlite3_malloc(n*sizeof(TermData)); + + for(i = 0, e = fts3HashFirst(pTerms); e; i++, e = fts3HashNext(e)){ + assert( i<n ); + pData[i].pTerm = fts3HashKey(e); + pData[i].nTerm = fts3HashKeysize(e); + pData[i].pCollector = fts3HashData(e); + } + assert( i==n ); + + /* TODO(shess) Should we allow user-defined collation sequences, + ** here? I think we only need that once we support prefix searches. + */ + if( n>1 ) qsort(pData, n, sizeof(*pData), termDataCmp); + + /* TODO(shess) Refactor so that we can write directly to the segment + ** DataBuffer, as happens for segment merges. + */ + leafWriterInit(0, idx, &writer); + dataBufferInit(&dl, 0); + for(i=0; i<n; i++){ + dataBufferReset(&dl); + dlcAddDoclist(pData[i].pCollector, &dl); + rc = leafWriterStep(v, &writer, + pData[i].pTerm, pData[i].nTerm, dl.pData, dl.nData); + if( rc!=SQLITE_OK ) goto err; + } + rc = leafWriterFinalize(v, &writer); + + err: + dataBufferDestroy(&dl); + sqlite3_free(pData); + leafWriterDestroy(&writer); + return rc; +} + +/* If pendingTerms has data, free it. */ +static int clearPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + fts3HashElem *e; + for(e=fts3HashFirst(&v->pendingTerms); e; e=fts3HashNext(e)){ + dlcDelete(fts3HashData(e)); + } + fts3HashClear(&v->pendingTerms); + v->nPendingData = -1; + } + return SQLITE_OK; +} + +/* If pendingTerms has data, flush it to a level-zero segment, and +** free it. +*/ +static int flushPendingTerms(fulltext_vtab *v){ + if( v->nPendingData>=0 ){ + int rc = writeZeroSegment(v, &v->pendingTerms); + if( rc==SQLITE_OK ) clearPendingTerms(v); + return rc; + } + return SQLITE_OK; +} + +/* If pendingTerms is "too big", or docid is out of order, flush it. +** Regardless, be certain that pendingTerms is initialized for use. +*/ +static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ + /* TODO(shess) Explore whether partially flushing the buffer on + ** forced-flush would provide better performance. I suspect that if + ** we ordered the doclists by size and flushed the largest until the + ** buffer was half empty, that would let the less frequent terms + ** generate longer doclists. + */ + if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ + int rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) return rc; + } + if( v->nPendingData<0 ){ + fts3HashInit(&v->pendingTerms, FTS3_HASH_STRING, 1); + v->nPendingData = 0; + } + v->iPrevDocid = iDocid; + return SQLITE_OK; +} + +/* This function implements the xUpdate callback; it is the top-level entry + * point for inserting, deleting or updating a row in a full-text table. 
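+ *
+ * Editor's recap of the calling convention handled below: nArg==1 is
+ * a DELETE with ppArg[0] holding the rowid; otherwise a NULL ppArg[0]
+ * marks an INSERT and a non-NULL ppArg[0] an UPDATE of that rowid,
+ * with the per-branch comments spelling out the remaining arguments.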
*/ +static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, + sqlite_int64 *pRowid){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + int rc; + + FTSTRACE(("FTS3 Update %p\n", pVtab)); + + if( nArg<2 ){ + rc = index_delete(v, sqlite3_value_int64(ppArg[0])); + if( rc==SQLITE_OK ){ + /* If we just deleted the last row in the table, clear out the + ** index data. + */ + rc = content_exists(v); + if( rc==SQLITE_ROW ){ + rc = SQLITE_OK; + }else if( rc==SQLITE_DONE ){ + /* Clear the pending terms so we don't flush a useless level-0 + ** segment when the transaction closes. + */ + rc = clearPendingTerms(v); + if( rc==SQLITE_OK ){ + rc = segdir_delete_all(v); + } + } + } + } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ + /* An update: + * ppArg[0] = old rowid + * ppArg[1] = new rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + * ppArg[2+v->nColumn+1] = value for docid + */ + sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); + if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the rowid */ + }else if( sqlite3_value_type(ppArg[2+v->nColumn+1]) != SQLITE_INTEGER || + sqlite3_value_int64(ppArg[2+v->nColumn+1]) != rowid ){ + rc = SQLITE_ERROR; /* we don't allow changing the docid */ + }else{ + assert( nArg==2+v->nColumn+2); + rc = index_update(v, rowid, &ppArg[2]); + } + } else { + /* An insert: + * ppArg[1] = requested rowid + * ppArg[2..2+v->nColumn-1] = values + * ppArg[2+v->nColumn] = value for magic column (we ignore this) + * ppArg[2+v->nColumn+1] = value for docid + */ + sqlite3_value *pRequestDocid = ppArg[2+v->nColumn+1]; + assert( nArg==2+v->nColumn+2); + if( SQLITE_NULL != sqlite3_value_type(pRequestDocid) && + SQLITE_NULL != sqlite3_value_type(ppArg[1]) ){ + /* TODO(shess) Consider allowing this to work if the values are + ** identical. I'm inclined to discourage that usage, though, + ** given that both rowid and docid are special columns. Better + ** would be to define one or the other as the default winner, + ** but should it be fts3-centric (docid) or SQLite-centric + ** (rowid)? + */ + rc = SQLITE_ERROR; + }else{ + if( SQLITE_NULL == sqlite3_value_type(pRequestDocid) ){ + pRequestDocid = ppArg[1]; + } + rc = index_insert(v, pRequestDocid, &ppArg[2], pRowid); + } + } + + return rc; +} + +static int fulltextSync(sqlite3_vtab *pVtab){ + FTSTRACE(("FTS3 xSync()\n")); + return flushPendingTerms((fulltext_vtab *)pVtab); +} + +static int fulltextBegin(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + FTSTRACE(("FTS3 xBegin()\n")); + + /* Any buffered updates should have been cleared by the previous + ** transaction. + */ + assert( v->nPendingData<0 ); + return clearPendingTerms(v); +} + +static int fulltextCommit(sqlite3_vtab *pVtab){ + fulltext_vtab *v = (fulltext_vtab *) pVtab; + FTSTRACE(("FTS3 xCommit()\n")); + + /* Buffered updates should have been cleared by fulltextSync(). 
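+  ** Editor's note: SQLite calls xSync() before xCommit(), so by the
+  ** time this runs fulltextSync() has already flushed any pending
+  ** terms to a level-0 segment; the clearPendingTerms() below is a
+  ** safety net rather than the primary flush path.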
*/
+  assert( v->nPendingData<0 );
+  return clearPendingTerms(v);
+}
+
+static int fulltextRollback(sqlite3_vtab *pVtab){
+  FTSTRACE(("FTS3 xRollback()\n"));
+  return clearPendingTerms((fulltext_vtab *)pVtab);
+}
+
+/*
+** Implementation of the snippet() function for FTS3
+*/
+static void snippetFunc(
+  sqlite3_context *pContext,
+  int argc,
+  sqlite3_value **argv
+){
+  fulltext_cursor *pCursor;
+  if( argc<1 ) return;
+  if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
+      sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
+    sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1);
+  }else{
+    const char *zStart = "<b>";
+    const char *zEnd = "</b>";
+    const char *zEllipsis = "<b>...</b>";
+    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
+    if( argc>=2 ){
+      zStart = (const char*)sqlite3_value_text(argv[1]);
+      if( argc>=3 ){
+        zEnd = (const char*)sqlite3_value_text(argv[2]);
+        if( argc>=4 ){
+          zEllipsis = (const char*)sqlite3_value_text(argv[3]);
+        }
+      }
+    }
+    snippetAllOffsets(pCursor);
+    snippetText(pCursor, zStart, zEnd, zEllipsis);
+    sqlite3_result_text(pContext, pCursor->snippet.zSnippet,
+                        pCursor->snippet.nSnippet, SQLITE_STATIC);
+  }
+}
+
+/*
+** Implementation of the offsets() function for FTS3
+*/
+static void snippetOffsetsFunc(
+  sqlite3_context *pContext,
+  int argc,
+  sqlite3_value **argv
+){
+  fulltext_cursor *pCursor;
+  if( argc<1 ) return;
+  if( sqlite3_value_type(argv[0])!=SQLITE_BLOB ||
+      sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){
+    sqlite3_result_error(pContext, "illegal first argument to offsets",-1);
+  }else{
+    memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor));
+    snippetAllOffsets(pCursor);
+    snippetOffsetText(&pCursor->snippet);
+    sqlite3_result_text(pContext,
+                        pCursor->snippet.zOffset, pCursor->snippet.nOffset,
+                        SQLITE_STATIC);
+  }
+}
+
+/* OptLeavesReader is nearly identical to LeavesReader, except that
+** where LeavesReader is geared towards the merging of complete
+** segment levels (with exactly MERGE_COUNT segments), OptLeavesReader
+** is geared towards implementation of the optimize() function, and
+** can merge all segments simultaneously.  This version may be
+** somewhat less efficient than LeavesReader because it merges into an
+** accumulator rather than doing an N-way merge, but since segment
+** size grows exponentially (so segment count logarithmically) this is
+** probably not an immediate problem.
+*/
+/* TODO(shess): Prove that assertion, or extend the merge code to
+** merge tree fashion (like the prefix-searching code does).
+*/
+/* TODO(shess): OptLeavesReader and LeavesReader could probably be
+** merged with little or no loss of performance for LeavesReader.  The
+** merged code would need to handle >MERGE_COUNT segments, and would
+** also need to be able to optionally optimize away deletes.
+*/
+typedef struct OptLeavesReader {
+  /* Segment number, to order readers by age.
*/ + int segment; + LeavesReader reader; +} OptLeavesReader; + +static int optLeavesReaderAtEnd(OptLeavesReader *pReader){ + return leavesReaderAtEnd(&pReader->reader); +} +static int optLeavesReaderTermBytes(OptLeavesReader *pReader){ + return leavesReaderTermBytes(&pReader->reader); +} +static const char *optLeavesReaderData(OptLeavesReader *pReader){ + return leavesReaderData(&pReader->reader); +} +static int optLeavesReaderDataBytes(OptLeavesReader *pReader){ + return leavesReaderDataBytes(&pReader->reader); +} +static const char *optLeavesReaderTerm(OptLeavesReader *pReader){ + return leavesReaderTerm(&pReader->reader); +} +static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){ + return leavesReaderStep(v, &pReader->reader); +} +static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ + return leavesReaderTermCmp(&lr1->reader, &lr2->reader); +} +/* Order by term ascending, segment ascending (oldest to newest), with +** exhausted readers to the end. +*/ +static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ + int c = optLeavesReaderTermCmp(lr1, lr2); + if( c!=0 ) return c; + return lr1->segment-lr2->segment; +} +/* Bubble pLr[0] to appropriate place in pLr[1..nLr-1]. Assumes that +** pLr[1..nLr-1] is already sorted. +*/ +static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){ + while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){ + OptLeavesReader tmp = pLr[0]; + pLr[0] = pLr[1]; + pLr[1] = tmp; + nLr--; + pLr++; + } +} + +/* optimize() helper function. Put the readers in order and iterate +** through them, merging doclists for matching terms into pWriter. +** Returns SQLITE_OK on success, or the SQLite error code which +** prevented success. +*/ +static int optimizeInternal(fulltext_vtab *v, + OptLeavesReader *readers, int nReaders, + LeafWriter *pWriter){ + int i, rc = SQLITE_OK; + DataBuffer doclist, merged, tmp; + + /* Order the readers. */ + i = nReaders; + while( i-- > 0 ){ + optLeavesReaderReorder(&readers[i], nReaders-i); + } + + dataBufferInit(&doclist, LEAF_MAX); + dataBufferInit(&merged, LEAF_MAX); + + /* Exhausted readers bubble to the end, so when the first reader is + ** at eof, all are at eof. + */ + while( !optLeavesReaderAtEnd(&readers[0]) ){ + + /* Figure out how many readers share the next term. */ + for(i=1; i<nReaders && !optLeavesReaderAtEnd(&readers[i]); i++){ + if( 0!=optLeavesReaderTermCmp(&readers[0], &readers[i]) ) break; + } + + /* Special-case for no merge. */ + if( i==1 ){ + /* Trim deletions from the doclist. */ + dataBufferReset(&merged); + docListTrim(DL_DEFAULT, + optLeavesReaderData(&readers[0]), + optLeavesReaderDataBytes(&readers[0]), + -1, DL_DEFAULT, &merged); + }else{ + DLReader dlReaders[MERGE_COUNT]; + int iReader, nReaders; + + /* Prime the pipeline with the first reader's doclist. After + ** one pass index 0 will reference the accumulated doclist. + */ + dlrInit(&dlReaders[0], DL_DEFAULT, + optLeavesReaderData(&readers[0]), + optLeavesReaderDataBytes(&readers[0])); + iReader = 1; + + assert( iReader<i ); /* Must execute the loop at least once. */ + while( iReader<i ){ + /* Merge 16 inputs per pass. */ + for( nReaders=1; iReader<i && nReaders<MERGE_COUNT; + iReader++, nReaders++ ){ + dlrInit(&dlReaders[nReaders], DL_DEFAULT, + optLeavesReaderData(&readers[iReader]), + optLeavesReaderDataBytes(&readers[iReader])); + } + + /* Merge doclists and swap result into accumulator. 
*/ + dataBufferReset(&merged); + docListMerge(&merged, dlReaders, nReaders); + tmp = merged; + merged = doclist; + doclist = tmp; + + while( nReaders-- > 0 ){ + dlrDestroy(&dlReaders[nReaders]); + } + + /* Accumulated doclist to reader 0 for next pass. */ + dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); + } + + /* Destroy reader that was left in the pipeline. */ + dlrDestroy(&dlReaders[0]); + + /* Trim deletions from the doclist. */ + dataBufferReset(&merged); + docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, + -1, DL_DEFAULT, &merged); + } + + /* Only pass doclists with hits (skip if all hits deleted). */ + if( merged.nData>0 ){ + rc = leafWriterStep(v, pWriter, + optLeavesReaderTerm(&readers[0]), + optLeavesReaderTermBytes(&readers[0]), + merged.pData, merged.nData); + if( rc!=SQLITE_OK ) goto err; + } + + /* Step merged readers to next term and reorder. */ + while( i-- > 0 ){ + rc = optLeavesReaderStep(v, &readers[i]); + if( rc!=SQLITE_OK ) goto err; + + optLeavesReaderReorder(&readers[i], nReaders-i); + } + } + + err: + dataBufferDestroy(&doclist); + dataBufferDestroy(&merged); + return rc; +} + +/* Implement optimize() function for FTS3. optimize(t) merges all +** segments in the fts index into a single segment. 't' is the magic +** table-named column. +*/ +static void optimizeFunc(sqlite3_context *pContext, + int argc, sqlite3_value **argv){ + fulltext_cursor *pCursor; + if( argc>1 ){ + sqlite3_result_error(pContext, "excess arguments to optimize()",-1); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + sqlite3_result_error(pContext, "illegal first argument to optimize",-1); + }else{ + fulltext_vtab *v; + int i, rc, iMaxLevel; + OptLeavesReader *readers; + int nReaders; + LeafWriter writer; + sqlite3_stmt *s; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + /* Flush any buffered updates before optimizing. */ + rc = flushPendingTerms(v); + if( rc!=SQLITE_OK ) goto err; + + rc = segdir_count(v, &nReaders, &iMaxLevel); + if( rc!=SQLITE_OK ) goto err; + if( nReaders==0 || nReaders==1 ){ + sqlite3_result_text(pContext, "Index already optimal", -1, + SQLITE_STATIC); + return; + } + + rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + if( rc!=SQLITE_OK ) goto err; + + readers = sqlite3_malloc(nReaders*sizeof(readers[0])); + if( readers==NULL ) goto err; + + /* Note that there will already be a segment at this position + ** until we call segdir_delete() on iMaxLevel. + */ + leafWriterInit(iMaxLevel, 0, &writer); + + i = 0; + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + sqlite_int64 iStart = sqlite3_column_int64(s, 0); + sqlite_int64 iEnd = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + int nRootData = sqlite3_column_bytes(s, 2); + + assert( i<nReaders ); + rc = leavesReaderInit(v, -1, iStart, iEnd, pRootData, nRootData, + &readers[i].reader); + if( rc!=SQLITE_OK ) break; + + readers[i].segment = i; + i++; + } + + /* If we managed to succesfully read them all, optimize them. */ + if( rc==SQLITE_DONE ){ + assert( i==nReaders ); + rc = optimizeInternal(v, readers, nReaders, &writer); + } + + while( i-- > 0 ){ + leavesReaderDestroy(&readers[i].reader); + } + sqlite3_free(readers); + + /* If we've successfully gotten to here, delete the old segments + ** and flush the interior structure of the new segment. 
+ */ + if( rc==SQLITE_OK ){ + for( i=0; i<=iMaxLevel; i++ ){ + rc = segdir_delete(v, i); + if( rc!=SQLITE_OK ) break; + } + + if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); + } + + leafWriterDestroy(&writer); + + if( rc!=SQLITE_OK ) goto err; + + sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); + return; + + /* TODO(shess): Error-handling needs to be improved along the + ** lines of the dump_ functions. + */ + err: + { + char buf[512]; + sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", + sqlite3_errmsg(sqlite3_context_db_handle(pContext))); + sqlite3_result_error(pContext, buf, -1); + } + } +} + +#ifdef SQLITE_TEST +/* Generate an error of the form "<prefix>: <msg>". If msg is NULL, +** pull the error from the context's db handle. +*/ +static void generateError(sqlite3_context *pContext, + const char *prefix, const char *msg){ + char buf[512]; + if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); + sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); + sqlite3_result_error(pContext, buf, -1); +} + +/* Helper function to collect the set of terms in the segment into +** pTerms. The segment is defined by the leaf nodes between +** iStartBlockid and iEndBlockid, inclusive, or by the contents of +** pRootData if iStartBlockid is 0 (in which case the entire segment +** fit in a leaf). +*/ +static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, + fts3Hash *pTerms){ + const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); + const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); + const char *pRootData = sqlite3_column_blob(s, 2); + const int nRootData = sqlite3_column_bytes(s, 2); + LeavesReader reader; + int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, + pRootData, nRootData, &reader); + if( rc!=SQLITE_OK ) return rc; + + while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ + const char *pTerm = leavesReaderTerm(&reader); + const int nTerm = leavesReaderTermBytes(&reader); + void *oldValue = sqlite3Fts3HashFind(pTerms, pTerm, nTerm); + void *newValue = (void *)((char *)oldValue+1); + + /* From the comment before sqlite3Fts3HashInsert in fts3_hash.c, + ** the data value passed is returned in case of malloc failure. + */ + if( newValue==sqlite3Fts3HashInsert(pTerms, pTerm, nTerm, newValue) ){ + rc = SQLITE_NOMEM; + }else{ + rc = leavesReaderStep(v, &reader); + } + } + + leavesReaderDestroy(&reader); + return rc; +} + +/* Helper function to build the result string for dump_terms(). */ +static int generateTermsResult(sqlite3_context *pContext, fts3Hash *pTerms){ + int iTerm, nTerms, nResultBytes, iByte; + char *result; + TermData *pData; + fts3HashElem *e; + + /* Iterate pTerms to generate an array of terms in pData for + ** sorting. + */ + nTerms = fts3HashCount(pTerms); + assert( nTerms>0 ); + pData = sqlite3_malloc(nTerms*sizeof(TermData)); + if( pData==NULL ) return SQLITE_NOMEM; + + nResultBytes = 0; + for(iTerm = 0, e = fts3HashFirst(pTerms); e; iTerm++, e = fts3HashNext(e)){ + nResultBytes += fts3HashKeysize(e)+1; /* Term plus trailing space */ + assert( iTerm<nTerms ); + pData[iTerm].pTerm = fts3HashKey(e); + pData[iTerm].nTerm = fts3HashKeysize(e); + pData[iTerm].pCollector = fts3HashData(e); /* unused */ + } + assert( iTerm==nTerms ); + + assert( nResultBytes>0 ); /* nTerms>0, nResultsBytes must be, too. 
*/ + result = sqlite3_malloc(nResultBytes); + if( result==NULL ){ + sqlite3_free(pData); + return SQLITE_NOMEM; + } + + if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); + + /* Read the terms in order to build the result. */ + iByte = 0; + for(iTerm=0; iTerm<nTerms; ++iTerm){ + memcpy(result+iByte, pData[iTerm].pTerm, pData[iTerm].nTerm); + iByte += pData[iTerm].nTerm; + result[iByte++] = ' '; + } + assert( iByte==nResultBytes ); + assert( result[nResultBytes-1]==' ' ); + result[nResultBytes-1] = '\0'; + + /* Passes away ownership of result. */ + sqlite3_result_text(pContext, result, nResultBytes-1, sqlite3_free); + sqlite3_free(pData); + return SQLITE_OK; +} + +/* Implements dump_terms() for use in inspecting the fts3 index from +** tests. TEXT result containing the ordered list of terms joined by +** spaces. dump_terms(t, level, idx) dumps the terms for the segment +** specified by level, idx (in %_segdir), while dump_terms(t) dumps +** all terms in the index. In both cases t is the fts table's magic +** table-named column. +*/ +static void dumpTermsFunc( + sqlite3_context *pContext, + int argc, sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc!=3 && argc!=1 ){ + generateError(pContext, "dump_terms", "incorrect arguments"); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + generateError(pContext, "dump_terms", "illegal first argument"); + }else{ + fulltext_vtab *v; + fts3Hash terms; + sqlite3_stmt *s = NULL; + int rc; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + /* If passed only the cursor column, get all segments. Otherwise + ** get the segment described by the following two arguments. + */ + if( argc==1 ){ + rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); + }else{ + rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[1])); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[2])); + } + } + } + + if( rc!=SQLITE_OK ){ + generateError(pContext, "dump_terms", NULL); + return; + } + + /* Collect the terms for each segment. */ + sqlite3Fts3HashInit(&terms, FTS3_HASH_STRING, 1); + while( (rc = sqlite3_step(s))==SQLITE_ROW ){ + rc = collectSegmentTerms(v, s, &terms); + if( rc!=SQLITE_OK ) break; + } + + if( rc!=SQLITE_DONE ){ + sqlite3_reset(s); + generateError(pContext, "dump_terms", NULL); + }else{ + const int nTerms = fts3HashCount(&terms); + if( nTerms>0 ){ + rc = generateTermsResult(pContext, &terms); + if( rc==SQLITE_NOMEM ){ + generateError(pContext, "dump_terms", "out of memory"); + }else{ + assert( rc==SQLITE_OK ); + } + }else if( argc==3 ){ + /* The specific segment asked for could not be found. */ + generateError(pContext, "dump_terms", "segment not found"); + }else{ + /* No segments found. */ + /* TODO(shess): It should be impossible to reach this. This + ** case can only happen for an empty table, in which case + ** SQLite has no rows to call this function on. + */ + sqlite3_result_null(pContext); + } + } + sqlite3Fts3HashClear(&terms); + } +} + +/* Expand the DL_DEFAULT doclist in pData into a text result in +** pContext. 
+*/ +static void createDoclistResult(sqlite3_context *pContext, + const char *pData, int nData){ + DataBuffer dump; + DLReader dlReader; + + assert( pData!=NULL && nData>0 ); + + dataBufferInit(&dump, 0); + dlrInit(&dlReader, DL_DEFAULT, pData, nData); + for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){ + char buf[256]; + PLReader plReader; + + plrInit(&plReader, &dlReader); + if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ + sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); + dataBufferAppend(&dump, buf, strlen(buf)); + }else{ + int iColumn = plrColumn(&plReader); + + sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", + dlrDocid(&dlReader), iColumn); + dataBufferAppend(&dump, buf, strlen(buf)); + + for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){ + if( plrColumn(&plReader)!=iColumn ){ + iColumn = plrColumn(&plReader); + sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, buf, strlen(buf)); + } + if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", + plrPosition(&plReader), + plrStartOffset(&plReader), plrEndOffset(&plReader)); + }else if( DL_DEFAULT==DL_POSITIONS ){ + sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); + }else{ + assert( NULL=="Unhandled DL_DEFAULT value"); + } + dataBufferAppend(&dump, buf, strlen(buf)); + } + plrDestroy(&plReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dataBufferAppend(&dump, "]] ", 3); + } + } + dlrDestroy(&dlReader); + + assert( dump.nData>0 ); + dump.nData--; /* Overwrite trailing space. */ + assert( dump.pData[dump.nData]==' '); + dump.pData[dump.nData] = '\0'; + assert( dump.nData>0 ); + + /* Passes ownership of dump's buffer to pContext. */ + sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); + dump.pData = NULL; + dump.nData = dump.nCapacity = 0; +} + +/* Implements dump_doclist() for use in inspecting the fts3 index from +** tests. TEXT result containing a string representation of the +** doclist for the indicated term. dump_doclist(t, term, level, idx) +** dumps the doclist for term from the segment specified by level, idx +** (in %_segdir), while dump_doclist(t, term) dumps the logical +** doclist for the term across all segments. The per-segment doclist +** can contain deletions, while the full-index doclist will not +** (deletions are omitted). +** +** Result formats differ with the setting of DL_DEFAULTS. Examples: +** +** DL_DOCIDS: [1] [3] [7] +** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] +** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] +** +** In each case the number after the outer '[' is the docid. In the +** latter two cases, the number before the inner '[' is the column +** associated with the values within. For DL_POSITIONS the numbers +** within are the positions, for DL_POSITIONS_OFFSETS they are the +** position, the start offset, and the end offset. 
+*/ +static void dumpDoclistFunc( + sqlite3_context *pContext, + int argc, sqlite3_value **argv +){ + fulltext_cursor *pCursor; + if( argc!=2 && argc!=4 ){ + generateError(pContext, "dump_doclist", "incorrect arguments"); + }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || + sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ + generateError(pContext, "dump_doclist", "illegal first argument"); + }else if( sqlite3_value_text(argv[1])==NULL || + sqlite3_value_text(argv[1])[0]=='\0' ){ + generateError(pContext, "dump_doclist", "empty second argument"); + }else{ + const char *pTerm = (const char *)sqlite3_value_text(argv[1]); + const int nTerm = strlen(pTerm); + fulltext_vtab *v; + int rc; + DataBuffer doclist; + + memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); + v = cursor_vtab(pCursor); + + dataBufferInit(&doclist, 0); + + /* termSelect() yields the same logical doclist that queries are + ** run against. + */ + if( argc==2 ){ + rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); + }else{ + sqlite3_stmt *s = NULL; + + /* Get our specific segment's information. */ + rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); + } + } + + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + if( rc==SQLITE_DONE ){ + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "segment not found"); + return; + } + + /* Found a segment, load it into doclist. */ + if( rc==SQLITE_ROW ){ + const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); + const char *pData = sqlite3_column_blob(s, 2); + const int nData = sqlite3_column_bytes(s, 2); + + /* loadSegment() is used by termSelect() to load each + ** segment's data. + */ + rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, + &doclist); + if( rc==SQLITE_OK ){ + rc = sqlite3_step(s); + + /* Should not have more than one matching segment. */ + if( rc!=SQLITE_DONE ){ + sqlite3_reset(s); + dataBufferDestroy(&doclist); + generateError(pContext, "dump_doclist", "invalid segdir"); + return; + } + rc = SQLITE_OK; + } + } + } + + sqlite3_reset(s); + } + + if( rc==SQLITE_OK ){ + if( doclist.nData>0 ){ + createDoclistResult(pContext, doclist.pData, doclist.nData); + }else{ + /* TODO(shess): This can happen if the term is not present, or + ** if all instances of the term have been deleted and this is + ** an all-index dump. It may be interesting to distinguish + ** these cases. + */ + sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); + } + }else if( rc==SQLITE_NOMEM ){ + /* Handle out-of-memory cases specially because if they are + ** generated in fts3 code they may not be reflected in the db + ** handle. + */ + /* TODO(shess): Handle this more comprehensively. + ** sqlite3ErrStr() has what I need, but is internal. + */ + generateError(pContext, "dump_doclist", "out of memory"); + }else{ + generateError(pContext, "dump_doclist", NULL); + } + + dataBufferDestroy(&doclist); + } +} +#endif + +/* +** This routine implements the xFindFunction method for the FTS3 +** virtual table. 
+*/
+static int fulltextFindFunction(
+  sqlite3_vtab *pVtab,
+  int nArg,
+  const char *zName,
+  void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
+  void **ppArg
+){
+  if( strcmp(zName,"snippet")==0 ){
+    *pxFunc = snippetFunc;
+    return 1;
+  }else if( strcmp(zName,"offsets")==0 ){
+    *pxFunc = snippetOffsetsFunc;
+    return 1;
+  }else if( strcmp(zName,"optimize")==0 ){
+    *pxFunc = optimizeFunc;
+    return 1;
+#ifdef SQLITE_TEST
+    /* NOTE(shess): These functions are present only for testing
+    ** purposes.  No particular effort is made to optimize their
+    ** execution or how they build their results.
+    */
+  }else if( strcmp(zName,"dump_terms")==0 ){
+    /* fprintf(stderr, "Found dump_terms\n"); */
+    *pxFunc = dumpTermsFunc;
+    return 1;
+  }else if( strcmp(zName,"dump_doclist")==0 ){
+    /* fprintf(stderr, "Found dump_doclist\n"); */
+    *pxFunc = dumpDoclistFunc;
+    return 1;
+#endif
+  }
+  return 0;
+}
+
+/*
+** Rename an fts3 table.
+*/
+static int fulltextRename(
+  sqlite3_vtab *pVtab,
+  const char *zName
+){
+  fulltext_vtab *p = (fulltext_vtab *)pVtab;
+  int rc = SQLITE_NOMEM;
+  char *zSql = sqlite3_mprintf(
+    "ALTER TABLE %Q.'%q_content'  RENAME TO '%q_content';"
+    "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';"
+    "ALTER TABLE %Q.'%q_segdir'   RENAME TO '%q_segdir';"
+    , p->zDb, p->zName, zName
+    , p->zDb, p->zName, zName
+    , p->zDb, p->zName, zName
+  );
+  if( zSql ){
+    rc = sqlite3_exec(p->db, zSql, 0, 0, 0);
+    sqlite3_free(zSql);
+  }
+  return rc;
+}
+
+static const sqlite3_module fts3Module = {
+  /* iVersion      */ 0,
+  /* xCreate       */ fulltextCreate,
+  /* xConnect      */ fulltextConnect,
+  /* xBestIndex    */ fulltextBestIndex,
+  /* xDisconnect   */ fulltextDisconnect,
+  /* xDestroy      */ fulltextDestroy,
+  /* xOpen         */ fulltextOpen,
+  /* xClose        */ fulltextClose,
+  /* xFilter       */ fulltextFilter,
+  /* xNext         */ fulltextNext,
+  /* xEof          */ fulltextEof,
+  /* xColumn       */ fulltextColumn,
+  /* xRowid        */ fulltextRowid,
+  /* xUpdate       */ fulltextUpdate,
+  /* xBegin        */ fulltextBegin,
+  /* xSync         */ fulltextSync,
+  /* xCommit       */ fulltextCommit,
+  /* xRollback     */ fulltextRollback,
+  /* xFindFunction */ fulltextFindFunction,
+  /* xRename       */ fulltextRename,
+};
+
+static void hashDestroy(void *p){
+  fts3Hash *pHash = (fts3Hash *)p;
+  sqlite3Fts3HashClear(pHash);
+  sqlite3_free(pHash);
+}
+
+/*
+** The fts3 built-in tokenizers - "simple" and "porter" - are implemented
+** in files fts3_tokenizer1.c and fts3_porter.c respectively.  The following
+** two forward declarations are for functions declared in these files
+** used to retrieve the respective implementations.
+**
+** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed
+** to by the argument to point at the "simple" tokenizer implementation.
+** Function ...PorterTokenizerModule() sets *pModule to point to the
+** porter tokenizer/stemmer implementation.
+*/
+void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+
+int sqlite3Fts3InitHashTable(sqlite3 *, fts3Hash *, const char *);
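+
+/* Editor's aside: an illustrative sketch (not part of this source drop) of
+** how an embedding application uses the initializer declared in fts3.h.
+** The table and column names are invented for the example and error
+** handling is collapsed to single checks; the block is guarded by #if 0
+** so it is never compiled into this file.
+*/
+#if 0
+#include "sqlite3.h"
+#include "fts3.h"
+
+static int fts3_example(void){
+  sqlite3 *db = 0;
+  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
+  if( sqlite3Fts3Init(db)!=SQLITE_OK ) return 1;  /* registers "fts3" */
+  sqlite3_exec(db,
+      "CREATE VIRTUAL TABLE docs USING fts3(body);"
+      "INSERT INTO docs(body) VALUES('the quick brown fox');",
+      0, 0, 0);
+  /* snippet() is one of the scalar functions overloaded below; passing
+  ** the magic table-named column routes the call to this module. */
+  sqlite3_exec(db,
+      "SELECT snippet(docs) FROM docs WHERE body MATCH 'fox';",
+      0, 0, 0);
+  return sqlite3_close(db)!=SQLITE_OK;
+}
+#endif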
+
+/*
+** Initialise the fts3 extension.  If this extension is built as part
+** of the sqlite library, then this function is called directly by
+** SQLite.  If fts3 is built as a dynamically loadable extension, this
+** function is called by the sqlite3_extension_init() entry point.
+*/
+int sqlite3Fts3Init(sqlite3 *db){
+  int rc = SQLITE_OK;
+  fts3Hash *pHash = 0;
+  const sqlite3_tokenizer_module *pSimple = 0;
+  const sqlite3_tokenizer_module *pPorter = 0;
+  const sqlite3_tokenizer_module *pIcu = 0;
+
+  sqlite3Fts3SimpleTokenizerModule(&pSimple);
+  sqlite3Fts3PorterTokenizerModule(&pPorter);
+#ifdef SQLITE_ENABLE_ICU
+  sqlite3Fts3IcuTokenizerModule(&pIcu);
+#endif
+
+  /* Allocate and initialise the hash-table used to store tokenizers. */
+  pHash = sqlite3_malloc(sizeof(fts3Hash));
+  if( !pHash ){
+    rc = SQLITE_NOMEM;
+  }else{
+    sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1);
+  }
+
+  /* Load the built-in tokenizers into the hash table */
+  if( rc==SQLITE_OK ){
+    if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple)
+     || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter)
+     || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu))
+    ){
+      rc = SQLITE_NOMEM;
+    }
+  }
+
+  /* Create the virtual table wrapper around the hash-table and overload
+  ** the two scalar functions. If this is successful, register the
+  ** module with sqlite.
+  */
+  if( SQLITE_OK==rc
+   && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer"))
+   && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1))
+   && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1))
+   && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1))
+#ifdef SQLITE_TEST
+   && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1))
+   && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1))
+#endif
+  ){
+    return sqlite3_create_module_v2(
+        db, "fts3", &fts3Module, (void *)pHash, hashDestroy
+    );
+  }
+
+  /* An error has occurred. Delete the hash table and return the error code. */
+  assert( rc!=SQLITE_OK );
+  if( pHash ){
+    sqlite3Fts3HashClear(pHash);
+    sqlite3_free(pHash);
+  }
+  return rc;
+}
+
+#if !SQLITE_CORE
+int sqlite3_extension_init(
+  sqlite3 *db,
+  char **pzErrMsg,
+  const sqlite3_api_routines *pApi
+){
+  SQLITE_EXTENSION_INIT2(pApi)
+  return sqlite3Fts3Init(db);
+}
+#endif
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
diff --git a/third_party/sqlite/ext/fts3/fts3.h b/third_party/sqlite/ext/fts3/fts3.h
new file mode 100755
index 0000000..c1aa8ca
--- /dev/null
+++ b/third_party/sqlite/ext/fts3/fts3.h
@@ -0,0 +1,26 @@
+/*
+** 2006 Oct 10
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** FTS3 library.  All it does is declare the sqlite3Fts3Init() interface.
+*/
+#include "sqlite3.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif  /* __cplusplus */
+
+int sqlite3Fts3Init(sqlite3 *db);
+
+#ifdef __cplusplus
+}  /* extern "C" */
+#endif  /* __cplusplus */
diff --git a/third_party/sqlite/ext/fts3/fts3_hash.c b/third_party/sqlite/ext/fts3/fts3_hash.c
new file mode 100755
index 0000000..64bd5d0
--- /dev/null
+++ b/third_party/sqlite/ext/fts3/fts3_hash.c
@@ -0,0 +1,374 @@
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of generic hash-tables used in SQLite.
+** We've modified it slightly to serve as a standalone hash table
+** implementation for the full-text indexing module.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+**     * The FTS3 module is being built as an extension
+**       (in which case SQLITE_CORE is not defined), or
+**
+**     * The FTS3 module is being built into the core of
+**       SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "sqlite3.h"
+#include "fts3_hash.h"
+
+/*
+** Malloc and Free functions
+*/
+static void *fts3HashMalloc(int n){
+  void *p = sqlite3_malloc(n);
+  if( p ){
+    memset(p, 0, n);
+  }
+  return p;
+}
+static void fts3HashFree(void *p){
+  sqlite3_free(p);
+}
+
+/* Turn bulk memory into a hash table object by initializing the
+** fields of the Hash structure.
+**
+** "pNew" is a pointer to the hash table that is to be initialized.
+** keyClass is one of the constants
+** FTS3_HASH_BINARY or FTS3_HASH_STRING.  The value of keyClass
+** determines what kind of key the hash table will use.  "copyKey" is
+** true if the hash table should make its own private copy of keys and
+** false if it should just use the supplied pointer.
+*/
+void sqlite3Fts3HashInit(fts3Hash *pNew, int keyClass, int copyKey){
+  assert( pNew!=0 );
+  assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY );
+  pNew->keyClass = keyClass;
+  pNew->copyKey = copyKey;
+  pNew->first = 0;
+  pNew->count = 0;
+  pNew->htsize = 0;
+  pNew->ht = 0;
+}
+
+/* Remove all entries from a hash table.  Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
+*/
+void sqlite3Fts3HashClear(fts3Hash *pH){
+  fts3HashElem *elem;      /* For looping over all elements of the table */
+
+  assert( pH!=0 );
+  elem = pH->first;
+  pH->first = 0;
+  fts3HashFree(pH->ht);
+  pH->ht = 0;
+  pH->htsize = 0;
+  while( elem ){
+    fts3HashElem *next_elem = elem->next;
+    if( pH->copyKey && elem->pKey ){
+      fts3HashFree(elem->pKey);
+    }
+    fts3HashFree(elem);
+    elem = next_elem;
+  }
+  pH->count = 0;
+}
+
+/*
+** Hash and comparison functions when the mode is FTS3_HASH_STRING
+*/
+static int fts3StrHash(const void *pKey, int nKey){
+  const char *z = (const char *)pKey;
+  int h = 0;
+  if( nKey<=0 ) nKey = (int) strlen(z);
+  while( nKey > 0  ){
+    h = (h<<3) ^ h ^ *z++;
+    nKey--;
+  }
+  return h & 0x7fffffff;
+}
+static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( n1!=n2 ) return 1;
+  return strncmp((const char*)pKey1,(const char*)pKey2,n1);
+}
+
+/*
+** Hash and comparison functions when the mode is FTS3_HASH_BINARY
+*/
+static int fts3BinHash(const void *pKey, int nKey){
+  int h = 0;
+  const char *z = (const char *)pKey;
+  while( nKey-- > 0 ){
+    h = (h<<3) ^ h ^ *(z++);
+  }
+  return h & 0x7fffffff;
+}
+static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){
+  if( n1!=n2 ) return 1;
+  return memcmp(pKey1,pKey2,n1);
+}
+
+/*
+** Return a pointer to the appropriate hash function given the key class.
+**
+** The C syntax in this function definition may be unfamiliar to some
+** programmers, so we provide the following additional explanation:
+**
+** The name of the function is "ftsHashFunction".  The function takes a
+** single parameter "keyClass".  The return value of ftsHashFunction()
+** is a pointer to another function.  Specifically, the return value
+** of ftsHashFunction() is a pointer to a function that takes two parameters
+** with types "const void*" and "int" and returns an "int".
+*/
+static int (*ftsHashFunction(int keyClass))(const void*,int){
+  if( keyClass==FTS3_HASH_STRING ){
+    return &fts3StrHash;
+  }else{
+    assert( keyClass==FTS3_HASH_BINARY );
+    return &fts3BinHash;
+  }
+}
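+
+/* Editor's aside: an equivalent way to write the declaration of
+** ftsHashFunction() above, using a typedef for the function-pointer type.
+** This is a readability sketch, not a proposed change; "fts3HashFn" is a
+** name invented here:
+**
+**   typedef int (*fts3HashFn)(const void*, int);
+**   static fts3HashFn ftsHashFunction(int keyClass);
+*/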
+
+/*
+** Return a pointer to the appropriate comparison function given the
+** key class.
+**
+** For help in interpreting the obscure C code in the function definition,
+** see the header comment on the previous function.
+*/
+static int (*ftsCompareFunction(int keyClass))(const void*,int,const void*,int){
+  if( keyClass==FTS3_HASH_STRING ){
+    return &fts3StrCompare;
+  }else{
+    assert( keyClass==FTS3_HASH_BINARY );
+    return &fts3BinCompare;
+  }
+}
+
+/* Link an element into the hash table
+*/
+static void fts3HashInsertElement(
+  fts3Hash *pH,            /* The complete hash table */
+  struct _fts3ht *pEntry,  /* The entry into which pNew is inserted */
+  fts3HashElem *pNew       /* The element to be inserted */
+){
+  fts3HashElem *pHead;     /* First element already in pEntry */
+  pHead = pEntry->chain;
+  if( pHead ){
+    pNew->next = pHead;
+    pNew->prev = pHead->prev;
+    if( pHead->prev ){ pHead->prev->next = pNew; }
+    else             { pH->first = pNew; }
+    pHead->prev = pNew;
+  }else{
+    pNew->next = pH->first;
+    if( pH->first ){ pH->first->prev = pNew; }
+    pNew->prev = 0;
+    pH->first = pNew;
+  }
+  pEntry->count++;
+  pEntry->chain = pNew;
+}
+
+
+/* Resize the hash table so that it contains "new_size" buckets.
+** "new_size" must be a power of 2.  The hash table might fail
+** to resize if sqliteMalloc() fails.
+*/
+static void fts3Rehash(fts3Hash *pH, int new_size){
+  struct _fts3ht *new_ht;          /* The new hash table */
+  fts3HashElem *elem, *next_elem;  /* For looping over existing elements */
+  int (*xHash)(const void*,int);   /* The hash function */
+
+  assert( (new_size & (new_size-1))==0 );
+  new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) );
+  if( new_ht==0 ) return;
+  fts3HashFree(pH->ht);
+  pH->ht = new_ht;
+  pH->htsize = new_size;
+  xHash = ftsHashFunction(pH->keyClass);
+  for(elem=pH->first, pH->first=0; elem; elem = next_elem){
+    int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1);
+    next_elem = elem->next;
+    fts3HashInsertElement(pH, &new_ht[h], elem);
+  }
+}
+
+/* This function (for internal use only) locates an element in a
+** hash table that matches the given key.  The hash for this key has
+** already been computed and is passed as the 4th parameter.
+*/
+static fts3HashElem *fts3FindElementByHash(
+  const fts3Hash *pH,  /* The pH to be searched */
+  const void *pKey,    /* The key we are searching for */
+  int nKey,
+  int h                /* The hash for this key. */
+){
+  fts3HashElem *elem;             /* Used to loop thru the element list */
+  int count;                      /* Number of elements left to test */
+  int (*xCompare)(const void*,int,const void*,int);  /* comparison function */
+
+  if( pH->ht ){
+    struct _fts3ht *pEntry = &pH->ht[h];
+    elem = pEntry->chain;
+    count = pEntry->count;
+    xCompare = ftsCompareFunction(pH->keyClass);
+    while( count-- && elem ){
+      if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){
+        return elem;
+      }
+      elem = elem->next;
+    }
+  }
+  return 0;
+}
+
+/* Remove a single entry from the hash table given a pointer to that
+** element and a hash on the element's key.
+*/ +static void fts3RemoveElementByHash( + fts3Hash *pH, /* The pH containing "elem" */ + fts3HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts3ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts3HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +void *sqlite3Fts3HashFind(const fts3Hash *pH, const void *pKey, int nKey){ + int h; /* A hash on key */ + fts3HashElem *elem; /* The element that matches key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = ftsHashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + elem = fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1)); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. 
+*/ +void *sqlite3Fts3HashInsert( + fts3Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + fts3HashElem *elem; /* Used to loop thru the element list */ + fts3HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = ftsHashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = fts3FindElementByHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + fts3RemoveElementByHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (fts3HashElem*)fts3HashMalloc( sizeof(fts3HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = fts3HashMalloc( nKey ); + if( new_elem->pKey==0 ){ + fts3HashFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + if( pH->htsize==0 ){ + fts3Rehash(pH,8); + if( pH->htsize==0 ){ + pH->count = 0; + fts3HashFree(new_elem); + return data; + } + } + if( pH->count > pH->htsize ){ + fts3Rehash(pH,pH->htsize*2); + } + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + fts3HashInsertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/third_party/sqlite/ext/fts3/fts3_hash.h b/third_party/sqlite/ext/fts3/fts3_hash.h new file mode 100755 index 0000000..e01954e --- /dev/null +++ b/third_party/sqlite/ext/fts3/fts3_hash.h @@ -0,0 +1,110 @@ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implemenation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS3_HASH_H_ +#define _FTS3_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct fts3Hash fts3Hash; +typedef struct fts3HashElem fts3HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. 
+*/ +struct fts3Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + fts3HashElem *first; /* The first element of the array */ + int htsize; /* Number of buckets in the hash table */ + struct _fts3ht { /* the hash table */ + int count; /* Number of entries with this hash */ + fts3HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct fts3HashElem { + fts3HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS3_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS3_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts3HashInit is 1. +*/ +#define FTS3_HASH_STRING 1 +#define FTS3_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +void sqlite3Fts3HashInit(fts3Hash*, int keytype, int copyKey); +void *sqlite3Fts3HashInsert(fts3Hash*, const void *pKey, int nKey, void *pData); +void *sqlite3Fts3HashFind(const fts3Hash*, const void *pKey, int nKey); +void sqlite3Fts3HashClear(fts3Hash*); + +/* +** Shorthand for the functions above +*/ +#define fts3HashInit sqlite3Fts3HashInit +#define fts3HashInsert sqlite3Fts3HashInsert +#define fts3HashFind sqlite3Fts3HashFind +#define fts3HashClear sqlite3Fts3HashClear + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** fts3Hash h; +** fts3HashElem *p; +** ... +** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){ +** SomeStructure *pData = fts3HashData(p); +** // do something with pData +** } +*/ +#define fts3HashFirst(H) ((H)->first) +#define fts3HashNext(E) ((E)->next) +#define fts3HashData(E) ((E)->data) +#define fts3HashKey(E) ((E)->pKey) +#define fts3HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts3HashCount(H) ((H)->count) + +#endif /* _FTS3_HASH_H_ */ diff --git a/third_party/sqlite/ext/fts3/fts3_icu.c b/third_party/sqlite/ext/fts3/fts3_icu.c new file mode 100755 index 0000000..e2d040c --- /dev/null +++ b/third_party/sqlite/ext/fts3/fts3_icu.c @@ -0,0 +1,258 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a tokenizer for fts3 based on the ICU library. 
+**
+** $Id: fts3_icu.c,v 1.2 2007/10/24 21:52:37 shess Exp $
+*/
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+#ifdef SQLITE_ENABLE_ICU
+
+#include <assert.h>
+#include <string.h>
+#include "fts3_tokenizer.h"
+
+#include <unicode/ubrk.h>
+#include <unicode/ucol.h>
+#include <unicode/ustring.h>
+#include <unicode/utf16.h>
+
+typedef struct IcuTokenizer IcuTokenizer;
+typedef struct IcuCursor IcuCursor;
+
+struct IcuTokenizer {
+  sqlite3_tokenizer base;
+  char *zLocale;
+};
+
+struct IcuCursor {
+  sqlite3_tokenizer_cursor base;
+
+  UBreakIterator *pIter;      /* ICU break-iterator object */
+  int nChar;                  /* Number of UChar elements in pInput */
+  UChar *aChar;               /* Copy of input using utf-16 encoding */
+  int *aOffset;               /* Offsets of each character in utf-8 input */
+
+  int nBuffer;
+  char *zBuffer;
+
+  int iToken;
+};
+
+/*
+** Create a new tokenizer instance.
+*/
+static int icuCreate(
+  int argc,                            /* Number of entries in argv[] */
+  const char * const *argv,            /* Tokenizer creation arguments */
+  sqlite3_tokenizer **ppTokenizer      /* OUT: Created tokenizer */
+){
+  IcuTokenizer *p;
+  int n = 0;
+
+  if( argc>0 ){
+    n = strlen(argv[0])+1;
+  }
+  p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n);
+  if( !p ){
+    return SQLITE_NOMEM;
+  }
+  memset(p, 0, sizeof(IcuTokenizer));
+
+  if( n ){
+    p->zLocale = (char *)&p[1];
+    memcpy(p->zLocale, argv[0], n);
+  }
+
+  *ppTokenizer = (sqlite3_tokenizer *)p;
+
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int icuDestroy(sqlite3_tokenizer *pTokenizer){
+  IcuTokenizer *p = (IcuTokenizer *)pTokenizer;
+  sqlite3_free(p);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string.  The input
+** string to be tokenized is pInput[0..nBytes-1].  A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int icuOpen(
+  sqlite3_tokenizer *pTokenizer,         /* The tokenizer */
+  const char *zInput,                    /* Input string */
+  int nInput,                            /* Length of zInput in bytes */
+  sqlite3_tokenizer_cursor **ppCursor    /* OUT: Tokenization cursor */
+){
+  IcuTokenizer *p = (IcuTokenizer *)pTokenizer;
+  IcuCursor *pCsr;
+
+  const int32_t opt = U_FOLD_CASE_DEFAULT;
+  UErrorCode status = U_ZERO_ERROR;
+  int nChar;
+
+  UChar32 c;
+  int iInput = 0;
+  int iOut = 0;
+
+  *ppCursor = 0;
+
+  if( -1 == nInput ) nInput = strlen(zInput);
+  nChar = nInput+1;
+  pCsr = (IcuCursor *)sqlite3_malloc(
+      sizeof(IcuCursor) +                /* IcuCursor */
+      nChar * sizeof(UChar) +            /* IcuCursor.aChar[] */
+      (nChar+1) * sizeof(int)            /* IcuCursor.aOffset[] */
+  );
+  if( !pCsr ){
+    return SQLITE_NOMEM;
+  }
+  memset(pCsr, 0, sizeof(IcuCursor));
+  pCsr->aChar = (UChar *)&pCsr[1];
+  pCsr->aOffset = (int *)&pCsr->aChar[nChar];
+
+  pCsr->aOffset[iOut] = iInput;
+  U8_NEXT(zInput, iInput, nInput, c);
+  while( c>0 ){
+    int isError = 0;
+    c = u_foldCase(c, opt);
+    U16_APPEND(pCsr->aChar, iOut, nChar, c, isError);
+    if( isError ){
+      sqlite3_free(pCsr);
+      return SQLITE_ERROR;
+    }
+    pCsr->aOffset[iOut] = iInput;
+
+    if( iInput<nInput ){
+      U8_NEXT(zInput, iInput, nInput, c);
+    }else{
+      c = 0;
+    }
+  }
+
+  pCsr->pIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status);
+  if( !U_SUCCESS(status) ){
+    sqlite3_free(pCsr);
+    return SQLITE_ERROR;
+  }
+  pCsr->nChar = iOut;
+
+  ubrk_first(pCsr->pIter);
+  *ppCursor = (sqlite3_tokenizer_cursor *)pCsr;
+  return SQLITE_OK;
+}
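+
+/* Editor's aside (not part of this source drop): a sketch of how a schema
+** might select this tokenizer, passing an ICU locale as its argument.  The
+** table name and locale are invented, and the exact spelling of the
+** tokenize clause may vary between fts3 versions:
+**
+**   CREATE VIRTUAL TABLE mail USING fts3(body, tokenize icu en_US);
+**
+** icuOpen() above case-folds both the indexed text and MATCH terms the
+** same way, so queries are case-insensitive under the chosen locale.
+*/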
+
+/*
+** Close a tokenization cursor previously opened by a call to icuOpen().
+*/
+static int icuClose(sqlite3_tokenizer_cursor *pCursor){
+  IcuCursor *pCsr = (IcuCursor *)pCursor;
+  ubrk_close(pCsr->pIter);
+  sqlite3_free(pCsr->zBuffer);
+  sqlite3_free(pCsr);
+  return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor.
+*/
+static int icuNext(
+  sqlite3_tokenizer_cursor *pCursor,  /* Cursor returned by icuOpen */
+  const char **ppToken,               /* OUT: *ppToken is the token text */
+  int *pnBytes,                       /* OUT: Number of bytes in token */
+  int *piStartOffset,                 /* OUT: Starting offset of token */
+  int *piEndOffset,                   /* OUT: Ending offset of token */
+  int *piPosition                     /* OUT: Position integer of token */
+){
+  IcuCursor *pCsr = (IcuCursor *)pCursor;
+
+  int iStart = 0;
+  int iEnd = 0;
+  int nByte = 0;
+
+  while( iStart==iEnd ){
+    UChar32 c;
+
+    iStart = ubrk_current(pCsr->pIter);
+    iEnd = ubrk_next(pCsr->pIter);
+    if( iEnd==UBRK_DONE ){
+      return SQLITE_DONE;
+    }
+
+    while( iStart<iEnd ){
+      int iWhite = iStart;
+      U8_NEXT(pCsr->aChar, iWhite, pCsr->nChar, c);
+      if( u_isspace(c) ){
+        iStart = iWhite;
+      }else{
+        break;
+      }
+    }
+    assert(iStart<=iEnd);
+  }
+
+  do {
+    UErrorCode status = U_ZERO_ERROR;
+    if( nByte ){
+      char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte);
+      if( !zNew ){
+        return SQLITE_NOMEM;
+      }
+      pCsr->zBuffer = zNew;
+      pCsr->nBuffer = nByte;
+    }
+
+    u_strToUTF8(
+        pCsr->zBuffer, pCsr->nBuffer, &nByte,    /* Output vars */
+        &pCsr->aChar[iStart], iEnd-iStart,       /* Input vars */
+        &status                                  /* Output success/failure */
+    );
+  } while( nByte>pCsr->nBuffer );
+
+  *ppToken = pCsr->zBuffer;
+  *pnBytes = nByte;
+  *piStartOffset = pCsr->aOffset[iStart];
+  *piEndOffset = pCsr->aOffset[iEnd];
+  *piPosition = pCsr->iToken++;
+
+  return SQLITE_OK;
+}
+
+/*
+** The set of routines that implement the ICU tokenizer
+*/
+static const sqlite3_tokenizer_module icuTokenizerModule = {
+  0,                           /* iVersion */
+  icuCreate,                   /* xCreate  */
+  icuDestroy,                  /* xDestroy */
+  icuOpen,                     /* xOpen    */
+  icuClose,                    /* xClose   */
+  icuNext,                     /* xNext    */
+};
+
+/*
+** Set *ppModule to point at the implementation of the ICU tokenizer.
+*/
+void sqlite3Fts3IcuTokenizerModule(
+  sqlite3_tokenizer_module const**ppModule
+){
+  *ppModule = &icuTokenizerModule;
+}
+
+#endif /* defined(SQLITE_ENABLE_ICU) */
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
diff --git a/third_party/sqlite/ext/fts3/fts3_porter.c b/third_party/sqlite/ext/fts3/fts3_porter.c
new file mode 100755
index 0000000..6ff67a9
--- /dev/null
+++ b/third_party/sqlite/ext/fts3/fts3_porter.c
@@ -0,0 +1,642 @@
+/*
+** 2006 September 30
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Implementation of the full-text-search tokenizer that implements
+** a Porter stemmer.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+**     * The FTS3 module is being built as an extension
+**       (in which case SQLITE_CORE is not defined), or
+**
+**     * The FTS3 module is being built into the core of
+**       SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
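+
+/* Editor's aside (not part of this source drop): the observable effect of
+** this tokenizer at the SQL level, with invented names and a tokenize
+** clause whose exact spelling may vary between fts3 versions:
+**
+**   CREATE VIRTUAL TABLE docs USING fts3(body, tokenize porter);
+**   INSERT INTO docs(body) VALUES('they connected the wires');
+**
+**   SELECT * FROM docs WHERE body MATCH 'connection';  -- finds the row
+**
+** "connected" and "connection" both stem to the index term "connect",
+** which is why the query above matches.
+*/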
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "fts3_tokenizer.h"
+
+/*
+** Class derived from sqlite3_tokenizer
+*/
+typedef struct porter_tokenizer {
+  sqlite3_tokenizer base;      /* Base class */
+} porter_tokenizer;
+
+/*
+** Class derived from sqlite3_tokenizer_cursor
+*/
+typedef struct porter_tokenizer_cursor {
+  sqlite3_tokenizer_cursor base;
+  const char *zInput;          /* input we are tokenizing */
+  int nInput;                  /* size of the input */
+  int iOffset;                 /* current position in zInput */
+  int iToken;                  /* index of next token to be returned */
+  char *zToken;                /* storage for current token */
+  int nAllocated;              /* space allocated to zToken buffer */
+} porter_tokenizer_cursor;
+
+
+/* Forward declaration */
+static const sqlite3_tokenizer_module porterTokenizerModule;
+
+
+/*
+** Create a new tokenizer instance.
+*/
+static int porterCreate(
+  int argc, const char * const *argv,
+  sqlite3_tokenizer **ppTokenizer
+){
+  porter_tokenizer *t;
+  t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t));
+  if( t==NULL ) return SQLITE_NOMEM;
+  memset(t, 0, sizeof(*t));
+  *ppTokenizer = &t->base;
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int porterDestroy(sqlite3_tokenizer *pTokenizer){
+  sqlite3_free(pTokenizer);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string.  The input
+** string to be tokenized is zInput[0..nInput-1].  A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int porterOpen(
+  sqlite3_tokenizer *pTokenizer,         /* The tokenizer */
+  const char *zInput, int nInput,        /* String to be tokenized */
+  sqlite3_tokenizer_cursor **ppCursor    /* OUT: Tokenization cursor */
+){
+  porter_tokenizer_cursor *c;
+
+  c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c));
+  if( c==NULL ) return SQLITE_NOMEM;
+
+  c->zInput = zInput;
+  if( zInput==0 ){
+    c->nInput = 0;
+  }else if( nInput<0 ){
+    c->nInput = (int)strlen(zInput);
+  }else{
+    c->nInput = nInput;
+  }
+  c->iOffset = 0;                 /* start tokenizing at the beginning */
+  c->iToken = 0;
+  c->zToken = NULL;               /* no space allocated, yet. */
+  c->nAllocated = 0;
+
+  *ppCursor = &c->base;
+  return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** porterOpen() above.
+*/
+static int porterClose(sqlite3_tokenizer_cursor *pCursor){
+  porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor;
+  sqlite3_free(c->zToken);
+  sqlite3_free(c);
+  return SQLITE_OK;
+}
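+
+/* Editor's aside: a sketch (not part of this source drop) of how a caller
+** drives any sqlite3_tokenizer_module, this one included, through its
+** xCreate/xOpen/xNext/xClose/xDestroy hooks.  The helper name is invented,
+** error handling is reduced to early returns, and the pTokenizer back-link
+** mirrors what fts3 itself sets after each xOpen.  Guarded by #if 0 so it
+** is never compiled into this file.
+*/
+#if 0
+static int exampleCountTokens(const char *zText){
+  const sqlite3_tokenizer_module *pModule = &porterTokenizerModule;
+  sqlite3_tokenizer *pTok = 0;
+  sqlite3_tokenizer_cursor *pCsr = 0;
+  const char *zToken;
+  int nToken, iStart, iEnd, iPos;
+  int nSeen = 0;
+
+  if( pModule->xCreate(0, 0, &pTok)!=SQLITE_OK ) return -1;
+  if( pModule->xOpen(pTok, zText, -1, &pCsr)!=SQLITE_OK ){
+    pModule->xDestroy(pTok);
+    return -1;
+  }
+  pCsr->pTokenizer = pTok;   /* back-link, as fts3 does after xOpen */
+  while( pModule->xNext(pCsr, &zToken, &nToken,
+                        &iStart, &iEnd, &iPos)==SQLITE_OK ){
+    nSeen++;                 /* xNext returns SQLITE_DONE at end of input */
+  }
+  pModule->xClose(pCsr);
+  pModule->xDestroy(pTok);
+  return nSeen;
+}
+#endif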
+/*
+** Vowel or consonant
+*/
+static const char cType[] = {
+   0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0,
+   1, 1, 1, 2, 1
+};
+
+/*
+** isConsonant() and isVowel() determine if their first character in
+** the string they point to is a consonant or a vowel, according
+** to Porter rules.
+**
+** A consonant is any letter other than 'a', 'e', 'i', 'o', or 'u'.
+** 'Y' is a consonant unless it follows another consonant,
+** in which case it is a vowel.
+**
+** In these routines, the letters are in reverse order.  So the 'y' rule
+** is that 'y' is a consonant unless it is followed by another
+** consonant.
+*/
+static int isVowel(const char*);
+static int isConsonant(const char *z){
+  int j;
+  char x = *z;
+  if( x==0 ) return 0;
+  assert( x>='a' && x<='z' );
+  j = cType[x-'a'];
+  if( j<2 ) return j;
+  return z[1]==0 || isVowel(z + 1);
+}
+static int isVowel(const char *z){
+  int j;
+  char x = *z;
+  if( x==0 ) return 0;
+  assert( x>='a' && x<='z' );
+  j = cType[x-'a'];
+  if( j<2 ) return 1-j;
+  return isConsonant(z + 1);
+}
+
+/*
+** Let any sequence of one or more vowels be represented by V and let
+** C be sequence of one or more consonants.  Then every word can be
+** represented as:
+**
+**           [C] (VC){m} [V]
+**
+** In prose:  A word is an optional consonant followed by zero or more
+** vowel-consonant pairs followed by an optional vowel.  "m" is the
+** number of vowel consonant pairs.  This routine computes the value
+** of m for the first i bytes of a word.
+**
+** Return true if the m-value for z is 1 or more.  In other words,
+** return true if z contains at least one vowel that is followed
+** by a consonant.
+**
+** In this routine z[] is in reverse order.  So we are really looking
+** for an instance of a consonant followed by a vowel.
+*/
+static int m_gt_0(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m which is
+** exactly 1
+*/
+static int m_eq_1(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 1;
+  while( isConsonant(z) ){ z++; }
+  return *z==0;
+}
+
+/* Like m_gt_0 above except we are looking for a value of m>1 instead
+** of m>0
+*/
+static int m_gt_1(const char *z){
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isVowel(z) ){ z++; }
+  if( *z==0 ) return 0;
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/*
+** Return TRUE if there is a vowel anywhere within z[0..n-1]
+*/
+static int hasVowel(const char *z){
+  while( isConsonant(z) ){ z++; }
+  return *z!=0;
+}
+
+/*
+** Return TRUE if the word ends in a double consonant.
+**
+** The text is reversed here. So we are really looking at
+** the first two characters of z[].
+*/
+static int doubleConsonant(const char *z){
+  return isConsonant(z) && z[0]==z[1] && isConsonant(z+1);
+}
+
+/*
+** Return TRUE if the word ends with three letters which
+** are consonant-vowel-consonant and where the final consonant
+** is not 'w', 'x', or 'y'.
+**
+** The word is reversed here.  So we are really checking the
+** first three letters and the first one cannot be in [wxy].
+*/
+static int star_oh(const char *z){
+  return
+    z[0]!=0 && isConsonant(z) &&
+    z[0]!='w' && z[0]!='x' && z[0]!='y' &&
+    z[1]!=0 && isVowel(z+1) &&
+    z[2]!=0 && isConsonant(z+2);
+}
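+
+/* Editor's worked example for the m-value helpers above (not in the
+** original): the word "trees" arrives reversed as "seert".
+** m_gt_0("seert") skips no initial vowels, crosses the consonant 's',
+** and stops on 'e' with input remaining, so it returns true (m==1).
+** m_gt_1("seert") then crosses 's', the vowels "ee", and the final
+** consonants "rt" before hitting the terminator, so it returns false.
+*/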
+  int (*xCond)(const char*)   /* Condition that must be true */
+){
+  char *z = *pz;
+  while( *zFrom && *zFrom==*z ){ z++; zFrom++; }
+  if( *zFrom!=0 ) return 0;
+  if( xCond && !xCond(z) ) return 1;
+  while( *zTo ){
+    *(--z) = *(zTo++);
+  }
+  *pz = z;
+  return 1;
+}
+
+/*
+** This is the fallback stemmer used when the porter stemmer is
+** inappropriate.  The input word is copied into the output with
+** US-ASCII case folding.  If the input word is too long (more
+** than 20 bytes if it contains no digits or more than 6 bytes if
+** it contains digits) then the word is truncated to 20 or 6 bytes
+** by taking 10 or 3 bytes from the beginning and end.
+*/
+static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+  int i, mx, j;
+  int hasDigit = 0;
+  for(i=0; i<nIn; i++){
+    int c = zIn[i];
+    if( c>='A' && c<='Z' ){
+      zOut[i] = c - 'A' + 'a';
+    }else{
+      if( c>='0' && c<='9' ) hasDigit = 1;
+      zOut[i] = c;
+    }
+  }
+  mx = hasDigit ? 3 : 10;
+  if( nIn>mx*2 ){
+    for(j=mx, i=nIn-mx; i<nIn; i++, j++){
+      zOut[j] = zOut[i];
+    }
+    i = j;
+  }
+  zOut[i] = 0;
+  *pnOut = i;
+}
+
+
+/*
+** Stem the input word zIn[0..nIn-1].  Store the output in zOut.
+** zOut is at least big enough to hold nIn bytes.  Write the actual
+** size of the output word (exclusive of the '\0' terminator) into *pnOut.
+**
+** Any upper-case characters in the US-ASCII character set ([A-Z])
+** are converted to lower case.  Upper-case UTF characters are
+** unchanged.
+**
+** Words that are longer than about 20 bytes are stemmed by retaining
+** a few bytes from the beginning and the end of the word.  If the
+** word contains digits, 3 bytes are taken from the beginning and
+** 3 bytes from the end.  For long words without digits, 10 bytes
+** are taken from each end.  US-ASCII case folding still applies.
+**
+** If the input word contains no digits but does contain characters
+** not in [a-zA-Z] then no stemming is attempted and this routine just
+** copies the input into the output with US-ASCII case folding.
+**
+** Stemming never increases the length of the word.  So there is
+** no chance of overflowing the zOut buffer.
+*/
+static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+  int i, j, c;
+  char zReverse[28];
+  char *z, *z2;
+  if( nIn<3 || nIn>=sizeof(zReverse)-7 ){
+    /* The word is too big or too small for the porter stemmer.
+    ** Fall back to the copy stemmer */
+    copy_stemmer(zIn, nIn, zOut, pnOut);
+    return;
+  }
+  for(i=0, j=sizeof(zReverse)-6; i<nIn; i++, j--){
+    c = zIn[i];
+    if( c>='A' && c<='Z' ){
+      zReverse[j] = c + 'a' - 'A';
+    }else if( c>='a' && c<='z' ){
+      zReverse[j] = c;
+    }else{
+      /* The use of a character not in [a-zA-Z] means that we fall back
+      ** to the copy stemmer */
+      copy_stemmer(zIn, nIn, zOut, pnOut);
+      return;
+    }
+  }
+  memset(&zReverse[sizeof(zReverse)-5], 0, 5);
+  z = &zReverse[j+1];
+
+
+  /* Step 1a */
+  if( z[0]=='s' ){
+    if(
+     !stem(&z, "sess", "ss", 0) &&
+     !stem(&z, "sei", "i", 0)  &&
+     !stem(&z, "ss", "ss", 0)
+    ){
+      z++;
+    }
+  }
+
+  /* Step 1b */
+  z2 = z;
+  if( stem(&z, "dee", "ee", m_gt_0) ){
+    /* Do nothing.  The work was all in the test */
+  }else if(
+     (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel))
+      && z!=z2
+  ){
+     if( stem(&z, "ta", "ate", 0)  ||
+         stem(&z, "lb", "ble", 0)  ||
+         stem(&z, "zi", "ize", 0) ){
+       /* Do nothing.
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + stem(&z, "lanoita", "ate", m_gt_0) || + stem(&z, "lanoit", "tion", m_gt_0); + break; + case 'c': + stem(&z, "icne", "ence", m_gt_0) || + stem(&z, "icna", "ance", m_gt_0); + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + stem(&z, "ilb", "ble", m_gt_0) || + stem(&z, "illa", "al", m_gt_0) || + stem(&z, "iltne", "ent", m_gt_0) || + stem(&z, "ile", "e", m_gt_0) || + stem(&z, "ilsuo", "ous", m_gt_0); + break; + case 'o': + stem(&z, "noitazi", "ize", m_gt_0) || + stem(&z, "noita", "ate", m_gt_0) || + stem(&z, "rota", "ate", m_gt_0); + break; + case 's': + stem(&z, "msila", "al", m_gt_0) || + stem(&z, "ssenevi", "ive", m_gt_0) || + stem(&z, "ssenluf", "ful", m_gt_0) || + stem(&z, "ssensuo", "ous", m_gt_0); + break; + case 't': + stem(&z, "itila", "al", m_gt_0) || + stem(&z, "itivi", "ive", m_gt_0) || + stem(&z, "itilib", "ble", m_gt_0); + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + stem(&z, "etaci", "ic", m_gt_0) || + stem(&z, "evita", "", m_gt_0) || + stem(&z, "ezila", "al", m_gt_0); + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + stem(&z, "laci", "ic", m_gt_0) || + stem(&z, "luf", "", m_gt_0); + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + stem(&z, "tneme", "", m_gt_1) || + stem(&z, "tnem", "", m_gt_1) || + stem(&z, "tne", "", m_gt_1); + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + stem(&z, "eta", "", m_gt_1) || + stem(&z, "iti", "", m_gt_1); + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
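+**
+** For example, under this rule the input "don't stop" produces the
+** tokens "don", "t" and "stop" (the apostrophe and the space are
+** delimiters); each token is then passed through porter_stemmer()
+** before being returned.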
+*/ +static const char porterIdChar[] = { +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; +#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to porterOpen(). +*/ +static int porterNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ + const char **pzToken, /* OUT: *pzToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + const char *z = c->zInput; + + while( c->iOffset<c->nInput ){ + int iStartOffset, ch; + + /* Scan past delimiter characters */ + while( c->iOffset<c->nInput && isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffset<c->nInput && !isDelim(z[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int n = c->iOffset-iStartOffset; + if( n>c->nAllocated ){ + c->nAllocated = n+20; + c->zToken = sqlite3_realloc(c->zToken, c->nAllocated); + if( c->zToken==NULL ) return SQLITE_NOMEM; + } + porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); + *pzToken = c->zToken; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the porter-stemmer tokenizer +*/ +static const sqlite3_tokenizer_module porterTokenizerModule = { + 0, + porterCreate, + porterDestroy, + porterOpen, + porterClose, + porterNext, +}; + +/* +** Allocate a new porter tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts3PorterTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &porterTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/third_party/sqlite/ext/fts3/fts3_tokenizer.c b/third_party/sqlite/ext/fts3/fts3_tokenizer.c new file mode 100755 index 0000000..ef19995 --- /dev/null +++ b/third_party/sqlite/ext/fts3/fts3_tokenizer.c @@ -0,0 +1,371 @@ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is part of an SQLite module implementing full-text search. +** This particular file implements the generic tokenizer interface. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). 
+*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#include "sqlite3ext.h" +#ifndef SQLITE_CORE + SQLITE_EXTENSION_INIT1 +#endif + +#include "fts3_hash.h" +#include "fts3_tokenizer.h" +#include <assert.h> + +/* +** Implementation of the SQL scalar function for accessing the underlying +** hash table. This function may be called as follows: +** +** SELECT <function-name>(<key-name>); +** SELECT <function-name>(<key-name>, <pointer>); +** +** where <function-name> is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer'). +** +** If the <pointer> argument is specified, it must be a blob value +** containing a pointer to be stored as the hash data corresponding +** to the string <key-name>. If <pointer> is not specified, then +** the string <key-name> must already exist in the has table. Otherwise, +** an error is returned. +** +** Whether or not the <pointer> argument is specified, the value returned +** is a blob containing the pointer stored as the hash data corresponding +** to string <key-name> (after the hash-table is updated, if applicable). +*/ +static void scalarFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts3Hash *pHash; + void *pPtr = 0; + const unsigned char *zName; + int nName; + + assert( argc==1 || argc==2 ); + + pHash = (fts3Hash *)sqlite3_user_data(context); + + zName = sqlite3_value_text(argv[0]); + nName = sqlite3_value_bytes(argv[0])+1; + + if( argc==2 ){ + void *pOld; + int n = sqlite3_value_bytes(argv[1]); + if( n!=sizeof(pPtr) ){ + sqlite3_result_error(context, "argument type mismatch", -1); + return; + } + pPtr = *(void **)sqlite3_value_blob(argv[1]); + pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr); + if( pOld==pPtr ){ + sqlite3_result_error(context, "out of memory", -1); + return; + } + }else{ + pPtr = sqlite3Fts3HashFind(pHash, zName, nName); + if( !pPtr ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + } + + sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); +} + +#ifdef SQLITE_TEST + +#include <tcl.h> +#include <string.h> + +/* +** Implementation of a special SQL scalar function for testing tokenizers +** designed to be used in concert with the Tcl testing framework. This +** function must be called with two arguments: +** +** SELECT <function-name>(<key-name>, <input-string>); +** SELECT <function-name>(<key-name>, <pointer>); +** +** where <function-name> is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer') +** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test'). +** +** The return value is a string that may be interpreted as a Tcl +** list. For each token in the <input-string>, three elements are +** added to the returned list. The first is the token position, the +** second is the token text (folded, stemmed, etc.) and the third is the +** substring of <input-string> associated with the token. 
For example, +** using the built-in "simple" tokenizer: +** +** SELECT fts_tokenizer_test('simple', 'I don't see how'); +** +** will return the string: +** +** "{0 i I 1 dont don't 2 see see 3 how how}" +** +*/ +static void testFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + fts3Hash *pHash; + sqlite3_tokenizer_module *p; + sqlite3_tokenizer *pTokenizer = 0; + sqlite3_tokenizer_cursor *pCsr = 0; + + const char *zErr = 0; + + const char *zName; + int nName; + const char *zInput; + int nInput; + + const char *zArg = 0; + + const char *zToken; + int nToken; + int iStart; + int iEnd; + int iPos; + + Tcl_Obj *pRet; + + assert( argc==2 || argc==3 ); + + nName = sqlite3_value_bytes(argv[0]); + zName = (const char *)sqlite3_value_text(argv[0]); + nInput = sqlite3_value_bytes(argv[argc-1]); + zInput = (const char *)sqlite3_value_text(argv[argc-1]); + + if( argc==3 ){ + zArg = (const char *)sqlite3_value_text(argv[1]); + } + + pHash = (fts3Hash *)sqlite3_user_data(context); + p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); + + if( !p ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + + pRet = Tcl_NewObj(); + Tcl_IncrRefCount(pRet); + + if( SQLITE_OK!=p->xCreate(zArg ? 1 : 0, &zArg, &pTokenizer) ){ + zErr = "error in xCreate()"; + goto finish; + } + pTokenizer->pModule = p; + if( SQLITE_OK!=p->xOpen(pTokenizer, zInput, nInput, &pCsr) ){ + zErr = "error in xOpen()"; + goto finish; + } + pCsr->pTokenizer = pTokenizer; + + while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ + Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + zToken = &zInput[iStart]; + nToken = iEnd-iStart; + Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); + } + + if( SQLITE_OK!=p->xClose(pCsr) ){ + zErr = "error in xClose()"; + goto finish; + } + if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ + zErr = "error in xDestroy()"; + goto finish; + } + +finish: + if( zErr ){ + sqlite3_result_error(context, zErr, -1); + }else{ + sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); + } + Tcl_DecrRefCount(pRet); +} + +static +int registerTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module *p +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); + sqlite3_step(pStmt); + + return sqlite3_finalize(pStmt); +} + +static +int queryTokenizer( + sqlite3 *db, + char *zName, + const sqlite3_tokenizer_module **pp +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); +} + +void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); + +/* +** Implementation of the scalar function fts3_tokenizer_internal_test(). 
+** This function is used for testing only; it is not included in the
+** build unless SQLITE_TEST is defined.
+**
+** The purpose of this is to test that the fts3_tokenizer() function
+** can be used as designed by the C-code in the queryTokenizer() and
+** registerTokenizer() functions above. These two functions are repeated
+** in the README.tokenizer file as an example, so it is important to
+** test them.
+**
+** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar
+** function with no arguments. An assert() will fail if a problem is
+** detected. i.e.:
+**
+**     SELECT fts3_tokenizer_internal_test();
+**
+*/
+static void intTestFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int rc;
+  const sqlite3_tokenizer_module *p1;
+  const sqlite3_tokenizer_module *p2;
+  sqlite3 *db = (sqlite3 *)sqlite3_user_data(context);
+
+  /* Test the query function */
+  sqlite3Fts3SimpleTokenizerModule(&p1);
+  rc = queryTokenizer(db, "simple", &p2);
+  assert( rc==SQLITE_OK );
+  assert( p1==p2 );
+  rc = queryTokenizer(db, "nosuchtokenizer", &p2);
+  assert( rc==SQLITE_ERROR );
+  assert( p2==0 );
+  assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") );
+
+  /* Test the storage function */
+  rc = registerTokenizer(db, "nosuchtokenizer", p1);
+  assert( rc==SQLITE_OK );
+  rc = queryTokenizer(db, "nosuchtokenizer", &p2);
+  assert( rc==SQLITE_OK );
+  assert( p2==p1 );
+
+  sqlite3_result_text(context, "ok", -1, SQLITE_STATIC);
+}
+
+#endif
+
+/*
+** Set up SQL objects in database db used to access the contents of
+** the hash table pointed to by argument pHash. The hash table must
+** have been initialised to use string keys, and to take a private copy
+** of the key when a value is inserted. i.e. by a call similar to:
+**
+**    sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1);
+**
+** This function adds a scalar function (see header comment above
+** scalarFunc() in this file for details) and, if ENABLE_TABLE is
+** defined at compilation time, a temporary virtual table (see header
+** comment above struct HashTableVtab) to the database schema. Both
+** provide read/write access to the contents of *pHash.
+**
+** The third argument to this function, zName, is used as the name
+** of both the scalar and, if created, the virtual table.
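+**
+** For example, if zName is "fts3_tokenizer", a tokenizer module may
+** later be registered and queried from SQL (the tokenizer name below
+** is illustrative):
+**
+**     SELECT fts3_tokenizer('mytokenizer', <module-pointer-blob>);
+**     SELECT fts3_tokenizer('mytokenizer');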
+*/ +int sqlite3Fts3InitHashTable( + sqlite3 *db, + fts3Hash *pHash, + const char *zName +){ + int rc = SQLITE_OK; + void *p = (void *)pHash; + const int any = SQLITE_ANY; + char *zTest = 0; + char *zTest2 = 0; + +#ifdef SQLITE_TEST + void *pdb = (void *)db; + zTest = sqlite3_mprintf("%s_test", zName); + zTest2 = sqlite3_mprintf("%s_internal_test", zName); + if( !zTest || !zTest2 ){ + rc = SQLITE_NOMEM; + } +#endif + + if( rc!=SQLITE_OK + || (rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0)) +#ifdef SQLITE_TEST + || (rc = sqlite3_create_function(db, zTest, 2, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest, 3, any, p, testFunc, 0, 0)) + || (rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0)) +#endif + ); + + sqlite3_free(zTest); + sqlite3_free(zTest2); + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/third_party/sqlite/ext/fts3/fts3_tokenizer.h b/third_party/sqlite/ext/fts3/fts3_tokenizer.h new file mode 100755 index 0000000..4faef56 --- /dev/null +++ b/third_party/sqlite/ext/fts3/fts3_tokenizer.h @@ -0,0 +1,145 @@ +/* +** 2006 July 10 +** +** The author disclaims copyright to this source code. +** +************************************************************************* +** Defines the interface to tokenizers used by fulltext-search. There +** are three basic components: +** +** sqlite3_tokenizer_module is a singleton defining the tokenizer +** interface functions. This is essentially the class structure for +** tokenizers. +** +** sqlite3_tokenizer is used to define a particular tokenizer, perhaps +** including customization information defined at creation time. +** +** sqlite3_tokenizer_cursor is generated by a tokenizer to generate +** tokens from a particular input. +*/ +#ifndef _FTS3_TOKENIZER_H_ +#define _FTS3_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +#include "sqlite3.h" + +/* +** Structures used by the tokenizer interface. When a new tokenizer +** implementation is registered, the caller provides a pointer to +** an sqlite3_tokenizer_module containing pointers to the callback +** functions that make up an implementation. +** +** When an fts3 table is created, it passes any arguments passed to +** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the +** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer +** implementation. The xCreate() function in turn returns an +** sqlite3_tokenizer structure representing the specific tokenizer to +** be used for the fts3 table (customized by the tokenizer clause arguments). +** +** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() +** method is called. It returns an sqlite3_tokenizer_cursor object +** that may be used to tokenize a specific input buffer based on +** the tokenization rules supplied by a specific sqlite3_tokenizer +** object. +*/ +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; + +struct sqlite3_tokenizer_module { + + /* + ** Structure version. Should always be set to 0. + */ + int iVersion; + + /* + ** Create a new tokenizer. 
The values in the argv[] array are the + ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL + ** TABLE statement that created the fts3 table. For example, if + ** the following SQL is executed: + ** + ** CREATE .. USING fts3( ... , tokenizer <tokenizer-name> arg1 arg2) + ** + ** then argc is set to 2, and the argv[] array contains pointers + ** to the strings "arg1" and "arg2". + ** + ** This method should return either SQLITE_OK (0), or an SQLite error + ** code. If SQLITE_OK is returned, then *ppTokenizer should be set + ** to point at the newly created tokenizer structure. The generic + ** sqlite3_tokenizer.pModule variable should not be initialised by + ** this callback. The caller will do so. + */ + int (*xCreate)( + int argc, /* Size of argv array */ + const char *const*argv, /* Tokenizer argument strings */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ + ); + + /* + ** Destroy an existing tokenizer. The fts3 module calls this method + ** exactly once for each successful call to xCreate(). + */ + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Create a tokenizer cursor to tokenize an input buffer. The caller + ** is responsible for ensuring that the input buffer remains valid + ** until the cursor is closed (using the xClose() method). + */ + int (*xOpen)( + sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ + const char *pInput, int nBytes, /* Input buffer */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ + ); + + /* + ** Destroy an existing tokenizer cursor. The fts3 module calls this + ** method exactly once for each successful call to xOpen(). + */ + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + + /* + ** Retrieve the next token from the tokenizer cursor pCursor. This + ** method should either return SQLITE_OK and set the values of the + ** "OUT" variables identified below, or SQLITE_DONE to indicate that + ** the end of the buffer has been reached, or an SQLite error code. + ** + ** *ppToken should be set to point at a buffer containing the + ** normalized version of the token (i.e. after any case-folding and/or + ** stemming has been performed). *pnBytes should be set to the length + ** of this buffer in bytes. The input text that generated the token is + ** identified by the byte offsets returned in *piStartOffset and + ** *piEndOffset. + ** + ** The buffer *ppToken is set to point at is managed by the tokenizer + ** implementation. It is only required to be valid until the next call + ** to xNext() or xClose(). + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xNext)( + sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ + const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ + int *piStartOffset, /* OUT: Byte offset of token in input buffer */ + int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ + int *piPosition /* OUT: Number of tokens returned before this one */ + ); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. 
*/ + /* Tokenizer implementations will typically add additional fields */ +}; + +#endif /* _FTS3_TOKENIZER_H_ */ diff --git a/third_party/sqlite/ext/fts3/fts3_tokenizer1.c b/third_party/sqlite/ext/fts3/fts3_tokenizer1.c new file mode 100755 index 0000000..da255d9 --- /dev/null +++ b/third_party/sqlite/ext/fts3/fts3_tokenizer1.c @@ -0,0 +1,230 @@ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +#include "fts3_tokenizer.h" + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +/* Forward declaration */ +static const sqlite3_tokenizer_module simpleTokenizerModule; + +static int simpleDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); + if( t==NULL ) return SQLITE_NOMEM; + memset(t, 0, sizeof(*t)); + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. + */ + if( argc>1 ){ + int i, n = strlen(argv[1]); + for(i=0; i<n; i++){ + unsigned char ch = argv[1][i]; + /* We explicitly don't support UTF-8 delimiters for now. */ + if( ch>=0x80 ){ + sqlite3_free(t); + return SQLITE_ERROR; + } + t->delim[ch] = 1; + } + } else { + /* Mark non-alphanumeric ASCII characters as delimiters */ + int i; + for(i=1; i<0x80; i++){ + t->delim[i] = !isalnum(i); + } + } + + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ + sqlite3_free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is pInput[0..nBytes-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. 
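+**
+** If pInput is NULL it is treated as an empty string; if nBytes is
+** negative, pInput is assumed to be nul-terminated and its length is
+** computed with strlen().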
+*/ +static int simpleOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *pInput, int nBytes, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + simple_tokenizer_cursor *c; + + c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->pInput = pInput; + if( pInput==0 ){ + c->nBytes = 0; + }else if( nBytes<0 ){ + c->nBytes = (int)strlen(pInput); + }else{ + c->nBytes = nBytes; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->pToken = NULL; /* no space allocated, yet. */ + c->nTokenAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** simpleOpen() above. +*/ +static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + sqlite3_free(c->pToken); + sqlite3_free(c); + return SQLITE_OK; +} + +/* +** Extract the next token from a tokenization cursor. The cursor must +** have been opened by a prior call to simpleOpen(). +*/ +static int simpleNext( + sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ + const char **ppToken, /* OUT: *ppToken is the token text */ + int *pnBytes, /* OUT: Number of bytes in token */ + int *piStartOffset, /* OUT: Starting offset of token */ + int *piEndOffset, /* OUT: Ending offset of token */ + int *piPosition /* OUT: Position integer of token */ +){ + simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; + simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; + unsigned char *p = (unsigned char *)c->pInput; + + while( c->iOffset<c->nBytes ){ + int iStartOffset; + + /* Scan past delimiter characters */ + while( c->iOffset<c->nBytes && simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + /* Count non-delimiter characters. */ + iStartOffset = c->iOffset; + while( c->iOffset<c->nBytes && !simpleDelim(t, p[c->iOffset]) ){ + c->iOffset++; + } + + if( c->iOffset>iStartOffset ){ + int i, n = c->iOffset-iStartOffset; + if( n>c->nTokenAllocated ){ + c->nTokenAllocated = n+20; + c->pToken = sqlite3_realloc(c->pToken, c->nTokenAllocated); + if( c->pToken==NULL ) return SQLITE_NOMEM; + } + for(i=0; i<n; i++){ + /* TODO(shess) This needs expansion to handle UTF-8 + ** case-insensitivity. + */ + unsigned char ch = p[iStartOffset+i]; + c->pToken[i] = ch<0x80 ? tolower(ch) : ch; + } + *ppToken = c->pToken; + *pnBytes = n; + *piStartOffset = iStartOffset; + *piEndOffset = c->iOffset; + *piPosition = c->iToken++; + + return SQLITE_OK; + } + } + return SQLITE_DONE; +} + +/* +** The set of routines that implement the simple tokenizer +*/ +static const sqlite3_tokenizer_module simpleTokenizerModule = { + 0, + simpleCreate, + simpleDestroy, + simpleOpen, + simpleClose, + simpleNext, +}; + +/* +** Allocate a new simple tokenizer. Return a pointer to the new +** tokenizer in *ppModule +*/ +void sqlite3Fts3SimpleTokenizerModule( + sqlite3_tokenizer_module const**ppModule +){ + *ppModule = &simpleTokenizerModule; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ diff --git a/third_party/sqlite/ext/fts3/mkfts3amal.tcl b/third_party/sqlite/ext/fts3/mkfts3amal.tcl new file mode 100755 index 0000000..0590487 --- /dev/null +++ b/third_party/sqlite/ext/fts3/mkfts3amal.tcl @@ -0,0 +1,115 @@ +#!/usr/bin/tclsh +# +# This script builds a single C code file holding all of FTS3 code. +# The name of the output file is fts3amal.c. 
To build this file, +# first do: +# +# make target_source +# +# The make target above moves all of the source code files into +# a subdirectory named "tsrc". (This script expects to find the files +# there and will not work if they are not found.) +# +# After the "tsrc" directory has been created and populated, run +# this script: +# +# tclsh mkfts3amal.tcl +# +# The amalgamated FTS3 code will be written into fts3amal.c +# + +# Open the output file and write a header comment at the beginning +# of the file. +# +set out [open fts3amal.c w] +set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1] +puts $out [subst \ +{/****************************************************************************** +** This file is an amalgamation of separate C source files from the SQLite +** Full Text Search extension 2 (fts3). By combining all the individual C +** code files into this single large file, the entire code can be compiled +** as a one translation unit. This allows many compilers to do optimizations +** that would not be possible if the files were compiled separately. It also +** makes the code easier to import into other projects. +** +** This amalgamation was generated on $today. +*/}] + +# These are the header files used by FTS3. The first time any of these +# files are seen in a #include statement in the C code, include the complete +# text of the file in-line. The file only needs to be included once. +# +foreach hdr { + fts3.h + fts3_hash.h + fts3_tokenizer.h + sqlite3.h + sqlite3ext.h +} { + set available_hdr($hdr) 1 +} + +# 78 stars used for comment formatting. +set s78 \ +{*****************************************************************************} + +# Insert a comment into the code +# +proc section_comment {text} { + global out s78 + set n [string length $text] + set nstar [expr {60 - $n}] + set stars [string range $s78 0 $nstar] + puts $out "/************** $text $stars/" +} + +# Read the source file named $filename and write it into the +# sqlite3.c output file. If any #include statements are seen, +# process them approprately. +# +proc copy_file {filename} { + global seen_hdr available_hdr out + set tail [file tail $filename] + section_comment "Begin file $tail" + set in [open $filename r] + while {![eof $in]} { + set line [gets $in] + if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { + if {[info exists available_hdr($hdr)]} { + if {$available_hdr($hdr)} { + section_comment "Include $hdr in the middle of $tail" + copy_file tsrc/$hdr + section_comment "Continuing where we left off in $tail" + } + } elseif {![info exists seen_hdr($hdr)]} { + set seen_hdr($hdr) 1 + puts $out $line + } + } elseif {[regexp {^#ifdef __cplusplus} $line]} { + puts $out "#if 0" + } elseif {[regexp {^#line} $line]} { + # Skip #line directives. + } else { + puts $out $line + } + } + close $in + section_comment "End of $tail" +} + + +# Process the source files. Process files containing commonly +# used subroutines first in order to help the compiler find +# inlining opportunities. 
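+#
+# (The resulting fts3amal.c may then be compiled as a loadable
+# extension, for example with something like:
+#
+#     gcc -shared fts3amal.c -o libfts3.so
+#
+# adding "-I" flags as needed so the compiler can find sqlite3.h and
+# sqlite3ext.h; built without SQLITE_CORE defined, the FTS3 sources
+# compile as an extension.)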
+#
+foreach file {
+   fts3.c
+   fts3_hash.c
+   fts3_porter.c
+   fts3_tokenizer.c
+   fts3_tokenizer1.c
+} {
+  copy_file tsrc/$file
+}
+
+close $out
diff --git a/third_party/sqlite/ext/icu/README.txt b/third_party/sqlite/ext/icu/README.txt
new file mode 100755
index 0000000..5c995cc
--- /dev/null
+++ b/third_party/sqlite/ext/icu/README.txt
@@ -0,0 +1,170 @@
+
+This directory contains source code for the SQLite "ICU" extension, an
+integration of the "International Components for Unicode" library with
+SQLite. Documentation follows.
+
+  1. Features
+
+    1.1  SQL Scalars upper() and lower()
+    1.2  Unicode Aware LIKE Operator
+    1.3  ICU Collation Sequences
+    1.4  SQL REGEXP Operator
+
+  2. Compilation and Usage
+
+  3. Bugs, Problems and Security Issues
+
+    3.1 The "case_sensitive_like" Pragma
+    3.2 The SQLITE_MAX_LIKE_PATTERN_LENGTH Macro
+    3.3 Collation Sequence Security Issue
+
+
+1. FEATURES
+
+  1.1 SQL Scalars upper() and lower()
+
+    SQLite's built-in implementations of these two functions only
+    provide case mapping for the 26 letters used in the English
+    language. The ICU based functions provided by this extension
+    provide case mapping, where defined, for the full range of
+    unicode characters.
+
+    ICU provides two types of case mapping, "general" case mapping and
+    "language specific". Refer to ICU documentation for the differences
+    between the two. Specifically:
+
+       http://www.icu-project.org/userguide/caseMappings.html
+       http://www.icu-project.org/userguide/posix.html#case_mappings
+
+    To utilise "general" case mapping, the upper() or lower() scalar
+    functions are invoked with one argument:
+
+        upper('abc') -> 'ABC'
+        lower('ABC') -> 'abc'
+
+    To access ICU "language specific" case mapping, upper() or lower()
+    should be invoked with two arguments. The second argument is the name
+    of the locale to use. Passing an empty string ("") or SQL NULL value
+    as the second argument is the same as invoking the 1 argument version
+    of upper() or lower():
+
+        lower('I', 'en_us') -> 'i'
+        lower('I', 'tr_tr') -> 'ı' (small dotless i)
+
+  1.2 Unicode Aware LIKE Operator
+
+    Similarly to the upper() and lower() functions, the built-in SQLite LIKE
+    operator understands case equivalence for the 26 letters of the English
+    language alphabet. The implementation of LIKE included in this
+    extension uses the ICU function u_foldCase() to provide case
+    independent comparisons for the full range of unicode characters.
+
+    The U_FOLD_CASE_DEFAULT flag is passed to u_foldCase(), meaning the
+    dotless 'I' character used in the Turkish language is considered
+    to be in the same equivalence class as the dotted 'I' character
+    used by many languages (including English).
+
+  1.3 ICU Collation Sequences
+
+    A special SQL scalar function, icu_load_collation() is provided that
+    may be used to register ICU collation sequences with SQLite. It
+    is always called with exactly two arguments, the ICU locale
+    identifying the collation sequence to ICU, and the name of the
+    SQLite collation sequence to create.
For example, to create an
+    SQLite collation sequence named "turkish" using Turkish language
+    sorting rules, use the SQL statement:
+
+        SELECT icu_load_collation('tr_TR', 'turkish');
+
+    Or, for Australian English:
+
+        SELECT icu_load_collation('en_AU', 'australian');
+
+    The identifiers "turkish" and "australian" may then be used
+    as collation sequence identifiers in SQL statements:
+
+        CREATE TABLE aust_turkish_penpals(
+          australian_penpal_name TEXT COLLATE australian,
+          turkish_penpal_name TEXT COLLATE turkish
+        );
+
+  1.4 SQL REGEXP Operator
+
+    This extension provides an implementation of the SQL binary
+    comparison operator "REGEXP", based on the regular expression functions
+    provided by the ICU library. The syntax of the operator is as described
+    in SQLite documentation:
+
+        <string> REGEXP <re-pattern>
+
+    This extension uses the ICU defaults for regular expression matching
+    behaviour. Specifically, this means that:
+
+      * Matching is case-sensitive,
+      * Regular expression comments are not allowed within patterns, and
+      * The '^' and '$' characters match the beginning and end of the
+        <string> argument, not the beginning and end of lines within
+        the <string> argument.
+
+    Even more specifically, the value passed to the "flags" parameter
+    of ICU C function uregex_open() is 0.
+
+
+2  COMPILATION AND USAGE
+
+  The easiest way to compile and use the ICU extension is to build
+  and use it as a dynamically loadable SQLite extension. To do this
+  using gcc on *nix:
+
+    gcc -shared icu.c `icu-config --ldflags` -o libSqliteIcu.so
+
+  You may need to add "-I" flags so that gcc can find sqlite3ext.h
+  and sqlite3.h. The resulting shared lib, libSqliteIcu.so, may be
+  loaded into sqlite in the same way as any other dynamically loadable
+  extension.
+
+
+3  BUGS, PROBLEMS AND SECURITY ISSUES
+
+  3.1 The "case_sensitive_like" Pragma
+
+    This extension does not work well with the "case_sensitive_like"
+    pragma. If this pragma is used before the ICU extension is loaded,
+    then the pragma has no effect. If the pragma is used after the ICU
+    extension is loaded, then SQLite ignores the ICU implementation and
+    always uses the built-in LIKE operator.
+
+    The ICU extension LIKE operator is always case insensitive.
+
+  3.2 The SQLITE_MAX_LIKE_PATTERN_LENGTH Macro
+
+    Passing very long patterns to the built-in SQLite LIKE operator can
+    cause a stack overflow. To curb this problem, SQLite defines the
+    SQLITE_MAX_LIKE_PATTERN_LENGTH macro as the maximum length of a
+    pattern in bytes (irrespective of encoding). The default value is
+    defined in internal header file "limits.h".
+
+    The ICU extension LIKE implementation suffers from the same
+    problem and uses the same solution. However, since the ICU extension
+    code does not include the SQLite file "limits.h", modifying
+    the default value therein does not affect the ICU extension.
+    The default value of SQLITE_MAX_LIKE_PATTERN_LENGTH used by
+    the ICU extension LIKE operator is 50000, defined in source
+    file "icu.c".
+
+  3.3 Collation Sequence Security Issue
+
+    Internally, SQLite assumes that indices stored in database files
+    are sorted according to the collation sequence indicated by the
+    SQL schema. Changing the definition of a collation sequence after
+    an index has been built is therefore equivalent to database
+    corruption. The SQLite library is not very well tested under
+    these conditions, and may contain potential buffer overruns
+    or other programming errors that could be exploited by a malicious
+    programmer.
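+
+    One possible mitigation, sketched below using the standard
+    authorizer interface (the callback name is arbitrary), is to deny
+    untrusted SQL access to the icu_load_collation() function:
+
+        static int denyLoadCollation(void *pArg, int op, const char *z1,
+                                     const char *z2, const char *z3,
+                                     const char *z4){
+          /* For SQLITE_FUNCTION, the fourth argument is the function name */
+          if( op==SQLITE_FUNCTION && z2 && !strcmp(z2, "icu_load_collation") ){
+            return SQLITE_DENY;
+          }
+          return SQLITE_OK;
+        }
+
+        /* ... sqlite3_set_authorizer(db, denyLoadCollation, 0); ... */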
+ + If the ICU extension is used in an environment where potentially + malicious users may execute arbitrary SQL (i.e. gears), they + should be prevented from invoking the icu_load_collation() function, + possibly using the authorisation callback. + diff --git a/third_party/sqlite/ext/icu/icu.c b/third_party/sqlite/ext/icu/icu.c new file mode 100755 index 0000000..57d59a6 --- /dev/null +++ b/third_party/sqlite/ext/icu/icu.c @@ -0,0 +1,504 @@ +/* +** 2007 May 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** $Id: icu.c,v 1.7 2007/12/13 21:54:11 drh Exp $ +** +** This file implements an integration between the ICU library +** ("International Components for Unicode", an open-source library +** for handling unicode data) and SQLite. The integration uses +** ICU to provide the following to SQLite: +** +** * An implementation of the SQL regexp() function (and hence REGEXP +** operator) using the ICU uregex_XX() APIs. +** +** * Implementations of the SQL scalar upper() and lower() functions +** for case mapping. +** +** * Integration of ICU and SQLite collation seqences. +** +** * An implementation of the LIKE operator that uses ICU to +** provide case-independent matching. +*/ + +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU) + +/* Include ICU headers */ +#include <unicode/utypes.h> +#include <unicode/uregex.h> +#include <unicode/ustring.h> +#include <unicode/ucol.h> + +#include <assert.h> + +// TODO(evanm): this is cut'n'pasted from fts2.c. Why is it necessary? +#if !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +#ifndef SQLITE_CORE + #include "sqlite3ext.h" + SQLITE_EXTENSION_INIT1 +#else + #include "sqlite3.h" +#endif + +/* +** Maximum length (in bytes) of the pattern in a LIKE or GLOB +** operator. +*/ +#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH +# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000 +#endif + +/* +** Version of sqlite3_free() that is always a function, never a macro. +*/ +static void xFree(void *p){ + sqlite3_free(p); +} + +/* +** Compare two UTF-8 strings for equality where the first string is +** a "LIKE" expression. Return true (1) if they are the same and +** false (0) if they are different. +*/ +static int icuLikeCompare( + const uint8_t *zPattern, /* LIKE pattern */ + const uint8_t *zString, /* The UTF-8 string to compare against */ + const UChar32 uEsc /* The escape character */ +){ + static const int MATCH_ONE = (UChar32)'_'; + static const int MATCH_ALL = (UChar32)'%'; + + int iPattern = 0; /* Current byte index in zPattern */ + int iString = 0; /* Current byte index in zString */ + + int prevEscape = 0; /* True if the previous character was uEsc */ + + while( zPattern[iPattern]!=0 ){ + + /* Read (and consume) the next character from the input pattern. */ + UChar32 uPattern; + U8_NEXT_UNSAFE(zPattern, iPattern, uPattern); + assert(uPattern!=0); + + /* There are now 4 possibilities: + ** + ** 1. uPattern is an unescaped match-all character "%", + ** 2. uPattern is an unescaped match-one character "_", + ** 3. uPattern is an unescaped escape character, or + ** 4. uPattern is to be handled as an ordinary character + */ + if( !prevEscape && uPattern==MATCH_ALL ){ + /* Case 1. 
*/ + uint8_t c; + + /* Skip any MATCH_ALL or MATCH_ONE characters that follow a + ** MATCH_ALL. For each MATCH_ONE, skip one character in the + ** test string. + */ + while( (c=zPattern[iPattern]) == MATCH_ALL || c == MATCH_ONE ){ + if( c==MATCH_ONE ){ + if( zString[iString]==0 ) return 0; + U8_FWD_1_UNSAFE(zString, iString); + } + iPattern++; + } + + if( zPattern[iPattern]==0 ) return 1; + + while( zString[iString] ){ + if( icuLikeCompare(&zPattern[iPattern], &zString[iString], uEsc) ){ + return 1; + } + U8_FWD_1_UNSAFE(zString, iString); + } + return 0; + + }else if( !prevEscape && uPattern==MATCH_ONE ){ + /* Case 2. */ + if( zString[iString]==0 ) return 0; + U8_FWD_1_UNSAFE(zString, iString); + + }else if( !prevEscape && uPattern==uEsc){ + /* Case 3. */ + prevEscape = 1; + + }else{ + /* Case 4. */ + UChar32 uString; + U8_NEXT_UNSAFE(zString, iString, uString); + uString = u_foldCase(uString, U_FOLD_CASE_DEFAULT); + uPattern = u_foldCase(uPattern, U_FOLD_CASE_DEFAULT); + if( uString!=uPattern ){ + return 0; + } + prevEscape = 0; + } + } + + return zString[iString]==0; +} + +/* +** Implementation of the like() SQL function. This function implements +** the build-in LIKE operator. The first argument to the function is the +** pattern and the second argument is the string. So, the SQL statements: +** +** A LIKE B +** +** is implemented as like(B, A). If there is an escape character E, +** +** A LIKE B ESCAPE E +** +** is mapped to like(B, A, E). +*/ +static void icuLikeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zA = sqlite3_value_text(argv[0]); + const unsigned char *zB = sqlite3_value_text(argv[1]); + UChar32 uEsc = 0; + + /* Limit the length of the LIKE or GLOB pattern to avoid problems + ** of deep recursion and N*N behavior in patternCompare(). + */ + if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){ + sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); + return; + } + + + if( argc==3 ){ + /* The escape character string must consist of a single UTF-8 character. + ** Otherwise, return an error. + */ + int nE= sqlite3_value_bytes(argv[2]); + const unsigned char *zE = sqlite3_value_text(argv[2]); + int i = 0; + if( zE==0 ) return; + U8_NEXT(zE, i, nE, uEsc); + if( i!=nE){ + sqlite3_result_error(context, + "ESCAPE expression must be a single character", -1); + return; + } + } + + if( zA && zB ){ + sqlite3_result_int(context, icuLikeCompare(zA, zB, uEsc)); + } +} + +/* +** This function is called when an ICU function called from within +** the implementation of an SQL scalar function returns an error. +** +** The scalar function context passed as the first argument is +** loaded with an error message based on the following two args. +*/ +static void icuFunctionError( + sqlite3_context *pCtx, /* SQLite scalar function context */ + const char *zName, /* Name of ICU function that failed */ + UErrorCode e /* Error code returned by ICU function */ +){ + char zBuf[128]; + sqlite3_snprintf(128, zBuf, "ICU error: %s(): %s", zName, u_errorName(e)); + zBuf[127] = '\0'; + sqlite3_result_error(pCtx, zBuf, -1); +} + +/* +** Function to delete compiled regexp objects. Registered as +** a destructor function with sqlite3_set_auxdata(). +*/ +static void icuRegexpDelete(void *p){ + URegularExpression *pExpr = (URegularExpression *)p; + uregex_close(pExpr); +} + +/* +** Implementation of SQLite REGEXP operator. This scalar function takes +** two arguments. 
The first is a regular expression pattern to compile +** the second is a string to match against that pattern. If either +** argument is an SQL NULL, then NULL Is returned. Otherwise, the result +** is 1 if the string matches the pattern, or 0 otherwise. +** +** SQLite maps the regexp() function to the regexp() operator such +** that the following two are equivalent: +** +** zString REGEXP zPattern +** regexp(zPattern, zString) +** +** Uses the following ICU regexp APIs: +** +** uregex_open() +** uregex_matches() +** uregex_close() +*/ +static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){ + UErrorCode status = U_ZERO_ERROR; + URegularExpression *pExpr; + UBool res; + const UChar *zString = sqlite3_value_text16(apArg[1]); + + /* If the left hand side of the regexp operator is NULL, + ** then the result is also NULL. + */ + if( !zString ){ + return; + } + + pExpr = sqlite3_get_auxdata(p, 0); + if( !pExpr ){ + const UChar *zPattern = sqlite3_value_text16(apArg[0]); + if( !zPattern ){ + return; + } + pExpr = uregex_open(zPattern, -1, 0, 0, &status); + + if( U_SUCCESS(status) ){ + sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete); + }else{ + assert(!pExpr); + icuFunctionError(p, "uregex_open", status); + return; + } + } + + /* Configure the text that the regular expression operates on. */ + uregex_setText(pExpr, zString, -1, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "uregex_setText", status); + return; + } + + /* Attempt the match */ + res = uregex_matches(pExpr, 0, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "uregex_matches", status); + return; + } + + /* Set the text that the regular expression operates on to a NULL + ** pointer. This is not really necessary, but it is tidier than + ** leaving the regular expression object configured with an invalid + ** pointer after this function returns. + */ + uregex_setText(pExpr, 0, 0, &status); + + /* Return 1 or 0. */ + sqlite3_result_int(p, res ? 1 : 0); +} + +/* +** Implementations of scalar functions for case mapping - upper() and +** lower(). Function upper() converts its input to upper-case (ABC). +** Function lower() converts to lower-case (abc). +** +** ICU provides two types of case mapping, "general" case mapping and +** "language specific". Refer to ICU documentation for the differences +** between the two. +** +** To utilise "general" case mapping, the upper() or lower() scalar +** functions are invoked with one argument: +** +** upper('ABC') -> 'abc' +** lower('abc') -> 'ABC' +** +** To access ICU "language specific" case mapping, upper() or lower() +** should be invoked with two arguments. The second argument is the name +** of the locale to use. Passing an empty string ("") or SQL NULL value +** as the second argument is the same as invoking the 1 argument version +** of upper() or lower(). 
+** +** lower('I', 'en_us') -> 'i' +** lower('I', 'tr_tr') -> 'ı' (small dotless i) +** +** http://www.icu-project.org/userguide/posix.html#case_mappings +*/ +static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){ + const UChar *zInput; + UChar *zOutput; + int nInput; + int nOutput; + + UErrorCode status = U_ZERO_ERROR; + const char *zLocale = 0; + + assert(nArg==1 || nArg==2); + if( nArg==2 ){ + zLocale = (const char *)sqlite3_value_text(apArg[1]); + } + + zInput = sqlite3_value_text16(apArg[0]); + if( !zInput ){ + return; + } + nInput = sqlite3_value_bytes16(apArg[0]); + + nOutput = nInput * 2 + 2; + zOutput = sqlite3_malloc(nOutput); + if( !zOutput ){ + return; + } + + if( sqlite3_user_data(p) ){ + u_strToUpper(zOutput, nOutput/2, zInput, nInput/2, zLocale, &status); + }else{ + u_strToLower(zOutput, nOutput/2, zInput, nInput/2, zLocale, &status); + } + + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "u_strToLower()/u_strToUpper", status); + return; + } + + sqlite3_result_text16(p, zOutput, -1, xFree); +} + +/* +** Collation sequence destructor function. The pCtx argument points to +** a UCollator structure previously allocated using ucol_open(). +*/ +static void icuCollationDel(void *pCtx){ + UCollator *p = (UCollator *)pCtx; + ucol_close(p); +} + +/* +** Collation sequence comparison function. The pCtx argument points to +** a UCollator structure previously allocated using ucol_open(). +*/ +static int icuCollationColl( + void *pCtx, + int nLeft, + const void *zLeft, + int nRight, + const void *zRight +){ + UCollationResult res; + UCollator *p = (UCollator *)pCtx; + res = ucol_strcoll(p, (UChar *)zLeft, nLeft/2, (UChar *)zRight, nRight/2); + switch( res ){ + case UCOL_LESS: return -1; + case UCOL_GREATER: return +1; + case UCOL_EQUAL: return 0; + } + assert(!"Unexpected return value from ucol_strcoll()"); + return 0; +} + +/* +** Implementation of the scalar function icu_load_collation(). +** +** This scalar function is used to add ICU collation based collation +** types to an SQLite database connection. It is intended to be called +** as follows: +** +** SELECT icu_load_collation(<locale>, <collation-name>); +** +** Where <locale> is a string containing an ICU locale identifier (i.e. +** "en_AU", "tr_TR" etc.) and <collation-name> is the name of the +** collation sequence to create. +*/ +static void icuLoadCollation( + sqlite3_context *p, + int nArg, + sqlite3_value **apArg +){ + sqlite3 *db = (sqlite3 *)sqlite3_user_data(p); + UErrorCode status = U_ZERO_ERROR; + const char *zLocale; /* Locale identifier - (eg. "jp_JP") */ + const char *zName; /* SQL Collation sequence name (eg. "japanese") */ + UCollator *pUCollator; /* ICU library collation object */ + int rc; /* Return code from sqlite3_create_collation_x() */ + + assert(nArg==2); + zLocale = (const char *)sqlite3_value_text(apArg[0]); + zName = (const char *)sqlite3_value_text(apArg[1]); + + if( !zLocale || !zName ){ + return; + } + + pUCollator = ucol_open(zLocale, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "ucol_open", status); + return; + } + assert(p); + + rc = sqlite3_create_collation_v2(db, zName, SQLITE_UTF16, (void *)pUCollator, + icuCollationColl, icuCollationDel + ); + if( rc!=SQLITE_OK ){ + ucol_close(pUCollator); + sqlite3_result_error(p, "Error registering collation function", -1); + } +} + +/* +** Register the ICU extension functions with database db. 
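+**
+** Once registered, the functions are available from ordinary SQL.
+** For example (illustrative statements):
+**
+**     SELECT upper('i', 'tr_TR');                        -- locale-aware
+**     SELECT 'abc' REGEXP 'a.c';                         -- 1
+**     SELECT icu_load_collation('en_AU', 'australian');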
+*/ +int sqlite3IcuInit(sqlite3 *db){ + struct IcuScalar { + const char *zName; /* Function name */ + int nArg; /* Number of arguments */ + int enc; /* Optimal text encoding */ + void *pContext; /* sqlite3_user_data() context */ + void (*xFunc)(sqlite3_context*,int,sqlite3_value**); + } scalars[] = { + {"regexp",-1, SQLITE_ANY, 0, icuRegexpFunc}, + + {"lower", 1, SQLITE_UTF16, 0, icuCaseFunc16}, + {"lower", 2, SQLITE_UTF16, 0, icuCaseFunc16}, + {"upper", 1, SQLITE_UTF16, (void*)1, icuCaseFunc16}, + {"upper", 2, SQLITE_UTF16, (void*)1, icuCaseFunc16}, + + {"lower", 1, SQLITE_UTF8, 0, icuCaseFunc16}, + {"lower", 2, SQLITE_UTF8, 0, icuCaseFunc16}, + {"upper", 1, SQLITE_UTF8, (void*)1, icuCaseFunc16}, + {"upper", 2, SQLITE_UTF8, (void*)1, icuCaseFunc16}, + + {"like", 2, SQLITE_UTF8, 0, icuLikeFunc}, + {"like", 3, SQLITE_UTF8, 0, icuLikeFunc}, + + {"icu_load_collation", 2, SQLITE_UTF8, (void*)db, icuLoadCollation}, + }; + + int rc = SQLITE_OK; + int i; + + for(i=0; rc==SQLITE_OK && i<(sizeof(scalars)/sizeof(struct IcuScalar)); i++){ + struct IcuScalar *p = &scalars[i]; + rc = sqlite3_create_function( + db, p->zName, p->nArg, p->enc, p->pContext, p->xFunc, 0, 0 + ); + } + + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3IcuInit(db); +} +#endif + +#endif diff --git a/third_party/sqlite/ext/rtree/README b/third_party/sqlite/ext/rtree/README new file mode 100755 index 0000000..3736f45 --- /dev/null +++ b/third_party/sqlite/ext/rtree/README @@ -0,0 +1,120 @@ + +This directory contains an SQLite extension that implements a virtual +table type that allows users to create, query and manipulate r-tree[1] +data structures inside of SQLite databases. Users create, populate +and query r-tree structures using ordinary SQL statements. + + 1. SQL Interface + + 1.1 Table Creation + 1.2 Data Manipulation + 1.3 Data Querying + 1.4 Introspection and Analysis + + 2. Compilation and Deployment + + 3. References + + +1. SQL INTERFACE + + 1.1 Table Creation. + + All r-tree virtual tables have an odd number of columns between + 3 and 11. Unlike regular SQLite tables, r-tree tables are strongly + typed. + + The leftmost column is always the pimary key and contains 64-bit + integer values. Each subsequent column contains a 32-bit real + value. For each pair of real values, the first (leftmost) must be + less than or equal to the second. R-tree tables may be + constructed using the following syntax: + + CREATE VIRTUAL TABLE <name> USING rtree(<column-names>) + + For example: + + CREATE VIRTUAL TABLE boxes USING rtree(boxno, xmin, xmax, ymin, ymax); + INSERT INTO boxes VALUES(1, 1.0, 3.0, 2.0, 4.0); + + Constructing a virtual r-tree table <name> creates the following three + real tables in the database to store the data structure: + + <name>_node + <name>_rowid + <name>_parent + + Dropping or modifying the contents of these tables directly will + corrupt the r-tree structure. To delete an r-tree from a database, + use a regular DROP TABLE statement: + + DROP TABLE <name>; + + Dropping the main r-tree table automatically drops the automatically + created tables. + + 1.2 Data Manipulation (INSERT, UPDATE, DELETE). + + The usual INSERT, UPDATE or DELETE syntax is used to manipulate data + stored in an r-tree table. 
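+    For example, using the "boxes" table created in section 1.1, the
+    following statements (illustrative) are all valid:
+
+      INSERT INTO boxes VALUES(2, 2.0, 5.0, 1.0, 4.0);
+      UPDATE boxes SET xmax = 4.5 WHERE boxno = 2;
+      DELETE FROM boxes WHERE boxno = 2;
+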
+
+    Please note the following:
+
+      * Inserting a NULL value into the primary key column has the
+        same effect as inserting a NULL into an INTEGER PRIMARY KEY
+        column of a regular table. The system automatically assigns
+        an unused integer key value to the new record. Usually, this
+        is one greater than the largest primary key value currently
+        present in the table.
+
+      * Attempting to insert a duplicate primary key value fails with
+        an SQLITE_CONSTRAINT error.
+
+      * Attempting to insert or modify a record such that the value
+        stored in the (N*2)th column is greater than that stored in
+        the (N*2+1)th column fails with an SQLITE_CONSTRAINT error.
+
+      * When a record is inserted, values are always converted to
+        the required type (64-bit integer or 32-bit real) as if they
+        were part of an SQL CAST expression. Non-numeric strings are
+        converted to zero.
+
+  1.3 Queries.
+
+    R-tree tables may be queried using all of the same SQL syntax supported
+    by regular tables. However, some query patterns are more efficient
+    than others.
+
+    R-trees support fast lookup by primary key value (O(logN), like
+    regular tables).
+
+    Any combination of equality and range (<, <=, >, >=) constraints
+    on spatial data columns may be used to optimize other queries. This
+    is the key advantage of using r-tree tables instead of creating
+    indices on regular tables.
+
+  1.4 Introspection and Analysis.
+
+    TODO: Describe rtreenode() and rtreedepth() functions.
+
+
+2. COMPILATION AND DEPLOYMENT
+
+  The easiest way to compile and use the RTREE extension is to build
+  and use it as a dynamically loadable SQLite extension. To do this
+  using gcc on *nix:
+
+    gcc -shared rtree.c -o libSqliteRtree.so
+
+  You may need to add "-I" flags so that gcc can find sqlite3ext.h
+  and sqlite3.h. The resulting shared library, libSqliteRtree.so, may be
+  loaded into SQLite in the same way as any other dynamically loadable
+  extension.
+
+
+3. REFERENCES
+
+  [1] Antonin Guttman, "R-trees - A Dynamic Index Structure For Spatial
+      Searching", University of California Berkeley, 1984.
+
+  [2] Norbert Beckmann, Hans-Peter Kriegel, Ralf Schneider, Bernhard Seeger,
+      "The R*-tree: An Efficient and Robust Access Method for Points and
+      Rectangles", Universitaet Bremen, 1990.
diff --git a/third_party/sqlite/ext/rtree/rtree.c b/third_party/sqlite/ext/rtree/rtree.c
new file mode 100755
index 0000000..39116ce
--- /dev/null
+++ b/third_party/sqlite/ext/rtree/rtree.c
@@ -0,0 +1,2826 @@
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code for implementations of the r-tree and r*-tree
+** algorithms packaged as an SQLite virtual table module.
+**
+** $Id: rtree.c,v 1.7 2008/07/16 14:43:35 drh Exp $
+*/
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RTREE)
+
+/*
+** This file contains an implementation of a couple of different variants
+** of the r-tree algorithm. See the README file for further details. The
+** same data-structure is used for all, but the algorithms for insert and
+** delete operations vary. The variants used are selected at compile time
+** by defining the following symbols:
+*/
+
+/* Either, both or none of the following may be set to activate
+** r*tree variant algorithms.
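+**
+** With the defaults below, the r*-tree reinsertion and split algorithms
+** are enabled and r*-tree ChooseSubtree is disabled.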
+*/ +#define VARIANT_RSTARTREE_CHOOSESUBTREE 0 +#define VARIANT_RSTARTREE_REINSERT 1 + +/* +** Exactly one of the following must be set to 1. +*/ +#define VARIANT_GUTTMAN_QUADRATIC_SPLIT 0 +#define VARIANT_GUTTMAN_LINEAR_SPLIT 0 +#define VARIANT_RSTARTREE_SPLIT 1 + +#define VARIANT_GUTTMAN_SPLIT \ + (VARIANT_GUTTMAN_LINEAR_SPLIT||VARIANT_GUTTMAN_QUADRATIC_SPLIT) + +#if VARIANT_GUTTMAN_QUADRATIC_SPLIT + #define PickNext QuadraticPickNext + #define PickSeeds QuadraticPickSeeds + #define AssignCells splitNodeGuttman +#endif +#if VARIANT_GUTTMAN_LINEAR_SPLIT + #define PickNext LinearPickNext + #define PickSeeds LinearPickSeeds + #define AssignCells splitNodeGuttman +#endif +#if VARIANT_RSTARTREE_SPLIT + #define AssignCells splitNodeStartree +#endif + + +#ifndef SQLITE_CORE + #include "sqlite3ext.h" + SQLITE_EXTENSION_INIT1 +#else + #include "sqlite3.h" +#endif + +#include <string.h> +#include <assert.h> + +#ifndef SQLITE_AMALGAMATION +typedef sqlite3_int64 i64; +typedef unsigned char u8; +typedef unsigned int u32; +#endif + +typedef struct Rtree Rtree; +typedef struct RtreeCursor RtreeCursor; +typedef struct RtreeNode RtreeNode; +typedef struct RtreeCell RtreeCell; +typedef struct RtreeConstraint RtreeConstraint; +typedef union RtreeCoord RtreeCoord; + +/* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */ +#define RTREE_MAX_DIMENSIONS 5 + +/* Size of hash table Rtree.aHash. This hash table is not expected to +** ever contain very many entries, so a fixed number of buckets is +** used. +*/ +#define HASHSIZE 128 + +/* +** An rtree virtual-table object. +*/ +struct Rtree { + sqlite3_vtab base; + sqlite3 *db; /* Host database connection */ + int iNodeSize; /* Size in bytes of each node in the node table */ + int nDim; /* Number of dimensions */ + int nBytesPerCell; /* Bytes consumed per cell */ + int iDepth; /* Current depth of the r-tree structure */ + char *zDb; /* Name of database containing r-tree table */ + char *zName; /* Name of r-tree table */ + RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */ + int nBusy; /* Current number of users of this structure */ + + /* List of nodes removed during a CondenseTree operation. List is + ** linked together via the pointer normally used for hash chains - + ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree + ** headed by the node (leaf nodes have RtreeNode.iNode==0). + */ + RtreeNode *pDeleted; + int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ + + /* Statements to read/write/delete a record from xxx_node */ + sqlite3_stmt *pReadNode; + sqlite3_stmt *pWriteNode; + sqlite3_stmt *pDeleteNode; + + /* Statements to read/write/delete a record from xxx_rowid */ + sqlite3_stmt *pReadRowid; + sqlite3_stmt *pWriteRowid; + sqlite3_stmt *pDeleteRowid; + + /* Statements to read/write/delete a record from xxx_parent */ + sqlite3_stmt *pReadParent; + sqlite3_stmt *pWriteParent; + sqlite3_stmt *pDeleteParent; + + int eCoordType; +}; + +/* Possible values for eCoordType: */ +#define RTREE_COORD_REAL32 0 +#define RTREE_COORD_INT32 1 + +/* +** The minimum number of cells allowed for a node is a third of the +** maximum. In Gutman's notation: +** +** m = M/3 +** +** If an R*-tree "Reinsert" operation is required, the same number of +** cells are removed from the overfull node and reinserted into the tree. +*/ +#define RTREE_MINCELLS(p) ((((p)->iNodeSize-4)/(p)->nBytesPerCell)/3) +#define RTREE_REINSERT(p) RTREE_MINCELLS(p) +#define RTREE_MAXCELLS 51 + +/* +** An rtree cursor object. 
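+**
+** pNode/iCell identify the entry the cursor currently points at. The
+** aConstraint[] array holds the search constraints decoded from the
+** idxStr string supplied to the xFilter() method.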
+*/ +struct RtreeCursor { + sqlite3_vtab_cursor base; + RtreeNode *pNode; /* Node cursor is currently pointing at */ + int iCell; /* Index of current cell in pNode */ + int iStrategy; /* Copy of idxNum search parameter */ + int nConstraint; /* Number of entries in aConstraint */ + RtreeConstraint *aConstraint; /* Search constraints. */ +}; + +union RtreeCoord { + float f; + int i; +}; + +/* +** The argument is an RtreeCoord. Return the value stored within the RtreeCoord +** formatted as a double. This macro assumes that local variable pRtree points +** to the Rtree structure associated with the RtreeCoord. +*/ +#define DCOORD(coord) ( \ + (pRtree->eCoordType==RTREE_COORD_REAL32) ? \ + ((double)coord.f) : \ + ((double)coord.i) \ +) + +/* +** A search constraint. +*/ +struct RtreeConstraint { + int iCoord; /* Index of constrained coordinate */ + int op; /* Constraining operation */ + double rValue; /* Constraint value. */ +}; + +/* Possible values for RtreeConstraint.op */ +#define RTREE_EQ 0x41 +#define RTREE_LE 0x42 +#define RTREE_LT 0x43 +#define RTREE_GE 0x44 +#define RTREE_GT 0x45 + +/* +** An rtree structure node. +** +** Data format (RtreeNode.zData): +** +** 1. If the node is the root node (node 1), then the first 2 bytes +** of the node contain the tree depth as a big-endian integer. +** For non-root nodes, the first 2 bytes are left unused. +** +** 2. The next 2 bytes contain the number of entries currently +** stored in the node. +** +** 3. The remainder of the node contains the node entries. Each entry +** consists of a single 8-byte integer followed by an even number +** of 4-byte coordinates. For leaf nodes the integer is the rowid +** of a record. For internal nodes it is the node number of a +** child page. +*/ +struct RtreeNode { + RtreeNode *pParent; /* Parent node */ + i64 iNode; + int nRef; + int isDirty; + u8 *zData; + RtreeNode *pNext; /* Next node in this hash chain */ +}; +#define NCELL(pNode) readInt16(&(pNode)->zData[2]) + +/* +** Structure to store a deserialized rtree record. +*/ +struct RtreeCell { + i64 iRowid; + RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; +}; + +#define MAX(x,y) ((x) < (y) ? (y) : (x)) +#define MIN(x,y) ((x) > (y) ? (y) : (x)) + +/* +** Functions to deserialize a 16 bit integer, 32 bit real number and +** 64 bit integer. The deserialized value is returned. +*/ +static int readInt16(u8 *p){ + return (p[0]<<8) + p[1]; +} +static void readCoord(u8 *p, RtreeCoord *pCoord){ + u32 i = ( + (((u32)p[0]) << 24) + + (((u32)p[1]) << 16) + + (((u32)p[2]) << 8) + + (((u32)p[3]) << 0) + ); + *(u32 *)pCoord = i; +} +static i64 readInt64(u8 *p){ + return ( + (((i64)p[0]) << 56) + + (((i64)p[1]) << 48) + + (((i64)p[2]) << 40) + + (((i64)p[3]) << 32) + + (((i64)p[4]) << 24) + + (((i64)p[5]) << 16) + + (((i64)p[6]) << 8) + + (((i64)p[7]) << 0) + ); +} + +/* +** Functions to serialize a 16 bit integer, 32 bit real number and +** 64 bit integer. The value returned is the number of bytes written +** to the argument buffer (always 2, 4 and 8 respectively). 
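+**
+** As with the deserialization routines above, values are written in
+** big-endian byte order, so the on-disk node format does not depend
+** on the byte order of the host.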
+*/ +static int writeInt16(u8 *p, int i){ + p[0] = (i>> 8)&0xFF; + p[1] = (i>> 0)&0xFF; + return 2; +} +static int writeCoord(u8 *p, RtreeCoord *pCoord){ + u32 i; + assert( sizeof(RtreeCoord)==4 ); + assert( sizeof(u32)==4 ); + i = *(u32 *)pCoord; + p[0] = (i>>24)&0xFF; + p[1] = (i>>16)&0xFF; + p[2] = (i>> 8)&0xFF; + p[3] = (i>> 0)&0xFF; + return 4; +} +static int writeInt64(u8 *p, i64 i){ + p[0] = (i>>56)&0xFF; + p[1] = (i>>48)&0xFF; + p[2] = (i>>40)&0xFF; + p[3] = (i>>32)&0xFF; + p[4] = (i>>24)&0xFF; + p[5] = (i>>16)&0xFF; + p[6] = (i>> 8)&0xFF; + p[7] = (i>> 0)&0xFF; + return 8; +} + +/* +** Increment the reference count of node p. +*/ +static void nodeReference(RtreeNode *p){ + if( p ){ + p->nRef++; + } +} + +/* +** Clear the content of node p (set all bytes to 0x00). +*/ +static void nodeZero(Rtree *pRtree, RtreeNode *p){ + if( p ){ + memset(&p->zData[2], 0, pRtree->iNodeSize-2); + p->isDirty = 1; + } +} + +/* +** Given a node number iNode, return the corresponding key to use +** in the Rtree.aHash table. +*/ +static int nodeHash(i64 iNode){ + return ( + (iNode>>56) ^ (iNode>>48) ^ (iNode>>40) ^ (iNode>>32) ^ + (iNode>>24) ^ (iNode>>16) ^ (iNode>> 8) ^ (iNode>> 0) + ) % HASHSIZE; +} + +/* +** Search the node hash table for node iNode. If found, return a pointer +** to it. Otherwise, return 0. +*/ +static RtreeNode *nodeHashLookup(Rtree *pRtree, i64 iNode){ + RtreeNode *p; + assert( iNode!=0 ); + for(p=pRtree->aHash[nodeHash(iNode)]; p && p->iNode!=iNode; p=p->pNext); + return p; +} + +/* +** Add node pNode to the node hash table. +*/ +static void nodeHashInsert(Rtree *pRtree, RtreeNode *pNode){ + if( pNode ){ + int iHash; + assert( pNode->pNext==0 ); + iHash = nodeHash(pNode->iNode); + pNode->pNext = pRtree->aHash[iHash]; + pRtree->aHash[iHash] = pNode; + } +} + +/* +** Remove node pNode from the node hash table. +*/ +static void nodeHashDelete(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode **pp; + if( pNode->iNode!=0 ){ + pp = &pRtree->aHash[nodeHash(pNode->iNode)]; + for( ; (*pp)!=pNode; pp = &(*pp)->pNext){ assert(*pp); } + *pp = pNode->pNext; + pNode->pNext = 0; + } +} + +/* +** Allocate and return new r-tree node. Initially, (RtreeNode.iNode==0), +** indicating that node has not yet been assigned a node number. It is +** assigned a node number when nodeWrite() is called to write the +** node contents out to the database. +*/ +static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent, int zero){ + RtreeNode *pNode; + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); + if( pNode ){ + memset(pNode, 0, sizeof(RtreeNode) + (zero?pRtree->iNodeSize:0)); + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->pParent = pParent; + pNode->isDirty = 1; + nodeReference(pParent); + } + return pNode; +} + +/* +** Obtain a reference to an r-tree node. +*/ +static int +nodeAcquire( + Rtree *pRtree, /* R-tree structure */ + i64 iNode, /* Node number to load */ + RtreeNode *pParent, /* Either the parent node or NULL */ + RtreeNode **ppNode /* OUT: Acquired node */ +){ + int rc; + RtreeNode *pNode; + + /* Check if the requested node is already in the hash table. If so, + ** increase its reference count and return it. 
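+  ** If pParent is not NULL and the cached node does not yet have a
+  ** parent pointer, the supplied parent is adopted.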
+ */ + if( (pNode = nodeHashLookup(pRtree, iNode)) ){ + assert( !pParent || !pNode->pParent || pNode->pParent==pParent ); + if( pParent ){ + pNode->pParent = pParent; + } + pNode->nRef++; + *ppNode = pNode; + return SQLITE_OK; + } + + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); + if( !pNode ){ + *ppNode = 0; + return SQLITE_NOMEM; + } + pNode->pParent = pParent; + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->iNode = iNode; + pNode->isDirty = 0; + pNode->pNext = 0; + + sqlite3_bind_int64(pRtree->pReadNode, 1, iNode); + rc = sqlite3_step(pRtree->pReadNode); + if( rc==SQLITE_ROW ){ + const u8 *zBlob = sqlite3_column_blob(pRtree->pReadNode, 0); + memcpy(pNode->zData, zBlob, pRtree->iNodeSize); + nodeReference(pParent); + }else{ + sqlite3_free(pNode); + pNode = 0; + } + + *ppNode = pNode; + rc = sqlite3_reset(pRtree->pReadNode); + + if( rc==SQLITE_OK && iNode==1 ){ + pRtree->iDepth = readInt16(pNode->zData); + } + + assert( (rc==SQLITE_OK && pNode) || (pNode==0 && rc!=SQLITE_OK) ); + nodeHashInsert(pRtree, pNode); + + return rc; +} + +/* +** Overwrite cell iCell of node pNode with the contents of pCell. +*/ +static void nodeOverwriteCell( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell, + int iCell +){ + int ii; + u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; + p += writeInt64(p, pCell->iRowid); + for(ii=0; ii<(pRtree->nDim*2); ii++){ + p += writeCoord(p, &pCell->aCoord[ii]); + } + pNode->isDirty = 1; +} + +/* +** Remove cell the cell with index iCell from node pNode. +*/ +static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){ + u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; + u8 *pSrc = &pDst[pRtree->nBytesPerCell]; + int nByte = (NCELL(pNode) - iCell - 1) * pRtree->nBytesPerCell; + memmove(pDst, pSrc, nByte); + writeInt16(&pNode->zData[2], NCELL(pNode)-1); + pNode->isDirty = 1; +} + +/* +** Insert the contents of cell pCell into node pNode. If the insert +** is successful, return SQLITE_OK. +** +** If there is not enough free space in pNode, return SQLITE_FULL. +*/ +static int +nodeInsertCell( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell +){ + int nCell; /* Current number of cells in pNode */ + int nMaxCell; /* Maximum number of cells for pNode */ + + nMaxCell = (pRtree->iNodeSize-4)/pRtree->nBytesPerCell; + nCell = NCELL(pNode); + + assert(nCell<=nMaxCell); + + if( nCell<nMaxCell ){ + nodeOverwriteCell(pRtree, pNode, pCell, nCell); + writeInt16(&pNode->zData[2], nCell+1); + pNode->isDirty = 1; + } + + return (nCell==nMaxCell); +} + +/* +** If the node is dirty, write it out to the database. +*/ +static int +nodeWrite(Rtree *pRtree, RtreeNode *pNode){ + int rc = SQLITE_OK; + if( pNode->isDirty ){ + sqlite3_stmt *p = pRtree->pWriteNode; + if( pNode->iNode ){ + sqlite3_bind_int64(p, 1, pNode->iNode); + }else{ + sqlite3_bind_null(p, 1); + } + sqlite3_bind_blob(p, 2, pNode->zData, pRtree->iNodeSize, SQLITE_STATIC); + sqlite3_step(p); + pNode->isDirty = 0; + rc = sqlite3_reset(p); + if( pNode->iNode==0 && rc==SQLITE_OK ){ + pNode->iNode = sqlite3_last_insert_rowid(pRtree->db); + nodeHashInsert(pRtree, pNode); + } + } + return rc; +} + +/* +** Release a reference to a node. If the node is dirty and the reference +** count drops to zero, the node data is written to the database. 
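+**
+** The reference this node holds on its parent is also released, so a
+** single call may recursively write out a chain of dirty ancestors.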
+*/ +static int +nodeRelease(Rtree *pRtree, RtreeNode *pNode){ + int rc = SQLITE_OK; + if( pNode ){ + assert( pNode->nRef>0 ); + pNode->nRef--; + if( pNode->nRef==0 ){ + if( pNode->iNode==1 ){ + pRtree->iDepth = -1; + } + if( pNode->pParent ){ + rc = nodeRelease(pRtree, pNode->pParent); + } + if( rc==SQLITE_OK ){ + rc = nodeWrite(pRtree, pNode); + } + nodeHashDelete(pRtree, pNode); + sqlite3_free(pNode); + } + } + return rc; +} + +/* +** Return the 64-bit integer value associated with cell iCell of +** node pNode. If pNode is a leaf node, this is a rowid. If it is +** an internal node, then the 64-bit integer is a child page number. +*/ +static i64 nodeGetRowid( + Rtree *pRtree, + RtreeNode *pNode, + int iCell +){ + assert( iCell<NCELL(pNode) ); + return readInt64(&pNode->zData[4 + pRtree->nBytesPerCell*iCell]); +} + +/* +** Return coordinate iCoord from cell iCell in node pNode. +*/ +static void nodeGetCoord( + Rtree *pRtree, + RtreeNode *pNode, + int iCell, + int iCoord, + RtreeCoord *pCoord /* Space to write result to */ +){ + readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord); +} + +/* +** Deserialize cell iCell of node pNode. Populate the structure pointed +** to by pCell with the results. +*/ +static void nodeGetCell( + Rtree *pRtree, + RtreeNode *pNode, + int iCell, + RtreeCell *pCell +){ + int ii; + pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell); + for(ii=0; ii<pRtree->nDim*2; ii++){ + nodeGetCoord(pRtree, pNode, iCell, ii, &pCell->aCoord[ii]); + } +} + + +/* Forward declaration for the function that does the work of +** the virtual table module xCreate() and xConnect() methods. +*/ +static int rtreeInit( + sqlite3 *, void *, int, const char *const*, sqlite3_vtab **, char **, int, int +); + +/* +** Rtree virtual table module xCreate method. +*/ +static int rtreeCreate( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 1, (int)pAux); +} + +/* +** Rtree virtual table module xConnect method. +*/ +static int rtreeConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 0, (int)pAux); +} + +/* +** Increment the r-tree reference count. +*/ +static void rtreeReference(Rtree *pRtree){ + pRtree->nBusy++; +} + +/* +** Decrement the r-tree reference count. When the reference count reaches +** zero the structure is deleted. +*/ +static void rtreeRelease(Rtree *pRtree){ + pRtree->nBusy--; + if( pRtree->nBusy==0 ){ + sqlite3_finalize(pRtree->pReadNode); + sqlite3_finalize(pRtree->pWriteNode); + sqlite3_finalize(pRtree->pDeleteNode); + sqlite3_finalize(pRtree->pReadRowid); + sqlite3_finalize(pRtree->pWriteRowid); + sqlite3_finalize(pRtree->pDeleteRowid); + sqlite3_finalize(pRtree->pReadParent); + sqlite3_finalize(pRtree->pWriteParent); + sqlite3_finalize(pRtree->pDeleteParent); + sqlite3_free(pRtree); + } +} + +/* +** Rtree virtual table module xDisconnect method. +*/ +static int rtreeDisconnect(sqlite3_vtab *pVtab){ + rtreeRelease((Rtree *)pVtab); + return SQLITE_OK; +} + +/* +** Rtree virtual table module xDestroy method. 
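+**
+** As well as disconnecting, this drops the three auxiliary tables
+** (<name>_node, <name>_rowid and <name>_parent) that store the tree.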
+*/ +static int rtreeDestroy(sqlite3_vtab *pVtab){ + Rtree *pRtree = (Rtree *)pVtab; + int rc; + char *zCreate = sqlite3_mprintf( + "DROP TABLE '%q'.'%q_node';" + "DROP TABLE '%q'.'%q_rowid';" + "DROP TABLE '%q'.'%q_parent';", + pRtree->zDb, pRtree->zName, + pRtree->zDb, pRtree->zName, + pRtree->zDb, pRtree->zName + ); + if( !zCreate ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0); + sqlite3_free(zCreate); + } + if( rc==SQLITE_OK ){ + rtreeRelease(pRtree); + } + + return rc; +} + +/* +** Rtree virtual table module xOpen method. +*/ +static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ + int rc = SQLITE_NOMEM; + RtreeCursor *pCsr; + + pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor)); + if( pCsr ){ + memset(pCsr, 0, sizeof(RtreeCursor)); + pCsr->base.pVtab = pVTab; + rc = SQLITE_OK; + } + *ppCursor = (sqlite3_vtab_cursor *)pCsr; + + return rc; +} + +/* +** Rtree virtual table module xClose method. +*/ +static int rtreeClose(sqlite3_vtab_cursor *cur){ + Rtree *pRtree = (Rtree *)(cur->pVtab); + int rc; + RtreeCursor *pCsr = (RtreeCursor *)cur; + sqlite3_free(pCsr->aConstraint); + rc = nodeRelease(pRtree, pCsr->pNode); + sqlite3_free(pCsr); + return rc; +} + +/* +** Rtree virtual table module xEof method. +** +** Return non-zero if the cursor does not currently point to a valid +** record (i.e if the scan has finished), or zero otherwise. +*/ +static int rtreeEof(sqlite3_vtab_cursor *cur){ + RtreeCursor *pCsr = (RtreeCursor *)cur; + return (pCsr->pNode==0); +} + +/* +** Cursor pCursor currently points to a cell in a non-leaf page. +** Return true if the sub-tree headed by the cell is filtered +** (excluded) by the constraints in the pCursor->aConstraint[] +** array, or false otherwise. +*/ +static int testRtreeCell(Rtree *pRtree, RtreeCursor *pCursor){ + RtreeCell cell; + int ii; + int bRes = 0; + + nodeGetCell(pRtree, pCursor->pNode, pCursor->iCell, &cell); + for(ii=0; bRes==0 && ii<pCursor->nConstraint; ii++){ + RtreeConstraint *p = &pCursor->aConstraint[ii]; + double cell_min = DCOORD(cell.aCoord[(p->iCoord>>1)*2]); + double cell_max = DCOORD(cell.aCoord[(p->iCoord>>1)*2+1]); + + assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE + || p->op==RTREE_GT || p->op==RTREE_EQ + ); + + switch( p->op ){ + case RTREE_LE: case RTREE_LT: bRes = p->rValue<cell_min; break; + case RTREE_GE: case RTREE_GT: bRes = p->rValue>cell_max; break; + case RTREE_EQ: + bRes = (p->rValue>cell_max || p->rValue<cell_min); + break; + } + } + + return bRes; +} + +/* +** Return true if the cell that cursor pCursor currently points to +** would be filtered (excluded) by the constraints in the +** pCursor->aConstraint[] array, or false otherwise. +** +** This function assumes that the cell is part of a leaf node. 
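+**
+** Unlike testRtreeCell(), each constraint is tested directly against
+** the single coordinate column it names, not against the min/max range
+** of a dimension pair.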
+*/ +static int testRtreeEntry(Rtree *pRtree, RtreeCursor *pCursor){ + RtreeCell cell; + int ii; + + nodeGetCell(pRtree, pCursor->pNode, pCursor->iCell, &cell); + for(ii=0; ii<pCursor->nConstraint; ii++){ + RtreeConstraint *p = &pCursor->aConstraint[ii]; + double coord = DCOORD(cell.aCoord[p->iCoord]); + int res; + assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE + || p->op==RTREE_GT || p->op==RTREE_EQ + ); + switch( p->op ){ + case RTREE_LE: res = (coord<=p->rValue); break; + case RTREE_LT: res = (coord<p->rValue); break; + case RTREE_GE: res = (coord>=p->rValue); break; + case RTREE_GT: res = (coord>p->rValue); break; + case RTREE_EQ: res = (coord==p->rValue); break; + } + + if( !res ) return 1; + } + + return 0; +} + +/* +** Cursor pCursor currently points at a node that heads a sub-tree of +** height iHeight (if iHeight==0, then the node is a leaf). Descend +** to point to the left-most cell of the sub-tree that matches the +** configured constraints. +*/ +static int descendToCell( + Rtree *pRtree, + RtreeCursor *pCursor, + int iHeight, + int *pEof /* OUT: Set to true if cannot descend */ +){ + int isEof; + int rc; + int ii; + RtreeNode *pChild; + sqlite3_int64 iRowid; + + RtreeNode *pSavedNode = pCursor->pNode; + int iSavedCell = pCursor->iCell; + + assert( iHeight>=0 ); + + if( iHeight==0 ){ + isEof = testRtreeEntry(pRtree, pCursor); + }else{ + isEof = testRtreeCell(pRtree, pCursor); + } + if( isEof || iHeight==0 ){ + *pEof = isEof; + return SQLITE_OK; + } + + iRowid = nodeGetRowid(pRtree, pCursor->pNode, pCursor->iCell); + rc = nodeAcquire(pRtree, iRowid, pCursor->pNode, &pChild); + if( rc!=SQLITE_OK ){ + return rc; + } + + nodeRelease(pRtree, pCursor->pNode); + pCursor->pNode = pChild; + isEof = 1; + for(ii=0; isEof && ii<NCELL(pChild); ii++){ + pCursor->iCell = ii; + rc = descendToCell(pRtree, pCursor, iHeight-1, &isEof); + if( rc!=SQLITE_OK ){ + return rc; + } + } + + if( isEof ){ + assert( pCursor->pNode==pChild ); + nodeReference(pSavedNode); + nodeRelease(pRtree, pChild); + pCursor->pNode = pSavedNode; + pCursor->iCell = iSavedCell; + } + + *pEof = isEof; + return SQLITE_OK; +} + +/* +** One of the cells in node pNode is guaranteed to have a 64-bit +** integer value equal to iRowid. Return the index of this cell. +*/ +static int nodeRowidIndex(Rtree *pRtree, RtreeNode *pNode, i64 iRowid){ + int ii; + for(ii=0; nodeGetRowid(pRtree, pNode, ii)!=iRowid; ii++){ + assert( ii<(NCELL(pNode)-1) ); + } + return ii; +} + +/* +** Return the index of the cell containing a pointer to node pNode +** in its parent. If pNode is the root node, return -1. +*/ +static int nodeParentIndex(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode *pParent = pNode->pParent; + if( pParent ){ + return nodeRowidIndex(pRtree, pParent, pNode->iNode); + } + return -1; +} + +/* +** Rtree virtual table module xNext method. +*/ +static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){ + Rtree *pRtree = (Rtree *)(pVtabCursor->pVtab); + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + int rc = SQLITE_OK; + + if( pCsr->iStrategy==1 ){ + /* This "scan" is a direct lookup by rowid. There is no next entry. */ + nodeRelease(pRtree, pCsr->pNode); + pCsr->pNode = 0; + } + + else if( pCsr->pNode ){ + /* Move to the next entry that matches the configured constraints. 
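+    ** The loop advances through the remaining cells of the current
+    ** node; once the node is exhausted it climbs to the parent and
+    ** resumes at the cell following the one just finished.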
*/ + int iHeight = 0; + while( pCsr->pNode ){ + RtreeNode *pNode = pCsr->pNode; + int nCell = NCELL(pNode); + for(pCsr->iCell++; pCsr->iCell<nCell; pCsr->iCell++){ + int isEof; + rc = descendToCell(pRtree, pCsr, iHeight, &isEof); + if( rc!=SQLITE_OK || !isEof ){ + return rc; + } + } + pCsr->pNode = pNode->pParent; + pCsr->iCell = nodeParentIndex(pRtree, pNode); + nodeReference(pCsr->pNode); + nodeRelease(pRtree, pNode); + iHeight++; + } + } + + return rc; +} + +/* +** Rtree virtual table module xRowid method. +*/ +static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){ + Rtree *pRtree = (Rtree *)pVtabCursor->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + + assert(pCsr->pNode); + *pRowid = nodeGetRowid(pRtree, pCsr->pNode, pCsr->iCell); + + return SQLITE_OK; +} + +/* +** Rtree virtual table module xColumn method. +*/ +static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ + Rtree *pRtree = (Rtree *)cur->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)cur; + + if( i==0 ){ + i64 iRowid = nodeGetRowid(pRtree, pCsr->pNode, pCsr->iCell); + sqlite3_result_int64(ctx, iRowid); + }else{ + RtreeCoord c; + nodeGetCoord(pRtree, pCsr->pNode, pCsr->iCell, i-1, &c); + if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ + sqlite3_result_double(ctx, c.f); + }else{ + assert( pRtree->eCoordType==RTREE_COORD_INT32 ); + sqlite3_result_int(ctx, c.i); + } + } + + return SQLITE_OK; +} + +/* +** Use nodeAcquire() to obtain the leaf node containing the record with +** rowid iRowid. If successful, set *ppLeaf to point to the node and +** return SQLITE_OK. If there is no such record in the table, set +** *ppLeaf to 0 and return SQLITE_OK. If an error occurs, set *ppLeaf +** to zero and return an SQLite error code. +*/ +static int findLeafNode(Rtree *pRtree, i64 iRowid, RtreeNode **ppLeaf){ + int rc; + *ppLeaf = 0; + sqlite3_bind_int64(pRtree->pReadRowid, 1, iRowid); + if( sqlite3_step(pRtree->pReadRowid)==SQLITE_ROW ){ + i64 iNode = sqlite3_column_int64(pRtree->pReadRowid, 0); + rc = nodeAcquire(pRtree, iNode, 0, ppLeaf); + sqlite3_reset(pRtree->pReadRowid); + }else{ + rc = sqlite3_reset(pRtree->pReadRowid); + } + return rc; +} + + +/* +** Rtree virtual table module xFilter method. +*/ +static int rtreeFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + Rtree *pRtree = (Rtree *)pVtabCursor->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + + RtreeNode *pRoot = 0; + int ii; + int rc = SQLITE_OK; + + rtreeReference(pRtree); + + sqlite3_free(pCsr->aConstraint); + pCsr->aConstraint = 0; + pCsr->iStrategy = idxNum; + + if( idxNum==1 ){ + /* Special case - lookup by rowid. */ + RtreeNode *pLeaf; /* Leaf on which the required cell resides */ + i64 iRowid = sqlite3_value_int64(argv[0]); + rc = findLeafNode(pRtree, iRowid, &pLeaf); + pCsr->pNode = pLeaf; + if( pLeaf && rc==SQLITE_OK ){ + pCsr->iCell = nodeRowidIndex(pRtree, pLeaf, iRowid); + } + }else{ + /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array + ** with the configured constraints. 
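+    ** Each constraint arrives as a two-byte pair in idxStr: an
+    ** operator byte followed by a coordinate-column letter. See the
+    ** rtreeBestIndex() comment below for the encoding.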
+ */ + if( argc>0 ){ + pCsr->aConstraint = sqlite3_malloc(sizeof(RtreeConstraint)*argc); + pCsr->nConstraint = argc; + if( !pCsr->aConstraint ){ + rc = SQLITE_NOMEM; + }else{ + assert( (idxStr==0 && argc==0) || strlen(idxStr)==argc*2 ); + for(ii=0; ii<argc; ii++){ + RtreeConstraint *p = &pCsr->aConstraint[ii]; + p->op = idxStr[ii*2]; + p->iCoord = idxStr[ii*2+1]-'a'; + p->rValue = sqlite3_value_double(argv[ii]); + } + } + } + + if( rc==SQLITE_OK ){ + pCsr->pNode = 0; + rc = nodeAcquire(pRtree, 1, 0, &pRoot); + } + if( rc==SQLITE_OK ){ + int isEof = 1; + int nCell = NCELL(pRoot); + pCsr->pNode = pRoot; + for(pCsr->iCell=0; rc==SQLITE_OK && pCsr->iCell<nCell; pCsr->iCell++){ + assert( pCsr->pNode==pRoot ); + rc = descendToCell(pRtree, pCsr, pRtree->iDepth, &isEof); + if( !isEof ){ + break; + } + } + if( rc==SQLITE_OK && isEof ){ + assert( pCsr->pNode==pRoot ); + nodeRelease(pRtree, pRoot); + pCsr->pNode = 0; + } + assert( rc!=SQLITE_OK || !pCsr->pNode || pCsr->iCell<NCELL(pCsr->pNode) ); + } + } + + rtreeRelease(pRtree); + return rc; +} + +/* +** Rtree virtual table module xBestIndex method. There are three +** table scan strategies to choose from (in order from most to +** least desirable): +** +** idxNum idxStr Strategy +** ------------------------------------------------ +** 1 Unused Direct lookup by rowid. +** 2 See below R-tree query. +** 3 Unused Full table scan. +** ------------------------------------------------ +** +** If strategy 1 or 3 is used, then idxStr is not meaningful. If strategy +** 2 is used, idxStr is formatted to contain 2 bytes for each +** constraint used. The first two bytes of idxStr correspond to +** the constraint in sqlite3_index_info.aConstraintUsage[] with +** (argvIndex==1) etc. +** +** The first of each pair of bytes in idxStr identifies the constraint +** operator as follows: +** +** Operator Byte Value +** ---------------------- +** = 0x41 ('A') +** <= 0x42 ('B') +** < 0x43 ('C') +** >= 0x44 ('D') +** > 0x45 ('E') +** ---------------------- +** +** The second of each pair of bytes identifies the coordinate column +** to which the constraint applies. The leftmost coordinate column +** is 'a', the second from the left 'b' etc. +*/ +static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ + int rc = SQLITE_OK; + int ii, cCol; + + int iIdx = 0; + char zIdxStr[RTREE_MAX_DIMENSIONS*8+1]; + memset(zIdxStr, 0, sizeof(zIdxStr)); + + assert( pIdxInfo->idxStr==0 ); + for(ii=0; ii<pIdxInfo->nConstraint; ii++){ + struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii]; + + if( p->usable && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ + /* We have an equality constraint on the rowid. Use strategy 1. */ + int jj; + for(jj=0; jj<ii; jj++){ + pIdxInfo->aConstraintUsage[jj].argvIndex = 0; + pIdxInfo->aConstraintUsage[jj].omit = 0; + } + pIdxInfo->idxNum = 1; + pIdxInfo->aConstraintUsage[ii].argvIndex = 1; + pIdxInfo->aConstraintUsage[jj].omit = 1; + return SQLITE_OK; + } + + if( p->usable && p->iColumn>0 ){ + u8 op = 0; + switch( p->op ){ + case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break; + case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break; + case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break; + case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break; + case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break; + } + if( op ){ + /* Make sure this particular constraint has not been used before. + ** If it has been used before, ignore it. + ** + ** A <= or < can be used if there is a prior >= or >. 
+        ** A >= or > can be used if there is a prior < or <=.
+        ** A <= or < is disqualified if there is a prior <=, <, or ==.
+        ** A >= or > is disqualified if there is a prior >=, >, or ==.
+        ** A == is disqualified if there is any prior constraint.
+        */
+        int j, opmsk;
+        static const unsigned char compatible[] = { 0, 0, 1, 1, 2, 2 };
+        assert( compatible[RTREE_EQ & 7]==0 );
+        assert( compatible[RTREE_LT & 7]==1 );
+        assert( compatible[RTREE_LE & 7]==1 );
+        assert( compatible[RTREE_GT & 7]==2 );
+        assert( compatible[RTREE_GE & 7]==2 );
+        cCol = p->iColumn - 1 + 'a';
+        opmsk = compatible[op & 7];
+        for(j=0; j<iIdx; j+=2){
+          if( zIdxStr[j+1]==cCol && (compatible[zIdxStr[j] & 7] & opmsk)!=0 ){
+            op = 0;
+            break;
+          }
+        }
+      }
+      if( op ){
+        assert( iIdx<sizeof(zIdxStr)-1 );
+        zIdxStr[iIdx++] = op;
+        zIdxStr[iIdx++] = cCol;
+        pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
+        pIdxInfo->aConstraintUsage[ii].omit = 1;
+      }
+    }
+  }
+
+  pIdxInfo->idxNum = 2;
+  pIdxInfo->needToFreeIdxStr = 1;
+  if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){
+    return SQLITE_NOMEM;
+  }
+  return rc;
+}
+
+/*
+** Return the N-dimensional volume of the cell stored in *p.
+*/
+static float cellArea(Rtree *pRtree, RtreeCell *p){
+  float area = 1.0;
+  int ii;
+  for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+    area = area * (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]));
+  }
+  return area;
+}
+
+/*
+** Return the margin length of cell p. The margin length is the sum
+** of the object's size in each dimension.
+*/
+static float cellMargin(Rtree *pRtree, RtreeCell *p){
+  float margin = 0.0;
+  int ii;
+  for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+    margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]));
+  }
+  return margin;
+}
+
+/*
+** Store the union of cells p1 and p2 in p1.
+*/
+static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
+  int ii;
+  if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+    for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+      p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f);
+      p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f);
+    }
+  }else{
+    for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+      p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i);
+      p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i);
+    }
+  }
+}
+
+/*
+** Return the amount cell p would grow by if it were unioned with pCell.
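+**
+** For example (two dimensions): growing the box (0..2, 0..2), area 4,
+** to also cover (3..4, 0..1) gives (0..4, 0..2), area 8, so the value
+** returned is 4.0.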
+*/ +static float cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){ + float area; + RtreeCell cell; + memcpy(&cell, p, sizeof(RtreeCell)); + area = cellArea(pRtree, &cell); + cellUnion(pRtree, &cell, pCell); + return (cellArea(pRtree, &cell)-area); +} + +#if VARIANT_RSTARTREE_CHOOSESUBTREE || VARIANT_RSTARTREE_SPLIT +static float cellOverlap( + Rtree *pRtree, + RtreeCell *p, + RtreeCell *aCell, + int nCell, + int iExclude +){ + int ii; + float overlap = 0.0; + for(ii=0; ii<nCell; ii++){ + if( ii!=iExclude ){ + int jj; + float o = 1.0; + for(jj=0; jj<(pRtree->nDim*2); jj+=2){ + double x1; + double x2; + + x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj])); + x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1])); + + if( x2<x1 ){ + o = 0.0; + break; + }else{ + o = o * (x2-x1); + } + } + overlap += o; + } + } + return overlap; +} +#endif + +#if VARIANT_RSTARTREE_CHOOSESUBTREE +static float cellOverlapEnlargement( + Rtree *pRtree, + RtreeCell *p, + RtreeCell *pInsert, + RtreeCell *aCell, + int nCell, + int iExclude +){ + float before; + float after; + before = cellOverlap(pRtree, p, aCell, nCell, iExclude); + cellUnion(pRtree, p, pInsert); + after = cellOverlap(pRtree, p, aCell, nCell, iExclude); + return after-before; +} +#endif + + +/* +** This function implements the ChooseLeaf algorithm from Gutman[84]. +** ChooseSubTree in r*tree terminology. +*/ +static int ChooseLeaf( + Rtree *pRtree, /* Rtree table */ + RtreeCell *pCell, /* Cell to insert into rtree */ + int iHeight, /* Height of sub-tree rooted at pCell */ + RtreeNode **ppLeaf /* OUT: Selected leaf page */ +){ + int rc; + int ii; + RtreeNode *pNode; + rc = nodeAcquire(pRtree, 1, 0, &pNode); + + for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){ + int iCell; + sqlite3_int64 iBest; + + float fMinGrowth; + float fMinArea; + float fMinOverlap; + + int nCell = NCELL(pNode); + RtreeCell cell; + RtreeNode *pChild; + + RtreeCell *aCell = 0; + +#if VARIANT_RSTARTREE_CHOOSESUBTREE + if( ii==(pRtree->iDepth-1) ){ + int jj; + aCell = sqlite3_malloc(sizeof(RtreeCell)*nCell); + if( !aCell ){ + rc = SQLITE_NOMEM; + nodeRelease(pRtree, pNode); + pNode = 0; + continue; + } + for(jj=0; jj<nCell; jj++){ + nodeGetCell(pRtree, pNode, jj, &aCell[jj]); + } + } +#endif + + /* Select the child node which will be enlarged the least if pCell + ** is inserted into it. Resolve ties by choosing the entry with + ** the smallest area. + */ + for(iCell=0; iCell<nCell; iCell++){ + float growth; + float area; + float overlap = 0.0; + nodeGetCell(pRtree, pNode, iCell, &cell); + growth = cellGrowth(pRtree, &cell, pCell); + area = cellArea(pRtree, &cell); +#if VARIANT_RSTARTREE_CHOOSESUBTREE + if( ii==(pRtree->iDepth-1) ){ + overlap = cellOverlapEnlargement(pRtree,&cell,pCell,aCell,nCell,iCell); + } +#endif + if( (iCell==0) + || (overlap<fMinOverlap) + || (overlap==fMinOverlap && growth<fMinGrowth) + || (overlap==fMinOverlap && growth==fMinGrowth && area<fMinArea) + ){ + fMinOverlap = overlap; + fMinGrowth = growth; + fMinArea = area; + iBest = cell.iRowid; + } + } + + sqlite3_free(aCell); + rc = nodeAcquire(pRtree, iBest, pNode, &pChild); + nodeRelease(pRtree, pNode); + pNode = pChild; + } + + *ppLeaf = pNode; + return rc; +} + +/* +** A cell with the same content as pCell has just been inserted into +** the node pNode. This function updates the bounding box cells in +** all ancestor elements. +*/ +static void AdjustTree( + Rtree *pRtree, /* Rtree table */ + RtreeNode *pNode, /* Adjust ancestry of this node. 
*/
+  RtreeCell *pCell                  /* This cell was just inserted */
+){
+  RtreeNode *p = pNode;
+  while( p->pParent ){
+    RtreeCell cell;
+    RtreeNode *pParent = p->pParent;
+    int iCell = nodeParentIndex(pRtree, p);
+
+    nodeGetCell(pRtree, pParent, iCell, &cell);
+    if( cellGrowth(pRtree, &cell, pCell)>0.0 ){
+      cellUnion(pRtree, &cell, pCell);
+      nodeOverwriteCell(pRtree, pParent, &cell, iCell);
+    }
+
+    p = pParent;
+  }
+}
+
+/*
+** Write mapping (iRowid->iNode) to the <rtree>_rowid table.
+*/
+static int rowidWrite(Rtree *pRtree, sqlite3_int64 iRowid, sqlite3_int64 iNode){
+  sqlite3_bind_int64(pRtree->pWriteRowid, 1, iRowid);
+  sqlite3_bind_int64(pRtree->pWriteRowid, 2, iNode);
+  sqlite3_step(pRtree->pWriteRowid);
+  return sqlite3_reset(pRtree->pWriteRowid);
+}
+
+/*
+** Write mapping (iNode->iPar) to the <rtree>_parent table.
+*/
+static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){
+  sqlite3_bind_int64(pRtree->pWriteParent, 1, iNode);
+  sqlite3_bind_int64(pRtree->pWriteParent, 2, iPar);
+  sqlite3_step(pRtree->pWriteParent);
+  return sqlite3_reset(pRtree->pWriteParent);
+}
+
+static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int);
+
+#if VARIANT_GUTTMAN_LINEAR_SPLIT
+/*
+** Implementation of the linear variant of the PickNext() function from
+** Guttman[84].
+*/
+static RtreeCell *LinearPickNext(
+  Rtree *pRtree,
+  RtreeCell *aCell,
+  int nCell,
+  RtreeCell *pLeftBox,
+  RtreeCell *pRightBox,
+  int *aiUsed
+){
+  int ii;
+  for(ii=0; aiUsed[ii]; ii++);
+  aiUsed[ii] = 1;
+  return &aCell[ii];
+}
+
+/*
+** Implementation of the linear variant of the PickSeeds() function from
+** Guttman[84].
+*/
+static void LinearPickSeeds(
+  Rtree *pRtree,
+  RtreeCell *aCell,
+  int nCell,
+  int *piLeftSeed,
+  int *piRightSeed
+){
+  int i;
+  int iLeftSeed = 0;
+  int iRightSeed = 1;
+  float maxNormalInnerWidth = 0.0;
+
+  /* Pick two "seed" cells from the array of cells. The algorithm used
+  ** here is the LinearPickSeeds algorithm from Guttman[1984]. The
+  ** indices of the two seed cells in the array are stored in local
+  ** variables iLeftSeed and iRightSeed.
+  */
+  for(i=0; i<pRtree->nDim; i++){
+    float x1 = DCOORD(aCell[0].aCoord[i*2]);
+    float x2 = DCOORD(aCell[0].aCoord[i*2+1]);
+    float x3 = x1;
+    float x4 = x2;
+    int jj;
+
+    int iCellLeft = 0;
+    int iCellRight = 0;
+
+    for(jj=1; jj<nCell; jj++){
+      float left = DCOORD(aCell[jj].aCoord[i*2]);
+      float right = DCOORD(aCell[jj].aCoord[i*2+1]);
+
+      if( left<x1 ) x1 = left;
+      if( right>x4 ) x4 = right;
+      if( left>x3 ){
+        x3 = left;
+        iCellRight = jj;
+      }
+      if( right<x2 ){
+        x2 = right;
+        iCellLeft = jj;
+      }
+    }
+
+    if( x4!=x1 ){
+      float normalwidth = (x3 - x2) / (x4 - x1);
+      if( normalwidth>maxNormalInnerWidth ){
+        maxNormalInnerWidth = normalwidth;
+        iLeftSeed = iCellLeft;
+        iRightSeed = iCellRight;
+      }
+    }
+  }
+
+  *piLeftSeed = iLeftSeed;
+  *piRightSeed = iRightSeed;
+}
+#endif /* VARIANT_GUTTMAN_LINEAR_SPLIT */
+
+#if VARIANT_GUTTMAN_QUADRATIC_SPLIT
+/*
+** Implementation of the quadratic variant of the PickNext() function from
+** Guttman[84].
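+**
+** Of the cells not yet assigned to either group, pick the one with the
+** strongest preference: the cell that maximises the difference between
+** the growth of the left and right bounding boxes.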
+*/
+static RtreeCell *QuadraticPickNext(
+  Rtree *pRtree,
+  RtreeCell *aCell,
+  int nCell,
+  RtreeCell *pLeftBox,
+  RtreeCell *pRightBox,
+  int *aiUsed
+){
+  #define FABS(a) ((a)<0.0?-1.0*(a):(a))
+
+  int iSelect = -1;
+  float fDiff;
+  int ii;
+  for(ii=0; ii<nCell; ii++){
+    if( aiUsed[ii]==0 ){
+      float left = cellGrowth(pRtree, pLeftBox, &aCell[ii]);
+      float right = cellGrowth(pRtree, pRightBox, &aCell[ii]);
+      float diff = FABS(right-left);
+      if( iSelect<0 || diff>fDiff ){
+        fDiff = diff;
+        iSelect = ii;
+      }
+    }
+  }
+  aiUsed[iSelect] = 1;
+  return &aCell[iSelect];
+}
+
+/*
+** Implementation of the quadratic variant of the PickSeeds() function from
+** Guttman[84].
+*/
+static void QuadraticPickSeeds(
+  Rtree *pRtree,
+  RtreeCell *aCell,
+  int nCell,
+  int *piLeftSeed,
+  int *piRightSeed
+){
+  int ii;
+  int jj;
+
+  int iLeftSeed = 0;
+  int iRightSeed = 1;
+  float fWaste = 0.0;
+
+  for(ii=0; ii<nCell; ii++){
+    for(jj=ii+1; jj<nCell; jj++){
+      float right = cellArea(pRtree, &aCell[jj]);
+      float growth = cellGrowth(pRtree, &aCell[ii], &aCell[jj]);
+      float waste = growth - right;
+
+      if( waste>fWaste ){
+        iLeftSeed = ii;
+        iRightSeed = jj;
+        fWaste = waste;
+      }
+    }
+  }
+
+  *piLeftSeed = iLeftSeed;
+  *piRightSeed = iRightSeed;
+}
+#endif /* VARIANT_GUTTMAN_QUADRATIC_SPLIT */
+
+/*
+** Arguments aIdx, aDistance and aSpare all point to arrays of size
+** nIdx. The aIdx array contains the set of integers from 0 to
+** (nIdx-1) in no particular order. This function sorts the values
+** in aIdx according to the indexed values in aDistance. For
+** example, assuming the inputs:
+**
+**   aIdx      = { 0,   1,   2,   3 }
+**   aDistance = { 5.0, 2.0, 7.0, 6.0 }
+**
+** this function sets the aIdx array to contain:
+**
+**   aIdx      = { 1, 0, 3, 2 }
+**
+** The aSpare array is used as temporary working space by the
+** sorting algorithm.
+*/
+static void SortByDistance(
+  int *aIdx,
+  int nIdx,
+  float *aDistance,
+  int *aSpare
+){
+  if( nIdx>1 ){
+    int iLeft = 0;
+    int iRight = 0;
+
+    int nLeft = nIdx/2;
+    int nRight = nIdx-nLeft;
+    int *aLeft = aIdx;
+    int *aRight = &aIdx[nLeft];
+
+    SortByDistance(aLeft, nLeft, aDistance, aSpare);
+    SortByDistance(aRight, nRight, aDistance, aSpare);
+
+    memcpy(aSpare, aLeft, sizeof(int)*nLeft);
+    aLeft = aSpare;
+
+    while( iLeft<nLeft || iRight<nRight ){
+      if( iLeft==nLeft ){
+        aIdx[iLeft+iRight] = aRight[iRight];
+        iRight++;
+      }else if( iRight==nRight ){
+        aIdx[iLeft+iRight] = aLeft[iLeft];
+        iLeft++;
+      }else{
+        float fLeft = aDistance[aLeft[iLeft]];
+        float fRight = aDistance[aRight[iRight]];
+        if( fLeft<fRight ){
+          aIdx[iLeft+iRight] = aLeft[iLeft];
+          iLeft++;
+        }else{
+          aIdx[iLeft+iRight] = aRight[iRight];
+          iRight++;
+        }
+      }
+    }
+
+#if 0
+    /* Check that the sort worked */
+    {
+      int jj;
+      for(jj=1; jj<nIdx; jj++){
+        float left = aDistance[aIdx[jj-1]];
+        float right = aDistance[aIdx[jj]];
+        assert( left<=right );
+      }
+    }
+#endif
+  }
+}
+
+/*
+** Arguments aIdx, aCell and aSpare all point to arrays of size
+** nIdx. The aIdx array contains the set of integers from 0 to
+** (nIdx-1) in no particular order. This function sorts the values
+** in aIdx according to dimension iDim of the cells in aCell. The
+** minimum value of dimension iDim is considered first, the
+** maximum used to break ties.
+**
+** The aSpare array is used as temporary working space by the
+** sorting algorithm.
+*/ +static void SortByDimension( + Rtree *pRtree, + int *aIdx, + int nIdx, + int iDim, + RtreeCell *aCell, + int *aSpare +){ + if( nIdx>1 ){ + + int iLeft = 0; + int iRight = 0; + + int nLeft = nIdx/2; + int nRight = nIdx-nLeft; + int *aLeft = aIdx; + int *aRight = &aIdx[nLeft]; + + SortByDimension(pRtree, aLeft, nLeft, iDim, aCell, aSpare); + SortByDimension(pRtree, aRight, nRight, iDim, aCell, aSpare); + + memcpy(aSpare, aLeft, sizeof(int)*nLeft); + aLeft = aSpare; + while( iLeft<nLeft || iRight<nRight ){ + double xleft1 = DCOORD(aCell[aLeft[iLeft]].aCoord[iDim*2]); + double xleft2 = DCOORD(aCell[aLeft[iLeft]].aCoord[iDim*2+1]); + double xright1 = DCOORD(aCell[aRight[iRight]].aCoord[iDim*2]); + double xright2 = DCOORD(aCell[aRight[iRight]].aCoord[iDim*2+1]); + if( (iLeft!=nLeft) && ((iRight==nRight) + || (xleft1<xright1) + || (xleft1==xright1 && xleft2<xright2) + )){ + aIdx[iLeft+iRight] = aLeft[iLeft]; + iLeft++; + }else{ + aIdx[iLeft+iRight] = aRight[iRight]; + iRight++; + } + } + +#if 0 + /* Check that the sort worked */ + { + int jj; + for(jj=1; jj<nIdx; jj++){ + float xleft1 = aCell[aIdx[jj-1]].aCoord[iDim*2]; + float xleft2 = aCell[aIdx[jj-1]].aCoord[iDim*2+1]; + float xright1 = aCell[aIdx[jj]].aCoord[iDim*2]; + float xright2 = aCell[aIdx[jj]].aCoord[iDim*2+1]; + assert( xleft1<=xright1 && (xleft1<xright1 || xleft2<=xright2) ); + } + } +#endif + } +} + +#if VARIANT_RSTARTREE_SPLIT +/* +** Implementation of the R*-tree variant of SplitNode from Beckman[1990]. +*/ +static int splitNodeStartree( + Rtree *pRtree, + RtreeCell *aCell, + int nCell, + RtreeNode *pLeft, + RtreeNode *pRight, + RtreeCell *pBboxLeft, + RtreeCell *pBboxRight +){ + int **aaSorted; + int *aSpare; + int ii; + + int iBestDim; + int iBestSplit; + float fBestMargin; + + int nByte = (pRtree->nDim+1)*(sizeof(int*)+nCell*sizeof(int)); + + aaSorted = (int **)sqlite3_malloc(nByte); + if( !aaSorted ){ + return SQLITE_NOMEM; + } + + aSpare = &((int *)&aaSorted[pRtree->nDim])[pRtree->nDim*nCell]; + memset(aaSorted, 0, nByte); + for(ii=0; ii<pRtree->nDim; ii++){ + int jj; + aaSorted[ii] = &((int *)&aaSorted[pRtree->nDim])[ii*nCell]; + for(jj=0; jj<nCell; jj++){ + aaSorted[ii][jj] = jj; + } + SortByDimension(pRtree, aaSorted[ii], nCell, ii, aCell, aSpare); + } + + for(ii=0; ii<pRtree->nDim; ii++){ + float margin = 0.0; + float fBestOverlap; + float fBestArea; + int iBestLeft; + int nLeft; + + for( + nLeft=RTREE_MINCELLS(pRtree); + nLeft<=(nCell-RTREE_MINCELLS(pRtree)); + nLeft++ + ){ + RtreeCell left; + RtreeCell right; + int kk; + float overlap; + float area; + + memcpy(&left, &aCell[aaSorted[ii][0]], sizeof(RtreeCell)); + memcpy(&right, &aCell[aaSorted[ii][nCell-1]], sizeof(RtreeCell)); + for(kk=1; kk<(nCell-1); kk++){ + if( kk<nLeft ){ + cellUnion(pRtree, &left, &aCell[aaSorted[ii][kk]]); + }else{ + cellUnion(pRtree, &right, &aCell[aaSorted[ii][kk]]); + } + } + margin += cellMargin(pRtree, &left); + margin += cellMargin(pRtree, &right); + overlap = cellOverlap(pRtree, &left, &right, 1, -1); + area = cellArea(pRtree, &left) + cellArea(pRtree, &right); + if( (nLeft==RTREE_MINCELLS(pRtree)) + || (overlap<fBestOverlap) + || (overlap==fBestOverlap && area<fBestArea) + ){ + iBestLeft = nLeft; + fBestOverlap = overlap; + fBestArea = area; + } + } + + if( ii==0 || margin<fBestMargin ){ + iBestDim = ii; + fBestMargin = margin; + iBestSplit = iBestLeft; + } + } + + memcpy(pBboxLeft, &aCell[aaSorted[iBestDim][0]], sizeof(RtreeCell)); + memcpy(pBboxRight, &aCell[aaSorted[iBestDim][iBestSplit]], sizeof(RtreeCell)); + for(ii=0; 
ii<nCell; ii++){ + RtreeNode *pTarget = (ii<iBestSplit)?pLeft:pRight; + RtreeCell *pBbox = (ii<iBestSplit)?pBboxLeft:pBboxRight; + RtreeCell *pCell = &aCell[aaSorted[iBestDim][ii]]; + nodeInsertCell(pRtree, pTarget, pCell); + cellUnion(pRtree, pBbox, pCell); + } + + sqlite3_free(aaSorted); + return SQLITE_OK; +} +#endif + +#if VARIANT_GUTTMAN_SPLIT +/* +** Implementation of the regular R-tree SplitNode from Guttman[1984]. +*/ +static int splitNodeGuttman( + Rtree *pRtree, + RtreeCell *aCell, + int nCell, + RtreeNode *pLeft, + RtreeNode *pRight, + RtreeCell *pBboxLeft, + RtreeCell *pBboxRight +){ + int iLeftSeed = 0; + int iRightSeed = 1; + int *aiUsed; + int i; + + aiUsed = sqlite3_malloc(sizeof(int)*nCell); + memset(aiUsed, 0, sizeof(int)*nCell); + + PickSeeds(pRtree, aCell, nCell, &iLeftSeed, &iRightSeed); + + memcpy(pBboxLeft, &aCell[iLeftSeed], sizeof(RtreeCell)); + memcpy(pBboxRight, &aCell[iRightSeed], sizeof(RtreeCell)); + nodeInsertCell(pRtree, pLeft, &aCell[iLeftSeed]); + nodeInsertCell(pRtree, pRight, &aCell[iRightSeed]); + aiUsed[iLeftSeed] = 1; + aiUsed[iRightSeed] = 1; + + for(i=nCell-2; i>0; i--){ + RtreeCell *pNext; + pNext = PickNext(pRtree, aCell, nCell, pBboxLeft, pBboxRight, aiUsed); + float diff = + cellGrowth(pRtree, pBboxLeft, pNext) - + cellGrowth(pRtree, pBboxRight, pNext) + ; + if( (RTREE_MINCELLS(pRtree)-NCELL(pRight)==i) + || (diff>0.0 && (RTREE_MINCELLS(pRtree)-NCELL(pLeft)!=i)) + ){ + nodeInsertCell(pRtree, pRight, pNext); + cellUnion(pRtree, pBboxRight, pNext); + }else{ + nodeInsertCell(pRtree, pLeft, pNext); + cellUnion(pRtree, pBboxLeft, pNext); + } + } + + sqlite3_free(aiUsed); + return SQLITE_OK; +} +#endif + +static int updateMapping( + Rtree *pRtree, + i64 iRowid, + RtreeNode *pNode, + int iHeight +){ + int (*xSetMapping)(Rtree *, sqlite3_int64, sqlite3_int64); + xSetMapping = ((iHeight==0)?rowidWrite:parentWrite); + if( iHeight>0 ){ + RtreeNode *pChild = nodeHashLookup(pRtree, iRowid); + if( pChild ){ + nodeRelease(pRtree, pChild->pParent); + nodeReference(pNode); + pChild->pParent = pNode; + } + } + return xSetMapping(pRtree, iRowid, pNode->iNode); +} + +static int SplitNode( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell, + int iHeight +){ + int i; + int newCellIsRight = 0; + + int rc = SQLITE_OK; + int nCell = NCELL(pNode); + RtreeCell *aCell; + int *aiUsed; + + RtreeNode *pLeft = 0; + RtreeNode *pRight = 0; + + RtreeCell leftbbox; + RtreeCell rightbbox; + + /* Allocate an array and populate it with a copy of pCell and + ** all cells from node pLeft. Then zero the original node. 
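+  ** If the node being split is the root (node 1), two fresh children
+  ** are allocated and the root is kept, so the tree grows one level
+  ** deeper.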
+ */ + aCell = sqlite3_malloc((sizeof(RtreeCell)+sizeof(int))*(nCell+1)); + if( !aCell ){ + rc = SQLITE_NOMEM; + goto splitnode_out; + } + aiUsed = (int *)&aCell[nCell+1]; + memset(aiUsed, 0, sizeof(int)*(nCell+1)); + for(i=0; i<nCell; i++){ + nodeGetCell(pRtree, pNode, i, &aCell[i]); + } + nodeZero(pRtree, pNode); + memcpy(&aCell[nCell], pCell, sizeof(RtreeCell)); + nCell++; + + if( pNode->iNode==1 ){ + pRight = nodeNew(pRtree, pNode, 1); + pLeft = nodeNew(pRtree, pNode, 1); + pRtree->iDepth++; + pNode->isDirty = 1; + writeInt16(pNode->zData, pRtree->iDepth); + }else{ + pLeft = pNode; + pRight = nodeNew(pRtree, pLeft->pParent, 1); + nodeReference(pLeft); + } + + if( !pLeft || !pRight ){ + rc = SQLITE_NOMEM; + goto splitnode_out; + } + + memset(pLeft->zData, 0, pRtree->iNodeSize); + memset(pRight->zData, 0, pRtree->iNodeSize); + + rc = AssignCells(pRtree, aCell, nCell, pLeft, pRight, &leftbbox, &rightbbox); + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + + /* Ensure both child nodes have node numbers assigned to them. */ + if( (0==pRight->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pRight))) + || (0==pLeft->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pLeft))) + ){ + goto splitnode_out; + } + + rightbbox.iRowid = pRight->iNode; + leftbbox.iRowid = pLeft->iNode; + + if( pNode->iNode==1 ){ + rc = rtreeInsertCell(pRtree, pLeft->pParent, &leftbbox, iHeight+1); + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + }else{ + RtreeNode *pParent = pLeft->pParent; + int iCell = nodeParentIndex(pRtree, pLeft); + nodeOverwriteCell(pRtree, pParent, &leftbbox, iCell); + AdjustTree(pRtree, pParent, &leftbbox); + } + if( (rc = rtreeInsertCell(pRtree, pRight->pParent, &rightbbox, iHeight+1)) ){ + goto splitnode_out; + } + + for(i=0; i<NCELL(pRight); i++){ + i64 iRowid = nodeGetRowid(pRtree, pRight, i); + rc = updateMapping(pRtree, iRowid, pRight, iHeight); + if( iRowid==pCell->iRowid ){ + newCellIsRight = 1; + } + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + } + if( pNode->iNode==1 ){ + for(i=0; i<NCELL(pLeft); i++){ + i64 iRowid = nodeGetRowid(pRtree, pLeft, i); + rc = updateMapping(pRtree, iRowid, pLeft, iHeight); + if( rc!=SQLITE_OK ){ + goto splitnode_out; + } + } + }else if( newCellIsRight==0 ){ + rc = updateMapping(pRtree, pCell->iRowid, pLeft, iHeight); + } + + if( rc==SQLITE_OK ){ + rc = nodeRelease(pRtree, pRight); + pRight = 0; + } + if( rc==SQLITE_OK ){ + rc = nodeRelease(pRtree, pLeft); + pLeft = 0; + } + +splitnode_out: + nodeRelease(pRtree, pRight); + nodeRelease(pRtree, pLeft); + sqlite3_free(aCell); + return rc; +} + +static int fixLeafParent(Rtree *pRtree, RtreeNode *pLeaf){ + int rc = SQLITE_OK; + if( pLeaf->iNode!=1 && pLeaf->pParent==0 ){ + sqlite3_bind_int64(pRtree->pReadParent, 1, pLeaf->iNode); + if( sqlite3_step(pRtree->pReadParent)==SQLITE_ROW ){ + i64 iNode = sqlite3_column_int64(pRtree->pReadParent, 0); + rc = nodeAcquire(pRtree, iNode, 0, &pLeaf->pParent); + }else{ + rc = SQLITE_ERROR; + } + sqlite3_reset(pRtree->pReadParent); + if( rc==SQLITE_OK ){ + rc = fixLeafParent(pRtree, pLeaf->pParent); + } + } + return rc; +} + +static int deleteCell(Rtree *, RtreeNode *, int, int); + +static int removeNode(Rtree *pRtree, RtreeNode *pNode, int iHeight){ + int rc; + RtreeNode *pParent; + int iCell; + + assert( pNode->nRef==1 ); + + /* Remove the entry in the parent cell. 
*/ + iCell = nodeParentIndex(pRtree, pNode); + pParent = pNode->pParent; + pNode->pParent = 0; + if( SQLITE_OK!=(rc = deleteCell(pRtree, pParent, iCell, iHeight+1)) + || SQLITE_OK!=(rc = nodeRelease(pRtree, pParent)) + ){ + return rc; + } + + /* Remove the xxx_node entry. */ + sqlite3_bind_int64(pRtree->pDeleteNode, 1, pNode->iNode); + sqlite3_step(pRtree->pDeleteNode); + if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteNode)) ){ + return rc; + } + + /* Remove the xxx_parent entry. */ + sqlite3_bind_int64(pRtree->pDeleteParent, 1, pNode->iNode); + sqlite3_step(pRtree->pDeleteParent); + if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteParent)) ){ + return rc; + } + + /* Remove the node from the in-memory hash table and link it into + ** the Rtree.pDeleted list. Its contents will be re-inserted later on. + */ + nodeHashDelete(pRtree, pNode); + pNode->iNode = iHeight; + pNode->pNext = pRtree->pDeleted; + pNode->nRef++; + pRtree->pDeleted = pNode; + + return SQLITE_OK; +} + +static void fixBoundingBox(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode *pParent = pNode->pParent; + if( pParent ){ + int ii; + int nCell = NCELL(pNode); + RtreeCell box; /* Bounding box for pNode */ + nodeGetCell(pRtree, pNode, 0, &box); + for(ii=1; ii<nCell; ii++){ + RtreeCell cell; + nodeGetCell(pRtree, pNode, ii, &cell); + cellUnion(pRtree, &box, &cell); + } + box.iRowid = pNode->iNode; + ii = nodeParentIndex(pRtree, pNode); + nodeOverwriteCell(pRtree, pParent, &box, ii); + fixBoundingBox(pRtree, pParent); + } +} + +/* +** Delete the cell at index iCell of node pNode. After removing the +** cell, adjust the r-tree data structure if required. +*/ +static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){ + int rc; + + if( SQLITE_OK!=(rc = fixLeafParent(pRtree, pNode)) ){ + return rc; + } + + /* Remove the cell from the node. This call just moves bytes around + ** the in-memory node image, so it cannot fail. + */ + nodeDeleteCell(pRtree, pNode, iCell); + + /* If the node is not the tree root and now has less than the minimum + ** number of cells, remove it from the tree. Otherwise, update the + ** cell in the parent node so that it tightly contains the updated + ** node. + */ + if( pNode->iNode!=1 ){ + RtreeNode *pParent = pNode->pParent; + if( (pParent->iNode!=1 || NCELL(pParent)!=1) + && (NCELL(pNode)<RTREE_MINCELLS(pRtree)) + ){ + rc = removeNode(pRtree, pNode, iHeight); + }else{ + fixBoundingBox(pRtree, pNode); + } + } + + return rc; +} + +static int Reinsert( + Rtree *pRtree, + RtreeNode *pNode, + RtreeCell *pCell, + int iHeight +){ + int *aOrder; + int *aSpare; + RtreeCell *aCell; + float *aDistance; + int nCell; + float aCenterCoord[RTREE_MAX_DIMENSIONS]; + int iDim; + int ii; + int rc = SQLITE_OK; + + memset(aCenterCoord, 0, sizeof(float)*RTREE_MAX_DIMENSIONS); + + nCell = NCELL(pNode)+1; + + /* Allocate the buffers used by this operation. The allocation is + ** relinquished before this function returns. 
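+  ** A single allocation provides the aCell array plus the aOrder,
+  ** aSpare and aDistance arrays laid out after it.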
+  */
+  aCell = (RtreeCell *)sqlite3_malloc(nCell * (
+    sizeof(RtreeCell) +         /* aCell array */
+    sizeof(int)       +         /* aOrder array */
+    sizeof(int)       +         /* aSpare array */
+    sizeof(float)               /* aDistance array */
+  ));
+  if( !aCell ){
+    return SQLITE_NOMEM;
+  }
+  aOrder    = (int *)&aCell[nCell];
+  aSpare    = (int *)&aOrder[nCell];
+  aDistance = (float *)&aSpare[nCell];
+
+  for(ii=0; ii<nCell; ii++){
+    if( ii==(nCell-1) ){
+      memcpy(&aCell[ii], pCell, sizeof(RtreeCell));
+    }else{
+      nodeGetCell(pRtree, pNode, ii, &aCell[ii]);
+    }
+    aOrder[ii] = ii;
+    for(iDim=0; iDim<pRtree->nDim; iDim++){
+      aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]);
+      aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]);
+    }
+  }
+  for(iDim=0; iDim<pRtree->nDim; iDim++){
+    aCenterCoord[iDim] = aCenterCoord[iDim]/((float)nCell*2.0);
+  }
+
+  for(ii=0; ii<nCell; ii++){
+    aDistance[ii] = 0.0;
+    for(iDim=0; iDim<pRtree->nDim; iDim++){
+      float coord = DCOORD(aCell[ii].aCoord[iDim*2+1]) -
+          DCOORD(aCell[ii].aCoord[iDim*2]);
+      aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]);
+    }
+  }
+
+  SortByDistance(aOrder, nCell, aDistance, aSpare);
+  nodeZero(pRtree, pNode);
+
+  for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){
+    RtreeCell *p = &aCell[aOrder[ii]];
+    nodeInsertCell(pRtree, pNode, p);
+    if( p->iRowid==pCell->iRowid ){
+      if( iHeight==0 ){
+        rc = rowidWrite(pRtree, p->iRowid, pNode->iNode);
+      }else{
+        rc = parentWrite(pRtree, p->iRowid, pNode->iNode);
+      }
+    }
+  }
+  if( rc==SQLITE_OK ){
+    fixBoundingBox(pRtree, pNode);
+  }
+  for(; rc==SQLITE_OK && ii<nCell; ii++){
+    /* Find a node, at height iHeight, to store this cell in. */
+    RtreeNode *pInsert;
+    RtreeCell *p = &aCell[aOrder[ii]];
+    rc = ChooseLeaf(pRtree, p, iHeight, &pInsert);
+    if( rc==SQLITE_OK ){
+      int rc2;
+      rc = rtreeInsertCell(pRtree, pInsert, p, iHeight);
+      rc2 = nodeRelease(pRtree, pInsert);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+  }
+
+  sqlite3_free(aCell);
+  return rc;
+}
+
+/*
+** Insert cell pCell into node pNode. Node pNode is the head of a
+** subtree iHeight high (leaf nodes have iHeight==0).
+*/
+static int rtreeInsertCell(
+  Rtree *pRtree,
+  RtreeNode *pNode,
+  RtreeCell *pCell,
+  int iHeight
+){
+  int rc = SQLITE_OK;
+  if( iHeight>0 ){
+    RtreeNode *pChild = nodeHashLookup(pRtree, pCell->iRowid);
+    if( pChild ){
+      nodeRelease(pRtree, pChild->pParent);
+      nodeReference(pNode);
+      pChild->pParent = pNode;
+    }
+  }
+  if( nodeInsertCell(pRtree, pNode, pCell) ){
+#if VARIANT_RSTARTREE_REINSERT
+    if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){
+      rc = SplitNode(pRtree, pNode, pCell, iHeight);
+    }else{
+      pRtree->iReinsertHeight = iHeight;
+      rc = Reinsert(pRtree, pNode, pCell, iHeight);
+    }
+#else
+    rc = SplitNode(pRtree, pNode, pCell, iHeight);
+#endif
+  }else{
+    AdjustTree(pRtree, pNode, pCell);
+    if( iHeight==0 ){
+      rc = rowidWrite(pRtree, pCell->iRowid, pNode->iNode);
+    }else{
+      rc = parentWrite(pRtree, pCell->iRowid, pNode->iNode);
+    }
+  }
+  return rc;
+}
+
+static int reinsertNodeContent(Rtree *pRtree, RtreeNode *pNode){
+  int ii;
+  int rc = SQLITE_OK;
+  int nCell = NCELL(pNode);
+
+  for(ii=0; rc==SQLITE_OK && ii<nCell; ii++){
+    RtreeNode *pInsert;
+    RtreeCell cell;
+    nodeGetCell(pRtree, pNode, ii, &cell);
+
+    /* Find a node to store this cell in. pNode->iNode currently contains
+    ** the height of the sub-tree headed by the cell.
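+    ** (removeNode() stores the height in pNode->iNode just before
+    ** linking the node into the Rtree.pDeleted list, which is why the
+    ** field holds a height rather than a node number at this point.)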
+    */
+    rc = ChooseLeaf(pRtree, &cell, pNode->iNode, &pInsert);
+    if( rc==SQLITE_OK ){
+      int rc2;
+      rc = rtreeInsertCell(pRtree, pInsert, &cell, pNode->iNode);
+      rc2 = nodeRelease(pRtree, pInsert);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+  }
+  return rc;
+}
+
+/*
+** Select a currently unused rowid for a new r-tree record.
+*/
+static int newRowid(Rtree *pRtree, i64 *piRowid){
+  int rc;
+  sqlite3_bind_null(pRtree->pWriteRowid, 1);
+  sqlite3_bind_null(pRtree->pWriteRowid, 2);
+  sqlite3_step(pRtree->pWriteRowid);
+  rc = sqlite3_reset(pRtree->pWriteRowid);
+  *piRowid = sqlite3_last_insert_rowid(pRtree->db);
+  return rc;
+}
+
+#ifndef NDEBUG
+static int hashIsEmpty(Rtree *pRtree){
+  int ii;
+  for(ii=0; ii<HASHSIZE; ii++){
+    assert( !pRtree->aHash[ii] );
+  }
+  return 1;
+}
+#endif
+
+/*
+** The xUpdate method for rtree module virtual tables.
+*/
+int rtreeUpdate(
+  sqlite3_vtab *pVtab,
+  int nData,
+  sqlite3_value **azData,
+  sqlite_int64 *pRowid
+){
+  Rtree *pRtree = (Rtree *)pVtab;
+  int rc = SQLITE_OK;
+
+  rtreeReference(pRtree);
+
+  assert(nData>=1);
+  assert(hashIsEmpty(pRtree));
+
+  /* If azData[0] is not an SQL NULL value, it is the rowid of a
+  ** record to delete from the r-tree table. The following block does
+  ** just that.
+  */
+  if( sqlite3_value_type(azData[0])!=SQLITE_NULL ){
+    i64 iDelete;                /* The rowid to delete */
+    RtreeNode *pLeaf;           /* Leaf node containing record iDelete */
+    int iCell;                  /* Index of iDelete cell in pLeaf */
+    RtreeNode *pRoot;
+
+    /* Obtain a reference to the root node to initialise Rtree.iDepth */
+    rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+
+    /* Obtain a reference to the leaf node that contains the entry
+    ** about to be deleted.
+    */
+    if( rc==SQLITE_OK ){
+      iDelete = sqlite3_value_int64(azData[0]);
+      rc = findLeafNode(pRtree, iDelete, &pLeaf);
+    }
+
+    /* Delete the cell in question from the leaf node. */
+    if( rc==SQLITE_OK ){
+      int rc2;
+      iCell = nodeRowidIndex(pRtree, pLeaf, iDelete);
+      rc = deleteCell(pRtree, pLeaf, iCell, 0);
+      rc2 = nodeRelease(pRtree, pLeaf);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+
+    /* Delete the corresponding entry in the <rtree>_rowid table. */
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pRtree->pDeleteRowid, 1, iDelete);
+      sqlite3_step(pRtree->pDeleteRowid);
+      rc = sqlite3_reset(pRtree->pDeleteRowid);
+    }
+
+    /* Check if the root node now has exactly one child. If so, remove
+    ** it, schedule the contents of the child for reinsertion and
+    ** reduce the tree height by one.
+    **
+    ** This is equivalent to copying the contents of the child into
+    ** the root node (the operation that Guttman's paper says to perform
+    ** in this scenario).
+    */
+    if( rc==SQLITE_OK && pRtree->iDepth>0 ){
+      if( rc==SQLITE_OK && NCELL(pRoot)==1 ){
+        RtreeNode *pChild;
+        i64 iChild = nodeGetRowid(pRtree, pRoot, 0);
+        rc = nodeAcquire(pRtree, iChild, pRoot, &pChild);
+        if( rc==SQLITE_OK ){
+          rc = removeNode(pRtree, pChild, pRtree->iDepth-1);
+        }
+        if( rc==SQLITE_OK ){
+          pRtree->iDepth--;
+          writeInt16(pRoot->zData, pRtree->iDepth);
+          pRoot->isDirty = 1;
+        }
+      }
+    }
+
+    /* Re-insert the contents of any underfull nodes removed from the tree. */
+    for(pLeaf=pRtree->pDeleted; pLeaf; pLeaf=pRtree->pDeleted){
+      if( rc==SQLITE_OK ){
+        rc = reinsertNodeContent(pRtree, pLeaf);
+      }
+      pRtree->pDeleted = pLeaf->pNext;
+      sqlite3_free(pLeaf);
+    }
+
+    /* Release the reference to the root node.
*/
+    if( rc==SQLITE_OK ){
+      rc = nodeRelease(pRtree, pRoot);
+    }else{
+      nodeRelease(pRtree, pRoot);
+    }
+  }
+
+  /* If the azData[] array contains more than one element, elements
+  ** (azData[2]..azData[nData-1]) contain a new record to insert into
+  ** the r-tree structure.
+  */
+  if( rc==SQLITE_OK && nData>1 ){
+    /* Insert a new record into the r-tree */
+    RtreeCell cell;
+    int ii;
+    RtreeNode *pLeaf;
+
+    /* Populate the cell.aCoord[] array. The first coordinate is azData[3]. */
+    assert( nData==(pRtree->nDim*2 + 3) );
+    if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+      for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+        cell.aCoord[ii].f = (float)sqlite3_value_double(azData[ii+3]);
+        cell.aCoord[ii+1].f = (float)sqlite3_value_double(azData[ii+4]);
+        if( cell.aCoord[ii].f>cell.aCoord[ii+1].f ){
+          rc = SQLITE_CONSTRAINT;
+          goto constraint;
+        }
+      }
+    }else{
+      for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+        cell.aCoord[ii].i = sqlite3_value_int(azData[ii+3]);
+        cell.aCoord[ii+1].i = sqlite3_value_int(azData[ii+4]);
+        if( cell.aCoord[ii].i>cell.aCoord[ii+1].i ){
+          rc = SQLITE_CONSTRAINT;
+          goto constraint;
+        }
+      }
+    }
+
+    /* Figure out the rowid of the new row. */
+    if( sqlite3_value_type(azData[2])==SQLITE_NULL ){
+      rc = newRowid(pRtree, &cell.iRowid);
+    }else{
+      cell.iRowid = sqlite3_value_int64(azData[2]);
+      sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid);
+      if( SQLITE_ROW==sqlite3_step(pRtree->pReadRowid) ){
+        sqlite3_reset(pRtree->pReadRowid);
+        rc = SQLITE_CONSTRAINT;
+        goto constraint;
+      }
+      rc = sqlite3_reset(pRtree->pReadRowid);
+    }
+
+    if( rc==SQLITE_OK ){
+      rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);
+    }
+    if( rc==SQLITE_OK ){
+      int rc2;
+      pRtree->iReinsertHeight = -1;
+      rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
+      rc2 = nodeRelease(pRtree, pLeaf);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+  }
+
+constraint:
+  rtreeRelease(pRtree);
+  return rc;
+}
+
+/*
+** The xRename method for rtree module virtual tables.
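+**
+** All of an r-tree's persistent state lives in its three shadow tables,
+** so the rename is implemented entirely by the three ALTER TABLE
+** statements constructed below.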
+*/ +static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){ + Rtree *pRtree = (Rtree *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";" + "ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";" + "ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";" + , pRtree->zDb, pRtree->zName, zNewName + , pRtree->zDb, pRtree->zName, zNewName + , pRtree->zDb, pRtree->zName, zNewName + ); + if( zSql ){ + rc = sqlite3_exec(pRtree->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + +static sqlite3_module rtreeModule = { + 0, /* iVersion */ + rtreeCreate, /* xCreate - create a table */ + rtreeConnect, /* xConnect - connect to an existing table */ + rtreeBestIndex, /* xBestIndex - Determine search strategy */ + rtreeDisconnect, /* xDisconnect - Disconnect from a table */ + rtreeDestroy, /* xDestroy - Drop a table */ + rtreeOpen, /* xOpen - open a cursor */ + rtreeClose, /* xClose - close a cursor */ + rtreeFilter, /* xFilter - configure scan constraints */ + rtreeNext, /* xNext - advance a cursor */ + rtreeEof, /* xEof */ + rtreeColumn, /* xColumn - read data */ + rtreeRowid, /* xRowid - read data */ + rtreeUpdate, /* xUpdate - write data */ + 0, /* xBegin - begin transaction */ + 0, /* xSync - sync transaction */ + 0, /* xCommit - commit transaction */ + 0, /* xRollback - rollback transaction */ + 0, /* xFindFunction - function overloading */ + rtreeRename /* xRename - rename the table */ +}; + +static int rtreeSqlInit( + Rtree *pRtree, + sqlite3 *db, + const char *zDb, + const char *zPrefix, + int isCreate +){ + int rc = SQLITE_OK; + + #define N_STATEMENT 9 + static const char *azSql[N_STATEMENT] = { + /* Read and write the xxx_node table */ + "SELECT data FROM '%q'.'%q_node' WHERE nodeno = :1", + "INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(:1, :2)", + "DELETE FROM '%q'.'%q_node' WHERE nodeno = :1", + + /* Read and write the xxx_rowid table */ + "SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = :1", + "INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(:1, :2)", + "DELETE FROM '%q'.'%q_rowid' WHERE rowid = :1", + + /* Read and write the xxx_parent table */ + "SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = :1", + "INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(:1, :2)", + "DELETE FROM '%q'.'%q_parent' WHERE nodeno = :1" + }; + sqlite3_stmt **appStmt[N_STATEMENT]; + int i; + + pRtree->db = db; + + if( isCreate ){ + char *zCreate = sqlite3_mprintf( +"CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY, data BLOB);" +"CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY, nodeno INTEGER);" +"CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY, parentnode INTEGER);" +"INSERT INTO '%q'.'%q_node' VALUES(1, zeroblob(%d))", + zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, pRtree->iNodeSize + ); + if( !zCreate ){ + return SQLITE_NOMEM; + } + rc = sqlite3_exec(db, zCreate, 0, 0, 0); + sqlite3_free(zCreate); + if( rc!=SQLITE_OK ){ + return rc; + } + } + + appStmt[0] = &pRtree->pReadNode; + appStmt[1] = &pRtree->pWriteNode; + appStmt[2] = &pRtree->pDeleteNode; + appStmt[3] = &pRtree->pReadRowid; + appStmt[4] = &pRtree->pWriteRowid; + appStmt[5] = &pRtree->pDeleteRowid; + appStmt[6] = &pRtree->pReadParent; + appStmt[7] = &pRtree->pWriteParent; + appStmt[8] = &pRtree->pDeleteParent; + + for(i=0; i<N_STATEMENT && rc==SQLITE_OK; i++){ + char *zSql = sqlite3_mprintf(azSql[i], zDb, zPrefix); + if( zSql ){ + rc = sqlite3_prepare_v2(db, zSql, -1, appStmt[i], 0); + }else{ + rc = SQLITE_NOMEM; + } + 
sqlite3_free(zSql);
+  }
+
+  return rc;
+}
+
+/*
+** This routine queries database handle db for the page-size used by
+** database zDb. If successful, the page-size in bytes is written to
+** *piPageSize and SQLITE_OK returned. Otherwise, an SQLite error
+** code is returned.
+*/
+static int getPageSize(sqlite3 *db, const char *zDb, int *piPageSize){
+  int rc = SQLITE_NOMEM;
+  char *zSql;
+  sqlite3_stmt *pStmt = 0;
+
+  zSql = sqlite3_mprintf("PRAGMA %Q.page_size", zDb);
+  if( !zSql ){
+    return SQLITE_NOMEM;
+  }
+
+  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  sqlite3_free(zSql);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  if( SQLITE_ROW==sqlite3_step(pStmt) ){
+    *piPageSize = sqlite3_column_int(pStmt, 0);
+  }
+  return sqlite3_finalize(pStmt);
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the r-tree virtual table.
+**
+**   argv[0]   -> module name
+**   argv[1]   -> database name
+**   argv[2]   -> table name
+**   argv[...] -> column names...
+*/
+static int rtreeInit(
+  sqlite3 *db,                        /* Database connection */
+  void *pAux,                         /* Pointer to head of rtree list */
+  int argc, const char *const*argv,   /* Parameters to CREATE TABLE statement */
+  sqlite3_vtab **ppVtab,              /* OUT: New virtual table */
+  char **pzErr,                       /* OUT: Error message, if any */
+  int isCreate,                       /* True for xCreate, false for xConnect */
+  int eCoordType                      /* One of the RTREE_COORD_* constants */
+){
+  int rc = SQLITE_OK;
+  int iPageSize = 0;
+  Rtree *pRtree;
+  int nDb;              /* Length of string argv[1] */
+  int nName;            /* Length of string argv[2] */
+
+  const char *aErrMsg[] = {
+    0,                                                    /* 0 */
+    "Wrong number of columns for an rtree table",         /* 1 */
+    "Too few columns for an rtree table",                 /* 2 */
+    "Too many columns for an rtree table"                 /* 3 */
+  };
+
+  int iErr = (argc<6) ? 2 : argc>(RTREE_MAX_DIMENSIONS*2+4) ? 3 : argc%2;
+  if( aErrMsg[iErr] ){
+    *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]);
+    return SQLITE_ERROR;
+  }
+
+  rc = getPageSize(db, argv[1], &iPageSize);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  /* Allocate the sqlite3_vtab structure */
+  nDb = strlen(argv[1]);
+  nName = strlen(argv[2]);
+  pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2);
+  if( !pRtree ){
+    return SQLITE_NOMEM;
+  }
+  memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+  pRtree->nBusy = 1;
+  pRtree->base.pModule = &rtreeModule;
+  pRtree->zDb = (char *)&pRtree[1];
+  pRtree->zName = &pRtree->zDb[nDb+1];
+  pRtree->nDim = (argc-4)/2;
+  pRtree->nBytesPerCell = 8 + pRtree->nDim*4*2;
+  pRtree->eCoordType = eCoordType;
+  memcpy(pRtree->zDb, argv[1], nDb);
+  memcpy(pRtree->zName, argv[2], nName);
+
+  /* Figure out the node size to use. By default, use 64 bytes less than
+  ** the database page-size. This ensures that each node is stored on
+  ** a single database page.
+  **
+  ** If the database page-size is so large that more than RTREE_MAXCELLS
+  ** entries would fit in a single node, use a smaller node-size.
+  */
+  pRtree->iNodeSize = iPageSize-64;
+  if( (4+pRtree->nBytesPerCell*RTREE_MAXCELLS)<pRtree->iNodeSize ){
+    pRtree->iNodeSize = 4+pRtree->nBytesPerCell*RTREE_MAXCELLS;
+  }
+
+  /* Create/Connect to the underlying relational database schema. If
+  ** that is successful, call sqlite3_declare_vtab() to configure
+  ** the r-tree table schema.
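+  **
+  ** As an illustrative sketch (the table and column names here are
+  ** hypothetical): for
+  **
+  **   CREATE VIRTUAL TABLE demo USING rtree(id, x1, x2, y1, y2);
+  **
+  ** the loop below assembles the string
+  ** "CREATE TABLE x(id, x1, x2, y1, y2);" from argv[3..argc-1] and
+  ** passes it to sqlite3_declare_vtab().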
+  */
+  if( (rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate)) ){
+    *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+  }else{
+    char *zSql = sqlite3_mprintf("CREATE TABLE x(%s", argv[3]);
+    char *zTmp;
+    int ii;
+    for(ii=4; zSql && ii<argc; ii++){
+      zTmp = zSql;
+      zSql = sqlite3_mprintf("%s, %s", zTmp, argv[ii]);
+      sqlite3_free(zTmp);
+    }
+    if( zSql ){
+      zTmp = zSql;
+      zSql = sqlite3_mprintf("%s);", zTmp);
+      sqlite3_free(zTmp);
+    }
+    if( !zSql || sqlite3_declare_vtab(db, zSql) ){
+      rc = SQLITE_NOMEM;
+    }
+    sqlite3_free(zSql);
+  }
+
+  if( rc==SQLITE_OK ){
+    *ppVtab = (sqlite3_vtab *)pRtree;
+  }else{
+    rtreeRelease(pRtree);
+  }
+  return rc;
+}
+
+
+/*
+** Implementation of a scalar function that decodes r-tree nodes to
+** human-readable strings. This can be used for debugging and analysis.
+**
+** The scalar function takes two arguments, a blob of data containing
+** an r-tree node, and the number of dimensions the r-tree indexes.
+** For a two-dimensional r-tree structure called "rt", a statement like
+** the following may be used to deserialize all nodes:
+**
+**   SELECT rtreenode(2, data) FROM rt_node;
+**
+** The human-readable string takes the form of a Tcl list with one
+** entry for each cell in the r-tree node. Each entry is itself a
+** list, containing the 8-byte rowid/pageno followed by the
+** <num-dimension>*2 coordinates.
+*/
+static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
+  char *zText = 0;
+  RtreeNode node;
+  Rtree tree;
+  int ii;
+
+  memset(&node, 0, sizeof(RtreeNode));
+  memset(&tree, 0, sizeof(Rtree));
+  tree.nDim = sqlite3_value_int(apArg[0]);
+  tree.nBytesPerCell = 8 + 8 * tree.nDim;
+  node.zData = (u8 *)sqlite3_value_blob(apArg[1]);
+
+  for(ii=0; ii<NCELL(&node); ii++){
+    char zCell[512];
+    int nCell = 0;
+    RtreeCell cell;
+    int jj;
+
+    nodeGetCell(&tree, &node, ii, &cell);
+    sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid);
+    nCell = strlen(zCell);
+    for(jj=0; jj<tree.nDim*2; jj++){
+      sqlite3_snprintf(512-nCell,&zCell[nCell]," %f",(double)cell.aCoord[jj].f);
+      nCell = strlen(zCell);
+    }
+
+    if( zText ){
+      char *zTextNew = sqlite3_mprintf("%s {%s}", zText, zCell);
+      sqlite3_free(zText);
+      zText = zTextNew;
+    }else{
+      zText = sqlite3_mprintf("{%s}", zCell);
+    }
+  }
+
+  sqlite3_result_text(ctx, zText, -1, sqlite3_free);
+}
+
+static void rtreedepth(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
+  if( sqlite3_value_type(apArg[0])!=SQLITE_BLOB
+   || sqlite3_value_bytes(apArg[0])<2
+  ){
+    sqlite3_result_error(ctx, "Invalid argument to rtreedepth()", -1);
+  }else{
+    u8 *zBlob = (u8 *)sqlite3_value_blob(apArg[0]);
+    sqlite3_result_int(ctx, readInt16(zBlob));
+  }
+}
+
+/*
+** Register the r-tree module with database handle db. This creates the
+** virtual table modules "rtree" and "rtree_i32", and the
+** debugging/analysis scalar functions "rtreenode" and "rtreedepth".
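+**
+** As a usage sketch (the table name and values are hypothetical, not
+** part of this module), an application might then run:
+**
+**   CREATE VIRTUAL TABLE demo_index USING rtree(id, minX, maxX, minY, maxY);
+**   INSERT INTO demo_index VALUES(1, 2.0, 3.0, 4.0, 5.0);
+**   SELECT id FROM demo_index WHERE minX<=2.5 AND maxX>=2.5;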
+*/ +int sqlite3RtreeInit(sqlite3 *db){ + int rc = SQLITE_OK; + + if( rc==SQLITE_OK ){ + int utf8 = SQLITE_UTF8; + rc = sqlite3_create_function(db, "rtreenode", 2, utf8, 0, rtreenode, 0, 0); + } + if( rc==SQLITE_OK ){ + int utf8 = SQLITE_UTF8; + rc = sqlite3_create_function(db, "rtreedepth", 1, utf8, 0,rtreedepth, 0, 0); + } + if( rc==SQLITE_OK ){ + void *c = (void *)RTREE_COORD_REAL32; + rc = sqlite3_create_module_v2(db, "rtree", &rtreeModule, c, 0); + } + if( rc==SQLITE_OK ){ + void *c = (void *)RTREE_COORD_INT32; + rc = sqlite3_create_module_v2(db, "rtree_i32", &rtreeModule, c, 0); + } + + return rc; +} + +#if !SQLITE_CORE +int sqlite3_extension_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3RtreeInit(db); +} +#endif + +#endif diff --git a/third_party/sqlite/ext/rtree/rtree.h b/third_party/sqlite/ext/rtree/rtree.h new file mode 100755 index 0000000..1fdbccc --- /dev/null +++ b/third_party/sqlite/ext/rtree/rtree.h @@ -0,0 +1,26 @@ +/* +** 2008 May 26 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file is used by programs that want to link against the +** RTREE library. All it does is declare the sqlite3RtreeInit() interface. +*/ +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +int sqlite3RtreeInit(sqlite3 *db); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/third_party/sqlite/ext/rtree/rtree1.test b/third_party/sqlite/ext/rtree/rtree1.test new file mode 100755 index 0000000..ee9866f --- /dev/null +++ b/third_party/sqlite/ext/rtree/rtree1.test @@ -0,0 +1,364 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the r-tree extension. +# +# $Id: rtree1.test,v 1.5 2008/07/14 15:37:01 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source [file join [file dirname [info script]] rtree_util.tcl] +source $testdir/tester.tcl + +# Test plan: +# +# rtree-1.*: Creating/destroying r-tree tables. +# rtree-2.*: Test the implicit constraints - unique rowid and +# (coord[N]<=coord[N+1]) for even values of N. Also +# automatic assigning of rowid values. +# rtree-3.*: Linear scans of r-tree data. +# rtree-4.*: Test INSERT +# rtree-5.*: Test DELETE +# rtree-6.*: Test UPDATE +# rtree-7.*: Test renaming an r-tree table. +# rtree-8.*: Test constrained scans of r-tree data. +# + +ifcapable !rtree { + finish_test + return +} + +#---------------------------------------------------------------------------- +# Test cases rtree-1.* test CREATE and DROP table statements. +# + +# Test creating and dropping an rtree table. 
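+# (Each r-tree table is backed by three shadow tables named
+# <name>_node, <name>_parent and <name>_rowid; the sqlite_master
+# queries below check that they are created and dropped with it.)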
+#
+do_test rtree-1.1.1 {
+  execsql { CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2) }
+} {}
+do_test rtree-1.1.2 {
+  execsql { SELECT name FROM sqlite_master ORDER BY name }
+} {t1 t1_node t1_parent t1_rowid}
+do_test rtree-1.1.3 {
+  execsql {
+    DROP TABLE t1;
+    SELECT name FROM sqlite_master ORDER BY name;
+  }
+} {}
+
+# Test creating and dropping an rtree table with an odd name in
+# an attached database.
+#
+do_test rtree-1.2.1 {
+  file delete -force test2.db
+  execsql {
+    ATTACH 'test2.db' AS aux;
+    CREATE VIRTUAL TABLE aux.'a" "b' USING rtree(ii, x1, x2, y1, y2);
+  }
+} {}
+do_test rtree-1.2.2 {
+  execsql { SELECT name FROM sqlite_master ORDER BY name }
+} {}
+do_test rtree-1.2.3 {
+  execsql { SELECT name FROM aux.sqlite_master ORDER BY name }
+} {{a" "b} {a" "b_node} {a" "b_parent} {a" "b_rowid}}
+do_test rtree-1.2.4 {
+  execsql {
+    DROP TABLE aux.'a" "b';
+    SELECT name FROM aux.sqlite_master ORDER BY name;
+  }
+} {}
+
+# Test the logic that checks the number of columns specified for
+# an rtree table. Acceptable values are odd numbers between 3 and
+# 11, inclusive.
+#
+set cols [list i1 i2 i3 i4 i5 i6 i7 i8 i9 iA iB iC iD iE iF iG iH iI iJ iK]
+for {set nCol 1} {$nCol<[llength $cols]} {incr nCol} {
+
+  set columns [join [lrange $cols 0 [expr {$nCol-1}]] ,]
+
+  set X {0 {}}
+  if {$nCol%2 == 0} { set X {1 {Wrong number of columns for an rtree table}} }
+  if {$nCol < 3}    { set X {1 {Too few columns for an rtree table}} }
+  if {$nCol > 11}   { set X {1 {Too many columns for an rtree table}} }
+
+  do_test rtree-1.3.$nCol {
+    catchsql "
+      CREATE VIRTUAL TABLE t1 USING rtree($columns);
+    "
+  } $X
+
+  catchsql { DROP TABLE t1 }
+}
+
+# Test that it is possible to open an existing database that contains
+# r-tree tables.
+#
+do_test rtree-1.4.1 {
+  execsql {
+    CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2);
+    INSERT INTO t1 VALUES(1, 5.0, 10.0);
+    INSERT INTO t1 VALUES(2, 15.0, 20.0);
+  }
+} {}
+do_test rtree-1.4.2 {
+  db close
+  sqlite3 db test.db
+  execsql { SELECT * FROM t1 ORDER BY ii }
+} {1 5.0 10.0 2 15.0 20.0}
+do_test rtree-1.4.3 {
+  execsql { DROP TABLE t1 }
+} {}
+
+# Test that it is possible to create an r-tree table with ridiculous
+# column names.
+#
+do_test rtree-1.5.1 {
+  execsql {
+    CREATE VIRTUAL TABLE t1 USING rtree("the key", "x dim.", "x2'dim");
+    INSERT INTO t1 VALUES(1, 2, 3);
+    SELECT "the key", "x dim.", "x2'dim" FROM t1;
+  }
+} {1 2.0 3.0}
+do_test rtree-1.5.2 {
+  execsql { DROP TABLE t1 }
+} {}
+
+# Force the r-tree constructor to fail.
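+#
+# (The ordinary "t1_rowid" table created below occupies the name that
+# the r-tree constructor needs for its rowid shadow table, so the
+# CREATE VIRTUAL TABLE statement is expected to fail.)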
+#
+do_test rtree-1.6.1 {
+  execsql { CREATE TABLE t1_rowid(a); }
+  catchsql {
+    CREATE VIRTUAL TABLE t1 USING rtree("the key", "x dim.", "x2'dim");
+  }
+} {1 {table "t1_rowid" already exists}}
+do_test rtree-1.6.2 {
+  execsql { DROP TABLE t1_rowid }
+} {}
+
+#----------------------------------------------------------------------------
+# Test cases rtree-2.*
+#
+do_test rtree-2.1.1 {
+  execsql {
+    CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2);
+    SELECT * FROM t1;
+  }
+} {}
+
+do_test rtree-2.1.2 {
+  execsql { INSERT INTO t1 VALUES(NULL, 1, 3, 2, 4) }
+  execsql { SELECT * FROM t1 }
+} {1 1.0 3.0 2.0 4.0}
+do_test rtree-2.1.3 {
+  execsql { INSERT INTO t1 VALUES(NULL, 1, 3, 2, 4) }
+  execsql { SELECT rowid FROM t1 ORDER BY rowid }
+} {1 2}
+do_test rtree-2.1.4 {
+  execsql { INSERT INTO t1 VALUES(NULL, 1, 3, 2, 4) }
+  execsql { SELECT ii FROM t1 ORDER BY ii }
+} {1 2 3}
+
+do_test rtree-2.2.1 {
+  catchsql { INSERT INTO t1 VALUES(2, 1, 3, 2, 4) }
+} {1 {constraint failed}}
+do_test rtree-2.2.2 {
+  catchsql { INSERT INTO t1 VALUES(4, 1, 3, 4, 2) }
+} {1 {constraint failed}}
+do_test rtree-2.2.3 {
+  catchsql { INSERT INTO t1 VALUES(4, 3, 1, 2, 4) }
+} {1 {constraint failed}}
+do_test rtree-2.2.4 {
+  execsql { SELECT ii FROM t1 ORDER BY ii }
+} {1 2 3}
+
+do_test rtree-2.X {
+  execsql { DROP TABLE t1 }
+} {}
+
+#----------------------------------------------------------------------------
+# Test cases rtree-3.* test linear scans of r-tree table data. To test
+# this we have to insert some data into an r-tree, but that is not the
+# focus of these tests.
+#
+do_test rtree-3.1.1 {
+  execsql {
+    CREATE VIRTUAL TABLE t1 USING rtree(ii, x1, x2, y1, y2);
+    SELECT * FROM t1;
+  }
+} {}
+do_test rtree-3.1.2 {
+  execsql {
+    INSERT INTO t1 VALUES(5, 1, 3, 2, 4);
+    SELECT * FROM t1;
+  }
+} {5 1.0 3.0 2.0 4.0}
+do_test rtree-3.1.3 {
+  execsql {
+    INSERT INTO t1 VALUES(6, 2, 6, 4, 8);
+    SELECT * FROM t1;
+  }
+} {5 1.0 3.0 2.0 4.0 6 2.0 6.0 4.0 8.0}
+
+# Test the constraint on the coordinates (c[i]<=c[i+1] where (i%2==0)):
+do_test rtree-3.2.1 {
+  catchsql { INSERT INTO t1 VALUES(7, 2, 6, 4, 3) }
+} {1 {constraint failed}}
+do_test rtree-3.2.2 {
+  catchsql { INSERT INTO t1 VALUES(8, 2, 6, 3, 3) }
+} {0 {}}
+
+#----------------------------------------------------------------------------
+# Test cases rtree-5.* test DELETE operations.
+#
+do_test rtree-5.1.1 {
+  execsql { CREATE VIRTUAL TABLE t2 USING rtree(ii, x1, x2) }
+} {}
+do_test rtree-5.1.2 {
+  execsql {
+    INSERT INTO t2 VALUES(1, 10, 20);
+    INSERT INTO t2 VALUES(2, 30, 40);
+    INSERT INTO t2 VALUES(3, 50, 60);
+    SELECT * FROM t2 ORDER BY ii;
+  }
+} {1 10.0 20.0 2 30.0 40.0 3 50.0 60.0}
+do_test rtree-5.1.3 {
+  execsql {
+    DELETE FROM t2 WHERE ii=2;
+    SELECT * FROM t2 ORDER BY ii;
+  }
+} {1 10.0 20.0 3 50.0 60.0}
+do_test rtree-5.1.4 {
+  execsql {
+    DELETE FROM t2 WHERE ii=1;
+    SELECT * FROM t2 ORDER BY ii;
+  }
+} {3 50.0 60.0}
+do_test rtree-5.1.5 {
+  execsql {
+    DELETE FROM t2 WHERE ii=3;
+    SELECT * FROM t2 ORDER BY ii;
+  }
+} {}
+do_test rtree-5.1.6 {
+  execsql { SELECT * FROM t2_rowid }
+} {}
+
+#----------------------------------------------------------------------------
+# Test cases rtree-6.* test UPDATE operations.
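+#
+# (The r-tree module implements an UPDATE as a single xUpdate call that
+# deletes the old record and then re-inserts the new values, so these
+# cases exercise both halves of rtreeUpdate().)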
+# +do_test rtree-6.1.1 { + execsql { CREATE VIRTUAL TABLE t3 USING rtree(ii, x1, x2, y1, y2) } +} {} +do_test rtree-6.1.2 { + execsql { + INSERT INTO t3 VALUES(1, 2, 3, 4, 5); + UPDATE t3 SET x2=5; + SELECT * FROM t3; + } +} {1 2.0 5.0 4.0 5.0} +do_test rtree-6.1.3 { + execsql { UPDATE t3 SET ii = 2 } + execsql { SELECT * FROM t3 } +} {2 2.0 5.0 4.0 5.0} + +#---------------------------------------------------------------------------- +# Test cases rtree-7.* test rename operations. +# +do_test rtree-7.1.1 { + execsql { + CREATE VIRTUAL TABLE t4 USING rtree(ii, x1, x2, y1, y2, z1, z2); + INSERT INTO t4 VALUES(1, 2, 3, 4, 5, 6, 7); + } +} {} +do_test rtree-7.1.2 { + execsql { ALTER TABLE t4 RENAME TO t5 } + execsql { SELECT * FROM t5 } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.3 { + db close + sqlite3 db test.db + execsql { SELECT * FROM t5 } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.4 { + execsql { ALTER TABLE t5 RENAME TO 'raisara "one"'''} + execsql { SELECT * FROM "raisara ""one""'" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.5 { + execsql { SELECT * FROM 'raisara "one"''' } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.6 { + execsql { ALTER TABLE "raisara ""one""'" RENAME TO "abc 123" } + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.1.7 { + db close + sqlite3 db test.db + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} + +# An error midway through a rename operation. +do_test rtree-7.2.1 { + execsql { + CREATE TABLE t4_node(a); + } + catchsql { ALTER TABLE "abc 123" RENAME TO t4 } +} {1 {SQL logic error or missing database}} +do_test rtree-7.2.2 { + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.2.3 { + execsql { + DROP TABLE t4_node; + CREATE TABLE t4_rowid(a); + } + catchsql { ALTER TABLE "abc 123" RENAME TO t4 } +} {1 {SQL logic error or missing database}} +do_test rtree-7.2.4 { + db close + sqlite3 db test.db + execsql { SELECT * FROM "abc 123" } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} +do_test rtree-7.2.5 { + execsql { DROP TABLE t4_rowid } + execsql { ALTER TABLE "abc 123" RENAME TO t4 } + execsql { SELECT * FROM t4 } +} {1 2.0 3.0 4.0 5.0 6.0 7.0} + + +#---------------------------------------------------------------------------- +# Test cases rtree-8.* +# + +# Test that the function to determine if a leaf cell is part of the +# result set works. +do_test rtree-8.1.1 { + execsql { + CREATE VIRTUAL TABLE t6 USING rtree(ii, x1, x2); + INSERT INTO t6 VALUES(1, 3, 7); + INSERT INTO t6 VALUES(2, 4, 6); + } +} {} +do_test rtree-8.1.2 { execsql { SELECT ii FROM t6 WHERE x1>2 } } {1 2} +do_test rtree-8.1.3 { execsql { SELECT ii FROM t6 WHERE x1>3 } } {2} +do_test rtree-8.1.4 { execsql { SELECT ii FROM t6 WHERE x1>4 } } {} +do_test rtree-8.1.5 { execsql { SELECT ii FROM t6 WHERE x1>5 } } {} +do_test rtree-8.1.6 { execsql { SELECT ii FROM t6 WHERE x1<3 } } {} +do_test rtree-8.1.7 { execsql { SELECT ii FROM t6 WHERE x1<4 } } {1} +do_test rtree-8.1.8 { execsql { SELECT ii FROM t6 WHERE x1<5 } } {1 2} + + +finish_test diff --git a/third_party/sqlite/ext/rtree/rtree2.test b/third_party/sqlite/ext/rtree/rtree2.test new file mode 100755 index 0000000..7e38c8f --- /dev/null +++ b/third_party/sqlite/ext/rtree/rtree2.test @@ -0,0 +1,152 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. 
+# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing the r-tree extension. +# +# $Id: rtree2.test,v 1.4 2008/07/14 15:37:01 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source [file join [file dirname [info script]] rtree_util.tcl] +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +set ::NROW 1000 +set ::NDEL 10 +set ::NSELECT 100 + +if {[info exists ISQUICK] && $ISQUICK} { + set ::NROW 100 + set ::NSELECT 10 +} + +foreach module {rtree_i32 rtree} { + for {set nDim 1} {$nDim <= 5} {incr nDim} { + + do_test rtree2-$module.$nDim.1 { + set cols [list] + foreach c [list c0 c1 c2 c3 c4 c5 c6 c7 c8 c9] { + lappend cols "$c REAL" + } + set cols [join [lrange $cols 0 [expr {$nDim*2-1}]] ", "] + execsql " + CREATE VIRTUAL TABLE t1 USING ${module}(ii, $cols); + CREATE TABLE t2 (ii, $cols); + " + } {} + + do_test rtree2-$module.$nDim.2 { + db transaction { + for {set ii 0} {$ii < $::NROW} {incr ii} { + #puts "Row $ii" + set values [list] + for {set jj 0} {$jj<$nDim*2} {incr jj} { + lappend values [expr int(rand()*1000)] + } + set values [join $values ,] + #puts [rtree_treedump db t1] + #puts "INSERT INTO t2 VALUES($ii, $values)" + set rc [catch {db eval "INSERT INTO t1 VALUES($ii, $values)"}] + if {$rc} { + incr ii -1 + } else { + db eval "INSERT INTO t2 VALUES($ii, $values)" + } + #if {[rtree_check db t1]} { + #puts [rtree_treedump db t1] + #exit + #} + } + } + + set t1 [execsql {SELECT * FROM t1 ORDER BY ii}] + set t2 [execsql {SELECT * FROM t2 ORDER BY ii}] + set rc [expr {$t1 eq $t2}] + if {$rc != 1} { + puts $t1 + puts $t2 + } + set rc + } {1} + + do_test rtree2-$module.$nDim.3 { + rtree_check db t1 + } 0 + + set OPS [list < > <= >= =] + for {set ii 0} {$ii < $::NSELECT} {incr ii} { + do_test rtree2-$module.$nDim.4.$ii.1 { + set where [list] + foreach look_three_dots! {. . .} { + set colidx [expr int(rand()*($nDim*2+1))-1] + if {$colidx<0} { + set col ii + } else { + set col "c$colidx" + } + set op [lindex $OPS [expr int(rand()*[llength $OPS])]] + set val [expr int(rand()*1000)] + lappend where "$col $op $val" + } + set where [join $where " AND "] + + set t1 [execsql "SELECT * FROM t1 WHERE $where ORDER BY ii"] + set t2 [execsql "SELECT * FROM t2 WHERE $where ORDER BY ii"] + set rc [expr {$t1 eq $t2}] + if {$rc != 1} { + #puts $where + puts $t1 + puts $t2 + #puts [rtree_treedump db t1] + #breakpoint + #set t1 [execsql "SELECT * FROM t1 WHERE $where ORDER BY ii"] + #exit + } + set rc + } {1} + } + + for {set ii 0} {$ii < $::NROW} {incr ii $::NDEL} { + #puts [rtree_treedump db t1] + do_test rtree2-$module.$nDim.5.$ii.1 { + execsql "DELETE FROM t2 WHERE ii <= $::ii" + execsql "DELETE FROM t1 WHERE ii <= $::ii" + + set t1 [execsql {SELECT * FROM t1 ORDER BY ii}] + set t2 [execsql {SELECT * FROM t2 ORDER BY ii}] + set rc [expr {$t1 eq $t2}] + if {$rc != 1} { + puts $t1 + puts $t2 + } + set rc + } {1} + do_test rtree2-$module.$nDim.5.$ii.2 { + rtree_check db t1 + } {0} + } + + do_test rtree2-$module.$nDim.6 { + execsql { + DROP TABLE t1; + DROP TABLE t2; + } + } {} + } +} + +finish_test diff --git a/third_party/sqlite/ext/rtree/rtree3.test b/third_party/sqlite/ext/rtree/rtree3.test new file mode 100755 index 0000000..b83ceeb4 --- /dev/null +++ b/third_party/sqlite/ext/rtree/rtree3.test @@ -0,0 +1,74 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The focus of this file is testing that the r-tree correctly handles +# out-of-memory conditions. +# +# $Id: rtree3.test,v 1.2 2008/06/23 15:55:52 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +# Only run these tests if memory debugging is turned on. +# +source $testdir/malloc_common.tcl +if {!$MEMDEBUG} { + puts "Skipping malloc tests: not compiled with -DSQLITE_MEMDEBUG..." + finish_test + return +} + +do_malloc_test rtree3-1 -sqlbody { + BEGIN TRANSACTION; + CREATE VIRTUAL TABLE rt USING rtree(ii, x1, x2, y1, y2); + INSERT INTO rt VALUES(NULL, 3, 5, 7, 9); + INSERT INTO rt VALUES(NULL, 13, 15, 17, 19); + DELETE FROM rt WHERE ii = 1; + SELECT * FROM rt; + SELECT ii FROM rt WHERE ii = 2; + COMMIT; +} +do_malloc_test rtree3-2 -sqlprep { + CREATE VIRTUAL TABLE rt USING rtree(ii, x1, x2, y1, y2); + INSERT INTO rt VALUES(NULL, 3, 5, 7, 9); +} -sqlbody { + DROP TABLE rt; +} + + +do_malloc_test rtree3-3 -sqlprep { + CREATE VIRTUAL TABLE rt USING rtree(ii, x1, x2, y1, y2); + INSERT INTO rt VALUES(NULL, 3, 5, 7, 9); +} -tclbody { + db eval BEGIN + for {set ii 0} {$ii < 100} {incr ii} { + set f [expr rand()] + db eval {INSERT INTO rt VALUES(NULL, $f*10.0, $f*10.0, $f*15.0, $f*15.0)} + } + db eval COMMIT + db eval BEGIN + for {set ii 0} {$ii < 100} {incr ii} { + set f [expr rand()] + db eval { DELETE FROM rt WHERE x1<($f*10.0) AND x1>($f*10.5) } + } + db eval COMMIT +} + +finish_test + diff --git a/third_party/sqlite/ext/rtree/rtree4.test b/third_party/sqlite/ext/rtree/rtree4.test new file mode 100755 index 0000000..d73e7a6 --- /dev/null +++ b/third_party/sqlite/ext/rtree/rtree4.test @@ -0,0 +1,236 @@ +# 2008 May 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Randomized test cases for the rtree extension. +# +# $Id: rtree4.test,v 1.3 2008/06/23 15:55:52 danielk1977 Exp $ +# + +if {![info exists testdir]} { + set testdir [file join [file dirname $argv0] .. .. test] +} +source $testdir/tester.tcl + +ifcapable !rtree { + finish_test + return +} + +set ::NROW 2500 +if {[info exists ISQUICK] && $ISQUICK} { + set ::NROW 250 +} + +# Return a floating point number between -X and X. +# +proc rand {X} { + return [expr {int((rand()-0.5)*1024.0*$X)/512.0}] +} + +# Return a positive floating point number less than or equal to X +# +proc randincr {X} { + while 1 { + set r [expr {int(rand()*$X*32.0)/32.0}] + if {$r>0.0} {return $r} + } +} + +# Scramble the $inlist into a random order. +# +proc scramble {inlist} { + set y {} + foreach x $inlist { + lappend y [list [expr {rand()}] $x] + } + set y [lsort $y] + set outlist {} + foreach x $y { + lappend outlist [lindex $x 1] + } + return $outlist +} + +# Always use the same random seed so that the sequence of tests +# is repeatable. +# +expr {srand(1234)} + +# Run these tests for all number of dimensions between 1 and 5. 
+#
+for {set nDim 1} {$nDim<=5} {incr nDim} {

+  # Construct an rtree virtual table and an ordinary btree table
+  # to mirror it. The ordinary table should be much slower (since
+  # it has to do a full table scan) but should give the exact same
+  # answers.
+  #
+  do_test rtree4-$nDim.1 {
+    set clist {}
+    set cklist {}
+    for {set i 0} {$i<$nDim} {incr i} {
+      lappend clist mn$i mx$i
+      lappend cklist "mn$i<mx$i"
+    }
+    db eval "DROP TABLE IF EXISTS rx"
+    db eval "DROP TABLE IF EXISTS bx"
+    db eval "CREATE VIRTUAL TABLE rx USING rtree(id, [join $clist ,])"
+    db eval "CREATE TABLE bx(id INTEGER PRIMARY KEY,\
+                [join $clist ,], CHECK( [join $cklist { AND }] ))"
+  } {}
+
+  # Do many insertions of small objects. Do both overlapping and
+  # contained-within queries after each insert to verify that all
+  # is well.
+  #
+  unset -nocomplain where
+  for {set i 1} {$i<$::NROW} {incr i} {
+    # Do a random insert
+    #
+    do_test rtree-$nDim.2.$i.1 {
+      set vlist {}
+      for {set j 0} {$j<$nDim} {incr j} {
+        set mn [rand 10000]
+        set mx [expr {$mn+[randincr 50]}]
+        lappend vlist $mn $mx
+      }
+      db eval "INSERT INTO rx VALUES(NULL, [join $vlist ,])"
+      db eval "INSERT INTO bx VALUES(NULL, [join $vlist ,])"
+    } {}
+
+    # Do a contained-in query on all dimensions
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn [rand 10000]
+      set mx [expr {$mn+[randincr 500]}]
+      lappend where mn$j>=$mn mx$j<=$mx
+    }
+    set where "WHERE [join $where { AND }]"
+    do_test rtree-$nDim.2.$i.2 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do an overlaps query on all dimensions
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn [rand 10000]
+      set mx [expr {$mn+[randincr 500]}]
+      lappend where mx$j>=$mn mn$j<=$mx
+    }
+    set where "WHERE [join $where { AND }]"
+    do_test rtree-$nDim.2.$i.3 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do a contained-in query with surplus constraints at the beginning.
+    # This should force a full-table scan on the rtree.
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      lappend where mn$j>-10000 mx$j<10000
+    }
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn [rand 10000]
+      set mx [expr {$mn+[randincr 500]}]
+      lappend where mn$j>=$mn mx$j<=$mx
+    }
+    set where "WHERE [join $where { AND }]"
+    do_test rtree-$nDim.2.$i.3 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do an overlaps query with surplus constraints at the beginning.
+    # This should force a full-table scan on the rtree.
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      lappend where mn$j>=-10000 mx$j<=10000
+    }
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn [rand 10000]
+      set mx [expr {$mn+[randincr 500]}]
+      lappend where mx$j>$mn mn$j<$mx
+    }
+    set where "WHERE [join $where { AND }]"
+    do_test rtree-$nDim.2.$i.4 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do a contained-in query with surplus constraints at the end
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn [rand 10000]
+      set mx [expr {$mn+[randincr 500]}]
+      lappend where mn$j>=$mn mx$j<$mx
+    }
+    for {set j [expr {$nDim-1}]} {$j>=0} {incr j -1} {
+      lappend where mn$j>=-10000 mx$j<10000
+    }
+    set where "WHERE [join $where { AND }]"
+    do_test rtree-$nDim.2.$i.5 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do an overlaps query with surplus constraints at the end
+    #
+    set where {}
+    for {set j [expr {$nDim-1}]} {$j>=0} {incr j -1} {
+      set mn [rand 10000]
+      set mx [expr {$mn+[randincr 500]}]
+      lappend where mx$j>$mn mn$j<=$mx
+    }
+    for {set j 0} {$j<$nDim} {incr j} {
+      lappend where mx$j>-10000 mn$j<=10000
+    }
+    set where "WHERE [join $where { AND }]"
+    do_test rtree-$nDim.2.$i.6 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do a contained-in query with surplus constraints where the
+    # constraints appear in a random order.
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn1 [rand 10000]
+      set mn2 [expr {$mn1+[randincr 100]}]
+      set mx1 [expr {$mn2+[randincr 400]}]
+      set mx2 [expr {$mx1+[randincr 100]}]
+      lappend where mn$j>=$mn1 mn$j>$mn2 mx$j<$mx1 mx$j<=$mx2
+    }
+    set where "WHERE [join [scramble $where] { AND }]"
+    do_test rtree-$nDim.2.$i.7 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+
+    # Do an overlaps query with surplus constraints where the
+    # constraints appear in a random order.
+    #
+    set where {}
+    for {set j 0} {$j<$nDim} {incr j} {
+      set mn1 [rand 10000]
+      set mn2 [expr {$mn1+[randincr 100]}]
+      set mx1 [expr {$mn2+[randincr 400]}]
+      set mx2 [expr {$mx1+[randincr 100]}]
+      lappend where mx$j>=$mn1 mx$j>$mn2 mn$j<$mx1 mn$j<=$mx2
+    }
+    set where "WHERE [join [scramble $where] { AND }]"
+    do_test rtree-$nDim.2.$i.8 {
+      list $where [db eval "SELECT id FROM rx $where ORDER BY id"]
+    } [list $where [db eval "SELECT id FROM bx $where ORDER BY id"]]
+  }
+
+}
+
+finish_test
diff --git a/third_party/sqlite/ext/rtree/rtree5.test b/third_party/sqlite/ext/rtree/rtree5.test
new file mode 100755
index 0000000..4fa007f
--- /dev/null
+++ b/third_party/sqlite/ext/rtree/rtree5.test
@@ -0,0 +1,80 @@
+# 2008 Jul 14
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+#    May you do good and not evil.
+#    May you find forgiveness for yourself and forgive others.
+#    May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the r-tree extension when it is
+# configured to store values as 32-bit integers.
+#
+# $Id: rtree5.test,v 1.1 2008/07/14 15:37:01 danielk1977 Exp $
+#
+
+if {![info exists testdir]} {
+  set testdir [file join [file dirname $argv0] .. ..
test]
+}
+source $testdir/tester.tcl
+
+ifcapable !rtree {
+  finish_test
+  return
+}
+
+do_test rtree5-1.0 {
+  execsql { CREATE VIRTUAL TABLE t1 USING rtree_i32(id, x1, x2, y1, y2) }
+} {}
+do_test rtree5-1.1 {
+  execsql { INSERT INTO t1 VALUES(1, 5, 10, 4, 11.2) }
+} {}
+do_test rtree5-1.2 {
+  execsql { SELECT * FROM t1 }
+} {1 5 10 4 11}
+do_test rtree5-1.3 {
+  execsql { SELECT typeof(x1) FROM t1 }
+} {integer}

+do_test rtree5-1.4 {
+  execsql { SELECT x1==5 FROM t1 }
+} {1}
+do_test rtree5-1.5 {
+  execsql { SELECT x1==5.2 FROM t1 }
+} {0}
+do_test rtree5-1.6 {
+  execsql { SELECT x1==5.0 FROM t1 }
+} {1}
+
+do_test rtree5-1.7 {
+  execsql { SELECT count(*) FROM t1 WHERE x1==5 }
+} {1}
+do_test rtree5-1.8 {
+  execsql { SELECT count(*) FROM t1 WHERE x1==5.2 }
+} {0}
+do_test rtree5-1.9 {
+  execsql { SELECT count(*) FROM t1 WHERE x1==5.0 }
+} {1}
+
+do_test rtree5-1.10 {
+  execsql { SELECT (1<<31)-5, (1<<31)-1, -1*(1<<31), -1*(1<<31)+5 }
+} {2147483643 2147483647 -2147483648 -2147483643}
+do_test rtree5-1.11 {
+  execsql {
+    INSERT INTO t1 VALUES(2, (1<<31)-5, (1<<31)-1, -1*(1<<31), -1*(1<<31)+5)
+  }
+} {}
+do_test rtree5-1.12 {
+  execsql { SELECT * FROM t1 WHERE id=2 }
+} {2 2147483643 2147483647 -2147483648 -2147483643}
+do_test rtree5-1.13 {
+  execsql {
+    SELECT * FROM t1 WHERE
+      x1=2147483643 AND x2=2147483647 AND
+      y1=-2147483648 AND y2=-2147483643
+  }
+} {2 2147483643 2147483647 -2147483648 -2147483643}
+
+finish_test
diff --git a/third_party/sqlite/ext/rtree/rtree_perf.tcl b/third_party/sqlite/ext/rtree/rtree_perf.tcl
new file mode 100755
index 0000000..fa3a4d3
--- /dev/null
+++ b/third_party/sqlite/ext/rtree/rtree_perf.tcl
@@ -0,0 +1,76 @@
+
+set testdir [file join [file dirname $argv0] .. .. test]
+source $testdir/tester.tcl
+
+ifcapable !rtree {
+  finish_test
+  return
+}
+
+set NROW 10000
+set NQUERY 500
+
+puts "Generating $NROW rows of data..."
+set data [list]
+for {set ii 0} {$ii < $NROW} {incr ii} {
+  set x1 [expr {rand()*1000}]
+  set x2 [expr {$x1+rand()*50}]
+  set y1 [expr {rand()*1000}]
+  set y2 [expr {$y1+rand()*50}]
+  lappend data $x1 $x2 $y1 $y2
+}
+puts "Finished generating data"
+
+
+set sql1 {CREATE TABLE btree(ii INTEGER PRIMARY KEY, x1, x2, y1, y2)}
+set sql2 {CREATE VIRTUAL TABLE rtree USING rtree(ii, x1, x2, y1, y2)}
+puts "Creating tables:"
+puts "  $sql1"
+puts "  $sql2"
+db eval $sql1
+db eval $sql2
+
+db eval "pragma cache_size=100"
+
+puts -nonewline "Inserting into btree... "
+flush stdout
+set btree_time [time {db transaction {
+  set ii 1
+  foreach {x1 x2 y1 y2} $data {
+    db eval {INSERT INTO btree VALUES($ii, $x1, $x2, $y1, $y2)}
+    incr ii
+  }
+}}]
+puts "$btree_time"
+
+puts -nonewline "Inserting into rtree... "
+flush stdout
+set rtree_time [time {db transaction {
+  set ii 1
+  foreach {x1 x2 y1 y2} $data {
+    incr ii
+    db eval {INSERT INTO rtree VALUES($ii, $x1, $x2, $y1, $y2)}
+  }
+}}]
+puts "$rtree_time"
+
+
+puts -nonewline "Selecting from btree... "
+flush stdout
+set btree_select_time [time {
+  foreach {x1 x2 y1 y2} [lrange $data 0 [expr $NQUERY*4-1]] {
+    db eval {SELECT * FROM btree WHERE x1<$x1 AND x2>$x2 AND y1<$y1 AND y2>$y2}
+  }
+}]
+puts "$btree_select_time"
+
+puts -nonewline "Selecting from rtree... 
" +flush stdout +set rtree_select_time [time { + foreach {x1 x2 y1 y2} [lrange $data 0 [expr $NQUERY*4-1]] { + db eval {SELECT * FROM rtree WHERE x1<$x1 AND x2>$x2 AND y1<$y1 AND y2>$y2} + } +}] +puts "$rtree_select_time" + + diff --git a/third_party/sqlite/ext/rtree/rtree_util.tcl b/third_party/sqlite/ext/rtree/rtree_util.tcl new file mode 100755 index 0000000..55482e4 --- /dev/null +++ b/third_party/sqlite/ext/rtree/rtree_util.tcl @@ -0,0 +1,195 @@ +# 2008 Feb 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains Tcl code that may be useful for testing or +# analyzing r-tree structures created with this module. It is +# used by both test procedures and the r-tree viewer application. +# +# $Id: rtree_util.tcl,v 1.1 2008/05/26 18:41:54 danielk1977 Exp $ +# + + +#-------------------------------------------------------------------------- +# PUBLIC API: +# +# rtree_depth +# rtree_ndim +# rtree_node +# rtree_mincells +# rtree_check +# rtree_dump +# rtree_treedump +# + +proc rtree_depth {db zTab} { + $db one "SELECT rtreedepth(data) FROM ${zTab}_node WHERE nodeno=1" +} + +proc rtree_nodedepth {db zTab iNode} { + set iDepth [rtree_depth $db $zTab] + + set ii $iNode + while {$ii != 1} { + set sql "SELECT parentnode FROM ${zTab}_parent WHERE nodeno = $ii" + set ii [db one $sql] + incr iDepth -1 + } + + return $iDepth +} + +# Return the number of dimensions of the rtree. +# +proc rtree_ndim {db zTab} { + set nDim [expr {(([llength [$db eval "pragma table_info($zTab)"]]/6)-1)/2}] +} + +# Return the contents of rtree node $iNode. +# +proc rtree_node {db zTab iNode {iPrec 6}} { + set nDim [rtree_ndim $db $zTab] + set sql " + SELECT rtreenode($nDim, data) FROM ${zTab}_node WHERE nodeno = $iNode + " + set node [db one $sql] + + set nCell [llength $node] + set nCoord [expr $nDim*2] + for {set ii 0} {$ii < $nCell} {incr ii} { + for {set jj 1} {$jj <= $nCoord} {incr jj} { + set newval [format "%.${iPrec}f" [lindex $node $ii $jj]] + lset node $ii $jj $newval + } + } + set node +} + +proc rtree_mincells {db zTab} { + set n [$db one "select length(data) FROM ${zTab}_node LIMIT 1"] + set nMax [expr {int(($n-4)/(8+[rtree_ndim $db $zTab]*2*4))}] + return [expr {int($nMax/3)}] +} + +# An integrity check for the rtree $zTab accessible via database +# connection $db. +# +proc rtree_check {db zTab} { + array unset ::checked + + # Check each r-tree node. + set rc [catch { + rtree_node_check $db $zTab 1 [rtree_depth $db $zTab] + } msg] + if {$rc && $msg ne ""} { error $msg } + + # Check that the _rowid and _parent tables have the right + # number of entries. 
+ set nNode [$db one "SELECT count(*) FROM ${zTab}_node"] + set nRow [$db one "SELECT count(*) FROM ${zTab}"] + set nRowid [$db one "SELECT count(*) FROM ${zTab}_rowid"] + set nParent [$db one "SELECT count(*) FROM ${zTab}_parent"] + + if {$nNode != ($nParent+1)} { + error "Wrong number of entries in ${zTab}_parent" + } + if {$nRow != $nRowid} { + error "Wrong number of entries in ${zTab}_rowid" + } + + return $rc +} + +proc rtree_node_check {db zTab iNode iDepth} { + if {[info exists ::checked($iNode)]} { error "Second ref to $iNode" } + set ::checked($iNode) 1 + + set node [rtree_node $db $zTab $iNode] + if {$iNode!=1 && [llength $node]==0} { error "No such node: $iNode" } + + if {$iNode != 1 && [llength $node]<[rtree_mincells $db $zTab]} { + puts "Node $iNode: Has only [llength $node] cells" + error "" + } + if {$iNode == 1 && [llength $node]==1 && [rtree_depth $db $zTab]>0} { + set depth [rtree_depth $db $zTab] + puts "Node $iNode: Has only 1 child (tree depth is $depth)" + error "" + } + + set nDim [expr {([llength [lindex $node 0]]-1)/2}] + + if {$iDepth > 0} { + set d [expr $iDepth-1] + foreach cell $node { + set shouldbe [rtree_node_check $db $zTab [lindex $cell 0] $d] + if {$cell ne $shouldbe} { + puts "Node $iNode: Cell is: {$cell}, should be {$shouldbe}" + error "" + } + } + } + + set mapping_table "${zTab}_parent" + set mapping_sql "SELECT parentnode FROM $mapping_table WHERE rowid = \$rowid" + if {$iDepth==0} { + set mapping_table "${zTab}_rowid" + set mapping_sql "SELECT nodeno FROM $mapping_table WHERE rowid = \$rowid" + } + foreach cell $node { + set rowid [lindex $cell 0] + set mapping [db one $mapping_sql] + if {$mapping != $iNode} { + puts "Node $iNode: $mapping_table entry for cell $rowid is $mapping" + error "" + } + } + + set ret [list $iNode] + for {set ii 1} {$ii <= $nDim*2} {incr ii} { + set f [lindex $node 0 $ii] + foreach cell $node { + set f2 [lindex $cell $ii] + if {($ii%2)==1 && $f2<$f} {set f $f2} + if {($ii%2)==0 && $f2>$f} {set f $f2} + } + lappend ret $f + } + return $ret +} + +proc rtree_dump {db zTab} { + set zRet "" + set nDim [expr {(([llength [$db eval "pragma table_info($zTab)"]]/6)-1)/2}] + set sql "SELECT nodeno, rtreenode($nDim, data) AS node FROM ${zTab}_node" + $db eval $sql { + append zRet [format "% -10s %s\n" $nodeno $node] + } + set zRet +} + +proc rtree_nodetreedump {db zTab zIndent iDepth iNode} { + set ret "" + set node [rtree_node $db $zTab $iNode 1] + append ret [format "%-3d %s%s\n" $iNode $zIndent $node] + if {$iDepth>0} { + foreach cell $node { + set i [lindex $cell 0] + append ret [rtree_nodetreedump $db $zTab "$zIndent " [expr $iDepth-1] $i] + } + } + set ret +} + +proc rtree_treedump {db zTab} { + set d [rtree_depth $db $zTab] + rtree_nodetreedump $db $zTab "" $d 1 +} + diff --git a/third_party/sqlite/ext/rtree/viewrtree.tcl b/third_party/sqlite/ext/rtree/viewrtree.tcl new file mode 100755 index 0000000..2b4dd1b --- /dev/null +++ b/third_party/sqlite/ext/rtree/viewrtree.tcl @@ -0,0 +1,189 @@ + +load ./libsqlite3.dylib +#package require sqlite3 +source [file join [file dirname $argv0] rtree_util.tcl] + +wm title . 
"SQLite r-tree viewer" + +if {[llength $argv]!=1} { + puts stderr "Usage: $argv0 <database-file>" + puts stderr "" + exit +} +sqlite3 db [lindex $argv 0] + +canvas .c -background white -width 400 -height 300 -highlightthickness 0 + +button .b -text "Parent Node" -command { + set sql "SELECT parentnode FROM $::O(zTab)_parent WHERE nodeno = $::O(iNode)" + set ::O(iNode) [db one $sql] + if {$::O(iNode) eq ""} {set ::O(iNode) 1} + view_node +} + +set O(iNode) 1 +set O(zTab) "" +set O(listbox_captions) [list] +set O(listbox_itemmap) [list] +set O(listbox_highlight) -1 + +listbox .l -listvariable ::O(listbox_captions) -yscrollcommand {.ls set} +scrollbar .ls -command {.l yview} +label .status -font courier -anchor w +label .title -anchor w -text "Node 1:" -background white -borderwidth 0 + + +set rtree_tables [list] +db eval { + SELECT name + FROM sqlite_master + WHERE type='table' AND sql LIKE '%virtual%table%using%rtree%' +} { + set nCol [expr [llength [db eval "pragma table_info($name)"]]/6] + if {$nCol != 5} { + puts stderr "Not viewing $name - is not 2-dimensional" + } else { + lappend rtree_tables [list Table $name] + } +} +if {$rtree_tables eq ""} { + puts stderr "Cannot find an r-tree table in database [lindex $argv 0]" + puts stderr "" + exit +} +eval tk_optionMenu .select option_var $rtree_tables +trace add variable option_var write set_option_var +proc set_option_var {args} { + set ::O(zTab) [lindex $::option_var 1] + set ::O(iNode) 1 + view_node +} +set ::O(zTab) [lindex $::rtree_tables 0 1] + +bind .l <1> {listbox_click [.l nearest %y]} +bind .l <Motion> {listbox_mouseover [.l nearest %y]} +bind .l <Leave> {listbox_mouseover -1} + +proc listbox_click {sel} { + if {$sel ne ""} { + set ::O(iNode) [lindex $::O(listbox_captions) $sel 1] + view_node + } +} +proc listbox_mouseover {i} { + set oldid [lindex $::O(listbox_itemmap) $::O(listbox_highlight)] + .c itemconfigure $oldid -fill "" + + .l selection clear 0 end + .status configure -text "" + if {$i>=0} { + set id [lindex $::O(listbox_itemmap) $i] + .c itemconfigure $id -fill grey + .c lower $id + set ::O(listbox_highlight) $i + .l selection set $i + .status configure -text [cell_report db $::O(zTab) $::O(iNode) $i] + } +} + +grid configure .select -row 0 -column 0 -columnspan 2 -sticky nsew +grid configure .b -row 1 -column 0 -columnspan 2 -sticky nsew +grid configure .l -row 2 -column 0 -sticky nsew +grid configure .status -row 3 -column 0 -columnspan 3 -sticky nsew + +grid configure .title -row 0 -column 2 -sticky nsew +grid configure .c -row 1 -column 2 -rowspan 2 -sticky nsew +grid configure .ls -row 2 -column 1 -sticky nsew + +grid columnconfigure . 2 -weight 1 +grid rowconfigure . 
2 -weight 1
+
+proc node_bbox {data} {
+  set xmin 0
+  set xmax 0
+  set ymin 0
+  set ymax 0
+  foreach {rowid xmin xmax ymin ymax} [lindex $data 0] break
+  foreach cell [lrange $data 1 end] {
+    foreach {rowid x1 x2 y1 y2} $cell break
+    if {$x1 < $xmin} {set xmin $x1}
+    if {$x2 > $xmax} {set xmax $x2}
+    if {$y1 < $ymin} {set ymin $y1}
+    if {$y2 > $ymax} {set ymax $y2}
+  }
+  list $xmin $xmax $ymin $ymax
+}
+
+proc view_node {} {
+  set iNode $::O(iNode)
+  set zTab $::O(zTab)
+
+  set data [rtree_node db $zTab $iNode 12]
+  set depth [rtree_nodedepth db $zTab $iNode]
+
+  .c delete all
+  set ::O(listbox_captions) [list]
+  set ::O(listbox_itemmap) [list]
+  set ::O(listbox_highlight) -1
+
+  .b configure -state normal
+  if {$iNode == 1} {.b configure -state disabled}
+  .title configure -text "Node $iNode: [cell_report db $zTab $iNode -1]"
+
+  foreach {xmin xmax ymin ymax} [node_bbox $data] break
+  set total_area 0.0
+
+  set xscale [expr {double([winfo width .c]-20)/($xmax-$xmin)}]
+  set yscale [expr {double([winfo height .c]-20)/($ymax-$ymin)}]
+
+  set xoff [expr {10.0 - $xmin*$xscale}]
+  set yoff [expr {10.0 - $ymin*$yscale}]
+
+  foreach cell $data {
+    foreach {rowid x1 x2 y1 y2} $cell break
+    set total_area [expr {$total_area + ($x2-$x1)*($y2-$y1)}]
+    set x1 [expr {$x1*$xscale + $xoff}]
+    set x2 [expr {$x2*$xscale + $xoff}]
+    set y1 [expr {$y1*$yscale + $yoff}]
+    set y2 [expr {$y2*$yscale + $yoff}]
+
+    set id [.c create rectangle $x1 $y1 $x2 $y2]
+    if {$depth>0} {
+      lappend ::O(listbox_captions) "Node $rowid"
+      lappend ::O(listbox_itemmap) $id
+    }
+  }
+}
+
+proc cell_report {db zTab iParent iCell} {
+  set data [rtree_node db $zTab $iParent 12]
+  set cell [lindex $data $iCell]
+
+  foreach {xmin xmax ymin ymax} [node_bbox $data] break
+  set total_area [expr ($xmax-$xmin)*($ymax-$ymin)]
+
+  if {$cell eq ""} {
+    set cell_area 0.0
+    foreach cell $data {
+      foreach {rowid x1 x2 y1 y2} $cell break
+      set cell_area [expr $cell_area+($x2-$x1)*($y2-$y1)]
+    }
+    set cell_area [expr $cell_area/[llength $data]]
+    set zReport [format "Size = %.1f x %.1f    Average child area = %.1f%%" \
+      [expr $xmax-$xmin] [expr $ymax-$ymin] [expr 100.0*$cell_area/$total_area]\
+    ]
+    append zReport "   Sub-tree height: [rtree_nodedepth db $zTab $iParent]"
+  } else {
+    foreach {rowid x1 x2 y1 y2} $cell break
+    set cell_area [expr ($x2-$x1)*($y2-$y1)]
+    set zReport [format "Size = %.1f x %.1f    Area = %.1f%%" \
+      [expr $x2-$x1] [expr $y2-$y1] [expr 100.0*$cell_area/$total_area]
+    ]
+  }
+
+  return $zReport
+}
+
+view_node
+bind .c <Configure> view_node