Diffstat (limited to 'third_party/sqlite/tool')
-rwxr-xr-x  third_party/sqlite/tool/diffdb.c                  44
-rwxr-xr-x  third_party/sqlite/tool/fragck.tcl               149
-rwxr-xr-x  third_party/sqlite/tool/lemon.c                 4866
-rwxr-xr-x  third_party/sqlite/tool/lempar.c                 813
-rwxr-xr-x  third_party/sqlite/tool/memleak.awk               29
-rwxr-xr-x  third_party/sqlite/tool/memleak2.awk              29
-rwxr-xr-x  third_party/sqlite/tool/memleak3.tcl             233
-rwxr-xr-x  third_party/sqlite/tool/mkkeywordhash.c          559
-rwxr-xr-x  third_party/sqlite/tool/mkopts.tcl                51
-rwxr-xr-x  third_party/sqlite/tool/mksqlite3c.tcl           288
-rwxr-xr-x  third_party/sqlite/tool/mksqlite3internalh.tcl   146
-rwxr-xr-x  third_party/sqlite/tool/omittest.tcl             215
-rwxr-xr-x  third_party/sqlite/tool/opcodeDoc.awk             23
-rwxr-xr-x  third_party/sqlite/tool/report1.txt               66
-rwxr-xr-x  third_party/sqlite/tool/showdb.c                  86
-rwxr-xr-x  third_party/sqlite/tool/showjournal.c             76
-rwxr-xr-x  third_party/sqlite/tool/soak1.tcl                103
-rwxr-xr-x  third_party/sqlite/tool/space_used.tcl           111
-rwxr-xr-x  third_party/sqlite/tool/spaceanal.tcl            863
-rwxr-xr-x  third_party/sqlite/tool/speedtest.tcl            275
-rwxr-xr-x  third_party/sqlite/tool/speedtest16.c            169
-rwxr-xr-x  third_party/sqlite/tool/speedtest2.tcl           207
-rwxr-xr-x  third_party/sqlite/tool/speedtest8.c             351
-rwxr-xr-x  third_party/sqlite/tool/speedtest8inst1.c        216
24 files changed, 9968 insertions, 0 deletions
diff --git a/third_party/sqlite/tool/diffdb.c b/third_party/sqlite/tool/diffdb.c
new file mode 100755
index 0000000..0537d38
--- /dev/null
+++ b/third_party/sqlite/tool/diffdb.c
@@ -0,0 +1,44 @@
+/*
+** A utility for printing the differences between two SQLite database files.
+*/
+#include <stdio.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+#define PAGESIZE 1024
+static int db1 = -1;
+static int db2 = -1;
+
+int main(int argc, char **argv){
+ int iPg;
+ unsigned char a1[PAGESIZE], a2[PAGESIZE];
+ if( argc!=3 ){
+ fprintf(stderr,"Usage: %s FILENAME FILENAME\n", argv[0]);
+ exit(1);
+ }
+ db1 = open(argv[1], O_RDONLY);
+ if( db1<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
+ exit(1);
+ }
+ db2 = open(argv[2], O_RDONLY);
+ if( db2<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[2]);
+ exit(1);
+ }
+ iPg = 1;
+ while( read(db1, a1, PAGESIZE)==PAGESIZE && read(db2,a2,PAGESIZE)==PAGESIZE ){
+ if( memcmp(a1,a2,PAGESIZE) ){
+ printf("Page %d\n", iPg);
+ }
+ iPg++;
+ }
+ printf("%d pages checked\n", iPg-1);
+ close(db1);
+ close(db2);
+}
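One limitation of the tool above: it hard-codes PAGESIZE as 1024 and relies on an
implicit declaration of memcmp() (adding #include <string.h> would silence that).
A minimal sketch of reading the real page size from the database header is shown
below; read_page_size() is an illustrative helper, not part of the tool, and it
assumes the big-endian 16-bit page-size field at byte offset 16 of the SQLite
file header.

/* Hypothetical helper: return the page size recorded in the database header,
** or -1 if the header cannot be read.  Assumes <unistd.h> is included, as in
** diffdb.c above. */
static int read_page_size(int fd){
  unsigned char hdr[18];
  if( lseek(fd, 0, SEEK_SET)<0 ) return -1;
  if( read(fd, hdr, sizeof(hdr))!=(ssize_t)sizeof(hdr) ) return -1;
  return (hdr[16]<<8) | hdr[17];   /* big-endian 16-bit value */
}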
diff --git a/third_party/sqlite/tool/fragck.tcl b/third_party/sqlite/tool/fragck.tcl
new file mode 100755
index 0000000..35e76f4
--- /dev/null
+++ b/third_party/sqlite/tool/fragck.tcl
@@ -0,0 +1,149 @@
+# Run this TCL script using "testfixture" to get a report that shows
+# the sequence of database pages used by a particular table or index.
+# This information is used for fragmentation analysis.
+#
+
+# Get the name of the database to analyze
+#
+
+if {[llength $argv]!=2} {
+ puts stderr "Usage: $argv0 database-name table-or-index-name"
+ exit 1
+}
+set file_to_analyze [lindex $argv 0]
+if {![file exists $file_to_analyze]} {
+ puts stderr "No such file: $file_to_analyze"
+ exit 1
+}
+if {![file readable $file_to_analyze]} {
+ puts stderr "File is not readable: $file_to_analyze"
+ exit 1
+}
+if {[file size $file_to_analyze]<512} {
+ puts stderr "Empty or malformed database: $file_to_analyze"
+ exit 1
+}
+set objname [lindex $argv 1]
+
+# Open the database
+#
+sqlite3 db [lindex $argv 0]
+set DB [btree_open [lindex $argv 0] 1000 0]
+
+# This proc is a wrapper around the btree_cursor_info command. The
+# second argument is an open btree cursor returned by [btree_cursor].
+# The first argument is the name of an array variable that exists in
+# the scope of the caller. If the third argument is non-zero, then
+# info is returned for the page that lies $up entries upwards in the
+# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the
+# grandparent etc.)
+#
+# The following entries in that array are filled in with information retrieved
+# using [btree_cursor_info]:
+#
+# $arrayvar(page_no) = The page number
+# $arrayvar(entry_no) = The entry number
+# $arrayvar(page_entries) = Total number of entries on this page
+# $arrayvar(cell_size) = Cell size (local payload + header)
+# $arrayvar(page_freebytes) = Number of free bytes on this page
+# $arrayvar(page_freeblocks) = Number of free blocks on the page
+# $arrayvar(payload_bytes) = Total payload size (local + overflow)
+# $arrayvar(header_bytes) = Header size in bytes
+# $arrayvar(local_payload_bytes) = Local payload size
+# $arrayvar(parent) = Parent page number
+#
+proc cursor_info {arrayvar csr {up 0}} {
+ upvar $arrayvar a
+ foreach [list a(page_no) \
+ a(entry_no) \
+ a(page_entries) \
+ a(cell_size) \
+ a(page_freebytes) \
+ a(page_freeblocks) \
+ a(payload_bytes) \
+ a(header_bytes) \
+ a(local_payload_bytes) \
+ a(parent) \
+ a(first_ovfl) ] [btree_cursor_info $csr $up] break
+}
+
+# Determine the page-size of the database. This global variable is used
+# throughout the script.
+#
+set pageSize [db eval {PRAGMA page_size}]
+
+# Find the root page of the table or index to be analyzed. Also find out
+# if the object is a table or an index.
+#
+if {$objname=="sqlite_master"} {
+ set rootpage 1
+ set type table
+} else {
+ db eval {
+ SELECT rootpage, type FROM sqlite_master
+ WHERE name=$objname
+ } break
+ if {![info exists rootpage]} {
+ puts stderr "no such table or index: $objname"
+ exit 1
+ }
+ if {$type!="table" && $type!="index"} {
+ puts stderr "$objname is something other than a table or index"
+ exit 1
+ }
+ if {![string is integer -strict $rootpage]} {
+ puts stderr "invalid root page for $objname: $rootpage"
+ exit 1
+ }
+}
+
+# The cursor $csr is pointing to an entry. Print out information
+# about the page that is $up levels above the page that contains
+# the entry. If $up==0, use the page that contains the entry.
+#
+# If information about the page has been printed already, then
+# this is a no-op.
+#
+proc page_info {csr up} {
+ global seen
+ cursor_info ci $csr $up
+ set pg $ci(page_no)
+ if {[info exists seen($pg)]} return
+ set seen($pg) 1
+
+ # Do parent pages first
+ #
+ if {$ci(parent)} {
+ page_info $csr [expr {$up+1}]
+ }
+
+ # Find the depth of this page
+ #
+ set depth 1
+ set i $up
+ while {$ci(parent)} {
+ incr i
+ incr depth
+ cursor_info ci $csr $i
+ }
+
+ # print the results
+ #
+ puts [format {LEVEL %d: %6d} $depth $pg]
+}
+
+
+
+
+# Loop through the object and print out page numbers
+#
+set csr [btree_cursor $DB $rootpage 0]
+for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
+ page_info $csr 0
+ set i 1
+ foreach pg [btree_ovfl_info $DB $csr] {
+ puts [format {OVFL %3d: %6d} $i $pg]
+ incr i
+ }
+}
+exit 0
diff --git a/third_party/sqlite/tool/lemon.c b/third_party/sqlite/tool/lemon.c
new file mode 100755
index 0000000..155fdbc
--- /dev/null
+++ b/third_party/sqlite/tool/lemon.c
@@ -0,0 +1,4866 @@
+/*
+** This file contains all sources (including headers) to the LEMON
+** LALR(1) parser generator. The sources have been combined into a
+** single file to make it easy to include LEMON in the source tree
+** and Makefile of another program.
+**
+** The author of this program disclaims copyright.
+*/
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#ifndef __WIN32__
+# if defined(_WIN32) || defined(WIN32)
+# define __WIN32__
+# endif
+#endif
+
+#ifdef __WIN32__
+extern int access();
+#else
+#include <unistd.h>
+#endif
+
+/* #define PRIVATE static */
+#define PRIVATE
+
+#ifdef TEST
+#define MAXRHS 5 /* Set low to exercise exception code */
+#else
+#define MAXRHS 1000
+#endif
+
+static char *msort(char*,char**,int(*)(const char*,const char*));
+
+static struct action *Action_new(void);
+static struct action *Action_sort(struct action *);
+
+/********** From the file "build.h" ************************************/
+void FindRulePrecedences();
+void FindFirstSets();
+void FindStates();
+void FindLinks();
+void FindFollowSets();
+void FindActions();
+
+/********* From the file "configlist.h" *********************************/
+void Configlist_init(/* void */);
+struct config *Configlist_add(/* struct rule *, int */);
+struct config *Configlist_addbasis(/* struct rule *, int */);
+void Configlist_closure(/* void */);
+void Configlist_sort(/* void */);
+void Configlist_sortbasis(/* void */);
+struct config *Configlist_return(/* void */);
+struct config *Configlist_basis(/* void */);
+void Configlist_eat(/* struct config * */);
+void Configlist_reset(/* void */);
+
+/********* From the file "error.h" ***************************************/
+void ErrorMsg(const char *, int,const char *, ...);
+
+/****** From the file "option.h" ******************************************/
+struct s_options {
+ enum { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR,
+ OPT_FFLAG, OPT_FINT, OPT_FDBL, OPT_FSTR} type;
+ char *label;
+ char *arg;
+ char *message;
+};
+int OptInit(/* char**,struct s_options*,FILE* */);
+int OptNArgs(/* void */);
+char *OptArg(/* int */);
+void OptErr(/* int */);
+void OptPrint(/* void */);
+
+/******** From the file "parse.h" *****************************************/
+void Parse(/* struct lemon *lemp */);
+
+/********* From the file "plink.h" ***************************************/
+struct plink *Plink_new(/* void */);
+void Plink_add(/* struct plink **, struct config * */);
+void Plink_copy(/* struct plink **, struct plink * */);
+void Plink_delete(/* struct plink * */);
+
+/********** From the file "report.h" *************************************/
+void Reprint(/* struct lemon * */);
+void ReportOutput(/* struct lemon * */);
+void ReportTable(/* struct lemon * */);
+void ReportHeader(/* struct lemon * */);
+void CompressTables(/* struct lemon * */);
+void ResortStates(/* struct lemon * */);
+
+/********** From the file "set.h" ****************************************/
+void SetSize(/* int N */); /* All sets will be of size N */
+char *SetNew(/* void */); /* A new set for element 0..N */
+void SetFree(/* char* */); /* Deallocate a set */
+
+int SetAdd(/* char*,int */); /* Add element to a set */
+int SetUnion(/* char *A,char *B */); /* A <- A U B, thru element N */
+
+#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */
+
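The set primitives above represent a set of grammar symbols as a plain character
array, one byte per element; SetFind(X,Y) is just an array lookup. A short sketch
of the calling pattern follows. It assumes it is compiled as part of lemon.c so
that the set routines defined later in the file are visible; set_demo() itself is
illustrative only.

static void set_demo(void){
  char *s;
  SetSize(10);                 /* every set will hold elements 0..N */
  s = SetNew();
  SetAdd(s, 3);                /* returns non-zero if the set changed */
  SetAdd(s, 7);
  if( SetFind(s, 3) ) printf("3 is in the set\n");
  SetFree(s);
}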
+/********** From the file "struct.h" *************************************/
+/*
+** Principal data structures for the LEMON parser generator.
+*/
+
+typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean;
+
+/* Symbols (terminals and nonterminals) of the grammar are stored
+** in the following: */
+struct symbol {
+ char *name; /* Name of the symbol */
+ int index; /* Index number for this symbol */
+ enum {
+ TERMINAL,
+ NONTERMINAL,
+ MULTITERMINAL
+ } type; /* Symbols are all either TERMINALS or NTs */
+ struct rule *rule; /* Linked list of rules of this (if an NT) */
+ struct symbol *fallback; /* fallback token in case this token doesn't parse */
+ int prec; /* Precedence if defined (-1 otherwise) */
+ enum e_assoc {
+ LEFT,
+ RIGHT,
+ NONE,
+ UNK
+ } assoc; /* Associativity if precedence is defined */
+ char *firstset; /* First-set for all rules of this symbol */
+ Boolean lambda; /* True if NT and can generate an empty string */
+ int useCnt; /* Number of times used */
+ char *destructor; /* Code which executes whenever this symbol is
+ ** popped from the stack during error processing */
+ int destLineno; /* Line number for start of destructor */
+ char *datatype; /* The data type of information held by this
+ ** object. Only used if type==NONTERMINAL */
+ int dtnum; /* The data type number. In the parser, the value
+ ** stack is a union. The .yy%d element of this
+ ** union is the correct data type for this object */
+ /* The following fields are used by MULTITERMINALs only */
+ int nsubsym; /* Number of constituent symbols in the MULTI */
+ struct symbol **subsym; /* Array of constituent symbols */
+};
+
+/* Each production rule in the grammar is stored in the following
+** structure. */
+struct rule {
+ struct symbol *lhs; /* Left-hand side of the rule */
+ char *lhsalias; /* Alias for the LHS (NULL if none) */
+ int lhsStart; /* True if left-hand side is the start symbol */
+ int ruleline; /* Line number for the rule */
+ int nrhs; /* Number of RHS symbols */
+ struct symbol **rhs; /* The RHS symbols */
+ char **rhsalias; /* An alias for each RHS symbol (NULL if none) */
+ int line; /* Line number at which code begins */
+ char *code; /* The code executed when this rule is reduced */
+ struct symbol *precsym; /* Precedence symbol for this rule */
+ int index; /* An index number for this rule */
+ Boolean canReduce; /* True if this rule is ever reduced */
+ struct rule *nextlhs; /* Next rule with the same LHS */
+ struct rule *next; /* Next rule in the global list */
+};
+
+/* A configuration is a production rule of the grammar together with
+** a mark (dot) showing how much of that rule has been processed so far.
+** Configurations also contain a follow-set which is a list of terminal
+** symbols which are allowed to immediately follow the end of the rule.
+** Every configuration is recorded as an instance of the following: */
+struct config {
+ struct rule *rp; /* The rule upon which the configuration is based */
+ int dot; /* The parse point */
+ char *fws; /* Follow-set for this configuration only */
+ struct plink *fplp; /* Follow-set forward propagation links */
+ struct plink *bplp; /* Follow-set backwards propagation links */
+ struct state *stp; /* Pointer to state which contains this */
+ enum {
+ COMPLETE, /* The status is used during followset and */
+ INCOMPLETE /* shift computations */
+ } status;
+ struct config *next; /* Next configuration in the state */
+ struct config *bp; /* The next basis configuration */
+};
+
+/* Every shift or reduce operation is stored as one of the following */
+struct action {
+ struct symbol *sp; /* The look-ahead symbol */
+ enum e_action {
+ SHIFT,
+ ACCEPT,
+ REDUCE,
+ ERROR,
+ SSCONFLICT, /* A shift/shift conflict */
+ SRCONFLICT, /* Was a reduce, but part of a conflict */
+ RRCONFLICT, /* Was a reduce, but part of a conflict */
+ SH_RESOLVED, /* Was a shift. Precedence resolved conflict */
+ RD_RESOLVED, /* Was reduce. Precedence resolved conflict */
+ NOT_USED /* Deleted by compression */
+ } type;
+ union {
+ struct state *stp; /* The new state, if a shift */
+ struct rule *rp; /* The rule, if a reduce */
+ } x;
+ struct action *next; /* Next action for this state */
+ struct action *collide; /* Next action with the same hash */
+};
+
+/* Each state of the generated parser's finite state machine
+** is encoded as an instance of the following structure. */
+struct state {
+ struct config *bp; /* The basis configurations for this state */
+ struct config *cfp; /* All configurations in this set */
+ int statenum; /* Sequential number for this state */
+ struct action *ap; /* Array of actions for this state */
+ int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */
+ int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */
+ int iDflt; /* Default action */
+};
+#define NO_OFFSET (-2147483647)
+
+/* A followset propagation link indicates that the contents of one
+** configuration followset should be propagated to another whenever
+** the first changes. */
+struct plink {
+ struct config *cfp; /* The configuration to which linked */
+ struct plink *next; /* The next propagate link */
+};
+
+/* The state vector for the entire parser generator is recorded as
+** follows. (LEMON uses no global variables and makes little use of
+** static variables. Fields in the following structure can be thought
+** of as being global variables in the program.) */
+struct lemon {
+ struct state **sorted; /* Table of states sorted by state number */
+ struct rule *rule; /* List of all rules */
+ int nstate; /* Number of states */
+ int nrule; /* Number of rules */
+ int nsymbol; /* Number of terminal and nonterminal symbols */
+ int nterminal; /* Number of terminal symbols */
+ struct symbol **symbols; /* Sorted array of pointers to symbols */
+ int errorcnt; /* Number of errors */
+ struct symbol *errsym; /* The error symbol */
+ struct symbol *wildcard; /* Token that matches anything */
+ char *name; /* Name of the generated parser */
+ char *arg; /* Declaration of the 3rd argument to the parser */
+ char *tokentype; /* Type of terminal symbols in the parser stack */
+ char *vartype; /* The default type of non-terminal symbols */
+ char *start; /* Name of the start symbol for the grammar */
+ char *stacksize; /* Size of the parser stack */
+ char *include; /* Code to put at the start of the C file */
+ char *error; /* Code to execute when an error is seen */
+ char *overflow; /* Code to execute on a stack overflow */
+ char *failure; /* Code to execute on parser failure */
+ char *accept; /* Code to execute when the parser accepts */
+ char *extracode; /* Code appended to the generated file */
+ char *tokendest; /* Code to execute to destroy token data */
+ char *vardest; /* Code for the default non-terminal destructor */
+ char *filename; /* Name of the input file */
+ char *outname; /* Name of the current output file */
+ char *tokenprefix; /* A prefix added to token names in the .h file */
+ int nconflict; /* Number of parsing conflicts */
+ int tablesize; /* Size of the parse tables */
+ int basisflag; /* Print only basis configurations */
+ int has_fallback; /* True if any %fallback is seen in the grammar */
+ char *argv0; /* Name of the program */
+};
+
+#define MemoryCheck(X) if((X)==0){ \
+ extern void memory_error(); \
+ memory_error(); \
+}
+
+/**************** From the file "table.h" *********************************/
+/*
+** All code in this file has been automatically generated
+** from a specification in the file
+** "table.q"
+** by the associative array code building program "aagen".
+** Do not edit this file! Instead, edit the specification
+** file, then rerun aagen.
+*/
+/*
+** Code for processing tables in the LEMON parser generator.
+*/
+
+/* Routines for handling strings */
+
+char *Strsafe();
+
+void Strsafe_init(/* void */);
+int Strsafe_insert(/* char * */);
+char *Strsafe_find(/* char * */);
+
+/* Routines for handling symbols of the grammar */
+
+struct symbol *Symbol_new();
+int Symbolcmpp(/* struct symbol **, struct symbol ** */);
+void Symbol_init(/* void */);
+int Symbol_insert(/* struct symbol *, char * */);
+struct symbol *Symbol_find(/* char * */);
+struct symbol *Symbol_Nth(/* int */);
+int Symbol_count(/* */);
+struct symbol **Symbol_arrayof(/* */);
+
+/* Routines to manage the state table */
+
+int Configcmp(/* struct config *, struct config * */);
+struct state *State_new();
+void State_init(/* void */);
+int State_insert(/* struct state *, struct config * */);
+struct state *State_find(/* struct config * */);
+struct state **State_arrayof(/* */);
+
+/* Routines used for efficiency in Configlist_add */
+
+void Configtable_init(/* void */);
+int Configtable_insert(/* struct config * */);
+struct config *Configtable_find(/* struct config * */);
+void Configtable_clear(/* int(*)(struct config *) */);
+/****************** From the file "action.c" *******************************/
+/*
+** Routines processing parser actions in the LEMON parser generator.
+*/
+
+/* Allocate a new parser action */
+static struct action *Action_new(void){
+ static struct action *freelist = 0;
+ struct action *new;
+
+ if( freelist==0 ){
+ int i;
+ int amt = 100;
+ freelist = (struct action *)calloc(amt, sizeof(struct action));
+ if( freelist==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new parser action.");
+ exit(1);
+ }
+ for(i=0; i<amt-1; i++) freelist[i].next = &freelist[i+1];
+ freelist[amt-1].next = 0;
+ }
+ new = freelist;
+ freelist = freelist->next;
+ return new;
+}
+
+/* Compare two actions for sorting purposes. Return negative, zero, or
+** positive if the first action is less than, equal to, or greater than
+** the second.
+*/
+static int actioncmp(
+ struct action *ap1,
+ struct action *ap2
+){
+ int rc;
+ rc = ap1->sp->index - ap2->sp->index;
+ if( rc==0 ){
+ rc = (int)ap1->type - (int)ap2->type;
+ }
+ if( rc==0 && ap1->type==REDUCE ){
+ rc = ap1->x.rp->index - ap2->x.rp->index;
+ }
+ return rc;
+}
+
+/* Sort parser actions */
+static struct action *Action_sort(
+ struct action *ap
+){
+ ap = (struct action *)msort((char *)ap,(char **)&ap->next,
+ (int(*)(const char*,const char*))actioncmp);
+ return ap;
+}
+
+void Action_add(app,type,sp,arg)
+struct action **app;
+enum e_action type;
+struct symbol *sp;
+char *arg;
+{
+ struct action *new;
+ new = Action_new();
+ new->next = *app;
+ *app = new;
+ new->type = type;
+ new->sp = sp;
+ if( type==SHIFT ){
+ new->x.stp = (struct state *)arg;
+ }else{
+ new->x.rp = (struct rule *)arg;
+ }
+}
+/********************** New code to implement the "acttab" module ***********/
+/*
+** This module implements routines used to construct the yy_action[] table.
+*/
+
+/*
+** The state of the yy_action table under construction is an instance of
+** the following structure
+*/
+typedef struct acttab acttab;
+struct acttab {
+ int nAction; /* Number of used slots in aAction[] */
+ int nActionAlloc; /* Slots allocated for aAction[] */
+ struct {
+ int lookahead; /* Value of the lookahead token */
+ int action; /* Action to take on the given lookahead */
+ } *aAction, /* The yy_action[] table under construction */
+ *aLookahead; /* A single new transaction set */
+ int mnLookahead; /* Minimum aLookahead[].lookahead */
+ int mnAction; /* Action associated with mnLookahead */
+ int mxLookahead; /* Maximum aLookahead[].lookahead */
+ int nLookahead; /* Used slots in aLookahead[] */
+ int nLookaheadAlloc; /* Slots allocated in aLookahead[] */
+};
+
+/* Return the number of entries in the yy_action table */
+#define acttab_size(X) ((X)->nAction)
+
+/* The value for the N-th entry in yy_action */
+#define acttab_yyaction(X,N) ((X)->aAction[N].action)
+
+/* The value for the N-th entry in yy_lookahead */
+#define acttab_yylookahead(X,N) ((X)->aAction[N].lookahead)
+
+/* Free all memory associated with the given acttab */
+void acttab_free(acttab *p){
+ free( p->aAction );
+ free( p->aLookahead );
+ free( p );
+}
+
+/* Allocate a new acttab structure */
+acttab *acttab_alloc(void){
+ acttab *p = calloc( 1, sizeof(*p) );
+ if( p==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new acttab.");
+ exit(1);
+ }
+ memset(p, 0, sizeof(*p));
+ return p;
+}
+
+/* Add a new action to the current transaction set
+*/
+void acttab_action(acttab *p, int lookahead, int action){
+ if( p->nLookahead>=p->nLookaheadAlloc ){
+ p->nLookaheadAlloc += 25;
+ p->aLookahead = realloc( p->aLookahead,
+ sizeof(p->aLookahead[0])*p->nLookaheadAlloc );
+ if( p->aLookahead==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ }
+ if( p->nLookahead==0 ){
+ p->mxLookahead = lookahead;
+ p->mnLookahead = lookahead;
+ p->mnAction = action;
+ }else{
+ if( p->mxLookahead<lookahead ) p->mxLookahead = lookahead;
+ if( p->mnLookahead>lookahead ){
+ p->mnLookahead = lookahead;
+ p->mnAction = action;
+ }
+ }
+ p->aLookahead[p->nLookahead].lookahead = lookahead;
+ p->aLookahead[p->nLookahead].action = action;
+ p->nLookahead++;
+}
+
+/*
+** Add the transaction set built up with prior calls to acttab_action()
+** into the current action table. Then reset the transaction set back
+** to an empty set in preparation for a new round of acttab_action() calls.
+**
+** Return the offset into the action table of the new transaction.
+*/
+int acttab_insert(acttab *p){
+ int i, j, k, n;
+ assert( p->nLookahead>0 );
+
+ /* Make sure we have enough space to hold the expanded action table
+ ** in the worst case. The worst case occurs if the transaction set
+ ** must be appended to the current action table
+ */
+ n = p->mxLookahead + 1;
+ if( p->nAction + n >= p->nActionAlloc ){
+ int oldAlloc = p->nActionAlloc;
+ p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20;
+ p->aAction = realloc( p->aAction,
+ sizeof(p->aAction[0])*p->nActionAlloc);
+ if( p->aAction==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ for(i=oldAlloc; i<p->nActionAlloc; i++){
+ p->aAction[i].lookahead = -1;
+ p->aAction[i].action = -1;
+ }
+ }
+
+ /* Scan the existing action table looking for an offset where we can
+ ** insert the current transaction set. Fall out of the loop when that
+ ** offset is found. In the worst case, we fall out of the loop when
+ ** i reaches p->nAction, which means we append the new transaction set.
+ **
+ ** i is the index in p->aAction[] where p->mnLookahead is inserted.
+ */
+ for(i=0; i<p->nAction+p->mnLookahead; i++){
+ if( p->aAction[i].lookahead<0 ){
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ if( k<0 ) break;
+ if( p->aAction[k].lookahead>=0 ) break;
+ }
+ if( j<p->nLookahead ) continue;
+ for(j=0; j<p->nAction; j++){
+ if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break;
+ }
+ if( j==p->nAction ){
+ break; /* Fits in empty slots */
+ }
+ }else if( p->aAction[i].lookahead==p->mnLookahead ){
+ if( p->aAction[i].action!=p->mnAction ) continue;
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ if( k<0 || k>=p->nAction ) break;
+ if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break;
+ if( p->aLookahead[j].action!=p->aAction[k].action ) break;
+ }
+ if( j<p->nLookahead ) continue;
+ n = 0;
+ for(j=0; j<p->nAction; j++){
+ if( p->aAction[j].lookahead<0 ) continue;
+ if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++;
+ }
+ if( n==p->nLookahead ){
+ break; /* Same as a prior transaction set */
+ }
+ }
+ }
+ /* Insert transaction set at index i. */
+ for(j=0; j<p->nLookahead; j++){
+ k = p->aLookahead[j].lookahead - p->mnLookahead + i;
+ p->aAction[k] = p->aLookahead[j];
+ if( k>=p->nAction ) p->nAction = k+1;
+ }
+ p->nLookahead = 0;
+
+ /* Return the offset that is added to the lookahead in order to get the
+ ** index into yy_action of the action */
+ return i - p->mnLookahead;
+}
+
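Taken together, the acttab routines form a two-phase interface: acttab_action()
accumulates one transaction set (all lookahead/action pairs for a single parser
state) and acttab_insert() folds that set into the growing yy_action[] table,
returning the offset that is added to a lookahead to index the table. A minimal
sketch of the calling pattern follows; the states and numeric values are invented
for illustration, and the code assumes it is compiled together with the
definitions above.

static void acttab_demo(void){
  acttab *pTab = acttab_alloc();
  int ofst0, ofst1, i;

  /* Hypothetical state 0: two lookahead/action pairs. */
  acttab_action(pTab, 3, 7);
  acttab_action(pTab, 5, 8);
  ofst0 = acttab_insert(pTab);

  /* Hypothetical state 1: a single pair. */
  acttab_action(pTab, 2, 9);
  ofst1 = acttab_insert(pTab);

  printf("state 0 at offset %d, state 1 at offset %d\n", ofst0, ofst1);
  for(i=0; i<acttab_size(pTab); i++){
    printf("yy_action[%d] = %d (lookahead %d)\n",
           i, acttab_yyaction(pTab,i), acttab_yylookahead(pTab,i));
  }
  acttab_free(pTab);
}

Per the comment at the end of acttab_insert(), the action for a state with
lookahead X is then found at yy_action[offset+X].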
+/********************** From the file "build.c" *****************************/
+/*
+** Routines to construct the finite state machine for the LEMON
+** parser generator.
+*/
+
+/* Find a precedence symbol of every rule in the grammar.
+**
+** Those rules which have a precedence symbol coded in the input
+** grammar using the "[symbol]" construct will already have the
+** rp->precsym field filled. Other rules take as their precedence
+** symbol the first RHS symbol with a defined precedence. If there
+** are no RHS symbols with a defined precedence, the precedence
+** symbol field is left blank.
+*/
+void FindRulePrecedences(xp)
+struct lemon *xp;
+{
+ struct rule *rp;
+ for(rp=xp->rule; rp; rp=rp->next){
+ if( rp->precsym==0 ){
+ int i, j;
+ for(i=0; i<rp->nrhs && rp->precsym==0; i++){
+ struct symbol *sp = rp->rhs[i];
+ if( sp->type==MULTITERMINAL ){
+ for(j=0; j<sp->nsubsym; j++){
+ if( sp->subsym[j]->prec>=0 ){
+ rp->precsym = sp->subsym[j];
+ break;
+ }
+ }
+ }else if( sp->prec>=0 ){
+ rp->precsym = rp->rhs[i];
+ }
+ }
+ }
+ }
+ return;
+}
+
+/* Find all nonterminals which will generate the empty string.
+** Then go back and compute the first sets of every nonterminal.
+** The first set is the set of all terminal symbols which can begin
+** a string generated by that nonterminal.
+*/
+void FindFirstSets(lemp)
+struct lemon *lemp;
+{
+ int i, j;
+ struct rule *rp;
+ int progress;
+
+ for(i=0; i<lemp->nsymbol; i++){
+ lemp->symbols[i]->lambda = LEMON_FALSE;
+ }
+ for(i=lemp->nterminal; i<lemp->nsymbol; i++){
+ lemp->symbols[i]->firstset = SetNew();
+ }
+
+ /* First compute all lambdas */
+ do{
+ progress = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->lhs->lambda ) continue;
+ for(i=0; i<rp->nrhs; i++){
+ struct symbol *sp = rp->rhs[i];
+ if( sp->type!=TERMINAL || sp->lambda==LEMON_FALSE ) break;
+ }
+ if( i==rp->nrhs ){
+ rp->lhs->lambda = LEMON_TRUE;
+ progress = 1;
+ }
+ }
+ }while( progress );
+
+ /* Now compute all first sets */
+ do{
+ struct symbol *s1, *s2;
+ progress = 0;
+ for(rp=lemp->rule; rp; rp=rp->next){
+ s1 = rp->lhs;
+ for(i=0; i<rp->nrhs; i++){
+ s2 = rp->rhs[i];
+ if( s2->type==TERMINAL ){
+ progress += SetAdd(s1->firstset,s2->index);
+ break;
+ }else if( s2->type==MULTITERMINAL ){
+ for(j=0; j<s2->nsubsym; j++){
+ progress += SetAdd(s1->firstset,s2->subsym[j]->index);
+ }
+ break;
+ }else if( s1==s2 ){
+ if( s1->lambda==LEMON_FALSE ) break;
+ }else{
+ progress += SetUnion(s1->firstset,s2->firstset);
+ if( s2->lambda==LEMON_FALSE ) break;
+ }
+ }
+ }
+ }while( progress );
+ return;
+}
+
+/* Compute all LR(0) states for the grammar. Links
+** are added between some states so that the LR(1) follow sets
+** can be computed later.
+*/
+PRIVATE struct state *getstate(/* struct lemon * */); /* forward reference */
+void FindStates(lemp)
+struct lemon *lemp;
+{
+ struct symbol *sp;
+ struct rule *rp;
+
+ Configlist_init();
+
+ /* Find the start symbol */
+ if( lemp->start ){
+ sp = Symbol_find(lemp->start);
+ if( sp==0 ){
+ ErrorMsg(lemp->filename,0,
+"The specified start symbol \"%s\" is not \
+in a nonterminal of the grammar. \"%s\" will be used as the start \
+symbol instead.",lemp->start,lemp->rule->lhs->name);
+ lemp->errorcnt++;
+ sp = lemp->rule->lhs;
+ }
+ }else{
+ sp = lemp->rule->lhs;
+ }
+
+ /* Make sure the start symbol doesn't occur on the right-hand side of
+ ** any rule. Report an error if it does. (YACC would generate a new
+ ** start symbol in this case.) */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ int i;
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhs[i]==sp ){ /* FIX ME: Deal with multiterminals */
+ ErrorMsg(lemp->filename,0,
+"The start symbol \"%s\" occurs on the \
+right-hand side of a rule. This will result in a parser which \
+does not work properly.",sp->name);
+ lemp->errorcnt++;
+ }
+ }
+ }
+
+ /* The basis configuration set for the first state
+ ** is all rules which have the start symbol as their
+ ** left-hand side */
+ for(rp=sp->rule; rp; rp=rp->nextlhs){
+ struct config *newcfp;
+ rp->lhsStart = 1;
+ newcfp = Configlist_addbasis(rp,0);
+ SetAdd(newcfp->fws,0);
+ }
+
+ /* Compute the first state. All other states will be
+ ** computed automatically during the computation of the first one.
+ ** The returned pointer to the first state is not used. */
+ (void)getstate(lemp);
+ return;
+}
+
+/* Return a pointer to a state which is described by the configuration
+** list which has been built from calls to Configlist_add.
+*/
+PRIVATE void buildshifts(/* struct lemon *, struct state * */); /* Forwd ref */
+PRIVATE struct state *getstate(lemp)
+struct lemon *lemp;
+{
+ struct config *cfp, *bp;
+ struct state *stp;
+
+ /* Extract the sorted basis of the new state. The basis was constructed
+ ** by prior calls to "Configlist_addbasis()". */
+ Configlist_sortbasis();
+ bp = Configlist_basis();
+
+ /* Get a state with the same basis */
+ stp = State_find(bp);
+ if( stp ){
+ /* A state with the same basis already exists! Copy all the follow-set
+ ** propagation links from the state under construction into the
+ ** preexisting state, then return a pointer to the preexisting state */
+ struct config *x, *y;
+ for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){
+ Plink_copy(&y->bplp,x->bplp);
+ Plink_delete(x->fplp);
+ x->fplp = x->bplp = 0;
+ }
+ cfp = Configlist_return();
+ Configlist_eat(cfp);
+ }else{
+ /* This really is a new state. Construct all the details */
+ Configlist_closure(lemp); /* Compute the configuration closure */
+ Configlist_sort(); /* Sort the configuration closure */
+ cfp = Configlist_return(); /* Get a pointer to the config list */
+ stp = State_new(); /* A new state structure */
+ MemoryCheck(stp);
+ stp->bp = bp; /* Remember the configuration basis */
+ stp->cfp = cfp; /* Remember the configuration closure */
+ stp->statenum = lemp->nstate++; /* Every state gets a sequence number */
+ stp->ap = 0; /* No actions, yet. */
+ State_insert(stp,stp->bp); /* Add to the state table */
+ buildshifts(lemp,stp); /* Recursively compute successor states */
+ }
+ return stp;
+}
+
+/*
+** Return true if two symbols are the same.
+*/
+int same_symbol(a,b)
+struct symbol *a;
+struct symbol *b;
+{
+ int i;
+ if( a==b ) return 1;
+ if( a->type!=MULTITERMINAL ) return 0;
+ if( b->type!=MULTITERMINAL ) return 0;
+ if( a->nsubsym!=b->nsubsym ) return 0;
+ for(i=0; i<a->nsubsym; i++){
+ if( a->subsym[i]!=b->subsym[i] ) return 0;
+ }
+ return 1;
+}
+
+/* Construct all successor states to the given state. A "successor"
+** state is any state which can be reached by a shift action.
+*/
+PRIVATE void buildshifts(lemp,stp)
+struct lemon *lemp;
+struct state *stp; /* The state from which successors are computed */
+{
+ struct config *cfp; /* For looping thru the config closure of "stp" */
+ struct config *bcfp; /* For the inner loop on config closure of "stp" */
+ struct config *new; /* A basis config for the successor state */
+ struct symbol *sp; /* Symbol following the dot in configuration "cfp" */
+ struct symbol *bsp; /* Symbol following the dot in configuration "bcfp" */
+ struct state *newstp; /* A pointer to a successor state */
+
+ /* Each configuration becomes complete after it contributes to a successor
+ ** state. Initially, all configurations are incomplete */
+ for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE;
+
+ /* Loop through all configurations of the state "stp" */
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ if( cfp->status==COMPLETE ) continue; /* Already used by inner loop */
+ if( cfp->dot>=cfp->rp->nrhs ) continue; /* Can't shift this config */
+ Configlist_reset(); /* Reset the new config set */
+ sp = cfp->rp->rhs[cfp->dot]; /* Symbol after the dot */
+
+ /* For every configuration in the state "stp" which has the symbol "sp"
+ ** following its dot, add the same configuration to the basis set under
+ ** construction but with the dot shifted one symbol to the right. */
+ for(bcfp=cfp; bcfp; bcfp=bcfp->next){
+ if( bcfp->status==COMPLETE ) continue; /* Already used */
+ if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */
+ bsp = bcfp->rp->rhs[bcfp->dot]; /* Get symbol after dot */
+ if( !same_symbol(bsp,sp) ) continue; /* Must be same as for "cfp" */
+ bcfp->status = COMPLETE; /* Mark this config as used */
+ new = Configlist_addbasis(bcfp->rp,bcfp->dot+1);
+ Plink_add(&new->bplp,bcfp);
+ }
+
+ /* Get a pointer to the state described by the basis configuration set
+ ** constructed in the preceding loop */
+ newstp = getstate(lemp);
+
+ /* The state "newstp" is reached from the state "stp" by a shift action
+ ** on the symbol "sp" */
+ if( sp->type==MULTITERMINAL ){
+ int i;
+ for(i=0; i<sp->nsubsym; i++){
+ Action_add(&stp->ap,SHIFT,sp->subsym[i],(char*)newstp);
+ }
+ }else{
+ Action_add(&stp->ap,SHIFT,sp,(char *)newstp);
+ }
+ }
+}
+
+/*
+** Construct the propagation links
+*/
+void FindLinks(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct config *cfp, *other;
+ struct state *stp;
+ struct plink *plp;
+
+ /* Housekeeping detail:
+ ** Add to every propagate link a pointer back to the state to
+ ** which the link is attached. */
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ cfp->stp = stp;
+ }
+ }
+
+ /* Convert all backlinks into forward links. Only the forward
+ ** links are used in the follow-set computation. */
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){
+ for(plp=cfp->bplp; plp; plp=plp->next){
+ other = plp->cfp;
+ Plink_add(&other->fplp,cfp);
+ }
+ }
+ }
+}
+
+/* Compute all followsets.
+**
+** A followset is the set of all symbols which can come immediately
+** after a configuration.
+*/
+void FindFollowSets(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct config *cfp;
+ struct plink *plp;
+ int progress;
+ int change;
+
+ for(i=0; i<lemp->nstate; i++){
+ for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+ cfp->status = INCOMPLETE;
+ }
+ }
+
+ do{
+ progress = 0;
+ for(i=0; i<lemp->nstate; i++){
+ for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
+ if( cfp->status==COMPLETE ) continue;
+ for(plp=cfp->fplp; plp; plp=plp->next){
+ change = SetUnion(plp->cfp->fws,cfp->fws);
+ if( change ){
+ plp->cfp->status = INCOMPLETE;
+ progress = 1;
+ }
+ }
+ cfp->status = COMPLETE;
+ }
+ }
+ }while( progress );
+}
+
+static int resolve_conflict();
+
+/* Compute the reduce actions, and resolve conflicts.
+*/
+void FindActions(lemp)
+struct lemon *lemp;
+{
+ int i,j;
+ struct config *cfp;
+ struct state *stp;
+ struct symbol *sp;
+ struct rule *rp;
+
+ /* Add all of the reduce actions
+ ** A reduce action is added for each element of the followset of
+ ** a configuration which has its dot at the extreme right.
+ */
+ for(i=0; i<lemp->nstate; i++){ /* Loop over all states */
+ stp = lemp->sorted[i];
+ for(cfp=stp->cfp; cfp; cfp=cfp->next){ /* Loop over all configurations */
+ if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? */
+ for(j=0; j<lemp->nterminal; j++){
+ if( SetFind(cfp->fws,j) ){
+ /* Add a reduce action to the state "stp" which will reduce by the
+ ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
+ Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
+ }
+ }
+ }
+ }
+ }
+
+ /* Add the accepting token */
+ if( lemp->start ){
+ sp = Symbol_find(lemp->start);
+ if( sp==0 ) sp = lemp->rule->lhs;
+ }else{
+ sp = lemp->rule->lhs;
+ }
+ /* Add to the first state (which is always the starting state of the
+ ** finite state machine) an action to ACCEPT if the lookahead is the
+ ** start nonterminal. */
+ Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0);
+
+ /* Resolve conflicts */
+ for(i=0; i<lemp->nstate; i++){
+ struct action *ap, *nap;
+ struct state *stp;
+ stp = lemp->sorted[i];
+ /* assert( stp->ap ); */
+ stp->ap = Action_sort(stp->ap);
+ for(ap=stp->ap; ap && ap->next; ap=ap->next){
+ for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
+ /* The two actions "ap" and "nap" have the same lookahead.
+ ** Figure out which one should be used */
+ lemp->nconflict += resolve_conflict(ap,nap,lemp->errsym);
+ }
+ }
+ }
+
+ /* Report an error for each rule that can never be reduced. */
+ for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE;
+ for(i=0; i<lemp->nstate; i++){
+ struct action *ap;
+ for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
+ if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE;
+ }
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ if( rp->canReduce ) continue;
+ ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n");
+ lemp->errorcnt++;
+ }
+}
+
+/* Resolve a conflict between the two given actions. If the
+** conflict can't be resolved, return non-zero.
+**
+** NO LONGER TRUE:
+** To resolve a conflict, first look to see if either action
+** is on an error rule. In that case, take the action which
+** is not associated with the error rule. If neither or both
+** actions are associated with an error rule, then try to
+** use precedence to resolve the conflict.
+**
+** If either action is a SHIFT, then it must be apx. This
+** function won't work if apx->type==REDUCE and apy->type==SHIFT.
+*/
+static int resolve_conflict(apx,apy,errsym)
+struct action *apx;
+struct action *apy;
+struct symbol *errsym; /* The error symbol (if defined. NULL otherwise) */
+{
+ struct symbol *spx, *spy;
+ int errcnt = 0;
+ assert( apx->sp==apy->sp ); /* Otherwise there would be no conflict */
+ if( apx->type==SHIFT && apy->type==SHIFT ){
+ apy->type = SSCONFLICT;
+ errcnt++;
+ }
+ if( apx->type==SHIFT && apy->type==REDUCE ){
+ spx = apx->sp;
+ spy = apy->x.rp->precsym;
+ if( spy==0 || spx->prec<0 || spy->prec<0 ){
+ /* Not enough precedence information. */
+ apy->type = SRCONFLICT;
+ errcnt++;
+ }else if( spx->prec>spy->prec ){ /* Higher precedence wins */
+ apy->type = RD_RESOLVED;
+ }else if( spx->prec<spy->prec ){
+ apx->type = SH_RESOLVED;
+ }else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */
+ apy->type = RD_RESOLVED; /* associativity */
+ }else if( spx->prec==spy->prec && spx->assoc==LEFT ){ /* to break tie */
+ apx->type = SH_RESOLVED;
+ }else{
+ assert( spx->prec==spy->prec && spx->assoc==NONE );
+ apy->type = SRCONFLICT;
+ errcnt++;
+ }
+ }else if( apx->type==REDUCE && apy->type==REDUCE ){
+ spx = apx->x.rp->precsym;
+ spy = apy->x.rp->precsym;
+ if( spx==0 || spy==0 || spx->prec<0 ||
+ spy->prec<0 || spx->prec==spy->prec ){
+ apy->type = RRCONFLICT;
+ errcnt++;
+ }else if( spx->prec>spy->prec ){
+ apy->type = RD_RESOLVED;
+ }else if( spx->prec<spy->prec ){
+ apx->type = RD_RESOLVED;
+ }
+ }else{
+ assert(
+ apx->type==SH_RESOLVED ||
+ apx->type==RD_RESOLVED ||
+ apx->type==SSCONFLICT ||
+ apx->type==SRCONFLICT ||
+ apx->type==RRCONFLICT ||
+ apy->type==SH_RESOLVED ||
+ apy->type==RD_RESOLVED ||
+ apy->type==SSCONFLICT ||
+ apy->type==SRCONFLICT ||
+ apy->type==RRCONFLICT
+ );
+ /* The REDUCE/SHIFT case cannot happen because SHIFTs come before
+ ** REDUCEs on the list. If we reach this point it must be because
+ ** the parser conflict had already been resolved. */
+ }
+ return errcnt;
+}
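The precedence logic in resolve_conflict() can be summarized for the shift/reduce
case: the action associated with the higher precedence survives, and on a tie the
lookahead token's associativity decides (right-associative keeps the shift,
left-associative keeps the reduce, no associativity leaves an SRCONFLICT). The
stand-alone sketch below restates those rules for a single decision; it is an
illustration only, not code used by lemon.

/* Return 1 if the shift survives, 0 if the reduce survives, and -1 if the
** conflict is left unresolved (SRCONFLICT).  shiftPrec is the precedence of
** the lookahead token, reducePrec the precedence of the rule's precsym, and
** assoc the e_assoc value of the lookahead token. */
static int shift_survives(int shiftPrec, int reducePrec, int assoc){
  if( shiftPrec<0 || reducePrec<0 ) return -1; /* not enough information */
  if( shiftPrec>reducePrec ) return 1;         /* reduce becomes RD_RESOLVED */
  if( shiftPrec<reducePrec ) return 0;         /* shift becomes SH_RESOLVED */
  if( assoc==RIGHT ) return 1;                 /* tie: right-assoc shifts */
  if( assoc==LEFT ) return 0;                  /* tie: left-assoc reduces */
  return -1;                                   /* tie with no associativity */
}

For the usual expression grammar in which TIMES binds tighter than PLUS, the
state containing "expr ::= expr PLUS expr ." with lookahead TIMES has shiftPrec
greater than reducePrec, so the shift survives and a+b*c groups as a+(b*c).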
+/********************* From the file "configlist.c" *************************/
+/*
+** Routines for processing a configuration list and building a state
+** in the LEMON parser generator.
+*/
+
+static struct config *freelist = 0; /* List of free configurations */
+static struct config *current = 0; /* Top of list of configurations */
+static struct config **currentend = 0; /* Last on list of configs */
+static struct config *basis = 0; /* Top of list of basis configs */
+static struct config **basisend = 0; /* End of list of basis configs */
+
+/* Return a pointer to a new configuration */
+PRIVATE struct config *newconfig(){
+ struct config *new;
+ if( freelist==0 ){
+ int i;
+ int amt = 3;
+ freelist = (struct config *)calloc( amt, sizeof(struct config) );
+ if( freelist==0 ){
+ fprintf(stderr,"Unable to allocate memory for a new configuration.");
+ exit(1);
+ }
+ for(i=0; i<amt-1; i++) freelist[i].next = &freelist[i+1];
+ freelist[amt-1].next = 0;
+ }
+ new = freelist;
+ freelist = freelist->next;
+ return new;
+}
+
+/* The configuration "old" is no longer used */
+PRIVATE void deleteconfig(old)
+struct config *old;
+{
+ old->next = freelist;
+ freelist = old;
+}
+
+/* Initialize the configuration list builder */
+void Configlist_init(){
+ current = 0;
+ currentend = &current;
+ basis = 0;
+ basisend = &basis;
+ Configtable_init();
+ return;
+}
+
+/* Reset the configuration list builder */
+void Configlist_reset(){
+ current = 0;
+ currentend = &current;
+ basis = 0;
+ basisend = &basis;
+ Configtable_clear(0);
+ return;
+}
+
+/* Add another configuration to the configuration list */
+struct config *Configlist_add(rp,dot)
+struct rule *rp; /* The rule */
+int dot; /* Index into the RHS of the rule where the dot goes */
+{
+ struct config *cfp, model;
+
+ assert( currentend!=0 );
+ model.rp = rp;
+ model.dot = dot;
+ cfp = Configtable_find(&model);
+ if( cfp==0 ){
+ cfp = newconfig();
+ cfp->rp = rp;
+ cfp->dot = dot;
+ cfp->fws = SetNew();
+ cfp->stp = 0;
+ cfp->fplp = cfp->bplp = 0;
+ cfp->next = 0;
+ cfp->bp = 0;
+ *currentend = cfp;
+ currentend = &cfp->next;
+ Configtable_insert(cfp);
+ }
+ return cfp;
+}
+
+/* Add a basis configuration to the configuration list */
+struct config *Configlist_addbasis(rp,dot)
+struct rule *rp;
+int dot;
+{
+ struct config *cfp, model;
+
+ assert( basisend!=0 );
+ assert( currentend!=0 );
+ model.rp = rp;
+ model.dot = dot;
+ cfp = Configtable_find(&model);
+ if( cfp==0 ){
+ cfp = newconfig();
+ cfp->rp = rp;
+ cfp->dot = dot;
+ cfp->fws = SetNew();
+ cfp->stp = 0;
+ cfp->fplp = cfp->bplp = 0;
+ cfp->next = 0;
+ cfp->bp = 0;
+ *currentend = cfp;
+ currentend = &cfp->next;
+ *basisend = cfp;
+ basisend = &cfp->bp;
+ Configtable_insert(cfp);
+ }
+ return cfp;
+}
+
+/* Compute the closure of the configuration list */
+void Configlist_closure(lemp)
+struct lemon *lemp;
+{
+ struct config *cfp, *newcfp;
+ struct rule *rp, *newrp;
+ struct symbol *sp, *xsp;
+ int i, dot;
+
+ assert( currentend!=0 );
+ for(cfp=current; cfp; cfp=cfp->next){
+ rp = cfp->rp;
+ dot = cfp->dot;
+ if( dot>=rp->nrhs ) continue;
+ sp = rp->rhs[dot];
+ if( sp->type==NONTERMINAL ){
+ if( sp->rule==0 && sp!=lemp->errsym ){
+ ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.",
+ sp->name);
+ lemp->errorcnt++;
+ }
+ for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){
+ newcfp = Configlist_add(newrp,0);
+ for(i=dot+1; i<rp->nrhs; i++){
+ xsp = rp->rhs[i];
+ if( xsp->type==TERMINAL ){
+ SetAdd(newcfp->fws,xsp->index);
+ break;
+ }else if( xsp->type==MULTITERMINAL ){
+ int k;
+ for(k=0; k<xsp->nsubsym; k++){
+ SetAdd(newcfp->fws, xsp->subsym[k]->index);
+ }
+ break;
+ }else{
+ SetUnion(newcfp->fws,xsp->firstset);
+ if( xsp->lambda==LEMON_FALSE ) break;
+ }
+ }
+ if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
+ }
+ }
+ }
+ return;
+}
+
+/* Sort the configuration list */
+void Configlist_sort(){
+ current = (struct config *)msort((char *)current,(char **)&(current->next),Configcmp);
+ currentend = 0;
+ return;
+}
+
+/* Sort the basis configuration list */
+void Configlist_sortbasis(){
+ basis = (struct config *)msort((char *)current,(char **)&(current->bp),Configcmp);
+ basisend = 0;
+ return;
+}
+
+/* Return a pointer to the head of the configuration list and
+** reset the list */
+struct config *Configlist_return(){
+ struct config *old;
+ old = current;
+ current = 0;
+ currentend = 0;
+ return old;
+}
+
+/* Return a pointer to the head of the configuration list and
+** reset the list */
+struct config *Configlist_basis(){
+ struct config *old;
+ old = basis;
+ basis = 0;
+ basisend = 0;
+ return old;
+}
+
+/* Free all elements of the given configuration list */
+void Configlist_eat(cfp)
+struct config *cfp;
+{
+ struct config *nextcfp;
+ for(; cfp; cfp=nextcfp){
+ nextcfp = cfp->next;
+ assert( cfp->fplp==0 );
+ assert( cfp->bplp==0 );
+ if( cfp->fws ) SetFree(cfp->fws);
+ deleteconfig(cfp);
+ }
+ return;
+}
+/***************** From the file "error.c" *********************************/
+/*
+** Code for printing error messages.
+*/
+
+/* Find a good place to break "msg" so that its length is at least "min"
+** but no more than "max". Make the point as close to max as possible.
+*/
+static int findbreak(msg,min,max)
+char *msg;
+int min;
+int max;
+{
+ int i,spot;
+ char c;
+ for(i=spot=min; i<=max; i++){
+ c = msg[i];
+ if( c=='\t' ) msg[i] = ' ';
+ if( c=='\n' ){ msg[i] = ' '; spot = i; break; }
+ if( c==0 ){ spot = i; break; }
+ if( c=='-' && i<max-1 ) spot = i+1;
+ if( c==' ' ) spot = i;
+ }
+ return spot;
+}
+
+/*
+** The error message is split across multiple lines if necessary. The
+** splits occur at a space, if there is a space available near the end
+** of the line.
+*/
+#define ERRMSGSIZE 10000 /* Hope this is big enough. No way to error check */
+#define LINEWIDTH 79 /* Max width of any output line */
+#define PREFIXLIMIT 30 /* Max width of the prefix on each line */
+void ErrorMsg(const char *filename, int lineno, const char *format, ...){
+ char errmsg[ERRMSGSIZE];
+ char prefix[PREFIXLIMIT+10];
+ int errmsgsize;
+ int prefixsize;
+ int availablewidth;
+ va_list ap;
+ int end, restart, base;
+
+ va_start(ap, format);
+ /* Prepare a prefix to be prepended to every output line */
+ if( lineno>0 ){
+ sprintf(prefix,"%.*s:%d: ",PREFIXLIMIT-10,filename,lineno);
+ }else{
+ sprintf(prefix,"%.*s: ",PREFIXLIMIT-10,filename);
+ }
+ prefixsize = strlen(prefix);
+ availablewidth = LINEWIDTH - prefixsize;
+
+ /* Generate the error message */
+ vsprintf(errmsg,format,ap);
+ va_end(ap);
+ errmsgsize = strlen(errmsg);
+ /* Remove trailing '\n's from the error message. */
+ while( errmsgsize>0 && errmsg[errmsgsize-1]=='\n' ){
+ errmsg[--errmsgsize] = 0;
+ }
+
+ /* Print the error message */
+ base = 0;
+ while( errmsg[base]!=0 ){
+ end = restart = findbreak(&errmsg[base],0,availablewidth);
+ restart += base;
+ while( errmsg[restart]==' ' ) restart++;
+ fprintf(stdout,"%s%.*s\n",prefix,end,&errmsg[base]);
+ base = restart;
+ }
+}
+/**************** From the file "main.c" ************************************/
+/*
+** Main program file for the LEMON parser generator.
+*/
+
+/* Report an out-of-memory condition and abort. This function
+** is used mostly by the "MemoryCheck" macro in struct.h
+*/
+void memory_error(){
+ fprintf(stderr,"Out of memory. Aborting...\n");
+ exit(1);
+}
+
+static int nDefine = 0; /* Number of -D options on the command line */
+static char **azDefine = 0; /* Name of the -D macros */
+
+/* This routine is called with the argument to each -D command-line option.
+** Add the macro defined to the azDefine array.
+*/
+static void handle_D_option(char *z){
+ char **paz;
+ nDefine++;
+ azDefine = realloc(azDefine, sizeof(azDefine[0])*nDefine);
+ if( azDefine==0 ){
+ fprintf(stderr,"out of memory\n");
+ exit(1);
+ }
+ paz = &azDefine[nDefine-1];
+ *paz = malloc( strlen(z)+1 );
+ if( *paz==0 ){
+ fprintf(stderr,"out of memory\n");
+ exit(1);
+ }
+ strcpy(*paz, z);
+ for(z=*paz; *z && *z!='='; z++){}
+ *z = 0;
+}
+
+
+/* The main program. Parse the command line and do it... */
+int main(argc,argv)
+int argc;
+char **argv;
+{
+ static int version = 0;
+ static int rpflag = 0;
+ static int basisflag = 0;
+ static int compress = 0;
+ static int quiet = 0;
+ static int statistics = 0;
+ static int mhflag = 0;
+ static struct s_options options[] = {
+ {OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."},
+ {OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."},
+ {OPT_FSTR, "D", (char*)handle_D_option, "Define an %ifdef macro."},
+ {OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."},
+ {OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file"},
+ {OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."},
+ {OPT_FLAG, "s", (char*)&statistics,
+ "Print parser stats to standard output."},
+ {OPT_FLAG, "x", (char*)&version, "Print the version number."},
+ {OPT_FLAG,0,0,0}
+ };
+ int i;
+ struct lemon lem;
+
+ OptInit(argv,options,stderr);
+ if( version ){
+ printf("Lemon version 1.0\n");
+ exit(0);
+ }
+ if( OptNArgs()!=1 ){
+ fprintf(stderr,"Exactly one filename argument is required.\n");
+ exit(1);
+ }
+ memset(&lem, 0, sizeof(lem));
+ lem.errorcnt = 0;
+
+ /* Initialize the machine */
+ Strsafe_init();
+ Symbol_init();
+ State_init();
+ lem.argv0 = argv[0];
+ lem.filename = OptArg(0);
+ lem.basisflag = basisflag;
+ Symbol_new("$");
+ lem.errsym = Symbol_new("error");
+ lem.errsym->useCnt = 0;
+
+ /* Parse the input file */
+ Parse(&lem);
+ if( lem.errorcnt ) exit(lem.errorcnt);
+ if( lem.nrule==0 ){
+ fprintf(stderr,"Empty grammar.\n");
+ exit(1);
+ }
+
+ /* Count and index the symbols of the grammar */
+ lem.nsymbol = Symbol_count();
+ Symbol_new("{default}");
+ lem.symbols = Symbol_arrayof();
+ for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
+ qsort(lem.symbols,lem.nsymbol+1,sizeof(struct symbol*),
+ (int(*)())Symbolcmpp);
+ for(i=0; i<=lem.nsymbol; i++) lem.symbols[i]->index = i;
+ for(i=1; isupper(lem.symbols[i]->name[0]); i++);
+ lem.nterminal = i;
+
+ /* Generate a reprint of the grammar, if requested on the command line */
+ if( rpflag ){
+ Reprint(&lem);
+ }else{
+ /* Initialize the size for all follow and first sets */
+ SetSize(lem.nterminal+1);
+
+ /* Find the precedence for every production rule (that has one) */
+ FindRulePrecedences(&lem);
+
+ /* Compute the lambda-nonterminals and the first-sets for every
+ ** nonterminal */
+ FindFirstSets(&lem);
+
+ /* Compute all LR(0) states. Also record follow-set propagation
+ ** links so that the follow-set can be computed later */
+ lem.nstate = 0;
+ FindStates(&lem);
+ lem.sorted = State_arrayof();
+
+ /* Tie up loose ends on the propagation links */
+ FindLinks(&lem);
+
+ /* Compute the follow set of every reducible configuration */
+ FindFollowSets(&lem);
+
+ /* Compute the action tables */
+ FindActions(&lem);
+
+ /* Compress the action tables */
+ if( compress==0 ) CompressTables(&lem);
+
+ /* Reorder and renumber the states so that states with fewer choices
+ ** occur at the end. */
+ ResortStates(&lem);
+
+ /* Generate a report of the parser generated. (the "y.output" file) */
+ if( !quiet ) ReportOutput(&lem);
+
+ /* Generate the source code for the parser */
+ ReportTable(&lem, mhflag);
+
+ /* Produce a header file for use by the scanner. (This step is
+ ** omitted if the "-m" option is used because makeheaders will
+ ** generate the file for us.) */
+ if( !mhflag ) ReportHeader(&lem);
+ }
+ if( statistics ){
+ printf("Parser statistics: %d terminals, %d nonterminals, %d rules\n",
+ lem.nterminal, lem.nsymbol - lem.nterminal, lem.nrule);
+ printf(" %d states, %d parser table entries, %d conflicts\n",
+ lem.nstate, lem.tablesize, lem.nconflict);
+ }
+ if( lem.nconflict ){
+ fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict);
+ }
+ exit(lem.errorcnt + lem.nconflict);
+ return (lem.errorcnt + lem.nconflict);
+}
+/******************** From the file "msort.c" *******************************/
+/*
+** A generic merge-sort program.
+**
+** USAGE:
+** Let "ptr" be a pointer to some structure which is at the head of
+** a null-terminated list. Then to sort the list call:
+**
+** ptr = msort(ptr,&(ptr->next),cmpfnc);
+**
+** In the above, "cmpfnc" is a pointer to a function which compares
+** two instances of the structure and returns an integer, as in
+** strcmp. The second argument is a pointer to the pointer to the
+** second element of the linked list. This address is used to compute
+** the offset to the "next" field within the structure. The offset to
+** the "next" field must be constant for all structures in the list.
+**
+** The function returns a new pointer which is the head of the list
+** after sorting.
+**
+** ALGORITHM:
+** Merge-sort.
+*/
+
+/*
+** Return a pointer to the next structure in the linked list.
+*/
+#define NEXT(A) (*(char**)(((unsigned long)A)+offset))
+
+/*
+** Inputs:
+** a: A sorted, null-terminated linked list. (May be null).
+** b: A sorted, null-terminated linked list. (May be null).
+** cmp: A pointer to the comparison function.
+** offset: Offset in the structure to the "next" field.
+**
+** Return Value:
+** A pointer to the head of a sorted list containing the elements
+** of both a and b.
+**
+** Side effects:
+** The "next" pointers for elements in the lists a and b are
+** changed.
+*/
+static char *merge(
+ char *a,
+ char *b,
+ int (*cmp)(const char*,const char*),
+ int offset
+){
+ char *ptr, *head;
+
+ if( a==0 ){
+ head = b;
+ }else if( b==0 ){
+ head = a;
+ }else{
+ if( (*cmp)(a,b)<0 ){
+ ptr = a;
+ a = NEXT(a);
+ }else{
+ ptr = b;
+ b = NEXT(b);
+ }
+ head = ptr;
+ while( a && b ){
+ if( (*cmp)(a,b)<0 ){
+ NEXT(ptr) = a;
+ ptr = a;
+ a = NEXT(a);
+ }else{
+ NEXT(ptr) = b;
+ ptr = b;
+ b = NEXT(b);
+ }
+ }
+ if( a ) NEXT(ptr) = a;
+ else NEXT(ptr) = b;
+ }
+ return head;
+}
+
+/*
+** Inputs:
+** list: Pointer to a singly-linked list of structures.
+** next: Pointer to pointer to the second element of the list.
+** cmp: A comparison function.
+**
+** Return Value:
+** A pointer to the head of a sorted list containing the elements
+** originally in list.
+**
+** Side effects:
+** The "next" pointers for elements in list are changed.
+*/
+#define LISTSIZE 30
+static char *msort(
+ char *list,
+ char **next,
+ int (*cmp)(const char*,const char*)
+){
+ unsigned long offset;
+ char *ep;
+ char *set[LISTSIZE];
+ int i;
+ offset = (unsigned long)next - (unsigned long)list;
+ for(i=0; i<LISTSIZE; i++) set[i] = 0;
+ while( list ){
+ ep = list;
+ list = NEXT(list);
+ NEXT(ep) = 0;
+ for(i=0; i<LISTSIZE-1 && set[i]!=0; i++){
+ ep = merge(ep,set[i],cmp,offset);
+ set[i] = 0;
+ }
+ set[i] = ep;
+ }
+ ep = 0;
+ for(i=0; i<LISTSIZE; i++) if( set[i] ) ep = merge(ep,set[i],cmp,offset);
+ return ep;
+}
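The USAGE comment above can be made concrete with a caller-defined node type; the
names below are illustrative only, and the functions assume they live in the same
translation unit as msort() (which is static).

struct item {
  int value;
  struct item *next;
};

static int itemcmp(const char *a, const char *b){
  return ((const struct item*)a)->value - ((const struct item*)b)->value;
}

/* Sort a NULL-terminated singly-linked list of struct item in ascending order. */
static struct item *sort_items(struct item *list){
  return (struct item *)msort((char *)list, (char **)&list->next, itemcmp);
}

This mirrors how Action_sort() earlier in this file calls msort() on the list of
parser actions, passing &ap->next so that msort() can compute the byte offset of
the "next" field within the node structure.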
+/************************ From the file "option.c" **************************/
+static char **argv;
+static struct s_options *op;
+static FILE *errstream;
+
+#define ISOPT(X) ((X)[0]=='-'||(X)[0]=='+'||strchr((X),'=')!=0)
+
+/*
+** Print the command line with a caret pointing to the k-th character
+** of the n-th field.
+*/
+static void errline(n,k,err)
+int n;
+int k;
+FILE *err;
+{
+ int spcnt, i;
+ if( argv[0] ) fprintf(err,"%s",argv[0]);
+ spcnt = strlen(argv[0]) + 1;
+ for(i=1; i<n && argv[i]; i++){
+ fprintf(err," %s",argv[i]);
+ spcnt += strlen(argv[i])+1;
+ }
+ spcnt += k;
+ for(; argv[i]; i++) fprintf(err," %s",argv[i]);
+ if( spcnt<20 ){
+ fprintf(err,"\n%*s^-- here\n",spcnt,"");
+ }else{
+ fprintf(err,"\n%*shere --^\n",spcnt-7,"");
+ }
+}
+
+/*
+** Return the index of the N-th non-switch argument. Return -1
+** if N is out of range.
+*/
+static int argindex(n)
+int n;
+{
+ int i;
+ int dashdash = 0;
+ if( argv!=0 && *argv!=0 ){
+ for(i=1; argv[i]; i++){
+ if( dashdash || !ISOPT(argv[i]) ){
+ if( n==0 ) return i;
+ n--;
+ }
+ if( strcmp(argv[i],"--")==0 ) dashdash = 1;
+ }
+ }
+ return -1;
+}
+
+static char emsg[] = "Command line syntax error: ";
+
+/*
+** Process a flag command line argument.
+*/
+static int handleflags(i,err)
+int i;
+FILE *err;
+{
+ int v;
+ int errcnt = 0;
+ int j;
+ for(j=0; op[j].label; j++){
+ if( strncmp(&argv[i][1],op[j].label,strlen(op[j].label))==0 ) break;
+ }
+ v = argv[i][0]=='-' ? 1 : 0;
+ if( op[j].label==0 ){
+ if( err ){
+ fprintf(err,"%sundefined option.\n",emsg);
+ errline(i,1,err);
+ }
+ errcnt++;
+ }else if( op[j].type==OPT_FLAG ){
+ *((int*)op[j].arg) = v;
+ }else if( op[j].type==OPT_FFLAG ){
+ (*(void(*)())(op[j].arg))(v);
+ }else if( op[j].type==OPT_FSTR ){
+ (*(void(*)())(op[j].arg))(&argv[i][2]);
+ }else{
+ if( err ){
+ fprintf(err,"%smissing argument on switch.\n",emsg);
+ errline(i,1,err);
+ }
+ errcnt++;
+ }
+ return errcnt;
+}
+
+/*
+** Process a command line switch which has an argument.
+*/
+static int handleswitch(i,err)
+int i;
+FILE *err;
+{
+ int lv = 0;
+ double dv = 0.0;
+ char *sv = 0, *end;
+ char *cp;
+ int j;
+ int errcnt = 0;
+ cp = strchr(argv[i],'=');
+ assert( cp!=0 );
+ *cp = 0;
+ for(j=0; op[j].label; j++){
+ if( strcmp(argv[i],op[j].label)==0 ) break;
+ }
+ *cp = '=';
+ if( op[j].label==0 ){
+ if( err ){
+ fprintf(err,"%sundefined option.\n",emsg);
+ errline(i,0,err);
+ }
+ errcnt++;
+ }else{
+ cp++;
+ switch( op[j].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ if( err ){
+ fprintf(err,"%soption requires an argument.\n",emsg);
+ errline(i,0,err);
+ }
+ errcnt++;
+ break;
+ case OPT_DBL:
+ case OPT_FDBL:
+ dv = strtod(cp,&end);
+ if( *end ){
+ if( err ){
+ fprintf(err,"%sillegal character in floating-point argument.\n",emsg);
+ errline(i,((unsigned long)end)-(unsigned long)argv[i],err);
+ }
+ errcnt++;
+ }
+ break;
+ case OPT_INT:
+ case OPT_FINT:
+ lv = strtol(cp,&end,0);
+ if( *end ){
+ if( err ){
+ fprintf(err,"%sillegal character in integer argument.\n",emsg);
+ errline(i,((unsigned long)end)-(unsigned long)argv[i],err);
+ }
+ errcnt++;
+ }
+ break;
+ case OPT_STR:
+ case OPT_FSTR:
+ sv = cp;
+ break;
+ }
+ switch( op[j].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ break;
+ case OPT_DBL:
+ *(double*)(op[j].arg) = dv;
+ break;
+ case OPT_FDBL:
+ (*(void(*)())(op[j].arg))(dv);
+ break;
+ case OPT_INT:
+ *(int*)(op[j].arg) = lv;
+ break;
+ case OPT_FINT:
+ (*(void(*)())(op[j].arg))((int)lv);
+ break;
+ case OPT_STR:
+ *(char**)(op[j].arg) = sv;
+ break;
+ case OPT_FSTR:
+ (*(void(*)())(op[j].arg))(sv);
+ break;
+ }
+ }
+ return errcnt;
+}
+
+int OptInit(a,o,err)
+char **a;
+struct s_options *o;
+FILE *err;
+{
+ int errcnt = 0;
+ argv = a;
+ op = o;
+ errstream = err;
+ if( argv && *argv && op ){
+ int i;
+ for(i=1; argv[i]; i++){
+ if( argv[i][0]=='+' || argv[i][0]=='-' ){
+ errcnt += handleflags(i,err);
+ }else if( strchr(argv[i],'=') ){
+ errcnt += handleswitch(i,err);
+ }
+ }
+ }
+ if( errcnt>0 ){
+ fprintf(err,"Valid command line options for \"%s\" are:\n",*a);
+ OptPrint();
+ exit(1);
+ }
+ return 0;
+}
+
+int OptNArgs(){
+ int cnt = 0;
+ int dashdash = 0;
+ int i;
+ if( argv!=0 && argv[0]!=0 ){
+ for(i=1; argv[i]; i++){
+ if( dashdash || !ISOPT(argv[i]) ) cnt++;
+ if( strcmp(argv[i],"--")==0 ) dashdash = 1;
+ }
+ }
+ return cnt;
+}
+
+char *OptArg(n)
+int n;
+{
+ int i;
+ i = argindex(n);
+ return i>=0 ? argv[i] : 0;
+}
+
+void OptErr(n)
+int n;
+{
+ int i;
+ i = argindex(n);
+ if( i>=0 ) errline(i,0,errstream);
+}
+
+void OptPrint(){
+ int i;
+ int max, len;
+ max = 0;
+ for(i=0; op[i].label; i++){
+ len = strlen(op[i].label) + 1;
+ switch( op[i].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ break;
+ case OPT_INT:
+ case OPT_FINT:
+ len += 9; /* length of "<integer>" */
+ break;
+ case OPT_DBL:
+ case OPT_FDBL:
+ len += 6; /* length of "<real>" */
+ break;
+ case OPT_STR:
+ case OPT_FSTR:
+ len += 8; /* length of "<string>" */
+ break;
+ }
+ if( len>max ) max = len;
+ }
+ for(i=0; op[i].label; i++){
+ switch( op[i].type ){
+ case OPT_FLAG:
+ case OPT_FFLAG:
+ fprintf(errstream," -%-*s %s\n",max,op[i].label,op[i].message);
+ break;
+ case OPT_INT:
+ case OPT_FINT:
+ fprintf(errstream," %s=<integer>%*s %s\n",op[i].label,
+ (int)(max-strlen(op[i].label)-9),"",op[i].message);
+ break;
+ case OPT_DBL:
+ case OPT_FDBL:
+ fprintf(errstream," %s=<real>%*s %s\n",op[i].label,
+ (int)(max-strlen(op[i].label)-6),"",op[i].message);
+ break;
+ case OPT_STR:
+ case OPT_FSTR:
+ fprintf(errstream," %s=<string>%*s %s\n",op[i].label,
+ (int)(max-strlen(op[i].label)-8),"",op[i].message);
+ break;
+ }
+ }
+}
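+
+/*
+** A minimal sketch of how the option routines above fit together.  The
+** table and variables are illustrative only, and the field order of
+** struct s_options (type, label, arg, message) is assumed from the way
+** those fields are referenced above.
+**
+**      static int quiet = 0;
+**      static char *zOut = 0;
+**      static struct s_options opts[] = {
+**        {OPT_FLAG, "q", (char*)&quiet, "Suppress all output."},
+**        {OPT_STR,  "o", (char*)&zOut,  "Name of the output file."},
+**        {OPT_FLAG, 0, 0, 0}
+**      };
+**
+**      OptInit(argv, opts, stderr);        (from main(); exits on error)
+**      if( OptNArgs()<1 ) OptPrint();
+**      zInput = OptArg(0);                 (first non-option argument)
+*/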
+/*********************** From the file "parse.c" ****************************/
+/*
+** Input file parser for the LEMON parser generator.
+*/
+
+/* The state of the parser */
+struct pstate {
+ char *filename; /* Name of the input file */
+ int tokenlineno; /* Linenumber at which current token starts */
+ int errorcnt; /* Number of errors so far */
+ char *tokenstart; /* Text of current token */
+ struct lemon *gp; /* Global state vector */
+ enum e_state {
+ INITIALIZE,
+ WAITING_FOR_DECL_OR_RULE,
+ WAITING_FOR_DECL_KEYWORD,
+ WAITING_FOR_DECL_ARG,
+ WAITING_FOR_PRECEDENCE_SYMBOL,
+ WAITING_FOR_ARROW,
+ IN_RHS,
+ LHS_ALIAS_1,
+ LHS_ALIAS_2,
+ LHS_ALIAS_3,
+ RHS_ALIAS_1,
+ RHS_ALIAS_2,
+ PRECEDENCE_MARK_1,
+ PRECEDENCE_MARK_2,
+ RESYNC_AFTER_RULE_ERROR,
+ RESYNC_AFTER_DECL_ERROR,
+ WAITING_FOR_DESTRUCTOR_SYMBOL,
+ WAITING_FOR_DATATYPE_SYMBOL,
+ WAITING_FOR_FALLBACK_ID,
+ WAITING_FOR_WILDCARD_ID
+ } state; /* The state of the parser */
+ struct symbol *fallback; /* The fallback token */
+ struct symbol *lhs; /* Left-hand side of current rule */
+ char *lhsalias; /* Alias for the LHS */
+ int nrhs; /* Number of right-hand side symbols seen */
+ struct symbol *rhs[MAXRHS]; /* RHS symbols */
+ char *alias[MAXRHS]; /* Aliases for each RHS symbol (or NULL) */
+ struct rule *prevrule; /* Previous rule parsed */
+ char *declkeyword; /* Keyword of a declaration */
+ char **declargslot; /* Where the declaration argument should be put */
+ int insertLineMacro; /* Add #line before declaration insert */
+ int *decllinenoslot; /* Where to write declaration line number */
+ enum e_assoc declassoc; /* Assign this association to decl arguments */
+ int preccounter; /* Assign this precedence to decl arguments */
+ struct rule *firstrule; /* Pointer to first rule in the grammar */
+ struct rule *lastrule; /* Pointer to the most recently parsed rule */
+};
+
+/* Parse a single token */
+static void parseonetoken(psp)
+struct pstate *psp;
+{
+ char *x;
+ x = Strsafe(psp->tokenstart); /* Save the token permanently */
+#if 0
+ printf("%s:%d: Token=[%s] state=%d\n",psp->filename,psp->tokenlineno,
+ x,psp->state);
+#endif
+ switch( psp->state ){
+ case INITIALIZE:
+ psp->prevrule = 0;
+ psp->preccounter = 0;
+ psp->firstrule = psp->lastrule = 0;
+ psp->gp->nrule = 0;
+ /* Fall thru to next case */
+ case WAITING_FOR_DECL_OR_RULE:
+ if( x[0]=='%' ){
+ psp->state = WAITING_FOR_DECL_KEYWORD;
+ }else if( islower(x[0]) ){
+ psp->lhs = Symbol_new(x);
+ psp->nrhs = 0;
+ psp->lhsalias = 0;
+ psp->state = WAITING_FOR_ARROW;
+ }else if( x[0]=='{' ){
+ if( psp->prevrule==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+"There is no prior rule upon which to attach the code \
+fragment which begins on this line.");
+ psp->errorcnt++;
+ }else if( psp->prevrule->code!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+"Code fragment beginning on this line is not the first \
+to follow the previous rule.");
+ psp->errorcnt++;
+ }else{
+ psp->prevrule->line = psp->tokenlineno;
+ psp->prevrule->code = &x[1];
+ }
+ }else if( x[0]=='[' ){
+ psp->state = PRECEDENCE_MARK_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Token \"%s\" should be either \"%%\" or a nonterminal name.",
+ x);
+ psp->errorcnt++;
+ }
+ break;
+ case PRECEDENCE_MARK_1:
+ if( !isupper(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "The precedence symbol must be a terminal.");
+ psp->errorcnt++;
+ }else if( psp->prevrule==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "There is no prior rule to assign precedence \"[%s]\".",x);
+ psp->errorcnt++;
+ }else if( psp->prevrule->precsym!=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+"Precedence mark on this line is not the first \
+to follow the previous rule.");
+ psp->errorcnt++;
+ }else{
+ psp->prevrule->precsym = Symbol_new(x);
+ }
+ psp->state = PRECEDENCE_MARK_2;
+ break;
+ case PRECEDENCE_MARK_2:
+ if( x[0]!=']' ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \"]\" on precedence mark.");
+ psp->errorcnt++;
+ }
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ break;
+ case WAITING_FOR_ARROW:
+ if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+ psp->state = IN_RHS;
+ }else if( x[0]=='(' ){
+ psp->state = LHS_ALIAS_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Expected to see a \":\" following the LHS symbol \"%s\".",
+ psp->lhs->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_1:
+ if( isalpha(x[0]) ){
+ psp->lhsalias = x;
+ psp->state = LHS_ALIAS_2;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "\"%s\" is not a valid alias for the LHS \"%s\"\n",
+ x,psp->lhs->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_2:
+ if( x[0]==')' ){
+ psp->state = LHS_ALIAS_3;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case LHS_ALIAS_3:
+ if( x[0]==':' && x[1]==':' && x[2]=='=' ){
+ psp->state = IN_RHS;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \"->\" following: \"%s(%s)\".",
+ psp->lhs->name,psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case IN_RHS:
+ if( x[0]=='.' ){
+ struct rule *rp;
+ rp = (struct rule *)calloc( sizeof(struct rule) +
+ sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs, 1);
+ if( rp==0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Can't allocate enough memory for this rule.");
+ psp->errorcnt++;
+ psp->prevrule = 0;
+ }else{
+ int i;
+ rp->ruleline = psp->tokenlineno;
+ rp->rhs = (struct symbol**)&rp[1];
+ rp->rhsalias = (char**)&(rp->rhs[psp->nrhs]);
+ for(i=0; i<psp->nrhs; i++){
+ rp->rhs[i] = psp->rhs[i];
+ rp->rhsalias[i] = psp->alias[i];
+ }
+ rp->lhs = psp->lhs;
+ rp->lhsalias = psp->lhsalias;
+ rp->nrhs = psp->nrhs;
+ rp->code = 0;
+ rp->precsym = 0;
+ rp->index = psp->gp->nrule++;
+ rp->nextlhs = rp->lhs->rule;
+ rp->lhs->rule = rp;
+ rp->next = 0;
+ if( psp->firstrule==0 ){
+ psp->firstrule = psp->lastrule = rp;
+ }else{
+ psp->lastrule->next = rp;
+ psp->lastrule = rp;
+ }
+ psp->prevrule = rp;
+ }
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( isalpha(x[0]) ){
+ if( psp->nrhs>=MAXRHS ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Too many symbols on RHS of rule beginning at \"%s\".",
+ x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }else{
+ psp->rhs[psp->nrhs] = Symbol_new(x);
+ psp->alias[psp->nrhs] = 0;
+ psp->nrhs++;
+ }
+ }else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 ){
+ struct symbol *msp = psp->rhs[psp->nrhs-1];
+ if( msp->type!=MULTITERMINAL ){
+ struct symbol *origsp = msp;
+ msp = calloc(1,sizeof(*msp));
+ memset(msp, 0, sizeof(*msp));
+ msp->type = MULTITERMINAL;
+ msp->nsubsym = 1;
+ msp->subsym = calloc(1,sizeof(struct symbol*));
+ msp->subsym[0] = origsp;
+ msp->name = origsp->name;
+ psp->rhs[psp->nrhs-1] = msp;
+ }
+ msp->nsubsym++;
+ msp->subsym = realloc(msp->subsym, sizeof(struct symbol*)*msp->nsubsym);
+ msp->subsym[msp->nsubsym-1] = Symbol_new(&x[1]);
+ if( islower(x[1]) || islower(msp->subsym[0]->name[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Cannot form a compound containing a non-terminal");
+ psp->errorcnt++;
+ }
+ }else if( x[0]=='(' && psp->nrhs>0 ){
+ psp->state = RHS_ALIAS_1;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal character on RHS of rule: \"%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case RHS_ALIAS_1:
+ if( isalpha(x[0]) ){
+ psp->alias[psp->nrhs-1] = x;
+ psp->state = RHS_ALIAS_2;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "\"%s\" is not a valid alias for the RHS symbol \"%s\"\n",
+ x,psp->rhs[psp->nrhs-1]->name);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case RHS_ALIAS_2:
+ if( x[0]==')' ){
+ psp->state = IN_RHS;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_RULE_ERROR;
+ }
+ break;
+ case WAITING_FOR_DECL_KEYWORD:
+ if( isalpha(x[0]) ){
+ psp->declkeyword = x;
+ psp->declargslot = 0;
+ psp->decllinenoslot = 0;
+ psp->insertLineMacro = 1;
+ psp->state = WAITING_FOR_DECL_ARG;
+ if( strcmp(x,"name")==0 ){
+ psp->declargslot = &(psp->gp->name);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"include")==0 ){
+ psp->declargslot = &(psp->gp->include);
+ }else if( strcmp(x,"code")==0 ){
+ psp->declargslot = &(psp->gp->extracode);
+ }else if( strcmp(x,"token_destructor")==0 ){
+ psp->declargslot = &psp->gp->tokendest;
+ }else if( strcmp(x,"default_destructor")==0 ){
+ psp->declargslot = &psp->gp->vardest;
+ }else if( strcmp(x,"token_prefix")==0 ){
+ psp->declargslot = &psp->gp->tokenprefix;
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"syntax_error")==0 ){
+ psp->declargslot = &(psp->gp->error);
+ }else if( strcmp(x,"parse_accept")==0 ){
+ psp->declargslot = &(psp->gp->accept);
+ }else if( strcmp(x,"parse_failure")==0 ){
+ psp->declargslot = &(psp->gp->failure);
+ }else if( strcmp(x,"stack_overflow")==0 ){
+ psp->declargslot = &(psp->gp->overflow);
+ }else if( strcmp(x,"extra_argument")==0 ){
+ psp->declargslot = &(psp->gp->arg);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"token_type")==0 ){
+ psp->declargslot = &(psp->gp->tokentype);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"default_type")==0 ){
+ psp->declargslot = &(psp->gp->vartype);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"stack_size")==0 ){
+ psp->declargslot = &(psp->gp->stacksize);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"start_symbol")==0 ){
+ psp->declargslot = &(psp->gp->start);
+ psp->insertLineMacro = 0;
+ }else if( strcmp(x,"left")==0 ){
+ psp->preccounter++;
+ psp->declassoc = LEFT;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"right")==0 ){
+ psp->preccounter++;
+ psp->declassoc = RIGHT;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"nonassoc")==0 ){
+ psp->preccounter++;
+ psp->declassoc = NONE;
+ psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
+ }else if( strcmp(x,"destructor")==0 ){
+ psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL;
+ }else if( strcmp(x,"type")==0 ){
+ psp->state = WAITING_FOR_DATATYPE_SYMBOL;
+ }else if( strcmp(x,"fallback")==0 ){
+ psp->fallback = 0;
+ psp->state = WAITING_FOR_FALLBACK_ID;
+ }else if( strcmp(x,"wildcard")==0 ){
+ psp->state = WAITING_FOR_WILDCARD_ID;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Unknown declaration keyword: \"%%%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal declaration keyword: \"%s\".",x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case WAITING_FOR_DESTRUCTOR_SYMBOL:
+ if( !isalpha(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+          "Symbol name missing after %%destructor keyword");
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ psp->declargslot = &sp->destructor;
+ psp->decllinenoslot = &sp->destLineno;
+ psp->insertLineMacro = 1;
+ psp->state = WAITING_FOR_DECL_ARG;
+ }
+ break;
+ case WAITING_FOR_DATATYPE_SYMBOL:
+ if( !isalpha(x[0]) ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+        "Symbol name missing after %%type keyword");
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ psp->declargslot = &sp->datatype;
+ psp->insertLineMacro = 0;
+ psp->state = WAITING_FOR_DECL_ARG;
+ }
+ break;
+ case WAITING_FOR_PRECEDENCE_SYMBOL:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( isupper(x[0]) ){
+ struct symbol *sp;
+ sp = Symbol_new(x);
+ if( sp->prec>=0 ){
+ ErrorMsg(psp->filename,psp->tokenlineno,
+          "Symbol \"%s\" has already been given a precedence.",x);
+ psp->errorcnt++;
+ }else{
+ sp->prec = psp->preccounter;
+ sp->assoc = psp->declassoc;
+ }
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Can't assign a precedence to \"%s\".",x);
+ psp->errorcnt++;
+ }
+ break;
+ case WAITING_FOR_DECL_ARG:
+ if( x[0]=='{' || x[0]=='\"' || isalnum(x[0]) ){
+ char *zOld, *zNew, *zBuf, *z;
+ int nOld, n, nLine, nNew, nBack;
+ int addLineMacro;
+ char zLine[50];
+ zNew = x;
+ if( zNew[0]=='"' || zNew[0]=='{' ) zNew++;
+ nNew = strlen(zNew);
+ if( *psp->declargslot ){
+ zOld = *psp->declargslot;
+ }else{
+ zOld = "";
+ }
+ nOld = strlen(zOld);
+ n = nOld + nNew + 20;
+ addLineMacro = psp->insertLineMacro &&
+ (psp->decllinenoslot==0 || psp->decllinenoslot[0]!=0);
+ if( addLineMacro ){
+ for(z=psp->filename, nBack=0; *z; z++){
+ if( *z=='\\' ) nBack++;
+ }
+ sprintf(zLine, "#line %d ", psp->tokenlineno);
+ nLine = strlen(zLine);
+ n += nLine + strlen(psp->filename) + nBack;
+ }
+ *psp->declargslot = zBuf = realloc(*psp->declargslot, n);
+ zBuf += nOld;
+ if( addLineMacro ){
+ if( nOld && zBuf[-1]!='\n' ){
+ *(zBuf++) = '\n';
+ }
+ memcpy(zBuf, zLine, nLine);
+ zBuf += nLine;
+ *(zBuf++) = '"';
+ for(z=psp->filename; *z; z++){
+ if( *z=='\\' ){
+ *(zBuf++) = '\\';
+ }
+ *(zBuf++) = *z;
+ }
+ *(zBuf++) = '"';
+ *(zBuf++) = '\n';
+ }
+ if( psp->decllinenoslot && psp->decllinenoslot[0]==0 ){
+ psp->decllinenoslot[0] = psp->tokenlineno;
+ }
+ memcpy(zBuf, zNew, nNew);
+ zBuf += nNew;
+ *zBuf = 0;
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else{
+ ErrorMsg(psp->filename,psp->tokenlineno,
+ "Illegal argument to %%%s: %s",psp->declkeyword,x);
+ psp->errorcnt++;
+ psp->state = RESYNC_AFTER_DECL_ERROR;
+ }
+ break;
+ case WAITING_FOR_FALLBACK_ID:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( !isupper(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%fallback argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ if( psp->fallback==0 ){
+ psp->fallback = sp;
+ }else if( sp->fallback ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "More than one fallback assigned to token %s", x);
+ psp->errorcnt++;
+ }else{
+ sp->fallback = psp->fallback;
+ psp->gp->has_fallback = 1;
+ }
+ }
+ break;
+ case WAITING_FOR_WILDCARD_ID:
+ if( x[0]=='.' ){
+ psp->state = WAITING_FOR_DECL_OR_RULE;
+ }else if( !isupper(x[0]) ){
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "%%wildcard argument \"%s\" should be a token", x);
+ psp->errorcnt++;
+ }else{
+ struct symbol *sp = Symbol_new(x);
+ if( psp->gp->wildcard==0 ){
+ psp->gp->wildcard = sp;
+ }else{
+ ErrorMsg(psp->filename, psp->tokenlineno,
+ "Extra wildcard to token: %s", x);
+ psp->errorcnt++;
+ }
+ }
+ break;
+ case RESYNC_AFTER_RULE_ERROR:
+/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+** break; */
+ case RESYNC_AFTER_DECL_ERROR:
+ if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
+ if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD;
+ break;
+ }
+}
+
+/* Run the preprocessor over the input file text. The global variables
+** azDefine[0] through azDefine[nDefine-1] contain the names of all defined
+** macros. This routine looks for "%ifdef" and "%ifndef" and "%endif" and
+** comments them out. Text in between is also commented out as appropriate.
+*/
+static void preprocess_input(char *z){
+ int i, j, k, n;
+ int exclude = 0;
+ int start = 0;
+ int lineno = 1;
+ int start_lineno = 1;
+ for(i=0; z[i]; i++){
+ if( z[i]=='\n' ) lineno++;
+ if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue;
+ if( strncmp(&z[i],"%endif",6)==0 && isspace(z[i+6]) ){
+ if( exclude ){
+ exclude--;
+ if( exclude==0 ){
+ for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
+ }
+ }
+ for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
+ }else if( (strncmp(&z[i],"%ifdef",6)==0 && isspace(z[i+6]))
+ || (strncmp(&z[i],"%ifndef",7)==0 && isspace(z[i+7])) ){
+ if( exclude ){
+ exclude++;
+ }else{
+ for(j=i+7; isspace(z[j]); j++){}
+ for(n=0; z[j+n] && !isspace(z[j+n]); n++){}
+ exclude = 1;
+ for(k=0; k<nDefine; k++){
+ if( strncmp(azDefine[k],&z[j],n)==0 && strlen(azDefine[k])==n ){
+ exclude = 0;
+ break;
+ }
+ }
+ if( z[i+3]=='n' ) exclude = !exclude;
+ if( exclude ){
+ start = i;
+ start_lineno = lineno;
+ }
+ }
+ for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
+ }
+ }
+ if( exclude ){
+ fprintf(stderr,"unterminated %%ifdef starting on line %d\n", start_lineno);
+ exit(1);
+ }
+}
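+
+/*
+** Illustrative sketch of the effect of preprocess_input().  Assuming the
+** (hypothetical) macro name EXAMPLE_MACRO is not among azDefine[]:
+**
+**      %ifdef EXAMPLE_MACRO          -> blanked out
+**      cmd ::= example.              -> blanked out
+**      %endif                        -> blanked out
+**      cmd ::= other.                -> left unchanged
+**
+** Excluded text is overwritten with spaces rather than removed, and the
+** newlines are kept, so line numbers reported for later errors stay
+** accurate.  If EXAMPLE_MACRO were defined, only the two directive lines
+** would be blanked.
+*/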
+
+/* In spite of its name, this function is really a scanner. It reads
+** in the entire input file (all at once) and then tokenizes it. Each
+** token is passed to the function "parseonetoken" which builds all
+** the appropriate data structures in the global state vector "gp".
+*/
+void Parse(gp)
+struct lemon *gp;
+{
+ struct pstate ps;
+ FILE *fp;
+ char *filebuf;
+ int filesize;
+ int lineno;
+ int c;
+ char *cp, *nextcp;
+ int startline = 0;
+
+ memset(&ps, '\0', sizeof(ps));
+ ps.gp = gp;
+ ps.filename = gp->filename;
+ ps.errorcnt = 0;
+ ps.state = INITIALIZE;
+
+ /* Begin by reading the input file */
+ fp = fopen(ps.filename,"rb");
+ if( fp==0 ){
+ ErrorMsg(ps.filename,0,"Can't open this file for reading.");
+ gp->errorcnt++;
+ return;
+ }
+ fseek(fp,0,2);
+ filesize = ftell(fp);
+ rewind(fp);
+ filebuf = (char *)malloc( filesize+1 );
+ if( filebuf==0 ){
+ ErrorMsg(ps.filename,0,"Can't allocate %d of memory to hold this file.",
+ filesize+1);
+ gp->errorcnt++;
+ return;
+ }
+ if( fread(filebuf,1,filesize,fp)!=filesize ){
+ ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.",
+ filesize);
+ free(filebuf);
+ gp->errorcnt++;
+ return;
+ }
+ fclose(fp);
+ filebuf[filesize] = 0;
+
+ /* Make an initial pass through the file to handle %ifdef and %ifndef */
+ preprocess_input(filebuf);
+
+ /* Now scan the text of the input file */
+ lineno = 1;
+ for(cp=filebuf; (c= *cp)!=0; ){
+ if( c=='\n' ) lineno++; /* Keep track of the line number */
+ if( isspace(c) ){ cp++; continue; } /* Skip all white space */
+ if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments */
+ cp+=2;
+ while( (c= *cp)!=0 && c!='\n' ) cp++;
+ continue;
+ }
+ if( c=='/' && cp[1]=='*' ){ /* Skip C style comments */
+ cp+=2;
+ while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){
+ if( c=='\n' ) lineno++;
+ cp++;
+ }
+ if( c ) cp++;
+ continue;
+ }
+ ps.tokenstart = cp; /* Mark the beginning of the token */
+ ps.tokenlineno = lineno; /* Linenumber on which token begins */
+ if( c=='\"' ){ /* String literals */
+ cp++;
+ while( (c= *cp)!=0 && c!='\"' ){
+ if( c=='\n' ) lineno++;
+ cp++;
+ }
+ if( c==0 ){
+ ErrorMsg(ps.filename,startline,
+"String starting on this line is not terminated before the end of the file.");
+ ps.errorcnt++;
+ nextcp = cp;
+ }else{
+ nextcp = cp+1;
+ }
+ }else if( c=='{' ){ /* A block of C code */
+ int level;
+ cp++;
+ for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){
+ if( c=='\n' ) lineno++;
+ else if( c=='{' ) level++;
+ else if( c=='}' ) level--;
+ else if( c=='/' && cp[1]=='*' ){ /* Skip comments */
+ int prevc;
+ cp = &cp[2];
+ prevc = 0;
+ while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){
+ if( c=='\n' ) lineno++;
+ prevc = c;
+ cp++;
+ }
+ }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
+ cp = &cp[2];
+ while( (c= *cp)!=0 && c!='\n' ) cp++;
+ if( c ) lineno++;
+      }else if( c=='\'' || c=='\"' ){            /* String and character literals */
+ int startchar, prevc;
+ startchar = c;
+ prevc = 0;
+ for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){
+ if( c=='\n' ) lineno++;
+ if( prevc=='\\' ) prevc = 0;
+ else prevc = c;
+ }
+ }
+ }
+ if( c==0 ){
+ ErrorMsg(ps.filename,ps.tokenlineno,
+"C code starting on this line is not terminated before the end of the file.");
+ ps.errorcnt++;
+ nextcp = cp;
+ }else{
+ nextcp = cp+1;
+ }
+ }else if( isalnum(c) ){ /* Identifiers */
+ while( (c= *cp)!=0 && (isalnum(c) || c=='_') ) cp++;
+ nextcp = cp;
+ }else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */
+ cp += 3;
+ nextcp = cp;
+ }else if( (c=='/' || c=='|') && isalpha(cp[1]) ){
+ cp += 2;
+ while( (c = *cp)!=0 && (isalnum(c) || c=='_') ) cp++;
+ nextcp = cp;
+ }else{ /* All other (one character) operators */
+ cp++;
+ nextcp = cp;
+ }
+ c = *cp;
+ *cp = 0; /* Null terminate the token */
+ parseonetoken(&ps); /* Parse the token */
+ *cp = c; /* Restore the buffer */
+ cp = nextcp;
+ }
+ free(filebuf); /* Release the buffer after parsing */
+ gp->rule = ps.firstrule;
+ gp->errorcnt = ps.errorcnt;
+}
+/*************************** From the file "plink.c" *********************/
+/*
+** Routines processing configuration follow-set propagation links
+** in the LEMON parser generator.
+*/
+static struct plink *plink_freelist = 0;
+
+/* Allocate a new plink */
+struct plink *Plink_new(){
+ struct plink *new;
+
+ if( plink_freelist==0 ){
+ int i;
+ int amt = 100;
+ plink_freelist = (struct plink *)calloc( amt, sizeof(struct plink) );
+ if( plink_freelist==0 ){
+ fprintf(stderr,
+ "Unable to allocate memory for a new follow-set propagation link.\n");
+ exit(1);
+ }
+ for(i=0; i<amt-1; i++) plink_freelist[i].next = &plink_freelist[i+1];
+ plink_freelist[amt-1].next = 0;
+ }
+ new = plink_freelist;
+ plink_freelist = plink_freelist->next;
+ return new;
+}
+
+/* Add a plink to a plink list */
+void Plink_add(plpp,cfp)
+struct plink **plpp;
+struct config *cfp;
+{
+ struct plink *new;
+ new = Plink_new();
+ new->next = *plpp;
+ *plpp = new;
+ new->cfp = cfp;
+}
+
+/* Transfer every plink on the list "from" to the list "to" */
+void Plink_copy(to,from)
+struct plink **to;
+struct plink *from;
+{
+ struct plink *nextpl;
+ while( from ){
+ nextpl = from->next;
+ from->next = *to;
+ *to = from;
+ from = nextpl;
+ }
+}
+
+/* Delete every plink on the list */
+void Plink_delete(plp)
+struct plink *plp;
+{
+ struct plink *nextpl;
+
+ while( plp ){
+ nextpl = plp->next;
+ plp->next = plink_freelist;
+ plink_freelist = plp;
+ plp = nextpl;
+ }
+}
+/*********************** From the file "report.c" **************************/
+/*
+** Procedures for generating reports and tables in the LEMON parser generator.
+*/
+
+/* Generate a filename with the given suffix. Space to hold the
+** name comes from malloc() and must be freed by the calling
+** function.
+*/
+PRIVATE char *file_makename(lemp,suffix)
+struct lemon *lemp;
+char *suffix;
+{
+ char *name;
+ char *cp;
+
+ name = malloc( strlen(lemp->filename) + strlen(suffix) + 5 );
+ if( name==0 ){
+ fprintf(stderr,"Can't allocate space for a filename.\n");
+ exit(1);
+ }
+ strcpy(name,lemp->filename);
+ cp = strrchr(name,'.');
+ if( cp ) *cp = 0;
+ strcat(name,suffix);
+ return name;
+}
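+
+/*
+** For example (illustrative only): if lemp->filename is "gram.y" and the
+** suffix is ".c", the name returned is "gram.c".  A filename containing
+** no "." simply has the suffix appended.
+*/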
+
+/* Open a file with a name based on the name of the input file,
+** but with a different (specified) suffix, and return a pointer
+** to the stream */
+PRIVATE FILE *file_open(lemp,suffix,mode)
+struct lemon *lemp;
+char *suffix;
+char *mode;
+{
+ FILE *fp;
+
+ if( lemp->outname ) free(lemp->outname);
+ lemp->outname = file_makename(lemp, suffix);
+ fp = fopen(lemp->outname,mode);
+ if( fp==0 && *mode=='w' ){
+ fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname);
+ lemp->errorcnt++;
+ return 0;
+ }
+ return fp;
+}
+
+/* Duplicate the input file without comments and without actions
+** on rules */
+void Reprint(lemp)
+struct lemon *lemp;
+{
+ struct rule *rp;
+ struct symbol *sp;
+ int i, j, maxlen, len, ncolumns, skip;
+ printf("// Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename);
+ maxlen = 10;
+ for(i=0; i<lemp->nsymbol; i++){
+ sp = lemp->symbols[i];
+ len = strlen(sp->name);
+ if( len>maxlen ) maxlen = len;
+ }
+ ncolumns = 76/(maxlen+5);
+ if( ncolumns<1 ) ncolumns = 1;
+ skip = (lemp->nsymbol + ncolumns - 1)/ncolumns;
+ for(i=0; i<skip; i++){
+ printf("//");
+ for(j=i; j<lemp->nsymbol; j+=skip){
+ sp = lemp->symbols[j];
+ assert( sp->index==j );
+ printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name);
+ }
+ printf("\n");
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ printf("%s",rp->lhs->name);
+ /* if( rp->lhsalias ) printf("(%s)",rp->lhsalias); */
+ printf(" ::=");
+ for(i=0; i<rp->nrhs; i++){
+ sp = rp->rhs[i];
+ printf(" %s", sp->name);
+ if( sp->type==MULTITERMINAL ){
+ for(j=1; j<sp->nsubsym; j++){
+ printf("|%s", sp->subsym[j]->name);
+ }
+ }
+ /* if( rp->rhsalias[i] ) printf("(%s)",rp->rhsalias[i]); */
+ }
+ printf(".");
+ if( rp->precsym ) printf(" [%s]",rp->precsym->name);
+ /* if( rp->code ) printf("\n %s",rp->code); */
+ printf("\n");
+ }
+}
+
+void ConfigPrint(fp,cfp)
+FILE *fp;
+struct config *cfp;
+{
+ struct rule *rp;
+ struct symbol *sp;
+ int i, j;
+ rp = cfp->rp;
+ fprintf(fp,"%s ::=",rp->lhs->name);
+ for(i=0; i<=rp->nrhs; i++){
+ if( i==cfp->dot ) fprintf(fp," *");
+ if( i==rp->nrhs ) break;
+ sp = rp->rhs[i];
+ fprintf(fp," %s", sp->name);
+ if( sp->type==MULTITERMINAL ){
+ for(j=1; j<sp->nsubsym; j++){
+ fprintf(fp,"|%s",sp->subsym[j]->name);
+ }
+ }
+ }
+}
+
+/* #define TEST */
+#if 0
+/* Print a set */
+PRIVATE void SetPrint(out,set,lemp)
+FILE *out;
+char *set;
+struct lemon *lemp;
+{
+ int i;
+ char *spacer;
+ spacer = "";
+ fprintf(out,"%12s[","");
+ for(i=0; i<lemp->nterminal; i++){
+ if( SetFind(set,i) ){
+ fprintf(out,"%s%s",spacer,lemp->symbols[i]->name);
+ spacer = " ";
+ }
+ }
+ fprintf(out,"]\n");
+}
+
+/* Print a plink chain */
+PRIVATE void PlinkPrint(out,plp,tag)
+FILE *out;
+struct plink *plp;
+char *tag;
+{
+ while( plp ){
+ fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->statenum);
+ ConfigPrint(out,plp->cfp);
+ fprintf(out,"\n");
+ plp = plp->next;
+ }
+}
+#endif
+
+/* Print an action to the given file stream. Return FALSE if
+** nothing was actually printed.
+*/
+int PrintAction(struct action *ap, FILE *fp, int indent){
+ int result = 1;
+ switch( ap->type ){
+ case SHIFT:
+ fprintf(fp,"%*s shift %d",indent,ap->sp->name,ap->x.stp->statenum);
+ break;
+ case REDUCE:
+ fprintf(fp,"%*s reduce %d",indent,ap->sp->name,ap->x.rp->index);
+ break;
+ case ACCEPT:
+ fprintf(fp,"%*s accept",indent,ap->sp->name);
+ break;
+ case ERROR:
+ fprintf(fp,"%*s error",indent,ap->sp->name);
+ break;
+ case SRCONFLICT:
+ case RRCONFLICT:
+ fprintf(fp,"%*s reduce %-3d ** Parsing conflict **",
+ indent,ap->sp->name,ap->x.rp->index);
+ break;
+ case SSCONFLICT:
+ fprintf(fp,"%*s shift %d ** Parsing conflict **",
+ indent,ap->sp->name,ap->x.stp->statenum);
+ break;
+ case SH_RESOLVED:
+ case RD_RESOLVED:
+ case NOT_USED:
+ result = 0;
+ break;
+ }
+ return result;
+}
+
+/* Generate the "y.output" log file */
+void ReportOutput(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct state *stp;
+ struct config *cfp;
+ struct action *ap;
+ FILE *fp;
+
+ fp = file_open(lemp,".out","wb");
+ if( fp==0 ) return;
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ fprintf(fp,"State %d:\n",stp->statenum);
+ if( lemp->basisflag ) cfp=stp->bp;
+ else cfp=stp->cfp;
+ while( cfp ){
+ char buf[20];
+ if( cfp->dot==cfp->rp->nrhs ){
+ sprintf(buf,"(%d)",cfp->rp->index);
+ fprintf(fp," %5s ",buf);
+ }else{
+ fprintf(fp," ");
+ }
+ ConfigPrint(fp,cfp);
+ fprintf(fp,"\n");
+#if 0
+ SetPrint(fp,cfp->fws,lemp);
+ PlinkPrint(fp,cfp->fplp,"To ");
+ PlinkPrint(fp,cfp->bplp,"From");
+#endif
+ if( lemp->basisflag ) cfp=cfp->bp;
+ else cfp=cfp->next;
+ }
+ fprintf(fp,"\n");
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( PrintAction(ap,fp,30) ) fprintf(fp,"\n");
+ }
+ fprintf(fp,"\n");
+ }
+ fprintf(fp, "----------------------------------------------------\n");
+ fprintf(fp, "Symbols:\n");
+ for(i=0; i<lemp->nsymbol; i++){
+ int j;
+ struct symbol *sp;
+
+ sp = lemp->symbols[i];
+ fprintf(fp, " %3d: %s", i, sp->name);
+ if( sp->type==NONTERMINAL ){
+ fprintf(fp, ":");
+ if( sp->lambda ){
+ fprintf(fp, " <lambda>");
+ }
+ for(j=0; j<lemp->nterminal; j++){
+ if( sp->firstset && SetFind(sp->firstset, j) ){
+ fprintf(fp, " %s", lemp->symbols[j]->name);
+ }
+ }
+ }
+ fprintf(fp, "\n");
+ }
+ fclose(fp);
+ return;
+}
+
+/* Search for the file "name" which is in the same directory as
+** the executable */
+PRIVATE char *pathsearch(argv0,name,modemask)
+char *argv0;
+char *name;
+int modemask;
+{
+ char *pathlist;
+ char *path,*cp;
+ char c;
+
+#ifdef __WIN32__
+ cp = strrchr(argv0,'\\');
+#else
+ cp = strrchr(argv0,'/');
+#endif
+ if( cp ){
+ c = *cp;
+ *cp = 0;
+ path = (char *)malloc( strlen(argv0) + strlen(name) + 2 );
+ if( path ) sprintf(path,"%s/%s",argv0,name);
+ *cp = c;
+ }else{
+ extern char *getenv();
+ pathlist = getenv("PATH");
+ if( pathlist==0 ) pathlist = ".:/bin:/usr/bin";
+ path = (char *)malloc( strlen(pathlist)+strlen(name)+2 );
+ if( path!=0 ){
+ while( *pathlist ){
+ cp = strchr(pathlist,':');
+ if( cp==0 ) cp = &pathlist[strlen(pathlist)];
+ c = *cp;
+ *cp = 0;
+ sprintf(path,"%s/%s",pathlist,name);
+ *cp = c;
+ if( c==0 ) pathlist = "";
+ else pathlist = &cp[1];
+ if( access(path,modemask)==0 ) break;
+ }
+ }
+ }
+ return path;
+}
+
+/* Given an action, compute the integer value for that action
+** which is to be put in the action table of the generated machine.
+** Return negative if no action should be generated.
+*/
+PRIVATE int compute_action(lemp,ap)
+struct lemon *lemp;
+struct action *ap;
+{
+ int act;
+ switch( ap->type ){
+ case SHIFT: act = ap->x.stp->statenum; break;
+ case REDUCE: act = ap->x.rp->index + lemp->nstate; break;
+ case ERROR: act = lemp->nstate + lemp->nrule; break;
+ case ACCEPT: act = lemp->nstate + lemp->nrule + 1; break;
+ default: act = -1; break;
+ }
+ return act;
+}
+
+#define LINESIZE 1000
+/* The next cluster of routines is for reading the template file
+** and writing the results to the generated parser */
+/* The first function transfers data from "in" to "out" until
+** a line is seen which begins with "%%". The line number is
+** tracked.
+**
+** If name!=0, then any word that begins with "Parse" is changed to
+** begin with *name instead.
+*/
+PRIVATE void tplt_xfer(name,in,out,lineno)
+char *name;
+FILE *in;
+FILE *out;
+int *lineno;
+{
+ int i, iStart;
+ char line[LINESIZE];
+ while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
+ (*lineno)++;
+ iStart = 0;
+ if( name ){
+ for(i=0; line[i]; i++){
+ if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0
+ && (i==0 || !isalpha(line[i-1]))
+ ){
+ if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]);
+ fprintf(out,"%s",name);
+ i += 4;
+ iStart = i+1;
+ }
+ }
+ }
+ fprintf(out,"%s",&line[iStart]);
+ }
+}
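+
+/*
+** For example (illustrative only): with name set to "sqlite3Parser", a
+** template line containing "ParseAlloc(" is copied through as
+** "sqlite3ParserAlloc(", while an occurrence like "yyParser" is left
+** alone because the character preceding "Parse" is alphabetic.
+*/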
+
+/* The next function finds the template file and opens it, returning
+** a pointer to the opened file. */
+PRIVATE FILE *tplt_open(lemp)
+struct lemon *lemp;
+{
+ static char templatename[] = "lempar.c";
+ char buf[1000];
+ FILE *in;
+ char *tpltname;
+ char *cp;
+
+ cp = strrchr(lemp->filename,'.');
+ if( cp ){
+ sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
+ }else{
+ sprintf(buf,"%s.lt",lemp->filename);
+ }
+ if( access(buf,004)==0 ){
+ tpltname = buf;
+ }else if( access(templatename,004)==0 ){
+ tpltname = templatename;
+ }else{
+ tpltname = pathsearch(lemp->argv0,templatename,0);
+ }
+ if( tpltname==0 ){
+ fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
+ templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ in = fopen(tpltname,"rb");
+ if( in==0 ){
+ fprintf(stderr,"Can't open the template file \"%s\".\n",templatename);
+ lemp->errorcnt++;
+ return 0;
+ }
+ return in;
+}
+
+/* Print a #line directive line to the output file. */
+PRIVATE void tplt_linedir(out,lineno,filename)
+FILE *out;
+int lineno;
+char *filename;
+{
+ fprintf(out,"#line %d \"",lineno);
+ while( *filename ){
+ if( *filename == '\\' ) putc('\\',out);
+ putc(*filename,out);
+ filename++;
+ }
+ fprintf(out,"\"\n");
+}
+
+/* Print a string to the file and keep the linenumber up to date */
+PRIVATE void tplt_print(out,lemp,str,lineno)
+FILE *out;
+struct lemon *lemp;
+char *str;
+int *lineno;
+{
+ if( str==0 ) return;
+ (*lineno)++;
+ while( *str ){
+ if( *str=='\n' ) (*lineno)++;
+ putc(*str,out);
+ str++;
+ }
+ if( str[-1]!='\n' ){
+ putc('\n',out);
+ (*lineno)++;
+ }
+ tplt_linedir(out,*lineno+2,lemp->outname);
+ (*lineno)+=2;
+ return;
+}
+
+/*
+** The following routine emits code for the destructor for the
+** symbol sp
+*/
+void emit_destructor_code(out,sp,lemp,lineno)
+FILE *out;
+struct symbol *sp;
+struct lemon *lemp;
+int *lineno;
+{
+ char *cp = 0;
+
+ int linecnt = 0;
+ if( sp->type==TERMINAL ){
+ cp = lemp->tokendest;
+ if( cp==0 ) return;
+ fprintf(out,"{\n"); (*lineno)++;
+ }else if( sp->destructor ){
+ cp = sp->destructor;
+ fprintf(out,"{\n"); (*lineno)++;
+ tplt_linedir(out,sp->destLineno,lemp->filename); (*lineno)++;
+ }else if( lemp->vardest ){
+ cp = lemp->vardest;
+ if( cp==0 ) return;
+ fprintf(out,"{\n"); (*lineno)++;
+ }else{
+ assert( 0 ); /* Cannot happen */
+ }
+ for(; *cp; cp++){
+ if( *cp=='$' && cp[1]=='$' ){
+ fprintf(out,"(yypminor->yy%d)",sp->dtnum);
+ cp++;
+ continue;
+ }
+ if( *cp=='\n' ) linecnt++;
+ fputc(*cp,out);
+ }
+ (*lineno) += 3 + linecnt;
+ fprintf(out,"\n");
+ tplt_linedir(out,*lineno,lemp->outname);
+ fprintf(out,"}\n");
+ return;
+}
+
+/*
+** Return TRUE (non-zero) if the given symbol has a destructor.
+*/
+int has_destructor(sp, lemp)
+struct symbol *sp;
+struct lemon *lemp;
+{
+ int ret;
+ if( sp->type==TERMINAL ){
+ ret = lemp->tokendest!=0;
+ }else{
+ ret = lemp->vardest!=0 || sp->destructor!=0;
+ }
+ return ret;
+}
+
+/*
+** Append text to a dynamically allocated string. If zText is 0 then
+** reset the string to be empty again. Always return the complete text
+** of the string (which is overwritten with each call).
+**
+** n bytes of zText are stored. If n==0 then all of zText up to the first
+** \000 terminator is stored. zText can contain up to two instances of
+** %d. The values of p1 and p2 are written into the first and second
+** %d.
+**
+** If n==-1, then the previous character is overwritten.
+*/
+PRIVATE char *append_str(char *zText, int n, int p1, int p2){
+ static char *z = 0;
+ static int alloced = 0;
+ static int used = 0;
+ int c;
+ char zInt[40];
+
+ if( zText==0 ){
+ used = 0;
+ return z;
+ }
+ if( n<=0 ){
+ if( n<0 ){
+ used += n;
+ assert( used>=0 );
+ }
+ n = strlen(zText);
+ }
+ if( n+sizeof(zInt)*2+used >= alloced ){
+ alloced = n + sizeof(zInt)*2 + used + 200;
+ z = realloc(z, alloced);
+ }
+ if( z==0 ) return "";
+ while( n-- > 0 ){
+ c = *(zText++);
+ if( c=='%' && n>0 && zText[0]=='d' ){
+ sprintf(zInt, "%d", p1);
+ p1 = p2;
+ strcpy(&z[used], zInt);
+ used += strlen(&z[used]);
+ zText++;
+ n--;
+ }else{
+ z[used++] = c;
+ }
+ }
+ z[used] = 0;
+ return z;
+}
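+
+/*
+** Illustrative examples of how append_str() behaves (with made-up
+** argument values):
+**
+**      append_str(0,0,0,0);                        reset the accumulator
+**      append_str("yymsp[%d].minor.yy%d",0,-2,5);  append "yymsp[-2].minor.yy5"
+**      append_str(0,0,0,0);                        return the text built so far
+*/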
+
+/*
+** zCode is a string that is the action associated with a rule. Expand
+** the symbols in this string so that they refer to elements of the parser
+** stack.
+*/
+PRIVATE void translate_code(struct lemon *lemp, struct rule *rp){
+ char *cp, *xp;
+ int i;
+ char lhsused = 0; /* True if the LHS element has been used */
+ char used[MAXRHS]; /* True for each RHS element which is used */
+
+ for(i=0; i<rp->nrhs; i++) used[i] = 0;
+ lhsused = 0;
+
+ if( rp->code==0 ){
+ rp->code = "\n";
+ rp->line = rp->ruleline;
+ }
+
+ append_str(0,0,0,0);
+ for(cp=rp->code; *cp; cp++){
+ if( isalpha(*cp) && (cp==rp->code || (!isalnum(cp[-1]) && cp[-1]!='_')) ){
+ char saved;
+ for(xp= &cp[1]; isalnum(*xp) || *xp=='_'; xp++);
+ saved = *xp;
+ *xp = 0;
+ if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){
+ append_str("yygotominor.yy%d",0,rp->lhs->dtnum,0);
+ cp = xp;
+ lhsused = 1;
+ }else{
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){
+ if( cp!=rp->code && cp[-1]=='@' ){
+            /* If the argument is of the form @X then substitute
+ ** the token number of X, not the value of X */
+ append_str("yymsp[%d].major",-1,i-rp->nrhs+1,0);
+ }else{
+ struct symbol *sp = rp->rhs[i];
+ int dtnum;
+ if( sp->type==MULTITERMINAL ){
+ dtnum = sp->subsym[0]->dtnum;
+ }else{
+ dtnum = sp->dtnum;
+ }
+ append_str("yymsp[%d].minor.yy%d",0,i-rp->nrhs+1, dtnum);
+ }
+ cp = xp;
+ used[i] = 1;
+ break;
+ }
+ }
+ }
+ *xp = saved;
+ }
+ append_str(cp, 1, 0, 0);
+ } /* End loop */
+
+ /* Check to make sure the LHS has been used */
+ if( rp->lhsalias && !lhsused ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label \"%s\" for \"%s(%s)\" is never used.",
+ rp->lhsalias,rp->lhs->name,rp->lhsalias);
+ lemp->errorcnt++;
+ }
+
+ /* Generate destructor code for RHS symbols which are not used in the
+ ** reduce code */
+ for(i=0; i<rp->nrhs; i++){
+ if( rp->rhsalias[i] && !used[i] ){
+ ErrorMsg(lemp->filename,rp->ruleline,
+ "Label %s for \"%s(%s)\" is never used.",
+ rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]);
+ lemp->errorcnt++;
+ }else if( rp->rhsalias[i]==0 ){
+ if( has_destructor(rp->rhs[i],lemp) ){
+ append_str(" yy_destructor(%d,&yymsp[%d].minor);\n", 0,
+ rp->rhs[i]->index,i-rp->nrhs+1);
+ }else{
+ /* No destructor defined for this term */
+ }
+ }
+ }
+ if( rp->code ){
+ cp = append_str(0,0,0,0);
+ rp->code = Strsafe(cp?cp:"");
+ }
+}
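+
+/*
+** Illustrative sketch of the translation performed above.  For a
+** hypothetical rule and action
+**
+**      expr(A) ::= expr(B) PLUS expr(C).  { A = B + C; }
+**
+** and assuming the nonterminal "expr" was assigned data type number 1,
+** the stored code becomes roughly
+**
+**      yygotominor.yy1 = yymsp[-2].minor.yy1 + yymsp[0].minor.yy1;
+**
+** i.e. the LHS label maps to yygotominor and each RHS label maps to the
+** parser stack slot yymsp[i - nrhs + 1].
+*/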
+
+/*
+** Generate code which executes when the rule "rp" is reduced. Write
+** the code to "out". Make sure lineno stays up-to-date.
+*/
+PRIVATE void emit_code(out,rp,lemp,lineno)
+FILE *out;
+struct rule *rp;
+struct lemon *lemp;
+int *lineno;
+{
+ char *cp;
+ int linecnt = 0;
+
+ /* Generate code to do the reduce action */
+ if( rp->code ){
+ tplt_linedir(out,rp->line,lemp->filename);
+ fprintf(out,"{%s",rp->code);
+ for(cp=rp->code; *cp; cp++){
+ if( *cp=='\n' ) linecnt++;
+ } /* End loop */
+ (*lineno) += 3 + linecnt;
+ fprintf(out,"}\n");
+ tplt_linedir(out,*lineno,lemp->outname);
+ } /* End if( rp->code ) */
+
+ return;
+}
+
+/*
+** Print the definition of the union used for the parser's data stack.
+** This union contains fields for every possible data type for tokens
+** and nonterminals. In the process of computing and printing this
+** union, also set the ".dtnum" field of every terminal and nonterminal
+** symbol.
+*/
+void print_stack_union(out,lemp,plineno,mhflag)
+FILE *out; /* The output stream */
+struct lemon *lemp; /* The main info structure for this parser */
+int *plineno; /* Pointer to the line number */
+int mhflag; /* True if generating makeheaders output */
+{
+ int lineno = *plineno; /* The line number of the output */
+ char **types; /* A hash table of datatypes */
+ int arraysize; /* Size of the "types" array */
+ int maxdtlength; /* Maximum length of any ".datatype" field. */
+ char *stddt; /* Standardized name for a datatype */
+ int i,j; /* Loop counters */
+ int hash; /* For hashing the name of a type */
+ char *name; /* Name of the parser */
+
+ /* Allocate and initialize types[] and allocate stddt[] */
+ arraysize = lemp->nsymbol * 2;
+ types = (char**)calloc( arraysize, sizeof(char*) );
+ for(i=0; i<arraysize; i++) types[i] = 0;
+ maxdtlength = 0;
+ if( lemp->vartype ){
+ maxdtlength = strlen(lemp->vartype);
+ }
+ for(i=0; i<lemp->nsymbol; i++){
+ int len;
+ struct symbol *sp = lemp->symbols[i];
+ if( sp->datatype==0 ) continue;
+ len = strlen(sp->datatype);
+ if( len>maxdtlength ) maxdtlength = len;
+ }
+ stddt = (char*)malloc( maxdtlength*2 + 1 );
+ if( types==0 || stddt==0 ){
+ fprintf(stderr,"Out of memory.\n");
+ exit(1);
+ }
+
+ /* Build a hash table of datatypes. The ".dtnum" field of each symbol
+ ** is filled in with the hash index plus 1. A ".dtnum" value of 0 is
+ ** used for terminal symbols. If there is no %default_type defined then
+ ** 0 is also used as the .dtnum value for nonterminals which do not specify
+ ** a datatype using the %type directive.
+ */
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ char *cp;
+ if( sp==lemp->errsym ){
+ sp->dtnum = arraysize+1;
+ continue;
+ }
+ if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){
+ sp->dtnum = 0;
+ continue;
+ }
+ cp = sp->datatype;
+ if( cp==0 ) cp = lemp->vartype;
+ j = 0;
+ while( isspace(*cp) ) cp++;
+ while( *cp ) stddt[j++] = *cp++;
+ while( j>0 && isspace(stddt[j-1]) ) j--;
+ stddt[j] = 0;
+ if( strcmp(stddt, lemp->tokentype)==0 ){
+ sp->dtnum = 0;
+ continue;
+ }
+ hash = 0;
+ for(j=0; stddt[j]; j++){
+ hash = hash*53 + stddt[j];
+ }
+ hash = (hash & 0x7fffffff)%arraysize;
+ while( types[hash] ){
+ if( strcmp(types[hash],stddt)==0 ){
+ sp->dtnum = hash + 1;
+ break;
+ }
+ hash++;
+ if( hash>=arraysize ) hash = 0;
+ }
+ if( types[hash]==0 ){
+ sp->dtnum = hash + 1;
+ types[hash] = (char*)malloc( strlen(stddt)+1 );
+ if( types[hash]==0 ){
+ fprintf(stderr,"Out of memory.\n");
+ exit(1);
+ }
+ strcpy(types[hash],stddt);
+ }
+ }
+
+ /* Print out the definition of YYTOKENTYPE and YYMINORTYPE */
+ name = lemp->name ? lemp->name : "Parse";
+ lineno = *plineno;
+ if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; }
+ fprintf(out,"#define %sTOKENTYPE %s\n",name,
+ lemp->tokentype?lemp->tokentype:"void*"); lineno++;
+ if( mhflag ){ fprintf(out,"#endif\n"); lineno++; }
+ fprintf(out,"typedef union {\n"); lineno++;
+ fprintf(out," %sTOKENTYPE yy0;\n",name); lineno++;
+ for(i=0; i<arraysize; i++){
+ if( types[i]==0 ) continue;
+ fprintf(out," %s yy%d;\n",types[i],i+1); lineno++;
+ free(types[i]);
+ }
+ if( lemp->errsym->useCnt ){
+ fprintf(out," int yy%d;\n",lemp->errsym->dtnum); lineno++;
+ }
+ free(stddt);
+ free(types);
+ fprintf(out,"} YYMINORTYPE;\n"); lineno++;
+ *plineno = lineno;
+}
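+
+/*
+** Illustrative sketch of the union emitted above for a hypothetical
+** grammar that declares "%token_type {Token}" and gives one nonterminal
+** the datatype "Expr*":
+**
+**      #define ParseTOKENTYPE Token
+**      typedef union {
+**        ParseTOKENTYPE yy0;
+**        Expr* yy7;
+**      } YYMINORTYPE;
+**
+** The yy numbers come from the datatype hash computed above, so the "7"
+** here is arbitrary; an extra "int" member is added when the error
+** symbol is actually used.
+*/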
+
+/*
+** Return the name of a C datatype able to represent values between
+** lwr and upr, inclusive.
+*/
+static const char *minimum_size_type(int lwr, int upr){
+ if( lwr>=0 ){
+ if( upr<=255 ){
+ return "unsigned char";
+ }else if( upr<65535 ){
+ return "unsigned short int";
+ }else{
+ return "unsigned int";
+ }
+ }else if( lwr>=-127 && upr<=127 ){
+ return "signed char";
+ }else if( lwr>=-32767 && upr<32767 ){
+ return "short";
+ }else{
+ return "int";
+ }
+}
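+
+/*
+** For example: minimum_size_type(0,255) returns "unsigned char",
+** minimum_size_type(0,1000) returns "unsigned short int", and
+** minimum_size_type(-5,1000) returns "short".
+*/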
+
+/*
+** Each state contains a set of token transactions and a set of
+** nonterminal transactions.  Each of these sets is represented by an
+** instance of the following structure.  An array of these structures is used
+** to order the creation of entries in the yy_action[] table.
+*/
+struct axset {
+ struct state *stp; /* A pointer to a state */
+ int isTkn; /* True to use tokens. False for non-terminals */
+ int nAction; /* Number of actions */
+};
+
+/*
+** Compare two axset structures for sorting purposes
+*/
+static int axset_compare(const void *a, const void *b){
+ struct axset *p1 = (struct axset*)a;
+ struct axset *p2 = (struct axset*)b;
+ return p2->nAction - p1->nAction;
+}
+
+/*
+** Write text on "out" that describes the rule "rp".
+*/
+static void writeRuleText(FILE *out, struct rule *rp){
+ int j;
+ fprintf(out,"%s ::=", rp->lhs->name);
+ for(j=0; j<rp->nrhs; j++){
+ struct symbol *sp = rp->rhs[j];
+ fprintf(out," %s", sp->name);
+ if( sp->type==MULTITERMINAL ){
+ int k;
+ for(k=1; k<sp->nsubsym; k++){
+ fprintf(out,"|%s",sp->subsym[k]->name);
+ }
+ }
+ }
+}
+
+
+/* Generate C source code for the parser */
+void ReportTable(lemp, mhflag)
+struct lemon *lemp;
+int mhflag; /* Output in makeheaders format if true */
+{
+ FILE *out, *in;
+ char line[LINESIZE];
+ int lineno;
+ struct state *stp;
+ struct action *ap;
+ struct rule *rp;
+ struct acttab *pActtab;
+ int i, j, n;
+ char *name;
+ int mnTknOfst, mxTknOfst;
+ int mnNtOfst, mxNtOfst;
+ struct axset *ax;
+
+ in = tplt_open(lemp);
+ if( in==0 ) return;
+ out = file_open(lemp,".c","wb");
+ if( out==0 ){
+ fclose(in);
+ return;
+ }
+ lineno = 1;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the include code, if any */
+ tplt_print(out,lemp,lemp->include,&lineno);
+ if( mhflag ){
+ char *name = file_makename(lemp, ".h");
+ fprintf(out,"#include \"%s\"\n", name); lineno++;
+ free(name);
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate #defines for all tokens */
+ if( mhflag ){
+ char *prefix;
+ fprintf(out,"#if INTERFACE\n"); lineno++;
+ if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
+ else prefix = "";
+ for(i=1; i<lemp->nterminal; i++){
+ fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ lineno++;
+ }
+ fprintf(out,"#endif\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the defines */
+ fprintf(out,"#define YYCODETYPE %s\n",
+ minimum_size_type(0, lemp->nsymbol+5)); lineno++;
+ fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol+1); lineno++;
+ fprintf(out,"#define YYACTIONTYPE %s\n",
+ minimum_size_type(0, lemp->nstate+lemp->nrule+5)); lineno++;
+ if( lemp->wildcard ){
+ fprintf(out,"#define YYWILDCARD %d\n",
+ lemp->wildcard->index); lineno++;
+ }
+ print_stack_union(out,lemp,&lineno,mhflag);
+ fprintf(out, "#ifndef YYSTACKDEPTH\n"); lineno++;
+ if( lemp->stacksize ){
+ fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++;
+ }else{
+ fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++;
+ }
+ fprintf(out, "#endif\n"); lineno++;
+ if( mhflag ){
+ fprintf(out,"#if INTERFACE\n"); lineno++;
+ }
+ name = lemp->name ? lemp->name : "Parse";
+ if( lemp->arg && lemp->arg[0] ){
+ int i;
+ i = strlen(lemp->arg);
+ while( i>=1 && isspace(lemp->arg[i-1]) ) i--;
+ while( i>=1 && (isalnum(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--;
+ fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++;
+ fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++;
+ fprintf(out,"#define %sARG_FETCH %s = yypParser->%s\n",
+ name,lemp->arg,&lemp->arg[i]); lineno++;
+ fprintf(out,"#define %sARG_STORE yypParser->%s = %s\n",
+ name,&lemp->arg[i],&lemp->arg[i]); lineno++;
+ }else{
+ fprintf(out,"#define %sARG_SDECL\n",name); lineno++;
+ fprintf(out,"#define %sARG_PDECL\n",name); lineno++;
+ fprintf(out,"#define %sARG_FETCH\n",name); lineno++;
+ fprintf(out,"#define %sARG_STORE\n",name); lineno++;
+ }
+ if( mhflag ){
+ fprintf(out,"#endif\n"); lineno++;
+ }
+ fprintf(out,"#define YYNSTATE %d\n",lemp->nstate); lineno++;
+ fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++;
+ if( lemp->errsym->useCnt ){
+ fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++;
+ fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++;
+ }
+ if( lemp->has_fallback ){
+ fprintf(out,"#define YYFALLBACK 1\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the action table and its associates:
+ **
+ ** yy_action[] A single table containing all actions.
+ ** yy_lookahead[] A table containing the lookahead for each entry in
+ ** yy_action. Used to detect hash collisions.
+ ** yy_shift_ofst[] For each state, the offset into yy_action for
+ ** shifting terminals.
+ ** yy_reduce_ofst[] For each state, the offset into yy_action for
+ ** shifting non-terminals after a reduce.
+ ** yy_default[] Default action for each state.
+ */
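+
+  /*
+  ** A rough sketch (not a copy of the lempar.c template code) of how the
+  ** generated parser consults these tables when looking up the action
+  ** for a terminal "token" in state "state":
+  **
+  **      i = yy_shift_ofst[state];
+  **      if( i==YY_SHIFT_USE_DFLT ) act = yy_default[state];
+  **      else if( yy_lookahead[i+token]==token ) act = yy_action[i+token];
+  **      else act = yy_default[state];
+  **
+  ** The same scheme, using yy_reduce_ofst[], applies to nonterminals
+  ** after a reduce.
+  */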
+
+ /* Compute the actions on all states and count them up */
+ ax = calloc(lemp->nstate*2, sizeof(ax[0]));
+ if( ax==0 ){
+ fprintf(stderr,"malloc failed\n");
+ exit(1);
+ }
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ ax[i*2].stp = stp;
+ ax[i*2].isTkn = 1;
+ ax[i*2].nAction = stp->nTknAct;
+ ax[i*2+1].stp = stp;
+ ax[i*2+1].isTkn = 0;
+ ax[i*2+1].nAction = stp->nNtAct;
+ }
+ mxTknOfst = mnTknOfst = 0;
+ mxNtOfst = mnNtOfst = 0;
+
+ /* Compute the action table. In order to try to keep the size of the
+ ** action table to a minimum, the heuristic of placing the largest action
+ ** sets first is used.
+ */
+ qsort(ax, lemp->nstate*2, sizeof(ax[0]), axset_compare);
+ pActtab = acttab_alloc();
+ for(i=0; i<lemp->nstate*2 && ax[i].nAction>0; i++){
+ stp = ax[i].stp;
+ if( ax[i].isTkn ){
+ for(ap=stp->ap; ap; ap=ap->next){
+ int action;
+ if( ap->sp->index>=lemp->nterminal ) continue;
+ action = compute_action(lemp, ap);
+ if( action<0 ) continue;
+ acttab_action(pActtab, ap->sp->index, action);
+ }
+ stp->iTknOfst = acttab_insert(pActtab);
+ if( stp->iTknOfst<mnTknOfst ) mnTknOfst = stp->iTknOfst;
+ if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst;
+ }else{
+ for(ap=stp->ap; ap; ap=ap->next){
+ int action;
+ if( ap->sp->index<lemp->nterminal ) continue;
+ if( ap->sp->index==lemp->nsymbol ) continue;
+ action = compute_action(lemp, ap);
+ if( action<0 ) continue;
+ acttab_action(pActtab, ap->sp->index, action);
+ }
+ stp->iNtOfst = acttab_insert(pActtab);
+ if( stp->iNtOfst<mnNtOfst ) mnNtOfst = stp->iNtOfst;
+ if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst;
+ }
+ }
+ free(ax);
+
+ /* Output the yy_action table */
+ fprintf(out,"static const YYACTIONTYPE yy_action[] = {\n"); lineno++;
+ n = acttab_size(pActtab);
+ for(i=j=0; i<n; i++){
+ int action = acttab_yyaction(pActtab, i);
+ if( action<0 ) action = lemp->nstate + lemp->nrule + 2;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", action);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_lookahead table */
+ fprintf(out,"static const YYCODETYPE yy_lookahead[] = {\n"); lineno++;
+ for(i=j=0; i<n; i++){
+ int la = acttab_yylookahead(pActtab, i);
+ if( la<0 ) la = lemp->nsymbol;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", la);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_shift_ofst[] table */
+ fprintf(out, "#define YY_SHIFT_USE_DFLT (%d)\n", mnTknOfst-1); lineno++;
+ n = lemp->nstate;
+ while( n>0 && lemp->sorted[n-1]->iTknOfst==NO_OFFSET ) n--;
+ fprintf(out, "#define YY_SHIFT_MAX %d\n", n-1); lineno++;
+ fprintf(out, "static const %s yy_shift_ofst[] = {\n",
+ minimum_size_type(mnTknOfst-1, mxTknOfst)); lineno++;
+ for(i=j=0; i<n; i++){
+ int ofst;
+ stp = lemp->sorted[i];
+ ofst = stp->iTknOfst;
+ if( ofst==NO_OFFSET ) ofst = mnTknOfst - 1;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", ofst);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the yy_reduce_ofst[] table */
+ fprintf(out, "#define YY_REDUCE_USE_DFLT (%d)\n", mnNtOfst-1); lineno++;
+ n = lemp->nstate;
+ while( n>0 && lemp->sorted[n-1]->iNtOfst==NO_OFFSET ) n--;
+ fprintf(out, "#define YY_REDUCE_MAX %d\n", n-1); lineno++;
+ fprintf(out, "static const %s yy_reduce_ofst[] = {\n",
+ minimum_size_type(mnNtOfst-1, mxNtOfst)); lineno++;
+ for(i=j=0; i<n; i++){
+ int ofst;
+ stp = lemp->sorted[i];
+ ofst = stp->iNtOfst;
+ if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1;
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", ofst);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+
+ /* Output the default action table */
+ fprintf(out, "static const YYACTIONTYPE yy_default[] = {\n"); lineno++;
+ n = lemp->nstate;
+ for(i=j=0; i<n; i++){
+ stp = lemp->sorted[i];
+ if( j==0 ) fprintf(out," /* %5d */ ", i);
+ fprintf(out, " %4d,", stp->iDflt);
+ if( j==9 || i==n-1 ){
+ fprintf(out, "\n"); lineno++;
+ j = 0;
+ }else{
+ j++;
+ }
+ }
+ fprintf(out, "};\n"); lineno++;
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the table of fallback tokens.
+ */
+ if( lemp->has_fallback ){
+ for(i=0; i<lemp->nterminal; i++){
+ struct symbol *p = lemp->symbols[i];
+ if( p->fallback==0 ){
+ fprintf(out, " 0, /* %10s => nothing */\n", p->name);
+ }else{
+ fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index,
+ p->name, p->fallback->name);
+ }
+ lineno++;
+ }
+ }
+ tplt_xfer(lemp->name, in, out, &lineno);
+
+ /* Generate a table containing the symbolic name of every symbol
+ */
+ for(i=0; i<lemp->nsymbol; i++){
+ sprintf(line,"\"%s\",",lemp->symbols[i]->name);
+ fprintf(out," %-15s",line);
+ if( (i&3)==3 ){ fprintf(out,"\n"); lineno++; }
+ }
+ if( (i&3)!=0 ){ fprintf(out,"\n"); lineno++; }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate a table containing a text string that describes every
+ ** rule in the rule set of the grammar. This information is used
+ ** when tracing REDUCE actions.
+ */
+ for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
+ assert( rp->index==i );
+ fprintf(out," /* %3d */ \"", i);
+ writeRuleText(out, rp);
+ fprintf(out,"\",\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes every time a symbol is popped from
+ ** the stack while processing errors or while destroying the parser.
+ ** (In other words, generate the %destructor actions)
+ */
+ if( lemp->tokendest ){
+ int once = 1;
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type!=TERMINAL ) continue;
+ if( once ){
+ fprintf(out, " /* TERMINAL Destructor */\n"); lineno++;
+ once = 0;
+ }
+ fprintf(out," case %d: /* %s */\n",
+ sp->index, sp->name); lineno++;
+ }
+ for(i=0; i<lemp->nsymbol && lemp->symbols[i]->type!=TERMINAL; i++);
+ if( i<lemp->nsymbol ){
+ emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ }
+ if( lemp->vardest ){
+ struct symbol *dflt_sp = 0;
+ int once = 1;
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type==TERMINAL ||
+ sp->index<=0 || sp->destructor!=0 ) continue;
+ if( once ){
+ fprintf(out, " /* Default NON-TERMINAL Destructor */\n"); lineno++;
+ once = 0;
+ }
+ fprintf(out," case %d: /* %s */\n",
+ sp->index, sp->name); lineno++;
+ dflt_sp = sp;
+ }
+ if( dflt_sp!=0 ){
+ emit_destructor_code(out,dflt_sp,lemp,&lineno);
+ }
+ fprintf(out," break;\n"); lineno++;
+ }
+ for(i=0; i<lemp->nsymbol; i++){
+ struct symbol *sp = lemp->symbols[i];
+ if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue;
+ fprintf(out," case %d: /* %s */\n",
+ sp->index, sp->name); lineno++;
+
+ /* Combine duplicate destructors into a single case */
+ for(j=i+1; j<lemp->nsymbol; j++){
+ struct symbol *sp2 = lemp->symbols[j];
+ if( sp2 && sp2->type!=TERMINAL && sp2->destructor
+ && sp2->dtnum==sp->dtnum
+ && strcmp(sp->destructor,sp2->destructor)==0 ){
+ fprintf(out," case %d: /* %s */\n",
+ sp2->index, sp2->name); lineno++;
+ sp2->destructor = 0;
+ }
+ }
+
+ emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes whenever the parser stack overflows */
+ tplt_print(out,lemp,lemp->overflow,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate the table of rule information
+ **
+ ** Note: This code depends on the fact that rules are numbered
+ ** sequentially beginning with 0.
+ */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ fprintf(out," { %d, %d },\n",rp->lhs->index,rp->nrhs); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes during each REDUCE action */
+ for(rp=lemp->rule; rp; rp=rp->next){
+ translate_code(lemp, rp);
+ }
+ for(rp=lemp->rule; rp; rp=rp->next){
+ struct rule *rp2;
+ if( rp->code==0 ) continue;
+ fprintf(out," case %d: /* ", rp->index);
+ writeRuleText(out, rp);
+ fprintf(out, " */\n"); lineno++;
+ for(rp2=rp->next; rp2; rp2=rp2->next){
+ if( rp2->code==rp->code ){
+ fprintf(out," case %d: /* ", rp2->index);
+ writeRuleText(out, rp2);
+ fprintf(out," */\n"); lineno++;
+ rp2->code = 0;
+ }
+ }
+ emit_code(out,rp,lemp,&lineno);
+ fprintf(out," break;\n"); lineno++;
+ }
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes if a parse fails */
+ tplt_print(out,lemp,lemp->failure,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes when a syntax error occurs */
+ tplt_print(out,lemp,lemp->error,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Generate code which executes when the parser accepts its input */
+ tplt_print(out,lemp,lemp->accept,&lineno);
+ tplt_xfer(lemp->name,in,out,&lineno);
+
+ /* Append any additional code the user desires */
+ tplt_print(out,lemp,lemp->extracode,&lineno);
+
+ fclose(in);
+ fclose(out);
+ return;
+}
+
+/* Generate a header file for the parser */
+void ReportHeader(lemp)
+struct lemon *lemp;
+{
+ FILE *out, *in;
+ char *prefix;
+ char line[LINESIZE];
+ char pattern[LINESIZE];
+ int i;
+
+ if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
+ else prefix = "";
+ in = file_open(lemp,".h","rb");
+ if( in ){
+ for(i=1; i<lemp->nterminal && fgets(line,LINESIZE,in); i++){
+ sprintf(pattern,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ if( strcmp(line,pattern) ) break;
+ }
+ fclose(in);
+ if( i==lemp->nterminal ){
+ /* No change in the file. Don't rewrite it. */
+ return;
+ }
+ }
+ out = file_open(lemp,".h","wb");
+ if( out ){
+ for(i=1; i<lemp->nterminal; i++){
+ fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
+ }
+ fclose(out);
+ }
+ return;
+}
+
+/* Reduce the size of the action tables, if possible, by making use
+** of defaults.
+**
+** In this version, we take the most frequent REDUCE action and make
+** it the default. Except, there is no default if the wildcard token
+** is a possible look-ahead.
+*/
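+/* For instance, if a state's actions were SHIFT on LP, REDUCE by rule 7
+** on COMMA, REDUCE by rule 7 on RP, and REDUCE by rule 2 on SEMI, then
+** rule 7 is the most frequent REDUCE and its two actions collapse into a
+** single action on the "{default}" pseudo-symbol.  (The token names in
+** this example are illustrative only.)  No default is created at all if
+** the state can also shift the wildcard token.
+*/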
+void CompressTables(lemp)
+struct lemon *lemp;
+{
+ struct state *stp;
+ struct action *ap, *ap2;
+ struct rule *rp, *rp2, *rbest;
+ int nbest, n;
+ int i;
+ int usesWildcard;
+
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ nbest = 0;
+ rbest = 0;
+ usesWildcard = 0;
+
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( ap->type==SHIFT && ap->sp==lemp->wildcard ){
+ usesWildcard = 1;
+ }
+ if( ap->type!=REDUCE ) continue;
+ rp = ap->x.rp;
+ if( rp->lhsStart ) continue;
+ if( rp==rbest ) continue;
+ n = 1;
+ for(ap2=ap->next; ap2; ap2=ap2->next){
+ if( ap2->type!=REDUCE ) continue;
+ rp2 = ap2->x.rp;
+ if( rp2==rbest ) continue;
+ if( rp2==rp ) n++;
+ }
+ if( n>nbest ){
+ nbest = n;
+ rbest = rp;
+ }
+ }
+
+ /* Do not make a default if the number of rules to default
+ ** is not at least 1 or if the wildcard token is a possible
+ ** lookahead.
+ */
+ if( nbest<1 || usesWildcard ) continue;
+
+
+ /* Combine matching REDUCE actions into a single default */
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( ap->type==REDUCE && ap->x.rp==rbest ) break;
+ }
+ assert( ap );
+ ap->sp = Symbol_new("{default}");
+ for(ap=ap->next; ap; ap=ap->next){
+ if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED;
+ }
+ stp->ap = Action_sort(stp->ap);
+ }
+}
+
+
+/*
+** Compare two states for sorting purposes. The smaller state is the
+** one with the most non-terminal actions. If they have the same number
+** of non-terminal actions, then the smaller is the one with the most
+** token actions.
+*/
+static int stateResortCompare(const void *a, const void *b){
+ const struct state *pA = *(const struct state**)a;
+ const struct state *pB = *(const struct state**)b;
+ int n;
+
+ n = pB->nNtAct - pA->nNtAct;
+ if( n==0 ){
+ n = pB->nTknAct - pA->nTknAct;
+ }
+ return n;
+}
+
+
+/*
+** Renumber and resort states so that states with fewer choices
+** occur at the end. Except, keep state 0 as the first state.
+*/
+void ResortStates(lemp)
+struct lemon *lemp;
+{
+ int i;
+ struct state *stp;
+ struct action *ap;
+
+ for(i=0; i<lemp->nstate; i++){
+ stp = lemp->sorted[i];
+ stp->nTknAct = stp->nNtAct = 0;
+ stp->iDflt = lemp->nstate + lemp->nrule;
+ stp->iTknOfst = NO_OFFSET;
+ stp->iNtOfst = NO_OFFSET;
+ for(ap=stp->ap; ap; ap=ap->next){
+ if( compute_action(lemp,ap)>=0 ){
+ if( ap->sp->index<lemp->nterminal ){
+ stp->nTknAct++;
+ }else if( ap->sp->index<lemp->nsymbol ){
+ stp->nNtAct++;
+ }else{
+ stp->iDflt = compute_action(lemp, ap);
+ }
+ }
+ }
+ }
+ qsort(&lemp->sorted[1], lemp->nstate-1, sizeof(lemp->sorted[0]),
+ stateResortCompare);
+ for(i=0; i<lemp->nstate; i++){
+ lemp->sorted[i]->statenum = i;
+ }
+}
+
+
+/***************** From the file "set.c" ************************************/
+/*
+** Set manipulation routines for the LEMON parser generator.
+*/
+
+static int size = 0;
+
+/* Set the set size */
+void SetSize(n)
+int n;
+{
+ size = n+1;
+}
+
+/* Allocate a new set */
+char *SetNew(){
+ char *s;
+ s = (char*)calloc( size, 1);
+ if( s==0 ){
+ extern void memory_error();
+ memory_error();
+ }
+ return s;
+}
+
+/* Deallocate a set */
+void SetFree(s)
+char *s;
+{
+ free(s);
+}
+
+/* Add a new element to the set. Return TRUE if the element was added
+** and FALSE if it was already there. */
+int SetAdd(s,e)
+char *s;
+int e;
+{
+ int rv;
+ assert( e>=0 && e<size );
+ rv = s[e];
+ s[e] = 1;
+ return !rv;
+}
+
+/* Add every element of s2 to s1. Return TRUE if s1 changes. */
+int SetUnion(s1,s2)
+char *s1;
+char *s2;
+{
+ int i, progress;
+ progress = 0;
+ for(i=0; i<size; i++){
+ if( s2[i]==0 ) continue;
+ if( s1[i]==0 ){
+ progress = 1;
+ s1[i] = 1;
+ }
+ }
+ return progress;
+}
+/********************** From the file "table.c" ****************************/
+/*
+** All code in this file has been automatically generated
+** from a specification in the file
+** "table.q"
+** by the associative array code building program "aagen".
+** Do not edit this file! Instead, edit the specification
+** file, then rerun aagen.
+*/
+/*
+** Code for processing tables in the LEMON parser generator.
+*/
+
+PRIVATE int strhash(x)
+char *x;
+{
+ int h = 0;
+ while( *x) h = h*13 + *(x++);
+ return h;
+}
+
+/* Works like strdup, sort of. Save a string in malloced memory, but
+** keep strings in a table so that the same string is not in more
+** than one place.
+*/
+char *Strsafe(y)
+char *y;
+{
+ char *z;
+
+ if( y==0 ) return 0;
+ z = Strsafe_find(y);
+ if( z==0 && (z=malloc( strlen(y)+1 ))!=0 ){
+ strcpy(z,y);
+ Strsafe_insert(z);
+ }
+ MemoryCheck(z);
+ return z;
+}
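+
+/* For example, two calls with equal strings yield the same interned copy
+** (the string used here is illustrative):
+**
+**     char *a = Strsafe("abc");
+**     char *b = Strsafe("abc");
+**     assert( a==b );        (both pointers reference the single saved copy)
+*/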
+
+/* There is one instance of the following structure for each
+** associative array of type "x1".
+*/
+struct s_x1 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x1node *tbl; /* The data stored here */
+ struct s_x1node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x1".
+*/
+typedef struct s_x1node {
+ char *data; /* The data */
+ struct s_x1node *next; /* Next entry with the same hash */
+ struct s_x1node **from; /* Previous link */
+} x1node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x1 *x1a;
+
+/* Allocate a new associative array */
+void Strsafe_init(){
+ if( x1a ) return;
+ x1a = (struct s_x1*)malloc( sizeof(struct s_x1) );
+ if( x1a ){
+ x1a->size = 1024;
+ x1a->count = 0;
+ x1a->tbl = (x1node*)malloc(
+ (sizeof(x1node) + sizeof(x1node*))*1024 );
+ if( x1a->tbl==0 ){
+ free(x1a);
+ x1a = 0;
+ }else{
+ int i;
+ x1a->ht = (x1node**)&(x1a->tbl[1024]);
+ for(i=0; i<1024; i++) x1a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Strsafe_insert(data)
+char *data;
+{
+ x1node *np;
+ int h;
+ int ph;
+
+ if( x1a==0 ) return 0;
+ ph = strhash(data);
+ h = ph & (x1a->size-1);
+ np = x1a->ht[h];
+ while( np ){
+ if( strcmp(np->data,data)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x1a->count>=x1a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x1 array;
+ array.size = size = x1a->size*2;
+ array.count = x1a->count;
+ array.tbl = (x1node*)malloc(
+ (sizeof(x1node) + sizeof(x1node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x1node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x1a->count; i++){
+ x1node *oldnp, *newnp;
+ oldnp = &(x1a->tbl[i]);
+ h = strhash(oldnp->data) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x1a->tbl);
+ *x1a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x1a->size-1);
+ np = &(x1a->tbl[x1a->count++]);
+ np->data = data;
+ if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next);
+ np->next = x1a->ht[h];
+ x1a->ht[h] = np;
+ np->from = &(x1a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+char *Strsafe_find(key)
+char *key;
+{
+ int h;
+ x1node *np;
+
+ if( x1a==0 ) return 0;
+ h = strhash(key) & (x1a->size-1);
+ np = x1a->ht[h];
+ while( np ){
+ if( strcmp(np->data,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Return a pointer to the (terminal or nonterminal) symbol "x".
+** Create a new symbol if this is the first time "x" has been seen.
+*/
+struct symbol *Symbol_new(x)
+char *x;
+{
+ struct symbol *sp;
+
+ sp = Symbol_find(x);
+ if( sp==0 ){
+ sp = (struct symbol *)calloc(1, sizeof(struct symbol) );
+ MemoryCheck(sp);
+ sp->name = Strsafe(x);
+ sp->type = isupper(*x) ? TERMINAL : NONTERMINAL;
+ sp->rule = 0;
+ sp->fallback = 0;
+ sp->prec = -1;
+ sp->assoc = UNK;
+ sp->firstset = 0;
+ sp->lambda = LEMON_FALSE;
+ sp->destructor = 0;
+ sp->destLineno = 0;
+ sp->datatype = 0;
+ sp->useCnt = 0;
+ Symbol_insert(sp,sp->name);
+ }
+ sp->useCnt++;
+ return sp;
+}
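+
+/* For example, assuming "expr" has not been seen before, the first call
+** below creates the symbol and the second returns the same structure,
+** with useCnt incremented on each call (the name is illustrative):
+**
+**     struct symbol *s1 = Symbol_new("expr");
+**     struct symbol *s2 = Symbol_new("expr");
+**     assert( s1==s2 && s1->useCnt==2 );
+*/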
+
+/* Compare two symbols for sorting purposes
+**
+** Symbols that begin with upper case letters (terminals or tokens)
+** must sort before symbols that begin with lower case letters
+** (non-terminals). Other than that, the order does not matter.
+**
+** We find experimentally that leaving the symbols in their original
+** order (the order they appeared in the grammar file) gives the
+** smallest parser tables in SQLite.
+*/
+int Symbolcmpp(struct symbol **a, struct symbol **b){
+ int i1 = (**a).index + 10000000*((**a).name[0]>'Z');
+ int i2 = (**b).index + 10000000*((**b).name[0]>'Z');
+ return i1-i2;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x2".
+*/
+struct s_x2 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x2node *tbl; /* The data stored here */
+ struct s_x2node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x2".
+*/
+typedef struct s_x2node {
+ struct symbol *data; /* The data */
+ char *key; /* The key */
+ struct s_x2node *next; /* Next entry with the same hash */
+ struct s_x2node **from; /* Previous link */
+} x2node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x2 *x2a;
+
+/* Allocate a new associative array */
+void Symbol_init(){
+ if( x2a ) return;
+ x2a = (struct s_x2*)malloc( sizeof(struct s_x2) );
+ if( x2a ){
+ x2a->size = 128;
+ x2a->count = 0;
+ x2a->tbl = (x2node*)malloc(
+ (sizeof(x2node) + sizeof(x2node*))*128 );
+ if( x2a->tbl==0 ){
+ free(x2a);
+ x2a = 0;
+ }else{
+ int i;
+ x2a->ht = (x2node**)&(x2a->tbl[128]);
+ for(i=0; i<128; i++) x2a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Symbol_insert(data,key)
+struct symbol *data;
+char *key;
+{
+ x2node *np;
+ int h;
+ int ph;
+
+ if( x2a==0 ) return 0;
+ ph = strhash(key);
+ h = ph & (x2a->size-1);
+ np = x2a->ht[h];
+ while( np ){
+ if( strcmp(np->key,key)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x2a->count>=x2a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x2 array;
+ array.size = size = x2a->size*2;
+ array.count = x2a->count;
+ array.tbl = (x2node*)malloc(
+ (sizeof(x2node) + sizeof(x2node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x2node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x2a->count; i++){
+ x2node *oldnp, *newnp;
+ oldnp = &(x2a->tbl[i]);
+ h = strhash(oldnp->key) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->key = oldnp->key;
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x2a->tbl);
+ *x2a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x2a->size-1);
+ np = &(x2a->tbl[x2a->count++]);
+ np->key = key;
+ np->data = data;
+ if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next);
+ np->next = x2a->ht[h];
+ x2a->ht[h] = np;
+ np->from = &(x2a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct symbol *Symbol_find(key)
+char *key;
+{
+ int h;
+ x2node *np;
+
+ if( x2a==0 ) return 0;
+ h = strhash(key) & (x2a->size-1);
+ np = x2a->ht[h];
+ while( np ){
+ if( strcmp(np->key,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Return the n-th data. Return NULL if n is out of range. */
+struct symbol *Symbol_Nth(n)
+int n;
+{
+ struct symbol *data;
+ if( x2a && n>0 && n<=x2a->count ){
+ data = x2a->tbl[n-1].data;
+ }else{
+ data = 0;
+ }
+ return data;
+}
+
+/* Return the size of the array */
+int Symbol_count()
+{
+ return x2a ? x2a->count : 0;
+}
+
+/* Return an array of pointers to all data in the table.
+** The array is obtained from malloc. Return NULL if there is a memory
+** allocation problem or if the array is empty. */
+struct symbol **Symbol_arrayof()
+{
+ struct symbol **array;
+ int i,size;
+ if( x2a==0 ) return 0;
+ size = x2a->count;
+ array = (struct symbol **)calloc(size, sizeof(struct symbol *));
+ if( array ){
+ for(i=0; i<size; i++) array[i] = x2a->tbl[i].data;
+ }
+ return array;
+}
+
+/* Compare two configurations */
+int Configcmp(a,b)
+struct config *a;
+struct config *b;
+{
+ int x;
+ x = a->rp->index - b->rp->index;
+ if( x==0 ) x = a->dot - b->dot;
+ return x;
+}
+
+/* Compare two states */
+PRIVATE int statecmp(a,b)
+struct config *a;
+struct config *b;
+{
+ int rc;
+ for(rc=0; rc==0 && a && b; a=a->bp, b=b->bp){
+ rc = a->rp->index - b->rp->index;
+ if( rc==0 ) rc = a->dot - b->dot;
+ }
+ if( rc==0 ){
+ if( a ) rc = 1;
+ if( b ) rc = -1;
+ }
+ return rc;
+}
+
+/* Hash a state */
+PRIVATE int statehash(a)
+struct config *a;
+{
+ int h=0;
+ while( a ){
+ h = h*571 + a->rp->index*37 + a->dot;
+ a = a->bp;
+ }
+ return h;
+}
+
+/* Allocate a new state structure */
+struct state *State_new()
+{
+ struct state *new;
+ new = (struct state *)calloc(1, sizeof(struct state) );
+ MemoryCheck(new);
+ return new;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x3".
+*/
+struct s_x3 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x3node *tbl; /* The data stored here */
+ struct s_x3node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x3".
+*/
+typedef struct s_x3node {
+ struct state *data; /* The data */
+ struct config *key; /* The key */
+ struct s_x3node *next; /* Next entry with the same hash */
+ struct s_x3node **from; /* Previous link */
+} x3node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x3 *x3a;
+
+/* Allocate a new associative array */
+void State_init(){
+ if( x3a ) return;
+ x3a = (struct s_x3*)malloc( sizeof(struct s_x3) );
+ if( x3a ){
+ x3a->size = 128;
+ x3a->count = 0;
+ x3a->tbl = (x3node*)malloc(
+ (sizeof(x3node) + sizeof(x3node*))*128 );
+ if( x3a->tbl==0 ){
+ free(x3a);
+ x3a = 0;
+ }else{
+ int i;
+ x3a->ht = (x3node**)&(x3a->tbl[128]);
+ for(i=0; i<128; i++) x3a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int State_insert(data,key)
+struct state *data;
+struct config *key;
+{
+ x3node *np;
+ int h;
+ int ph;
+
+ if( x3a==0 ) return 0;
+ ph = statehash(key);
+ h = ph & (x3a->size-1);
+ np = x3a->ht[h];
+ while( np ){
+ if( statecmp(np->key,key)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x3a->count>=x3a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x3 array;
+ array.size = size = x3a->size*2;
+ array.count = x3a->count;
+ array.tbl = (x3node*)malloc(
+ (sizeof(x3node) + sizeof(x3node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x3node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x3a->count; i++){
+ x3node *oldnp, *newnp;
+ oldnp = &(x3a->tbl[i]);
+ h = statehash(oldnp->key) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->key = oldnp->key;
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x3a->tbl);
+ *x3a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x3a->size-1);
+ np = &(x3a->tbl[x3a->count++]);
+ np->key = key;
+ np->data = data;
+ if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next);
+ np->next = x3a->ht[h];
+ x3a->ht[h] = np;
+ np->from = &(x3a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct state *State_find(key)
+struct config *key;
+{
+ int h;
+ x3node *np;
+
+ if( x3a==0 ) return 0;
+ h = statehash(key) & (x3a->size-1);
+ np = x3a->ht[h];
+ while( np ){
+ if( statecmp(np->key,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Return an array of pointers to all data in the table.
+** The array is obtained from malloc. Return NULL if there is a memory
+** allocation problem or if the array is empty. */
+struct state **State_arrayof()
+{
+ struct state **array;
+ int i,size;
+ if( x3a==0 ) return 0;
+ size = x3a->count;
+ array = (struct state **)malloc( sizeof(struct state *)*size );
+ if( array ){
+ for(i=0; i<size; i++) array[i] = x3a->tbl[i].data;
+ }
+ return array;
+}
+
+/* Hash a configuration */
+PRIVATE int confighash(a)
+struct config *a;
+{
+ int h=0;
+ h = h*571 + a->rp->index*37 + a->dot;
+ return h;
+}
+
+/* There is one instance of the following structure for each
+** associative array of type "x4".
+*/
+struct s_x4 {
+ int size; /* The number of available slots. */
+ /* Must be a power of 2 greater than or */
+ /* equal to 1 */
+ int count; /* Number of slots currently filled */
+ struct s_x4node *tbl; /* The data stored here */
+ struct s_x4node **ht; /* Hash table for lookups */
+};
+
+/* There is one instance of this structure for every data element
+** in an associative array of type "x4".
+*/
+typedef struct s_x4node {
+ struct config *data; /* The data */
+ struct s_x4node *next; /* Next entry with the same hash */
+ struct s_x4node **from; /* Previous link */
+} x4node;
+
+/* There is only one instance of the array, which is the following */
+static struct s_x4 *x4a;
+
+/* Allocate a new associative array */
+void Configtable_init(){
+ if( x4a ) return;
+ x4a = (struct s_x4*)malloc( sizeof(struct s_x4) );
+ if( x4a ){
+ x4a->size = 64;
+ x4a->count = 0;
+ x4a->tbl = (x4node*)malloc(
+ (sizeof(x4node) + sizeof(x4node*))*64 );
+ if( x4a->tbl==0 ){
+ free(x4a);
+ x4a = 0;
+ }else{
+ int i;
+ x4a->ht = (x4node**)&(x4a->tbl[64]);
+ for(i=0; i<64; i++) x4a->ht[i] = 0;
+ }
+ }
+}
+/* Insert a new record into the array. Return TRUE if successful.
+** Prior data with the same key is NOT overwritten */
+int Configtable_insert(data)
+struct config *data;
+{
+ x4node *np;
+ int h;
+ int ph;
+
+ if( x4a==0 ) return 0;
+ ph = confighash(data);
+ h = ph & (x4a->size-1);
+ np = x4a->ht[h];
+ while( np ){
+ if( Configcmp(np->data,data)==0 ){
+ /* An existing entry with the same key is found. */
+ /* Fail because overwrite is not allowed. */
+ return 0;
+ }
+ np = np->next;
+ }
+ if( x4a->count>=x4a->size ){
+ /* Need to make the hash table bigger */
+ int i,size;
+ struct s_x4 array;
+ array.size = size = x4a->size*2;
+ array.count = x4a->count;
+ array.tbl = (x4node*)malloc(
+ (sizeof(x4node) + sizeof(x4node*))*size );
+ if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
+ array.ht = (x4node**)&(array.tbl[size]);
+ for(i=0; i<size; i++) array.ht[i] = 0;
+ for(i=0; i<x4a->count; i++){
+ x4node *oldnp, *newnp;
+ oldnp = &(x4a->tbl[i]);
+ h = confighash(oldnp->data) & (size-1);
+ newnp = &(array.tbl[i]);
+ if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
+ newnp->next = array.ht[h];
+ newnp->data = oldnp->data;
+ newnp->from = &(array.ht[h]);
+ array.ht[h] = newnp;
+ }
+ free(x4a->tbl);
+ *x4a = array;
+ }
+ /* Insert the new data */
+ h = ph & (x4a->size-1);
+ np = &(x4a->tbl[x4a->count++]);
+ np->data = data;
+ if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next);
+ np->next = x4a->ht[h];
+ x4a->ht[h] = np;
+ np->from = &(x4a->ht[h]);
+ return 1;
+}
+
+/* Return a pointer to data assigned to the given key. Return NULL
+** if no such key. */
+struct config *Configtable_find(key)
+struct config *key;
+{
+ int h;
+ x4node *np;
+
+ if( x4a==0 ) return 0;
+ h = confighash(key) & (x4a->size-1);
+ np = x4a->ht[h];
+ while( np ){
+ if( Configcmp(np->data,key)==0 ) break;
+ np = np->next;
+ }
+ return np ? np->data : 0;
+}
+
+/* Remove all data from the table. Pass each data to the function "f"
+** as it is removed. ("f" may be null to avoid this step.) */
+void Configtable_clear(f)
+int(*f)(/* struct config * */);
+{
+ int i;
+ if( x4a==0 || x4a->count==0 ) return;
+ if( f ) for(i=0; i<x4a->count; i++) (*f)(x4a->tbl[i].data);
+ for(i=0; i<x4a->size; i++) x4a->ht[i] = 0;
+ x4a->count = 0;
+ return;
+}
diff --git a/third_party/sqlite/tool/lempar.c b/third_party/sqlite/tool/lempar.c
new file mode 100755
index 0000000..f5fafd4
--- /dev/null
+++ b/third_party/sqlite/tool/lempar.c
@@ -0,0 +1,813 @@
+/* Driver template for the LEMON parser generator.
+** The author disclaims copyright to this source code.
+*/
+/* First off, code is included that follows the "include" declaration
+** in the input grammar file. */
+#include <stdio.h>
+%%
+/* Next is all token values, in a form suitable for use by makeheaders.
+** This section will be null unless lemon is run with the -m switch.
+*/
+/*
+** These constants (all generated automatically by the parser generator)
+** specify the various kinds of tokens (terminals) that the parser
+** understands.
+**
+** Each symbol here is a terminal symbol in the grammar.
+*/
+%%
+/* Make sure the INTERFACE macro is defined.
+*/
+#ifndef INTERFACE
+# define INTERFACE 1
+#endif
+/* The next thing included is a series of defines which control
+** various aspects of the generated parser.
+** YYCODETYPE is the data type used for storing terminal
+** and nonterminal numbers. "unsigned char" is
+** used if there are fewer than 250 terminals
+** and nonterminals. "int" is used otherwise.
+** YYNOCODE is a number of type YYCODETYPE which corresponds
+** to no legal terminal or nonterminal number. This
+** number is used to fill in empty slots of the hash
+** table.
+** YYFALLBACK If defined, this indicates that one or more tokens
+** have fall-back values which should be used if the
+** original value of the token will not parse.
+** YYACTIONTYPE is the data type used for storing terminal
+** and nonterminal numbers. "unsigned char" is
+** used if there are fewer than 250 rules and
+** states combined. "int" is used otherwise.
+** ParseTOKENTYPE is the data type used for minor tokens given
+** directly to the parser from the tokenizer.
+** YYMINORTYPE is the data type used for all minor tokens.
+** This is typically a union of many types, one of
+** which is ParseTOKENTYPE. The entry in the union
+** for base tokens is called "yy0".
+** YYSTACKDEPTH is the maximum depth of the parser's stack. If
+** zero the stack is dynamically sized using realloc()
+** ParseARG_SDECL A static variable declaration for the %extra_argument
+** ParseARG_PDECL A parameter declaration for the %extra_argument
+** ParseARG_STORE Code to store %extra_argument into yypParser
+** ParseARG_FETCH Code to extract %extra_argument from yypParser
+** YYNSTATE the combined number of states.
+** YYNRULE the number of rules in the grammar
+** YYERRORSYMBOL is the code number of the error symbol. If not
+** defined, then do no error processing.
+*/
+%%
+#define YY_NO_ACTION (YYNSTATE+YYNRULE+2)
+#define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1)
+#define YY_ERROR_ACTION (YYNSTATE+YYNRULE)
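+
+/* As a concrete illustration only, for a small grammar the code generator
+** might substitute definitions along these lines (the exact types and
+** values are grammar-dependent; the numbers and the Token type below are
+** hypothetical):
+**
+**     #define YYCODETYPE unsigned char
+**     #define YYNOCODE 44
+**     #define YYACTIONTYPE unsigned char
+**     #define ParseTOKENTYPE Token
+**     #define YYSTACKDEPTH 100
+**     #define YYNSTATE 60
+**     #define YYNRULE 35
+*/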
+
+/* The yyzerominor constant is used to initialize instances of
+** YYMINORTYPE objects to zero. */
+static const YYMINORTYPE yyzerominor;
+
+/* Next are the tables used to determine what action to take based on the
+** current state and lookahead token. These tables are used to implement
+** functions that take a state number and lookahead value and return an
+** action integer.
+**
+** Suppose the action integer is N. Then the action is determined as
+** follows
+**
+** 0 <= N < YYNSTATE Shift N. That is, push the lookahead
+** token onto the stack and goto state N.
+**
+** YYNSTATE <= N < YYNSTATE+YYNRULE Reduce by rule N-YYNSTATE.
+**
+** N == YYNSTATE+YYNRULE A syntax error has occurred.
+**
+** N == YYNSTATE+YYNRULE+1 The parser accepts its input.
+**
+** N == YYNSTATE+YYNRULE+2 No such action. Denotes unused
+** slots in the yy_action[] table.
+**
+** The action table is constructed as a single large table named yy_action[].
+** Given state S and lookahead X, the action is computed as
+**
+** yy_action[ yy_shift_ofst[S] + X ]
+**
+** If the index value yy_shift_ofst[S]+X is out of range or if the value
+** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S]
+** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table
+** and that yy_default[S] should be used instead.
+**
+** The formula above is for computing the action when the lookahead is
+** a terminal symbol. If the lookahead is a non-terminal (as occurs after
+** a reduce action) then the yy_reduce_ofst[] array is used in place of
+** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of
+** YY_SHIFT_USE_DFLT.
+**
+** The following are the tables generated in this section:
+**
+** yy_action[] A single table containing all actions.
+** yy_lookahead[] A table containing the lookahead for each entry in
+** yy_action. Used to detect hash collisions.
+** yy_shift_ofst[] For each state, the offset into yy_action for
+** shifting terminals.
+** yy_reduce_ofst[] For each state, the offset into yy_action for
+** shifting non-terminals after a reduce.
+** yy_default[] Default action for each state.
+*/
+%%
+#define YY_SZ_ACTTAB (int)(sizeof(yy_action)/sizeof(yy_action[0]))
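+
+/* In other words, given state S and lookahead token X, and ignoring the
+** fallback and wildcard handling that yy_find_shift_action() adds below,
+** the lookup described above amounts to roughly the following (yy_lookup
+** is a hypothetical name used only for illustration):
+**
+**     int yy_lookup(int S, int X){
+**       int i;
+**       if( S>YY_SHIFT_MAX || (i = yy_shift_ofst[S])==YY_SHIFT_USE_DFLT ){
+**         return yy_default[S];
+**       }
+**       i += X;
+**       if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=X ){
+**         return yy_default[S];
+**       }
+**       return yy_action[i];
+**     }
+*/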
+
+/* The next table maps tokens into fallback tokens. If a construct
+** like the following:
+**
+** %fallback ID X Y Z.
+**
+** appears in the grammar, then ID becomes a fallback token for X, Y,
+** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
+** but it does not parse, the type of the token is changed to ID and
+** the parse is retried before an error is thrown.
+*/
+#ifdef YYFALLBACK
+static const YYCODETYPE yyFallback[] = {
+%%
+};
+#endif /* YYFALLBACK */
+
+/* The following structure represents a single element of the
+** parser's stack. Information stored includes:
+**
+** + The state number for the parser at this level of the stack.
+**
+** + The value of the token stored at this level of the stack.
+** (In other words, the "major" token.)
+**
+** + The semantic value stored at this level of the stack. This is
+** the information used by the action routines in the grammar.
+** It is sometimes called the "minor" token.
+*/
+struct yyStackEntry {
+ YYACTIONTYPE stateno; /* The state-number */
+ YYCODETYPE major; /* The major token value. This is the code
+ ** number for the token at this stack level */
+ YYMINORTYPE minor; /* The user-supplied minor token value. This
+ ** is the value of the token */
+};
+typedef struct yyStackEntry yyStackEntry;
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct yyParser {
+ int yyidx; /* Index of top element in stack */
+#ifdef YYTRACKMAXSTACKDEPTH
+ int yyidxMax; /* Maximum value of yyidx */
+#endif
+ int yyerrcnt; /* Shifts left before out of the error state */
+ ParseARG_SDECL /* A place to hold %extra_argument */
+#if YYSTACKDEPTH<=0
+ int yystksz; /* Current size of the stack */
+ yyStackEntry *yystack; /* The parser's stack */
+#else
+ yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
+#endif
+};
+typedef struct yyParser yyParser;
+
+#ifndef NDEBUG
+#include <stdio.h>
+static FILE *yyTraceFILE = 0;
+static char *yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message. Tracing is turned off
+** by making either argument NULL
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+** If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+** line of trace output. If NULL, then tracing is
+** turned off.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+void ParseTrace(FILE *TraceFILE, char *zTracePrompt){
+ yyTraceFILE = TraceFILE;
+ yyTracePrompt = zTracePrompt;
+ if( yyTraceFILE==0 ) yyTracePrompt = 0;
+ else if( yyTracePrompt==0 ) yyTraceFILE = 0;
+}
+#endif /* NDEBUG */
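+
+/* For example, a host application might enable tracing during debugging
+** with a call such as the following (the prompt string is arbitrary):
+**
+**     ParseTrace(stderr, "parser> ");
+*/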
+
+#ifndef NDEBUG
+/* For tracing shifts, the names of all terminals and nonterminals
+** are required. The following table supplies these names */
+static const char *const yyTokenName[] = {
+%%
+};
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/* For tracing reduce actions, the names of all rules are required.
+*/
+static const char *const yyRuleName[] = {
+%%
+};
+#endif /* NDEBUG */
+
+
+#if YYSTACKDEPTH<=0
+/*
+** Try to increase the size of the parser stack.
+*/
+static void yyGrowStack(yyParser *p){
+ int newSize;
+ yyStackEntry *pNew;
+
+ newSize = p->yystksz*2 + 100;
+ pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
+ if( pNew ){
+ p->yystack = pNew;
+ p->yystksz = newSize;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack grows to %d entries!\n",
+ yyTracePrompt, p->yystksz);
+ }
+#endif
+ }
+}
+#endif
+
+/*
+** This function allocates a new parser.
+** The only argument is a pointer to a function which works like
+** malloc.
+**
+** Inputs:
+** A pointer to the function used to allocate memory.
+**
+** Outputs:
+** A pointer to a parser. This pointer is used in subsequent calls
+** to Parse and ParseFree.
+*/
+void *ParseAlloc(void *(*mallocProc)(size_t)){
+ yyParser *pParser;
+ pParser = (yyParser*)(*mallocProc)( (size_t)sizeof(yyParser) );
+ if( pParser ){
+ pParser->yyidx = -1;
+#ifdef YYTRACKMAXSTACKDEPTH
+ pParser->yyidxMax = 0;
+#endif
+#if YYSTACKDEPTH<=0
+ yyGrowStack(pParser);
+#endif
+ }
+ return pParser;
+}
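+
+/* For example:
+**
+**     void *pParser = ParseAlloc( malloc );
+**
+** The returned pointer is later released with ParseFree(pParser, free).
+*/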
+
+/* The following function deletes the value associated with a
+** symbol. The symbol can be either a terminal or nonterminal.
+** "yymajor" is the symbol code, and "yypminor" is a pointer to
+** the value.
+*/
+static void yy_destructor(
+ yyParser *yypParser, /* The parser */
+ YYCODETYPE yymajor, /* Type code for object to destroy */
+ YYMINORTYPE *yypminor /* The object to be destroyed */
+){
+ ParseARG_FETCH;
+ switch( yymajor ){
+ /* Here is inserted the actions which take place when a
+ ** terminal or non-terminal is destroyed. This can happen
+ ** when the symbol is popped from the stack during a
+ ** reduce or during error processing or when a parser is
+ ** being destroyed before it is finished parsing.
+ **
+ ** Note: during a reduce, the only symbols destroyed are those
+ ** which appear on the RHS of the rule, but which are not used
+ ** inside the C code.
+ */
+%%
+ default: break; /* If no destructor action specified: do nothing */
+ }
+}
+
+/*
+** Pop the parser's stack once.
+**
+** If there is a destructor routine associated with the token which
+** is popped from the stack, then call it.
+**
+** Return the major token number for the symbol popped.
+*/
+static int yy_pop_parser_stack(yyParser *pParser){
+ YYCODETYPE yymajor;
+ yyStackEntry *yytos = &pParser->yystack[pParser->yyidx];
+
+ if( pParser->yyidx<0 ) return 0;
+#ifndef NDEBUG
+ if( yyTraceFILE && pParser->yyidx>=0 ){
+ fprintf(yyTraceFILE,"%sPopping %s\n",
+ yyTracePrompt,
+ yyTokenName[yytos->major]);
+ }
+#endif
+ yymajor = yytos->major;
+ yy_destructor(pParser, yymajor, &yytos->minor);
+ pParser->yyidx--;
+ return yymajor;
+}
+
+/*
+** Deallocate and destroy a parser. Destructors are all called for
+** all stack elements before shutting the parser down.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser. This should be a pointer
+** obtained from ParseAlloc.
+** <li> A pointer to a function used to reclaim memory obtained
+** from malloc.
+** </ul>
+*/
+void ParseFree(
+ void *p, /* The parser to be deleted */
+ void (*freeProc)(void*) /* Function used to reclaim memory */
+){
+ yyParser *pParser = (yyParser*)p;
+ if( pParser==0 ) return;
+ while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser);
+#if YYSTACKDEPTH<=0
+ free(pParser->yystack);
+#endif
+ (*freeProc)((void*)pParser);
+}
+
+/*
+** Return the peak depth of the stack for a parser.
+*/
+#ifdef YYTRACKMAXSTACKDEPTH
+int ParseStackPeak(void *p){
+ yyParser *pParser = (yyParser*)p;
+ return pParser->yyidxMax;
+}
+#endif
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+**
+** If the look-ahead token is YYNOCODE, then check to see if the action is
+** independent of the look-ahead. If it is, return the action, otherwise
+** return YY_NO_ACTION.
+*/
+static int yy_find_shift_action(
+ yyParser *pParser, /* The parser */
+ YYCODETYPE iLookAhead /* The look-ahead token */
+){
+ int i;
+ int stateno = pParser->yystack[pParser->yyidx].stateno;
+
+ if( stateno>YY_SHIFT_MAX || (i = yy_shift_ofst[stateno])==YY_SHIFT_USE_DFLT ){
+ return yy_default[stateno];
+ }
+ assert( iLookAhead!=YYNOCODE );
+ i += iLookAhead;
+ if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){
+ if( iLookAhead>0 ){
+#ifdef YYFALLBACK
+ int iFallback; /* Fallback token */
+ if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
+ && (iFallback = yyFallback[iLookAhead])!=0 ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+ }
+#endif
+ return yy_find_shift_action(pParser, iFallback);
+ }
+#endif
+#ifdef YYWILDCARD
+ {
+ int j = i - iLookAhead + YYWILDCARD;
+ if( j>=0 && j<YY_SZ_ACTTAB && yy_lookahead[j]==YYWILDCARD ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[YYWILDCARD]);
+ }
+#endif /* NDEBUG */
+ return yy_action[j];
+ }
+ }
+#endif /* YYWILDCARD */
+ }
+ return yy_default[stateno];
+ }else{
+ return yy_action[i];
+ }
+}
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+**
+** If the look-ahead token is YYNOCODE, then check to see if the action is
+** independent of the look-ahead. If it is, return the action, otherwise
+** return YY_NO_ACTION.
+*/
+static int yy_find_reduce_action(
+ int stateno, /* Current state number */
+ YYCODETYPE iLookAhead /* The look-ahead token */
+){
+ int i;
+#ifdef YYERRORSYMBOL
+ if( stateno>YY_REDUCE_MAX ){
+ return yy_default[stateno];
+ }
+#else
+ assert( stateno<=YY_REDUCE_MAX );
+#endif
+ i = yy_reduce_ofst[stateno];
+ assert( i!=YY_REDUCE_USE_DFLT );
+ assert( iLookAhead!=YYNOCODE );
+ i += iLookAhead;
+#ifdef YYERRORSYMBOL
+ if( i<0 || i>=YY_SZ_ACTTAB || yy_lookahead[i]!=iLookAhead ){
+ return yy_default[stateno];
+ }
+#else
+ assert( i>=0 && i<YY_SZ_ACTTAB );
+ assert( yy_lookahead[i]==iLookAhead );
+#endif
+ return yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
+ ParseARG_FETCH;
+ yypParser->yyidx--;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will execute if the parser
+ ** stack ever overflows */
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument var */
+}
+
+/*
+** Perform a shift action.
+*/
+static void yy_shift(
+ yyParser *yypParser, /* The parser to be shifted */
+ int yyNewState, /* The new state to shift in */
+ int yyMajor, /* The major token to shift in */
+ YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */
+){
+ yyStackEntry *yytos;
+ yypParser->yyidx++;
+#ifdef YYTRACKMAXSTACKDEPTH
+ if( yypParser->yyidx>yypParser->yyidxMax ){
+ yypParser->yyidxMax = yypParser->yyidx;
+ }
+#endif
+#if YYSTACKDEPTH>0
+ if( yypParser->yyidx>=YYSTACKDEPTH ){
+ yyStackOverflow(yypParser, yypMinor);
+ return;
+ }
+#else
+ if( yypParser->yyidx>=yypParser->yystksz ){
+ yyGrowStack(yypParser);
+ if( yypParser->yyidx>=yypParser->yystksz ){
+ yyStackOverflow(yypParser, yypMinor);
+ return;
+ }
+ }
+#endif
+ yytos = &yypParser->yystack[yypParser->yyidx];
+ yytos->stateno = yyNewState;
+ yytos->major = yyMajor;
+ yytos->minor = *yypMinor;
+#ifndef NDEBUG
+ if( yyTraceFILE && yypParser->yyidx>0 ){
+ int i;
+ fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState);
+ fprintf(yyTraceFILE,"%sStack:",yyTracePrompt);
+ for(i=1; i<=yypParser->yyidx; i++)
+ fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]);
+ fprintf(yyTraceFILE,"\n");
+ }
+#endif
+}
+
+/* The following table contains information about every rule that
+** is used during the reduce.
+*/
+static const struct {
+ YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
+ unsigned char nrhs; /* Number of right-hand side symbols in the rule */
+} yyRuleInfo[] = {
+%%
+};
+
+static void yy_accept(yyParser*); /* Forward Declaration */
+
+/*
+** Perform a reduce action and the shift that must immediately
+** follow the reduce.
+*/
+static void yy_reduce(
+ yyParser *yypParser, /* The parser */
+ int yyruleno /* Number of the rule by which to reduce */
+){
+ int yygoto; /* The next state */
+ int yyact; /* The next action */
+ YYMINORTYPE yygotominor; /* The LHS of the rule reduced */
+ yyStackEntry *yymsp; /* The top of the parser's stack */
+ int yysize; /* Amount to pop the stack */
+ ParseARG_FETCH;
+ yymsp = &yypParser->yystack[yypParser->yyidx];
+#ifndef NDEBUG
+ if( yyTraceFILE && yyruleno>=0
+ && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
+ fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt,
+ yyRuleName[yyruleno]);
+ }
+#endif /* NDEBUG */
+
+ /* Silence complaints from purify about yygotominor being uninitialized
+ ** in some cases when it is copied into the stack after the following
+ ** switch. yygotominor is uninitialized when a rule reduces that does
+ ** not set the value of its left-hand side nonterminal. Leaving the
+ ** value of the nonterminal uninitialized is utterly harmless as long
+ ** as the value is never used. So really the only thing this code
+ ** accomplishes is to quieten purify.
+ **
+ ** 2007-01-16: The wireshark project (www.wireshark.org) reports that
+ ** without this code, their parser segfaults. I'm not sure what their
+ ** parser is doing to make this happen. This is the second bug report
+ ** from wireshark this week. Clearly they are stressing Lemon in ways
+ ** that it has not been previously stressed... (SQLite ticket #2172)
+ */
+ /*memset(&yygotominor, 0, sizeof(yygotominor));*/
+ yygotominor = yyzerominor;
+
+
+ switch( yyruleno ){
+ /* Beginning here are the reduction cases. A typical example
+ ** follows:
+ ** case 0:
+ ** #line <lineno> <grammarfile>
+ ** { ... } // User supplied code
+ ** #line <lineno> <thisfile>
+ ** break;
+ */
+%%
+ };
+ yygoto = yyRuleInfo[yyruleno].lhs;
+ yysize = yyRuleInfo[yyruleno].nrhs;
+ yypParser->yyidx -= yysize;
+ yyact = yy_find_reduce_action(yymsp[-yysize].stateno,yygoto);
+ if( yyact < YYNSTATE ){
+#ifdef NDEBUG
+ /* If we are not debugging and the reduce action popped at least
+ ** one element off the stack, then we can push the new element back
+ ** onto the stack here, and skip the stack overflow test in yy_shift().
+ ** That gives a significant speed improvement. */
+ if( yysize ){
+ yypParser->yyidx++;
+ yymsp -= yysize-1;
+ yymsp->stateno = yyact;
+ yymsp->major = yygoto;
+ yymsp->minor = yygotominor;
+ }else
+#endif
+ {
+ yy_shift(yypParser,yyact,yygoto,&yygotominor);
+ }
+ }else{
+ assert( yyact == YYNSTATE + YYNRULE + 1 );
+ yy_accept(yypParser);
+ }
+}
+
+/*
+** The following code executes when the parse fails
+*/
+static void yy_parse_failed(
+ yyParser *yypParser /* The parser */
+){
+ ParseARG_FETCH;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will be executed whenever the
+ ** parser fails */
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+
+/*
+** The following code executes when a syntax error first occurs.
+*/
+static void yy_syntax_error(
+ yyParser *yypParser, /* The parser */
+ int yymajor, /* The major type of the error token */
+ YYMINORTYPE yyminor /* The minor type of the error token */
+){
+ ParseARG_FETCH;
+#define TOKEN (yyminor.yy0)
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+
+/*
+** The following is executed when the parser accepts
+*/
+static void yy_accept(
+ yyParser *yypParser /* The parser */
+){
+ ParseARG_FETCH;
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
+ }
+#endif
+ while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+ /* Here code is inserted which will be executed whenever the
+ ** parser accepts */
+%%
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+
+/* The main parser program.
+** The first argument is a pointer to a structure obtained from
+** "ParseAlloc" which describes the current state of the parser.
+** The second argument is the major token number. The third is
+** the minor token. The fourth optional argument is whatever the
+** user wants (and specified in the grammar) and is available for
+** use by the action routines.
+**
+** Inputs:
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An optional argument of a grammar-specified type.
+** </ul>
+**
+** Outputs:
+** None.
+*/
+void Parse(
+ void *yyp, /* The parser */
+ int yymajor, /* The major token code number */
+ ParseTOKENTYPE yyminor /* The value for the token */
+ ParseARG_PDECL /* Optional %extra_argument parameter */
+){
+ YYMINORTYPE yyminorunion;
+ int yyact; /* The parser action. */
+ int yyendofinput; /* True if we are at the end of input */
+#ifdef YYERRORSYMBOL
+ int yyerrorhit = 0; /* True if yymajor has invoked an error */
+#endif
+ yyParser *yypParser; /* The parser */
+
+ /* (re)initialize the parser, if necessary */
+ yypParser = (yyParser*)yyp;
+ if( yypParser->yyidx<0 ){
+#if YYSTACKDEPTH<=0
+ if( yypParser->yystksz <=0 ){
+ /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/
+ yyminorunion = yyzerominor;
+ yyStackOverflow(yypParser, &yyminorunion);
+ return;
+ }
+#endif
+ yypParser->yyidx = 0;
+ yypParser->yyerrcnt = -1;
+ yypParser->yystack[0].stateno = 0;
+ yypParser->yystack[0].major = 0;
+ }
+ yyminorunion.yy0 = yyminor;
+ yyendofinput = (yymajor==0);
+ ParseARG_STORE;
+
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sInput %s\n",yyTracePrompt,yyTokenName[yymajor]);
+ }
+#endif
+
+ do{
+ yyact = yy_find_shift_action(yypParser,yymajor);
+ if( yyact<YYNSTATE ){
+ assert( !yyendofinput ); /* Impossible to shift the $ token */
+ yy_shift(yypParser,yyact,yymajor,&yyminorunion);
+ yypParser->yyerrcnt--;
+ yymajor = YYNOCODE;
+ }else if( yyact < YYNSTATE + YYNRULE ){
+ yy_reduce(yypParser,yyact-YYNSTATE);
+ }else{
+ assert( yyact == YY_ERROR_ACTION );
+#ifdef YYERRORSYMBOL
+ int yymx;
+#endif
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
+ }
+#endif
+#ifdef YYERRORSYMBOL
+ /* A syntax error has occurred.
+ ** The response to an error depends upon whether or not the
+ ** grammar defines an error token "ERROR".
+ **
+ ** This is what we do if the grammar does define ERROR:
+ **
+ ** * Call the %syntax_error function.
+ **
+ ** * Begin popping the stack until we enter a state where
+ ** it is legal to shift the error symbol, then shift
+ ** the error symbol.
+ **
+ ** * Set the error count to three.
+ **
+ ** * Begin accepting and shifting new tokens. No new error
+ ** processing will occur until three tokens have been
+ ** shifted successfully.
+ **
+ */
+ if( yypParser->yyerrcnt<0 ){
+ yy_syntax_error(yypParser,yymajor,yyminorunion);
+ }
+ yymx = yypParser->yystack[yypParser->yyidx].major;
+ if( yymx==YYERRORSYMBOL || yyerrorhit ){
+#ifndef NDEBUG
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE,"%sDiscard input token %s\n",
+ yyTracePrompt,yyTokenName[yymajor]);
+ }
+#endif
+ yy_destructor(yypParser, yymajor,&yyminorunion);
+ yymajor = YYNOCODE;
+ }else{
+ while(
+ yypParser->yyidx >= 0 &&
+ yymx != YYERRORSYMBOL &&
+ (yyact = yy_find_reduce_action(
+ yypParser->yystack[yypParser->yyidx].stateno,
+ YYERRORSYMBOL)) >= YYNSTATE
+ ){
+ yy_pop_parser_stack(yypParser);
+ }
+ if( yypParser->yyidx < 0 || yymajor==0 ){
+ yy_destructor(yypParser,yymajor,&yyminorunion);
+ yy_parse_failed(yypParser);
+ yymajor = YYNOCODE;
+ }else if( yymx!=YYERRORSYMBOL ){
+ YYMINORTYPE u2;
+ u2.YYERRSYMDT = 0;
+ yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2);
+ }
+ }
+ yypParser->yyerrcnt = 3;
+ yyerrorhit = 1;
+#else /* YYERRORSYMBOL is not defined */
+ /* This is what we do if the grammar does not define ERROR:
+ **
+ ** * Report an error message, and throw away the input token.
+ **
+ ** * If the input token is $, then fail the parse.
+ **
+ ** As before, subsequent error messages are suppressed until
+ ** three input tokens have been successfully shifted.
+ */
+ if( yypParser->yyerrcnt<=0 ){
+ yy_syntax_error(yypParser,yymajor,yyminorunion);
+ }
+ yypParser->yyerrcnt = 3;
+ yy_destructor(yypParser,yymajor,&yyminorunion);
+ if( yyendofinput ){
+ yy_parse_failed(yypParser);
+ }
+ yymajor = YYNOCODE;
+#endif
+ }
+ }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 );
+ return;
+}
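+
+/* A typical host program drives the interface above roughly as follows.
+** GetNextToken(), tokenType and tokenValue are hypothetical stand-ins for
+** the application's own tokenizer; tokenValue has type ParseTOKENTYPE, the
+** trailing %extra_argument parameter is omitted for brevity, and passing a
+** major token code of 0 signals end-of-input:
+**
+**     void *pParser = ParseAlloc( malloc );
+**     while( GetNextToken(&tokenType, &tokenValue) ){
+**       Parse(pParser, tokenType, tokenValue);
+**     }
+**     Parse(pParser, 0, tokenValue);
+**     ParseFree(pParser, free);
+*/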
diff --git a/third_party/sqlite/tool/memleak.awk b/third_party/sqlite/tool/memleak.awk
new file mode 100755
index 0000000..928d3b6
--- /dev/null
+++ b/third_party/sqlite/tool/memleak.awk
@@ -0,0 +1,29 @@
+#
+# This script looks for memory leaks by analyzing the output of "sqlite"
+# when compiled with the SQLITE_DEBUG=2 option.
+#
+/[0-9]+ malloc / {
+ mem[$6] = $0
+}
+/[0-9]+ realloc / {
+ mem[$8] = "";
+ mem[$10] = $0
+}
+/[0-9]+ free / {
+ if (mem[$6]=="") {
+ print "*** free without a malloc at",$6
+ }
+ mem[$6] = "";
+ str[$6] = ""
+}
+/^string at / {
+ addr = $4
+ sub("string at " addr " is ","")
+ str[addr] = $0
+}
+END {
+ for(addr in mem){
+ if( mem[addr]=="" ) continue
+ print mem[addr], str[addr]
+ }
+}
diff --git a/third_party/sqlite/tool/memleak2.awk b/third_party/sqlite/tool/memleak2.awk
new file mode 100755
index 0000000..5d81b70
--- /dev/null
+++ b/third_party/sqlite/tool/memleak2.awk
@@ -0,0 +1,29 @@
+# This AWK script reads the output of testfixture when compiled for memory
+# debugging. It generates SQL commands that can be fed into an sqlite
+# instance to determine what memory is never freed. A typical usage would
+# be as follows:
+#
+# make -f memleak.mk fulltest 2>mem.out
+# awk -f ../sqlite/tool/memleak2.awk mem.out | ./sqlite :memory:
+#
+# The job performed by this script is the same as that done by memleak.awk.
+# The difference is that this script uses much less memory when the size
+# of the mem.out file is huge.
+#
+BEGIN {
+ print "CREATE TABLE mem(loc INTEGER PRIMARY KEY, src);"
+}
+/[0-9]+ malloc / {
+ print "INSERT INTO mem VALUES(" strtonum($6) ",'" $0 "');"
+}
+/[0-9]+ realloc / {
+ print "INSERT INTO mem VALUES(" strtonum($10) \
+ ",(SELECT src FROM mem WHERE loc=" strtonum($8) "));"
+ print "DELETE FROM mem WHERE loc=" strtonum($8) ";"
+}
+/[0-9]+ free / {
+ print "DELETE FROM mem WHERE loc=" strtonum($6) ";"
+}
+END {
+ print "SELECT src FROM mem;"
+}
diff --git a/third_party/sqlite/tool/memleak3.tcl b/third_party/sqlite/tool/memleak3.tcl
new file mode 100755
index 0000000..3c6e9b9
--- /dev/null
+++ b/third_party/sqlite/tool/memleak3.tcl
@@ -0,0 +1,233 @@
+#!/bin/sh
+# \
+exec `which tclsh` $0 "$@"
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+######################################################################
+
+set doco "
+This script is a tool to help track down memory leaks in the sqlite
+library. The library must be compiled with the preprocessor symbol
+SQLITE_MEMDEBUG set to at least 2. It must be set to 3 to enable stack
+traces.
+
+To use, run the leaky application and save the standard error output.
+Then, execute this program with the first argument the name of the
+application binary (or interpreter) and the second argument the name of the
+text file that contains the collected stderr output.
+
+If all goes well, a summary of unfreed allocations is printed out. If the
+GNU C library is in use and SQLITE_MEMDEBUG is 3 or greater, a stack trace is
+printed out for each unmatched allocation.
+
+If the \"-r <n>\" option is passed, then the program stops and prints out
+the state of the heap immediately after the <n>th call to malloc() or
+realloc().
+
+Example:
+
+$ ./testfixture ../sqlite/test/select1.test 2> memtrace.out
+$ tclsh $argv0 ?-r <malloc-number>? ./testfixture memtrace.out
+"
+
+
+proc usage {} {
+ set prg [file tail $::argv0]
+ puts "Usage: $prg ?-r <malloc-number>? <binary file> <mem trace file>"
+ puts ""
+ puts [string trim $::doco]
+ exit -1
+}
+
+proc shift {listvar} {
+ upvar $listvar l
+ set ret [lindex $l 0]
+ set l [lrange $l 1 end]
+ return $ret
+}
+
+# Argument handling. The following vars are set:
+#
+# $exe - the name of the executable (i.e. "testfixture" or "./sqlite3")
+# $memfile - the name of the file containing the trace output.
+# $report_at - The malloc number to stop and report at. Or -1 to read
+# all of $memfile.
+#
+set report_at -1
+while {[llength $argv]>2} {
+ set arg [shift argv]
+ switch -- $arg {
+ "-r" {
+ set report_at [shift argv]
+ }
+ default {
+ usage
+ }
+ }
+}
+if {[llength $argv]!=2} usage
+set exe [lindex $argv 0]
+set memfile [lindex $argv 1]
+
+# If stack traces are enabled, the 'addr2line' program is called to
+# translate a binary stack address into a human-readable form.
+set addr2line addr2line
+
+# When the SQLITE_MEMDEBUG is set as described above, SQLite prints
+# out a line for each malloc(), realloc() or free() call that the
+# library makes. If SQLITE_MEMDEBUG is 3, then a stack trace is printed
+# out before each malloc() and realloc() line.
+#
+# This program parses each line the SQLite library outputs and updates
+# the following global Tcl variables to reflect the "current" state of
+# the heap used by SQLite.
+#
+set nBytes 0 ;# Total number of bytes currently allocated.
+set nMalloc 0 ;# Total number of malloc()/realloc() calls.
+set nPeak 0 ;# Peak of nBytes.
+set iPeak 0 ;# nMalloc when nPeak was set.
+#
+# More detailed state information is stored in the $memmap array.
+# Each key in the memmap array is the address of a chunk of memory
+# currently allocated from the heap. The value is a list of the
+# following form
+#
+# {<number-of-bytes> <malloc id> <stack trace>}
+#
+array unset memmap
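+
+# To make the regular expressions in process_input below concrete, the
+# trace lines being matched look roughly like the following (the malloc
+# numbers, sizes and addresses are illustrative):
+#
+#   1234 malloc 48 bytes at 0x80a1f30
+#   1235 realloc 48 to 96 bytes at 0x80a1f30 to 0x80a2f58
+#   1236 free 96 bytes at 0x80a2f58
+#
+# The corresponding memmap entry for the first line would be
+# {48 {malloc 1234} $stack}.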
+
+proc process_input {input_file array_name} {
+ upvar $array_name mem
+ set input [open $input_file]
+
+ set MALLOC {([[:digit:]]+) malloc ([[:digit:]]+) bytes at 0x([[:xdigit:]]+)}
+ # set STACK {^[[:digit:]]+: STACK: (.*)$}
+ set STACK {^STACK: (.*)$}
+ set FREE {[[:digit:]]+ free ([[:digit:]]+) bytes at 0x([[:xdigit:]]+)}
+ set REALLOC {([[:digit:]]+) realloc ([[:digit:]]+) to ([[:digit:]]+)}
+ append REALLOC { bytes at 0x([[:xdigit:]]+) to 0x([[:xdigit:]]+)}
+
+ set stack ""
+ while { ![eof $input] } {
+ set line [gets $input]
+ if {[regexp $STACK $line dummy stack]} {
+ # Do nothing. The variable $stack now stores the hexadecimal stack dump
+ # for the next malloc() or realloc().
+
+ } elseif { [regexp $MALLOC $line dummy mallocid bytes addr] } {
+ # If this is a 'malloc' line, set an entry in the mem array. Each entry
+ # is a list of length three: the number of bytes allocated, the malloc
+ # number, and the stack dump when it was allocated.
+ set mem($addr) [list $bytes "malloc $mallocid" $stack]
+ set stack ""
+
+ # Increase the current heap usage
+ incr ::nBytes $bytes
+
+ # Increase the number of malloc() calls
+ incr ::nMalloc
+
+ if {$::nBytes > $::nPeak} {
+ set ::nPeak $::nBytes
+ set ::iPeak $::nMalloc
+ }
+
+ } elseif { [regexp $FREE $line dummy bytes addr] } {
+ # If this is a 'free' line, remove the entry from the mem array. If the
+ # entry does not exist, or is the wrong number of bytes, announce a
+ # problem. This is more likely a bug in the regular expressions for
+ # this script than an SQLite defect.
+ if { [lindex $mem($addr) 0] != $bytes } {
+ error "byte count mismatch"
+ }
+ unset mem($addr)
+
+ # Decrease the current heap usage
+ incr ::nBytes [expr -1 * $bytes]
+
+ } elseif { [regexp $REALLOC $line dummy mallocid ob b oa a] } {
+ # "free" the old allocation in the internal model:
+ incr ::nBytes [expr -1 * $ob]
+ unset mem($oa);
+
+ # "malloc" the new allocation
+ set mem($a) [list $b "realloc $mallocid" $stack]
+ incr ::nBytes $b
+ set stack ""
+
+ # Increase the number of malloc() calls
+ incr ::nMalloc
+
+ if {$::nBytes > $::nPeak} {
+ set ::nPeak $::nBytes
+ set ::iPeak $::nMalloc
+ }
+
+ } else {
+ # puts "REJECT: $line"
+ }
+
+ if {$::nMalloc==$::report_at} report
+ }
+
+ close $input
+}
+
+proc printstack {stack} {
+ set fcount 10
+ if {[llength $stack]<10} {
+ set fcount [llength $stack]
+ }
+ foreach frame [lrange $stack 1 $fcount] {
+ foreach {f l} [split [exec $::addr2line -f --exe=$::exe $frame] \n] {}
+ puts [format "%-30s %s" $f $l]
+ }
+ if {[llength $stack]>0 } {puts ""}
+}
+
+proc report {} {
+
+ foreach key [array names ::memmap] {
+ set stack [lindex $::memmap($key) 2]
+ set bytes [lindex $::memmap($key) 0]
+ lappend summarymap($stack) $bytes
+ }
+
+ set sorted [list]
+ foreach stack [array names summarymap] {
+ set allocs $summarymap($stack)
+ set sum 0
+ foreach a $allocs {
+ incr sum $a
+ }
+ lappend sorted [list $sum $stack]
+ }
+
+ set sorted [lsort -integer -index 0 $sorted]
+ foreach s $sorted {
+ set sum [lindex $s 0]
+ set stack [lindex $s 1]
+ set allocs $summarymap($stack)
+ puts "$sum bytes in [llength $allocs] chunks ($allocs)"
+ printstack $stack
+ }
+
+ # Print out summary statistics
+ puts "Total allocations : $::nMalloc"
+ puts "Total outstanding allocations: [array size ::memmap]"
+ puts "Current heap usage : $::nBytes bytes"
+ puts "Peak heap usage : $::nPeak bytes (malloc #$::iPeak)"
+
+ exit
+}
+
+process_input $memfile memmap
+report
+
+
+
diff --git a/third_party/sqlite/tool/mkkeywordhash.c b/third_party/sqlite/tool/mkkeywordhash.c
new file mode 100755
index 0000000..3a34224
--- /dev/null
+++ b/third_party/sqlite/tool/mkkeywordhash.c
@@ -0,0 +1,559 @@
+/*
+** Compile and run this standalone program in order to generate code that
+** implements a function that will translate alphabetic identifiers into
+** parser token codes.
+*/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+/*
+** A header comment placed at the beginning of generated code.
+*/
+static const char zHdr[] =
+ "/***** This file contains automatically generated code ******\n"
+ "**\n"
+ "** The code in this file has been automatically generated by\n"
+ "**\n"
+ "** $Header: /sqlite/sqlite/tool/mkkeywordhash.c,v 1.31 2007/07/30 18:26:20 rse Exp $\n"
+ "**\n"
+ "** The code in this file implements a function that determines whether\n"
+ "** or not a given identifier is really an SQL keyword. The same thing\n"
+ "** might be implemented more directly using a hand-written hash table.\n"
+ "** But by using this automatically generated code, the size of the code\n"
+ "** is substantially reduced. This is important for embedded applications\n"
+ "** on platforms with limited memory.\n"
+ "*/\n"
+;
+
+/*
+** All the keywords of the SQL language are stored in a hash
+** table composed of instances of the following structure.
+*/
+typedef struct Keyword Keyword;
+struct Keyword {
+ char *zName; /* The keyword name */
+ char *zTokenType; /* Token value for this keyword */
+ int mask; /* Code this keyword if non-zero */
+ int id; /* Unique ID for this record */
+ int hash; /* Hash on the keyword */
+ int offset; /* Offset to start of name string */
+ int len; /* Length of this keyword, not counting final \000 */
+ int prefix; /* Number of characters in prefix */
+  int longestSuffix;   /* Longest suffix that is a prefix of another word */
+ int iNext; /* Index in aKeywordTable[] of next with same hash */
+  int substrId;        /* Id of the keyword this keyword is embedded in */
+ int substrOffset; /* Offset into substrId for start of this keyword */
+};
+
+/*
+** Define masks used to determine which keywords are allowed
+*/
+#ifdef SQLITE_OMIT_ALTERTABLE
+# define ALTER 0
+#else
+# define ALTER 0x00000001
+#endif
+#define ALWAYS 0x00000002
+#ifdef SQLITE_OMIT_ANALYZE
+# define ANALYZE 0
+#else
+# define ANALYZE 0x00000004
+#endif
+#ifdef SQLITE_OMIT_ATTACH
+# define ATTACH 0
+#else
+# define ATTACH 0x00000008
+#endif
+#ifdef SQLITE_OMIT_AUTOINCREMENT
+# define AUTOINCR 0
+#else
+# define AUTOINCR 0x00000010
+#endif
+#ifdef SQLITE_OMIT_CAST
+# define CAST 0
+#else
+# define CAST 0x00000020
+#endif
+#ifdef SQLITE_OMIT_COMPOUND_SELECT
+# define COMPOUND 0
+#else
+# define COMPOUND 0x00000040
+#endif
+#ifdef SQLITE_OMIT_CONFLICT_CLAUSE
+# define CONFLICT 0
+#else
+# define CONFLICT 0x00000080
+#endif
+#ifdef SQLITE_OMIT_EXPLAIN
+# define EXPLAIN 0
+#else
+# define EXPLAIN 0x00000100
+#endif
+#ifdef SQLITE_OMIT_FOREIGN_KEY
+# define FKEY 0
+#else
+# define FKEY 0x00000200
+#endif
+#ifdef SQLITE_OMIT_PRAGMA
+# define PRAGMA 0
+#else
+# define PRAGMA 0x00000400
+#endif
+#ifdef SQLITE_OMIT_REINDEX
+# define REINDEX 0
+#else
+# define REINDEX 0x00000800
+#endif
+#ifdef SQLITE_OMIT_SUBQUERY
+# define SUBQUERY 0
+#else
+# define SUBQUERY 0x00001000
+#endif
+#ifdef SQLITE_OMIT_TRIGGER
+# define TRIGGER 0
+#else
+# define TRIGGER 0x00002000
+#endif
+#if defined(SQLITE_OMIT_AUTOVACUUM) && \
+ (defined(SQLITE_OMIT_VACUUM) || defined(SQLITE_OMIT_ATTACH))
+# define VACUUM 0
+#else
+# define VACUUM 0x00004000
+#endif
+#ifdef SQLITE_OMIT_VIEW
+# define VIEW 0
+#else
+# define VIEW 0x00008000
+#endif
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+# define VTAB 0
+#else
+# define VTAB 0x00010000
+#endif
+#ifdef SQLITE_OMIT_AUTOVACUUM
+# define AUTOVACUUM 0
+#else
+# define AUTOVACUUM 0x00020000
+#endif
+
+/*
+** These are the keywords
+*/
+static Keyword aKeywordTable[] = {
+ { "ABORT", "TK_ABORT", CONFLICT|TRIGGER },
+ { "ADD", "TK_ADD", ALTER },
+ { "AFTER", "TK_AFTER", TRIGGER },
+ { "ALL", "TK_ALL", ALWAYS },
+ { "ALTER", "TK_ALTER", ALTER },
+ { "ANALYZE", "TK_ANALYZE", ANALYZE },
+ { "AND", "TK_AND", ALWAYS },
+ { "AS", "TK_AS", ALWAYS },
+ { "ASC", "TK_ASC", ALWAYS },
+ { "ATTACH", "TK_ATTACH", ATTACH },
+ { "AUTOINCREMENT", "TK_AUTOINCR", AUTOINCR },
+ { "BEFORE", "TK_BEFORE", TRIGGER },
+ { "BEGIN", "TK_BEGIN", ALWAYS },
+ { "BETWEEN", "TK_BETWEEN", ALWAYS },
+ { "BY", "TK_BY", ALWAYS },
+ { "CASCADE", "TK_CASCADE", FKEY },
+ { "CASE", "TK_CASE", ALWAYS },
+ { "CAST", "TK_CAST", CAST },
+ { "CHECK", "TK_CHECK", ALWAYS },
+ { "COLLATE", "TK_COLLATE", ALWAYS },
+ { "COLUMN", "TK_COLUMNKW", ALTER },
+ { "COMMIT", "TK_COMMIT", ALWAYS },
+ { "CONFLICT", "TK_CONFLICT", CONFLICT },
+ { "CONSTRAINT", "TK_CONSTRAINT", ALWAYS },
+ { "CREATE", "TK_CREATE", ALWAYS },
+ { "CROSS", "TK_JOIN_KW", ALWAYS },
+ { "CURRENT_DATE", "TK_CTIME_KW", ALWAYS },
+ { "CURRENT_TIME", "TK_CTIME_KW", ALWAYS },
+ { "CURRENT_TIMESTAMP","TK_CTIME_KW", ALWAYS },
+ { "DATABASE", "TK_DATABASE", ATTACH },
+ { "DEFAULT", "TK_DEFAULT", ALWAYS },
+ { "DEFERRED", "TK_DEFERRED", ALWAYS },
+ { "DEFERRABLE", "TK_DEFERRABLE", FKEY },
+ { "DELETE", "TK_DELETE", ALWAYS },
+ { "DESC", "TK_DESC", ALWAYS },
+ { "DETACH", "TK_DETACH", ATTACH },
+ { "DISTINCT", "TK_DISTINCT", ALWAYS },
+ { "DROP", "TK_DROP", ALWAYS },
+ { "END", "TK_END", ALWAYS },
+ { "EACH", "TK_EACH", TRIGGER },
+ { "ELSE", "TK_ELSE", ALWAYS },
+ { "ESCAPE", "TK_ESCAPE", ALWAYS },
+ { "EXCEPT", "TK_EXCEPT", COMPOUND },
+ { "EXCLUSIVE", "TK_EXCLUSIVE", ALWAYS },
+ { "EXISTS", "TK_EXISTS", ALWAYS },
+ { "EXPLAIN", "TK_EXPLAIN", EXPLAIN },
+ { "FAIL", "TK_FAIL", CONFLICT|TRIGGER },
+ { "FOR", "TK_FOR", TRIGGER },
+ { "FOREIGN", "TK_FOREIGN", FKEY },
+ { "FROM", "TK_FROM", ALWAYS },
+ { "FULL", "TK_JOIN_KW", ALWAYS },
+ { "GLOB", "TK_LIKE_KW", ALWAYS },
+ { "GROUP", "TK_GROUP", ALWAYS },
+ { "HAVING", "TK_HAVING", ALWAYS },
+ { "IF", "TK_IF", ALWAYS },
+ { "IGNORE", "TK_IGNORE", CONFLICT|TRIGGER },
+ { "IMMEDIATE", "TK_IMMEDIATE", ALWAYS },
+ { "IN", "TK_IN", ALWAYS },
+ { "INDEX", "TK_INDEX", ALWAYS },
+ { "INITIALLY", "TK_INITIALLY", FKEY },
+ { "INNER", "TK_JOIN_KW", ALWAYS },
+ { "INSERT", "TK_INSERT", ALWAYS },
+ { "INSTEAD", "TK_INSTEAD", TRIGGER },
+ { "INTERSECT", "TK_INTERSECT", COMPOUND },
+ { "INTO", "TK_INTO", ALWAYS },
+ { "IS", "TK_IS", ALWAYS },
+ { "ISNULL", "TK_ISNULL", ALWAYS },
+ { "JOIN", "TK_JOIN", ALWAYS },
+ { "KEY", "TK_KEY", ALWAYS },
+ { "LEFT", "TK_JOIN_KW", ALWAYS },
+ { "LIKE", "TK_LIKE_KW", ALWAYS },
+ { "LIMIT", "TK_LIMIT", ALWAYS },
+ { "MATCH", "TK_MATCH", ALWAYS },
+ { "NATURAL", "TK_JOIN_KW", ALWAYS },
+ { "NOT", "TK_NOT", ALWAYS },
+ { "NOTNULL", "TK_NOTNULL", ALWAYS },
+ { "NULL", "TK_NULL", ALWAYS },
+ { "OF", "TK_OF", ALWAYS },
+ { "OFFSET", "TK_OFFSET", ALWAYS },
+ { "ON", "TK_ON", ALWAYS },
+ { "OR", "TK_OR", ALWAYS },
+ { "ORDER", "TK_ORDER", ALWAYS },
+ { "OUTER", "TK_JOIN_KW", ALWAYS },
+ { "PLAN", "TK_PLAN", EXPLAIN },
+ { "PRAGMA", "TK_PRAGMA", PRAGMA },
+ { "PRIMARY", "TK_PRIMARY", ALWAYS },
+ { "QUERY", "TK_QUERY", EXPLAIN },
+ { "RAISE", "TK_RAISE", TRIGGER },
+ { "REFERENCES", "TK_REFERENCES", FKEY },
+ { "REGEXP", "TK_LIKE_KW", ALWAYS },
+ { "REINDEX", "TK_REINDEX", REINDEX },
+ { "RENAME", "TK_RENAME", ALTER },
+ { "REPLACE", "TK_REPLACE", CONFLICT },
+ { "RESTRICT", "TK_RESTRICT", FKEY },
+ { "RIGHT", "TK_JOIN_KW", ALWAYS },
+ { "ROLLBACK", "TK_ROLLBACK", ALWAYS },
+ { "ROW", "TK_ROW", TRIGGER },
+ { "SELECT", "TK_SELECT", ALWAYS },
+ { "SET", "TK_SET", ALWAYS },
+ { "TABLE", "TK_TABLE", ALWAYS },
+ { "TEMP", "TK_TEMP", ALWAYS },
+ { "TEMPORARY", "TK_TEMP", ALWAYS },
+ { "THEN", "TK_THEN", ALWAYS },
+ { "TO", "TK_TO", ALTER },
+ { "TRANSACTION", "TK_TRANSACTION", ALWAYS },
+ { "TRIGGER", "TK_TRIGGER", TRIGGER },
+ { "UNION", "TK_UNION", COMPOUND },
+ { "UNIQUE", "TK_UNIQUE", ALWAYS },
+ { "UPDATE", "TK_UPDATE", ALWAYS },
+ { "USING", "TK_USING", ALWAYS },
+ { "VACUUM", "TK_VACUUM", VACUUM },
+ { "VALUES", "TK_VALUES", ALWAYS },
+ { "VIEW", "TK_VIEW", VIEW },
+ { "VIRTUAL", "TK_VIRTUAL", VTAB },
+ { "WHEN", "TK_WHEN", ALWAYS },
+ { "WHERE", "TK_WHERE", ALWAYS },
+};
+
+/* Number of keywords */
+static int nKeyword = (sizeof(aKeywordTable)/sizeof(aKeywordTable[0]));
+
+/* An array to map all upper-case characters into their corresponding
+** lower-case character.
+*/
+const unsigned char sqlite3UpperToLower[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,
+ 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107,
+ 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,
+ 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
+ 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,
+ 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,
+ 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,
+ 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
+ 252,253,254,255
+};
+#define UpperToLower sqlite3UpperToLower
+
+/*
+** Comparison function for two Keyword records
+*/
+static int keywordCompare1(const void *a, const void *b){
+ const Keyword *pA = (Keyword*)a;
+ const Keyword *pB = (Keyword*)b;
+ int n = pA->len - pB->len;
+ if( n==0 ){
+ n = strcmp(pA->zName, pB->zName);
+ }
+ return n;
+}
+static int keywordCompare2(const void *a, const void *b){
+ const Keyword *pA = (Keyword*)a;
+ const Keyword *pB = (Keyword*)b;
+ int n = pB->longestSuffix - pA->longestSuffix;
+ if( n==0 ){
+ n = strcmp(pA->zName, pB->zName);
+ }
+ return n;
+}
+static int keywordCompare3(const void *a, const void *b){
+ const Keyword *pA = (Keyword*)a;
+ const Keyword *pB = (Keyword*)b;
+ int n = pA->offset - pB->offset;
+ return n;
+}
+
+/*
+** Return a KeywordTable entry with the given id
+*/
+static Keyword *findById(int id){
+ int i;
+ for(i=0; i<nKeyword; i++){
+ if( aKeywordTable[i].id==id ) break;
+ }
+ return &aKeywordTable[i];
+}
+
+/*
+** This routine does the work. The generated code is printed on standard
+** output.
+*/
+int main(int argc, char **argv){
+ int i, j, k, h;
+ int bestSize, bestCount;
+ int count;
+ int nChar;
+ int totalLen = 0;
+ int aHash[1000]; /* 1000 is much bigger than nKeyword */
+
+ /* Remove entries from the list of keywords that have mask==0 */
+ for(i=j=0; i<nKeyword; i++){
+ if( aKeywordTable[i].mask==0 ) continue;
+ if( j<i ){
+ aKeywordTable[j] = aKeywordTable[i];
+ }
+ j++;
+ }
+ nKeyword = j;
+
+ /* Fill in the lengths of strings and hashes for all entries. */
+ for(i=0; i<nKeyword; i++){
+ Keyword *p = &aKeywordTable[i];
+ p->len = strlen(p->zName);
+ totalLen += p->len;
+ p->hash = (UpperToLower[(int)p->zName[0]]*4) ^
+ (UpperToLower[(int)p->zName[p->len-1]]*3) ^ p->len;
+ p->id = i+1;
+ }
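+
+  /* Worked example (for illustration only): for the keyword "ALL",
+  ** UpperToLower['A']==97 and UpperToLower['L']==108, so the hash is
+  ** (97*4) ^ (108*3) ^ 3 == 388 ^ 324 ^ 3 == 195.  The value is later
+  ** reduced modulo the chosen hash table size. */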
+
+ /* Sort the table from shortest to longest keyword */
+ qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare1);
+
+ /* Look for short keywords embedded in longer keywords */
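+  /* For example, the short keyword "IN" also occurs inside longer keywords
+  ** such as "INDEX" and "JOIN", so its text need not be stored separately
+  ** in the generated zText[] array. */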
+ for(i=nKeyword-2; i>=0; i--){
+ Keyword *p = &aKeywordTable[i];
+ for(j=nKeyword-1; j>i && p->substrId==0; j--){
+ Keyword *pOther = &aKeywordTable[j];
+ if( pOther->substrId ) continue;
+ if( pOther->len<=p->len ) continue;
+ for(k=0; k<=pOther->len-p->len; k++){
+ if( memcmp(p->zName, &pOther->zName[k], p->len)==0 ){
+ p->substrId = pOther->id;
+ p->substrOffset = k;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Compute the longestSuffix value for every word */
+ for(i=0; i<nKeyword; i++){
+ Keyword *p = &aKeywordTable[i];
+ if( p->substrId ) continue;
+ for(j=0; j<nKeyword; j++){
+ Keyword *pOther;
+ if( j==i ) continue;
+ pOther = &aKeywordTable[j];
+ if( pOther->substrId ) continue;
+ for(k=p->longestSuffix+1; k<p->len && k<pOther->len; k++){
+ if( memcmp(&p->zName[p->len-k], pOther->zName, k)==0 ){
+ p->longestSuffix = k;
+ }
+ }
+ }
+ }
+
+ /* Sort the table into reverse order by length */
+ qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare2);
+
+ /* Fill in the offset for all entries */
+ nChar = 0;
+ for(i=0; i<nKeyword; i++){
+ Keyword *p = &aKeywordTable[i];
+ if( p->offset>0 || p->substrId ) continue;
+ p->offset = nChar;
+ nChar += p->len;
+ for(k=p->len-1; k>=1; k--){
+ for(j=i+1; j<nKeyword; j++){
+ Keyword *pOther = &aKeywordTable[j];
+ if( pOther->offset>0 || pOther->substrId ) continue;
+ if( pOther->len<=k ) continue;
+ if( memcmp(&p->zName[p->len-k], pOther->zName, k)==0 ){
+ p = pOther;
+ p->offset = nChar - k;
+ nChar = p->offset + p->len;
+ p->zName += k;
+ p->len -= k;
+ p->prefix = k;
+ j = i;
+ k = p->len;
+ }
+ }
+ }
+ }
+ for(i=0; i<nKeyword; i++){
+ Keyword *p = &aKeywordTable[i];
+ if( p->substrId ){
+ p->offset = findById(p->substrId)->offset + p->substrOffset;
+ }
+ }
+
+ /* Sort the table by offset */
+ qsort(aKeywordTable, nKeyword, sizeof(aKeywordTable[0]), keywordCompare3);
+
+ /* Figure out how big to make the hash table in order to minimize the
+ ** number of collisions */
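+  /* Each keyword that hashes to a bucket doubles that bucket's score and
+  ** then adds one, so a bucket holding k keywords contributes 2^k - 1 to
+  ** the total score.  Collisions are thus penalized exponentially. */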
+ bestSize = nKeyword;
+ bestCount = nKeyword*nKeyword;
+ for(i=nKeyword/2; i<=2*nKeyword; i++){
+ for(j=0; j<i; j++) aHash[j] = 0;
+ for(j=0; j<nKeyword; j++){
+ h = aKeywordTable[j].hash % i;
+ aHash[h] *= 2;
+ aHash[h]++;
+ }
+ for(j=count=0; j<i; j++) count += aHash[j];
+ if( count<bestCount ){
+ bestCount = count;
+ bestSize = i;
+ }
+ }
+
+ /* Compute the hash */
+ for(i=0; i<bestSize; i++) aHash[i] = 0;
+ for(i=0; i<nKeyword; i++){
+ h = aKeywordTable[i].hash % bestSize;
+ aKeywordTable[i].iNext = aHash[h];
+ aHash[h] = i+1;
+ }
+
+ /* Begin generating code */
+ printf("%s", zHdr);
+ printf("/* Hash score: %d */\n", bestCount);
+ printf("static int keywordCode(const char *z, int n){\n");
+ printf(" /* zText[] encodes %d bytes of keywords in %d bytes */\n",
+ totalLen + nKeyword, nChar+1 );
+
+ printf(" static const char zText[%d] =\n", nChar+1);
+ for(i=j=0; i<nKeyword; i++){
+ Keyword *p = &aKeywordTable[i];
+ if( p->substrId ) continue;
+ if( j==0 ) printf(" \"");
+ printf("%s", p->zName);
+ j += p->len;
+ if( j>60 ){
+ printf("\"\n");
+ j = 0;
+ }
+ }
+ printf("%s;\n", j>0 ? "\"" : " ");
+
+ printf(" static const unsigned char aHash[%d] = {\n", bestSize);
+ for(i=j=0; i<bestSize; i++){
+ if( j==0 ) printf(" ");
+ printf(" %3d,", aHash[i]);
+ j++;
+ if( j>12 ){
+ printf("\n");
+ j = 0;
+ }
+ }
+ printf("%s };\n", j==0 ? "" : "\n");
+
+ printf(" static const unsigned char aNext[%d] = {\n", nKeyword);
+ for(i=j=0; i<nKeyword; i++){
+ if( j==0 ) printf(" ");
+ printf(" %3d,", aKeywordTable[i].iNext);
+ j++;
+ if( j>12 ){
+ printf("\n");
+ j = 0;
+ }
+ }
+ printf("%s };\n", j==0 ? "" : "\n");
+
+ printf(" static const unsigned char aLen[%d] = {\n", nKeyword);
+ for(i=j=0; i<nKeyword; i++){
+ if( j==0 ) printf(" ");
+ printf(" %3d,", aKeywordTable[i].len+aKeywordTable[i].prefix);
+ j++;
+ if( j>12 ){
+ printf("\n");
+ j = 0;
+ }
+ }
+ printf("%s };\n", j==0 ? "" : "\n");
+
+ printf(" static const unsigned short int aOffset[%d] = {\n", nKeyword);
+ for(i=j=0; i<nKeyword; i++){
+ if( j==0 ) printf(" ");
+ printf(" %3d,", aKeywordTable[i].offset);
+ j++;
+ if( j>12 ){
+ printf("\n");
+ j = 0;
+ }
+ }
+ printf("%s };\n", j==0 ? "" : "\n");
+
+ printf(" static const unsigned char aCode[%d] = {\n", nKeyword);
+ for(i=j=0; i<nKeyword; i++){
+ char *zToken = aKeywordTable[i].zTokenType;
+ if( j==0 ) printf(" ");
+ printf("%s,%*s", zToken, (int)(14-strlen(zToken)), "");
+ j++;
+ if( j>=5 ){
+ printf("\n");
+ j = 0;
+ }
+ }
+ printf("%s };\n", j==0 ? "" : "\n");
+
+ printf(" int h, i;\n");
+ printf(" if( n<2 ) return TK_ID;\n");
+ printf(" h = ((charMap(z[0])*4) ^\n"
+ " (charMap(z[n-1])*3) ^\n"
+ " n) %% %d;\n", bestSize);
+ printf(" for(i=((int)aHash[h])-1; i>=0; i=((int)aNext[i])-1){\n");
+ printf(" if( aLen[i]==n &&"
+ " sqlite3StrNICmp(&zText[aOffset[i]],z,n)==0 ){\n");
+ printf(" return aCode[i];\n");
+ printf(" }\n");
+ printf(" }\n");
+ printf(" return TK_ID;\n");
+ printf("}\n");
+ printf("int sqlite3KeywordCode(const unsigned char *z, int n){\n");
+ printf(" return keywordCode((char*)z, n);\n");
+ printf("}\n");
+
+ return 0;
+}
diff --git a/third_party/sqlite/tool/mkopts.tcl b/third_party/sqlite/tool/mkopts.tcl
new file mode 100755
index 0000000..e3ddcb9
--- /dev/null
+++ b/third_party/sqlite/tool/mkopts.tcl
@@ -0,0 +1,51 @@
+#!/usr/bin/tclsh
+#
+# This script is used to generate the array of strings and the enum
+# that appear at the beginning of the C code implementation of a
+# a TCL command and that define the available subcommands for that
+# TCL command.
+
+set prefix {}
+while {![eof stdin]} {
+ set line [gets stdin]
+ if {$line==""} continue
+ regsub -all "\[ \t\n,\]+" [string trim $line] { } line
+ foreach token [split $line { }] {
+ if {![regexp {(([a-zA-Z]+)_)?([_a-zA-Z]+)} $token all px p2 name]} continue
+ lappend namelist [string tolower $name]
+ if {$px!=""} {set prefix $p2}
+ }
+}
+
+puts " static const char *${prefix}_strs\[\] = \173"
+set col 0
+proc put_item x {
+ global col
+ if {$col==0} {puts -nonewline " "}
+ if {$col<2} {
+ puts -nonewline [format " %-21s" $x]
+ incr col
+ } else {
+ puts $x
+ set col 0
+ }
+}
+proc finalize {} {
+ global col
+ if {$col>0} {puts {}}
+ set col 0
+}
+
+foreach name [lsort $namelist] {
+ put_item \"$name\",
+}
+put_item 0
+finalize
+puts " \175;"
+puts " enum ${prefix}_enum \173"
+foreach name [lsort $namelist] {
+ regsub -all {@} $name {} name
+ put_item ${prefix}_[string toupper $name],
+}
+finalize
+puts " \175;"
diff --git a/third_party/sqlite/tool/mksqlite3c.tcl b/third_party/sqlite/tool/mksqlite3c.tcl
new file mode 100755
index 0000000..98aede2
--- /dev/null
+++ b/third_party/sqlite/tool/mksqlite3c.tcl
@@ -0,0 +1,288 @@
+#!/usr/bin/tclsh
+#
+# To build a single huge source file holding all of SQLite (or at
+# least the core components - the test harness, shell, and TCL
+# interface are omitted), first do
+#
+# make target_source
+#
+# The make target above moves all of the source code files into
+# a subdirectory named "tsrc". (This script expects to find the files
+# there and will not work if they are not found.) There are a few
+# generated C code files that are also added to the tsrc directory.
+# For example, the "parse.c" and "parse.h" files that implement the
+# parser are derived from "parse.y" using lemon. And the
+# "keywordhash.h" file is generated by a program named "mkkeywordhash".
+#
+# After the "tsrc" directory has been created and populated, run
+# this script:
+#
+# tclsh mksqlite3c.tcl
+#
+# The amalgamated SQLite code will be written into sqlite3.c
+#
+
+# Begin by reading the "sqlite3.h" header file. Count the number of lines
+# in this file and extract the version number. That information will be
+# needed in order to generate the header of the amalgamation.
+#
+if {[lsearch $argv --nostatic]>=0} {
+ set addstatic 0
+} else {
+ set addstatic 1
+}
+set in [open tsrc/sqlite3.h]
+set cnt 0
+set VERSION ?????
+while {![eof $in]} {
+ set line [gets $in]
+ if {$line=="" && [eof $in]} break
+ incr cnt
+ regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
+}
+close $in
+
+# Open the output file and write a header comment at the beginning
+# of the file.
+#
+set out [open sqlite3.c w]
+set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
+puts $out [subst \
+{/******************************************************************************
+** This file is an amalgamation of many separate C source files from SQLite
+** version $VERSION. By combining all the individual C code files into this
+** single large file, the entire code can be compiled as one translation
+** unit. This allows many compilers to do optimizations that would not be
+** possible if the files were compiled separately. Performance improvements
+** of 5% or more are commonly seen when SQLite is compiled as a single
+** translation unit.
+**
+** This file is all you need to compile SQLite. To use SQLite in other
+** programs, you need this file and the "sqlite3.h" header file that defines
+** the programming interface to the SQLite library. (If you do not have
+** the "sqlite3.h" header file at hand, you will find a copy in the first
+** $cnt lines past this header comment.) Additional code files may be
+** needed if you want a wrapper to interface SQLite with your choice of
+** programming language. The code for the "sqlite3" command-line shell
+** is also in a separate file. This file contains only code for the core
+** SQLite library.
+**
+** This amalgamation was generated on $today.
+*/
+#define SQLITE_CORE 1
+#define SQLITE_AMALGAMATION 1}]
+if {$addstatic} {
+ puts $out \
+{#ifndef SQLITE_PRIVATE
+# define SQLITE_PRIVATE static
+#endif
+#ifndef SQLITE_API
+# define SQLITE_API
+#endif}
+}
+
+# These are the header files used by SQLite. The first time any of these
+# files are seen in a #include statement in the C code, include the complete
+# text of the file in-line. The file only needs to be included once.
+#
+foreach hdr {
+ btree.h
+ btreeInt.h
+ fts3.h
+ fts3_hash.h
+ fts3_tokenizer.h
+ hash.h
+ hwtime.h
+ keywordhash.h
+ mutex.h
+ opcodes.h
+ os_common.h
+ os.h
+ os_os2.h
+ pager.h
+ parse.h
+ rtree.h
+ sqlite3ext.h
+ sqlite3.h
+ sqliteInt.h
+ sqliteLimit.h
+ vdbe.h
+ vdbeInt.h
+} {
+ set available_hdr($hdr) 1
+}
+set available_hdr(sqliteInt.h) 0
+
+# 78 stars used for comment formatting.
+set s78 \
+{*****************************************************************************}
+
+# Insert a comment into the code
+#
+proc section_comment {text} {
+ global out s78
+ set n [string length $text]
+ set nstar [expr {60 - $n}]
+ set stars [string range $s78 0 $nstar]
+ puts $out "/************** $text $stars/"
+}
+
+# Read the source file named $filename and write it into the
+# sqlite3.c output file. If any #include statements are seen,
+# process them appropriately.
+#
+proc copy_file {filename} {
+ global seen_hdr available_hdr out addstatic
+ set tail [file tail $filename]
+ section_comment "Begin file $tail"
+ set in [open $filename r]
+ set varpattern {^[a-zA-Z][a-zA-Z_0-9 *]+(sqlite3[_a-zA-Z0-9]+)(\[|;| =)}
+ set declpattern {[a-zA-Z][a-zA-Z_0-9 ]+ \*?(sqlite3[_a-zA-Z0-9]+)\(}
+ if {[file extension $filename]==".h"} {
+ set declpattern " *$declpattern"
+ }
+ set declpattern ^$declpattern
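+  # As illustrative examples (not lines taken from the actual sources), a
+  # declaration such as "int sqlite3_open(" is matched by $declpattern,
+  # and a definition such as "char *sqlite3_temp_directory;" is matched
+  # by $varpattern.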
+ while {![eof $in]} {
+ set line [gets $in]
+ if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
+ if {[info exists available_hdr($hdr)]} {
+ if {$available_hdr($hdr)} {
+ if {$hdr!="os_common.h" && $hdr!="hwtime.h"} {
+ set available_hdr($hdr) 0
+ }
+ section_comment "Include $hdr in the middle of $tail"
+ copy_file tsrc/$hdr
+ section_comment "Continuing where we left off in $tail"
+ }
+ } elseif {![info exists seen_hdr($hdr)]} {
+ set seen_hdr($hdr) 1
+ puts $out $line
+ }
+ } elseif {[regexp {^#ifdef __cplusplus} $line]} {
+ puts $out "#if 0"
+ } elseif {[regexp {^#line} $line]} {
+ # Skip #line directives.
+ } elseif {$addstatic && ![regexp {^(static|typedef)} $line]} {
+ if {[regexp $declpattern $line all funcname]} {
+        # Add the SQLITE_PRIVATE or SQLITE_API keyword before functions
+        # so that linkage can be modified at compile-time.
+ if {[regexp {^sqlite3_} $funcname]} {
+ puts $out "SQLITE_API $line"
+ } else {
+ puts $out "SQLITE_PRIVATE $line"
+ }
+ } elseif {[regexp $varpattern $line all varname]} {
+ # Add the SQLITE_PRIVATE before variable declarations or
+ # definitions for internal use
+ if {![regexp {^sqlite3_} $varname]} {
+ regsub {^extern } $line {} line
+ puts $out "SQLITE_PRIVATE $line"
+ } else {
+ regsub {^SQLITE_EXTERN } $line {} line
+ puts $out "SQLITE_API $line"
+ }
+ } elseif {[regexp {^(SQLITE_EXTERN )?void \(\*sqlite3IoTrace\)} $line]} {
+ regsub {^SQLITE_EXTERN } $line {} line
+ puts $out "SQLITE_PRIVATE $line"
+ } else {
+ puts $out $line
+ }
+ } else {
+ puts $out $line
+ }
+ }
+ close $in
+ section_comment "End of $tail"
+}
+
+
+# Process the source files. Process files containing commonly
+# used subroutines first in order to help the compiler find
+# inlining opportunities.
+#
+foreach file {
+ sqliteInt.h
+
+ global.c
+ status.c
+ date.c
+ os.c
+
+ fault.c
+ mem1.c
+ mem2.c
+ mem3.c
+ mem5.c
+ mem6.c
+ mutex.c
+ mutex_os2.c
+ mutex_unix.c
+ mutex_w32.c
+ malloc.c
+ printf.c
+ random.c
+ utf.c
+ util.c
+ hash.c
+ opcodes.c
+
+ os_os2.c
+ os_unix.c
+ os_win.c
+
+ bitvec.c
+ pager.c
+
+ btmutex.c
+ btree.c
+
+ vdbefifo.c
+ vdbemem.c
+ vdbeaux.c
+ vdbeapi.c
+ vdbe.c
+ vdbeblob.c
+ journal.c
+
+ expr.c
+ alter.c
+ analyze.c
+ attach.c
+ auth.c
+ build.c
+ callback.c
+ delete.c
+ func.c
+ insert.c
+ legacy.c
+ loadext.c
+ pragma.c
+ prepare.c
+ select.c
+ table.c
+ trigger.c
+ update.c
+ vacuum.c
+ vtab.c
+ where.c
+
+ parse.c
+
+ tokenize.c
+ complete.c
+
+ main.c
+
+ fts3.c
+ fts3_hash.c
+ fts3_porter.c
+ fts3_tokenizer.c
+ fts3_tokenizer1.c
+
+ rtree.c
+ icu.c
+} {
+ copy_file tsrc/$file
+}
+
+close $out
diff --git a/third_party/sqlite/tool/mksqlite3internalh.tcl b/third_party/sqlite/tool/mksqlite3internalh.tcl
new file mode 100755
index 0000000..f02a62d
--- /dev/null
+++ b/third_party/sqlite/tool/mksqlite3internalh.tcl
@@ -0,0 +1,146 @@
+#!/usr/bin/tclsh
+#
+# To build a single huge source file holding all of SQLite (or at
+# least the core components - the test harness, shell, and TCL
+# interface are omitted), first do
+#
+# make target_source
+#
+# The make target above moves all of the source code files into
+# a subdirectory named "tsrc". (This script expects to find the files
+# there and will not work if they are not found.) There are a few
+# generated C code files that are also added to the tsrc directory.
+# For example, the "parse.c" and "parse.h" files that implement the
+# parser are derived from "parse.y" using lemon. And the
+# "keywordhash.h" file is generated by a program named "mkkeywordhash".
+#
+# After the "tsrc" directory has been created and populated, run
+# this script:
+#
+#      tclsh mksqlite3internalh.tcl
+#
+# The amalgamated SQLite internal header will be written into sqlite3internal.h
+#
+
+# Begin by reading the "sqlite3.h" header file. Count the number of lines
+# in this file and extract the version number. That information will be
+# needed in order to generate the header of the amalgamation.
+#
+set in [open tsrc/sqlite3.h]
+set cnt 0
+set VERSION ?????
+while {![eof $in]} {
+ set line [gets $in]
+ if {$line=="" && [eof $in]} break
+ incr cnt
+ regexp {#define\s+SQLITE_VERSION\s+"(.*)"} $line all VERSION
+}
+close $in
+
+# Open the output file and write a header comment at the beginning
+# of the file.
+#
+set out [open sqlite3internal.h w]
+set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1]
+puts $out [subst \
+{/******************************************************************************
+** This file is an amalgamation of many private header files from SQLite
+** version $VERSION.
+*/}]
+
+# These are the header files used by SQLite. The first time any of these
+# files are seen in a #include statement in the C code, include the complete
+# text of the file in-line. The file only needs to be included once.
+#
+foreach hdr {
+ btree.h
+ btreeInt.h
+ hash.h
+ hwtime.h
+ keywordhash.h
+ opcodes.h
+ os_common.h
+ os.h
+ os_os2.h
+ pager.h
+ parse.h
+ sqlite3ext.h
+ sqlite3.h
+ sqliteInt.h
+ sqliteLimit.h
+ vdbe.h
+ vdbeInt.h
+} {
+ set available_hdr($hdr) 1
+}
+
+# 78 stars used for comment formatting.
+set s78 \
+{*****************************************************************************}
+
+# Insert a comment into the code
+#
+proc section_comment {text} {
+ global out s78
+ set n [string length $text]
+ set nstar [expr {60 - $n}]
+ set stars [string range $s78 0 $nstar]
+ puts $out "/************** $text $stars/"
+}
+
+# Read the source file named $filename and write it into the
+# sqlite3internal.h output file. If any #include statements are seen,
+# process them appropriately.
+#
+proc copy_file {filename} {
+ global seen_hdr available_hdr out
+ set tail [file tail $filename]
+ section_comment "Begin file $tail"
+ set in [open $filename r]
+ while {![eof $in]} {
+ set line [gets $in]
+ if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} {
+ if {[info exists available_hdr($hdr)]} {
+ if {$available_hdr($hdr)} {
+ section_comment "Include $hdr in the middle of $tail"
+ copy_file tsrc/$hdr
+ section_comment "Continuing where we left off in $tail"
+ }
+ } elseif {![info exists seen_hdr($hdr)]} {
+ set seen_hdr($hdr) 1
+ puts $out $line
+ }
+ } elseif {[regexp {^#ifdef __cplusplus} $line]} {
+ puts $out "#if 0"
+ } elseif {[regexp {^#line} $line]} {
+ # Skip #line directives.
+ } else {
+ puts $out $line
+ }
+ }
+ close $in
+ section_comment "End of $tail"
+}
+
+
+# Process the source files. Process files containing commonly
+# used subroutines first in order to help the compiler find
+# inlining opportunities.
+#
+foreach file {
+ sqliteInt.h
+ sqlite3.h
+ btree.h
+ hash.h
+ os.h
+ pager.h
+ parse.h
+ sqlite3ext.h
+ vdbe.h
+} {
+ if {$available_hdr($file)} {
+ copy_file tsrc/$file
+ }
+}
+
+close $out
diff --git a/third_party/sqlite/tool/omittest.tcl b/third_party/sqlite/tool/omittest.tcl
new file mode 100755
index 0000000..13a70cd2
--- /dev/null
+++ b/third_party/sqlite/tool/omittest.tcl
@@ -0,0 +1,215 @@
+
+set rcsid {$Id: omittest.tcl,v 1.6 2008/08/04 03:51:24 danielk1977 Exp $}
+
+# Documentation for this script. This may be output to stderr
+# if the script is invoked incorrectly.
+set ::USAGE_MESSAGE {
+This Tcl script is used to test the various compile time options
+available for omitting code (the SQLITE_OMIT_xxx options). It
+should be invoked as follows:
+
+ <script> ?-makefile PATH-TO-MAKEFILE?
+
+The default value for ::MAKEFILE is "../Makefile.linux-gcc".
+
+This script builds the testfixture program and runs the SQLite test suite
+once with each SQLITE_OMIT_ option defined and then once with all options
+defined together. Each run is performed in a separate directory created
+as a sub-directory of the current directory by the script. The output
+of the build is saved in <sub-directory>/build.log. The output of the
+test-suite is saved in <sub-directory>/test.log.
+
+Almost any SQLite makefile (except those generated by configure - see below)
+should work. The following properties are required:
+
+ * The makefile should support the "testfixture" target.
+ * The makefile should support the "test" target.
+ * The makefile should support the variable "OPTS" as a way to pass
+ options from the make command line to lemon and the C compiler.
+
+More precisely, the following two invocations must be supported:
+
+ make -f $::MAKEFILE testfixture OPTS="-DSQLITE_OMIT_ALTERTABLE=1"
+ make -f $::MAKEFILE test
+
+Makefiles generated by the sqlite configure program cannot be used as
+they do not respect the OPTS variable.
+}
+
+
+# Build a testfixture executable and run quick.test using it. The first
+# parameter is the name of the directory to create and use to run the
+# test in. The second parameter is a list of OMIT symbols to define
+# when doing so. For example:
+#
+# run_quick_test /tmp/testdir {SQLITE_OMIT_TRIGGER SQLITE_OMIT_VIEW}
+#
+#
+proc run_quick_test {dir omit_symbol_list} {
+ # Compile the value of the OPTS Makefile variable.
+ set opts "-DSQLITE_MEMDEBUG -DSQLITE_DEBUG"
+ if {$::tcl_platform(platform)=="windows"} {
+ append opts " -DSQLITE_OS_WIN=1"
+ } else {
+ append opts " -DSQLITE_OS_UNIX=1"
+ }
+ foreach sym $omit_symbol_list {
+ append opts " -D${sym}=1"
+ }
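+
+  # For the example invocation shown above, $opts on unix would be:
+  #   -DSQLITE_MEMDEBUG -DSQLITE_DEBUG -DSQLITE_OS_UNIX=1
+  #   -DSQLITE_OMIT_TRIGGER=1 -DSQLITE_OMIT_VIEW=1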
+
+ # Create the directory and do the build. If an error occurs return
+ # early without attempting to run the test suite.
+ file mkdir $dir
+ puts -nonewline "Building $dir..."
+ flush stdout
+catch {
+ file copy -force ./config.h $dir
+ file copy -force ./libtool $dir
+}
+ set rc [catch {
+ exec make -C $dir -f $::MAKEFILE testfixture OPTS=$opts >& $dir/build.log
+ }]
+ if {$rc} {
+ puts "No good. See $dir/build.log."
+ return
+ } else {
+ puts "Ok"
+ }
+
+ # Create an empty file "$dir/sqlite3". This is to trick the makefile out
+ # of trying to build the sqlite shell. The sqlite shell won't build
+  # with some of the OMIT options (e.g. OMIT_COMPLETE).
+ set sqlite3_dummy $dir/sqlite3
+ if {$::tcl_platform(platform)=="windows"} {
+ append sqlite3_dummy ".exe"
+ }
+ if {![file exists $sqlite3_dummy]} {
+ set wr [open $sqlite3_dummy w]
+ puts $wr "dummy"
+ close $wr
+ }
+
+ # Run the test suite.
+ puts -nonewline "Testing $dir..."
+ flush stdout
+ set rc [catch {
+ exec make -C $dir -f $::MAKEFILE test OPTS=$opts >& $dir/test.log
+ }]
+ if {$rc} {
+ puts "No good. See $dir/test.log."
+ } else {
+ puts "Ok"
+ }
+}
+
+
+# This proc processes the command line options passed to this script.
+# Currently the only option supported is "-makefile", default
+# "../Makefile.linux-gcc". Set the ::MAKEFILE variable to the value of this
+# option.
+#
+proc process_options {argv} {
+ if {$::tcl_platform(platform)=="windows"} {
+ set ::MAKEFILE ../Makefile ;# Default value
+ } else {
+ set ::MAKEFILE ../Makefile.linux-gcc ;# Default value
+ }
+ for {set i 0} {$i < [llength $argv]} {incr i} {
+ switch -- [lindex $argv $i] {
+ -makefile {
+ incr i
+ set ::MAKEFILE [lindex $argv $i]
+ }
+
+ default {
+ puts stderr [string trim $::USAGE_MESSAGE]
+ exit -1
+ }
+ }
+ set ::MAKEFILE [file normalize $::MAKEFILE]
+ }
+}
+
+# Main routine.
+#
+
+proc main {argv} {
+ # List of SQLITE_OMIT_XXX symbols supported by SQLite.
+ set ::SYMBOLS [list \
+ SQLITE_OMIT_ALTERTABLE \
+ SQLITE_OMIT_ANALYZE \
+ SQLITE_OMIT_ATTACH \
+ SQLITE_OMIT_AUTHORIZATION \
+ SQLITE_OMIT_AUTOINCREMENT \
+ SQLITE_OMIT_AUTOINIT \
+ SQLITE_OMIT_AUTOVACUUM \
+ SQLITE_OMIT_BETWEEN_OPTIMIZATION \
+ SQLITE_OMIT_BLOB_LITERAL \
+ SQLITE_OMIT_BUILTIN_TEST \
+ SQLITE_OMIT_CAST \
+ SQLITE_OMIT_CHECK \
+ SQLITE_OMIT_COMPLETE \
+ SQLITE_OMIT_COMPOUND_SELECT \
+ SQLITE_OMIT_CONFLICT_CLAUSE \
+ SQLITE_OMIT_DATETIME_FUNCS \
+ SQLITE_OMIT_DECLTYPE \
+ SQLITE_OMIT_DISKIO \
+ SQLITE_OMIT_EXPLAIN \
+ SQLITE_OMIT_FLAG_PRAGMAS \
+ SQLITE_OMIT_FLOATING_POINT \
+ SQLITE_OMIT_FOREIGN_KEY \
+ SQLITE_OMIT_GET_TABLE \
+ SQLITE_OMIT_GLOBALRECOVER \
+ SQLITE_OMIT_INCRBLOB \
+ SQLITE_OMIT_INTEGRITY_CHECK \
+ SQLITE_OMIT_LIKE_OPTIMIZATION \
+ SQLITE_OMIT_LOAD_EXTENSION \
+ SQLITE_OMIT_LOCALTIME \
+ SQLITE_OMIT_MEMORYDB \
+ SQLITE_OMIT_OR_OPTIMIZATION \
+ SQLITE_OMIT_PAGER_PRAGMAS \
+ SQLITE_OMIT_PARSER \
+ SQLITE_OMIT_PRAGMA \
+ SQLITE_OMIT_PROGRESS_CALLBACK \
+ SQLITE_OMIT_QUICKBALANCE \
+ SQLITE_OMIT_REINDEX \
+ SQLITE_OMIT_SCHEMA_PRAGMAS \
+ SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS \
+ SQLITE_OMIT_SHARED_CACHE \
+ SQLITE_OMIT_SUBQUERY \
+ SQLITE_OMIT_TCL_VARIABLE \
+ SQLITE_OMIT_TEMPDB \
+ SQLITE_OMIT_TRACE \
+ SQLITE_OMIT_TRIGGER \
+ SQLITE_OMIT_UTF16 \
+ SQLITE_OMIT_VACUUM \
+ SQLITE_OMIT_VIEW \
+ SQLITE_OMIT_VIRTUALTABLE \
+ SQLITE_OMIT_XFER_OPT \
+ ]
+
+ # Process any command line options.
+ process_options $argv
+
+ # First try a test with all OMIT symbols except SQLITE_OMIT_FLOATING_POINT
+ # and SQLITE_OMIT_PRAGMA defined. The former doesn't work (causes segfaults)
+ # and the latter is currently incompatible with the test suite (this should
+ # be fixed, but it will be a lot of work).
+ set allsyms [list]
+ foreach s $::SYMBOLS {
+ if {$s!="SQLITE_OMIT_FLOATING_POINT" && $s!="SQLITE_OMIT_PRAGMA"} {
+ lappend allsyms $s
+ }
+ }
+ run_quick_test test_OMIT_EVERYTHING $allsyms
+
+ # Now try one quick.test with each of the OMIT symbols defined. Included
+ # are the OMIT_FLOATING_POINT and OMIT_PRAGMA symbols, even though we
+ # know they will fail. It's good to be reminded of this from time to time.
+ foreach sym $::SYMBOLS {
+ set dirname "test_[string range $sym 7 end]"
+ run_quick_test $dirname $sym
+ }
+}
+
+main $argv
diff --git a/third_party/sqlite/tool/opcodeDoc.awk b/third_party/sqlite/tool/opcodeDoc.awk
new file mode 100755
index 0000000..4920106
--- /dev/null
+++ b/third_party/sqlite/tool/opcodeDoc.awk
@@ -0,0 +1,23 @@
+#
+# Extract opcode documentation from sqliteVdbe.c and generate HTML
+#
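+# The opcode comments in the VDBE source are expected to look roughly like
+# this (an illustrative example, not copied from the source):
+#
+#   ** Opcode: Add P1 P2 P3
+#   **
+#   ** Add the value in P1 to the value in P2 and store the result in P3.
+#   */
+#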
+BEGIN {
+ print "<html><body bgcolor=white>"
+ print "<h1>SQLite Virtual Database Engine Opcodes</h1>"
+ print "<table>"
+}
+/ Opcode: /,/\*\// {
+ if( $2=="Opcode:" ){
+ printf "<tr><td>%s&nbsp;%s&nbsp;%s&nbsp;%s</td>\n<td>\n", $3, $4, $5, $6
+ }else if( $1=="*/" ){
+ printf "</td></tr>\n"
+ }else if( NF>1 ){
+ sub(/^ *\*\* /,"")
+    # Escape "&" before "<" so the "&" introduced by "&lt;" is not re-escaped
+    gsub(/&/,"\\&amp;")
+    gsub(/</,"\\&lt;")
+ print
+ }
+}
+END {
+ print "</table></body></html>"
+}
diff --git a/third_party/sqlite/tool/report1.txt b/third_party/sqlite/tool/report1.txt
new file mode 100755
index 0000000..7820b8c
--- /dev/null
+++ b/third_party/sqlite/tool/report1.txt
@@ -0,0 +1,66 @@
+The SQL database used for ACD contains 113 tables and indices implemented
+in GDBM. The following are statistics on the sizes of keys and data
+within these tables and indices.
+
+Entries: 962080
+Size: 45573853
+Avg Size: 48
+Key Size: 11045299
+Avg Key Size: 12
+Max Key Size: 99
+
+
+ Size of key             Cumulative
+ and data Instances Percentage
+------------ ---------- -----------
+ 0..8 266 0%
+ 9..12 5485 0%
+ 13..16 73633 8%
+ 17..24 180918 27%
+ 25..32 209823 48%
+ 33..40 148995 64%
+ 41..48 76304 72%
+ 49..56 14346 73%
+ 57..64 15725 75%
+ 65..80 44916 80%
+ 81..96 127815 93%
+ 97..112 34769 96%
+ 113..128 13314 98%
+ 129..144 8098 99%
+ 145..160 3355 99%
+ 161..176 1159 99%
+ 177..192 629 99%
+ 193..208 221 99%
+ 209..224 210 99%
+ 225..240 129 99%
+ 241..256 57 99%
+ 257..288 496 99%
+ 289..320 60 99%
+ 321..352 37 99%
+ 353..384 46 99%
+ 385..416 22 99%
+ 417..448 24 99%
+ 449..480 26 99%
+ 481..512 27 99%
+ 513..1024 471 99%
+ 1025..2048 389 99%
+ 2049..4096 182 99%
+ 4097..8192 74 99%
+ 8193..16384 34 99%
+16385..32768 17 99%
+32769..65536 5 99%
+65537..131073 3 100%
+
+
+This information is gathered to help design the new built-in
+backend for sqlite 2.0. Note in particular that 99% of all
+database entries have a combined key and data size of less than
+144 bytes. So if a leaf node in the new database is able to
+store 144 bytes of combined key and data, only 1% of the leaves
+will require overflow pages. Furthermore, note that no key
+is larger than 99 bytes, so the key will never be on an
+overflow page.
+
+The average combined size of key+data is 48. Add in 16 bytes of
+overhead for a total of 64. That means that a 1K page will
+store (on average) about 16 entries.
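+(Assuming a 1K page is 1024 bytes: 1024/64 = 16 entries per page.)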
diff --git a/third_party/sqlite/tool/showdb.c b/third_party/sqlite/tool/showdb.c
new file mode 100755
index 0000000..b2ed562
--- /dev/null
+++ b/third_party/sqlite/tool/showdb.c
@@ -0,0 +1,86 @@
+/*
+** A utility for printing all or part of an SQLite database file.
+*/
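+/*
+** Example usage (the database file name is illustrative):
+**
+**    showdb test.db            Dump every page of the file
+**    showdb test.db 2 10..20   Dump page 2 and pages 10 through 20
+**    showdb test.db 5..end     Dump from page 5 to the end of the file
+*/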
+#include <stdio.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>   /* strcmp() */
+
+
+static int pagesize = 1024;
+static int db = -1;
+static int mxPage = 0;
+static int perLine = 32;
+
+static void out_of_memory(void){
+ fprintf(stderr,"Out of memory...\n");
+ exit(1);
+}
+
+static void print_page(int iPg){
+ unsigned char *aData;
+ int i, j;
+ aData = malloc(pagesize);
+ if( aData==0 ) out_of_memory();
+ lseek(db, (iPg-1)*pagesize, SEEK_SET);
+ read(db, aData, pagesize);
+ fprintf(stdout, "Page %d:\n", iPg);
+ for(i=0; i<pagesize; i += perLine){
+ fprintf(stdout, " %03x: ",i);
+ for(j=0; j<perLine; j++){
+ fprintf(stdout,"%02x ", aData[i+j]);
+ }
+ for(j=0; j<perLine; j++){
+ fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.');
+ }
+ fprintf(stdout,"\n");
+ }
+ free(aData);
+}
+
+int main(int argc, char **argv){
+ struct stat sbuf;
+ if( argc<2 ){
+ fprintf(stderr,"Usage: %s FILENAME ?PAGE? ...\n", argv[0]);
+ exit(1);
+ }
+ db = open(argv[1], O_RDONLY);
+ if( db<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
+ exit(1);
+ }
+ fstat(db, &sbuf);
+ mxPage = sbuf.st_size/pagesize + 1;
+ if( argc==2 ){
+ int i;
+ for(i=1; i<=mxPage; i++) print_page(i);
+ }else{
+ int i;
+ for(i=2; i<argc; i++){
+ int iStart, iEnd;
+ char *zLeft;
+ iStart = strtol(argv[i], &zLeft, 0);
+ if( zLeft && strcmp(zLeft,"..end")==0 ){
+ iEnd = mxPage;
+ }else if( zLeft && zLeft[0]=='.' && zLeft[1]=='.' ){
+ iEnd = strtol(&zLeft[2], 0, 0);
+ }else{
+ iEnd = iStart;
+ }
+ if( iStart<1 || iEnd<iStart || iEnd>mxPage ){
+ fprintf(stderr,
+ "Page argument should be LOWER?..UPPER?. Range 1 to %d\n",
+ mxPage);
+ exit(1);
+ }
+ while( iStart<=iEnd ){
+ print_page(iStart);
+ iStart++;
+ }
+ }
+ }
+ close(db);
+}
diff --git a/third_party/sqlite/tool/showjournal.c b/third_party/sqlite/tool/showjournal.c
new file mode 100755
index 0000000..ec93c91
--- /dev/null
+++ b/third_party/sqlite/tool/showjournal.c
@@ -0,0 +1,76 @@
+/*
+** A utility for printing an SQLite database journal.
+*/
+#include <stdio.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+static int pagesize = 1024;
+static int db = -1;
+static int mxPage = 0;
+
+static void out_of_memory(void){
+ fprintf(stderr,"Out of memory...\n");
+ exit(1);
+}
+
+static void print_page(int iPg){
+ unsigned char *aData;
+ int i, j;
+ aData = malloc(pagesize);
+ if( aData==0 ) out_of_memory();
+ read(db, aData, pagesize);
+ fprintf(stdout, "Page %d:\n", iPg);
+ for(i=0; i<pagesize; i += 16){
+ fprintf(stdout, " %03x: ",i);
+ for(j=0; j<16; j++){
+ fprintf(stdout,"%02x ", aData[i+j]);
+ }
+ for(j=0; j<16; j++){
+ fprintf(stdout,"%c", isprint(aData[i+j]) ? aData[i+j] : '.');
+ }
+ fprintf(stdout,"\n");
+ }
+ free(aData);
+}
+
+int main(int argc, char **argv){
+ struct stat sbuf;
+ unsigned int u;
+ int rc;
+ unsigned char zBuf[10];
+ unsigned char zBuf2[sizeof(u)];
+ if( argc!=2 ){
+ fprintf(stderr,"Usage: %s FILENAME\n", argv[0]);
+ exit(1);
+ }
+ db = open(argv[1], O_RDONLY);
+ if( db<0 ){
+ fprintf(stderr,"%s: can't open %s\n", argv[0], argv[1]);
+ exit(1);
+ }
+ read(db, zBuf, 8);
+ if( zBuf[7]==0xd6 ){
+ read(db, &u, sizeof(u));
+ printf("Records in Journal: %u\n", u);
+ read(db, &u, sizeof(u));
+ printf("Magic Number: 0x%08x\n", u);
+ }
+ read(db, zBuf2, sizeof(zBuf2));
+ u = zBuf2[0]<<24 | zBuf2[1]<<16 | zBuf2[2]<<8 | zBuf2[3];
+ printf("Database Size: %u\n", u);
+ while( read(db, zBuf2, sizeof(zBuf2))==sizeof(zBuf2) ){
+ u = zBuf2[0]<<24 | zBuf2[1]<<16 | zBuf2[2]<<8 | zBuf2[3];
+ print_page(u);
+ if( zBuf[7]==0xd6 ){
+ read(db, &u, sizeof(u));
+ printf("Checksum: 0x%08x\n", u);
+ }
+ }
+ close(db);
+}
diff --git a/third_party/sqlite/tool/soak1.tcl b/third_party/sqlite/tool/soak1.tcl
new file mode 100755
index 0000000..846f905
--- /dev/null
+++ b/third_party/sqlite/tool/soak1.tcl
@@ -0,0 +1,103 @@
+#!/usr/bin/tclsh
+#
+# Usage:
+#
+# tclsh soak1.tcl local-makefile.mk ?target? ?scenario?
+#
+# This generates many variations on local-makefile.mk (by modifying
+# the OPTS = lines) and runs them with fulltest, one by one. The
+# constructed makefiles are named "soak1.mk".
+#
+# If ?target? is provided, that is the makefile target that is run.
+# The default is "fulltest"
+#
+# If ?scenario? is provided, it is the name of a single scenario to
+# be run. All other scenarios are skipped.
+#
+set localmake [lindex $argv 0]
+set target [lindex $argv 1]
+set scene [lindex $argv 2]
+if {$target==""} {set target fulltest}
+if {$scene==""} {set scene all}
+
+set in [open $localmake]
+set maketxt [read $in]
+close $in
+regsub -all {\\\n} $maketxt {} maketxt
+#set makefilename "soak1-[expr {int(rand()*1000000000)}].mk"
+set makefilename "soak1.mk"
+
+# Generate a makefile
+#
+proc generate_makefile {pattern} {
+ global makefilename maketxt
+ set out [open $makefilename w]
+ set seen_opt 0
+ foreach line [split $maketxt \n] {
+ if {[regexp {^ *#? *OPTS[ =+]} $line]} {
+ if {!$seen_opt} {
+ puts $out "OPTS += -DSQLITE_NO_SYNC=1"
+ foreach x $pattern {
+ puts $out "OPTS += -D$x"
+ }
+ set seen_opt 1
+ }
+ } else {
+ puts $out $line
+ }
+ }
+ close $out
+}
+
+# Run a test
+#
+proc scenario {id title pattern} {
+ global makefilename target scene
+ if {$scene!="all" && $scene!=$id && $scene!=$title} return
+ puts "**************** $title ***************"
+ generate_makefile $pattern
+ exec make -f $makefilename clean >@stdout 2>@stdout
+ exec make -f $makefilename $target >@stdout 2>@stdout
+}
+
+###############################################################################
+# Add new scenarios here
+#
+scenario 0 {Default} {}
+scenario 1 {Debug} {
+ SQLITE_DEBUG=1
+ SQLITE_MEMDEBUG=1
+}
+scenario 2 {Everything} {
+ SQLITE_DEBUG=1
+ SQLITE_MEMDEBUG=1
+ SQLITE_ENABLE_MEMORY_MANAGEMENT=1
+ SQLITE_ENABLE_COLUMN_METADATA=1
+ SQLITE_ENABLE_LOAD_EXTENSION=1 HAVE_DLOPEN=1
+ SQLITE_ENABLE_MEMORY_MANAGEMENT=1
+}
+scenario 3 {Customer-1} {
+ SQLITE_DEBUG=1 SQLITE_MEMDEBUG=1
+ SQLITE_THREADSAFE=1 SQLITE_OS_UNIX=1
+ SQLITE_DISABLE_LFS=1
+ SQLITE_DEFAULT_AUTOVACUUM=1
+ SQLITE_DEFAULT_PAGE_SIZE=1024
+ SQLITE_MAX_PAGE_SIZE=4096
+ SQLITE_DEFAULT_CACHE_SIZE=64
+ SQLITE_DEFAULT_TEMP_CACHE_SIZE=32
+ SQLITE_TEMP_STORE=3
+ SQLITE_OMIT_PROGRESS_CALLBACK=1
+ SQLITE_OMIT_LOAD_EXTENSION=1
+ SQLITE_OMIT_VIRTUALTABLE=1
+ SQLITE_ENABLE_IOTRACE=1
+}
+scenario 4 {Small-Cache} {
+ SQLITE_DEBUG=1 SQLITE_MEMDEBUG=1
+ SQLITE_THREADSAFE=1 SQLITE_OS_UNIX=1
+ SQLITE_DEFAULT_AUTOVACUUM=1
+ SQLITE_DEFAULT_PAGE_SIZE=1024
+ SQLITE_MAX_PAGE_SIZE=2048
+ SQLITE_DEFAULT_CACHE_SIZE=13
+ SQLITE_DEFAULT_TEMP_CACHE_SIZE=11
+ SQLITE_TEMP_STORE=1
+}
diff --git a/third_party/sqlite/tool/space_used.tcl b/third_party/sqlite/tool/space_used.tcl
new file mode 100755
index 0000000..2044aa3
--- /dev/null
+++ b/third_party/sqlite/tool/space_used.tcl
@@ -0,0 +1,111 @@
+# Run this TCL script using "testfixture" in order to get a report that shows
+# how much disk space is used by a particular database to actually store data
+# versus how much space is unused.
+#
+
+# Get the name of the database to analyze
+#
+if {[llength $argv]!=1} {
+ puts stderr "Usage: $argv0 database-name"
+ exit 1
+}
+set file_to_analyze [lindex $argv 0]
+
+# Open the database
+#
+sqlite db [lindex $argv 0]
+set DB [btree_open [lindex $argv 0]]
+
+# Output the schema for the generated report
+#
+puts \
+{BEGIN;
+CREATE TABLE space_used(
+ name clob, -- Name of a table or index in the database file
+ is_index boolean, -- TRUE if it is an index, false for a table
+ payload int, -- Total amount of data stored in this table or index
+ pri_pages int, -- Number of primary pages used
+ ovfl_pages int, -- Number of overflow pages used
+ pri_unused int, -- Number of unused bytes on primary pages
+ ovfl_unused int -- Number of unused bytes on overflow pages
+);}
+
+# This query will be used to find the root page number for every index and
+# table in the database.
+#
+set sql {
+ SELECT name, type, rootpage FROM sqlite_master
+ UNION ALL
+ SELECT 'sqlite_master', 'table', 2
+ ORDER BY 1
+}
+
+# Initialize variables used for summary statistics.
+#
+set total_size 0
+set total_primary 0
+set total_overflow 0
+set total_unused_primary 0
+set total_unused_ovfl 0
+
+# Analyze every table in the database, one at a time.
+#
+foreach {name type rootpage} [db eval $sql] {
+ set cursor [btree_cursor $DB $rootpage 0]
+ set go [btree_first $cursor]
+ set size 0
+ catch {unset pg_used}
+ set unused_ovfl 0
+ set n_overflow 0
+ while {$go==0} {
+ set payload [btree_payload_size $cursor]
+ incr size $payload
+ set stat [btree_cursor_dump $cursor]
+ set pgno [lindex $stat 0]
+ set freebytes [lindex $stat 4]
+ set pg_used($pgno) $freebytes
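+    # Entries larger than 238 bytes spill onto overflow pages, and each
+    # overflow page holds 1020 bytes of payload; these constants correspond
+    # to the SQLite 2.x btree layout that this script targets.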
+ if {$payload>238} {
+ set n [expr {($payload-238+1019)/1020}]
+ incr n_overflow $n
+ incr unused_ovfl [expr {$n*1020+238-$payload}]
+ }
+ set go [btree_next $cursor]
+ }
+ btree_close_cursor $cursor
+ set n_primary [llength [array names pg_used]]
+ set unused_primary 0
+ foreach x [array names pg_used] {incr unused_primary $pg_used($x)}
+ regsub -all ' $name '' name
+ puts -nonewline "INSERT INTO space_used VALUES('$name'"
+ puts -nonewline ",[expr {$type=="index"}]"
+ puts ",$size,$n_primary,$n_overflow,$unused_primary,$unused_ovfl);"
+ incr total_size $size
+ incr total_primary $n_primary
+ incr total_overflow $n_overflow
+ incr total_unused_primary $unused_primary
+ incr total_unused_ovfl $unused_ovfl
+}
+
+# Output summary statistics:
+#
+puts "-- Total payload size: $total_size"
+puts "-- Total pages used: $total_primary primary and $total_overflow overflow"
+set file_pgcnt [expr {[file size [lindex $argv 0]]/1024}]
+puts -nonewline "-- Total unused bytes on primary pages: $total_unused_primary"
+if {$total_primary>0} {
+ set upp [expr {$total_unused_primary/$total_primary}]
+ puts " (avg $upp bytes/page)"
+} else {
+ puts ""
+}
+puts -nonewline "-- Total unused bytes on overflow pages: $total_unused_ovfl"
+if {$total_overflow>0} {
+ set upp [expr {$total_unused_ovfl/$total_overflow}]
+ puts " (avg $upp bytes/page)"
+} else {
+ puts ""
+}
+set n_free [expr {$file_pgcnt-$total_primary-$total_overflow}]
+if {$n_free>0} {incr n_free -1}
+puts "-- Total pages on freelist: $n_free"
+puts "COMMIT;"
diff --git a/third_party/sqlite/tool/spaceanal.tcl b/third_party/sqlite/tool/spaceanal.tcl
new file mode 100755
index 0000000..3718357
--- /dev/null
+++ b/third_party/sqlite/tool/spaceanal.tcl
@@ -0,0 +1,863 @@
+# Run this TCL script using "testfixture" in order to get a report that shows
+# how much disk space is used by a particular database to actually store data
+# versus how much space is unused.
+#
+
+if {[catch {
+
+# Get the name of the database to analyze
+#
+#set argv $argv0
+if {[llength $argv]!=1} {
+ puts stderr "Usage: $argv0 database-name"
+ exit 1
+}
+set file_to_analyze [lindex $argv 0]
+if {![file exists $file_to_analyze]} {
+ puts stderr "No such file: $file_to_analyze"
+ exit 1
+}
+if {![file readable $file_to_analyze]} {
+ puts stderr "File is not readable: $file_to_analyze"
+ exit 1
+}
+if {[file size $file_to_analyze]<512} {
+ puts stderr "Empty or malformed database: $file_to_analyze"
+ exit 1
+}
+
+# Maximum distance between pages before we consider it a "gap"
+#
+set MAXGAP 3
+
+# Open the database
+#
+sqlite3 db [lindex $argv 0]
+set DB [btree_open [lindex $argv 0] 1000 0]
+
+# In-memory database for collecting statistics. This script loops through
+# the tables and indices in the database being analyzed, adding a row for each
+# to an in-memory database (for which the schema is shown below). It then
+# queries the in-memory db to produce the space-analysis report.
+#
+sqlite3 mem :memory:
+set tabledef\
+{CREATE TABLE space_used(
+ name clob, -- Name of a table or index in the database file
+ tblname clob, -- Name of associated table
+ is_index boolean, -- TRUE if it is an index, false for a table
+ nentry int, -- Number of entries in the BTree
+ leaf_entries int, -- Number of leaf entries
+ payload int, -- Total amount of data stored in this table or index
+ ovfl_payload int, -- Total amount of data stored on overflow pages
+ ovfl_cnt int, -- Number of entries that use overflow
+ mx_payload int, -- Maximum payload size
+ int_pages int, -- Number of interior pages used
+ leaf_pages int, -- Number of leaf pages used
+ ovfl_pages int, -- Number of overflow pages used
+ int_unused int, -- Number of unused bytes on interior pages
+ leaf_unused int, -- Number of unused bytes on primary pages
+ ovfl_unused int, -- Number of unused bytes on overflow pages
+ gap_cnt int -- Number of gaps in the page layout
+);}
+mem eval $tabledef
+
+proc integerify {real} {
+ if {[string is double -strict $real]} {
+ return [expr {int($real)}]
+ } else {
+ return 0
+ }
+}
+mem function int integerify
+
+# Quote a string for use in an SQL query. Examples:
+#
+# [quote {hello world}] == {'hello world'}
+# [quote {hello world's}] == {'hello world''s'}
+#
+proc quote {txt} {
+ regsub -all ' $txt '' q
+ return '$q'
+}
+
+# This proc is a wrapper around the btree_cursor_info command. The
+# second argument is an open btree cursor returned by [btree_cursor].
+# The first argument is the name of an array variable that exists in
+# the scope of the caller. If the third argument is non-zero, then
+# info is returned for the page that lies $up entries upwards in the
+# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the
+# grandparent etc.)
+#
+# The following entries in that array are filled in with information retrieved
+# using [btree_cursor_info]:
+#
+# $arrayvar(page_no) = The page number
+# $arrayvar(entry_no) = The entry number
+# $arrayvar(page_entries) = Total number of entries on this page
+# $arrayvar(cell_size) = Cell size (local payload + header)
+# $arrayvar(page_freebytes) = Number of free bytes on this page
+# $arrayvar(page_freeblocks) = Number of free blocks on the page
+# $arrayvar(payload_bytes) = Total payload size (local + overflow)
+# $arrayvar(header_bytes) = Header size in bytes
+# $arrayvar(local_payload_bytes) = Local payload size
+# $arrayvar(parent) = Parent page number
+#
+proc cursor_info {arrayvar csr {up 0}} {
+ upvar $arrayvar a
+ foreach [list a(page_no) \
+ a(entry_no) \
+ a(page_entries) \
+ a(cell_size) \
+ a(page_freebytes) \
+ a(page_freeblocks) \
+ a(payload_bytes) \
+ a(header_bytes) \
+ a(local_payload_bytes) \
+ a(parent) \
+ a(first_ovfl) ] [btree_cursor_info $csr $up] break
+}
+
+# Determine the page-size of the database. This global variable is used
+# throughout the script.
+#
+set pageSize [db eval {PRAGMA page_size}]
+
+# Analyze every table in the database, one at a time.
+#
+# The following query returns the name and root-page of each table in the
+# database, including the sqlite_master table.
+#
+set sql {
+ SELECT name, rootpage FROM sqlite_master
+ WHERE type='table' AND rootpage>0
+ UNION ALL
+ SELECT 'sqlite_master', 1
+ ORDER BY 1
+}
+set wideZero [expr {10000000000 - 10000000000}]  ;# A 64-bit (wide) zero for initializing the statistics counters
+foreach {name rootpage} [db eval $sql] {
+ puts stderr "Analyzing table $name..."
+
+ # Code below traverses the table being analyzed (table name $name), using the
+  # btree cursor $csr. Statistics related to table $name are accumulated in
+ # the following variables:
+ #
+ set total_payload $wideZero ;# Payload space used by all entries
+ set total_ovfl $wideZero ;# Payload space on overflow pages
+ set unused_int $wideZero ;# Unused space on interior nodes
+ set unused_leaf $wideZero ;# Unused space on leaf nodes
+ set unused_ovfl $wideZero ;# Unused space on overflow pages
+ set cnt_ovfl $wideZero ;# Number of entries that use overflows
+ set cnt_leaf_entry $wideZero ;# Number of leaf entries
+  set cnt_int_entry $wideZero  ;# Number of interior entries
+ set mx_payload $wideZero ;# Maximum payload size
+ set ovfl_pages $wideZero ;# Number of overflow pages used
+ set leaf_pages $wideZero ;# Number of leaf pages
+ set int_pages $wideZero ;# Number of interior pages
+ set gap_cnt 0 ;# Number of holes in the page sequence
+ set prev_pgno 0 ;# Last page number seen
+
+ # As the btree is traversed, the array variable $seen($pgno) is set to 1
+ # the first time page $pgno is encountered.
+ #
+ catch {unset seen}
+
+ # The following loop runs once for each entry in table $name. The table
+ # is traversed using the btree cursor stored in variable $csr
+ #
+ set csr [btree_cursor $DB $rootpage 0]
+ for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
+ incr cnt_leaf_entry
+
+ # Retrieve information about the entry the btree-cursor points to into
+ # the array variable $ci (cursor info).
+ #
+ cursor_info ci $csr
+
+ # Check if the payload of this entry is greater than the current
+ # $mx_payload statistic for the table. Also increase the $total_payload
+ # statistic.
+ #
+ if {$ci(payload_bytes)>$mx_payload} {set mx_payload $ci(payload_bytes)}
+ incr total_payload $ci(payload_bytes)
+
+ # If this entry uses overflow pages, then update the $cnt_ovfl,
+ # $total_ovfl, $ovfl_pages and $unused_ovfl statistics.
+ #
+ set ovfl [expr {$ci(payload_bytes)-$ci(local_payload_bytes)}]
+ if {$ovfl} {
+ incr cnt_ovfl
+ incr total_ovfl $ovfl
+ set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]
+ incr ovfl_pages $n
+ incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}]
+ set pglist [btree_ovfl_info $DB $csr]
+ } else {
+ set pglist {}
+ }
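+    # For example (illustrative figures): with a 1024-byte page each overflow
+    # page carries $pageSize-4 = 1020 payload bytes, so an entry that
+    # overflows by 4500 bytes needs ceil(4500/1020.0) = 5 overflow pages and
+    # leaves 5*1020 - 4500 = 600 of those bytes unused.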
+
+ # If this is the first table entry analyzed for the page, then update
+ # the page-related statistics $leaf_pages and $unused_leaf. Also, if
+ # this page has a parent page that has not been analyzed, retrieve
+ # info for the parent and update statistics for it too.
+ #
+ if {![info exists seen($ci(page_no))]} {
+ set seen($ci(page_no)) 1
+ incr leaf_pages
+ incr unused_leaf $ci(page_freebytes)
+ set pglist "$ci(page_no) $pglist"
+
+ # Now check if the page has a parent that has not been analyzed. If
+ # so, update the $int_pages, $cnt_int_entry and $unused_int statistics
+ # accordingly. Then check if the parent page has a parent that has
+ # not yet been analyzed etc.
+ #
+ # set parent $ci(parent_page_no)
+ for {set up 1} \
+ {$ci(parent)!=0 && ![info exists seen($ci(parent))]} {incr up} \
+ {
+ # Mark the parent as seen.
+ #
+ set seen($ci(parent)) 1
+
+ # Retrieve info for the parent and update statistics.
+ cursor_info ci $csr $up
+ incr int_pages
+ incr cnt_int_entry $ci(page_entries)
+ incr unused_int $ci(page_freebytes)
+
+ # parent pages come before their first child
+ set pglist "$ci(page_no) $pglist"
+ }
+ }
+
+ # Check the page list for fragmentation
+ #
+ foreach pg $pglist {
+ if {$pg!=$prev_pgno+1 && $prev_pgno>0} {
+ incr gap_cnt
+ }
+ set prev_pgno $pg
+ }
+ }
+ btree_close_cursor $csr
+
+ # Handle the special case where a table contains no data. In this case
+ # all statistics are zero, except for the number of leaf pages (1) and
+ # the unused bytes on leaf pages ($pageSize - 8).
+ #
+ # An exception to the above is the sqlite_master table. If it is empty
+ # then all statistics are zero except for the number of leaf pages (1),
+ # and the number of unused bytes on leaf pages ($pageSize - 112).
+ #
+ if {[llength [array names seen]]==0} {
+ set leaf_pages 1
+ if {$rootpage==1} {
+ set unused_leaf [expr {$pageSize-112}]
+ } else {
+ set unused_leaf [expr {$pageSize-8}]
+ }
+ }
+
+ # Insert the statistics for the table analyzed into the in-memory database.
+ #
+ set sql "INSERT INTO space_used VALUES("
+ append sql [quote $name]
+ append sql ",[quote $name]"
+ append sql ",0"
+ append sql ",[expr {$cnt_leaf_entry+$cnt_int_entry}]"
+ append sql ",$cnt_leaf_entry"
+ append sql ",$total_payload"
+ append sql ",$total_ovfl"
+ append sql ",$cnt_ovfl"
+ append sql ",$mx_payload"
+ append sql ",$int_pages"
+ append sql ",$leaf_pages"
+ append sql ",$ovfl_pages"
+ append sql ",$unused_int"
+ append sql ",$unused_leaf"
+ append sql ",$unused_ovfl"
+ append sql ",$gap_cnt"
+ append sql );
+ mem eval $sql
+}
+
+# Analyze every index in the database, one at a time.
+#
+# The query below returns the name, associated table and root-page number
+# for every index in the database.
+#
+set sql {
+ SELECT name, tbl_name, rootpage FROM sqlite_master WHERE type='index'
+ ORDER BY 2, 1
+}
+foreach {name tbl_name rootpage} [db eval $sql] {
+ puts stderr "Analyzing index $name of table $tbl_name..."
+
+ # Code below traverses the index being analyzed (index name $name), using the
+ # btree cursor $cursor. Statistics related to index $name are accumulated in
+ # the following variables:
+ #
+ set total_payload $wideZero ;# Payload space used by all entries
+ set total_ovfl $wideZero ;# Payload space on overflow pages
+ set unused_leaf $wideZero ;# Unused space on leaf nodes
+ set unused_ovfl $wideZero ;# Unused space on overflow pages
+ set cnt_ovfl $wideZero ;# Number of entries that use overflows
+ set cnt_leaf_entry $wideZero ;# Number of leaf entries
+ set mx_payload $wideZero ;# Maximum payload size
+ set ovfl_pages $wideZero ;# Number of overflow pages used
+ set leaf_pages $wideZero ;# Number of leaf pages
+ set gap_cnt 0 ;# Number of holes in the page sequence
+ set prev_pgno 0 ;# Last page number seen
+
+ # As the btree is traversed, the array variable $seen($pgno) is set to 1
+ # the first time page $pgno is encountered.
+ #
+ catch {unset seen}
+
+ # The following loop runs once for each entry in index $name. The index
+ # is traversed using the btree cursor stored in variable $csr
+ #
+ set csr [btree_cursor $DB $rootpage 0]
+ for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
+ incr cnt_leaf_entry
+
+ # Retrieve information about the entry the btree-cursor points to into
+ # the array variable $ci (cursor info).
+ #
+ cursor_info ci $csr
+
+ # Check if the payload of this entry is greater than the current
+ # $mx_payload statistic for the table. Also increase the $total_payload
+ # statistic.
+ #
+ set payload [btree_keysize $csr]
+ if {$payload>$mx_payload} {set mx_payload $payload}
+ incr total_payload $payload
+
+ # If this entry uses overflow pages, then update the $cnt_ovfl,
+ # $total_ovfl, $ovfl_pages and $unused_ovfl statistics.
+ #
+ set ovfl [expr {$payload-$ci(local_payload_bytes)}]
+ if {$ovfl} {
+ incr cnt_ovfl
+ incr total_ovfl $ovfl
+ set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]
+ incr ovfl_pages $n
+ incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}]
+ }
+
+ # If this is the first table entry analyzed for the page, then update
+ # the page-related statistics $leaf_pages and $unused_leaf.
+ #
+ if {![info exists seen($ci(page_no))]} {
+ set seen($ci(page_no)) 1
+ incr leaf_pages
+ incr unused_leaf $ci(page_freebytes)
+ set pg $ci(page_no)
+ if {$prev_pgno>0 && $pg!=$prev_pgno+1} {
+ incr gap_cnt
+ }
+ set prev_pgno $ci(page_no)
+ }
+ }
+ btree_close_cursor $csr
+
+  # Handle the special case where an index contains no data. In this case
+ # all statistics are zero, except for the number of leaf pages (1) and
+ # the unused bytes on leaf pages ($pageSize - 8).
+ #
+ if {[llength [array names seen]]==0} {
+ set leaf_pages 1
+ set unused_leaf [expr {$pageSize-8}]
+ }
+
+ # Insert the statistics for the index analyzed into the in-memory database.
+ #
+ set sql "INSERT INTO space_used VALUES("
+ append sql [quote $name]
+ append sql ",[quote $tbl_name]"
+ append sql ",1"
+ append sql ",$cnt_leaf_entry"
+ append sql ",$cnt_leaf_entry"
+ append sql ",$total_payload"
+ append sql ",$total_ovfl"
+ append sql ",$cnt_ovfl"
+ append sql ",$mx_payload"
+ append sql ",0"
+ append sql ",$leaf_pages"
+ append sql ",$ovfl_pages"
+ append sql ",0"
+ append sql ",$unused_leaf"
+ append sql ",$unused_ovfl"
+ append sql ",$gap_cnt"
+ append sql );
+ mem eval $sql
+}
+
+# Generate a single line of output in the statistics section of the
+# report.
+#
+proc statline {title value {extra {}}} {
+ set len [string length $title]
+ set dots [string range {......................................} $len end]
+ set len [string length $value]
+  set sp2 [string range {                } $len end]
+ if {$extra ne ""} {
+ set extra " $extra"
+ }
+ puts "$title$dots $value$sp2$extra"
+}
+
+# Generate a formatted percentage value for $num/$denom
+#
+proc percent {num denom {of {}}} {
+ if {$denom==0.0} {return ""}
+ set v [expr {$num*100.0/$denom}]
+ set of {}
+ if {$v==100.0 || $v<0.001 || ($v>1.0 && $v<99.0)} {
+ return [format {%5.1f%% %s} $v $of]
+ } elseif {$v<0.1 || $v>99.9} {
+ return [format {%7.3f%% %s} $v $of]
+ } else {
+ return [format {%6.2f%% %s} $v $of]
+ }
+}
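+# For example (illustrative): [percent 2 8] computes 25.0 and takes the first
+# branch, formatting with %5.1f (roughly " 25.0%"); a value such as 99.95
+# falls through to the %7.3f branch so the extra precision is kept.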
+
+proc divide {num denom} {
+ if {$denom==0} {return 0.0}
+ return [format %.2f [expr double($num)/double($denom)]]
+}
+
+# Generate a subreport that covers some subset of the database.
+# The $where clause determines which subset to analyze.
+#
+proc subreport {title where} {
+ global pageSize file_pgcnt
+
+ # Query the in-memory database for the sum of various statistics
+ # for the subset of tables/indices identified by the WHERE clause in
+ # $where. Note that even if the WHERE clause matches no rows, the
+ # following query returns exactly one row (because it is an aggregate).
+ #
+ # The results of the query are stored directly by SQLite into local
+ # variables (i.e. $nentry, $nleaf etc.).
+ #
+ mem eval "
+ SELECT
+ int(sum(nentry)) AS nentry,
+ int(sum(leaf_entries)) AS nleaf,
+ int(sum(payload)) AS payload,
+ int(sum(ovfl_payload)) AS ovfl_payload,
+ max(mx_payload) AS mx_payload,
+ int(sum(ovfl_cnt)) as ovfl_cnt,
+ int(sum(leaf_pages)) AS leaf_pages,
+ int(sum(int_pages)) AS int_pages,
+ int(sum(ovfl_pages)) AS ovfl_pages,
+ int(sum(leaf_unused)) AS leaf_unused,
+ int(sum(int_unused)) AS int_unused,
+ int(sum(ovfl_unused)) AS ovfl_unused,
+ int(sum(gap_cnt)) AS gap_cnt
+ FROM space_used WHERE $where" {} {}
+
+ # Output the sub-report title, nicely decorated with * characters.
+ #
+ puts ""
+ set len [string length $title]
+ set stars [string repeat * [expr 65-$len]]
+ puts "*** $title $stars"
+ puts ""
+
+ # Calculate statistics and store the results in TCL variables, as follows:
+ #
+ # total_pages: Database pages consumed.
+ # total_pages_percent: Pages consumed as a percentage of the file.
+ # storage: Bytes consumed.
+ # payload_percent: Payload bytes used as a percentage of $storage.
+ # total_unused: Unused bytes on pages.
+ # avg_payload: Average payload per btree entry.
+ # avg_fanout: Average fanout for internal pages.
+ # avg_unused: Average unused bytes per btree entry.
+ # ovfl_cnt_percent: Percentage of btree entries that use overflow pages.
+ #
+ set total_pages [expr {$leaf_pages+$int_pages+$ovfl_pages}]
+ set total_pages_percent [percent $total_pages $file_pgcnt]
+ set storage [expr {$total_pages*$pageSize}]
+ set payload_percent [percent $payload $storage {of storage consumed}]
+ set total_unused [expr {$ovfl_unused+$int_unused+$leaf_unused}]
+ set avg_payload [divide $payload $nleaf]
+ set avg_unused [divide $total_unused $nleaf]
+ if {$int_pages>0} {
+ # TODO: Is this formula correct?
+ set nTab [mem eval "
+ SELECT count(*) FROM (
+ SELECT DISTINCT tblname FROM space_used WHERE $where AND is_index=0
+ )
+ "]
+ set avg_fanout [mem eval "
+ SELECT (sum(leaf_pages+int_pages)-$nTab)/sum(int_pages) FROM space_used
+ WHERE $where AND is_index = 0
+ "]
+ set avg_fanout [format %.2f $avg_fanout]
+ }
+ set ovfl_cnt_percent [percent $ovfl_cnt $nleaf {of all entries}]
+
+ # Print out the sub-report statistics.
+ #
+ statline {Percentage of total database} $total_pages_percent
+ statline {Number of entries} $nleaf
+ statline {Bytes of storage consumed} $storage
+ statline {Bytes of payload} $payload $payload_percent
+ statline {Average payload per entry} $avg_payload
+ statline {Average unused bytes per entry} $avg_unused
+ if {[info exists avg_fanout]} {
+ statline {Average fanout} $avg_fanout
+ }
+ if {$total_pages>1} {
+ set fragmentation [percent $gap_cnt [expr {$total_pages-1}] {fragmentation}]
+ statline {Fragmentation} $fragmentation
+ }
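+  # For example (illustrative): a category spanning 5 pages with gap_cnt==1
+  # reports a fragmentation of 1/(5-1) = 25%.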
+ statline {Maximum payload per entry} $mx_payload
+ statline {Entries that use overflow} $ovfl_cnt $ovfl_cnt_percent
+ if {$int_pages>0} {
+ statline {Index pages used} $int_pages
+ }
+ statline {Primary pages used} $leaf_pages
+ statline {Overflow pages used} $ovfl_pages
+ statline {Total pages used} $total_pages
+ if {$int_unused>0} {
+ set int_unused_percent \
+ [percent $int_unused [expr {$int_pages*$pageSize}] {of index space}]
+ statline "Unused bytes on index pages" $int_unused $int_unused_percent
+ }
+ statline "Unused bytes on primary pages" $leaf_unused \
+ [percent $leaf_unused [expr {$leaf_pages*$pageSize}] {of primary space}]
+ statline "Unused bytes on overflow pages" $ovfl_unused \
+ [percent $ovfl_unused [expr {$ovfl_pages*$pageSize}] {of overflow space}]
+ statline "Unused bytes on all pages" $total_unused \
+ [percent $total_unused $storage {of all space}]
+ return 1
+}
+
+# Calculate the overhead in pages caused by auto-vacuum.
+#
+# This procedure calculates and returns the number of pages used by the
+# auto-vacuum 'pointer-map'. If the database does not support auto-vacuum,
+# then 0 is returned. The two arguments are the size of the database file in
+# pages and the page size used by the database (in bytes).
+proc autovacuum_overhead {filePages pageSize} {
+
+ # Read the value of meta 4. If non-zero, then the database supports
+ # auto-vacuum. It would be possible to use "PRAGMA auto_vacuum" instead,
+ # but that would not work if the SQLITE_OMIT_PRAGMA macro was defined
+ # when the library was built.
+ set meta4 [lindex [btree_get_meta $::DB] 4]
+
+ # If the database is not an auto-vacuum database or the file consists
+ # of one page only then there is no overhead for auto-vacuum. Return zero.
+ if {0==$meta4 || $filePages==1} {
+ return 0
+ }
+
+ # The number of entries on each pointer map page. The layout of the
+ # database file is one pointer-map page, followed by $ptrsPerPage other
+ # pages, followed by a pointer-map page etc. The first pointer-map page
+ # is the second page of the file overall.
+ set ptrsPerPage [expr double($pageSize/5)]
+
+ # Return the number of pointer map pages in the database.
+ return [expr int(ceil( ($filePages-1.0)/($ptrsPerPage+1.0) ))]
+}
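+# For example (illustrative figures): with a 1024-byte page, $ptrsPerPage is
+# double(1024/5) = 204.0 (the integer division happens first), so a 10000-page
+# auto-vacuum database devotes ceil(9999.0/205.0) = 49 pages to pointer maps.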
+
+
+# Calculate the summary statistics for the database and store the results
+# in TCL variables. They are output below. Variables are as follows:
+#
+# pageSize: Size of each page in bytes.
+# file_bytes: File size in bytes.
+# file_pgcnt: Number of pages in the file.
+# file_pgcnt2: Number of pages in the file (calculated).
+# av_pgcnt: Pages consumed by the auto-vacuum pointer-map.
+# av_percent: Percentage of the file consumed by auto-vacuum pointer-map.
+# inuse_pgcnt: Data pages in the file.
+# inuse_percent: Percentage of pages used to store data.
+# free_pgcnt: Free pages calculated as (<total pages> - <in-use pages>)
+# free_pgcnt2: Free pages in the file according to the file header.
+# free_percent: Percentage of file consumed by free pages (calculated).
+# free_percent2: Percentage of file consumed by free pages (header).
+# ntable: Number of tables in the db.
+# nindex: Number of indices in the db.
+# nautoindex: Number of indices created automatically.
+# nmanindex: Number of indices created manually.
+# user_payload: Number of bytes of payload in table btrees
+# (not including sqlite_master)
+# user_percent: $user_payload as a percentage of total file size.
+
+set file_bytes [file size $file_to_analyze]
+set file_pgcnt [expr {$file_bytes/$pageSize}]
+
+set av_pgcnt [autovacuum_overhead $file_pgcnt $pageSize]
+set av_percent [percent $av_pgcnt $file_pgcnt]
+
+set sql {SELECT sum(leaf_pages+int_pages+ovfl_pages) FROM space_used}
+set inuse_pgcnt [expr int([mem eval $sql])]
+set inuse_percent [percent $inuse_pgcnt $file_pgcnt]
+
+set free_pgcnt [expr $file_pgcnt-$inuse_pgcnt-$av_pgcnt]
+set free_percent [percent $free_pgcnt $file_pgcnt]
+set free_pgcnt2 [lindex [btree_get_meta $DB] 0]
+set free_percent2 [percent $free_pgcnt2 $file_pgcnt]
+
+set file_pgcnt2 [expr {$inuse_pgcnt+$free_pgcnt2+$av_pgcnt}]
+
+set ntable [db eval {SELECT count(*)+1 FROM sqlite_master WHERE type='table'}]
+set nindex [db eval {SELECT count(*) FROM sqlite_master WHERE type='index'}]
+set sql {SELECT count(*) FROM sqlite_master WHERE name LIKE 'sqlite_autoindex%'}
+set nautoindex [db eval $sql]
+set nmanindex [expr {$nindex-$nautoindex}]
+
+# set total_payload [mem eval "SELECT sum(payload) FROM space_used"]
+set user_payload [mem one {SELECT int(sum(payload)) FROM space_used
+ WHERE NOT is_index AND name NOT LIKE 'sqlite_master'}]
+set user_percent [percent $user_payload $file_bytes]
+
+# Output the summary statistics calculated above.
+#
+puts "/** Disk-Space Utilization Report For $file_to_analyze"
+catch {
+ puts "*** As of [clock format [clock seconds] -format {%Y-%b-%d %H:%M:%S}]"
+}
+puts ""
+statline {Page size in bytes} $pageSize
+statline {Pages in the whole file (measured)} $file_pgcnt
+statline {Pages in the whole file (calculated)} $file_pgcnt2
+statline {Pages that store data} $inuse_pgcnt $inuse_percent
+statline {Pages on the freelist (per header)} $free_pgcnt2 $free_percent2
+statline {Pages on the freelist (calculated)} $free_pgcnt $free_percent
+statline {Pages of auto-vacuum overhead} $av_pgcnt $av_percent
+statline {Number of tables in the database} $ntable
+statline {Number of indices} $nindex
+statline {Number of named indices} $nmanindex
+statline {Automatically generated indices} $nautoindex
+statline {Size of the file in bytes} $file_bytes
+statline {Bytes of user payload stored} $user_payload $user_percent
+
+# Output table rankings
+#
+puts ""
+puts "*** Page counts for all tables with their indices ********************"
+puts ""
+mem eval {SELECT tblname, count(*) AS cnt,
+ int(sum(int_pages+leaf_pages+ovfl_pages)) AS size
+ FROM space_used GROUP BY tblname ORDER BY size+0 DESC, tblname} {} {
+ statline [string toupper $tblname] $size [percent $size $file_pgcnt]
+}
+
+# Output subreports
+#
+if {$nindex>0} {
+ subreport {All tables and indices} 1
+}
+subreport {All tables} {NOT is_index}
+if {$nindex>0} {
+ subreport {All indices} {is_index}
+}
+foreach tbl [mem eval {SELECT name FROM space_used WHERE NOT is_index
+ ORDER BY name}] {
+ regsub ' $tbl '' qn
+ set name [string toupper $tbl]
+ set n [mem eval "SELECT count(*) FROM space_used WHERE tblname='$qn'"]
+ if {$n>1} {
+ subreport "Table $name and all its indices" "tblname='$qn'"
+ subreport "Table $name w/o any indices" "name='$qn'"
+ subreport "Indices of table $name" "tblname='$qn' AND is_index"
+ } else {
+ subreport "Table $name" "name='$qn'"
+ }
+}
+
+# Output instructions on what the numbers above mean.
+#
+puts {
+*** Definitions ******************************************************
+
+Page size in bytes
+
+ The number of bytes in a single page of the database file.
+ Usually 1024.
+
+Number of pages in the whole file
+}
+puts \
+" The number of $pageSize-byte pages that go into forming the complete
+ database"
+puts \
+{
+Pages that store data
+
+ The number of pages that store data, either as primary B*Tree pages or
+ as overflow pages. The number at the right is the data pages divided by
+ the total number of pages in the file.
+
+Pages on the freelist
+
+ The number of pages that are not currently in use but are reserved for
+ future use. The percentage at the right is the number of freelist pages
+ divided by the total number of pages in the file.
+
+Pages of auto-vacuum overhead
+
+ The number of pages that store data used by the database to facilitate
+ auto-vacuum. This is zero for databases that do not support auto-vacuum.
+
+Number of tables in the database
+
+ The number of tables in the database, including the SQLITE_MASTER table
+ used to store schema information.
+
+Number of indices
+
+ The total number of indices in the database.
+
+Number of named indices
+
+ The number of indices created using an explicit CREATE INDEX statement.
+
+Automatically generated indices
+
+ The number of indices used to implement PRIMARY KEY or UNIQUE constraints
+ on tables.
+
+Size of the file in bytes
+
+  The total amount of disk space used by the entire database file.
+
+Bytes of user payload stored
+
+ The total number of bytes of user payload stored in the database. The
+ schema information in the SQLITE_MASTER table is not counted when
+ computing this number. The percentage at the right shows the payload
+ divided by the total file size.
+
+Percentage of total database
+
+ The amount of the complete database file that is devoted to storing
+ information described by this category.
+
+Number of entries
+
+ The total number of B-Tree key/value pairs stored under this category.
+
+Bytes of storage consumed
+
+ The total amount of disk space required to store all B-Tree entries
+  under this category. This is the total number of pages used times
+  the page size.
+
+Bytes of payload
+
+ The amount of payload stored under this category. Payload is the data
+ part of table entries and the key part of index entries. The percentage
+ at the right is the bytes of payload divided by the bytes of storage
+ consumed.
+
+Average payload per entry
+
+ The average amount of payload on each entry. This is just the bytes of
+ payload divided by the number of entries.
+
+Average unused bytes per entry
+
+ The average amount of free space remaining on all pages under this
+ category on a per-entry basis. This is the number of unused bytes on
+ all pages divided by the number of entries.
+
+Fragmentation
+
+ The percentage of pages in the table or index that are not
+ consecutive in the disk file. Many filesystems are optimized
+ for sequential file access so smaller fragmentation numbers
+ sometimes result in faster queries, especially for larger
+ database files that do not fit in the disk cache.
+
+Maximum payload per entry
+
+ The largest payload size of any entry.
+
+Entries that use overflow
+
+  The number of entries that use one or more overflow pages.
+
+Total pages used
+
+ This is the number of pages used to hold all information in the current
+ category. This is the sum of index, primary, and overflow pages.
+
+Index pages used
+
+ This is the number of pages in a table B-tree that hold only key (rowid)
+ information and no data.
+
+Primary pages used
+
+ This is the number of B-tree pages that hold both key and data.
+
+Overflow pages used
+
+ The total number of overflow pages used for this category.
+
+Unused bytes on index pages
+
+ The total number of bytes of unused space on all index pages. The
+ percentage at the right is the number of unused bytes divided by the
+ total number of bytes on index pages.
+
+Unused bytes on primary pages
+
+ The total number of bytes of unused space on all primary pages. The
+ percentage at the right is the number of unused bytes divided by the
+ total number of bytes on primary pages.
+
+Unused bytes on overflow pages
+
+ The total number of bytes of unused space on all overflow pages. The
+ percentage at the right is the number of unused bytes divided by the
+ total number of bytes on overflow pages.
+
+Unused bytes on all pages
+
+ The total number of bytes of unused space on all primary and overflow
+ pages. The percentage at the right is the number of unused bytes
+ divided by the total number of bytes.
+}
+
+# Output a dump of the in-memory database. This can be used for more
+# complex offline analysis.
+#
+puts "**********************************************************************"
+puts "The entire text of this report can be sourced into any SQL database"
+puts "engine for further analysis. All of the text above is an SQL comment."
+puts "The data used to generate this report follows:"
+puts "*/"
+puts "BEGIN;"
+puts $tabledef
+unset -nocomplain x
+mem eval {SELECT * FROM space_used} x {
+ puts -nonewline "INSERT INTO space_used VALUES"
+ set sep (
+ foreach col $x(*) {
+ set v $x($col)
+ if {$v=="" || ![string is double $v]} {set v [quote $v]}
+ puts -nonewline $sep$v
+ set sep ,
+ }
+ puts ");"
+}
+puts "COMMIT;"
+
+} err]} {
+ puts "ERROR: $err"
+ puts $errorInfo
+ exit 1
+}
diff --git a/third_party/sqlite/tool/speedtest.tcl b/third_party/sqlite/tool/speedtest.tcl
new file mode 100755
index 0000000..ef39dc5
--- /dev/null
+++ b/third_party/sqlite/tool/speedtest.tcl
@@ -0,0 +1,275 @@
+#!/usr/bin/tclsh
+#
+# Run this script using TCLSH to do a speed comparison between
+# various versions of SQLite and PostgreSQL and MySQL
+#
+
+# Run a test
+#
+set cnt 1
+proc runtest {title} {
+ global cnt
+ set sqlfile test$cnt.sql
+ puts "<h2>Test $cnt: $title</h2>"
+ incr cnt
+ set fd [open $sqlfile r]
+ set sql [string trim [read $fd [file size $sqlfile]]]
+ close $fd
+ set sx [split $sql \n]
+ set n [llength $sx]
+ if {$n>8} {
+ set sql {}
+ for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
+ append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
+ for {set i [expr {$n-3}]} {$i<$n} {incr i} {
+ append sql [lindex $sx $i]<br>\n
+ }
+ } else {
+ regsub -all \n [string trim $sql] <br> sql
+ }
+ puts "<blockquote>"
+ puts "$sql"
+ puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
+ set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
+ set delay 1000
+# exec sync; after $delay;
+# set t [time "exec psql drh <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format PostgreSQL: $t]
+ exec sync; after $delay;
+ set t [time "exec mysql -f drh <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format MySQL: $t]
+# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.3.2:} $t]
+# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (cache=100):} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite248 s2k.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.8:} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite248 sns.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.8 (nosync):} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite2412 s2kb.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.12:} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite2412 snsb.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4.12 (nosync):} $t]
+# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (test):} $t]
+ puts "</table>"
+}
+
+# Initialize the environment
+#
+expr srand(1)
+catch {exec /bin/sh -c {rm -f s*.db}}
+set fd [open clear.sql w]
+puts $fd {
+ drop table t1;
+ drop table t2;
+}
+close $fd
+catch {exec psql drh <clear.sql}
+catch {exec mysql drh <clear.sql}
+set fd [open 2kinit.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=on;
+}
+close $fd
+exec ./sqlite248 s2k.db <2kinit.sql
+exec ./sqlite2412 s2kb.db <2kinit.sql
+set fd [open nosync-init.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=off;
+}
+close $fd
+exec ./sqlite248 sns.db <nosync-init.sql
+exec ./sqlite2412 snsb.db <nosync-init.sql
+set ones {zero one two three four five six seven eight nine
+ ten eleven twelve thirteen fourteen fifteen sixteen seventeen
+ eighteen nineteen}
+set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
+proc number_name {n} {
+ if {$n>=1000} {
+ set txt "[number_name [expr {$n/1000}]] thousand"
+ set n [expr {$n%1000}]
+ } else {
+ set txt {}
+ }
+ if {$n>=100} {
+ append txt " [lindex $::ones [expr {$n/100}]] hundred"
+ set n [expr {$n%100}]
+ }
+ if {$n>=20} {
+ append txt " [lindex $::tens [expr {$n/10}]]"
+ set n [expr {$n%10}]
+ }
+ if {$n>0} {
+ append txt " [lindex $::ones $n]"
+ }
+ set txt [string trim $txt]
+ if {$txt==""} {set txt zero}
+ return $txt
+}
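+# For example (illustrative): [number_name 123456] returns
+# "one hundred twenty three thousand four hundred fifty six".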
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
+for {set i 1} {$i<=1000} {incr i} {
+ set r [expr {int(rand()*100000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+close $fd
+runtest {1000 INSERTs}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+puts $fd "CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t2 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+
+set fd [open test$cnt.sql w]
+for {set i 0} {$i<100} {incr i} {
+ set lwr [expr {$i*100}]
+ set upr [expr {($i+10)*100}]
+ puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
+}
+close $fd
+runtest {100 SELECTs without an index}
+
+
+
+set fd [open test$cnt.sql w]
+for {set i 1} {$i<=100} {incr i} {
+ puts $fd "SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%[number_name $i]%';"
+}
+close $fd
+runtest {100 SELECTs on a string comparison}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {CREATE INDEX i2a ON t2(a);}
+puts $fd {CREATE INDEX i2b ON t2(b);}
+close $fd
+runtest {Creating an index}
+
+
+
+set fd [open test$cnt.sql w]
+for {set i 0} {$i<5000} {incr i} {
+ set lwr [expr {$i*100}]
+ set upr [expr {($i+1)*100}]
+ puts $fd "SELECT count(*), avg(b) FROM t2 WHERE b>=$lwr AND b<$upr;"
+}
+close $fd
+runtest {5000 SELECTs with an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 0} {$i<1000} {incr i} {
+ set lwr [expr {$i*10}]
+ set upr [expr {($i+1)*10}]
+ puts $fd "UPDATE t1 SET b=b*2 WHERE a>=$lwr AND a<$upr;"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {1000 UPDATEs without an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "UPDATE t2 SET b=$r WHERE a=$i;"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 UPDATEs with an index}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "UPDATE t2 SET c='[number_name $r]' WHERE a=$i;"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 text UPDATEs with an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+puts $fd "INSERT INTO t1 SELECT * FROM t2;"
+puts $fd "INSERT INTO t2 SELECT * FROM t1;"
+puts $fd "COMMIT;"
+close $fd
+runtest {INSERTs from a SELECT}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DELETE FROM t2 WHERE c LIKE '%fifty%';}
+close $fd
+runtest {DELETE without an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DELETE FROM t2 WHERE a>10 AND a<20000;}
+close $fd
+runtest {DELETE with an index}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {INSERT INTO t2 SELECT * FROM t1;}
+close $fd
+runtest {A big INSERT after a big DELETE}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {BEGIN;}
+puts $fd {DELETE FROM t1;}
+for {set i 1} {$i<=3000} {incr i} {
+ set r [expr {int(rand()*100000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd {COMMIT;}
+close $fd
+runtest {A big DELETE followed by many small INSERTs}
+
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DROP TABLE t1;}
+puts $fd {DROP TABLE t2;}
+close $fd
+runtest {DROP TABLE}
diff --git a/third_party/sqlite/tool/speedtest16.c b/third_party/sqlite/tool/speedtest16.c
new file mode 100755
index 0000000..e81f1a6
--- /dev/null
+++ b/third_party/sqlite/tool/speedtest16.c
@@ -0,0 +1,169 @@
+/*
+** Performance test for SQLite.
+**
+** This program reads ASCII text from a file named on the command-line.
+** It converts each SQL statement into UTF16 and submits it to SQLite
+** for evaluation. A new UTF16 database is created at the beginning of
+** the program. All statements are timed using the high-resolution timer
+** built into Intel-class processors.
+**
+** To compile this program, first compile the SQLite library separately
+** with full optimizations. For example:
+**
+** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
+**
+** Then link it against this program. But do not optimize this program,
+** because that defeats the hi-res timer.
+**
+** gcc speedtest16.c sqlite3.o -ldl -I../src
+**
+** Then run this program with two arguments: the database file to use and
+** the name of a file containing the SQL script that you want to test:
+**
+** ./a.out database.db test.sql
+*/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <unistd.h>
+#include "sqlite3.h"
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+#include "hwtime.h"
+
+/*
+** Convert a zero-terminated ASCII string into a zero-terminated
+** UTF-16le string. Memory to hold the returned string comes
+** from malloc() and should be freed by the caller.
+*/
+static void *asciiToUtf16le(const char *z){
+ int n = strlen(z);
+ char *z16;
+ int i, j;
+
+ z16 = malloc( n*2 + 2 );
+ for(i=j=0; i<=n; i++){
+ z16[j++] = z[i];
+ z16[j++] = 0;
+ }
+ return (void*)z16;
+}
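+/*
+** For example (illustrative): the 3-character ASCII string "abc" becomes
+** the 8-byte UTF-16le sequence 61 00 62 00 63 00 00 00: each ASCII byte
+** is followed by a zero high byte, and the terminator is expanded the
+** same way.
+*/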
+
+/*
+** Timers
+*/
+static sqlite_uint64 prepTime = 0;
+static sqlite_uint64 runTime = 0;
+static sqlite_uint64 finalizeTime = 0;
+
+/*
+** Prepare and run a single statement of SQL.
+*/
+static void prepareAndRun(sqlite3 *db, const char *zSql){
+ void *utf16;
+ sqlite3_stmt *pStmt;
+ const void *stmtTail;
+ sqlite_uint64 iStart, iElapse;
+ int rc;
+
+ printf("****************************************************************\n");
+ printf("SQL statement: [%s]\n", zSql);
+ utf16 = asciiToUtf16le(zSql);
+ iStart = sqlite3Hwtime();
+ rc = sqlite3_prepare16_v2(db, utf16, -1, &pStmt, &stmtTail);
+ iElapse = sqlite3Hwtime() - iStart;
+ prepTime += iElapse;
+ printf("sqlite3_prepare16_v2() returns %d in %llu cycles\n", rc, iElapse);
+ if( rc==SQLITE_OK ){
+ int nRow = 0;
+ iStart = sqlite3Hwtime();
+ while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
+ iElapse = sqlite3Hwtime() - iStart;
+ runTime += iElapse;
+ printf("sqlite3_step() returns %d after %d rows in %llu cycles\n",
+ rc, nRow, iElapse);
+ iStart = sqlite3Hwtime();
+ rc = sqlite3_finalize(pStmt);
+ iElapse = sqlite3Hwtime() - iStart;
+ finalizeTime += iElapse;
+ printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse);
+ }
+ free(utf16);
+}
+
+int main(int argc, char **argv){
+ void *utf16;
+ sqlite3 *db;
+ int rc;
+ int nSql;
+ char *zSql;
+ int i, j;
+ FILE *in;
+ sqlite_uint64 iStart, iElapse;
+ sqlite_uint64 iSetup = 0;
+ int nStmt = 0;
+ int nByte = 0;
+
+ if( argc!=3 ){
+ fprintf(stderr, "Usage: %s FILENAME SQL-SCRIPT\n"
+ "Runs SQL-SCRIPT as UTF16 against a UTF16 database\n",
+ argv[0]);
+ exit(1);
+ }
+ in = fopen(argv[2], "r");
+ fseek(in, 0L, SEEK_END);
+ nSql = ftell(in);
+ zSql = malloc( nSql+1 );
+ fseek(in, 0L, SEEK_SET);
+ nSql = fread(zSql, 1, nSql, in);
+ zSql[nSql] = 0;
+
+ printf("SQLite version: %d\n", sqlite3_libversion_number());
+ unlink(argv[1]);
+ utf16 = asciiToUtf16le(argv[1]);
+ iStart = sqlite3Hwtime();
+ rc = sqlite3_open16(utf16, &db);
+ iElapse = sqlite3Hwtime() - iStart;
+ iSetup = iElapse;
+ printf("sqlite3_open16() returns %d in %llu cycles\n", rc, iElapse);
+ free(utf16);
+ for(i=j=0; j<nSql; j++){
+ if( zSql[j]==';' ){
+ int isComplete;
+ char c = zSql[j+1];
+ zSql[j+1] = 0;
+ isComplete = sqlite3_complete(&zSql[i]);
+ zSql[j+1] = c;
+ if( isComplete ){
+ zSql[j] = 0;
+ while( i<j && isspace(zSql[i]) ){ i++; }
+ if( i<j ){
+ nStmt++;
+ nByte += j-i;
+ prepareAndRun(db, &zSql[i]);
+ }
+ zSql[j] = ';';
+ i = j+1;
+ }
+ }
+ }
+ iStart = sqlite3Hwtime();
+ sqlite3_close(db);
+ iElapse = sqlite3Hwtime() - iStart;
+ iSetup += iElapse;
+ printf("sqlite3_close() returns in %llu cycles\n", iElapse);
+ printf("\n");
+ printf("Statements run: %15d\n", nStmt);
+ printf("Bytes of SQL text: %15d\n", nByte);
+ printf("Total prepare time: %15llu cycles\n", prepTime);
+ printf("Total run time: %15llu cycles\n", runTime);
+ printf("Total finalize time: %15llu cycles\n", finalizeTime);
+ printf("Open/Close time: %15llu cycles\n", iSetup);
+ printf("Total Time: %15llu cycles\n",
+ prepTime + runTime + finalizeTime + iSetup);
+ return 0;
+}
diff --git a/third_party/sqlite/tool/speedtest2.tcl b/third_party/sqlite/tool/speedtest2.tcl
new file mode 100755
index 0000000..4fd632d
--- /dev/null
+++ b/third_party/sqlite/tool/speedtest2.tcl
@@ -0,0 +1,207 @@
+#!/usr/bin/tclsh
+#
+# Run this script using TCLSH to do a speed comparison between
+# various versions of SQLite and PostgreSQL and MySQL
+#
+
+# Run a test
+#
+set cnt 1
+proc runtest {title} {
+ global cnt
+ set sqlfile test$cnt.sql
+ puts "<h2>Test $cnt: $title</h2>"
+ incr cnt
+ set fd [open $sqlfile r]
+ set sql [string trim [read $fd [file size $sqlfile]]]
+ close $fd
+ set sx [split $sql \n]
+ set n [llength $sx]
+ if {$n>8} {
+ set sql {}
+ for {set i 0} {$i<3} {incr i} {append sql [lindex $sx $i]<br>\n}
+ append sql "<i>... [expr {$n-6}] lines omitted</i><br>\n"
+ for {set i [expr {$n-3}]} {$i<$n} {incr i} {
+ append sql [lindex $sx $i]<br>\n
+ }
+ } else {
+ regsub -all \n [string trim $sql] <br> sql
+ }
+ puts "<blockquote>"
+ puts "$sql"
+ puts "</blockquote><table border=0 cellpadding=0 cellspacing=0>"
+ set format {<tr><td>%s</td><td align="right">&nbsp;&nbsp;&nbsp;%.3f</td></tr>}
+ set delay 1000
+ exec sync; after $delay;
+ set t [time "exec psql drh <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format PostgreSQL: $t]
+ exec sync; after $delay;
+ set t [time "exec mysql -f drh <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format MySQL: $t]
+# set t [time "exec ./sqlite232 s232.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.3.2:} $t]
+# set t [time "exec ./sqlite-100 s100.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (cache=100):} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite240 s2k.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4:} $t]
+ exec sync; after $delay;
+ set t [time "exec ./sqlite240 sns.db <$sqlfile" 1]
+ set t [expr {[lindex $t 0]/1000000.0}]
+ puts [format $format {SQLite 2.4 (nosync):} $t]
+# set t [time "exec ./sqlite-t1 st1.db <$sqlfile" 1]
+# set t [expr {[lindex $t 0]/1000000.0}]
+# puts [format $format {SQLite 2.4 (test):} $t]
+ puts "</table>"
+}
+
+# Initialize the environment
+#
+expr srand(1)
+catch {exec /bin/sh -c {rm -f s*.db}}
+set fd [open clear.sql w]
+puts $fd {
+ drop table t1;
+ drop table t2;
+}
+close $fd
+catch {exec psql drh <clear.sql}
+catch {exec mysql drh <clear.sql}
+set fd [open 2kinit.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=on;
+}
+close $fd
+exec ./sqlite240 s2k.db <2kinit.sql
+exec ./sqlite-t1 st1.db <2kinit.sql
+set fd [open nosync-init.sql w]
+puts $fd {
+ PRAGMA default_cache_size=2000;
+ PRAGMA default_synchronous=off;
+}
+close $fd
+exec ./sqlite240 sns.db <nosync-init.sql
+set ones {zero one two three four five six seven eight nine
+ ten eleven twelve thirteen fourteen fifteen sixteen seventeen
+ eighteen nineteen}
+set tens {{} ten twenty thirty forty fifty sixty seventy eighty ninety}
+proc number_name {n} {
+ if {$n>=1000} {
+ set txt "[number_name [expr {$n/1000}]] thousand"
+ set n [expr {$n%1000}]
+ } else {
+ set txt {}
+ }
+ if {$n>=100} {
+ append txt " [lindex $::ones [expr {$n/100}]] hundred"
+ set n [expr {$n%100}]
+ }
+ if {$n>=20} {
+ append txt " [lindex $::tens [expr {$n/10}]]"
+ set n [expr {$n%10}]
+ }
+ if {$n>0} {
+ append txt " [lindex $::ones $n]"
+ }
+ set txt [string trim $txt]
+ if {$txt==""} {set txt zero}
+ return $txt
+}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+puts $fd "CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "BEGIN;"
+for {set i 1} {$i<=25000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ puts $fd "INSERT INTO t1 VALUES($i,$r,'[number_name $r]');"
+}
+puts $fd "COMMIT;"
+close $fd
+runtest {25000 INSERTs in a transaction}
+
+
+set fd [open test$cnt.sql w]
+puts $fd "DELETE FROM t1;"
+close $fd
+runtest {DELETE everything}
+
+
+set fd [open test$cnt.sql w]
+puts $fd {DROP TABLE t1;}
+close $fd
+runtest {DROP TABLE}
diff --git a/third_party/sqlite/tool/speedtest8.c b/third_party/sqlite/tool/speedtest8.c
new file mode 100755
index 0000000..5972947
--- /dev/null
+++ b/third_party/sqlite/tool/speedtest8.c
@@ -0,0 +1,351 @@
+/*
+** Performance test for SQLite.
+**
+** This program reads ASCII text from a file named on the command-line
+** and submits that text to SQLite for evaluation. A new database
+** is created at the beginning of the program. All statements are
+** timed using the high-resolution timer built into Intel-class processors.
+**
+** To compile this program, first compile the SQLite library separately
+** with full optimizations. For example:
+**
+** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
+**
+** Then link it against this program. But do not optimize this program,
+** because that defeats the hi-res timer.
+**
+** gcc speedtest8.c sqlite3.o -ldl -I../src
+**
+** Then run this program with two arguments: the database file to use and
+** the name of a file containing the SQL script that you want to test:
+**
+** ./a.out test.db test.sql
+*/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <time.h>
+
+#if defined(_MSC_VER)
+#include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/times.h>
+#include <sched.h>
+#endif
+
+#include "sqlite3.h"
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+#include "hwtime.h"
+
+/*
+** Timers
+*/
+static sqlite_uint64 prepTime = 0;
+static sqlite_uint64 runTime = 0;
+static sqlite_uint64 finalizeTime = 0;
+
+/*
+** Prepare and run a single statement of SQL.
+*/
+static void prepareAndRun(sqlite3 *db, const char *zSql, int bQuiet){
+ sqlite3_stmt *pStmt;
+ const char *stmtTail;
+ sqlite_uint64 iStart, iElapse;
+ int rc;
+
+ if (!bQuiet){
+ printf("***************************************************************\n");
+ }
+ if (!bQuiet) printf("SQL statement: [%s]\n", zSql);
+ iStart = sqlite3Hwtime();
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail);
+ iElapse = sqlite3Hwtime() - iStart;
+ prepTime += iElapse;
+ if (!bQuiet){
+ printf("sqlite3_prepare_v2() returns %d in %llu cycles\n", rc, iElapse);
+ }
+ if( rc==SQLITE_OK ){
+ int nRow = 0;
+ iStart = sqlite3Hwtime();
+ while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
+ iElapse = sqlite3Hwtime() - iStart;
+ runTime += iElapse;
+ if (!bQuiet){
+ printf("sqlite3_step() returns %d after %d rows in %llu cycles\n",
+ rc, nRow, iElapse);
+ }
+ iStart = sqlite3Hwtime();
+ rc = sqlite3_finalize(pStmt);
+ iElapse = sqlite3Hwtime() - iStart;
+ finalizeTime += iElapse;
+ if (!bQuiet){
+ printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse);
+ }
+ }
+}
+
+/***************************************************************************
+** The "overwrite" VFS is an overlay over the default VFS. It modifies
+** the xTruncate operation on journal files so that xTruncate merely
+** writes zeros into the first 50 bytes of the file rather than truly
+** truncating the file.
+**
+** The following variables are initialized to be the virtual function
+** tables for the overwrite VFS.
+*/
+static sqlite3_vfs overwrite_vfs;
+static sqlite3_io_methods overwrite_methods;
+
+/*
+** The truncate method for journal files in the overwrite VFS.
+*/
+static int overwriteTruncate(sqlite3_file *pFile, sqlite_int64 size){
+ int rc;
+ static const char buf[50];
+ if( size ){
+ return SQLITE_IOERR;
+ }
+ rc = pFile->pMethods->xWrite(pFile, buf, sizeof(buf), 0);
+ if( rc==SQLITE_OK ){
+ rc = pFile->pMethods->xSync(pFile, SQLITE_SYNC_NORMAL);
+ }
+ return rc;
+}
+
+/*
+** The delete method for journal files in the overwrite VFS.
+*/
+static int overwriteDelete(sqlite3_file *pFile){
+ return overwriteTruncate(pFile, 0);
+}
+
+/*
+** The open method for overwrite VFS. If the file being opened is
+** a journal file then substitute the alternative xTruncate method.
+*/
+static int overwriteOpen(
+ sqlite3_vfs *pVfs,
+ const char *zName,
+ sqlite3_file *pFile,
+ int flags,
+ int *pOutFlags
+){
+ int rc;
+ sqlite3_vfs *pRealVfs;
+ int isJournal;
+
+ isJournal = (flags & (SQLITE_OPEN_MAIN_JOURNAL|SQLITE_OPEN_TEMP_JOURNAL))!=0;
+ pRealVfs = (sqlite3_vfs*)pVfs->pAppData;
+ rc = pRealVfs->xOpen(pRealVfs, zName, pFile, flags, pOutFlags);
+ if( rc==SQLITE_OK && isJournal ){
+ if( overwrite_methods.xTruncate==0 ){
+ sqlite3_io_methods temp;
+ memcpy(&temp, pFile->pMethods, sizeof(temp));
+ temp.xTruncate = overwriteTruncate;
+ memcpy(&overwrite_methods, &temp, sizeof(temp));
+ }
+ pFile->pMethods = &overwrite_methods;
+ }
+ return rc;
+}
+
+/*
+** Overlay the overwrite VFS over top of the current default VFS
+** and make the overlay VFS the new default.
+**
+** This routine can only be evaluated once. On second and subsequent
+** executions it becomes a no-op.
+*/
+static void registerOverwriteVfs(void){
+ sqlite3_vfs *pBase;
+ if( overwrite_vfs.iVersion ) return;
+ pBase = sqlite3_vfs_find(0);
+ memcpy(&overwrite_vfs, pBase, sizeof(overwrite_vfs));
+ overwrite_vfs.pAppData = pBase;
+ overwrite_vfs.xOpen = overwriteOpen;
+ overwrite_vfs.zName = "overwriteVfs";
+ sqlite3_vfs_register(&overwrite_vfs, 1);
+}
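+/*
+** The overlay is only installed when the -overwrite option is given on the
+** command line (see main() below), e.g. an illustrative invocation:
+**
+**     ./a.out -overwrite test.db test.sql
+*/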
+
+int main(int argc, char **argv){
+ sqlite3 *db;
+ int rc;
+ int nSql;
+ char *zSql;
+ int i, j;
+ FILE *in;
+ sqlite_uint64 iStart, iElapse;
+ sqlite_uint64 iSetup = 0;
+ int nStmt = 0;
+ int nByte = 0;
+ const char *zArgv0 = argv[0];
+ int bQuiet = 0;
+#if !defined(_MSC_VER)
+ struct tms tmsStart, tmsEnd;
+ clock_t clkStart, clkEnd;
+#endif
+
+#ifdef HAVE_OSINST
+ extern sqlite3_vfs *sqlite3_instvfs_binarylog(char *, char *, char *);
+ extern void sqlite3_instvfs_destroy(sqlite3_vfs *);
+ sqlite3_vfs *pVfs = 0;
+#endif
+
+ while (argc>3)
+ {
+ if( argc>3 && strcmp(argv[1], "-overwrite")==0 ){
+ registerOverwriteVfs();
+ argv++;
+ argc--;
+ continue;
+ }
+
+#ifdef HAVE_OSINST
+ if( argc>4 && (strcmp(argv[1], "-log")==0) ){
+ pVfs = sqlite3_instvfs_binarylog("oslog", 0, argv[2]);
+ sqlite3_vfs_register(pVfs, 1);
+ argv += 2;
+ argc -= 2;
+ continue;
+ }
+#endif
+
+ /*
+ ** Increasing the priority slightly above normal can help with
+ ** repeatability of testing. Note that with Cygwin, -5 equates
+ ** to "High", +5 equates to "Low", and anything in between
+ ** equates to "Normal".
+ */
+ if( argc>4 && (strcmp(argv[1], "-priority")==0) ){
+#if defined(_MSC_VER)
+ int new_priority = atoi(argv[2]);
+ if(!SetPriorityClass(GetCurrentProcess(),
+ (new_priority<=-5) ? HIGH_PRIORITY_CLASS :
+ (new_priority<=0) ? ABOVE_NORMAL_PRIORITY_CLASS :
+ (new_priority==0) ? NORMAL_PRIORITY_CLASS :
+ (new_priority<5) ? BELOW_NORMAL_PRIORITY_CLASS :
+ IDLE_PRIORITY_CLASS)){
+ printf ("error setting priority\n");
+ exit(2);
+ }
+#else
+ struct sched_param myParam;
+ sched_getparam(0, &myParam);
+ printf ("Current process priority is %d.\n", (int)myParam.sched_priority);
+ myParam.sched_priority = atoi(argv[2]);
+ printf ("Setting process priority to %d.\n", (int)myParam.sched_priority);
+ if (sched_setparam (0, &myParam) != 0){
+ printf ("error setting priority\n");
+ exit(2);
+ }
+#endif
+ argv += 2;
+ argc -= 2;
+ continue;
+ }
+
+ if( argc>3 && strcmp(argv[1], "-quiet")==0 ){
+ bQuiet = -1;
+ argv++;
+ argc--;
+ continue;
+ }
+
+ break;
+ }
+
+ if( argc!=3 ){
+ fprintf(stderr, "Usage: %s [options] FILENAME SQL-SCRIPT\n"
+ "Runs SQL-SCRIPT against a UTF8 database\n"
+ "\toptions:\n"
+ "\t-overwrite\n"
+#ifdef HAVE_OSINST
+ "\t-log <log>\n"
+#endif
+ "\t-priority <value> : set priority of task\n"
+ "\t-quiet : only display summary results\n",
+ zArgv0);
+ exit(1);
+ }
+
+ in = fopen(argv[2], "r");
+ fseek(in, 0L, SEEK_END);
+ nSql = ftell(in);
+ zSql = malloc( nSql+1 );
+ fseek(in, 0L, SEEK_SET);
+ nSql = fread(zSql, 1, nSql, in);
+ zSql[nSql] = 0;
+
+ printf("SQLite version: %d\n", sqlite3_libversion_number());
+ unlink(argv[1]);
+#if !defined(_MSC_VER)
+ clkStart = times(&tmsStart);
+#endif
+ iStart = sqlite3Hwtime();
+ rc = sqlite3_open(argv[1], &db);
+ iElapse = sqlite3Hwtime() - iStart;
+ iSetup = iElapse;
+ if (!bQuiet) printf("sqlite3_open() returns %d in %llu cycles\n", rc, iElapse);
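+  /*
+  ** Split the script at ';' boundaries.  A candidate prefix is only treated
+  ** as a statement once sqlite3_complete() reports it complete, so that a
+  ** semicolon inside, say, a trigger body does not end the statement early;
+  ** each complete statement is then timed by prepareAndRun().
+  */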
+ for(i=j=0; j<nSql; j++){
+ if( zSql[j]==';' ){
+ int isComplete;
+ char c = zSql[j+1];
+ zSql[j+1] = 0;
+ isComplete = sqlite3_complete(&zSql[i]);
+ zSql[j+1] = c;
+ if( isComplete ){
+ zSql[j] = 0;
+ while( i<j && isspace(zSql[i]) ){ i++; }
+ if( i<j ){
+ int n = j - i;
+ if( n>=6 && memcmp(&zSql[i], ".crash",6)==0 ) exit(1);
+ nStmt++;
+ nByte += n;
+ prepareAndRun(db, &zSql[i], bQuiet);
+ }
+ zSql[j] = ';';
+ i = j+1;
+ }
+ }
+ }
+ iStart = sqlite3Hwtime();
+ sqlite3_close(db);
+ iElapse = sqlite3Hwtime() - iStart;
+#if !defined(_MSC_VER)
+ clkEnd = times(&tmsEnd);
+#endif
+ iSetup += iElapse;
+ if (!bQuiet) printf("sqlite3_close() returns in %llu cycles\n", iElapse);
+
+ printf("\n");
+ printf("Statements run: %15d stmts\n", nStmt);
+ printf("Bytes of SQL text: %15d bytes\n", nByte);
+ printf("Total prepare time: %15llu cycles\n", prepTime);
+ printf("Total run time: %15llu cycles\n", runTime);
+ printf("Total finalize time: %15llu cycles\n", finalizeTime);
+ printf("Open/Close time: %15llu cycles\n", iSetup);
+ printf("Total time: %15llu cycles\n",
+ prepTime + runTime + finalizeTime + iSetup);
+
+#if !defined(_MSC_VER)
+ printf("\n");
+ printf("Total user CPU time: %15.3g secs\n", (tmsEnd.tms_utime - tmsStart.tms_utime)/(double)CLOCKS_PER_SEC );
+ printf("Total system CPU time: %15.3g secs\n", (tmsEnd.tms_stime - tmsStart.tms_stime)/(double)CLOCKS_PER_SEC );
+ printf("Total real time: %15.3g secs\n", (clkEnd -clkStart)/(double)CLOCKS_PER_SEC );
+#endif
+
+#ifdef HAVE_OSINST
+ if( pVfs ){
+ sqlite3_instvfs_destroy(pVfs);
+ printf("vfs log written to %s\n", argv[0]);
+ }
+#endif
+
+ return 0;
+}
diff --git a/third_party/sqlite/tool/speedtest8inst1.c b/third_party/sqlite/tool/speedtest8inst1.c
new file mode 100755
index 0000000..f0cb544
--- /dev/null
+++ b/third_party/sqlite/tool/speedtest8inst1.c
@@ -0,0 +1,216 @@
+/*
+** Performance test for SQLite.
+**
+** This program reads ASCII text from a file named on the command-line
+** and submits that text to SQLite for evaluation. A new database
+** is created at the beginning of the program. All statements are
+** timed using the high-resolution timer built into Intel-class processors.
+**
+** To compile this program, first compile the SQLite library separately
+** with full optimizations. For example:
+**
+** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
+**
+** Then link it against this program. But do not optimize this program,
+** because that defeats the hi-res timer.
+**
+** gcc speedtest8.c sqlite3.o -ldl -I../src
+**
+** Then run this program with the -db, -script and -log options described
+** in its usage message, for example:
+**
+**     ./a.out -db test.db -script test.sql -log test.log
+*/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include "sqlite3.h"
+
+#include "test_osinst.c"
+
+/*
+** Prepare and run a single statement of SQL.
+*/
+static void prepareAndRun(sqlite3_vfs *pInstVfs, sqlite3 *db, const char *zSql){
+ sqlite3_stmt *pStmt;
+ const char *stmtTail;
+ int rc;
+ char zMessage[1024];
+ zMessage[1023] = '\0';
+
+ sqlite3_uint64 iTime;
+
+ sqlite3_snprintf(1023, zMessage, "sqlite3_prepare_v2: %s", zSql);
+ sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage);
+
+ iTime = sqlite3Hwtime();
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail);
+ iTime = sqlite3Hwtime() - iTime;
+ sqlite3_instvfs_binarylog_call(pInstVfs,BINARYLOG_PREPARE_V2,iTime,rc,zSql);
+
+ if( rc==SQLITE_OK ){
+ int nRow = 0;
+
+ sqlite3_snprintf(1023, zMessage, "sqlite3_step loop: %s", zSql);
+ sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage);
+ iTime = sqlite3Hwtime();
+ while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
+ iTime = sqlite3Hwtime() - iTime;
+ sqlite3_instvfs_binarylog_call(pInstVfs, BINARYLOG_STEP, iTime, rc, zSql);
+
+ sqlite3_snprintf(1023, zMessage, "sqlite3_finalize: %s", zSql);
+ sqlite3_instvfs_binarylog_marker(pInstVfs, zMessage);
+ iTime = sqlite3Hwtime();
+ rc = sqlite3_finalize(pStmt);
+ iTime = sqlite3Hwtime() - iTime;
+ sqlite3_instvfs_binarylog_call(pInstVfs, BINARYLOG_FINALIZE, iTime, rc, zSql);
+ }
+}
+
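+/*
+** Return 1 if the two nul-terminated strings are byte-for-byte identical,
+** or 0 otherwise.  Used below to match command-line option names.
+*/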
+static int stringcompare(const char *zLeft, const char *zRight){
+ int ii;
+ for(ii=0; zLeft[ii] && zRight[ii]; ii++){
+ if( zLeft[ii]!=zRight[ii] ) return 0;
+ }
+ return( zLeft[ii]==zRight[ii] );
+}
+
+static char *readScriptFile(const char *zFile, int *pnScript){
+ sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
+ sqlite3_file *p;
+ int rc;
+ sqlite3_int64 nByte;
+ char *zData = 0;
+ int flags = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_DB;
+
+ p = (sqlite3_file *)malloc(pVfs->szOsFile);
+ rc = pVfs->xOpen(pVfs, zFile, p, flags, &flags);
+ if( rc!=SQLITE_OK ){
+ goto error_out;
+ }
+
+ rc = p->pMethods->xFileSize(p, &nByte);
+ if( rc!=SQLITE_OK ){
+ goto close_out;
+ }
+
+ zData = (char *)malloc(nByte+1);
+ rc = p->pMethods->xRead(p, zData, nByte, 0);
+ if( rc!=SQLITE_OK ){
+ goto close_out;
+ }
+ zData[nByte] = '\0';
+
+ p->pMethods->xClose(p);
+ free(p);
+ *pnScript = nByte;
+ return zData;
+
+close_out:
+ p->pMethods->xClose(p);
+
+error_out:
+ free(p);
+ free(zData);
+ return 0;
+}
+
+int main(int argc, char **argv){
+
+ const char zUsageMsg[] =
+ "Usage: %s options...\n"
+ " where available options are:\n"
+ "\n"
+ " -db DATABASE-FILE (database file to operate on)\n"
+ " -script SCRIPT-FILE (script file to read sql from)\n"
+ " -log LOG-FILE (log file to create)\n"
+ " -logdata (log all data to log file)\n"
+ "\n"
+ " Options -db, -script and -log are compulsory\n"
+ "\n"
+ ;
+
+ const char *zDb = 0;
+ const char *zScript = 0;
+ const char *zLog = 0;
+ int logdata = 0;
+
+ int ii;
+ int i, j;
+ int rc;
+
+ sqlite3_vfs *pInstVfs; /* Instrumentation VFS */
+
+ char *zSql = 0;
+ int nSql;
+
+ sqlite3 *db;
+
+ for(ii=1; ii<argc; ii++){
+ if( stringcompare("-db", argv[ii]) && (ii+1)<argc ){
+ zDb = argv[++ii];
+ }
+
+ else if( stringcompare("-script", argv[ii]) && (ii+1)<argc ){
+ zScript = argv[++ii];
+ }
+
+ else if( stringcompare("-log", argv[ii]) && (ii+1)<argc ){
+ zLog = argv[++ii];
+ }
+
+ else if( stringcompare("-logdata", argv[ii]) ){
+ logdata = 1;
+ }
+
+ else {
+ goto usage;
+ }
+ }
+ if( !zDb || !zScript || !zLog ) goto usage;
+
+ zSql = readScriptFile(zScript, &nSql);
+ if( !zSql ){
+ fprintf(stderr, "Failed to read script file\n");
+ return -1;
+ }
+
+ pInstVfs = sqlite3_instvfs_binarylog("logging", 0, zLog, logdata);
+
+ rc = sqlite3_open_v2(
+ zDb, &db, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, "logging"
+ );
+ if( rc!=SQLITE_OK ){
+ fprintf(stderr, "Failed to open db: %s\n", sqlite3_errmsg(db));
+ return -2;
+ }
+
+ for(i=j=0; j<nSql; j++){
+ if( zSql[j]==';' ){
+ int isComplete;
+ char c = zSql[j+1];
+ zSql[j+1] = 0;
+ isComplete = sqlite3_complete(&zSql[i]);
+ zSql[j+1] = c;
+ if( isComplete ){
+ zSql[j] = 0;
+ while( i<j && isspace(zSql[i]) ){ i++; }
+ if( i<j ){
+ prepareAndRun(pInstVfs, db, &zSql[i]);
+ }
+ zSql[j] = ';';
+ i = j+1;
+ }
+ }
+ }
+
+ sqlite3_instvfs_destroy(pInstVfs);
+ return 0;
+
+usage:
+ fprintf(stderr, zUsageMsg, argv[0]);
+ return -3;
+}