author     mpcomplete@google.com <mpcomplete@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-01-06 22:39:41 +0000
committer  mpcomplete@google.com <mpcomplete@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2009-01-06 22:39:41 +0000
commit     586381f8db3497c24c11f96234f1879b34e74bc7 (patch)
tree       99f7d18350289b135ef6dd5c161baba8bce668a3 /third_party/sqlite/test/pageropt.test
parent     6e3b12ff2cbbe8c481f986c8f0dd230bb50add2a (diff)
Upgrade our sqlite to 3.6.1, with the local changes made by Gears. I'm
checking in the full sqlite tree to make upstream merges easier. This means
we'll have generated sources split out from the originals.
One important change this makes is that "BEGIN" now defaults to "BEGIN
IMMEDIATE" rather than "BEGIN DEFERRED". This doesn't affect us because we
don't use unqualified BEGIN statements.
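A minimal sketch of the behavioral difference, written against the same Tcl interface the test suite uses (hypothetical session, not part of this CL; assumes the SQLite Tcl bindings are installed):

package require sqlite3
sqlite3 dbA test.db
sqlite3 dbB test.db
# BEGIN IMMEDIATE acquires the write (RESERVED) lock up front, so a
# second writer fails right away with SQLITE_BUSY...
dbA eval {BEGIN IMMEDIATE}
catch {dbB eval {BEGIN IMMEDIATE}} msg
puts $msg                          ;# database is locked
dbA eval {COMMIT}
# ...whereas a deferred BEGIN takes no lock until the first statement
# touches the database, so any conflict surfaces later, mid-transaction.
dbB eval {BEGIN DEFERRED}
dbB eval {SELECT count(*) FROM sqlite_master}
dbB eval {COMMIT}
dbA close
dbB close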
The full CL is too big for Rietveld. I'm splitting it into 2. This one is
reviewable. The other CL is just a fresh drop of:
//depot/googleclient/gears/opensource/third_party/sqlite_google
Review URL: http://codereview.chromium.org/15067
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@7623 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'third_party/sqlite/test/pageropt.test')
-rwxr-xr-x  third_party/sqlite/test/pageropt.test  201
1 file changed, 201 insertions(+), 0 deletions(-)
diff --git a/third_party/sqlite/test/pageropt.test b/third_party/sqlite/test/pageropt.test
new file mode 100755
index 0000000..3ab1ff5
--- /dev/null
+++ b/third_party/sqlite/test/pageropt.test
@@ -0,0 +1,201 @@
+# 2007 April 12
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+#    May you do good and not evil.
+#    May you find forgiveness for yourself and forgive others.
+#    May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for the SQLite library.
+# The focus of the tests in this file is to verify that the
+# pager optimizations implemented in version 3.3.14 work.
+#
+# $Id: pageropt.test,v 1.4 2008/04/14 01:00:58 drh Exp $
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+ifcapable {!pager_pragmas||secure_delete} {
+  finish_test
+  return
+}
+
+# Run the SQL statement supplied by the argument and return
+# the results. Prepend four integers to the beginning of the
+# result which are
+#
+#     (1)  The number of page reads from the database
+#     (2)  The number of page writes to the database
+#     (3)  The number of page writes to the journal
+#     (4)  The number of cache pages freed
+#
+proc pagercount_sql {sql {db db}} {
+  global sqlite3_pager_readdb_count
+  global sqlite3_pager_writedb_count
+  global sqlite3_pager_writej_count
+  global sqlite3_pager_pgfree_count
+  set sqlite3_pager_readdb_count 0
+  set sqlite3_pager_writedb_count 0
+  set sqlite3_pager_writej_count 0
+  set sqlite3_pager_pgfree_count 0
+  set r [$db eval $sql]
+  set cnt [list $sqlite3_pager_readdb_count \
+                $sqlite3_pager_writedb_count \
+                $sqlite3_pager_writej_count \
+                $sqlite3_pager_pgfree_count]
+  return [concat $cnt $r]
+}
+
+# Set up the test database.
+#
+do_test pageropt-1.1 {
+  sqlite3_soft_heap_limit 0
+  execsql {
+    PRAGMA auto_vacuum = OFF;
+    PRAGMA page_size = 1024;
+  }
+  pagercount_sql {
+    CREATE TABLE t1(x);
+  }
+} {0 2 0 0}
+do_test pageropt-1.2 {
+  pagercount_sql {
+    INSERT INTO t1 VALUES(randomblob(5000));
+  }
+} {0 6 2 0}
+
+# Verify that values remain in the cache for subsequent reads.
+# We should not have to go back to disk.
+#
+do_test pageropt-1.3 {
+  pagercount_sql {
+    SELECT length(x) FROM t1
+  }
+} {0 0 0 0 5000}
+
+# If another thread reads the database, the original cache
+# remains valid.
+#
+sqlite3 db2 test.db
+set blobcontent [db2 one {SELECT hex(x) FROM t1}]
+do_test pageropt-1.4 {
+  pagercount_sql {
+    SELECT hex(x) FROM t1
+  }
+} [list 0 0 0 0 $blobcontent]
+
+# But if the other thread modifies the database, then the cache
+# must refill.
+#
+do_test pageropt-1.5 {
+  db2 eval {CREATE TABLE t2(y)}
+  pagercount_sql {
+    SELECT hex(x) FROM t1
+  }
+} [list 6 0 0 6 $blobcontent]
+do_test pageropt-1.6 {
+  pagercount_sql {
+    SELECT hex(x) FROM t1
+  }
+} [list 0 0 0 0 $blobcontent]
+
+# Verify that the last page of an overflow chain is not read from
+# disk when deleting a row. The one row of t1(x) has four pages
+# of overflow. So deleting that row from t1 should involve reading
+# the sqlite_master table (1 page), the main page of t1 (1 page), and
+# the first three overflow pages of t1, for a total of 5 pages.
+#
+# Pages written are page 1 (for the freelist pointer), the root page
+# of the table, and one of the overflow chain pointers because it
+# becomes the trunk of the freelist. Total: 3.
+#
+do_test pageropt-2.1 {
+  db close
+  sqlite3 db test.db
+  pagercount_sql {
+    DELETE FROM t1 WHERE rowid=1
+  }
+} {5 3 3 0}
+
+# When pulling pages off of the freelist, there is no reason
+# to actually bring in the old content.
+#
+do_test pageropt-2.2 {
+  db close
+  sqlite3 db test.db
+  pagercount_sql {
+    INSERT INTO t1 VALUES(randomblob(1500));
+  }
+} {3 4 3 0}
+do_test pageropt-2.3 {
+  pagercount_sql {
+    INSERT INTO t1 VALUES(randomblob(1500));
+  }
+} {0 4 3 0}
+
+# Note the new optimization: when pulling the very last page off of the
+# freelist, we do not read the content of that page.
+#
+do_test pageropt-2.4 {
+  pagercount_sql {
+    INSERT INTO t1 VALUES(randomblob(1500));
+  }
+} {0 5 3 0}
+
+# Appending a large quantity of data does not involve writing much
+# to the journal file.
+#
+do_test pageropt-3.1 {
+  pagercount_sql {
+    INSERT INTO t2 SELECT * FROM t1;
+  }
+} {1 7 2 0}
+
+# Once again, we do not need to read the last page of an overflow chain
+# while deleting.
+#
+do_test pageropt-3.2 {
+  pagercount_sql {
+    DROP TABLE t2;
+  }
+} {0 2 3 0}
+do_test pageropt-3.3 {
+  pagercount_sql {
+    DELETE FROM t1;
+  }
+} {0 3 3 0}
+
+# There are now 11 pages on the freelist. Move them all into an
+# overflow chain by inserting a single large record. Starting from
+# a cold cache, only page 1, the root page of table t1, and the trunk
+# of the freelist need to be read (3 pages). And only those three
+# pages need to be journalled. But 13 pages need to be written:
+# page 1, the root page of table t1, and an 11-page overflow chain.
+#
+do_test pageropt-4.1 {
+  db close
+  sqlite3 db test.db
+  pagercount_sql {
+    INSERT INTO t1 VALUES(randomblob(11300))
+  }
+} {3 13 3 0}
+
+# Now we delete that big entry, starting from a cold cache and an
+# empty freelist. The first 10 pages of the 11-page overflow chain
+# have to be read, together with page 1 and the root page of the t1
+# table: 12 reads total. But only page 1, the t1 root, and the trunk
+# of the freelist need to be journalled and written back.
+#
+do_test pageropt-4.2 {
+  db close
+  sqlite3 db test.db
+  pagercount_sql {
+    DELETE FROM t1
+  }
+} {12 3 3 0}
+
+sqlite3_soft_heap_limit $soft_limit
+catch {db2 close}
+finish_test
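For readers new to the harness: do_test, defined in tester.tcl, evaluates its script argument and compares the result, as a string, against the expected value, which is why each test above ends in a literal list such as {0 2 0 0}: the four pager counters that pagercount_sql prepends to the query result. A rough stand-in for do_test in plain Tcl (a simplified sketch; the real tester.tcl also tracks error counts, test exclusions, and more):

# Minimal do_test look-alike: run the script at global scope, then compare
# the stringified result against the expected value and report pass/fail.
proc do_test {name script expected} {
  if {[catch {uplevel #0 $script} result]} {
    puts "$name... ERROR: $result"
  } elseif {$result eq $expected} {
    puts "$name... Ok"
  } else {
    puts "$name... FAILED: expected {$expected}, got {$result}"
  }
}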