| author | phajdan.jr@chromium.org <phajdan.jr@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-04-30 08:15:31 +0000 |
|---|---|---|
| committer | phajdan.jr@chromium.org <phajdan.jr@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-04-30 08:15:31 +0000 |
| commit | 82df52ead5f3e27ac2c745b89c3d4e7475d54918 (patch) | |
| tree | 0203f2bcc6ec0d20c24437b763f3aad1bf9ea863 /net/tools | |
| parent | 228cb40ea43f32a355844adf3ef0602439aeecc3 (diff) | |
Revert "[Third time landing] Python implementation of sync server, for testing."
This broke the memory waterfall: it failed to import protobuf.
TBR=nick
Review URL: http://codereview.chromium.org/1822001
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@46048 0039d316-1c4b-4281-b951-d872f2087c98
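The import failure behind this revert is visible in the deleted files below: chromiumsync.py and its test import the protobuf Python runtime and generated `*_pb2` modules, which the memory-waterfall bots apparently could not import. Roughly, the dependency looked like this (imports copied from the deleted sources; the sys.path remark is an inference, not stated in the CL):

```python
# Imports the reverted code depended on (taken from the deleted files).
# They require the protobuf Python runtime and the generated *_pb2
# modules to be importable, which the bots evidently were not set up for.
from google.protobuf import text_format  # protobuf runtime

import sync_pb2                # generated from sync.proto
import bookmark_specifics_pb2  # generated per-datatype specifics protos
```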
Diffstat (limited to 'net/tools')

| -rwxr-xr-x | net/tools/testserver/chromiumsync.py | 653 |
| -rwxr-xr-x | net/tools/testserver/chromiumsync_test.py | 317 |
| -rw-r--r-- | net/tools/testserver/testserver.py | 61 |

3 files changed, 3 insertions(+), 1028 deletions(-)
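For orientation before the full diff: the reverted code wired a sync handler into testserver.py as a POST endpoint at /chromiumsync/command, which consumed a serialized ClientToServerMessage and returned a serialized ClientToServerResponse. A client round trip looked roughly like this (a sketch, not part of the change; the port, the `share` field value, and having sync_pb2 on sys.path are all assumptions):

```python
# Hypothetical client round trip against the reverted handler (Python 2,
# matching the codebase). Assumes a local testserver.py on port 8888 and
# an importable sync_pb2 module; neither is guaranteed by this CL.
import urllib2

import sync_pb2

request = sync_pb2.ClientToServerMessage()
request.share = ''  # assumed required field; value unused by the test server
request.message_contents = sync_pb2.ClientToServerMessage.GET_UPDATES
request.get_updates.from_timestamp = 0

# urlopen with a data argument issues a POST, as the handler expects.
raw_reply = urllib2.urlopen('http://localhost:8888/chromiumsync/command',
                            request.SerializeToString()).read()

response = sync_pb2.ClientToServerResponse()
response.MergeFromString(raw_reply)
print 'server timestamp:', response.get_updates.new_timestamp
```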
diff --git a/net/tools/testserver/chromiumsync.py b/net/tools/testserver/chromiumsync.py
deleted file mode 100755
index 2268bbd..0000000
--- a/net/tools/testserver/chromiumsync.py
+++ /dev/null
@@ -1,653 +0,0 @@
-#!/usr/bin/python2.4
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""An implementation of the server side of the Chromium sync protocol.
-
-The details of the protocol are described mostly by comments in the protocol
-buffer definition at chrome/browser/sync/protocol/sync.proto.
-"""
-
-import operator
-import random
-import threading
-
-import autofill_specifics_pb2
-import bookmark_specifics_pb2
-import preference_specifics_pb2
-import theme_specifics_pb2
-import typed_url_specifics_pb2
-import sync_pb2
-
-# An enumeration of the various kinds of data that can be synced.
-# Over the wire, this enumeration is not used: a sync object's type is
-# inferred by which EntitySpecifics extension it has.  But in the context
-# of a program, it is useful to have an enumeration.
-ALL_TYPES = (
-    TOP_LEVEL,  # The type of the 'Google Chrome' folder.
-    BOOKMARK,
-    AUTOFILL,
-    TYPED_URL,
-    PREFERENCE,
-    # PASSWORD,  # Disabled since there's no specifics proto.
-    # SESSION,
-    THEME) = range(6)
-
-# Given a sync type from ALL_TYPES, find the extension token corresponding
-# to that datatype.  Note that TOP_LEVEL has no such token.
-SYNC_TYPE_TO_EXTENSION = {
-    BOOKMARK: bookmark_specifics_pb2.bookmark,
-    AUTOFILL: autofill_specifics_pb2.autofill,
-    TYPED_URL: typed_url_specifics_pb2.typed_url,
-    PREFERENCE: preference_specifics_pb2.preference,
-    # PASSWORD: password_specifics_pb2.password,  # Disabled
-    # SESSION: session_specifics_pb2.session,  # Disabled
-    THEME: theme_specifics_pb2.theme,
-    }
-
-# The parent ID used to indicate a top-level node.
-ROOT_ID = '0'
-
-def GetEntryType(entry):
-  """Extract the sync type from a SyncEntry.
-
-  Args:
-    entry: A SyncEntity protobuf object whose type to determine.
-  Returns:
-    A value from ALL_TYPES if the entry's type can be determined, or None
-    if the type cannot be determined.
-  """
-  if entry.server_defined_unique_tag == 'google_chrome':
-    return TOP_LEVEL
-  entry_types = GetEntryTypesFromSpecifics(entry.specifics)
-  if not entry_types:
-    return None
-  # It is presupposed that the entry has at most one specifics extension
-  # present.  If there is more than one, either there's a bug, or else
-  # the caller should use GetEntryTypes.
-  if len(entry_types) > 1:
-    raise 'GetEntryType called with multiple extensions present.'
-  return entry_types[0]
-
-def GetEntryTypesFromSpecifics(specifics):
-  """Determine the sync types indicated by an EntitySpecifics's extension(s).
-
-  If the specifics have more than one recognized extension (as commonly
-  happens with the requested_types field of GetUpdatesMessage), all types
-  will be returned.  Callers must handle the possibility of the returned
-  value having more than one item.
-
-  Args:
-    specifics: A EntitySpecifics protobuf message whose extensions to
-        enumerate.
-  Returns:
-    A list of the sync types (values from ALL_TYPES) associated with each
-    recognized extension of the specifics message.
- """ - entry_types = [] - for data_type, extension in SYNC_TYPE_TO_EXTENSION.iteritems(): - if specifics.HasExtension(extension): - entry_types.append(data_type) - return entry_types - -def GetRequestedTypes(get_updates_message): - """Determine the sync types requested by a client GetUpdates operation.""" - types = GetEntryTypesFromSpecifics( - get_updates_message.requested_types) - if types: - types.append(TOP_LEVEL) - return types - -def GetDefaultEntitySpecifics(data_type): - """Get an EntitySpecifics having a sync type's default extension value. - """ - specifics = sync_pb2.EntitySpecifics() - if data_type in SYNC_TYPE_TO_EXTENSION: - extension_handle = SYNC_TYPE_TO_EXTENSION[data_type] - specifics.Extensions[extension_handle].SetInParent() - return specifics - -def DeepCopyOfProto(proto): - """Return a deep copy of a protocol buffer.""" - new_proto = type(proto)() - new_proto.MergeFrom(proto) - return new_proto - - -class PermanentItem(object): - """A specification of one server-created permanent item. - - Attributes: - tag: A known-to-the-client value that uniquely identifies a server-created - permanent item. - name: The human-readable display name for this item. - parent_tag: The tag of the permanent item's parent. If ROOT_ID, indicates - a top-level item. Otherwise, this must be the tag value of some other - server-created permanent item. - sync_type: A value from ALL_TYPES, giving the datatype of this permanent - item. This controls which types of client GetUpdates requests will - cause the permanent item to be created and returned. - """ - - def __init__(self, tag, name, parent_tag, sync_type): - self.tag = tag - self.name = name - self.parent_tag = parent_tag - self.sync_type = sync_type - -class SyncDataModel(object): - """Models the account state of one sync user. - """ - _BATCH_SIZE = 100 - - # Specify all the permanent items that a model might need. - _PERMANENT_ITEM_SPECS = [ - PermanentItem('google_chrome', name='Google Chrome', - parent_tag=ROOT_ID, sync_type=TOP_LEVEL), - PermanentItem('google_chrome_bookmarks', name='Bookmarks', - parent_tag='google_chrome', sync_type=BOOKMARK), - PermanentItem('bookmark_bar', name='Bookmark Bar', - parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK), - PermanentItem('other_bookmarks', name='Other Bookmarks', - parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK), - PermanentItem('google_chrome_preferences', name='Preferences', - parent_tag='google_chrome', sync_type=PREFERENCE), - PermanentItem('google_chrome_autofill', name='Autofill', - parent_tag='google_chrome', sync_type=AUTOFILL), - # TODO(nick): Disabled since the protocol does not support them yet. - # PermanentItem('google_chrome_passwords', name='Passwords', - # parent_tag='google_chrome', sync_type=PASSWORD), - # PermanentItem('google_chrome_sessions', name='Sessions', - # parent_tag='google_chrome', SESSION), - PermanentItem('google_chrome_themes', name='Themes', - parent_tag='google_chrome', sync_type=THEME), - PermanentItem('google_chrome_typed_urls', name='Typed URLs', - parent_tag='google_chrome', sync_type=TYPED_URL), - ] - - def __init__(self): - self._version = 0 - - # Monotonically increasing version number. The next object change will - # take on this value + 1. - self._entries = {} - - # TODO(nick): uuid.uuid1() is better, but python 2.5 only. - self.store_birthday = '%0.30f' % random.random() - - def _SaveEntry(self, entry): - """Insert or update an entry in the change log, and give it a new version. 
-
-    The ID fields of this entry are assumed to be valid server IDs.  This
-    entry will be updated with a new version number and sync_timestamp.
-
-    Args:
-      entry: The entry to be added or updated.
-    """
-    self._version = self._version + 1
-    entry.version = self._version
-    entry.sync_timestamp = self._version
-
-    # Preserve the originator info, which the client is not required to send
-    # when updating.
-    base_entry = self._entries.get(entry.id_string)
-    if base_entry:
-      entry.originator_cache_guid = base_entry.originator_cache_guid
-      entry.originator_client_item_id = base_entry.originator_client_item_id
-
-    self._entries[entry.id_string] = DeepCopyOfProto(entry)
-
-  def _ServerTagToId(self, tag):
-    """Determine the server ID from a server-unique tag.
-
-    The resulting value is guaranteed not to collide with the other ID
-    generation methods.
-
-    Args:
-      tag: The unique, known-to-the-client tag of a server-generated item.
-    """
-    if tag and tag != ROOT_ID:
-      return '<server tag>%s' % tag
-    else:
-      return tag
-
-  def _ClientTagToId(self, tag):
-    """Determine the server ID from a client-unique tag.
-
-    The resulting value is guaranteed not to collide with the other ID
-    generation methods.
-
-    Args:
-      tag: The unique, opaque-to-the-server tag of a client-tagged item.
-    """
-    return '<client tag>%s' % tag
-
-  def _ClientIdToId(self, client_guid, client_item_id):
-    """Compute a unique server ID from a client-local ID tag.
-
-    The resulting value is guaranteed not to collide with the other ID
-    generation methods.
-
-    Args:
-      client_guid: A globally unique ID that identifies the client which
-          created this item.
-      client_item_id: An ID that uniquely identifies this item on the client
-          which created it.
-    """
-    # Using the client ID info is not required here (we could instead generate
-    # a random ID), but it's useful for debugging.
-    return '<server ID originally>%s/%s' % (client_guid, client_item_id)
-
-  def _WritePosition(self, entry, parent_id, prev_id=None):
-    """Convert from a relative position into an absolute, numeric position.
-
-    Clients specify positions using the predecessor-based references; the
-    server stores and reports item positions using sparse integer values.
-    This method converts from the former to the latter.
-
-    Args:
-      entry: The entry for which to compute a position.  Its ID field are
-          assumed to be server IDs.  This entry will have its parent_id_string
-          and position_in_parent fields updated; its insert_after_item_id field
-          will be cleared.
-      parent_id: The ID of the entry intended as the new parent.
-      prev_id: The ID of the entry intended as the new predecessor.  If this
-          is None, or an ID of an object which is not a child of the new parent,
-          the entry will be positioned at the end (right) of the ordering.  If
-          the empty ID (''), this will be positioned at the front (left) of the
-          ordering.  Otherwise, the entry will be given a position_in_parent
-          value placing it just after (to the right of) the new predecessor.
-    """
-    PREFERRED_GAP = 2 ** 20
-    # Compute values at the beginning or end.
-    def ExtendRange(current_limit_entry, sign_multiplier):
-      if current_limit_entry.id_string == entry.id_string:
-        step = 0
-      else:
-        step = sign_multiplier * PREFERRED_GAP
-      return current_limit_entry.position_in_parent + step
-
-    siblings = [x for x in self._entries.values()
-                if x.parent_id_string == parent_id and not x.deleted]
-    siblings = sorted(siblings, key=operator.attrgetter('position_in_parent'))
-    if prev_id == entry.id_string:
-      prev_id = ''
-    if not siblings:
-      # First item in this container; start in the middle.
-      entry.position_in_parent = 0
-    elif prev_id == '':
-      # A special value in the protocol.  Insert at first position.
-      entry.position_in_parent = ExtendRange(siblings[0], -1)
-    else:
-      # Consider items along with their successors.
-      for a, b in zip(siblings, siblings[1:]):
-        if a.id_string != prev_id:
-          continue
-        elif b.id_string == entry.id_string:
-          # We're already in place; don't change anything.
-          entry.position_in_parent = b.position_in_parent
-        else:
-          # Interpolate new position between two others.
-          entry.position_in_parent = (
-              a.position_in_parent * 7 + b.position_in_parent) / 8
-        break
-      else:
-        # Insert at end.  Includes the case where prev_id is None.
-        entry.position_in_parent = ExtendRange(siblings[-1], +1)
-
-    entry.parent_id_string = parent_id
-    entry.ClearField('insert_after_item_id')
-
-  def _ItemExists(self, id_string):
-    """Determine whether an item exists in the changelog."""
-    return id_string in self._entries
-
-  def _CreatePermanentItem(self, spec):
-    """Create one permanent item from its spec, if it doesn't exist.
-
-    The resulting item is added to the changelog.
-
-    Args:
-      spec: A PermanentItem object holding the properties of the item to create.
-    """
-    id_string = self._ServerTagToId(spec.tag)
-    if self._ItemExists(id_string):
-      return
-    print 'Creating permanent item: %s' % spec.name
-    entry = sync_pb2.SyncEntity()
-    entry.id_string = id_string
-    entry.non_unique_name = spec.name
-    entry.name = spec.name
-    entry.server_defined_unique_tag = spec.tag
-    entry.folder = True
-    entry.deleted = False
-    entry.specifics.CopyFrom(GetDefaultEntitySpecifics(spec.sync_type))
-    self._WritePosition(entry, self._ServerTagToId(spec.parent_tag))
-    self._SaveEntry(entry)
-
-  def _CreatePermanentItems(self, requested_types):
-    """Ensure creation of all permanent items for a given set of sync types.
-
-    Args:
-      requested_types: A list of sync data types from ALL_TYPES.
-          Permanent items of only these types will be created.
-    """
-    for spec in self._PERMANENT_ITEM_SPECS:
-      if spec.sync_type in requested_types:
-        self._CreatePermanentItem(spec)
-
-  def GetChangesFromTimestamp(self, requested_types, timestamp):
-    """Get entries which have changed since a given timestamp, oldest first.
-
-    The returned entries are limited to being _BATCH_SIZE many.  The entries
-    are returned in strict version order.
-
-    Args:
-      requested_types: A list of sync data types from ALL_TYPES.
-          Only items of these types will be retrieved; others will be filtered
-          out.
-      timestamp: A timestamp / version number.  Only items that have changed
-          more recently than this value will be retrieved; older items will
-          be filtered out.
-    Returns:
-      A tuple of (version, entries).  Version is a new timestamp value, which
-      should be used as the starting point for the next query.  Entries is the
-      batch of entries meeting the current timestamp query.
- """ - if timestamp == 0: - self._CreatePermanentItems(requested_types) - change_log = sorted(self._entries.values(), - key=operator.attrgetter('version')) - new_changes = [x for x in change_log if x.version > timestamp] - # Pick batch_size new changes, and then filter them. This matches - # the RPC behavior of the production sync server. - batch = new_changes[:self._BATCH_SIZE] - if not batch: - # Client is up to date. - return (timestamp, []) - - # Restrict batch to requested types. Tombstones are untyped - # and will always get included. - filtered = [] - for x in batch: - if (GetEntryType(x) in requested_types) or x.deleted: - filtered.append(DeepCopyOfProto(x)) - # The new client timestamp is the timestamp of the last item in the - # batch, even if that item was filtered out. - return (batch[-1].version, filtered) - - def _CheckVersionForCommit(self, entry): - """Perform an optimistic concurrency check on the version number. - - Clients are only allowed to commit if they report having seen the most - recent version of an object. - - Args: - entry: A sync entity from the client. It is assumed that ID fields - have been converted to server IDs. - Returns: - A boolean value indicating whether the client's version matches the - newest server version for the given entry. - """ - if entry.id_string in self._entries: - if (self._entries[entry.id_string].version != entry.version and - not self._entries[entry.id_string].deleted): - # Version mismatch that is not a tombstone recreation. - return False - else: - if entry.version != 0: - # Edit to an item that does not exist. - return False - return True - - def _CheckParentIdForCommit(self, entry): - """Check that the parent ID referenced in a SyncEntity actually exists. - - Args: - entry: A sync entity from the client. It is assumed that ID fields - have been converted to server IDs. - Returns: - A boolean value indicating whether the entity's parent ID is an object - that actually exists (and is not deleted) in the current account state. - """ - if entry.parent_id_string == ROOT_ID: - # This is generally allowed. - return True - if entry.parent_id_string not in self._entries: - print 'Warning: Client sent unknown ID. Should never happen.' - return False - if entry.parent_id_string == entry.id_string: - print 'Warning: Client sent circular reference. Should never happen.' - return False - if self._entries[entry.parent_id_string].deleted: - # This can happen in a race condition between two clients. - return False - if not self._entries[entry.parent_id_string].folder: - print 'Warning: Client sent non-folder parent. Should never happen.' - return False - return True - - def _RewriteIdsAsServerIds(self, entry, cache_guid, commit_session): - """Convert ID fields in a client sync entry to server IDs. - - A commit batch sent by a client may contain new items for which the - server has not generated IDs yet. And within a commit batch, later - items are allowed to refer to earlier items. This method will - generate server IDs for new items, as well as rewrite references - to items whose server IDs were generated earlier in the batch. - - Args: - entry: The client sync entry to modify. - cache_guid: The globally unique ID of the client that sent this - commit request. - commit_session: A dictionary mapping the original IDs to the new server - IDs, for any items committed earlier in the batch. - """ - if entry.version == 0: - if entry.HasField('client_defined_unique_tag'): - # When present, this should determine the item's ID. 
-        new_id = self._ClientTagToId(entry.client_defined_unique_tag)
-      else:
-        new_id = self._ClientIdToId(cache_guid, entry.id_string)
-        entry.originator_cache_guid = cache_guid
-        entry.originator_client_item_id = entry.id_string
-      commit_session[entry.id_string] = new_id  # Remember the remapping.
-      entry.id_string = new_id
-    if entry.parent_id_string in commit_session:
-      entry.parent_id_string = commit_session[entry.parent_id_string]
-    if entry.insert_after_item_id in commit_session:
-      entry.insert_after_item_id = commit_session[entry.insert_after_item_id]
-
-  def CommitEntry(self, entry, cache_guid, commit_session):
-    """Attempt to commit one entry to the user's account.
-
-    Args:
-      entry: A SyncEntity protobuf representing desired object changes.
-      cache_guid: A string value uniquely identifying the client; this
-          is used for ID generation and will determine the originator_cache_guid
-          if the entry is new.
-      commit_session: A dictionary mapping client IDs to server IDs for any
-          objects committed earlier this session.  If the entry gets a new ID
-          during commit, the change will be recorded here.
-    Returns:
-      A SyncEntity reflecting the post-commit value of the entry, or None
-      if the entry was not committed due to an error.
-    """
-    entry = DeepCopyOfProto(entry)
-
-    # Generate server IDs for this entry, and write generated server IDs
-    # from earlier entries into the message's fields, as appropriate.  The
-    # ID generation state is stored in 'commit_session'.
-    self._RewriteIdsAsServerIds(entry, cache_guid, commit_session)
-
-    # Perform the optimistic concurrency check on the entry's version number.
-    # Clients are not allowed to commit unless they indicate that they've seen
-    # the most recent version of an object.
-    if not self._CheckVersionForCommit(entry):
-      return None
-
-    # Check the validity of the parent ID; it must exist at this point.
-    # TODO(nick): Implement cycle detection and resolution.
-    if not self._CheckParentIdForCommit(entry):
-      return None
-
-    # At this point, the commit is definitely going to happen.
-
-    # Deletion works by storing a limited record for an entry, called a
-    # tombstone.  A sync server must track deleted IDs forever, since it does
-    # not keep track of client knowledge (there's no deletion ACK event).
-    if entry.deleted:
-      # Only the ID, version and deletion state are preserved on a tombstone.
-      # TODO(nick): Does the production server not preserve the type?  Not
-      # doing so means that tombstones cannot be filtered based on
-      # requested_types at GetUpdates time.
-      tombstone = sync_pb2.SyncEntity()
-      tombstone.id_string = entry.id_string
-      tombstone.deleted = True
-      tombstone.name = ''  # 'name' is a required field; we're stuck with it.
-      entry = tombstone
-    else:
-      # Comments in sync.proto detail how the representation of positional
-      # ordering works: the 'insert_after_item_id' field specifies a
-      # predecessor during Commit operations, but the 'position_in_parent'
-      # field provides an absolute ordering in GetUpdates contexts.  Here
-      # we convert from the former to the latter.  Specifically, we'll
-      # generate a numeric position placing the item just after the object
-      # identified by 'insert_after_item_id', and then clear the
-      # 'insert_after_item_id' field so that it's not sent back to the client
-      # during later GetUpdates requests.
-      if entry.HasField('insert_after_item_id'):
-        self._WritePosition(entry, entry.parent_id_string,
-                            entry.insert_after_item_id)
-      else:
-        self._WritePosition(entry, entry.parent_id_string)
-
-    # Preserve the originator info, which the client is not required to send
-    # when updating.
-    base_entry = self._entries.get(entry.id_string)
-    if base_entry and not entry.HasField("originator_cache_guid"):
-      entry.originator_cache_guid = base_entry.originator_cache_guid
-      entry.originator_client_item_id = base_entry.originator_client_item_id
-
-    # Commit the change.  This also updates the version number.
-    self._SaveEntry(entry)
-    # TODO(nick): Handle recursive deletion.
-    return entry
-
-class TestServer(object):
-  """An object to handle requests for one (and only one) Chrome Sync account.
-
-  TestServer consumes the sync command messages that are the outermost
-  layers of the protocol, performs the corresponding actions on its
-  SyncDataModel, and constructs an appropriate response message.
-  """
-
-  def __init__(self):
-    # The implementation supports exactly one account; its state is here.
-    self.account = SyncDataModel()
-    self.account_lock = threading.Lock()
-
-  def HandleCommand(self, raw_request):
-    """Decode and handle a sync command from a raw input of bytes.
-
-    This is the main entry point for this class.  It is safe to call this
-    method from multiple threads.
-
-    Args:
-      raw_request: An iterable byte sequence to be interpreted as a sync
-          protocol command.
-    Returns:
-      A tuple (response_code, raw_response); the first value is an HTTP
-      result code, while the second value is a string of bytes which is the
-      serialized reply to the command.
-    """
-    self.account_lock.acquire()
-    try:
-      request = sync_pb2.ClientToServerMessage()
-      request.MergeFromString(raw_request)
-      contents = request.message_contents
-
-      response = sync_pb2.ClientToServerResponse()
-      response.error_code = sync_pb2.ClientToServerResponse.SUCCESS
-      response.store_birthday = self.account.store_birthday
-
-      if contents == sync_pb2.ClientToServerMessage.AUTHENTICATE:
-        print 'Authenticate'
-        # We accept any authentication token, and support only one account.
-        # TODO(nick): Mock out the GAIA authentication as well; hook up here.
-        response.authenticate.user.email = 'syncjuser@chromium'
-        response.authenticate.user.display_name = 'Sync J User'
-      elif contents == sync_pb2.ClientToServerMessage.COMMIT:
-        print 'Commit'
-        self.HandleCommit(request.commit, response.commit)
-      elif contents == sync_pb2.ClientToServerMessage.GET_UPDATES:
-        print ('GetUpdates from timestamp %d' %
-               request.get_updates.from_timestamp)
-        self.HandleGetUpdates(request.get_updates, response.get_updates)
-      return (200, response.SerializeToString())
-    finally:
-      self.account_lock.release()
-
-  def HandleCommit(self, commit_message, commit_response):
-    """Respond to a Commit request by updating the user's account state.
-
-    Commit attempts stop after the first error, returning a CONFLICT result
-    for any unattempted entries.
-
-    Args:
-      commit_message: A sync_pb.CommitMessage protobuf holding the content
-          of the client's request.
-      commit_response: A sync_pb.CommitResponse protobuf into which a reply
-          to the client request will be written.
-    """
-    commit_response.SetInParent()
-    batch_failure = False
-    session = {}  # Tracks ID renaming during the commit operation.
-    guid = commit_message.cache_guid
-    for entry in commit_message.entries:
-      server_entry = None
-      if not batch_failure:
-        # Try to commit the change to the account.
-        server_entry = self.account.CommitEntry(entry, guid, session)
-
-      # An entryresponse is returned in both success and failure cases.
-      reply = commit_response.entryresponse.add()
-      if not server_entry:
-        reply.response_type = sync_pb2.CommitResponse.CONFLICT
-        reply.error_message = 'Conflict.'
-        batch_failure = True  # One failure halts the batch.
-      else:
-        reply.response_type = sync_pb2.CommitResponse.SUCCESS
-        # These are the properties that the server is allowed to override
-        # during commit; the client wants to know their values at the end
-        # of the operation.
-        reply.id_string = server_entry.id_string
-        if not server_entry.deleted:
-          reply.parent_id_string = server_entry.parent_id_string
-          reply.position_in_parent = server_entry.position_in_parent
-          reply.version = server_entry.version
-          reply.name = server_entry.name
-          reply.non_unique_name = server_entry.non_unique_name
-
-  def HandleGetUpdates(self, update_request, update_response):
-    """Respond to a GetUpdates request by querying the user's account.
-
-    Args:
-      update_request: A sync_pb.GetUpdatesMessage protobuf holding the content
-          of the client's request.
-      update_response: A sync_pb.GetUpdatesResponse protobuf into which a reply
-          to the client request will be written.
-    """
-    update_response.SetInParent()
-    requested_types = GetRequestedTypes(update_request)
-    new_timestamp, entries = self.account.GetChangesFromTimestamp(
-        requested_types, update_request.from_timestamp)
-
-    # If the client is up to date, we are careful not to set the
-    # new_timestamp field.
-    if new_timestamp != update_request.from_timestamp:
-      update_response.new_timestamp = new_timestamp
-    for e in entries:
-      reply = update_response.entries.add()
-      reply.CopyFrom(e)
diff --git a/net/tools/testserver/chromiumsync_test.py b/net/tools/testserver/chromiumsync_test.py
deleted file mode 100755
index bb73d05..0000000
--- a/net/tools/testserver/chromiumsync_test.py
+++ /dev/null
@@ -1,317 +0,0 @@
-#!/usr/bin/python2.4
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Tests exercising chromiumsync and SyncDataModel."""
-
-import unittest
-
-from google.protobuf import text_format
-
-import chromiumsync
-import sync_pb2
-
-class SyncDataModelTest(unittest.TestCase):
-  def setUp(self):
-    self.model = chromiumsync.SyncDataModel()
-
-  def AddToModel(self, proto):
-    self.model._entries[proto.id_string] = proto
-
-  def testPermanentItemSpecs(self):
-    SPECS = chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS
-    # parent_tags must be declared before use.
-    declared_specs = set(['0'])
-    for spec in SPECS:
-      self.assertTrue(spec.parent_tag in declared_specs)
-      declared_specs.add(spec.tag)
-    # Every sync datatype should have a permanent folder associated with it.
-    unique_datatypes = set([x.sync_type for x in SPECS])
-    self.assertEqual(unique_datatypes,
-                     set(chromiumsync.ALL_TYPES))
-
-  def testSaveEntry(self):
-    proto = sync_pb2.SyncEntity()
-    proto.id_string = 'abcd';
-    proto.version = 0;
-    self.assertFalse(self.model._ItemExists(proto.id_string))
-    self.model._SaveEntry(proto)
-    self.assertEqual(1, proto.version)
-    self.assertTrue(self.model._ItemExists(proto.id_string))
-    self.model._SaveEntry(proto)
-    self.assertEqual(2, proto.version)
-    proto.version = 0
-    self.assertTrue(self.model._ItemExists(proto.id_string))
-    self.assertEqual(2, self.model._entries[proto.id_string].version)
-
-  def testWritePosition(self):
-    def MakeProto(id_string, parent, position):
-      proto = sync_pb2.SyncEntity()
-      proto.id_string = id_string
-      proto.position_in_parent = position
-      proto.parent_id_string = parent
-      self.AddToModel(proto)
-
-    MakeProto('a', 'X', 1000)
-    MakeProto('b', 'X', 1800)
-    MakeProto('c', 'X', 2600)
-    MakeProto('a1', 'Z', 1007)
-    MakeProto('a2', 'Z', 1807)
-    MakeProto('a3', 'Z', 2607)
-    MakeProto('s', 'Y', 10000)
-
-    def AssertPositionResult(my_id, parent_id, prev_id, expected_position):
-      entry = sync_pb2.SyncEntity()
-      entry.id_string = my_id
-      self.model._WritePosition(entry, parent_id, prev_id)
-      self.assertEqual(expected_position, entry.position_in_parent)
-      self.assertEqual(parent_id, entry.parent_id_string)
-      self.assertFalse(entry.HasField('insert_after_item_id'))
-
-    AssertPositionResult('new', 'new_parent', '', 0)
-    AssertPositionResult('new', 'Y', '', 10000 - (2 ** 20))
-    AssertPositionResult('new', 'Y', 's', 10000 + (2 ** 20))
-    AssertPositionResult('s', 'Y', '', 10000)
-    AssertPositionResult('s', 'Y', 's', 10000)
-    AssertPositionResult('a1', 'Z', '', 1007)
-
-    AssertPositionResult('new', 'X', '', 1000 - (2 ** 20))
-    AssertPositionResult('new', 'X', 'a', 1100)
-    AssertPositionResult('new', 'X', 'b', 1900)
-    AssertPositionResult('new', 'X', 'c', 2600 + (2 ** 20))
-
-    AssertPositionResult('a1', 'X', '', 1000 - (2 ** 20))
-    AssertPositionResult('a1', 'X', 'a', 1100)
-    AssertPositionResult('a1', 'X', 'b', 1900)
-    AssertPositionResult('a1', 'X', 'c', 2600 + (2 ** 20))
-
-    AssertPositionResult('a', 'X', '', 1000)
-    AssertPositionResult('a', 'X', 'b', 1900)
-    AssertPositionResult('a', 'X', 'c', 2600 + (2 ** 20))
-
-    AssertPositionResult('b', 'X', '', 1000 - (2 ** 20))
-    AssertPositionResult('b', 'X', 'a', 1800)
-    AssertPositionResult('b', 'X', 'c', 2600 + (2 ** 20))
-
-    AssertPositionResult('c', 'X', '', 1000 - (2 ** 20))
-    AssertPositionResult('c', 'X', 'a', 1100)
-    AssertPositionResult('c', 'X', 'b', 2600)
-
-  def testCreatePermanentItems(self):
-    self.model._CreatePermanentItems(chromiumsync.ALL_TYPES)
-    self.assertEqual(len(chromiumsync.ALL_TYPES) + 2,
-                     len(self.model._entries))
-
-  def ExpectedPermanentItemCount(self, sync_type):
-    if sync_type == chromiumsync.BOOKMARK:
-      return 4
-    elif sync_type == chromiumsync.TOP_LEVEL:
-      return 1
-    else:
-      return 2
-
-  def testGetChangesFromTimestampZeroForEachType(self):
-    for sync_type in chromiumsync.ALL_TYPES:
-      self.model = chromiumsync.SyncDataModel()
-      request_types = [sync_type, chromiumsync.TOP_LEVEL]
-
-      version, changes = self.model.GetChangesFromTimestamp(request_types, 0)
-
-      expected_count = self.ExpectedPermanentItemCount(sync_type)
-      self.assertEqual(expected_count, version)
-      self.assertEqual(expected_count, len(changes))
-      self.assertEqual('google_chrome', changes[0].server_defined_unique_tag)
-      for change in changes:
-        self.assertTrue(change.HasField('server_defined_unique_tag'))
-        self.assertEqual(change.version, change.sync_timestamp)
-        self.assertTrue(change.version <= version)
-
-      # Test idempotence: another GetUpdates from ts=0 shouldn't recreate.
-      version, changes = self.model.GetChangesFromTimestamp(request_types, 0)
-      self.assertEqual(expected_count, version)
-      self.assertEqual(expected_count, len(changes))
-
-      # Doing a wider GetUpdates from timestamp zero shouldn't recreate either.
-      new_version, changes = self.model.GetChangesFromTimestamp(
-          chromiumsync.ALL_TYPES, 0)
-      self.assertEqual(len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS),
-                       new_version)
-      self.assertEqual(new_version, len(changes))
-      version, changes = self.model.GetChangesFromTimestamp(request_types, 0)
-      self.assertEqual(new_version, version)
-      self.assertEqual(expected_count, len(changes))
-
-  def testBatchSize(self):
-    for sync_type in chromiumsync.ALL_TYPES[1:]:
-      specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type)
-      self.model = chromiumsync.SyncDataModel()
-      request_types = [sync_type, chromiumsync.TOP_LEVEL]
-
-      for i in range(self.model._BATCH_SIZE*3):
-        entry = sync_pb2.SyncEntity()
-        entry.id_string = 'batch test %d' % i
-        entry.specifics.CopyFrom(specifics)
-        self.model._SaveEntry(entry)
-      version, changes = self.model.GetChangesFromTimestamp(request_types, 0)
-      self.assertEqual(self.model._BATCH_SIZE, version)
-      version, changes = self.model.GetChangesFromTimestamp(request_types,
-                                                            version)
-      self.assertEqual(self.model._BATCH_SIZE*2, version)
-      version, changes = self.model.GetChangesFromTimestamp(request_types,
-                                                            version)
-      self.assertEqual(self.model._BATCH_SIZE*3, version)
-      expected_dingleberry = self.ExpectedPermanentItemCount(sync_type)
-      version, changes = self.model.GetChangesFromTimestamp(request_types,
-                                                            version)
-      self.assertEqual(self.model._BATCH_SIZE*3 + expected_dingleberry,
-                       version)
-
-      # Now delete a third of the items.
-      for i in xrange(self.model._BATCH_SIZE*3 - 1, 0, -3):
-        entry = sync_pb2.SyncEntity()
-        entry.id_string = 'batch test %d' % i
-        entry.deleted = True
-        self.model._SaveEntry(entry)
-
-      # The batch counts shouldn't change.
-      version, changes = self.model.GetChangesFromTimestamp(request_types, 0)
-      self.assertEqual(self.model._BATCH_SIZE, len(changes))
-      version, changes = self.model.GetChangesFromTimestamp(request_types,
-                                                            version)
-      self.assertEqual(self.model._BATCH_SIZE, len(changes))
-      version, changes = self.model.GetChangesFromTimestamp(request_types,
-                                                            version)
-      self.assertEqual(self.model._BATCH_SIZE, len(changes))
-      expected_dingleberry = self.ExpectedPermanentItemCount(sync_type)
-      version, changes = self.model.GetChangesFromTimestamp(request_types,
-                                                            version)
-      self.assertEqual(expected_dingleberry, len(changes))
-      self.assertEqual(self.model._BATCH_SIZE*4 + expected_dingleberry, version)
-
-  def testCommitEachDataType(self):
-    for sync_type in chromiumsync.ALL_TYPES[1:]:
-      specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type)
-      self.model = chromiumsync.SyncDataModel()
-      my_cache_guid = '112358132134'
-      parent = 'foobar'
-      commit_session = {}
-
-      # Start with a GetUpdates from timestamp 0, to populate permanent items.
-      original_version, original_changes = (
-          self.model.GetChangesFromTimestamp([sync_type], 0))
-
-      def DoCommit(original=None, id='', name=None, parent=None, prev=None):
-        proto = sync_pb2.SyncEntity()
-        if original is not None:
-          proto.version = original.version
-          proto.id_string = original.id_string
-          proto.parent_id_string = original.parent_id_string
-          proto.name = original.name
-        else:
-          proto.id_string = id
-          proto.version = 0
-        proto.specifics.CopyFrom(specifics)
-        if name is not None:
-          proto.name = name
-        if parent:
-          proto.parent_id_string = parent.id_string
-        if prev:
-          proto.insert_after_item_id = prev.id_string
-        else:
-          proto.insert_after_item_id = ''
-        proto.folder = True
-        proto.deleted = False
-        result = self.model.CommitEntry(proto, my_cache_guid, commit_session)
-        self.assertTrue(result)
-        return (proto, result)
-
-      # Commit a new item.
-      proto1, result1 = DoCommit(name='namae', id='Foo',
-                                 parent=original_changes[-1])
-      # Commit an item whose parent is another item (referenced via the
-      # pre-commit ID).
-      proto2, result2 = DoCommit(name='Secondo', id='Bar',
-                                 parent=proto1)
-      # Commit a sibling of the second item.
-      proto3, result3 = DoCommit(name='Third!', id='Baz',
-                                 parent=proto1, prev=proto2)
-
-      self.assertEqual(3, len(commit_session))
-      for p, r in [(proto1, result1), (proto2, result2), (proto3, result3)]:
-        self.assertNotEqual(r.id_string, p.id_string)
-        self.assertEqual(r.originator_client_item_id, p.id_string)
-        self.assertEqual(r.originator_cache_guid, my_cache_guid)
-        self.assertTrue(r is not self.model._entries[r.id_string],
-                        "Commit result didn't make a defensive copy.")
-        self.assertTrue(p is not self.model._entries[r.id_string],
-                        "Commit result didn't make a defensive copy.")
-        self.assertEqual(commit_session.get(p.id_string), r.id_string)
-        self.assertTrue(r.version > original_version)
-      self.assertEqual(result1.parent_id_string, proto1.parent_id_string)
-      self.assertEqual(result2.parent_id_string, result1.id_string)
-      version, changes = self.model.GetChangesFromTimestamp([sync_type],
-                                                            original_version)
-      self.assertEqual(3, len(changes))
-      self.assertEqual(original_version + 3, version)
-      self.assertEqual([result1, result2, result3], changes)
-      for c in changes:
-        self.assertTrue(c is not self.model._entries[c.id_string],
-                        "GetChanges didn't make a defensive copy.")
-      self.assertTrue(result2.position_in_parent < result3.position_in_parent)
-      self.assertEqual(0, result2.position_in_parent)
-
-      # Now update the items so that the second item is the parent of the
-      # first; with the first sandwiched between two new items (4 and 5).
-      # Do this in a new commit session, meaning we'll reference items from
-      # the first batch by their post-commit, server IDs.
-      commit_session = {}
-      old_cache_guid = my_cache_guid
-      my_cache_guid = 'A different GUID'
-      proto2b, result2b = DoCommit(original=result2,
-                                   parent=original_changes[-1])
-      proto4, result4 = DoCommit(id='ID4', name='Four',
-                                 parent=result2, prev=None)
-      proto1b, result1b = DoCommit(original=result1,
-                                   parent=result2, prev=proto4)
-      proto5, result5 = DoCommit(id='ID5', name='Five', parent=result2,
-                                 prev=result1)
-
-      self.assertEqual(2, len(commit_session),
-                       'Only new items in second batch should be in the session')
-      for p, r, original in [(proto2b, result2b, proto2),
-                             (proto4, result4, proto4),
-                             (proto1b, result1b, proto1),
-                             (proto5, result5, proto5)]:
-        self.assertEqual(r.originator_client_item_id, original.id_string)
-        if original is not p:
-          self.assertEqual(r.id_string, p.id_string,
-                           'Ids should be stable after first commit')
-          self.assertEqual(r.originator_cache_guid, old_cache_guid)
-        else:
-          self.assertNotEqual(r.id_string, p.id_string)
-          self.assertEqual(r.originator_cache_guid, my_cache_guid)
-        self.assertEqual(commit_session.get(p.id_string), r.id_string)
-        self.assertTrue(r is not self.model._entries[r.id_string],
-                        "Commit result didn't make a defensive copy.")
-        self.assertTrue(p is not self.model._entries[r.id_string],
-                        "Commit didn't make a defensive copy.")
-        self.assertTrue(r.version > p.version)
-      version, changes = self.model.GetChangesFromTimestamp([sync_type],
-                                                            original_version)
-      self.assertEqual(5, len(changes))
-      self.assertEqual(original_version + 7, version)
-      self.assertEqual([result3, result2b, result4, result1b, result5], changes)
-      for c in changes:
-        self.assertTrue(c is not self.model._entries[c.id_string],
-                        "GetChanges didn't make a defensive copy.")
-      self.assertTrue(result4.parent_id_string ==
-                      result1b.parent_id_string ==
-                      result5.parent_id_string ==
-                      result2b.id_string)
-      self.assertTrue(result4.position_in_parent <
-                      result1b.position_in_parent <
-                      result5.position_in_parent)
-
-if __name__ == '__main__':
-  unittest.main()
\ No newline at end of file
diff --git a/net/tools/testserver/testserver.py b/net/tools/testserver/testserver.py
index 8e3df5e..94ad3da 100644
--- a/net/tools/testserver/testserver.py
+++ b/net/tools/testserver/testserver.py
@@ -1,5 +1,5 @@
 #!/usr/bin/python2.4
-# Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
+# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
@@ -22,13 +22,9 @@ import shutil
 import SocketServer
 import sys
 import time
-import urllib2
-
-import pyftpdlib.ftpserver
 import tlslite
 import tlslite.api
-
-import chromiumsync
+import pyftpdlib.ftpserver
 
 try:
   import hashlib
@@ -129,14 +125,12 @@ class TestPageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
       self.ContentTypeHandler,
      self.ServerRedirectHandler,
       self.ClientRedirectHandler,
-      self.ChromiumSyncTimeHandler,
       self.MultipartHandler,
       self.DefaultResponseHandler]
     self._post_handlers = [
       self.WriteFile,
       self.EchoTitleHandler,
       self.EchoAllHandler,
-      self.ChromiumSyncCommandHandler,
      self.EchoHandler] + self._get_handlers
     self._put_handlers = [
       self.WriteFile,
@@ -155,8 +149,6 @@ class TestPageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
     BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
                                                    client_address,
                                                    socket_server)
-  # Class variable; shared across requests.
-  _sync_handler = chromiumsync.TestServer()
 
   def _ShouldHandleRequest(self, handler_name):
     """Determines if the path can be handled by the handler.
@@ -1004,39 +996,6 @@ class TestPageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
 
     return True
 
-  def ChromiumSyncTimeHandler(self):
-    """Handle Chromium sync .../time requests.
-
-    The syncer sometimes checks server reachability by examining /time.
-    """
-    test_name = "/chromiumsync/time"
-    if not self._ShouldHandleRequest(test_name):
-      return False
-
-    self.send_response(200)
-    self.send_header('Content-type', 'text/html')
-    self.end_headers()
-    return True
-
-  def ChromiumSyncCommandHandler(self):
-    """Handle a chromiumsync command arriving via http.
-
-    This covers all sync protocol commands: authentication, getupdates, and
-    commit.
-    """
-    test_name = "/chromiumsync/command"
-    if not self._ShouldHandleRequest(test_name):
-      return False
-
-    length = int(self.headers.getheader('content-length'))
-    raw_request = self.rfile.read(length)
-
-    http_response, raw_reply = self._sync_handler.HandleCommand(raw_request)
-    self.send_response(http_response)
-    self.end_headers()
-    self.wfile.write(raw_reply)
-    return True
-
   def MultipartHandler(self):
     """Send a multipart response (10 text/html pages)."""
     test_name = "/multipart"
@@ -1166,24 +1125,13 @@ def MakeDataDir():
     # Create the default path to our data dir, relative to the exe dir.
     my_data_dir = os.path.dirname(sys.argv[0])
     my_data_dir = os.path.join(my_data_dir, "..", "..", "..", "..",
-                                "test", "data")
+                               "test", "data")
 
     #TODO(ibrar): Must use Find* funtion defined in google\tools
     #i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
 
   return my_data_dir
 
-def TryKillingOldServer(port):
-  # Note that an HTTP /kill request to the FTP server has the effect of
-  # killing it.
-  for protocol in ["http", "https"]:
-    try:
-      urllib2.urlopen("%s://localhost:%d/kill" % (protocol, port)).read()
-      print "Killed old server instance on port %d (via %s)" % (port, protocol)
-    except urllib2.URLError:
-      # Common case, indicates no server running.
-      pass
-
 def main(options, args):
   # redirect output to a log file so it doesn't spam the unit test output
   logfile = open('testserver.log', 'w')
@@ -1191,9 +1139,6 @@ def main(options, args):
 
   port = options.port
 
-  # Try to free up the port if there's an orphaned old instance.
-  TryKillingOldServer(port)
-
   if options.server_type == SERVER_HTTP:
     if options.cert:
       # let's make sure the cert file exists.
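A closing note on the most intricate piece of the reverted model, `_WritePosition`: clients express ordering through `insert_after_item_id`, while the server stores sparse absolute integers, opening a `2 ** 20` gap at either end of a folder and splitting an interior gap at a 7:1 ratio biased toward the predecessor. The arithmetic in isolation, as a standalone sketch that mirrors (but is not an exact extract of) the deleted code:

```python
# Standalone sketch of the deleted _WritePosition arithmetic (Python 2,
# like the original): sparse positions, wide end gaps, biased interpolation.
PREFERRED_GAP = 2 ** 20

def interpolate(prev_pos, next_pos):
  # Same weighting as the deleted code: 7 parts predecessor, 1 part
  # successor, so the new item lands close to its predecessor and
  # repeated inserts consume the gap slowly.
  return (prev_pos * 7 + next_pos) / 8

# Siblings at 1000, 1800, 2600, as in the deleted testWritePosition:
print interpolate(1000, 1800)  # -> 1100, insert just after the first item
print 1000 - PREFERRED_GAP     # front of the ordering (empty prev_id)
print 2600 + PREFERRED_GAP     # end of the ordering (prev_id is None)
```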