-rw-r--r-- | chrome/browser/autofill/autofill_regexes.cc | 1
-rw-r--r-- | chrome/browser/prefs/pref_service_unittest.cc | 25
-rw-r--r-- | chrome/browser/themes/browser_theme_pack.cc | 4
-rw-r--r-- | chrome_frame/simple_resource_loader.cc | 12
-rwxr-xr-x | tools/data_pack/data_pack.py | 26
-rwxr-xr-x | tools/data_pack/repack.py | 20
-rwxr-xr-x | tools/grit/grit/format/data_pack.py | 61
-rw-r--r-- | tools/grit/grit/format/data_pack_unittest.py | 16
-rw-r--r-- | tools/grit/grit/node/include.py | 5
-rw-r--r-- | tools/grit/grit/node/message.py | 18
-rw-r--r-- | ui/base/resource/data_pack.cc | 39
-rw-r--r-- | ui/base/resource/data_pack.h | 21
-rw-r--r-- | ui/base/resource/data_pack_literal.cc | 15
-rw-r--r-- | ui/base/resource/data_pack_unittest.cc | 18
-rw-r--r-- | ui/base/resource/resource_bundle.cc | 18
15 files changed, 198 insertions, 101 deletions
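This change bumps the data pack file format from version 3 to version 4 and adds a one-byte text-encoding field (0 = BINARY, 1 = UTF8, 2 = UTF16) to the header, so consumers can tell whether string resources are stored as UTF-8 or UTF-16. As a reading aid only (not part of the patch, helper name made up), here is a minimal Python sketch of the new 9-byte header, using the same little-endian "<IIB" layout as the struct calls in the diff below:

import struct

HEADER_LENGTH = 2 * 4 + 1          # two uint32s + one uint8 = 9 bytes
BINARY, UTF8, UTF16 = range(3)     # values of the new encoding byte

def read_pack_header(path):
    # Returns (version, num_entries, encoding) for a version-4 pack file.
    header = open(path, "rb").read(HEADER_LENGTH)
    return struct.unpack("<IIB", header)
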
diff --git a/chrome/browser/autofill/autofill_regexes.cc b/chrome/browser/autofill/autofill_regexes.cc index b1ed629..61b4fa0 100644 --- a/chrome/browser/autofill/autofill_regexes.cc +++ b/chrome/browser/autofill/autofill_regexes.cc @@ -81,4 +81,3 @@ bool MatchesPattern(const string16& input, const string16& pattern) { } } // namespace autofill - diff --git a/chrome/browser/prefs/pref_service_unittest.cc b/chrome/browser/prefs/pref_service_unittest.cc index f2f6935..c1ebd2b 100644 --- a/chrome/browser/prefs/pref_service_unittest.cc +++ b/chrome/browser/prefs/pref_service_unittest.cc @@ -27,31 +27,6 @@ using testing::_; using testing::Mock; -// TODO(port): port this test to POSIX. -#if defined(OS_WIN) -TEST(PrefServiceTest, LocalizedPrefs) { - TestingPrefService prefs; - const char kBoolean[] = "boolean"; - const char kInteger[] = "integer"; - const char kString[] = "string"; - prefs.RegisterLocalizedBooleanPref(kBoolean, IDS_LOCALE_BOOL); - prefs.RegisterLocalizedIntegerPref(kInteger, IDS_LOCALE_INT); - prefs.RegisterLocalizedStringPref(kString, IDS_LOCALE_STRING); - - // The locale default should take preference over the user default. - EXPECT_FALSE(prefs.GetBoolean(kBoolean)); - EXPECT_EQ(1, prefs.GetInteger(kInteger)); - EXPECT_EQ("hello", prefs.GetString(kString)); - - prefs.SetBoolean(kBoolean, true); - EXPECT_TRUE(prefs.GetBoolean(kBoolean)); - prefs.SetInteger(kInteger, 5); - EXPECT_EQ(5, prefs.GetInteger(kInteger)); - prefs.SetString(kString, "foo"); - EXPECT_EQ("foo", prefs.GetString(kString)); -} -#endif - TEST(PrefServiceTest, NoObserverFire) { TestingPrefService prefs; diff --git a/chrome/browser/themes/browser_theme_pack.cc b/chrome/browser/themes/browser_theme_pack.cc index d348077..d050ebe 100644 --- a/chrome/browser/themes/browser_theme_pack.cc +++ b/chrome/browser/themes/browser_theme_pack.cc @@ -29,7 +29,7 @@ namespace { // Version number of the current theme pack. We just throw out and rebuild // theme packs that aren't int-equal to this. -const int kThemePackVersion = 17; +const int kThemePackVersion = 18; // IDs that are in the DataPack won't clash with the positive integer // uint16. kHeaderID should always have the maximum value because we want the @@ -450,7 +450,7 @@ bool BrowserThemePack::WriteToDisk(FilePath path) const { RepackImages(prepared_images_, &reencoded_images); AddRawImagesTo(reencoded_images, &resources); - return ui::DataPack::WritePack(path, resources); + return ui::DataPack::WritePack(path, resources, ui::DataPack::BINARY); } bool BrowserThemePack::GetTint(int id, color_utils::HSL* hsl) const { diff --git a/chrome_frame/simple_resource_loader.cc b/chrome_frame/simple_resource_loader.cc index 2d1e80a..a28a62c 100644 --- a/chrome_frame/simple_resource_loader.cc +++ b/chrome_frame/simple_resource_loader.cc @@ -241,10 +241,14 @@ std::wstring SimpleResourceLoader::GetLocalizedResource(int message_id) { return std::wstring(); } - // Data pack encodes strings as UTF16. - DCHECK_EQ(data.length() % 2, 0U); - string16 msg(reinterpret_cast<const char16*>(data.data()), - data.length() / 2); + // Data pack encodes strings as either UTF8 or UTF16. 
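(Aside, not part of the patch: the C++ branch that follows picks a decoding based on the pack's advertised encoding. The same decision, in the Python terms used by the pack tools, might look like the hypothetical helper below; it assumes UTF-16 payloads are little-endian with the BOM already stripped, which is what grit's writer emits on little-endian hosts.)

BINARY, UTF8, UTF16 = range(3)

def resource_to_text(blob, encoding):
    # Hypothetical helper: decode one raw resource according to the pack's
    # encoding byte; BINARY resources have no text interpretation.
    if encoding == UTF16:
        return blob.decode("utf-16-le")   # assumes little-endian, no BOM
    if encoding == UTF8:
        return blob.decode("utf-8")
    raise ValueError("BINARY pack carries no text encoding")
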
+ string16 msg; + if (data_pack_->GetTextEncodingType() == ui::DataPack::UTF16) { + msg = string16(reinterpret_cast<const char16*>(data.data()), + data.length() / 2); + } else if (data_pack_->GetTextEncodingType() == ui::DataPack::UTF8) { + msg = UTF8ToUTF16(data); + } return msg; } diff --git a/tools/data_pack/data_pack.py b/tools/data_pack/data_pack.py index 93a6cfd..aeeeb78 100755 --- a/tools/data_pack/data_pack.py +++ b/tools/data_pack/data_pack.py @@ -12,25 +12,33 @@ to point to grit scripts. import struct -FILE_FORMAT_VERSION = 3 -HEADER_LENGTH = 2 * 4 # Two uint32s. (file version and number of entries) +FILE_FORMAT_VERSION = 4 +HEADER_LENGTH = 2 * 4 + 1 # Two uint32s. (file version, number of entries) and + # one uint8 (encoding of text resources) +BINARY, UTF8, UTF16 = range(3) class WrongFileVersion(Exception): pass +class DataPackContents: + def __init__(self, resources, encoding): + self.resources = resources + self.encoding = encoding + def ReadDataPack(input_file): """Reads a data pack file and returns a dictionary.""" data = open(input_file, "rb").read() original_data = data # Read the header. - version, num_entries = struct.unpack("<II", data[:HEADER_LENGTH]) + version, num_entries, encoding = struct.unpack("<IIB", data[:HEADER_LENGTH]) if version != FILE_FORMAT_VERSION: + print "Wrong file version in ", input_file raise WrongFileVersion resources = {} if num_entries == 0: - return resources + return DataPackContents(resources, encoding) # Read the index and data. data = data[HEADER_LENGTH:] @@ -41,15 +49,15 @@ def ReadDataPack(input_file): next_id, next_offset = struct.unpack("<HI", data[:kIndexEntrySize]) resources[id] = original_data[offset:next_offset] - return resources + return DataPackContents(resources, encoding) -def WriteDataPack(resources, output_file): +def WriteDataPack(resources, output_file, encoding): """Write a map of id=>data into output_file as a data pack.""" ids = sorted(resources.keys()) file = open(output_file, "wb") # Write file header. - file.write(struct.pack("<II", FILE_FORMAT_VERSION, len(ids))) + file.write(struct.pack("<IIB", FILE_FORMAT_VERSION, len(ids), encoding)) # Each entry is a uint16 and a uint32. We have one extra entry for the last # item. @@ -70,9 +78,9 @@ def WriteDataPack(resources, output_file): def main(): # Just write a simple file. data = { 1: "", 4: "this is id 4", 6: "this is id 6", 10: "" } - WriteDataPack(data, "datapack1.pak") + WriteDataPack(data, "datapack1.pak", UTF8) data2 = { 1000: "test", 5: "five" } - WriteDataPack(data2, "datapack2.pak") + WriteDataPack(data2, "datapack2.pak", UTF8) print "wrote datapack1 and datapack2 to current directory." if __name__ == '__main__': diff --git a/tools/data_pack/repack.py b/tools/data_pack/repack.py index 2729b10..335fe6f 100755 --- a/tools/data_pack/repack.py +++ b/tools/data_pack/repack.py @@ -20,17 +20,29 @@ def RePack(output_file, input_files): """Write a new data pack to |output_file| based on a list of filenames (|input_files|)""" resources = {} + encoding = None for filename in input_files: - new_resources = data_pack.ReadDataPack(filename) + new_content = data_pack.ReadDataPack(filename) # Make sure we have no dups. - duplicate_keys = set(new_resources.keys()) & set(resources.keys()) + duplicate_keys = set(new_content.resources.keys()) & set(resources.keys()) if len(duplicate_keys) != 0: raise exceptions.KeyError("Duplicate keys: " + str(list(duplicate_keys))) - resources.update(new_resources) + # Make sure encoding is consistent. 
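(Aside, not part of repack.py: the rule enforced just below, restated as a standalone sketch with a made-up helper name so the cases are easy to see. BINARY is neutral and mixes with anything; two different text encodings are an error.)

BINARY, UTF8, UTF16 = range(3)

def merge_encoding(current, new):
    # current is None before the first pack has been read.
    if current in (None, BINARY):
        return new
    if new in (BINARY, current):
        return current
    raise KeyError("Inconsistent encodings: %s vs %s" % (current, new))

assert merge_encoding(None, BINARY) == BINARY    # e.g. a theme pack first
assert merge_encoding(BINARY, UTF8) == UTF8      # first text pack decides
assert merge_encoding(UTF8, BINARY) == UTF8      # binary packs stay neutral
# merge_encoding(UTF8, UTF16) would raise KeyError.
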
+ if encoding in (None, data_pack.BINARY): + encoding = new_content.encoding + elif new_content.encoding not in (data_pack.BINARY, encoding): + raise exceptions.KeyError("Inconsistent encodings: " + + str(encoding) + " vs " + + str(new_content.encoding)) - data_pack.WriteDataPack(resources, output_file) + resources.update(new_content.resources) + + # Encoding is 0 for BINARY, 1 for UTF8 and 2 for UTF16 + if encoding is None: + encoding = data_pack.BINARY + data_pack.WriteDataPack(resources, output_file, encoding) def main(argv): if len(argv) < 3: diff --git a/tools/grit/grit/format/data_pack.py b/tools/grit/grit/format/data_pack.py index 01c0c9e..87db064 100755 --- a/tools/grit/grit/format/data_pack.py +++ b/tools/grit/grit/format/data_pack.py @@ -19,12 +19,19 @@ from grit.node import message from grit.node import misc -FILE_FORMAT_VERSION = 3 -HEADER_LENGTH = 2 * 4 # Two uint32s. (file version and number of entries) +PACK_FILE_VERSION = 4 +HEADER_LENGTH = 2 * 4 + 1 # Two uint32s. (file version, number of entries) and + # one uint8 (encoding of text resources) +BINARY, UTF8, UTF16 = range(3) class WrongFileVersion(Exception): pass +class DataPackContents: + def __init__(self, resources, encoding): + self.resources = resources + self.encoding = encoding + class DataPack(interface.ItemFormatter): '''Writes out the data pack file format (platform agnostic resource file).''' def Format(self, item, lang='en', begin_item=True, output_dir='.'): @@ -36,9 +43,9 @@ class DataPack(interface.ItemFormatter): nodes = DataPack.GetDataNodes(item) data = {} for node in nodes: - id, value = node.GetDataPackPair(lang) + id, value = node.GetDataPackPair(lang, UTF8) data[id] = value - return DataPack.WriteDataPackToString(data) + return DataPack.WriteDataPackToString(data, UTF8) @staticmethod def GetDataNodes(item): @@ -63,13 +70,15 @@ class DataPack(interface.ItemFormatter): original_data = data # Read the header. - version, num_entries = struct.unpack("<II", data[:HEADER_LENGTH]) - if version != FILE_FORMAT_VERSION: + version, num_entries, encoding = struct.unpack("<IIB", + data[:HEADER_LENGTH]) + if version != PACK_FILE_VERSION: + print "Wrong file version in ", input_file raise WrongFileVersion resources = {} if num_entries == 0: - return resources + return DataPackContents(resources, encoding) # Read the index and data. data = data[HEADER_LENGTH:] @@ -80,18 +89,18 @@ class DataPack(interface.ItemFormatter): next_id, next_offset = struct.unpack("<HI", data[:kIndexEntrySize]) resources[id] = original_data[offset:next_offset] - return resources + return DataPackContents(resources, encoding) @staticmethod - def WriteDataPackToString(resources): + def WriteDataPackToString(resources, encoding): """Write a map of id=>data into a string in the data pack format and return it.""" ids = sorted(resources.keys()) ret = [] # Write file header. - ret.append(struct.pack("<II", FILE_FORMAT_VERSION, len(ids))) - HEADER_LENGTH = 2 * 4 # Two uint32s. + ret.append(struct.pack("<IIB", PACK_FILE_VERSION, len(ids), encoding)) + HEADER_LENGTH = 2 * 4 + 1 # Two uint32s and one uint8. # Each entry is a uint16 + a uint32s. We have one extra entry for the last # item. 
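(Aside, not part of the patch: the index layout described by the comment above. Each entry is "<HI" -- a uint16 id and a uint32 absolute offset -- and the extra sentinel entry records where the last resource ends, so a reader can compute any resource's length by subtracting consecutive offsets. A minimal sketch, helper name made up:)

import struct

HEADER_LENGTH = 2 * 4 + 1
INDEX_ENTRY_SIZE = 2 + 4

def build_index_and_data(resources):
    # Lays out the index and payload that follow the version-4 header.
    ids = sorted(resources.keys())
    offset = HEADER_LENGTH + (len(ids) + 1) * INDEX_ENTRY_SIZE
    index, payload = [], []
    for id in ids:
        index.append(struct.pack("<HI", id, offset))
        payload.append(resources[id])
        offset += len(resources[id])
    index.append(struct.pack("<HI", 0, offset))   # sentinel: end of data
    return "".join(index) + "".join(payload)

With the sample data from the unit test ({1: "", 4: "this is id 4", 6: "this is id 6", 10: ""}) the first offset works out to 9 + 5 * 6 = 39 = 0x27, which is exactly the value the updated expectations below use.
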
@@ -111,10 +120,10 @@ class DataPack(interface.ItemFormatter): return ''.join(ret) @staticmethod - def WriteDataPack(resources, output_file): + def WriteDataPack(resources, output_file, encoding): """Write a map of id=>data into output_file as a data pack.""" file = open(output_file, "wb") - content = DataPack.WriteDataPackToString(resources) + content = DataPack.WriteDataPackToString(resources, encoding) file.write(content) @staticmethod @@ -122,25 +131,37 @@ class DataPack(interface.ItemFormatter): """Write a new data pack to |output_file| based on a list of filenames (|input_files|)""" resources = {} + encoding = None for filename in input_files: - new_resources = DataPack.ReadDataPack(filename) + new_content = DataPack.ReadDataPack(filename) - # Make sure we have no duplicates. - duplicate_keys = set(new_resources.keys()) & set(resources.keys()) + # Make sure we have no dups. + duplicate_keys = set(new_content.resources.keys()) & set(resources.keys()) if len(duplicate_keys) != 0: raise exceptions.KeyError("Duplicate keys: " + str(list(duplicate_keys))) - resources.update(new_resources) + # Make sure encoding is consistent. + if encoding in (None, BINARY): + encoding = new_content.encoding + elif new_content.encoding not in (BINARY, encoding): + raise exceptions.KeyError("Inconsistent encodings: " + + str(encoding) + " vs " + + str(new_content.encoding)) + + resources.update(new_content.resources) - DataPack.WriteDataPack(resources, output_file) + # Encoding is 0 for BINARY, 1 for UTF8 and 2 for UTF16 + if encoding is None: + encoding = BINARY + DataPack.WriteDataPack(resources, output_file, encoding) def main(): # Just write a simple file. data = { 1: "", 4: "this is id 4", 6: "this is id 6", 10: "" } - WriteDataPack(data, "datapack1.pak") + DataPack.WriteDataPack(data, "datapack1.pak", UTF8) data2 = { 1000: "test", 5: "five" } - WriteDataPack(data2, "datapack2.pak") + DataPack.WriteDataPack(data2, "datapack2.pak", UTF8) print "wrote datapack1 and datapack2 to current directory." if __name__ == '__main__': diff --git a/tools/grit/grit/format/data_pack_unittest.py b/tools/grit/grit/format/data_pack_unittest.py index 35966639..8de54ef 100644 --- a/tools/grit/grit/format/data_pack_unittest.py +++ b/tools/grit/grit/format/data_pack_unittest.py @@ -16,15 +16,17 @@ from grit.format import data_pack class FormatDataPackUnittest(unittest.TestCase): def testWriteDataPack(self): expected = ( - '\x03\x00\x00\x00\x04\x00\x00\x00' # header (version, no. entries) - '\x01\x00\x26\x00\x00\x00' # index entry 1 - '\x04\x00\x26\x00\x00\x00' # index entry 4 - '\x06\x00\x32\x00\x00\x00' # index entry 6 - '\x0a\x00\x3e\x00\x00\x00' # index entry 10 - '\x00\x00\x3e\x00\x00\x00' # extra entry for the size of last + '\x04\x00\x00\x00' # header(version + '\x04\x00\x00\x00' # no. 
entries, + '\x01' # encoding) + '\x01\x00\x27\x00\x00\x00' # index entry 1 + '\x04\x00\x27\x00\x00\x00' # index entry 4 + '\x06\x00\x33\x00\x00\x00' # index entry 6 + '\x0a\x00\x3f\x00\x00\x00' # index entry 10 + '\x00\x00\x3f\x00\x00\x00' # extra entry for the size of last 'this is id 4this is id 6') # data input = { 1: "", 4: "this is id 4", 6: "this is id 6", 10: "" } - output = data_pack.DataPack.WriteDataPackToString(input) + output = data_pack.DataPack.WriteDataPackToString(input, data_pack.UTF8) self.failUnless(output == expected) diff --git a/tools/grit/grit/node/include.py b/tools/grit/grit/node/include.py index b073bc7..dc29315 100644 --- a/tools/grit/grit/node/include.py +++ b/tools/grit/grit/node/include.py @@ -74,7 +74,7 @@ class IncludeNode(base.Node): ''' return self.FilenameToOpen() - def GetDataPackPair(self, lang): + def GetDataPackPair(self, lang, encoding): '''Returns a (id, string) pair that represents the resource id and raw bytes of the data. This is used to generate the data pack data file. ''' @@ -90,6 +90,8 @@ class IncludeNode(base.Node): data = infile.read() infile.close() + # Include does not care about the encoding, because it only returns binary + # data. return id, data def Flatten(self, output_dir): @@ -136,4 +138,3 @@ class IncludeNode(base.Node): node.EndParsing() return node Construct = staticmethod(Construct) - diff --git a/tools/grit/grit/node/message.py b/tools/grit/grit/node/message.py index a48a645..3c5ac64 100644 --- a/tools/grit/grit/node/message.py +++ b/tools/grit/grit/node/message.py @@ -1,5 +1,5 @@ #!/usr/bin/python2.4 -# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +# Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -19,6 +19,7 @@ from grit import exception from grit import tclib from grit import util +BINARY, UTF8, UTF16 = range(3) # Finds whitespace at the start and end of a string which can be multiline. _WHITESPACE = re.compile('(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z', @@ -186,7 +187,7 @@ class MessageNode(base.ContentNode): else: return self.attrs['offset'] - def GetDataPackPair(self, lang): + def GetDataPackPair(self, lang, encoding): '''Returns a (id, string) pair that represents the string id and the string in utf8. This is used to generate the data pack data file. ''' @@ -199,10 +200,15 @@ class MessageNode(base.ContentNode): # Windows automatically translates \n to a new line, but GTK+ doesn't. # Manually do the conversion here rather than at run time. message = message.replace("\\n", "\n") - # |message| is a python unicode string, so convert to a utf16 byte stream - # because that's the format of datapacks. We skip the first 2 bytes - # because it is the BOM. - return id, message.encode('utf16')[2:] + # |message| is a python unicode string, so convert to a byte stream that + # has the correct encoding requested for the datapacks. We skip the first + # 2 bytes of text resources because it is the BOM. 
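(Aside, not part of message.py: why the slice is needed. On a little-endian host, Python's 'utf16' codec prepends a byte-order mark; dropping the first two bytes leaves the bare little-endian payload that DataPack later reinterprets as char16 data.)

msg = u"hi"
encoded = msg.encode("utf16")              # BOM + little-endian code units
assert encoded[:2] == "\xff\xfe"           # the BOM (little-endian host)
assert encoded[2:] == msg.encode("utf-16-le")
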
+ if encoding == UTF8: + return id, message.encode('utf8') + if encoding == UTF16: + return id, message.encode('utf16')[2:] + # Default is BINARY + return id, message # static method def Construct(parent, message, name, desc='', meaning='', translateable=True): diff --git a/ui/base/resource/data_pack.cc b/ui/base/resource/data_pack.cc index c4b1594..5a0dcf1 100644 --- a/ui/base/resource/data_pack.cc +++ b/ui/base/resource/data_pack.cc @@ -17,9 +17,9 @@ namespace { -static const uint32 kFileFormatVersion = 3; -// Length of file header: version and entry count. -static const size_t kHeaderLength = 2 * sizeof(uint32); +static const uint32 kFileFormatVersion = 4; +// Length of file header: version, entry count and text encoding type. +static const size_t kHeaderLength = 2 * sizeof(uint32) + sizeof(uint8); #pragma pack(push,2) struct DataPackEntry { @@ -60,7 +60,7 @@ enum LoadErrors { namespace ui { // In .cc for MemoryMappedFile dtor. -DataPack::DataPack() : resource_count_(0) { +DataPack::DataPack() : resource_count_(0), text_encoding_type_(BINARY) { } DataPack::~DataPack() { } @@ -83,7 +83,7 @@ bool DataPack::Load(const FilePath& path) { } // Parse the header of the file. - // First uint32: version; second: resource count. + // First uint32: version; second: resource count; const uint32* ptr = reinterpret_cast<const uint32*>(mmap_->data()); uint32 version = ptr[0]; if (version != kFileFormatVersion) { @@ -96,6 +96,17 @@ bool DataPack::Load(const FilePath& path) { } resource_count_ = ptr[1]; + // third: text encoding. + const uint8* ptr_encoding = reinterpret_cast<const uint8*>(ptr + 2); + text_encoding_type_ = static_cast<TextEncodingType>(*ptr_encoding); + if (text_encoding_type_ != UTF8 && text_encoding_type_ != UTF16 && + text_encoding_type_ != BINARY) { + LOG(ERROR) << "Bad data pack text encoding: got " << text_encoding_type_ + << ", expected between " << BINARY << " and " << UTF16; + mmap_.reset(); + return false; + } + // Sanity check the file. // 1) Check we have enough entries. if (kHeaderLength + resource_count_ * sizeof(DataPackEntry) > @@ -163,7 +174,8 @@ RefCountedStaticMemory* DataPack::GetStaticMemory(uint16 resource_id) const { // static bool DataPack::WritePack(const FilePath& path, - const std::map<uint16, base::StringPiece>& resources) { + const std::map<uint16, base::StringPiece>& resources, + TextEncodingType textEncodingType) { FILE* file = file_util::OpenFile(path, "wb"); if (!file) return false; @@ -183,6 +195,21 @@ bool DataPack::WritePack(const FilePath& path, return false; } + if (textEncodingType != UTF8 && textEncodingType != UTF16 && + textEncodingType != BINARY) { + LOG(ERROR) << "Invalid text encoding type, got " << textEncodingType + << ", expected between " << BINARY << " and " << UTF16; + file_util::CloseFile(file); + return false; + } + + uint8 write_buffer = textEncodingType; + if (fwrite(&write_buffer, sizeof(uint8), 1, file) != 1) { + LOG(ERROR) << "Failed to write file text resources encoding"; + file_util::CloseFile(file); + return false; + } + // Each entry is a uint16 + a uint32. We have an extra entry after the last // item so we can compute the size of the list item. uint32 index_length = (entry_count + 1) * sizeof(DataPackEntry); diff --git a/ui/base/resource/data_pack.h b/ui/base/resource/data_pack.h index f6ee78d..0cc3b31 100644 --- a/ui/base/resource/data_pack.h +++ b/ui/base/resource/data_pack.h @@ -31,6 +31,13 @@ namespace ui { class UI_EXPORT DataPack { public: + // What type of encoding the text resources use. 
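(Aside, not part of the patch: the enum declared next uses the same values as the Python constants -- BINARY = 0, UTF8 = 1, UTF16 = 2. For reference, the load-time checks DataPack::Load performs in the hunk above, restated as a small Python sketch with a made-up helper name:)

import struct

HEADER_LENGTH = 2 * 4 + 1
INDEX_ENTRY_SIZE = 2 + 4
PACK_FILE_VERSION = 4
BINARY, UTF8, UTF16 = range(3)

def looks_like_valid_pack(data):
    # Header must parse, version and encoding must be known, and the index
    # must fit inside the file.
    if len(data) < HEADER_LENGTH:
        return False
    version, num_entries, encoding = struct.unpack("<IIB",
                                                    data[:HEADER_LENGTH])
    if version != PACK_FILE_VERSION:
        return False
    if encoding not in (BINARY, UTF8, UTF16):
        return False
    return HEADER_LENGTH + num_entries * INDEX_ENTRY_SIZE <= len(data)
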
+ enum TextEncodingType { + BINARY, + UTF8, + UTF16 + }; + DataPack(); ~DataPack(); @@ -47,9 +54,16 @@ class UI_EXPORT DataPack { // for localization strings. RefCountedStaticMemory* GetStaticMemory(uint16 resource_id) const; - // Writes a pack file containing |resources| to |path|. + // Writes a pack file containing |resources| to |path|. If there are any + // text resources to be written, their encoding must already agree to the + // |textEncodingType| specified. If no text resources are present, please + // indicate BINARY. static bool WritePack(const FilePath& path, - const std::map<uint16, base::StringPiece>& resources); + const std::map<uint16, base::StringPiece>& resources, + TextEncodingType textEncodingType); + + // Get the encoding type of text resources. + TextEncodingType GetTextEncodingType() const { return text_encoding_type_; } private: // The memory-mapped data. @@ -58,6 +72,9 @@ class UI_EXPORT DataPack { // Number of resources in the data. size_t resource_count_; + // Type of encoding for text resources. + TextEncodingType text_encoding_type_; + DISALLOW_COPY_AND_ASSIGN(DataPack); }; diff --git a/ui/base/resource/data_pack_literal.cc b/ui/base/resource/data_pack_literal.cc index 4a32fa9..510d07a 100644 --- a/ui/base/resource/data_pack_literal.cc +++ b/ui/base/resource/data_pack_literal.cc @@ -7,13 +7,14 @@ namespace ui { extern const char kSamplePakContents[] = { - 0x03, 0x00, 0x00, 0x00, // header(version - 0x04, 0x00, 0x00, 0x00, // no. entries) - 0x01, 0x00, 0x26, 0x00, 0x00, 0x00, // index entry 1 - 0x04, 0x00, 0x26, 0x00, 0x00, 0x00, // index entry 4 - 0x06, 0x00, 0x32, 0x00, 0x00, 0x00, // index entry 6 - 0x0a, 0x00, 0x3e, 0x00, 0x00, 0x00, // index entry 10 - 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, // extra entry for the size of last + 0x04, 0x00, 0x00, 0x00, // header(version + 0x04, 0x00, 0x00, 0x00, // no. 
entries + 0x01, // encoding) + 0x01, 0x00, 0x27, 0x00, 0x00, 0x00, // index entry 1 + 0x04, 0x00, 0x27, 0x00, 0x00, 0x00, // index entry 4 + 0x06, 0x00, 0x33, 0x00, 0x00, 0x00, // index entry 6 + 0x0a, 0x00, 0x3f, 0x00, 0x00, 0x00, // index entry 10 + 0x00, 0x00, 0x3f, 0x00, 0x00, 0x00, // extra entry for the size of last 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '4', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'i', 'd', ' ', '6' }; diff --git a/ui/base/resource/data_pack_unittest.cc b/ui/base/resource/data_pack_unittest.cc index fe3613c..1c0bd7a 100644 --- a/ui/base/resource/data_pack_unittest.cc +++ b/ui/base/resource/data_pack_unittest.cc @@ -12,6 +12,12 @@ namespace ui { +class DataPackTest + : public testing::TestWithParam<DataPack::TextEncodingType> { + public: + DataPackTest() {} +}; + extern const char kSamplePakContents[]; extern const size_t kSamplePakSize; @@ -44,6 +50,13 @@ TEST(DataPackTest, Load) { ASSERT_FALSE(pack.GetStringPiece(140, &data)); } +INSTANTIATE_TEST_CASE_P(WriteBINARY, DataPackTest, ::testing::Values( + DataPack::BINARY)); +INSTANTIATE_TEST_CASE_P(WriteUTF8, DataPackTest, ::testing::Values( + DataPack::UTF8)); +INSTANTIATE_TEST_CASE_P(WriteUTF16, DataPackTest, ::testing::Values( + DataPack::UTF16)); + TEST(DataPackTest, LoadFileWithTruncatedHeader) { FilePath data_path; PathService::Get(base::DIR_SOURCE_ROOT, &data_path); @@ -54,7 +67,7 @@ TEST(DataPackTest, LoadFileWithTruncatedHeader) { ASSERT_FALSE(pack.Load(data_path)); } -TEST(DataPackTest, Write) { +TEST_P(DataPackTest, Write) { ScopedTempDir dir; ASSERT_TRUE(dir.CreateUniqueTempDir()); FilePath file = dir.path().Append(FILE_PATH_LITERAL("data.pak")); @@ -71,11 +84,12 @@ TEST(DataPackTest, Write) { resources.insert(std::make_pair(15, base::StringPiece(fifteen))); resources.insert(std::make_pair(3, base::StringPiece(three))); resources.insert(std::make_pair(4, base::StringPiece(four))); - ASSERT_TRUE(DataPack::WritePack(file, resources)); + ASSERT_TRUE(DataPack::WritePack(file, resources, GetParam())); // Now try to read the data back in. DataPack pack; ASSERT_TRUE(pack.Load(file)); + EXPECT_EQ(pack.GetTextEncodingType(), GetParam()); base::StringPiece data; ASSERT_TRUE(pack.GetStringPiece(1, &data)); diff --git a/ui/base/resource/resource_bundle.cc b/ui/base/resource/resource_bundle.cc index 7ae0f4e..f1722ff 100644 --- a/ui/base/resource/resource_bundle.cc +++ b/ui/base/resource/resource_bundle.cc @@ -11,6 +11,7 @@ #include "base/stl_util.h" #include "base/string_piece.h" #include "base/synchronization/lock.h" +#include "base/utf_string_conversions.h" #include "build/build_config.h" #include "third_party/skia/include/core/SkBitmap.h" #include "ui/base/l10n/l10n_util.h" @@ -187,10 +188,19 @@ string16 ResourceBundle::GetLocalizedString(int message_id) { } } - // Data pack encodes strings as UTF16. - DCHECK_EQ(data.length() % 2, 0U); - string16 msg(reinterpret_cast<const char16*>(data.data()), - data.length() / 2); + // Strings should not be loaded from a data pack that contains binary data. + DCHECK(locale_resources_data_->GetTextEncodingType() == DataPack::UTF16 || + locale_resources_data_->GetTextEncodingType() == DataPack::UTF8) + << "requested localized string from binary pack file"; + + // Data pack encodes strings as either UTF8 or UTF16. 
+ string16 msg; + if (locale_resources_data_->GetTextEncodingType() == DataPack::UTF16) { + msg = string16(reinterpret_cast<const char16*>(data.data()), + data.length() / 2); + } else if (locale_resources_data_->GetTextEncodingType() == DataPack::UTF8) { + msg = UTF8ToUTF16(data); + } return msg; } |
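Taken together, a round trip with the patched grit tool looks like the sketch below (the file name is illustrative; the API is the one added to tools/grit/grit/format/data_pack.py above):

from grit.format import data_pack

resources = {1: "", 4: "this is id 4", 6: "this is id 6", 10: ""}
data_pack.DataPack.WriteDataPack(resources, "example.pak", data_pack.UTF8)

pack = data_pack.DataPack.ReadDataPack("example.pak")
assert pack.encoding == data_pack.UTF8
assert pack.resources[4] == "this is id 4"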