diff options
author | brettw <brettw@chromium.org> | 2015-07-06 16:53:00 -0700 |
---|---|---|
committer | Commit bot <commit-bot@chromium.org> | 2015-07-06 23:54:14 +0000 |
commit | 8cc24ae2b51f9db4a16011eb1ab7dbfca0eb6d54 (patch) | |
tree | 876a4d239206096f180630fc3657a1aa58a74630 /base/strings | |
parent | f359166a9a5dc5c4ea15b0b718b643fc06d3c870 (diff) | |
download | chromium_src-8cc24ae2b51f9db4a16011eb1ab7dbfca0eb6d54.zip chromium_src-8cc24ae2b51f9db4a16011eb1ab7dbfca0eb6d54.tar.gz chromium_src-8cc24ae2b51f9db4a16011eb1ab7dbfca0eb6d54.tar.bz2 |
Replace remaining Tokenize calls with SplitString
SplitString is now more general and does the job of Tokenize with specific parameters.
The biggest change is in time_util.cc where the old return pattern better matched how the code was structured. With the new style the conditionals are more nested.
Some simple cases were changed to StringPieces when copies were not required.
BUG=506920, 506255
Review URL: https://codereview.chromium.org/1219263002
Cr-Commit-Position: refs/heads/master@{#337520}
Diffstat (limited to 'base/strings')
-rw-r--r-- | base/strings/string_util.cc | 24 | ||||
-rw-r--r-- | base/strings/string_util.h | 16 | ||||
-rw-r--r-- | base/strings/string_util_unittest.cc | 102 |
3 files changed, 0 insertions, 142 deletions
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc index e2b7311..380d455 100644 --- a/base/strings/string_util.cc +++ b/base/strings/string_util.cc @@ -810,30 +810,6 @@ void ReplaceSubstringsAfterOffset(std::string* str, } // namespace base -size_t Tokenize(const base::string16& str, - const base::string16& delimiters, - std::vector<base::string16>* tokens) { - *tokens = base::SplitString( - str, delimiters, base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); - return tokens->size(); -} - -size_t Tokenize(const std::string& str, - const std::string& delimiters, - std::vector<std::string>* tokens) { - *tokens = base::SplitString( - str, delimiters, base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); - return tokens->size(); -} - -size_t Tokenize(const base::StringPiece& str, - const base::StringPiece& delimiters, - std::vector<base::StringPiece>* tokens) { - *tokens = base::SplitStringPiece( - str, delimiters, base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); - return tokens->size(); -} - template<typename STR> static STR JoinStringT(const std::vector<STR>& parts, const STR& sep) { if (parts.empty()) diff --git a/base/strings/string_util.h b/base/strings/string_util.h index f5cf0b3..c231609 100644 --- a/base/strings/string_util.h +++ b/base/strings/string_util.h @@ -492,22 +492,6 @@ inline typename string_type::value_type* WriteInto(string_type* str, //----------------------------------------------------------------------------- -// Splits a string into its fields delimited by any of the characters in -// |delimiters|. Each field is added to the |tokens| vector. Returns the -// number of tokens found. -// -// DEPRECATED. Use base::SplitString for new code (these just forward). -// TODO(brettw) convert callers and delete these forwarders. 
-BASE_EXPORT size_t Tokenize(const base::string16& str, - const base::string16& delimiters, - std::vector<base::string16>* tokens); -BASE_EXPORT size_t Tokenize(const std::string& str, - const std::string& delimiters, - std::vector<std::string>* tokens); -BASE_EXPORT size_t Tokenize(const base::StringPiece& str, - const base::StringPiece& delimiters, - std::vector<base::StringPiece>* tokens); - // Does the opposite of SplitString(). BASE_EXPORT base::string16 JoinString(const std::vector<base::string16>& parts, base::char16 s); diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc index ad7ff24..02b184c 100644 --- a/base/strings/string_util_unittest.cc +++ b/base/strings/string_util_unittest.cc @@ -669,108 +669,6 @@ TEST(StringUtilTest, HexDigitToInt) { EXPECT_EQ(15, HexDigitToInt('f')); } -// Test for Tokenize -template <typename STR> -void TokenizeTest() { - std::vector<STR> r; - size_t size; - - size = Tokenize(STR("This is a string"), STR(" "), &r); - EXPECT_EQ(4U, size); - ASSERT_EQ(4U, r.size()); - EXPECT_EQ(r[0], STR("This")); - EXPECT_EQ(r[1], STR("is")); - EXPECT_EQ(r[2], STR("a")); - EXPECT_EQ(r[3], STR("string")); - r.clear(); - - size = Tokenize(STR("one,two,three"), STR(","), &r); - EXPECT_EQ(3U, size); - ASSERT_EQ(3U, r.size()); - EXPECT_EQ(r[0], STR("one")); - EXPECT_EQ(r[1], STR("two")); - EXPECT_EQ(r[2], STR("three")); - r.clear(); - - size = Tokenize(STR("one,two:three;four"), STR(",:"), &r); - EXPECT_EQ(3U, size); - ASSERT_EQ(3U, r.size()); - EXPECT_EQ(r[0], STR("one")); - EXPECT_EQ(r[1], STR("two")); - EXPECT_EQ(r[2], STR("three;four")); - r.clear(); - - size = Tokenize(STR("one,two:three;four"), STR(";,:"), &r); - EXPECT_EQ(4U, size); - ASSERT_EQ(4U, r.size()); - EXPECT_EQ(r[0], STR("one")); - EXPECT_EQ(r[1], STR("two")); - EXPECT_EQ(r[2], STR("three")); - EXPECT_EQ(r[3], STR("four")); - r.clear(); - - size = Tokenize(STR("one, two, three"), STR(","), &r); - EXPECT_EQ(3U, size); - ASSERT_EQ(3U, r.size()); - 
EXPECT_EQ(r[0], STR("one")); - EXPECT_EQ(r[1], STR(" two")); - EXPECT_EQ(r[2], STR(" three")); - r.clear(); - - size = Tokenize(STR("one, two, three, "), STR(","), &r); - EXPECT_EQ(4U, size); - ASSERT_EQ(4U, r.size()); - EXPECT_EQ(r[0], STR("one")); - EXPECT_EQ(r[1], STR(" two")); - EXPECT_EQ(r[2], STR(" three")); - EXPECT_EQ(r[3], STR(" ")); - r.clear(); - - size = Tokenize(STR("one, two, three,"), STR(","), &r); - EXPECT_EQ(3U, size); - ASSERT_EQ(3U, r.size()); - EXPECT_EQ(r[0], STR("one")); - EXPECT_EQ(r[1], STR(" two")); - EXPECT_EQ(r[2], STR(" three")); - r.clear(); - - size = Tokenize(STR(), STR(","), &r); - EXPECT_EQ(0U, size); - ASSERT_EQ(0U, r.size()); - r.clear(); - - size = Tokenize(STR(","), STR(","), &r); - EXPECT_EQ(0U, size); - ASSERT_EQ(0U, r.size()); - r.clear(); - - size = Tokenize(STR(",;:."), STR(".:;,"), &r); - EXPECT_EQ(0U, size); - ASSERT_EQ(0U, r.size()); - r.clear(); - - size = Tokenize(STR("\t\ta\t"), STR("\t"), &r); - EXPECT_EQ(1U, size); - ASSERT_EQ(1U, r.size()); - EXPECT_EQ(r[0], STR("a")); - r.clear(); - - size = Tokenize(STR("\ta\t\nb\tcc"), STR("\n"), &r); - EXPECT_EQ(2U, size); - ASSERT_EQ(2U, r.size()); - EXPECT_EQ(r[0], STR("\ta\t")); - EXPECT_EQ(r[1], STR("b\tcc")); - r.clear(); -} - -TEST(StringUtilTest, TokenizeStdString) { - TokenizeTest<std::string>(); -} - -TEST(StringUtilTest, TokenizeStringPiece) { - TokenizeTest<StringPiece>(); -} - // Test for JoinString TEST(StringUtilTest, JoinString) { std::vector<std::string> in; |