author     motek@chromium.org <motek@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-04-17 10:39:07 +0000
committer  motek@chromium.org <motek@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-04-17 10:39:07 +0000
commit     bef5cc1a8083a1a495b4341dfeba5d4f01889f9e (patch)
tree       a956f26e0235dad20936a3f495dfb54842ec2f68 /skia
parent     ca690b0ccb3684bec0a7a5ed2406346762b57ba7 (diff)
download   chromium_src-bef5cc1a8083a1a495b4341dfeba5d4f01889f9e.zip
           chromium_src-bef5cc1a8083a1a495b4341dfeba5d4f01889f9e.tar.gz
           chromium_src-bef5cc1a8083a1a495b4341dfeba5d4f01889f9e.tar.bz2
Complete (but inefficient) implementation of the image retargeting method.

This CL contains:
1. Convolution of an arbitrary channel with an arbitrary kernel (convolver*).
2. Gaussian gradient magnitude implementation.
3. Image profile (X and Y projections) computations.
4. Otsu-like thresholding of profiles.
5. Image decimation.
6. The main routine which binds it all together.

Note: 1 and 2 do work, but remain the main sources of the performance problems; I am actively working on this. Still, I'd like to land the current state to establish a baseline and for the benefit of those who are interested.

BUG=155269
Review URL: https://codereview.chromium.org/13947013

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@194565 0039d316-1c4b-4281-b951-d872f2087c98
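The skia/ part of the patch adds two single-channel 1D convolution entry points meant to be applied as separable passes. A minimal usage sketch (not part of this CL; the 5-tap box kernel and the BoxBlurChannel helper are illustrative, mirroring the unit test in convolver_unittest.cc):

#include <vector>

#include "skia/ext/convolver.h"
#include "third_party/skia/include/core/SkSize.h"

// Hypothetical helper: separable 5-tap box blur of channel 1 of an
// interleaved 3-channel image, written to a single-channel output plane.
void BoxBlurChannel(const unsigned char* rgb, int src_row_stride,
                    int width, int height,
                    unsigned char* blurred, int dest_row_stride) {
  skia::ConvolutionFilter1D filter;
  const float box[5] = { 0.2f, 0.2f, 0.2f, 0.2f, 0.2f };
  filter.AddFilter(0, box, 5);  // Same filter for every pixel, at offset 0.

  // Intermediate single-channel plane for the horizontal pass.
  std::vector<unsigned char> tmp(dest_row_stride * height);

  // Horizontal pass: read channel 1 of the 3-channel source, write channel 0
  // of a 1-channel destination.
  skia::SingleChannelConvolveX1D(rgb, src_row_stride, 1, 3, filter,
                                 SkISize::Make(width, height),
                                 &tmp[0], dest_row_stride, 0, 1, false);
  // Vertical pass on the intermediate plane.
  skia::SingleChannelConvolveY1D(&tmp[0], dest_row_stride, 0, 1, filter,
                                 SkISize::Make(width, height),
                                 blurred, dest_row_stride, 0, 1, false);
}

The SeparableSingleConvolution test below drives the same two calls with a single-impulse image.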
Diffstat (limited to 'skia')
-rw-r--r--   skia/ext/convolver.cc            196
-rw-r--r--   skia/ext/convolver.h              54
-rw-r--r--   skia/ext/convolver_unittest.cc   149
3 files changed, 396 insertions(+), 3 deletions(-)
diff --git a/skia/ext/convolver.cc b/skia/ext/convolver.cc
index cbfa931..2057b22 100644
--- a/skia/ext/convolver.cc
+++ b/skia/ext/convolver.cc
@@ -4,8 +4,10 @@
#include <algorithm>
+#include "base/logging.h"
#include "skia/ext/convolver.h"
#include "skia/ext/convolver_SSE2.h"
+#include "third_party/skia/include/core/SkSize.h"
#include "third_party/skia/include/core/SkTypes.h"
namespace skia {
@@ -22,6 +24,17 @@ inline unsigned char ClampTo8(int a) {
return 255;
}
+// Takes the value produced by accumulating the element-wise product of the
+// image with a kernel and brings it back into the 8-bit range.
+// All of the filter scaling factors are in fixed point with kShiftBits bits of
+// fractional part.
+inline unsigned char BringBackTo8(int a, bool take_absolute) {
+ a >>= ConvolutionFilter1D::kShiftBits;
+ if (take_absolute)
+ a = std::abs(a);
+ return ClampTo8(a);
+}
+
// Stores a list of rows in a circular buffer. The usage is you write into it
// by calling AdvanceRow. It will keep track of which row in the buffer it
// should use next, and the total number of rows added.
@@ -271,6 +284,7 @@ void ConvolutionFilter1D::AddFilter(int filter_offset,
// cases it is beneficial to only store the central factors.
// For a scaling to 1/4th in each dimension using a Lanczos-2 filter on
// a 1080p image this optimization gives a ~10% speed improvement.
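+ // Remember the specified length before leading/trailing zeros are clipped.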
+ int filter_size = filter_length;
int first_non_zero = 0;
while (first_non_zero < filter_length && filter_values[first_non_zero] == 0)
first_non_zero++;
@@ -298,12 +312,27 @@ void ConvolutionFilter1D::AddFilter(int filter_offset,
instance.data_location = (static_cast<int>(filter_values_.size()) -
filter_length);
instance.offset = filter_offset;
- instance.length = filter_length;
+ instance.trimmed_length = filter_length;
+ instance.length = filter_size;
filters_.push_back(instance);
max_filter_ = std::max(max_filter_, filter_length);
}
+const ConvolutionFilter1D::Fixed* ConvolutionFilter1D::GetSingleFilter(
+ int* specified_filter_length,
+ int* filter_offset,
+ int* filter_length) const {
+ const FilterInstance& filter = filters_[0];
+ *filter_offset = filter.offset;
+ *filter_length = filter.trimmed_length;
+ *specified_filter_length = filter.length;
+ if (filter.trimmed_length == 0)
+ return NULL;
+
+ return &filter_values_[filter.data_location];
+}
+
typedef void (*ConvolveVertically_pointer)(
const ConvolutionFilter1D::Fixed* filter_values,
int filter_length,
@@ -478,4 +507,169 @@ void BGRAConvolve2D(const unsigned char* source_data,
}
}
+void SingleChannelConvolveX1D(const unsigned char* source_data,
+ int source_byte_row_stride,
+ int input_channel_index,
+ int input_channel_count,
+ const ConvolutionFilter1D& filter,
+ const SkISize& image_size,
+ unsigned char* output,
+ int output_byte_row_stride,
+ int output_channel_index,
+ int output_channel_count,
+ bool absolute_values) {
+ int filter_offset, filter_length, filter_size;
+ // Unlike BGRAConvolve2D, here we expect the same filter to be used
+ // for all pixels.
+ const ConvolutionFilter1D::Fixed* filter_values =
+ filter.GetSingleFilter(&filter_size, &filter_offset, &filter_length);
+
+ if (filter_values == NULL || image_size.width() < filter_size) {
+ NOTREACHED();
+ return;
+ }
+
+ int centrepoint = filter_length / 2;
+ if (filter_size - filter_offset != 2 * filter_offset) {
+ // This means the original filter was not symmetrical AND
+ // got clipped from one side more than from the other.
+ centrepoint = filter_size / 2 - filter_offset;
+ }
+
+ const unsigned char* source_data_row = source_data;
+ unsigned char* output_row = output;
+
+ for (int r = 0; r < image_size.height(); ++r) {
+ unsigned char* target_byte = output_row + output_channel_index;
+ // Process the lead part, padding to the left with the first pixel.
+ int c = 0;
+ for (; c < centrepoint; ++c, target_byte += output_channel_count) {
+ int accval = 0;
+ int i = 0;
+ int pixel_byte_index = input_channel_index;
+ for (; i < centrepoint - c; ++i) // Padding part.
+ accval += filter_values[i] * source_data_row[pixel_byte_index];
+
+ for (; i < filter_length; ++i, pixel_byte_index += input_channel_count)
+ accval += filter_values[i] * source_data_row[pixel_byte_index];
+
+ *target_byte = BringBackTo8(accval, absolute_values);
+ }
+
+ // Now the central part of the row, fully covered by the filter.
+ for (; c < image_size.width() - centrepoint;
+ ++c, target_byte += output_channel_count) {
+ int accval = 0;
+ int pixel_byte_index = (c - centrepoint) * input_channel_count +
+ input_channel_index;
+
+ for (int i = 0; i < filter_length;
+ ++i, pixel_byte_index += input_channel_count) {
+ accval += filter_values[i] * source_data_row[pixel_byte_index];
+ }
+
+ *target_byte = BringBackTo8(accval, absolute_values);
+ }
+
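+ // Process the trailing part, padding to the right with the last pixel.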
+ for (; c < image_size.width(); ++c, target_byte += output_channel_count) {
+ int accval = 0;
+ int overlap_taps = image_size.width() - c + centrepoint;
+ int pixel_byte_index = (c - centrepoint) * input_channel_count +
+ input_channel_index;
+ int i = 0;
+ for (; i < overlap_taps - 1; ++i, pixel_byte_index += input_channel_count)
+ accval += filter_values[i] * source_data_row[pixel_byte_index];
+
+ for (; i < filter_length; ++i)
+ accval += filter_values[i] * source_data_row[pixel_byte_index];
+
+ *target_byte = BringBackTo8(accval, absolute_values);
+ }
+
+ source_data_row += source_byte_row_stride;
+ output_row += output_byte_row_stride;
+ }
+}
+
+void SingleChannelConvolveY1D(const unsigned char* source_data,
+ int source_byte_row_stride,
+ int input_channel_index,
+ int input_channel_count,
+ const ConvolutionFilter1D& filter,
+ const SkISize& image_size,
+ unsigned char* output,
+ int output_byte_row_stride,
+ int output_channel_index,
+ int output_channel_count,
+ bool absolute_values) {
+ int filter_offset, filter_length, filter_size;
+ // Unlike BGRAConvolve2D, here we expect the same filter to be used
+ // for all pixels.
+ const ConvolutionFilter1D::Fixed* filter_values =
+ filter.GetSingleFilter(&filter_size, &filter_offset, &filter_length);
+
+ if (filter_values == NULL || image_size.height() < filter_size) {
+ NOTREACHED();
+ return;
+ }
+
+ int centrepoint = filter_length / 2;
+ if (filter_size - filter_offset != 2 * filter_offset) {
+ // This means the original filter was not symmetrical AND
+ // got clipped from one side more than from the other.
+ centrepoint = filter_size / 2 - filter_offset;
+ }
+
+ for (int c = 0; c < image_size.width(); ++c) {
+ unsigned char* target_byte = output + c * output_channel_count +
+ output_channel_index;
+ int r = 0;
+
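+ // Process the lead part, padding at the top with the first row.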
+ for (; r < centrepoint; ++r, target_byte += output_byte_row_stride) {
+ int accval = 0;
+ int i = 0;
+ int pixel_byte_index = c * input_channel_count + input_channel_index;
+
+ for (; i < centrepoint - r; ++i) // Padding part.
+ accval += filter_values[i] * source_data[pixel_byte_index];
+
+ for (; i < filter_length; ++i, pixel_byte_index += source_byte_row_stride)
+ accval += filter_values[i] * source_data[pixel_byte_index];
+
+ *target_byte = BringBackTo8(accval, absolute_values);
+ }
+
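+ // Now the central part of the column, fully covered by the filter.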
+ for (; r < image_size.height() - centrepoint;
+ ++r, target_byte += output_byte_row_stride) {
+ int accval = 0;
+ int pixel_byte_index = (r - centrepoint) * source_byte_row_stride +
+ c * input_channel_count + input_channel_index;
+ for (int i = 0; i < filter_length;
+ ++i, pixel_byte_index += source_byte_row_stride) {
+ accval += filter_values[i] * source_data[pixel_byte_index];
+ }
+
+ *target_byte = BringBackTo8(accval, absolute_values);
+ }
+
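+ // Process the trailing part, padding at the bottom with the last row.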
+ for (; r < image_size.height();
+ ++r, target_byte += output_byte_row_stride) {
+ int accval = 0;
+ int overlap_taps = image_size.height() - r + centrepoint;
+ int pixel_byte_index = (r - centrepoint) * source_byte_row_stride +
+ c * input_channel_count + input_channel_index;
+ int i = 0;
+ for (; i < overlap_taps - 1;
+ ++i, pixel_byte_index += source_byte_row_stride) {
+ accval += filter_values[i] * source_data[pixel_byte_index];
+ }
+
+ for (; i < filter_length; ++i)
+ accval += filter_values[i] * source_data[pixel_byte_index];
+
+ *target_byte = BringBackTo8(accval, absolute_values);
+ }
+ }
+}
+
} // namespace skia
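A side note on the fixed-point arithmetic above: AddFilter stores each float coefficient scaled by 2^kShiftBits, so the accumulator in the loops carries kShiftBits fractional bits that BringBackTo8 shifts away before clamping. A standalone sketch of that round trip (the value 14 for kShiftBits and the local FloatToFixed are assumptions mirroring ConvolutionFilter1D, not code from this CL):

#include <algorithm>
#include <cstdio>

// Assumed to match ConvolutionFilter1D: 14 fractional bits, 16-bit storage.
const int kShiftBits = 14;
typedef short Fixed;

Fixed FloatToFixed(float f) {
  return static_cast<Fixed>(f * (1 << kShiftBits));
}

int main() {
  // A 3-tap kernel that sums to 1.0, applied to three 8-bit samples.
  const float kernel[3] = { 0.25f, 0.5f, 0.25f };
  const unsigned char samples[3] = { 10, 200, 30 };

  int accum = 0;
  for (int i = 0; i < 3; ++i)
    accum += FloatToFixed(kernel[i]) * samples[i];

  // The same steps BringBackTo8(accum, false) performs: drop the fractional
  // bits, then clamp to [0, 255].
  accum >>= kShiftBits;
  accum = std::min(std::max(accum, 0), 255);
  std::printf("filtered value = %d\n", accum);  // Prints 110.
  return 0;
}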
diff --git a/skia/ext/convolver.h b/skia/ext/convolver.h
index 3065338..6da703c 100644
--- a/skia/ext/convolver.h
+++ b/skia/ext/convolver.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "base/cpu.h"
+#include "third_party/skia/include/core/SkSize.h"
#include "third_party/skia/include/core/SkTypes.h"
// We can build SSE2 optimized versions for all x86 CPUs
@@ -99,13 +100,24 @@ class ConvolutionFilter1D {
int* filter_length) const {
const FilterInstance& filter = filters_[value_offset];
*filter_offset = filter.offset;
- *filter_length = filter.length;
- if (filter.length == 0) {
+ *filter_length = filter.trimmed_length;
+ if (filter.trimmed_length == 0) {
return NULL;
}
return &filter_values_[filter.data_location];
}
+ // Retrieves the filter for offset 0, presumed to be the one and only.
+ // The offset and length of the filter values are put into the corresponding
+ // out arguments (see AddFilter). Note that |filter_length| and
+ // |specified_filter_length| may be different if leading/trailing zeros of the
+ // original floating point form were clipped.
+ // There will be |filter_length| values in the returned array.
+ // Returns NULL if the filter is 0-length (for instance when all floating
+ // point values passed to AddFilter were clipped to 0).
+ const Fixed* GetSingleFilter(int* specified_filter_length,
+ int* filter_offset,
+ int* filter_length) const;
inline void PaddingForSIMD() {
// Padding |padding_count| of more dummy coefficients after the coefficients
@@ -128,6 +140,11 @@ class ConvolutionFilter1D {
int offset;
// Number of values in this filter instance.
+ int trimmed_length;
+
+ // Filter length as specified. Note that this may be different from
+ // 'trimmed_length' if leading/trailing zeros of the original floating
+ // point form were clipped.
int length;
};
@@ -169,6 +186,39 @@ SK_API void BGRAConvolve2D(const unsigned char* source_data,
int output_byte_row_stride,
unsigned char* output,
bool use_simd_if_possible);
+
+// Does a 1D convolution of the given source image along the X dimension on
+// a single channel of the bitmap.
+//
+// The function uses the same convolution kernel for each pixel. That kernel
+// must be added to |filter| at offset 0. This is the most straightforward
+// implementation of convolution, intended chiefly for development purposes.
+SK_API void SingleChannelConvolveX1D(const unsigned char* source_data,
+ int source_byte_row_stride,
+ int input_channel_index,
+ int input_channel_count,
+ const ConvolutionFilter1D& filter,
+ const SkISize& image_size,
+ unsigned char* output,
+ int output_byte_row_stride,
+ int output_channel_index,
+ int output_channel_count,
+ bool absolute_values);
+
+// Does a 1D convolution of the given source image along the Y dimension on
+// a single channel of the bitmap.
+SK_API void SingleChannelConvolveY1D(const unsigned char* source_data,
+ int source_byte_row_stride,
+ int input_channel_index,
+ int input_channel_count,
+ const ConvolutionFilter1D& filter,
+ const SkISize& image_size,
+ unsigned char* output,
+ int output_byte_row_stride,
+ int output_channel_index,
+ int output_channel_count,
+ bool absolute_values);
+
} // namespace skia
#endif // SKIA_EXT_CONVOLVER_H_
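The |absolute_values| flag exists because derivative kernels, such as the Gaussian gradient computation mentioned in the CL description, produce signed responses; BringBackTo8 folds them through std::abs so they still fit the 8-bit output. A hedged sketch of an absolute-gradient pass along X (the central-difference kernel and the AbsGradientX helper are illustrative, not what the retargeting code uses):

#include "skia/ext/convolver.h"
#include "third_party/skia/include/core/SkSize.h"

// Hypothetical helper: |d/dx| of a single-channel plane. Negative derivative
// responses are folded back into [0, 255] by passing absolute_values = true.
void AbsGradientX(const unsigned char* plane, int row_stride,
                  int width, int height, unsigned char* out) {
  skia::ConvolutionFilter1D filter;
  const float central_difference[3] = { -0.5f, 0.0f, 0.5f };
  filter.AddFilter(0, central_difference, 3);
  skia::SingleChannelConvolveX1D(plane, row_stride, 0, 1, filter,
                                 SkISize::Make(width, height),
                                 out, width, 0, 1,
                                 true /* absolute_values */);
}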
diff --git a/skia/ext/convolver_unittest.cc b/skia/ext/convolver_unittest.cc
index 377ed8e..b877655 100644
--- a/skia/ext/convolver_unittest.cc
+++ b/skia/ext/convolver_unittest.cc
@@ -324,4 +324,153 @@ TEST(Convolver, SIMDVerification) {
}
}
+TEST(Convolver, SeparableSingleConvolution) {
+ static const int kImgWidth = 1024;
+ static const int kImgHeight = 1024;
+ static const int kChannelCount = 3;
+ static const int kStrideSlack = 22;
+ ConvolutionFilter1D filter;
+ const float box[5] = { 0.2f, 0.2f, 0.2f, 0.2f, 0.2f };
+ filter.AddFilter(0, box, 5);
+
+ // Allocate a source image and set to 0.
+ const int src_row_stride = kImgWidth * kChannelCount + kStrideSlack;
+ int src_byte_count = src_row_stride * kImgHeight;
+ std::vector<unsigned char> input;
+ const int signal_x = kImgWidth / 2;
+ const int signal_y = kImgHeight / 2;
+ input.resize(src_byte_count, 0);
+ // The image has a single impulse pixel in channel 1, smack in the middle.
+ const int non_zero_pixel_index =
+ signal_y * src_row_stride + signal_x * kChannelCount + 1;
+ input[non_zero_pixel_index] = 255;
+
+ // Destination will be a single-channel image with stride matching the width.
+ const int dest_row_stride = kImgWidth;
+ const int dest_byte_count = dest_row_stride * kImgHeight;
+ std::vector<unsigned char> output;
+ output.resize(dest_byte_count);
+
+ // Apply convolution in X.
+ SingleChannelConvolveX1D(&input[0], src_row_stride, 1, kChannelCount,
+ filter, SkISize::Make(kImgWidth, kImgHeight),
+ &output[0], dest_row_stride, 0, 1, false);
+ for (int x = signal_x - 2; x <= signal_x + 2; ++x)
+ EXPECT_GT(output[signal_y * dest_row_stride + x], 0);
+
+ EXPECT_EQ(output[signal_y * dest_row_stride + signal_x - 3], 0);
+ EXPECT_EQ(output[signal_y * dest_row_stride + signal_x + 3], 0);
+
+ // Apply convolution in Y.
+ SingleChannelConvolveY1D(&input[0], src_row_stride, 1, kChannelCount,
+ filter, SkISize::Make(kImgWidth, kImgHeight),
+ &output[0], dest_row_stride, 0, 1, false);
+ for (int y = signal_y - 2; y <= signal_y + 2; ++y)
+ EXPECT_GT(output[y * dest_row_stride + signal_x], 0);
+
+ EXPECT_EQ(output[(signal_y - 3) * dest_row_stride + signal_x], 0);
+ EXPECT_EQ(output[(signal_y + 3) * dest_row_stride + signal_x], 0);
+
+ EXPECT_EQ(output[signal_y * dest_row_stride + signal_x - 1], 0);
+ EXPECT_EQ(output[signal_y * dest_row_stride + signal_x + 1], 0);
+
+ // The main point of this call is to exercise the routine on an input whose
+ // row stride has no padding slack.
+ std::vector<unsigned char> output2;
+ output2.resize(dest_byte_count);
+ SingleChannelConvolveX1D(&output[0], dest_row_stride, 0, 1,
+ filter, SkISize::Make(kImgWidth, kImgHeight),
+ &output2[0], dest_row_stride, 0, 1, false);
+ // The result should now be equivalent to a 2D convolution of the impulse.
+ for (int x = signal_x - 2; x <= signal_x + 2; ++x) {
+ for (int y = signal_y - 2; y <= signal_y + 2; ++y)
+ EXPECT_GT(output2[y * dest_row_stride + x], 0);
+ }
+ EXPECT_EQ(output2[0], 0);
+ EXPECT_EQ(output2[dest_row_stride - 1], 0);
+ EXPECT_EQ(output2[dest_byte_count - 1], 0);
+}
+
+TEST(Convolver, SeparableSingleConvolutionEdges) {
+ // The purpose of this test is to check that the implementation correctly
+ // handles the edges of the image.
+ static const int kImgWidth = 600;
+ static const int kImgHeight = 800;
+ static const int kChannelCount = 3;
+ static const int kStrideSlack = 22;
+ static const int kChannel = 1;
+ ConvolutionFilter1D filter;
+ const float box[5] = { 0.2f, 0.2f, 0.2f, 0.2f, 0.2f };
+ filter.AddFilter(0, box, 5);
+
+ // Allocate a source image and set to 0.
+ int src_row_stride = kImgWidth * kChannelCount + kStrideSlack;
+ int src_byte_count = src_row_stride * kImgHeight;
+ std::vector<unsigned char> input(src_byte_count);
+
+ // Draw a frame around the image.
+ for (int i = 0; i < src_byte_count; ++i) {
+ int row = i / src_row_stride;
+ int col = i % src_row_stride / kChannelCount;
+ int channel = i % src_row_stride % kChannelCount;
+ if (channel != kChannel || col > kImgWidth) {
+ input[i] = 255;
+ } else if (row == 0 || col == 0 ||
+ col == kImgWidth - 1 || row == kImgHeight - 1) {
+ input[i] = 100;
+ } else if (row == 1 || col == 1 ||
+ col == kImgWidth - 2 || row == kImgHeight - 2) {
+ input[i] = 200;
+ } else {
+ input[i] = 0;
+ }
+ }
+
+ // Destination will be a single-channel image with stride matching the width.
+ int dest_row_stride = kImgWidth;
+ int dest_byte_count = dest_row_stride * kImgHeight;
+ std::vector<unsigned char> output;
+ output.resize(dest_byte_count);
+
+ // Apply convolution in X.
+ SingleChannelConvolveX1D(&input[0], src_row_stride, 1, kChannelCount,
+ filter, SkISize::Make(kImgWidth, kImgHeight),
+ &output[0], dest_row_stride, 0, 1, false);
+
+ // Sadly, comparison is not as simple as retaining all values.
+ int invalid_values = 0;
+ const unsigned char first_value = output[0];
+ EXPECT_TRUE(std::abs(100 - first_value) <= 1);
+ for (int i = 0; i < dest_row_stride; ++i) {
+ if (output[i] != first_value)
+ ++invalid_values;
+ }
+ EXPECT_EQ(0, invalid_values);
+
+ int test_row = 22;
+ EXPECT_NEAR(output[test_row * dest_row_stride], 100, 1);
+ EXPECT_NEAR(output[test_row * dest_row_stride + 1], 80, 1);
+ EXPECT_NEAR(output[test_row * dest_row_stride + 2], 60, 1);
+ EXPECT_NEAR(output[test_row * dest_row_stride + 3], 40, 1);
+ EXPECT_NEAR(output[(test_row + 1) * dest_row_stride - 1], 100, 1);
+ EXPECT_NEAR(output[(test_row + 1) * dest_row_stride - 2], 80, 1);
+ EXPECT_NEAR(output[(test_row + 1) * dest_row_stride - 3], 60, 1);
+ EXPECT_NEAR(output[(test_row + 1) * dest_row_stride - 4], 40, 1);
+
+ SingleChannelConvolveY1D(&input[0], src_row_stride, 1, kChannelCount,
+ filter, SkISize::Make(kImgWidth, kImgHeight),
+ &output[0], dest_row_stride, 0, 1, false);
+
+ int test_column = 42;
+ EXPECT_NEAR(output[test_column], 100, 1);
+ EXPECT_NEAR(output[test_column + dest_row_stride], 80, 1);
+ EXPECT_NEAR(output[test_column + dest_row_stride * 2], 60, 1);
+ EXPECT_NEAR(output[test_column + dest_row_stride * 3], 40, 1);
+
+ EXPECT_NEAR(output[test_column + dest_row_stride * (kImgHeight - 1)], 100, 1);
+ EXPECT_NEAR(output[test_column + dest_row_stride * (kImgHeight - 2)], 80, 1);
+ EXPECT_NEAR(output[test_column + dest_row_stride * (kImgHeight - 3)], 60, 1);
+ EXPECT_NEAR(output[test_column + dest_row_stride * (kImgHeight - 4)], 40, 1);
+}
+
} // namespace skia