Decent speed improvement for HybridBinarizer::calculateBlackPoints(). The
approach is to stop testing for min/max values once the dynamic-range
condition is satisfied.

On "typical" images, the function is about 20% faster. Worst-case images
(low-contrast images) can be slower, but they are less likely to be read
successfully anyway.

This change does not affect test results; it improves only performance.



git-svn-id: https://zxing.googlecode.com/svn/trunk@2174 59b500cc-1b3d-0410-9834-0bbf25fbcc57
This commit is contained in:
evansepdx@gmail.com 2012-02-10 01:43:47 +00:00
parent 9d714829c5
commit a3f11f293c

View file

@ -156,8 +156,11 @@ namespace {
}
}
int* HybridBinarizer::calculateBlackPoints(unsigned char* luminances, int subWidth, int subHeight,
int width, int height) {
const int minDynamicRange = 24;
int *blackPoints = new int[subHeight * subWidth];
for (int y = 0; y < subHeight; y++) {
int yoffset = y << BLOCK_SIZE_POWER;
@ -178,6 +181,7 @@ int* HybridBinarizer::calculateBlackPoints(unsigned char* luminances, int subWid
for (int xx = 0; xx < BLOCK_SIZE; xx++) {
int pixel = luminances[offset + xx] & 0xFF;
sum += pixel;
// still looking for good contrast
if (pixel < min) {
min = pixel;
}
@ -185,12 +189,22 @@ int* HybridBinarizer::calculateBlackPoints(unsigned char* luminances, int subWid
max = pixel;
}
}
// short-circuit min/max tests once dynamic range is met
if (max - min > minDynamicRange) {
// finish the rest of the rows quickly
for (yy++, offset += width; yy < BLOCK_SIZE; yy++, offset += width) {
for (int xx = 0; xx < BLOCK_SIZE; xx += 2) {
sum += luminances[offset + xx] & 0xFF;
sum += luminances[offset + xx + 1] & 0xFF;
}
}
}
}
// See
// http://groups.google.com/group/zxing/browse_thread/thread/d06efa2c35a7ddc0
int average = sum >> 6;
if (max - min <= 24) {
int average = sum >> (BLOCK_SIZE_POWER * 2);
if (max - min <= minDynamicRange) {
average = min >> 1;
if (y > 0 && x > 0) {
int bp = getBlackPointFromNeighbors(blackPoints, subWidth, x, y);