Refactored the MonochromeBitmapSource class hierarchy into LuminanceSource, Binarizer, and BinaryBitmap classes. The unit tests pass and I believe the change is complete, but there are some clients like J2ME and Bug that I can't build.

This change allows new thresholding algorithms to be developed and tested independently, and to be implemented per platform where needed (e.g. in JNI on Android). It should also perform better when multiple 2D Readers are installed, because the underlying image is converted to 1 bit only once. Finally, it lets some platforms return luminance data without a copy.
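
In concrete terms, decoding now follows this pattern (a sketch mirroring the Android DecodeThread hunk below; data, width, height and rect are supplied by the caller):

    // Wrap the platform's luminance data, choose a Binarizer, and hand the
    // resulting BinaryBitmap to the Reader. The 1 bit conversion is cached
    // inside BinaryBitmap, so multiple 2D Readers share one conversion.
    LuminanceSource source = new YUVLuminanceSource(data, width, height,
        rect.left, rect.top, rect.width(), rect.height());
    BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
    Result rawResult = multiFormatReader.decodeWithState(bitmap);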

This checkin currently uses the old black point algorithm, which now lives in GlobalHistogramBinarizer. It will serve as our benchmark and as the fallback for slower platforms. Going forward we will move to LocalBlockBinarizer as I shake out its edge cases and tune its performance; it is currently unused.
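
Because the thresholding algorithm is chosen where the BinaryBitmap is built, that switch should be a one-line change. A sketch, assuming LocalBlockBinarizer will take a LuminanceSource in its constructor just as GlobalHistogramBinarizer does:

    // Today: the benchmark/fallback algorithm.
    BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
    // Later, once tuned (constructor signature assumed):
    // BinaryBitmap bitmap = new BinaryBitmap(new LocalBlockBinarizer(source));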

IMPORTANT: I have temporarily included three methods from MonochromeBitmapSource at the end of BinaryBitmap as a way to make this change in stages. They are deprecated and will be removed soon. All of the Reader classes will need to change the way they examine 1 bit pixels to use the new getBlackRow() and getBlackMatrix() calls.
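
For Reader authors, the migration described by the TIP comments in BinaryBitmap looks roughly like this (a sketch; image is the BinaryBitmap passed to decode()):

    // Before: per-pixel calls through the deprecated shim.
    // if (image.isBlack(x, y)) { ... }

    // After: fetch the 1 bit matrix once, then do random access on it.
    BitMatrix matrix = image.getBlackMatrix();
    if (matrix.get(x, y)) {
      // pixel is black
    }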

git-svn-id: https://zxing.googlecode.com/svn/trunk@993 59b500cc-1b3d-0410-9834-0bbf25fbcc57
dswitkin 2009-06-26 17:49:45 +00:00
parent 54d65e315d
commit 167197ec7a
53 changed files with 1639 additions and 1642 deletions

View file

@ -16,18 +16,22 @@
package com.google.zxing.client.android;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.common.GlobalHistogramBinarizer;
import android.content.SharedPreferences;
import android.graphics.Rect;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.preference.PreferenceManager;
import android.util.Log;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import java.util.Hashtable;
import java.util.Vector;
@ -159,10 +163,12 @@ final class DecodeThread extends Thread {
long start = System.currentTimeMillis();
boolean success;
Result rawResult = null;
YUVMonochromeBitmapSource source = new YUVMonochromeBitmapSource(data, width, height,
CameraManager.get().getFramingRect());
Rect rect = CameraManager.get().getFramingRect();
YUVLuminanceSource source = new YUVLuminanceSource(data, width, height, rect.left, rect.top,
rect.width(), rect.height());
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
try {
rawResult = mMultiFormatReader.decodeWithState(source);
rawResult = mMultiFormatReader.decodeWithState(bitmap);
success = true;
} catch (ReaderException e) {
success = false;

View file

@ -0,0 +1,128 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.android;
import com.google.zxing.LuminanceSource;
import android.graphics.Bitmap;
/**
* This object extends LuminanceSource around an array of YUV data returned from the camera driver,
* with the option to crop to a rectangle within the full data. This can be used to exclude
* superfluous pixels around the perimeter and speed up decoding.
*
* @author dswitkin@google.com (Daniel Switkin)
*/
public final class YUVLuminanceSource extends LuminanceSource {
private final byte[] yuvData;
private final int dataWidth;
private final int dataHeight;
private final int left;
private final int top;
public YUVLuminanceSource(byte[] yuvData, int dataWidth, int dataHeight, int left, int top,
int width, int height) {
super(width, height);
if (left + width > dataWidth || top + height > dataHeight) {
throw new IllegalArgumentException("Crop rectangle does not fit within image data.");
}
this.yuvData = yuvData;
this.dataWidth = dataWidth;
this.dataHeight = dataHeight;
this.left = left;
this.top = top;
}
public byte[] getRow(int y, byte[] row) {
if (y < 0 || y >= getHeight()) {
throw new IllegalArgumentException("Requested row is outside the image: " + y);
}
int width = getWidth();
if (row == null || row.length < width) {
row = new byte[width];
}
int offset = (y + top) * dataWidth + left;
byte[] yuv = yuvData;
for (int x = 0; x < width; x++) {
row[x] = yuv[offset + x];
}
return row;
}
public byte[] getMatrix() {
int width = getWidth();
int height = getHeight();
// If the caller asks for the entire underlying image, save the copy and give them the
// original data. The docs specifically warn that result.length must be ignored.
if (width == dataWidth && height == dataHeight) {
return yuvData;
}
int area = width * height;
byte[] matrix = new byte[area];
byte[] yuv = yuvData;
int inputOffset = top * dataWidth + left;
for (int y = 0; y < height; y++) {
int outputOffset = y * width;
for (int x = 0; x < width; x++) {
// TODO: Compare performance with using System.arraycopy().
matrix[outputOffset + x] = yuv[inputOffset + x];
}
inputOffset += dataWidth;
}
return matrix;
}
public boolean isCropSupported() {
return true;
}
public LuminanceSource crop(int left, int top, int width, int height) {
return new YUVLuminanceSource(yuvData, dataWidth, dataHeight, left, top, width, height);
}
/**
* Creates a greyscale Android Bitmap from the YUV data based on the crop rectangle.
*
* @return An 8888 bitmap.
*/
public Bitmap renderToBitmap() {
int width = getWidth();
int height = getHeight();
int[] pixels = new int[width * height];
byte[] yuv = yuvData;
int inputOffset = top * dataWidth + left;
for (int y = 0; y < height; y++) {
int outputOffset = y * width;
for (int x = 0; x < width; x++) {
int grey = yuv[inputOffset + x] & 0xff;
pixels[outputOffset + x] = (0xff << 24) | (grey << 16) | (grey << 8) | grey;
}
inputOffset += dataWidth;
}
Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
bitmap.setPixels(pixels, 0, width, 0, 0, width, height);
return bitmap;
}
}

View file

@ -1,156 +0,0 @@
/*
* Copyright (C) 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.android;
import android.graphics.Bitmap;
import android.graphics.Rect;
import com.google.zxing.common.BaseMonochromeBitmapSource;
/**
* This object implements MonochromeBitmapSource around an array of YUV data, giving you the option
* to crop to a rectangle within the full data. This can be used to exclude superfluous pixels
* around the perimeter and speed up decoding.
*
* @author Sean Owen
* @author Daniel Switkin
*/
public final class YUVMonochromeBitmapSource extends BaseMonochromeBitmapSource {
private final byte[] mYUVData;
private final int mDataWidth;
private final int mCropTop;
private final int mCropLeft;
/**
* Builds an object around a YUV buffer from the camera. The image is not cropped.
*
* @param yuvData A byte array of planar Y data, followed by interleaved U and V
* @param dataWidth The width of the Y data
* @param dataHeight The height of the Y data
*/
public YUVMonochromeBitmapSource(byte[] yuvData, int dataWidth, int dataHeight) {
this(yuvData, dataWidth, dataHeight, 0, 0, dataHeight, dataWidth);
}
/**
* Builds an object around a YUV buffer from the camera. The image is cropped and only
* that part of the image is evaluated.
*
* @param yuvData A byte array of planar Y data, followed by interleaved U and V
* @param dataWidth The width of the Y data
* @param dataHeight The height of the Y data
* @param crop The rectangle within the yuvData to expose to MonochromeBitmapSource users
*/
public YUVMonochromeBitmapSource(byte[] yuvData, int dataWidth, int dataHeight, Rect crop) {
this(yuvData, dataWidth, dataHeight, crop.top, crop.left, crop.bottom, crop.right);
}
/**
* Builds an object around a YUV buffer from the camera. The image is cropped and only
* that part of the image is evaluated.
*
* @param yuvData A byte array of planar Y data, followed by interleaved U and V
* @param dataWidth The width of the Y data
* @param dataHeight The height of the Y data
* @param cropTop Top coordinate of rectangle to crop
* @param cropLeft Left coordinate of rectangle to crop
* @param cropBottom Bottom coordinate of rectangle to crop
* @param cropRight Right coordinate of rectangle to crop
*/
public YUVMonochromeBitmapSource(byte[] yuvData,
int dataWidth,
int dataHeight,
int cropTop,
int cropLeft,
int cropBottom,
int cropRight) {
super(cropRight - cropLeft, cropBottom - cropTop);
if (cropRight - cropLeft > dataWidth || cropBottom - cropTop > dataHeight) {
throw new IllegalArgumentException();
}
mYUVData = yuvData;
mDataWidth = dataWidth;
this.mCropTop = cropTop;
this.mCropLeft = cropLeft;
}
/**
* The Y channel is stored as planar data at the head of the array, so we just ignore the
* interleaved U and V which follow it.
*
* @param x The x coordinate to fetch within crop
* @param y The y coordinate to fetch within crop
* @return The luminance as an int, from 0-255
*/
@Override
public int getLuminance(int x, int y) {
return mYUVData[(y + mCropTop) * mDataWidth + x + mCropLeft] & 0xff;
}
@Override
public int[] getLuminanceRow(int y, int[] row) {
int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
int offset = (y + mCropTop) * mDataWidth + mCropLeft;
byte[] yuvData = mYUVData;
for (int x = 0; x < width; x++) {
row[x] = yuvData[offset + x] & 0xff;
}
return row;
}
@Override
public int[] getLuminanceColumn(int x, int[] column) {
int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}
int dataWidth = mDataWidth;
int offset = mCropTop * dataWidth + mCropLeft + x;
byte[] yuvData = mYUVData;
for (int y = 0; y < height; y++) {
column[y] = yuvData[offset] & 0xff;
offset += dataWidth;
}
return column;
}
/**
* Create a greyscale Android Bitmap from the YUV data based on the crop rectangle.
*
* @return An 8888 bitmap.
*/
public Bitmap renderToBitmap() {
int width = getWidth();
int height = getHeight();
int[] pixels = new int[width * height];
byte[] yuvData = mYUVData;
for (int y = 0, base = mCropTop * mDataWidth + mCropLeft; y < height; y++, base += mDataWidth) {
for (int x = 0; x < width; x++) {
int grey = yuvData[base + x] & 0xff;
pixels[y * width + x] = (0xff << 24) | (grey << 16) | (grey << 8) | grey;
}
}
Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
bitmap.setPixels(pixels, 0, width, 0, 0, width, height);
return bitmap;
}
}

View file

@ -16,17 +16,20 @@
package com.google.zxing.client.androidtest;
import android.os.Debug;
import android.os.Message;
import android.util.Log;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.common.GlobalHistogramBinarizer;
import android.os.Debug;
import android.os.Message;
import android.util.Log;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
final class BenchmarkThread extends Thread {
@ -75,9 +78,9 @@ final class BenchmarkThread extends Thread {
}
private BenchmarkItem decode(String path) {
RGBMonochromeBitmapSource source;
RGBLuminanceSource source;
try {
source = new RGBMonochromeBitmapSource(path);
source = new RGBLuminanceSource(path);
} catch (FileNotFoundException e) {
Log.e(TAG, e.toString());
return null;
@ -91,7 +94,8 @@ final class BenchmarkThread extends Thread {
// scheduling and what else is happening in the system.
long now = Debug.threadCpuTimeNanos();
try {
result = mMultiFormatReader.decodeWithState(source);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
result = mMultiFormatReader.decodeWithState(bitmap);
success = true;
} catch (ReaderException e) {
success = false;

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2008 Google Inc.
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,20 +16,75 @@
package com.google.zxing.client.androidtest;
import com.google.zxing.LuminanceSource;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import com.google.zxing.common.BaseMonochromeBitmapSource;
import java.io.FileNotFoundException;
public final class RGBMonochromeBitmapSource extends BaseMonochromeBitmapSource {
/**
* This class is used to help decode images from files which arrive as RGB data from
* Android bitmaps. It does not support cropping or rotation.
*
* @author dswitkin@google.com (Daniel Switkin)
*/
public final class RGBLuminanceSource extends LuminanceSource {
private final byte[] mLuminances;
private final byte[] luminances;
public RGBMonochromeBitmapSource(String path) throws FileNotFoundException {
public RGBLuminanceSource(String path) throws FileNotFoundException {
this(loadBitmap(path));
}
public RGBLuminanceSource(Bitmap bitmap) {
super(bitmap.getWidth(), bitmap.getHeight());
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int[] pixels = new int[width * height];
bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
// In order to measure pure decoding speed, we convert the entire image to a greyscale array
// up front, which is the same as the Y channel of the YUVLuminanceSource in the real app.
luminances = new byte[width * height];
for (int y = 0; y < height; y++) {
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = pixels[offset + x];
int r = (pixel >> 16) & 0xff;
int g = (pixel >> 8) & 0xff;
int b = pixel & 0xff;
if (r == g && g == b) {
// Image is already greyscale, so pick any channel.
luminances[offset + x] = (byte) r;
} else {
// Calculate luminance cheaply, favoring green.
luminances[offset + x] = (byte) ((r + g + g + b) >> 2);
}
}
}
}
public byte[] getRow(int y, byte[] row) {
if (y < 0 || y >= getHeight()) {
throw new IllegalArgumentException("Requested row is outside the image: " + y);
}
int width = getWidth();
if (row == null || row.length < width) {
row = new byte[width];
}
System.arraycopy(luminances, y * width, row, 0, width);
return row;
}
// Since this class does not support cropping, the underlying byte array already contains
// exactly what the caller is asking for, so give it to them without a copy.
public byte[] getMatrix() {
return luminances;
}
private static Bitmap loadBitmap(String path) throws FileNotFoundException {
Bitmap bitmap = BitmapFactory.decodeFile(path);
if (bitmap == null) {
@ -38,65 +93,4 @@ public final class RGBMonochromeBitmapSource extends BaseMonochromeBitmapSource
return bitmap;
}
public RGBMonochromeBitmapSource(Bitmap bitmap) {
super(bitmap.getWidth(), bitmap.getHeight());
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int[] pixels = new int[width * height];
bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
// In order to measure pure decoding speed, we convert the entire image to a greyscale array up
// front, which is the same as the Y channel of the YUVMonochromeBitmapSource in the real app.
mLuminances = new byte[width * height];
for (int y = 0; y < height; y++) {
int offset = y * height;
for (int x = 0; x < width; x++) {
int pixel = pixels[offset + x];
int r = (pixel >> 16) & 0xff;
int g = (pixel >> 8) & 0xff;
int b = pixel & 0xff;
if (r == g && g == b) {
// Image is already greyscale, so pick any channel
mLuminances[offset + x] = (byte) r;
} else {
// Calculate luminance cheaply, favoring green
mLuminances[offset + x] = (byte) ((r + g + g + b) >> 2);
}
}
}
}
@Override
public int getLuminance(int x, int y) {
return mLuminances[y * getWidth() + x] & 0xff;
}
@Override
public int[] getLuminanceRow(int y, int[] row) {
int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
int offset = y * width;
for (int x = 0; x < width; x++) {
row[x] = mLuminances[offset + x] & 0xff;
}
return row;
}
@Override
public int[] getLuminanceColumn(int x, int[] column) {
int width = getWidth();
int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}
int offset = x;
for (int y = 0; y < height; y++) {
column[y] = mLuminances[offset] & 0xff;
offset += width;
}
return column;
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright 2008 ZXing authors
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,30 +16,32 @@
package com.google.zxing.client.bug;
import com.google.zxing.LuminanceSource;
import com.google.zxing.ReaderException;
import com.google.zxing.common.BaseMonochromeBitmapSource;
import java.awt.Image;
import java.awt.image.PixelGrabber;
/**
* <p>An implementation based on AWT's {@link Image} representation.
* This can be used on CDC devices or other devices that do not have access to the
* Mobile Information Device Profile and thus do not have access to
* javax.microedition.lcdui.Image.</p>
* An implementation based on AWT's Image representation. This can be used on CDC devices
* or other devices that do not have access to the Mobile Information Device Profile
* and thus do not have access to javax.microedition.lcdui.Image.
*
* @author dswitkin@google.com (Daniel Switkin)
* @author David Albert
* @author Sean Owen
*/
public final class AWTImageMonochromeBitmapSource extends BaseMonochromeBitmapSource {
public final class AWTImageLuminanceSource extends LuminanceSource {
private final int[] pixels;
public AWTImageMonochromeBitmapSource(Image image) throws ReaderException {
public AWTImageLuminanceSource(Image image) throws ReaderException {
super(image.getWidth(null), image.getHeight(null));
int height = getHeight();
int width = getWidth();
pixels = new int[height * width];
int height = getHeight();
pixels = new int[width * height];
// Seems best in this situation to grab all pixels upfront. Grabbing any individual pixel
// entails creating a relatively expensive object and calling through several methods.
PixelGrabber grabber = new PixelGrabber(image, 0, 0, width, height, pixels, 0, width);
@ -50,47 +52,43 @@ public final class AWTImageMonochromeBitmapSource extends BaseMonochromeBitmapSo
}
}
/**
* See <code>com.google.zxing.client.j2me.LCDUIImageMonochromeBitmapSource</code> for more explanation
* of the computation used in this method.
*/
public int getLuminance(int x, int y) {
int pixel = pixels[y * getWidth() + x];
return (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
}
public int[] getLuminanceRow(int y, int[] row) {
public byte[] getRow(int y, byte[] row) {
if (y < 0 || y >= getHeight()) {
throw new IllegalArgumentException("Requested row is outside the image: " + y);
}
int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
row = new byte[width];
}
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = pixels[offset + x];
row[x] = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
int luminance = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
row[x] = (byte) luminance;
}
return row;
}
public int[] getLuminanceColumn(int x, int[] column) {
int height = getHeight();
public byte[] getMatrix() {
int width = getWidth();
if (column == null || column.length < height) {
column = new int[height];
}
int offset = x;
int height = getHeight();
int area = width * height;
byte[] matrix = new byte[area];
for (int y = 0; y < height; y++) {
int pixel = pixels[offset];
column[y] = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
offset += width;
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = pixels[offset + x];
int luminance = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
matrix[offset + x] = (byte) luminance;
}
}
return column;
return matrix;
}
}

View file

@ -16,25 +16,23 @@
package com.google.zxing.client.bug.app;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.bug.AWTImageLuminanceSource;
import com.google.zxing.client.bug.ImageCanvas;
import com.google.zxing.common.GlobalHistogramBinarizer;
import com.buglabs.bug.module.camera.pub.ICameraDevice;
import com.buglabs.bug.module.camera.pub.ICameraModuleControl;
import com.buglabs.device.ButtonEvent;
import com.buglabs.device.IButtonEventListener;
import com.buglabs.device.IButtonEventProvider;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.bug.AWTImageMonochromeBitmapSource;
import com.google.zxing.client.bug.ImageCanvas;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Frame;
import java.awt.Image;
import java.awt.Label;
import java.awt.Toolkit;
import java.awt.*;
import java.awt.image.ImageObserver;
import java.io.IOException;
@ -43,80 +41,82 @@ import java.io.IOException;
*/
public final class BugBarcodeApp implements IButtonEventListener, ImageObserver {
private final ICameraDevice camera;
private final ICameraDevice camera;
private final ICameraModuleControl cameraControl;
private final Frame frame;
private Image image;
private ImageCanvas imageCanvas;
private Label barcodeLabel;
private boolean pictureTaken;
private final Reader reader;
private final Frame frame;
private Image image;
private ImageCanvas imageCanvas;
private Label barcodeLabel;
private boolean pictureTaken;
private final Reader reader;
public BugBarcodeApp(Frame frame,
ICameraDevice camera,
ICameraModuleControl cameraControl,
IButtonEventProvider buttonProvider) {
this.frame = frame;
this.camera = camera;
this.reader = new MultiFormatReader();
this.cameraControl = cameraControl;
pictureTaken = false;
buttonProvider.addListener(this);
createUI();
}
private void createUI() {
frame.setTitle("BugBarcode");
frame.setBackground(Color.WHITE);
frame.setLayout(new BorderLayout());
barcodeLabel = new Label("Take a picture of a barcode!", Label.CENTER);
frame.add(barcodeLabel, BorderLayout.SOUTH);
imageCanvas = new ImageCanvas(null);
frame.setVisible(true);
}
private void shoot() throws IOException {
// get image from camera for use with physical bug
cameraControl.setLEDFlash(true);
image = Toolkit.getDefaultToolkit().createImage(camera.getImage()).getScaledInstance(400, 300, Image.SCALE_FAST);
cameraControl.setLEDFlash(false);
if (Toolkit.getDefaultToolkit().prepareImage(image, -1, -1, this)) {
drawAndScan();
}
}
private void drawAndScan() {
imageCanvas.setImage(image.getScaledInstance(216, 150, Image.SCALE_FAST));
if (!pictureTaken) {
frame.add(imageCanvas, BorderLayout.CENTER);
pictureTaken = true;
frame.setVisible(true);
}
imageCanvas.repaint();
try {
MonochromeBitmapSource source = new AWTImageMonochromeBitmapSource(image);
Result result = reader.decode(source);
barcodeLabel.setText(result.getText());
} catch (ReaderException re) {
barcodeLabel.setText("I can't find a barcode here");
}
}
public void buttonEvent(ButtonEvent event) {
if (event.getButton() == ButtonEvent.BUTTON_HOTKEY_1 && event.getAction() == 0) {
ICameraDevice camera,
ICameraModuleControl cameraControl,
IButtonEventProvider buttonProvider) {
this.frame = frame;
this.camera = camera;
this.reader = new MultiFormatReader();
this.cameraControl = cameraControl;
pictureTaken = false;
buttonProvider.addListener(this);
createUI();
}
private void createUI() {
frame.setTitle("BugBarcode");
frame.setBackground(Color.WHITE);
frame.setLayout(new BorderLayout());
barcodeLabel = new Label("Take a picture of a barcode!", Label.CENTER);
frame.add(barcodeLabel, BorderLayout.SOUTH);
imageCanvas = new ImageCanvas(null);
frame.setVisible(true);
}
private void shoot() throws IOException {
// get image from camera for use with physical bug
cameraControl.setLEDFlash(true);
image = Toolkit.getDefaultToolkit().createImage(camera.getImage()).getScaledInstance(400, 300,
Image.SCALE_FAST);
cameraControl.setLEDFlash(false);
if (Toolkit.getDefaultToolkit().prepareImage(image, -1, -1, this)) {
drawAndScan();
}
}
private void drawAndScan() {
imageCanvas.setImage(image.getScaledInstance(216, 150, Image.SCALE_FAST));
if (!pictureTaken) {
frame.add(imageCanvas, BorderLayout.CENTER);
pictureTaken = true;
frame.setVisible(true);
}
imageCanvas.repaint();
try {
LuminanceSource source = new AWTImageLuminanceSource(image);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
Result result = reader.decode(bitmap);
barcodeLabel.setText(result.getText());
} catch (ReaderException re) {
barcodeLabel.setText("I can't find a barcode here");
}
}
public void buttonEvent(ButtonEvent event) {
if (event.getButton() == ButtonEvent.BUTTON_HOTKEY_1 && event.getAction() == 0) {
try {
shoot();
} catch (IOException ioe) {
// continue
}
}
}
}
public boolean imageUpdate(Image img, int infoflags, int x, int y, int width, int height) {
if ((infoflags & ALLBITS) != 0) {
public boolean imageUpdate(Image img, int infoflags, int x, int y, int width, int height) {
if ((infoflags & ALLBITS) != 0) {
drawAndScan();
return false;
}
return true;
}
}
return true;
}
}

View file

@ -0,0 +1,80 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing;
import com.google.zxing.common.BitArray;
import com.google.zxing.common.BitMatrix;
/**
* This class hierarchy provides a set of methods to convert luminance data to 1 bit data.
* It allows the algorithm to vary polymorphically, for example allowing a very expensive
* thresholding technique for servers and a fast one for mobile. It also permits the implementation
* to vary, e.g. a JNI version for Android and a Java fallback version for other platforms.
*
* @author dswitkin@google.com (Daniel Switkin)
*/
public abstract class Binarizer {
private final LuminanceSource source;
public Binarizer(LuminanceSource source) {
if (source == null) {
throw new IllegalArgumentException("Source must be non-null.");
}
this.source = source;
}
public LuminanceSource getLuminanceSource() {
return source;
}
/**
* Converts one row of luminance data to 1 bit data. May actually do the conversion, or return
* cached data. Callers should assume this method is expensive and call it as seldom as possible.
* This method is intended for decoding 1D barcodes and may choose to apply sharpening.
* For callers which only examine one row of pixels at a time, the same BitArray should be reused
* and passed in with each call for performance. However it is legal to keep more than one row
* at a time if needed.
*
* @param y The row to fetch, 0 <= y < bitmap height.
* @param row An optional preallocated array. If null or too small, it will be ignored.
* If used, the Binarizer will call BitArray.clear(). Always use the returned object.
* @return The array of bits for this row (true means black).
*/
public abstract BitArray getBlackRow(int y, BitArray row) throws ReaderException;
/**
* Converts a 2D array of luminance data to 1 bit data. As above, assume this method is expensive
* and do not call it repeatedly. This method is intended for decoding 2D barcodes and may or
* may not apply sharpening. Therefore, a row from this matrix may not be identical to one
* fetched using getBlackRow(), so don't mix and match between them.
*
* @return The 2D array of bits for the image (true means black).
*/
public abstract BitMatrix getBlackMatrix() throws ReaderException;
/**
* Creates a new object with the same type as this Binarizer implementation, but with pristine
* state. This is needed because Binarizer implementations may be stateful, e.g. keeping a cache
* of 1 bit data. See Effective Java for why we can't use Java's clone() method.
*
* @param source The LuminanceSource this Binarizer will operate on.
* @return A new concrete Binarizer implementation object.
*/
public abstract Binarizer createBinarizer(LuminanceSource source);
}
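
The createBinarizer() contract above is what lets BinaryBitmap.crop() and rotateCounterClockwise() rebuild themselves over a new LuminanceSource. A concrete implementation is expected to simply return a fresh instance of its own type; a sketch of how GlobalHistogramBinarizer would satisfy it (the real class lives in com.google.zxing.common and may differ in detail):

    public Binarizer createBinarizer(LuminanceSource source) {
      // No state is carried over; the new instance starts with pristine caches.
      return new GlobalHistogramBinarizer(source);
    }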

View file

@ -0,0 +1,189 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing;
import com.google.zxing.common.BitArray;
import com.google.zxing.common.BitMatrix;
/**
* This class is the core bitmap class used by ZXing to represent 1 bit data. Reader objects
* accept a BinaryBitmap and attempt to decode it.
*
* @author dswitkin@google.com (Daniel Switkin)
*/
public final class BinaryBitmap {
private final Binarizer binarizer;
private BitMatrix matrix;
public BinaryBitmap(Binarizer binarizer) {
if (binarizer == null) {
throw new IllegalArgumentException("Binarizer must be non-null.");
}
this.binarizer = binarizer;
matrix = null;
}
/**
* @return The width of the bitmap.
*/
public int getWidth() {
return binarizer.getLuminanceSource().getWidth();
}
/**
* @return The height of the bitmap.
*/
public int getHeight() {
return binarizer.getLuminanceSource().getHeight();
}
/**
* Converts one row of luminance data to 1 bit data. May actually do the conversion, or return
* cached data. Callers should assume this method is expensive and call it as seldom as possible.
* This method is intended for decoding 1D barcodes and may choose to apply sharpening.
*
* @param y The row to fetch, 0 <= y < bitmap height.
* @param row An optional preallocated array. If null or too small, it will be ignored.
* If used, the Binarizer will call BitArray.clear(). Always use the returned object.
* @return The array of bits for this row (true means black).
*/
public BitArray getBlackRow(int y, BitArray row) throws ReaderException {
return binarizer.getBlackRow(y, row);
}
/**
* Converts a 2D array of luminance data to 1 bit. As above, assume this method is expensive
* and do not call it repeatedly. This method is intended for decoding 2D barcodes and may or
* may not apply sharpening. Therefore, a row from this matrix may not be identical to one
* fetched using getBlackRow(), so don't mix and match between them.
*
* @return The 2D array of bits for the image (true means black).
*/
public BitMatrix getBlackMatrix() throws ReaderException {
// The matrix is created on demand the first time it is requested, then cached. There are two
// reasons for this:
// 1. This work will never be done if the caller only installs 1D Reader objects.
// 2. This work will only be done once even if the caller installs multiple 2D Readers.
if (matrix == null) {
matrix = binarizer.getBlackMatrix();
}
return matrix;
}
/**
* @return Whether this bitmap can be cropped.
*/
public boolean isCropSupported() {
return binarizer.getLuminanceSource().isCropSupported();
}
/**
* Returns a new object with cropped image data. Implementations may keep a reference to the
* original data rather than a copy. Only callable if isCropSupported() is true.
*
* @param left The left coordinate, 0 <= left < getWidth().
* @param top The top coordinate, 0 <= top <= getHeight().
* @param width The width of the rectangle to crop.
* @param height The height of the rectangle to crop.
* @return A cropped version of this object.
*/
public BinaryBitmap crop(int left, int top, int width, int height) {
LuminanceSource newSource = binarizer.getLuminanceSource().crop(left, top, width, height);
return new BinaryBitmap(binarizer.createBinarizer(newSource));
}
/**
* @return Whether this bitmap supports counter-clockwise rotation.
*/
public boolean isRotateSupported() {
return binarizer.getLuminanceSource().isRotateSupported();
}
/**
* Returns a new object with rotated image data. Only callable if isRotateSupported() is true.
*
* @return A rotated version of this object.
*/
public BinaryBitmap rotateCounterClockwise() {
LuminanceSource newSource = binarizer.getLuminanceSource().rotateCounterClockwise();
return new BinaryBitmap(binarizer.createBinarizer(newSource));
}
// FIXME: REMOVE!
// These three methods are TEMPORARY and should be removed by the end of July 2009.
// They are only here so the transition from MonochromeBitmapSource to BinaryBitmap
// can be done in stages. We need to go through all the Reader objects and convert
// these calls to getBlackRow() and getBlackMatrix() at the top of this file.
//
// TIP: Replace calls to isBlack() with a single call to getBlackMatrix(), then call
// BitMatrix.get(x, y) per pixel.
public boolean isBlack(int x, int y) throws ReaderException {
if (matrix == null) {
matrix = binarizer.getBlackMatrix();
}
return matrix.get(x, y);
}
// FIXME: REMOVE!
//
// TIP: 2D Readers should replace uses of this method with a single call to getBlackMatrix(),
// then perform random access on that BitMatrix as needed. The version of getBlackRow() with
// two arguments is only meant for 1D Readers, which I've already converted.
public BitArray getBlackRow(int y, BitArray row, int startX, int getWidth)
throws ReaderException {
if (row == null || row.getSize() < getWidth) {
row = new BitArray(getWidth);
} else {
row.clear();
}
if (matrix == null) {
matrix = binarizer.getBlackMatrix();
}
for (int x = 0; x < getWidth; x++) {
if (matrix.get(startX + x, y)) {
row.set(x);
}
}
return row;
}
// FIXME: REMOVE!
//
// TIP: Replace calls to getBlackColumn() with a single call to getBlackMatrix(), then
// perform random access on that BitMatrix as needed.
public BitArray getBlackColumn(int x, BitArray column, int startY, int getHeight)
throws ReaderException {
if (column == null || column.getSize() < getHeight) {
column = new BitArray(getHeight);
} else {
column.clear();
}
if (matrix == null) {
matrix = binarizer.getBlackMatrix();
}
for (int y = 0; y < getHeight; y++) {
if (matrix.get(x, startY + y)) {
column.set(y);
}
}
return column;
}
}

View file

@ -1,40 +0,0 @@
/*
* Copyright 2007 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing;
/**
* <p>Enumerates different methods of sampling an image to estimate a black point.</p>
*
* @author Sean Owen
* @author dswitkin@google.com (Daniel Switkin)
*/
public final class BlackPointEstimationMethod {
/**
* Method probably most suitable for use with 2D barcode formats.
*/
public static final BlackPointEstimationMethod TWO_D_SAMPLING = new BlackPointEstimationMethod();
/**
* Method probably most suitable for 1D barcode decoding, where one row at a time is sampled.
*/
public static final BlackPointEstimationMethod ROW_SAMPLING = new BlackPointEstimationMethod();
private BlackPointEstimationMethod() {
// do nothing
}
}

View file

@ -23,7 +23,7 @@ package com.google.zxing;
*
* @author Sean Owen
* @author dswitkin@google.com (Daniel Switkin)
* @see Reader#decode(MonochromeBitmapSource, java.util.Hashtable)
* @see Reader#decode(BinaryBitmap,java.util.Hashtable)
*/
public final class DecodeHintType {

View file

@ -0,0 +1,113 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing;
/**
* The purpose of this class hierarchy is to abstract different bitmap implementations across
* platforms into a standard interface for requesting greyscale luminance values. The interface
* only provides immutable methods; therefore crop and rotation create copies. This is to ensure
* that one Reader does not modify the original luminance source and leave it in an unknown state
* for other Readers in the chain.
*
* @author dswitkin@google.com (Daniel Switkin)
*/
public abstract class LuminanceSource {
private final int width;
private final int height;
public LuminanceSource(int width, int height) {
this.width = width;
this.height = height;
}
/**
* Fetches one row of luminance data from the underlying platform's bitmap. Values range from
* 0 (black) to 255 (white). Because Java does not have an unsigned byte type, callers will have
* to bitwise and with 0xff for each value. It is preferable for implementations of this method
* to fetch only this row rather than the whole image, since there may be no 2D Readers installed and
* getMatrix() may never be called.
*
* @param y The row to fetch, 0 <= y < getHeight().
* @param row An optional preallocated array. If null or too small, it will be ignored.
* Always use the returned object, and ignore the .length of the array.
* @return An array containing the luminance data.
*/
public abstract byte[] getRow(int y, byte[] row);
/**
* Fetches luminance data for the underlying bitmap. Values should be fetched using:
* int luminance = array[y * width + x] & 0xff;
*
* @return A row-major 2D array of luminance values. Do not use result.length as it may be
* larger than width * height bytes on some platforms. Do not modify the contents
* of the result.
*/
public abstract byte[] getMatrix();
/**
* @return The width of the bitmap.
*/
public final int getWidth() {
return width;
}
/**
* @return The height of the bitmap.
*/
public final int getHeight() {
return height;
}
/**
* @return Whether this subclass supports cropping.
*/
public boolean isCropSupported() {
return false;
}
/**
* Returns a new object with cropped image data. Implementations may keep a reference to the
* original data rather than a copy. Only callable if isCropSupported() is true.
*
* @param left The left coordinate, 0 <= left < getWidth().
* @param top The top coordinate, 0 <= top <= getHeight().
* @param width The width of the rectangle to crop.
* @param height The height of the rectangle to crop.
* @return A cropped version of this object.
*/
public LuminanceSource crop(int left, int top, int width, int height) {
throw new RuntimeException("This luminance source does not support cropping.");
}
/**
* @return Whether this subclass supports counter-clockwise rotation.
*/
public boolean isRotateSupported() {
return false;
}
/**
* Returns a new object with rotated image data. Only callable if isRotateSupported() is true.
*
* @return A rotated version of this object.
*/
public LuminanceSource rotateCounterClockwise() {
throw new RuntimeException("This luminance source does not support rotation.");
}
}
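
To show what a minimal platform port needs to provide, here is a hedged sketch of a LuminanceSource over an already-greyscale, row-major byte array (this class is hypothetical and not part of this change; it only exercises the abstract API above):

    import com.google.zxing.LuminanceSource;

    public final class GreyscaleLuminanceSource extends LuminanceSource {
      private final byte[] luminances;

      public GreyscaleLuminanceSource(byte[] luminances, int width, int height) {
        super(width, height);
        if (luminances.length < width * height) {
          throw new IllegalArgumentException("Buffer too small for the given dimensions.");
        }
        this.luminances = luminances;
      }

      public byte[] getRow(int y, byte[] row) {
        if (y < 0 || y >= getHeight()) {
          throw new IllegalArgumentException("Requested row is outside the image: " + y);
        }
        int width = getWidth();
        if (row == null || row.length < width) {
          row = new byte[width];
        }
        System.arraycopy(luminances, y * width, row, 0, width);
        return row;
      }

      // The buffer already contains exactly width * height luminance bytes,
      // so it can be returned without a copy.
      public byte[] getMatrix() {
        return luminances;
      }
    }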

View file

@ -1,139 +0,0 @@
/*
* Copyright 2007 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing;
import com.google.zxing.common.BitArray;
/**
* <p>Encapsulates a generic black-and-white bitmap -- a collection of pixels in two dimensions.
* This unifies many possible representations, like AWT's <code>BufferedImage</code>.</p>
*
* @author Sean Owen
* @author dswitkin@google.com (Daniel Switkin)
*/
public interface MonochromeBitmapSource {
/**
* @param x horizontal offset, from left, of the pixel
* @param y vertical offset, from top, of the pixel
* @return true iff the pixel at (x,y) is black
*/
boolean isBlack(int x, int y);
/**
* <p>Returns an entire row of black/white pixels as an array of bits, where "true" means "black".
* This is a sort of "bulk get" operation intended to enable efficient access in
* certain situations.</p>
*
* @param y vertical offset, from top, of the row of pixels
* @param row if not null, {@link BitArray} to write pixels into. If null, a new {@link BitArray}
* is allocated and returned.
* @param startX horizontal offset, from left, from which to start getting pixels
* @param getWidth number of pixels to get from the row
* @return {@link BitArray} representing the (subset of the) row of pixels. If row parameter
* was not null, it is returned.
*/
BitArray getBlackRow(int y, BitArray row, int startX, int getWidth);
/**
* Entirely analogous to {@link #getBlackRow(int, BitArray, int, int)} but gets a column.
*/
BitArray getBlackColumn(int x, BitArray column, int startY, int getHeight);
BitArray getBlackDiagonal(int x, int y, int dx, int dy, BitArray diagonal, int size);
/**
* @return height of underlying image
*/
int getHeight();
/**
* @return width of underlying image
*/
int getWidth();
/**
* <p>Estimates black point according to the given method, which is optionally parameterized by
* a single int argument. For {@link BlackPointEstimationMethod#ROW_SAMPLING}, this
* specifies the row to sample.</p>
*
* <p>The estimated value will be used in subsequent computations that rely on an estimated black
* point.</p>
*
* @param method black point estimation method
* @param argument method-specific argument
*/
void estimateBlackPoint(BlackPointEstimationMethod method, int argument) throws ReaderException;
/**
* @return {@link BlackPointEstimationMethod} representing last sampling method used
*/
BlackPointEstimationMethod getLastEstimationMethod();
/**
* <p>Optional operation which returns an implementation based on the same underlying
* image, but which behaves as if the underlying image had been rotated 90 degrees
* counterclockwise. This is useful in the context of 1D barcodes and the
* {@link DecodeHintType#TRY_HARDER} decode hint, and is only intended to be
* used in non-resource-constrained environments. Hence, implementations
* of this class which are only used in resource-constrained mobile environments
* don't have a need to implement this.</p>
*
* @throws IllegalArgumentException if not supported
*/
MonochromeBitmapSource rotateCounterClockwise();
/**
* @return true iff rotation is supported
* @see #rotateCounterClockwise()
*/
boolean isRotateSupported();
/**
* Retrieves the luminance at the pixel x,y in the bitmap. This method is only used for estimating
* the black point and implementing getBlackRow() - it is not meant for decoding, hence it is not
* part of MonochromeBitmapSource itself, and is protected.
*
* @param x The x coordinate in the image.
* @param y The y coordinate in the image.
* @return The luminance value between 0 and 255.
*/
int getLuminance(int x, int y);
/**
* This is the main mechanism for retrieving luminance data. It is dramatically more efficient
* than repeatedly calling getLuminance(). As above, this is not meant for decoders.
*
* @param y The row to fetch
* @param row The array to write luminance values into. It is <b>strongly</b> suggested that you
* allocate this yourself, making sure row.length >= getWidth(), and reuse the same
* array on subsequent calls for performance. If you pass null, you will be flogged,
* but then I will take pity on you and allocate a sufficient array internally.
* @return The array containing the luminance data. This is the same as row if it was usable.
*/
int[] getLuminanceRow(int y, int[] row);
/**
* The same as getLuminanceRow(), but for columns.
*
* @param x The column to fetch
* @param column The array to write luminance values into. See above.
* @return The array containing the luminance data.
*/
int[] getLuminanceColumn(int x, int[] column);
}

View file

@ -38,7 +38,7 @@ public final class MultiFormatReader implements Reader {
private Vector readers;
/**
* This version of decode honors the intent of Reader.decode(MonochromeBitmapSource) in that it
* This version of decode honors the intent of Reader.decode(BinaryBitmap) in that it
* passes null as a hint to the decoders. However, that makes it inefficient to call repeatedly.
* Use setHints() followed by decodeWithState() for continuous scan applications.
*
@ -46,7 +46,7 @@ public final class MultiFormatReader implements Reader {
* @return The contents of the image
* @throws ReaderException Any errors which occurred
*/
public Result decode(MonochromeBitmapSource image) throws ReaderException {
public Result decode(BinaryBitmap image) throws ReaderException {
setHints(null);
return decodeInternal(image);
}
@ -59,7 +59,7 @@ public final class MultiFormatReader implements Reader {
* @return The contents of the image
* @throws ReaderException Any errors which occurred
*/
public Result decode(MonochromeBitmapSource image, Hashtable hints) throws ReaderException {
public Result decode(BinaryBitmap image, Hashtable hints) throws ReaderException {
setHints(hints);
return decodeInternal(image);
}
@ -72,7 +72,7 @@ public final class MultiFormatReader implements Reader {
* @return The contents of the image
* @throws ReaderException Any errors which occurred
*/
public Result decodeWithState(MonochromeBitmapSource image) throws ReaderException {
public Result decodeWithState(BinaryBitmap image) throws ReaderException {
// Make sure to set up the default state so we don't crash
if (readers == null) {
setHints(null);
@ -101,8 +101,7 @@ public final class MultiFormatReader implements Reader {
formats.contains(BarcodeFormat.EAN_8) ||
formats.contains(BarcodeFormat.CODE_39) ||
formats.contains(BarcodeFormat.CODE_128) ||
formats.contains(BarcodeFormat.ITF) ||
formats.contains(BarcodeFormat.PDF417);
formats.contains(BarcodeFormat.ITF);
// Put 1D readers upfront in "normal" mode
if (addOneDReader && !tryHarder) {
readers.addElement(new MultiFormatOneDReader(hints));
@ -139,7 +138,7 @@ public final class MultiFormatReader implements Reader {
}
}
private Result decodeInternal(MonochromeBitmapSource image) throws ReaderException {
private Result decodeInternal(BinaryBitmap image) throws ReaderException {
int size = readers.size();
for (int i = 0; i < size; i++) {
Reader reader = (Reader) readers.elementAt(i);

View file

@ -39,19 +39,19 @@ public interface Reader {
* @return String which the barcode encodes
* @throws ReaderException if the barcode cannot be located or decoded for any reason
*/
Result decode(MonochromeBitmapSource image) throws ReaderException;
Result decode(BinaryBitmap image) throws ReaderException;
/**
* Locates and decodes a barcode in some format within an image. This method also accepts
* hints, each possibly associated to some data, which may help the implementation decode.
*
* @param image image of barcode to decode
* @param hints passed as a {@link Hashtable} from {@link DecodeHintType} to aribtrary data. The
* @param hints passed as a {@link java.util.Hashtable} from {@link com.google.zxing.DecodeHintType} to arbitrary data. The
* meaning of the data depends upon the hint type. The implementation may or may not do
* anything with these hints.
* @return String which the barcode encodes
* @throws ReaderException if the barcode cannot be located or decoded for any reason
*/
Result decode(MonochromeBitmapSource image, Hashtable hints) throws ReaderException;
Result decode(BinaryBitmap image, Hashtable hints) throws ReaderException;
}

View file

@ -1,205 +0,0 @@
/*
* Copyright 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.common;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.ReaderException;
/**
* @author dswitkin@google.com (Daniel Switkin)
*/
public abstract class BaseMonochromeBitmapSource implements MonochromeBitmapSource {
private final int height;
private final int width;
private int blackPoint;
private BlackPointEstimationMethod lastMethod;
private int lastArgument;
private int[] luminances = null;
protected BaseMonochromeBitmapSource(int width, int height) {
this.height = height;
this.width = width;
blackPoint = 0x7F;
lastMethod = null;
lastArgument = 0;
}
private void initLuminances() {
if (luminances == null) {
int max = width > height ? width : height;
luminances = new int[max];
}
}
public boolean isBlack(int x, int y) {
return getLuminance(x, y) < blackPoint;
}
public BitArray getBlackRow(int y, BitArray row, int startX, int getWidth) {
if (row == null || row.getSize() < getWidth) {
row = new BitArray(getWidth);
} else {
row.clear();
}
// Reuse the same int array each time
initLuminances();
int[] localLuminances = getLuminanceRow(y, luminances);
// If the current decoder calculated the blackPoint based on one row, assume we're trying to
// decode a 1D barcode, and apply some sharpening.
if (lastMethod.equals(BlackPointEstimationMethod.ROW_SAMPLING)) {
int left = localLuminances[startX];
int center = localLuminances[startX + 1];
for (int x = 1; x < getWidth - 1; x++) {
int right = localLuminances[startX + x + 1];
// Simple -1 4 -1 box filter with a weight of 2
int luminance = ((center << 2) - left - right) >> 1;
if (luminance < blackPoint) {
row.set(x);
}
left = center;
center = right;
}
} else {
for (int x = 0; x < getWidth; x++) {
if (localLuminances[startX + x] < blackPoint) {
row.set(x);
}
}
}
return row;
}
public BitArray getBlackColumn(int x, BitArray column, int startY, int getHeight) {
if (column == null || column.getSize() < getHeight) {
column = new BitArray(getHeight);
} else {
column.clear();
}
// Reuse the same int array each time
initLuminances();
int[] localLuminances = getLuminanceColumn(x, luminances);
// We don't handle "row sampling" specially here
for (int y = 0; y < getHeight; y++) {
if (localLuminances[startY + y] < blackPoint) {
column.set(y);
}
}
return column;
}
public BitArray getBlackDiagonal(int x, int y, int dx, int dy, BitArray diagonal, int size) {
if (diagonal == null || diagonal.getSize() < size) {
diagonal = new BitArray(size);
} else {
diagonal.clear();
}
for (int i = 0; i < size; i++) {
if (isBlack(x, y)) {
diagonal.set(i);
}
x += dx;
y += dy;
}
return diagonal;
}
public void estimateBlackPoint(BlackPointEstimationMethod method, int argument)
throws ReaderException {
if (!method.equals(lastMethod) || argument != lastArgument) {
blackPoint = BlackPointEstimator.estimate(this, method, argument);
lastMethod = method;
lastArgument = argument;
}
}
public BlackPointEstimationMethod getLastEstimationMethod() {
return lastMethod;
}
public MonochromeBitmapSource rotateCounterClockwise() {
throw new IllegalArgumentException("Rotate not supported");
}
public boolean isRotateSupported() {
return false;
}
public final int getHeight() {
return height;
}
public final int getWidth() {
return width;
}
public String toString() {
StringBuffer result = new StringBuffer(height * (width + 1));
BitArray row = new BitArray(width);
for (int i = 0; i < height; i++) {
row = getBlackRow(i, row, 0, width);
for (int j = 0; j < width; j++) {
result.append(row.get(j) ? "X " : " ");
}
result.append('\n');
}
return result.toString();
}
// These methods below should not need to exist because they are defined in the interface that
// this abstract class implements. However this seems to cause problems on some Nokias.
// So we write these redundant declarations.
/**
* Retrieves the luminance at the pixel x,y in the bitmap. This method is only used for estimating
* the black point and implementing getBlackRow() - it is not meant for decoding, hence it is not
* part of MonochromeBitmapSource itself, and is protected.
*
* @param x The x coordinate in the image.
* @param y The y coordinate in the image.
* @return The luminance value between 0 and 255.
*/
public abstract int getLuminance(int x, int y);
/**
* This is the main mechanism for retrieving luminance data. It is dramatically more efficient
* than repeatedly calling getLuminance(). As above, this is not meant for decoders.
*
* @param y The row to fetch
* @param row The array to write luminance values into. It is <b>strongly</b> suggested that you
* allocate this yourself, making sure row.length >= getWidth(), and reuse the same
* array on subsequent calls for performance. If you pass null, you will be flogged,
* but then I will take pity on you and allocate a sufficient array internally.
* @return The array containing the luminance data. This is the same as row if it was usable.
*/
public abstract int[] getLuminanceRow(int y, int[] row);
/**
* The same as getLuminanceRow(), but for columns.
*
* @param x The column to fetch
* @param column The array to write luminance values into. See above.
* @return The array containing the luminance data.
*/
public abstract int[] getLuminanceColumn(int x, int[] column);
}

View file

@ -1,176 +0,0 @@
/*
* Copyright 2007 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.common;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
/**
* <p>Encapsulates logic that estimates the optimal "black point", the luminance value
* which is the best line between "white" and "black" in a grayscale image.</p>
*
* <p>For an interesting discussion of this issue, see
* <a href="http://webdiis.unizar.es/~neira/12082/thresholding.pdf">this paper</a>.</p>
*
* NOTE: This class is not thread-safe.
*
* @author Sean Owen
* @author dswitkin@google.com (Daniel Switkin)
*/
public final class BlackPointEstimator {
private static final int LUMINANCE_BITS = 5;
private static final int LUMINANCE_SHIFT = 8 - LUMINANCE_BITS;
private static final int LUMINANCE_BUCKETS = 1 << LUMINANCE_BITS;
private static int[] luminances = null;
private static int[] histogram = null;
private BlackPointEstimator() {
}
private static void initArrays(int luminanceSize) {
if (luminances == null || luminances.length < luminanceSize) {
luminances = new int[luminanceSize];
}
if (histogram == null) {
histogram = new int[LUMINANCE_BUCKETS];
} else {
for (int x = 0; x < LUMINANCE_BUCKETS; x++) {
histogram[x] = 0;
}
}
}
/**
* Calculates the black point for the supplied bitmap.
*
* @param source The bitmap to analyze.
* @param method The pixel sampling technique to use.
* @param argument The row index in the case of ROW_SAMPLING, otherwise ignored.
* @return The black point as an integer 0-255.
* @throws ReaderException An exception thrown if the blackpoint cannot be determined.
*/
public static int estimate(MonochromeBitmapSource source, BlackPointEstimationMethod method,
int argument) throws ReaderException {
int width = source.getWidth();
int height = source.getHeight();
initArrays(width);
if (method.equals(BlackPointEstimationMethod.TWO_D_SAMPLING)) {
// We used to sample a diagonal in the 2D case, but it missed a lot of pixels, and it required
// n calls to getLuminance(). Sampling several rows from the middle of the image with
// getLuminanceRow() gave a net improvement of 63 decoded blackbox tests. We read more
// pixels total, but with fewer function calls, and more contiguous memory.
for (int y = 1; y < 5; y++) {
int row = height * y / 5;
int[] localLuminances = source.getLuminanceRow(row, luminances);
int right = width * 4 / 5;
for (int x = width / 5; x < right; x++) {
histogram[localLuminances[x] >> LUMINANCE_SHIFT]++;
}
}
} else if (method.equals(BlackPointEstimationMethod.ROW_SAMPLING)) {
if (argument < 0 || argument >= height) {
throw new IllegalArgumentException("Row is not within the image: " + argument);
}
int[] localLuminances = source.getLuminanceRow(argument, luminances);
for (int x = 0; x < width; x++) {
histogram[localLuminances[x] >> LUMINANCE_SHIFT]++;
}
} else {
throw new IllegalArgumentException("Unknown method");
}
return findBestValley(histogram) << LUMINANCE_SHIFT;
}
/**
* <p>Given an array of <em>counts</em> of luminance values (i.e. a histogram), this method
* decides which bucket of values corresponds to the black point -- which bucket contains the
* count of the brightest luminance values that should be considered "black".</p>
*
* @param buckets an array of <em>counts</em> of luminance values
* @return index within argument of bucket corresponding to brightest values which should be
* considered "black"
* @throws ReaderException if "black" and "white" appear to be very close in luminance
*/
public static int findBestValley(int[] buckets) throws ReaderException {
int numBuckets = buckets.length;
int maxBucketCount = 0;
// Find tallest peak in histogram
int firstPeak = 0;
int firstPeakSize = 0;
for (int i = 0; i < numBuckets; i++) {
if (buckets[i] > firstPeakSize) {
firstPeak = i;
firstPeakSize = buckets[i];
}
if (buckets[i] > maxBucketCount) {
maxBucketCount = buckets[i];
}
}
// Find second-tallest peak -- well, another peak that is tall and not
// so close to the first one
int secondPeak = 0;
int secondPeakScore = 0;
for (int i = 0; i < numBuckets; i++) {
int distanceToBiggest = i - firstPeak;
// Encourage more distant second peaks by multiplying by square of distance
int score = buckets[i] * distanceToBiggest * distanceToBiggest;
if (score > secondPeakScore) {
secondPeak = i;
secondPeakScore = score;
}
}
// Put firstPeak first
if (firstPeak > secondPeak) {
int temp = firstPeak;
firstPeak = secondPeak;
secondPeak = temp;
}
// Kind of arbitrary; if the two peaks are very close, then we figure there is so little
// dynamic range in the image, that discriminating black and white is too error-prone.
// Decoding the image/line is either pointless, or may in some cases lead to a false positive
// for 1D formats, which are relatively lenient.
// We arbitrarily say "close" is "<= 1/16 of the total histogram buckets apart"
if (secondPeak - firstPeak <= numBuckets >> 4) {
throw ReaderException.getInstance();
}
// Find a valley between them that is low and closer to the white peak
int bestValley = secondPeak - 1;
int bestValleyScore = -1;
for (int i = secondPeak - 1; i > firstPeak; i--) {
int fromFirst = i - firstPeak;
// Favor a "valley" that is not too close to either peak -- especially not the black peak --
// and that has a low value of course
int score = fromFirst * fromFirst * (secondPeak - i) * (maxBucketCount - buckets[i]);
if (score > bestValleyScore) {
bestValley = i;
bestValleyScore = score;
}
}
return bestValley;
}
}
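For reference only (this file is removed by this change), a toy illustration of the peak/valley scoring above. The bucket counts are invented purely for illustration.
import com.google.zxing.ReaderException;
import com.google.zxing.common.BlackPointEstimator;
final class BlackPointToyExample {
  static int toyBlackPoint() throws ReaderException {
    // Invented numbers: a dark peak at bucket 3, a bright peak at bucket 28, zeros elsewhere.
    int[] buckets = new int[32];                 // 1 << LUMINANCE_BITS buckets
    buckets[3] = 120;                            // "black" peak
    buckets[28] = 200;                           // "white" peak
    int valley = BlackPointEstimator.findBestValley(buckets);
    // The valley is chosen between the peaks, biased toward the white peak, so the resulting
    // black point (valley << LUMINANCE_SHIFT) lands in the upper half of 0..255 here.
    return valley << 3;                          // LUMINANCE_SHIFT == 8 - 5 == 3
  }
}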

View file

@ -1,114 +0,0 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.common;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
/**
 * Encapsulates a cropped region of another {@link MonochromeBitmapSource}.
*
* @author Sean Owen
*/
public final class CroppedMonochromeBitmapSource implements MonochromeBitmapSource {
private final MonochromeBitmapSource delegate;
private final int left;
private final int top;
private final int right;
private final int bottom;
/**
* Creates an instance that uses only a region of the given image as a source of pixels to decode.
*
* @param delegate image to decode a region of
* @param left x coordinate of leftmost pixels to decode
* @param top y coordinate of topmost pixels to decode
* @param right one more than the x coordinate of rightmost pixels to decode, i.e. we will decode
* pixels whose x coordinate is in [left,right)
* @param bottom likewise, one more than the y coordinate of the bottommost pixels to decode
*/
public CroppedMonochromeBitmapSource(MonochromeBitmapSource delegate,
int left, int top, int right, int bottom) {
this.delegate = delegate;
this.left = left;
this.top = top;
this.right = right;
this.bottom = bottom;
}
public boolean isBlack(int x, int y) {
return delegate.isBlack(left + x, top + y);
}
public BitArray getBlackRow(int y, BitArray row, int startX, int getWidth) {
return delegate.getBlackRow(top + y, row, left + startX, getWidth);
}
public BitArray getBlackColumn(int x, BitArray column, int startY, int getHeight) {
return delegate.getBlackColumn(left + x, column, top + startY, getHeight);
}
public int getHeight() {
return bottom - top;
}
public int getWidth() {
return right - left;
}
public BitArray getBlackDiagonal(int x, int y, int dx, int dy, BitArray diagonal, int size) {
return delegate.getBlackDiagonal(left + x, top + y, dx, dy, diagonal, size);
}
public void estimateBlackPoint(BlackPointEstimationMethod method, int argument)
throws ReaderException {
// Hmm, the delegate will probably base this on the whole image though...
delegate.estimateBlackPoint(method, argument);
}
public BlackPointEstimationMethod getLastEstimationMethod() {
return delegate.getLastEstimationMethod();
}
public MonochromeBitmapSource rotateCounterClockwise() {
MonochromeBitmapSource rotated = delegate.rotateCounterClockwise();
return new CroppedMonochromeBitmapSource(rotated,
top,
delegate.getWidth() - right,
bottom,
delegate.getWidth() - left);
}
public boolean isRotateSupported() {
return delegate.isRotateSupported();
}
public int getLuminance(int x, int y) {
return delegate.getLuminance(x, y);
}
public int[] getLuminanceRow(int y, int[] row) {
return delegate.getLuminanceRow(y, row);
}
public int[] getLuminanceColumn(int x, int[] column) {
return delegate.getLuminanceColumn(x, column);
}
}

View file

@ -16,15 +16,15 @@
package com.google.zxing.common;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.BinaryBitmap;
/**
* @author Sean Owen
*/
public final class DefaultGridSampler extends GridSampler {
public BitMatrix sampleGrid(MonochromeBitmapSource image,
public BitMatrix sampleGrid(BinaryBitmap image,
int dimension,
float p1ToX, float p1ToY,
float p2ToX, float p2ToY,

View file

@ -0,0 +1,194 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.common;
import com.google.zxing.Binarizer;
import com.google.zxing.LuminanceSource;
import com.google.zxing.ReaderException;
/**
* This Binarizer implementation uses the old ZXing global histogram approach. It is suitable
* for low-end mobile devices which don't have enough CPU or memory to use a local thresholding
* algorithm. However, because it picks a global black point, it cannot handle difficult shadows
* and gradients.
*
* @author dswitkin@google.com (Daniel Switkin)
* @author Sean Owen
*/
public final class GlobalHistogramBinarizer extends Binarizer {
private static final int LUMINANCE_BITS = 5;
private static final int LUMINANCE_SHIFT = 8 - LUMINANCE_BITS;
private static final int LUMINANCE_BUCKETS = 1 << LUMINANCE_BITS;
private byte[] luminances = null;
private int[] buckets = null;
public GlobalHistogramBinarizer(LuminanceSource source) {
super(source);
}
// Applies simple sharpening to the row data to improve performance of the 1D Readers.
public BitArray getBlackRow(int y, BitArray row) throws ReaderException {
LuminanceSource source = getLuminanceSource();
int width = source.getWidth();
if (row == null || row.getSize() < width) {
row = new BitArray(width);
} else {
row.clear();
}
initArrays(width);
byte[] localLuminances = source.getRow(y, luminances);
int[] localBuckets = buckets;
for (int x = 0; x < width; x++) {
int pixel = localLuminances[x] & 0xff;
localBuckets[pixel >> LUMINANCE_SHIFT]++;
}
int blackPoint = estimateBlackPoint(localBuckets);
int left = localLuminances[0] & 0xff;
int center = localLuminances[1] & 0xff;
for (int x = 1; x < width - 1; x++) {
int right = localLuminances[x + 1] & 0xff;
// A simple -1 4 -1 box filter with a weight of 2.
int luminance = ((center << 2) - left - right) >> 1;
if (luminance < blackPoint) {
row.set(x);
}
left = center;
center = right;
}
return row;
}
// Does not sharpen the data, as this call is intended to only be used by 2D Readers.
public BitMatrix getBlackMatrix() throws ReaderException {
LuminanceSource source = getLuminanceSource();
int width = source.getWidth();
int height = source.getHeight();
BitMatrix matrix = new BitMatrix(width, height);
// Quickly calculates the histogram by sampling four rows from the image. This proved to be
// more robust on the blackbox tests than sampling a diagonal as we used to do.
initArrays(width);
int[] localBuckets = buckets;
for (int y = 1; y < 5; y++) {
int row = height * y / 5;
byte[] localLuminances = source.getRow(row, luminances);
int right = width * 4 / 5;
for (int x = width / 5; x < right; x++) {
int pixel = localLuminances[x] & 0xff;
localBuckets[pixel >> LUMINANCE_SHIFT]++;
}
}
int blackPoint = estimateBlackPoint(localBuckets);
// We delay reading the entire image luminance until the black point estimation succeeds.
// Although we end up reading four rows twice, it is consistent with our motto of
// "fail quickly" which is necessary for continuous scanning.
byte[] localLuminances = source.getMatrix();
for (int y = 0; y < height; y++) {
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = localLuminances[offset + x] & 0xff;
if (pixel < blackPoint) {
matrix.set(x, y);
}
}
}
return matrix;
}
public Binarizer createBinarizer(LuminanceSource source) {
return new GlobalHistogramBinarizer(source);
}
private void initArrays(int luminanceSize) {
if (luminances == null || luminances.length < luminanceSize) {
luminances = new byte[luminanceSize];
}
if (buckets == null) {
buckets = new int[LUMINANCE_BUCKETS];
} else {
for (int x = 0; x < LUMINANCE_BUCKETS; x++) {
buckets[x] = 0;
}
}
}
private static int estimateBlackPoint(int[] buckets) throws ReaderException {
// Find the tallest peak in the histogram.
int numBuckets = buckets.length;
int maxBucketCount = 0;
int firstPeak = 0;
int firstPeakSize = 0;
for (int i = 0; i < numBuckets; i++) {
if (buckets[i] > firstPeakSize) {
firstPeak = i;
firstPeakSize = buckets[i];
}
if (buckets[i] > maxBucketCount) {
maxBucketCount = buckets[i];
}
}
// Find the second-tallest peak which is somewhat far from the tallest peak.
int secondPeak = 0;
int secondPeakScore = 0;
for (int i = 0; i < numBuckets; i++) {
int distanceToBiggest = i - firstPeak;
// Encourage more distant second peaks by multiplying by square of distance.
int score = buckets[i] * distanceToBiggest * distanceToBiggest;
if (score > secondPeakScore) {
secondPeak = i;
secondPeakScore = score;
}
}
// Make sure firstPeak corresponds to the black peak.
if (firstPeak > secondPeak) {
int temp = firstPeak;
firstPeak = secondPeak;
secondPeak = temp;
}
// If there is too little contrast in the image to pick a meaningful black point, throw rather
// than waste time trying to decode the image, and risk false positives.
// TODO: It might be worth comparing the brightest and darkest pixels seen, rather than the
// two peaks, to determine the contrast.
if (secondPeak - firstPeak <= numBuckets >> 4) {
throw ReaderException.getInstance();
}
// Find a valley between them that is low and closer to the white peak.
int bestValley = secondPeak - 1;
int bestValleyScore = -1;
for (int i = secondPeak - 1; i > firstPeak; i--) {
int fromFirst = i - firstPeak;
int score = fromFirst * fromFirst * (secondPeak - i) * (maxBucketCount - buckets[i]);
if (score > bestValleyScore) {
bestValley = i;
bestValleyScore = score;
}
}
return bestValley << LUMINANCE_SHIFT;
}
}
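A minimal usage sketch of the new pipeline built around this binarizer. QRCodeReader is only one example of a Reader that accepts a BinaryBitmap; any platform-specific LuminanceSource works as the input. The wrapper class name here is invented for illustration.
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.common.GlobalHistogramBinarizer;
import com.google.zxing.qrcode.QRCodeReader;
final class GlobalHistogramExample {
  // Wraps a luminance source in the histogram binarizer, then hands the 1-bit view to a Reader.
  static Result decode(LuminanceSource source) throws ReaderException {
    BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
    return new QRCodeReader().decode(bitmap);
  }
}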

View file

@ -16,7 +16,7 @@
package com.google.zxing.common;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
/**
@ -82,7 +82,7 @@ public abstract class GridSampler {
* @throws ReaderException if image can't be sampled, for example, if the transformation defined
* by the given points is invalid or results in sampling outside the image boundaries
*/
public abstract BitMatrix sampleGrid(MonochromeBitmapSource image,
public abstract BitMatrix sampleGrid(BinaryBitmap image,
int dimension,
float p1ToX, float p1ToY,
float p2ToX, float p2ToY,
@ -108,7 +108,7 @@ public abstract class GridSampler {
* @param points actual points in x1,y1,...,xn,yn form
 * @throws ReaderException if an endpoint lies outside the image boundaries
*/
protected static void checkAndNudgePoints(MonochromeBitmapSource image, float[] points)
protected static void checkAndNudgePoints(BinaryBitmap image, float[] points)
throws ReaderException {
int width = image.getWidth();
int height = image.getHeight();

View file

@ -0,0 +1,163 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.common;
import com.google.zxing.Binarizer;
import com.google.zxing.ReaderException;
import com.google.zxing.LuminanceSource;
/**
 * This class implements a local thresholding algorithm, which, while slower than the
 * GlobalHistogramBinarizer, is fairly efficient for what it does. It is designed for
 * high frequency images of barcodes with black data on white backgrounds. For this application,
 * it does a much better job than a global black point when the image has severe shadows or gradients.
* However it tends to produce artifacts on lower frequency images and is therefore not
* a good general purpose binarizer for uses outside ZXing.
*
* @author dswitkin@google.com (Daniel Switkin)
*/
public final class LocalBlockBinarizer extends Binarizer {
private BitMatrix matrix = null;
public LocalBlockBinarizer(LuminanceSource source) {
super(source);
}
public BitArray getBlackRow(int y, BitArray row) throws ReaderException {
binarizeEntireImage();
return matrix.getRow(y, row);
}
public BitMatrix getBlackMatrix() throws ReaderException {
binarizeEntireImage();
return matrix;
}
public Binarizer createBinarizer(LuminanceSource source) {
return new LocalBlockBinarizer(source);
}
// Calculates the final BitMatrix once for all requests. This could be called once from the
// constructor instead, but there are some advantages to doing it lazily, such as making
// profiling easier, and not doing heavy lifting when callers don't expect it.
private void binarizeEntireImage() {
if (matrix == null) {
LuminanceSource source = getLuminanceSource();
byte[] luminances = source.getMatrix();
int width = source.getWidth();
int height = source.getHeight();
sharpenRow(luminances, width, height);
int subWidth = width >> 3;
int subHeight = height >> 3;
int[][] blackPoints = calculateBlackPoints(luminances, subWidth, subHeight, width);
matrix = new BitMatrix(width, height);
calculateThresholdForBlock(luminances, subWidth, subHeight, width, blackPoints, matrix);
}
}
// For each 8x8 block in the image, calculate the average black point using a 5x5 grid
// of the blocks around it. Also handles the corner cases, but will ignore up to 7 pixels
// on the right edge and 7 pixels at the bottom of the image if the overall dimensions are not
// multiples of eight. In practice, leaving those pixels white does not seem to be a problem.
private static void calculateThresholdForBlock(byte[] luminances, int subWidth, int subHeight,
int stride, int[][] blackPoints, BitMatrix matrix) {
for (int y = 0; y < subHeight; y++) {
for (int x = 0; x < subWidth; x++) {
int sum = 0;
int left = (x > 1) ? x : 2;
left = (left < subWidth - 2) ? left : subWidth - 3;
int top = (y > 1) ? y : 2;
top = (top < subHeight - 2) ? top : subHeight - 3;
for (int z = -2; z <= 2; z++) {
sum += blackPoints[top + z][left - 2];
sum += blackPoints[top + z][left - 1];
sum += blackPoints[top + z][left];
sum += blackPoints[top + z][left + 1];
sum += blackPoints[top + z][left + 2];
}
int average = sum / 25;
threshold8x8Block(luminances, x * 8, y * 8, average, stride, matrix);
}
}
}
// Applies a single threshold to an 8x8 block of pixels.
private static void threshold8x8Block(byte[] luminances, int xoffset, int yoffset, int threshold,
int stride, BitMatrix matrix) {
for (int y = 0; y < 8; y++) {
int offset = (yoffset + y) * stride + xoffset;
for (int x = 0; x < 8; x++) {
int pixel = luminances[offset + x] & 0xff;
if (pixel < threshold) {
matrix.set(xoffset + x, yoffset + y);
}
}
}
}
// Calculates a single black point for each 8x8 block of pixels and saves it away.
private static int[][] calculateBlackPoints(byte[] luminances, int subWidth, int subHeight,
int stride) {
int[][] blackPoints = new int[subHeight][subWidth];
for (int y = 0; y < subHeight; y++) {
for (int x = 0; x < subWidth; x++) {
int sum = 0;
int min = 255;
int max = 0;
for (int yy = 0; yy < 8; yy++) {
int offset = (y * 8 + yy) * stride + (x * 8);
for (int xx = 0; xx < 8; xx++) {
int pixel = luminances[offset + xx] & 0xff;
sum += pixel;
if (pixel < min) {
min = pixel;
}
if (pixel > max) {
max = pixel;
}
}
}
// If the contrast is inadequate, use half the minimum, so that this block will be
// treated as part of the white background, but won't drag down neighboring blocks
// too much.
int average = (max - min > 24) ? (sum >> 6) : (min >> 1);
blackPoints[y][x] = average;
}
}
return blackPoints;
}
// Applies a simple -1 4 -1 box filter with a weight of 2 to each row.
private static void sharpenRow(byte[] luminances, int width, int height) {
for (int y = 0; y < height; y++) {
int offset = y * width;
int left = luminances[offset] & 0xff;
int center = luminances[offset + 1] & 0xff;
for (int x = 1; x < width - 1; x++) {
int right = luminances[offset + x + 1] & 0xff;
luminances[offset + x] = (byte) (((center << 2) - left - right) >> 1);  // write back into this row
left = center;
center = right;
}
}
}
}
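Because both binarizers share the Binarizer base class (see createBinarizer() above), switching strategies is a one-line change for clients. A hedged sketch, assuming BinaryBitmap's constructor accepts the Binarizer base type, as the GlobalHistogramBinarizer usage elsewhere in this commit suggests; the class name and the lowEndDevice flag are invented for illustration.
import com.google.zxing.Binarizer;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.common.GlobalHistogramBinarizer;
import com.google.zxing.common.LocalBlockBinarizer;
final class BinarizerChoiceExample {
  // Picks a thresholding strategy per platform without touching any Reader code.
  static BinaryBitmap toBitmap(LuminanceSource source, boolean lowEndDevice) {
    Binarizer binarizer = lowEndDevice
        ? new GlobalHistogramBinarizer(source)   // cheap global black point
        : new LocalBlockBinarizer(source);       // per-block local thresholds
    return new BinaryBitmap(binarizer);
  }
}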

View file

@ -16,16 +16,15 @@
package com.google.zxing.common.detector;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.common.BitArray;
/**
* <p>A somewhat generic detector that looks for a barcode-like rectangular region within an image.
* It looks within a mostly white region of an image for a region of black and white, but mostly black.
* It returns the four corners of the region, as best it can determine.</p>
* It looks within a mostly white region of an image for a region of black and white, but mostly
* black. It returns the four corners of the region, as best it can determine.</p>
*
* @author Sean Owen
*/
@ -33,9 +32,9 @@ public final class MonochromeRectangleDetector {
private static final int MAX_MODULES = 32;
private final MonochromeBitmapSource image;
private final BinaryBitmap image;
public MonochromeRectangleDetector(MonochromeBitmapSource image) {
public MonochromeRectangleDetector(BinaryBitmap image) {
this.image = image;
}
@ -43,17 +42,13 @@ public final class MonochromeRectangleDetector {
* <p>Detects a rectangular region of black and white -- mostly black -- with a region of mostly
* white, in an image.</p>
*
* @return {@link ResultPoint}[] describing the corners of the rectangular region. The first and last points
* are opposed on the diagonal, as are the second and third. The first point will be the topmost point and
* the last, the bottommost. The second point will be leftmost and the third, the rightmost
* @return {@link ResultPoint}[] describing the corners of the rectangular region. The first and
* last points are opposed on the diagonal, as are the second and third. The first point will be
* the topmost point and the last, the bottommost. The second point will be leftmost and the
* third, the rightmost
* @throws ReaderException if no Data Matrix Code can be found
*/
public ResultPoint[] detect() throws ReaderException {
if (!BlackPointEstimationMethod.TWO_D_SAMPLING.equals(image.getLastEstimationMethod())) {
image.estimateBlackPoint(BlackPointEstimationMethod.TWO_D_SAMPLING, 0);
}
int height = image.getHeight();
int width = image.getWidth();
int halfHeight = height >> 1;
@ -65,16 +60,21 @@ public final class MonochromeRectangleDetector {
int maxI = height;
int minJ = 0;
int maxJ = width;
ResultPoint pointA = findCornerFromCenter(halfHeight, -iSkip, minI, maxI, halfWidth, 0, minJ, maxJ, halfWidth >> 1);
ResultPoint pointA = findCornerFromCenter(halfHeight, -iSkip, minI, maxI, halfWidth, 0,
minJ, maxJ, halfWidth >> 1);
minI = (int) pointA.getY() - 1;
ResultPoint pointB = findCornerFromCenter(halfHeight, 0, minI, maxI, halfWidth, -jSkip, minJ, maxJ, halfHeight >> 1);
ResultPoint pointB = findCornerFromCenter(halfHeight, 0, minI, maxI, halfWidth, -jSkip,
minJ, maxJ, halfHeight >> 1);
minJ = (int) pointB.getX() - 1;
ResultPoint pointC = findCornerFromCenter(halfHeight, 0, minI, maxI, halfWidth, jSkip, minJ, maxJ, halfHeight >> 1);
ResultPoint pointC = findCornerFromCenter(halfHeight, 0, minI, maxI, halfWidth, jSkip,
minJ, maxJ, halfHeight >> 1);
maxJ = (int) pointC.getX() + 1;
ResultPoint pointD = findCornerFromCenter(halfHeight, iSkip, minI, maxI, halfWidth, 0, minJ, maxJ, halfWidth >> 1);
ResultPoint pointD = findCornerFromCenter(halfHeight, iSkip, minI, maxI, halfWidth, 0,
minJ, maxJ, halfWidth >> 1);
maxI = (int) pointD.getY() + 1;
// Go try to find point A again with better information -- might have been off at first.
pointA = findCornerFromCenter(halfHeight, -iSkip, minI, maxI, halfWidth, 0, minJ, maxJ, halfWidth >> 2);
pointA = findCornerFromCenter(halfHeight, -iSkip, minI, maxI, halfWidth, 0, minJ, maxJ,
halfWidth >> 2);
return new ResultPoint[] { pointA, pointB, pointC, pointD };
}
@ -84,7 +84,8 @@ public final class MonochromeRectangleDetector {
* point which should be within the barcode.
*
 * @param centerI center's i component (vertical)
* @param di change in i per step. If scanning up this is negative; down, positive; left or right, 0
* @param di change in i per step. If scanning up this is negative; down, positive;
* left or right, 0
* @param minI minimum value of i to search through (meaningless when di == 0)
* @param maxI maximum value of i
* @param centerJ center's j component (horizontal)
@ -145,23 +146,27 @@ public final class MonochromeRectangleDetector {
}
/**
* Computes the start and end of a region of pixels, either horizontally or vertically, that could be
* part of a Data Matrix barcode.
* Computes the start and end of a region of pixels, either horizontally or vertically, that could
* be part of a Data Matrix barcode.
*
* @param fixedDimension if scanning horizontally, this is the row (the fixed vertical location) where
* we are scanning. If scanning vertically it's the colummn, the fixed horizontal location
* @param maxWhiteRun largest run of white pixels that can still be considered part of the barcode region
* @param fixedDimension if scanning horizontally, this is the row (the fixed vertical location)
 * where we are scanning. If scanning vertically it's the column, the fixed horizontal location
* @param maxWhiteRun largest run of white pixels that can still be considered part of the
* barcode region
* @param minDim minimum pixel location, horizontally or vertically, to consider
* @param maxDim maximum pixel location, horizontally or vertically, to consider
* @param horizontal if true, we're scanning left-right, instead of up-down
* @return int[] with start and end of found range, or null if no such range is found (e.g. only white was found)
* @return int[] with start and end of found range, or null if no such range is found
* (e.g. only white was found)
*/
private int[] blackWhiteRange(int fixedDimension, int maxWhiteRun, int minDim, int maxDim, boolean horizontal) {
private int[] blackWhiteRange(int fixedDimension, int maxWhiteRun, int minDim, int maxDim,
boolean horizontal) throws ReaderException {
int center = (minDim + maxDim) >> 1;
BitArray rowOrColumn = horizontal ? image.getBlackRow(fixedDimension, null, 0, image.getWidth())
: image.getBlackColumn(fixedDimension, null, 0, image.getHeight());
BitArray rowOrColumn = horizontal ?
image.getBlackRow(fixedDimension, null, 0, image.getWidth()) :
image.getBlackColumn(fixedDimension, null, 0, image.getHeight());
// Scan left/up first
int start = center;

View file

@ -18,12 +18,13 @@ package com.google.zxing.datamatrix;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.ResultPoint;
import com.google.zxing.ResultMetadataType;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.DecoderResult;
import com.google.zxing.common.DetectorResult;
@ -49,11 +50,11 @@ public final class DataMatrixReader implements Reader {
* @return a String representing the content encoded by the Data Matrix code
* @throws ReaderException if a Data Matrix code cannot be found, or cannot be decoded
*/
public Result decode(MonochromeBitmapSource image) throws ReaderException {
public Result decode(BinaryBitmap image) throws ReaderException {
return decode(image, null);
}
public Result decode(MonochromeBitmapSource image, Hashtable hints)
public Result decode(BinaryBitmap image, Hashtable hints)
throws ReaderException {
DecoderResult decoderResult;
ResultPoint[] points;
@ -79,7 +80,7 @@ public final class DataMatrixReader implements Reader {
* around it. This is a specialized method that works exceptionally fast in this special
* case.
*/
private static BitMatrix extractPureBits(MonochromeBitmapSource image) throws ReaderException {
private static BitMatrix extractPureBits(BinaryBitmap image) throws ReaderException {
// Now need to determine module size in pixels
int height = image.getHeight();

View file

@ -16,9 +16,10 @@
package com.google.zxing.datamatrix.detector;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.Collections;
import com.google.zxing.common.Comparator;
@ -45,10 +46,10 @@ public final class Detector {
private static final Integer[] INTEGERS =
{ new Integer(0), new Integer(1), new Integer(2), new Integer(3), new Integer(4) };
private final MonochromeBitmapSource image;
private final BinaryBitmap image;
private final MonochromeRectangleDetector rectangleDetector;
public Detector(MonochromeBitmapSource image) {
public Detector(BinaryBitmap image) {
this.image = image;
rectangleDetector = new MonochromeRectangleDetector(image);
}
@ -165,7 +166,7 @@ public final class Detector {
table.put(key, value == null ? INTEGERS[1] : INTEGERS[value.intValue() + 1]);
}
private static BitMatrix sampleGrid(MonochromeBitmapSource image,
private static BitMatrix sampleGrid(BinaryBitmap image,
ResultPoint topLeft,
ResultPoint bottomLeft,
ResultPoint bottomRight,
@ -204,7 +205,8 @@ public final class Detector {
/**
* Counts the number of black/white transitions between two points, using something like Bresenham's algorithm.
*/
private ResultPointsAndTransitions transitionsBetween(ResultPoint from, ResultPoint to) {
private ResultPointsAndTransitions transitionsBetween(ResultPoint from, ResultPoint to)
throws ReaderException {
// See QR Code Detector, sizeOfBlackWhiteBlackRun()
int fromX = (int) from.getX();
int fromY = (int) from.getY();

View file

@ -16,11 +16,10 @@
package com.google.zxing.multi;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.Reader;
import com.google.zxing.Result;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.common.CroppedMonochromeBitmapSource;
import com.google.zxing.Result;
import java.util.Hashtable;
@ -41,56 +40,57 @@ public final class ByQuadrantReader implements Reader {
this.delegate = delegate;
}
public Result decode(MonochromeBitmapSource image) throws ReaderException {
public Result decode(BinaryBitmap image) throws ReaderException {
return decode(image, null);
}
public Result decode(MonochromeBitmapSource image, Hashtable hints) throws ReaderException {
public Result decode(BinaryBitmap image, Hashtable hints) throws ReaderException {
int width = image.getWidth();
int height = image.getHeight();
int halfWidth = width / 2;
int halfHeight = height / 2;
MonochromeBitmapSource topLeft = new CroppedMonochromeBitmapSource(image, 0, 0, halfWidth,
halfHeight);
try {
return delegate.decode(topLeft, hints);
} catch (ReaderException re) {
// continue
{
BinaryBitmap topLeft = image.crop(0, 0, halfWidth, halfHeight);
try {
return delegate.decode(topLeft, hints);
} catch (ReaderException re) {
// continue
}
}
MonochromeBitmapSource topRight = new CroppedMonochromeBitmapSource(image, halfWidth, 0, width,
halfHeight);
try {
return delegate.decode(topRight, hints);
} catch (ReaderException re) {
// continue
{
BinaryBitmap topRight = image.crop(halfWidth, 0, width, halfHeight);
try {
return delegate.decode(topRight, hints);
} catch (ReaderException re) {
// continue
}
}
MonochromeBitmapSource bottomLeft = new CroppedMonochromeBitmapSource(image, 0, halfHeight,
halfWidth, height);
try {
return delegate.decode(bottomLeft, hints);
} catch (ReaderException re) {
// continue
{
BinaryBitmap bottomLeft = image.crop(0, halfHeight, halfWidth, height);
try {
return delegate.decode(bottomLeft, hints);
} catch (ReaderException re) {
// continue
}
}
MonochromeBitmapSource bottomRight = new CroppedMonochromeBitmapSource(image, halfWidth,
halfHeight, width, height);
try {
return delegate.decode(bottomRight, hints);
} catch (ReaderException re) {
// continue
{
BinaryBitmap bottomRight = image.crop(halfWidth, halfHeight, width, height);
try {
return delegate.decode(bottomRight, hints);
} catch (ReaderException re) {
// continue
}
}
int quarterWidth = halfWidth / 2;
int quarterHeight = halfHeight / 2;
MonochromeBitmapSource center = new CroppedMonochromeBitmapSource(image,
quarterWidth,
quarterHeight,
width - quarterWidth,
height - quarterHeight);
BinaryBitmap center = image.crop(quarterWidth, quarterHeight, width - quarterWidth,
height - quarterHeight);
return delegate.decode(center, hints);
}
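A short usage sketch of the quadrant strategy above, assuming the constructor takes the delegate Reader that the field assignment earlier in this file suggests. MultiFormatReader is only an example delegate; the wrapper class name is invented for illustration.
import com.google.zxing.BinaryBitmap;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.multi.ByQuadrantReader;
final class ByQuadrantExample {
  // Lets the wrapped reader try each quadrant of the bitmap, then a centered crop.
  static Result decode(BinaryBitmap bitmap) throws ReaderException {
    return new ByQuadrantReader(new MultiFormatReader()).decode(bitmap);
  }
}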

View file

@ -18,10 +18,9 @@ package com.google.zxing.multi;
import com.google.zxing.Reader;
import com.google.zxing.Result;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.common.CroppedMonochromeBitmapSource;
import java.util.Hashtable;
import java.util.Vector;
@ -50,11 +49,11 @@ public final class GenericMultipleBarcodeReader implements MultipleBarcodeReader
this.delegate = delegate;
}
public Result[] decodeMultiple(MonochromeBitmapSource image) throws ReaderException {
public Result[] decodeMultiple(BinaryBitmap image) throws ReaderException {
return decodeMultiple(image, null);
}
public Result[] decodeMultiple(MonochromeBitmapSource image, Hashtable hints)
public Result[] decodeMultiple(BinaryBitmap image, Hashtable hints)
throws ReaderException {
Vector results = new Vector();
doDecodeMultiple(image, hints, results, 0, 0);
@ -69,7 +68,7 @@ public final class GenericMultipleBarcodeReader implements MultipleBarcodeReader
return resultArray;
}
private void doDecodeMultiple(MonochromeBitmapSource image,
private void doDecodeMultiple(BinaryBitmap image,
Hashtable hints,
Vector results,
int xOffset,
@ -121,20 +120,16 @@ public final class GenericMultipleBarcodeReader implements MultipleBarcodeReader
}
if (minX > MIN_DIMENSION_TO_RECUR) {
doDecodeMultiple(new CroppedMonochromeBitmapSource(image, 0, 0, (int) minX, height),
hints, results, 0, 0);
doDecodeMultiple(image.crop(0, 0, (int) minX, height), hints, results, 0, 0);
}
if (minY > MIN_DIMENSION_TO_RECUR) {
doDecodeMultiple(new CroppedMonochromeBitmapSource(image, 0, 0, width, (int) minY),
hints, results, 0, 0);
doDecodeMultiple(image.crop(0, 0, width, (int) minY), hints, results, 0, 0);
}
if (maxX < width - MIN_DIMENSION_TO_RECUR) {
doDecodeMultiple(new CroppedMonochromeBitmapSource(image, (int) maxX, 0, width, height),
hints, results, (int) maxX, 0);
doDecodeMultiple(image.crop((int) maxX, 0, width, height), hints, results, (int) maxX, 0);
}
if (maxY < height - MIN_DIMENSION_TO_RECUR) {
doDecodeMultiple(new CroppedMonochromeBitmapSource(image, 0, (int) maxY, width, height),
hints, results, 0, (int) maxY);
doDecodeMultiple(image.crop(0, (int) maxY, width, height), hints, results, 0, (int) maxY);
}
}

View file

@ -17,7 +17,7 @@
package com.google.zxing.multi;
import com.google.zxing.Result;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import java.util.Hashtable;
@ -30,8 +30,8 @@ import java.util.Hashtable;
*/
public interface MultipleBarcodeReader {
Result[] decodeMultiple(MonochromeBitmapSource image) throws ReaderException;
Result[] decodeMultiple(BinaryBitmap image) throws ReaderException;
Result[] decodeMultiple(MonochromeBitmapSource image, Hashtable hints) throws ReaderException;
Result[] decodeMultiple(BinaryBitmap image, Hashtable hints) throws ReaderException;
}

View file

@ -17,7 +17,7 @@
package com.google.zxing.multi.qrcode;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.ResultMetadataType;
@ -41,18 +41,19 @@ public final class QRCodeMultiReader extends QRCodeReader implements MultipleBar
private static final Result[] EMPTY_RESULT_ARRAY = new Result[0];
public Result[] decodeMultiple(MonochromeBitmapSource image) throws ReaderException {
public Result[] decodeMultiple(BinaryBitmap image) throws ReaderException {
return decodeMultiple(image, null);
}
public Result[] decodeMultiple(MonochromeBitmapSource image, Hashtable hints) throws ReaderException {
public Result[] decodeMultiple(BinaryBitmap image, Hashtable hints) throws ReaderException {
Vector results = new Vector();
DetectorResult[] detectorResult = new MultiDetector(image).detectMulti(hints);
for (int i = 0; i < detectorResult.length; i++) {
try {
DecoderResult decoderResult = getDecoder().decode(detectorResult[i].getBits());
ResultPoint[] points = detectorResult[i].getPoints();
Result result = new Result(decoderResult.getText(), decoderResult.getRawBytes(), points, BarcodeFormat.QR_CODE);
Result result = new Result(decoderResult.getText(), decoderResult.getRawBytes(), points,
BarcodeFormat.QR_CODE);
if (decoderResult.getByteSegments() != null) {
result.putMetadata(ResultMetadataType.BYTE_SEGMENTS, decoderResult.getByteSegments());
}
@ -72,4 +73,4 @@ public final class QRCodeMultiReader extends QRCodeReader implements MultipleBar
}
}
}
}

View file

@ -16,8 +16,7 @@
package com.google.zxing.multi.qrcode.detector;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.common.DetectorResult;
import com.google.zxing.qrcode.detector.Detector;
@ -37,16 +36,12 @@ public final class MultiDetector extends Detector {
private static final DetectorResult[] EMPTY_DETECTOR_RESULTS = new DetectorResult[0];
public MultiDetector(MonochromeBitmapSource image) {
public MultiDetector(BinaryBitmap image) {
super(image);
}
public DetectorResult[] detectMulti(Hashtable hints) throws ReaderException {
MonochromeBitmapSource image = getImage();
if (!BlackPointEstimationMethod.TWO_D_SAMPLING.equals(image.getLastEstimationMethod())) {
image.estimateBlackPoint(BlackPointEstimationMethod.TWO_D_SAMPLING, 0);
}
BinaryBitmap image = getImage();
MultiFinderPatternFinder finder = new MultiFinderPatternFinder(image);
FinderPatternInfo[] info = finder.findMulti(hints);

View file

@ -17,7 +17,7 @@
package com.google.zxing.multi.qrcode.detector;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.common.BitArray;
@ -48,10 +48,13 @@ final class MultiFinderPatternFinder extends FinderPatternFinder {
private static final FinderPatternInfo[] EMPTY_RESULT_ARRAY = new FinderPatternInfo[0];
// TODO MIN_MODULE_COUNT and MAX_MODULE_COUNT would be great
// hints to ask the user for since it limits the number of regions to decode
private static final float MAX_MODULE_COUNT_PER_EDGE = 180; // max. legal count of modules per QR code edge (177)
private static final float MIN_MODULE_COUNT_PER_EDGE = 9; // min. legal count per modules per QR code edge (11)
// TODO MIN_MODULE_COUNT and MAX_MODULE_COUNT would be great hints to ask the user for
// since it limits the number of regions to decode
// max. legal count of modules per QR code edge (177)
private static final float MAX_MODULE_COUNT_PER_EDGE = 180;
// min. legal count of modules per QR code edge (11)
private static final float MIN_MODULE_COUNT_PER_EDGE = 9;
/**
* More or less arbitrary cutoff point for determining if two finder patterns might belong
@ -84,7 +87,7 @@ final class MultiFinderPatternFinder extends FinderPatternFinder {
*
* @param image image to search
*/
MultiFinderPatternFinder(MonochromeBitmapSource image) {
MultiFinderPatternFinder(BinaryBitmap image) {
super(image);
}
@ -124,12 +127,13 @@ final class MultiFinderPatternFinder extends FinderPatternFinder {
* - feature similar module sizes
* - are placed in a distance so the estimated module count is within the QR specification
* - have similar distance between upper left/right and left top/bottom finder patterns
* - form a triangle with 90° angle (checked by comparing top right/bottom left distance with pythagoras)
 * - form a triangle with a 90° angle (checked by comparing the top right/bottom left distance
 * with Pythagoras)
*
* Note: we allow each point to be used for more than one code region: this might seem counterintuitive at first,
* but the performance penalty is not that big. At this point, we cannot make a good quality decision whether
* the three finders actually represent a QR code, or are just by chance layouted so it looks like there might
* be a QR code there.
* Note: we allow each point to be used for more than one code region: this might seem
* counterintuitive at first, but the performance penalty is not that big. At this point,
 * we cannot make a good quality decision about whether the three finders actually represent
 * a QR code, or just happen to be laid out so that it looks like there might be a QR code there.
 * So, if the layout seems right, let's have the decoder try to decode.
*/
@ -184,7 +188,8 @@ final class MultiFinderPatternFinder extends FinderPatternFinder {
// Check the sizes
float estimatedModuleCount = ((dA + dB) / p1.getEstimatedModuleSize()) / 2;
if (estimatedModuleCount > MAX_MODULE_COUNT_PER_EDGE || estimatedModuleCount < MIN_MODULE_COUNT_PER_EDGE) {
if (estimatedModuleCount > MAX_MODULE_COUNT_PER_EDGE ||
estimatedModuleCount < MIN_MODULE_COUNT_PER_EDGE) {
continue;
}
@ -223,7 +228,7 @@ final class MultiFinderPatternFinder extends FinderPatternFinder {
public FinderPatternInfo[] findMulti(Hashtable hints) throws ReaderException {
boolean tryHarder = hints != null && hints.containsKey(DecodeHintType.TRY_HARDER);
MonochromeBitmapSource image = getImage();
BinaryBitmap image = getImage();
int maxI = image.getHeight();
int maxJ = image.getWidth();
// We are looking for black/white/black/white/black modules in

View file

@ -16,9 +16,8 @@
package com.google.zxing.oned;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.ResultMetadataType;
@ -39,24 +38,25 @@ public abstract class AbstractOneDReader implements OneDReader {
private static final int INTEGER_MATH_SHIFT = 8;
static final int PATTERN_MATCH_RESULT_SCALE_FACTOR = 1 << INTEGER_MATH_SHIFT;
public final Result decode(MonochromeBitmapSource image) throws ReaderException {
public final Result decode(BinaryBitmap image) throws ReaderException {
return decode(image, null);
}
public final Result decode(MonochromeBitmapSource image, Hashtable hints) throws ReaderException {
public final Result decode(BinaryBitmap image, Hashtable hints) throws ReaderException {
try {
return doDecode(image, hints);
} catch (ReaderException re) {
boolean tryHarder = hints != null && hints.containsKey(DecodeHintType.TRY_HARDER);
if (tryHarder && image.isRotateSupported()) {
MonochromeBitmapSource rotatedImage = image.rotateCounterClockwise();
BinaryBitmap rotatedImage = image.rotateCounterClockwise();
Result result = doDecode(rotatedImage, hints);
// Record that we found it rotated 90 degrees CCW / 270 degrees CW
Hashtable metadata = result.getResultMetadata();
int orientation = 270;
if (metadata != null && metadata.containsKey(ResultMetadataType.ORIENTATION)) {
// But if we found it reversed in doDecode(), add in that result here:
orientation = (orientation + ((Integer) metadata.get(ResultMetadataType.ORIENTATION)).intValue()) % 360;
orientation = (orientation +
((Integer) metadata.get(ResultMetadataType.ORIENTATION)).intValue()) % 360;
}
result.putMetadata(ResultMetadataType.ORIENTATION, new Integer(orientation));
return result;
@ -80,7 +80,7 @@ public abstract class AbstractOneDReader implements OneDReader {
* @return The contents of the decoded barcode
* @throws ReaderException Any spontaneous errors which occur
*/
private Result doDecode(MonochromeBitmapSource image, Hashtable hints) throws ReaderException {
private Result doDecode(BinaryBitmap image, Hashtable hints) throws ReaderException {
int width = image.getWidth();
int height = image.getHeight();
BitArray row = new BitArray(width);
@ -108,11 +108,10 @@ public abstract class AbstractOneDReader implements OneDReader {
// Estimate black point for this row and load it:
try {
image.estimateBlackPoint(BlackPointEstimationMethod.ROW_SAMPLING, rowNumber);
row = image.getBlackRow(rowNumber, row);
} catch (ReaderException re) {
continue;
}
row = image.getBlackRow(rowNumber, row, 0, width);
}
// While we have the image data in a BitArray, it's fairly cheap to reverse it in place to
// handle decoding upside down barcodes.
@ -152,7 +151,8 @@ public abstract class AbstractOneDReader implements OneDReader {
* @param row row to count from
* @param start offset into row to start at
* @param counters array into which to record counts
* @throws ReaderException if counters cannot be filled entirely from row before running out of pixels
* @throws ReaderException if counters cannot be filled entirely from row before running out
* of pixels
*/
static void recordPattern(BitArray row, int start, int[] counters) throws ReaderException {
int numCounters = counters.length;
@ -238,6 +238,7 @@ public abstract class AbstractOneDReader implements OneDReader {
// method of an interface it implements, but it is causing NoSuchMethodError
// issues on some Nokia JVMs. So we add this superfluous declaration:
public abstract Result decodeRow(int rowNumber, BitArray row, Hashtable hints) throws ReaderException;
public abstract Result decodeRow(int rowNumber, BitArray row, Hashtable hints)
throws ReaderException;
}

View file

@ -17,9 +17,9 @@
package com.google.zxing.oned;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.common.BitArray;
import java.util.Hashtable;
@ -42,11 +42,11 @@ public final class UPCAReader implements UPCEANReader {
return maybeReturnResult(ean13Reader.decodeRow(rowNumber, row, hints));
}
public Result decode(MonochromeBitmapSource image) throws ReaderException {
public Result decode(BinaryBitmap image) throws ReaderException {
return maybeReturnResult(ean13Reader.decode(image));
}
public Result decode(MonochromeBitmapSource image, Hashtable hints) throws ReaderException {
public Result decode(BinaryBitmap image, Hashtable hints) throws ReaderException {
return maybeReturnResult(ean13Reader.decode(image, hints));
}

View file

@ -17,8 +17,8 @@
package com.google.zxing.pdf417;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
@ -48,11 +48,11 @@ public final class PDF417Reader implements Reader {
* @return a String representing the content encoded by the PDF417 code
* @throws ReaderException if a PDF417 code cannot be found, or cannot be decoded
*/
public Result decode(MonochromeBitmapSource image) throws ReaderException {
public Result decode(BinaryBitmap image) throws ReaderException {
return decode(image, null);
}
public Result decode(MonochromeBitmapSource image, Hashtable hints)
public Result decode(BinaryBitmap image, Hashtable hints)
throws ReaderException {
DecoderResult decoderResult;
ResultPoint[] points;
@ -65,7 +65,8 @@ public final class PDF417Reader implements Reader {
decoderResult = decoder.decode(detectorResult.getBits());
points = detectorResult.getPoints();
}
return new Result(decoderResult.getText(), decoderResult.getRawBytes(), points, BarcodeFormat.PDF417);
return new Result(decoderResult.getText(), decoderResult.getRawBytes(), points,
BarcodeFormat.PDF417);
}
/**
@ -74,7 +75,7 @@ public final class PDF417Reader implements Reader {
* around it. This is a specialized method that works exceptionally fast in this special
* case.
*/
private static BitMatrix extractPureBits(MonochromeBitmapSource image) throws ReaderException {
private static BitMatrix extractPureBits(BinaryBitmap image) throws ReaderException {
// Now need to determine module size in pixels
int height = image.getHeight();

View file

@ -16,8 +16,7 @@
package com.google.zxing.pdf417.detector;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.common.BitArray;
@ -50,9 +49,9 @@ public final class Detector {
private static final int[] STOP_PATTERN_REVERSE = {1, 2, 1, 1, 1, 3, 1, 1,
7}; // 1111111 0 1 000 1 0 1 00 1
private final MonochromeBitmapSource image;
private final BinaryBitmap image;
public Detector(MonochromeBitmapSource image) {
public Detector(BinaryBitmap image) {
this.image = image;
}
@ -80,11 +79,6 @@ public final class Detector {
* @throws ReaderException if no PDF417 Code can be found
*/
public DetectorResult detect(Hashtable hints) throws ReaderException {
if (!BlackPointEstimationMethod.TWO_D_SAMPLING.equals(image
.getLastEstimationMethod())) {
image.estimateBlackPoint(BlackPointEstimationMethod.TWO_D_SAMPLING, 0);
}
ResultPoint[] vertices = findVertices(image);
if (vertices == null) { // Couldn't find the vertices
// Maybe the image is rotated 180 degrees?
@ -130,7 +124,7 @@ public final class Detector {
* area vertices[6] x, y top right codeword area vertices[7] x, y
* bottom right codeword area
*/
private static ResultPoint[] findVertices(MonochromeBitmapSource image) {
private static ResultPoint[] findVertices(BinaryBitmap image) throws ReaderException {
int height = image.getHeight();
int width = image.getWidth();
@ -211,7 +205,7 @@ public final class Detector {
* area vertices[6] x, y top right codeword area vertices[7] x, y
* bottom right codeword area
*/
private static ResultPoint[] findVertices180(MonochromeBitmapSource image) {
private static ResultPoint[] findVertices180(BinaryBitmap image) throws ReaderException {
int height = image.getHeight();
int width = image.getWidth();
@ -315,10 +309,8 @@ public final class Detector {
* @param moduleWidth estimated module size
* @return the number of modules in a row.
*/
private static int computeDimension(ResultPoint topLeft,
ResultPoint topRight, ResultPoint bottomLeft, ResultPoint bottomRight,
float moduleWidth) {
private static int computeDimension(ResultPoint topLeft, ResultPoint topRight,
ResultPoint bottomLeft, ResultPoint bottomRight, float moduleWidth) {
int topRowDimension = round(ResultPoint
.distance(topLeft, topRight)
/ moduleWidth);
@ -337,9 +329,9 @@ public final class Detector {
*/
}
private static BitMatrix sampleGrid(MonochromeBitmapSource image,
ResultPoint topLeft, ResultPoint bottomLeft, ResultPoint topRight,
ResultPoint bottomRight, int dimension) throws ReaderException {
private static BitMatrix sampleGrid(BinaryBitmap image, ResultPoint topLeft,
ResultPoint bottomLeft, ResultPoint topRight, ResultPoint bottomRight, int dimension)
throws ReaderException {
// Note that unlike in the QR Code sampler, we didn't find the center of
// modules, but the

View file

@ -18,12 +18,12 @@ package com.google.zxing.qrcode;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.ResultPoint;
import com.google.zxing.ResultMetadataType;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.DecoderResult;
import com.google.zxing.common.DetectorResult;
@ -53,11 +53,11 @@ public class QRCodeReader implements Reader {
* @return a String representing the content encoded by the QR code
* @throws ReaderException if a QR code cannot be found, or cannot be decoded
*/
public Result decode(MonochromeBitmapSource image) throws ReaderException {
public Result decode(BinaryBitmap image) throws ReaderException {
return decode(image, null);
}
public Result decode(MonochromeBitmapSource image, Hashtable hints)
public Result decode(BinaryBitmap image, Hashtable hints)
throws ReaderException {
DecoderResult decoderResult;
ResultPoint[] points;
@ -84,7 +84,7 @@ public class QRCodeReader implements Reader {
* around it. This is a specialized method that works exceptionally fast in this special
* case.
*/
private static BitMatrix extractPureBits(MonochromeBitmapSource image) throws ReaderException {
private static BitMatrix extractPureBits(BinaryBitmap image) throws ReaderException {
// Now need to determine module size in pixels
int height = image.getHeight();

View file

@ -16,8 +16,8 @@
package com.google.zxing.qrcode.detector;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.common.BitArray;
import java.util.Vector;
@ -38,7 +38,7 @@ import java.util.Vector;
*/
final class AlignmentPatternFinder {
private final MonochromeBitmapSource image;
private final BinaryBitmap image;
private final Vector possibleCenters;
private final int startX;
private final int startY;
@ -57,7 +57,7 @@ final class AlignmentPatternFinder {
* @param height height of region to search
* @param moduleSize estimated module size so far
*/
AlignmentPatternFinder(MonochromeBitmapSource image,
AlignmentPatternFinder(BinaryBitmap image,
int startX,
int startY,
int width,
@ -186,8 +186,9 @@ final class AlignmentPatternFinder {
* observed in any reading state, based on the results of the horizontal scan
* @return vertical center of alignment pattern, or {@link Float#NaN} if not found
*/
private float crossCheckVertical(int startI, int centerJ, int maxCount, int originalStateCountTotal) {
MonochromeBitmapSource image = this.image;
private float crossCheckVertical(int startI, int centerJ, int maxCount,
int originalStateCountTotal) throws ReaderException {
BinaryBitmap image = this.image;
int maxI = image.getHeight();
int[] stateCount = crossCheckStateCount;
@ -249,7 +250,8 @@ final class AlignmentPatternFinder {
* @param j end of possible alignment pattern in row
* @return {@link AlignmentPattern} if we have found the same pattern twice, or null if not
*/
private AlignmentPattern handlePossibleCenter(int[] stateCount, int i, int j) {
private AlignmentPattern handlePossibleCenter(int[] stateCount, int i, int j)
throws ReaderException {
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2];
float centerJ = centerFromEnd(stateCount, j);
float centerI = crossCheckVertical(i, (int) centerJ, 2 * stateCount[1], stateCountTotal);

View file

@ -16,10 +16,9 @@
package com.google.zxing.qrcode.detector;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.DetectorResult;
import com.google.zxing.common.GridSampler;
@ -35,13 +34,13 @@ import java.util.Hashtable;
*/
public class Detector {
private final MonochromeBitmapSource image;
private final BinaryBitmap image;
public Detector(MonochromeBitmapSource image) {
public Detector(BinaryBitmap image) {
this.image = image;
}
protected MonochromeBitmapSource getImage() {
protected BinaryBitmap getImage() {
return image;
}
@ -64,11 +63,7 @@ public class Detector {
*/
public DetectorResult detect(Hashtable hints) throws ReaderException {
MonochromeBitmapSource image = this.image;
if (!BlackPointEstimationMethod.TWO_D_SAMPLING.equals(image.getLastEstimationMethod())) {
image.estimateBlackPoint(BlackPointEstimationMethod.TWO_D_SAMPLING, 0);
}
BinaryBitmap image = this.image;
FinderPatternFinder finder = new FinderPatternFinder(image);
FinderPatternInfo info = finder.find(hints);
@ -129,7 +124,7 @@ public class Detector {
return new DetectorResult(bits, points);
}
private static BitMatrix sampleGrid(MonochromeBitmapSource image,
private static BitMatrix sampleGrid(BinaryBitmap image,
ResultPoint topLeft,
ResultPoint topRight,
ResultPoint bottomLeft,
@ -202,7 +197,8 @@ public class Detector {
* <p>Computes an average estimated module size based on estimates derived from the positions
* of the three finder patterns.</p>
*/
private float calculateModuleSize(ResultPoint topLeft, ResultPoint topRight, ResultPoint bottomLeft) {
private float calculateModuleSize(ResultPoint topLeft, ResultPoint topRight,
ResultPoint bottomLeft) throws ReaderException {
// Take the average
return (calculateModuleSizeOneWay(topLeft, topRight) +
calculateModuleSizeOneWay(topLeft, bottomLeft)) / 2.0f;
@ -213,7 +209,8 @@ public class Detector {
* {@link #sizeOfBlackWhiteBlackRunBothWays(int, int, int, int)} to figure the
* width of each, measuring along the axis between their centers.</p>
*/
private float calculateModuleSizeOneWay(ResultPoint pattern, ResultPoint otherPattern) {
private float calculateModuleSizeOneWay(ResultPoint pattern, ResultPoint otherPattern)
throws ReaderException {
float moduleSizeEst1 = sizeOfBlackWhiteBlackRunBothWays((int) pattern.getX(),
(int) pattern.getY(),
(int) otherPattern.getX(),
@ -238,7 +235,8 @@ public class Detector {
* a finder pattern by looking for a black-white-black run from the center in the direction
* of another point (another finder pattern center), and in the opposite direction too.</p>
*/
private float sizeOfBlackWhiteBlackRunBothWays(int fromX, int fromY, int toX, int toY) {
private float sizeOfBlackWhiteBlackRunBothWays(int fromX, int fromY, int toX, int toY)
throws ReaderException {
float result = sizeOfBlackWhiteBlackRun(fromX, fromY, toX, toY);
@ -269,7 +267,8 @@ public class Detector {
* <p>This is used when figuring out how wide a finder pattern is, when the finder pattern
* may be skewed or rotated.</p>
*/
private float sizeOfBlackWhiteBlackRun(int fromX, int fromY, int toX, int toY) {
private float sizeOfBlackWhiteBlackRun(int fromX, int fromY, int toX, int toY)
throws ReaderException {
// Mild variant of Bresenham's algorithm;
// see http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
boolean steep = Math.abs(toY - fromY) > Math.abs(toX - fromX);
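// (Note: the "steep" flag is the usual Bresenham-style transposition -- when |toY - fromY|
// exceeds |toX - fromX| the x and y roles are treated as swapped, so the loop can advance one
// pixel per step along the longer axis while accumulating error along the shorter one.)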

View file

@ -16,8 +16,8 @@
package com.google.zxing.qrcode.detector;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.ReaderException;
import com.google.zxing.ResultPoint;
import com.google.zxing.common.BitArray;
@ -42,7 +42,7 @@ public class FinderPatternFinder {
protected static final int MAX_MODULES = 57; // support up to version 10 for mobile clients
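// (A version-V QR code is 17 + 4*V modules on a side, so version 10 gives 17 + 40 = 57,
// which is where the constant above comes from.)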
private static final int INTEGER_MATH_SHIFT = 8;
private final MonochromeBitmapSource image;
private final BinaryBitmap image;
private final Vector possibleCenters;
private boolean hasSkipped;
private final int[] crossCheckStateCount;
@ -52,13 +52,13 @@ public class FinderPatternFinder {
*
* @param image image to search
*/
public FinderPatternFinder(MonochromeBitmapSource image) {
public FinderPatternFinder(BinaryBitmap image) {
this.image = image;
this.possibleCenters = new Vector();
this.crossCheckStateCount = new int[5];
}
protected MonochromeBitmapSource getImage() {
protected BinaryBitmap getImage() {
return image;
}
@ -230,8 +230,9 @@ public class FinderPatternFinder {
* observed in any reading state, based on the results of the horizontal scan
* @return vertical center of finder pattern, or {@link Float#NaN} if not found
*/
private float crossCheckVertical(int startI, int centerJ, int maxCount, int originalStateCountTotal) {
MonochromeBitmapSource image = this.image;
private float crossCheckVertical(int startI, int centerJ, int maxCount,
int originalStateCountTotal) throws ReaderException {
BinaryBitmap image = this.image;
int maxI = image.getHeight();
int[] stateCount = getCrossCheckStateCount();
@ -287,7 +288,8 @@ public class FinderPatternFinder {
// If we found a finder-pattern-like section, but its size is more than 20% different than
// the original, assume it's a false positive
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] + stateCount[4];
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] +
stateCount[4];
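// Equivalent to checking whether the new total differs from the original by 20% or more:
// 5 * |new - original| >= original  <=>  |new - original| >= original / 5.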
if (5 * Math.abs(stateCountTotal - originalStateCountTotal) >= originalStateCountTotal) {
return Float.NaN;
}
@ -300,8 +302,9 @@ public class FinderPatternFinder {
* except it reads horizontally instead of vertically. This is used to cross-check
* a vertical cross check and locate the real center of the finder pattern.</p>
*/
private float crossCheckHorizontal(int startJ, int centerI, int maxCount, int originalStateCountTotal) {
MonochromeBitmapSource image = this.image;
private float crossCheckHorizontal(int startJ, int centerI, int maxCount,
int originalStateCountTotal) throws ReaderException {
BinaryBitmap image = this.image;
int maxJ = image.getWidth();
int[] stateCount = getCrossCheckStateCount();
@ -354,7 +357,8 @@ public class FinderPatternFinder {
// If we found a finder-pattern-like section, but its size is significantly different than
// the original, assume it's a false positive
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] + stateCount[4];
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] +
stateCount[4];
if (5 * Math.abs(stateCountTotal - originalStateCountTotal) >= originalStateCountTotal) {
return Float.NaN;
}
@ -378,10 +382,9 @@ public class FinderPatternFinder {
* @param j end of possible finder pattern in row
* @return true if a finder pattern candidate was found this time
*/
protected boolean handlePossibleCenter(int[] stateCount,
int i,
int j) {
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] + stateCount[4];
protected boolean handlePossibleCenter(int[] stateCount, int i, int j) throws ReaderException {
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] +
stateCount[4];
float centerJ = centerFromEnd(stateCount, j);
float centerI = crossCheckVertical(i, (int) centerJ, stateCount[2], stateCountTotal);
if (!Float.isNaN(centerI)) {

View file

@ -17,12 +17,13 @@
package com.google.zxing.common;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.LuminanceSource;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.j2se.BufferedImageMonochromeBitmapSource;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import junit.framework.TestCase;
@ -182,11 +183,12 @@ public abstract class AbstractBlackBoxTestCase extends TestCase {
for (int x = 0; x < testCount; x++) {
float rotation = testResults.get(x).getRotation();
BufferedImage rotatedImage = rotateImage(image, rotation);
MonochromeBitmapSource source = new BufferedImageMonochromeBitmapSource(rotatedImage);
if (decode(source, rotation, expectedText, false)) {
LuminanceSource source = new BufferedImageLuminanceSource(rotatedImage);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
if (decode(bitmap, rotation, expectedText, false)) {
passedCounts[x]++;
}
if (decode(source, rotation, expectedText, true)) {
if (decode(bitmap, rotation, expectedText, true)) {
tryHarderCounts[x]++;
}
}
@ -227,7 +229,7 @@ public abstract class AbstractBlackBoxTestCase extends TestCase {
return new SummaryResults(totalFound, totalMustPass, totalTests);
}
private boolean decode(MonochromeBitmapSource source, float rotation, String expectedText,
private boolean decode(BinaryBitmap source, float rotation, String expectedText,
boolean tryHarder) {
Result result;
String suffix = " (" + (tryHarder ? "try harder, " : "") + "rotation: " + rotation + ')';

View file

@ -16,18 +16,20 @@
package com.google.zxing.common;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.j2se.BufferedImageMonochromeBitmapSource;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.List;
import javax.imageio.ImageIO;
/**
* This abstract class looks for negative results, i.e. it only allows a certain number of false
@ -106,10 +108,11 @@ public abstract class AbstractNegativeBlackBoxTestCase extends AbstractBlackBoxT
*/
private boolean checkForFalsePositives(BufferedImage image, float rotationInDegrees) {
BufferedImage rotatedImage = rotateImage(image, rotationInDegrees);
MonochromeBitmapSource source = new BufferedImageMonochromeBitmapSource(rotatedImage);
LuminanceSource source = new BufferedImageLuminanceSource(rotatedImage);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
Result result;
try {
result = getReader().decode(source);
result = getReader().decode(bitmap);
System.out.println("Found false positive: '" + result.getText() + "' with format '" +
result.getBarcodeFormat() + "' (rotation: " + rotationInDegrees + ')');
return false;
@ -118,13 +121,14 @@ public abstract class AbstractNegativeBlackBoxTestCase extends AbstractBlackBoxT
// Try "try harder" getMode
try {
result = getReader().decode(source, TRY_HARDER_HINT);
System.out.println("Try harder found false positive: '" + result.getText() + "' with format '" +
result.getBarcodeFormat() + "' (rotation: " + rotationInDegrees + ')');
result = getReader().decode(bitmap, TRY_HARDER_HINT);
System.out.println("Try harder found false positive: '" + result.getText() +
"' with format '" + result.getBarcodeFormat() + "' (rotation: " +
rotationInDegrees + ')');
return false;
} catch (ReaderException re) {
}
return true;
}
}
}

View file

@ -1,43 +0,0 @@
/*
* Copyright 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.common;
import com.google.zxing.ReaderException;
import junit.framework.TestCase;
/**
* @author Sean Owen
*/
public final class BlackPointEstimatorTestCase extends TestCase {
public void testBasic() throws ReaderException {
int[] histogram = { 0, 0, 11, 43, 37, 18, 3, 1, 0, 0, 13, 36, 24, 0, 11, 2 };
int point = BlackPointEstimator.findBestValley(histogram);
assertEquals(8, point);
}
public void testTooLittleRange() {
try {
int[] histogram = { 0, 0, 0, 0, 0, 0, 1, 43, 48, 18, 3, 1, 0, 0, 0, 0 };
BlackPointEstimator.findBestValley(histogram);
fail("Should have thrown an exception");
} catch (ReaderException re) {
// good
}
}
}

View file

@ -0,0 +1,93 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.j2me;
import com.google.zxing.LuminanceSource;
import javax.microedition.lcdui.Image;
/**
* A LuminanceSource based on Java ME's Image class. It does not support cropping or rotation.
*
* @author dswitkin@google.com (Daniel Switkin)
* @author Sean Owen
*/
public final class LCDUIImageLuminanceSource extends LuminanceSource {
private final Image image;
private int[] rgbData;
public LCDUIImageLuminanceSource(Image image) {
super(image.getWidth(), image.getHeight());
this.image = image;
}
// Instead of multiplying by 306, 601, 117, we multiply by 256, 512, 256, so that
// the multiplies can be implemented as shifts.
//
// Really, it's:
//
// return ((((pixel >> 16) & 0xFF) << 8) +
// (((pixel >> 8) & 0xFF) << 9) +
// (( pixel & 0xFF) << 8)) >> 10;
//
// That is, we're replacing the coefficients in the original with powers of two,
// which can be implemented as shifts, even though changing the coefficients slightly
// alters the conversion. The difference is not significant for our purposes.
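// As a quick worked example (illustrative only): for a pure green pixel 0x00FF00, the exact
// coefficients give (306*0 + 601*255 + 117*0) >> 10 = 149, while the shift-based form below
// gives (0 + (255 << 1) + 0) >> 2 = 127 -- close enough for thresholding purposes.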
public byte[] getRow(int y, byte[] row) {
if (y < 0 || y >= getHeight()) {
throw new IllegalArgumentException("Requested row is outside the image: " + y);
}
int width = getWidth();
if (row == null || row.length < width) {
row = new byte[width];
}
if (rgbData == null || rgbData.length < width) {
rgbData = new int[width];
}
image.getRGB(rgbData, 0, width, 0, y, width, 1);
for (int x = 0; x < width; x++) {
int pixel = rgbData[x];
int luminance = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
row[x] = (byte) luminance;
}
return row;
}
public byte[] getMatrix() {
int width = getWidth();
int height = getHeight();
int area = width * height;
byte[] matrix = new byte[area];
int[] rgb = new int[area];
image.getRGB(rgb, 0, width, 0, 0, width, height);
for (int y = 0; y < height; y++) {
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = rgb[offset + x];
int luminance = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
matrix[offset + x] = (byte) luminance;
}
}
return matrix;
}
}

View file

@ -1,93 +0,0 @@
/*
* Copyright 2007 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.j2me;
import com.google.zxing.common.BaseMonochromeBitmapSource;
import javax.microedition.lcdui.Image;
/**
* <p>An implementation based on Java ME's {@link Image} representation.</p>
*
* @author Sean Owen
* @author Daniel Switkin (dswitkin@google.com)
*/
public final class LCDUIImageMonochromeBitmapSource extends BaseMonochromeBitmapSource {
private final Image image;
private final int[] pixelHolder;
public LCDUIImageMonochromeBitmapSource(Image image) {
super(image.getWidth(), image.getHeight());
this.image = image;
pixelHolder = new int[1];
}
// This is expensive and should be used very sparingly.
public int getLuminance(int x, int y) {
image.getRGB(pixelHolder, 0, getWidth(), x, y, 1, 1);
int pixel = pixelHolder[0];
// Instead of multiplying by 306, 601, 117, we multiply by 256, 512, 256, so that
// the multiplies can be implemented as shifts.
//
// Really, it's:
//
// return ((((pixel >> 16) & 0xFF) << 8) +
// (((pixel >> 8) & 0xFF) << 9) +
// (( pixel & 0xFF) << 8)) >> 10;
//
// That is, we're replacing the coefficients in the original with powers of two,
// which can be implemented as shifts, even though changing the coefficients slightly
// corrupts the conversion. Not significant for our purposes.
return (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
}
// For efficiency, the RGB data and the luminance data share the same array.
public int[] getLuminanceRow(int y, int[] row) {
int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
image.getRGB(row, 0, width, 0, y, width, 1);
for (int x = 0; x < width; x++) {
int pixel = row[x];
row[x] = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
}
return row;
}
public int[] getLuminanceColumn(int x, int[] column) {
int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}
image.getRGB(column, 0, 1, x, 0, 1, height);
for (int y = 0; y < height; y++) {
int pixel = column[y];
column[y] = (((pixel & 0x00FF0000) >> 16) +
((pixel & 0x0000FF00) >> 7) +
(pixel & 0x000000FF )) >> 2;
}
return column;
}
}

View file

@ -16,11 +16,13 @@
package com.google.zxing.client.j2me;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.common.GlobalHistogramBinarizer;
import javax.microedition.lcdui.Image;
import javax.microedition.media.MediaException;
@ -76,9 +78,10 @@ final class SnapshotThread implements Runnable {
multimediaManager.setFocus(player);
byte[] snapshot = takeSnapshot();
Image capturedImage = Image.createImage(snapshot, 0, snapshot.length);
MonochromeBitmapSource source = new LCDUIImageMonochromeBitmapSource(capturedImage);
LuminanceSource source = new LCDUIImageLuminanceSource(capturedImage);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
Reader reader = new MultiFormatReader();
Result result = reader.decode(source);
Result result = reader.decode(bitmap);
zXingMIDlet.handleDecodedText(result);
} catch (ReaderException re) {
// Show a friendlier message on a mere failure to read the barcode

View file

@ -0,0 +1,138 @@
/*
* Copyright 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.j2se;
import com.google.zxing.LuminanceSource;
import java.awt.geom.AffineTransform;
import java.awt.image.AffineTransformOp;
import java.awt.image.BufferedImage;
import java.awt.image.BufferedImageOp;
/**
* This LuminanceSource implementation is meant for J2SE clients and our blackbox unit tests.
*
* @author dswitkin@google.com (Daniel Switkin)
* @author Sean Owen
*/
public final class BufferedImageLuminanceSource extends LuminanceSource {
private final BufferedImage image;
private final int left;
private final int top;
private int[] rgbData;
public BufferedImageLuminanceSource(BufferedImage image) {
this(image, 0, 0, image.getWidth(), image.getHeight());
}
public BufferedImageLuminanceSource(BufferedImage image, int left, int top, int width,
int height) {
super(width, height);
int sourceWidth = image.getWidth();
int sourceHeight = image.getHeight();
if (left + width > sourceWidth || top + height > sourceHeight) {
throw new IllegalArgumentException("Crop rectangle does not fit within image data.");
}
this.image = image;
this.left = left;
this.top = top;
}
// These methods use an integer calculation for luminance derived from:
// <code>Y = 0.299R + 0.587G + 0.114B</code>
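// The coefficients 306, 601 and 117 are simply 0.299, 0.587 and 0.114 scaled by 1024 and
// rounded (306 + 601 + 117 = 1024), so the final >> 10 stands in for a divide by 1024.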
public byte[] getRow(int y, byte[] row) {
if (y < 0 || y >= getHeight()) {
throw new IllegalArgumentException("Requested row is outside the image: " + y);
}
int width = getWidth();
if (row == null || row.length < width) {
row = new byte[width];
}
if (rgbData == null || rgbData.length < width) {
rgbData = new int[width];
}
image.getRGB(left, top + y, width, 1, rgbData, 0, image.getWidth());
for (int x = 0; x < width; x++) {
int pixel = rgbData[x];
int luminance = (306 * ((pixel >> 16) & 0xFF) +
601 * ((pixel >> 8) & 0xFF) +
117 * (pixel & 0xFF)) >> 10;
row[x] = (byte) luminance;
}
return row;
}
public byte[] getMatrix() {
int width = getWidth();
int height = getHeight();
int area = width * height;
byte[] matrix = new byte[area];
int[] rgb = new int[area];
image.getRGB(left, top, width, height, rgb, 0, image.getWidth());
for (int y = 0; y < height; y++) {
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = rgb[offset + x];
int luminance = (306 * ((pixel >> 16) & 0xFF) +
601 * ((pixel >> 8) & 0xFF) +
117 * (pixel & 0xFF)) >> 10;
matrix[offset + x] = (byte) luminance;
}
}
return matrix;
}
public boolean isCropSupported() {
return true;
}
public LuminanceSource crop(int left, int top, int width, int height) {
return new BufferedImageLuminanceSource(image, left, top, width, height);
}
// Can't run AffineTransforms on images of unknown format.
public boolean isRotateSupported() {
return image.getType() != BufferedImage.TYPE_CUSTOM;
}
public LuminanceSource rotateCounterClockwise() {
if (!isRotateSupported()) {
throw new IllegalStateException("Rotate not supported");
}
int sourceWidth = image.getWidth();
int sourceHeight = image.getHeight();
// Rotate 90 degrees counterclockwise.
AffineTransform transform = new AffineTransform(0.0, -1.0, 1.0, 0.0, 0.0, sourceWidth);
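// With this matrix (m00=0, m10=-1, m01=1, m11=0, m02=0, m12=sourceWidth), a source pixel
// (x, y) maps to (y, sourceWidth - x), i.e. a 90 degree counterclockwise rotation.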
BufferedImageOp op = new AffineTransformOp(transform, AffineTransformOp.TYPE_NEAREST_NEIGHBOR);
// Note width/height are flipped since we are rotating 90 degrees.
BufferedImage rotatedImage = new BufferedImage(sourceHeight, sourceWidth, image.getType());
op.filter(image, rotatedImage);
// Maintain the cropped region, but rotate it too.
int width = getWidth();
return new BufferedImageLuminanceSource(rotatedImage, top, sourceWidth - (left + width),
getHeight(), width);
}
}

View file

@ -1,166 +0,0 @@
/*
* Copyright 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.client.j2se;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.common.BaseMonochromeBitmapSource;
import java.awt.geom.AffineTransform;
import java.awt.image.AffineTransformOp;
import java.awt.image.BufferedImage;
import java.awt.image.BufferedImageOp;
/**
* <p>An implementation based upon {@link BufferedImage}. This provides access to the
* underlying image as if it were a monochrome image. Behind the scenes, it is evaluating
* the luminance of the underlying image by retrieving its pixels' RGB values.</p>
*
* <p>This may also be used to construct a {@link MonochromeBitmapSource}
* based on a region of a {@link BufferedImage}; see
* {@link #BufferedImageMonochromeBitmapSource(BufferedImage, int, int, int, int)}.</p>
*
* @author Sean Owen
* @author Daniel Switkin (dswitkin@google.com)
*/
public final class BufferedImageMonochromeBitmapSource extends BaseMonochromeBitmapSource {
private final BufferedImage image;
private final int left;
private final int top;
/**
* Creates an instance that uses the entire given image as a source of pixels to decode.
*
* @param image image to decode
*/
public BufferedImageMonochromeBitmapSource(BufferedImage image) {
this(image, 0, 0, image.getWidth(), image.getHeight());
}
/**
* Creates an instance that uses only a region of the given image as a source of pixels to decode.
*
* @param image image to decode a region of
* @param left x coordinate of leftmost pixels to decode
* @param top y coordinate of topmost pixels to decode
* @param right one more than the x coordinate of rightmost pixels to decode, i.e. we will decode
* pixels whose x coordinate is in [left,right)
* @param bottom likewise, one more than the y coordinate of the bottommost pixels to decode
*/
public BufferedImageMonochromeBitmapSource(BufferedImage image, int left, int top, int right,
int bottom) {
super(right - left, bottom - top);
this.image = image;
int sourceHeight = image.getHeight();
int sourceWidth = image.getWidth();
if (left < 0 || top < 0 || right > sourceWidth || bottom > sourceHeight || right <= left ||
bottom <= top) {
throw new IllegalArgumentException("Invalid bounds: (" + top + ',' + left + ") (" + right +
',' + bottom + ')');
}
this.left = left;
this.top = top;
}
/**
* @return underlying {@link BufferedImage} behind this instance. Note that even if this instance
* only uses a subset of the full image, the returned value here represents the entire backing
* image.
*/
public BufferedImage getImage() {
return image;
}
@Override
public MonochromeBitmapSource rotateCounterClockwise() {
if (!isRotateSupported()) {
throw new IllegalStateException("Rotate not supported");
}
int sourceWidth = image.getWidth();
int sourceHeight = image.getHeight();
// 90 degrees counterclockwise:
AffineTransform transform = new AffineTransform(0.0, -1.0, 1.0, 0.0, 0.0, sourceWidth);
BufferedImageOp op = new AffineTransformOp(transform, AffineTransformOp.TYPE_NEAREST_NEIGHBOR);
// Note width/height are flipped since we are rotating 90 degrees:
BufferedImage rotatedImage = new BufferedImage(sourceHeight, sourceWidth, image.getType());
op.filter(image, rotatedImage);
return new BufferedImageMonochromeBitmapSource(rotatedImage,
top,
sourceWidth - (left + getWidth()),
top + getHeight(),
sourceWidth - left);
}
@Override
public boolean isRotateSupported() {
// Can't run AffineTransforms on images of unknown format
return image.getType() != BufferedImage.TYPE_CUSTOM;
}
/**
* Extracts luminance from a pixel from this source. By default, the source is assumed to use RGB,
* so this implementation computes luminance as a function of the red, green and blue components as
* follows:
*
* <code>Y = 0.299R + 0.587G + 0.114B</code>
*
* where R, G, and B are values in [0,1].
*/
@Override
public int getLuminance(int x, int y) {
int pixel = image.getRGB(left + x, top + y);
// Coefficients add up to 1024 to make the divide into a fast shift
return (306 * ((pixel >> 16) & 0xFF) +
601 * ((pixel >> 8) & 0xFF) +
117 * (pixel & 0xFF)) >> 10;
}
@Override
public int[] getLuminanceRow(int y, int[] row) {
int width = getWidth();
if (row == null || row.length < width) {
row = new int[width];
}
image.getRGB(left, top + y, width, 1, row, 0, width);
for (int x = 0; x < width; x++) {
int pixel = row[x];
row[x] = (306 * ((pixel >> 16) & 0xFF) +
601 * ((pixel >> 8) & 0xFF) +
117 * (pixel & 0xFF)) >> 10;
}
return row;
}
@Override
public int[] getLuminanceColumn(int x, int[] column) {
int height = getHeight();
if (column == null || column.length < height) {
column = new int[height];
}
image.getRGB(left + x, top, 1, height, column, 0, 1);
for (int y = 0; y < height; y++) {
int pixel = column[y];
column[y] = (306 * ((pixel >> 16) & 0xFF) +
601 * ((pixel >> 8) & 0xFF) +
117 * (pixel & 0xFF)) >> 10;
}
return column;
}
}

View file

@ -16,15 +16,17 @@
package com.google.zxing.client.j2se;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.result.ParsedResult;
import com.google.zxing.client.result.ResultParser;
import com.google.zxing.common.BitArray;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.GlobalHistogramBinarizer;
import java.awt.image.BufferedImage;
import java.io.File;
@ -165,11 +167,12 @@ public final class CommandLineRunner {
return null;
}
try {
MonochromeBitmapSource source = new BufferedImageMonochromeBitmapSource(image);
LuminanceSource source = new BufferedImageLuminanceSource(image);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
if (dumpBlackPoint) {
dumpBlackPoint(uri, image, source);
dumpBlackPoint(uri, image, bitmap);
}
Result result = new MultiFormatReader().decode(source, hints);
Result result = new MultiFormatReader().decode(bitmap, hints);
ParsedResult parsedResult = ResultParser.parseResult(result);
System.out.println(uri.toString() + " (format: " + result.getBarcodeFormat() +
", type: " + parsedResult.getType() + "):\nRaw result:\n" + result.getText() +
@ -184,14 +187,15 @@ public final class CommandLineRunner {
// Writes out a single PNG which is three times the width of the input image, containing from left
// to right: the original image, the row sampling monochrome version, and the 2D sampling
// monochrome version.
private static void dumpBlackPoint(URI uri, BufferedImage image, MonochromeBitmapSource source) {
// TODO: Update to compare different Binarizer implementations.
private static void dumpBlackPoint(URI uri, BufferedImage image, BinaryBitmap bitmap) {
String inputName = uri.getPath();
if (inputName.contains(".mono.png")) {
return;
}
int width = source.getWidth();
int height = source.getHeight();
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int stride = width * 3;
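// Each output row is laid out as [original | getBlackRow() result | getBlackMatrix() result],
// so the second panel starts at x offset 'width' and the third at '2 * width'.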
int[] pixels = new int[stride * height];
@ -207,16 +211,16 @@ public final class CommandLineRunner {
BitArray row = new BitArray(width);
for (int y = 0; y < height; y++) {
try {
source.estimateBlackPoint(BlackPointEstimationMethod.ROW_SAMPLING, y);
row = bitmap.getBlackRow(y, row);
} catch (ReaderException e) {
// If the row histogram failed, draw a red line and keep going
// If fetching the row failed, draw a red line and keep going.
int offset = y * stride + width;
for (int x = 0; x < width; x++) {
pixels[offset + x] = 0xffff0000;
}
continue;
}
row = source.getBlackRow(y, row, 0, width);
int offset = y * stride + width;
for (int x = 0; x < width; x++) {
if (row.get(x)) {
@ -229,12 +233,11 @@ public final class CommandLineRunner {
// 2D sampling
try {
source.estimateBlackPoint(BlackPointEstimationMethod.TWO_D_SAMPLING, 0);
for (int y = 0; y < height; y++) {
row = source.getBlackRow(y, row, 0, width);
BitMatrix matrix = bitmap.getBlackMatrix();
int offset = y * stride + width * 2;
for (int x = 0; x < width; x++) {
if (row.get(x)) {
if (matrix.get(x, y)) {
pixels[offset + x] = 0xff000000;
} else {
pixels[offset + x] = 0xffffffff;

View file

@ -16,27 +16,22 @@
package com.google.zxing.client.j2se;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.common.GlobalHistogramBinarizer;
import javax.imageio.ImageIO;
import javax.swing.Icon;
import javax.swing.ImageIcon;
import javax.swing.JFileChooser;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextArea;
import java.awt.Container;
import java.awt.Dimension;
import java.awt.FlowLayout;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import javax.imageio.ImageIO;
import javax.swing.*;
/**
* <p>Simple GUI frontend to the library. Right now, only decodes a local file.
* This definitely needs some improvement. Just throwing something down to start.</p>
@ -91,14 +86,15 @@ public final class GUIRunner extends JFrame {
if (image == null) {
return "Could not decode image";
}
MonochromeBitmapSource source = new BufferedImageMonochromeBitmapSource(image);
LuminanceSource source = new BufferedImageLuminanceSource(image);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
Result result;
try {
result = new MultiFormatReader().decode(source);
result = new MultiFormatReader().decode(bitmap);
} catch (ReaderException re) {
return re.toString();
}
return result.getText();
}
}
}

View file

@ -16,10 +16,12 @@
package com.google.zxing.client.j2se;
import com.google.zxing.BlackPointEstimationMethod;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.ReaderException;
import com.google.zxing.common.BitArray;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.GlobalHistogramBinarizer;
import java.awt.image.BufferedImage;
import java.io.File;
@ -29,10 +31,13 @@ import java.net.URI;
import javax.imageio.ImageIO;
/**
* Utility application for evaluating the effectiveness of the BlackPointEstimator used by
* MonochromeBitmapSource. Given a set of images on the command line, it converts each to a
* black-and-white PNG. The result is placed in a file based on the input name, with either
* "_converted_row" or "_converted_2d" appended.
* A utility application for evaluating the effectiveness of various thresholding algorithms.
* Given a set of images on the command line, it converts each to a black-and-white PNG.
* The result is placed in a file based on the input name, with either ".row.png" or ".2d.png"
* appended.
*
* TODO: Needs to be updated to accept different Binarizer implementations.
* TODO: Consider whether to keep this separate app, as CommandLineRunner has similar functionality.
*
* @author alasdair@google.com (Alasdair Mackintosh)
* @author dswitkin@google.com (Daniel Switkin)
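*
* For example, an invocation like "java com.google.zxing.client.j2se.ImageConverter -2d photo.jpg"
* (the file name is illustrative) would write the thresholded result to photo.2d.png.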
@ -43,7 +48,7 @@ public final class ImageConverter {
private static final int WHITE = 0xFFFFFFFF;
private static final int BLACK = 0xFF000000;
private static final int RED = 0xFFFF0000;
private static BlackPointEstimationMethod sMethod = BlackPointEstimationMethod.ROW_SAMPLING;
private static boolean rowSampling = false;
private ImageConverter() {
}
@ -51,9 +56,9 @@ public final class ImageConverter {
public static void main(String[] args) throws Exception {
for (String arg : args) {
if (arg.equals("-row")) {
sMethod = BlackPointEstimationMethod.ROW_SAMPLING;
rowSampling = true;
} else if (arg.equals("-2d")) {
sMethod = BlackPointEstimationMethod.TWO_D_SAMPLING;
rowSampling = false;
} else if (arg.startsWith("-")) {
System.out.println("Ignoring unrecognized option: " + arg);
}
@ -90,28 +95,18 @@ public final class ImageConverter {
private static void convertImage(URI uri) throws IOException {
BufferedImage image = ImageIO.read(uri.toURL());
MonochromeBitmapSource src = new BufferedImageMonochromeBitmapSource(image);
int width = src.getWidth();
int height = src.getHeight();
LuminanceSource source = new BufferedImageLuminanceSource(image);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
int width = bitmap.getWidth();
int height = bitmap.getHeight();
BufferedImage result = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
BitArray array = new BitArray(width);
try {
// Run the 2D sampling once up front
if (sMethod == BlackPointEstimationMethod.TWO_D_SAMPLING) {
src.estimateBlackPoint(sMethod, 0);
}
} catch (ReaderException e) {
System.out.println(e.toString());
return;
}
if (rowSampling) {
for (int y = 0; y < height; y++) {
// Run the 1D sampling once per row
if (sMethod == BlackPointEstimationMethod.ROW_SAMPLING) {
try {
src.estimateBlackPoint(sMethod, y);
array = bitmap.getBlackRow(y, array);
} catch (ReaderException e) {
// Draw rows with insufficient dynamic range in red
for (int x = 0; x < width; x++) {
@ -119,15 +114,23 @@ public final class ImageConverter {
}
continue;
}
}
// Fetch the entire row at once, then fill out the result image
src.getBlackRow(y, array, 0, width);
for (int x = 0; x < width; x++) {
result.setRGB(x, y, array.get(x) ? BLACK : WHITE);
}
}
} else {
try {
BitMatrix matrix = bitmap.getBlackMatrix();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
result.setRGB(x, y, matrix.get(x, y) ? BLACK : WHITE);
}
}
} catch (ReaderException e) {
}
}
File output = getOutput(uri);
System.out.printf("Writing output to %s\n", output);
@ -168,7 +171,7 @@ public final class ImageConverter {
if (dotpos != -1) {
name = name.substring(0, dotpos);
}
String suffix = (sMethod == BlackPointEstimationMethod.ROW_SAMPLING) ? "row" : "2d";
String suffix = rowSampling ? "row" : "2d";
result = new File(name + '.' + suffix + '.' + FORMAT);
}
return result;

View file

@ -16,13 +16,15 @@
package com.google.zxing.client.rim;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.MonochromeBitmapSource;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.j2me.LCDUIImageMonochromeBitmapSource;
import com.google.zxing.common.GlobalHistogramBinarizer;
import com.google.zxing.client.j2me.LCDUIImageLuminanceSource;
import com.google.zxing.client.rim.persistence.AppSettings;
import com.google.zxing.client.rim.persistence.history.DecodeHistory;
import com.google.zxing.client.rim.persistence.history.DecodeHistoryItem;
@ -238,13 +240,14 @@ final class ZXingLMMainScreen extends MainScreen {
if (capturedImage != null) {
Log.info("Got image...");
MonochromeBitmapSource source = new LCDUIImageMonochromeBitmapSource(capturedImage);
LuminanceSource source = new LCDUIImageLuminanceSource(capturedImage);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
Result result;
ReasonableTimer decodingTimer = null;
try {
decodingTimer = new ReasonableTimer();
Log.info("Attempting to decode image...");
result = reader.decode(source, readerHints);
result = reader.decode(bitmap, readerHints);
decodingTimer.finished();
} catch (ReaderException e) {
Log.error("Could not decode image: " + e);

View file

@ -16,11 +16,22 @@
package com.google.zxing.web;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.client.j2se.BufferedImageMonochromeBitmapSource;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.GlobalHistogramBinarizer;
import java.awt.image.BufferedImage;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Properties;
import java.util.TimerTask;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.imageio.ImageIO;
import javax.mail.Address;
@ -29,21 +40,14 @@ import javax.mail.Flags;
import javax.mail.Folder;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.Service;
import javax.mail.Session;
import javax.mail.Store;
import javax.mail.Transport;
import javax.mail.Service;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeBodyPart;
import javax.mail.internet.MimeMessage;
import javax.mail.internet.MimeMultipart;
import java.awt.image.BufferedImage;
import java.io.UnsupportedEncodingException;
import java.io.IOException;
import java.util.Properties;
import java.util.TimerTask;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* A {@link TimerTask} which repeatedly checks an e-mail account for messages with an attached
@ -119,7 +123,8 @@ final class DecodeEmailTask extends TimerTask {
}
}
private void processMessage(Session session, Message message) throws MessagingException, IOException {
private void processMessage(Session session, Message message) throws MessagingException,
IOException {
Object content = message.getContent();
if (content instanceof MimeMultipart) {
MimeMultipart mimeContent = (MimeMultipart) content;
@ -141,7 +146,9 @@ final class DecodeEmailTask extends TimerTask {
Reader reader = new MultiFormatReader();
Result result = null;
try {
result = reader.decode(new BufferedImageMonochromeBitmapSource(image), DecodeServlet.HINTS);
LuminanceSource source = new BufferedImageLuminanceSource(image);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
result = reader.decode(bitmap, DecodeServlet.HINTS);
} catch (ReaderException re) {
log.info("Decoding FAILED");
}

View file

@ -16,15 +16,19 @@
package com.google.zxing.web;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.DecodeHintType;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.Reader;
import com.google.zxing.ReaderException;
import com.google.zxing.Result;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.client.j2se.BufferedImageMonochromeBitmapSource;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.client.result.ParsedResult;
import com.google.zxing.client.result.ResultParser;
import com.google.zxing.common.GlobalHistogramBinarizer;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
@ -36,8 +40,8 @@ import org.apache.http.HttpMessage;
import org.apache.http.HttpResponse;
import org.apache.http.HttpVersion;
import org.apache.http.client.HttpClient;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.conn.scheme.PlainSocketFactory;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.scheme.SchemeRegistry;
@ -48,12 +52,6 @@ import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpParams;
import org.apache.http.params.HttpProtocolParams;
import javax.imageio.ImageIO;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.awt.image.BufferedImage;
import java.io.IOException;
import java.io.InputStream;
@ -68,6 +66,13 @@ import java.util.List;
import java.util.Vector;
import java.util.logging.Logger;
import javax.imageio.ImageIO;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* {@link HttpServlet} which decodes images containing barcodes. Given a URL, it will
* retrieve the image and decode it. It can also process image files uploaded via POST.
@ -122,8 +127,8 @@ public final class DecodeServlet extends HttpServlet {
}
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
String imageURIString = request.getParameter("u");
if (imageURIString == null || imageURIString.length() == 0) {
response.sendRedirect("badurl.jspx");
@ -213,8 +218,8 @@ public final class DecodeServlet extends HttpServlet {
}
private static void processStream(InputStream is, HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
private static void processStream(InputStream is, HttpServletRequest request,
HttpServletResponse response) throws ServletException, IOException {
BufferedImage image = ImageIO.read(is);
if (image == null) {
response.sendRedirect("badimage.jspx");
@ -224,7 +229,9 @@ public final class DecodeServlet extends HttpServlet {
Reader reader = new MultiFormatReader();
Result result;
try {
result = reader.decode(new BufferedImageMonochromeBitmapSource(image), HINTS);
LuminanceSource source = new BufferedImageLuminanceSource(image);
BinaryBitmap bitmap = new BinaryBitmap(new GlobalHistogramBinarizer(source));
result = reader.decode(bitmap, HINTS);
} catch (ReaderException re) {
log.info("DECODE FAILED: " + re.toString());
response.sendRedirect("notfound.jspx");