I'm developing an Android app to recognize the text on a particular plate, as in the photo here:
I have to recognize the text in white (e.g. near "Mod."). I'm using Google ML Kit's text recognition APIs, but they fail on it. So I'm using OpenCV to preprocess the image, but I don't know how to emphasize the white text so the OCR recognizes it. I have tried several things, such as contrast, brightness, gamma correction, and adaptive thresholding, but the results vary a lot depending on how the photo is taken. Do you have any ideas?
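For reference, this is roughly what the adaptive-thresholding attempt looks like (a minimal sketch using OpenCV's Python bindings; the file name and parameter values are only illustrative, not the actual app code):

import cv2
import numpy as np

img = cv2.imread("plate.jpg")  # hypothetical input path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# gamma correction via a lookup table (gamma < 1 brightens dark regions)
gamma = 0.6
lut = np.array([((i / 255.0) ** gamma) * 255 for i in range(256)], dtype=np.uint8)
gray = cv2.LUT(gray, lut)

# adaptive threshold so that lighting changes across the plate matter less
binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 31, 5)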
Thank you very much.
I coded this example in Python (since OpenCV's SIFT on Android is paid), but you can still use it to understand how to solve the problem.
First, I created this image as a template:
Step 1: Load images
""" 1. Load images """
# load image of plate
src_path = "nRHzD.jpg"
src = cv2.imread(src_path)
# load template of plate (to be looked for)
src_template_path = "nRHzD_template.jpg"
src_template = cv2.imread(src_template_path)
Step 2: Find the template using SIFT and perspective transformation
# convert images to gray scale
src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
src_template_gray = cv2.cvtColor(src_template, cv2.COLOR_BGR2GRAY)
# use SIFT to find template
n_matches_min = 10
template_found, homography = find_template(src_gray, src_template_gray, n_matches_min)
warp = transform_perspective_and_crop(homography, src, src_gray, src_template)
warp_gray = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
warp_hsv = cv2.cvtColor(warp, cv2.COLOR_BGR2HSV)
template_hsv = cv2.cvtColor(src_template, cv2.COLOR_BGR2HSV)
Step 3: Find regions of interest (using the green parts of the template image)
green_hsv_lower_bound = [50, 250, 250]
green_hsv_upper_bound = [60, 255, 255]
mask_rois, mask_rois_img = crop_img_in_hsv_range(warp, template_hsv, green_hsv_lower_bound, green_hsv_upper_bound)
roi_list = separate_rois(mask_rois, warp_gray)
# sort the rois by distance to the top left corner -> x (value[1]) + y (value[2])
roi_list = sorted(roi_list, key=lambda values: values[1]+values[2])
Step 4: Apply a Canny Edge detection to the rois (regions of interest)
for i, roi in enumerate(roi_list):
    roi_img, roi_x_offset, roi_y_offset = roi
    print("#roi:{} x:{} y:{}".format(i, roi_x_offset, roi_y_offset))
    roi_img_blur_threshold = cv2.Canny(roi_img, 40, 200)
    cv2.imshow("ROI image", roi_img_blur_threshold)
    cv2.waitKey()
There are many ways for you to detect the digits; one of the easiest approaches is to run K-Means clustering on each of the contours.
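For illustration, a minimal sketch of that K-Means idea (this is not part of the full code below; the helper name split_digits_by_kmeans, the n_digits parameter, and the Canny thresholds are assumptions for the sketch):

import cv2
import numpy as np

def split_digits_by_kmeans(roi_gray, n_digits):
    # edge pixels of the ROI become the points to cluster
    edges = cv2.Canny(roi_gray, 40, 200)
    points = np.column_stack(np.where(edges > 0)).astype(np.float32)  # rows of (y, x)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
    # cluster on the x coordinate only, so each cluster roughly covers one digit column
    _compactness, labels, _centers = cv2.kmeans(points[:, 1:2].copy(), n_digits, None,
                                                criteria, 10, cv2.KMEANS_PP_CENTERS)
    digit_boxes = []
    for k in range(n_digits):
        cluster = points[labels.ravel() == k]
        xy = np.ascontiguousarray(cluster[:, ::-1]).astype(np.int32)  # back to (x, y)
        digit_boxes.append(cv2.boundingRect(xy))
    return sorted(digit_boxes)  # left-to-right by x

You would still have to choose n_digits per ROI (e.g. from the template layout) and crop each bounding box before passing it to the OCR.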
Full code:
""" This code shows a way of getting the digit's edges in a pre-defined position (in green) """
import cv2
import numpy as np
def find_template(src_gray, src_template_gray, n_matches_min):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    """ find grid using SIFT """
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(src_template_gray, None)
    kp2, des2 = sift.detectAndCompute(src_gray, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    if len(good) > n_matches_min:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h_template, w_template = src_template_gray.shape
        pts = np.float32([[0, 0], [0, h_template - 1], [w_template - 1, h_template - 1], [w_template - 1, 0]]).reshape(-1, 1, 2)
        homography = cv2.perspectiveTransform(pts, M)
    else:
        print("Not enough matches are found - %d/%d" % (len(good), n_matches_min))
        matchesMask = None
    # show matches
    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)
    if matchesMask:
        src_gray_copy = src_gray.copy()
        sift_matches = cv2.polylines(src_gray_copy, [np.int32(homography)], True, 255, 2, cv2.LINE_AA)
        sift_matches = cv2.drawMatches(src_template_gray, kp1, src_gray_copy, kp2, good, None, **draw_params)
        return sift_matches, homography
def transform_perspective_and_crop(homography, src, src_gray, src_template):
    """ get mask and contour of template """
    mask_img_template = np.zeros(src_gray.shape, dtype=np.uint8)
    mask_img_template = cv2.polylines(mask_img_template, [np.int32(homography)], True, 255, 1, cv2.LINE_AA)
    _ret, contours, hierarchy = cv2.findContours(mask_img_template, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    template_contour = None
    # approximate the contour
    c = contours[0]
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # if our approximated contour has four points, then
    # we can assume that we have found our template
    warp = None
    if len(approx) == 4:
        template_contour = approx
        cv2.drawContours(mask_img_template, [template_contour], -1, (255, 0, 0), -1)
        """ Transform perspective """
        # now that we have our template contour, we need to determine
        # the top-left, top-right, bottom-right, and bottom-left
        # points so that we can later warp the image -- we'll start
        # by reshaping our contour to be our finals and initializing
        # our output rectangle in top-left, top-right, bottom-right,
        # and bottom-left order
        pts = template_contour.reshape(4, 2)
        rect = np.zeros((4, 2), dtype="float32")
        # the top-left point has the smallest sum whereas the
        # bottom-right has the largest sum
        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]
        rect[2] = pts[np.argmax(s)]
        # compute the difference between the points -- the top-right
        # will have the minimum difference and the bottom-left will
        # have the maximum difference
        diff = np.diff(pts, axis=1)
        rect[1] = pts[np.argmin(diff)]
        rect[3] = pts[np.argmax(diff)]
        # now that we have our rectangle of points, let's compute
        # the width of our new image
        (tl, tr, br, bl) = rect
        widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        # ...and now for the height of our new image
        heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        # take the maximum of the width and height values to reach
        # our final dimensions
        maxWidth = max(int(widthA), int(widthB))
        maxHeight = max(int(heightA), int(heightB))
        # construct our destination points which will be used to
        # map the screen to a top-down, "birds eye" view
        dst = np.array([
            [0, 0],
            [maxWidth - 1, 0],
            [maxWidth - 1, maxHeight - 1],
            [0, maxHeight - 1]], dtype="float32")
        # calculate the perspective transform matrix and warp
        # the perspective to grab the screen
        M = cv2.getPerspectiveTransform(rect, dst)
        warp = cv2.warpPerspective(src, M, (maxWidth, maxHeight))
        # resize warp to the template's size
        h_template, w_template, _n_channels = src_template.shape
        warp = cv2.resize(warp, (w_template, h_template), interpolation=cv2.INTER_AREA)
    return warp
def crop_img_in_hsv_range(img, hsv, lower_bound, upper_bound):
    mask = cv2.inRange(hsv, np.array(lower_bound), np.array(upper_bound))
    # do a MORPH_OPEN (erosion followed by dilation) to remove isolated pixels
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(img, img, mask=mask)
    return mask, res
def separate_rois(column_mask, img_gray):
    # go through each of the boxes
    # https://stackoverflow.com/questions/41592039/contouring-a-binary-mask-with-opencv-python
    border = cv2.copyMakeBorder(column_mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
    _, contours, hierarchy = cv2.findContours(border, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE, offset=(-1, -1))
    cell_list = []
    for contour in contours:
        cell_mask = np.zeros_like(img_gray)  # Create mask where white is what we want, black otherwise
        cv2.drawContours(cell_mask, [contour], -1, 255, -1)  # Draw filled contour in mask
        # turn that mask into a rectangle
        (x, y, w, h) = cv2.boundingRect(contour)
        #print("x:{} y:{} w:{} h:{}".format(x, y, w, h))
        cv2.rectangle(cell_mask, (x, y), (x + w, y + h), 255, -1)
        # copy the img_gray using that mask
        img_tmp_region = cv2.bitwise_and(img_gray, img_gray, mask=cell_mask)
        # Now crop
        (y, x) = np.where(cell_mask == 255)
        (top_y, top_x) = (np.min(y), np.min(x))
        (bottom_y, bottom_x) = (np.max(y), np.max(x))
        img_tmp_region = img_tmp_region[top_y:bottom_y + 1, top_x:bottom_x + 1]
        cell_list.append([img_tmp_region, top_x, top_y])
    return cell_list
""" 1. Load images """
# load image of plate
src_path = "nRHzD.jpg"
src = cv2.imread(src_path)
# load template of plate (to be looked for)
src_template_path = "nRHzD_template.jpg"
src_template = cv2.imread(src_template_path)
""" 2. Find the plate (using the template image) and crop it into a rectangle """
# convert images to gray scale
src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
src_template_gray = cv2.cvtColor(src_template, cv2.COLOR_BGR2GRAY)
# use SIFT to find template
n_matches_min = 10
template_found, homography = find_template(src_gray, src_template_gray, n_matches_min)
warp = transform_perspective_and_crop(homography, src, src_gray, src_template)
warp_gray = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
warp_hsv = cv2.cvtColor(warp, cv2.COLOR_BGR2HSV)
template_hsv = cv2.cvtColor(src_template, cv2.COLOR_BGR2HSV)
""" 3. Find regions of interest (using the green parts of the template image) """
green_hsv_lower_bound = [50, 250, 250]
green_hsv_upper_bound = [60, 255, 255]
mask_rois, mask_rois_img = crop_img_in_hsv_range(warp, template_hsv, green_hsv_lower_bound, green_hsv_upper_bound)
roi_list = separate_rois(mask_rois, warp_gray)
# sort the rois by distance to the top left corner -> x (value[1]) + y (value[2])
roi_list = sorted(roi_list, key=lambda values: values[1]+values[2])
""" 4. Apply a Canny Edge detection to the rois (regions of interest) """
for i, roi in enumerate(roi_list):
    roi_img, roi_x_offset, roi_y_offset = roi
    print("#roi:{} x:{} y:{}".format(i, roi_x_offset, roi_y_offset))
    roi_img_blur_threshold = cv2.Canny(roi_img, 40, 200)
    cv2.imshow("ROI image", roi_img_blur_threshold)
    cv2.waitKey()
I want to get a list that contains entire OrderInfo objects. For example, if I do this, the result is the kind of result I want:
def find_all(self):
    result_list = []
    orderDao = DaoUtil.DaoGeneric()
    session = orderDao.getSession()
    try:
        for row in session.query(OrderInfo).all():
            result_list.append({
                'id': row.id,
                'name': row.name,
                'age': row.age,
                'create_time': row.create_time.strftime("%Y-%m-%d %H:%M:%S"),
                'update_time': row.update_time.strftime("%Y-%m-%d %H:%M:%S"),
                'version': row.version
            })
        session.commit()
    except Exception as e:
        print(e)
        session.rollback()
    return result_list
But I want a list that contains OrderInfo objects taken from the query, because the result the query returns has other attributes (it is simply a list of DeclarativeBase instances) besides OrderInfo{id, name, age, create_time, update_time, version}; the query does not seem to return plain OrderInfo objects directly. The following is what I want:
def find_all(self):
    result_list = []
    orderDao = DaoUtil.DaoGeneric()
    session = orderDao.getSession()
    try:
        for row in session.query(OrderInfo).all():
            # if the row had a property holding just the OrderInfo object;
            # the Java equivalent would be: List<OrderInfo> orderList = session.query();
            # please help me achieve this
            result_list.append(row.orderInfo)
        session.commit()
    except Exception as e:
        print(e)
        session.rollback()
    return result_list
Because I have only just started using SQLAlchemy in Python, I am not very sure. How do I get a list containing OrderInfo objects from a query in SQLAlchemy?
@univerio
for row in session.query(OrderInfo).all():
The row variable on that line includes the following attributes:
_decl_class_registry,
_sa_class_manager,
_sa_instance_state,
metadata,
query,
id,
name,
age,
create_time,
update_time ,
version
Only these (OrderInfo{id, name, age, create_time, update_time, version}) are what I want to get; I do not want the other attributes (see the sketch after the model definition below).
OrderInfo:
from sqlalchemy import Column, Integer, String, Date, DateTime
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class OrderInfo(Base):
    __tablename__ = 'order_info'
    # __table__ = 'order_info'
    id = Column(Integer, primary_key=True)
    name = Column(String(100))
    age = Column(Integer)
    create_time = Column(Date)
    update_time = Column(Date)
    version = Column(Integer)
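A minimal sketch of how the mapped columns could be read off a row with sqlalchemy.inspect (this assumes the OrderInfo model above and an existing session; it is an illustration, not code from the project):

from sqlalchemy import inspect

mapper = inspect(OrderInfo)  # the mapper only knows the mapped table columns
for row in session.query(OrderInfo).all():
    # row is already an OrderInfo instance; attributes such as _sa_instance_state
    # are SQLAlchemy internals, not table columns
    column_values = {attr.key: getattr(row, attr.key) for attr in mapper.column_attrs}
    print(column_values)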
@univerio, I am writing here because comments have a length limit. It achieves this result, like this:
for row in session.query(OrderInfo).all():
    result_list.append({
        'id': row.id,
        'name': row.name,
        'age': row.age,
        'create_time': row.create_time.strftime("%Y-%m-%d %H:%M:%S"),
        'update_time': row.update_time.strftime("%Y-%m-%d %H:%M:%S"),
        'version': row.version
    })
OrderInfo has very few columns here, but if the OrderInfo object had several hundred columns, building the dict by hand would take a long time, so I want a simple way to achieve this, similar to List<OrderInfo> in Java.
@univerio, I found the answer I was looking for:
def find_all(self):
    result_list = []
    orderDao = DaoUtil.DaoGeneric()
    session = orderDao.getSession()
    try:
        for row in session.query(OrderInfo).all():
            result_list.append(DictUtil.object_as_dict(row))
        session.commit()
    except Exception as e:
        print(e)
        session.rollback()
    return result_list
from sqlalchemy import inspect

def object_as_dict(obj):
    result = {instance.key: getattr(obj, instance.key) for instance in inspect(obj).mapper.column_attrs}
    print(result)
    return result
Output:
[
{'updateTime': datetime.datetime(2017, 6, 15, 13, 56, 16), 'bankName': u'ICBC', 'bankNo': u'6228480666622220011', 'createTime': datetime.datetime(2017, 6, 15, 13, 56, 16), u'version': 0, u'id': 1},
{'updateTime': datetime.datetime(2017, 6, 15, 13, 57, 40), 'bankName': u'ICBC', 'bankNo': u'6228480666622220011', 'createTime': datetime.datetime(2017, 6, 15, 13, 57, 40), u'version': 0, u'id': 2},
{'updateTime': datetime.datetime(2017, 6, 15, 13, 58), 'bankName': u'ICBC', 'bankNo': u'6228480666622220011', 'createTime': datetime.datetime(2017, 6, 15, 13, 58), u'version': 0, u'id': 3}
]
I've been following this tutorial on how to use Weka, and I have reached a point where my code will not run. I realize that I am using a different version of Weka (3.8 as opposed to 3.6 in the tutorial), but I thought I made the necessary changes. I get an error message on the line linearRegression.buildClassifier(dataset); and I don't know why.
Error message:
Jul 19, 2016 10:47:21 AM com.github.fommil.netlib.BLAS <clinit>
WARNING: Failed to load implementation from: com.github.fommil.netlib.NativeSystemBLAS
Jul 19, 2016 10:47:21 AM com.github.fommil.netlib.BLAS <clinit>
WARNING: Failed to load implementation from: com.github.fommil.netlib.NativeRefBLAS
Jul 19, 2016 10:47:21 AM com.github.fommil.netlib.LAPACK <clinit>
WARNING: Failed to load implementation from: com.github.fommil.netlib.NativeSystemLAPACK
Jul 19, 2016 10:47:21 AM com.github.fommil.netlib.LAPACK <clinit>
WARNING: Failed to load implementation from: com.github.fommil.netlib.NativeRefLAPACK
Code:
// Define each attribute (or column), and give it a numerical column
// number
// Likely, a better design wouldn't require the column number, but
// would instead get it from the index in the container
Attribute a1 = new Attribute("houseSize", 0);
Attribute a2 = new Attribute("lotSize", 1);
Attribute a3 = new Attribute("bedrooms", 2);
Attribute a4 = new Attribute("granite", 3);
Attribute a5 = new Attribute("bathroom", 4);
Attribute a6 = new Attribute("sellingPrice", 5);
// Each element must be added to a FastVector, a custom
// container used in this version of Weka.
// Later versions of Weka corrected this mistake by only
// using an ArrayList
ArrayList<Attribute> attrs = new ArrayList<>();
attrs.add(a1);
attrs.add(a2);
attrs.add(a3);
attrs.add(a4);
attrs.add(a5);
attrs.add(a6);
// Each data instance needs to create an Instance class
// The constructor requires the number of columns that
// will be defined. In this case, this is a good design,
// since you can pass in empty values where they exist.
Instance i1 = new DenseInstance(6);
i1.setValue(a1, 3529);
i1.setValue(a2, 9191);
i1.setValue(a3, 6);
i1.setValue(a4, 0);
i1.setValue(a5, 0);
i1.setValue(a6, 205000);
Instance i2 = new DenseInstance(6);
i1.setValue(a1, 3247);
i1.setValue(a2, 10061);
i1.setValue(a3, 5);
i1.setValue(a4, 1);
i1.setValue(a5, 1);
i1.setValue(a6, 224900);
Instance i3 = new DenseInstance(6);
i1.setValue(a1, 4032);
i1.setValue(a2, 10150);
i1.setValue(a3, 5);
i1.setValue(a4, 0);
i1.setValue(a5, 1);
i1.setValue(a6, 197900);
Instance i4 = new DenseInstance(6);
i1.setValue(a1, 2397);
i1.setValue(a2, 14156);
i1.setValue(a3, 4);
i1.setValue(a4, 1);
i1.setValue(a5, 0);
i1.setValue(a6, 189900);
Instance i5 = new DenseInstance(6);
i1.setValue(a1, 2200);
i1.setValue(a2, 9600);
i1.setValue(a3, 4);
i1.setValue(a4, 0);
i1.setValue(a5, 1);
i1.setValue(a6, 195000);
Instance i6 = new DenseInstance(6);
i1.setValue(a1, 3536);
i1.setValue(a2, 19994);
i1.setValue(a3, 6);
i1.setValue(a4, 1);
i1.setValue(a5, 1);
i1.setValue(a6, 325000);
Instance i7 = new DenseInstance(6);
i1.setValue(a1, 2983);
i1.setValue(a2, 9365);
i1.setValue(a3, 5);
i1.setValue(a4, 0);
i1.setValue(a5, 1);
i1.setValue(a6, 230000);
// Each Instance has to be added to a larger container, the
// Instances class. In the constructor for this class, you
// must give it a name, pass along the Attributes that
// are used in the data set, and the number of
// Instance objects to be added. Again, probably not ideal design
// to require the number of objects to be added in the constructor,
// especially since you can specify 0 here, and then add Instance
// objects, and it will return the correct value later (so in
// other words, you should just pass in '0' here)
Instances dataset = new Instances("housePrices", attrs, 7);
dataset.add(i1);
dataset.add(i2);
dataset.add(i3);
dataset.add(i4);
dataset.add(i5);
dataset.add(i6);
dataset.add(i7);
// In the Instances class, we need to set the column that is
// the output (aka the dependent variable). You should remember
// that some data mining methods are used to predict an output
// variable, and regression is one of them.
dataset.setClassIndex(dataset.numAttributes() - 1);
// Create the LinearRegression model, which is the data mining
// model we're using in this example
linearRegression = new LinearRegression();
try {
    // This method does the "magic", and will compute the regression
    // model. It takes the entire dataset we've defined to this point.
    // When this method completes, all our "data mining" will be complete
    // and it is up to you to get information from the results
    linearRegression.buildClassifier(dataset);
} catch (Exception e) {
    e.printStackTrace();
}
}
That is not an error, but a warning. Weka cannot find some linear algebra libraries (LAPACK, BLAS) during the startup of your little Java app. It does not need them anyway for the linear regression task of fitting a curve to 7 data points.
(Read this for reference: https://github.com/fommil/netlib-java)
To get rid of the message, you can redirect the STDERR output of your program to /dev/null.
Using the Package Manager, I just installed the Weka package netlibNativeLinux (or try netlibNativeWindows or netlibOSX, whichever applies), included its jars in the build path, and got this output:
Jul 20, 2016 10:20:32 AM com.github.fommil.jni.JniLoader liberalLoad
INFO: successfully loaded /tmp/jniloader5044252696376965086netlib-native_system-linux-x86_64.so
Jul 20, 2016 10:20:32 AM com.github.fommil.jni.JniLoader load
INFO: already loaded netlib-native_system-linux-x86_64.so
I also got the output 219328.35717359098, just as the tutorial said. Did you forget to include the last lines of code from the tutorial, especially
System.out.println(myHouseValue);
?
I'm new to OpenCV's SVMs. I tried a sample classifier, but it only returns 0 as the predicted label. I even used the value 5 for training as well as for the prediction.
I've been changing the values about a hundred times, but I just don't get what's wrong. I'm using OpenCV 3.0 with Java. Here's my code:
Mat labels = new Mat(new Size(1,4),CvType.CV_32SC1);
labels.put(0, 0, 1);
labels.put(1, 0, 1);
labels.put(2, 0, 1);
labels.put(3, 0, 0);
Mat data = new Mat(new Size(1,4),CvType.CV_32FC1);
data.put(0, 0, 5);
data.put(1, 0, 2);
data.put(2, 0, 3);
data.put(3, 0, 8);
Mat testSamples = new Mat(new Size(1,1),CvType.CV_32FC1);
testSamples.put(0,0,5);
SVM svm = SVM.create();
TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER,100,0.1);
svm.setKernel(SVM.LINEAR);
svm.setType(SVM.C_SVC);
svm.setGamma(0.5);
svm.setNu(0.5);
svm.setC(1);
svm.setTermCriteria(criteria);
//data is N x 64 trained data Mat , labels is N x 1 label Mat with integer values;
svm.train(data, Ml.ROW_SAMPLE, labels);
Mat results = new Mat();
int predictedClass = (int) svm.predict(testSamples, results, 0);
Even if I change the labels to 1 and 2, I still get 0.0 as a response. So something has to be absolutely wrong... I just don't know what to do. Please help! :)
I had a similar problem in C++. I'm not too sure if it's the same in Java, but in C++ the predictions were saved in the results matrix instead of being returned as a float.
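For what it's worth, here is a minimal sketch of that idea using OpenCV's Python bindings (Python is only an assumption for illustration, since the question uses Java): the per-sample predictions are written into the results matrix filled by predict, so read the label from there rather than relying on the return value alone.

import cv2
import numpy as np

# same toy data as in the question: three samples labeled 1, one labeled 0
labels = np.array([[1], [1], [1], [0]], dtype=np.int32)
data = np.array([[5], [2], [3], [8]], dtype=np.float32)
test = np.array([[5]], dtype=np.float32)

svm = cv2.ml.SVM_create()
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(1)
svm.train(data, cv2.ml.ROW_SAMPLE, labels)

# results holds one predicted label per test row
_ret, results = svm.predict(test)
print(results[0][0])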
I have problems getting ICA components with fastICA in R. When I try to extract 6 components from the fastICA function, it gives only one component, but there should be 6 components. Up to 5 components it worked perfectly, but beyond 5 it gives varying numbers of components. Can anyone tell me the reason for that?
Function and Parameters:
ICA6 <- fastICA(X, 6, alg.typ = "parallel", fun = "logcosh", alpha = 1,
method = "R", row.norm = FALSE, maxit = 200, tol = 0.0001, verbose = TRUE)
Here are the last few lines of my output for ICA6 and ICA5:
hpc-admin@aiken:~/gayan$ tail ICA6
[1992,] -1.755614e-01
[1993,] -1.931838e-01
[1994,] -1.403488e-01
[1995,] 4.952370e-01
[1996,] 3.798545e-02
[1997,] -8.870945e-02
[1998,] -1.847535e-01
[1999,] 2.084906e-01
[2000,] 2.235841e-01
hpc-admin@aiken:~/gayan$ tail ICA5
[1992,] -4.449966e-02 2.348224e-02 -0.1296879740 4.220189e-02 -0.1751827781
[1993,] -7.690094e-02 1.725353e-02 -0.1153838819 1.694351e-01 -0.1308105118
[1994,] -4.777415e-02 2.299214e-02 -0.1259907838 -6.011591e-03 -0.1605316621
[1995,] 4.354237e-02 2.295694e-02 -0.2499377363 -2.227481e-01 0.4414782035
[1996,] -3.848286e-02 2.121986e-02 -0.1361600020 -8.448882e-02 -0.0005046113
[1997,] -3.030994e-03 2.285310e-02 -0.1407370888 -1.215308e-02 -0.1062227838
[1998,] -3.988264e-03 2.335983e-02 -0.1497881709 1.787074e-02 -0.1982725941
[1999,] -7.483824e-02 1.096696e-02 -0.0672348301 -1.665848e-01 0.1489732404
[2000,] -7.123032e-01 1.123832e-02 0.5153474842 -1.785166e-01 0.1632015019