first commit

main
Fan-Wu Geoffrey Yang 3 years ago
commit 2e78d8f365

[Two binary files added in this commit; their contents are not shown.]

@@ -0,0 +1,237 @@
#!/usr/bin/env python

'''
This module contains some common routines used by other samples.
'''

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    from functools import reduce

import numpy as np
import cv2

# built-in modules
import os
import itertools as it
from contextlib import contextmanager

image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']


class Bunch(object):
    # Simple attribute container: Bunch(a=1, b=2).a == 1
    def __init__(self, **kw):
        self.__dict__.update(kw)
    def __str__(self):
        return str(self.__dict__)


def splitfn(fn):
    # Split a filename into (directory, basename without extension, extension).
    path, fn = os.path.split(fn)
    name, ext = os.path.splitext(fn)
    return path, name, ext


def anorm2(a):
    # Squared L2 norm along the last axis.
    return (a*a).sum(-1)

def anorm(a):
    # L2 norm along the last axis.
    return np.sqrt( anorm2(a) )


def homotrans(H, x, y):
    # Apply the 3x3 homography H to the point (x, y) and dehomogenize.
    xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
    ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
    s  = H[2, 0]*x + H[2, 1]*y + H[2, 2]
    return xs/s, ys/s


def to_rect(a):
    # Normalize a rectangle spec to a 2x2 array [[x0, y0], [x1, y1]];
    # a 2-element input is treated as a size with origin (0, 0).
    a = np.ravel(a)
    if len(a) == 2:
        a = (0, 0, a[0], a[1])
    return np.array(a, np.float64).reshape(2, 2)

def rect2rect_mtx(src, dst):
    # 3x3 affine matrix mapping the src rectangle onto the dst rectangle.
    src, dst = to_rect(src), to_rect(dst)
    cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
    tx, ty = dst[0] - src[0] * (cx, cy)
    M = np.float64([[ cx,  0, tx],
                    [  0, cy, ty],
                    [  0,  0,  1]])
    return M


def lookat(eye, target, up = (0, 0, 1)):
    # Rotation matrix and translation vector for a camera at `eye` looking at `target`.
    fwd = np.asarray(target, np.float64) - eye
    fwd /= anorm(fwd)
    right = np.cross(fwd, up)
    right /= anorm(right)
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec

def mtx2rvec(R):
    # Recover a rotation vector (axis * angle) from a rotation matrix.
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0]    # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)
    s = np.dot(vt[1], p)
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)


def draw_str(dst, target, s):
    # Draw text with a one-pixel dark drop shadow for readability.
    x, y = target
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)


class Sketcher:
    # Free-hand drawing with the mouse onto one or more destination images.
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)

    def show(self):
        cv2.imshow(self.windowname, self.dests[0])

    def on_mouse(self, event, x, y, flags, param):
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        elif event == cv2.EVENT_LBUTTONUP:
            self.prev_pt = None

        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()


# palette data from matplotlib/_cm.py
_jet_data = {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),
                       (1, 0.5, 0.5)),
             'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),
                       (0.91, 0, 0), (1, 0, 0)),
             'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65, 0, 0),
                       (1, 0, 0))}

cmap_data = { 'jet' : _jet_data }

def make_cmap(name, n=256):
    # Build an n-entry BGR lookup table from matplotlib-style segment data.
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    channels = []
    eps = 1e-6
    for ch_name in ['blue', 'green', 'red']:
        ch_data = data[ch_name]
        xp, yp = [], []
        for x, y1, y2 in ch_data:
            xp += [x, x+eps]
            yp += [y1, y2]
        ch = np.interp(xs, xp, yp)
        channels.append(ch)
    return np.uint8(np.array(channels).T*255)


def nothing(*arg, **kw):
    pass

def clock():
    # Current time in seconds, based on OpenCV's tick counter.
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    # Context manager that prints the elapsed wall-clock time in milliseconds.
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))


class StatValue:
    # Exponentially smoothed running value.
    def __init__(self, smooth_coef = 0.5):
        self.value = None
        self.smooth_coef = smooth_coef
    def update(self, v):
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0-c) * v


class RectSelector:
    # Mouse-driven rectangle selection; calls callback(rect) when the drag ends.
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None

    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            return
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                xo, yo = self.drag_start
                x0, y0 = np.minimum([xo, yo], [x, y])
                x1, y1 = np.maximum([xo, yo], [x, y])
                self.drag_rect = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.drag_rect = (x0, y0, x1, y1)
            else:
                rect = self.drag_rect
                self.drag_start = None
                self.drag_rect = None
                if rect:
                    self.callback(rect)

    def draw(self, vis):
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
        return True

    @property
    def dragging(self):
        return self.drag_rect is not None


def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
    args = [iter(iterable)] * n
    if PY3:
        output = it.zip_longest(fillvalue=fillvalue, *args)
    else:
        output = it.izip_longest(fillvalue=fillvalue, *args)
    return output

def mosaic(w, imgs):
    '''Make a grid from images.

    w    -- number of grid columns
    imgs -- images (must have same size and format)
    '''
    imgs = iter(imgs)
    if PY3:
        img0 = next(imgs)
    else:
        img0 = imgs.next()
    pad = np.zeros_like(img0)
    imgs = it.chain([img0], imgs)
    rows = grouper(w, imgs, pad)
    return np.vstack(map(np.hstack, rows))

def getsize(img):
    # Image size as (width, height).
    h, w = img.shape[:2]
    return w, h

def mdot(*args):
    # Chained matrix product: mdot(A, B, C) == A.dot(B).dot(C)
    return reduce(np.dot, args)

def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    for kp in keypoints:
        x, y = kp.pt
        cv2.circle(vis, (int(x), int(y)), 2, color)
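
For orientation, here is a minimal usage sketch of a few of these helpers (Timer, draw_str, clock, mosaic). It is illustrative only: it assumes the module above is saved as common.py next to the script, and it builds a synthetic image rather than loading one from disk.

import numpy as np
import cv2

from common import Timer, draw_str, clock, mosaic

# Synthetic test image, so the sketch does not depend on any file on disk.
img = np.full((240, 320, 3), 64, np.uint8)

with Timer('blur'):                      # prints the elapsed time in ms
    blurred = cv2.GaussianBlur(img, (15, 15), 0)

draw_str(blurred, (20, 20), 'time: %.1f s' % clock())

grid = mosaic(2, [img, blurred])         # 2-column grid of equally sized images
cv2.imshow('common demo', grid)
cv2.waitKey(0)
cv2.destroyAllWindows()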

@@ -0,0 +1 @@
Subproject commit 5791a82cc9b84e7401441e90cc9ceefeca24f742

@@ -0,0 +1,130 @@
#!/usr/bin/env python
'''
Wiener deconvolution.

Sample shows how DFT can be used to perform Wiener deconvolution [1]
of an image with a user-defined point spread function (PSF).

Usage:
  deconvolution.py [--circle]
      [--angle <degrees>]
      [--d <diameter>]
      [--snr <signal/noise ratio in db>]
      [<input image>]

  Use sliders to adjust PSF parameters.
  Keys:
    SPACE - switch between linear/circular PSF
    ESC   - exit

Examples:
  deconvolution.py --angle 135 --d 22 ../data/licenseplate_motion.jpg
    (image source: http://www.topazlabs.com/infocus/_images/licenseplate_compare.jpg)
  deconvolution.py --angle 86 --d 31 ../data/text_motion.jpg
  deconvolution.py --circle --d 19 ../data/text_defocus.jpg
    (image source: compact digital photo camera, no artificial distortion)

[1] http://en.wikipedia.org/wiki/Wiener_deconvolution
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

# local module
# from common import nothing


def blur_edge(img, d=31):
    # Blend the image border into a wrap-around blur so the DFT sees no hard edges.
    h, w = img.shape[:2]
    img_pad = cv2.copyMakeBorder(img, d, d, d, d, cv2.BORDER_WRAP)
    img_blur = cv2.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d, d:-d]
    y, x = np.indices((h, w))
    dist = np.dstack([x, w-x-1, y, h-y-1]).min(-1)
    w = np.minimum(np.float32(dist)/d, 1.0)
    return img*w + img_blur*(1-w)


def motion_kernel(angle, d, sz=65):
    # Linear motion-blur PSF: a 1 x d line rotated by `angle`, centred in an sz x sz patch.
    kern = np.ones((1, d), np.float32)
    c, s = np.cos(angle), np.sin(angle)
    A = np.float32([[c, -s, 0], [s, c, 0]])
    sz2 = sz // 2
    A[:,2] = (sz2, sz2) - np.dot(A[:,:2], ((d-1)*0.5, 0))
    kern = cv2.warpAffine(kern, A, (sz, sz), flags=cv2.INTER_CUBIC)
    return kern


def defocus_kernel(d, sz=65):
    # Defocus PSF: a filled circle of diameter d in an sz x sz patch.
    kern = np.zeros((sz, sz), np.uint8)
    cv2.circle(kern, (sz, sz), d, 255, -1, cv2.LINE_AA, shift=1)
    kern = np.float32(kern) / 255.0
    return kern


if __name__ == '__main__':
    print(__doc__)
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['circle', 'angle=', 'd=', 'snr='])
    opts = dict(opts)
    try:
        fn = args[0]
    except:
        fn = '../data/licenseplate_motion.jpg'

    win = 'deconvolution'

    img = cv2.imread(fn, 0)
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    img = np.float32(img)/255.0
    cv2.imshow('input', img)

    img = blur_edge(img)
    IMG = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)

    defocus = '--circle' in opts

    def update(_):
        ang = np.deg2rad( cv2.getTrackbarPos('angle', win) )
        d = cv2.getTrackbarPos('d', win)
        noise = 10**(-0.1*cv2.getTrackbarPos('SNR (db)', win))
        if defocus:
            psf = defocus_kernel(d)
        else:
            psf = motion_kernel(ang, d)
        cv2.imshow('psf', psf)

        psf /= psf.sum()
        psf_pad = np.zeros_like(img)
        kh, kw = psf.shape
        psf_pad[:kh, :kw] = psf
        PSF = cv2.dft(psf_pad, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows = kh)
        PSF2 = (PSF**2).sum(-1)
        iPSF = PSF / (PSF2 + noise)[...,np.newaxis]
        RES = cv2.mulSpectrums(IMG, iPSF, 0)
        res = cv2.idft(RES, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
        res = np.roll(res, -kh//2, 0)
        res = np.roll(res, -kw//2, 1)
        cv2.imshow(win, res)

    cv2.namedWindow(win)
    cv2.namedWindow('psf', 0)
    cv2.createTrackbar('angle', win, int(opts.get('--angle', 135)), 180, update)
    cv2.createTrackbar('d', win, int(opts.get('--d', 22)), 50, update)
    cv2.createTrackbar('SNR (db)', win, int(opts.get('--snr', 25)), 50, update)
    update(None)

    while True:
        ch = cv2.waitKey()
        if ch == 27:
            break
        if ch == ord(' '):
            defocus = not defocus
            update(None)
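
The frequency-domain step inside update() is the Wiener filter. As a point of reference, here is a minimal NumPy-only sketch of the textbook formula; it paraphrases the idea rather than translating the cv2.dft / mulSpectrums code above line for line (that code works on OpenCV's two-channel complex format and compensates the corner-anchored PSF with np.roll), so treat it as an illustration, not the sample's exact algorithm.

import numpy as np

def wiener_deconvolve(img, psf, snr_db=25.0):
    '''Textbook Wiener deconvolution: F_hat = G * conj(H) / (|H|**2 + 1/SNR).

    img -- float grayscale image with values in [0, 1]
    psf -- point spread function zero-padded to img.shape, with the kernel
           centred on the array (it is shifted to the origin below)
    '''
    noise = 10 ** (-0.1 * snr_db)              # same "SNR (db)" convention as the slider above
    G = np.fft.fft2(img)
    H = np.fft.fft2(np.fft.ifftshift(psf))     # move the kernel centre to pixel (0, 0)
    F_hat = G * np.conj(H) / (np.abs(H) ** 2 + noise)
    return np.real(np.fft.ifft2(F_hat))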

[Two binary image files added in this commit (38 KiB and 29 KiB); previews not shown.]

@@ -0,0 +1,15 @@
import cv2
from cv2 import dnn_superres
# Create an SR object (the only call here that differs from the C++ API)
sr = dnn_superres.DnnSuperResImpl_create()
# Read image
image = cv2.imread('./images/person.jpg')
# Read the desired model
path = "EDSR_x4.pb"
sr.readModel(path)
# Set the desired model and scale to get correct pre- and post-processing
sr.setModel("edsr", 4)
# Upscale the image
result = sr.upsample(image)
# Save the image
cv2.imwrite("./upscaled.png", result)

@@ -0,0 +1,81 @@
#!/usr/bin/env python
'''
Watershed segmentation
======================

This program demonstrates the watershed segmentation algorithm
in OpenCV: watershed().

Usage
-----
watershed.py [image filename]

Keys
----
  1-7   - switch marker color
  SPACE - update segmentation
  r     - reset
  a     - toggle autoupdate
  ESC   - exit
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2

from common import Sketcher


class App:
    def __init__(self, fn):
        self.img = cv2.imread(fn)
        if self.img is None:
            raise Exception('Failed to load image file: %s' % fn)
        h, w = self.img.shape[:2]
        self.markers = np.zeros((h, w), np.int32)
        self.markers_vis = self.img.copy()
        self.cur_marker = 1
        self.colors = np.int32( list(np.ndindex(2, 2, 2)) ) * 255
        self.auto_update = True
        self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)

    def get_colors(self):
        return list(map(int, self.colors[self.cur_marker])), self.cur_marker

    def watershed(self):
        m = self.markers.copy()
        cv2.watershed(self.img, m)
        overlay = self.colors[np.maximum(m, 0)]
        vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
        cv2.imshow('watershed', vis)

    def run(self):
        while cv2.getWindowProperty('img', 0) != -1 or cv2.getWindowProperty('watershed', 0) != -1:
            ch = cv2.waitKey(50)
            if ch == 27:
                break
            if ch >= ord('1') and ch <= ord('7'):
                self.cur_marker = ch - ord('0')
                print('marker: ', self.cur_marker)
            if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
                self.watershed()
                self.sketch.dirty = False
            if ch in [ord('a'), ord('A')]:
                self.auto_update = not self.auto_update
                print('auto_update is', ['off', 'on'][self.auto_update])
            if ch in [ord('r'), ord('R')]:
                self.markers[:] = 0
                self.markers_vis[:] = self.img
                self.sketch.show()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    import sys
    try:
        fn = sys.argv[1]
    except:
        fn = '../data/fruits.jpg'
    print(__doc__)
    App(fn).run()
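
The sample above drives cv2.watershed() from hand-drawn markers. As a rough sketch of the same call used non-interactively, the markers can instead be derived automatically with an Otsu threshold and a distance transform (the standard OpenCV tutorial recipe); the input path is illustrative and the thresholds are assumptions, not values taken from this repository.

import numpy as np
import cv2

img = cv2.imread('../data/fruits.jpg')
if img is None:
    raise SystemExit('Failed to load image')

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

# Sure background: a dilation of the thresholded mask.
sure_bg = cv2.dilate(bw, np.ones((3, 3), np.uint8), iterations=3)
# Sure foreground: pixels far from the mask border in the distance transform.
dist = cv2.distanceTransform(bw, cv2.DIST_L2, 5)
_, sure_fg = cv2.threshold(dist, 0.5 * dist.max(), 255, cv2.THRESH_BINARY)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)

# Label the sure-foreground blobs; shift by 1 so that 0 is reserved for "unknown".
_, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0

markers = cv2.watershed(img, markers)      # boundaries come back labelled -1
vis = img.copy()
vis[markers == -1] = (0, 0, 255)
cv2.imshow('watershed (auto markers)', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()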