This article collects typical usage examples of the scipy.lena function in Python. If you are wondering what the lena function does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Twenty code examples of the lena function are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
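Note: the lena image has been removed from modern SciPy (scipy.misc.lena was dropped around SciPy 1.0), so most of the snippets below only run against old SciPy releases. The following is a minimal compatibility sketch, not part of any example on this page: it assumes any 512x512 grayscale test image is an acceptable stand-in, and the helper name load_test_image is ours.

def load_test_image():
    """Return a 512x512 grayscale test array, trying old and new SciPy APIs in turn."""
    try:
        from scipy import misc
        return misc.lena()        # very old SciPy releases, before lena was removed
    except (ImportError, AttributeError):
        pass
    try:
        from scipy import datasets
        return datasets.ascent()  # SciPy >= 1.10 (needs the optional 'pooch' package)
    except ImportError:
        from scipy import misc
        return misc.ascent()      # intermediate releases that still ship scipy.misc

lena = load_test_image()
print(lena.shape)                 # (512, 512)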
Example 1: test_connect_regions
def test_connect_regions():
    lena = sp.lena()
    for thr in (50, 150):
        mask = lena > thr
        graph = img_to_graph(lena, mask)
        nose.tools.assert_equal(ndimage.label(mask)[1],
                                cs_graph_components(graph)[0])
Developer: poolio | Project: scikit-learn | Lines: 7 | Source: test_image.py
Example 2: __init__
def __init__(self):
    QMainWindow.__init__(self, None,
                         "FFTLab Main Window",
                         Qt.WType_TopLevel | Qt.WDestructiveClose)
    self.file_menu = QPopupMenu(self)
    self.file_menu.insertItem('&Quit', self.file_quit, Qt.CTRL + Qt.Key_Q)
    self.menuBar().insertItem('&File', self.file_menu)
    self.help_menu = QPopupMenu(self)
    self.menuBar().insertSeparator()
    self.menuBar().insertItem('&Help', self.help_menu)
    self.help_menu.insertItem('&About', self.about)
    self.main_widget = QWidget(self, "Main widget")
    data = ((lena()/255.)).astype("complex64")
    kernel = np.ones((6,6)).astype("complex64")
    #data = np.random.uniform(0,1,(8,8)).astype("complex64")
    #kernel = np.random.uniform(0,1,(7,7)).astype("complex64")
    #power_spec = fftshift(log(abs(signal.fftn(data))))
    gpu_conv = fftconvolve2d(data, kernel)
    cpu_conv = fftconvolve(data.real, kernel.real, mode="valid")
    info("GPU shape = (%s, %s)" % gpu_conv.shape)
    info("CPU shape = (%s, %s)" % cpu_conv.shape)
    check_results(cpu_conv, gpu_conv)
    data_c = ImageCanvas(data.real, self.main_widget)
    kernel_c = ImageCanvas(kernel.real, self.main_widget)
    gpu_conv_c = ImageCanvas(gpu_conv, self.main_widget)
    cpu_conv_c = ImageCanvas(cpu_conv, self.main_widget)
    #power_spec = ImageCanvas(power_spec, self.main_widget)
    data_label = QLabel("Input Data (lena)", self.main_widget)
    data_label.setAlignment(QLabel.AlignCenter)
    kernel_label = QLabel("Convolution Kernel", self.main_widget)
    kernel_label.setAlignment(QLabel.AlignCenter)
    gpu_conv_label = QLabel("GPU fftconvolve (CUDA)", self.main_widget)
    gpu_conv_label.setAlignment(QLabel.AlignCenter)
    cpu_conv_label = QLabel("CPU fftconvolve (NumPy)", self.main_widget)
    cpu_conv_label.setAlignment(QLabel.AlignCenter)
    g = QGridLayout(self.main_widget)
    g.addWidget(data_label, 0, 0)
    g.addWidget(kernel_label, 0, 1)
    g.addWidget(data_c, 1, 0)
    g.addWidget(kernel_c, 1, 1)
    g.addWidget(gpu_conv_label, 2, 0)
    g.addWidget(cpu_conv_label, 2, 1)
    g.addWidget(gpu_conv_c, 3, 0)
    g.addWidget(cpu_conv_c, 3, 1)
    self.main_widget.setFocus()
    self.setCentralWidget(self.main_widget)
    self.statusBar().message("%s - v%s" % (PROGNAME, PROG_VERSION), 2000)
Developer: npinto | Project: python-cuda | Lines: 60 | Source: fftlab.py
Example 3: test_connect_regions_with_grid
def test_connect_regions_with_grid():
    lena = sp.lena()
    mask = lena > 50
    graph = grid_to_graph(*lena.shape, **{"mask": mask})
    assert_equal(ndimage.label(mask)[1], cs_graph_components(graph)[0])
    mask = lena > 150
    graph = grid_to_graph(*lena.shape, **{"mask": mask, "dtype": None})
    assert_equal(ndimage.label(mask)[1], cs_graph_components(graph)[0])
Developer: vincentschut | Project: scikit-learn | Lines: 9 | Source: test_image.py
Example 4: __init__
def __init__(self):
    iris = datasets.load_iris()
    self._x_iris = iris.data
    self._y_iris = iris.target
    try:
        self._lena = sp.lena()
    except AttributeError:
        from scipy import misc
        self._lena = misc.lena()
Developer: haisland0909 | Project: python_practice | Lines: 9 | Source: unsupervisedlearningsample.py
Example 5: test_connect_regions_with_grid
def test_connect_regions_with_grid():
    lena = sp.lena()
    mask = lena > 50
    graph = grid_to_graph(*lena.shape, **{'mask': mask})
    nose.tools.assert_equal(ndimage.label(mask)[1],
                            cs_graph_components(graph)[0])
    mask = lena > 150
    graph = grid_to_graph(*lena.shape, **{'mask': mask, 'dtype': None})
    nose.tools.assert_equal(ndimage.label(mask)[1],
                            cs_graph_components(graph)[0])
Developer: poolio | Project: scikit-learn | Lines: 11 | Source: test_image.py
Example 6: test_tvdenoise
def test_tvdenoise():
    lena = scipy.lena().astype(np.float)
    noisy_lena = lena + 0.2 * lena.std() * np.random.randn(*lena.shape)
    denoised_lena_W5 = tvdenoise(lena, niter=10, W=5.0)
    denoised_lena_W50 = tvdenoise(lena, niter=10, W=50.)
    grad_mag_lena = gradient_magnitude(lena).sum()
    grad_mag_noisy = gradient_magnitude(noisy_lena).sum()
    grad_mag_denoised_W5 = gradient_magnitude(denoised_lena_W5).sum()
    grad_mag_denoised_W50 = gradient_magnitude(denoised_lena_W50).sum()
    assert grad_mag_noisy > max(grad_mag_denoised_W5, grad_mag_denoised_W50)
    assert grad_mag_denoised_W5 > grad_mag_denoised_W50
    assert grad_mag_denoised_W5 > 0.5 * grad_mag_lena
Developer: maelp | Project: scikits.image | Lines: 12 | Source: test_tvdenoise.py
Example 7: main
def main():
    x = lena()
    a = int(32)
    y = wv.misc.per_ext2d(x, a)
    z = wv.misc.symm_ext2d(x, a)
    plt.subplot(2, 1, 1)
    plt.imshow(y, cmap=cm.gray)
    plt.xlabel('Periodic Ext')
    plt.subplot(2, 1, 2)
    plt.imshow(z, cmap=cm.gray)
    plt.xlabel('Symmetric Ext')
    plt.show()
Developer: iklaush | Project: wavepy | Lines: 17 | Source: exttest2.py
Example 8: unsupervisedLearningTest02
def unsupervisedLearningTest02():
    from sklearn import cluster
    import scipy as sp
    import numpy as np
    try:
        lena = sp.lena()
    except AttributeError:
        from scipy import misc
        lena = misc.lena()
    X = lena.reshape((-1, 1))
    k_means = cluster.KMeans(n_clusters=5, n_init=1)
    k_means.fit(X)
    values = k_means.cluster_centers_.squeeze()
    labels = k_means.labels_
    lena_compressed = np.choose(labels, values)
    lena_compressed.shape = lena.shape
    print lena_compressed
Developer: hyliu0302 | Project: scikit-learn-notes | Lines: 19 | Source: myScikitLearnFcns.py
Example 9: test_tv_denoise_2d
def test_tv_denoise_2d(self):
    """
    Apply the TV denoising algorithm on the lena image provided
    by scipy.
    """
    import scipy
    # lena image
    lena = scipy.lena().astype(np.float)
    # add noise to lena
    lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
    # denoise
    denoised_lena = F.tv_denoise(lena, weight=60.0)
    # which dtype?
    assert denoised_lena.dtype in [np.float, np.float32, np.float64]
    from scipy import ndimage
    grad = ndimage.morphological_gradient(lena, size=((3, 3)))
    grad_denoised = ndimage.morphological_gradient(denoised_lena, size=((3, 3)))
    # test if the total variation has decreased
    assert np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()) / 2
    denoised_lena_int = F.tv_denoise(lena.astype(np.int32),
                                     weight=60.0, keep_type=True)
    assert denoised_lena_int.dtype is np.dtype('int32')
Developer: GaelVaroquaux | Project: scikits.image | Lines: 22 | Source: test_tv_denoise.py
Example 10: range
import numpy as np
import scipy
import matplotlib.pyplot as plt
lena = scipy.lena()
lena[10:13, 20:23]
lena[100:120] = 255
lx, ly = lena.shape
X, Y = np.ogrid[0:lx, 0:ly]
mask = (X - lx/2)**2 + (Y - ly/2)**2 > lx*ly/4
lena[mask] = 0
lena[range(400), range(400)] = 255
plt.figure(figsize=(3,3))
plt.axes([0, 0, 1, 1])
plt.imshow(lena, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
Developer: VirgileFritsch | Project: scipy-lecture-notes | Lines: 20 | Source: plot_numpy_array.py
Example 11: assert
        #type_converters = converters.blitz
        )
        assert(not context.has_key("__inlineargs__"))
        context["__inlineargs__"] = args
        context["__inlinekwargs__"] = kwargs
        r = eval("inline(*__inlineargs__,**__inlinekwargs__)", globals(), context)
        context["__inlineargs__"] = None
        return r
    return fct

if __name__ == "__main__":
    import time
    st = time.clock()
    lena = scipy.lena().reshape(512, 512, 1).repeat(3, axis=2).astype(numpy.uint8).swapaxes(0, 1).copy('F')
    cimg_code("do_test( a_array );",
              """
              #include <CImg.h>
              using namespace cimg_library;
              int do_test(PyArrayObject * npimg ) {
                  assert(npimg->nd==3);
                  printf("%p %d x %d x %d\\n", npimg->data, npimg->dimensions[1], npimg->dimensions[0], npimg->dimensions[2]);
                  CImg<unsigned char> image(npimg->data, npimg->dimensions[1], npimg->dimensions[0], 1, npimg->dimensions[2]), visu(500, 400, 1, 3, 0);
                  image = image.blur(2.5);
                  return 0;
              }
              """, True)(a=lena)
    print "done in ", time.clock() - st, "seconds"
Developer: matthiascy | Project: pycvf | Lines: 30 | Source: cimg.py
Example 12:
import numpy as np
import scipy as sp
import harris
im = sp.lena()
harrisim = harris.compute_harris_response(im)
filtered_coords = harris.get_harris_points(harrisim, 6)
harris.plot_harris_points(im, filtered_coords)
Developer: NelleV | Project: ROVAR | Lines: 9 | Source: test_lena.py
Example 13: lena
0 0 x x x 0 0
0 0 0 x 0 0 0
0 0 0 0 0 0 0
Once you have a numpy expression that works correctly, time it
using time.time (or time.clock on windows).
Use scipy.weave.blitz to run the same expression. Again time it.
Compare the speeds of the two function and calculate the speed-up
(numpy_time/weave_time).
Plot two images that result from the two approaches and compare them.
"""
import time
from numpy import empty, float64
from scipy import lena
from scipy import weave
from matplotlib.pylab import subplot, imshow, title, show, gray, figure
img = lena()
expr = """avg_img =( img[1:-1 ,1:-1] # center
+ img[ :-2 ,1:-1] # left
+ img[2: ,1:-1] # right
+ img[1:-1 , :-2] # top
+ img[1:-1 ,2: ] # bottom
) / 5.0"""
Developer: DivyaShanmugam | Project: pydanny-event-notes | Lines: 29 | Source: blitz_inline_compare.py
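The snippet above is truncated and relies on scipy.weave, which no longer ships with SciPy, so here is a hedged, NumPy-only sketch of the averaging exercise the docstring describes (the helper name five_point_average and the random stand-in image are ours, not part of the original file):

import time
import numpy as np

def five_point_average(img):
    # Average each interior pixel with its four axis-aligned neighbours,
    # mirroring the expr string above.
    return (img[1:-1, 1:-1]    # center
            + img[:-2, 1:-1]
            + img[2:, 1:-1]
            + img[1:-1, :-2]
            + img[1:-1, 2:]) / 5.0

img = np.random.rand(512, 512)   # stand-in for the lena image
t0 = time.time()
avg_img = five_point_average(img)
print("numpy time: %.4f s, output shape %s" % (time.time() - t0, avg_img.shape))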
Example 14: test_write_frame_image
def test_write_frame_image():
    img = Image.fromarray(lena()).convert("RGB")
    b = TheoraEncoder(VIDEO_DIR + "/b.ogv", img.size[0], img.size[1])
    b.write_frame_image(img)
Developer: certik | Project: python-theora | Lines: 4 | Source: test_encoder.py
Example 15:
import scipy as sp
import numpy as np
import pylab as pl
l = sp.lena()
l_ = l[235:235+153, 205:162+205]
t = pl.imread('tarek.jpg')
t = t[::-1, ...]
t_ = t.sum(axis=-1)
################################################################################
pl.figure(0, figsize=(12, 4.5))
pl.gray()
pl.clf()
pl.axes([0, 0, 0.3, 1])
pl.imshow(t_.copy())
pl.axis('off')
pl.axes([0.33, 0, 0.3, 1])
pl.imshow(l_.copy())
pl.axis('off')
t_ = t_.astype(np.float)
t_ /= t_.max()
l_ = l_.astype(np.float)
l_ /= l_.max()
pl.axes([0.66, 0, 0.3, 1])
pl.imshow(t_ + l_)
pl.axis('off')
Developer: GaelVaroquaux | Project: scipy-tutorials | Lines: 31 | Source: interlude.py
Example 16: time
print __doc__
from time import time
import pylab as pl
import scipy as sp
import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = sp.lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print 'Distorting image...'
distorted = lena.copy()
distorted[:, height / 2:] += 0.075 * np.random.randn(width, height / 2)
# Extract all clean patches from the left half of the image
print 'Extracting clean patches...'
t0 = time()
patch_size = (7, 7)
Developer: Yangqing | Project: scikit-learn | Lines: 31 | Source: plot_image_denoising.py
Example 17: min
        shift = numpy.zeros(2)
        count = min(paths[key]["count"])
        for npa in paths[key]["shift"]:
            shift += npa
        d.append({"path": key, "shift": shift, "count": count})
    d.sort(mysort)
    return d

if __name__ == "__main__":
    # lena1 = numpy.zeros((512, 512))
    # scipy.lena()
    # lena1[100:150, 160:200] = 1
    ao1, ao2 = 5, 3
    print ("Absolute offset is %s,%s" % (ao1, ao2))
    lena1 = scipy.lena()
    lena2 = numpy.zeros_like(lena1)
    lena2[ao1:, ao2:] = lena1[:-ao1, :-ao2]
    # out = Visual_SURF(lena1, lena2)
    """
    out = feature.surf2(lena1, lena2, verbose=1)
    print "clacShift", calcShift(out)
    # raw_input("Enter to continue")
    out2 = feature.reduce_orsa(out)
    # print "SURF: %s keypoint; ORSA -> %s" % (out.shape[0], out2.shape[0])
    # out = out2
    print "*" * 80
    # out = feature.sift2(lena1, lena2, verbose=1)
    out = Visual_SIFT(lena1, lena2)
    print "clacShift", calcShift(out)
Developer: srrcboy | Project: imageAlignment | Lines: 31 | Source: test.py
Example 18:
import numpy as np
import scipy as sp
import pylab as pl
from scipy import ndimage, signal
l = sp.lena()[200:-140, 190:-150]
l = l/float(l.max())
pl.figure(figsize=(12, 4.5))
pl.axes([0.15, 0, 0.3, 1])
pl.gray()
pl.imshow(l, vmin=0, vmax=1)
pl.title('Ground truth')
pl.axis('off')
pl.axes([0.5, 0, 0.3, 1])
g = l + .13*np.random.normal(size=l.shape)
pl.imshow(g, vmin=0, vmax=1)
pl.title('Noisy observation')
pl.axis('off')
Developer: GaelVaroquaux | Project: scipy-tutorials | Lines: 17 | Source: demo_filtering1.py
Example 19: __init__
#CV_FOURCC('U', '2', '6', '3') = H263 codec
#CV_FOURCC('I', '2', '6', '3') = H263I codec
#CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec
class CvVideoWriter:
    def __init__(self, fname, fps=25, frameW=320, frameH=200, codec="MJPG", isColor=1):
        self.writer = cvCreateVideoWriter(fname,
                                          CV_FOURCC(codec[0], codec[1], codec[2], codec[3]),
                                          fps,
                                          cvSize(frameW, frameH),
                                          isColor)

    def push(self, img):
        cvWriteFrame(self.writer, img.copy('C'))

    def __del__(self):
        pass
        #cvReleaseVideoWriter(self.writer)

# import pycvf.lib.video.cvvideowriter as cvw; cvw.CvVideoWriter("/tmp/out1.avi")
if __name__ == "__main__":
    import numpy, scipy
    from pycvf.lib.graphics.rescale import Rescaler2d
    rsc = Rescaler2d((320, 200))
    c = CvVideoWriter("/tmp/test.mpg")
    ib = scipy.lena().reshape((512, 512, 1)).repeat(3, axis=2)
    for i in range(100):
        print "Frame ", i
        ib[:, :, 0] = i*3
        c.push(rsc.process(ib).astype(numpy.uint8))
Developer: matthiascy | Project: pycvf | Lines: 29 | Source: videowriter_opencv.py
Example 20: _downsampled_lena
def _downsampled_lena():
    lena = sp.lena()
    lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
    lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
    lena /= 16.0
    return lena
Developer: vincentschut | Project: scikit-learn | Lines: 6 | Source: test_image.py
Note: The scipy.lena examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult the corresponding project's license before redistributing or reusing the code. Do not reproduce this page without permission.