This article collects typical usage examples of the Python function sift.process_image. If you are wondering how process_image is used in practice, or are looking for concrete examples of calling it, the curated code samples below should help.
The sections that follow show 20 code examples of the process_image function, sorted by popularity by default.
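Before the examples, here is a minimal sketch of the typical workflow these snippets share, assuming the PCV-style sift module (from "Programming Computer Vision with Python"), in which process_image(imagename, resultname) runs an external SIFT binary and writes the features to a text file, and read_features_from_file loads them back. The image name empire.jpg is a placeholder, and the exact signature may differ depending on which sift wrapper a given project ships.

from PIL import Image
from numpy import array
from pylab import figure, gray, show
import sift

imname = 'empire.jpg'  # placeholder input image
im = array(Image.open(imname).convert('L'))

# run the SIFT detector and write the features to a .sift text file
sift.process_image(imname, 'empire.sift')

# locs is an n x 4 array (x, y, scale, orientation); desc is the n x 128 descriptor array
locs, desc = sift.read_features_from_file('empire.sift')

# show the detected keypoints on top of the grayscale image
figure()
gray()
sift.plot_features(im, locs, circle=True)
show()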
Example 1: sift_pan_desc_generator

def sift_pan_desc_generator(path='/home/aurora/hdd/workspace/PycharmProjects/data/N20040103G/'):
    filelists = getFiles(path)
    feature = []
    for index, file in enumerate(filelists):
        sift.process_image(file, 'pan' + str(index) + '.sift')
        feature.append('pan' + str(index) + '.sift')
    return feature

Author: auroua | Project: aurora_detection | Lines: 7 | Source: sift_test.py
Example 2: cbir_train

def cbir_train(train_path, voc_name, db_name, n_subsample=2000, n_cluster=2000, subfeatsampling=10):
    voc_name = voc_name + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling)
    db_name = db_name[:-3] + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling) + db_name[-3:]
    imlist, featlist = cbir_utils.create_imglist_featlist(train_path)
    imlist = imlist[:n_subsample]
    featlist = featlist[:n_subsample]
    ### generate sift features
    nbr_images = len(imlist)
    for i in range(nbr_images):
        sift.process_image(imlist[i], featlist[i], mask=True)
    ### generate visual words
    voc = visual_word.Vocabulary(voc_name)
    voc.train(featlist, n_cluster, subfeatsampling)
    with open(voc_name + '.pkl', 'wb') as f:
        cPickle.dump(voc, f)
    print 'vocabulary is', voc.name, voc.nbr_word
    ### generate image index
    with open(voc_name + '.pkl', 'rb') as f:
        voc = cPickle.load(f)
    indx = image_search.Indexer(db_name, voc)
    indx.create_tables()
    for i in range(nbr_images):
        locs, descr = sift.read_features_from_file(featlist[i])
        indx.add_to_index(imlist[i], descr)
    indx.db_commit()
    print 'generate index finished!'
    print 'training over'

Author: yangxian10 | Project: CBIR_py | Lines: 35 | Source: cbir_demo.py
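As a usage note, a hypothetical call might look like the sketch below; the training path, vocabulary name, and database name are placeholders, and the keyword defaults come straight from the signature above.

# Hypothetical invocation; paths and names are placeholders.
cbir_train('data/train_images/', 'voc_train', 'index.db',
           n_subsample=2000, n_cluster=2000, subfeatsampling=10)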
Example 3: sift_aurora_desc_generator

def sift_aurora_desc_generator(path, des):
    filelists = getFiles(path)
    feature = []
    for index, file in enumerate(filelists):
        sift.process_image(file, des + str(index) + '.sift')
        feature.append(des + str(index) + '.sift')
    return feature

Author: auroua | Project: aurora_detection | Lines: 7 | Source: aurora_sift_graph.py
Example 4: extract_sift_feature

def extract_sift_feature(fname):
    fn, fext = os.path.splitext(os.path.basename(fname))
    if not os.path.exists(TMP_DIR + fn + '.key'):
        im = Image.open(fname)
        im_l = im.convert('L')
        im_l.save(TMP_DIR + fn + '.pgm', 'PPM')
        sift.process_image(TMP_DIR + fn + '.pgm', TMP_DIR + fn + '.key')
        os.remove(TMP_DIR + fn + '.pgm')

Author: zlike | Project: affivir | Lines: 8 | Source: sift_runner.py
Example 5: plot_sift_feature

def plot_sift_feature(im):
    #imname = 'empire.jpg'
    #im1 = array(Image.open(imname).convert('L'))
    tmpFile = 'tmp.sift'
    sift.process_image(im, tmpFile)
    l1, d1 = sift.read_features_from_file(tmpFile)
    figure()
    gray()
    sift.plot_features(im, l1, circle=True)
    show()

Author: tianwalker2012 | Project: handpa | Lines: 10 | Source: image_play.py
Example 6: get_krt

def get_krt(im1, im2):
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3] + "sift")

    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0], sifts[0])
    l0, d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1], sifts[1])
    l1, d1 = sift.read_features_from_file(sifts[1])

    # match features and estimate homography
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()
    H, ransac_data = homography.H_from_ransac(fp, tp, model)

    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration(Image.open(im2).size)

    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0, 0, 0.1], 0.1)

    # project bottom square in first image
    cam1 = camera.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))

    # use H to transfer points to the second image
    print dot(H, box_cam1)
    box_trans = homography.normalize(dot(H, box_cam1))

    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H, cam1.P))
    A = dot(linalg.inv(K), cam2.P[:, :3])
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)

    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))

    # test: projecting a point on z=0 should give the same result
    point = array([1, 1, 0, 1]).T
    print homography.normalize(dot(dot(H, cam1.P), point))
    print cam2.project(point)

    import pickle
    with open('%s.pkl' % ims[1][:-4], 'w') as f:
        pickle.dump(K, f)
        pickle.dump(dot(linalg.inv(K), cam2.P), f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])

Author: ak352 | Project: pycv | Lines: 54 | Source: test_cube.py
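For context, a hypothetical invocation is sketched below; the two image paths are placeholders borrowed from the commented-out paths in the function, which hint at a frontal/perspective pair of the same planar object. After the call, a pickle named after the second image holds K followed by the normalized second camera matrix.

# Hypothetical call; file names are placeholders.
get_krt('../../data/book_frontal.JPG', '../../data/book_perspective.JPG')

import pickle
with open('../../data/book_perspective.pkl') as f:
    K = pickle.load(f)   # 3x3 calibration matrix
    Rt = pickle.load(f)  # normalized 3x4 matrix of the second camera, inv(K) * P2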
Example 7: get_sift_lowe

def get_sift_lowe(img):
    features_fname = img + '.sift'
    if os.path.isfile(features_fname) == False:
        is_size_zero = sift.process_image(img, features_fname)
        if is_size_zero:
            os.remove(features_fname)
            sift.process_image(img, features_fname)
    if os.path.isfile(features_fname) and os.path.getsize(features_fname) == 0:
        os.remove(features_fname)
        sift.process_image(img, features_fname)
    locs, desc = sift.read_features_from_file(features_fname)
    return desc

Author: afshaanmaz | Project: FoodClassifier | Lines: 12 | Source: utils.py
Example 8: extractSift

def extractSift(input_files):
    all_features_dict = {}
    for i, fname in enumerate(input_files):
        features_fname = fname + '.sift'
        if exists(features_fname) == False:
            print "calculating sift features for", fname
            sift.process_image(fname, features_fname)
        print "gathering sift features for", fname,
        locs, descriptors = sift.read_features_from_file(features_fname)
        print descriptors.shape
        all_features_dict[fname] = descriptors
    return all_features_dict

Author: navinpai | Project: CS706 | Lines: 12 | Source: learn.py
Example 9: extractSift

def extractSift(input_files, target_folder):
    all_features_dict = {}
    count = 0
    for i, fname in enumerate(input_files):
        features_fname = target_folder + '/' + fname.split('/')[2].split('.')[0] + '.sift'
        if exists(features_fname) == False:
            print("Calculating sift features for ", fname)
            sift.process_image(fname, features_fname, count)
            count += 1
        locs, descriptors = sift.read_features_from_file(features_fname)
        all_features_dict[fname] = (locs, descriptors)
    os.chdir('..')
    return all_features_dict

Author: parulsethi | Project: espier | Lines: 13 | Source: vector_quantize.py
Example 10: find_matches

def find_matches(image_names, root):
    l = {}
    d = {}
    n = len(image_names)
    for i, im in enumerate(image_names):
        resultname = os.path.join(root, '{}.sift'.format(im))
        if not os.path.isfile(resultname):
            sift.process_image(os.path.join(root, '{}.png'.format(im)), resultname)
        l[i], d[i] = sift.read_features_from_file(resultname)

    matches = {}
    for i in range(n - 1):
        matches[i] = sift.match(d[i + 1], d[i])
    return matches, l, d

Author: softtrainee | Project: arlab | Lines: 14 | Source: stitch.py
Example 11: extractSift

def extractSift(input_files):
    all_features_dict = {}
    count = 0
    for i, fname in enumerate(input_files):
        # path to store resulting sift files
        features_fname = 'sift_output/' + fname.split('/')[2].split('.')[0] + '.sift'
        if count == 0:
            os.chdir('siftDemoV4')
        print("Calculating sift features for ", fname)
        sift.process_image(fname, features_fname, count)
        count += 1
        locs, descriptors = sift.read_features_from_file(features_fname)
        all_features_dict[fname] = descriptors
    os.chdir('..')
    return all_features_dict

Author: parulsethi | Project: espier | Lines: 15 | Source: visual_words.py
Example 12: extractSift

def extractSift(input_files):
    print "extracting Sift features"
    all_features_dict = {}
    for i, fname in enumerate(input_files):
        rest_of_path = fname[:-(len(os.path.basename(fname)))]
        rest_of_path = os.path.join(rest_of_path, "sift")
        rest_of_path = os.path.join(rest_of_path, os.path.basename(fname))
        features_fname = rest_of_path + '.sift'
        if os.path.exists(features_fname) == False:
            # print "calculating sift features for", fname
            sift.process_image(fname, features_fname)
        # print "gathering sift features for", fname,
        locs, descriptors = sift.read_features_from_file(features_fname)
        # print descriptors.shape
        all_features_dict[fname] = descriptors
    return all_features_dict

Author: ioanachelu | Project: bag-of-visual-words | Lines: 16 | Source: utils.py
Example 13: extractMF

def extractMF(filename):
    features_fname = filename + '.sift'
    sift.process_image(filename, features_fname)
    locs, descriptors = sift.read_features_from_file(features_fname)
    sh = min(locs.shape[0], 1000)
    res = np.zeros((sh, SIZE_LOCAL_FEATURE)).astype(np.float32)
    extra = [20, False, True, False, 0, 0, 0]
    WIN = 5
    for i in range(sh):
        x = np.int32(round(locs[i][0]))
        y = np.int32(round(locs[i][1]))
        I = Image.open(filename)
        Nx, Ny = I.size
        a = sg.spec(I.crop((max(x-WIN, 0), max(y-WIN, 0), min(x+WIN, Nx-1), min(y+WIN, Ny-1))), extra)
        res[i] = a
    print res.shape
    return res

Author: rbaravalle | Project: europeanfood | Lines: 17 | Source: classifierBrod.py
Example 14: len

def main():
    nbr_images = len(imlist)
    featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]
    for i in range(nbr_images):
        sift.process_image(imlist[i], featlist[i])
    voc = vocabularly.Vocabulary('ukbenchtest')
    voc.train(featlist, 1000, 10)
    with open('vocabulary.pkl', 'wb') as f:
        pickle.dump(voc, f)
    print 'vocabulary is:', voc.name, voc.nbr_words
    nbr_images = len(imlist)
    with open('vocabulary.pkl', 'rb') as f:
        voc = pickle.load(f)
    indx = imagesearch.Indexer('test.db', voc)
    indx.create_tables()
    for i in range(nbr_images)[:100]:
        locs, descr = sift.read_features_from_file(featlist[i])
        indx.add_to_index(imlist[i], descr)
    indx.db_commit()
    con = sqlite.connect('test.db')
    print con.execute('select count (filename) from imlist').fetchone()
    print con.execute('select * from imlist').fetchone()
    src = imagesearch.Searcher('test.db')
    locs, descr = sift.read_features_from_file(featlist[0])
    iw = voc.project(descr)
    print 'ask using a histogram...'
    print src.candidates_from_histogram(iw)[:10]
    print 'try a query...'
    print src.query(imlist[0])[:10]

Author: rickbhardwaj | Project: videoprocessing | Lines: 46 | Source: ImageSearchMain.py
Example 15: extractSift

def extractSift(input_files):
    print "extracting Sift features"
    all_features_dict = {}
    #all_features = zeros([1,128])
    for i, fname in enumerate(input_files):
        features_fname = fname + '.sift'
        if exists(features_fname) == False:
            print "calculating sift features for", fname
            sift.process_image(fname, features_fname)
        locs, descriptors = sift.read_features_from_file(features_fname)
        # print descriptors.shape
        all_features_dict[fname] = descriptors
        # if all_features.shape[0] == 1:
        #     all_features = descriptors
        # else:
        #     all_features = concatenate((all_features, descriptors), axis=0)
    return all_features_dict

Author: kds079 | Project: Top-K-Photos-in-a-Local-Region | Lines: 18 | Source: testsift.py
Example 16: detect_features

def detect_features(self, faces, image_filename):
    """ Detects features in a list of faces and returns their keypoints and descriptors """
    logging.debug('start detect %s features for file %s (train.py)' % (parameter.description_method, image_filename))
    keypoints = []
    descriptors = []
    # detect features and save
    for i in range(len(faces)):
        if not parameter.face_enlargement == None:
            faces[i] = tools.enlarge_image(faces[i], parameter.face_enlargement)
            logging.debug('Cropped face from file %s has been enlarged with factor %.3f (surf-detector.py)' % (image_filename, parameter.face_enlargement))
        face_numpy = np.asarray(faces[i])  # convert image for further processing
        # compute surf calculation
        if parameter.description_method == 'surf':
            surf = cv2.SURF(parameter.hessian_threshold, parameter.nOctaves, parameter.nOctaveLayers)  # threshold, number of octaves, number of octave layers within each octave (http://opencv.itseez.com/modules/features2d/doc/feature_detection_and_description.html, http://www.mathworks.de/help/toolbox/vision/ref/detectsurffeatures.html)
            tmpkeypoints, tmpdescriptors = surf.detect(face_numpy, None, False)  # extracting the SURF keys
            if len(tmpdescriptors) == 0:
                logging.warn('No descriptors found for a face in file %s (surf-detector.py)' % (image_filename))
            else:
                tmpdescriptors.shape = (-1, surf.descriptorSize())  # change the shape of the descriptor from 1-dim to 2-dim (needed so that matching functions such as match_bruteforce work during the search)
                logging.info('%d Features found in file %s: face number %d (surf-detector.py)' % (len(tmpdescriptors), image_filename, (i+1)))
        # compute sift calculation
        if parameter.description_method == 'sift':
            cv.SaveImage('tmp-sift.jpg', faces[i])
            sift.process_image('tmp-sift.jpg', "tmp.sift")
            l1, tmpdescriptors = sift.read_features_from_file("tmp.sift")
            tmpkeypoints = []
            if tmpdescriptors == None:
                logging.warn('No descriptors found for a face in file %s (surf-detector.py)' % (image_filename))
            else:
                for j in range(len(l1)):
                    keypoint = cv2.KeyPoint(l1[j][0], l1[j][1], l1[j][2], l1[j][3])
                    tmpkeypoints.append(keypoint)
                logging.info('%d Features found in file %s: face number %d (surf-detector.py)' % (len(tmpdescriptors), image_filename, (i+1)))
        keypoints.append(tmpkeypoints)  # add keypoints to list even when none are found
        descriptors.append(tmpdescriptors)  # add descriptors to list even when none are found
    return (keypoints, descriptors)

Author: azhargiri | Project: SURF-Face-Detection | Lines: 44 | Source: feature_calculator.py
Example 17: extractSift

def extractSift(input_files):
    print "extracting Sift features"
    all_features_dict = {}
    for i, fname in enumerate(input_files):
        features_fname = fname + '.sift'
        if exists(features_fname) == False:
            #print "calculating sift features for", fname
            sift.process_image(fname, features_fname)
        #print "gathering sift features for", fname,
        locs, descriptors = sift.read_features_from_file(features_fname)
        # check if there is a description for the image
        if len(descriptors) > 0:
            print descriptors.shape
            all_features_dict[fname] = descriptors
    return all_features_dict

Author: PierreHao | Project: BoVW-LSTM | Lines: 20 | Source: common.py
Example 18: loadImage

def loadImage(self, file):
    if file in self.dictFileImage:
        return self.dictFileImage[file]
    else:
        id = self.totalImages
        pixels = pylab.flipud(pylab.imread(file))
        # SIFT features
        partName, partDot, partExt = file.rpartition('.')
        keyFile = ''.join(partName + partDot + "key")
        pgmFile = ''.join(partName + partDot + "pgm")
        if os.path.exists(pgmFile) == False:
            #pylab.imsave(pgmFile, pixels)
            if len(pixels.shape) == 2:
                pilImage = Image.fromarray(pixels, 'L')
            else:
                h = pixels.shape[0]
                w = pixels.shape[1]
                pixelsGray = np.matrix(np.zeros((h, w)), dtype=np.uint8)
                for i in range(h):
                    for j in range(w):
                        pixelsGray[i, j] = (np.mean(pixels[i, j])).astype(np.uint8)
                pilImage = Image.fromarray(pixelsGray, 'L')
            pilImage.save(pgmFile)
        if os.path.exists(keyFile) == False:
            sift.process_image(pgmFile, keyFile)
        loc, des = sift.read_features_from_file(keyFile)
        #im.features = [Feature(im.id, des[i], loc[i]) for i in range(len(des))]
        #print "Total features: ", len(im.features)
        im = ImageObject(id, pixels, loc, des)
        # add to dictionary
        self.dictFileImage[file] = im
        self.dictIdImage[im.id] = im
        # increase total images
        self.totalImages += 1
        #print "Total images: ", self.totalImages
        return im

Author: songuke | Project: pymosaick | Lines: 41 | Source: ImageMosaick.py
Example 19: make_keypoints

def make_keypoints(img):
    '''
    makes and saves the key file
    '''
    if img.filename_resized:
        name = img.filename_resized
    else:
        name = img.filename
    kp_name = name.split('.')[-2] + '.key'
    if sift.process_image(img.filename_pgm, kp_name):
        img.filename_keypoints = kp_name
        return img
    else:
        print 'Error while making keypoints. Exit.'
        exit(-1)

Author: lepilepi | Project: pic-vid-organizr_beta | Lines: 16 | Source: sift_turbo.py
Example 20: extractSift

def extractSift(filename):
    features_fname = filename + '.sift'
    sift.process_image(filename, features_fname)
    locs, descriptors = sift.read_features_from_file(features_fname)
    return descriptors

Author: rbaravalle | Project: europeanfood | Lines: 5 | Source: classifierBrod.py
Note: The sift.process_image examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use of the code should follow the corresponding project's license. Do not reproduce this article without permission.