Python sift.read_features_from_file Function Code Examples


This article collects typical usage examples of the sift.read_features_from_file function in Python. If you are wondering how exactly read_features_from_file is used, how to call it, or what real-world examples look like, the curated code samples below may help.



The following presents 20 code examples of the read_features_from_file function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
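Before the individual examples, here is a minimal sketch of the typical call pattern, assuming the sift module from the PCV ("Programming Computer Vision with Python") codebase that most of the examples below appear to use: process_image runs an external SIFT binary on an image and writes a plain-text feature file, and read_features_from_file loads that file and returns two NumPy arrays (keypoint locations and 128-dimensional descriptors). The file names are placeholders.

  import sift

  # Write SIFT features for an image to a text file (assumes the external
  # SIFT binary wrapped by the PCV sift module is installed and on the path;
  # 'empire.jpg' is a placeholder image name).
  sift.process_image('empire.jpg', 'empire.sift')

  # Load the features back: locs has one row per keypoint
  # (x, y, scale, orientation); desc holds the matching 128-D descriptors.
  locs, desc = sift.read_features_from_file('empire.sift')
  print(locs.shape, desc.shape)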

Example 1: train

  def train(self, featurefiles, k=100, subsampling=10):
    """Train a vocabulary from features in files listed in |featurefiles| using
    k-means with k words. Subsampling of training data can be used for speedup.
    """
    image_count = len(featurefiles)

    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    descriptors = descr[0]  # Stack features for k-means.
    for i in numpy.arange(1, image_count):
      descr.append(sift.read_features_from_file(featurefiles[i])[1])
      descriptors = numpy.vstack((descriptors, descr[i]))

    # Run k-means.
    self.voc, distortion = vq.kmeans(descriptors[::subsampling, :], k, 1)
    self.word_count = self.voc.shape[0]

    # Project training data on vocabulary.
    imwords = numpy.zeros((image_count, self.word_count))
    for i in range(image_count):
      imwords[i] = self.project(descr[i])

    occurence_count = numpy.sum((imwords > 0)*1, axis=0)
    
    self.idf = numpy.log(image_count / (occurence_count + 1.0))
    self.trainingdata = featurefiles
Developer: PhilomontPhlea, Project: PhleaBytesCV, Lines: 26, Source: vocabulary.py


Example 2: train

    def train(self, featurefiles, k=100, subsampling=10):
        nbr_images = len(featurefiles)
        descr = []
        descr.append(sift.read_features_from_file(featurefiles[0])[1])
        descriptors = descr[0]
        print "begin loading image feature files..."
        for i in np.arange(1, nbr_images):
            descr.append(sift.read_features_from_file(featurefiles[i])[1])
#                descriptors = np.vstack((descriptors, descr[i]))
            descriptors = np.vstack((descriptors, descr[i][::subsampling,:]))
            if i%100 == 0:
                print i, "images have been loaded..."
        print "finish loading image feature files!"

#        self.voc, distortion = cluster.kmeans(descriptors[::subsampling,:], k, 1)
        print "begin MiniBatchKMeans cluster....patient"
        mbk = MiniBatchKMeans(k, init="k-means++", compute_labels=False, n_init=3, init_size=3*k)
#        mbk.fit(descriptors[::subsampling,:])
        mbk.fit(descriptors)
        self.voc = mbk.cluster_centers_
        print "cluster finish!"
        self.nbr_word = self.voc.shape[0]
        imwords = np.zeros((nbr_images, self.nbr_word))
        for i in xrange(nbr_images):
            imwords[i] = self.project(descr[i])

        nbr_occurences = np.sum((imwords > 0)*1, axis=0)
        self.idf = np.log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
        self.traindata = featurefiles
Developer: yangxian10, Project: CBIR_py, Lines: 29, Source: visual_word.py


Example 3: train

 def train(self,featurefiles,k=100,subsampling=10):
     """ Train a vocabulary from features in files listed 
         in featurefiles using k-means with k number of words. 
         Subsampling of training data can be used for speedup. """
     
     nbr_images = len(featurefiles)
     # read the features from file
     descr = []
     descr.append(sift.read_features_from_file(featurefiles[0])[1])
     descriptors = descr[0] #stack all features for k-means
     for i in arange(1,nbr_images):
         descr.append(sift.read_features_from_file(featurefiles[i])[1])
         descriptors = vstack((descriptors,descr[i]))
         
     # k-means: last number determines number of runs
     self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
     self.nbr_words = self.voc.shape[0]
     
     # go through all training images and project on vocabulary
     imwords = zeros((nbr_images,self.nbr_words))
     for i in range( nbr_images ):
         imwords[i] = self.project(descr[i])
     
     nbr_occurences = sum( (imwords > 0)*1 ,axis=0)
     
     self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
     self.trainingdata = featurefiles
Developer: Adon-m, Project: PCV, Lines: 27, Source: vocabulary.py


Example 4: train

  def train(self,featurefiles,k=100,subsampling=10):
    """ featurefilesに列挙されたファイルから特徴量を読み込み
      k平均法とk個のビジュアルワードを用いてボキャブラリを
      学習する。subsamplingで教師データを間引いて高速化可能 """

    nbr_images = len(featurefiles)
    # read the features from file
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    descriptors = descr[0] #stack all features for k-means
    for i in arange(1,nbr_images):
      descr.append(sift.read_features_from_file(featurefiles[i])[1])
      descriptors = vstack((descriptors,descr[i]))

    # k-means: the last number specifies the number of runs
    self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
    self.nbr_words = self.voc.shape[0]

    # project each training image onto the vocabulary in turn
    imwords = zeros((nbr_images,self.nbr_words))
    for i in range( nbr_images ):
      imwords[i] = self.project(descr[i])

    nbr_occurences = sum( (imwords > 0)*1 ,axis=0)

    self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.trainingdata = featurefiles
Developer: Hironsan, Project: ComputerVision, Lines: 27, Source: vocabulary.py


Example 5: get_sift_match

def get_sift_match(f1, f2):
	fn1, fext1 = os.path.splitext(os.path.basename(f1))
	fn2, fext2 = os.path.splitext(os.path.basename(f2))
	try:
		l1, d1 = sift.read_features_from_file(TMP_DIR + fn1 + '.key')
		l2, d2 = sift.read_features_from_file(TMP_DIR + fn2 + '.key')
		
		return sift.score(d1, d2)
	except:
		return 0.0
Developer: zlike, Project: affivir, Lines: 10, Source: sift_runner.py


Example 6: get_krt

def get_krt(im1, im2):
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3]+"sift")

    # compute features                                                        
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0],sifts[0])

    l0,d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1],sifts[1])
    l1,d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography                                        
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()
    H,ransac_data = homography.H_from_ransac(fp,tp,model)


    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration((Image.open(im2).size))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image
    print dot(H,box_cam1)
    box_trans = homography.normalize(dot(H,box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print homography.normalize(dot(dot(H,cam1.P),point))
    print cam2.project(point)

    import pickle
    with open('%s.pkl' % ims[1][:-4],'w') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
Developer: ak352, Project: pycv, Lines: 54, Source: test_cube.py


Example 7: cbir_train

def cbir_train(train_path, voc_name, db_name, n_subsample=2000, n_cluster=2000, subfeatsampling=10):
    voc_name = voc_name + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling)
    db_name = db_name[:-3] + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling) + db_name[-3:]

    imlist, featlist = cbir_utils.create_imglist_featlist(train_path)
    imlist = imlist[:n_subsample]
    featlist = featlist[:n_subsample]

    ### generate sift feature
    nbr_images = len(imlist)
    ''''''
    for i in range(nbr_images):
        sift.process_image(imlist[i], featlist[i], mask = True)

    ### generate visual word
    voc = visual_word.Vocabulary(voc_name)
    voc.train(featlist, n_cluster, subfeatsampling)
    with open(voc_name+'.pkl', 'wb') as f:
        cPickle.dump(voc, f)
    print 'vocabulary is', voc.name, voc.nbr_word

    ### generate image index
    with open(voc_name+'.pkl', 'rb') as f:
        voc = cPickle.load(f)

    indx = image_search.Indexer(db_name, voc)
    indx.create_tables()

    for i in range(nbr_images):
        locs, descr = sift.read_features_from_file(featlist[i])
        indx.add_to_index(imlist[i], descr)

    indx.db_commit()
    print 'generate index finish!'
    print 'training over'
Developer: yangxian10, Project: CBIR_py, Lines: 35, Source: cbir_demo.py


Example 8: get_descriptors

def get_descriptors(img):
    # expects an ImageObject that already has a filename_keypoints attribute
    '''
    returns the image as array, the location of features, and the descriptors
    '''
    loc,desc = sift.read_features_from_file(img.filename_keypoints)
    return loc, desc
Developer: lepilepi, Project: pic-vid-organizr_beta, Lines: 7, Source: sift_turbo.py


Example 9: runSurf

 def runSurf(self):
     #save a grayscale image
     im = self.image.convert("L")
     im.save(self.filename + "_gray.pgm", "PPM")  
     surfexec = surfpath + " -i " +  self.filename + "_gray.pgm" + " -o " + self.filename + "_result.txt"
     print surfexec
     os.system(surfexec)
     self.locators, self.descriptors = sift.read_features_from_file(self.filename + "_result.txt")
Developer: drwelby, Project: UAV-Ortho, Lines: 8, Source: uav.py


Example 10: main

def main():
	
	nbr_images = len(imlist)
	featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]

	for i in range(nbr_images):
		sift.process_image(imlist[i],featlist[i])

	voc = vocabulary.Vocabulary('ukbenchtest')
	voc.train(featlist,1000,10)

	with open('vocabulary.pkl', 'wb') as f:
		pickle.dump(voc,f)
	print 'vocabulary is:', voc.name, voc.nbr_words


	nbr_images = len(imlist)

	with open('vocabulary.pkl', 'rb') as f:
		voc = pickle.load(f)


	indx = imagesearch.Indexer('test.db',voc)
	indx.create_tables()

	for i in range(nbr_images)[:100]:
		locs,descr = sift.read_features_from_file(featlist[i])
		indx.add_to_index(imlist[i],descr)

	indx.db_commit()


	con = sqlite.connect('test.db')
	print con.execute('select count (filename) from imlist').fetchone()
	print con.execute('select * from imlist').fetchone()


	src = imagesearch.Searcher('test.db')
	locs,descr = sift.read_features_from_file(featlist[0])
	iw = voc.project(descr)

	print 'ask using a histogram...'
	print src.candidates_from_histogram(iw)[:10]

	print 'try a query...'
	print src.query(imlist[0])[:10]
Developer: rickbhardwaj, Project: videoprocessing, Lines: 46, Source: ImageSearchMain.py


Example 11: train

  def train(self,featurefiles,k=100,subsampling=10):
    """ featurefilesに列挙されたファイルから特徴量を読み込み
      k平均法とk個のビジュアルワードを用いてボキャブラリを
      学習する。subsamplingで教師データを間引いて高速化可能 """

    nbr_images = len(featurefiles)
    # read the features from file
    #points = []
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    # optional.view feature points.
    #points.append( np.array(sift.read_features_from_file(featurefiles[0])[0][:,0:2]) ) # stock of x,y axis value
    descriptors = descr[0] #stack all features for k-means
    #pointors = points[0]
    for i in arange(1,nbr_images):
      descr.append(sift.read_features_from_file(featurefiles[i])[1])
      #points.append( np.array(sift.read_features_from_file(featurefiles[i])[0][:,0:2]) ) # stock of x,y axis value
      descriptors = vstack((descriptors,descr[i]))
    
    # k-means: the last number specifies the number of runs
    self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
    self.nbr_words = self.voc.shape[0]
    
    # save the cluster centroids
    with open('voc_centroid.pkl','wb') as f:
        pickle.dump(self.voc,f)
    """
    # build a dictionary of words and x,y coordinates
    dic = []
    for i in xrange(len(nbr_images)):
        dic[i] = {}
        dic[i][]
    """
    
    # project each training image onto the vocabulary in turn
    imwords = zeros((nbr_images,self.nbr_words))
    for i in xrange(1): #xrange( nbr_images ):
      # imwords[i] = self.project(descr[i], points[i]) # use this version when using PLSA
      imwords[i] = self.project(descr[i])

    nbr_occurences = sum( (imwords > 0)*1 ,axis=0)

    self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.trainingdata = featurefiles
Developer: shimaXX, Project: workspace, Lines: 44, Source: vocabulary.py


Example 12: plot_sift_feature

def plot_sift_feature(im):
    #imname = 'empire.jpg'
    #im1 = array(Image.open(imname).convert('L'))
    tmpFile = 'tmp.sift'
    sift.process_image(im,tmpFile)
    l1,d1 = sift.read_features_from_file(tmpFile)
    figure()
    gray()
    sift.plot_features(im,l1,circle=True)
    show()
Developer: tianwalker2012, Project: handpa, Lines: 10, Source: image_play.py


Example 13: read_feature_labels

def read_feature_labels(path):
  featlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.dsift')]
  features = []
  for featfile in featlist:
    l, d = sift.read_features_from_file(featfile)
    features.append(d.flatten())

  features = array(features)

  return features
Developer: hvva, Project: autopsy-CV, Lines: 10, Source: train.py


Example 14: runsift

 def runsift(self):
     #save a grayscale image
     imsize = self.image.size
     im = self.image.resize((imsize[0]/10, imsize[1]/10))
     im = im.convert("L")
     im.save(self.filename + "_gray.pgm", "PPM")
     siftexec = siftpath + self.filename + "_gray.pgm >" + self.filename + "_result.txt"
     print siftexec
     os.system(siftexec)
     self.locators, self.descriptors = sift.read_features_from_file(self.filename + "_result.txt")
Developer: drwelby, Project: UAV-Ortho, Lines: 10, Source: uav.py


Example 15: extractSift

def extractSift(input_files):
    all_features_dict = {}
    for i, fname in enumerate(input_files):
        features_fname = fname + '.sift'
        if exists(features_fname) == False:
            print "calculating sift features for", fname
            sift.process_image(fname, features_fname)
        print "gathering sift features for", fname,
        locs, descriptors = sift.read_features_from_file(features_fname)
        print descriptors.shape
        all_features_dict[fname] = descriptors
    return all_features_dict
Developer: navinpai, Project: CS706, Lines: 12, Source: learn.py


Example 16: get_sift_lowe

def get_sift_lowe(img):
    features_fname = img + '.sift'
    if os.path.isfile(features_fname) == False:
        is_size_zero = sift.process_image(img, features_fname)
        if is_size_zero:
            os.remove(features_fname)
            sift.process_image(img, features_fname)
    if os.path.isfile(features_fname) and os.path.getsize(features_fname) == 0:
        os.remove(features_fname)
        sift.process_image(img, features_fname)
    locs, desc = sift.read_features_from_file(features_fname)
    return desc
Developer: afshaanmaz, Project: FoodClassifier, Lines: 12, Source: utils.py


Example 17: read_feature_labels

def read_feature_labels(path):	
  featlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.' + typeFeats)]
  features = []

  for featfile in featlist:
    l, d = sift.read_features_from_file(featfile)
    features.append(d.flatten())

  features = array(features)
  labels = [featfile.split('/')[-1][0] for featfile in featlist]

  return features, featlist
Developer: hvva, Project: autopsy-CV, Lines: 12, Source: video_extract_win.py


Example 18: extractSift

def extractSift(input_files,target_folder):
	all_features_dict = {}
	count=0
	for i,fname in enumerate(input_files):
		features_fname = target_folder+'/'+fname.split('/')[2].split('.')[0]+'.sift'
		if exists(features_fname) == False:
			print("Calculating sift features for ",fname)
			sift.process_image(fname, features_fname,count)
			count+=1
		locs, descriptors = sift.read_features_from_file(features_fname)
		all_features_dict[fname] = (locs,descriptors)
	os.chdir('..')
	return all_features_dict
Developer: parulsethi, Project: espier, Lines: 13, Source: vector_quantize.py


Example 19: find_matches

def find_matches(image_names, root):
    l = {}
    d = {}
    n = len(image_names)
    for i, im in enumerate(image_names):
        resultname = os.path.join(root, '{}.sift'.format(im))
        if not os.path.isfile(resultname):
            sift.process_image(os.path.join(root, '{}.png'.format(im)), resultname)
        l[i], d[i] = sift.read_features_from_file(resultname)

    matches = {}
    for i in range(n - 1):
        matches[i] = sift.match(d[i + 1], d[i])
    return matches, l, d
Developer: softtrainee, Project: arlab, Lines: 14, Source: stitch.py


Example 20: read_gesture_features_labels

def read_gesture_features_labels(path):
    featlist = [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.dsift')]
   # print featlist
    #read the feature
    features = []
    for featfile in featlist:
        l,d = sift.read_features_from_file(featfile)
        features.append(d.flatten())
    features = np.array(features)
    
    #create labels
    labels = [featfile1.split('/')[-1][0] for featfile1 in featlist]
    
    return features,np.array(labels)
Developer: rayjim, Project: python_proj, Lines: 14, Source: file_tools.py



Note: The sift.read_features_from_file examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.

