Python cython_nms.nms function code examples


This article collects typical usage examples of the Python function utils.cython_nms.nms. If you have been wondering how exactly the nms function is used, how to call it, or what real-world uses look like, the curated code examples below should help.



A total of 20 code examples of the nms function are shown below, sorted by popularity by default.
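Before the examples, a note on the calling convention they all share: nms expects an N x 5 float32 array dets whose rows are (x1, y1, x2, y2, score) and returns the indices of the rows to keep. As a reference for what the Cython routine computes (this is only a sketch, not the library code itself; the function name is illustrative), a plain-NumPy version of the same greedy IoU suppression looks roughly like this:

import numpy as np

def py_nms_sketch(dets, thresh):
    """Greedy non-maximum suppression over (x1, y1, x2, y2, score) rows."""
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]          # highest-scoring box first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # IoU of the kept box against every remaining box
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Suppress boxes whose overlap with the kept box exceeds the threshold
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep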

Example 1: apply_nms

def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(num_classes)]
    for cls_ind in xrange(num_classes):
        for im_ind in xrange(num_images):
            dets = all_boxes[cls_ind][im_ind]
            if dets == []:
                continue

            x1 = dets[:, 0]
            y1 = dets[:, 1]
            x2 = dets[:, 2]
            y2 = dets[:, 3]
            scores = dets[:, 4]
            inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
            dets = dets[inds,:]
            if dets == []:
                continue

            keep = nms(dets, thresh)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
Developer: Charlie-Huang | Project: SubCNN | Lines: 29 | Source: test.py


Example 2: demoRest

def demoRest(net, image_name, classes, box_file, obj_proposals, im_file, im):
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
                                                                    CONF_THRESH)
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Developer: kevinisbest | Project: ESARec | Lines: 26 | Source: demos.py
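Most of the examples that follow repeat the same per-class post-processing seen here: stack the class boxes with their scores, run nms, then keep only detections above a confidence threshold. Collected into a standalone helper (the function name and default thresholds are illustrative, not taken from any of the quoted repositories; it assumes the compiled utils.cython_nms module is importable), the recurring pattern is roughly:

import numpy as np
from utils.cython_nms import nms

def filter_class_detections(cls_boxes, cls_scores, nms_thresh=0.3, conf_thresh=0.8):
    """Suppress overlapping boxes for one class and keep the confident ones.

    cls_boxes: (N, 4) array of (x1, y1, x2, y2); cls_scores: (N,) array of scores.
    Returns an (M, 5) array of surviving (x1, y1, x2, y2, score) rows.
    """
    dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
    keep = nms(dets, nms_thresh)                  # indices of boxes to keep
    dets = dets[keep, :]
    return dets[dets[:, -1] >= conf_thresh, :]    # final confidence cut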


Example 3: demo

def demo(net, im, scale_factor, classes):
    """Detect object classes in an image using pre-computed object proposals."""

    im2 = cv2.resize(im, (0,0), fx=1.0/scale_factor, fy=1.0/scale_factor)

    obj_proposals_in = []
    dlib.find_candidate_object_locations(im2, obj_proposals_in, min_size=70)

    obj_proposals = np.empty((len(obj_proposals_in),4))
    for idx in range(len(obj_proposals_in)):
        obj_proposals[idx] = [obj_proposals_in[idx].left(), obj_proposals_in[idx].top(), obj_proposals_in[idx].right(), obj_proposals_in[idx].bottom()]

    # Detect all object classes and regress object bounds
    scores, boxes = im_detect(net, im2, obj_proposals)

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]

    return [im2, cls, dets, CONF_THRESH]
Developer: asbroad | Project: fast-rcnn | Lines: 28 | Source: webcam.py


Example 4: detect

    def detect(self, img):
        bbox = self.bbox(img)

        scores, boxes = im_detect(self.net, img, bbox)

        result = []

        CONF_THRESH = 0.8
        NMS_THRESH = 0.3
        for cls in self.CLASSES[1:]:
            cls_ind = self.CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]

            inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
            if len(inds) == 0:
                continue

            for i in inds:
                bbox = dets[i, :4]
                x1, y1, x2, y2 = map(int, bbox)
                result.append({
                    "label": cls,
                    "bbox": [x1, y1, x2, y2]
                })

        return result
Developer: mitmul | Project: cvmodules | Lines: 31 | Source: detect.py


Example 5: demo

def demo(net, image_name, classes):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load pre-computed Selected Search object proposals
    box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
                            image_name + '_boxes.mat')
    obj_proposals = sio.loadmat(box_file)['boxes']

    # Load the demo image
    im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
                                                                    CONF_THRESH)
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Developer: kolesman | Project: fast-rcnn | Lines: 34 | Source: demo.py


Example 6: detect_one

def detect_one(name, thresh=0.75):
  im = cv2.imread(osp.join('/home/hezheqi/data/tmp/p2', name))
  scores, polys = im_detect(sess, net, im)
  print(scores)
  boxes = np.zeros((polys.shape[0], 8), dtype=polys.dtype)
  boxes[:, 0] = np.min(polys[:, 0:8:2], axis=1)
  boxes[:, 1] = np.min(polys[:, 1:8:2], axis=1)
  boxes[:, 2] = np.max(polys[:, 0:8:2], axis=1)
  boxes[:, 3] = np.max(polys[:, 1:8:2], axis=1)
  boxes[:, 4] = np.min(polys[:, 8::2], axis=1)
  boxes[:, 5] = np.min(polys[:, 9::2], axis=1)
  boxes[:, 6] = np.max(polys[:, 8::2], axis=1)
  boxes[:, 7] = np.max(polys[:, 9::2], axis=1)
  for j in range(1, NUM_CLASSES):
    inds = np.where(scores[:, j] > thresh)[0]
    cls_scores = scores[inds, j]
    cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
    cls_polys = polys[inds, j * 8:(j + 1) * 8]
    cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
      .astype(np.float32, copy=False)
    cls_dets_poly = cls_polys.astype(np.float32, copy=False)
    keep = nms(cls_dets, cfg.TEST.NMS)
    # cls_dets = cls_dets[keep, :]
    cls_dets = cls_boxes[keep, :]
    cls_dets_poly = cls_dets_poly[keep, :]
    cls_scores = cls_scores[:, np.newaxis]
    cls_scores = cls_scores[keep, :]
    cls_dets = np.hstack((cls_dets, cls_dets_poly, cls_scores))
    print(cls_dets)
    vis_detections(im, cls_dets)
    cv2.imwrite(osp.join(out_dir, name), im)
    fout = open(osp.join(out_dir, 'txt', name[:-4]+'.txt'), 'w')
    for det in cls_dets:
      fout.write('{}\n'.format(' '.join(str(int(d)) for d in det[4:12])))
Developer: lz20061213 | Project: quadrilateral | Lines: 34 | Source: text_demo.py


Example 7: recognize_img

def recognize_img(net, image_name, box_file, classes):
    obj_proposals = sio.loadmat(box_file)['boxes']
    # Load the demo image
    im = cv2.imread(image_name)
    # Detect all object classes and regress object bounds
    scores, boxes = im_detect(net, im, obj_proposals)
    #print type(boxes)
    #dims = boxes.shape
    #rows = dims[0]
    #cols = dims[1]

    # Visualize detections for each class
    CONF_THRESH = 0.85
    NMS_THRESH = 0.3
    data_list = []
    for cls in classes:    	
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        tmplist = get_detection_box(cls, dets, thresh=CONF_THRESH)
        if len(tmplist) == 0:
            continue
        data_list.extend(tmplist)
    data_list.sort(key=lambda obj:obj.get('xoffset'), reverse=False)
    #data_list = char_roi_filter(data_list)
    str = ''
    for elem in data_list:
        str = str + elem.get('char')
    return str
Developer: winjia | Project: my-fastrcnn | Lines: 35 | Source: fastrcnn.py


Example 8: apply_nms

    def apply_nms(all_boxes, thresh,intra_class_nms=False):
        """Apply non-maximum suppression to all predicted boxes output."""
        num_classes = len(all_boxes)
        num_images = len(all_boxes[0])
        nms_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(num_classes)]
        for im_ind in xrange(num_images):
            for cls_ind in xrange(num_classes):
                dets = all_boxes[cls_ind][im_ind]
                if dets == []:
                    continue
                if not 'keep_box_all_class' in vars():
                    dets_aug = dets
                else:
                    dets_aug = np.row_stack((keep_box_all_class,dets))
                keep = nms(dets_aug, thresh)
                if len(keep) == 0:continue
                if intra_class_nms:
                    keep_box_all_class = dets_aug[keep, :].copy()
                else:
                    nms_boxes[cls_ind][im_ind] = dets_aug[keep, :].copy()
            
            if intra_class_nms:
                #run over all classes to match image with class
                keep_set = set([tuple(x) for x in keep_box_all_class])
                for cls_ind in xrange(num_classes):
                    class_set = set([tuple(x) for x in all_boxes[cls_ind][im_ind]])
                    nms_boxes[cls_ind][im_ind] = np.array([x for x in class_set & keep_set]).copy()
                del keep_box_all_class
           
        return nms_boxes
Developer: guyrose3 | Project: gen_scene_graph | Lines: 31 | Source: obj_detector.py


Example 9: demo

def demo(net, image_name, classes, ssdir, imgdir, normdir, savefile):
    """Detect object classes in an image using pre-computed object proposals."""

    box_file = os.path.join(ssdir, image_name + '.mat')
    obj_proposals = sio.loadmat(box_file)['boxes']

    # Load the demo image
    im_file = os.path.join(imgdir, image_name + '.jpg')
    im = cv2.imread(im_file)
    #print(np.shape(im))

    # Load the demo image
    norm_file = os.path.join(normdir, image_name + '.jpg')
    norm_im = cv2.imread(norm_file)
    norm_im = cv2.resize(norm_im, (im.shape[0], im.shape[1]) )
    
    im = (im, norm_im)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    # print ('Detection took {:.3f}s for '
    #       '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    thresh = 0.3
    fid = open(savefile,'w')

    cnt = 0

    for cls in classes:
        cnt = cnt + 1
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        inds = np.where(dets[:, -1] >= thresh)[0]
        for i in inds:
            bbox = dets[i, :4]
            score = dets[i, -1]
            fid.write('{0:d}'.format(cnt))
            fid.write(' ')
            fid.write('{0:.3f}'.format(score))
            for j in range(4):
                fid.write(' ')
                fid.write('{0:.3f}'.format(bbox[j]))
            fid.write('\n')

    fid.close()
Developer: xiaolonw | Project: fast-rcnn-normg | Lines: 56 | Source: demo3.py


Example 10: demo

def demo(net, image_name, box_file, out_img, classes):
    obj_proposals = sio.loadmat(box_file)['boxes']
    # Load the demo image
    im_file = image_name#os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    im = cv2.imread(im_file)
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    #print type(boxes)
    dims = boxes.shape
    print dims
    rows = dims[0]
    cols = dims[1]
    #for elem in boxes.flat:
    #	print elem
    print '-===-=-==-==-=-====================--------'
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.85
    NMS_THRESH = 0.3
    img = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    data_list = [];
    for cls in classes:    	
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        #print cls_boxes
        #print '================='
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        #print cls
        cls_boxes = cls_boxes[keep, :]

        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)

        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        
        tmplist = vis_detections(ax, cls, dets, thresh=CONF_THRESH)
        if len(tmplist) == 0:
            continue
        data_list.extend(tmplist)
    #print data_list
    #print '====================='
    plt.savefig(out_img)
    data_list.sort(key=lambda obj:obj.get('xoffset'), reverse=False)
    str = ''
    for elem in data_list:
        str = str + elem.get('char')
    return str
Developer: winjia | Project: my-fastrcnn | Lines: 56 | Source: ccdemo.py


Example 11: test_net

def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.05):
  np.random.seed(cfg.RNG_SEED)
  """Test a Fast R-CNN network on an image database."""
  num_images = len(imdb.image_index)
  # all detections are collected into:
  #  all_boxes[cls][image] = N x 5 array of detections in
  #  (x1, y1, x2, y2, score)
  all_boxes = [[[] for _ in range(num_images)]
         for _ in range(imdb.num_classes)]

  output_dir = get_output_dir(imdb, weights_filename)
  # timers
  _t = {'im_detect' : Timer(), 'misc' : Timer()}

  for i in range(num_images):
    im = cv2.imread(imdb.image_path_at(i))

    _t['im_detect'].tic()
    scores, boxes = im_detect(sess, net, im)
    _t['im_detect'].toc()

    _t['misc'].tic()

    # skip j = 0, because it's the background class
    for j in range(1, imdb.num_classes):
      inds = np.where(scores[:, j] > thresh)[0]
      cls_scores = scores[inds, j]
      cls_boxes = boxes[inds, j*4:(j+1)*4]
      cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
        .astype(np.float32, copy=False)
      keep = nms(cls_dets, cfg.TEST.NMS)
      cls_dets = cls_dets[keep, :]
      all_boxes[j][i] = cls_dets

    # Limit to max_per_image detections *over all classes*
    if max_per_image > 0:
      image_scores = np.hstack([all_boxes[j][i][:, -1]
                    for j in range(1, imdb.num_classes)])
      if len(image_scores) > max_per_image:
        image_thresh = np.sort(image_scores)[-max_per_image]
        for j in range(1, imdb.num_classes):
          keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
          all_boxes[j][i] = all_boxes[j][i][keep, :]
    _t['misc'].toc()

    print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
        .format(i + 1, num_images, _t['im_detect'].average_time,
            _t['misc'].average_time))

  det_file = os.path.join(output_dir, 'detections.pkl')
  with open(det_file, 'wb') as f:
    pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

  print('Evaluating detections')
  imdb.evaluate_detections(all_boxes, output_dir)
Developer: BoAdBo | Project: AlphaPose | Lines: 55 | Source: test.py


Example 12: demo

def demo(net, image_name, classes):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load pre-computed Selected Search object proposals
    # box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')
    test_mats_path = '/home/tanshen/fast-rcnn/data/kaggle/test_bbox'
    box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')
    obj_proposals = sio.loadmat(box_file)['boxes']

    # Load the demo image
    test_images_path = '/home/tanshen/fast-rcnn/data/kaggle/ImagesTest'
    # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    im_file = os.path.join(test_images_path, image_name + '.jpg')
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
   # print ('Detection took {:.3f}s for '
   #        '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0
    NMS_THRESH = 0.3
    max_inds = 0
    max_score = 0.0
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
       # print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,
       #                                                             CONF_THRESH, image_name)
        #if get_max!=[]: 

        [ind,tmp]=get_max(im, cls, dets, thresh=CONF_THRESH)
        #print image_name,cls,tmp

        #vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)
        #print dets[:,-1]
    #print image_name,max_score
        file.writelines([image_name,'\t',cls,'\t',str(tmp),'\n'])
        if(max_score<tmp):
            max_score=tmp
            cls_max=cls
    print image_name,cls_max,max_score
Developer: tanshen | Project: fast-rcnn | Lines: 54 | Source: demo_kaggle_all_ts.py


Example 13: runDetection

def runDetection (net, basePath, testFileName,classes):
    ftest = open(testFileName,'r')
    imageFileName = basePath+'/' + ftest.readline().strip()
    num = 1
    outputFile = open('CarDetectionResult_window_30000.txt','w')
    while imageFileName:
        print imageFileName
        print 'now is ',num
        num += 1
        imageFileBaseName = os.path.basename(imageFileName)
        imageFileDir = os.path.dirname(imageFileName)
        boxFileName = imageFileDir +'/'+imageFileBaseName.replace('.jpg','_boxes.mat')
        print boxFileName
        obj_proposals = sio.loadmat(boxFileName)['boxes']
        #obj_proposals[:,2] = obj_proposals[:, 2] + obj_proposals[:, 0]
        #obj_proposals[:,3] = obj_proposals[:, 3] + obj_proposals[:, 1]
        im = cv2.imread(imageFileName)

        timer = Timer()
        timer.tic()
        scores, boxes = im_detect(net, im, obj_proposals)
        timer.toc()
        print ('Detection took {:.3f} for '
               '{:d} object proposals').format(timer.total_time, boxes.shape[0])

        CONF_THRESH = 0.8
        NMS_THRESH = 0.3
        for cls in classes:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]
            print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
                                                                    CONF_THRESH)

            inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
            print 'Detected car number ', inds.size
            if len(inds) != 0:
                outputFile.write(imageFileName+' ')
                outputFile.write(str(inds.size)+' ')
                for i in inds:
                    bbox = dets[i, :4]
                    outputFile.write(str(int(bbox[0]))+' '+ str(int(bbox[1]))+' '+ str(int(bbox[2]))+' '+ str(int(bbox[3]))+' ')
                outputFile.write('\n')
            else:
                outputFile.write(imageFileName +' 0' '\n')
        temp = ftest.readline().strip()
        if temp:
            imageFileName = basePath+'/' + temp
        else:
            break
Developer: wangsd01 | Project: py-faster-rcnn | Lines: 54 | Source: CAR.py


Example 14: detect

 def detect(self, image_name, mode, mixed=True):
     
     # DJDJ
     # Load the demo image
     #im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name)
     #im = cv2.imread(im_file)
     
     im = cv2.imread(image_name)
 
     # Detect all object classes and regress object bounds
     for i in range(1):
         timer = Timer()
         timer.tic()
         if mixed:
             scores, boxes = im_detect_mixed(self.net, im)
         else:
             scores, boxes = im_detect(self.net, im, obj_proposals)
         timer.toc()
         print ('Detection took {:.3f}s for '
                '{:d} object proposals').format(timer.total_time, boxes.shape[0])
 
     # Visualize detections for each class
     CONF_THRESH = 0.8
     NMS_THRESH = 0.3
     timer = Timer()
     result = {}
     
     if mode == '3':     # Car mode
         classes = CLASSES_CAR
     else:
         classes = CLASSES
         
     for cls in CLASSES:
         if mode == '3' and (cls in CLASSES_CAR) == False:     # Car mode
             continue
         
         cls_ind = CLASSES.index(cls)
         cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
         cls_scores = scores[:, cls_ind]
         dets = np.hstack((cls_boxes,
                           cls_scores[:, np.newaxis])).astype(np.float32)
         
         timer.tic()
         keep = nms(dets, NMS_THRESH)
         timer.toc()
         
         dets = dets[keep, :]
         result[cls_ind] = dets
         #print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls, CONF_THRESH)
         #vis_detections(im, cls, dets, thresh=CONF_THRESH)
     #print ('nms took {:.3f}s').format(timer.total_time)
     
     return result        
Developer: only4hj | Project: fast-rcnn | Lines: 53 | Source: detect_engine.py


Example 15: recognize_checkcode_img

def recognize_checkcode_img(net, image_name, classes):
    boxes = get_selective_search_boxes(image_name)
    if boxes is None:
        dict = {}
        dict['ccvalue'] = ''
        dict['rects'] = []
        dict['code'] = 1
        return dict
    #im = cv2.imread(image_name)
    im = load_image(image_name)
    #print im
    #print type(im)
    #print im.shape
    #cv2.imwrite('asasdf.jpg', im)
    scores, boxes = im_detect(net, im, boxes)
    CONF_THRESH = 0.5
    NMS_THRESH = 0.1
    data_list = []
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        tmplist = get_detection_box(cls, dets, thresh=CONF_THRESH)
        if len(tmplist) == 0:
            continue
        data_list.extend(tmplist)
    data_list.sort(key=lambda obj:obj.get('xoffset'), reverse=False)
    #
    #print data_list
    #print len(data_list)
    #print '-=-=-=-=-=-=-=-='
    data_list = rect_filter(data_list, 0.85)
    #print len(data_list)
    #print '-=-=-=-=-=-=-=-='
    data_list = char_roi_filter(data_list)
    #print len(res_list)
    #print '-=-=-=-=-=-=-=-='
    str = ''
    for elem in data_list:
        str = str + elem.get('char')
    #print res_list
    dict = {}
    dict['ccvalue'] = str
    dict['rects'] = data_list
    dict['code'] = 0
    #print dict
    return dict
Developer: winjia | Project: my-fastrcnn | Lines: 53 | Source: fastrcnn.py


Example 16: demo

def demo(net, image_name):

    # get the proposals by using the shell to use c++ codes    
    os.system(
        '/media/DataDisk/twwang/fast-rcnn/rcnn_test/proposals_for_python.sh' \
        + ' ' + image_name)
    
    # Load computed Selected Search object proposals
    data = open('/home/twwang/temp_proposal', "rb").read()
    number_proposals = struct.unpack("i", data[0:4])[0]
    number_edge = struct.unpack("i", data[4:8])[0]
    assert number_edge == 4, 'The size is not matched!\n' + \
        'Note that the first two variables are the number of proposals\n' + \
        ' and number of coordinates in a box, which is 4 by default\n'
    
    #cfg.NUM_PPS = 10
    number_proposals = min(cfg.NUM_PPS, number_proposals)
    obj_proposals = np.asarray(struct.unpack(
        str(number_proposals * 4) + 'f',
        data[8: 8 + 16 * number_proposals])).reshape(number_proposals, 4)
    
    im = cv2.imread(image_name)
    #print im.shape
    #im = cv2.flip(im, 0)
    #im = cv2.transpose(im)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    if cfg.MULTI_LABEL:
        scores, boxes, multi_labels = im_detect(net, im, obj_proposals)
    else:
        scores, boxes = im_detect(net, im, obj_proposals)        
    timer.toc()
    
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class

    for cls in ['Upper', 'Lower', 'Whole']:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        
        vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)
    print ('The demo image is save as {}').format("/home/twwang/demo_results/" + \
        os.path.split(image_name)[1])
Developer: twtygqyy | Project: clothesDetection | Lines: 52 | Source: three_demo.py


Example 17: mytest

def mytest(net, imageName):
    '''it is a simple test for one image'''
    obj_proposals = getProposal(imageName)
    im = cv2.imread(imageName)
    scores, boxes = im_detect(net, im, obj_proposals)

    # visualizing
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3

    # change the order ?
    im = im[:, :, (2, 1, 0)]
    for cls in np.arange(len(CLASSES)):
        '''test the score on all the classes'''

        cls_boxes = boxes[:, 4 * cls: 4 * (cls + 1)]  # get boxes
        cls_scores = scores[:, cls]
        # compute the nms results
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])
                         ).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]

        # plot if necessary
        indexs = np.where(dets[:, -1] >= CONF_THRESH)[0]
        if len(indexs) == 0:  # nothing above CONF_THRESH for this class
            continue
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.imshow(im, aspect='equal')
        for i in indexs:
            bbox = dets[i, :4]
            score = dets[i, -1]

            ax.add_patch(plt.Rectangle((bbox[0], bbox[1]),
                                       bbox[2] - bbox[0],
                                       bbox[3] - bbox[1],
                                       fill=False,
                                       edgecolor='red',
                                       linewidth=3.5
                                       )
                         )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(CLASSES[cls], score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

        ax.set_title(('{} detections with '
                      'p({} | box) >= {:.1f}').format(CLASSES[cls], CLASSES[cls], CONF_THRESH), fontsize=14)

        plt.axis('off')
        plt.tight_layout()
        plt.draw()
Developer: moonblue333 | Project: clothesDetection | Lines: 52 | Source: mytools.py


Example 18: callback2

  def callback2(self,bbox):
    global cv_image

    # obj_proposals
    bbox_num = len(bbox.data) / 4;
    print "bbox_num:", bbox_num
    obj_proposals = np.array( [ [0 for i in range(0,4)] for j in range(0,bbox_num) ] )
    for i in range( 0, bbox_num ):
      obj_proposals[ i ][ 0 ] = bbox.data[ 4 * i ]
      obj_proposals[ i ][ 1 ] = bbox.data[ 4 * i + 2 ]
      obj_proposals[ i ][ 2 ] = bbox.data[ 4 * i + 1 ]
      obj_proposals[ i ][ 3 ] = bbox.data[ 4 * i + 3 ]
    
    ##############################################
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(self.net, cv_image, obj_proposals)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # insert predicted class label to each box
    labels = np.array([0 for i in range(0,boxes.shape[0])])
    for i in range(0,boxes.shape[0]):
        tmpscores = scores[i, :]
        labels[ i ] = np.argmax(tmpscores)
        
    # Visualize detections for each class
    output_image = cv_image
    for cls_ind in range(1,len(CLASSES)):
        if cls_ind not in labels:
            continue
        inds = np.where( labels == cls_ind )
        cls_boxes = boxes[inds, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[inds, cls_ind]
        cls_boxes = cls_boxes[ 0 ]
        cls_scores = cls_scores[ 0 ]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, self.NMS_THRESH)
        dets = dets[keep, :]
        for i in range(0,dets.shape[0]):
            if dets[i, -1] > self.CONF_THRESH:
                bbox_ = dets[i, :4]
                print "      DETECTED! ", CLASSES[ cls_ind ], dets[i, :]
                cv2.rectangle(output_image, (bbox_[ 0 ], bbox_[ 1 ]),(bbox_[ 2 ], bbox_[ 3 ]),(0,0,255),2)
                cv2.putText(output_image,CLASSES[ cls_ind ],(bbox_[ 0 ], bbox_[ 1 ]),cv2.FONT_HERSHEY_COMPLEX, 1.,(0,0,255),2)

    cv2.imshow("Image window", output_image)
    cv2.waitKey(3)
Developer: kanezaki | Project: selective_search_3d | Lines: 51 | Source: do_fast-rcnn.py


Example 19: test_net

def test_net(net, imdb):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # heuristic: keep an average of 40 detections per class per images prior
    # to NMS
    max_per_set = 40 * num_images
    # heuristic: keep at most 100 detection per class per image prior to NMS
    max_per_image = 100
    # detection thresold for each class (this is adaptively set based on the
    # max_per_set constraint)
    thresh = -np.inf * np.ones(imdb.num_classes)
    # top_scores will hold one minheap of scores per class (used to enforce
    # the max_per_set constraint)
    top_scores = [[] for _ in xrange(imdb.num_classes)]
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # timers
    _t = {'im_detect' : Timer(), 'misc' : Timer()}

    roidb = imdb.roidb
    for i in xrange(num_images):
        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(net, im, roidb[i]['boxes'])
        _t['im_detect'].toc()

        _t['misc'].tic()
        inds = np.where(roidb[i]['gt_classes'] == 0)[0]
        cls_scores = scores[inds, 1]
        cls_boxes = boxes[inds, 4:8]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
        sio.savemat('%s/%d.mat' % (output_dir, i), {'dets': dets})
        if 0:
            keep = nms(dets, 0.3)
            vis_detections(im, imdb.classes[1], dets[keep, :])
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time)
    """
Developer: byangderek | Project: CRAFT | Lines: 49 | Source: test.py


Example 20: demo

def demo(net, im_file, classes):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load pre-computed Selected Search object proposals
    #box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')
    #obj_proposals = sio.loadmat(box_file)['boxes']
    # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    # im_file = os.path.join('/media/wxie/UNTITLED/vision_log/rgb', image_name + '.jpg')

    # Dummy bounding box list with only 1 bounding box the size of the image
    im = cv2.imread(im_file)
    img_size_box = np.array([[0,0,im.shape[1]-1,im.shape[0]-1]])

    timer2 = Timer()
    timer2.tic()
    obj_proposals = run_dlib_selective_search(im_file)
    timer2.toc()
    print ('Proposal selective search took {:.3f}s').format(timer2.total_time)


    # Load the demo image
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    # scores, boxes = im_detect(net, im, img_size_box)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        cls_boxes = cls_boxes[keep, :]
        cls_scores = cls_scores[keep]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
                                                                    CONF_THRESH)
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Developer: willxie | Project: fast-rcnn | Lines: 49 | Source: demo_dlib_ss.py



Note: The utils.cython_nms.nms examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.

