Python coco.COCO Class Code Examples


This article collects typical usage examples of the pycocotools.coco.COCO class in Python. If you have been wondering what the COCO class does, how to use it, or what real-world usage looks like, the curated examples below should help.



A total of 20 code examples of the COCO class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
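Before the numbered examples, here is a minimal sketch of the basic COCO workflow: building the index from an annotation file, listing categories, and retrieving image and annotation records. The annotation path used below (annotations/instances_val2017.json) is only an assumption for illustration; substitute the path to your own COCO-format annotation file.

from pycocotools.coco import COCO

# Assumed path for illustration; point this at your own COCO-format annotation file.
ann_file = 'annotations/instances_val2017.json'
coco = COCO(ann_file)

# List all category names in the dataset.
cats = coco.loadCats(coco.getCatIds())
print([cat['name'] for cat in cats])

# Get the ids of images containing the 'person' category and load one image record.
cat_ids = coco.getCatIds(catNms=['person'])
img_ids = coco.getImgIds(catIds=cat_ids)
img = coco.loadImgs(img_ids[0])[0]
print(img['file_name'], img['height'], img['width'])

# Load the annotations of that image, restricted to the same categories.
ann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_ids)
anns = coco.loadAnns(ann_ids)
print(len(anns), 'annotations loaded')

The detection-evaluation examples below (Examples 1 and 13) build on this by pairing the ground-truth COCO object with coco.loadRes(...) and pycocotools.cocoeval.COCOeval.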

Example 1: evaluate

def evaluate():
    cocoGt = COCO('annotations.json')
    cocoDt = cocoGt.loadRes('detections.json')
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Developer: cyberCBM, Project: DetectO, Lines: 7, Source: face_detector_accuracy.py


Example 2: coco_eval

def coco_eval(ann_fn, json_fn, save_fn):
    coco = COCO(ann_fn)
    coco_res = coco.loadRes(json_fn)
    coco_evaluator = COCOEvalCap(coco, coco_res)
    # comment below line to evaluate the full validation or testing set. 
    coco_evaluator.params['image_id'] = coco_res.getImgIds()
    coco_evaluator.evaluate(save_fn)
Developer: qyouurcs, Project: seq_style, Lines: 7, Source: caption_nil_training_dp_eval.py


Example 3: ablate

def ablate(imgIds = [], mode ='destroy', out_path="tmp", coco = coco, ct = None,  **args):
    """[ablation entry point 2.0]
    Created to accomodate background-destroying ablation. Will dispatch all
    old ablations (gaussian, blackout, & median) to gen_ablation."""

    if ct is None:
        ct = coco_text.COCO_Text(os.path.join(CD, 'COCO_Text.json'))
    if imgIds == []:
        imgIds = ct.getImgIds(imgIds=ct.train, catIds=[('legibility','legible')])
        imgIds = [imgIds[np.random.randint(0,len(imgIds))]]

    #dispatch to old ablation entry point
    if mode in ['gaussian', 'blackout', 'median']:
        return gen_ablation(imgIds, mode, ct, out_path=out_path, **args)

    #else do destroy_bg
    if coco is None:
        coco = COCO('%s/annotations/instances_%s.json'%(DATA_PATH,DATA_TYPE))
    imgs = coco.loadImgs(imgIds)
    results = []
    for idx, img in enumerate(imgs):
        print("Ablating image {}/{} with id {} ".format(idx+1, len(imgIds), img['id']))
        ori_file_name = os.path.join(CD, DATA_PATH, DATA_TYPE, img['file_name'])
        orig = io.imread(ori_file_name)

        if mode == 'destroy':
            ablt = destroy_bg(orig, img['id'], coco, **args)
        elif mode == 'median_bg':
            ablt = median_bg(orig, img['id'], coco, **args)

        out_file_name = os.path.join(CD, "..", out_path, "%s_%s"%(mode, img['file_name']))
        io.imsave(out_file_name, ablt)

        results.append((img['id'], ori_file_name, out_file_name))
    return results
Developer: stevenygd, Project: coco-text, Lines: 35, Source: ablation.py


Example 4: main

def main(argv):
	## Parsing the command	
	in_path = ''
	out_path = ''
	ann_path = ''
	try:
		opts, args = getopt.getopt(argv,"hi:o:a:",["in=","out=","annotation="])
	except getopt.GetoptError:
		print 'test.py -i <inputfile> -o <outputfile> -a <annotationfile>'
		sys.exit(2)
	for opt, arg in opts:
		if opt == '-h':
			print 'test.py -i <inputfile> -o <outputfile> -a <annotationfile>'
			sys.exit()
		elif opt in ("-i", "--in"):
			in_path = arg
		elif opt in ("-o", "--out"):
			out_path = arg
		elif opt in ("-a", "--annotation"):
			ann_path = arg
	print('Performing evaluation using Coco Python API...')
	_COCO = COCO(ann_path)
	_cats = _COCO.loadCats(_COCO.getCatIds())
	_classes = tuple(['__background__'] + [c['name'] for c in _cats])
	_do_eval(in_path,out_path, _COCO, _classes)
Developer: 879229395, Project: fast-rcnn-torch, Lines: 25, Source: evaluate_coco.py


Example 5: language_eval

def language_eval(dataset, preds):
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print 'using %d/%d predictions' % (len(preds_filt), len(preds))
    json.dump(preds_filt, open('tmp.json', 'w')) # serialize to temporary json file. Sigh, COCO API...

    resFile = 'tmp.json'
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    return out
Developer: ruotianluo, Project: neuraltalk2-tensorflow, Lines: 33, Source: eval_utils.py


Example 6: __init__

    def __init__(self, annotation_file=None):
        """
        Constructor of SALICON helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :return:
        """
        COCO.__init__(self,annotation_file=annotation_file)
Developer: caomw, Project: salicon-api, Lines: 7, Source: salicon.py


Example 7: language_eval

def language_eval(input_data, savedir, split):
  if type(input_data) == str: # Filename given.
    checkpoint = json.load(open(input_data, 'r'))
    preds = checkpoint
  elif type(input_data) == list: # Direct predictions give.
    preds = input_data

  annFile = 'third_party/coco-caption/annotations/captions_val2014.json'
  coco = COCO(annFile)
  valids = coco.getImgIds()

  # Filter results to only those in MSCOCO validation set (will be about a third)
  preds_filt = [p for p in preds if p['image_id'] in valids]
  print 'Using %d/%d predictions' % (len(preds_filt), len(preds))
  resFile = osp.join(savedir, 'result_%s.json' % (split))
  json.dump(preds_filt, open(resFile, 'w')) # Serialize to temporary json file. Sigh, COCO API...

  cocoRes = coco.loadRes(resFile)
  cocoEval = COCOEvalCap(coco, cocoRes)
  cocoEval.params['image_id'] = cocoRes.getImgIds()
  cocoEval.evaluate()

  # Create output dictionary.
  out = {}
  for metric, score in cocoEval.eval.items():
    out[metric] = score

  # Return aggregate and per image score.
  return out, cocoEval.evalImgs
Developer: reem94, Project: convcap, Lines: 29, Source: evaluate.py


Example 8: main

def main():
    random.seed(123)

    dataDir='/home/gchrupala/repos/coco'
    dataType='val2014'
    cap = COCO('%s/annotations/captions_%s.json'%(dataDir,dataType))
    coco = COCO('%s/annotations/instances_%s.json'%(dataDir,dataType))
    imgCat = {}
    for cat,imgs in coco.catToImgs.items():
        for img in imgs:
            if img in imgCat:
                imgCat[img].add(cat)
            else:
                imgCat[img]=set([cat])

    with open('hard2.csv','w') as file:
        writer = csv.writer(file)
        writer.writerow(["desc", "url_1", "url_2", "url_3", "url_4" ])
        imgIds = random.sample(coco.getImgIds(), 1000)
        for img in coco.loadImgs(imgIds):
            if img['id'] not in imgCat:
                continue
            cats = imgCat[img['id']]
            desc = random.sample(cap.imgToAnns[img['id']],1)[0]
            imgs = coco.loadImgs(random.sample(sum([ coco.getImgIds(catIds=[cat]) 
                                                     for cat in cats ],[]),3))
            urls = [ img['coco_url'] ] + [ img['coco_url'] for img in imgs ]
            random.shuffle(urls)
            writer.writerow([desc['caption']] + urls )
Developer: gchrupala, Project: reimaginet, Lines: 29, Source: sample.py


Example 9: coco_eval

def coco_eval(candidates_file, references_file):
  """
    Given the candidates and references, the coco-caption module is 
    used to calculate various metrics. Returns a list of dictionaries containing:
    -BLEU
    -ROUGE
    -METEOR
    -CIDEr
  """

  # This is used to suppress the output of coco-eval:
  old_stdout = sys.stdout
  sys.stdout = open(os.devnull, "w")
  try:
    # Derived from example code in coco-captions repo
    coco    = COCO( references_file )
    cocoRes = coco.loadRes( candidates_file )
  
    cocoEval = COCOEvalCap(coco, cocoRes)

    cocoEval.evaluate()
  finally:
    # Change back to standard output
    sys.stdout.close()
    sys.stdout = old_stdout
  
  return cocoEval.evalImgs
Developer: text-machine-lab, Project: MUTT, Lines: 27, Source: metrics.py


Example 10: main

def main(argv):
    input_json = 'results/' + sys.argv[1]

    annFile = 'annotations/captions_val2014.json'
    coco = COCO(annFile)
    valids = coco.getImgIds()

    checkpoint = json.load(open(input_json, 'r'))
    preds = checkpoint['val_predictions']

    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print 'using %d/%d predictions' % (len(preds_filt), len(preds))
    json.dump(preds_filt, open('tmp.json', 'w')) # serialize to temporary json file. Sigh, COCO API...

    resFile = 'tmp.json'
    cocoRes = coco.loadRes(resFile)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score
    # serialize to file, to be read from Lua
    json.dump(out, open(input_json + '_out.json', 'w'))
Developer: telin0411, Project: CS231A_Project, Lines: 27, Source: myeval.py


Example 11: main

def main():
  HASH_IMG_NAME = True
  pylab.rcParams['figure.figsize'] = (10.0, 8.0)
  json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')

  parser = argparse.ArgumentParser()
  parser.add_argument("-i", "--inputfile", type=str, required=True,
      help='File containing model-generated/hypothesis sentences.')
  parser.add_argument("-r", "--references", type=str, required=True,
      help='JSON File containing references/groundtruth sentences.')
  args = parser.parse_args()
  prediction_file = args.inputfile
  reference_file = args.references
  json_predictions_file = '{0}.json'.format(prediction_file)
  
  crf = CocoResFormat()
  crf.read_file(prediction_file, HASH_IMG_NAME)
  crf.dump_json(json_predictions_file)
   
  # create coco object and cocoRes object.
  coco = COCO(reference_file)
  cocoRes = coco.loadRes(json_predictions_file)
  
  # create cocoEval object.
  cocoEval = COCOEvalCap(coco, cocoRes)
  
  # evaluate results
  cocoEval.evaluate()
  
  # print output evaluation scores
  for metric, score in cocoEval.eval.items():
    print '%s: %.3f'%(metric, score)
Developer: meteora9479, Project: caption-eval, Lines: 32, Source: run_evaluations.py


Example 12: __init__

    def __init__(self, root_dir, data_dir, anno_file):
        coco = COCO(os.path.join(root_dir, anno_file))
        anns = coco.loadAnns(coco.getAnnIds())

        self.coco = coco
        self.anns = anns
        self.vocab = None  # Later set from outside
        self.coco_root = root_dir
        self.coco_data = data_dir
Developer: Fhrozen, Project: chainer, Lines: 9, Source: datasets.py


Example 13: cocoval

def cocoval(detected_json):
    eval_json = config.eval_json
    eval_gt = COCO(eval_json)

    eval_dt = eval_gt.loadRes(detected_json)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')

    # cocoEval.params.imgIds = eval_gt.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Developer: Zumbalamambo, Project: light_head_rcnn, Lines: 11, Source: cocoval.py


Example 14: score_generation

def score_generation(gt_filename=None, generation_result=None):

  coco_dict = read_json(generation_result)
  coco = COCO(gt_filename)
  generation_coco = coco.loadRes(generation_result)
  coco_evaluator = COCOEvalCap(coco, generation_coco)
  #coco_image_ids = [self.sg.image_path_to_id[image_path]
  #                  for image_path in self.images]
  coco_image_ids = [j['image_id'] for j in coco_dict]
  coco_evaluator.params['image_id'] = coco_image_ids
  results = coco_evaluator.evaluate(return_results=True)
  return results
Developer: luukhoavn, Project: DCC, Lines: 12, Source: eval_sentences.py


Example 15: _load_gt_roidb

    def _load_gt_roidb(self):
        _coco = COCO(self._anno_file)
        # deal with class names
        cats = [cat['name'] for cat in _coco.loadCats(_coco.getCatIds())]
        class_to_coco_ind = dict(zip(cats, _coco.getCatIds()))
        class_to_ind = dict(zip(self.classes, range(self.num_classes)))
        coco_ind_to_class_ind = dict([(class_to_coco_ind[cls], class_to_ind[cls])
                                     for cls in self.classes[1:]])

        image_ids = _coco.getImgIds()
        gt_roidb = [self._load_annotation(_coco, coco_ind_to_class_ind, index) for index in image_ids]
        return gt_roidb
Developer: dpom, Project: incubator-mxnet, Lines: 12, Source: coco.py


Example 16: __init__

    def __init__(self,dataType,usingSet,dataDir,savefileDir):
        #setpath
        self.dataType = dataType
        self.usingSet = usingSet
        self.dataDir = dataDir
        self.savefileDir = savefileDir
        self.InsFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
        self.CapFile='%s/annotations/captions_%s.json'%(dataDir,dataType)
        

        self.SALICON = pickle.load(open('%s/%s.p'%(savefileDir,usingSet),'rb'))
        self.Ins_ID = pickle.load(open('%s/Ins_ID_%s.p'%(savefileDir,usingSet),'rb'))
        
        self.category = pickle.load(open('%s/category.p'%savefileDir,'rb'))

        self.category_idx = pickle.load(open('%s/cat_dict_idx.p'%savefileDir,'rb'))#eg., person -- 1
        self.category_supercategory_idx = pickle.load(open('%s/cat_dict_supercat.p'%savefileDir,'rb')) #eg., person--human 
        self.supercategory_idx = pickle.load(open('%s/supercate_id.p'%savefileDir,'rb'))#eg., food--1
        
        self.imsal_dict = pickle.load(open('%s/imsal_dict_%s.p'%(savefileDir,usingSet),'rb'))
        
        self.Ins_coco = COCO(self.InsFile)
        self.Cap_coco = COCO(self.CapFile)
        self.cat_list = self.Ins_coco.cats#category list  (official)
        
        wordmat = sio.loadmat('%s/word_mat_%s.mat'%(savefileDir,usingSet))
        wordmat = wordmat['word_mat']
        self.wordmat = wordmat[:,0]
        
        
        self.correction_list = ['men','man','kid','boy','baby']
        
        self.nounlist = []
        self.nounID = []
        self.Cardi_Noun = []
        self.Seque_Noun = []
        
        
        self.size_norm = float(640*480)
        self.loc_norm = float(math.sqrt(640**2+480**2)) 
        
        
        
        self.saliencydict_c = {}
        self.saliencydict_s = {}
        
        #******************10-03-2016 update***********************
        self.saliencydict_i = {}
        self.transformer = TfidfTransformer()
Developer: Yanakz, Project: Caption, Lines: 49, Source: CaptionSaliency.py


Example 17: language_eval

def language_eval(dataset, preds, model_id, split):
    import sys
    if 'coco' in dataset:
        sys.path.append("coco-caption")
        annFile = 'coco-caption/annotations/captions_val2014.json'
    elif 'msvd' in dataset:
        sys.path.append('coco-caption')
        annFile = 'coco-caption/annotations/coco_ref_msvd.json'
    elif 'kuaishou' in dataset:
        sys.path.append('coco-caption')
        annFile = 'coco-caption/annotations/coco_ref_kuaishou.json'
    else:
        sys.path.append("f30k-caption")
        annFile = 'f30k-caption/annotations/dataset_flickr30k.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap

    encoder.FLOAT_REPR = lambda o: format(o, '.3f')

    if not os.path.isdir('eval_results'):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', model_id + '_' + split + '.json')

    coco = COCO(annFile)
    valids = coco.getImgIds()

    # filter results to only those in MSCOCO validation set (will be about a third)
    preds_filt = [p for p in preds if p['image_id'] in valids]
    print('using %d/%d predictions' % (len(preds_filt), len(preds)))
    json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...

    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()

    # create output dictionary
    out = {}
    for metric, score in cocoEval.eval.items():
        out[metric] = score

    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        image_id, caption = p['image_id'], p['caption']
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)

    return out
Developer: nagizeroiw, Project: ImageCaptioning.pytorch, Lines: 49, Source: eval_utils.py


Example 18: create_tokcap

def create_tokcap(data_folder=DATA_FOLDER):
    cap = COCO(COCO_TRAIN_CAP_FILE)
    
    listedCapMap = {}
    for i in cap.loadAnns(cap.getAnnIds()):
        listedCapMap[i['id']] = [dict([('caption',i['caption']), ('image_id', i['image_id'])])]
    tokenizedListedCapMap = PTBTokenizer().tokenize(listedCapMap)
    
    tokcap = [] #map caption ids to a map of its tokenized caption and image id
    for i, j in tokenizedListedCapMap.iteritems():
        tokcap += [(i, dict([('caption', j[0]), ('image_id', listedCapMap[i][0]['image_id'])]))]
    
    f = open(data_folder + '/preprocessed/tokcap.json', 'w')
    json.dump(tokcap, f)
    f.close()
Developer: duchesneaumathieu, Project: Image-Captioning, Lines: 15, Source: create_files.py


Example 19: split_valid

def split_valid(data_folder=DATA_FOLDER):
    cap = COCO(COCO_VALID_CAP_FILE)
    imgIds = cap.getImgIds()
    random.seed(0)
    random.shuffle(imgIds)
    mid = len(imgIds)/2
    vimgids, timgids = imgIds[:mid], imgIds[mid:]

    f = open(data_folder + '/preprocessed/valimgids.json', 'w')
    json.dump(vimgids, f)
    f.close()
    
    f = open(data_folder + '/preprocessed/tesimgids.json', 'w')
    json.dump(timgids, f)
    f.close()
Developer: duchesneaumathieu, Project: Image-Captioning, Lines: 15, Source: create_files.py


Example 20: Resize_Image

class Resize_Image():

    def __init__(self, imgeDir, resizeImageDir):
        self.ImageDir = imgeDir
        self.ResizeImageDir = resizeImageDir
        self.dataDir = APP_ROOT + "/Data/"
        self.dataType = 'val2014'
        self.annFile = '%s/annotations/instances_%s.json'\
                       % (self.dataDir, self.dataType)

        # initialize COCO api for instance annotations
        self.coco = COCO(self.annFile)

        # display COCO categories and supercategories
        self.cats = self.coco.loadCats(self.coco.getCatIds())
        self.names = [cat['name'] for cat in self.cats]
        self.ids = [cat['id'] for cat in self.cats]
        self.name_ids = {}
        # get all images containing given categories, select one at random
        self.img_dict = {}

    def resize_image(self):

        for i in range(len(self.names)):
            if self.ids[i] not in self.name_ids:
                self.name_ids.update({self.names[i]: self.ids[i]})
        self.__image_dict_update()

    def __image_dict_update(self):

        for name in self.names:
            catIds = self.coco.getCatIds(catNms=[name])
            imgIds = self.coco.getImgIds(catIds=catIds)
            for i in range(len(imgIds)):
                img = self.coco.loadImgs(imgIds[i])[0]
                if img["file_name"] not in self.img_dict:
                    self.img_dict.update({img["file_name"]: name})
        self.__output_resize_images()

    def __output_resize_images(self):

        for k, v in sorted(self.img_dict.items(), key=lambda x: x[0]):
            ImageFile = '%s/%s' % (self.ImageDir, k)
            pil_im = Image.open(ImageFile)
            out = pil_im.resize((255, 255))
            save_image = '%s/%s' % (self.ResizeImageDir, k)
            out.save(save_image)
            print(save_image + " " + str(self.name_ids[v]))
Developer: SnowMasaya, Project: Chainer_Image_Caption_code, Lines: 48, Source: resize_image.py



Note: The pycocotools.coco.COCO class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce this article without permission.

