
Python generic.columns_from_table Function Code Examples


This article collects typical usage examples of the Python function tkp.db.generic.columns_from_table. If you are unsure what columns_from_table does, how to call it, or what real-world usage looks like, the selected code examples below should help.



The following presents code examples of the columns_from_table function, collected from open-source projects and ordered by popularity by default.
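
For orientation, here is a minimal usage sketch pieced together from the examples below. It is illustrative only: it assumes a configured TKP database connection, `dataset_id` is a placeholder rather than a real value, and the table/column names and the keywords/alias/where/order arguments are simply those that appear in the examples (notably Examples 15 and 19).

    # Illustrative sketch only -- assumes a configured TKP database connection
    # and an existing dataset id (dataset_id is a placeholder).
    from tkp.db.generic import columns_from_table

    # Fetch every column of all runningcatalog rows belonging to one dataset;
    # the return value is a list of dict-like rows.
    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': dataset_id})

    # Restrict the returned columns, rename 'id' to 'runcat' in the result,
    # and order the rows, mirroring the keywords/alias/where/order arguments
    # used in Examples 15 and 19 below.
    rows = columns_from_table('runningcatalog',
                              keywords=['id', 'xtrsrc', 'datapoints'],
                              alias={'id': 'runcat'},
                              where={'dataset': dataset_id},
                              order='wm_ra,wm_decl')
    for row in rows:
        print(row['runcat'], row['datapoints'])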

Example 1: test_two_field_overlap_new_transient

    def test_two_field_overlap_new_transient(self):
        """Now for something more interesting - two overlapping fields, 4 sources:
        one steady source only in lower field,
        one steady source in both fields,
        one steady source only in upper field,
        one transient source in both fields but only at 2nd timestep.
        """
        n_images = 2
        xtr_radius = 1.5
        im_params = db_subs.example_dbimage_datasets(n_images,
                                                     xtr_radius=xtr_radius)
        im_params[1]['centre_decl'] += xtr_radius * 1

        imgs = []

        lower_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
        upper_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[1]['centre_ra'],
                                dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
        overlap_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
        overlap_transient = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

        imgs[0].insert_extracted_sources([lower_steady_src, overlap_steady_src])
        nd_posns = dbmon.get_nulldetections(imgs[0].id, deRuiter_r=1)
        self.assertEqual(len(nd_posns), 0)
        imgs[0].associate_extracted_sources(deRuiter_r=0.1)

        imgs[1].insert_extracted_sources([upper_steady_src, overlap_steady_src,
                                          overlap_transient])
        nd_posns = dbmon.get_nulldetections(imgs[1].id, deRuiter_r=1)
        self.assertEqual(len(nd_posns), 0)
        imgs[1].associate_extracted_sources(deRuiter_r=0.1)

        runcats = columns_from_table('runningcatalog',
                                where={'dataset': self.dataset.id})
        self.assertEqual(len(runcats), 4) #sanity check.

        monlist = columns_from_table('monitoringlist',
                                where={'dataset': self.dataset.id})
        self.assertEqual(len(monlist), 1)

        transients_qry = """\
        SELECT *
          FROM transient tr
              ,runningcatalog rc
        WHERE rc.dataset = %s
          AND tr.runcat = rc.id
        """
        self.database.cursor.execute(transients_qry, (self.dataset.id,))
        transients = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(transients), 1)
Developer: jdswinbank, Project: tkp, Lines: 60, Source: test_skyregion.py


Example 2: test_basic_same_field_case

    def test_basic_same_field_case(self):
        """ Here we start with 1 source in image0.
        We then add image1 (same field as image0), with a double association
        for the source, and check assocskyrgn updates correctly.
       """
        n_images = 2
        im_params = db_subs.generate_timespaced_dbimages_data(n_images)

        idx = 0
        src_a = db_subs.example_extractedsource_tuple(
                        ra=im_params[idx]['centre_ra'],
                        dec=im_params[idx]['centre_decl'])

        src_b = src_a._replace(ra=src_a.ra + 1. / 60.) # 1 arcminute offset
        imgs = []
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
        imgs[idx].insert_extracted_sources([src_a])
        imgs[idx].associate_extracted_sources(deRuiter_r, new_source_sigma_margin)

        idx = 1
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
        imgs[idx].insert_extracted_sources([src_a, src_b])
        imgs[idx].associate_extracted_sources(deRuiter_r, new_source_sigma_margin)
        imgs[idx].update()
        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})
        self.assertEqual(len(runcats), 2) #Just a sanity check.
        skyassocs = columns_from_table('assocskyrgn',
                                   where={'skyrgn':imgs[idx]._data['skyrgn']})
        self.assertEqual(len(skyassocs), 2)
Developer: bartscheers, Project: so_tkp, Lines: 30, Source: test_skyregion.py


Example 3: test_infinite

    def test_infinite(self):
        # Check that database insertion doesn't choke on infinite errors.

        dataset = DataSet(data={'description': 'example dataset'},
                           database=self.database)
        image = Image(dataset=dataset, data=db_subs.example_dbimage_data_dict())

        # Inserting a standard example extractedsource should be fine
        extracted_source = db_subs.example_extractedsource_tuple()
        image.insert_extracted_sources([extracted_source])
        inserted = columns_from_table('extractedsource',
                                      where= {'image' : image.id})
        self.assertEqual(len(inserted), 1)

        # But if the source has infinite errors we drop it and log a warning
        extracted_source = db_subs.example_extractedsource_tuple(error_radius=float('inf'),
                                                                 peak_err=float('inf'),
                                                                 flux_err=float('inf'))

        # We will add a handler to the root logger which catches all log
        # output in a buffer.
        iostream = BytesIO()
        hdlr = logging.StreamHandler(iostream)
        logging.getLogger().addHandler(hdlr)

        image.insert_extracted_sources([extracted_source])

        logging.getLogger().removeHandler(hdlr)
        # We want to be sure that the error has been appropriately logged.
        self.assertIn("Dropped source fit with infinite flux errors",
                      iostream.getvalue())

        inserted = columns_from_table('extractedsource',
                                      where= {'image' : image.id})
        self.assertEqual(len(inserted), 1)
Developer: bartscheers, Project: so_tkp, Lines: 35, Source: test_orm.py


Example 4: test_only_first_epoch_source

    def test_only_first_epoch_source(self):
        """test_only_first_epoch_source

        - Pretend to extract a source only from the first image.
        - Run source association for each image, as we would in TraP.
        - Check the image source listing works
        - Check runcat and assocxtrsource are correct.

        """


        first_epoch = True
        extracted_source_ids=[]
        for im in self.im_params:
            self.db_imgs.append( Image( data=im, dataset=self.dataset) )
            last_img =self.db_imgs[-1]

            if first_epoch:
                last_img.insert_extracted_sources(
                    [db_subs.example_extractedsource_tuple()],'blind')

            last_img.associate_extracted_sources(deRuiter_r,
                                                 new_source_sigma_margin)

            #First, check the runcat has been updated correctly:
            running_cat = columns_from_table(table="runningcatalog",
                                           keywords=['datapoints'],
                                           where={"dataset":self.dataset.id})
            self.assertEqual(len(running_cat), 1)
            self.assertEqual(running_cat[0]['datapoints'], 1)

            last_img.update()
            last_img.update_sources()
            img_xtrsrc_ids = [src.id for src in last_img.sources]
#            print "ImageID:", last_img.id
#            print "Imgs sources:", img_xtrsrc_ids
            if first_epoch:
                self.assertEqual(len(img_xtrsrc_ids),1)
                extracted_source_ids.extend(img_xtrsrc_ids)
                assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                           keywords=['runcat', 'xtrsrc' ],
                                           where={"xtrsrc":img_xtrsrc_ids[0]})
                self.assertEqual(len(assocxtrsrcs_rows),1)
                self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], img_xtrsrc_ids[0])
            else:
                self.assertEqual(len(img_xtrsrc_ids),0)

            first_epoch=False


        #Assocxtrsources still ok after multiple images?
        self.assertEqual(len(extracted_source_ids),1)
        assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                           keywords=['runcat', 'xtrsrc' ],
                                           where={"xtrsrc":extracted_source_ids[0]})
        self.assertEqual(len(assocxtrsrcs_rows),1)

        self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], extracted_source_ids[0],
                         "Runcat xtrsrc entry must match the only extracted source")
Developer: gijzelaerr, Project: tkp-1, Lines: 59, Source: test_algorithms.py


Example 5: test_single_fixed_source

    def test_single_fixed_source(self):
        """test_single_fixed_source

        - Pretend to extract the same source in each of a series of images.
        - Perform source association
        - Check the image source listing works
        - Check runcat, assocxtrsource.
        """

        fixed_src_runcat_id = None
        for img_idx, im in enumerate(self.im_params):
            self.db_imgs.append( Image(data=im, dataset=self.dataset))
            last_img = self.db_imgs[-1]
            insert_extracted_sources(last_img._id,
                [db_subs.example_extractedsource_tuple()],'blind')
            associate_extracted_sources(last_img._id, deRuiter_r,
                                        new_source_sigma_margin)

            running_cat = columns_from_table(table="runningcatalog",
                                           keywords=['id', 'datapoints'],
                                           where={"dataset":self.dataset.id})
            self.assertEqual(len(running_cat), 1)
            self.assertEqual(running_cat[0]['datapoints'], img_idx+1)

            # Check runcat ID does not change for a steady single source
            if img_idx == 0:
                fixed_src_runcat_id = running_cat[0]['id']
                self.assertIsNotNone(fixed_src_runcat_id, "No runcat id assigned to source")
            self.assertEqual(running_cat[0]['id'], fixed_src_runcat_id,
                             "Multiple runcat ids for same fixed source")


            runcat_flux = columns_from_table(table="runningcatalog_flux",
                               keywords=['f_datapoints'],
                               where={"runcat":fixed_src_runcat_id})
            self.assertEqual(len(runcat_flux),1)
            self.assertEqual(img_idx+1, runcat_flux[0]['f_datapoints'])

            last_img.update()
            last_img.update_sources()
            img_xtrsrc_ids = [src.id for src in last_img.sources]
            self.assertEqual(len(img_xtrsrc_ids), 1)

            #Get the association row for most recent extraction:
            assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                       keywords=['runcat', 'xtrsrc' ],
                                       where={"xtrsrc":img_xtrsrc_ids[0]})
#            print "ImageID:", last_img.id
#            print "Imgs sources:", img_xtrsrc_ids
#            print "Assoc entries:", assocxtrsrcs_rows
#            print "First extracted source id:", ds_source_ids[0]
#            if len(assocxtrsrcs_rows):
#                print "Associated source:", assocxtrsrcs_rows[0]['xtrsrc']
            self.assertEqual(len(assocxtrsrcs_rows),1,
                             msg="No entries in assocxtrsrcs for image number "+str(img_idx))
            self.assertEqual(assocxtrsrcs_rows[0]['runcat'], fixed_src_runcat_id,
                             "Mismatched runcat id in assocxtrsrc table")
Developer: ajstewart, Project: tkp, Lines: 57, Source: test_algorithms.py


Example 6: test_basic_insertion

    def test_basic_insertion(self):
        """Here we begin with a single insertion, and check a relevant entry
        exists in the skyregion table.

        The key logic checked here is that inserting an image with duplicate
        skyregion will return the same skyrgn id as the first image of that field,
        conversely a new region results in a new skyrgn entry.

        """
        self.database = tkp.db.Database()
        db_subs.delete_test_database(self.database)

        self.dataset = tkp.db.DataSet(database=self.database,
                data={'description': "Skyrgn:" + self._testMethodName})
        n_images = 3
        im_params = db_subs.generate_timespaced_dbimages_data(n_images)

        ##First image:
        image0 = tkp.db.Image(dataset=self.dataset, data=im_params[0])
        image0.update()

        skyrgns = columns_from_table('skyregion',
                                             where={'dataset':self.dataset.id})
#        if self.clean_table:
        self.assertEqual(len(skyrgns), 1)
        rgn_keys = ['centre_ra', 'centre_decl', 'xtr_radius']
        first_skyrgn_id = None
        for db_row in skyrgns:
            if all([db_row[k] == im_params[0][k] for k in rgn_keys]):
                first_skyrgn_id = db_row['id']
        self.assertNotEqual(first_skyrgn_id, None)
        self.assertEqual(image0._data['skyrgn'], first_skyrgn_id)

        ##Second, identical image:
        image1 = tkp.db.Image(dataset=self.dataset, data=im_params[1])
        image1.update()
        self.assertEqual(image1._data['skyrgn'], first_skyrgn_id)

        ##Third, different image:
        im_params[2]['centre_ra'] += im_params[2]['xtr_radius'] * 0.5
        image2 = tkp.db.Image(dataset=self.dataset, data=im_params[2])
        image2.update()
        self.assertNotEqual(image2._data['skyrgn'], first_skyrgn_id)
        skyrgns = columns_from_table('skyregion',
                                             where={'dataset':self.dataset.id})
        for db_row in skyrgns:
            if all([db_row[k] == im_params[2][k] for k in rgn_keys]):
                second_skyrgn_id = db_row['id']
        self.assertNotEqual(second_skyrgn_id, None)
        self.assertEqual(image2._data['skyrgn'], second_skyrgn_id)
Developer: bartscheers, Project: so_tkp, Lines: 50, Source: test_skyregion.py


Example 7: test_new_skyregion_insertion

    def test_new_skyregion_insertion(self):
        """Here we test the association logic executed upon insertion of a
        new skyregion.

        We expect that any pre-existing entries in the runningcatalog
        which lie within the field of view will be marked as
        'within this region', through the presence of an entry in table
        ``assocskyrgn``.
        Conversely sources outside the FoV should not be marked as related.

        We begin with img0, with a source at centre.
        Then we add 2 more (empty) images/fields at varying positions.
        """
        n_images = 6
        im_params = db_subs.generate_timespaced_dbimages_data(n_images)

        src_in_img0 = db_subs.example_extractedsource_tuple(
                        ra=im_params[0]['centre_ra'],
                        dec=im_params[0]['centre_decl'],)

        ##First image:
        image0 = tkp.db.Image(dataset=self.dataset, data=im_params[0])
        image0.insert_extracted_sources([src_in_img0])
        image0.associate_extracted_sources(deRuiter_r, new_source_sigma_margin)
        image0.update()

        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})
        self.assertEqual(len(runcats), 1) #Just a sanity check.
        ##Second, different *But overlapping* image:
        idx = 1
        im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 0.9
        image1 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
        image1.update()

        assocs = columns_from_table('assocskyrgn',
                                    where={'skyrgn':image1._data['skyrgn']})
        self.assertEqual(len(assocs), 1)
        self.assertEqual(assocs[0]['runcat'], runcats[0]['id'])

        ##Third, different *and NOT overlapping* image:
        idx = 2
        im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 1.1
        image2 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
        image2.update()
        assocs = columns_from_table('assocskyrgn',
                                    where={'skyrgn':image2._data['skyrgn']})
        self.assertEqual(len(assocs), 0)
Developer: bartscheers, Project: so_tkp, Lines: 48, Source: test_skyregion.py


Example 8: test_single_fixed_source

    def test_single_fixed_source(self):
        """test_single_fixed_source

        - Pretend to extract the same source in each of a series of images.
        - Perform source association
        - Check the image source listing works
        - Check runcat, assocxtrsource.
        """

        imgs_loaded = 0
        first_image = True
        fixed_src_runcat_id = None
        for im in self.im_params:
            self.db_imgs.append( Image( data=im, dataset=self.dataset) )
            last_img =self.db_imgs[-1]
            last_img.insert_extracted_sources([db_subs.example_extractedsource_tuple()])
            last_img.associate_extracted_sources(deRuiter_r=3.7)
            imgs_loaded+=1
            running_cat = columns_from_table(table="runningcatalog",
                                           keywords=['id', 'datapoints'],
                                           where={"dataset":self.dataset.id})
            self.assertEqual(len(running_cat), 1)
            self.assertEqual(running_cat[0]['datapoints'], imgs_loaded)
            if first_image:
                fixed_src_runcat_id = running_cat[0]['id']
                self.assertIsNotNone(fixed_src_runcat_id, "No runcat id assigned to source")
            self.assertEqual(running_cat[0]['id'], fixed_src_runcat_id,
                             "Multiple runcat ids for same fixed source")

            last_img.update()
            last_img.update_sources()
            img_xtrsrc_ids = [src.id for src in last_img.sources]
            self.assertEqual(len(img_xtrsrc_ids), 1)

            #Get the association row for most recent extraction:
            assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                       keywords=['runcat', 'xtrsrc' ],
                                       where={"xtrsrc":img_xtrsrc_ids[0]})
#            print "ImageID:", last_img.id
#            print "Imgs sources:", img_xtrsrc_ids
#            print "Assoc entries:", assocxtrsrcs_rows
#            print "First extracted source id:", ds_source_ids[0]
#            if len(assocxtrsrcs_rows):
#                print "Associated source:", assocxtrsrcs_rows[0]['xtrsrc']
            self.assertEqual(len(assocxtrsrcs_rows),1,
                             msg="No entries in assocxtrsrcs for image number "+str(imgs_loaded))
            self.assertEqual(assocxtrsrcs_rows[0]['runcat'], fixed_src_runcat_id,
                             "Mismatched runcat id in assocxtrsrc table")
Developer: hughbg, Project: tkp, Lines: 48, Source: test_algorithms.py


Example 9: TestMeridianLowerEdgeCase

    def TestMeridianLowerEdgeCase(self):
        """What happens if a source is right on the meridian?"""

        dataset = DataSet(data={'description':"Assoc 1-to-1:" +
                                self._testMethodName})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images, centre_ra=0.5,
                                                      centre_decl=10)
        src_list = []
        src0 = db_subs.example_extractedsource_tuple(ra=0.0002, dec=10.5,
                                             ra_fit_err=0.01, dec_fit_err=0.01)
        src_list.append(src0)
        src1 = src0._replace(ra=0.0003)
        src_list.append(src1)
        src2 = src0._replace(ra=0.0004)
        src_list.append(src2)

        for idx, im in enumerate(im_params):
            im['centre_ra'] = 359.9
            image = tkp.db.Image(dataset=dataset, data=im)
            image.insert_extracted_sources([src_list[idx]])
            associate_extracted_sources(image.id, deRuiter_r=3.717)
        runcat = columns_from_table('runningcatalog', ['datapoints', 'wm_ra'],
                                   where={'dataset':dataset.id})
#        print "***\nRESULTS:", runcat, "\n*****"
        self.assertEqual(len(runcat), 1)
        self.assertEqual(runcat[0]['datapoints'], 3)
        avg_ra = (src0.ra + src1.ra +src2.ra)/3
        self.assertAlmostEqual(runcat[0]['wm_ra'], avg_ra)
Developer: jdswinbank, Project: tkp, Lines: 29, Source: test_associations.py


Example 10: _sync_with_database

    def _sync_with_database(self):
        """Update object attributes from the database"""
        results = columns_from_table(self.TABLE, keywords=None, where={self.ID: self._id})
        # Shallow copy, but that's ok: all database values are
        # immutable (including datetime objects)
        if results:
            self._data = results[0].copy()
        else:
            self._data = {}
Developer: hughbg, Project: tkp, Lines: 9, Source: orm.py


Example 11: _sync_with_database

    def _sync_with_database(self):
        """Update object attributes from the database"""
        results = columns_from_table(self.TABLE, keywords=None, where={self.ID: self._id})
        # Shallow copy, but that's ok: all database values are
        # immutable (including datetime objects)
        if results:
            # force to dict since sqlalchemy RowProxy doesn't have a copy
            self._data = dict(results[0]).copy()
        else:
            self._data = {}
Developer: Error323, Project: tkp, Lines: 10, Source: orm.py


Example 12: TestDeRuiterCalculation

    def TestDeRuiterCalculation(self):
        """Check all the unit conversions are correct"""
        dataset = DataSet(data={'description':"Assoc 1-to-1:" + self._testMethodName})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images, centre_ra=10,
                                                     centre_decl=0)


        #Note ra / ra_fit_err are in degrees.
        # ra_sys_err is in arcseconds, but we set it = 0 so doesn't matter.
        #ra_fit_err cannot be zero or we get div by zero errors.
        #Also, there is a hard limit on association radii:
        #currently this defaults to 0.03 degrees== 108 arcseconds
        src0 = db_subs.example_extractedsource_tuple(ra=10.00, dec=0.0,
                                             ra_fit_err=0.1, dec_fit_err=1.00,
                                             ra_sys_err=0.0, dec_sys_err=0.0)
        src1 = db_subs.example_extractedsource_tuple(ra=10.02, dec=0.0,
                                             ra_fit_err=0.1, dec_fit_err=1.00,
                                             ra_sys_err=0.0, dec_sys_err=0.0)
        src_list = [src0, src1]
        #NB dec_fit_err nonzero, but since delta_dec==0 this simplifies to:
        expected_DR_radius = math.sqrt((src1.ra - src0.ra) ** 2 /
                               (src0.ra_fit_err ** 2 + src1.ra_fit_err ** 2))
#        print "Expected DR", expected_DR_radius

        for idx in [0, 1]:
            image = tkp.db.Image(dataset=dataset,
                                data=im_params[idx])
            image.insert_extracted_sources([src_list[idx]])
            #Peform very loose association since we just want to store DR value.
            associate_extracted_sources(image.id, deRuiter_r=100)
        runcat = columns_from_table('runningcatalog', ['id'],
                                   where={'dataset':dataset.id})
#        print "***\nRESULTS:", runcat, "\n*****"
        self.assertEqual(len(runcat), 1)
        assoc = columns_from_table('assocxtrsource', ['r'],
                                   where={'runcat':runcat[0]['id']})
#        print "Got assocs:", assoc
        self.assertEqual(len(assoc), 2)
        self.assertAlmostEqual(assoc[1]['r'], expected_DR_radius)
Developer: jdswinbank, Project: tkp, Lines: 40, Source: test_associations.py


Example 13: test_two_field_overlap_nulling_src

    def test_two_field_overlap_nulling_src(self):
        """Similar to above, but one source disappears:
        Two overlapping fields, 4 sources:
        one steady source only in lower field,
        one steady source in both fields,
        one steady source only in upper field,
        one transient source in both fields but only at *1st* timestep.
        """
        n_images = 2
        xtr_radius = 1.5
        im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                     xtr_radius=xtr_radius)
        im_params[1]['centre_decl'] += xtr_radius * 1

        imgs = []

        lower_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
        upper_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[1]['centre_ra'],
                                dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
        overlap_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
        overlap_transient = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

        imgs[0].insert_extracted_sources([lower_steady_src, overlap_steady_src,
                                          overlap_transient])
        imgs[0].associate_extracted_sources(deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
        nd_posns = dbnd.get_nulldetections(imgs[0].id)
        self.assertEqual(len(nd_posns), 0)

        imgs[1].insert_extracted_sources([upper_steady_src, overlap_steady_src])
        imgs[1].associate_extracted_sources(deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
        #This time we don't expect to get an immediate transient detection,
        #but we *do* expect to get a null-source forced extraction request:
        nd_posns = dbnd.get_nulldetections(imgs[1].id)
        self.assertEqual(len(nd_posns), 1)

        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})
        self.assertEqual(len(runcats), 4) #sanity check.
Developer: bartscheers, Project: so_tkp, Lines: 50, Source: test_skyregion.py


Example 14: test_null_case_sequential

    def test_null_case_sequential(self):
        """test_null_case_sequential

        -Check extractedsource insertion routines can deal with empty input!
        -Check source association can too

        """
        for im in self.im_params:
            self.db_imgs.append(Image( data=im, dataset=self.dataset))
            self.db_imgs[-1].insert_extracted_sources([])
            self.db_imgs[-1].associate_extracted_sources(deRuiter_r=3.7)
            running_cat = columns_from_table(table="runningcatalog",
                                           keywords="*",
                                           where={"dataset":self.dataset.id})
            self.assertEqual(len(running_cat), 0)
Developer: hughbg, Project: tkp, Lines: 15, Source: test_algorithms.py


Example 15: runcat_entries

    def runcat_entries(self):
        """
        Returns:
            list: a list of dictionarys representing rows in runningcatalog,
            for all sources belonging to this dataset

            Column 'id' is returned with the key 'runcat'

            Currently only returns 3 columns:
            [{'runcat,'xtrsrc','datapoints'}]
        """
        return columns_from_table(
            "runningcatalog",
            keywords=["id", "xtrsrc", "datapoints"],
            alias={"id": "runcat"},
            where={"dataset": self.id},
        )
Developer: Error323, Project: tkp, Lines: 17, Source: orm.py


Example 16: test_one2oneflux

    def test_one2oneflux(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: 1-1'})
        n_images = 3
        im_params = db_subs.generate_timespaced_dbimages_data(n_images)

        src_list = []
        src = db_subs.example_extractedsource_tuple()
        src0 = src._replace(flux=2.0)
        src_list.append(src0)
        src1 = src._replace(flux=2.5)
        src_list.append(src1)
        src2 = src._replace(flux=2.4)
        src_list.append(src2)

        for idx, im in enumerate(im_params):
            image = tkp.db.Image(database=self.database, dataset=dataset, data=im)
            image.insert_extracted_sources([src_list[idx]])
            associate_extracted_sources(image.id, deRuiter_r=3.717)

        query = """\
        SELECT rf.avg_f_int
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        result = zip(*self.database.cursor.fetchall())
        avg_f_int = result[0]
        self.assertEqual(len(avg_f_int), 1)
        py_metrics = db_subs.lightcurve_metrics(src_list)
        self.assertAlmostEqual(avg_f_int[0], py_metrics[-1]['avg_f_int'])
        runcat_id = columns_from_table('runningcatalog',
                                       where={'dataset':dataset.id})
        self.assertEqual(len(runcat_id),1)
        runcat_id = runcat_id[0]['id']
        # Check evolution of variability indices
        db_metrics = db_queries.get_assoc_entries(self.database,
                                                           runcat_id)
        self.assertEqual(len(db_metrics), n_images)
        # Compare the python- and db-calculated values
        for i in range(len(db_metrics)):
            for key in ('v_int','eta_int'):
                self.assertAlmostEqual(db_metrics[i][key], py_metrics[i][key])
Developer: gijzelaerr, Project: tkp-1, Lines: 44, Source: test_fluxes.py


Example 17: test_two_field_basic_case

    def test_two_field_basic_case(self):
        """
        Here we create 2 disjoint image fields, with one source at centre of
        each, and check that the second source inserted does not get flagged as
        newsource.
        """
        n_images = 2
        xtr_radius = 1.5
        im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                     xtr_radius=xtr_radius)
        im_params[1]['centre_decl'] += xtr_radius * 2 + 0.5

        imgs = []
        for idx in range(len(im_params)):
            imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))

        for idx in range(len(im_params)):
            central_src = db_subs.example_extractedsource_tuple(
                                    ra=im_params[idx]['centre_ra'],
                                    dec=im_params[idx]['centre_decl'])

            imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
            imgs[idx].insert_extracted_sources([central_src])
            imgs[idx].associate_extracted_sources(deRuiter_r, new_source_sigma_margin)

        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})

        self.assertEqual(len(runcats), 2) #Just a sanity check.

        newsources_qry = """\
        SELECT *
          FROM newsource tr
              ,runningcatalog rc
        WHERE rc.dataset = %s
          AND tr.runcat = rc.id
        """
        self.database.cursor.execute(newsources_qry, (self.dataset.id,))
        newsources = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(newsources), 0)
Developer: bartscheers, Project: so_tkp, Lines: 40, Source: test_skyregion.py


Example 18: TestCrossMeridian

    def TestCrossMeridian(self):
        """
        A source is observed in two skyregions: one which crosses the
        meridian, and one which does not. We check that the associated source
        has the correct weighted mean RA.

        See also #4497.
        """
        dataset = DataSet(data={'description': "Test:" + self._testMethodName})

        im_list = [
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=0, centre_decl=0, xtr_radius=10
            )[0],
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=0, centre_decl=0, xtr_radius=10
            )[0],
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=15, centre_decl=0, xtr_radius=10
            )[0],
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=15, centre_decl=0, xtr_radius=10
            )[0],
        ]

        source_ra = 7.5
        src = db_subs.example_extractedsource_tuple(ra=source_ra, dec=0)

        for im in im_list:
            image = tkp.db.Image(dataset=dataset, data=im)
            image.insert_extracted_sources([src])
            associate_extracted_sources(image.id, deRuiter_r=3.717)

        runcat = columns_from_table('runningcatalog', ['wm_ra'],
            where={'dataset': dataset.id}
        )
        self.assertAlmostEqual(runcat[0]['wm_ra'], source_ra)
Developer: jdswinbank, Project: tkp, Lines: 37, Source: test_associations.py


Example 19: test_many2manyflux_reduced_to_two_1to1

    def test_many2manyflux_reduced_to_two_1to1(self):
        """
        (See also assoc. test test_many2many_reduced_to_two_1to1 )
        In this test-case we cross-associate between a rhombus of sources spread
        about a central position, east-west in the first image,
        north-south in the second.

        The latter, north-south pair are slightly offset towards positive RA
        and negative RA respectively.

        The result is that the candidate associations are pruned down to
        two one-to-one pairings..
        """
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: n-m, ' + self._testMethodName})
        n_images = 2
        im_params = db_subs.generate_timespaced_dbimages_data(n_images)
        centre_ra, centre_dec =  123., 10.5,
        offset_deg = 20 / 3600. #20 arcsec
        tiny_offset_deg = 1 / 3600. #1 arcsec

        eastern_src = db_subs.example_extractedsource_tuple(
            ra=centre_ra + offset_deg,
            dec=centre_dec,
            peak = 1.5, peak_err = 1e-1,
            flux = 3.0, flux_err = 1e-1,)

        western_src = db_subs.example_extractedsource_tuple(
            ra=centre_ra - offset_deg,
            dec=centre_dec,
            peak = 1.7, peak_err = 1e-1,
            flux = 3.2, flux_err = 1e-1,)

        northern_source = db_subs.example_extractedsource_tuple(
            ra=centre_ra + tiny_offset_deg,
            dec=centre_dec + offset_deg,
            peak = 1.8, peak_err = 1e-1,
            flux = 3.3, flux_err = 1e-1,
            )

        southern_source = db_subs.example_extractedsource_tuple(
            ra=centre_ra - tiny_offset_deg,
            dec=centre_dec - offset_deg,
            peak = 1.4, peak_err = 1e-1,
            flux = 2.9, flux_err = 1e-1,)

        # image 1
        image1 = tkp.db.Image(database=self.database, dataset=dataset,
                              data=im_params[0])
        dbgen.insert_extracted_sources(
            image1.id, [eastern_src,western_src], 'blind')
        associate_extracted_sources(image1.id, deRuiter_r = 3.717)

        # image 2
        image2 = tkp.db.Image(database=self.database, dataset=dataset,
                              data=im_params[1])
        dbgen.insert_extracted_sources(
            image2.id, [northern_source, southern_source], 'blind')
        associate_extracted_sources(image2.id, deRuiter_r = 3.717)

        # Manually compose the lists of sources we expect to see associated
        # into runningcatalog entries:
        # NB img1_srclist[1] has larger RA value.
        lightcurves_sorted_by_ra =[]
        lightcurves_sorted_by_ra.append( [western_src, southern_source])
        lightcurves_sorted_by_ra.append( [eastern_src, northern_source])

        #Check the summary statistics (avg flux, etc)
        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int
              ,avg_f_int_weight
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        ORDER BY r.wm_ra, r.wm_decl
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(runcat_flux_entries), len(lightcurves_sorted_by_ra))

        for idx, flux_summary in enumerate(runcat_flux_entries):
            py_results = db_subs.lightcurve_metrics(lightcurves_sorted_by_ra[idx])
            for key in flux_summary.keys():
                self.assertAlmostEqual(flux_summary[key], py_results[-1][key])

        #Now check the per-timestep statistics (variability indices)
        sorted_runcat_ids = columns_from_table('runningcatalog',
                                               where={'dataset':dataset.id},
                                               order='wm_ra,wm_decl')
        sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]

        for idx, rcid in enumerate(sorted_runcat_ids):
            db_indices = db_queries.get_assoc_entries(self.database,
                                                                   rcid)
            py_indices = db_subs.lightcurve_metrics(lightcurves_sorted_by_ra[idx])
            self.assertEqual(len(db_indices), len(py_indices))
            for nstep in range(len(db_indices)):
                for key in ('v_int', 'eta_int', 'f_datapoints'):
#......... the remainder of this method is omitted here .........
Developer: gijzelaerr, Project: tkp-1, Lines: 101, Source: test_fluxes.py


