
Python pyfits.new_table Function Code Examples


This article collects typical usage examples of the pyfits.new_table function in Python. If you are wondering how exactly to call new_table, or simply looking for working examples of it, the curated code samples below should help.



The sections below present 20 code examples of the new_table function, ordered by popularity by default. You can upvote the examples you find useful; these ratings help the site recommend better Python code samples.
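Before the project code, here is a minimal, self-contained sketch of the pattern that most of the examples below share: build pyfits.Column objects, collect them in a pyfits.ColDefs, turn them into a binary-table HDU with pyfits.new_table, and write the table out behind an empty primary HDU. The column names and data are made up purely for illustration, and pyfits is a legacy package, so this sketch only runs against an old pyfits installation.

import numpy as np
import pyfits

# Two made-up data columns (illustrative values only)
wavelength = np.linspace(4000.0, 7000.0, 100)
flux = np.random.random(100)

# Describe each column: name, FITS format code ('E' = 32-bit float), data array
col1 = pyfits.Column(name='WAVELENGTH', format='E', array=wavelength)
col2 = pyfits.Column(name='FLUX', format='E', array=flux)
cols = pyfits.ColDefs([col1, col2])

# new_table builds a binary-table HDU from the column definitions
tbhdu = pyfits.new_table(cols)
tbhdu.name = 'SPECTRUM'

# Table data cannot go in the primary HDU, so prepend an empty one
hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
hdulist.writeto('example_table.fits', clobber=True)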

Example 1: savefits

    def savefits(self,filename,bary=False):
        '''
        outputs a FITS file containing the histogram lightcurve
        v1.0 Kieran O'Brien - Dec 2011
        '''
#        try:
        if bary:
            print 'saving barytime corrected lightcurve'
            col1=pyfits.Column(name='BARYTIME',format='E', array=self.barytimes)
            col2=pyfits.Column(name='COUNTS',format='E', array=self.obsheights)
            cols=pyfits.ColDefs([col1,col2])
            tbhdu=pyfits.new_table(cols)
            hdu=pyfits.PrimaryHDU(0)
            thdulist=pyfits.HDUList([hdu,tbhdu])
            thdulist.writeto(filename)
        else:
            print 'saving uncorrected lightcurve'
            col1=pyfits.Column(name='TIME',format='E', array=self.obstimes)
            col2=pyfits.Column(name='COUNTS',format='E', array=self.obsheights)
            cols=pyfits.ColDefs([col1,col2])
            tbhdu=pyfits.new_table(cols)
            hdu=pyfits.PrimaryHDU(0)
            thdulist=pyfits.HDUList([hdu,tbhdu])
            thdulist.writeto(filename)
#        except:
#            print 'ERROR: problem writing file'
        return
Author: RupertDodkins | Project: ARCONS-pipeline-1 | Lines: 27 | Source: ARCONS_Cubes.py


Example 2: pixelizeCatalog

def pixelizeCatalog(infiles, config, force=False):
    """
    Break catalog up into a set of healpix files.
    """
    nside_catalog = config['coords']['nside_catalog']
    nside_pixel = config['coords']['nside_pixel']
    outdir = mkdir(config['catalog']['dirname'])
    filenames = config.getFilenames()
    
    for ii,infile in enumerate(infiles):
        logger.info('(%i/%i) %s'%(ii+1, len(infiles), infile))
        f = pyfits.open(infile)
        data = f[1].data
        header = f[1].header
        logger.info("%i objects found"%len(data))
        if not len(data): continue
        glon,glat = cel2gal(data['RA'],data['DEC'])
        catalog_pix = ang2pix(nside_catalog,glon,glat,coord='GAL')
        pixel_pix = ang2pix(nside_pixel,glon,glat,coord='GAL')
        names = [n.upper() for n in data.columns.names]
        ra_idx = names.index('RA'); dec_idx = names.index('DEC')
        idx = ra_idx if ra_idx > dec_idx else dec_idx
        catalog_pix_name = 'PIX%i'%nside_catalog
        pixel_pix_name = 'PIX%i'%nside_pixel

        coldefs = pyfits.ColDefs(
            [pyfits.Column(name='GLON',format='1D',array=glon),
             pyfits.Column(name='GLAT',format='1D',array=glat),
             pyfits.Column(name=catalog_pix_name,format='1J',array=catalog_pix),
             pyfits.Column(name=pixel_pix_name  ,format='1J',array=pixel_pix)]
        )
        hdu = pyfits.new_table(data.columns[:idx+1]+coldefs+data.columns[idx+1:])
        table = hdu.data

        for pix in numpy.unique(catalog_pix):
            logger.debug("Processing pixel %s"%pix)
            outfile = filenames.data['catalog'][pix]
            if not os.path.exists(outfile):
                logger.debug("Creating %s"%outfile)
                names = [n.upper() for n in table.columns.names]
                formats = table.columns.formats
                columns = [pyfits.Column(n,f) for n,f in zip(names,formats)]
                out = pyfits.HDUList([pyfits.PrimaryHDU(),pyfits.new_table(columns)])
                out[1].header['NSIDE'] = nside_catalog
                out[1].header['PIX'] = pix
                out.writeto(outfile)
            hdulist = pyfits.open(outfile,mode='update')
            t1 = hdulist[1].data
            # Could we speed up with sorting and indexing?
            t2 = table[ table[catalog_pix_name] == pix ]
            nrows1 = t1.shape[0]
            nrows2 = t2.shape[0]
            nrows = nrows1 + nrows2
            out = pyfits.new_table(t1.columns, nrows=nrows)
            for name in t1.columns.names:
                out.data.field(name)[nrows1:]=t2.field(name)
            hdulist[1] = out
            logger.debug("Writing %s"%outfile)
            hdulist.flush()
            hdulist.close()
Author: balbinot | Project: ugali | Lines: 60 | Source: pixelize.py


Example 3: writeSparseHealpixMap

def writeSparseHealpixMap(pix, data_dict, nside, outfile,
                          distance_modulus_array = None,
                          coordsys = 'NULL', ordering = 'NULL',
                          header_dict = None):
    """
    Sparse HEALPix maps are used to efficiently store maps of the sky by only
    writing out the pixels that contain data.

    Three-dimensional data can be saved by supplying a distance modulus array
    which is stored in a separate extension.
    
    coordsys [gal, cel]
    ordering [ring, nest]
    """

    hdul = pyfits.HDUList()

    # Pixel data extension
    columns_array = [pyfits.Column(name = 'PIX',
                                   format = 'K',
                                   array = pix)]

    for key in data_dict.keys():
        if data_dict[key].shape[0] != len(pix):
            logger.warning('First dimension of column %s (%i) does not match number of pixels (%i).'%(key,
                                                                                                      data_dict[key].shape[0],
                                                                                                      len(pix)))
        
        if len(data_dict[key].shape) == 1:
            columns_array.append(pyfits.Column(name = key,
                                               format = 'E',
                                               array = data_dict[key]))
        elif len(data_dict[key].shape) == 2:
            columns_array.append(pyfits.Column(name = key,
                                               format = '%iE'%(data_dict[key].shape[1]),
                                               array = data_dict[key]))
        else:
            logger.warning('Unexpected number of data dimensions for column %s.'%(key))
    
    hdu_pix_data = pyfits.new_table(columns_array)
    hdu_pix_data.header.update('NSIDE', nside)
    hdu_pix_data.header.update('COORDSYS', coordsys.upper())
    hdu_pix_data.header.update('ORDERING', ordering.upper())
    hdu_pix_data.header.update(header_dict)
    hdu_pix_data.name = 'PIX_DATA'
    hdul.append(hdu_pix_data)

    # Distance modulus extension
    if distance_modulus_array is not None:
        hdu_distance_modulus = pyfits.new_table([pyfits.Column(name = 'DISTANCE_MODULUS',
                                                               format = 'E',
                                                               array = distance_modulus_array)])
        hdu_distance_modulus.name = 'DISTANCE_MODULUS'
        hdul.append(hdu_distance_modulus)

    hdul.writeto(outfile, clobber = True)
Author: balbinot | Project: ugali | Lines: 56 | Source: skymap.py


Example 4: main

def main():
   import glob
   import numpy as np
   import pyfits as pf
   import matplotlib.pyplot as plt

   dirlst = glob.glob('*cloud*')
   dirlst.sort()

   for dirname in dirlst:
      print dirname

      filelist = glob.glob(dirname+'/*.21')
      filelist.sort()

      hdu = pf.PrimaryHDU(np.arange(100))
      hdulist = pf.HDUList([hdu])

      for fname in filelist:
         logg, partsize, temperature, metal, lam, flam = getDataCloudy(fname)
         c1 = pf.Column(name='Wavelength',format='E',array=lam)
         c2 = pf.Column(name='Flux',format='E',array=flam)
         tbhdu = pf.new_table([c1,c2])
         tbhdu.header.update('LOGG',logg)
         tbhdu.header.update('TEMPERAT',temperature,'units K')
         tbhdu.header.update('PARTSIZE',partsize,'units microns')
         tbhdu.header.update('Z',metal)
         hdulist.append(tbhdu)
   
      hdulist.writeto(dirname+'.fits')

   dirlst = glob.glob('clr*')
   dirlst.sort()

   for dirname in dirlst:
      print dirname

      filelist = glob.glob(dirname+'/*.clr')
      filelist.sort()

      hdu = pf.PrimaryHDU(np.arange(100))
      hdulist = pf.HDUList([hdu])

      for fname in filelist:
         logg, temperature, eddy, metal, lam, flam = getDataClr(fname)
         c1 = pf.Column(name='Wavelength',format='E',array=lam)
         c2 = pf.Column(name='Flux',format='E',array=flam)
         tbhdu = pf.new_table([c1,c2])
         tbhdu.header.update('LOGG',logg)
         tbhdu.header.update('TEMPERAT',temperature,'units K')
         tbhdu.header.update('EDDYCOEF',eddy)
         tbhdu.header.update('Z',metal)
         hdulist.append(tbhdu)
   
      hdulist.writeto(dirname+'.fits')
Author: abhijithrajan | Project: SpecFit | Lines: 55 | Source: genFits.py


Example 5: add_bins

def add_bins(sname='final'):
    p = pyfits.open(data_path+'gz2sample_%s_abs_regions.fits'%sname)
    d = p['data'].data
    redshift = d.field('redshift')
    zmask = notNaN(redshift)
    oldcols = p['data'].columns
    bincols = {}
    cols = []
    for c in oldcols:
	cols.append(pyfits.Column(name=c.name, format=c.format,
				  array=d.field(c.name)))
    for k in bins.keys():
        x = d.field(k)[zmask]
        bin_min, bin_max, bin_step = bins[k]
        xbin = N.zeros(redshift.shape, N.int) - 9999
        xbinz = (N.floor((x - bin_min) / bin_step)).astype(N.int)
        maxbin = int(round((bin_max - bin_min) / bin_step))
        print k, maxbin
        low = xbinz < 0
        high = xbinz >= maxbin
        xbinz[low] = -999
        xbinz[high] = 999
        xbin[zmask] = xbinz
        name = ('%s_simple_bin'%k).upper()
        cols.append(pyfits.Column(name=name,
                                  format='I', array=xbin))
        bin = N.arange(0, maxbin, 1)
        min = bin * bin_step + bin_min
        max = min + bin_step
        center = min + 0.5*bin_step
        bincols[k] = [pyfits.Column(name='bin', format='I', array=bin),
                      pyfits.Column(name='min', format='E', array=min),
                      pyfits.Column(name='max', format='E', array=max),
                      pyfits.Column(name='centre', 
                                    format='E', array=center)]
    hdulist = pyfits.HDUList()
    hdulist.append(pyfits.PrimaryHDU())
    tbhdu=pyfits.new_table(cols)
    tbhdu.name = 'data'
    hdulist.append(tbhdu)
    for k in bincols.keys():
	c = bincols[k]
	tbhdu=pyfits.new_table(c)
        tbhdu.name = '%s_simple_bins'%k
	hdulist.append(tbhdu)
        outfile = data_path+'gz2sample_%s_abs_regions_bins.fits'%sname
    file_exists = os.path.isfile(outfile)
    if file_exists:
	os.remove(outfile)
    hdulist.writeto(outfile)
    p.close()
Author: vrooje | Project: galaxyzoo2 | Lines: 51 | Source: gz2sample.py


Example 6: _defineVariables

    def _defineVariables(self):
        """
        Helper funtion to define pertinent variables from catalog data.
        """
        self.objid = self.data.field(self.config['catalog']['objid_field'])
        self.lon = self.data.field(self.config['catalog']['lon_field'])
        self.lat = self.data.field(self.config['catalog']['lat_field'])

        #if self.config['catalog']['coordsys'].lower() == 'cel' \
        #   and self.config['coords']['coordsys'].lower() == 'gal':
        #    logger.info('Converting catalog objects from CELESTIAL to GALACTIC cboordinates')
        #    self.lon, self.lat = ugali.utils.projector.celToGal(self.lon, self.lat)
        #elif self.config['catalog']['coordsys'].lower() == 'gal' \
        #   and self.config['coords']['coordsys'].lower() == 'cel':
        #    logger.info('Converting catalog objects from GALACTIC to CELESTIAL coordinates')
        #    self.lon, self.lat = ugali.utils.projector.galToCel(self.lon, self.lat)

        self.mag_1 = self.data.field(self.config['catalog']['mag_1_field'])
        self.mag_err_1 = self.data.field(self.config['catalog']['mag_err_1_field'])
        self.mag_2 = self.data.field(self.config['catalog']['mag_2_field'])
        self.mag_err_2 = self.data.field(self.config['catalog']['mag_err_2_field'])

        if self.config['catalog']['mc_source_id_field'] is not None:
            if self.config['catalog']['mc_source_id_field'] in self.data.names:
                self.mc_source_id = self.data.field(self.config['catalog']['mc_source_id_field'])
                logger.info('Found %i MC source objects'%(numpy.sum(self.mc_source_id > 0)))
            else:
                #ADW: This is pretty kludgy, please fix... (FIXME)
                columns_array = [pyfits.Column(name = self.config['catalog']['mc_source_id_field'],
                                               format = 'I',
                                               array = numpy.zeros(len(self.data)))]
                hdu = pyfits.new_table(columns_array)
                self.data = pyfits.new_table(pyfits.new_table(self.data.view(np.recarray)).columns + hdu.columns).data
                self.mc_source_id = self.data.field(self.config['catalog']['mc_source_id_field'])

        # should be @property
        if self.config['catalog']['band_1_detection']:
            self.mag = self.mag_1
            self.mag_err = self.mag_err_1
        else:
            self.mag = self.mag_2
            self.mag_err = self.mag_err_2
            
        # should be @property
        self.color = self.mag_1 - self.mag_2
        self.color_err = numpy.sqrt(self.mag_err_1**2 + self.mag_err_2**2)

        logger.info('Catalog contains %i objects'%(len(self.data)))
Author: balbinot | Project: ugali | Lines: 48 | Source: catalog.py


Example 7: write

    def write(self, file_name) :
        """Write stored data to file.
        
        Take all the data stored in the Writer (from added DataBlocks) and
        write it to a fits file with the passed file name.
        """

        # Add the data
        Col = pyfits.Column(name='DATA', format=self.data_format, 
                            array=self.data)
        columns = [Col,]
        
        # Add all the other stored fields.
        for field_name in self.field.iterkeys() :
            Col = pyfits.Column(name=field_name,
                                format=self.formats[field_name],
                                array=self.field[field_name])
            columns.append(Col)
        coldefs = pyfits.ColDefs(columns)
        # Creat fits header data units, one for the table and the mandatory
        # primary.
        tbhdu = pyfits.new_table(coldefs)
        prihdu = pyfits.PrimaryHDU()
        # Add the write history.
        fname_abbr = ku.abbreviate_file_path(file_name)
        self.history.add('Written to file.', ('File name: ' + fname_abbr,))
        # Add the history to the header.
        bf.write_history_header(prihdu.header, self.history)

        # Combine the HDUs and write to file.
        hdulist = pyfits.HDUList([prihdu, tbhdu])
        hdulist.writeto(file_name, clobber=True)
        if self.feedback > 0 :
            print 'Wrote data to file: ' + fname_abbr
Author: adam-lewis | Project: analysis_IM | Lines: 34 | Source: fitsGBT.py


Example 8: RepTable

def RepTable ( tabin ,
               rowmask=None ,
               colsel=None,
               keycopylist = ["dz"]
               ):

    "Replicate a fits table"


    
    if rowmask ==None:
        rowmask = numpy.ones(len(tabin.data),
                             numpy.bool)

    nrowsout=sum(rowmask)

    
    tabout=pyfits.new_table( CopyColDefs(tabin),
                             nrows=nrowsout)

    for k in keycopylist:
        if k in tabin.header.keys():
            tabout.header.update ( k , tabin.header[k] )

    if colsel == None:
        colsel = tabin.columns.names

    if nrowsout > 0 :
        for cname in colsel:
            tabout.data[cname]=tabin.data.field(cname)[ rowmask]

    return tabout
Author: bnikolic | Project: oof | Lines: 32 | Source: iofits4.py


Example 9: save

  def save(self, pathname=None, sim_num=0, file_ext='fits', planet=None):
    if not pathname: pathname = '.'
    full_path = os.path.expanduser(os.path.join(pathname, str(sim_num), 'static'))
    try:
      os.makedirs(full_path)
    except os.error:
      pass
    
    filename = os.path.join(full_path, self.opt.name)
   
    if file_ext == 'fits':
      prihdr = pyfits.Header()
      prihdr['wavsol_0'] = (self.opt.ld().base[0], 'reference pixel wl')
      prihdr['wavsol_1'] = (self.opt.ld().base[1], '')
      prihdr['wavsol_2'] = (self.opt.ld().base[2], 'reference pixel')
      prihdr['BUNITS']   = "{:>18s}".format(str(self.fp.units))
      if planet:
	prihdr['NAME'] = ("{:>18s}".format(planet.planet.name), '')
	prihdr['T14'] = (float(planet.t14), str(planet.t14.units))
	prihdr['PERIOD'] = (float(planet.planet.P), 
			    str(planet.planet.P.units))
	
      fp_hdu = pyfits.PrimaryHDU(self.fp, header=prihdr)
      tb_hdu = pyfits.new_table(pyfits.ColDefs([
	pyfits.Column(name='wl', format='E', array=self.wl_solution),
	pyfits.Column(name='cr', format='E', array=self.planet.sed),
	pyfits.Column(name='star', format='E', array=self.star.sed)]))
	
      
      hdulist = pyfits.HDUList([fp_hdu, tb_hdu])
      hdulist.writeto(filename + '.' + file_ext, clobber=True)
    else:
      exolib.exosim_error('channel.save - file format not supported')
Author: subisarkar | Project: ExoSimSubi2 | Lines: 33 | Source: channel.py


Example 10: writeSpotFITS

def writeSpotFITS(spotDir, data):

    phdu = pyfits.PrimaryHDU()
    phdr = phdu.header
    phdr.update('pixscale', 0.001, 'mm/pixel')

    cols = []
    cols.append(pyfits.Column(name='fiberIdx',
                              format='I',
                              array=data['fiberIdx']))
    cols.append(pyfits.Column(name='wavelength',
                              format='D',
                              array=data['wavelength']))
    cols.append(pyfits.Column(name='spot_xc',
                              format='D',
                              array=data['spot_xc']))
    cols.append(pyfits.Column(name='spot_yc',
                              format='D',
                              array=data['spot_yc']))
    spots = data['spot'][:]
    spots.shape = (len(spots), 256*256)
    cols.append(pyfits.Column(name='spot',
                              format='%dE' % (256*256),
                              dim='(256,256)',
                              array=spots))
    colDefs = pyfits.ColDefs(cols)

    thdu = pyfits.new_table(colDefs)
    hdulist = pyfits.HDUList([phdu, thdu])

    hdulist.writeto(os.path.join(spotDir, 'spots.fits'), 
                    checksum=True, clobber=True)
Author: Subaru-PFS | Project: drp_instmodel | Lines: 32 | Source: zemaxSpots.py


Example 11: Combine

def Combine( flist , fout,
             overwrite=0):
    "Combines columns from various fits files"

    """flist needs to be of format ( (fname, col-prefix), ... ) """

    tabins= [ (pyfits.open( x[0] )[1], x[1]) for x in flist ]

    coldefs = []
    for tab,prefix in tabins:
        tabcds=tab.columns
        print tabcds.formats
        coldefs.extend( CopyColDefs( tab, prefix))

        
    tabout= pyfits.new_table( coldefs , nrows=len(tabins[0][0].data))

    for tab,prefix in tabins:
        tabcds=tab.columns
        for cname in tabcds.names:
            tabout.data.field(prefix+cname)._copyFrom(tab.data.field(cname))

    
    
    Write([pyfits.PrimaryHDU(), tabout],
          fout,
          overwrite=overwrite)
Author: bnikolic | Project: oof | Lines: 27 | Source: iofits4.py


Example 12: mwrfits

def mwrfits(filename,data,hdu=1,colnames=None,keys=None):
    """Write columns to a fits file in a table extension.

    Parameters
    ----------
    filename : str
      The fits file name
    data : list of 1D arrays
      A list of 1D arrays to write in the table
    hdu : int, optional
      The header where to write the data. Default: 1
    colnames : list of str
      The column names
    keys : dict-like
      A dictionary with keywords to write in the header
    """
    # Check the inputs
    if colnames is not None:
        if len(colnames) != len(data):
            raise ValueError("colnames and data must the same length")
    else:
        colnames = ['']*len(data)
    cols=[]
    for line in xrange(len(data)):
        cols.append(pf.Column(name=colnames[line],
                               format=getformat(data[line]),
                               array=data[line]))
    tbhdu = pf.new_table(cols)
    if type(keys) is dict:
        for k,v in keys.items():
            tbhdu.header.update(k,v)
    # write the file
    tbhdu.writeto(filename,clobber=True)
Author: montefra | Project: healpy | Lines: 33 | Source: fitsfunc.py


Example 13: convertCosmos

def convertCosmos(inName, outName):
    inFile = open(inName, "r")
    table = asciitable.read(inFile, Reader=asciitable.FixedWidthTwoLine, delimiter='|', header_start=0,
                            data_start=4, data_end=-1)

    schema = pyfits.ColDefs([column for column in MAPPING.values()])
    outHdu = pyfits.new_table(schema, nrows=len(table))
    outData = outHdu.data

    for name, column in MAPPING.items():
        outData.field(column.name)[:] = table.field(name)

    for f in FILTERS:
        mag = outData.field(f)
        err = outData.field(f + "_err")
        indices = numpy.where(numpy.logical_or(mag < 0, mag > 50))
        mag[indices] = numpy.NAN
        err[indices] = numpy.NAN

    outHdu.writeto(outName, clobber=True)
    print "Wrote %s" % outName
    print "To create an astrometry.net catalogue, execute:"
    outBase = outName.replace(".fits", "")
    print "build-index -i %s -o %s_and_0.fits -I 77770 -P0 -n 100 -S r -L 20 -E -M -j 0.4" % (inName, outBase)
    for i in range(1, 5):
        print "build-index -1 %s_and_0.fits -o %s_and_%d.fits -I 7777%d -P%d -n 100 -S r -L 10 -E -M -j 0.4 &" % (outBase, outBase, i, i, i)
Author: HyperSuprime-Cam | Project: hscMisc | Lines: 26 | Source: readCosmos.py


Example 14: subsetSchlafly

def subsetSchlafly(inName, outName):
    inFile = pyfits.open(inName)
    inData = inFile[1].data

    schema = pyfits.ColDefs([pyfits.Column(name="id", format="K"),
                             pyfits.Column(name="ra", format="D"),
                             pyfits.Column(name="dec", format="D")] +
                            [pyfits.Column(name=name, format="E") for name in FILTERS] +
                            [pyfits.Column(name=name + "_err", format="E") for name in FILTERS] +
                            [pyfits.Column(name=name + "_stdev", format="E") for name in FILTERS]
                            )

    outHdu = pyfits.new_table(schema, nrows=len(inData))
    outData = outHdu.data

    outData.ident = inData.obj_id
    outData.ra = inData.ra
    outData.dec = inData.dec
    for i, f in enumerate(FILTERS):
        # Some of the below (e.g., "mean") are functions in the pyfits.FITS_rec class,
        # so we need to access them differently than just grabbing an attribute.
        mean = outData.field(f)
        err = outData.field(f + "_err")
        stdev = outData.field(f + "_stdev")

        mean[:] = inData.field("mean")[:,i]
        err[:] = inData.field("err")[:,i]
        stdev[:] = inData.field("stdev")[:,i]

    outHdu.writeto(outName, clobber=True)
    print "Wrote %s" % outName
    inFile.close()
Author: HyperSuprime-Cam | Project: hscMisc | Lines: 32 | Source: schlafly2and.py


Example 15: write_cmd_file

def write_cmd_file(near_targ, target):
  '''
  Takes the rec array of sources near target and the rec array of the target and produces a fits table.
  '''

  # Columns to be in the fits table: these data are for the nearby sources
  c1 = pyfits.Column(name='HSTID', format='20A', array=near_targ['hstid'])
  c2 = pyfits.Column(name='RA', format='F', array=near_targ['degra'])
  c3 = pyfits.Column(name='DEC', format='F', array=near_targ['degdec'])
  c4 = pyfits.Column(name='V', format='F', array=near_targ['v'])
  c5 = pyfits.Column(name='VERR', format='F', array=near_targ['verr'])
  c6 = pyfits.Column(name='BV', format='F', array=near_targ['bvcol'])
  c7 = pyfits.Column(name='BVERR', format='F', array=near_targ['bvcolerr'])
  c8 = pyfits.Column(name='VI', format='F', array=near_targ['vicol'])
  c9 = pyfits.Column(name='VIERR', format='F', array=near_targ['vicolerr'])

  # Make table
  table_hdu = pyfits.new_table([c1, c2, c3, c4, c5, c6, c7, c8, c9])


  # Updates header with contains the target's info
  table_hdu.header.update(key='HSTID', value=target['hstid'])
  table_hdu.header.update(key='LBTID', value=target['lbtid'])
  table_hdu.header.update(key='RA', value=str(target['ra']))
  table_hdu.header.update(key='DEC', value=str(target['dec']))     

  # Table data cannot be the Primary HDU, so we make an empty Primary HDU
  phdu = pyfits.PrimaryHDU()

  # Zeroth extension is empty, first extension contains the table  
  hdulist = pyfits.HDUList([phdu, table_hdu])
  hdulist.writeto(target['lbtid']+'.fits')
Author: skylergrammer | Project: Astro-Python | Lines: 32 | Source: find_nearby.py


Example 16: write_fits

def write_fits(magpat, fits_output_file):
    """Save a magnification pattern to a FITS file.

    The pattern itself is saved in the pimary HDU of the FITS file.
    The coordinates of the source plane rectangle occupied by the
    pattern are stored in the header fields

        MAGPATX0, MAGPATY0, MAGPATX1, MAGPATY1

    The lens list is stored in a binary table HDU named "LENSES".

    Parameters:

        magpat           magnification pattern to save
        fits_output_file
                         file name of the output file
    """
    img_hdu = pyfits.PrimaryHDU(magpat)
    region = magpat.region
    img_hdu.header.update("ctype1", " ")
    img_hdu.header.update("crpix1", 0.5)
    img_hdu.header.update("crval1", region.x)
    img_hdu.header.update("cdelt1", region.width / magpat.params.xpixels)
    img_hdu.header.update("ctype2", " ")
    img_hdu.header.update("crpix2", 0.5)
    img_hdu.header.update("crval2", region.y)
    img_hdu.header.update("cdelt2", region.height / magpat.params.ypixels)
    for s in ["x0", "y0", "x1", "y1"]:
        img_hdu.header.update("magpat" + s, getattr(region, s))
    lens_hdu = pyfits.new_table(magpat.lenses)
    lens_hdu.name = "lenses"
    pyfits.HDUList([img_hdu, lens_hdu]).writeto(fits_output_file, clobber=True)
    utils.logger.info("Wrote magnification pattern to %s", fits_output_file)
Author: smarnach | Project: luckylensing | Lines: 33 | Source: fits.py


Example 17: mwrfits

def mwrfits(filename, data, hdu=1, colnames=None, keys=None):
    """Write columns to a fits file in a table extension.

    Input:
      - filename: the fits file name
      - data: a list of 1D arrays to write in the table
    Parameters:
      - hdu: header where to write the data. Default: 1
      - colnames: the column names
      - keys: a dictionary with keywords to write in the header
    """
    # Check the inputs
    if colnames is not None:
        if len(colnames) != len(data):
            raise ValueError("colnames and data must the same length")
    else:
        colnames = [""] * len(data)
    cols = []
    for line in xrange(len(data)):
        cols.append(pyf.Column(name=colnames[line], format=getformat(data[line]), array=data[line]))
    coldefs = pyf.ColDefs(cols)
    tbhdu = pyf.new_table(coldefs)
    if type(keys) is dict:
        for k, v in keys.items():
            tbhdu.header.update(k, v)
    # write the file
    tbhdu.writeto(filename, clobber=True)
Author: Alwnikrotikz | Project: healpy | Lines: 27 | Source: fitsfunc.py


Example 18: guidertable

def guidertable(els, mintime, maxtime):
   """Extract the guider data from the els"""
   #extract the guider data from the els
   sel_cmd='_timestamp_, guidance_available, ee50, mag50'
   tab_cmd='tpc_guidance_status__timestamp '
   log_cmd="_timestamp_>'%s' and _timestamp_<'%s'" % (mintime, maxtime)
   gui_rec=saltmysql.select(els, sel_cmd, tab_cmd, log_cmd)
   if len(gui_rec)<2:  return None


   gtime_list=[]
   ee50_arr=np.zeros(len(gui_rec))
   mag50_arr=np.zeros(len(gui_rec))
   avail_list=[]
   for i in range(len(gui_rec)):
       gtime_list.append(gui_rec[i][0])
       ee50_arr[i]=gui_rec[i][2]
       mag50_arr[i]=gui_rec[i][3]
       avail_list.append(gui_rec[i][1])
   avail_arr=(np.array(avail_list)=='T')

   #write the results to a fits table
   guicol=[]
   guicol.append(pyfits.Column(name='Timestamp', format='20A', array=gtime_list))
   guicol.append(pyfits.Column(name='Available', format='L', array=avail_arr ))
   guicol.append(pyfits.Column(name='EE50', format='F', array=ee50_arr ))
   guicol.append(pyfits.Column(name='mag50', format='F', array=mag50_arr ))

   guitab= saltio.fitscolumns(guicol)
   guihdu= pyfits.new_table(guitab)
   guihdu.name='Guider'
   return guihdu
Author: saltastro | Project: pipetools | Lines: 32 | Source: saltelsdata.py


Example 19: seeingtable

def seeingtable(sdb, mintime, maxtime):
   #extract the seeing data from the sdb
   sel_cmd='DateTime, Mass, Dimm'
   tab_cmd='MassDimm'
   log_cmd="DateTime>'%s' and DateTime<'%s'" % (mintime, maxtime)
   see_rec=saltmysql.select(sdb, sel_cmd, tab_cmd, log_cmd)
   if len(see_rec)<2:  return None

   stime_list=[]
   mass_arr=np.zeros(len(see_rec))
   dimm_arr=np.zeros(len(see_rec))
   for i in range(len(see_rec)):
       stime_list.append(see_rec[i][0])
       mass_arr[i]=see_rec[i][1]
       dimm_arr[i]=see_rec[i][2]
 
   seecol=[]
   seecol.append(pyfits.Column(name='Timestamp', format='20A', array=stime_list))
   seecol.append(pyfits.Column(name='MASS', format='F', array=mass_arr ))
   seecol.append(pyfits.Column(name='DIMM', format='F', array=dimm_arr ))

   seetab= saltio.fitscolumns(seecol)
   seehdu= pyfits.new_table(seetab)
   seehdu.name='Seeing'
   return seehdu
Author: saltastro | Project: pipetools | Lines: 25 | Source: saltelsdata.py


Example 20: cl2fits

def cl2fits(cl, filename, lcut):
    """cl2fits(cl, filename, lcut)"""

    table=[pyf.Column(name='TEMPERATURE',format='1D',array=cl[0:lcut+1])]
    #print table
    tbhdu=pyf.new_table(table)
    tbhdu.writeto(filename, clobber=True)
Author: aurelienbl | Project: pylib | Lines: 7 | Source: abl_lib.py
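A closing note on portability: pyfits.new_table was removed when pyfits was folded into Astropy, so none of the snippets above run unchanged against a current astropy.io.fits. If you need to port this pattern, the closest modern equivalent is fits.BinTableHDU.from_columns; the sketch below is only an assumed translation of the first sketch, with the same illustrative column names.

import numpy as np
from astropy.io import fits

# Same illustrative columns as before, using the maintained astropy.io.fits API
col1 = fits.Column(name='WAVELENGTH', format='E', array=np.linspace(4000.0, 7000.0, 100))
col2 = fits.Column(name='FLUX', format='E', array=np.random.random(100))

# BinTableHDU.from_columns plays the role of pyfits.new_table
tbhdu = fits.BinTableHDU.from_columns([col1, col2])
tbhdu.name = 'SPECTRUM'

hdulist = fits.HDUList([fits.PrimaryHDU(), tbhdu])
hdulist.writeto('example_table_astropy.fits', overwrite=True)  # overwrite replaces the old clobber keyword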



Note: The pyfits.new_table examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many different developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.

