Python OCO_Matrix.OCO_Matrix class code examples


This article collects typical usage examples of the OCO_Matrix.OCO_Matrix class in Python. If you have been wondering what the OCO_Matrix class is for, or how to use it, the selected class examples below may help.



The following presents 20 code examples of the OCO_Matrix class, ordered by default according to popularity.
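
Most of the examples below follow the same read-modify-write pattern: construct an OCO_Matrix from an input file, inspect or modify its data, labels and header attributes, and call write() to save the result. The minimal sketch below is not taken from the article; the file names and the "temperature" label are made up, and only attributes that actually appear in the examples (dims, labels_lower, data, header, write) are used.

# A minimal sketch of the common OCO_Matrix read-modify-write pattern.
# File names and the "temperature" label are hypothetical.
from OCO_Matrix import OCO_Matrix

file_obj = OCO_Matrix("profile_in.dat")                 # parse an existing matrix file
num_rows = file_obj.dims[0]                             # dimensions as read from the file

temp_col = file_obj.labels_lower.index("temperature")   # locate a column by label
file_obj.data[:, temp_col] += 1.0                       # modify the data array in place

file_obj.header["comment"] = "temperature column shifted by 1.0"
file_obj.write("profile_out.dat")                       # write the modified matrix back out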

Example 1: create_log_p_profile

def create_log_p_profile(input_file, output_file, column, val0, lapse_rate):

    # Load existing file
    file_obj = OCO_Matrix(input_file)
    num_rows = file_obj.dims[0]

    val0 = float(val0)
    lapse_rate = float(lapse_rate)

    # Find existing pressure bounds
    src_pres_col = file_obj.labels_lower.index("pressure")
    pressure = numpy.zeros(num_rows, dtype=float)

    for row in range(0, num_rows):
        pressure[row] = float(file_obj.data[row][src_pres_col])
   
    if column.isdigit():
        dest_prof_col = int(column)
    else:
        dest_prof_col = file_obj.labels_lower.index(column.lower())

    # create log p profile
    for row in range(num_rows-1, 0, -1):
        file_obj.data[row, dest_prof_col] = val0 - lapse_rate * (math.log(pressure[num_rows-1]) - math.log(pressure[row]))

    file_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 26, Source: create_log_p_profile.py
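
Example 1 fills the destination column with a profile that is linear in log-pressure and anchored at the bottom (highest-pressure) level: value(p) = val0 - lapse_rate * (ln(p_bottom) - ln(p)). As a rough, self-contained illustration (not part of the original script), the same formula can be applied with plain numpy on a made-up pressure grid:

# Stand-alone illustration of the log-pressure profile from Example 1.
# The pressure grid, val0 and lapse_rate values are invented.
import numpy

pressure = numpy.array([100.0, 300.0, 700.0, 1000.0])   # top of atmosphere to surface
val0 = 288.0        # value assigned at the bottom (highest-pressure) level
lapse_rate = 6.5    # change per unit of log-pressure

profile = val0 - lapse_rate * (numpy.log(pressure[-1]) - numpy.log(pressure))
print(profile)      # equals val0 at the bottom level, decreases toward the top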


Example 2: remove_bad_data_all

def remove_bad_data_all(input_file, output_file, check_col, check_val):

    # Load existing file
    file_obj = OCO_Matrix(input_file)
    num_rows = file_obj.dims[0]

    if check_col.isdigit():
        check_col = int(check_col)
    else:
        check_col = file_obj.labels_lower.index(check_col.lower())
    
    good_mask = []

    for row_idx in range(num_rows):
        if not re.search(str(check_val).lower(), str(file_obj.data[row_idx, check_col]).lower()):
            good_mask.append(row_idx)

    cleaned_data = numpy.zeros((len(good_mask), file_obj.dims[1]), dtype=float)

    new_data_idx = 0
    for good_row in good_mask:
        cleaned_data[new_data_idx, :] = file_obj.data[good_row, :]
        new_data_idx += 1
    
    file_obj.data = cleaned_data
    file_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 26, Source: remove_bad_data.py
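
Example 2 builds the list of good rows with an explicit loop and copies them one at a time. When the check is numeric rather than a regular-expression match, the same filtering can be written with a numpy boolean mask; the sketch below uses an invented array and fill value and only illustrates the idea, it is not a drop-in replacement for the function above.

# Boolean-mask alternative to the row-copy loop in Example 2.
# The data array and the -999 fill value are invented.
import numpy

data = numpy.array([[1.0, 2.0],
                    [-999.0, 3.0],
                    [4.0, 5.0]])

good_rows = data[:, 0] != -999.0   # True where the check column holds valid data
cleaned_data = data[good_rows, :]  # keeps only rows 0 and 2
print(cleaned_data)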


Example 3: Process_File

def Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict):

    if len(moduleSections) > 1:
       raise Exception('Only one instance of %s allowed per FILE block' % os.path.basename(__file__))

    if source == destination:
       raise Exception('Will not write albedo into source spectra file. dest_filename must be defined')

    instrument_name = Apply_Template(moduleSections[0].Get_Keyword_Value('instrument_name'), valuesDict, mapDict=mapDict)

    if instrument_name == None or len(instrument_name) == 0:
        raise Exception('instrument_name keyword must be specified for module: %s' % os.path.basename(__file__))

    # Load spectra data source
    spec_file_obj = OCO_Matrix(source)

    radiances = []
    for pixel_range in spec_file_obj.pixel_ranges():
        radiances.append( spec_file_obj[ASCII_SPECTRA_RADIANCE_COLUMN][pixel_range[0]:pixel_range[1]] )

    # Get SZA value
    try:
       sza_r = math.radians( float(spec_file_obj.header[ASCII_SPECTRA_SZA_KEYWORD].split()[0]) )
    except KeyError:
       raise KeyError('Could not find header keyword %s in spectrum file: %s' % (ASCII_SPECTRA_SZA_KEYWORD, source))

    # Create apriori file from radiance data
    create_albedo_apriori_from_radiance(radiances, sza_r, instrument_name, destination)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 28, Source: create_surface_apriori.py


Example 4: scale_cov_by_corr

def scale_cov_by_corr(input_file, output_file, scale_factor):

    # Load existing file
    matrix_obj = OCO_Matrix(input_file)

    rows = range(matrix_obj.dims[0])
    cols = range(matrix_obj.dims[1])

    data_new = numpy.zeros((matrix_obj.dims[0], matrix_obj.dims[1]), dtype=float)

    for row_idx in rows:
        for col_idx in cols:
            rho_old = matrix_obj.data[row_idx, col_idx] / \
                      (math.sqrt(matrix_obj.data[row_idx, row_idx]) * math.sqrt(matrix_obj.data[col_idx, col_idx]))

            if rho_old < 0.0:
                sign = -1.0
            else:
                sign = 1.0
                
            fact_new = float(scale_factor) * (1.0 - abs(rho_old))
            if abs(rho_old) < 1e-40:
                rho_new = 0.0
            elif fact_new > 1.0:
                rho_new = 0.0
            else:
                rho_new = 1.0 - fact_new

            data_new[row_idx, col_idx] = sign * rho_new * \
                                         (math.sqrt(matrix_obj.data[row_idx, row_idx]) * math.sqrt(matrix_obj.data[col_idx, col_idx]))
            
    matrix_obj.data = data_new
    matrix_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 33, Source: scale_cov_by_corr.py
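
Example 4 converts each covariance element to a correlation coefficient rho = S_ij / sqrt(S_ii * S_jj), replaces its magnitude with 1 - scale_factor * (1 - |rho|) (zeroing it when that expression would go negative or when |rho| is essentially zero), and converts back to covariance, which leaves the diagonal unchanged. A toy vectorized check of the same transform, on an invented 2x2 covariance:

# Toy check of the correlation rescaling used in Example 4.
# The 2x2 covariance and the scale factor are invented.
import numpy

cov = numpy.array([[4.0, 1.0],
                   [1.0, 9.0]])
scale_factor = 0.5

sigma = numpy.sqrt(numpy.diag(cov))
rho_old = cov / numpy.outer(sigma, sigma)            # correlation matrix

fact_new = scale_factor * (1.0 - numpy.abs(rho_old))
rho_new = numpy.where(fact_new > 1.0, 0.0, 1.0 - fact_new) * numpy.sign(rho_old)
rho_new[numpy.abs(rho_old) < 1e-40] = 0.0

cov_new = rho_new * numpy.outer(sigma, sigma)        # back to covariance
print(cov_new)                                       # diagonal stays at 4.0 and 9.0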


Example 5: average_profiles

def average_profiles(input_file_list, output_file):

    input_file_obj = open(input_file_list)
    first_file = input_file_obj.readline()
    input_file_obj.close()

    first_obj = OCO_Matrix(first_file.strip())
    dst_data = zeros((first_obj.dims[0], first_obj.dims[1]), dtype=float)
    pres_col = first_obj.labels_lower.index("pressure")
    dst_data[:, pres_col] = first_obj.data[:, pres_col]
    
    input_file_obj = open(input_file_list)

    count = 0
    for curr_atm_file in input_file_obj.readlines():
        curr_atm_file = curr_atm_file.strip()
        
        # Load existing file
        print "Loading %s" % curr_atm_file
        file_obj = OCO_Matrix(curr_atm_file)

        for col in range(file_obj.dims[1]):
            if col != pres_col:
                dst_data[:, col] += file_obj.data[:, col]

        count += 1
    
    for col in range(dst_data.shape[1]):
        if col != pres_col:        
            dst_data[:, col] /= count

    first_obj.data = dst_data
    first_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 33, Source: average_profiles.py


Example 6: noisify_spectra_file

def noisify_spectra_file(input_radiance_file, output_radiance_file, **kwargs):

    # Load existing file
    matrix_obj = OCO_Matrix(input_radiance_file)

    noisify_spectra_obj(matrix_obj, **kwargs)

    matrix_obj.write(output_radiance_file, auto_size_cols=False)    
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 8, Source: noisify_spectra.py


Example 7: Process_File

def Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict, buffer_objs):

    matrix_obj = OCO_Matrix(source)
    for noisifySect in moduleSections:       
        noise_cut_off = Apply_Template(noisifySect.Get_Keyword_Value('noise_cut_off'), valuesDict, mapDict=mapDict)
        pixel_rows    = Apply_Template(noisifySect.Get_Keyword_Value('pixel_rows'), valuesDict, mapDict=mapDict)

        noisify_spectra_obj(matrix_obj, row_range_spec=pixel_rows, noise_cut_off=noise_cut_off)
        
    matrix_obj.write(destination, auto_size_cols=False)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 10, Source: noisify_spectra_module.py


Example 8: get_data_object

def get_data_object(data_filename):

    # Try to load data using OCO_Matrix class
    try:
        data_obj = OCO_Matrix(data_filename)
        return data_obj
    except:
        pass

    # Now load file as tabled data
    table_file_obj = open(data_filename, 'r')
    file_lines = table_file_obj.readlines()
    table_file_obj.close()

    # Separate each line by spaces. Keep count of the maximum
    # number of columns seen so we know how to size the
    # resulting matrix
    max_cols = 0
    file_rows = []
    for line in file_lines:
        if line.find('#') < 0 and len(line.strip()) != 0:
            line_cols = line.strip().split()
            file_rows.append(line_cols)
            max_cols = max(max_cols, len(line_cols))

#    data_mat = numpy.zeros((len(file_rows), max_cols), dtype=float)
    data_mat = numpy.zeros((len(file_rows), max_cols), dtype=numpy.chararray)

    for row_idx in range(len(file_rows)):
        num_cols = len(file_rows[row_idx])
        for col_idx in range(num_cols):
            col_value = file_rows[row_idx][col_idx]
            data_mat[row_idx][col_idx] = col_value
#            try:
#                data_mat[row_idx][col_idx] = float(col_value)
#            except:
#                data_mat[row_idx][col_idx] = fill_value

    # Create label names based on filename and index or else can
    # not select specific columns
    label_base = os.path.basename(data_filename)
    label_base = label_base[0:label_base.rfind('.')] # Remove extension

    data_labels = []    
    for col_idx in range(max_cols):
        data_labels.append( get_column_format(max_cols) % (label_base, col_idx) )
    
    # Save data into OCO Matrix object
    data_obj = OCO_Matrix()
    data_obj.dims = [len(file_rows), max_cols]
    data_obj.labels = data_labels
    data_obj.data = data_mat
    
    return data_obj
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 54, Source: gather_data.py


Example 9: set_noise_col

def set_noise_col(input_file, output_file, snr):

    snr = float(snr)

    file_obj = OCO_Matrix(input_file)

    rad_col   = file_obj.labels_lower.index("radiance")
    noise_col = file_obj.labels_lower.index("noise")

    noise_val = max( file_obj.data[:, rad_col] ) / snr

    file_obj.data[:, noise_col] = noise_val

    file_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 14, Source: set_noise_col_snr.py


Example 10: Process_File

def Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict):
    logger = logging.getLogger(os.path.basename(__file__))

    for currSection in moduleSections:
        if str(source) == str(destination):
            raise IOError("source and destination must be different. will not overwrite source file")

    rows = Apply_Template(moduleSections[0].Get_Keyword_Value("rows"), valuesDict, mapDict=mapDict)
    columns = Apply_Template(moduleSections[0].Get_Keyword_Value("columns"), valuesDict, mapDict=mapDict)
    identifier = Apply_Template(moduleSections[0].Get_Keyword_Value("identifier"), valuesDict, mapDict=mapDict)
    initial_value = Apply_Template(moduleSections[0].Get_Keyword_Value("initial_value"), valuesDict, mapDict=mapDict)
    map_filename = Apply_Template(moduleSections[0].Get_Keyword_Value("map_filename"), valuesDict, mapDict=mapDict)
    modify = moduleSections[0].Get_Keyword_Value("modify")

    # Load ranges from RANGES section of module
    max_range_val = None
    range_values = {}
    for range_sect in moduleSections[0].Get_Section("->RANGES"):
        for range_spec in range_sect.Get_Matrix_Data():
            (range_name, range_str) = range_spec

            if range_str.find(",") > 0:
                curr_range = [float(val) for val in range_str.split(",")]
            else:
                curr_range = [float(val) for val in range_str.split()]

            if max_range_val == None:
                max_range_val = max(curr_range)
            else:
                max_range_val = max(max_range_val, max(curr_range))

            range_values[range_name] = curr_range

    if len(range_values) == 0:
        logger.error("No index range list supplied for operating on source: %s" % source)
        return

    # Load source for data to map against
    data_obj = OCO_Matrix(source)

    # Set columns to all if argument not supplied.
    # Otherwise try parsing as an index range list; failing that, try
    # using the specified columns as label names
    if columns == None:
        columns = range(data_obj.dims[1])
    else:
        try:
            columns = index_range_list(columns)
        except (ValueError, TypeError):
            columns = data_obj.find_labels(columns, match_case=False, indexes=True)
Developer: nasa, Project: RtRetrievalFramework, Lines: 50, Source: range_map.py


Example 11: adjust_file_for_trend

    def adjust_file_for_trend(self, src_apriori_file, time_struct, dst_apriori_file=None):

        if dst_apriori_file == None:
            dst_apriori_file = src_apriori_file

        if type(dst_apriori_file) is str and os.path.realpath(os.path.dirname(dst_apriori_file)) == os.path.realpath(self.apriori_db_path):
            raise IOError('Can not modify apriori file as located in database path, it must be copied first')

        apriori_obj = OCO_Matrix(src_apriori_file)
        co2_col_idx = apriori_obj.labels.index(apriori_obj.find_labels(CO2_APRIORI_CO2_COL)[0])

        co2_offset = self.get_apriori_offset(time_struct, debug_values=apriori_obj.header)
        apriori_obj.data[:, co2_col_idx] += co2_offset

        apriori_obj.header['co2_offset'] = co2_offset

        apriori_obj.write(dst_apriori_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 17, Source: Apriori_DB.py


Example 12: make_diag_only_cov

def make_diag_only_cov(input_file, output_file):

    # Load existing file
    matrix_obj = OCO_Matrix(input_file)

    rows = range(matrix_obj.dims[0])
    cols = range(matrix_obj.dims[1])

    data_new = numpy.zeros((matrix_obj.dims[0], matrix_obj.dims[1]), dtype=float)

    for row_idx in rows:
        for col_idx in cols:
            if row_idx == col_idx:
                data_new[row_idx, col_idx] = matrix_obj.data[row_idx, col_idx]
            
    matrix_obj.data = data_new
    matrix_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 17, Source: make_diag_only_cov.py
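
The double loop in Example 12 simply zeroes everything off the main diagonal. For reference, numpy can express the same operation in one line: numpy.diag applied to a 2-D array extracts its diagonal, and applied to that 1-D result builds a matrix that is zero elsewhere. The matrix below is invented; this is an alternative sketch, not the project's code.

# One-line numpy equivalent of the double loop in Example 12, on an invented matrix.
import numpy

cov = numpy.array([[4.0, 1.0, 0.5],
                   [1.0, 9.0, 2.0],
                   [0.5, 2.0, 16.0]])

diag_only = numpy.diag(numpy.diag(cov))   # off-diagonal elements become zero
print(diag_only)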


Example 13: offset_column

def offset_column(input_file, output_file, columns, offset, method, pressure_range=None):

    # Load existing file
    matrix_obj = OCO_Matrix(input_file)
   
    # Add ability to specify cols individually or using a * to goto end
    cols = index_range_list(columns)

    if offset.isdigit():
        offset = float(offset)
    else:
        offset = eval(offset)

    if pressure_range != None:
        pres_col = matrix_obj.labels_lower.index("pressure")

        pres_range_arr = pressure_range.split(',')
        pres_val_beg = float(pres_range_arr[0])
        pres_val_end = float(pres_range_arr[1])

        pres_idx_beg = 0
        pres_idx_end = matrix_obj.dims[0]

        pres_column = [float(val[pres_col]) for val in matrix_obj.data]

        pres_idx_curr = 0
        beg_found = False
        for pres_val in pres_column:            
            if pres_val >= pres_val_beg and not beg_found:
                pres_idx_beg = pres_idx_curr
                beg_found = True
        
            if pres_val <= pres_val_end:
                pres_idx_end = pres_idx_curr + 1

            pres_idx_curr += 1

        target_rows = range(pres_idx_beg, pres_idx_end)

    else:
        target_rows = range(matrix_obj.dims[0])

    for rowIdx in target_rows:
        for colIdx in cols:

            #print 'old_val[%d][%d] = %f' % (rowIdx, colIdx, matrix_obj.data[rowIdx][colIdx])
            
            if method == '/':
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] / offset
            elif method == '-':
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] - offset
            elif method == '*':
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] * offset
            else:
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] + offset

            #print 'new_val[%d][%d] = %f' % (rowIdx, colIdx, matrix_obj.data[rowIdx][colIdx])

    matrix_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 60, Source: offset_column.py


Example 14: create_mean_psurf

def create_mean_psurf(runlog_file, psurf_file):

    print 'runlog_file = ', runlog_file
    print 'psurf_file = ', psurf_file
    
    runlog_fobj = open(runlog_file, "r")

    header_cols = runlog_fobj.readline().split()

    pout_col = header_cols.index('pout')

    pouts = []
    for runlog_line in runlog_fobj.readlines():
        runlog_parts = runlog_line.split()
        pouts.append(float(runlog_parts[pout_col]))
        
    runlog_fobj.close()
    
    avg_psurf = mean(pouts) * 1e2

    out_mat_obj = OCO_Matrix()
    out_mat_obj.file_id = "Mean surface pressure from runlog file: %s" % runlog_file
    out_mat_obj.labels = ['LEVEL', 'PSURF']
    out_mat_obj.data = ones((1, 2), dtype=float)
    out_mat_obj.data[0, 1] = avg_psurf
    out_mat_obj.write(psurf_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 26, Source: create_runlog_mean_psurf.py


Example 15: write_xco2_file

def write_xco2_file(log_sounding_dict, xco2_filename):
    xco2_fileobj = OCO_Matrix()
    xco2_fileobj.file_id = 'True xco2 from orbit simulator'
    xco2_fileobj.labels = [XCO2_LABEL_NAME]
    xco2_fileobj.data = numpy.zeros((1,1), dtype=float)
    xco2_fileobj.data[0,0] = log_sounding_dict[XCO2_COL_NAME]
    xco2_fileobj.write(xco2_filename)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 7, Source: extract_orbit_sim_data.py


Example 16: random_column

def random_column(input_file, output_file, columns, mean, std_dev):

    # Load existing file
    matrix_obj = OCO_Matrix(input_file)
   
    # Add ability to specify cols individually or using a * to goto end
    cols = index_range_list(columns)
    
    mean = float(mean)
    std_dev = float(std_dev)

    target_rows = range(matrix_obj.dims[0])

    for rowIdx in target_rows:
        for colIdx in cols:
            matrix_obj.data[rowIdx][colIdx] = random.gauss(mean, std_dev)

    matrix_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 18, Source: random_column.py


Example 17: scale_ils_table

def scale_ils_table(input_file, output_file, scale_factor):

    # Load existing file
    print "Reading %s" % input_file
    file_obj = OCO_Matrix(input_file)

    scale_factor = float(scale_factor)

    for row_idx in range(file_obj.dims[0]):
        for lbl_idx in range(file_obj.dims[1]):
            if file_obj.labels_lower[lbl_idx].find("ils_delta_lambda_") == 0:
                file_obj.data[row_idx, lbl_idx] *= scale_factor

            if file_obj.labels_lower[lbl_idx].find("ils_response_") == 0:
                file_obj.data[row_idx, lbl_idx] = (1.0 / scale_factor) * file_obj.data[row_idx, lbl_idx]

    print "Writing %s" % output_file
    file_obj.write(output_file)
Developer: nasa, Project: RtRetrievalFramework, Lines: 18, Source: scale_ils_table.py
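
Example 17 multiplies the ils_delta_lambda_* columns by scale_factor while dividing the ils_response_* columns by the same factor, presumably so that the instrument line shape is stretched in wavelength without changing its integrated area. A quick self-contained check of that property on a made-up Gaussian line shape:

# Check that the reciprocal scaling in Example 17 preserves the ILS area.
# The Gaussian line shape and the grid below are invented.
import numpy

delta_lambda = numpy.linspace(-1.0, 1.0, 201)
response = numpy.exp(-delta_lambda**2 / 0.02)

scale_factor = 1.5
dx = delta_lambda[1] - delta_lambda[0]

area_before = response.sum() * dx
area_after = (response / scale_factor).sum() * (dx * scale_factor)

print(abs(area_before - area_after) < 1e-12)   # True: the integrated area is unchanged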


Example 18: interpol_cov

def interpol_cov(input_file, output_file, src_pressure_file, dst_pressure_file):

    # Load existing file
    file_obj = OCO_Matrix(input_file)

    # Find existing pressure bounds
    pres_src_obj = OCO_Matrix(src_pressure_file)
    src_pres_col = pres_src_obj.labels_lower.index("pressure")
    num_levels_src = pres_src_obj.dims[0]
    pressure_src = pres_src_obj.data[:, src_pres_col]

    pres_dst_obj = OCO_Matrix(dst_pressure_file)
    dst_pres_col = pres_dst_obj.labels_lower.index("pressure")
    num_levels_dst = pres_dst_obj.dims[0]
    pressure_dst = pres_dst_obj.data[:, dst_pres_col]

    M = numpy.zeros((num_levels_dst, num_levels_src), dtype=float)

    # Setup Interpolation Matrix 
    for i in range(num_levels_dst):
        for j in range(num_levels_src):
            if (pressure_dst[i] <= pressure_src[j]):
                lev = j
                break

        if (lev > 0):
            M[i, lev] = \
                 (math.log(pressure_dst[i]) - math.log(pressure_src[lev-1]))      \
                 / (math.log(pressure_src[lev])                                \
                    - math.log(pressure_src[lev-1]))
        else:
            M[i, lev] = 1.0

        if (i > 0):
            M[i, lev-1] =                                 \
                 (-math.log(pressure_dst[i]) + math.log(pressure_src[lev-1]))     \
                 / (math.log(pressure_src[lev])                                \
                    - math.log(pressure_src[lev-1])) + 1

    # Use interpolation matrix to create new covariance
    shat_out = (mat(M) * mat(file_obj.data)) * transpose(mat(M))

    file_obj.data = shat_out
    file_obj.write(output_file, auto_size_cols=False)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 44, Source: interpol_cov.py
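
Example 18 builds an interpolation matrix M that maps a profile from the source pressure grid to the destination grid by linear interpolation in log-pressure, then transforms the covariance as S_dst = M * S_src * M^T. The short sketch below applies only that final matrix product to invented inputs, using numpy.dot in place of the mat()/transpose() calls of the original:

# Applying the covariance mapping S_dst = M * S_src * M^T from Example 18.
# The 2x3 interpolation matrix and 3x3 covariance are invented.
import numpy

M = numpy.array([[1.0, 0.0, 0.0],
                 [0.0, 0.5, 0.5]])           # maps 3 source levels onto 2 destination levels

S_src = numpy.array([[1.0, 0.2, 0.0],
                     [0.2, 1.0, 0.3],
                     [0.0, 0.3, 1.0]])

S_dst = numpy.dot(numpy.dot(M, S_src), M.T)  # 2x2 covariance on the destination grid
print(S_dst)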


Example 19: extract_sv_jacobians

def extract_sv_jacobians(pd_file, names_file, rad_conv_file):
    conv_obj = OCO_Matrix(rad_conv_file, read_data=False)
    
    names_obj = OCO_Matrix(names_file, as_strings=True)
    sv_names = names_obj['Element Name'][:,0]

    pd_obj = OCO_Matrix(pd_file)

    for name_re, output_filename in SV_NAMES_MATCH.items():
        file_indexes = []
        for curr_idx, curr_name in enumerate(sv_names):
            if re.search(name_re, curr_name):
                file_indexes.append(curr_idx)

        print output_filename, file_indexes

        out_obj = OCO_Matrix()
        out_obj.pixels = conv_obj.pixels
        out_obj.data = pd_obj.data[:,file_indexes]
        out_obj.write(output_filename)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 20, Source: extract_sv_jacobians.py


Example 20: write_albedo_file

def write_albedo_file(output_file, albedo_data, header_values=None):
    albedo_obj = OCO_Matrix()
    if header_values != None:
        albedo_obj.header.update(header_values)

    albedo_obj.header['center_wavelengths'] = ' '.join([str(wl) for wl in ALBEDO_CENTER_WAVELENGTHS])
    albedo_obj.labels = [ALBEDO_COL_TMPL % (idx+1) for idx in range(albedo_data.shape[1])]
    albedo_obj.data = albedo_data
    albedo_obj.file_id = 'Surface albedo data'
    albedo_obj.write(output_file)
Developer: E-LLP, Project: RtRetrievalFramework, Lines: 10, Source: create_surface_apriori.py



Note: The OCO_Matrix.OCO_Matrix class examples in this article were collected from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Follow the corresponding project's license when redistributing or using the code; do not republish without permission.

