Python pyasdf.ASDFDataSet Class Code Examples


This article collects typical usage examples of the pyasdf.ASDFDataSet class in Python. If you have been wondering what the ASDFDataSet class does, how to use it, or what working examples look like, the curated code examples below should help.



A total of 20 code examples of the ASDFDataSet class are shown below, sorted by popularity by default.
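
Before the examples, here is a minimal, self-contained usage sketch showing the pattern most of them build on: open a data set, add event, station and waveform data, then read it back through the accessors. The file names and the station used here are placeholders chosen for illustration; the calls themselves (add_quakeml, add_stationxml, add_waveforms, the waveforms accessor) are the ones used throughout the examples below.

import obspy
from pyasdf import ASDFDataSet

# A minimal sketch; "example.h5" and the input file names are placeholders.
ds = ASDFDataSet("example.h5")

# Event catalog (QuakeML) and station metadata (StationXML).
ds.add_quakeml("quake.xml")
ds.add_stationxml("AE.113A..BH.xml")

# Waveforms are stored under a tag and can be associated with an event.
st = obspy.read("AE.113A..BH.mseed")
ds.add_waveforms(st, tag="raw_recording", event_id=ds.events[0])

# Read back: data is grouped per station, per tag.
st_back = ds.waveforms.AE_113A.raw_recording
inv = ds.waveforms.AE_113A.StationXML

# Deleting the object flushes and closes the underlying HDF5 file.
del ds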

Example 1: write_new_synt_asdf

    def write_new_synt_asdf(self, file_prefix):
        new_synt_dict = self._sort_new_synt()

        for tag, win_array in new_synt_dict.iteritems():
            filename = "%s.%s.h5" % (file_prefix, tag)
            if os.path.exists(filename):
                os.remove(filename)
                logger.info("Output file exists, removed: %s" % filename)
            else:
                logger.info("Output new synt asdf: %s" % filename)

            ds = ASDFDataSet(filename, mode='w')
            added_list = []
            for window in win_array:
                synt_id = window.datalist['new_synt'].id
                # skip duplicate obsd location id.
                # for example, II.AAK.00.BHZ and II.AAK.10.BHZ will
                # be treated as different traces. But the synt and
                # new synt will be the same. So we only add one
                if synt_id in added_list:
                    continue
                else:
                    added_list.append(synt_id)
                ds.add_waveforms(window.datalist['new_synt'], tag=tag)
            # add stationxml
            _staxml_asdf = self._asdf_file_dict['synt']
            ds_sta = ASDFDataSet(_staxml_asdf)
            self.__add_staxml_from_other_asdf(ds, ds_sta)
            ds.flush()
Author: fmagnoni, Project: pycmt3d, Lines: 29, Source: data_container.py


Example 2: test_adding_arbitrary_files

def test_adding_arbitrary_files(tmpdir):
    """
    Tests that adding arbitrary files works.
    """
    test_filename = os.path.join(tmpdir.strpath, "temp.json")
    test_dict = {"a": 1, "b": 2}
    with open(test_filename, "wt") as fh:
        json.dump(test_dict, fh, sort_keys=True)

    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    data_set.add_auxiliary_data_file(
        test_filename, tag="test_file", parameters={"1": 1})

    data_set.__del__()
    del data_set

    new_data_set = ASDFDataSet(asdf_filename)
    # Extraction works the same as always, but now has a special attribute,
    # that returns the data as a BytesIO.
    aux_data = new_data_set.auxiliary_data.File.test_file
    assert aux_data.parameters == {"1": 1}
    assert aux_data.tag == "test_file"

    new_test_dict = json.loads(aux_data.file.read().decode())
    assert test_dict == new_test_dict

    aux_data.file.seek(0, 0)

    with open(test_filename, "rb") as fh:
        assert fh.read() == aux_data.file.read()
Author: petrrr, Project: pyasdf, Lines: 32, Source: test_asdf_data_set.py


Example 3: test_get_provenance_document_for_id

def test_get_provenance_document_for_id(tmpdir):
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    filename = os.path.join(data_dir,
                            "example_schematic_processing_chain.xml")

    doc = prov.read(filename)
    data_set.provenance["test_provenance"] = doc

    assert data_set.provenance.get_provenance_document_for_id(
            '{http://seisprov.org/seis_prov/0.1/#}sp002_dt_f87sf7sf78') == \
        {"name": "test_provenance", "document": doc}

    assert data_set.provenance.get_provenance_document_for_id(
            '{http://seisprov.org/seis_prov/0.1/#}sp004_lp_f87sf7sf78') == \
        {"name": "test_provenance", "document": doc}

    # Id not found.
    with pytest.raises(ASDFValueError) as err:
        data_set.provenance.get_provenance_document_for_id(
            '{http://seisprov.org/seis_prov/0.1/#}bogus_id')

    assert err.value.args[0] == (
        "Document containing id "
        "'{http://seisprov.org/seis_prov/0.1/#}bogus_id'"
        " not found in the data set.")

    # Not a qualified id.
    with pytest.raises(ASDFValueError) as err:
        data_set.provenance.get_provenance_document_for_id("bla")

    assert err.value.args[0] == ("Not a valid qualified name.")

    data_set.__del__()
Author: petrrr, Project: pyasdf, Lines: 35, Source: test_asdf_data_set.py


Example 4: test_tag_iterator

def test_tag_iterator(example_data_set):
    """
    Tests the tag iterator.
    """
    data_set = ASDFDataSet(example_data_set.filename)

    expected_ids = ["AE.113A..BHE", "AE.113A..BHN", "AE.113A..BHZ", "TA.POKR..BHE", "TA.POKR..BHN", "TA.POKR..BHZ"]

    for st, inv in data_set.itertag("raw_recording"):
        for tr in st:
            assert tr.id in expected_ids
            expected_ids.remove(tr.id)
            assert bool(
                inv.select(
                    network=tr.stats.network,
                    station=tr.stats.station,
                    channel=tr.stats.channel,
                    location=tr.stats.location,
                ).networks
            )

    assert expected_ids == []

    # It will only return matching tags.
    count = 0
    for _ in data_set.itertag("random"):
        count += 1
    assert count == 0
Author: QuLogic, Project: pyasdf, Lines: 28, Source: test_asdf_data_set.py


Example 5: test_detailed_event_association_is_persistent_through_processing

def test_detailed_event_association_is_persistent_through_processing(
        example_data_set):
    """
    Processing a file with an associated event and storing it again should
    keep the association for all the possible event tags.
    """
    data_set = ASDFDataSet(example_data_set.filename)
    # Store a new waveform.
    event = data_set.events[0]
    origin = event.origins[0]
    magnitude = event.magnitudes[0]
    focmec = event.focal_mechanisms[0]

    tr = obspy.read()[0]
    tr.stats.network = "BW"
    tr.stats.station = "RJOB"

    data_set.add_waveforms(tr, tag="random", event_id=event,
                           origin_id=origin, focal_mechanism_id=focmec,
                           magnitude_id=magnitude)

    new_st = data_set.waveforms.BW_RJOB.random
    new_st.taper(max_percentage=0.05, type="cosine")

    data_set.add_waveforms(new_st, tag="processed")
    processed_st = data_set.waveforms.BW_RJOB.processed
    assert event.resource_id == processed_st[0].stats.asdf.event_id
    assert origin.resource_id == processed_st[0].stats.asdf.origin_id
    assert magnitude.resource_id == processed_st[0].stats.asdf.magnitude_id
    assert focmec.resource_id == processed_st[0].stats.asdf.focal_mechanism_id
Author: petrrr, Project: pyasdf, Lines: 30, Source: test_asdf_data_set.py


Example 6: write_new_syn_file

    def write_new_syn_file(self, file_format="sac", outputdir=".",
                           eventname=None, suffix=None):
        """
        Write out new synthetic file based on new cmtsolution
        :return:
        """
        if not os.path.exists(outputdir):
            os.makedirs(outputdir)

        # sort the new synthetic data
        new_synt_dict = {}
        for window in self.window:
            tag = window.tag['synt']
            if tag not in new_synt_dict.keys():
                new_synt_dict[tag] = []
            new_synt_dict[tag].append(window)

        if file_format.upper() == "SAC":
            for tag, win_array in new_synt_dict.iteritems():
                if eventname is None:
                    targetdir = os.path.join(outputdir, tag)
                else:
                    targetdir = os.path.join(outputdir, "%s_%s"
                                             % (eventname, tag))
                if not os.path.exists(targetdir):
                    os.makedirs(targetdir)
                for window in win_array:
                    sta = window.station
                    nw = window.network
                    component = window.component
                    location = window.location
                    filename = "%s.%s.%s.%s.sac" \
                               % (sta, nw, location, component)
                    outputfn = os.path.join(targetdir, filename)
                    new_synt = window.datalist['new_synt']
                    new_synt.write(outputfn, format='SAC')
        elif file_format.upper() == "ASDF":
            for tag, win_array in new_synt_dict.iteritems():
                if eventname is None:
                    outputfn = os.path.join(outputdir, "new_synt.%s.h5" % tag)
                else:
                    if suffix is None:
                        outputfn = os.path.join(outputdir, "%s.new_synt.%s.h5"
                                                % (eventname, tag))
                    else:
                        outputfn = os.path.join(
                            outputdir, "%s.%s.new_synt.%s.h5"
                            % (eventname, suffix, tag))
                if os.path.exists(outputfn):
                    os.remove(outputfn)
                ds = ASDFDataSet(outputfn)
                for window in win_array:
                    ds.add_waveforms(window.datalist['new_synt'], tag=tag)
                # add stationxml
        else:
            raise NotImplementedError
Author: rmodrak, Project: pycmt3d, Lines: 56, Source: window.py


Example 7: _core

    def _core(self, path, param):
        """
        Core function that handles one pair of asdf file(observed and
        synthetic), windows and configuration for adjoint source

        :param path: path information, path of observed asdf, synthetic
            asdf, windows files, observed tag, synthetic tag, output adjoint
            file, figure mode and figure directory
        :type path: dict
        :param param: parameter information for constructing adjoint source
        :type param: dict
        :return:
        """
        adjoint_param = param["adjoint_config"]

        obsd_file = path["obsd_asdf"]
        synt_file = path["synt_asdf"]
        obsd_tag = path["obsd_tag"]
        synt_tag = path["synt_tag"]
        window_file = path["window_file"]
        output_filename = path["output_file"]

        self.check_input_file(obsd_file)
        self.check_input_file(synt_file)
        self.check_input_file(window_file)
        self.check_output_file(output_filename)

        obsd_ds = self.load_asdf(obsd_file, mode="r")
        synt_ds = self.load_asdf(synt_file, mode="r")

        windows = self.load_windows(window_file)

        adj_src_type = adjoint_param["adj_src_type"]
        adjoint_param.pop("adj_src_type", None)

        config = load_adjoint_config(adjoint_param, adj_src_type)

        if self.mpi_mode and self.rank == 0:
            output_ds = ASDFDataSet(output_filename, mpi=False)
            if output_ds.events:
                output_ds.events = obsd_ds.events
            del output_ds
        if self.mpi_mode:
            self.comm.barrier()

        measure_adj_func = \
            partial(measure_adjoint_wrapper, config=config,
                    obsd_tag=obsd_tag, synt_tag=synt_tag,
                    windows=windows,
                    adj_src_type=adj_src_type)

        results = obsd_ds.process_two_files(synt_ds, measure_adj_func)

        if self.rank == 0:
            print("output filename: %s" % output_filename)
            write_measurements(results, output_filename)
Author: wjlei1990, Project: pypaw, Lines: 56, Source: measure_adjoint.py
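
For reference, the path and param dictionaries described in the docstring of _core above can be pictured as follows. This is a hypothetical illustration inferred only from the keys the method reads; the file names and tag values are placeholders, and pypaw may expect additional keys (e.g. for figure output) that are not shown here.

# Hypothetical input for _core(); keys inferred from the code above,
# all file names and tag values are placeholders.
path = {
    "obsd_asdf": "proc_obsd.h5",        # observed ASDF file
    "synt_asdf": "proc_synt.h5",        # synthetic ASDF file
    "obsd_tag": "proc_obsd_17_40",      # waveform tag inside the observed file
    "synt_tag": "proc_synt_17_40",      # waveform tag inside the synthetic file
    "window_file": "windows.json",      # window selection results
    "output_file": "measurements.json"  # where write_measurements() stores results
}

param = {
    "adjoint_config": {
        # "adj_src_type" is popped off and the remaining keys are handed to
        # load_adjoint_config(); the value here is only an assumption.
        "adj_src_type": "multitaper_misfit",
    }
}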


Example 8: test_str_method_provenance_documents

def test_str_method_provenance_documents(tmpdir):
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    filename = os.path.join(data_dir, "example_schematic_processing_chain.xml")
    data_set.add_provenance_document(filename, name="test_provenance")

    assert str(data_set.provenance) == (
        "1 Provenance Document(s):\n\ttest_provenance"
    )
Author: petrrr, Project: pyasdf, Lines: 10, Source: test_asdf_data_set.py


Example 9: test_provenance_list_command

def test_provenance_list_command(tmpdir):
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    filename = os.path.join(data_dir,
                            "example_schematic_processing_chain.xml")

    # Add it as a document.
    doc = prov.read(filename, format="xml")
    data_set.add_provenance_document(doc, name="test_provenance")

    assert data_set.provenance.list() == ["test_provenance"]
Author: petrrr, Project: pyasdf, Lines: 12, Source: test_asdf_data_set.py


Example 10: test_coordinate_extraction_but_no_stationxml

def test_coordinate_extraction_but_no_stationxml(tmpdir):
    """
    Tests what happens if no stationxml is defined for a station.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_path = os.path.join(data_dir, "small_sample_data_set")

    data_set = ASDFDataSet(asdf_filename)
    for filename in glob.glob(os.path.join(data_path, "*.mseed")):
        data_set.add_waveforms(filename, tag="raw_recording")

    # If no stationxml exists it should just return an empty dictionary.
    assert data_set.get_all_coordinates() == {}
Author: petrrr, Project: pyasdf, Lines: 13, Source: test_asdf_data_set.py


Example 11: test_event_association_is_persistent_through_processing

def test_event_association_is_persistent_through_processing(example_data_set):
    """
    Processing a file with an associated event and storing it again should
    keep the association.
    """
    data_set = ASDFDataSet(example_data_set.filename)
    st = data_set.waveforms.TA_POKR.raw_recording
    event_id = st[0].stats.asdf.event_id

    st.taper(max_percentage=0.05, type="cosine")

    data_set.add_waveforms(st, tag="processed")
    processed_st = data_set.waveforms.TA_POKR.processed
    assert event_id == processed_st[0].stats.asdf.event_id
Author: petrrr, Project: pyasdf, Lines: 14, Source: test_asdf_data_set.py


Example 12: test_waveform_accessor_printing

def test_waveform_accessor_printing(example_data_set):
    """
    Pretty printing of the waveform accessor proxy objects.
    """
    data_set = ASDFDataSet(example_data_set.filename)

    assert data_set.waveforms.AE_113A.__str__() == (
        "Contents of the data set for station AE.113A:\n"
        "    - Has a StationXML file\n"
        "    - 1 Waveform Tag(s):\n"
        "        raw_recording")

    data_set.__del__()
    del data_set
Author: petrrr, Project: pyasdf, Lines: 14, Source: test_asdf_data_set.py


Example 13: test_accessing_non_existent_tag_raises

def test_accessing_non_existent_tag_raises(example_data_set):
    """
    Accessing a non-existing station should raise.
    """
    data_set = ASDFDataSet(example_data_set.filename)

    try:
        with pytest.raises(WaveformNotInFileException) as excinfo:
            data_set.waveforms.AE_113A.asdfasdf

        assert excinfo.value.args[0] == ("Tag 'asdfasdf' not part of the data "
                                         "set for station 'AE.113A'.")
    finally:
        data_set.__del__()
Author: petrrr, Project: pyasdf, Lines: 14, Source: test_asdf_data_set.py


Example 14: test_extract_all_coordinates

def test_extract_all_coordinates(example_data_set):
    """
    Tests the extraction of all coordinates.
    """
    data_set = ASDFDataSet(example_data_set.filename)

    assert data_set.get_all_coordinates() == {
       "AE.113A": {
           "latitude": 32.7683,
           "longitude": -113.7667,
           "elevation_in_m": 118.0},

       "TA.POKR": {
        "latitude": 65.1171,
        "longitude": -147.4335,
        "elevation_in_m": 501.0}}
Author: petrrr, Project: pyasdf, Lines: 16, Source: test_asdf_data_set.py


Example 15: test_adding_same_event_twice_raises

def test_adding_same_event_twice_raises(tmpdir):
    """
    Adding the same event twice raises.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_path = os.path.join(data_dir, "small_sample_data_set")

    data_set = ASDFDataSet(asdf_filename)

    # Add once, all good.
    data_set.add_quakeml(os.path.join(data_path, "quake.xml"))
    assert len(data_set.events) == 1

    # Adding again should raise an error.
    with pytest.raises(ValueError):
        data_set.add_quakeml(os.path.join(data_path, "quake.xml"))
Author: petrrr, Project: pyasdf, Lines: 16, Source: test_asdf_data_set.py


Example 16: test_adding_a_provenance_record

def test_adding_a_provenance_record(tmpdir):
    """
    Tests adding a provenance record.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    filename = os.path.join(data_dir, "example_schematic_processing_chain.xml")

    # Add it as a document.
    doc = prov.read(filename, format="xml")
    data_set.add_provenance_document(doc, name="test_provenance")
    del data_set

    # Read it again.
    data_set = ASDFDataSet(asdf_filename)
    assert data_set.provenance.test_provenance == doc
Author: petrrr, Project: pyasdf, Lines: 17, Source: test_asdf_data_set.py


Example 17: test_data_set_creation

def test_data_set_creation(tmpdir):
    """
    Test data set creation with a small test dataset.

    It tests that the stuff that goes in is correctly saved and
    can be retrieved again.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_path = os.path.join(data_dir, "small_sample_data_set")

    data_set = ASDFDataSet(asdf_filename)

    for filename in glob.glob(os.path.join(data_path, "*.mseed")):
        data_set.add_waveforms(filename, tag="raw_recording")

    for filename in glob.glob(os.path.join(data_path, "*.xml")):
        if "quake.xml" in filename:
            data_set.add_quakeml(filename)
        else:
            data_set.add_stationxml(filename)

    # Flush and finish writing.
    del data_set

    # Open once again
    data_set = ASDFDataSet(asdf_filename)

    # ObsPy is tested enough to make this comparison meaningful.
    for station in (("AE", "113A"), ("TA", "POKR")):
        # Test the waveforms
        stream_asdf = \
            getattr(data_set.waveforms, "%s_%s" % station).raw_recording
        stream_file = obspy.read(os.path.join(
            data_path, "%s.%s.*.mseed" % station))
        # Delete the file format specific stats attributes. These are
        # meaningless inside ASDF data sets.
        for trace in stream_file:
            del trace.stats.mseed
            del trace.stats._format
        for trace in stream_asdf:
            del trace.stats.asdf
            del trace.stats._format
        assert stream_asdf == stream_file

        # Test the inventory data.
        inv_asdf = \
            getattr(data_set.waveforms, "%s_%s" % station).StationXML
        inv_file = obspy.read_inventory(
            os.path.join(data_path, "%s.%s..BH*.xml" % station))
        assert inv_file == inv_asdf
    # Test the event.
    cat_file = obspy.readEvents(os.path.join(data_path, "quake.xml"))
    cat_asdf = data_set.events
    # from IPython.core.debugger import Tracer; Tracer(colors="Linux")()
    assert cat_file == cat_asdf
Author: petrrr, Project: pyasdf, Lines: 55, Source: test_asdf_data_set.py


Example 18: example_data_set

def example_data_set(tmpdir):
    """
    Fixture creating a small example file.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_path = os.path.join(data_dir, "small_sample_data_set")

    data_set = ASDFDataSet(asdf_filename)

    for filename in glob.glob(os.path.join(data_path, "*.xml")):
        if "quake.xml" in filename:
            data_set.add_quakeml(filename)
        else:
            data_set.add_stationxml(filename)

    for filename in glob.glob(os.path.join(data_path, "*.mseed")):
        data_set.add_waveforms(filename, tag="raw_recording",
                               event_id=data_set.events[0])

    # Flush and finish writing.
    del data_set

    # Return filename and path to tempdir, no need to always create a
    # new one.
    return Namespace(filename=asdf_filename, tmpdir=tmpdir.strpath)
Author: petrrr, Project: pyasdf, Lines: 25, Source: test_asdf_data_set.py


Example 19: test_processing_multiprocessing

def test_processing_multiprocessing(example_data_set):
    """
    Tests the processing using multiprocessing.
    """
    def null_processing(st, inv):
        return st

    data_set = ASDFDataSet(example_data_set.filename)
    output_filename = os.path.join(example_data_set.tmpdir, "output.h5")
    # Do not actually do anything. Apply an empty function.
    data_set.process(null_processing, output_filename,
                     {"raw_recording": "raw_recording"})

    del data_set
    data_set = ASDFDataSet(example_data_set.filename)
    out_data_set = ASDFDataSet(output_filename)

    assert data_set == out_data_set
Author: petrrr, Project: pyasdf, Lines: 18, Source: test_asdf_data_set.py


Example 20: test_str_method_of_aux_data

def test_str_method_of_aux_data(tmpdir):
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)

    # With provenance id.
    data = np.random.random((10, 10))
    # The data must NOT start with a number.
    data_type = "RandomArray"
    tag = "test_data"
    parameters = {"a": 1, "b": 2.0, "e": "hallo"}
    provenance_id = "{http://example.org}test"

    data_set.add_auxiliary_data(data=data, data_type=data_type,
                                tag=tag, parameters=parameters,
                                provenance_id=provenance_id)
    assert \
        str(data_set.auxiliary_data.RandomArray.test_data) == (
            "Auxiliary Data of Type 'RandomArray'\n"
            "\tTag: 'test_data'\n"
            "\tProvenance ID: '{http://example.org}test'\n"
            "\tData shape: '(10, 10)', dtype: 'float64'\n"
            "\tParameters:\n"
            "\t\ta: 1\n"
            "\t\tb: 2.0\n"
            "\t\te: hallo")

    # Without.
    data = np.random.random((10, 10))
    # The data must NOT start with a number.
    data_type = "RandomArray"
    tag = "test_data_2"
    parameters = {"a": 1, "b": 2.0, "e": "hallo"}

    data_set.add_auxiliary_data(data=data, data_type=data_type,
                                tag=tag, parameters=parameters)
    assert \
        str(data_set.auxiliary_data.RandomArray.test_data_2) == (
            "Auxiliary Data of Type 'RandomArray'\n"
            "\tTag: 'test_data_2'\n"
            "\tData shape: '(10, 10)', dtype: 'float64'\n"
            "\tParameters:\n"
            "\t\ta: 1\n"
            "\t\tb: 2.0\n"
            "\t\te: hallo")
Author: petrrr, Project: pyasdf, Lines: 44, Source: test_asdf_data_set.py



Note: The pyasdf.ASDFDataSet class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.

