
Python numpy.save Function Code Examples


This article collects typical usage examples of the numpy.save function in Python. If you have been struggling with questions like: What exactly does the save function do? How do I call it? What does it look like in real code? Then the hand-picked examples below should help.



The sections below present 20 code examples of the save function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
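Before the project examples, here is a minimal, self-contained sketch of the np.save / np.load round trip that all of them build on (the file name and array contents below are arbitrary illustrations):

import numpy as np

arr = np.arange(12).reshape(3, 4)   # any array to persist
np.save('my_array.npy', arr)        # '.npy' is appended automatically if the name lacks it
restored = np.load('my_array.npy')  # read the array back from the binary .npy file
assert (arr == restored).all()

np.save stores one array per file; to bundle several arrays in a single file, use np.savez, as Example 15 below does to store data and mask together.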

Example 1: segment_request

def segment_request(request):
    max_iteration=int(request[:,0].max()//optimal_interval)
    for i in range(max_iteration+1):
        temp=request[np.logical_and(request[:,0]<=optimal_interval*(i+1),request[:,0]>=(optimal_interval*i+1))]
        temp[:,0]=temp[:,0]-optimal_interval*i
        np.save('experiment_%d/new_passenger_%d.npy'% (experiment,i),temp)
    return max_iteration
Developer: locknard | Project: demand-responsive-transit | Lines of code: 7 | Source: IP_0322.py


Example 2: main

def main():

    for i in list(range(4))[::-1]:
        print(i+1)
        time.sleep(1)


    paused = False
    while True:

        if not paused:
            # 800x600 windowed mode
            screen = grab_screen(region=(0,40,800,640))
            last_time = time.time()
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
            screen = cv2.resize(screen, (160,120))
            # resize to something a bit more acceptable for a CNN
            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen,output])
            
            if len(training_data) % 1000 == 0:
                print(len(training_data))
                np.save(file_name,training_data)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
Developer: gcm0621 | Project: pygta5 | Lines of code: 35 | Source: create_training_data.py


Example 3: compute_signif_conf_Z_list

def compute_signif_conf_Z_list(cor_mat_file, conf_cor_mat_file, coords_file):

    import os
    import nibabel as nib
    import numpy as np

    from dmgraphanalysis.utils_cor import export_List_net_from_list, export_Louvain_net_from_list
    from dmgraphanalysis.utils_cor import return_signif_conf_net_list
    from dmgraphanalysis.utils_plot import plot_cormat

    print("loading cor_mat_file")

    cor_mat = np.load(cor_mat_file)

    print("loading conf_cor_mat_file")

    conf_cor_mat = np.load(conf_cor_mat_file)

    print('load coords')

    coords = np.array(np.loadtxt(coords_file), dtype=int)

    print("computing net_list by thresholding conf_cor_mat based on distance and net_threshold")

    net_list, binary_signif_matrix = return_signif_conf_net_list(cor_mat, conf_cor_mat)

    print(binary_signif_matrix.shape)

    print("saving binary_signif_matrix")

    binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.npy')

    np.save(binary_signif_matrix_file, binary_signif_matrix)

    print("plotting binary_signif_matrix")

    plot_binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.eps')

    plot_cormat(plot_binary_signif_matrix_file, binary_signif_matrix, list_labels=[])

    ## Z correl_mat as list of edges

    print("saving net_list as list of edges")

    net_List_file = os.path.abspath('net_List_signif_conf.txt')

    export_List_net_from_list(net_List_file, net_list)

    ### Z correl_mat as Louvain format

    print("saving net_list as Louvain format")

    net_Louvain_file = os.path.abspath('net_Louvain_signif_conf.txt')

    export_Louvain_net_from_list(net_Louvain_file, net_list, coords)

    #net_List_file = ''
    #net_Louvain_file = ''

    return net_List_file, net_Louvain_file
Developer: Lx37 | Project: dmgraphanalysis | Lines of code: 60 | Source: modularity.py


Example 4: plotForce

def plotForce():
    figure(size=3,aspect=0.5)
    subplot(1,2,1)
    from EvalTraj import plotFF
    plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8)
    subplot_annotate()
    
    subplot(1,2,2)
    for i in [1,2,3,4]:
        R=np.squeeze(np.load('Rdpse%d.npy'%i))
        R=stats.nanmedian(R,axis=2)[:,1:,:]
        dps=np.linspace(-1,1,201)[1:]
        plt.plot(dps,R[:,:,2].mean(0));
    plt.legend([0,0.1,0.2,0.3],loc=3) 
    i=2
    R=np.squeeze(np.load('Rdpse%d.npy'%i))
    R=stats.nanmedian(R,axis=2)[:,1:,:]
    mn=np.argmin(R,axis=1)
    y=np.random.randn(mn.shape[0])*0.00002+0.0438
    plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39  ,  0.76,  0.64])
    plt.xlabel('Displacement of Force Origin')
    plt.ylabel('Average Net Force Magnitude')
    hh=dps[mn[:,2]]
    err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0])
    err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0])
    m=np.mean(hh)
    print(m, m - err, m + err)
    np.save('force',[m, m-err,m+err,m-err2,m+err2])
    plt.xlim([-0.5,0.5])
    plt.ylim([0.0435,0.046])
    plt.grid(b=True,axis='x')
    subplot_annotate()
Developer: simkovic | Project: wolfpackRevisited | Lines of code: 32 | Source: Evaluation.py


Example 5: __init__

    def __init__(
            self, save_data=True,
            use_saved_features=True, use_saved_npz=True):
        'Init by getting all the works.'
        self._self_dir = path.abspath(path.dirname(__file__))
        self.use_saved_features = use_saved_features
        self.use_saved_npz = use_saved_npz
        self.save_data = save_data
        self.npz_data = None
        self._vectorizer = DictVectorizer()

        if save_data and not path.isdir('features'):
            os.mkdir('features')

        work_fname = 'features/all_works.npy'
        if use_saved_features and path.isfile(work_fname):
            self._works = np.load(work_fname)
        else:
            works = []
            with open(self._get_path('work_list/AllWorks.txt')) as f:
                for line in f:
                    works.append(line.split('-')[0])
            self._works = np.array(works)
            if self.save_data:
                np.save('features/all_works', self._works)
Developer: fcchou | Project: CS229-project | Lines of code: 25 | Source: features.py


Example 6: vectorize_and_aggregate

def vectorize_and_aggregate(in_data_file_list, mask_file, matrix_name, parcellation_path, fwhm, use_diagonal,
                            use_fishers_z, df_file, df_col_names):
    import os, pickle
    import numpy as np
    from LeiCA_LIFE.learning.prepare_data_utils import vectorize_ss

    # get an example of the data:
    #save_template: template file; for behav: col names
    vectorized_data, data_type, masker, save_template = vectorize_ss(in_data_file_list[0], mask_file, matrix_name,
                                                                     parcellation_path, fwhm, use_diagonal,
                                                                     use_fishers_z, df_file,
                                                                     df_col_names)
    vectorized_data = np.zeros((len(in_data_file_list), vectorized_data.shape[1]))
    vectorized_data.fill(np.nan)

    for i, in_data_file_ss in enumerate(in_data_file_list):
        vectorized_data[i, :], _, _, _ = vectorize_ss(in_data_file_ss, mask_file, matrix_name, parcellation_path, fwhm,
                                                      use_diagonal, use_fishers_z, df_file, df_col_names)

    vectorized_aggregated_file = os.path.abspath('vectorized_aggregated_data.npy')
    np.save(vectorized_aggregated_file, vectorized_data)

    unimodal_backprojection_info = {'data_type': data_type,
                                    'masker': masker,
                                    'save_template': save_template
                                    }
    unimodal_backprojection_info_file = os.path.abspath('unimodal_backprojection_info.pkl')
    pickle.dump(unimodal_backprojection_info, open(unimodal_backprojection_info_file, 'wb'))
    return vectorized_aggregated_file, unimodal_backprojection_info_file
Developer: fliem | Project: LeiCA_LIFE | Lines of code: 29 | Source: prepare_data_utils.py


Example 7: train_word2id

def train_word2id():
    """Convert all words in the training set to their corresponding ids."""
    time0 = time.time()
    print('Processing train data.')
    df_train = pd.read_csv('../raw_data/question_train_set.txt', sep='\t', usecols=[0, 2, 4],
                           names=['question_id', 'word_title', 'word_content'], dtype={'question_id': object})
    print('training question number %d ' % len(df_train))
    # For questions without content, fall back to the title
    na_content_indexs = list()
    for i in tqdm(range(len(df_train))):
        word_content = df_train.word_content.values[i]
        if type(word_content) is float:
            na_content_indexs.append(i)
    print('There are %d train questions without content.' % len(na_content_indexs))
    for na_index in tqdm(na_content_indexs):
        df_train.at[na_index, 'word_content'] = df_train.at[na_index, 'word_title']
    # Drop questions without a title
    na_title_indexs = list()
    for i in range(len(df_train)):
        word_title = df_train.word_title.values[i]
        if type(word_title) is float:
            na_title_indexs.append(i)
    print('There are %d train questions without title.' % len(na_title_indexs))
    df_train = df_train.drop(na_title_indexs)
    print('After dropping, training question number (should be 2999952) = %d' % len(df_train))
    # Convert to id form
    p = Pool()
    train_title = np.asarray(p.map(get_id4words, df_train.word_title.values))
    np.save('../data/wd_train_title.npy', train_title)
    train_content = np.asarray(p.map(get_id4words, df_train.word_content.values))
    np.save('../data/wd_train_content.npy', train_content)
    p.close()
    p.join()
    print('Finished changing the training words to ids. Elapsed time %g s' % (time.time() - time0))
Developer: brucexia6116 | Project: zhihu-text-classification | Lines of code: 34 | Source: word2id.py


Example 8: consolidate_games

    def consolidate_games(self, name, samples):
        print('>>> Creating consolidated numpy arrays')

        if self.use_generator:
            print('>>> Return generator')
            generator = DataGenerator(self.data_dir, samples)
            return generator

        files_needed = set(file_name for file_name, index in samples)
        print('>>> Total number of files: ' + str(len(files_needed)))

        file_names = []
        for zip_file_name in files_needed:
            file_name = zip_file_name.replace('.zip', '') + name
            file_names.append(file_name)

        feature_list = []
        label_list = []
        for file_name in file_names:
            X = np.load(self.data_dir + '/' + file_name + '_features.npy')
            y = np.load(self.data_dir + '/' + file_name + '_labels.npy')
            feature_list.append(X)
            label_list.append(y)
            print('>>> Done')

        features = np.concatenate(feature_list, axis=0)
        labels = np.concatenate(label_list, axis=0)

        feature_file = self.data_dir + '/' + str(self.num_planes) + '_plane_features_' + name
        label_file = self.data_dir + '/' + str(self.num_planes) + '_plane_labels_' + name

        np.save(feature_file, features)
        np.save(label_file, labels)

        return features, labels
Developer: Riashat | Project: betago | Lines of code: 35 | Source: base_processor.py


Example 9: concat_ts

def concat_ts(all_ts_files):

    import numpy as np
    import os

    print(all_ts_files)

    for i, ts_file in enumerate(all_ts_files):

        ## loading the time series for this file
        ts = np.load(ts_file)

        #print "all_ts: "
        print(ts.shape)

        if i == 0:
            concat_ts = ts.copy()
            #print concat_ts.shape
        else:
            concat_ts = np.concatenate((concat_ts, ts), axis=0)
            #print concat_ts.shape

    print(concat_ts.shape)

    ### saving time series
    concat_ts_file = os.path.abspath("concat_ts.npy")
    np.save(concat_ts_file, concat_ts)

    return concat_ts_file
Developer: annapasca | Project: neuropype_ephy | Lines of code: 30 | Source: import_mat.py


Example 10: test_word2id

def test_word2id():
    """Convert all words in the test set to their corresponding ids."""
    time0 = time.time()
    print('Processing eval data.')
    df_eval = pd.read_csv('../raw_data/question_eval_set.txt', sep='\t', usecols=[0, 2, 4],
                          names=['question_id', 'word_title', 'word_content'], dtype={'question_id': object})
    print('test question number %d' % len(df_eval))
    # For questions without a title, fall back to the content
    na_title_indexs = list()
    for i in range(len(df_eval)):
        word_title = df_eval.word_title.values[i]
        if type(word_title) is float:
            na_title_indexs.append(i)
    print('There are %d test questions without title.' % len(na_title_indexs))
    for na_index in na_title_indexs:
        df_eval.at[na_index, 'word_title'] = df_eval.at[na_index, 'word_content']
    # For questions without content, fall back to the title
    na_content_indexs = list()
    for i in tqdm(range(len(df_eval))):
        word_content = df_eval.word_content.values[i]
        if type(word_content) is float:
            na_content_indexs.append(i)
    print('There are %d test questions without content.' % len(na_content_indexs))
    for na_index in tqdm(na_content_indexs):
        df_eval.at[na_index, 'word_content'] = df_eval.at[na_index, 'word_title']
    # Convert to id form
    p = Pool()
    eval_title = np.asarray(p.map(get_id4words, df_eval.word_title.values))
    np.save('../data/wd_eval_title.npy', eval_title)
    eval_content = np.asarray(p.map(get_id4words, df_eval.word_content.values))
    np.save('../data/wd_eval_content.npy', eval_content)
    p.close()
    p.join()
    print('Finished changing the eval words to ids. Elapsed time %g s' % (time.time() - time0))
Developer: brucexia6116 | Project: zhihu-text-classification | Lines of code: 34 | Source: word2id.py


Example 11: get_buffer_callback

    def get_buffer_callback(overviewBuffers, overflow, triggeredAt, triggered, auto_stop, nValues):

        #print('Callback for saving to disk')
        # create a filename based on the current timestamp
        #filename = time.strftime("%Y%m%d_%H_%M_%S_%f.csv")
        filename = datetime.datetime.now().strftime("%Y%m%d_%H_%M_%S_%f")
        CH1 = 'CH1_' + filename
        #CH2 = 'CH2_' + filename

        # cast the 2d pointer from the C callback into a Python pointer
        ob = ctypes.cast(overviewBuffers, ctypes.POINTER(ctypes.POINTER(ctypes.c_short)))

        # build arrays from the pointer data: ob[0] -> CH1, ob[1] -> CH2
        streamed_data_CH1 = np.fromiter(ob[0], dtype=np.short, count=nValues)
        #streamed_data_CH2 = np.fromiter(ob[1], dtype=np.short, count=nValues)

        # save the array data in numpy file format (raw strings avoid invalid '\' escapes)
        path1 = os.path.normpath(r'C:\Users\ckattmann\Documents\GitHub\pqpico\Data') + '/' + CH1
        #path2 = os.path.normpath(r'C:\Users\ckattmann\Documents\GitHub\pqpico\Data') + '/' + CH2

        np.save(path1, streamed_data_CH1)
        #np.save(path2, streamed_data_CH2)
        #print('File saved:', CH1, CH2)

        return 0
Developer: kipfer | Project: pqpico | Lines of code: 26 | Source: Picoscope2000.py


Example 12: main

def main(root='/tmp/measurements', output=None):
    data = []
    for s in os.listdir(root):
        subject = []
        for b in os.listdir(os.path.join(root, s)):
            block = []
            bweight, bspeed, bhand, bpaths = b.split('-')[1:]
            for t in os.listdir(os.path.join(root, s, b)):
                thand, tspeed = re.search(r'(left|right)-speed_(\d\.\d+)', t).groups()
                config = np.tile([
                    C[bweight], C[bspeed], C[bhand], C[bpaths],
                    C[thand], float(tspeed)], (120, 1))
                block.append(
                    np.hstack([
                        config,
                        np.loadtxt(os.path.join(root, s, b, t),
                                   skiprows=1, delimiter=',')]))
            subject.append(block)
        if len(subject) == 3:
            data.append(subject)
        else:
            print('incorrect block count! discarding {}'.format(s))
    data = np.array(data)
    logging.info('loaded data %s', data.shape)
    if output:
        np.save(output, data.astype('f'))
Developer: EmbodiedCognition | Project: tracing-experiment | Lines of code: 26 | Source: import-csvs.py


Example 13: Cluster

def Cluster(param, DATA_FOLDER):
    ts = ListaSet(param)
    Data = scipy.io.loadmat('./data/filter_template3.mat')
    Filter2 = np.rot90( Data['Filter2'], 2)

#    corList = []
#    TVList= []
    corArr = np.empty(ts.get_num_images())
    TVArr = np.empty(ts.get_num_images())

    for i in range( ts.get_num_images()):
#    for i in range( 100):
        tmp = ts.get_input(i)
        tmp2 = tmp - np.mean( tmp)
        tmp3 = tmp2 / np.linalg.norm(tmp2, 'fro')
#        Cor = scipy.signal.convolve2d(tmp3, Filter2, 'valid')
#        corList.append( Cor)    
        corArr[i] = scipy.signal.convolve2d(tmp3, Filter2, 'valid')
    
        dx = scipy.ndimage.sobel(tmp, 0)
        dy = scipy.ndimage.sobel(tmp, 1)
        mag = np.hypot(dx, dy)
#        TVList.append( np.sum(mag))
        TVArr[i] = np.sum(mag)

        if i % 10000 == 0:
            print(i)

    np.save( DATA_FOLDER+'/trainCorrelation.npy', corArr)
    np.save( DATA_FOLDER+'/trainTotalVariation.npy', TVArr)
    return
Developer: lelegan | Project: DLSR | Lines of code: 31 | Source: ListaPrvd_regr.py


Example 14: sample_lnprob

def sample_lnprob(weight_index):
    import emcee

    ndim = 4
    nwalkers = 8 * ndim
    print("using {} walkers".format(nwalkers))
    p0 = np.vstack((np.random.uniform(-0.5, 2, size=(1, nwalkers)),
                    np.random.uniform(50, 300, size=(1, nwalkers)),
                    np.random.uniform(0.2, 1.5, size=(1, nwalkers)),
                    np.random.uniform(0.2, 1.5, size=(1, nwalkers)))).T

    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(weight_index,), threads=cfg['threads'])

    print("Running Sampler")
    pos, prob, state = sampler.run_mcmc(p0, cfg['burn_in'])

    print("Burn-in complete")
    sampler.reset()
    sampler.run_mcmc(pos, cfg['samples'])

    samples = sampler.flatchain
    np.save(cfg['outdir'] + "samples_w{}.npy".format(weight_index), samples)

    import triangle
    fig = triangle.corner(samples)
    fig.savefig(cfg['outdir'] + "triangle_w{}.png".format(weight_index))
Developer: kgullikson88 | Project: Starfish | Lines of code: 26 | Source: optimize_emulator.py


Example 15: assertCubeDataAlmostEqual

    def assertCubeDataAlmostEqual(self, cube, reference_filename, *args, **kwargs):
        reference_path = self.get_result_path(reference_filename)
        if self._check_reference_file(reference_path):
            kwargs.setdefault("err_msg", "Reference file %s" % reference_path)

            result = np.load(reference_path)
            if isinstance(result, np.lib.npyio.NpzFile):
                self.assertIsInstance(cube.data, ma.MaskedArray, "Cube data was not a masked array.")
                # Avoid comparing any non-initialised array data.
                data = cube.data.filled()
                np.testing.assert_array_almost_equal(data, result["data"], *args, **kwargs)
                np.testing.assert_array_equal(cube.data.mask, result["mask"])
            else:
                np.testing.assert_array_almost_equal(cube.data, result, *args, **kwargs)
        else:
            self._ensure_folder(reference_path)
            logger.warning("Creating result file: %s", reference_path)
            if isinstance(cube.data, ma.MaskedArray):
                # Avoid recording any non-initialised array data.
                data = cube.data.filled()
                with open(reference_path, "wb") as reference_file:
                    np.savez(reference_file, data=data, mask=cube.data.mask)
            else:
                with open(reference_path, "wb") as reference_file:
                    np.save(reference_file, cube.data)
Developer: djkirkham | Project: iris | Lines of code: 25 | Source: __init__.py


Example 16: labels

    def labels(self):
        '''
        Decide the labels:
        2 for insecure Jos, 1 for secure Jos, -1 for Ock, 0 for others.
        '''
        #TODO consider labeling different authors for multiclass assignment
        fname = 'features/labels.npy'  # include '.npy' so np.load finds the file np.save writes
        if self.use_saved_features and path.isfile(fname):
            return np.load(fname)
        with open(self._get_path('work_list/Josquin_secure.txt')) as f:
            secure_jos = set(f.read().splitlines())
        labels = []
        for work in self._works:
            label = 0
            if 'Ock' in work:
                label = -1
            if 'Jos' in work:
                if work in secure_jos:
                    label = 1
                else:
                    label = 2
            labels.append(label)
        labels = np.array(labels, dtype=int)
        if self.save_data:
            np.save(fname, labels)
        return labels
Developer: fcchou | Project: CS229-project | Lines of code: 26 | Source: features.py


Example 17: convert_single_propagators

def convert_single_propagators(files):
    "Construct pion correlators from individual overlap propagators."
    # Some basic checks on the input.
    head0, config0, middle0, mass0 = files[0].split('.')
    for f in files:
        check_length(f)
        head, config, middle, mass = f.split('.')
        if (head != head0) or (middle != middle0) or (mass != mass0):
            print "You might not want to combine these!"
            return 1

    # Construct the block of correlators.
    correlators = []
    for f in files:
        print(f)
        correlators.append(pion_correlator(f))
    correlators = np.array(correlators)

    # Basic checks on the output.
    print(correlators.shape)
    assert (len(files), nt) == correlators.shape

    # Write output.
    m = float('0.' + mass0)
    np.save(correlator_name(m), correlators)
    print(correlators[0])
Developer: atlytle | Project: tifr | Lines of code: 26 | Source: read_overlap.py


Example 18: generate

    def generate( self, out_path, aux, idx_in, idx_out ):
        scheme_high = amico.lut.create_high_resolution_scheme( self.scheme, b_scale = 1 )
        protocolHR = self.scheme2noddi( scheme_high )

        nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Coupled contributions
        IC_KAPPAs = 1 / np.tan(self.IC_ODs*np.pi/2)
        for kappa in IC_KAPPAs:
            signal_ic = self.synth_meas_watson_SH_cyl_neuman_PGSE( np.array([self.dPar*1E-6, 0, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]), 0 )

            for v_ic in self.IC_VFs:
                dPerp = self.dPar*1E-6 * (1 - v_ic)
                signal_ec = self.synth_meas_watson_hindered_diffusion_PGSE( np.array([self.dPar*1E-6, dPerp, kappa]), protocolHR['grad_dirs'], np.squeeze(protocolHR['gradient_strength']), np.squeeze(protocolHR['delta']), np.squeeze(protocolHR['smalldel']), np.array([0,0,1]) )

                signal = v_ic*signal_ic + (1-v_ic)*signal_ec
                lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False )
                np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
                progress.update()

        # Isotropic
        signal = self.synth_meas_iso_GPD( self.dIso*1E-6, protocolHR)
        lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True )
        np.save( pjoin( out_path, 'A_%03d.npy'%progress.i) , lm )
        progress.update()
Developer: davidrs06 | Project: AMICO | Lines of code: 26 | Source: models.py


Example 19: calculate_and_save

def calculate_and_save():
    energies = np.empty((B_list.size, 2**number_of_spins))
    for i,B in enumerate(B_list):
        print(i)
        calc = ising_calculator_AFM(number_of_spins, alpha, B)
        energies[i,:] = calc.find_energies()
    np.save('energy_array', energies)
Developer: HaeffnerLab | Project: HaeffnerLabLattice | Lines of code: 7 | Source: plot_eigenenergies.py


Example 20: relax_system

def relax_system(mesh):

    sim = Sim(mesh, chi=1e-3, name='relax', driver='llbar_full')

    sim.driver.set_tols(rtol=1e-7, atol=1e-7)
    sim.Ms = 8.0e5
    sim.driver.alpha = 0.1
    sim.beta = 0
    sim.driver.gamma = 2.211e5

    sim.set_m((1, 0.25, 0.1))
    # sim.set_m(np.load('m0.npy'))

    A = 1.3e-11
    exch = UniformExchange(A=A)
    sim.add(exch)

    mT = 795.7747154594767
    zeeman = Zeeman([-100 * mT, 4.3 * mT, 0], name='H')
    sim.add(zeeman, save_field=True)

    demag = Demag()
    sim.add(demag)

    ONE_DEGREE_PER_NS = 17453292.52

    sim.relax(dt=1e-12, stopping_dmdt=0.01,
              max_steps=5000, save_m_steps=100, save_vtk_steps=50)

    np.save('m0.npy', sim.spin)
Developer: computationalmodelling | Project: fidimag | Lines of code: 30 | Source: relax_system.py



Note: the numpy.save examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please follow each project's license when distributing or using the code, and do not republish without permission.

