Python numpy.arange Function Code Examples


This article collects typical usage examples of the numpy.arange function in Python. If you have been wondering how exactly arange is used, how to call it, or what real-world usage looks like, the hand-picked code samples below may help.



A total of 20 arange code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
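
For orientation before the project excerpts, here is a minimal standalone sketch of the arange call patterns that recur below (stop only; start/stop/step; explicit dtype). The variable names are purely illustrative and are not taken from any of the cited projects.

import numpy as np

# arange(stop): integers from 0 up to, but not including, stop
idx = np.arange(5)                    # array([0, 1, 2, 3, 4])

# arange(start, stop, step): values over the half-open interval [start, stop)
mags = np.arange(5.0, 8.0, 0.5)       # 5.0, 5.5, ..., 7.5  (6 values)

# the dtype can be requested explicitly
onsets = np.arange(10, dtype=float)   # array([0., 1., ..., 9.])

# With a fractional step, floating-point rounding can make the final element
# unreliable; np.linspace is often safer when the exact endpoint matters.
grid = np.linspace(0.0, 1.0, 21)      # 21 evenly spaced points, endpoint included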

Example 1: _update_datamap

    def _update_datamap(self):
        self._last_region = []
        # Create a new grid of the appropriate size, initialize it with new
        # Cell instance (of type self.celltype), and perform point insertion
        # on the new data.
        if self._data is None:
            self._cellgrid = array([], dtype=object)
            self._cell_lefts = array([])
            self._cell_bottoms = array([])
        else:
            num_x_cells, num_y_cells = self._calc_grid_dimensions()
            self._cellgrid = zeros((num_x_cells, num_y_cells), dtype=object)
            for i in range(num_x_cells):
                for j in range(num_y_cells):
                    self._cellgrid[i,j] = self.celltype(parent=self)
            ll, ur = self._extents
            cell_width = ur[0]/num_x_cells
            cell_height = ur[1]/num_y_cells

            # calculate the left and bottom edges of all the cells and store
            # them in two arrays
            self._cell_lefts = arange(ll[0], ll[0]+ur[0]-cell_width/2, step=cell_width)
            self._cell_bottoms = arange(ll[1], ll[1]+ur[1]-cell_height/2, step=cell_height)

            self._cell_extents = (cell_width, cell_height)

            # insert the data points
            self._basic_insertion(self.celltype)
        return
Developer ID: 5n1p, Project: chaco, Lines of code: 29, Source file: subdivision_mapper.py


Example 2: test_basic_instantiation

 def test_basic_instantiation(self):
     '''
     Tests the basic instantiation of the SHIFT class
     '''
     # Instantiation with a float
     self.model = Shift(5.0)
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.array([5.0]))
     self.assertEqual(self.model.number_magnitudes, 1)
     # Instantiation with a numpy array
     self.model = Shift(np.arange(5., 8., 0.5))
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.arange(5., 8., 0.5))
     self.assertEqual(self.model.number_magnitudes, 6)
     # Instantiation with a list
     self.model = Shift([5., 6., 7., 8.])
     np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                          np.array([5., 6., 7., 8.]))
     self.assertEqual(self.model.number_magnitudes, 4)
     # Otherwise raise an error
     with self.assertRaises(ValueError) as ae:
         self.model = Shift(None)
     self.assertEqual(ae.exception.message,
                      'Minimum magnitudes must be float, list or array')
     # Check regionalisation - assuming defaults
     self.model = Shift(5.0)
     for region in self.model.regionalisation.keys():
         self.assertDictEqual(BIRD_GLOBAL_PARAMETERS[region],
                              self.model.regionalisation[region])
     np.testing.assert_array_almost_equal(np.log10(self.model.base_rate),
                                          np.array([-20.74610902]))
Developer ID: atalayayele, Project: hmtk, Lines of code: 31, Source file: test_shift_calculator.py


Example 3: compute_dr_wrt

    def compute_dr_wrt(self, wrt):

        if wrt not in (self.v, self.rt, self.t):
            return
        
        if wrt is self.t:
            if not hasattr(self, '_drt') or self._drt.shape[0] != self.v.r.size:                
                IS = np.arange(self.v.r.size)
                JS = IS % 3
                data = np.ones(len(IS))
                self._drt = sp.csc_matrix((data, (IS, JS)))
            return self._drt
        
        if wrt is self.rt:
            rot, rot_dr = cv2.Rodrigues(self.rt.r)
            rot_dr = rot_dr.reshape((3,3,3))
            dr = np.einsum('abc, zc -> zba', rot_dr, self.v.r).reshape((-1,3))
            return dr
        
        if wrt is self.v:
            rot = cv2.Rodrigues(self.rt.r)[0]
            
            IS = np.repeat(np.arange(self.v.r.size), 3)
            JS = np.repeat(np.arange(self.v.r.size).reshape((-1,3)), 3, axis=0)
            data = np.vstack([rot for i in range(self.v.r.size/3)])
            result = sp.csc_matrix((data.ravel(), (IS.ravel(), JS.ravel())))
            return result
Developer ID: cadik, Project: opendr, Lines of code: 27, Source file: camera.py


Example 4: scree_plot

def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
Developer ID: thomasbrawner, Project: python_tools, Lines of code: 26, Source file: scree_plot.py


Example 5: test_array_richcompare_legacy_weirdness

    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
Developer ID: EelcoHoogendoorn, Project: numpy, Lines of code: 30, Source file: test_deprecations.py


Example 6: __unit_test_onset_function

def __unit_test_onset_function(metric):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # First, test for a warning on empty onsets
        metric(np.array([]), np.arange(10))
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference onsets are empty."
        metric(np.arange(10), np.array([]))
        assert len(w) == 2
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Estimated onsets are empty."
        # And that the metric is 0
        assert np.allclose(metric(np.array([]), np.array([])), 0)

    # Now test validation function - onsets must be 1d ndarray
    onsets = np.array([[1., 2.]])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be in seconds (so not huge)
    onsets = np.array([1e10, 1e11])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be sorted
    onsets = np.array([2., 1.])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)

    # Valid onsets which are the same produce a score of 1 for all metrics
    onsets = np.arange(10, dtype=np.float)
    assert np.allclose(metric(onsets, onsets), 1)
Developer ID: justinsalamon, Project: mir_eval, Lines of code: 28, Source file: test_onset.py


Example 7: barplot

def barplot(grouped_df, _column, statistic, levels=[0]):
    means = grouped_df.groupby(level=levels).mean()
    bar_width = 1.0/(len(means.index))
    error_config = {'ecolor': '0.1'}
    sems = grouped_df.groupby(level=levels).sem().fillna(0)
    fig = plt.figure()
    fig.set_size_inches(10,6)
    ax = fig.add_subplot(1,1,1)
    
    plt.bar(np.arange(0.1,(len(means.index)+0.1),1), 
            means[_column].fillna(0), 
            color= '#AAAAAA',
            yerr=sems[_column].fillna(0),
            error_kw=error_config,
            label=list(means.index))

    if means[_column].values.min() >= 0:
        ax.set_ylim(0,1.1*((means[_column] + sems[_column]).values.max()))
    else:
        ax.set_ylim(1.1*((means[_column] - sems[_column]).values.min()),1.1*((means[_column] + sems[_column]).values.max()))
    
    ax.set_ylabel(statistic + ' ' + u"\u00B1" + ' SEM', fontsize=20)   # +/- sign is u"\u00B1"
    ax.set_xticks(np.arange(0.1+bar_width/2.0,(len(means.index)+0.1+(bar_width/2.0)),1)) 
    ax.set_xticklabels(list(means.index), rotation=90)
    ax.tick_params(axis='y', labelsize=16 )
    ax.set_xlabel('Group', fontsize=20)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    return fig
Developer ID: dbath, Project: wahnsinn, Lines of code: 31, Source file: utilities.py


Example 8: test_sort_index_multicolumn

    def test_sort_index_multicolumn(self):
        import random
        A = np.arange(5).repeat(20)
        B = np.tile(np.arange(5), 20)
        random.shuffle(A)
        random.shuffle(B)
        frame = DataFrame({'A': A, 'B': B,
                           'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'])
        result = frame.sort_values(by=['A', 'B'])
        indexer = np.lexsort((frame['B'], frame['A']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'], ascending=False)
        result = frame.sort_values(by=['A', 'B'], ascending=False)
        indexer = np.lexsort((frame['B'].rank(ascending=False),
                              frame['A'].rank(ascending=False)))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['B', 'A'])
        result = frame.sort_values(by=['B', 'A'])
        indexer = np.lexsort((frame['A'], frame['B']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
Developer ID: AlexisMignon, Project: pandas, Lines of code: 33, Source file: test_sorting.py


Example 9: test_sort_index_different_sortorder

    def test_sort_index_different_sortorder(self):
        A = np.arange(20).repeat(5)
        B = np.tile(np.arange(5), 20)

        indexer = np.random.permutation(100)
        A = A.take(indexer)
        B = B.take(indexer)

        df = DataFrame({'A': A, 'B': B,
                        'C': np.random.randn(100)})

        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['A', 'B'], ascending=[1, 0])
        result = df.sort_values(by=['A', 'B'], ascending=[1, 0])

        ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
        expected = df.take(ex_indexer)
        assert_frame_equal(result, expected)

        # test with multiindex, too
        idf = df.set_index(['A', 'B'])

        result = idf.sort_index(ascending=[1, 0])
        expected = idf.take(ex_indexer)
        assert_frame_equal(result, expected)

        # also, Series!
        result = idf['C'].sort_index(ascending=[1, 0])
        assert_series_equal(result, expected['C'])
Developer ID: AlexisMignon, Project: pandas, Lines of code: 30, Source file: test_sorting.py


Example 10: get_gabors

    def get_gabors(self, rf):
        lams =  float(rf[0])/self.sfs # lambda = 1./sf  #1./np.array([.1,.25,.4])
        sigma = rf[0]/2./np.pi
        # rf = [100,100]
        gabors = np.zeros(( len(oris),len(phases),len(lams), rf[0], rf[1] ))

        i = np.arange(-rf[0]/2+1,rf[0]/2+1)
        #print i
        j = np.arange(-rf[1]/2+1,rf[1]/2+1)
        ii,jj = np.meshgrid(i,j)
        for o, theta in enumerate(self.oris):
            x = ii*np.cos(theta) + jj*np.sin(theta)
            y = -ii*np.sin(theta) + jj*np.cos(theta)

            for p, phase in enumerate(self.phases):
                for s, lam in enumerate(lams):
                    fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
                    fxx -= np.mean(fxx)
                    fxx /= np.linalg.norm(fxx)

                    #if p==0:
                        #plt.subplot(len(oris),len(lams),count+1)
                        #plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
                        #count+=1

                    gabors[o,p,s,:,:] = fxx
        plt.show()
        return gabors
Developer ID: Pulvinar, Project: psychopy_ext, Lines of code: 28, Source file: models.py


Example 11: test_groupby_groups_datetimeindex

    def test_groupby_groups_datetimeindex(self):
        # GH#1430
        periods = 1000
        ind = pd.date_range(start='2012/1/1', freq='5min', periods=periods)
        df = DataFrame({'high': np.arange(periods),
                        'low': np.arange(periods)}, index=ind)
        grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))

        # it works!
        groups = grouped.groups
        assert isinstance(list(groups.keys())[0], datetime)

        # GH#11442
        index = pd.date_range('2015/01/01', periods=5, name='date')
        df = pd.DataFrame({'A': [5, 6, 7, 8, 9],
                           'B': [1, 2, 3, 4, 5]}, index=index)
        result = df.groupby(level='date').groups
        dates = ['2015-01-05', '2015-01-04', '2015-01-03',
                 '2015-01-02', '2015-01-01']
        expected = {pd.Timestamp(date): pd.DatetimeIndex([date], name='date')
                    for date in dates}
        tm.assert_dict_equal(result, expected)

        grouped = df.groupby(level='date')
        for date in dates:
            result = grouped.get_group(date)
            data = [[df.loc[date, 'A'], df.loc[date, 'B']]]
            expected_index = pd.DatetimeIndex([date], name='date')
            expected = pd.DataFrame(data,
                                    columns=list('AB'),
                                    index=expected_index)
            tm.assert_frame_equal(result, expected)
Developer ID: sinhrks, Project: pandas, Lines of code: 32, Source file: test_timegrouper.py


Example 12: get_new_columns

    def get_new_columns(self):
        if self.value_columns is None:
            return self.removed_level

        stride = len(self.removed_level)
        width = len(self.value_columns)
        propagator = np.repeat(np.arange(width), stride)
        if isinstance(self.value_columns, MultiIndex):
            new_levels = self.value_columns.levels + [self.removed_level]
            new_names = self.value_columns.names + [self.removed_name]

            new_labels = [lab.take(propagator)
                          for lab in self.value_columns.labels]
            new_labels.append(np.tile(np.arange(stride), width))
        else:
            new_levels = [self.value_columns, self.removed_level]
            new_names = [self.value_columns.name, self.removed_name]

            new_labels = []

            new_labels.append(propagator)
            new_labels.append(np.tile(np.arange(stride), width))

        return MultiIndex(levels=new_levels, labels=new_labels,
                          names=new_names)
Developer ID: AjayRamanathan, Project: pandas, Lines of code: 25, Source file: reshape.py


Example 13: test_stratified_shuffle_split_init

def test_stratified_shuffle_split_init():
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))

    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))

    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
Developer ID: absolutelyNoWarranty, Project: scikit-learn, Lines of code: 28, Source file: test_split.py


Example 14: test_as_float_array

def test_as_float_array():
    # Test function for as_float_array
    X = np.ones((3, 10), dtype=np.int32)
    X = X + np.arange(10, dtype=np.int32)
    # Checks that the return type is ok
    X2 = as_float_array(X, copy=False)
    np.testing.assert_equal(X2.dtype, np.float32)
    # Another test
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    # Checking that the array wasn't overwritten
    assert_true(as_float_array(X, False) is not X)
    # Checking that the new type is ok
    np.testing.assert_equal(X2.dtype, np.float64)
    # Here, X is of the right type, it shouldn't be modified
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Test that if X is fortran ordered it stays
    X = np.asfortranarray(X)
    assert_true(np.isfortran(as_float_array(X, copy=True)))

    # Test the copy parameter with some matrices
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray()
    ]
    for M in matrices:
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
Developer ID: Afey, Project: scikit-learn, Lines of code: 31, Source file: test_validation.py


Example 15: test_series

    def test_series(self):

        # GH6407
        # inferring series

        # invalid type of Series
        for s in [ Series(np.arange(10)),
                   Series(np.arange(10.))]:
            self.assertRaises(TypeError, lambda : infer_freq(s))

        # a non-convertible string
        self.assertRaises(ValueError, lambda : infer_freq(Series(['foo','bar'])))

        # cannot infer on PeriodIndex
        for freq in [None, 'L', 'Y']:
            s = Series(period_range('2013',periods=10,freq=freq))
            self.assertRaises(TypeError, lambda : infer_freq(s))

        # DateTimeIndex
        for freq in ['M', 'L', 'S']:
            s = Series(date_range('20130101',periods=10,freq=freq))
            inferred = infer_freq(s)
            self.assertEqual(inferred,freq)

        s = Series(date_range('20130101','20130110'))
        inferred = infer_freq(s)
        self.assertEqual(inferred,'D')
Developer ID: B-Rich, Project: pandas, Lines of code: 27, Source file: test_frequencies.py


Example 16: draw

def draw(data, classes, model, resolution=100):
    mycm = mpl.cm.get_cmap('Paired')
    
    one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
    two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
    xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
                     np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(predict(model, inputs[i])[0])
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    
    t = np.zeros(15)
    for i in range(15):
        if i < 5:
            t[i] = 0
        elif i < 10:
            t[i] = 1
        else:
            t[i] = 2
    plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
    
    plt.xlim([0, 10])
    plt.ylim([0, 10])
    
    plt.show()
Developer ID: jayshonzs, Project: ESL, Lines of code: 31, Source file: LVQ.py


Example 17: all_patches

def all_patches(padded_brain,i,predict_patchsize,obs_patchsize,num_channels):
    
    image = padded_brain[i]
    ishape_h , ishape_w = padded_brain.shape[1:3]
    #ipdb.set_trace()
    #ipdb.set_trace()
    half_obs_patchsize = obs_patchsize/2
    half_predict_patchsize = predict_patchsize/2
    extended_image = np.zeros((ishape_h+obs_patchsize-predict_patchsize,ishape_w+obs_patchsize-predict_patchsize,num_channels))
    extended_image[half_obs_patchsize - half_predict_patchsize   : -(half_obs_patchsize - half_predict_patchsize),half_obs_patchsize - half_predict_patchsize  : -(half_obs_patchsize - half_predict_patchsize)]= image
    num_patches_rows = ishape_h // predict_patchsize
    num_patches_cols = ishape_w // predict_patchsize
    
    list_patches = np.zeros((num_patches_cols*num_patches_rows, obs_patchsize, obs_patchsize, num_channels))
    index = 0
    h_range = np.arange(obs_patchsize/2,ishape_h+obs_patchsize/2,predict_patchsize)
    #h_range = h_range[:-1]
    v_range = np.arange(obs_patchsize/2,ishape_w+obs_patchsize/2,predict_patchsize)
    #v_range = v_range[:-1]
    #ipdb.set_trace()
    for index_h in h_range:
        for index_w in v_range:
            patch_brian = extended_image[index_h-obs_patchsize/2: index_h+obs_patchsize/2 ,index_w-obs_patchsize/2: index_w+obs_patchsize/2,:]
            #if patch_brian.shape == (38,29,4):
            #   ipdb.set_trace()
             
            list_patches[index,:,:,:] = patch_brian
            index += 1
    #ipdb.set_trace()
    assert index == num_patches_rows*num_patches_cols
    return list_patches       
Developer ID: havaeimo, Project: PL2_added_layers, Lines of code: 31, Source file: generate_prediction_patch.py


Example 18: main

def main():
    """The main function."""

    # Build data ################

    x = np.arange(0, 10000, 500)
    y = np.arange(0, 1, 0.05)

    xx, yy = np.meshgrid(x, y)
    z = np.power(xx,yy)

    print "xx ="
    print xx
    print "yy ="
    print yy
    print "z ="
    print z

    # Plot data #################

    fig = plt.figure()
    ax = axes3d.Axes3D(fig)

    surf = ax.plot_surface(xx, yy, z, cmap=cm.jet, rstride=1, cstride=1, color='b', shade=True)

    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")

    fig.colorbar(surf, shrink=0.5, aspect=5)

    plt.show()
Developer ID: gandalfvn, Project: mcts-1, Lines of code: 32, Source file: pw_func.py


Example 19: main

def main():
    y, x = svmutil.svm_read_problem("char_recon_shuffled.db")
    x_train = x[:1800]
    y_train = y[:1800]
    x_val = x[1800:]
    y_val = y[1800:]

    gamma_c_pairs = [GammaCPair(1.0 / (2.0 * (3.0 ** log_sigma) ** 2), 3.0 ** log_C)
                     for log_sigma in [7]
                     for log_C     in [3]
                    ]

    log_log_pairs = [[log_sigma, log_C]
                     for log_sigma in np.arange(6, 10, 0.5)
                     for log_C     in np.arange(0, 5, 0.5)
                    ]

    def cv(gamma_c):
        return get_cross_val(x_train, y_train, x_val, y_val, gamma_c)

    cross_val = []

    for gamma_c in gamma_c_pairs:
        cross_val.append(cv(gamma_c))

    f = open("gamma_c", "w")
    for i in range(len(gamma_c_pairs)):
        f.write("{0}   {1}   {2}\n".format(log_log_pairs[i][0], log_log_pairs[i][1], cross_val[i]))
    f.close()
Developer ID: odarbelaeze, Project: python_hwrecon, Lines of code: 29, Source file: train_model.py


Example 20: adwin_ultrafast_M_set_input

	def adwin_ultrafast_M_set_input (self, msmnt_result = [],  nr_coeff = 1):

		self.max_k = 2**self.N
		discr_steps = 2*self.max_k+1
		self.msmnt_phases = np.zeros((self.reps,self.N))
		self.msmnt_times = np.zeros(self.N)
		self.msmnt_results = np.zeros((self.reps,self.N))

		m = np.zeros (self.N+1)
		t = np.zeros (self.N+1)
		th = np.zeros(self.N+1)

		p_real = np.zeros (discr_steps)
		p_imag = np.zeros (discr_steps)
		p_real [0] = 1./(2*np.pi)
		t[0] = 2**self.N

		for n in np.arange(self.N)+1:
			t[n] = int(2**(self.N-n))
			k_opt = int(2**(self.N-n+1))
			th[n] = -0.5*np.angle(-1j*p_imag[k_opt]+p_real[k_opt])

			nr_ones = msmnt_result [n-1]
			nr_zeros = self.M - nr_ones

			for mmm in np.arange(nr_ones):
				for j in np.arange(nr_coeff)-nr_coeff/2:
					if (k_opt+j*t[n]>=0):
						p_real, p_imag = self.adwin_update (p_real = p_real, p_imag = p_imag, meas_res = 1, phase = th[n], tn = t[n], k=k_opt+j*t[n])
			for mmm in np.arange(nr_zeros):
				for j in np.arange(nr_coeff)-nr_coeff/2:
					if (k_opt+j*t[n]>=0):
						p_real, p_imag = self.adwin_update (p_real = p_real, p_imag = p_imag, meas_res = 0, phase = th[n], tn = t[n], k=k_opt+j*t[n])

		return th[1:]
Developer ID: machielblok, Project: analysis, Lines of code: 35, Source file: adwin_debug_magnetometry.py.REMOTE.8375.py



Note: The numpy.arange function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.

