
Python numpy.asscalar Function Code Examples


This article collects and organizes typical usage examples of Python's numpy.asscalar function. If you have been struggling with questions such as how exactly asscalar is used, how to call it, or what real usage looks like, the curated code examples below may help.



Twenty code examples of the asscalar function are shown below, sorted by popularity by default.
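Before the examples, a quick orientation (not part of the original collection): np.asscalar converts a size-1 NumPy array, or a NumPy scalar, into the equivalent built-in Python scalar. Note that asscalar was deprecated in NumPy 1.16 and removed in NumPy 1.23; ndarray.item() is the drop-in replacement. A minimal sketch of both forms:

import numpy as np

a = np.array([3.14])        # size-1 array

# Classic form (works only on NumPy < 1.23, where asscalar still exists):
x = np.asscalar(a)          # -> 3.14 as a built-in Python float
print(type(x))              # <class 'float'>

# Modern equivalent, preferred since the 1.16 deprecation:
y = a.item()
assert x == y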

Example 1: _node_to_dict

    def _node_to_dict(self, node):
        '''
        This method helps the master save a MasterNode in JSON format.

        Parameter(s):
            node: MasterNode - Root node of the tree to be converted to a dict.
        Return(s):
            result: dict - The tree as a dict.
        '''
        if node is None:
            return None

        result = {}

        if node.prop is None:
            result['prop'] = None
        else:
            result['prop'] = list(node.prop)

        if node.theta is None:
            result['theta'] = None
            result['tau'] = None
        else:
            result['theta'] = np.asscalar(node.theta)
            result['tau'] = np.asscalar(node.tau)

        result['left'] = self._node_to_dict(node.left)
        result['right'] = self._node_to_dict(node.right)

        return result
Developer: wasit7, Project: ImageSearch, Lines: 30, Source: master.py


Example 2: build_seq_block

def build_seq_block(sub_num, stims, sub_A_sd, sub_B_sd, block_size):
    # block stimulus list and shuffle within each block
    q = len(stims.index)
    stims = [stims.iloc[:q//2,], stims.iloc[q//2:,]]
    stims = [x.reindex(np.random.permutation(x.index)) for x in stims]
    shuffle(stims)
    stims = [[x.iloc[k:(k+block_size),] for k in range(0, q//2, block_size)] for x in stims]
    stims = pd.concat([val for pair in zip(stims[0], stims[1]) for val in pair])

    # inter-stimulus interval is fixed at 2
    # the first ISI is removed (so the sequence begins with a stim presentation)
    ISI = np.delete(np.repeat(2, len(stims.index), axis=0), 0)

    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)

    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]

    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))

    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
Developer: tyarkoni, Project: nipymc, Lines: 31, Source: xsim.py


Example 3: launch_configuration

    def launch_configuration(self, part):
        if self._is_direct:
            max_smem = self._max_shared_memory_needed_per_set_element
            smem_offset = max_smem * _WARPSIZE
            max_block = _device.get_attribute(driver.device_attribute.MAX_BLOCK_DIM_X)
            if max_smem == 0:
                block_size = max_block
            else:
                threads_per_sm = _AVAILABLE_SHARED_MEMORY / max_smem
                block_size = min(max_block, (threads_per_sm / _WARPSIZE) * _WARPSIZE)
            max_grid = _device.get_attribute(driver.device_attribute.MAX_GRID_DIM_X)
            grid_size = min(max_grid, (block_size + part.size) / block_size)

            grid_size = np.asscalar(np.int64(grid_size))
            block_size = (block_size, 1, 1)
            grid_size = (grid_size, 1, 1)

            required_smem = np.asscalar(max_smem * np.prod(block_size))
            return {'op2stride': self._it_space.size,
                    'smem_offset': smem_offset,
                    'WARPSIZE': _WARPSIZE,
                    'required_smem': required_smem,
                    'block_size': block_size,
                    'grid_size': grid_size}
        else:
            return {'op2stride': self._it_space.size,
                    'WARPSIZE': 32}
Developer: jabooth, Project: PyOP2, Lines: 27, Source: cuda.py


Example 4: evaluate

    def evaluate(self, state_batch):

        # Get an action batch
        actions = self.sess.run(self.action_output, feed_dict={self.map_input: state_batch})

        # Create summaries for the actions
        actions_mean = np.mean(np.asarray(actions, dtype=float), axis=0)
        self.actions_mean_plot += actions_mean

        # Only save files every PLOT_STEP steps
        if self.train_counter % PLOT_STEP == 0:

            self.actions_mean_plot /= PLOT_STEP

            summary_action_0 = tf.Summary(value=[tf.Summary.Value(tag='actions_mean[0]',
                                                                  simple_value=np.asscalar(
                                                                      self.actions_mean_plot[0]))])
            summary_action_1 = tf.Summary(value=[tf.Summary.Value(tag='actions_mean[1]',
                                                                  simple_value=np.asscalar(
                                                                      self.actions_mean_plot[1]))])
            self.summary_writer.add_summary(summary_action_0, self.train_counter)
            self.summary_writer.add_summary(summary_action_1, self.train_counter)

            self.actions_mean_plot = [0, 0]

        return actions
Developer: JakobBreuninger, Project: neurobotics, Lines: 26, Source: actor.py


Example 5: test_exclude_targets_combinations_subjectchunks

def test_exclude_targets_combinations_subjectchunks():
    partitioner = ChainNode([NFoldPartitioner(attr='subjects'),
                             ExcludeTargetsCombinationsPartitioner(
                                 k=1,
                                 targets_attr='chunks',
                                 space='partitions')],
                            space='partitions')
    # targets do not need even to be defined!
    ds = Dataset(np.arange(18).reshape(9, 2),
                 sa={'chunks': np.arange(9) // 3,
                     'subjects': np.arange(9) % 3})
    dss = list(partitioner.generate(ds))
    assert_equal(len(dss), 9)

    testing_subjs, testing_chunks = [], []
    for ds_ in dss:
        testing_partition = ds_.sa.partitions == 2
        training_partition = ds_.sa.partitions == 1
        # must be scalars -- so implicit test here
        # if not -- would be error
        testing_subj = np.asscalar(np.unique(ds_.sa.subjects[testing_partition]))
        testing_subjs.append(testing_subj)
        testing_chunk = np.asscalar(np.unique(ds_.sa.chunks[testing_partition]))
        testing_chunks.append(testing_chunk)
        # and those must not appear for training
        ok_(testing_subj not in ds_.sa.subjects[training_partition])
        ok_(testing_chunk not in ds_.sa.chunks[training_partition])
    # and we should have gone through all chunks/subjs pairs
    testing_pairs = set(zip(testing_subjs, testing_chunks))
    assert_equal(len(testing_pairs), 9)
    # yoh: equivalent to set(itertools.product(range(3), range(3))))
    #      but .product is N/A for python2.5
    assert_equal(testing_pairs, set(zip(*np.where(np.ones((3,3))))))
Developer: Soletmons, Project: PyMVPA, Lines: 33, Source: test_generators.py
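Worth noting about the test above: np.asscalar (like ndarray.item()) only accepts size-1 input and raises ValueError otherwise, which is exactly the implicit check the comment mentions. A small illustration, assuming NumPy < 1.23 where asscalar still exists:

import numpy as np

np.asscalar(np.unique(np.array([5, 5, 5])))   # single unique value -> scalar 5
try:
    np.asscalar(np.array([1, 2]))             # more than one element
except ValueError as e:
    print(e)   # "can only convert an array of size 1 to a Python scalar"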


Example 6: find_tip_coordination

def find_tip_coordination(a, bondlength=2.6, bulk_nn=4):
    """
    Find position of tip in crack cluster from coordination
    """
    i, j = neighbour_list("ij", a, bondlength)
    nn = np.bincount(i, minlength=len(a))

    a.set_array('n_neighb', nn)
    g = a.get_array('groups')

    y = a.positions[:, 1]
    above = (nn < bulk_nn) & (g != 0) & (y > a.cell[1,1]/2.0)
    below = (nn < bulk_nn) & (g != 0) & (y < a.cell[1,1]/2.0)

    a.set_array('above', above)
    a.set_array('below', below)

    bond1 = np.asscalar(above.nonzero()[0][a.positions[above, 0].argmax()])
    bond2 = np.asscalar(below.nonzero()[0][a.positions[below, 0].argmax()])

    # These need to be ints, otherwise they are not JSON serializable.
    a.info['bond1'] = bond1
    a.info['bond2'] = bond2

    return bond1, bond2
Developer: libAtoms, Project: matscipy, Lines: 25, Source: crack.py


Example 7: lpc_formants

def lpc_formants(signal, sr, num_formants, max_freq, time_step,
                 win_len, window_shape='gaussian'):
    output = {}
    new_sr = 2 * max_freq
    alpha = np.exp(-2 * np.pi * 50 * (1 / new_sr))
    proc = lfilter([1., -alpha], 1, signal)
    if sr > new_sr:
        proc = librosa.resample(proc, sr, new_sr)
    nperseg = int(win_len * new_sr)
    nperstep = int(time_step * new_sr)
    if window_shape == 'gaussian':
        window = gaussian(nperseg + 2, 0.45 * (nperseg - 1) / 2)[1:nperseg + 1]
    else:
        window = np.hanning(nperseg + 2)[1:nperseg + 1]
    indices = np.arange(int(nperseg / 2), proc.shape[0] - int(nperseg / 2) + 1, nperstep)
    num_frames = len(indices)
    for i in range(num_frames):
        if nperseg % 2 != 0:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2) + 1]
        else:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2)]
        frqs, bw = process_frame(X, window, num_formants, new_sr)
        formants = []
        for j, f in enumerate(frqs):
            if f < 50:
                continue
            if f > max_freq - 50:
                continue
            formants.append((np.asscalar(f), np.asscalar(bw[j])))
        missing = num_formants - len(formants)
        if missing:
            formants += [(None, None)] * missing
        output[indices[i] / new_sr] = formants
    return output
Developer: mmcauliffe, Project: python-acoustic-similarity, Lines: 34, Source: lpc.py


Example 8: rformat

def rformat(item, precision=2, pretty=True):
    #NOTE: LOOK AT pprint
    '''
    Apply numerical formatting recursively for arbitrarily nested iterators, 
    optionally applying a conversion function on each item.
    '''
    if isinstance(item, str):
        return item
    
    if isinstance(item, (int, float)):
        return minfloatformat(item, precision)
        
    try:                #array-like items with len(item) in [0,1]
        #NOTE: This will suppress the type representation of the object str
        if isinstance(np.asscalar(item), str):
            #np.asscalar converts np types to python builtin types (Phew!!)
            return str(item)
            
        if isinstance(np.asscalar(item), (int, float)):
            return minfloatformat(item, precision)
    except Exception:
        #Item is not str, int, float, or convertible to such...
        pass
    
    if isinstance(item, np.ndarray):
        return np.array2string(item, precision=precision)
        #NOTE:  lots more functionality here
        
    return pformat(item)
Developer: apodemus, Project: recipes, Lines: 29, Source: string.py


Example 9: __init__

    def __init__(self, train_plans, purchased_plan):
        classes, indices, y = np.unique(purchased_plan.values, return_index=True, return_inverse=True)
        lov_classes, lov_indices, y_lov = np.unique(train_plans.values, return_index=True, return_inverse=True)
        old_to_new_purchased = dict()
        old_to_new_lov = dict()
        for k in range(len(classes)):
            # create inverse mapping that returns new class label given the old class label
            old_to_new_purchased[str(np.asscalar(purchased_plan.values[indices[k]]))] = k
        for k in range(len(lov_classes)):
            old_to_new_lov[str(np.asscalar(train_plans.values[lov_indices[k]]))] = k
        self.old_to_new = old_to_new_purchased
        self.old_to_new_lov = old_to_new_lov
        self.nclasses_purchased = len(classes)
        self.nclasses_lov = len(np.unique(train_plans.values))
        self.classes = classes
        self.classes_lov = lov_classes
        self.priors = np.zeros((self.nclasses_purchased, self.nclasses_lov))
        new_id = pd.Series(data=y, index=purchased_plan.index)
        for j in xrange(self.nclasses_lov):
            class_counts = np.bincount(new_id.ix[train_plans[train_plans == lov_classes[j]].index],
                                       minlength=len(classes))
            # priors[k, j] is fraction in class k (new label) with last observed value as class j (new label)
            if np.sum(class_counts) > 0:
                self.priors[:, j] = class_counts / float(np.sum(class_counts))

        prior_norm = self.priors.sum(axis=0)
        prior_norm[prior_norm == 0] = 1.0  # don't divide by zero
        self.priors /= prior_norm  # normalize so probabilities sum to one
Developer: brandonckelly, Project: allstate, Lines: 28, Source: boost_truncated_history2.py


Example 10: getLowerLimbAngles

 def getLowerLimbAngles(self, tf, side):
     """
     Computes the joint angles of the human legs, starting from the position
     of the tf frames generated according to the data coming from the Kinect.
     
     @param tf tf
     @param 'L' for left lower limb, 'R' for right lower limb
     """
     if side == 'L':        
         self.last_updated, sys_hip = utils.getSkeletonTransformation(self.id, tf, 'left_hip', self.kin_frame, self.last_updated)
         self.last_updated, sys_knee = utils.getSkeletonTransformation(self.id, tf, 'left_knee', self.kin_frame, self.last_updated)
         self.last_updated, sys_foot = utils.getSkeletonTransformation(self.id, tf, 'left_foot', self.kin_frame, self.last_updated)
     else:            
         self.last_updated, sys_hip = utils.getSkeletonTransformation(self.id, tf, 'right_hip', self.kin_frame, self.last_updated)
         self.last_updated, sys_knee = utils.getSkeletonTransformation(self.id, tf, 'right_knee', self.kin_frame, self.last_updated)
         self.last_updated, sys_foot = utils.getSkeletonTransformation(self.id, tf, 'right_foot', self.kin_frame, self.last_updated)
         
     if sys_hip is None or sys_knee is None or sys_foot is None:
         return None
     
     vect_kh = (sys_hip[0:3,3] - sys_knee[0:3,3])/  \
               numpy.linalg.norm([sys_hip[0:3,3] - sys_knee[0:3,3]])
     vect_fk = (sys_knee[0:3,3] - sys_foot[0:3,3])/ \
               numpy.linalg.norm([sys_knee[0:3,3] - sys_foot[0:3,3]])
     q2 = - numpy.arccos(utils.checkArg(numpy.asscalar(numpy.dot(vect_kh.T,vect_fk))))
     
     q1 = numpy.asscalar(numpy.arccos(vect_kh[1]))                                       #[0,pi]
     if numpy.asscalar(numpy.arcsin(vect_kh[2])) < 0:                                    #[-pi,pi]
         q1 = -q1 
         
     return [q1, q2]
Developer: personalrobotics, Project: humanpy, Lines: 31, Source: humantracking_kinect1.py


Example 11: stations_json

def stations_json():

    stations = np.recfromcsv('chi-stations.csv', delimiter=',')

    output = {'type': "FeatureCollection", 'features':[]}

    for s in stations:

        output['features'].append({
            'type': "Feature",
            "id": np.asscalar(s[0]),
            "geometry": {
                "type":"Point",
                "coordinates":[np.asscalar(s[2]),np.asscalar(s[1])] #long, lat
            },
            "geometry_name": "origin_geom",
            "properties": {
                'name': s[3]
            }})

    f = io.open('chi-stations.json', 'w', encoding='utf-8') 
    f.write(unicode(json.dumps(output, ensure_ascii=False)))
    f.close()

    json_output=open('chi-stations.json')
    output_data = json.load(json_output)
    pprint(output_data)
    json_output.close()
Developer: inachen, Project: CS171-Final-Project, Lines: 28, Source: dataclean.py


Example 12: noisy_alignment_similarity_transform

def noisy_alignment_similarity_transform(source, target, noise_type='uniform',
                                         noise_percentage=0.1,
                                         allow_alignment_rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the source
    and target shapes by adding noise to its parameters.

    Parameters
    ----------
    source : `menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target : `menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_type : ``{'uniform', 'gaussian'}``, optional
        The type of noise to be added.
    noise_percentage : `float` in ``(0, 1)`` or `list` of `len` `3`, optional
        The standard percentage of noise to be added. If `float`, then the same
        amount of noise is applied to the scale, rotation and translation
        parameters of the optimal similarity transform. If `list` of
        `float` it must have length 3, where the first, second and third elements
        denote the amount of noise to be applied to the scale, rotation and
        translation parameters, respectively.
    allow_alignment_rotation : `bool`, optional
        If ``False``, then the rotation is not considered when computing the
        optimal similarity transform between source and target.

    Returns
    -------
    noisy_alignment_similarity_transform : `menpo.transform.Similarity`
        The noisy Similarity Transform between source and target.
    """
    if isinstance(noise_percentage, float):
        noise_percentage = [noise_percentage] * 3
    elif len(noise_percentage) == 1:
        noise_percentage *= 3

    similarity = AlignmentSimilarity(source, target,
                                     rotation=allow_alignment_rotation)

    if noise_type == 'gaussian':
        s = noise_percentage[0] * (0.5 / 3) * np.asscalar(np.random.randn(1))
        r = noise_percentage[1] * (180 / 3) * np.asscalar(np.random.randn(1))
        t = noise_percentage[2] * (target.range() / 3) * np.random.randn(2)

        s = scale_about_centre(target, 1 + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    elif noise_type == 'uniform':
        s = noise_percentage[0] * 0.5 * (2 * np.asscalar(np.random.rand(1)) - 1)
        r = noise_percentage[1] * 180 * (2 * np.asscalar(np.random.rand(1)) - 1)
        t = noise_percentage[2] * target.range() * (2 * np.random.rand(2) - 1)

        s = scale_about_centre(target, 1. + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    else:
        raise ValueError('Unexpected noise type. '
                         'Supported values are {gaussian, uniform}')

    return similarity.compose_after(t.compose_after(s.compose_after(r)))
Developer: geshiming, Project: menpofit, Lines: 60, Source: fitter.py
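Since the docstring above reads like API documentation, a hedged usage sketch may help. It assumes menpo is installed and that menpo.shape.PointCloud accepts an (n_points, n_dims) array; the call itself follows the signature defined above:

import numpy as np
from menpo.shape import PointCloud

source = PointCloud(np.random.rand(10, 2))
target = PointCloud(np.random.rand(10, 2))

# Perturb the optimal source-to-target similarity transform with 10% uniform noise
noisy = noisy_alignment_similarity_transform(source, target,
                                             noise_type='uniform',
                                             noise_percentage=0.1)
perturbed_source = noisy.apply(source)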


Example 13: run_epoch

def run_epoch(session, m, mode):

    total_cost = 0.0
    num_samples_seen= 0
    total_num_correct_predictions= 0

    if mode == 'training':
        if flags.first_training_epoch:
            flags.first_training_epoch= False

        num_correct_predictions,num_samples, _ = session.run([m.num_correct_predictions,m.num_samples, m.train_op])

        avg_accuracy = num_correct_predictions/num_samples
        print("Traversed through %d samples." %num_samples_seen)
        return np.asscalar(avg_accuracy)

    else:
        if flags.first_validation_epoch or flags.testing_epoch:
            flags.first_validation_epoch= False
            flags.testing_epoch= False

        cost, num_correct_predictions,num_samples = session.run([m.cost ,m.num_correct_predictions,m.num_samples])

        accuracy= num_correct_predictions/num_samples
        print("total cost is %.4f" %total_cost)
        return np.asscalar(accuracy)
Developer: AaronZhouQian, Project: lstm_tensorflow_imdb, Lines: 26, Source: lstm_tf_imdb3.py


Example 14: get_statistics

  def get_statistics(self, attribute=0):
    attribute = self._storage["attribute/%s" % attribute]

    if "min" not in attribute.attrs or "max" not in attribute.attrs:
      attribute_min = None
      attribute_max = None

      chunk_size = 1000
      for begin in numpy.arange(0, len(attribute), chunk_size):
        slice = attribute[begin : begin + chunk_size]
        if attribute.dtype.char in ["O", "S", "U"]:
          data_min = min(slice)
          data_max = max(slice)
          attribute_min = str(data_min) if attribute_min is None else str(min(data_min, attribute_min))
          attribute_max = str(data_max) if attribute_max is None else str(max(data_max, attribute_max))
        else:
          slice = slice[numpy.invert(numpy.isnan(slice))]
          if len(slice):
            data_min = numpy.asscalar(slice.min())
            data_max = numpy.asscalar(slice.max())
            attribute_min = data_min if attribute_min is None else min(data_min, attribute_min)
            attribute_max = data_max if attribute_max is None else max(data_max, attribute_max)

      if attribute_min is not None:
        attribute.attrs["min"] = attribute_min
      if attribute_max is not None:
        attribute.attrs["max"] = attribute_max

    return dict(min=attribute.attrs.get("min", None), max=attribute.attrs.get("max", None))
Developer: gitter-badger, Project: slycat, Lines: 29, Source: hdf5.py


Example 15: __init__

    def __init__(self, obj):
        self.obj = obj
        parameters = []
        names = []
        ties = {}

        def add_par(p, name):
            if not isinstance(p, Parameter):
                p = Parameter(p,p)
            for par_check in parameters + [None]:
                if p is par_check:
                    break
            if par_check is not None:
                # if the above loop encountered a break, it
                # means the parameter is tied

                # we will rename the parameter so that when it is printed it
                # better reflects how it is used
                new_name = tied_name(names[parameters.index(p)], name)
                names[parameters.index(p)] = new_name

                if new_name in ties:
                    # if there is already an existing tie group we need to
                    # do a few things to get the name right
                    group = ties[new_name]

                else:
                    group = [name]

                group.append(name)
                ties[new_name] = group

            else:
                if not p.fixed:
                    parameters.append(p)
                    names.append(name)

        # find all the Parameter's in the obj
        for name, par in sorted(iter(obj.parameters.items()), key=lambda x: x[0]):
            if isinstance(par, ComplexParameter):
                add_par(par.real, name+'.real')
                add_par(par.imag, name+'.imag')
            elif isinstance(par, dict):
                for key, val in par.items():
                    add_par(val, name + '_' + key)
            elif isinstance(par, xr.DataArray):
                if len(par.dims)==1:
                    dimname = par.dims[0]
                else:
                    raise ParameterSpecificationError('Multi-dimensional parameters are not supported')
                for key in par[dimname]:
                    add_par(np.asscalar(par.sel(**{dimname:key})),name+'_'+np.asscalar(key))
            elif isinstance(par, Parameter):
                add_par(par, name)

        parameters = deepcopy(parameters)
        for i, name in enumerate(names):
            parameters[i].name = name
        self.parameters = parameters
        self.ties = ties
Developer: barkls, Project: holopy, Lines: 60, Source: model.py


Example 16: _findGaps

 def _findGaps(self, workspace_name, min_i, max_i):
     """
     Find workspace indexes with a low overall intensity
     A histogram with low intensity contains zero-intensity values for many
     of the energy values (Energy is the X-axis)
     :param workspace_name:
     :param min_i: minimum workspace index to look for
     :param max_i: 1+maximum workspace index to look for
     :return: chunks of consecutive workspace indexes with low overall intensity
     """
     zero_fraction = list()  # for each histogram, count the number of zeros
     workspace = sapi.mtd[workspace_name]
     for index in range(min_i, max_i):
         y = workspace.dataY(index)
         zero_fraction.append(1.0 - (1. * numpy.count_nonzero(y)) / len(y))
     # Find workspace indexes zero fraction above a reasonable threshold
      threshold = numpy.mean(zero_fraction) + 2 * numpy.std(zero_fraction)  # two standard deviations above the mean
     high_zero_fraction = min_i + (numpy.where(zero_fraction > threshold))[0]
     # split the high_zero_fraction indexes into chunks of consecutive indexes
     #  Example: if high_zero_fraction=[3,7,8,9,11,15,16], then we split into [3],[7,8,9], [11], [15,16]
     gaps = list()  # intensity gaps, because high zero fraction means low overall intensity
     gap = [numpy.asscalar(high_zero_fraction[0]), ]
     for index in range(1, len(high_zero_fraction)):
         if high_zero_fraction[index] - high_zero_fraction[index - 1] == 1:
             gap.append(numpy.asscalar(high_zero_fraction[index]))  # two consecutive indexes
         else:
             gaps.append(gap)
             gap = [numpy.asscalar(high_zero_fraction[index]), ]
     gaps.append(gap)  # final dangling gap has to be appended
     return gaps  # a list of lists
Developer: samueljackson92, Project: mantid, Lines: 30, Source: DPDFreduction.py


Example 17: preprocess

    def preprocess(self):
        """

        :return:
        """
        self.N_particles = hypers['N_particles'].value

        # Set up initial state distribution
        # Initial state is centered around the steady state
        D = sz_dtype(self.population.latent_dtype)
        self.mu_initial = self.population.steady_state().reshape((D,1))

        # TODO: Implement a distribution over the initial variances
        sig_initial = np.ones(1, dtype=self.population.latent_dtype)
        sig_initial.fill(np.asscalar(hypers['sig_ch_init'].value))
        for neuron in self.population.neurons:
            for compartment in neuron.compartments:
                sig_initial[neuron.name][compartment.name]['V'] = hypers['sig_V_init'].value
        self.sig_initial = as_matrix(sig_initial)

        # TODO: Implement a distribution over the transition noise
        sig_trans = np.ones(1, dtype=self.population.latent_dtype)
        sig_trans.fill(np.asscalar(hypers['sig_ch'].value))
        for neuron in self.population.neurons:
            for compartment in neuron.compartments:
                sig_trans[neuron.name][compartment.name]['V'] = hypers['sig_V'].value
        self.sig_trans = as_matrix(sig_trans)
Developer: HIPS, Project: optofit, Lines: 27, Source: mcmc_transitions.py


Example 18: getSrcCellsValueRange

def getSrcCellsValueRange( mainSheet, totalFields, valsCol, i, j=0, k=0, \
                           numProducts=0, numRates=0, specFieldsInd=[] ):
    ''' Returns lists of Values and Names of the Source cells
        for the given i and j indices '''
    # two +1 offsets because spreadInd starts from 0 while rows start from 1 in Excel
    src_RowIndex = ((k-1)*numProducts*totalFields)* np.array(len(specFieldsInd)*[1])\
                   + ((i-1)*totalFields)* np.array(len(specFieldsInd)*[1])\
                   + np.array(specFieldsInd)+ np.array(len(specFieldsInd)*[1])\
                   + np.array(len(specFieldsInd)*[1])
    temp = Cell( mainSheet, np.asscalar( src_RowIndex[0] ), valsCol ).horizontal
    temp = np.array([ i for i in temp if i!=None])
    
    srcCells_Val = np.zeros( (len(specFieldsInd), len( temp )) )
    srcCells_Name = np.empty( srcCells_Val.shape, dtype = 'S10' )

    for row in range(0, len(specFieldsInd)):
        temp_Val = Cell( mainSheet, np.asscalar( src_RowIndex[row] ), \
                                valsCol ).horizontal
        srcCells = Cell( mainSheet, np.asscalar( src_RowIndex[row] ), \
                         valsCol ).horizontal_range 
        temp_Name = [ cell.name for cell in srcCells ]

        # Ignore all NoneType cells
        srcCells_Name[row] = np.array([ temp_Name[i] for i,item in \
                                        enumerate(temp_Val) if item!=None])
        srcCells_Val[row] = np.array([ float( re.sub('[^\d\.\-]','', str(i)) ) \
                                       for i in temp_Val if i!=None ])
        
    return srcCells_Val, srcCells_Name
Developer: cradbold, Project: data-testing, Lines: 29, Source: NDTC23a_BalanceWaterfallCalc.py


Example 19: _numpy_to_values

    def _numpy_to_values(data, default_range, append):
        '''Convert a NumPy array to values attribute'''
        def to_list_no_index(xvals, yvals):
            return [{"x": x, "y": np.asscalar(y)}
                    for x, y in zip(xvals, yvals)]

        if len(data.shape) == 1 or data.shape[1] == 1:
            xvals = default_range(data.shape[0], append)
            values = to_list_no_index(xvals, data)
        elif len(data.shape) == 2:
            if data.shape[1] == 2:
                # NumPy arrays and matrices have different iteration rules.
                if isinstance(data, np.matrix):
                    xidx = (0, 0)
                    yidx = (0, 1)
                else:
                    xidx = 0
                    yidx = 1

                xvals = [np.asscalar(row[xidx]) for row in data]
                yvals = [np.asscalar(row[yidx]) for row in data]
                values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
            else:
                raise ValueError('arrays with > 2 columns not supported')
        else:
            raise ValueError('invalid dimensions for ndarray')

        return values
Developer: aashish24, Project: vincent, Lines: 28, Source: vincent.py
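The comment about NumPy arrays and matrices having different iteration rules is the subtle point in this example: rows of an ndarray come out 1-D, while rows of a matrix stay 2-D, which is why the matrix branch needs tuple indices. A tiny standalone demo (not from the original project):

import numpy as np

arr = np.array([[1, 2], [3, 4]])
mat = np.matrix([[1, 2], [3, 4]])

for row in arr:
    print(row.shape)    # (2,)   -> row[0], row[1] work
    break
for row in mat:
    print(row.shape)    # (1, 2) -> needs row[0, 0], row[0, 1]
    break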


Example 20: __fitmodel1d

    def __fitmodel1d(self, Y):
        """Helper for apply_along_axis()"""
        res = self._res
        results = self._model_gen(Y, self._exog).fit()
        t_to_z = lambda t, df: stats.norm.ppf(stats.t.cdf(t, df))
        if isinstance(res, np.ndarray):
            if len(res.shape) == 1:
                tstats = results.t_test(self._res)
                return [np.asscalar(i) for i in [tstats.tvalue,
                                                 tstats.pvalue,
                                                 tstats.effect,
                                                 tstats.sd,
                                                 np.array(tstats.df_denom),
                                                 t_to_z(tstats.tvalue, tstats.df_denom)]]

            elif len(res.shape) == 2:
                fstats = results.f_test(self._res)
                return [np.asscalar(i) for i in
                            [fstats.fvalue,
                             fstats.pvalue]] + [fstats.df_num,
                                                fstats.df_denom]
            else:
                raise ValueError("Test specification (via `res`) has to be 1d or 2d array")
        elif isinstance(res, str):
            return results.__getattribute__(res)
        else:
            return res(results)
Developer: Arthurkorn, Project: PyMVPA, Lines: 27, Source: statsmodels_adaptor.py



Note: The numpy.asscalar examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.

