
Python tqdm.tqdm Function Code Examples


This article collects typical usage examples of the tqdm function from Python's tqdm module. If you have been asking yourself what exactly the tqdm function does, how to call it, or what real-world usage looks like, the curated code examples below should help.



Twenty code examples of the tqdm function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site surface better Python code samples.
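Before working through the examples, here is a minimal sketch of the three tqdm patterns they all build on: wrapping an iterable directly, supplying total= when the length cannot be inferred, and driving the bar manually with update(). The loop bodies are placeholders.

from tqdm import tqdm
import time

# Pattern 1: wrap any sized iterable; tqdm infers the total via len().
for item in tqdm(range(100), desc='Processing', unit='item'):
    time.sleep(0.01)  # placeholder for real work

# Pattern 2: generators have no len(); pass total= to get a percentage bar.
squares = (x * x for x in range(50))
for value in tqdm(squares, total=50, desc='Squares'):
    pass

# Pattern 3: manual control; create the bar and advance it with update().
with tqdm(total=1000, unit='B', unit_scale=True) as pbar:
    for _ in range(10):
        time.sleep(0.05)  # placeholder for downloading/processing a chunk
        pbar.update(100)  # advance the bar by 100 units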

Example 1: augment_arrays

def augment_arrays(project):

    array_path = os.path.join(project['path'], 'array')
    augmented_path = os.path.join(project['path'], 'augmented')
    shutil.rmtree(augmented_path, ignore_errors=True)
    os.makedirs(augmented_path)

    if project['augmentations'] is None:
        print('No augmentations selected: copying train arrays as is.')
        files = os.listdir(array_path)
        for file in tqdm(files):
            shutil.copy(os.path.join(array_path, file), augmented_path)

    else:
        print('Generating image augmentations:')

        for img_idx, (array, label, label_name) in tqdm(enumerate(gen_arrays_from_dir(array_path))):
            split_label_name = '-'.join(label_name.split('-')[2:-1])
            for aug_idx, (array_aug, label_aug) in enumerate(gen_augment_arrays(array, label, project['augmentations'], project['category_rounds'][split_label_name])):
                cat_idx = np.argmax(label_aug)
                cat = project['categories'][cat_idx]
                img_name = '{}-{:02d}-img-{}-{}'.format(img_idx, aug_idx,
                                                            cat, cat_idx)
                label_name = '{}-{:02d}-label-{}-{}'.format(img_idx, aug_idx,
                                                            cat, cat_idx)
                aug_path = os.path.join(augmented_path, img_name)
                label_path = os.path.join(augmented_path, label_name)
                np.save(aug_path, array_aug)
                np.save(label_path, label_aug)

    project['is_augmented'] = True
    return project
Developer: codealphago, Project: transfer, Lines: 32, Source: augment_arrays.py
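One detail worth noting: the inner loop above wraps a generator (enumerate(gen_arrays_from_dir(...))), so tqdm cannot infer a length and shows only a running count and rate rather than a percentage bar. If the number of arrays is known up front, passing total= restores the full bar; a hedged sketch, where counting the files is only one plausible way to obtain the total:

n_arrays = len(os.listdir(array_path))  # assumption: one array per file
for img_idx, item in tqdm(enumerate(gen_arrays_from_dir(array_path)), total=n_arrays):
    pass  # process (array, label, label_name) as in the example above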


Example 2: to_html

    def to_html(self, outdir, template=None):

        pages_set = self.pages_set

        if template is None:
            template = textwrap.dedent("""\
                <html>
                    <head>
                        <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
                        <title>Page {page}</title>
                        <link rel="stylesheet" type="text/css" href="teletext.css" title="Default Style"/>
                        <link rel="alternative stylesheet" type="text/css" href="teletext-noscanlines.css" title="No Scanlines"/>
                        <script type="text/javascript" src="cssswitch.js"></script>
                    </head>
                    <body onload="set_style_from_cookie()">
                    {body}
                    </body>
                </html>
            """)

        for magazineno, magazine in tqdm(self.magazines.items(), desc='Magazines', unit='M'):
            for pageno, page in tqdm(magazine.pages.items(), desc='Pages', unit='P'):
                pagestr = f'{magazineno}{pageno:02x}'
                with open(os.path.join(outdir, f'{pagestr}.html'), 'w') as outfile:
                    body = '\n'.join(
                        subpage.to_html(pages_set) for n, subpage in sorted(page.subpages.items())
                    )
                    outfile.write(template.format(page=pagestr, body=body))
Developer: ali1234, Project: vhs-teletext, Lines: 28, Source: service.py


Example 3: find_duplicates

def find_duplicates(directories):
    for d in directories:
        if not os.path.exists(d):
            raise ValueError("Directory %s does not exist" % d)
        elif not os.path.isdir(d):
            raise ValueError("Expected %s to be a directory" % d)

    file_hashes = defaultdict(set)

    print("Scanning for files…")

    all_files = deque()
    for filename in tqdm(find_files(directories)):
        all_files.append(filename)

    print("Hashing %d files" % len(all_files))

    with ThreadPoolExecutor() as executor:
        for filename, digest in tqdm(
            executor.map(get_file_hash, all_files), total=len(all_files)
        ):

            file_hashes[digest].add(filename)

    for digest, filenames in file_hashes.items():
        if len(filenames) > 1:
            yield digest, filenames
Developer: acdha, Project: unix_tools, Lines: 29, Source: dupinator.py


Example 4: generate_code

    def generate_code(self, Modal, bit, generate):
        batch_size = 128
        if generate == "label":
            num_data = Modal.shape[0]
            index = np.linspace(0, num_data - 1, num_data).astype(int)
            B = np.zeros([num_data, bit], dtype=np.float32)
            for batch_idx in tqdm(range(num_data // batch_size + 1)):
                ind = index[batch_idx * batch_size: min((batch_idx + 1) * batch_size, num_data)]
                label = Modal[ind, :].astype(np.float32)
                label = label.reshape([label.shape[0], 1, label.shape[1], 1])
                Hsh_L = self.Hsh_L.eval(feed_dict={self.ph['label_input']: label})
                B[ind, :] = Hsh_L
        elif generate == "image":
            num_data = len(Modal)
            index = np.linspace(0, num_data - 1, num_data).astype(int)
            B = np.zeros([num_data, bit], dtype=np.float32)
            for batch_idx in tqdm(range(num_data // batch_size + 1)):
                ind = index[batch_idx * batch_size: min((batch_idx + 1) * batch_size, num_data)]
                mean_pixel = np.repeat(self.meanpix[:, :, :, np.newaxis], len(ind), axis=3)
                image = Modal[ind, :, :, :].astype(np.float64)
                image = image - mean_pixel.astype(np.float64).transpose(3, 0, 1, 2)
                Hsh_I = self.Hsh_I.eval(feed_dict={self.ph['image_input']: image})
                B[ind, :] = Hsh_I
        else:
            num_data = Modal.shape[0]
            index = np.linspace(0, num_data - 1, num_data).astype(int)
            B = np.zeros([num_data, bit], dtype=np.float32)
            for batch_idx in tqdm(range(num_data // batch_size + 1)):
                ind = index[batch_idx * batch_size: min((batch_idx + 1) * batch_size, num_data)]
                text = Modal[ind, :].astype(np.float32)
                text = text.reshape([text.shape[0], 1, text.shape[1], 1])
                Hsh_T = self.Hsh_T.eval(feed_dict={self.ph['text_input']: text})
                B[ind, :] = Hsh_T
        B = np.sign(B)
        return B
Developer: StatML, Project: SSAH, Lines: 35, Source: SSAH.py


Example 5: train_word2id

def train_word2id():
    """把训练集的所有词转成对应的id。"""
    time0 = time.time()
    print('Processing train data.')
    df_train = pd.read_csv('../raw_data/question_train_set.txt', sep='\t', usecols=[0, 2, 4],
                           names=['question_id', 'word_title', 'word_content'], dtype={'question_id': object})
    print('training question number %d ' % len(df_train))
    # For questions without content, fall back to the title
    na_content_indexs = list()
    for i in tqdm(range(len(df_train))):
        word_content = df_train.word_content.values[i]
        if type(word_content) is float:
            na_content_indexs.append(i)
    print('There are %d train questions without content.' % len(na_content_indexs))
    for na_index in tqdm(na_content_indexs):
        df_train.at[na_index, 'word_content'] = df_train.at[na_index, 'word_title']
    # Drop questions without a title
    na_title_indexs = list()
    for i in range(len(df_train)):
        word_title = df_train.word_title.values[i]
        if type(word_title) is float:
            na_title_indexs.append(i)
    print('There are %d train questions without title.' % len(na_title_indexs))
    df_train = df_train.drop(na_title_indexs)
    print('After dropping, training question number (should be 2999952) = %d' % len(df_train))
    # Convert words to their ids
    p = Pool()
    train_title = np.asarray(p.map(get_id4words, df_train.word_title.values))
    np.save('../data/wd_train_title.npy', train_title)
    train_content = np.asarray(p.map(get_id4words, df_train.word_content.values))
    np.save('../data/wd_train_content.npy', train_content)
    p.close()
    p.join()
    print('Finished changing the training words to ids. Costed time %g s' % (time.time() - time0))
Developer: brucexia6116, Project: zhihu-text-classification, Lines: 34, Source: word2id.py


Example 6: createDataTxt

def createDataTxt(imagePath, annotationPath, imagesInDir, split=False):
    JPG = '.jpg'
    TRAINING = 'training/'
    VALIDATION = 'validation/'

    if split:
        annotatedImages = os.listdir(annotationPath)
        # np.random.shuffle(annotatedImages)
        splitSize = ceil(len(annotatedImages) * 0.85)

        annotatedImagesTrain = annotatedImages[:splitSize]
        annotatedImagesValidation = annotatedImages[splitSize:]
    else:
        annotatedImagesTrain = os.listdir(join(annotationPath, TRAINING))
        annotatedImagesValidation = os.listdir(join(annotationPath, VALIDATION))

    with open(imagesInDir + 'train.txt', 'w') as file:
        for ann in tqdm(annotatedImagesTrain, desc='Writing train.txt for input dataset'):
            if isfile(join(imagePath, TRAINING, splitext(ann)[0]) + JPG):
                file.write(' '.join(
                    [join(imagePath, TRAINING, splitext(ann)[0]) + JPG,
                     join(annotationPath, TRAINING, ann)]) + '\n')

    with open(imagesInDir + 'val.txt', 'w') as file:
        for annv in tqdm(annotatedImagesValidation, desc='Writing val.txt for input dataset'):
            if isfile(join(imagePath, VALIDATION, splitext(annv)[0]) + JPG):
                file.write(' '.join(
                    [join(imagePath, VALIDATION, splitext(annv)[0]) + JPG,
                     join(annotationPath, VALIDATION, annv)]) + '\n')

    return
Developer: ruyi345, Project: Fully-convolutional-networks-TF, Lines: 31, Source: dataGenerator.py


Example 7: pro_progess

def pro_progess(filepath="../data"):
    height = 299
    train_files = os.listdir(filepath + '/train')
    train = np.zeros((len(train_files), height, height, 3), dtype=np.uint8)
    labels = [1 if fname[:3] == 'dog' else 0 for fname in train_files]  # binary label per file

    test_files = os.listdir(filepath + '/test')
    test = np.zeros((len(test_files), height, height, 3), dtype=np.uint8)

    for i in tqdm(range(len(train_files))):
        filename = os.path.join(filepath, 'train', train_files[i])
        img = cv2.imread(filename)
        img = cv2.resize(img, (height, height))
        train[i] = img[:, :, ::-1]

    for i in tqdm(range(len(test_files))):
        filename = os.path.join(filepath, 'test', test_files[i])
        img = cv2.imread(filename)
        img = cv2.resize(img, (height, height))
        test[i] = img[:, :, ::-1]

    print('Training Data Size = %.2f GB' % (sys.getsizeof(train) / 1024**3))
    print('Testing Data Size = %.2f GB' % (sys.getsizeof(test) / 1024**3))
    X_train, X_val, y_train, y_val = train_test_split(
        train, labels, shuffle=True, test_size=0.2, random_state=42)
    return X_train, X_val, y_train, y_val
Developer: Suluo, Project: Kaggle, Lines: 26, Source: get_data.py


Example 8: normalize_features

def normalize_features(X_train, X_test):
    n_features = X_train.shape[1]

    feature_sums = np.sum(X_test, axis=1)
    nonblack_vectors = np.where(feature_sums > 0, 1, 0)

    mask = []
    for x in range(X_test.shape[0]):
        mask.append([nonblack_vectors[x]]*n_features)
    mask = np.array(mask)

    X_test_nonblack = X_test[np.where(feature_sums > 0)]

    X = np.concatenate((X_train, X_test_nonblack))

    mean = np.mean(X,axis=0)
    std = np.std(X,axis=0)

    for d in tqdm(range(len(X_train))):
        X_train[d] = (X_train[d] - mean) / std
    for d in tqdm(range(len(X_test))):
        X_test[d] = (X_test[d] - mean) / std

    # Restore vectors that were originally all black to all black
    X_test = X_test*mask

    return X_train, X_test
Developer: gzuidhof, Project: cad, Lines: 29, Source: rebalance.py


Example 9: make_tqdm_iterator

def make_tqdm_iterator(**kwargs):
    options = {
        "file": sys.stdout,
        "leave": True
    }
    options.update(kwargs)

    if session_type() == 'kernel':
        # from IPython import display
        # capture_stderr = StringIO()
        # with RedirectStdStreams(stderr=capture_stderr):
            # try:
                # iterator = tqdm_notebook(**options)
            # except:
                # failed = True
            # else:
                # failed = False
                # err_out = capture_stderr.getvalue()
        # capture_stderr.close()
        # if failed or err_out.lower().find("widget javascript not detected") > -1:
            # display.clear_output(wait=True)
            # iterator = tqdm(**options)
        iterator = tqdm(**options)

    else:
        iterator = tqdm(**options)
    return iterator
Developer: rgolovnya, Project: featuretools, Lines: 27, Source: gen_utils.py
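A usage sketch for the helper above: because make_tqdm_iterator forwards every keyword argument to tqdm, callers pass the usual tqdm options directly (the values here are illustrative):

iterator = make_tqdm_iterator(iterable=range(500), desc='Building features', unit='row')
for row in iterator:
    pass  # per-row work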


Example 10: scan_dir

def scan_dir(path, dir_json):
    # Preprocess the total files count
    for root, dirs, files in tqdm(os.walk(path)):
        for name in files:
            path = os.path.join(root, name)
            if os.path.getsize(path) > (25*1024*1024):
                ext = os.path.splitext(name)[1]
                if ext in EXT:
                    movie_name.append(name)

    with tqdm(total=len(movie_name), leave=True, unit='B',
              unit_scale=True) as pbar:
        for name in movie_name:
            data = get_movie_info(name)
            pbar.update()
            if data is not None and data['Response'] == 'True':
                for key, val in data.items():
                    if val == "N/A":
                        data[key] = "-"  # Should N/A be replaced with `-`?
                movies.append(data)
            else:
                if data is not None:
                    movie_not_found.append(name)
        with open(dir_json, "w") as out:
            json.dump(movies, out, indent=2)
Developer: iCHAIT, Project: moviemon, Lines: 25, Source: moviemon.py


Example 11: compare_assemblies

def compare_assemblies(assemblies, chunk_size=2000, identity_threshold=0.40):
    """
    compares a set of assemblies:
    assemblies is a dictionary with names of the assemblies as keys and fasta-files of the assemblies as values
    """
    similarities = {}


    print "make blast dbs"
    for subject_name, subject in tqdm(assemblies.iteritems()):
        blast_db_cmd = ["makeblastdb" ,"-in", subject, "-dbtype", "nucl", "-out", subject]
        with open("/dev/null") as null:
            blastdb_return = call(blast_db_cmd, stdout=null)

    print "Run the hell out of it"
    for scaff_name, scaff in tqdm(assemblies.iteritems()):
        similarities[scaff_name] = {}
        chopped_up_query = "tmp.fasta"
        nb_chunks = len(cut_up_fasta(scaff, chopped_up_query, chunk_size))
        for subject_name, subject in assemblies.iteritems():
            nics = find_NICs(chopped_up_query, subject, identity_threshold, blast_db = False)
#            print scaff_name, "vs", subject_name
            similarities[scaff_name][subject_name] = len(nics.keys())/nb_chunks
    os.remove(chopped_up_query)

    print "clean up"
    for subject_name, subject in tqdm(assemblies.iteritems()):
        blast_db_files = [subject + ".nhr", subject + ".nin",  subject + ".nsq"]
        for f in blast_db_files:
            os.remove(f)


    similars =  DataFrame.from_dict(similarities)
    return similars
Developer: moritzbuck, Project: MiComPy, Lines: 34, Source: intrasimilarity.py


Example 12: run

def run():
    batch_size = 4000

    print('reading image hashes from image_hashes.csv...')
    t0 = time()
    global df_hashes
    df_hashes = pd.read_csv('image_hashes.csv')
    df_hashes.set_index('image_id', inplace=True)
    print('took %0.5fs' % (time() - t0))

    pool = avito_utils.PoolWrapper(processes=4)

    print('processing train data...')
    t0 = time()
    df = pd.read_csv('../input/ItemPairs_train.csv')
    delete_file_if_exists('features_imagehash_train.csv')

    for batch_no, batch in tqdm(list(prepare_batches(df, batch_size))):
        features = process_batch(batch, pool)
        append_to_csv(features, 'features_imagehash_train.csv')

    print('processing train data took %0.5fs' % (time() - t0))

    print('processing test data...')
    t0 = time()
    df = pd.read_csv('../input/ItemPairs_test.csv')
    delete_file_if_exists('features_imagehash_test.csv')

    for batch_no, batch in tqdm(list(prepare_batches(df, batch_size))):
        features = process_batch(batch, pool)
        append_to_csv(features, 'features_imagehash_test.csv')

    print('processing test data took %0.5fs' % (time() - t0))

    pool.close()
Developer: alexeygrigorev, Project: avito-duplicates-kaggle, Lines: 35, Source: calculate_imagehash_features.py


Example 13: run

def run(*args):
    """Reset the in_stock Card property. It was set to True by default, it
    should be False. So each card that was bought once or added from
    an inventory should be to True.
    """

    yes_answers = ["y", "Y", "o", "O", ""]
    go_all_cards = input("Go with all cards? [Y/n] ")
    go_inventories = input("Go with cards applied from inventories? [Y/n] ")

    if go_all_cards in yes_answers:
        print("Setting all cards to not in stock...")
        for card in tqdm(Card.objects.all()):
            card.in_stock = False
            card.save()

    if go_inventories in yes_answers:
        print("Registering cards applied from inventories...")
        for inv in tqdm(Inventory.objects.filter(applied=True)):
            print("Going with inv {}".format(inv.name))
            for card_set in inv.inventorycopies_set.all():
                card_set.card.in_stock = True
                card_set.card.save()

    print("All done.")
Developer: vindarel, Project: abelujo, Lines: 25, Source: reset_in_stock.py


Example 14: download_url

def download_url(url, root, filename, md5):
    from six.moves import urllib

    root = os.path.expanduser(root)
    fpath = os.path.join(root, filename)

    try:
        os.makedirs(root)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise

    # downloads file
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(
                url, fpath,
                reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
            )
        except Exception:
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(
                    url, fpath,
                    reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
                )
Developer: Lynkzhang, Project: vision, Lines: 33, Source: utils.py
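gen_bar_updater is defined elsewhere in the project; all this snippet shows is that it adapts a tqdm instance to urlretrieve's reporthook protocol, which calls back with (blocks transferred, block size, total size). A minimal sketch of what such an adapter could look like, offered as an assumption rather than the project's actual implementation:

def gen_bar_updater(pbar):
    def bar_update(count, block_size, total_size):
        # urlretrieve reports the total size once the response headers arrive.
        if pbar.total is None and total_size > 0:
            pbar.total = total_size
        # Advance by the delta between reported progress and the bar's position.
        pbar.update(count * block_size - pbar.n)
    return bar_update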


Example 15: preprocess_simple_predict

def preprocess_simple_predict():
    df = pd.read_csv('data/data_full.csv')
    df = df[df.is_fake==0]
    res_df = df.ID.values
    df_target = df[df.target > 0].drop('ID,is_train,is_fake'.split(','), axis=1)
    target = df_target.target.values
    data = df_target.drop(['target',], axis=1).values.astype(int)
    val_sum = {}
    for i, dat in tqdm(enumerate(data)):
        for d in dat:
            if d <= 0:
                continue
            if d not in val_sum:
                val_sum[d] = [0, 0]
            val_sum[d][0] += target[i]
            val_sum[d][1] += 1
    df['simple_predict'] = 0
    for i, row in tqdm(df.drop('ID,is_train,is_fake,target'.split(','), axis=1).iterrows()):
        summ = 0
        cnt = 0.000001
        for val in row:
            if val not in val_sum or val_sum[val][1] < 10:
                continue
            summ += val_sum[val][0]
            cnt += val_sum[val][1]
        df.loc[i, 'simple_predict'] = summ / cnt
    df[['ID', 'simple_predict']].to_csv('data/feat_simple_predict.csv', index=False)
Developer: vlarine, Project: kaggle, Lines: 27, Source: santander.py


Example 16: predict_kfold

    def predict_kfold(cls, X, y, n_folds=10, seed=0, textModel_params={},
                      kfolds=None, pool=None, use_tqdm=True):
        try:
            from tqdm import tqdm
        except ImportError:
            def tqdm(x, **kwargs):
                return x

        le = preprocessing.LabelEncoder().fit(y)
        y = np.array(le.transform(y))
        hy = np.zeros(len(y), dtype=int)
        if kfolds is None:
            kfolds = StratifiedKFold(n_splits=n_folds, shuffle=True,
                                     random_state=seed).split(X, y)
        args = [(X, y, tr, ts, textModel_params) for tr, ts in kfolds]
        if pool is not None:
            if use_tqdm:
                res = [x for x in tqdm(pool.imap_unordered(cls.train_predict_pool, args),
                                       desc='Params', total=len(args))]
            else:
                res = [x for x in pool.imap_unordered(cls.train_predict_pool, args)]
        else:
            if use_tqdm:
                args = tqdm(args)
            res = [cls.train_predict_pool(x) for x in args]
        for ts, _hy in res:
            hy[ts] = _hy
        return le.inverse_transform(hy)
Developer: INGEOTEC, Project: b4msa, Lines: 28, Source: classifier.py
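The try/except at the top of this method is a common way to keep tqdm an optional dependency: when the import fails, a no-op stand-in with the same call shape takes its place. The same pattern at module level, as a standalone sketch:

try:
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable, **kwargs):
        # Fallback: accept tqdm's keyword arguments but show no bar.
        return iterable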


Example 17: run

def run():

    textfiles = glob.glob('anjuke_new_house/*txt')
    if len(textfiles) != 0:
        print ">> compress files under anjuke_new_house"
        f = zipfile.ZipFile('anjuke_new_house/anjuke_new_house.zip', 'w', zipfile.ZIP_DEFLATED)
        for textfile in tqdm(textfiles):
            f.write(textfile)
            os.remove(textfile)
        f.close()

    textfiles = glob.glob('anjuke_second_house/*txt')
    if len(textfiles) != 0:
        print ">> compress files under anjuke_second_house"
        f = zipfile.ZipFile('anjuke_second_house/anjuke_second_house.zip', 'w', zipfile.ZIP_DEFLATED)
        for textfile in tqdm(textfiles):
            f.write(textfile)
            os.remove(textfile)
        f.close()

    textfiles = glob.glob('anjuke_renting_house/*txt')
    if len(textfiles) != 0:
        print ">> compress files under anjuke_renting_house"
        f = zipfile.ZipFile('anjuke_renting_house/anjuke_renting_house.zip', 'w', zipfile.ZIP_DEFLATED)
        for textfile in tqdm(textfiles):
            f.write(textfile)
            os.remove(textfile)
        f.close()
Developer: summychou, Project: HousePriceAcrossTheCountry, Lines: 28, Source: anjuke_data_compress.py


Example 18: test_ascii

def test_ascii():
    """ Test ascii/unicode bar """
    # Test ascii autodetection
    with closing(StringIO()) as our_file:
        with tqdm(total=10, file=our_file, ascii=None) as t:
            assert t.ascii  # TODO: this may fail in the future

    # Test ascii bar
    with closing(StringIO()) as our_file:
        for _ in tqdm(_range(3), total=15, file=our_file, miniters=1,
                      mininterval=0, ascii=True):
            pass
        our_file.seek(0)
        res = our_file.read().strip("\r").split("\r")
    assert '7%|6' in res[1]
    assert '13%|#3' in res[2]
    assert '20%|##' in res[3]

    # Test unicode bar
    with closing(UnicodeIO()) as our_file:
        with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t:
            for _ in _range(3):
                t.update()
        our_file.seek(0)
        res = our_file.read().strip("\r").split("\r")
    assert "7%|\u258b" in res[1]
    assert "13%|\u2588\u258e" in res[2]
    assert "20%|\u2588\u2588" in res[3]
Developer: CrazyPython, Project: tqdm, Lines: 28, Source: tests_tqdm.py
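Outside the test harness, the ascii flag can be exercised directly in a terminal; a small sketch contrasting the two rendering modes:

from tqdm import tqdm
import time

for _ in tqdm(range(50), ascii=True, desc='ASCII bar'):      # fills with '#'
    time.sleep(0.01)
for _ in tqdm(range(50), ascii=False, desc='Unicode bar'):   # fills with block characters
    time.sleep(0.01)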


Example 19: read_raw_docs

def read_raw_docs(lines: List[str], size: int, workers: int) -> np.ndarray:
    if size == -1:
        size = len(lines)
    lines = lines[:size]
    documents = np.empty(size, dtype=object)
    memory_impact = sum([sys.getsizeof(s) for s in lines])
    # jeopardy 32862372
    # recipes  187414159
    if memory_impact < 50000000:
        offset = 0
        linebins = np.array_split(lines, workers)  # this is the offending large memory line
        with concurrent.futures.ProcessPoolExecutor() as executor:
            futures = {executor.submit(clean_text, linebins[i]): i
                       for i in range(workers)}
            for future in tqdm(concurrent.futures.as_completed(futures),
                               desc='Tokenizing Documents', total=workers, leave=True):
                index = futures[future]
                for i, line in enumerate(future.result()):
                    documents[offset + i] = line
                offset += len(future.result())
    else:
        print('Use Large Memory Algorithm')
        offset = 0
        with concurrent.futures.ProcessPoolExecutor() as executor:
            futures = {executor.submit(clean_line, lines[i]): i
                       for i in range(size)}
            for future in tqdm(concurrent.futures.as_completed(futures),
                               desc='Tokenizing Documents', total=size, leave=True):
                documents[offset] = future.result()
                offset += 1
    return documents
Developer: willzfarmer, Project: Python-LSA, Lines: 31, Source: LSA.py


Example 20: getFeatures

    def getFeatures(self):
        files = glob.glob(self.objectPath + self.preProcessedData + '*.npy')
        split_length = None

        if self.windowSize != "None":
            split_length = self.windowSize * self.samplingFrequency

        split_based = open(self.objectPath + self.dataFeatures + self.featureExtracted, 'w', newline='')
        writer = csv.writer(split_based, delimiter=',')
        header_written = False
        for file in tqdm(files):
            file_split = file.split('_')
            recording_class = file_split[2]
            recording = np.load(file)
            i = 0
            for channel in tqdm(recording):
                if self.windowSize == "None":
                    split_length = len(channel)
                limit = int(len(channel) / split_length) * split_length
                channel = channel[0:limit]
                splits = np.split(channel, limit // split_length)
                j = 1
                for split in tqdm(splits):
                    self.channel_data = split
                    data_ = self.runPipeline()
                    temp = [file_split[0], recording_class, self.channels[i], j]
                    features = list(data_[0])
                    if not header_written:
                        writer.writerow(['filename', 'experiment_identifier', 'channel_name', 'split_number'] + list(data_[1]))
                        header_written = True
                    writer.writerow(temp + features)
                    j += 1
                i += 1
        split_based.close()
Developer: utkarshshukla2912, Project: pyEEGpipeline, Lines: 35, Source: featureExtractor.py



Note: The tqdm.tqdm function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult the corresponding project's license before distributing or using the code, and do not republish without permission.

