
Python python_compat.imap Function Code Examples


This article collects and summarizes typical usage examples of the python_compat.imap function in Python. If you have been wondering what exactly the imap function does, how to use it, or what real-world usage looks like, the hand-picked code examples below should help.



A total of 20 code examples of the imap function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
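
Before diving into the project code, here is a minimal sketch of what python_compat.imap stands for. In the grid-control code base, python_compat is a Python 2/3 compatibility module and imap is its lazy map; the try/except fallback below is the common compatibility pattern and is shown as an assumption for illustration, not as the module's actual source.

# Minimal sketch (illustrative, not the grid-control source): imap as a lazy map
# that behaves the same on Python 2 and Python 3.
try:
	from itertools import imap  # Python 2: the lazy map lives in itertools
except ImportError:
	imap = map  # Python 3: the built-in map is already lazy

squares = imap(lambda x: x * x, range(5))
print(list(squares))  # -> [0, 1, 4, 9, 16]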

Example 1: _get_section_key

	def _get_section_key(self, section):
		tmp = section.split()
		if not tmp:
			raise ConfigError('Invalid config section %r' % section)
		(cur_section, cur_name_list, cur_tag_map) = (tmp[0], [], {})
		for token in tmp[1:]:
			if ':' in token:
				tag_entry = token.split(':')
				if len(tag_entry) != 2:
					raise ConfigError('Invalid config tag in section %r' % section)
				cur_tag_map[tag_entry[0]] = tag_entry[1]
			elif token:
				cur_name_list.append(token)

		class_section_idx = safe_index(self._class_section_list, cur_section)
		section_idx = safe_index(self._section_list, cur_section)
		if (not self._class_section_list) and (not self._section_list):
			section_idx = 0
		if (class_section_idx is not None) or (section_idx is not None):
			# Section is selected by class or manually
			name_idx_tuple = tuple(imap(lambda n: safe_index(self._section_name_list, n), cur_name_list))
			if None not in name_idx_tuple:  # All names in current section are selected
				cur_tag_name_list = lfilter(cur_tag_map.__contains__, self._section_tag_order)
				left_tag_name_list = lfilter(lambda tn: tn not in self._section_tag_order, cur_tag_map)
				tag_tuple_list = imap(lambda tn: (tn, cur_tag_map[tn]), cur_tag_name_list)
				tag_idx_tuple = tuple(imap(lambda tt: safe_index(self._section_tag_list, tt), tag_tuple_list))
				if (None not in tag_idx_tuple) and not left_tag_name_list:
					return (class_section_idx, section_idx, name_idx_tuple, tag_idx_tuple)
Developer ID: grid-control, Project: grid-control, Lines of code: 28, Source file: cview_tagged.py


Example 2: _parseTime

	def _parseTime(self, time_str):
		result = 0
		entry_map = {'yea': 365 * 24 * 60 * 60, 'day': 24 * 60 * 60, 'hou': 60 * 60, 'min': 60, 'sec': 1}
		tmp = time_str.split()
		for (entry, value) in izip(imap(lambda x: x[:3], tmp[1::2]), imap(int, tmp[::2])):
			result += entry_map[entry] * value
		return result
Developer ID: Fra-nk, Project: grid-control, Lines of code: 7, Source file: access_arc.py
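
A hedged worked example of the "<value> <unit> ..." string format that _parseTime expects; the concrete input and the standalone rewrite below are illustrative only and simply reproduce the method's arithmetic.

# Illustrative only: reproduces the arithmetic _parseTime performs for the
# hypothetical input '2 days 3 hours 30 minutes' (units matched on their first three letters).
entry_map = {'yea': 365 * 24 * 60 * 60, 'day': 24 * 60 * 60, 'hou': 60 * 60, 'min': 60, 'sec': 1}
tmp = '2 days 3 hours 30 minutes'.split()
result = sum(entry_map[unit[:3]] * int(value) for (value, unit) in zip(tmp[::2], tmp[1::2]))
print(result)  # -> 185400 seconds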


Example 3: _get_sandbox_file_list

	def _get_sandbox_file_list(self, task, sm_list):
		# Prepare all input files
		dep_list = set(ichain(imap(lambda x: x.get_dependency_list(), [task] + sm_list)))
		dep_fn_list = lmap(lambda dep: resolve_path('env.%s.sh' % dep,
			lmap(lambda pkg: get_path_share('', pkg=pkg), os.listdir(get_path_pkg()))), dep_list)
		task_config_dict = dict_union(self._remote_event_handler.get_mon_env_dict(),
			*imap(lambda x: x.get_task_dict(), [task] + sm_list))
		task_config_dict.update({'GC_DEPFILES': str.join(' ', dep_list),
			'GC_USERNAME': self._token.get_user_name(), 'GC_WMS_NAME': self._name})
		task_config_str_list = DictFormat(escape_strings=True).format(
			task_config_dict, format='export %s%s%s\n')
		vn_alias_dict = dict(izip(self._remote_event_handler.get_mon_env_dict().keys(),
			self._remote_event_handler.get_mon_env_dict().keys()))
		vn_alias_dict.update(task.get_var_alias_map())
		vn_alias_str_list = DictFormat(delimeter=' ').format(vn_alias_dict, format='%s%s%s\n')

		# Resolve wildcards in task input files
		def _get_task_fn_list():
			for fpi in task.get_sb_in_fpi_list():
				matched = glob.glob(fpi.path_abs)
				if matched != []:
					for match in matched:
						yield match
				else:
					yield fpi.path_abs
		return lchain([self._remote_event_handler.get_file_list(), dep_fn_list, _get_task_fn_list(), [
			VirtualFile('_config.sh', sorted(task_config_str_list)),
			VirtualFile('_varmap.dat', sorted(vn_alias_str_list))]])
Developer ID: mschnepf, Project: grid-control, Lines of code: 28, Source file: wms.py


Example 4: __init__

	def __init__(self, jobDB, task, jobs = None, configString = ''):
		Report.__init__(self, jobDB, task, jobs, configString)
		catJobs = {}
		catDescDict = {}
		# Assignment of jobs to categories (depending on variables and using datasetnick if available)
		jobConfig = {}
		for jobNum in self._jobs:
			if task:
				jobConfig = task.getJobConfig(jobNum)
			varList = sorted(ifilter(lambda var: '!' not in repr(var), jobConfig.keys()))
			if 'DATASETSPLIT' in varList:
				varList.remove('DATASETSPLIT')
				varList.append('DATASETNICK')
			catKey = str.join('|', imap(lambda var: '%s=%s' % (var, jobConfig[var]), varList))
			catJobs.setdefault(catKey, []).append(jobNum)
			if catKey not in catDescDict:
				catDescDict[catKey] = dict(imap(lambda var: (var, jobConfig[var]), varList))
		# Kill redundant keys from description
		commonVars = dict(imap(lambda var: (var, jobConfig[var]), varList)) # seed with last varList
		for catKey in catDescDict:
			for key in list(commonVars.keys()):
				if key not in catDescDict[catKey].keys():
					commonVars.pop(key)
				elif commonVars[key] != catDescDict[catKey][key]:
					commonVars.pop(key)
		for catKey in catDescDict:
			for commonKey in commonVars:
				catDescDict[catKey].pop(commonKey)
		# Generate job-category map with efficient int keys - catNum becomes the new catKey
		self._job2cat = {}
		self._catDescDict = {}
		for catNum, catKey in enumerate(sorted(catJobs)):
			self._catDescDict[catNum] = catDescDict[catKey]
			self._job2cat.update(dict.fromkeys(catJobs[catKey], catNum))
Developer ID: artus-analysis, Project: grid-control, Lines of code: 34, Source file: report_gui.py


Example 5: __init__

	def __init__(self, config, job_db, task):
		map_cat2jobs = {}
		map_cat2desc = {}
		job_config_dict = {}
		vn_list = []
		for jobnum in job_db.get_job_list():
			if task:
				job_config_dict = task.get_job_dict(jobnum)
			vn_list = lfilter(self._is_not_ignored_vn, sorted(job_config_dict.keys()))
			cat_key = str.join('|', imap(lambda vn: '%s=%s' % (vn, job_config_dict[vn]), vn_list))
			map_cat2jobs.setdefault(cat_key, []).append(jobnum)
			if cat_key not in map_cat2desc:
				map_cat2desc[cat_key] = dict(imap(lambda var: (var, job_config_dict[var]), vn_list))
		# Kill redundant keys from description - seed with last vn_list
		common_var_dict = dict(imap(lambda var: (var, job_config_dict[var]), vn_list))
		for cat_key in map_cat2desc:
			for key in list(common_var_dict.keys()):
				if key not in map_cat2desc[cat_key].keys():
					common_var_dict.pop(key)
				elif common_var_dict[key] != map_cat2desc[cat_key][key]:
					common_var_dict.pop(key)
		for cat_key in map_cat2desc:
			for common_key in common_var_dict:
				map_cat2desc[cat_key].pop(common_key)
		# Generate job-category map with efficient int keys - catNum becomes the new cat_key
		self._job2cat = {}
		self._map_cat2desc = {}
		for cat_num, cat_key in enumerate(sorted(map_cat2jobs)):
			self._map_cat2desc[cat_num] = map_cat2desc[cat_key]
			self._job2cat.update(dict.fromkeys(map_cat2jobs[cat_key], cat_num))
Developer ID: grid-control, Project: grid-control, Lines of code: 30, Source file: cat_manager.py


Example 6: _resyncInternal

	def _resyncInternal(self): # This function is _VERY_ time critical!
		tmp = self._rawSource.resync() # First ask about psource changes
		(redoNewPNum, disableNewPNum, sizeChange) = (set(tmp[0]), set(tmp[1]), tmp[2])
		hashNew = self._rawSource.getHash()
		hashChange = self._storedHash != hashNew
		self._storedHash = hashNew
		if not (redoNewPNum or disableNewPNum or sizeChange or hashChange):
			self._resyncState = None
			return

		psource_old = ParameterAdapter(None, ParameterSource.createInstance('GCDumpParameterSource', self._pathParams))
		psource_new = ParameterAdapter(None, self._rawSource)

		mapJob2PID = {}
		(pAdded, pMissing, _) = self._diffParams(psource_old, psource_new, mapJob2PID, redoNewPNum, disableNewPNum)
		self._source = self._getResyncSource(psource_old, psource_new, mapJob2PID, pAdded, pMissing, disableNewPNum)

		self._mapJob2PID = mapJob2PID # Update Job2PID map
		redoNewPNum = redoNewPNum.difference(disableNewPNum)
		if redoNewPNum or disableNewPNum:
			mapPID2Job = dict(ismap(utils.swap, self._mapJob2PID.items()))
			translate = lambda pNum: mapPID2Job.get(pNum, pNum)
			self._resyncState = (set(imap(translate, redoNewPNum)), set(imap(translate, disableNewPNum)), sizeChange)
		elif sizeChange:
			self._resyncState = (set(), set(), sizeChange)
		# Write resynced state
		self._writeJob2PID(self._pathJob2PID + '.tmp')
		ParameterSource.getClass('GCDumpParameterSource').write(self._pathParams + '.tmp', self)
		os.rename(self._pathJob2PID + '.tmp', self._pathJob2PID)
		os.rename(self._pathParams + '.tmp', self._pathParams)
Developer ID: artus-analysis, Project: grid-control, Lines of code: 30, Source file: padapter.py


Example 7: _resync_adapter

	def _resync_adapter(self, pa_old, pa_new, result_redo, result_disable, size_change):
		(map_jobnum2pnum, pspi_list_added, pspi_list_missing) = _diff_pspi_list(pa_old, pa_new,
			result_redo, result_disable)
		# Reorder and reconstruct parameter space with the following layout:
		# NNNNNNNNNNNNN OOOOOOOOO | source: NEW (==self) and OLD (==from file)
		# <same><added> <missing> | same: both in NEW and OLD, added: only in NEW, missing: only in OLD
		if pspi_list_added:
			_extend_map_jobnum2pnum(map_jobnum2pnum, pa_old.get_job_len(), pspi_list_added)
		if pspi_list_missing:
			# extend the parameter source by placeholders for the missing parameter space points
			psrc_missing = _create_placeholder_psrc(pa_old, pa_new,
				map_jobnum2pnum, pspi_list_missing, result_disable)
			self._psrc = ParameterSource.create_instance('ChainParameterSource',
				self._psrc_raw, psrc_missing)

		self._map_jobnum2pnum = map_jobnum2pnum  # Update Job2PID map
		# Write resynced state
		self._write_jobnum2pnum(self._path_jobnum2pnum + '.tmp')
		ParameterSource.get_class('GCDumpParameterSource').write(self._path_params + '.tmp',
			self.get_job_len(), self.get_job_metadata(), self.iter_jobs())
		os.rename(self._path_jobnum2pnum + '.tmp', self._path_jobnum2pnum)
		os.rename(self._path_params + '.tmp', self._path_params)

		result_redo = result_redo.difference(result_disable)
		if result_redo or result_disable:
			map_pnum2jobnum = reverse_dict(self._map_jobnum2pnum)

			def _translate_pnum(pnum):
				return map_pnum2jobnum.get(pnum, pnum)
			result_redo = set(imap(_translate_pnum, result_redo))
			result_disable = set(imap(_translate_pnum, result_disable))
			return (result_redo, result_disable, size_change)
		return (set(), set(), size_change)
Developer ID: grid-control, Project: grid-control, Lines of code: 33, Source file: padapter.py


Example 8: split_brackets

def split_brackets(tokens, brackets = None, exType = Exception):
	if brackets is None:
		brackets = ['()', '{}', '[]']
	buffer = ''
	stack_bracket = []
	map_close_to_open = dict(imap(lambda x: (x[1], x[0]), brackets))
	position = 0
	for token in tokens:
		position += len(token) # store position for proper error messages
		if token in map_close_to_open.values():
			stack_bracket.append((token, position))
		if token in map_close_to_open.keys():
			if not stack_bracket:
				raise exType('Closing bracket %r at position %d is without opening bracket' % (token, position))
			elif stack_bracket[-1][0] == map_close_to_open[token]:
				stack_bracket.pop()
				if not stack_bracket:
					buffer += token
					yield buffer
					buffer = ''
					continue
			else:
				raise exType('Closing bracket %r at position %d does not match bracket %r at position %d' % (token, position, stack_bracket[-1][0], stack_bracket[-1][1]))
		if stack_bracket:
			buffer += token
		else:
			yield token
	if stack_bracket:
		raise exType('Unclosed brackets %s' % str.join(', ', imap(lambda b_pos: '%r at position %d' % b_pos, stack_bracket)))
Developer ID: thomas-mueller, Project: grid-control, Lines of code: 29, Source file: __init__.py
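
A short usage sketch of split_brackets as listed above: given an iterable of single-character tokens, bracketed runs are regrouped into single strings. It assumes split_brackets (and imap from python_compat) are already in scope; the concrete tokens are made up for illustration.

# Illustrative usage, assuming split_brackets from the listing above is importable:
tokens = list('a(b+c)d[e]')
print(list(split_brackets(tokens)))
# -> ['a', '(b+c)', 'd', '[e]']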


Example 9: get_job_dict

	def get_job_dict(self, jobnum):
		# Get job dependent environment variables
		job_env_dict = SCRAMTask.get_job_dict(self, jobnum)
		if not self._has_dataset:
			job_env_dict['MAX_EVENTS'] = self._events_per_job
		job_env_dict.update(dict(self._cmssw_search_dict))
		if self._do_gzip_std_output:
			job_env_dict['GZIP_OUT'] = 'yes'
		if self._project_area_tarball_on_se:
			job_env_dict['SE_RUNTIME'] = 'yes'
		if self._project_area:
			job_env_dict['HAS_RUNTIME'] = 'yes'
		job_env_dict['CMSSW_EXEC'] = 'cmsRun'
		job_env_dict['CMSSW_CONFIG'] = str.join(' ', imap(os.path.basename, self._config_fn_list))
		job_env_dict['CMSSW_OLD_RELEASETOP'] = self._old_release_top
		if self.prolog.is_active():
			job_env_dict['CMSSW_PROLOG_EXEC'] = self.prolog.get_command()
			job_env_dict['CMSSW_PROLOG_SB_IN_FILES'] = str.join(' ',
				imap(lambda x: x.path_rel, self.prolog.get_sb_in_fpi_list()))
			job_env_dict['CMSSW_PROLOG_ARGS'] = self.prolog.get_arguments()
		if self.epilog.is_active():
			job_env_dict['CMSSW_EPILOG_EXEC'] = self.epilog.get_command()
			job_env_dict['CMSSW_EPILOG_SB_IN_FILES'] = str.join(' ',
				imap(lambda x: x.path_rel, self.epilog.get_sb_in_fpi_list()))
			job_env_dict['CMSSW_EPILOG_ARGS'] = self.epilog.get_arguments()
		return job_env_dict
Developer ID: grid-control, Project: grid-control, Lines of code: 26, Source file: task_cmssw.py


Example 10: collapse_psp_list

def collapse_psp_list(psp_list, tracked_list, opts):
	psp_dict = {}
	psp_dict_nicks = {}
	header_list = [('COLLATE_JOBS', '# of jobs')]
	if 'DATASETSPLIT' in tracked_list:
		tracked_list.remove('DATASETSPLIT')
		if opts.collapse == 1:
			tracked_list.append('DATASETNICK')
			header_list.append(('DATASETNICK', 'DATASETNICK'))
		elif opts.collapse == 2:
			header_list.append(('COLLATE_NICK', '# of nicks'))
	for pset in psp_list:
		if ('DATASETSPLIT' in pset) and (opts.collapse == 1):
			pset.pop('DATASETSPLIT')
		nickname = None
		if ('DATASETNICK' in pset) and (opts.collapse == 2):
			nickname = pset.pop('DATASETNICK')
		hash_str = md5_hex(repr(lmap(lambda key: pset.get(str(key)), tracked_list)))
		psp_dict.setdefault(hash_str, []).append(pset)
		psp_dict_nicks.setdefault(hash_str, set()).add(nickname)

	def _do_collate(hash_str):
		psp = psp_dict[hash_str][0]
		psp['COLLATE_JOBS'] = len(psp_dict[hash_str])
		psp['COLLATE_NICK'] = len(psp_dict_nicks[hash_str])
		return psp
	psp_list = sorted(imap(_do_collate, psp_dict), key=lambda x: tuple(imap(str, x.values())))
	return (header_list, psp_list)
Developer ID: grid-control, Project: grid-control, Lines of code: 28, Source file: parameter_info.py


Example 11: __init__

	def __init__(self, head, data, delimeter='|'):
		ConsoleTable.__init__(self)
		head = list(head)
		self._delimeter = delimeter
		self._write_line(str.join(self._delimeter, imap(lambda x: x[1], head)))
		for entry in data:
			if isinstance(entry, dict):
				self._write_line(str.join(self._delimeter, imap(lambda x: str(entry.get(x[0], '')), head)))
Developer ID: grid-control, Project: grid-control, Lines of code: 8, Source file: table.py


Example 12: _parse_status

	def _parse_status(self, value, default):
		if any(imap(lambda x: x in value, ['E', 'e'])):
			return Job.UNKNOWN
		if any(imap(lambda x: x in value, ['h', 's', 'S', 'T', 'w'])):
			return Job.QUEUED
		if any(imap(lambda x: x in value, ['r', 't'])):
			return Job.RUNNING
		return Job.READY
Developer ID: Fra-nk, Project: grid-control, Lines of code: 8, Source file: wms_gridengine.py


Example 13: _parse_status

	def _parse_status(self, value, default):
		if any(imap(value.__contains__, ['E', 'e'])):
			return Job.UNKNOWN
		if any(imap(value.__contains__, ['h', 's', 'S', 'T', 'w'])):
			return Job.QUEUED
		if any(imap(value.__contains__, ['r', 't'])):
			return Job.RUNNING
		return Job.READY
Developer ID: grid-control, Project: grid-control, Lines of code: 8, Source file: wms_gridengine.py


Example 14: process

	def process(self, pNum, splitInfo, result):
		if not self._lumi_filter.empty():
			lumi_filter = self._lumi_filter.lookup(splitInfo[DataSplitter.Nickname], is_selector = False)
			if lumi_filter:
				idxRuns = splitInfo[DataSplitter.MetadataHeader].index("Runs")
				iterRuns = ichain(imap(lambda m: m[idxRuns], splitInfo[DataSplitter.Metadata]))
				short_lumi_filter = filterLumiFilter(list(iterRuns), lumi_filter)
				result['LUMI_RANGE'] = str.join(',', imap(lambda lr: '"%s"' % lr, formatLumi(short_lumi_filter)))
Developer ID: artus-analysis, Project: grid-control, Lines of code: 8, Source file: lumi_proc.py


Example 15: process

	def process(self, pnum, partition, result):
		if self.enabled():
			lumi_filter = self._lumi_filter.lookup(partition[DataSplitter.Nickname], is_selector=False)
			if lumi_filter:
				idx_runs = partition[DataSplitter.MetadataHeader].index('Runs')
				iter_run = ichain(imap(lambda m: m[idx_runs], partition[DataSplitter.Metadata]))
				short_lumi_filter = filter_lumi_filter(list(iter_run), lumi_filter)
				iter_lumi_range_str = imap(lambda lr: '"%s"' % lr, format_lumi(short_lumi_filter))
				result['LUMI_RANGE'] = str.join(',', iter_lumi_range_str)
Developer ID: grid-control, Project: grid-control, Lines of code: 9, Source file: lumi_proc.py


Example 16: _readJob2PID

	def _readJob2PID(self):
		fp = ZipFile(self._pathJob2PID, 'r')
		try:
			self.maxN = int(fp.readline())
			if not self.maxN:
				self.maxN = None
			mapInfo = ifilter(identity, imap(str.strip, fp.readline().split(',')))
			self._mapJob2PID = dict(imap(lambda x: tuple(imap(lambda y: int(y.lstrip('!')), x.split(':'))), mapInfo))
			self._activeMap = {}
		finally:
			fp.close()
Developer ID: artus-analysis, Project: grid-control, Lines of code: 11, Source file: padapter.py
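
To make the on-disk format tangible, the snippet below reproduces the parsing expression from _readJob2PID on a hypothetical second line of the job-to-PID file; imap/ifilter are replaced by their Python 3 built-in equivalents so the demo is self-contained, and the sample data is made up.

# Illustrative only: line 1 of the file holds the maximum job count, line 2 holds
# comma-separated 'jobnum:pnum' pairs; a leading '!' on a number is stripped.
line = '0:0,1:!2,2:5'
map_info = filter(None, map(str.strip, line.split(',')))
map_job2pid = dict(map(lambda x: tuple(map(lambda y: int(y.lstrip('!')), x.split(':'))), map_info))
print(map_job2pid)  # -> {0: 0, 1: 2, 2: 5}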


Example 17: _read_jobnum2pnum

	def _read_jobnum2pnum(self):
		fp = GZipTextFile(self._path_jobnum2pnum, 'r')
		try:
			def _translate_info(jobnum_pnum_info):
				return tuple(imap(lambda x: int(x.lstrip('!')), jobnum_pnum_info.split(':', 1)))

			int(fp.readline())  # max number of jobs
			jobnum_pnum_info_iter = iidfilter(imap(str.strip, fp.readline().split(',')))
			self._map_jobnum2pnum = dict(imap(_translate_info, jobnum_pnum_info_iter))
			self._can_submit_map = {}
		finally:
			fp.close()
Developer ID: grid-control, Project: grid-control, Lines of code: 12, Source file: padapter.py


Example 18: getEntries

	def getEntries(self, path, metadata, events, seList, objStore):
		datacachePath = os.path.join(objStore.get('GC_WORKDIR', ''), 'datacache.dat')
		source = utils.QM((self._source == '') and os.path.exists(datacachePath), datacachePath, self._source)
		if source and (source not in self._lfnMap):
			pSource = DataProvider.createInstance('ListProvider', createConfig(), source)
			for (n, fl) in imap(lambda b: (b[DataProvider.Dataset], b[DataProvider.FileList]), pSource.getBlocks()):
				self._lfnMap.setdefault(source, {}).update(dict(imap(lambda fi: (self.lfnTrans(fi[DataProvider.URL]), n), fl)))
		pList = set()
		for key in ifilter(lambda k: k in metadata, self._parentKeys):
			pList.update(imap(lambda pPath: self._lfnMap.get(source, {}).get(self.lfnTrans(pPath)), metadata[key]))
		metadata['PARENT_PATH'] = lfilter(identity, pList)
		yield (path, metadata, events, seList, objStore)
Developer ID: thomas-mueller, Project: grid-control, Lines of code: 12, Source file: scanner_basic.py


Example 19: download_job

def download_job(opts, work_dn, status_mon, job_db, job_obj, jobnum):
	if job_obj.get('download') == 'True' and not opts.mark_ignore_dl:
		return status_mon.register_job_result(jobnum, 'All files already downloaded',
			JobDownloadStatus.JOB_ALREADY)

	# Read the file hash entries from job info file
	fi_list = FileInfoProcessor().process(os.path.join(work_dn, 'output', 'job_%d' % jobnum)) or []
	is_download_failed = False
	if not fi_list:
		if opts.mark_empty_fail:
			is_download_failed = True
		else:
			return status_mon.register_job_result(jobnum, 'Job has no output files',
				JobDownloadStatus.JOB_NO_OUTPUT)

	download_result_list = []
	progress = ProgressActivity('Processing output files', len(fi_list))
	for (fi_idx, fi) in enumerate(fi_list):
		progress.update_progress(fi_idx, msg='Processing output file %r' % fi[FileInfo.NameDest])
		download_result_list.append(download_single_file(opts, jobnum, fi_idx, fi, status_mon))
	progress.finish()

	is_download_failed = is_download_failed or any(imap(download_result_list.__contains__, [
		FileDownloadStatus.FILE_TIMEOUT, FileDownloadStatus.FILE_HASH_FAILED,
		FileDownloadStatus.FILE_TRANSFER_FAILED, FileDownloadStatus.FILE_MKDIR_FAILED]))
	is_download_success = all(imap([FileDownloadStatus.FILE_OK,
		FileDownloadStatus.FILE_EXISTS].__contains__, download_result_list))

	# Ignore the first opts.retry number of failed jobs
	retry_count = int(job_obj.get('download attempt', 0))
	if fi_list and is_download_failed and opts.retry and (retry_count < int(opts.retry)):
		set_job_prop(job_db, jobnum, job_obj, 'download attempt', str(retry_count + 1))
		return status_mon.register_job_result(jobnum, 'Download attempt #%d failed' % (retry_count + 1),
			JobDownloadStatus.RETRY)

	delete_files(opts, jobnum, fi_list, is_download_failed)

	if is_download_failed:
		if opts.mark_fail:
			# Mark job as failed to trigger resubmission
			job_obj.state = Job.FAILED
			job_db.commit(jobnum, job_obj)
		status_mon.register_job_result(jobnum, 'Download failed', JobDownloadStatus.JOB_FAILED)
	elif is_download_success:
		if opts.mark_dl:
			# Mark as downloaded
			set_job_prop(job_db, jobnum, job_obj, 'download', 'True')
		status_mon.register_job_result(jobnum, 'Download successful', JobDownloadStatus.JOB_OK)
	else:
		# eg. because of SE blacklist
		status_mon.register_job_result(jobnum, 'Download incomplete', JobDownloadStatus.JOB_INCOMPLETE)
Developer ID: grid-control, Project: grid-control, Lines of code: 51, Source file: se_output_download.py


Example 20: _getPartition

			def _getPartition(self, key):
				if not self._cacheKey == key / self._keySize:
					self._cacheKey = key / self._keySize
					subTarFileObj = self._tar.extractfile('%03dXX.tgz' % (key / self._keySize))
					subTarFileObj = BytesBuffer(gzip.GzipFile(fileobj = subTarFileObj).read()) # 3-4x speedup for sequential access
					self._cacheTar = tarfile.open(mode = 'r', fileobj = subTarFileObj)
				fullData = lmap(bytes2str, self._cacheTar.extractfile('%05d' % key).readlines())
				data = self._fmt.parse(lfilter(lambda x: not x.startswith('='), fullData),
					keyParser = {None: int}, valueParser = self._parserMap)
				fileList = imap(lambda x: x[1:], ifilter(lambda x: x.startswith('='), fullData))
				if DataSplitter.CommonPrefix in data:
					fileList = imap(lambda x: '%s/%s' % (data[DataSplitter.CommonPrefix], x), fileList)
				data[DataSplitter.FileList] = lmap(str.strip, fileList)
				return data
Developer ID: Fra-nk, Project: grid-control, Lines of code: 14, Source file: splitter_io.py



Note: The python_compat.imap function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.

