
Python nameTools.makeFilenameSafe Function Code Examples


This article collects typical usage examples of the Python nameTools.makeFilenameSafe function. If you have been wondering what makeFilenameSafe does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.



A total of 20 code examples of the makeFilenameSafe function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps the system recommend better Python code samples.
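Before the examples, here is a minimal sketch of the common call pattern, assuming only what the snippets below demonstrate: makeFilenameSafe takes an arbitrary name string and returns a string that is safe to use as a filename. nameTools is a MangaCMS-internal module (usually imported as nt), so its exact sanitization rules are not reproduced here, and the title and download directory in the sketch are illustrative placeholders.

	import os
	import nameTools as nt   # MangaCMS-internal helper; aliased to "nt" as in the examples below

	raw_title = "Some Series: Chapter 01/02?"            # raw title scraped from a page (illustrative)
	fileN     = nt.makeFilenameSafe(raw_title + ".zip")  # strip/replace characters unsafe in filenames
	wholePath = os.path.join("/srv/manga/downloads", fileN)   # hypothetical download directory

	print(wholePath)

The examples that follow all repeat this shape: build a raw name from scraped metadata, pass it through makeFilenameSafe, then join the result onto a target directory with os.path.join.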

Example 1: getLink

	def getLink(self, linkDict):
		try:
			linkDict = self.getDownloadInfo(linkDict)
			images = self.getImages(linkDict)
			title  = linkDict['title']
			artist = linkDict['artist']

		except webFunctions.ContentError:
			self.updateDbEntry(linkDict["sourceUrl"], dlState=-2, downloadPath="ERROR", fileName="ERROR: FAILED")
			return False

		if images and title:
			fileN = title+" "+artist+".zip"
			fileN = nt.makeFilenameSafe(fileN)


			# self.log.info("geturl with processing", fileN)
			wholePath = os.path.join(linkDict["dirPath"], fileN)
			wholePath = self.insertCountIfFilenameExists(wholePath)
			self.log.info("Complete filepath: %s", wholePath)

			# Write all downloaded files to the archive.

			try:
				arch = zipfile.ZipFile(wholePath, "w")
			except OSError:
				title = title.encode('ascii','ignore').decode('ascii')
				fileN = title+".zip"
				fileN = nt.makeFilenameSafe(fileN)
				wholePath = os.path.join(linkDict["dirPath"], fileN)
				arch = zipfile.ZipFile(wholePath, "w")

			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
			arch.close()


			self.log.info("Successfully Saved to path: %s", wholePath)


			self.updateDbEntry(linkDict["sourceUrl"], downloadPath=linkDict["dirPath"], fileName=fileN)

			# Deduper uses the path info for relinking, so we have to dedup the item after updating the downloadPath and fileN
			dedupState = processDownload.processDownload(None, wholePath, pron=True, deleteDups=True, includePHash=True, rowId=linkDict['dbId'])
			self.log.info( "Done")

			if dedupState:
				self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)


			self.updateDbEntry(linkDict["sourceUrl"], dlState=2)


			return wholePath

		else:

			self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")

			return False
Developer: GDXN, Project: MangaCMS, Lines of code: 60, Source file: ContentLoader.py


Example 2: insertNames

	def insertNames(self, buId, names):
		self.log.info("Updating name synonym table for %s with %s name(s).", buId, len(names))
		with self.transaction() as cur:


			# delete the old names from the table, so if they're removed from the source, we'll match that.
			cur.execute("DELETE FROM {tableName} WHERE buId=%s;".format(tableName=self.nameMapTableName), (buId, ))

			alreadyAddedNames = []
			for name in names:
				fsSafeName = nt.prepFilenameForMatching(name)
				if not fsSafeName:
					fsSafeName = nt.makeFilenameSafe(name)

				# we have to block duplicate names. Generally, it's pretty common
				# for multiple names to screen down to the same name after
				# passing through `prepFilenameForMatching()`.
				if fsSafeName in alreadyAddedNames:
					continue

				alreadyAddedNames.append(fsSafeName)

				cur.execute("""INSERT INTO %s (buId, name, fsSafeName) VALUES (%%s, %%s, %%s);""" % self.nameMapTableName, (buId, name, fsSafeName))

		self.log.info("Updated!")
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 25, Source file: MonitorDbBase.py


Example 3: getLink

    def getLink(self, link):
        sourceUrl = link["sourceUrl"]
        seriesName = link["seriesName"]
        originFileName = link["originName"]

        self.updateDbEntry(sourceUrl, dlState=1)
        self.log.info("Downloading = '%s', '%s'", seriesName, originFileName)
        dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)

        if link["flags"] == None:
            link["flags"] = ""

        if newDir:
            self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
            self.conn.commit()

        try:
            content, headerName = self.getLinkFile(sourceUrl)
        except:
            self.log.error("Unrecoverable error retreiving content %s", link)
            self.log.error("Traceback: %s", traceback.format_exc())

            self.updateDbEntry(sourceUrl, dlState=-1)
            return

        headerName = urllib.parse.unquote(headerName)

        fName = "%s - %s" % (originFileName, headerName)
        fName = nt.makeFilenameSafe(fName)

        fName, ext = os.path.splitext(fName)
        fName = "%s [CXC Scans]%s" % (fName, ext)

        fqFName = os.path.join(dlPath, fName)
        self.log.info("SaveName = %s", fqFName)

        loop = 1
        while os.path.exists(fqFName):
            fName, ext = os.path.splitext(fName)
            fName = "%s (%d)%s" % (fName, loop, ext)
            fqFName = os.path.join(dlPath, fName)  # keep using the directory resolved above
            loop += 1
        self.log.info("Writing file")

        filePath, fileName = os.path.split(fqFName)

        try:
            with open(fqFName, "wb") as fp:
                fp.write(content)
        except TypeError:
            self.log.error("Failure trying to retreive content from source %s", sourceUrl)
            self.updateDbEntry(sourceUrl, dlState=-4, downloadPath=filePath, fileName=fileName)
            return
            # self.log.info( filePath)

        dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True)

        self.log.info("Done")
        self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, tags=dedupState)
        return
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 60, Source file: ContentLoader.py


Example 4: getDownloadInfo

	def getDownloadInfo(self, linkDict, soup):

		infoSection = soup.find("div", id='infobox')


		category, tags, artist = self.getCategoryTags(infoSection)
		tags = ' '.join(tags)
		linkDict['artist'] = artist
		linkDict['title'] = self.getFileName(infoSection)
		linkDict['dirPath'] = os.path.join(settings.djOnSettings["dlDir"], nt.makeFilenameSafe(category))

		if not os.path.exists(linkDict["dirPath"]):
			os.makedirs(linkDict["dirPath"])
		else:
			self.log.info("Folder Path already exists?: %s", linkDict["dirPath"])

		self.log.info("Folderpath: %s", linkDict["dirPath"])

		self.log.debug("Linkdict = ")
		for key, value in list(linkDict.items()):
			self.log.debug("		%s - %s", key, value)


		if tags:
			self.log.info("Adding tag info %s", tags)
			self.addTags(sourceUrl=linkDict["sourceUrl"], tags=tags)

		self.updateDbEntry(linkDict["sourceUrl"], seriesName=category, lastUpdate=time.time())

		return linkDict
Developer: GDXN, Project: MangaCMS, Lines of code: 30, Source file: ContentLoader.py


Example 5: getDoujinshiUploadDirectory

	def getDoujinshiUploadDirectory(self, seriesName):
		ulDir = self.getExistingDir(seriesName)

		if not ulDir:
			seriesName = nt.getCanonicalMangaUpdatesName(seriesName)
			safeFilename = nt.makeFilenameSafe(seriesName)
			matchName = nt.prepFilenameForMatching(seriesName)
			matchName = matchName.encode('latin-1', 'ignore').decode('latin-1')

			self.checkInitDirs()
			if matchName in self.unsortedDirs:
				ulDir = self.unsortedDirs[matchName]
			elif safeFilename in self.unsortedDirs:
				ulDir = self.unsortedDirs[safeFilename]
			else:

				self.log.info("Need to create container directory for %s", seriesName)
				ulDir = os.path.join(settings.mkSettings["uploadContainerDir"], settings.mkSettings["uploadDir"], safeFilename)
				try:
					self.sftp.mkdir(ulDir)
				except ftplib.error_perm:
					self.log.warn("Directory exists?")
					self.log.warn(traceback.format_exc())


		return ulDir
Developer: GDXN, Project: MangaCMS, Lines of code: 26, Source file: uploader.py


Example 6: getUploadDirectory

	def getUploadDirectory(self, seriesName):

		ulDir = self.getExistingDir(seriesName)

		if not ulDir:
			seriesName   = nt.getCanonicalMangaUpdatesName(seriesName)
			safeFilename = nt.makeFilenameSafe(seriesName)
			matchName    = nt.prepFilenameForMatching(seriesName)
			matchName    = matchName.encode('utf-8', 'ignore').decode('utf-8')

			self.checkInitDirs()
			if matchName in self.mainDirs:
				ulDir = self.mainDirs[matchName][0]
			elif seriesName in self.mainDirs:
				ulDir = self.mainDirs[seriesName][0]
			else:

				self.log.info("Need to create container directory for %s", seriesName)
				ulDir = os.path.join(settings.mkSettings["uploadContainerDir"], settings.mkSettings["uploadDir"], safeFilename)
				try:
					self.sftp.mkdir(ulDir)
				except OSError as e:
					# If the error just reports that the directory already exists, ignore it
					# silently. str(e) contains only the message text, never the class name,
					# so match on the message itself.
					if 'File already exists' in str(e):
						pass
					else:
						self.log.warn("Error creating directory?")
						self.log.warn(traceback.format_exc())


		return ulDir
Developer: GDXN, Project: MangaCMS, Lines of code: 31, Source file: uploader.py


Example 7: getUploadDirectory

	def getUploadDirectory(self, seriesName):

		ulDir = self.getExistingDir(seriesName)

		if not ulDir:
			seriesName = nt.getCanonicalMangaUpdatesName(seriesName)
			safeFilename = nt.makeFilenameSafe(seriesName)
			matchName = nt.prepFilenameForMatching(seriesName)
			matchName = matchName.encode('latin-1', 'ignore').decode('latin-1')

			self.checkInitDirs()
			if matchName in self.unsortedDirs:
				ulDir = self.unsortedDirs[matchName]
			elif safeFilename in self.unsortedDirs:
				ulDir = self.unsortedDirs[safeFilename]
			else:

				self.log.info("Need to create container directory for %s", seriesName)
				ulDir = os.path.join(settings.mkSettings["uploadContainerDir"], settings.mkSettings["uploadDir"], safeFilename)
				try:
					self.ftp.mkd(ulDir)
				except ftplib.error_perm as e:
					# If the error is just a "directory exists" warning, ignore it silently
					if str(e).startswith("550") and str(e).endswith('File exists'):
						pass
					else:
						self.log.warn("Error creating directory?")
						self.log.warn(traceback.format_exc())


		return ulDir
Developer: GodOfConquest, Project: MangaCMS, Lines of code: 31, Source file: uploader.py


Example 8: getLink

	def getLink(self, link):
		sourceUrl, originFileName = link["sourceUrl"], link["originName"]

		self.log.info( "Should retreive: %s, url - %s", originFileName, sourceUrl)

		self.updateDbEntry(sourceUrl, dlState=1)
		self.conn.commit()

		fileUrl = self.getDownloadUrl(sourceUrl)
		if fileUrl is None:
			self.log.warning("Could not find url!")
			self.deleteRowsByValue(sourceUrl=sourceUrl)
			return


		try:
			content, hName = self.getLinkFile(fileUrl, sourceUrl)
		except:
			self.log.error("Unrecoverable error retreiving content %s", link)
			self.log.error("Traceback: %s", traceback.format_exc())

			self.updateDbEntry(sourceUrl, dlState=-1)
			return

		# print("Content type = ", type(content))


		# And fix %xx crap
		hName = urllib.parse.unquote(hName)

		fName = "%s - %s" % (originFileName, hName)
		fName = nt.makeFilenameSafe(fName)

		fqFName = os.path.join(link["targetDir"], fName)
		self.log.info( "SaveName = %s", fqFName)


		loop = 1
		while os.path.exists(fqFName):
			fName = "%s - (%d) - %s" % (originFileName, loop,  hName)
			fqFName = os.path.join(link["targetDir"], fName)
			loop += 1
		self.log.info( "Writing file")

		filePath, fileName = os.path.split(fqFName)

		try:
			with open(fqFName, "wb") as fp:
				fp.write(content)
		except TypeError:
			self.log.error("Failure trying to retreive content from source %s", sourceUrl)
			return
		#self.log.info( filePath)

		dedupState = processDownload.processDownload(link["seriesName"], fqFName, deleteDups=True, includePHash=True)
		self.log.info( "Done")

		self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, tags=dedupState)
		return
Developer: GJdan, Project: MangaCMS, Lines of code: 59, Source file: jzContentLoader.py


Example 9: renameSeriesToMatchMangaUpdates

def renameSeriesToMatchMangaUpdates(scanpath):
	idLut = nt.MtNamesMapWrapper("fsName->buId")
	muLut = nt.MtNamesMapWrapper("buId->buName")
	db = DbInterface()
	print("Scanning")
	foundDirs = 0
	contents = os.listdir(scanpath)
	for dirName in contents:
		cName = nt.prepFilenameForMatching(dirName)
		mtId = idLut[cName]
		if mtId and len(mtId) > 1:
			print("Multiple mtId values for '%s' ('%s')" % (cName, dirName))
			print("	", mtId)
			print("	Skipping item")

		elif mtId:
			mtId = mtId.pop()
			mtName = muLut[mtId].pop()
			cMtName = nt.prepFilenameForMatching(mtName)
			if cMtName != cName:
				print("Dir '%s' ('%s')" % (cName, dirName))
				print("	Should be '%s'" % (mtName, ))
				print("	URL: https://www.mangaupdates.com/series.html?id=%s" % (mtId, ))
				oldPath = os.path.join(scanpath, dirName)
				newPath = os.path.join(scanpath, nt.makeFilenameSafe(mtName))
				if not os.path.isdir(oldPath):
					raise ValueError("Not a dir. Wat?")



				print("	old '%s'" % (oldPath, ))
				print("	new '%s'" % (newPath, ))

				newCl = nt.cleanUnicode(newPath)
				if newCl != newPath:
					print("Unicode oddness. Skipping")
					continue

				rating = nt.extractRatingToFloat(oldPath)

				if rating != 0:
					print("	Need to add rating = ", rating)

				mv = query_response_bool("	rename?")

				if mv:

					#
					if os.path.exists(newPath):
						print("Target dir exists! Moving files instead")
						moveFiles(oldPath, newPath)
						os.rmdir(oldPath)
						nt.dirNameProxy.changeRatingPath(newPath, rating)
					else:
						os.rename(oldPath, newPath)
						nt.dirNameProxy.changeRatingPath(newPath, rating)
			foundDirs += 1

	print("Total directories that need renaming", foundDirs)
Developer: GDXN, Project: MangaCMS, Lines of code: 59, Source file: autoOrganize.py


Example 10: doDownload

    def doDownload(self, linkDict, retag=False):

        downloadUrl = self.getDownloadUrl(linkDict["dlPage"], linkDict["sourceUrl"])

        if downloadUrl:

            fCont, fName = self.wg.getFileAndName(downloadUrl)

            # self.log.info(len(content))
            if linkDict["originName"] in fName:
                fileN = fName
            else:
                fileN = "%s - %s.zip" % (linkDict["originName"], fName)
                fileN = fileN.replace(".zip .zip", ".zip")

            fileN = nt.makeFilenameSafe(fileN)

            chop = len(fileN) - 4

            wholePath = "ERROR"
            while 1:

                try:
                    fileN = fileN[:chop] + fileN[-4:]
                    # self.log.info("geturl with processing", fileN)
                    wholePath = os.path.join(linkDict["dirPath"], fileN)
                    self.log.info("Complete filepath: %s", wholePath)

                    # Write all downloaded files to the archive.
                    with open(wholePath, "wb") as fp:
                        fp.write(fCont)
                    self.log.info("Successfully Saved to path: %s", wholePath)
                    break
                except IOError:
                    chop = chop - 1
                    self.log.warn("Truncating file length to %s characters.", chop)

            if not linkDict["tags"]:
                linkDict["tags"] = ""

            self.updateDbEntry(linkDict["sourceUrl"], downloadPath=linkDict["dirPath"], fileName=fileN)

            # Deduper uses the path info for relinking, so we have to dedup the item after updating the downloadPath and fileN
            dedupState = processDownload.processDownload(linkDict["seriesName"], wholePath, pron=True)
            self.log.info("Done")

            if dedupState:
                self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)

            self.updateDbEntry(linkDict["sourceUrl"], dlState=2)
            self.conn.commit()

        else:

            self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")

            self.conn.commit()
            return False
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 58, Source file: ContentLoader.py


Example 11: doDownload

	def doDownload(self, linkDict, retag=False):

		images = self.fetchImages(linkDict)


		# self.log.info(len(content))

		if images:
			fileN = linkDict['originName']+".zip"
			fileN = nt.makeFilenameSafe(fileN)


			# self.log.info("geturl with processing", fileN)
			wholePath = os.path.join(linkDict["dirPath"], fileN)
			self.log.info("Complete filepath: %s", wholePath)

			# Write all downloaded files to the archive.
			arch = zipfile.ZipFile(wholePath, "w")
			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
			arch.close()


			self.log.info("Successfully Saved to path: %s", wholePath)

			if not linkDict["tags"]:
				linkDict["tags"] = ""



			self.updateDbEntry(linkDict["sourceUrl"], downloadPath=linkDict["dirPath"], fileName=fileN)


			# Deduper uses the path info for relinking, so we have to dedup the item after updating the downloadPath and fileN
			dedupState = processDownload.processDownload(None, wholePath, pron=True, deleteDups=True, includePHash=True)
			self.log.info( "Done")

			if dedupState:
				self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)


			self.updateDbEntry(linkDict["sourceUrl"], dlState=2)
			self.conn.commit()




			return wholePath

		else:

			self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")

			self.conn.commit()
			return False
Developer: GJdan, Project: MangaCMS, Lines of code: 55, Source file: hbrowseContentLoader.py


Example 12: getDirAndFName

	def getDirAndFName(self, soup):
		title = soup.find("div", class_="title")
		if not title:
			raise ValueError("Could not find title. Wat?")
		titleSplit = title.get_text().split("»")
		safePath = [nt.makeFilenameSafe(item.rstrip().lstrip()) for item in titleSplit]
		fqPath = os.path.join(settings.djSettings["dlDir"], *safePath)
		dirPath, fName = fqPath.rsplit("/", 1)
		self.log.debug("dirPath = %s", dirPath)
		self.log.debug("fName = %s", fName)
		return dirPath, fName, title.get_text()
Developer: kajeagentspi, Project: MangaCMS, Lines of code: 11, Source file: djMoeContentLoader.py


Example 13: getFile

	def getFile(self, file_data):


		row = self.getRowsByValue(sourceUrl=file_data["baseUrl"], limitByKey=False)
		if row and row[0]['dlState'] != 0:
			return
		if not row:
			self.insertIntoDb(retreivalTime = time.time(),
								sourceUrl   = file_data["baseUrl"],
								originName  = file_data["title"],
								dlState     = 1,
								seriesName  = file_data["title"])

		image_links = self.getFileInfo(file_data)

		images = []
		for imagen, imageurl in image_links:
			imdat = self.get_image(imageurl, file_data['xor_key'])
			images.append((imagen, imdat))

			# filen = nt.makeFilenameSafe(file_data['title'] + " - " + imagen)
			# with open(filen, "wb") as fp:
			# 	fp.write(imdat)




		fileN = '{series} - c{chapNo:03.0f} [MangaBox].zip'.format(series=file_data['title'], chapNo=file_data['chapter'])
		fileN = nt.makeFilenameSafe(fileN)

		dlPath, newDir = self.locateOrCreateDirectoryForSeries(file_data["title"])
		wholePath = os.path.join(dlPath, fileN)


		if newDir:
			self.updateDbEntry(file_data["baseUrl"], flags="haddir")
			self.conn.commit()

		arch = zipfile.ZipFile(wholePath, "w")
		for imageName, imageContent in images:
			arch.writestr(imageName, imageContent)
		arch.close()

		self.log.info("Successfully Saved to path: %s", wholePath)

		dedupState = processDownload.processDownload(file_data["title"], wholePath, deleteDups=True)
		if dedupState:
			self.addTags(sourceUrl=file_data["baseUrl"], tags=dedupState)

		self.updateDbEntry(file_data["baseUrl"], dlState=2, downloadPath=dlPath, fileName=fileN, originName=fileN)

		self.conn.commit()
		self.log.info( "Done")
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 53, Source file: Loader.py


Example 14: getDownloadInfo

	def getDownloadInfo(self, linkDict, retag=False):
		sourcePage = linkDict["sourceUrl"]

		self.log.info("Retreiving item: %s", sourcePage)

		if not retag:
			self.updateDbEntry(linkDict["sourceUrl"], dlState=1)


		cont = self.wg.getpage(sourcePage, addlHeaders={'Referer': 'http://pururin.com/'})
		soup = bs4.BeautifulSoup(cont, "lxml")

		if not soup:
			self.log.critical("No download at url %s! SourceUrl = %s", sourcePage, linkDict["sourceUrl"])
			raise IOError("Invalid webpage")

		category, tags = self.getCategoryTags(soup)
		note = self.getNote(soup)
		tags = ' '.join(tags)

		linkDict['dirPath'] = os.path.join(settings.puSettings["dlDir"], nt.makeFilenameSafe(category))

		if not os.path.exists(linkDict["dirPath"]):
			os.makedirs(linkDict["dirPath"])
		else:
			self.log.info("Folder Path already exists?: %s", linkDict["dirPath"])


		self.log.info("Folderpath: %s", linkDict["dirPath"])
		#self.log.info(os.path.join())

		dlPage = soup.find("a", class_="link-next")
		linkDict["dlLink"] = urllib.parse.urljoin(self.urlBase, dlPage["href"])

		self.log.debug("Linkdict = ")
		for key, value in list(linkDict.items()):
			self.log.debug("		%s - %s", key, value)


		if tags:
			self.log.info("Adding tag info %s", tags)

			self.addTags(sourceUrl=linkDict["sourceUrl"], tags=tags)
		if note:
			self.log.info("Adding note %s", note)
			self.updateDbEntry(linkDict["sourceUrl"], note=note)


		self.updateDbEntry(linkDict["sourceUrl"], seriesName=category, lastUpdate=time.time())



		return linkDict
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 53, Source file: pururinContentLoader.py


Example 15: doDownload

	def doDownload(self, linkDict, retag=False):

		images = self.fetchImages(linkDict)
		# images = ['wat']
		# print(linkDict)
		# self.log.info(len(content))

		if images:
			linkDict["chapterNo"] = float(linkDict["chapterNo"])
			fileN = '{series} - c{chapNo:06.1f} - {sourceName} [crunchyroll].zip'.format(series=linkDict['seriesName'], chapNo=linkDict["chapterNo"], sourceName=linkDict['originName'])
			fileN = nt.makeFilenameSafe(fileN)


			# self.log.info("geturl with processing", fileN)
			wholePath = os.path.join(linkDict["dirPath"], fileN)
			self.log.info("Complete filepath: %s", wholePath)

			# Write all downloaded files to the archive.
			arch = zipfile.ZipFile(wholePath, "w")
			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
			arch.close()


			self.log.info("Successfully Saved to path: %s", wholePath)

			if not linkDict["tags"]:
				linkDict["tags"] = ""



			dedupState = processDownload.processDownload(linkDict["seriesName"], wholePath, deleteDups=True)
			self.log.info( "Done")


			if dedupState:
				self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)

			self.updateDbEntry(linkDict["sourceUrl"], dlState=2, downloadPath=linkDict["dirPath"], fileName=fileN, originName=fileN)

			self.conn.commit()
			return wholePath

		else:

			self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")

			self.conn.commit()
			return False
Developer: GJdan, Project: MangaCMS, Lines of code: 49, Source file: ContentLoader.py


Example 16: getFilenameFromIdName

    def getFilenameFromIdName(self, rowid, filename):
        if not os.path.exists(settings.bookCachePath):
            self.log.warn("Cache directory for book items did not exist. Creating")
            self.log.warn("Directory at path '%s'", settings.bookCachePath)
            os.makedirs(settings.bookCachePath)

        # One new directory per 1000 items.
        dirName = "%s" % (rowid // 1000)
        dirPath = os.path.join(settings.bookCachePath, dirName)
        if not os.path.exists(dirPath):
            os.mkdir(dirPath)

        filename = "ID%s - %s" % (rowid, filename)
        filename = nameTools.makeFilenameSafe(filename)
        fqpath = os.path.join(dirPath, filename)

        return fqpath
Developer: asl97, Project: MangaCMS, Lines of code: 17, Source file: TextDbBase.py


Example 17: doDownload

	def doDownload(self, seriesName, dlurl, chapter_name):


		row = self.getRowsByValue(sourceUrl=dlurl, limitByKey=False)
		if row and row[0]['dlState'] != 0:
			return

		if not row:
			self.insertIntoDb(retreivalTime = time.time(),
								sourceUrl   = dlurl,
								originName  = seriesName,
								dlState     = 1,
								seriesName  = seriesName)


		fctnt, fname = self.wg.getFileAndName(dlurl)


		fileN = '{series} - {chap} [YoManga].zip'.format(series=seriesName, chap=chapter_name)
		fileN = nt.makeFilenameSafe(fileN)

		dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)
		wholePath = os.path.join(dlPath, fileN)

		self.log.info("Source name: %s", fname)
		self.log.info("Generated name: %s", fileN)

		if newDir:
			self.updateDbEntry(dlurl, flags="haddir")
			self.conn.commit()

		with open(wholePath, "wb") as fp:
			fp.write(fctnt)

		self.log.info("Successfully Saved to path: %s", wholePath)


		dedupState = processDownload.processDownload(seriesName, wholePath, deleteDups=True)
		if dedupState:
			self.addTags(sourceUrl=dlurl, tags=dedupState)

		self.updateDbEntry(dlurl, dlState=2, downloadPath=dlPath, fileName=fileN, originName=fileN)

		self.conn.commit()
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 44, Source file: Loader.py


Example 18: locateOrCreateDirectoryForSeries

    def locateOrCreateDirectoryForSeries(self, seriesName):

        if self.shouldCanonize:
            canonSeriesName = nt.getCanonicalMangaUpdatesName(seriesName)
        else:
            canonSeriesName = seriesName

        safeBaseName = nt.makeFilenameSafe(canonSeriesName)

        if canonSeriesName in nt.dirNameProxy:
            self.log.info(
                "Have target dir for '%s' Dir = '%s'", canonSeriesName, nt.dirNameProxy[canonSeriesName]["fqPath"]
            )
            return nt.dirNameProxy[canonSeriesName]["fqPath"], False
        else:
            self.log.info("Don't have target dir for: %s, full name = %s", canonSeriesName, seriesName)
            targetDir = os.path.join(settings.baseDir, safeBaseName)
            if not os.path.exists(targetDir):
                try:
                    os.makedirs(targetDir)
                    return targetDir, True

                except FileExistsError:
                    # Probably means the directory was concurrently created by another thread in the background?
                    self.log.critical("Directory doesn't exist, and yet it does?")
                    self.log.critical(traceback.format_exc())
                    pass
                except OSError:
                    self.log.critical("Directory creation failed?")
                    self.log.critical(traceback.format_exc())

            else:
                self.log.warning("Directory not found in dir-dict, but it exists!")
                self.log.warning("Directory-Path: %s", targetDir)
                self.log.warning("Base series name: %s", seriesName)
                self.log.warning("Canonized series name: %s", canonSeriesName)
                self.log.warning("Safe canonized name: %s", safeBaseName)
            return targetDir, False
Developer: kajeagentspi, Project: MangaCMS, Lines of code: 38, Source file: RetreivalDbBase.py


Example 19: processDownloadInfo

	def processDownloadInfo(self, linkDict):

		self.updateDbEntry(linkDict["sourceUrl"], dlState=1)

		sourcePage = linkDict["sourceUrl"]
		category   = linkDict['seriesName']

		self.log.info("Retreiving item: %s", sourcePage)

		linkDict['dirPath'] = os.path.join(settings.fkSettings["dlDir"], nt.makeFilenameSafe(category))

		if not os.path.exists(linkDict["dirPath"]):
			os.makedirs(linkDict["dirPath"])
		else:
			self.log.info("Folder Path already exists?: %s", linkDict["dirPath"])

		self.log.info("Folderpath: %s", linkDict["dirPath"])

		self.log.debug("Linkdict = ")
		for key, value in list(linkDict.items()):
			self.log.debug("		%s - %s", key, value)


		return linkDict
Developer: kajeagentspi, Project: MangaCMS, Lines of code: 24, Source file: fkContentLoader.py


Example 20: getLink

	def getLink(self, link):
		sourceUrl = link["sourceUrl"]


		try:
			self.log.info( "Should retreive url - %s", sourceUrl)
			self.updateDbEntry(sourceUrl, dlState=1)

			seriesName, chapterVol, imageUrls = self.getContainerPages(sourceUrl)
			if not seriesName and not chapterVol and not imageUrls:
				self.log.critical("Failure on retreiving content at %s", sourceUrl)
				self.log.critical("Page not found - 404")
				self.updateDbEntry(sourceUrl, dlState=-1)
				return

			self.log.info("Downloading = '%s', '%s'", seriesName, chapterVol)
			dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)

			if link["flags"] == None:
				link["flags"] = ""

			if newDir:
				self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
				self.conn.commit()

			chapterNameRaw = " - ".join((seriesName, chapterVol))
			chapterName = nt.makeFilenameSafe(chapterNameRaw)

			fqFName = os.path.join(dlPath, chapterName+" [batoto].zip")

			loop = 1
			while os.path.exists(fqFName):
				fName = "%s - (%d).zip" % (chapterName, loop)
				fqFName = os.path.join(dlPath, fName)
				loop += 1
			self.log.info("Saving to archive = %s", fqFName)

			images = []
			for imgUrl in imageUrls:
				self.log.info("Fetching content for item: %s", imgUrl)
				imageName, imageContent = self.getImage(imgUrl, "http://bato.to/reader")

				images.append([imageName, imageContent])

				if not runStatus.run:
					self.log.info( "Breaking due to exit flag being set")
					self.updateDbEntry(sourceUrl, dlState=0)
					return

			self.log.info("Creating archive with %s images", len(images))

			if not images:
				self.updateDbEntry(sourceUrl, dlState=-1, seriesName=seriesName, originName=chapterNameRaw, tags="error-404")
				return

			#Write all downloaded files to the archive.
			arch = zipfile.ZipFile(fqFName, "w")
			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
			arch.close()


			dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True, includePHash=False)
			self.log.info( "Done")

			filePath, fileName = os.path.split(fqFName)
			self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, seriesName=seriesName, originName=chapterNameRaw, tags=dedupState)
			return



		except Exception:
			self.log.critical("Failure on retreiving content at %s", sourceUrl)
			self.log.critical("Traceback = %s", traceback.format_exc())
			self.updateDbEntry(sourceUrl, dlState=-1)
Developer: MyAnimeDays, Project: MangaCMS, Lines of code: 75, Source file: btContentLoader.py



Note: the nameTools.makeFilenameSafe examples in this article were compiled from GitHub, MSDocs, and other public source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors. Refer to each project's license before reusing or redistributing the code; do not republish without permission.

