Description
P站能搜到结果,这个程序搜不到
我用pixivpy3测试了一下,是可以搜到结果的
```python
from pixivpy3 import *
import datetime
from dateutil.relativedelta import relativedelta
import os
import random
import time
from urllib.parse import urlparse
import select
import sys
from inputimeout import inputimeout, TimeoutOccurred
def randSleep(base=0.1, rand=0.5):
    """Sleep for *base* seconds plus a random jitter of up to *rand* seconds."""
    jitter = rand * random.random()
    time.sleep(base + jitter)
def getFileName(url):
    """Return the file name (final path component) of *url*."""
    path = urlparse(url).path
    return os.path.basename(path)
def my_wait():
    """Give the user a 1-second window to press Enter.

    If nothing is typed within the window, return immediately; if a key
    was pressed, print 'pause' and block until Enter is pressed again.
    """
    key_pressed = True
    try:
        inputimeout(timeout=1)
    except TimeoutOccurred:
        key_pressed = False
    if key_pressed:
        print('pause')
        input()
# --- API client and search configuration --------------------------------
aapi = AppPixivAPI()
aapi.auth(refresh_token='省略')  # NOTE(review): replace with a real refresh token

# Crawl window: walk backwards one day at a time from `bg` down to `en`.
bg = datetime.date(2024, 1, 26)
en = datetime.date(2024, 1, 1)

# Tags of interest; combined into a single "a OR b OR ..." query below.
# (The stray bare-identifier lines that used to sit here were a paste
# artifact and would raise NameError — removed.)
keys = ["GenshinImpact", "原神", "BlueArchive", "ブルーアーカイブ", "蔚蓝档案", "碧蓝档案", "HonkaiStarRail", "崩坏星穹铁道", "崩壊:スターレイル", "崩壊スターレイル"]
root = "D:/Material"  # download destination root directory

key_comb = " OR ".join(keys)
# NOTE(review): this assignment silently discards the combined tag query
# built above and searches original-art tags instead — confirm which
# query is actually intended; this may explain the "no results" report.
key_comb = "オリジナル OR 創作 OR 原创"
print(key_comb)
# --- Main crawl loop ----------------------------------------------------
# For each day from `bg` back to `en`, search the tag query and download
# every illustration with at least 4000 bookmarks into root/YYYY/MM/DD.
cnt = 0  # running count of files downloaded across the whole run


def download_with_retry(url, fold, max_fail=5):
    """Download *url* into directory *fold*, retrying on failure.

    Pauses via my_wait() between attempts and aborts the whole program
    after *max_fail* consecutive failures.
    """
    fail = 0
    while True:
        try:
            aapi.download(url, path=fold)
        except Exception:  # was a bare `except:` — narrowed so SystemExit/^C escape
            print("failure")
            fail = fail + 1
            if fail == max_fail:
                sys.exit()
            my_wait()
            continue
        break
    print("finished")
    my_wait()


while bg >= en:
    # (Removed a `for c in characters:` folder-creation loop here:
    # `characters` was never defined — NameError — and the per-character
    # folders it created were never used; downloads go to %Y/%m/%d.)
    sd = bg.strftime("%Y-%m-%d")
    print(sd)
    json_result = aapi.search_illust(key_comb, search_target="partial_match_for_tags", start_date=sd, end_date=sd)
    # Page through every result page for this day.
    while True:
        for illust in json_result.illusts:
            print(illust.title + ": " + str(illust.total_bookmarks))
            if illust.total_bookmarks >= 4000:
                print(illust)
                fold = root + "/" + bg.strftime("%Y/%m/%d")
                if not os.path.exists(fold):
                    os.makedirs(fold)
                # Single-page and multi-page illustrations expose their
                # original URLs differently; normalize to one list so the
                # (previously duplicated) download logic runs once.
                if illust.page_count == 1:
                    urls = [illust.meta_single_page.original_image_url]
                else:
                    urls = [page.image_urls.original for page in illust.meta_pages]
                for url in urls:
                    # Skip files already present on disk.
                    if os.path.exists(fold + "/" + getFileName(url)):
                        continue
                    cnt = cnt + 1
                    print(url)
                    print(str(cnt) + ": " + fold)
                    download_with_retry(url, fold)
        next_qs = aapi.parse_qs(json_result.next_url)
        randSleep(0.5)
        if next_qs is not None:
            json_result = aapi.search_illust(**next_qs)
        else:
            break
    bg = bg - datetime.timedelta(days=1)
```