Merge pull request #1 from DemonKingSwarn/patch-1

Chore: made it faster
mrfluffy, 2022-06-13 12:49:57 +01:00, committed by GitHub
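
The claimed speed-up is consistent with the main change in the diff below: each page was previously fetched with a fresh requests.get() call, while the new code creates one httpx.Client and reuses it, so the connection to yande.re is pooled and kept alive between requests. A minimal sketch of that pattern with the same headers as in the diff; fetch_page is an illustrative helper, not part of the script:

import httpx

headers = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0"
}

# One client reused for every request: connections are pooled and kept alive,
# instead of a new requests.get() opening a fresh connection per page.
client = httpx.Client(headers=headers, follow_redirects=True)

def fetch_page(page_num: int) -> httpx.Response:
    # Same listing endpoint the script scrapes.
    return client.get(f"https://yande.re/post?page={page_num}")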


@@ -1,20 +1,34 @@
 #!/usr/bin/env python3
-import requests
-from bs4 import BeautifulSoup
+import httpx
+from bs4 import BeautifulSoup as bs
+import pyperclip as clip
 import os
 import subprocess
-import pyperclip as clip
-os.system('clear')
-download_path = os.environ['HOME']+"/Pictures/hart-cli"
+from os.path import expanduser
+headers = {
+    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0"
+}
+client = httpx.Client(headers=headers, follow_redirects=True)
+home = expanduser("~")
+download_path = f"{home}/pix/hart-cli"
+os.system(f"mkdir -p {download_path}")
 item = 0
 page_num = 1
-URL = "https://yande.re/post?page="+str(page_num)
-page = requests.get(URL)
+url = f"https://yande.re/post?page={page_num}"
+page = client.get(url)
 links_arr_full = []
 links_arr_preview = []
 def get_new_urls():
-    global URL
+    global url
     global page
     global page_num
     global soup
@@ -23,16 +37,19 @@ def get_new_urls():
     global links_arr_full
     global links_preview
     global links_arr_preview
+    os.system("clear")
     links_arr_full.clear
-    URL = "https://yande.re/post?page="+str(page_num)
-    page = requests.get(URL)
-    soup = BeautifulSoup(page.content, "html.parser")
     links_arr_preview.clear
+    soup = bs(page.content, "html.parser")
     main_content = soup.find(id="post-list-posts")
     main_content = str(main_content)
-    main_content = main_content.replace("smallimg","largeimg")
-    main_content = BeautifulSoup(main_content, features="lxml")
+    main_content = main_content.replace("smallimg", "largeimg")
+    main_content = bs(main_content, features="lxml")
     main_content = main_content.find(id="post-list-posts")
     links_full = main_content.find_all_next("a", class_="directlink largeimg")
     links_arr_full = []
     links_preview = main_content.find_all_next("img", class_="preview")
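
The scraping step above swaps the thumbnail class for the full-size one in the raw HTML and re-parses it to collect direct links. A rough standalone sketch of the same idea, assuming an already-fetched listing page; full_size_links is an illustrative name, not part of the script:

from bs4 import BeautifulSoup as bs

def full_size_links(html: str) -> list:
    # Rewrite the markup so the direct links carry the "largeimg" class,
    # then re-parse and collect them, mirroring what get_new_urls() does.
    soup = bs(html.replace("smallimg", "largeimg"), "html.parser")
    return [a.get("href") for a in soup.find_all("a", class_="directlink largeimg")]
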
@@ -47,39 +64,40 @@ def get_new_urls():
 def next():
     global item
     global page_num
-    os.system("clear")
     if item != len(links_arr_preview)-1:
-        item+=1
+        os.system('clear')
+        item += 1
     else:
-        page_num+=1
-        item=1
+        page_num += 1
+        item = 1
         get_new_urls()
+        os.system('clear')
-def previus():
+def previous():
     global item
     global page_num
     global links_arr_preview
-    os.system("clear")
     if item != 1:
-        item-=1
+        os.system('clear')
+        item -= 1
     else:
-        page_num-=1
+        page_num -= 1
         get_new_urls()
-        item= len(links_arr_preview)-1
+        os.system('clear')
+        item = len(links_arr_preview)-1
 def download():
     global item
     global links_arr_full
     global download_path
     command = 'echo ' + links_arr_full[item] + ' | cut -d "%" -f 2 |cut -b 3-8'
     name = subprocess.check_output(command, shell=True, text=True, encoding='utf_8')
     name = name.strip('\n')
     name = str(name)+".jpg"
     command = "curl -s -o " + download_path + "/" + name + " " + links_arr_full[item]
     os.system(command)
-    os.system('clear')
+    os.system("clear")
     get_new_urls()
@@ -99,7 +117,7 @@ while True:
     if choice == "n":
         next()
     elif choice == "p":
-        previus()
+        previous()
     elif choice == "d":
         download()
     elif choice == "c":
@@ -110,3 +128,5 @@ while True:
         exit()
     else:
         print("invalid answer")
+        exit(0)
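
For reference, download() builds the file name by piping the image URL through cut -d "%" -f 2 and cut -b 3-8, then shells out to curl. The same steps could be done directly in Python with the shared httpx client; a hypothetical equivalent, where download_current is not part of the script:

import httpx

client = httpx.Client(follow_redirects=True)  # stands in for the script's shared client

def download_current(url: str, download_path: str) -> str:
    # Field 2 of a "%"-split, bytes 3 through 8, matching the cut pipeline.
    name = url.split("%")[1][2:8] + ".jpg"
    # Fetch with the pooled client and write the bytes instead of invoking curl.
    with open(f"{download_path}/{name}", "wb") as fh:
        fh.write(client.get(url).content)
    return name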