- Implement sorting of category, feed and article lists by time
- Implement watching YouTube and Odysee videos via mpv + yt-dlp
- Implement displaying comments from Reddit
master
VikingKong 3 years ago
parent 50c9aa97c5
commit 104be86a54

@@ -16,8 +16,9 @@ class Fetcher:
         result = dict([(item["id"], (item["count"], item["newestItemTimestampUsec"], Utils.timestampToDate(
             item["newestItemTimestampUsec"]))) for item in response.json()["unreadcounts"]])
         self.unreadCounts = result
-        self.categories = [{"id": item, "name": item[13:], "count": self.unreadCounts[item][0], "date": self.unreadCounts[item][2]}
-                           for item in self.unreadCounts.keys() if item[0:13] == "user/-/label/"]
+        self.categories = sorted([{"id": item, "name": item[13:], "count": self.unreadCounts[item][0], "date": self.unreadCounts[item][2]}
+                                   for item in self.unreadCounts.keys() if item[0:13] == "user/-/label/"],
+                                  key=lambda item: item["date"], reverse=True)
 
     def getSubscriptions(self):
         response = httpx.get(self.URL+"/reader/api/0/subscription/list?output=json", headers=self.headers)
@@ -27,7 +28,8 @@ class Fetcher:
         return category in self.articles.keys()
 
     def feedsFromCategory(self, category):
-        return [item for item in self.feeds if item["categories"][0]["id"] == category and self.unreadCounts[item["id"]][0] != 0]
+        return sorted([item for item in self.feeds if item["categories"][0]["id"] == category and self.unreadCounts[item["id"]][0] != 0],
+                      key=lambda item: self.unreadCounts[item["id"]][2], reverse=True)
 
     def articlesFromCategory(self, category, number):
         if category not in self.articles:
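
A note on the two sort keys above: both categories and feeds are ordered by the already-formatted date string (unreadCounts[id][2]) rather than the raw newestItemTimestampUsec. That works because "%y-%m-%d %H:%M" is fixed-width with the most significant field first, so lexicographic order matches chronological order. A minimal sketch of the equivalence, with made-up microsecond timestamps:

    from datetime import datetime

    def timestampToDate(ts):
        return datetime.fromtimestamp(int(ts)/1000000).strftime("%y-%m-%d %H:%M")

    stamps = ["1651300000000000", "1651400000000000", "1651500000000000"]
    newest_first = sorted((timestampToDate(ts) for ts in stamps), reverse=True)
    # Sorting the formatted strings agrees with sorting the integer timestamps
    assert newest_first == [timestampToDate(ts) for ts in sorted(stamps, key=int, reverse=True)]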

@@ -2,7 +2,7 @@ import urwid
 import yaml
 import asyncio
 import warnings
-import os
+import subprocess
 from concurrent.futures import ThreadPoolExecutor
 from API import Fetcher
 from Render import Article
@@ -155,7 +155,7 @@ class RightPane(urwid.ListBox):
             tui.rightBox.set_title(self.article.title + " (" + str(self.article.currentPageNumber) + "/" +
                                    str(self.article.numberOfPages) + ")")
 
-    def keypress(self, size, key): #noqa
+    def keypress(self, size, key): # noqa
         if key in ("j", "down"):
             if not self.isList:
                 walker = urwid.SimpleListWalker([urwid.Text(self.article.scrollDown())])
@@ -202,6 +202,10 @@ class RightPane(urwid.ListBox):
             top = Links(self.article.links)
             tui.createOverlay(urwid.LineBox(top), len(self.article.links)+2)
             return
+        elif key == "v":
+            if self.isList is False:
+                subprocess.Popen(['firefox', '-new-tab', self.article.url])
+            return
         return super().keypress(size, key)
@@ -216,9 +220,12 @@ class Links(urwid.ListBox):
     def parseLink(self, link):
         ext = link.split(".")[-1]
         if ext.lower() in ("jpg", "jpeg", "gif", "png", "tif", "tiff"):
-            os.popen('feh {link}'.format(link=link))
+            subprocess.Popen(['feh', link])
+        elif Utils.checkStreamingVideo(link):
+            tui.destroyOverlay()
+            subprocess.Popen(['mpv', "--ytdl-format=bestvideo[height<=720]+bestaudio/best[height<=720]", link])
         else:
-            os.popen('firefox -new-tab {link}'.format(link=link))
+            subprocess.Popen(['firefox', '-new-tab', link])
 
     def keypress(self, size, key):
         if key in ("j", "down"):
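
On the mpv branch above: mpv hands non-local URLs to yt-dlp (or youtube-dl) when one is available on PATH, and --ytdl-format caps the stream at 720p. Passing arguments as a list to subprocess.Popen also bypasses the shell, so a malicious URL in a feed cannot inject commands the way the old os.popen('firefox -new-tab {link}'.format(link=link)) string could. A hedged sketch of the same launch with minimal failure handling (the debug.log path follows the project's writeLog convention):

    import subprocess

    def playVideo(link):
        # mpv resolves the actual stream itself via yt-dlp; only the page URL is passed
        try:
            subprocess.Popen(['mpv',
                              '--ytdl-format=bestvideo[height<=720]+bestaudio/best[height<=720]',
                              link])
        except FileNotFoundError:
            # mpv is not installed or not on PATH
            with open("debug.log", "a") as f:
                f.write("mpv not found, cannot play " + link)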

@@ -0,0 +1,24 @@
+from bs4 import BeautifulSoup
+import httpx
+
+
+class RedditComments:
+    def __init__(self, link):
+        page = httpx.get(link)
+        content = page.text
+        self.soup = BeautifulSoup(content)
+        self.commentObjects = self.soup.find_all("div", "Comment")
+        self.comments = []
+
+    def getHeader(self, commentObj):
+        headers = commentObj.find_all("a")
+        username = headers[0]["href"].split("/")[2]
+        date = headers[1].text
+        return username + " " + date
+
+    def getText(self, commentObj):
+        return commentObj.find("p").text
+
+    def getComments(self):
+        for co in self.commentObjects:
+            self.comments.append(self.getHeader(co) + "\n" + self.getText(co) + "\n")
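
RedditCommentsParser scrapes the rendered page, so it is tied to Reddit's current markup: each comment is assumed to be a div carrying the Comment class, the first two <a> tags inside it are assumed to be the username link and the timestamp, and the body is taken from the first <p>. A site redesign breaks these selectors silently; BeautifulSoup(content) without an explicit parser also emits a warning about guessing the parser, which BeautifulSoup(content, "html.parser") would avoid. A hypothetical usage sketch (the URL is made up):

    from RedditCommentsParser import RedditComments

    rc = RedditComments("https://www.reddit.com/r/linux/comments/abc123/example_thread/")
    rc.getComments()
    for comment in rc.comments:
        print(comment)  # "username date" header, then the comment body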

@@ -2,31 +2,42 @@ from inscriptis import get_text
 import os
 import Utils
 import html.parser
+from RedditCommentsParser import RedditComments
 
 
 class LinkParser(html.parser.HTMLParser):
     def reset(self):
         super().reset()
-        self.links = []
+        self.links = set()
 
     def handle_starttag(self, tag, attrs):
         if tag == 'a':
             for (name, value) in attrs:
                 if name == 'href':
-                    self.links.append(value)
+                    self.links.add(value)
 
 
 class Article:
     def __init__(self, articleObj):
+        Utils.writeLog(articleObj)
         content = articleObj["summary"]["content"]
         parser = LinkParser()
         for line in content:
             parser.feed(line)
-        self.links = parser.links
+        self.links = list(parser.links)
+        Utils.writeLog(self.links)
         self.text = get_text(content)
         self.title = articleObj["title"]
         self.date = Utils.timestampToDate(articleObj["timestampUsec"])
+        self.url = articleObj["canonical"][0]["href"]
+        if Utils.checkStreamingVideo(self.url):
+            self.links.append(self.url)
+        elif Utils.checkReddit(self.url):
+            comments_link = Utils.checkRedditComments(self.links)
+            if comments_link:
+                commentsObj = RedditComments(comments_link)
+                commentsObj.getComments()
+                for comment in commentsObj.comments:
+                    self.text += "\n\n" + comment
         self.currentPageNumber = 1
         terminal_width, terminal_height = os.get_terminal_size()
         terminal_width -= 76
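
Taken together, Article.__init__ now records the article's canonical URL, appends it to the link list when it points at YouTube/Odysee (so the links overlay can hand it to mpv), and for Reddit posts pulls the comments page out of the extracted links and appends each comment to the article text. A minimal sketch of the input shape, inferred from the fields accessed above; all values are placeholders:

    articleObj = {
        "title": "Example article",
        "timestampUsec": "1651400000000000",
        "summary": {"content": "<p>Body with a <a href='https://example.com'>link</a></p>"},
        "canonical": [{"href": "https://example.com/post"}],
    }
    article = Article(articleObj)  # article.links holds the extracted hrefs, deduplicated by the set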
@@ -57,7 +68,6 @@ class Article:
             pass
         else:
             self.currentPageNumber += 1
-            print(self.currentPageNumber)
         return self.chunks[self.currentPageNumber - 1]
 
     def scrollUp(self):
def scrollUp(self): def scrollUp(self):
@@ -65,5 +75,4 @@ class Article:
             pass
         else:
             self.currentPageNumber -= 1
-            print(self.currentPageNumber)
         return self.chunks[self.currentPageNumber - 1]

@@ -1,10 +1,26 @@
 from datetime import datetime
+import re
 
 
 def timestampToDate(ts):
     return datetime.fromtimestamp(int(ts)/1000000).strftime("%y-%m-%d %H:%M")
 
 
+def checkStreamingVideo(link):
+    return re.search("^https://www.youtube.com", link) is not None or re.search("^https://player.odycdn.com", link)
+
+
+def checkReddit(link):
+    return re.search("^https://www.reddit.com", link) is not None
+
+
+def checkRedditComments(links):
+    for link in links:
+        if re.search("^https://www.reddit.com/[a-z1-9/]+/comments", link) is not None:
+            return link
+    return False
+
+
 def writeLog(text):
     with open("debug.log", "a") as f:
         f.write(str(text))
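
Two caveats on the new URL helpers: the unescaped dots in the patterns match any character, and checkStreamingVideo returns True for YouTube links but a bare match object (or None) for Odysee links because the second re.search is not compared with "is not None"; both results are truthy in the same way, so callers behave identically. Note also that [a-z1-9/] in checkRedditComments omits 0, underscores and uppercase, so some subreddit paths will not match. A quick sanity check with assumed example URLs:

    assert checkStreamingVideo("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
    assert checkReddit("https://www.reddit.com/r/linux/")
    assert checkRedditComments(["https://example.com/article",
                                "https://www.reddit.com/r/linux/comments/abc123/thread/"])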
