- Implement sorting the category, feed and article lists by time

- Implement watching YouTube and Odysee videos via mpv + yt-dlp
- Implement displaying comments from Reddit
master
VikingKong 3 years ago
parent 50c9aa97c5
commit 104be86a54

@ -16,8 +16,9 @@ class Fetcher:
result = dict([(item["id"], (item["count"], item["newestItemTimestampUsec"], Utils.timestampToDate(
item["newestItemTimestampUsec"]))) for item in response.json()["unreadcounts"]])
self.unreadCounts = result
self.categories = [{"id": item, "name": item[13:], "count": self.unreadCounts[item][0], "date": self.unreadCounts[item][2]}
for item in self.unreadCounts.keys() if item[0:13] == "user/-/label/"]
self.categories = sorted([{"id": item, "name": item[13:], "count": self.unreadCounts[item][0], "date": self.unreadCounts[item][2]}
for item in self.unreadCounts.keys() if item[0:13] == "user/-/label/"],
key=lambda item: item["date"], reverse=True)
def getSubscriptions(self):
response = httpx.get(self.URL+"/reader/api/0/subscription/list?output=json", headers=self.headers)
@ -27,7 +28,8 @@ class Fetcher:
return category in self.articles.keys()
def feedsFromCategory(self, category):
return [item for item in self.feeds if item["categories"][0]["id"] == category and self.unreadCounts[item["id"]][0] != 0]
return sorted([item for item in self.feeds if item["categories"][0]["id"] == category and self.unreadCounts[item["id"]][0] != 0],
key=lambda item: self.unreadCounts[item["id"]][2], reverse=True)
def articlesFromCategory(self, category, number):
if category not in self.articles:

@ -2,7 +2,7 @@ import urwid
import yaml
import asyncio
import warnings
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor
from API import Fetcher
from Render import Article
@ -155,7 +155,7 @@ class RightPane(urwid.ListBox):
tui.rightBox.set_title(self.article.title + " (" + str(self.article.currentPageNumber) + "/" +
str(self.article.numberOfPages) + ")")
def keypress(self, size, key): #noqa
def keypress(self, size, key): # noqa
if key in ("j", "down"):
if not self.isList:
walker = urwid.SimpleListWalker([urwid.Text(self.article.scrollDown())])
@ -202,6 +202,10 @@ class RightPane(urwid.ListBox):
top = Links(self.article.links)
tui.createOverlay(urwid.LineBox(top), len(self.article.links)+2)
return
elif key == "v":
if self.isList is False:
subprocess.Popen(['firefox', '-new-tab', self.article.url])
return
return super().keypress(size, key)
@ -216,9 +220,12 @@ class Links(urwid.ListBox):
def parseLink(self, link):
    """Open *link* with an application chosen by its type.

    Images go to feh, streaming video (YouTube/Odysee) to mpv via
    yt-dlp, everything else to a new Firefox tab. The old os.popen
    shell-string calls left over from the diff are removed: they ran
    alongside the subprocess.Popen calls (launching each viewer twice)
    and interpolated the URL into a shell command unquoted.
    """
    ext = link.split(".")[-1]
    if ext.lower() in ("jpg", "jpeg", "gif", "png", "tif", "tiff"):
        # List argv + no shell: the URL cannot be interpreted by a shell.
        subprocess.Popen(['feh', link])
    elif Utils.checkStreamingVideo(link):
        # Close the links overlay before mpv takes over the display.
        tui.destroyOverlay()
        subprocess.Popen(['mpv', "--ytdl-format=bestvideo[height<=720]+bestaudio/best[height<=720]", link])
    else:
        subprocess.Popen(['firefox', '-new-tab', link])
def keypress(self, size, key):
if key in ("j", "down"):

@ -0,0 +1,24 @@
from bs4 import BeautifulSoup
import httpx
class RedditComments:
    """Scrape top-level comment text from a Reddit post page.

    The page at *link* is fetched synchronously at construction time;
    parsed comments are produced by getComments() into self.comments.
    """

    def __init__(self, link):
        # Blocking network fetch — callers should expect latency here.
        page = httpx.get(link)
        content = page.text
        # Explicit parser avoids bs4's "no parser was explicitly specified"
        # warning and keeps parsing consistent across environments.
        self.soup = BeautifulSoup(content, "html.parser")
        # Reddit renders each comment inside a <div class="Comment"> wrapper.
        self.commentObjects = self.soup.find_all("div", "Comment")
        self.comments = []

    def getHeader(self, commentObj):
        """Return "username date" for one comment element."""
        headers = commentObj.find_all("a")
        # First anchor's href is /user/<name>/..., second anchor holds the age text.
        username = headers[0]["href"].split("/")[2]
        date = headers[1].text
        return username + " " + date

    def getText(self, commentObj):
        """Return the text of the comment's first paragraph."""
        return commentObj.find("p").text

    def getComments(self):
        """(Re)build self.comments, one header+body string per comment."""
        # Reset first so calling this more than once does not duplicate entries.
        self.comments = []
        for co in self.commentObjects:
            self.comments.append(self.getHeader(co) + "\n" + self.getText(co) + "\n")

@ -2,31 +2,42 @@ from inscriptis import get_text
import os
import Utils
import html.parser
from RedditCommentsParser import RedditComments
class LinkParser(html.parser.HTMLParser):
    """Collect the unique href target of every <a> tag fed to the parser.

    Targets accumulate in self.links (a set, so duplicates collapse).
    The diff residue that kept both the old list form (``self.links = []``
    / ``.append``) and the new set form is resolved to the set form only —
    as merged, ``append`` on a set would raise AttributeError.
    """

    def reset(self):
        # HTMLParser.__init__ calls reset(), so self.links always exists.
        super().reset()
        self.links = set()

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for (name, value) in attrs:
                if name == 'href':
                    self.links.add(value)
class Article:
def __init__(self, articleObj):
Utils.writeLog(articleObj)
content = articleObj["summary"]["content"]
parser = LinkParser()
for line in content:
parser.feed(line)
self.links = parser.links
Utils.writeLog(self.links)
self.links = list(parser.links)
self.text = get_text(content)
self.title = articleObj["title"]
self.date = Utils.timestampToDate(articleObj["timestampUsec"])
self.url = articleObj["canonical"][0]["href"]
if Utils.checkStreamingVideo(self.url):
self.links.append(self.url)
elif Utils.checkReddit(self.url):
comments_link = Utils.checkRedditComments(self.links)
if comments_link:
commentsObj = RedditComments(comments_link)
commentsObj.getComments()
for comment in commentsObj.comments:
self.text += "\n\n" + comment
self.currentPageNumber = 1
terminal_width, terminal_height = os.get_terminal_size()
terminal_width -= 76
@ -57,7 +68,6 @@ class Article:
pass
else:
self.currentPageNumber += 1
print(self.currentPageNumber)
return self.chunks[self.currentPageNumber - 1]
def scrollUp(self):
@ -65,5 +75,4 @@ class Article:
pass
else:
self.currentPageNumber -= 1
print(self.currentPageNumber)
return self.chunks[self.currentPageNumber - 1]

@ -1,10 +1,26 @@
from datetime import datetime
import re
def timestampToDate(ts):
    """Format a microsecond-resolution Unix timestamp as "YY-MM-DD HH:MM".

    *ts* may be an int or a numeric string (the API delivers strings);
    the result uses the local timezone, matching datetime.fromtimestamp.
    """
    seconds = int(ts) / 1000000
    moment = datetime.fromtimestamp(seconds)
    return moment.strftime("%y-%m-%d %H:%M")
def checkStreamingVideo(link):
    """Return True when *link* points at a supported streaming host.

    Supported hosts are YouTube and Odysee's CDN player. Both branches
    now yield a plain bool; previously the Odysee branch returned the
    raw re.Match (or None), giving an inconsistent return type.
    """
    return (re.search("^https://www.youtube.com", link) is not None
            or re.search("^https://player.odycdn.com", link) is not None)
def checkReddit(link):
    """Return True if *link* is a https://www.reddit.com URL."""
    match = re.search("^https://www.reddit.com", link)
    return match is not None
def checkRedditComments(links):
    """Return the first Reddit comments-page URL found in *links*.

    Returns False when no link matches (kept for caller compatibility).
    The previous character class [a-z1-9/] accidentally excluded the
    digit 0, silently skipping valid paths that contain it; use 0-9.
    """
    for link in links:
        if re.search("^https://www.reddit.com/[a-z0-9/]+/comments", link) is not None:
            return link
    return False
def writeLog(text):
    """Append the string form of *text* to debug.log in the working directory."""
    log_line = str(text)
    with open("debug.log", "a") as log_file:
        log_file.write(log_line)

Loading…
Cancel
Save