#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# a at foo dot be - Alexandre Dulaunoy - http://www.foo.be/cgi-bin/wiki.pl/RssAny
#
# rssmerge.py is a simple script to gather rss feeds and merge them in reverse
# time order. Useful to keep track of recent events.
#
# this is still an early prototype and assumes that you have full control of the
# remote rss feeds (if not you may have some security issues).
#
# TODO : - rss 2.0 and atom output
#        - full html output
#
# example of use :
# python3 rssmerge.py --output phtml --maxitem 20 "http://www.foo.be/cgi-bin/wiki.pl?action=journal&tile=AdulauMessyDesk"
# "http://api.flickr.com/services/feeds/photos_public.gne?id=31797858@N00&lang=en-us&format=atom" "http://a.6f2.net/cgi-bin/gitweb.cgi?
# p=adulau/.git;a=rss" "http://www.librarything.com/rss/reviews/adulau" > /tmp/test.inc
|
|
|
import feedparser
|
2024-02-11 10:17:49 +00:00
|
|
|
import sys, os
|
2013-04-14 12:05:30 +00:00
|
|
|
import time
|
|
|
|
import datetime
|
2024-02-11 10:17:49 +00:00
|
|
|
import hashlib
|
2013-04-14 12:05:30 +00:00
|
|
|
from optparse import OptionParser
|
2024-02-11 10:17:49 +00:00
|
|
|
import html
|
2024-02-11 10:54:31 +00:00
|
|
|
from bs4 import BeautifulSoup
|
2013-04-14 12:05:30 +00:00
|
|
|
|
|
|
|
feedparser.USER_AGENT = "rssmerge.py +http://www.foo.be/"
|
|
|
|
|
|
|
|
|
2024-02-11 10:17:49 +00:00
|
|
|
def RenderMerge(itemlist, output="text", items=None, maxitem=None):
    """Print the merged feed items to stdout in the requested format.

    Parameters:
        itemlist: list of (epoch, key) tuples, assumed sorted newest first.
        output:   one of "text" (colon-separated lines), "phtml" (an HTML
                  <ul> fragment) or "markdown" (a bullet list).
        items:    mapping of key -> item dict with "title", "link" and
                  "epoch"; defaults to the module-level ``allitem`` built
                  by the fetch loop below (kept for backward compatibility).
        maxitem:  maximum number of entries to print; defaults to the
                  ``--maxitem`` command-line option.
    """
    if items is None:
        items = allitem
    if maxitem is None:
        maxitem = options.maxitem
    limit = int(maxitem)

    def _selected():
        """Yield (entry, ctime-string) pairs, stopping after `limit` items.

        Mirrors the original control flow: the limit check happens after
        each item, so a non-positive limit means "no limit".
        """
        count = 0
        for _epoch, key in itemlist:
            count += 1
            entry = items[key]
            # Keep a consistent datetime representation; entry["updated"]
            # would echo whatever format the remote feed happened to use.
            when = datetime.datetime.fromtimestamp(entry["epoch"]).ctime()
            yield entry, when
            if count == limit:
                break

    if output == "text":
        i = 0
        for entry, when in _selected():
            i += 1
            print(str(i) + ":" + entry["title"] + ":" + when + ":" + entry["link"])

    if output == "phtml":
        print("<ul>")
        for entry, when in _selected():
            print(
                '<li><a href="'
                + str(entry["link"])
                + '">'
                + html.escape(str(entry["title"]))
                + "</a> --- (<i>"
                + when
                + "</i>)</li>"
            )
        print("</ul>")

    if output == "markdown":
        for entry, when in _selected():
            print(
                "- ["
                + html.escape(str(entry["title"]))
                + "]("
                + str(entry["link"])
                + ")"
            )
|
2013-04-14 12:05:30 +00:00
|
|
|
|
|
|
|
|
|
|
|
usage = "usage: %prog [options] url"
parser = OptionParser(usage)

# Command-line options: (short flag, long flag, dest, default, help text).
for short_flag, long_flag, dest, default, helptext in [
    (
        "-m",
        "--maxitem",
        "maxitem",
        200,
        "maximum item to list in the feed, default 200",
    ),
    (
        "-s",
        "--summarysize",
        "summarysize",
        60,
        "maximum size of the summary if a title is not present",
    ),
    (
        "-o",
        "--output",
        "output",
        "text",
        "output format (text, phtml, markdown), default text",
    ),
]:
    parser.add_option(short_flag, long_flag, dest=dest, default=default, help=helptext)

# Layout of the normalized timestamps, e.g. "2007-11-10 11:25:51".
pattern = "%Y-%m-%d %H:%M:%S"

(options, args) = parser.parse_args()

# Every entry from every feed, keyed by the MD5 of the entry link.
allitem = {}
|
|
|
|
|
|
|
|
for url in args:
    # Fetch and parse the remote feed. feedparser does not raise on bad
    # content; it returns a result object whose .entries may be empty.
    d = feedparser.parse(url)

    for el in d.entries:
        # Prefer the modification date; fall back to the publication date
        # when the feed does not carry one.
        # NOTE(review): entries with neither date will raise here, exactly
        # as in the original code — confirm that is acceptable upstream.
        if "modified_parsed" in el:
            parsed = el.modified_parsed
        else:
            parsed = el.published_parsed
        eldatetime = datetime.datetime.fromtimestamp(time.mktime(parsed))
        # Epoch used both for deduplicated storage and for sorting.
        # (The original converted eldatetime to a string and re-parsed it
        # with strptime; timetuple() yields the same struct_time directly.)
        elepoch = int(time.mktime(eldatetime.timetuple()))

        # Key entries by the MD5 of their link so the same story seen in
        # several feeds is stored only once (md5 is used as a cheap stable
        # hash here, not for security).
        h = hashlib.md5()
        h.update(el.link.encode("utf-8"))
        linkkey = h.hexdigest()

        allitem[linkkey] = {}
        allitem[linkkey]["link"] = str(el.link)
        allitem[linkkey]["epoch"] = int(elepoch)
        allitem[linkkey]["updated"] = el.updated
        if "title" in el:
            allitem[linkkey]["title"] = html.unescape(el.title)
        else:
            # No title: derive one from the first --summarysize characters
            # of the tag-stripped summary.
            cleantext = BeautifulSoup(el.summary, "lxml").text
            allitem[linkkey]["title"] = cleantext[: options.summarysize]
        # The stray debug print of each title was removed: this script's
        # stdout IS the merged output (cf. the "> /tmp/test.inc" example),
        # and the print corrupted it.
|
2013-04-14 12:05:30 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Pair every stored entry with its epoch so plain tuple sorting orders
# entries by time (the unique md5 key breaks any epoch ties).
itemlist = [(entry["epoch"], key) for key, entry in allitem.items()]

# Newest entries first.
itemlist.sort()
itemlist.reverse()

RenderMerge(itemlist, options.output)
|