import, mark and search podcast

This commit is contained in:
Your Name 2023-01-29 20:05:30 -05:00 committed by Henri Dickson
parent 7eec7f50a4
commit 7b3873e649
26 changed files with 678 additions and 67 deletions

View file

@ -25,6 +25,7 @@ __all__ = (
"BasicDownloader",
"ProxiedDownloader",
"BasicImageDownloader",
"ProxiedImageDownloader",
"RESPONSE_OK",
"RESPONSE_NETWORK_ERROR",
"RESPONSE_INVALID_CONTENT",

View file

@ -70,7 +70,7 @@ class DownloadError(Exception):
class BasicDownloader:
headers = {
# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:107.0) Gecko/20100101 Firefox/107.0',
# "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:107.0) Gecko/20100101 Firefox/107.0",
"User-Agent": "Mozilla/5.0 (iPad; CPU OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
@ -129,7 +129,7 @@ class BasicDownloader:
def download(self):
resp, self.response_type = self._download(self.url)
if self.response_type == RESPONSE_OK:
if self.response_type == RESPONSE_OK and resp:
return resp
else:
raise DownloadError(self)

View file

@ -10,6 +10,7 @@ from django.contrib.contenttypes.models import ContentType
from django.utils.baseconv import base62
from simple_history.models import HistoricalRecords
import uuid
from typing import cast
from .utils import DEFAULT_ITEM_COVER, item_cover_path, resource_cover_path
from .mixins import SoftDeleteMixin
from django.conf import settings
@ -30,7 +31,8 @@ class SiteName(models.TextChoices):
IGDB = "igdb", _("IGDB")
Steam = "steam", _("Steam")
Bangumi = "bangumi", _("Bangumi")
ApplePodcast = "apple_podcast", _("苹果播客")
# ApplePodcast = "apple_podcast", _("苹果播客")
RSS = "rss", _("RSS")
class IdType(models.TextChoices):
@ -42,7 +44,7 @@ class IdType(models.TextChoices):
CUBN = "cubn", _("统一书号")
ISRC = "isrc", _("ISRC") # only for songs
GTIN = "gtin", _("GTIN UPC EAN码") # ISBN is separate
Feed = "feed", _("Feed URL")
RSS = "rss", _("RSS Feed URL")
IMDB = "imdb", _("IMDb")
TMDB_TV = "tmdb_tv", _("TMDB剧集")
TMDB_TVSeason = "tmdb_tvseason", _("TMDB剧集")
@ -104,10 +106,10 @@ class ItemCategory(models.TextChoices):
Collection = "collection", _("收藏单")
class SubItemType(models.TextChoices):
Season = "season", _("剧集分季")
Episode = "episode", _("剧集分集")
Version = "version", _("版本")
# class SubItemType(models.TextChoices):
# Season = "season", _("剧集分季")
# Episode = "episode", _("剧集分集")
# Version = "version", _("版本")
# class CreditType(models.TextChoices):
@ -244,7 +246,7 @@ class Item(SoftDeleteMixin, PolymorphicModel):
IdType.GTIN,
IdType.ISRC,
IdType.MusicBrainz,
IdType.Feed,
IdType.RSS,
IdType.IMDB,
]
for t in best_id_types:
@ -419,7 +421,7 @@ class ExternalResource(models.Model):
@property
def site_name(self):
return self.get_site().SITE_NAME
return getattr(self.get_site(), "SITE_NAME")
def update_content(self, resource_content):
self.other_lookup_ids = resource_content.lookup_ids
@ -451,7 +453,7 @@ class ExternalResource(models.Model):
app_label="catalog", model=model.lower()
).first()
if m:
return m.model_class()
return cast(Item, m).model_class()
else:
raise ValueError(f"preferred model {model} does not exist")
return None

View file

@ -89,6 +89,9 @@ class AbstractSite:
data = ResourceContent()
return data
def scrape_additional_data(self):
pass
@classmethod
def get_model_for_resource(cls, resource):
model = resource.get_preferred_model()
@ -183,6 +186,7 @@ class AbstractSite:
resource_content = ResourceContent(**preloaded_content)
else:
resource_content = self.scrape()
if resource_content:
p.update_content(resource_content)
if not p.ready:
_logger.error(f"unable to get resource {self.url} ready")
@ -194,6 +198,7 @@ class AbstractSite:
if p.item:
p.item.merge_data_from_external_resources(ignore_existing_content)
p.item.save()
self.scrape_additional_data()
if auto_link:
for linked_resource in p.required_resources:
linked_site = SiteManager.get_site_by_url(linked_resource["url"])

View file

@ -14,6 +14,11 @@ class Command(BaseCommand):
action="store_true",
help="save to database",
)
parser.add_argument(
"--force",
action="store_true",
help="force redownload",
)
def handle(self, *args, **options):
url = str(options["url"])
@ -23,7 +28,7 @@ class Command(BaseCommand):
return
self.stdout.write(f"Fetching from {site}")
if options["save"]:
resource = site.get_resource_ready()
resource = site.get_resource_ready(ignore_existing_content=options["force"])
pprint.pp(resource.metadata)
pprint.pp(site.get_item())
pprint.pp(site.get_item().metadata)

View file

@ -6,13 +6,72 @@ from django.utils.translation import gettext_lazy as _
class Podcast(Item):
category = ItemCategory.Podcast
url_path = "podcast"
demonstrative = _("这个播客")
feed_url = PrimaryLookupIdDescriptor(IdType.Feed)
apple_podcast = PrimaryLookupIdDescriptor(IdType.ApplePodcast)
demonstrative = _("这档播客")
# apple_podcast = PrimaryLookupIdDescriptor(IdType.ApplePodcast)
# ximalaya = LookupIdDescriptor(IdType.Ximalaya)
# xiaoyuzhou = LookupIdDescriptor(IdType.Xiaoyuzhou)
hosts = jsondata.ArrayField(models.CharField(), default=list)
genre = jsondata.ArrayField(
verbose_name=_("类型"),
base_field=models.CharField(blank=True, default="", max_length=200),
null=True,
blank=True,
default=list,
)
hosts = jsondata.ArrayField(
verbose_name=_("主播"),
base_field=models.CharField(blank=True, default="", max_length=200),
default=list,
)
official_site = jsondata.CharField(
verbose_name=_("官方网站"), max_length=1000, null=True, blank=True
)
METADATA_COPY_LIST = [
"title",
"brief",
"hosts",
"genre",
"official_site",
]
@property
def recent_episodes(self):
return self.episodes.all().order_by("-pub_date")[:10]
@property
def feed_url(self):
if (
self.primary_lookup_id_type != IdType.RSS
and self.primary_lookup_id_value is None
):
return None
return f"http://{self.primary_lookup_id_value}"
# class PodcastEpisode(Item):
# pass
class PodcastEpisode(Item):
category = ItemCategory.Podcast
url_path = "podcastepisode"
# uid = models.UUIDField(default=uuid.uuid4, editable=False, db_index=True)
program = models.ForeignKey(Podcast, models.CASCADE, related_name="episodes")
guid = models.CharField(null=True, max_length=1000)
pub_date = models.DateTimeField()
media_url = models.CharField(null=True, max_length=1000)
# title = models.CharField(default="", max_length=1000)
# description = models.TextField(null=True)
description_html = models.TextField(null=True)
link = models.CharField(null=True, max_length=1000)
cover_url = models.CharField(null=True, max_length=1000)
duration = models.PositiveIntegerField(null=True)
class Meta:
index_together = [
[
"program",
"pub_date",
]
]
unique_together = [["program", "guid"]]

View file

@ -3,31 +3,142 @@ from catalog.podcast.models import *
from catalog.common import *
class ApplePodcastTestCase(TestCase):
# class ApplePodcastTestCase(TestCase):
# def setUp(self):
# pass
# def test_parse(self):
# t_id = "657765158"
# t_url = "https://podcasts.apple.com/us/podcast/%E5%A4%A7%E5%86%85%E5%AF%86%E8%B0%88/id657765158"
# t_url2 = "https://podcasts.apple.com/us/podcast/id657765158"
# p1 = SiteManager.get_site_by_id_type(IdType.ApplePodcast)
# self.assertIsNotNone(p1)
# self.assertEqual(p1.validate_url(t_url), True)
# p2 = SiteManager.get_site_by_url(t_url)
# self.assertEqual(p1.id_to_url(t_id), t_url2)
# self.assertEqual(p2.url_to_id(t_url), t_id)
# @use_local_response
# def test_scrape(self):
# t_url = "https://podcasts.apple.com/gb/podcast/the-new-yorker-radio-hour/id1050430296"
# site = SiteManager.get_site_by_url(t_url)
# self.assertEqual(site.ready, False)
# self.assertEqual(site.id_value, "1050430296")
# site.get_resource_ready()
# self.assertEqual(site.resource.metadata["title"], "The New Yorker Radio Hour")
# # self.assertEqual(site.resource.metadata['feed_url'], 'http://feeds.wnyc.org/newyorkerradiohour')
# self.assertEqual(
# site.resource.metadata["feed_url"],
# "http://feeds.feedburner.com/newyorkerradiohour",
# )
class PodcastRSSFeedTestCase(TestCase):
def setUp(self):
pass
def test_parse(self):
t_id = "657765158"
t_url = "https://podcasts.apple.com/us/podcast/%E5%A4%A7%E5%86%85%E5%AF%86%E8%B0%88/id657765158"
t_url2 = "https://podcasts.apple.com/us/podcast/id657765158"
p1 = SiteManager.get_site_by_id_type(IdType.ApplePodcast)
self.assertIsNotNone(p1)
self.assertEqual(p1.validate_url(t_url), True)
p2 = SiteManager.get_site_by_url(t_url)
self.assertEqual(p1.id_to_url(t_id), t_url2)
self.assertEqual(p2.url_to_id(t_url), t_id)
t_id = "podcasts.files.bbci.co.uk/b006qykl.rss"
t_url = "https://podcasts.files.bbci.co.uk/b006qykl.rss"
site = SiteManager.get_site_by_url(t_url)
self.assertIsNotNone(site)
self.assertEqual(site.ID_TYPE, IdType.RSS)
self.assertEqual(site.id_value, t_id)
# @use_local_response
# def test_scrape_libsyn(self):
# t_url = "https://feeds.feedburner.com/TheLesserBonapartes"
# site = SiteManager.get_site_by_url(t_url)
# site.get_resource_ready()
# self.assertEqual(site.ready, True)
# metadata = site.resource.metadata
# self.assertIsNotNone(site.get_item().recent_episodes[0].title)
# self.assertIsNotNone(site.get_item().recent_episodes[0].link)
# self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)
@use_local_response
def test_scrape(self):
t_url = "https://podcasts.apple.com/gb/podcast/the-new-yorker-radio-hour/id1050430296"
def test_scrape_anchor(self):
t_url = "https://anchor.fm/s/64d6bbe0/podcast/rss"
site = SiteManager.get_site_by_url(t_url)
self.assertEqual(site.ready, False)
self.assertEqual(site.id_value, "1050430296")
site.get_resource_ready()
self.assertEqual(site.resource.metadata["title"], "The New Yorker Radio Hour")
# self.assertEqual(site.resource.metadata['feed_url'], 'http://feeds.wnyc.org/newyorkerradiohour')
self.assertEqual(site.ready, True)
metadata = site.resource.metadata
self.assertIsNotNone(site.get_item().cover.url)
self.assertIsNotNone(site.get_item().recent_episodes[0].title)
self.assertIsNotNone(site.get_item().recent_episodes[0].link)
self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)
@use_local_response
def test_scrape_digforfire(self):
t_url = "https://www.digforfire.net/digforfire_radio_feed.xml"
site = SiteManager.get_site_by_url(t_url)
site.get_resource_ready()
self.assertEqual(site.ready, True)
metadata = site.resource.metadata
self.assertIsNotNone(site.get_item().recent_episodes[0].title)
self.assertIsNotNone(site.get_item().recent_episodes[0].link)
self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)
@use_local_response
def test_scrape_bbc(self):
t_url = "https://podcasts.files.bbci.co.uk/b006qykl.rss"
site = SiteManager.get_site_by_url(t_url)
site.get_resource_ready()
self.assertEqual(site.ready, True)
metadata = site.resource.metadata
self.assertEqual(metadata["title"], "In Our Time")
self.assertEqual(
site.resource.metadata["feed_url"],
"http://feeds.feedburner.com/newyorkerradiohour",
metadata["official_site"], "http://www.bbc.co.uk/programmes/b006qykl"
)
self.assertEqual(metadata["genre"], ["History"])
self.assertEqual(metadata["hosts"], ["BBC Radio 4"])
self.assertIsNotNone(site.get_item().recent_episodes[0].title)
self.assertIsNotNone(site.get_item().recent_episodes[0].link)
self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)
@use_local_response
def test_scrape_rsshub(self):
t_url = "https://rsshub.app/ximalaya/album/51101122/0/shownote"
site = SiteManager.get_site_by_url(t_url)
site.get_resource_ready()
self.assertEqual(site.ready, True)
metadata = site.resource.metadata
self.assertEqual(metadata["title"], "梁文道 · 八分")
self.assertEqual(
metadata["official_site"], "https://www.ximalaya.com/qita/51101122/"
)
self.assertEqual(metadata["genre"], ["人文国学"])
self.assertEqual(metadata["hosts"], ["看理想vistopia"])
self.assertIsNotNone(site.get_item().recent_episodes[0].title)
self.assertIsNotNone(site.get_item().recent_episodes[0].link)
self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)
@use_local_response
def test_scrape_typlog(self):
t_url = "https://tiaodao.typlog.io/feed.xml"
site = SiteManager.get_site_by_url(t_url)
site.get_resource_ready()
self.assertEqual(site.ready, True)
metadata = site.resource.metadata
self.assertEqual(metadata["title"], "跳岛FM")
self.assertEqual(metadata["official_site"], "https://tiaodao.typlog.io/")
self.assertEqual(metadata["genre"], ["Arts", "Books"])
self.assertEqual(metadata["hosts"], ["中信出版·大方"])
self.assertIsNotNone(site.get_item().recent_episodes[0].title)
self.assertIsNotNone(site.get_item().recent_episodes[0].link)
self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)
# @use_local_response
# def test_scrape_lizhi(self):
# t_url = "http://rss.lizhi.fm/rss/14275.xml"
# site = SiteManager.get_site_by_url(t_url)
# self.assertIsNotNone(site)
# site.get_resource_ready()
# self.assertEqual(site.ready, True)
# metadata = site.resource.metadata
# self.assertEqual(metadata["title"], "大内密谈")
# self.assertEqual(metadata["genre"], ["other"])
# self.assertEqual(metadata["hosts"], ["大内密谈"])
# self.assertIsNotNone(site.get_item().recent_episodes[0].title)
# self.assertIsNotNone(site.get_item().recent_episodes[0].link)
# self.assertIsNotNone(site.get_item().recent_episodes[0].media_url)

View file

@ -240,6 +240,30 @@ class Bandcamp:
return results
class ApplePodcast:
@classmethod
def search(cls, q, page=1):
results = []
try:
search_url = f"https://itunes.apple.com/search?entity=podcast&limit={page*SEARCH_PAGE_SIZE}&term={quote_plus(q)}"
r = requests.get(search_url).json()
for p in r["results"][(page - 1) * SEARCH_PAGE_SIZE :]:
results.append(
SearchResultItem(
ItemCategory.Podcast,
SiteName.RSS,
p["feedUrl"],
p["trackName"],
p["artistName"],
"",
p["artworkUrl600"],
)
)
except Exception as e:
logger.error(f"ApplePodcast search '{q}' error: {e}")
return results
class ExternalSources:
@classmethod
def search(cls, c, q, page=1):
@ -248,7 +272,7 @@ class ExternalSources:
results = []
if c == "" or c is None:
c = "all"
if c == "all" or c == "movie":
if c == "all" or c == "movie" or c == "tv":
results.extend(TheMovieDatabase.search(q, page))
if c == "all" or c == "book":
results.extend(GoogleBooks.search(q, page))
@ -256,4 +280,6 @@ class ExternalSources:
if c == "all" or c == "music":
results.extend(Spotify.search(q, page))
results.extend(Bandcamp.search(q, page))
if c == "podcast":
results.extend(ApplePodcast.search(q, page))
return results

View file

@ -1,5 +1,7 @@
from ..common.sites import SiteManager
from .apple_podcast import ApplePodcast
# from .apple_podcast import ApplePodcast
from .rss import RSS
from .douban_book import DoubanBook
from .douban_movie import DoubanMovie
from .douban_music import DoubanMusic

View file

@ -1,21 +1,21 @@
from catalog.common import *
from catalog.models import *
import logging
from .rss import RSS
_logger = logging.getLogger(__name__)
@SiteManager.register
class ApplePodcast(AbstractSite):
SITE_NAME = SiteName.ApplePodcast
# SITE_NAME = SiteName.ApplePodcast
ID_TYPE = IdType.ApplePodcast
URL_PATTERNS = [r"https://[^.]+.apple.com/\w+/podcast/*[^/?]*/id(\d+)"]
WIKI_PROPERTY_ID = "P5842"
DEFAULT_MODEL = Podcast
@classmethod
def id_to_url(self, id_value):
def id_to_url(cls, id_value):
return "https://podcasts.apple.com/us/podcast/id" + id_value
def scrape(self):
@ -32,7 +32,7 @@ class ApplePodcast(AbstractSite):
"cover_image_url": r["artworkUrl600"],
}
)
pd.lookup_ids[IdType.Feed] = pd.metadata.get("feed_url")
pd.lookup_ids[IdType.RSS] = RSS.url_to_id(pd.metadata.get("feed_url"))
if pd.metadata["cover_image_url"]:
imgdl = BasicImageDownloader(pd.metadata["cover_image_url"], self.url)
try:

View file

@ -20,12 +20,12 @@ class Bandcamp(AbstractSite):
DEFAULT_MODEL = Album
@classmethod
def id_to_url(self, id_value):
def id_to_url(cls, id_value):
return f"https://{id_value}"
@classmethod
def validate_url_fallback(self, url):
if re.match(self.URL_PATTERN_FALLBACK, url) is None:
def validate_url_fallback(cls, url):
if re.match(cls.URL_PATTERN_FALLBACK, url) is None:
return False
parsed_url = urllib.parse.urlparse(url)
hostname = parsed_url.netloc

102
catalog/sites/rss.py Normal file
View file

@ -0,0 +1,102 @@
from catalog.common import *
from catalog.models import *
import logging
import podcastparser
import urllib.request
from django.core.cache import cache
from catalog.podcast.models import PodcastEpisode
from datetime import datetime
from django.utils.timezone import make_aware
_logger = logging.getLogger(__name__)
@SiteManager.register
class RSS(AbstractSite):
SITE_NAME = SiteName.RSS
ID_TYPE = IdType.RSS
DEFAULT_MODEL = Podcast
URL_PATTERNS = [r".+[./](rss|xml)"]
@staticmethod
def parse_feed_from_url(url):
if not url:
return None
feed = cache.get(url)
if feed:
return feed
req = urllib.request.Request(url)
req.add_header("User-Agent", "NeoDB/0.5")
try:
feed = podcastparser.parse(url, urllib.request.urlopen(req, timeout=3))
except:
return None
cache.set(url, feed, timeout=300)
return feed
@classmethod
def id_to_url(cls, id_value):
return f"https://{id_value}"
@classmethod
def url_to_id(cls, url: str):
return url.split("://")[1]
@classmethod
def validate_url_fallback(cls, url):
return cls.parse_feed_from_url(url) is not None
def scrape(self):
feed = self.parse_feed_from_url(self.url)
if not feed:
return None
pd = ResourceContent(
metadata={
"title": feed["title"],
"brief": feed["description"],
"hosts": [feed.get("itunes_author")]
if feed.get("itunes_author")
else [],
"official_site": feed.get("link"),
"cover_image_url": feed.get("cover_url"),
"genre": feed.get("itunes_categories", [None])[0],
}
)
pd.lookup_ids[IdType.RSS] = RSS.url_to_id(self.url)
if pd.metadata["cover_image_url"]:
imgdl = BasicImageDownloader(
pd.metadata["cover_image_url"], feed.get("link") or self.url
)
try:
pd.cover_image = imgdl.download().content
pd.cover_image_extention = imgdl.extention
except Exception:
_logger.warn(
f'failed to download cover for {self.url} from {pd.metadata["cover_image_url"]}'
)
return pd
def scrape_additional_data(self):
item = self.get_item()
feed = self.parse_feed_from_url(self.url)
if not feed:
return
for episode in feed["episodes"]:
PodcastEpisode.objects.get_or_create(
program=item,
guid=episode.get("guid"),
defaults={
"title": episode["title"],
"brief": episode.get("description"),
"description_html": episode.get("description_html"),
"cover_url": episode.get("episode_art_url"),
"media_url": episode.get("enclosures")[0].get("url")
if episode.get("enclosures")
else None,
"pub_date": make_aware(
datetime.fromtimestamp(episode.get("published"))
),
"duration": episode.get("duration"),
"link": episode.get("link"),
},
)

View file

@ -41,6 +41,12 @@ function catalog_init(context) {
$(".spoiler", context).on('click', function(){
$(this).toggleClass('revealed');
})
// podcast
$('.source-label__rss', context).parent().on('click', (e)=>{
e.preventDefault();
})
$('.source-label__rss', context).parent().attr('title', 'Copy link here and subscribe in your podcast app');
}
$(function() {

View file

@ -47,7 +47,7 @@
[DELETED]
{% endif %}
{% if item.merged_to_item %}
[MERGED] <a href="{{ item.merged_to_item.url }}">{{ item.merged_to_item.title }}</a>
[MERGED <a href="{{ item.merged_to_item.url }}">{{ item.merged_to_item.title }}</a>]
{% endif %}
{% block title %}
<h5 class="entity-detail__title">
@ -309,5 +309,7 @@
</div>
{% include "partial/_footer.html" %}
</div>
<div class="player">
</div>
</body>
</html>

View file

@ -10,10 +10,161 @@
{% load strip_scheme %}
{% load thumb %}
{% block opengraph %}
<script src=" https://cdn.jsdelivr.net/npm/shikwasa@2.2.0/dist/shikwasa.min.js "></script>
<link href=" https://cdn.jsdelivr.net/npm/shikwasa@2.2.0/dist/style.min.css " rel="stylesheet"></link>
{% endblock %}
<!-- class specific details -->
{% block details %}
<style type="text/css">
.entity-detail .entity-detail__img {
height: unset !important;
}
</style>
<div class="entity-detail__fields">
<div class="entity-detail__rating">
{% if item.rating and item.rating_count >= 5 %}
<span class="entity-detail__rating-star rating-star" data-rating-score="{{ item.rating | floatformat:0 }}"></span>
<span class="entity-detail__rating-score"> {{ item.rating | floatformat:1 }} </span>
<small>({{ item.rating_count }}人评分)</small>
{% else %}
<span> {% trans '评分:评分人数不足' %}</span>
{% endif %}
</div>
<div>
{% if item.genre %}{% trans '类型:' %}
{% for genre in item.genre %}
<span>{{ genre }}</span>{% if not forloop.last %} / {% endif %}
{% endfor %}
{% endif %}
</div>
<div>{% if item.official_site %}
{% trans '网站:' %}{{ item.official_site|urlizetrunc:42 }}
{% endif %}
</div>
</div>
<div class="entity-detail__fields">
<div>{% if item.hosts %}{% trans '主播:' %}
{% for host in item.hosts %}
<span {% if forloop.counter > 5 %}style="display: none;" {% endif %}>
<span class="other_title">{{ host }}</span>
{% if not forloop.last %} / {% endif %}
</span>
{% endfor %}
{% if item.hosts|length > 5 %}
<a href="javascript:void(0);" id="otherTitleMore">{% trans '更多' %}</a>
<script>
$("#otherTitleMore").on('click', function (e) {
$("span.other_title:not(:visible)").each(function (e) {
$(this).parent().removeAttr('style');
});
$(this).remove();
})
</script>
{% endif %}
{% endif %}
</div>
{% if item.last_editor and item.last_editor.preference.show_last_edit %}
<div>{% trans '最近编辑者:' %}<a href="{% url 'journal:user_profile' item.last_editor.mastodon_username %}">{{ item.last_editor | default:"" }}</a></div>
{% endif %}
<div>
{% if user.is_authenticated %}
<a href="{% url 'catalog:edit' item.url_path item.uuid %}">{% trans '编辑' %}{{ item.demonstrative }}</a>
{% endif %}
</div>
</div>
{% endblock %}
<!-- class specific sidebar -->
{% block sidebar %}
<div class="aside-section-wrapper">
<div class="action-panel" id="episodes">
<div class="action-panel__label">{% trans '最近更新' %}</div>
<div >
{% for ep in item.recent_episodes %}
<p>
<a data-media="{{ ep.media_url }}" data-cover="{{ ep.cover_url|default:item.cover }}" class="episode" href="{{ep.link}}">{{ ep.title }}</a>
</p>
{% endfor %}
</div>
<div class="action-panel__button-group action-panel__button-group--center">
<button href="#" class="podlove-subscribe-button-primary action-panel__button">{% trans '订阅' %}</button>
</div>
</div>
</div>
<script>
window.podcastData = {
"title": "{{ item.title | escapejs }}",
"subtitle": "",
"description": "{{ item.brief | escapejs }}",
"cover": "{{ item.cover.url | escapejs }}",
"feeds": [
{
"type": "audio",
"format": "mp3",
"url": "{{ item.feed_url | escapejs }}",
"variant": "high"
}
]
}
</script>
<script class="podlove-subscribe-button" src="https://cdn.podlove.org/subscribe-button/javascripts/app.js" data-json-data="podcastData" data-buttonid="primary" data-language="en" data-hide="true" data-color="#1190C0">
</script>
<script type="text/javascript">
$(()=>{
$('.episode').on('click', e=>{
e.preventDefault();
ele = e.target;
album = "{{ item.title|escapejs }}"
artists = "{{ item.hosts|join:' / '|escapejs }}"
title = $(ele).text();
cover_url = $(ele).data('cover');
media_url = $(ele).data('media');
if (!media_url) return;
if (!window.player) {
window.player = new Shikwasa.Player({
container: () => document.querySelector('.player'),
preload: 'metadata',
autoplay: true,
themeColor: '#1190C0',
fixed: {
type: 'fixed',
position: 'bottom'
},
audio: {
title: title,
cover: cover_url,
src: media_url,
album: album,
artist: artists
}
});
$('.shk-title').on('click', e=>{
window.location = "#episodes";
});
} else {
window.player.update({
title: title,
cover: cover_url,
src: media_url,
album: album,
artist: artists
})
}
window.player.play()
});
$('.footer').attr('style', 'margin-bottom: 120px !important');
});
</script>
{% endblock %}

View file

@ -88,6 +88,10 @@ $bandcamp-color-secondary: white
color: white
background-color: #4285F4
border-color: #4285F4
&.source-label__rss
color: white
background-color: #E1A02F
border-color: #E1A02F
&.source-label__bandcamp
color: $bandcamp-color-secondary
background-color: $bandcamp-color-primary

View file

@ -12,7 +12,7 @@
<div class="navbar__search-box">
<!-- <input type="search" class="" name="q" id="searchInput" required="true" value="{% for v in request.GET.values %}{{ v }}{% endfor %}" -->
<input type="search" class="" name="q" id="searchInput" required="true" value="{% if request.GET.q %}{{ request.GET.q }}{% endif %}"
placeholder="搜索书影音游戏,或输入站外条目链接如 https://movie.douban.com/subject/1297880/ 支持站点列表见页底公告栏">
placeholder="搜索书影音游戏播客,或输入站外条目链接如 https://movie.douban.com/subject/1297880/ 支持站点列表见页底公告栏">
<select class="navbar__search-dropdown" id="searchCategory" name="c">
<option value="all" {% if request.GET.c and request.GET.c == 'all' or not request.GET.c %}selected{% endif %}>{% trans '任意' %}</option>
<option value="book" {% if request.GET.c and request.GET.c == 'book' or '/books/' in request.path %}selected{% endif %}>{% trans '书籍' %}</option>
@ -20,6 +20,8 @@
<option value="tv" {% if request.GET.c and request.GET.c == 'tv' or '/tv/' in request.path %}selected{% endif %}>{% trans '剧集' %}</option>
<option value="music" {% if request.GET.c and request.GET.c == 'music' or '/music/' in request.path %}selected{% endif %}>{% trans '音乐' %}</option>
<option value="game" {% if request.GET.c and request.GET.c == 'game' or '/games/' in request.path %}selected{% endif %}>{% trans '游戏' %}</option>
<option value="podcast" {% if request.GET.c and request.GET.c == 'podcast' or '/podcast/' in request.path %}selected{% endif %}>{% trans '播客' %}</option>
</select>
</div>
<button class="navbar__dropdown-btn">• • •</button>

65
journal/importers/opml.py Normal file
View file

@ -0,0 +1,65 @@
from django.core.files import uploadedfile
import listparser
from catalog.sites.rss import RSS
import openpyxl
import re
from markdownify import markdownify as md
from datetime import datetime
import logging
import pytz
from django.conf import settings
from user_messages import api as msg
import django_rq
from common.utils import GenerateDateUUIDMediaFilePath
import os
from catalog.common import *
from catalog.common.downloaders import *
from journal.models import *
class OPMLImporter:
def __init__(self, user, visibility, mode):
self.user = user
self.visibility = visibility
self.mode = mode
def parse_file(self, uploaded_file):
return listparser.parse(uploaded_file.read()).feeds
def import_from_file(self, uploaded_file):
feeds = self.parse_file(uploaded_file)
if not feeds:
return False
django_rq.get_queue("import").enqueue(self.import_from_file_task, feeds)
return True
def import_from_file_task(self, feeds):
print(f"{self.user} import opml start")
skip = 0
if self.mode == 1:
collection = Collection.objects.create(
owner=self.user, title=f"{self.user.username}的播客订阅列表"
)
for feed in feeds:
print(f"{self.user} import {feed.url}")
res = RSS(feed.url).get_resource_ready()
if not res:
print(f"{self.user} feed error {feed.url}")
continue
item = res.item
if self.mode == 0:
mark = Mark(self.user, item)
if mark.shelfmember:
print(f"{self.user} marked, skip {feed.url}")
skip += 1
else:
mark.update(
ShelfType.PROGRESS, None, None, visibility=self.visibility
)
elif self.mode == 1:
collection.append_item(item)
print(f"{self.user} import opml end")
msg.success(
self.user,
f"OPML导入完成共处理{len(feeds)}篇,已存在{skip}篇。",
)

View file

@ -491,6 +491,9 @@ ShelfTypeNames = [
[ItemCategory.Game, ShelfType.WISHLIST, _("想玩")],
[ItemCategory.Game, ShelfType.PROGRESS, _("在玩")],
[ItemCategory.Game, ShelfType.COMPLETE, _("玩过")],
[ItemCategory.Podcast, ShelfType.WISHLIST, _("想听")],
[ItemCategory.Podcast, ShelfType.PROGRESS, _("在听")],
[ItemCategory.Podcast, ShelfType.COMPLETE, _("听过")],
]

View file

@ -0,0 +1,16 @@
{% extends "list_item_base.html" %}
{% load i18n %}
{% load highlight %}
{% block info %}
{% if item.genre %}{% trans '类型' %}:
{{ item.genre|join:" / "}}
{% endif %}
{% if item.genre %}{% trans '主播' %}:
{{ item.hosts|join:" / "}}
{% endif %}
{% endblock %}

View file

@ -712,6 +712,7 @@ def profile(request, user_name):
ItemCategory.Movie,
ItemCategory.TV,
ItemCategory.Music,
# ItemCategory.Podcast,
ItemCategory.Game,
]
for category in visbile_categories:

View file

@ -10,8 +10,15 @@ while true; do
esac
done
echo "Generating static files..."
python3 manage.py sass common/static/sass/boofilsic.sass common/static/css/boofilsic.min.css -t compressed || exit $?
python3 manage.py sass common/static/sass/boofilsic.sass common/static/css/boofilsic.css || exit $?
python3 manage.py collectstatic --noinput || exit $?
echo "Migrating database..."
python3 manage.py migrate || exit $?
echo "Checking..."
python3 manage.py check
echo "Done. You may reload app, worker and cron"

View file

@ -30,3 +30,5 @@ dnspython
typesense
markdownify
igdb-api-v4
podcastparser
listparser

View file

@ -26,6 +26,7 @@ from django.utils import timezone
import json
from django.contrib import messages
from journal.importers.opml import OPMLImporter
from journal.importers.douban import DoubanImporter
from journal.importers.goodreads import GoodreadsImporter
from journal.models import reset_visibility_for_user
@ -157,3 +158,18 @@ def import_douban(request):
else:
messages.add_message(request, messages.ERROR, _("无法识别文件。"))
return redirect(reverse("users:data"))
@login_required
def import_opml(request):
if request.method == "POST":
importer = OPMLImporter(
request.user,
int(request.POST.get("visibility", 0)),
int(request.POST.get("import_mode", 0)),
)
if importer.import_from_file(request.FILES["file"]):
messages.add_message(request, messages.INFO, _("文件上传成功,等待后台导入。"))
else:
messages.add_message(request, messages.ERROR, _("无法识别文件。"))
return redirect(reverse("users:data"))

View file

@ -25,15 +25,6 @@
<section id="content">
<div class="grid grid--reverse-order">
<div class="grid__main grid__main--reverse-order">
{% if messages %}
<div class="main-section-wrapper">
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>{{ message }}</li>
{% endfor %}
</ul>
</div>
{% endif %}
<div class="main-section-wrapper">
<div class="tools-section-wrapper">
@ -112,6 +103,37 @@
</div>
</div>
<div class="main-section-wrapper">
<div class="tools-section-wrapper">
<div class="import-panel">
<h5 class="import-panel__label">{% trans '导入播客订阅列表' %}</h5>
<div class="import-panel__body">
<form action="{% url 'users:import_opml' %}" method="POST" enctype="multipart/form-data" >
{% csrf_token %}
<div class="import-panel__checkbox">
导入方式:
<label for="opml_import_mode_0">
<input id="opml_import_mode_0" type="radio" name="import_mode" value="0" checked> 标记为在听的播客
</label>
<label for="opml_import_mode_1">
<input id="opml_import_mode_1" type="radio" name="import_mode" value="1"> 导入为新收藏单
</label>
<br>
选择OPML文件
<input type="file" name="file" id="excel" required accept=".opml,.xml">
<input type="submit" class="import-panel__button" value="{% trans '导入' %}" id="uploadBtn" />
</div>
<div>
</div>
</form>
</div>
</div>
</div>
</div>
<div class="main-section-wrapper">
<div class="tools-section-wrapper">
<div class="import-panel">

View file

@ -8,11 +8,12 @@ urlpatterns = [
path("connect/", connect, name="connect"),
path("reconnect/", reconnect, name="reconnect"),
path("data/", data, name="data"),
path("data/import_status", data_import_status, name="import_status"),
path("data/import_goodreads", import_goodreads, name="import_goodreads"),
path("data/import_douban", import_douban, name="import_douban"),
path("data/export_reviews", export_reviews, name="export_reviews"),
path("data/export_marks", export_marks, name="export_marks"),
path("data/import/status", data_import_status, name="import_status"),
path("data/import/goodreads", import_goodreads, name="import_goodreads"),
path("data/import/douban", import_douban, name="import_douban"),
path("data/import/opml", import_opml, name="import_opml"),
path("data/export/reviews", export_reviews, name="export_reviews"),
path("data/export/marks", export_marks, name="export_marks"),
path("data/sync_mastodon", sync_mastodon, name="sync_mastodon"),
path("data/reset_visibility", reset_visibility, name="reset_visibility"),
path("data/clear_data", clear_data, name="clear_data"),