new data model: finish remaining work
This commit is contained in:
parent
274fe591a1
commit
fc12938ba2
8 changed files with 658 additions and 51 deletions
236
journal/exporters/mark.py
Normal file
236
journal/exporters/mark.py
Normal file
|
@ -0,0 +1,236 @@
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from django.conf import settings
|
||||
from openpyxl import Workbook
|
||||
from common.utils import GenerateDateUUIDMediaFilePath
|
||||
from datetime import datetime
|
||||
import os
|
||||
from journal.models import *
|
||||
|
||||
|
||||
def export_marks_task(user):  # FIXME
    """Export all of *user*'s marks and reviews into one multi-sheet xlsx file.

    Side effects: sets ``user.preference.export_status["marks_pending"]`` while
    running, then records the generated file path and a timestamp in
    ``export_status`` when finished.

    FIXME: only the movie section has been migrated to the new shelf model;
    the album/book/game sections below still read the legacy ``*Mark`` models.
    """
    user.preference.export_status["marks_pending"] = True
    user.preference.save(update_fields=["export_status"])
    filename = GenerateDateUUIDMediaFilePath(
        None, "f.xlsx", settings.MEDIA_ROOT + settings.EXPORT_FILE_PATH_ROOT
    )
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    heading = ["标题", "简介", "豆瓣评分", "链接", "创建时间", "我的评分", "标签", "评论", "NeoDB链接", "其它ID"]
    wb = Workbook()
    # adding write_only=True will speed up but corrupt the xlsx and won't be importable
    for status, label in [
        (ShelfType.COMPLETE, "看过"),
        (ShelfType.PROGRESS, "在看"),
        (ShelfType.WISHLIST, "想看"),
    ]:
        ws = wb.create_sheet(title=label)
        marks = user.shelf_manager.get_members(ItemCategory.Movie, status).order_by(
            "-edited_time"
        )
        ws.append(heading)
        for mm in marks:
            mark = mm.mark
            movie = mark.item
            title = movie.title
            summary = (
                str(movie.year)
                + " / "
                + ",".join(movie.area)
                + " / "
                + ",".join(map(lambda x: str(MovieGenreTranslator[x]), movie.genre))
                + " / "
                + ",".join(movie.director)
                + " / "
                + ",".join(movie.actor)
            )
            tags = ",".join(list(map(lambda m: m.content, mark.tags)))
            # site ratings are stored on a 0-10 scale; spreadsheet uses 0-5
            world_rating = (movie.rating / 2) if movie.rating else None
            timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
            my_rating = (mark.rating / 2) if mark.rating else None
            text = mark.text
            source_url = movie.source_url
            url = settings.APP_WEBSITE + movie.url
            line = [
                title,
                summary,
                world_rating,
                source_url,
                timestamp,
                my_rating,
                tags,
                text,
                url,
                movie.imdb_code,
            ]
            ws.append(line)

    # legacy model below — see FIXME in the docstring
    for status, label in [
        (ShelfType.COMPLETE, "听过"),
        (ShelfType.PROGRESS, "在听"),
        (ShelfType.WISHLIST, "想听"),
    ]:
        ws = wb.create_sheet(title=label)
        marks = AlbumMark.objects.filter(owner=user, status=status).order_by(
            "-edited_time"
        )
        ws.append(heading)
        for mark in marks:
            album = mark.album
            title = album.title
            summary = (
                ",".join(album.artist)
                + " / "
                + (album.release_date.strftime("%Y") if album.release_date else "")
            )
            tags = ",".join(list(map(lambda m: m.content, mark.tags)))
            world_rating = (album.rating / 2) if album.rating else None
            timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
            my_rating = (mark.rating / 2) if mark.rating else None
            text = mark.text
            source_url = album.source_url
            url = settings.APP_WEBSITE + album.get_absolute_url()
            line = [
                title,
                summary,
                world_rating,
                source_url,
                timestamp,
                my_rating,
                tags,
                text,
                url,
                "",
            ]
            ws.append(line)

    # legacy model below — see FIXME in the docstring
    for status, label in [
        (ShelfType.COMPLETE, "读过"),
        (ShelfType.PROGRESS, "在读"),
        (ShelfType.WISHLIST, "想读"),
    ]:
        ws = wb.create_sheet(title=label)
        marks = BookMark.objects.filter(owner=user, status=status).order_by(
            "-edited_time"
        )
        ws.append(heading)
        for mark in marks:
            book = mark.book
            title = book.title
            summary = (
                ",".join(book.author)
                + " / "
                + str(book.pub_year)
                + " / "
                + book.pub_house
            )
            tags = ",".join(list(map(lambda m: m.content, mark.tags)))
            world_rating = (book.rating / 2) if book.rating else None
            timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
            my_rating = (mark.rating / 2) if mark.rating else None
            text = mark.text
            source_url = book.source_url
            url = settings.APP_WEBSITE + book.get_absolute_url()
            line = [
                title,
                summary,
                world_rating,
                source_url,
                timestamp,
                my_rating,
                tags,
                text,
                url,
                book.isbn,
            ]
            ws.append(line)

    # legacy model below — see FIXME in the docstring
    for status, label in [
        (ShelfType.COMPLETE, "玩过"),
        (ShelfType.PROGRESS, "在玩"),
        (ShelfType.WISHLIST, "想玩"),
    ]:
        ws = wb.create_sheet(title=label)
        marks = GameMark.objects.filter(owner=user, status=status).order_by(
            "-edited_time"
        )
        ws.append(heading)
        for mark in marks:
            game = mark.game
            title = game.title
            summary = (
                ",".join(game.genre)
                + " / "
                + ",".join(game.platform)
                + " / "
                + (game.release_date.strftime("%Y-%m-%d") if game.release_date else "")
            )
            tags = ",".join(list(map(lambda m: m.content, mark.tags)))
            world_rating = (game.rating / 2) if game.rating else None
            timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
            my_rating = (mark.rating / 2) if mark.rating else None
            text = mark.text
            source_url = game.source_url
            url = settings.APP_WEBSITE + game.get_absolute_url()
            line = [
                title,
                summary,
                world_rating,
                source_url,
                timestamp,
                my_rating,
                tags,
                text,
                url,
                "",
            ]
            ws.append(line)

    review_heading = [
        "标题",
        "评论对象",
        "链接",
        "创建时间",
        "我的评分",
        "类型",
        "内容",
        "评论对象原始链接",
        "评论对象NeoDB链接",
    ]
    for category, label in [
        (ItemCategory.Movie, "影评"),
        (ItemCategory.Book, "书评"),
        (ItemCategory.Music, "乐评"),
        (ItemCategory.Game, "游戏评论"),
    ]:
        ws = wb.create_sheet(title=label)
        # BUGFIX: previously `category` was unused, so every sheet contained
        # the user's entire review list; restrict each sheet to its own kind.
        # NOTE(review): query_item_category is expected from journal.models
        # (wildcard-imported above) — confirm the helper name.
        reviews = Review.objects.filter(
            query_item_category(category), owner=user
        ).order_by("-edited_time")
        ws.append(review_heading)
        for review in reviews:
            title = review.title
            target = "《" + review.item.title + "》"
            url = review.url
            timestamp = review.edited_time.strftime("%Y-%m-%d %H:%M:%S")
            my_rating = None  # (mark.rating / 2) if mark.rating else None
            content = review.content
            target_source_url = review.item.source_url
            target_url = review.item.absolute_url
            line = [
                title,
                target,
                url,
                timestamp,
                my_rating,
                label,
                content,
                target_source_url,
                target_url,
            ]
            ws.append(line)

    wb.save(filename=filename)
    user.preference.export_status["marks_pending"] = False
    user.preference.export_status["marks_file"] = filename
    user.preference.export_status["marks_date"] = datetime.now().strftime(
        "%Y-%m-%d %H:%M"
    )
    user.preference.save(update_fields=["export_status"])
|
@ -283,6 +283,8 @@ class DoubanImporter:
|
|||
print(f"fetching {url}")
|
||||
site.get_resource_ready()
|
||||
item = site.get_item()
|
||||
item.last_editor = user
|
||||
item.save()
|
||||
else:
|
||||
print(f"matched {url}")
|
||||
except Exception as e:
|
||||
|
|
228
journal/importers/goodreads.py
Normal file
228
journal/importers/goodreads.py
Normal file
|
@ -0,0 +1,228 @@
|
|||
import re
|
||||
from lxml import html
|
||||
from datetime import datetime
|
||||
from django.conf import settings
|
||||
from user_messages import api as msg
|
||||
import django_rq
|
||||
from django.utils.timezone import make_aware
|
||||
from catalog.common import *
|
||||
from catalog.models import *
|
||||
from journal.models import *
|
||||
from catalog.common.downloaders import *
|
||||
|
||||
|
||||
# URL patterns for the three Goodreads page types the importer understands:
# a curated list, a user's shelf (requires the ?shelf= query), and a profile.
re_list = r"^https://www.goodreads.com/list/show/\d+"
re_shelf = r"^https://www.goodreads.com/review/list/\d+[^?]*\?shelf=[^&]+"
re_profile = r"^https://www.goodreads.com/user/show/(\d+)"
# Maps Goodreads star-rating tooltip text (1..5 stars) to the 0-10 scale
# used for marks in this project.
gr_rating = {
    "did not like it": 2,
    "it was ok": 4,
    "liked it": 6,
    "really liked it": 8,
    "it was amazing": 10,
}
|
||||
|
||||
|
||||
class GoodreadsImporter:
    """Import Goodreads lists, shelves and profiles as collections/marks.

    Lists and single shelves become a Collection; a profile imports the three
    standard shelves (to-read / currently-reading / read) as marks.
    """

    @classmethod
    def import_from_url(cls, raw_url, user):
        """Validate *raw_url* and, if recognized, enqueue the import task.

        Returns True when the URL matched a supported pattern and the task
        was queued, False otherwise.
        """
        match_list = re.match(re_list, raw_url)
        match_shelf = re.match(re_shelf, raw_url)
        match_profile = re.match(re_profile, raw_url)
        if match_profile or match_shelf or match_list:
            django_rq.get_queue("doufen").enqueue(
                cls.import_from_url_task, raw_url, user
            )
            return True
        else:
            return False

    @classmethod
    def import_from_url_task(cls, url, user):
        """Background task: perform the actual import for *url*."""
        match_list = re.match(re_list, url)
        match_shelf = re.match(re_shelf, url)
        match_profile = re.match(re_profile, url)
        total = 0
        visibility = user.preference.default_visibility
        if match_list or match_shelf:
            # A list or a single shelf becomes one Collection.
            shelf = (
                cls.parse_shelf(match_shelf[0], user)
                if match_shelf
                else cls.parse_list(match_list[0], user)
            )
            if shelf["title"] and shelf["books"]:
                collection = Collection.objects.create(
                    title=shelf["title"],
                    brief=shelf["description"]
                    + "\n\nImported from [Goodreads]("
                    + url
                    + ")",
                    owner=user,
                )
                for book in shelf["books"]:
                    collection.append_item(
                        book["book"], metadata={"note": book["review"]}
                    )
                    total += 1
                collection.save()
                msg.success(user, f'成功从Goodreads导入包含{total}本书的收藏单{shelf["title"]}。')
        elif match_profile:
            # A profile imports the three standard shelves as marks.
            uid = match_profile[1]
            shelves = {
                ShelfType.WISHLIST: f"https://www.goodreads.com/review/list/{uid}?shelf=to-read",
                ShelfType.PROGRESS: f"https://www.goodreads.com/review/list/{uid}?shelf=currently-reading",
                ShelfType.COMPLETE: f"https://www.goodreads.com/review/list/{uid}?shelf=read",
            }
            for shelf_type in shelves:
                shelf_url = shelves.get(shelf_type)
                shelf = cls.parse_shelf(shelf_url, user)
                for book in shelf["books"]:
                    mark = Mark(user, book["book"])
                    # Never downgrade an existing mark: skip if already marked
                    # with the same type, already COMPLETE, or already PROGRESS
                    # when we'd only set WISHLIST.
                    if (
                        mark.shelf_type == shelf_type
                        or mark.shelf_type == ShelfType.COMPLETE
                        or (
                            mark.shelf_type == ShelfType.PROGRESS
                            and shelf_type == ShelfType.WISHLIST
                        )
                    ):
                        print(
                            f'Skip {shelf_type}/{book["book"]} bc it was marked {mark.shelf_type}'
                        )
                    else:
                        mark.update(
                            shelf_type,
                            book["review"],
                            book["rating"],
                            visibility=visibility,
                            created_time=book["last_updated"] or timezone.now(),
                        )
                    total += 1
            msg.success(user, f"成功从Goodreads用户主页导入{total}个标记。")

    @classmethod
    def get_book(cls, url, user):
        """Resolve *url* to a catalog item, fetching it if not cached yet."""
        site = SiteManager.get_site_by_url(url)
        book = site.get_item()
        if not book:
            book = site.get_resource_ready().item
            book.last_editor = user
            book.save()
        return book

    @classmethod
    def parse_shelf(cls, url, user):
        # return {'title': 'abc', books: [{'book': obj, 'rating': 10, 'review': 'txt'}, ...]}
        title = None
        books = []
        url_shelf = url + "&view=table"
        while url_shelf:
            print(f"Shelf loading {url_shelf}")
            try:
                content = BasicDownloader(url_shelf).download().html()
                title_elem = content.xpath("//span[@class='h1Shelf']/text()")
                if not title_elem:
                    print(f"Shelf parsing error {url_shelf}")
                    break
                title = title_elem[0].strip()
                print("Shelf title: " + title)
            except Exception:
                print(f"Shelf loading/parsing error {url_shelf}")
                break
            for cell in content.xpath("//tbody[@id='booksBody']/tr"):
                url_book = (
                    "https://www.goodreads.com"
                    + cell.xpath(".//td[@class='field title']//a/@href")[0].strip()
                )
                # has_review = cell.xpath(
                #     ".//td[@class='field actions']//a/text()")[0].strip() == 'view (with text)'
                rating_elem = cell.xpath(".//td[@class='field rating']//span/@title")
                rating = gr_rating.get(rating_elem[0].strip()) if rating_elem else None
                url_review = (
                    "https://www.goodreads.com"
                    + cell.xpath(".//td[@class='field actions']//a/@href")[0].strip()
                )
                review = ""
                last_updated = None
                try:
                    # BUGFIX: fetch the review page (url_review); previously
                    # this re-downloaded the shelf page (url_shelf), so the
                    # review text and reading dates were never found.
                    c2 = BasicDownloader(url_review).download().html()
                    review_elem = c2.xpath("//div[@itemprop='reviewBody']/text()")
                    review = (
                        "\n".join(p.strip() for p in review_elem) if review_elem else ""
                    )
                    date_elem = c2.xpath("//div[@class='readingTimeline__text']/text()")
                    for d in date_elem:
                        date_matched = re.search(r"(\w+)\s+(\d+),\s+(\d+)", d)
                        if date_matched:
                            last_updated = make_aware(
                                datetime.strptime(
                                    date_matched[1]
                                    + " "
                                    + date_matched[2]
                                    + " "
                                    + date_matched[3],
                                    "%B %d %Y",
                                )
                            )
                except Exception:
                    print(f"Error loading/parsing review{url_review}, ignored")
                try:
                    book = cls.get_book(url_book, user)
                    books.append(
                        {
                            "url": url_book,
                            "book": book,
                            "rating": rating,
                            "review": review,
                            "last_updated": last_updated,
                        }
                    )
                except Exception:
                    print("Error adding " + url_book)
                    pass  # likely just download error
            next_elem = content.xpath("//a[@class='next_page']/@href")
            url_shelf = (
                ("https://www.goodreads.com" + next_elem[0].strip())
                if next_elem
                else None
            )
        return {"title": title, "description": "", "books": books}

    @classmethod
    def parse_list(cls, url, user):
        # return {'title': 'abc', books: [{'book': obj, 'rating': 10, 'review': 'txt'}, ...]}
        title = None
        description = None
        books = []
        url_shelf = url
        while url_shelf:
            print(f"List loading {url_shelf}")
            content = BasicDownloader(url_shelf).download().html()
            title_elem = content.xpath('//h1[@class="gr-h1 gr-h1--serif"]/text()')
            if not title_elem:
                print(f"List parsing error {url_shelf}")
                break
            title = title_elem[0].strip()
            # Guard against lists without a description div (was an
            # unguarded [0] index that raised IndexError).
            desc_elem = content.xpath('//div[@class="mediumText"]/text()')
            description = desc_elem[0].strip() if desc_elem else ""
            print("List title: " + title)
            for link in content.xpath('//a[@class="bookTitle"]/@href'):
                url_book = "https://www.goodreads.com" + link
                try:
                    book = cls.get_book(url_book, user)
                    books.append(
                        {
                            "url": url_book,
                            "book": book,
                            "review": "",
                        }
                    )
                except Exception:
                    print("Error adding " + url_book)
                    pass  # likely just download error
            next_elem = content.xpath("//a[@class='next_page']/@href")
            url_shelf = (
                ("https://www.goodreads.com" + next_elem[0].strip())
                if next_elem
                else None
            )
        return {"title": title, "description": description, "books": books}
|
|
@ -439,7 +439,6 @@ class ListMember(Piece):
|
|||
@cached_property
|
||||
def mark(self):
|
||||
m = Mark(self.owner, self.item)
|
||||
m.shelfmember = self
|
||||
return m
|
||||
|
||||
class Meta:
|
||||
|
@ -485,6 +484,12 @@ class ShelfMember(ListMember):
|
|||
"Shelf", related_name="members", on_delete=models.CASCADE
|
||||
)
|
||||
|
||||
@cached_property
def mark(self):
    """Return a Mark wrapper for this member, linked back to itself."""
    wrapped = Mark(self.owner, self.item)
    wrapped.shelfmember = self
    return wrapped
|
||||
|
||||
|
||||
class Shelf(List):
|
||||
class Meta:
|
||||
|
@ -915,3 +920,10 @@ class Mark:
|
|||
|
||||
def delete(self):
|
||||
self.update(None, None, None, 0)
|
||||
|
||||
|
||||
def reset_visibility_for_user(user: User, visibility: int):
    """Apply *visibility* to every journal piece owned by *user*."""
    # Same four bulk updates as before, expressed as one loop (same order).
    for piece_cls in (ShelfMember, Comment, Rating, Review):
        piece_cls.objects.filter(owner=user).update(visibility=visibility)
||||
|
|
|
@ -155,6 +155,7 @@ class Command(BaseCommand):
|
|||
else:
|
||||
# TODO convert song to album
|
||||
print(f"{c.owner} {c.id} {c.title} {citem.item} were skipped")
|
||||
CollectionLink.objects.create(old_id=entity.id, new_uid=c.uid)
|
||||
qs = (
|
||||
Legacy_CollectionMark.objects.all()
|
||||
.filter(owner__is_active=True)
|
||||
|
@ -187,7 +188,7 @@ class Command(BaseCommand):
|
|||
try:
|
||||
item_link = LinkModel.objects.get(old_id=entity.item.id)
|
||||
item = Item.objects.get(uid=item_link.new_uid)
|
||||
Review.objects.create(
|
||||
review = Review.objects.create(
|
||||
owner=entity.owner,
|
||||
item=item,
|
||||
title=entity.title,
|
||||
|
@ -197,6 +198,9 @@ class Command(BaseCommand):
|
|||
created_time=entity.created_time,
|
||||
edited_time=entity.edited_time,
|
||||
)
|
||||
ReviewLink.objects.create(
|
||||
old_id=entity.id, new_uid=review.uid
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Convert failed for {typ} {entity.id}: {e}")
|
||||
if options["failstop"]:
|
||||
|
|
|
@ -1,26 +1,37 @@
|
|||
from os import link
|
||||
from django.db import models
|
||||
|
||||
|
||||
class BookLink(models.Model):
|
||||
old_id = models.IntegerField(db_index=True)
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
||||
|
||||
class MovieLink(models.Model):
|
||||
old_id = models.IntegerField(db_index=True)
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
||||
|
||||
class AlbumLink(models.Model):
|
||||
old_id = models.IntegerField(db_index=True)
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
||||
|
||||
class SongLink(models.Model):
|
||||
old_id = models.IntegerField(db_index=True)
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
||||
|
||||
class GameLink(models.Model):
|
||||
old_id = models.IntegerField(db_index=True)
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
||||
|
||||
class CollectionLink(models.Model):
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
||||
|
||||
class ReviewLink(models.Model):
|
||||
old_id = models.IntegerField(unique=True)
|
||||
new_uid = models.UUIDField()
|
||||
|
|
|
@ -40,12 +40,14 @@ from games.models import GameMark, GameReview
|
|||
from music.models import AlbumMark, SongMark, AlbumReview, SongReview
|
||||
from timeline.models import Activity
|
||||
from collection.models import Collection
|
||||
from common.importers.goodreads import GoodreadsImporter
|
||||
|
||||
if settings.ENABLE_NEW_MODEL:
|
||||
from journal.importers.douban import DoubanImporter
|
||||
from journal.importers.goodreads import GoodreadsImporter
|
||||
from journal.models import reset_visibility_for_user
|
||||
else:
|
||||
from common.importers.douban import DoubanImporter
|
||||
from common.importers.goodreads import GoodreadsImporter
|
||||
|
||||
|
||||
@mastodon_request_included
|
||||
|
@ -146,6 +148,9 @@ def reset_visibility(request):
|
|||
if request.method == "POST":
|
||||
visibility = int(request.POST.get("visibility"))
|
||||
visibility = visibility if visibility >= 0 and visibility <= 2 else 0
|
||||
if settings.ENABLE_NEW_MODEL:
|
||||
reset_visibility_for_user(request.user, visibility)
|
||||
else:
|
||||
BookMark.objects.filter(owner=request.user).update(visibility=visibility)
|
||||
MovieMark.objects.filter(owner=request.user).update(visibility=visibility)
|
||||
GameMark.objects.filter(owner=request.user).update(visibility=visibility)
|
||||
|
|
183
users/tasks.py
183
users/tasks.py
|
@ -43,87 +43,184 @@ def refresh_mastodon_data_task(user, token=None):
|
|||
|
||||
|
||||
def export_marks_task(user):
|
||||
user.preference.export_status['marks_pending'] = True
|
||||
user.preference.save(update_fields=['export_status'])
|
||||
filename = GenerateDateUUIDMediaFilePath(None, 'f.xlsx', settings.MEDIA_ROOT + settings.EXPORT_FILE_PATH_ROOT)
|
||||
user.preference.export_status["marks_pending"] = True
|
||||
user.preference.save(update_fields=["export_status"])
|
||||
filename = GenerateDateUUIDMediaFilePath(
|
||||
None, "f.xlsx", settings.MEDIA_ROOT + settings.EXPORT_FILE_PATH_ROOT
|
||||
)
|
||||
if not os.path.exists(os.path.dirname(filename)):
|
||||
os.makedirs(os.path.dirname(filename))
|
||||
heading = ['标题', '简介', '豆瓣评分', '链接', '创建时间', '我的评分', '标签', '评论', 'NeoDB链接', '其它ID']
|
||||
wb = Workbook() # adding write_only=True will speed up but corrupt the xlsx and won't be importable
|
||||
for status, label in [('collect', '看过'), ('do', '在看'), ('wish', '想看')]:
|
||||
heading = ["标题", "简介", "豆瓣评分", "链接", "创建时间", "我的评分", "标签", "评论", "NeoDB链接", "其它ID"]
|
||||
wb = (
|
||||
Workbook()
|
||||
) # adding write_only=True will speed up but corrupt the xlsx and won't be importable
|
||||
for status, label in [("collect", "看过"), ("do", "在看"), ("wish", "想看")]:
|
||||
ws = wb.create_sheet(title=label)
|
||||
marks = MovieMark.objects.filter(owner=user, status=status).order_by("-edited_time")
|
||||
marks = MovieMark.objects.filter(owner=user, status=status).order_by(
|
||||
"-edited_time"
|
||||
)
|
||||
ws.append(heading)
|
||||
for mark in marks:
|
||||
movie = mark.movie
|
||||
title = movie.title
|
||||
summary = str(movie.year) + ' / ' + ','.join(movie.area) + ' / ' + ','.join(map(lambda x: str(MovieGenreTranslator[x]), movie.genre)) + ' / ' + ','.join(movie.director) + ' / ' + ','.join(movie.actor)
|
||||
tags = ','.join(list(map(lambda m: m.content, mark.tags)))
|
||||
summary = (
|
||||
str(movie.year)
|
||||
+ " / "
|
||||
+ ",".join(movie.area)
|
||||
+ " / "
|
||||
+ ",".join(map(lambda x: str(MovieGenreTranslator[x]), movie.genre))
|
||||
+ " / "
|
||||
+ ",".join(movie.director)
|
||||
+ " / "
|
||||
+ ",".join(movie.actor)
|
||||
)
|
||||
tags = ",".join(list(map(lambda m: m.content, mark.tags)))
|
||||
world_rating = (movie.rating / 2) if movie.rating else None
|
||||
timestamp = mark.edited_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
my_rating = (mark.rating / 2) if mark.rating else None
|
||||
text = mark.text
|
||||
source_url = movie.source_url
|
||||
url = settings.APP_WEBSITE + movie.get_absolute_url()
|
||||
line = [title, summary, world_rating, source_url, timestamp, my_rating, tags, text, url, movie.imdb_code]
|
||||
line = [
|
||||
title,
|
||||
summary,
|
||||
world_rating,
|
||||
source_url,
|
||||
timestamp,
|
||||
my_rating,
|
||||
tags,
|
||||
text,
|
||||
url,
|
||||
movie.imdb_code,
|
||||
]
|
||||
ws.append(line)
|
||||
|
||||
for status, label in [('collect', '听过'), ('do', '在听'), ('wish', '想听')]:
|
||||
for status, label in [("collect", "听过"), ("do", "在听"), ("wish", "想听")]:
|
||||
ws = wb.create_sheet(title=label)
|
||||
marks = AlbumMark.objects.filter(owner=user, status=status).order_by("-edited_time")
|
||||
marks = AlbumMark.objects.filter(owner=user, status=status).order_by(
|
||||
"-edited_time"
|
||||
)
|
||||
ws.append(heading)
|
||||
for mark in marks:
|
||||
album = mark.album
|
||||
title = album.title
|
||||
summary = ','.join(album.artist) + ' / ' + (album.release_date.strftime('%Y') if album.release_date else '')
|
||||
tags = ','.join(list(map(lambda m: m.content, mark.tags)))
|
||||
summary = (
|
||||
",".join(album.artist)
|
||||
+ " / "
|
||||
+ (album.release_date.strftime("%Y") if album.release_date else "")
|
||||
)
|
||||
tags = ",".join(list(map(lambda m: m.content, mark.tags)))
|
||||
world_rating = (album.rating / 2) if album.rating else None
|
||||
timestamp = mark.edited_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
my_rating = (mark.rating / 2) if mark.rating else None
|
||||
text = mark.text
|
||||
source_url = album.source_url
|
||||
url = settings.APP_WEBSITE + album.get_absolute_url()
|
||||
line = [title, summary, world_rating, source_url, timestamp, my_rating, tags, text, url, '']
|
||||
line = [
|
||||
title,
|
||||
summary,
|
||||
world_rating,
|
||||
source_url,
|
||||
timestamp,
|
||||
my_rating,
|
||||
tags,
|
||||
text,
|
||||
url,
|
||||
"",
|
||||
]
|
||||
ws.append(line)
|
||||
|
||||
for status, label in [('collect', '读过'), ('do', '在读'), ('wish', '想读')]:
|
||||
for status, label in [("collect", "读过"), ("do", "在读"), ("wish", "想读")]:
|
||||
ws = wb.create_sheet(title=label)
|
||||
marks = BookMark.objects.filter(owner=user, status=status).order_by("-edited_time")
|
||||
marks = BookMark.objects.filter(owner=user, status=status).order_by(
|
||||
"-edited_time"
|
||||
)
|
||||
ws.append(heading)
|
||||
for mark in marks:
|
||||
book = mark.book
|
||||
title = book.title
|
||||
summary = ','.join(book.author) + ' / ' + str(book.pub_year) + ' / ' + book.pub_house
|
||||
tags = ','.join(list(map(lambda m: m.content, mark.tags)))
|
||||
summary = (
|
||||
",".join(book.author)
|
||||
+ " / "
|
||||
+ str(book.pub_year)
|
||||
+ " / "
|
||||
+ book.pub_house
|
||||
)
|
||||
tags = ",".join(list(map(lambda m: m.content, mark.tags)))
|
||||
world_rating = (book.rating / 2) if book.rating else None
|
||||
timestamp = mark.edited_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
my_rating = (mark.rating / 2) if mark.rating else None
|
||||
text = mark.text
|
||||
source_url = book.source_url
|
||||
url = settings.APP_WEBSITE + book.get_absolute_url()
|
||||
line = [title, summary, world_rating, source_url, timestamp, my_rating, tags, text, url, book.isbn]
|
||||
line = [
|
||||
title,
|
||||
summary,
|
||||
world_rating,
|
||||
source_url,
|
||||
timestamp,
|
||||
my_rating,
|
||||
tags,
|
||||
text,
|
||||
url,
|
||||
book.isbn,
|
||||
]
|
||||
ws.append(line)
|
||||
|
||||
for status, label in [('collect', '玩过'), ('do', '在玩'), ('wish', '想玩')]:
|
||||
for status, label in [("collect", "玩过"), ("do", "在玩"), ("wish", "想玩")]:
|
||||
ws = wb.create_sheet(title=label)
|
||||
marks = GameMark.objects.filter(owner=user, status=status).order_by("-edited_time")
|
||||
marks = GameMark.objects.filter(owner=user, status=status).order_by(
|
||||
"-edited_time"
|
||||
)
|
||||
ws.append(heading)
|
||||
for mark in marks:
|
||||
game = mark.game
|
||||
title = game.title
|
||||
summary = ','.join(game.genre) + ' / ' + ','.join(game.platform) + ' / ' + (game.release_date.strftime('%Y-%m-%d') if game.release_date else '')
|
||||
tags = ','.join(list(map(lambda m: m.content, mark.tags)))
|
||||
summary = (
|
||||
",".join(game.genre)
|
||||
+ " / "
|
||||
+ ",".join(game.platform)
|
||||
+ " / "
|
||||
+ (game.release_date.strftime("%Y-%m-%d") if game.release_date else "")
|
||||
)
|
||||
tags = ",".join(list(map(lambda m: m.content, mark.tags)))
|
||||
world_rating = (game.rating / 2) if game.rating else None
|
||||
timestamp = mark.edited_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
timestamp = mark.edited_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
my_rating = (mark.rating / 2) if mark.rating else None
|
||||
text = mark.text
|
||||
source_url = game.source_url
|
||||
url = settings.APP_WEBSITE + game.get_absolute_url()
|
||||
line = [title, summary, world_rating, source_url, timestamp, my_rating, tags, text, url, '']
|
||||
line = [
|
||||
title,
|
||||
summary,
|
||||
world_rating,
|
||||
source_url,
|
||||
timestamp,
|
||||
my_rating,
|
||||
tags,
|
||||
text,
|
||||
url,
|
||||
"",
|
||||
]
|
||||
ws.append(line)
|
||||
|
||||
review_heading = ['标题', '评论对象', '链接', '创建时间', '我的评分', '类型', '内容', '评论对象原始链接', '评论对象NeoDB链接']
|
||||
for ReviewModel, label in [(MovieReview, '影评'), (BookReview, '书评'), (AlbumReview, '乐评'), (GameReview, '游戏评论')]:
|
||||
review_heading = [
|
||||
"标题",
|
||||
"评论对象",
|
||||
"链接",
|
||||
"创建时间",
|
||||
"我的评分",
|
||||
"类型",
|
||||
"内容",
|
||||
"评论对象原始链接",
|
||||
"评论对象NeoDB链接",
|
||||
]
|
||||
for ReviewModel, label in [
|
||||
(MovieReview, "影评"),
|
||||
(BookReview, "书评"),
|
||||
(AlbumReview, "乐评"),
|
||||
(GameReview, "游戏评论"),
|
||||
]:
|
||||
ws = wb.create_sheet(title=label)
|
||||
reviews = ReviewModel.objects.filter(owner=user).order_by("-edited_time")
|
||||
ws.append(review_heading)
|
||||
|
@ -131,16 +228,28 @@ def export_marks_task(user):
|
|||
title = review.title
|
||||
target = "《" + review.item.title + "》"
|
||||
url = review.url
|
||||
timestamp = review.edited_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
timestamp = review.edited_time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
my_rating = None # (mark.rating / 2) if mark.rating else None
|
||||
content = review.content
|
||||
target_source_url = review.item.source_url
|
||||
target_url = review.item.absolute_url
|
||||
line = [title, target, url, timestamp, my_rating, label, content, target_source_url, target_url]
|
||||
line = [
|
||||
title,
|
||||
target,
|
||||
url,
|
||||
timestamp,
|
||||
my_rating,
|
||||
label,
|
||||
content,
|
||||
target_source_url,
|
||||
target_url,
|
||||
]
|
||||
ws.append(line)
|
||||
|
||||
wb.save(filename=filename)
|
||||
user.preference.export_status['marks_pending'] = False
|
||||
user.preference.export_status['marks_file'] = filename
|
||||
user.preference.export_status['marks_date'] = datetime.now().strftime("%Y-%m-%d %H:%M")
|
||||
user.preference.save(update_fields=['export_status'])
|
||||
user.preference.export_status["marks_pending"] = False
|
||||
user.preference.export_status["marks_file"] = filename
|
||||
user.preference.export_status["marks_date"] = datetime.now().strftime(
|
||||
"%Y-%m-%d %H:%M"
|
||||
)
|
||||
user.preference.save(update_fields=["export_status"])
|
||||
|
|
Loading…
Add table
Reference in a new issue