audit log improvement

Your Name 2023-06-19 16:37:35 -04:00 committed by Henri Dickson
parent 44164de246
commit 6a6348c2e8
7 changed files with 109 additions and 103 deletions


@@ -377,13 +377,7 @@ class Item(SoftDeleteMixin, PolymorphicModel):
with connection.cursor() as cursor:
cursor.execute(f"DELETE FROM {tbl} WHERE item_ptr_id = %s", [self.pk])
obj = model.objects.get(pk=obj.pk)
LogEntry.objects.log_create(
obj,
action=LogEntry.Action.UPDATE,
changes={
"!recast": [[old_ct.model, old_ct.id], [ct.model, ct.id]],
},
)
obj.log_action({"!recast": [old_ct.model, ct.model]})
return obj
@property
@@ -536,13 +530,7 @@ class ExternalResource(models.Model):
return f"{self.pk}:{self.id_type}:{self.id_value or ''} ({self.url})"
def unlink_from_item(self):
LogEntry.objects.log_create(
self.item,
action=LogEntry.Action.UPDATE,
changes={
"__unlink__": [str(self), None],
},
)
self.item.log_action({"!unlink": [str(self), None]})
self.item = None
self.save()

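Note: the obj.log_action(...) and self.item.log_action(...) calls introduced above rely on a helper whose definition is not shown in these hunks. Judging from the LogEntry.objects.log_create(...) calls it replaces, it presumably just wraps that one django-auditlog call; a minimal mixin-style sketch (hypothetical, for illustration only, not the commit's actual implementation):

    from auditlog.models import LogEntry

    class LogActionMixin:
        # Hypothetical mixin; in this codebase the helper presumably lives on Item itself.
        def log_action(self, changes: dict):
            # Record a manual UPDATE entry carrying pseudo-field changes such as
            # {"!recast": [old_model, new_model]} or {"!unlink": [description, None]}.
            LogEntry.objects.log_create(
                self, action=LogEntry.Action.UPDATE, changes=changes
            )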

@@ -138,6 +138,9 @@ class AbstractSite:
obj["primary_lookup_id_value"] = v
resource.item = model.objects.create(**obj)
if previous_item != resource.item:
if previous_item:
previous_item.log_action({"!unmatch": [str(resource), ""]})
resource.item.log_action({"!match": ["", str(resource)]})
resource.save(update_fields=["item"])
return resource.item
@@ -149,11 +152,7 @@ class AbstractSite:
if not p.ready:
# raise ValueError(f'resource not ready for {self.url}')
return None
last_item = p.item
item = self.match_or_create_item_for_resource(p)
if last_item != p.item:
p.save()
return item
return self.match_or_create_item_for_resource(p)
@property
def ready(self):

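For context, the match/unmatch entries recorded here (like the "!recast" and "!unlink" ones above) end up as ordinary LogEntry rows, so they can be read back with django-auditlog's documented manager API. A rough sketch of listing an item's recent history (function and variable names are illustrative, not part of this commit):

    from auditlog.models import LogEntry

    def item_history(item, limit=10):
        # get_for_object() returns the LogEntry rows recorded for this instance,
        # newest first (LogEntry's default ordering is by -timestamp).
        entries = LogEntry.objects.get_for_object(item)[:limit]
        return [(e.timestamp, e.actor, e.changes) for e in entries]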

@@ -71,7 +71,20 @@ def init_catalog_audit_log():
for cls in Item.__subclasses__():
auditlog.register(
cls,
exclude_fields=["metadata", "created_time", "edited_time", "last_editor"],
exclude_fields=[
"id",
"item_ptr",
"polymorphic_ctype",
"metadata",
"created_time",
"edited_time",
"last_editor",
# related fields are not supported in django-auditlog yet
"lookup_ids",
"external_resources",
"merged_from_items",
"focused_comments",
],
)
auditlog.register(

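For context on the expanded exclude_fields list: once init_catalog_audit_log() has run, changes to excluded fields are simply left out of LogEntry.changes, while the remaining concrete fields are diffed as usual. A rough sketch of that behaviour, assuming an Edition (an Item subclass) with pk=1 exists and is importable from catalog.models; the lookup and values are illustrative:

    from auditlog.models import LogEntry
    from catalog.models import Edition

    e = Edition.objects.get(pk=1)              # illustrative lookup
    e.metadata = {"note": "not tracked"}       # excluded field: ignored by auditlog
    e.title = "Example Title (revised)"        # regular field: diffed into changes
    e.save()
    print(LogEntry.objects.get_for_object(e).first().changes)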

@@ -8,6 +8,7 @@ from rq.job import Job
from django.core.cache import cache
import hashlib
from .typesense import Indexer as TypeSenseIndexer
from auditlog.context import set_actor
# from .meilisearch import Indexer as MeiliSearchIndexer
@@ -85,7 +86,7 @@ def query_index(keywords, category=None, tag=None, page=1, prepare_external=True
return items, result.num_pages, result.count, duplicated_items
def enqueue_fetch(url, is_refetch):
def enqueue_fetch(url, is_refetch, user=None):
job_id = "fetch_" + hashlib.md5(url.encode()).hexdigest()
in_progress = False
try:
@@ -95,24 +96,25 @@ def enqueue_fetch(url, is_refetch):
in_progress = False
if not in_progress:
django_rq.get_queue("fetch").enqueue(
_fetch_task, url, is_refetch, job_id=job_id
_fetch_task, url, is_refetch, user, job_id=job_id
)
return job_id
def _fetch_task(url, is_refetch):
def _fetch_task(url, is_refetch, user):
item_url = "-"
try:
site = SiteManager.get_site_by_url(url)
if not site:
return None
site.get_resource_ready(ignore_existing_content=is_refetch)
item = site.get_item()
if item:
_logger.info(f"fetched {url} {item.url} {item}")
item_url = item.url
else:
_logger.error(f"fetch {url} failed")
except Exception as e:
_logger.error(f"fetch {url} error {e}")
return item_url
with set_actor(user):
try:
site = SiteManager.get_site_by_url(url)
if not site:
return None
site.get_resource_ready(ignore_existing_content=is_refetch)
item = site.get_item()
if item:
_logger.info(f"fetched {url} {item.url} {item}")
item_url = item.url
else:
_logger.error(f"fetch {url} failed")
except Exception as e:
_logger.error(f"fetch {url} error {e}")
return item_url

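The set_actor(user) wrapper is the key change in this task: outside the request/response cycle there is no auditlog middleware to record who triggered a change, so any LogEntry rows created during the fetch would otherwise have no actor. A minimal standalone sketch of the same pattern (function and variable names are illustrative):

    from auditlog.context import set_actor

    def refresh_item_title(item, user):
        # Everything saved inside this block is attributed to `user` in the
        # audit log, even though there is no HTTP request (and hence no
        # AuditlogMiddleware) to supply the actor.
        with set_actor(user):
            item.title = item.title.strip()
            item.save()

The same reasoning applies to the Douban and Goodreads importers further down, which also run in background workers and get wrapped in set_actor(...) below.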

@@ -68,7 +68,7 @@ def fetch(request, url, is_refetch: bool = False, site: AbstractSite | None = No
"!refetch": [url, None],
}
)
job_id = enqueue_fetch(url, is_refetch)
job_id = enqueue_fetch(url, is_refetch, request.user)
return render(
request,
"fetch_pending.html",


@@ -9,6 +9,7 @@ from user_messages import api as msg
import django_rq
from common.utils import GenerateDateUUIDMediaFilePath
import os
from auditlog.context import set_actor
from catalog.common import *
from catalog.common.downloaders import *
from catalog.sites.douban import DoubanDownloader
@@ -188,13 +189,14 @@ class DoubanImporter:
print(f"{self.user} import start")
msg.info(self.user, f"开始导入豆瓣标记和评论")
self.update_user_import_status(1)
self.load_sheets()
print(f"{self.user} sheet loaded, {self.total} lines total")
self.update_user_import_status(1)
for name, param in self.mark_sheet_config.items():
self.import_mark_sheet(self.mark_data[name], param[0], name)
for name, param in self.review_sheet_config.items():
self.import_review_sheet(self.review_data[name], name)
with set_actor(self.user):
self.load_sheets()
print(f"{self.user} sheet loaded, {self.total} lines total")
self.update_user_import_status(1)
for name, param in self.mark_sheet_config.items():
self.import_mark_sheet(self.mark_data[name], param[0], name)
for name, param in self.review_sheet_config.items():
self.import_review_sheet(self.review_data[name], name)
self.update_user_import_status(0)
msg.success(
self.user,


@@ -3,6 +3,7 @@ from datetime import datetime
from user_messages import api as msg
import django_rq
from django.utils.timezone import make_aware
from auditlog.context import set_actor
from catalog.common import *
from catalog.models import *
from journal.models import *
@@ -42,65 +43,66 @@ class GoodreadsImporter:
match_profile = re.match(re_profile, url)
total = 0
visibility = user.preference.default_visibility
if match_list or match_shelf:
shelf = (
cls.parse_shelf(match_shelf[0], user)
if match_shelf
else cls.parse_list(match_list[0], user)
)
if shelf["title"] and shelf["books"]:
collection = Collection.objects.create(
title=shelf["title"],
brief=shelf["description"]
+ "\n\nImported from [Goodreads]("
+ url
+ ")",
owner=user,
with set_actor(user):
if match_list or match_shelf:
shelf = (
cls.parse_shelf(match_shelf[0], user)
if match_shelf
else cls.parse_list(match_list[0], user)
)
for book in shelf["books"]:
collection.append_item(book["book"], note=book["review"])
total += 1
collection.save()
msg.success(user, f'成功从Goodreads导入包含{total}本书的收藏单{shelf["title"]}')
elif match_profile:
uid = match_profile[1]
shelves = {
ShelfType.WISHLIST: f"https://www.goodreads.com/review/list/{uid}?shelf=to-read",
ShelfType.PROGRESS: f"https://www.goodreads.com/review/list/{uid}?shelf=currently-reading",
ShelfType.COMPLETE: f"https://www.goodreads.com/review/list/{uid}?shelf=read",
}
for shelf_type in shelves:
shelf_url = shelves.get(shelf_type)
shelf = cls.parse_shelf(shelf_url, user)
for book in shelf["books"]:
mark = Mark(user, book["book"])
if (
(
mark.shelf_type == shelf_type
and mark.comment_text == book["review"]
)
or (
mark.shelf_type == ShelfType.COMPLETE
and shelf_type != ShelfType.COMPLETE
)
or (
mark.shelf_type == ShelfType.PROGRESS
and shelf_type == ShelfType.WISHLIST
)
):
print(
f'Skip {shelf_type}/{book["book"]} bc it was marked {mark.shelf_type}'
)
else:
mark.update(
shelf_type,
book["review"],
book["rating"],
visibility=visibility,
created_time=book["last_updated"] or timezone.now(),
)
total += 1
msg.success(user, f"成功从Goodreads用户主页导入{total}个标记。")
if shelf["title"] and shelf["books"]:
collection = Collection.objects.create(
title=shelf["title"],
brief=shelf["description"]
+ "\n\nImported from [Goodreads]("
+ url
+ ")",
owner=user,
)
for book in shelf["books"]:
collection.append_item(book["book"], note=book["review"])
total += 1
collection.save()
msg.success(user, f'成功从Goodreads导入包含{total}本书的收藏单{shelf["title"]}')
elif match_profile:
uid = match_profile[1]
shelves = {
ShelfType.WISHLIST: f"https://www.goodreads.com/review/list/{uid}?shelf=to-read",
ShelfType.PROGRESS: f"https://www.goodreads.com/review/list/{uid}?shelf=currently-reading",
ShelfType.COMPLETE: f"https://www.goodreads.com/review/list/{uid}?shelf=read",
}
for shelf_type in shelves:
shelf_url = shelves.get(shelf_type)
shelf = cls.parse_shelf(shelf_url, user)
for book in shelf["books"]:
mark = Mark(user, book["book"])
if (
(
mark.shelf_type == shelf_type
and mark.comment_text == book["review"]
)
or (
mark.shelf_type == ShelfType.COMPLETE
and shelf_type != ShelfType.COMPLETE
)
or (
mark.shelf_type == ShelfType.PROGRESS
and shelf_type == ShelfType.WISHLIST
)
):
print(
f'Skip {shelf_type}/{book["book"]} bc it was marked {mark.shelf_type}'
)
else:
mark.update(
shelf_type,
book["review"],
book["rating"],
visibility=visibility,
created_time=book["last_updated"] or timezone.now(),
)
total += 1
msg.success(user, f"成功从Goodreads用户主页导入{total}个标记。")
@classmethod
def get_book(cls, url, user):