import operator
import logging
from difflib import SequenceMatcher
from urllib.parse import urlparse
from django.shortcuts import render, redirect, reverse
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext_lazy as _
from django.core.paginator import Paginator
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Q, Count
from django.http import HttpResponseBadRequest
from books.models import Book
from movies.models import Movie
from music.models import Album, Song, AlbumMark, SongMark
from users.models import Report, User
from mastodon.decorators import mastodon_request_included
from common.models import MarkStatusEnum
from common.utils import PageLinksGenerator
from common.scraper import scraper_registry
from management.models import Announcement

# how many books are shown in each set on the home page
BOOKS_PER_SET = 5

# how many movies are shown in each set on the home page
MOVIES_PER_SET = 5

# how many music items are shown in each set on the home page
MUSIC_PER_SET = 5

# how many items are shown on one search result page
ITEMS_PER_PAGE = 20

# how many page links are shown in the pagination bar
PAGE_LINK_NUMBER = 7

# max number of tags shown on the list page
TAG_NUMBER_ON_LIST = 5

logger = logging.getLogger(__name__)


@login_required
def home(request):
    if request.method == 'GET':
        # FIXME: this block is repetitive and should be refactored

        unread_announcements = Announcement.objects.filter(
            pk__gt=request.user.read_announcement_index).order_by('-pk')
        try:
            request.user.read_announcement_index = Announcement.objects.latest('pk').pk
            request.user.save(update_fields=['read_announcement_index'])
        except ObjectDoesNotExist:
            # when there is no announcement
            pass
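
        # The block above is a high-water mark: announcements newer than the
        # user's stored index are treated as unread, then the index is advanced
        # to the latest pk. E.g. with announcements pk 1..7 and a stored index
        # of 5, pk 6 and 7 are listed and the index becomes 7.
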
        do_book_marks = request.user.user_bookmarks.filter(
            status=MarkStatusEnum.DO).order_by("-edited_time")
        do_books_more = do_book_marks.count() > BOOKS_PER_SET

        wish_book_marks = request.user.user_bookmarks.filter(
            status=MarkStatusEnum.WISH).order_by("-edited_time")
        wish_books_more = wish_book_marks.count() > BOOKS_PER_SET

        collect_book_marks = request.user.user_bookmarks.filter(
            status=MarkStatusEnum.COLLECT).order_by("-edited_time")
        collect_books_more = collect_book_marks.count() > BOOKS_PER_SET

        do_movie_marks = request.user.user_moviemarks.filter(
            status=MarkStatusEnum.DO).order_by("-edited_time")
        do_movies_more = do_movie_marks.count() > MOVIES_PER_SET

        wish_movie_marks = request.user.user_moviemarks.filter(
            status=MarkStatusEnum.WISH).order_by("-edited_time")
        wish_movies_more = wish_movie_marks.count() > MOVIES_PER_SET

        collect_movie_marks = request.user.user_moviemarks.filter(
            status=MarkStatusEnum.COLLECT).order_by("-edited_time")
        collect_movies_more = collect_movie_marks.count() > MOVIES_PER_SET

        do_music_marks = list(request.user.user_songmarks.filter(status=MarkStatusEnum.DO)[:MUSIC_PER_SET]) \
            + list(request.user.user_albummarks.filter(status=MarkStatusEnum.DO)[:MUSIC_PER_SET])
        # count the full querysets so the flag stays correct even when a single
        # type alone exceeds MUSIC_PER_SET
        do_music_more = request.user.user_songmarks.filter(status=MarkStatusEnum.DO).count() \
            + request.user.user_albummarks.filter(status=MarkStatusEnum.DO).count() > MUSIC_PER_SET
        do_music_marks = sorted(do_music_marks, key=lambda e: e.edited_time, reverse=True)[:MUSIC_PER_SET]

        wish_music_marks = list(request.user.user_songmarks.filter(status=MarkStatusEnum.WISH)[:MUSIC_PER_SET]) \
            + list(request.user.user_albummarks.filter(status=MarkStatusEnum.WISH)[:MUSIC_PER_SET])
        wish_music_more = request.user.user_songmarks.filter(status=MarkStatusEnum.WISH).count() \
            + request.user.user_albummarks.filter(status=MarkStatusEnum.WISH).count() > MUSIC_PER_SET
        wish_music_marks = sorted(wish_music_marks, key=lambda e: e.edited_time, reverse=True)[:MUSIC_PER_SET]

        collect_music_marks = list(request.user.user_songmarks.filter(status=MarkStatusEnum.COLLECT)[:MUSIC_PER_SET]) \
            + list(request.user.user_albummarks.filter(status=MarkStatusEnum.COLLECT)[:MUSIC_PER_SET])
        collect_music_more = request.user.user_songmarks.filter(status=MarkStatusEnum.COLLECT).count() \
            + request.user.user_albummarks.filter(status=MarkStatusEnum.COLLECT).count() > MUSIC_PER_SET
        collect_music_marks = sorted(collect_music_marks, key=lambda e: e.edited_time, reverse=True)[:MUSIC_PER_SET]
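
        # Each status merges two querysets (songs + albums): take up to
        # MUSIC_PER_SET from each, compute the "more" flag from the full
        # counts, then keep the MUSIC_PER_SET most recently edited. E.g. with
        # 4 song marks and 3 album marks, 7 candidates are merged, more=True,
        # and the 5 newest survive.
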
        for mark in do_music_marks + wish_music_marks + collect_music_marks:
            # annotate the mark type for template convenience
            if isinstance(mark, AlbumMark):
                mark.type = "album"
            else:
                mark.type = "song"

        reports = Report.objects.order_by('-submitted_time').filter(is_read=False)
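
        # Book/movie querysets are sliced to *_PER_SET entries only in the
        # context below; the *_more flags computed above tell the template
        # whether to offer a "show more" link.
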
        return render(
            request,
            'common/home.html',
            {
                'do_book_marks': do_book_marks[:BOOKS_PER_SET],
                'wish_book_marks': wish_book_marks[:BOOKS_PER_SET],
                'collect_book_marks': collect_book_marks[:BOOKS_PER_SET],
                'do_books_more': do_books_more,
                'wish_books_more': wish_books_more,
                'collect_books_more': collect_books_more,
                'do_movie_marks': do_movie_marks[:MOVIES_PER_SET],
                'wish_movie_marks': wish_movie_marks[:MOVIES_PER_SET],
                'collect_movie_marks': collect_movie_marks[:MOVIES_PER_SET],
                'do_movies_more': do_movies_more,
                'wish_movies_more': wish_movies_more,
                'collect_movies_more': collect_movies_more,
                'do_music_marks': do_music_marks,
                'wish_music_marks': wish_music_marks,
                'collect_music_marks': collect_music_marks,
                'do_music_more': do_music_more,
                'wish_music_more': wish_music_more,
                'collect_music_more': collect_music_more,
                'reports': reports,
                'unread_announcements': unread_announcements,
            }
        )
    else:
        return HttpResponseBadRequest()


@login_required
def search(request):
    if request.method == 'GET':
        # test whether the search string is empty, excluding the param ?c=
        empty_querystring_criteria = {k: v for k, v in request.GET.items() if k != 'c'}
        if not empty_querystring_criteria:
            return HttpResponseBadRequest()

        # test whether the user input is a URL; if so, hand it to the URL handling function
        url_validator = URLValidator()
        input_string = request.GET.get('q', default='').strip()
        try:
            url_validator(input_string)
            # validation success
            return jump_or_scrape(request, input_string)
        except ValidationError:
            pass
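
        # NOTE: URLValidator only accepts well-formed absolute URLs such as
        # "https://example.com/some/path" (a made-up example); plain keyword
        # input like "dune herbert" raises ValidationError and falls through
        # to the keyword search below.
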
        # category: book/movie/music etc.
        category = request.GET.get("c", default='').strip().lower()
        # keywords, separated by blank space
        keywords = request.GET.get("q", default='').strip().split()
        # tag; when a tag is provided there should be no keywords, for now
        tag = request.GET.get("tag", default='')

        # whitespace-only input: nothing to search
        if not (keywords or tag):
            return HttpResponseBadRequest()

        def book_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get('keywords')
            # tag
            tag = kwargs.get('tag')

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(subtitle__icontains=keyword)
                q = q | Q(orig_title__icontains=keyword)
            if tag:
                q = q & Q(book_tags__content__iexact=tag)

            query_args.append(q)
            queryset = Book.objects.filter(*query_args).distinct()
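
            # The filter above ORs each keyword across title / subtitle /
            # orig_title, then ANDs the optional tag constraint, roughly:
            #   (title ~ k1 OR subtitle ~ k1 OR ... OR orig_title ~ kN)
            #   AND book_tags.content == tag (case-insensitive)
            # e.g. the hypothetical query ?q=dune+herbert&tag=novel keeps books
            # matching "dune" or "herbert" in any of the three fields that also
            # carry the tag "novel".
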
            def calculate_similarity(book):
                if keywords:
                    # search by keywords
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += 1/2 * SequenceMatcher(None, keyword, book.title).quick_ratio() \
                            + 1/3 * SequenceMatcher(None, keyword, book.orig_title).quick_ratio() \
                            + 1/6 * SequenceMatcher(None, keyword, book.subtitle).quick_ratio()
                        n += 1
                    book.similarity = similarity / n
                elif tag:
                    # search by single tag
                    book.similarity = 0 if book.rating_number is None else book.rating_number
                else:
                    book.similarity = 0
                return book.similarity
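
            # The weights 1/2 + 1/3 + 1/6 sum to 1, so one keyword matching all
            # three fields perfectly contributes 1.0. quick_ratio() is an upper
            # bound on SequenceMatcher.ratio() in [0, 1] based on shared
            # characters, and it is case-sensitive: e.g.
            # SequenceMatcher(None, "dune", "Dune").quick_ratio() == 0.75.
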
            if len(queryset) > 0:
                ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def movie_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get('keywords')
            # tag
            tag = kwargs.get('tag')

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(other_title__icontains=keyword)
                q = q | Q(orig_title__icontains=keyword)
            if tag:
                q = q & Q(movie_tags__content__iexact=tag)

            query_args.append(q)
            queryset = Movie.objects.filter(*query_args).distinct()

            def calculate_similarity(movie):
                if keywords:
                    # search by name
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += 1/2 * SequenceMatcher(None, keyword, movie.title).quick_ratio() \
                            + 1/4 * SequenceMatcher(None, keyword, movie.orig_title).quick_ratio() \
                            + 1/4 * SequenceMatcher(None, keyword, movie.other_title).quick_ratio()
                        n += 1
                    movie.similarity = similarity / n
                elif tag:
                    # search by single tag
                    movie.similarity = 0 if movie.rating_number is None else movie.rating_number
                else:
                    movie.similarity = 0
                return movie.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def music_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get('keywords')
            # tag
            tag = kwargs.get('tag')

            query_args = []
            q = Q()

            # search albums
            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(artist__icontains=keyword)
            if tag:
                q = q & Q(album_tags__content__iexact=tag)

            query_args.append(q)
            album_queryset = Album.objects.filter(*query_args).distinct()

            # extra query args for songs
            q = Q()
            for keyword in keywords:
                q = q | Q(album__title__icontains=keyword)
                q = q | Q(title__icontains=keyword)
                q = q | Q(artist__icontains=keyword)
            if tag:
                q = q & Q(song_tags__content__iexact=tag)
            query_args.clear()
            query_args.append(q)
            song_queryset = Song.objects.filter(*query_args).distinct()
            queryset = list(album_queryset) + list(song_queryset)

            def calculate_similarity(music):
                if keywords:
                    # search by name
                    similarity, n = 0, 0
                    artist_dump = ' '.join(music.artist)
                    for keyword in keywords:
                        if isinstance(music, Album):
                            similarity += 1/2 * SequenceMatcher(None, keyword, music.title).quick_ratio() \
                                + 1/2 * SequenceMatcher(None, keyword, artist_dump).quick_ratio()
                        elif isinstance(music, Song):
                            similarity += 1/2 * SequenceMatcher(None, keyword, music.title).quick_ratio() \
                                + 1/6 * SequenceMatcher(None, keyword, artist_dump).quick_ratio() \
                                + 1/6 * SequenceMatcher(None, keyword, music.album.title).quick_ratio()
                        n += 1
                    music.similarity = similarity / n
                elif tag:
                    # search by single tag
                    music.similarity = 0 if music.rating_number is None else music.rating_number
                else:
                    music.similarity = 0
                return music.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset
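
        # NOTE: a song's similarity weights sum to 5/6 (1/2 + 1/6 + 1/6), so a
        # perfectly matching song ranks slightly below a perfectly matching
        # album, whose weights sum to 1.
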
        def all_param_handler(**kwargs):
            book_queryset = book_param_handler(**kwargs)
            movie_queryset = movie_param_handler(**kwargs)
            music_queryset = music_param_handler(**kwargs)
            ordered_queryset = sorted(
                book_queryset + movie_queryset + music_queryset,
                key=operator.attrgetter('similarity'),
                reverse=True
            )
            return ordered_queryset

        param_handler = {
            'book': book_param_handler,
            'movie': movie_param_handler,
            'music': music_param_handler,
            'all': all_param_handler,
            '': all_param_handler
        }

        try:
            queryset = param_handler[category](
                keywords=keywords,
                tag=tag
            )
        except KeyError:
            queryset = param_handler['all'](
                keywords=keywords,
                tag=tag
            )
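
        # Any unrecognized ?c= value (e.g. a hypothetical ?c=game) raises
        # KeyError above and falls back to searching all categories.
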
        paginator = Paginator(queryset, ITEMS_PER_PAGE)
        page_number = request.GET.get('page', default=1)
        items = paginator.get_page(page_number)
        items.pagination = PageLinksGenerator(PAGE_LINK_NUMBER, page_number, paginator.num_pages)
        for item in items:
            item.tag_list = item.get_tags_manager().values('content').annotate(
                tag_frequency=Count('content')).order_by('-tag_frequency')[:TAG_NUMBER_ON_LIST]
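
        # values('content').annotate(Count(...)) groups each item's tags by
        # content and keeps the most frequent ones, i.e. roughly
        # GROUP BY content ORDER BY COUNT(content) DESC LIMIT TAG_NUMBER_ON_LIST.
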
        return render(
            request,
            "common/search_result.html",
            {
                "items": items,
            }
        )

    else:
        return HttpResponseBadRequest()


@login_required
@mastodon_request_included
def jump_or_scrape(request, url):
    """
    1. match url to registered scrapers
    2. try to find the url in the db; if it exists then jump, else scrape and then jump
    """

    # redirect to this site
    this_site = request.get_host()
    if this_site in url:
        return redirect(url)

    # match url to registered sites
    matched_host = None
    for host in scraper_registry:
        if host in url:
            matched_host = host
            break
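
    # NOTE: host matching is a plain substring test; e.g. a hypothetical
    # registry key "book.example.com" matches "https://book.example.com/id/1",
    # but it would also match any URL that merely contains that string.
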
    if matched_host is None:
        # invalid url
        return render(request, 'common/error.html', {'msg': _("Invalid link, query failed")})
    else:
        scraper = scraper_registry[matched_host]
        try:
            # the query below raises ObjectDoesNotExist if the url is not in the db yet
            effective_url = scraper.get_effective_url(url)
            entity = scraper.data_class.objects.get(source_url=effective_url)
            # if it exists, jump to the detail page
            return redirect(entity)
        except ObjectDoesNotExist:
            # scrape if it does not exist
            try:
                scraper.scrape(url)
                form = scraper.save(request_user=request.user)
            except Exception as e:
                logger.error(f"Scrape Failed URL: {url}")
                logger.error("Exceptions during saving scraped data:", exc_info=e)
                return render(request, 'common/error.html', {'msg': _("Failed to scrape data 😫")})
            return redirect(form.instance)