# lib.itmens/common/views.py
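"""
Common views: the user home page, the site-wide search across books,
movies, games and music, and jump_or_scrape for URL queries.
"""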
import operator
import logging
from difflib import SequenceMatcher
from urllib.parse import urlparse
from django.shortcuts import render, redirect, reverse
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext_lazy as _
from django.core.paginator import Paginator
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Q, Count
from django.http import HttpResponseBadRequest
from books.models import Book
from movies.models import Movie
from games.models import Game
from music.models import Album, Song, AlbumMark, SongMark
from users.models import Report, User, Preference
from mastodon.decorators import mastodon_request_included
from users.views import home as user_home
from common.models import MarkStatusEnum
from common.utils import PageLinksGenerator
from common.scraper import scraper_registry
from common.config import *
from management.models import Announcement
from django.conf import settings

logger = logging.getLogger(__name__)

@login_required
def home(request):
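    """Show the current user's home page."""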
    return user_home(request, request.user.id)


@login_required
def search(request):
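    """
    Search items by keywords or by tag within an optional category.

    GET parameters handled below: ``q`` (keywords, or a URL that will be
    jumped to / scraped), ``c`` (book/movie/music/game/all), ``tag`` and
    ``page``. Hypothetical examples: ``?q=dune&c=book``, ``?tag=jazz&c=music``.
    """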
if request.method == 'GET':
        # check that the input search string is not empty, excluding the param ?c=
        empty_querystring_criteria = {k: v for k, v in request.GET.items() if k != 'c'}
        if not len(empty_querystring_criteria):
            return HttpResponseBadRequest()
        # test if the user input is a URL; if so, jump to the URL handling function
        url_validator = URLValidator()
        input_string = request.GET.get('q', default='').strip()
        try:
            url_validator(input_string)
            # validation succeeded: treat the query as a URL
            return jump_or_scrape(request, input_string)
        except ValidationError:
            pass
        # category: book/movie/music etc.
        category = request.GET.get("c", default='').strip().lower()

        # keywords: for now the query string is kept whole rather than
        # split on blank space
        keywords = request.GET.get("q", default='').strip()
        keywords = [keywords] if keywords else []

        # tag: when a tag is provided there should be no keywords, for now
        tag = request.GET.get("tag", default='')

        # whitespace-only string means an empty query
        if not (keywords or tag):
            return render(
                request,
                "common/search_result.html",
                {
                    "items": None,
                }
            )

        def book_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get('keywords')
            # tag
            tag = kwargs.get('tag')

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(subtitle__icontains=keyword)
                q = q | Q(orig_title__icontains=keyword)
            if tag:
                q = q & Q(book_tags__content__iexact=tag)
            query_args.append(q)
            queryset = Book.objects.filter(*query_args).distinct()

            def calculate_similarity(book):
                if keywords:
                    # search by keywords
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += 1/2 * SequenceMatcher(None, keyword, book.title).quick_ratio() \
                            + 1/3 * SequenceMatcher(None, keyword, book.orig_title).quick_ratio() \
                            + 1/6 * SequenceMatcher(None, keyword, book.subtitle).quick_ratio()
                        n += 1
                    book.similarity = similarity / n
                elif tag:
                    # search by single tag
                    book.similarity = 0 if book.rating_number is None else book.rating_number
                else:
                    book.similarity = 0
                return book.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def movie_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get('keywords')
            # tag
            tag = kwargs.get('tag')

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(other_title__icontains=keyword)
                q = q | Q(orig_title__icontains=keyword)
            if tag:
                q = q & Q(movie_tags__content__iexact=tag)
            query_args.append(q)
            queryset = Movie.objects.filter(*query_args).distinct()

            def calculate_similarity(movie):
                if keywords:
                    # search by name
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += 1/2 * SequenceMatcher(None, keyword, movie.title).quick_ratio() \
                            + 1/4 * SequenceMatcher(None, keyword, movie.orig_title).quick_ratio() \
                            + 1/4 * SequenceMatcher(None, keyword, movie.other_title).quick_ratio()
                        n += 1
                    movie.similarity = similarity / n
                elif tag:
                    # search by single tag
                    movie.similarity = 0 if movie.rating_number is None else movie.rating_number
                else:
                    movie.similarity = 0
                return movie.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset
def game_param_handler(**kwargs):
# keywords
keywords = kwargs.get('keywords')
# tag
tag = kwargs.get('tag')
query_args = []
q = Q()
for keyword in keywords:
q = q | Q(title__icontains=keyword)
q = q | Q(other_title__icontains=keyword)
q = q | Q(developer__icontains=keyword)
q = q | Q(publisher__icontains=keyword)
if tag:
q = q & Q(game_tags__content__iexact=tag)
query_args.append(q)
queryset = Game.objects.filter(*query_args).distinct()
def calculate_similarity(game):
if keywords:
# search by name
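                    # developer and publisher appear to be list fields, so join them before fuzzy matching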
developer_dump = ' '.join(game.developer)
publisher_dump = ' '.join(game.publisher)
similarity, n = 0, 0
for keyword in keywords:
                        similarity += 1/2 * SequenceMatcher(None, keyword, game.title).quick_ratio() \
                            + 1/4 * SequenceMatcher(None, keyword, game.other_title).quick_ratio() \
                            + 1/16 * SequenceMatcher(None, keyword, developer_dump).quick_ratio() \
                            + 1/16 * SequenceMatcher(None, keyword, publisher_dump).quick_ratio()
n += 1
game.similarity = similarity / n
elif tag:
# search by single tag
game.similarity = 0 if game.rating_number is None else game.rating_number
else:
game.similarity = 0
return game.similarity
if len(queryset) > 0:
ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
else:
ordered_queryset = list(queryset)
return ordered_queryset
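
        # each handler attaches a `similarity` attribute to its results so
        # that hits from different models can be merged and ranked together
        # in all_param_handler below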

        def music_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get('keywords')
            # tag
            tag = kwargs.get('tag')

            query_args = []
            q = Q()

            # search albums
            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(artist__icontains=keyword)
            if tag:
                q = q & Q(album_tags__content__iexact=tag)
            query_args.append(q)
            album_queryset = Album.objects.filter(*query_args).distinct()

            # extra query args for songs
            q = Q()
            for keyword in keywords:
                q = q | Q(album__title__icontains=keyword)
                q = q | Q(title__icontains=keyword)
                q = q | Q(artist__icontains=keyword)
            if tag:
                q = q & Q(song_tags__content__iexact=tag)
            query_args.clear()
            query_args.append(q)
            song_queryset = Song.objects.filter(*query_args).distinct()
            queryset = list(album_queryset) + list(song_queryset)

            def calculate_similarity(music):
                if keywords:
                    # search by name
                    similarity, n = 0, 0
                    artist_dump = ' '.join(music.artist)
                    for keyword in keywords:
                        if music.__class__ == Album:
                            similarity += 1/2 * SequenceMatcher(None, keyword, music.title).quick_ratio() \
                                + 1/2 * SequenceMatcher(None, keyword, artist_dump).quick_ratio()
                        elif music.__class__ == Song:
                            similarity += 1/2 * SequenceMatcher(None, keyword, music.title).quick_ratio() \
                                + 1/6 * SequenceMatcher(None, keyword, artist_dump).quick_ratio() \
                                + 1/6 * SequenceMatcher(None, keyword, music.album.title).quick_ratio()
                        n += 1
                    music.similarity = similarity / n
                elif tag:
                    # search by single tag
                    music.similarity = 0 if music.rating_number is None else music.rating_number
                else:
                    music.similarity = 0
                return music.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(queryset, key=calculate_similarity, reverse=True)
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def all_param_handler(**kwargs):
            book_queryset = book_param_handler(**kwargs)
            movie_queryset = movie_param_handler(**kwargs)
            music_queryset = music_param_handler(**kwargs)
            game_queryset = game_param_handler(**kwargs)
            ordered_queryset = sorted(
                book_queryset + movie_queryset + music_queryset + game_queryset,
                key=operator.attrgetter('similarity'),
                reverse=True
            )
            return ordered_queryset

        param_handler = {
            'book': book_param_handler,
            'movie': movie_param_handler,
            'music': music_param_handler,
            'game': game_param_handler,
            'all': all_param_handler,
            '': all_param_handler
        }

        categories = [k for k in param_handler.keys() if k not in ['all', '']]
        try:
            queryset = param_handler[category](
                keywords=keywords,
                tag=tag
            )
        except KeyError:
            # unknown category: fall back to searching all categories
            queryset = param_handler['all'](
                keywords=keywords,
                tag=tag
            )

        paginator = Paginator(queryset, ITEMS_PER_PAGE)
        page_number = request.GET.get('page', default=1)
        items = paginator.get_page(page_number)
        items.pagination = PageLinksGenerator(PAGE_LINK_NUMBER, page_number, paginator.num_pages)
        for item in items:
            item.tag_list = item.get_tags_manager().values('content').annotate(
                tag_frequency=Count('content')).order_by('-tag_frequency')[:TAG_NUMBER_ON_LIST]
        return render(
            request,
            "common/search_result.html",
            {
                "items": items,
                "categories": categories,
            }
        )
    else:
        return HttpResponseBadRequest()


@login_required
@mastodon_request_included
def jump_or_scrape(request, url):
"""
1. match url to registered scrapers
2. try to find the url in the db, if exits then jump, else scrape and jump
"""
    # redirect directly if the url already points to this site
    this_site = request.get_host()
    if this_site in url:
        return redirect(url)
    # match url against registered sites
matched_host = None
for host in scraper_registry:
if host in url:
matched_host = host
break
if matched_host is None:
# invalid url
        return render(request, 'common/error.html', {'msg': _("Invalid link, search failed")})
else:
scraper = scraper_registry[matched_host]
try:
            # objects.get may raise ObjectDoesNotExist
effective_url = scraper.get_effective_url(url)
entity = scraper.data_class.objects.get(source_url=effective_url)
# if exists then jump to detail page
return redirect(entity)
except ObjectDoesNotExist:
# scrape if not exists
            try:
                scraper.scrape(url)
                form = scraper.save(request_user=request.user)
            except Exception as e:
                logger.error(f"Scrape Failed URL: {url}\n{e}")
                if settings.DEBUG:
                    logger.error("Exceptions during saving scraped data:", exc_info=e)
                return render(request, 'common/error.html', {'msg': _("Failed to scrape data 😫")})
            return redirect(form.instance)