import operator
import logging
from difflib import SequenceMatcher
from urllib.parse import urlparse
from django.shortcuts import render, redirect, reverse
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext_lazy as _
from django.core.paginator import Paginator
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Q, Count
from django.http import HttpResponseBadRequest
from books.models import Book
from movies.models import Movie
from games.models import Game
from music.models import Album, Song, AlbumMark, SongMark
from users.models import Report, User, Preference
from mastodon.decorators import mastodon_request_included
from users.views import home as user_home
from timeline.views import timeline as user_timeline
from common.models import MarkStatusEnum
from common.utils import PageLinksGenerator
from common.scraper import get_scraper_by_url, get_normalized_url
from common.config import *
from common.searcher import ExternalSources
from management.models import Announcement
from django.conf import settings
from common.index import Indexer
from django.http import JsonResponse
from django.db.utils import IntegrityError


logger = logging.getLogger(__name__)


@login_required
def home(request):
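    """Redirect the logged-in user to their preferred landing page."""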
    if request.user.get_preference().classic_homepage:
        return redirect(reverse("users:home", args=[request.user.mastodon_username]))
    else:
        return redirect(reverse("timeline:timeline"))


@login_required
def external_search(request):
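    """Search external sources for items not yet in the local database.

    Results whose source_url was already listed by the local search (stored in
    the session by search()) are filtered out to avoid duplicates.
    """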
    category = request.GET.get("c", default="all").strip().lower()
    if category == "all":
        category = None
    keywords = request.GET.get("q", default="").strip()
    p = request.GET.get("page", default="1")
    page_number = int(p) if p.isdigit() else 1
    items = ExternalSources.search(category, keywords, page_number) if keywords else []
    dedupe_urls = request.session.get("search_dedupe_urls", [])
    items = [i for i in items if i.source_url not in dedupe_urls]

    return render(
        request,
        "common/external_search_result.html",
        {
            "external_items": items,
        },
    )


def search(request):
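    """Site-wide search view.

    Dispatches to the new catalog search when ENABLE_NEW_MODEL is set, falls
    back to the database-backed search2() when no search backend is configured,
    and otherwise queries the Indexer. URL-shaped queries from authenticated
    users are handed to jump_or_scrape() instead.
    """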
    if settings.ENABLE_NEW_MODEL:
        from catalog.views import search as new_search

        return new_search(request)

    if settings.SEARCH_BACKEND is None:
        return search2(request)
    category = request.GET.get("c", default="all").strip().lower()
    if category == "all":
        category = None
    keywords = request.GET.get("q", default="").strip()
    tag = request.GET.get("tag", default="").strip()
    p = request.GET.get("page", default="1")
    page_number = int(p) if p.isdigit() else 1
    if not (keywords or tag):
        return render(
            request,
            "common/search_result.html",
            {
                "items": None,
            },
        )
    if request.user.is_authenticated:
        url_validator = URLValidator()
        try:
            url_validator(keywords)
            # validation success
            return jump_or_scrape(request, keywords)
        except ValidationError as e:
            pass

    result = Indexer.search(keywords, page=page_number, category=category, tag=tag)
    keys = []
    items = []
    urls = []
    for i in result.items:
        key = (
            i.isbn
            if hasattr(i, "isbn")
            else (i.imdb_code if hasattr(i, "imdb_code") else None)
        )
        if key is None:
            items.append(i)
        elif key not in keys:
            keys.append(key)
            items.append(i)
        urls.append(i.source_url)
        i.tag_list = i.all_tag_list[:TAG_NUMBER_ON_LIST]

    if request.path.endswith(".json/"):
        return JsonResponse(
            {
                "num_pages": result.num_pages,
                "items": list(map(lambda i: i.get_json(), items)),
            }
        )

    request.session["search_dedupe_urls"] = urls
    return render(
        request,
        "common/search_result.html",
        {
            "items": items,
            "pagination": PageLinksGenerator(
                PAGE_LINK_NUMBER, page_number, result.num_pages
            ),
            "categories": ["book", "movie", "music", "game"],
        },
    )


def search2(request):
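    """Database-backed search used when no external search backend is configured.

    Builds per-category Q() filters from the query string, ranks matches with
    difflib.SequenceMatcher similarity (or by rating when searching by tag),
    and renders a paginated result page.
    """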
    if request.method == "GET":

        # test if the search string is empty, excluding the ?c= param
        empty_querystring_criteria = {k: v for k, v in request.GET.items() if k != "c"}
        if not len(empty_querystring_criteria):
            return HttpResponseBadRequest()

        # if the user entered a URL, jump to the URL handling function
        url_validator = URLValidator()
        input_string = request.GET.get("q", default="").strip()
        try:
            url_validator(input_string)
            # validation success
            return jump_or_scrape(request, input_string)
        except ValidationError as e:
            pass

        # category: book/movie/music etc.
        category = request.GET.get("c", default="").strip().lower()
        # keywords, separated by blank space
        # it is better not to split the keywords
        keywords = request.GET.get("q", default="").strip()
        keywords = [keywords] if keywords else []
        # tag; when a tag is provided there should be no keywords, for now
        tag = request.GET.get("tag", default="")

        # whitespace-only string, empty query
        if not (keywords or tag):
            return render(
                request,
                "common/search_result.html",
                {
                    "items": None,
                },
            )

        def book_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get("keywords")
            # tag
            tag = kwargs.get("tag")

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(subtitle__icontains=keyword)
                q = q | Q(orig_title__icontains=keyword)
            if tag:
                q = q & Q(book_tags__content__iexact=tag)

            query_args.append(q)
            queryset = Book.objects.filter(*query_args).distinct()

            def calculate_similarity(book):
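                """Score a Book against the query: weighted SequenceMatcher ratios over
                title, orig_title and subtitle; when searching by tag, fall back to rating_number."""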
                if keywords:
                    # search by keywords
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += (
                            1
                            / 2
                            * SequenceMatcher(None, keyword, book.title).quick_ratio()
                            + 1
                            / 3
                            * SequenceMatcher(
                                None, keyword, book.orig_title
                            ).quick_ratio()
                            + 1
                            / 6
                            * SequenceMatcher(
                                None, keyword, book.subtitle
                            ).quick_ratio()
                        )
                        n += 1
                    book.similarity = similarity / n

                elif tag:
                    # search by single tag
                    book.similarity = (
                        0 if book.rating_number is None else book.rating_number
                    )
                else:
                    book.similarity = 0
                return book.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(
                    queryset, key=calculate_similarity, reverse=True
                )
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def movie_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get("keywords")
            # tag
            tag = kwargs.get("tag")

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(other_title__icontains=keyword)
                q = q | Q(orig_title__icontains=keyword)
            if tag:
                q = q & Q(movie_tags__content__iexact=tag)

            query_args.append(q)
            queryset = Movie.objects.filter(*query_args).distinct()

            def calculate_similarity(movie):
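                """Score a Movie against the query: weighted SequenceMatcher ratios over
                title, orig_title and other_title; when searching by tag, fall back to rating_number."""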
                if keywords:
                    # search by name
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += (
                            1
                            / 2
                            * SequenceMatcher(None, keyword, movie.title).quick_ratio()
                            + 1
                            / 4
                            * SequenceMatcher(
                                None, keyword, movie.orig_title
                            ).quick_ratio()
                            + 1
                            / 4
                            * SequenceMatcher(
                                None, keyword, movie.other_title
                            ).quick_ratio()
                        )
                        n += 1
                    movie.similarity = similarity / n
                elif tag:
                    # search by single tag
                    movie.similarity = (
                        0 if movie.rating_number is None else movie.rating_number
                    )
                else:
                    movie.similarity = 0
                return movie.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(
                    queryset, key=calculate_similarity, reverse=True
                )
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def game_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get("keywords")
            # tag
            tag = kwargs.get("tag")

            query_args = []
            q = Q()

            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(other_title__icontains=keyword)
                q = q | Q(developer__icontains=keyword)
                q = q | Q(publisher__icontains=keyword)
            if tag:
                q = q & Q(game_tags__content__iexact=tag)

            query_args.append(q)
            queryset = Game.objects.filter(*query_args).distinct()

            def calculate_similarity(game):
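                """Score a Game against the query: weighted SequenceMatcher ratios over
                title, other_title, developer and publisher; when searching by tag, fall back to rating_number."""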
                if keywords:
                    # search by name
                    developer_dump = " ".join(game.developer)
                    publisher_dump = " ".join(game.publisher)
                    similarity, n = 0, 0
                    for keyword in keywords:
                        similarity += (
                            1
                            / 2
                            * SequenceMatcher(None, keyword, game.title).quick_ratio()
                            + 1
                            / 4
                            * SequenceMatcher(
                                None, keyword, game.other_title
                            ).quick_ratio()
                            + 1
                            / 16
                            * SequenceMatcher(
                                None, keyword, developer_dump
                            ).quick_ratio()
                            + 1
                            / 16
                            * SequenceMatcher(
                                None, keyword, publisher_dump
                            ).quick_ratio()
                        )
                        n += 1
                    game.similarity = similarity / n
                elif tag:
                    # search by single tag
                    game.similarity = (
                        0 if game.rating_number is None else game.rating_number
                    )
                else:
                    game.similarity = 0
                return game.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(
                    queryset, key=calculate_similarity, reverse=True
                )
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def music_param_handler(**kwargs):
            # keywords
            keywords = kwargs.get("keywords")
            # tag
            tag = kwargs.get("tag")

            query_args = []
            q = Q()

            # search albums
            for keyword in keywords:
                q = q | Q(title__icontains=keyword)
                q = q | Q(artist__icontains=keyword)
            if tag:
                q = q & Q(album_tags__content__iexact=tag)

            query_args.append(q)
            album_queryset = Album.objects.filter(*query_args).distinct()

            # extra query args for songs
            q = Q()
            for keyword in keywords:
                q = q | Q(album__title__icontains=keyword)
                q = q | Q(title__icontains=keyword)
                q = q | Q(artist__icontains=keyword)
            if tag:
                q = q & Q(song_tags__content__iexact=tag)
            query_args.clear()
            query_args.append(q)
            song_queryset = Song.objects.filter(*query_args).distinct()
            queryset = list(album_queryset) + list(song_queryset)

            def calculate_similarity(music):
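                """Score an Album or Song against the query: weighted SequenceMatcher ratios
                over title and artist (plus the album title for songs); when searching by tag,
                fall back to rating_number."""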
                if keywords:
                    # search by name
                    similarity, n = 0, 0
                    artist_dump = " ".join(music.artist)
                    for keyword in keywords:
                        if music.__class__ == Album:
                            similarity += (
                                1
                                / 2
                                * SequenceMatcher(
                                    None, keyword, music.title
                                ).quick_ratio()
                                + 1
                                / 2
                                * SequenceMatcher(
                                    None, keyword, artist_dump
                                ).quick_ratio()
                            )
                        elif music.__class__ == Song:
                            similarity += (
                                1
                                / 2
                                * SequenceMatcher(
                                    None, keyword, music.title
                                ).quick_ratio()
                                + 1
                                / 6
                                * SequenceMatcher(
                                    None, keyword, artist_dump
                                ).quick_ratio()
                                + 1
                                / 6
                                * (
                                    SequenceMatcher(
                                        None, keyword, music.album.title
                                    ).quick_ratio()
                                    if music.album is not None
                                    else 0
                                )
                            )
                        n += 1
                    music.similarity = similarity / n
                elif tag:
                    # search by single tag
                    music.similarity = (
                        0 if music.rating_number is None else music.rating_number
                    )
                else:
                    music.similarity = 0
                return music.similarity

            if len(queryset) > 0:
                ordered_queryset = sorted(
                    queryset, key=calculate_similarity, reverse=True
                )
            else:
                ordered_queryset = list(queryset)
            return ordered_queryset

        def all_param_handler(**kwargs):
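            """Run every category handler with the same arguments and merge the results, ordered by similarity."""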
            book_queryset = book_param_handler(**kwargs)
            movie_queryset = movie_param_handler(**kwargs)
            music_queryset = music_param_handler(**kwargs)
            game_queryset = game_param_handler(**kwargs)
            ordered_queryset = sorted(
                book_queryset + movie_queryset + music_queryset + game_queryset,
                key=operator.attrgetter("similarity"),
                reverse=True,
            )
            return ordered_queryset

        param_handler = {
            "book": book_param_handler,
            "movie": movie_param_handler,
            "music": music_param_handler,
            "game": game_param_handler,
            "all": all_param_handler,
            "": all_param_handler,
        }

        categories = [k for k in param_handler.keys() if k not in ["all", ""]]

        try:
            queryset = param_handler[category](keywords=keywords, tag=tag)
        except KeyError as e:
            queryset = param_handler["all"](keywords=keywords, tag=tag)
        paginator = Paginator(queryset, ITEMS_PER_PAGE)
        page_number = request.GET.get("page", default=1)
        items = paginator.get_page(page_number)
        items.pagination = PageLinksGenerator(
            PAGE_LINK_NUMBER, page_number, paginator.num_pages
        )
        for item in items:
            item.tag_list = (
                item.get_tags_manager()
                .values("content")
                .annotate(tag_frequency=Count("content"))
                .order_by("-tag_frequency")[:TAG_NUMBER_ON_LIST]
            )

        return render(
            request,
            "common/search_result.html",
            {
                "items": items,
                "categories": categories,
            },
        )

    else:
        return HttpResponseBadRequest()


@login_required
@mastodon_request_included
def jump_or_scrape(request, url):
    """
    1. match the url to the registered scrapers
    2. look the url up in the db: if it exists, jump to it; otherwise scrape it and then jump
    """

    # redirect to this site
    this_site = request.get_host()
    if this_site in url:
        return redirect(url)

    url = get_normalized_url(url)
    scraper = get_scraper_by_url(url)
    if scraper is None:
        # invalid url
        return render(request, "common/error.html", {"msg": _("链接无效,查询失败")})  # "Invalid link, lookup failed"
    else:
        try:
            effective_url = scraper.get_effective_url(url)
        except ValueError:
            return render(request, "common/error.html", {"msg": _("链接无效,查询失败")})  # "Invalid link, lookup failed"
        try:
            # objects.get() raises ObjectDoesNotExist when the url is not in the db
            entity = scraper.data_class.objects.get(source_url=effective_url)
            # if it exists, jump to the detail page
            if request.path.endswith(".json/"):
                return JsonResponse({"num_pages": 1, "items": [entity.get_json()]})
            return redirect(entity)
        except ObjectDoesNotExist:
            # scrape if it does not exist
            try:
                scraper.scrape(url)
                form = scraper.save(request_user=request.user)
            except IntegrityError as ie:  # duplicate key on source_url, likely caused by a user's double submission
                try:
                    entity = scraper.data_class.objects.get(source_url=effective_url)
                    return redirect(entity)
                except Exception as e:
                    logger.error(f"Scrape Failed URL: {url}\n{e}")
                    if settings.DEBUG:
                        logger.error(
                            "Exceptions during saving scraped data:", exc_info=e
                        )
                    return render(request, "common/error.html", {"msg": _("爬取数据失败😫")})  # "Failed to scrape data"
            except Exception as e:
                logger.error(f"Scrape Failed URL: {url}\n{e}")
                if settings.DEBUG:
                    logger.error("Exceptions during saving scraped data:", exc_info=e)
                return render(request, "common/error.html", {"msg": _("爬取数据失败😫")})  # "Failed to scrape data"
            return redirect(form.instance)


def go_relogin(request):
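    """Error page shown when a post could not be shared to the fediverse; links back to
    users:connect so the user can log in to their Mastodon instance again."""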
    return render(
        request,
        "common/error.html",
        {
            "url": reverse("users:connect") + "?domain=" + request.user.mastodon_site,
            # "The information was saved, but it could not be shared to the fediverse"
            "msg": _("信息已保存,但是未能分享到联邦网络"),
            # "Your fediverse (Mastodon/Pleroma/...) login may have expired; redirecting to log in again"
            "secondary_msg": _(
                "可能是你在联邦网络(Mastodon/Pleroma/...)的登录状态过期了,正在跳转到联邦网络重新登录😼"
            ),
        },
    )