# NOTE(review): SOURCE is a unified diff; this is the reconstructed post-patch
# content of the patched region of common/searcher.py.  Names referenced but
# defined above this excerpt (quote_plus, SearchResultItem, SourceSiteEnum,
# Category members) are assumed imported/defined earlier in the file — confirm.
from django.conf import settings
from common.scraper import GoodreadsScraper, get_spotify_token
import requests
from lxml import html
import logging

SEARCH_PAGE_SIZE = 5  # not all APIs support a page-size parameter

logger = logging.getLogger(__name__)


class Goodreads:
    @classmethod
    def search(cls, q, page=1):
        """Search Goodreads for books matching *q*.

        Args:
            q: free-text query (or ISBN).
            page: 1-based result page.

        Returns:
            list of SearchResultItem; empty on any network/scraping error
            (errors are logged, never raised).
        """
        results = []
        try:
            search_url = f'https://www.goodreads.com/search?page={page}&q={quote_plus(q)}'
            r = requests.get(search_url)
            if r.url.startswith('https://www.goodreads.com/book/show/'):
                # Goodreads 302-redirects straight to the book page when
                # exactly one result matches (e.g. an ISBN query).
                data, img = GoodreadsScraper.scrape(r.url, r)
                subtitle = f"{data['pub_year']} {', '.join(data['author'])} {', '.join(data['translator'])}"
                results.append(SearchResultItem(
                    Category.Book, SourceSiteEnum.GOODREADS,
                    data['source_url'], data['title'], subtitle,
                    data['brief'], data['cover_url']))
            else:
                h = html.fromstring(r.content.decode('utf-8'))
                for c in h.xpath('//tr[@itemtype="http://schema.org/Book"]'):
                    el_cover = c.xpath('.//img[@class="bookCover"]/@src')
                    cover = el_cover[0] if el_cover else None
                    el_title = c.xpath('.//a[@class="bookTitle"]//text()')
                    title = ''.join(el_title).strip() if el_title else None
                    el_url = c.xpath('.//a[@class="bookTitle"]/@href')
                    url = 'https://www.goodreads.com' + el_url[0] if el_url else None
                    el_authors = c.xpath('.//a[@class="authorName"]//text()')
                    subtitle = ', '.join(el_authors) if el_authors else None
                    results.append(SearchResultItem(
                        Category.Book, SourceSiteEnum.GOODREADS,
                        url, title, subtitle, '', cover))
        except Exception as e:
            logger.error("Goodreads search '%s' error: %s", q, e)
        return results


class GoogleBooks:
    @classmethod
    def search(cls, q, page=1):
        """Search the Google Books volumes API for *q*.

        Returns a list of SearchResultItem; empty on error (logged).
        """
        results = []
        try:
            api_url = (
                f'https://www.googleapis.com/books/v1/volumes?q={quote_plus(q)}'
                f'&startIndex={SEARCH_PAGE_SIZE * (page - 1)}'
                f'&maxResults={SEARCH_PAGE_SIZE}'
                f'&maxAllowedMaturityRating=MATURE'
            )
            j = requests.get(api_url).json()
            for b in j.get('items', []):
                vi = b['volumeInfo']
                title = vi['title']
                # BUG FIX: publishedDate/authors are optional in the API;
                # a missing key previously raised KeyError and (with the
                # broad except) silently dropped the whole result page.
                subtitle = f"{vi.get('publishedDate', '')} {', '.join(vi.get('authors', []))}".strip()
                if 'description' in vi:
                    brief = vi['description']
                elif 'searchInfo' in b and 'textSnippet' in b['searchInfo']:
                    # BUG FIX: the snippet lives at item.searchInfo.textSnippet,
                    # not volumeInfo.textSnippet.searchInfo as previously coded.
                    brief = b['searchInfo']['textSnippet']
                else:
                    brief = ''
                url = vi['infoLink']
                cover = vi['imageLinks']['thumbnail'] if 'imageLinks' in vi else None
                results.append(SearchResultItem(
                    Category.Book, SourceSiteEnum.GOOGLEBOOKS,
                    url, title, subtitle, brief, cover))
        except Exception as e:
            logger.error("GoogleBooks search '%s' error: %s", q, e)
        return results


class TheMovieDatabase:
    @classmethod
    def search(cls, q, page=1):
        """Search TMDb (multi search: movies + TV) for *q*.

        Returns a list of SearchResultItem; empty on error (logged).
        """
        results = []
        try:
            api_url = (
                f'https://api.themoviedb.org/3/search/multi?query={quote_plus(q)}'
                f'&page={page}&api_key={settings.TMDB_API3_KEY}'
                f'&language=zh-CN&include_adult=true'
            )
            j = requests.get(api_url).json()
            for m in j['results']:
                if m['media_type'] not in ('tv', 'movie'):
                    continue  # multi search also returns 'person' entries
                url = f"https://www.themoviedb.org/{m['media_type']}/{m['id']}"
                if m['media_type'] == 'tv':
                    cat = Category.TV
                    title = m['name']
                    subtitle = f"{m['first_air_date']} {m['original_name']}"
                else:
                    cat = Category.Movie
                    title = m['title']
                    subtitle = f"{m['release_date']} {m['original_title']}"
                # BUG FIX: poster_path may be null in the API response;
                # avoid producing a bogus '.../w500/None' URL.
                cover = (f"https://image.tmdb.org/t/p/w500/{m['poster_path']}"
                         if m.get('poster_path') else None)
                results.append(SearchResultItem(
                    cat, SourceSiteEnum.TMDB, url, title, subtitle,
                    m['overview'], cover))
        except Exception as e:
            logger.error("TMDb search '%s' error: %s", q, e)
        return results


class Spotify:
    @classmethod
    def search(cls, q, page=1):
        """Search Spotify albums for *q*.

        Returns a list of SearchResultItem; empty on error (logged).
        """
        results = []
        try:
            # BUG FIX: the query must be URL-encoded, and the offset must be
            # (page-1)*size — previously page 1 skipped the first
            # SEARCH_PAGE_SIZE albums.
            api_url = (
                f"https://api.spotify.com/v1/search?q={quote_plus(q)}"
                f"&type=album&limit={SEARCH_PAGE_SIZE}"
                f"&offset={(page - 1) * SEARCH_PAGE_SIZE}"
            )
            headers = {
                'Authorization': f"Bearer {get_spotify_token()}"
            }
            j = requests.get(api_url, headers=headers).json()
            for a in j['albums']['items']:
                title = a['name']
                subtitle = ' '.join(
                    [a['release_date']] + [artist['name'] for artist in a['artists']])
                url = a['external_urls']['spotify']
                # BUG FIX: some albums have an empty images list; avoid
                # IndexError (which previously aborted the whole page).
                cover = a['images'][0]['url'] if a['images'] else None
                results.append(SearchResultItem(
                    Category.Music, SourceSiteEnum.SPOTIFY,
                    url, title, subtitle, '', cover))
        except Exception as e:
            logger.error("Spotify search '%s' error: %s", q, e)
        return results
a/common/templates/common/search_result.html b/common/templates/common/search_result.html index 8ed2309d..f9e6375c 100644 --- a/common/templates/common/search_result.html +++ b/common/templates/common/search_result.html @@ -411,7 +411,9 @@ {% empty %} +
  • {% trans '无站内条目匹配' %} +
  • {% endfor %} {% for item in external_items %} @@ -422,7 +424,7 @@
    -
    +