From 4924f5f24879af9dc24294b70de677146cdb7501 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sun, 20 Mar 2022 12:57:23 -0400
Subject: [PATCH] fix search by url error

---
 common/scraper.py | 2 +-
 common/views.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/common/scraper.py b/common/scraper.py
index fb025b29..daf53bca 100644
--- a/common/scraper.py
+++ b/common/scraper.py
@@ -231,7 +231,7 @@ def get_scraper_by_url(url):
     parsed_url = urllib.parse.urlparse(url)
     hostname = parsed_url.netloc
     for host in scraper_registry:
-        if host == hostname:
+        if host in url:
             return scraper_registry[host]
     # TODO move this logic to scraper class
     try:
diff --git a/common/views.py b/common/views.py
index d5a24100..4872f3d1 100644
--- a/common/views.py
+++ b/common/views.py
@@ -392,7 +392,7 @@ def jump_or_scrape(request, url):
     scraper = get_scraper_by_url(url)
     if scraper is None:
         # invalid url
-        return render(request, 'common/error.html', {'msg': _("链接非法,查询失败")})
+        return render(request, 'common/error.html', {'msg': _("链接无效,查询失败")})
     else:
         try:
             # raise ObjectDoesNotExist