fix search by url error
commit 4924f5f248
parent 573789be00
2 changed files with 2 additions and 2 deletions
@@ -231,7 +231,7 @@ def get_scraper_by_url(url):
     parsed_url = urllib.parse.urlparse(url)
     hostname = parsed_url.netloc
     for host in scraper_registry:
-        if host == hostname:
+        if host in url:
             return scraper_registry[host]
     # TODO move this logic to scraper class
     try:
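The behavioral change in this hunk is that a scraper is now chosen by a substring match against the whole URL instead of an exact comparison with the parsed netloc. Below is a minimal sketch of the difference, assuming scraper_registry maps host strings to scrapers; the registry entries and example URL are hypothetical, and only the two if conditions come from the diff:

import urllib.parse

# Hypothetical stand-in for the real scraper_registry; the actual
# keys and scraper classes are not visible in this diff.
scraper_registry = {
    "movie.douban.com": "DoubanMovieScraper",
    "book.douban.com": "DoubanBookScraper",
}

def match_by_hostname(url):
    # Old behavior: exact comparison against the parsed netloc.
    hostname = urllib.parse.urlparse(url).netloc
    for host in scraper_registry:
        if host == hostname:
            return scraper_registry[host]
    return None

def match_by_substring(url):
    # New behavior: the registry key only needs to appear somewhere in
    # the raw URL, so a netloc with extra parts (e.g. an explicit port)
    # no longer defeats the lookup.
    for host in scraper_registry:
        if host in url:
            return scraper_registry[host]
    return None

url = "https://movie.douban.com:443/subject/1292052/"
print(match_by_hostname(url))   # None: netloc is "movie.douban.com:443"
print(match_by_substring(url))  # DoubanMovieScraper

The trade-off is that the substring test can also match a host string occurring in a path or query component; the TODO in the hunk about moving this logic into the scraper class suggests the matching is expected to be refined later anyway.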
@@ -392,7 +392,7 @@ def jump_or_scrape(request, url):
     scraper = get_scraper_by_url(url)
     if scraper is None:
         # invalid url
-        return render(request, 'common/error.html', {'msg': _("链接非法,查询失败")})
+        return render(request, 'common/error.html', {'msg': _("链接无效,查询失败")})
     else:
         try:
             # raise ObjectDoesNotExist
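This second hunk only rewords the user-facing error message: the old string _("链接非法,查询失败") reads roughly "illegal link, query failed", while the new string _("链接无效,查询失败") reads "invalid link, query failed".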