Merge ce372a5ce5a287418ac7a4851d879ed939dc3cba into cd64fb966e7e9d764e622e42b177a1f13dc65ec0
commit d4d0eac075
searx/autocomplete.py
@@ -41,6 +41,7 @@
 - ``duckduckgo``
 - ``google``
 - ``mwmbl``
+- ``naver``
 - ``quark``
 - ``qwant``
 - ``seznam``
@@ -149,6 +149,21 @@ def mwmbl(query, _lang):
     return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")]


+def naver(query, _lang):
+    # Naver search autocompleter
+    url = f"https://ac.search.naver.com/nx/ac?{urlencode({'q': query, 'r_format': 'json', 'st': 0})}"
+    response = get(url)
+
+    results = []
+
+    if response.ok:
+        data = response.json()
+        if data.get('items'):
+            for item in data['items'][0]:
+                results.append(item[0])
+    return results
+
+
 def qihu360search(query, _lang):
     # 360Search search autocompleter
     url = f"https://sug.so.360.cn/suggest?{urlencode({'format': 'json', 'word': query})}"
@@ -300,6 +315,7 @@ backends = {
     'duckduckgo': duckduckgo,
     'google': google_complete,
     'mwmbl': mwmbl,
+    'naver': naver,
     'quark': quark,
     'qwant': qwant,
     'seznam': seznam,
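As an aside before the new engine file: here is a minimal, self-contained sketch of how the new naver autocomplete backend builds its request URL and picks suggestions out of the response. The sample payload shape is an assumption inferred from the data['items'][0] / item[0] traversal above, not from any official Naver documentation, and build_url / extract_suggestions are illustrative helpers rather than names from the commit (the real backend issues the request through the module's get() helper).

from urllib.parse import urlencode

def build_url(query):
    # mirrors the URL built by the new backend in searx/autocomplete.py
    return f"https://ac.search.naver.com/nx/ac?{urlencode({'q': query, 'r_format': 'json', 'st': 0})}"

def extract_suggestions(data):
    # same traversal as the backend: first group in 'items', first field of each entry
    results = []
    if data.get('items'):
        for item in data['items'][0]:
            results.append(item[0])
    return results

# assumed example payload (shape inferred from the parsing code, not documented by Naver)
sample = {"items": [[["naver map"], ["naver webtoon"], ["naver news"]]]}
print(build_url("naver"))           # https://ac.search.naver.com/nx/ac?q=naver&r_format=json&st=0
print(extract_suggestions(sample))  # ['naver map', 'naver webtoon', 'naver news']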
searx/engines/naver.py (new file, 210 lines)
@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# pylint: disable=line-too-long
+"""Naver for SearXNG"""
+
+from urllib.parse import urlencode
+from lxml import html
+
+from searx.exceptions import SearxEngineAPIException, SearxEngineXPathException
+from searx.result_types import EngineResults, MainResult
+from searx.utils import (
+    eval_xpath_getindex,
+    eval_xpath_list,
+    eval_xpath,
+    extract_text,
+    extr,
+    html_to_text,
+    parse_duration_string,
+    js_variable_to_python,
+)
+
+# engine metadata
+about = {
+    "website": "https://search.naver.com",
+    "wikidata_id": "Q485639",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "HTML",
+    "language": "ko",
+}
+
+categories = []
+paging = True
+
+time_range_support = True
+time_range_dict = {"day": "1d", "week": "1w", "month": "1m", "year": "1y"}
+
+base_url = "https://search.naver.com"
+
+naver_category = "general"
+"""Naver supports general, images, news, videos search.
+
+- ``general``: search for general
+- ``images``: search for images
+- ``news``: search for news
+- ``videos``: search for videos
+"""
+
+# Naver cannot set the number of results on one page, set default value for paging
+naver_category_dict = {
+    "general": {
+        "start": 15,
+        "where": "web",
+    },
+    "images": {
+        "start": 50,
+        "where": "image",
+    },
+    "news": {
+        "start": 10,
+        "where": "news",
+    },
+    "videos": {
+        "start": 48,
+        "where": "video",
+    },
+}
+
+
+def init(_):
+    if naver_category not in ('general', 'images', 'news', 'videos'):
+        raise SearxEngineAPIException(f"Unsupported category: {naver_category}")
+
+
+def request(query, params):
+    query_params = {
+        "query": query,
+    }
+
+    if naver_category in naver_category_dict:
+        query_params["start"] = (params["pageno"] - 1) * naver_category_dict[naver_category]["start"] + 1
+        query_params["where"] = naver_category_dict[naver_category]["where"]
+
+    if params["time_range"] in time_range_dict:
+        query_params["nso"] = f"p:{time_range_dict[params['time_range']]}"
+
+    params["url"] = f"{base_url}/search.naver?{urlencode(query_params)}"
+    return params
+
+
+def response(resp) -> EngineResults:
+    parsers = {'general': parse_general, 'images': parse_images, 'news': parse_news, 'videos': parse_videos}
+
+    return parsers[naver_category](resp.text)
+
+
+def parse_general(data):
+    results = EngineResults()
+
+    dom = html.fromstring(data)
+
+    for item in eval_xpath_list(dom, "//ul[contains(@class, 'lst_total')]/li[contains(@class, 'bx')]"):
+        thumbnail = None
+        try:
+            thumbnail = eval_xpath_getindex(item, ".//div[contains(@class, 'thumb_single')]//img/@data-lazysrc", 0)
+        except (ValueError, TypeError, SearxEngineXPathException):
+            pass
+
+        results.add(
+            MainResult(
+                title=extract_text(eval_xpath(item, ".//a[contains(@class, 'link_tit')]")),
+                url=eval_xpath_getindex(item, ".//a[contains(@class, 'link_tit')]/@href", 0),
+                content=extract_text(
+                    eval_xpath(item, ".//div[contains(@class, 'total_dsc_wrap')]//a[contains(@class, 'api_txt_lines')]")
+                ),
+                thumbnail=thumbnail,
+            )
+        )
+
+    return results
+
+
+def parse_images(data):
+    results = []
+
+    match = extr(data, '<script>var imageSearchTabData=', '</script>')
+    if match:
+        json = js_variable_to_python(match.strip())
+        items = json.get('content', {}).get('items', [])
+
+        for item in items:
+            results.append(
+                {
+                    "template": "images.html",
+                    "url": item.get('link'),
+                    "thumbnail_src": item.get('thumb'),
+                    "img_src": item.get('originalUrl'),
+                    "title": html_to_text(item.get('title')),
+                    "source": item.get('source'),
+                    "resolution": f"{item.get('orgWidth')} x {item.get('orgHeight')}",
+                }
+            )
+
+    return results
+
+
+def parse_news(data):
+    results = EngineResults()
+    dom = html.fromstring(data)
+
+    for item in eval_xpath_list(
+        dom, "//div[contains(@class, 'sds-comps-base-layout') and contains(@class, 'sds-comps-full-layout')]"
+    ):
+        title = extract_text(eval_xpath(item, ".//span[contains(@class, 'sds-comps-text-type-headline1')]/text()"))
+
+        url = eval_xpath_getindex(item, ".//a[@href and @nocr='1']/@href", 0)
+
+        content = extract_text(eval_xpath(item, ".//span[contains(@class, 'sds-comps-text-type-body1')]"))
+
+        thumbnail = None
+        try:
+            thumbnail = eval_xpath_getindex(
+                item,
+                ".//div[contains(@class, 'sds-comps-image') and contains(@class, 'sds-rego-thumb-overlay')]//img[@src]/@src",
+                0,
+            )
+        except (ValueError, TypeError, SearxEngineXPathException):
+            pass
+
+        if title and content and url:
+            results.add(
+                MainResult(
+                    title=title,
+                    url=url,
+                    content=content,
+                    thumbnail=thumbnail,
+                )
+            )
+
+    return results
+
+
+def parse_videos(data):
+    results = []
+
+    dom = html.fromstring(data)
+
+    for item in eval_xpath_list(dom, "//li[contains(@class, 'video_item')]"):
+        thumbnail = None
+        try:
+            thumbnail = eval_xpath_getindex(item, ".//img[contains(@class, 'thumb')]/@src", 0)
+        except (ValueError, TypeError, SearxEngineXPathException):
+            pass
+
+        length = None
+        try:
+            length = parse_duration_string(extract_text(eval_xpath(item, ".//span[contains(@class, 'time')]")))
+        except (ValueError, TypeError):
+            pass
+
+        results.append(
+            {
+                "template": "videos.html",
+                "title": extract_text(eval_xpath(item, ".//a[contains(@class, 'info_title')]")),
+                "url": eval_xpath_getindex(item, ".//a[contains(@class, 'info_title')]/@href", 0),
+                "thumbnail": thumbnail,
+                'length': length,
+            }
+        )

+    return results
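Before the settings changes below, a short, self-contained sketch of the paging logic in the new engine's request() may help. It copies the naver_category_dict values from the file above and reproduces the start/where/nso computation; build_search_url is an illustrative stand-in for request(), which in SearXNG receives and mutates a params dict rather than returning a URL.

from urllib.parse import urlencode

base_url = "https://search.naver.com"
time_range_dict = {"day": "1d", "week": "1w", "month": "1m", "year": "1y"}
naver_category_dict = {
    "general": {"start": 15, "where": "web"},
    "images": {"start": 50, "where": "image"},
    "news": {"start": 10, "where": "news"},
    "videos": {"start": 48, "where": "video"},
}

def build_search_url(query, naver_category, pageno, time_range=None):
    # same arithmetic as request(): Naver's page size is fixed per category,
    # so the result offset is derived from the page number
    query_params = {"query": query}
    if naver_category in naver_category_dict:
        query_params["start"] = (pageno - 1) * naver_category_dict[naver_category]["start"] + 1
        query_params["where"] = naver_category_dict[naver_category]["where"]
    if time_range in time_range_dict:
        query_params["nso"] = f"p:{time_range_dict[time_range]}"
    return f"{base_url}/search.naver?{urlencode(query_params)}"

# page 1 of a general search starts at result 1, page 3 at result 31 ((3 - 1) * 15 + 1)
print(build_search_url("seoul", "general", 1))
print(build_search_url("seoul", "general", 3, time_range="week"))
# images use a page size of 50, so page 2 starts at result 51
print(build_search_url("seoul", "images", 2))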
searx/settings.yml
@@ -34,7 +34,7 @@ search:
   # Filter results. 0: None, 1: Moderate, 2: Strict
   safe_search: 0
   # Existing autocomplete backends: "360search", "baidu", "brave", "dbpedia", "duckduckgo", "google", "yandex",
-  # "mwmbl", "seznam", "sogou", "stract", "swisscows", "quark", "qwant", "wikipedia" -
+  # "mwmbl", "naver", "seznam", "sogou", "stract", "swisscows", "quark", "qwant", "wikipedia" -
   # leave blank to turn it off by default.
   autocomplete: ""
   # minimun characters to type before autocompleter starts
@@ -2359,25 +2359,31 @@ engines:
     disabled: true

   - name: naver
-    shortcut: nvr
     categories: [general, web]
-    engine: xpath
-    paging: true
-    search_url: https://search.naver.com/search.naver?where=webkr&sm=osp_hty&ie=UTF-8&query={query}&start={pageno}
-    url_xpath: //a[@class="link_tit"]/@href
-    title_xpath: //a[@class="link_tit"]
-    content_xpath: //div[@class="total_dsc_wrap"]/a
-    first_page_num: 1
-    page_size: 10
+    engine: naver
+    shortcut: nvr
+    disabled: true
+
+  - name: naver images
+    naver_category: images
+    categories: [images]
+    engine: naver
+    shortcut: nvri
+    disabled: true
+
+  - name: naver news
+    naver_category: news
+    categories: [news]
+    engine: naver
+    shortcut: nvrn
+    disabled: true
+
+  - name: naver videos
+    naver_category: videos
+    categories: [videos]
+    engine: naver
+    shortcut: nvrv
     disabled: true
-    about:
-      website: https://www.naver.com/
-      wikidata_id: Q485639
-      official_api_documentation: https://developers.naver.com/docs/nmt/examples/
-      use_official_api: false
-      require_api_key: false
-      results: HTML
-      language: ko

   - name: rubygems
     shortcut: rbg
searx/utils.py
@@ -830,6 +830,11 @@ def js_variable_to_python(js_variable):
     s = _JS_DECIMAL_RE.sub(":0.", s)
     # replace the surogate character by colon
     s = s.replace(chr(1), ':')
+    # replace single-quote followed by comma with double-quote and comma
+    # {"a": "\"12\"',"b": "13"}
+    # becomes
+    # {"a": "\"12\"","b": "13"}
+    s = s.replace("',", "\",")
     # load the JSON and return the result
     return json.loads(s)
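To close, a small, self-contained sketch of what the added replacement in js_variable_to_python() does, using the example from the comments above. fix_single_quote_before_comma is an illustrative stand-in for just the step added here; the real helper in searx/utils.py performs several other normalisations before this point. The change is presumably what lets parse_images() in the new engine feed Naver's imageSearchTabData blob through js_variable_to_python() without a JSON parse error.

import json

def fix_single_quote_before_comma(s):
    # the step added in this commit: a single quote that closes a value just
    # before a comma is rewritten to a double quote so the string parses as JSON
    return s.replace("',", "\",")

broken = r'''{"a": "\"12\"',"b": "13"}'''
print(fix_single_quote_before_comma(broken))              # {"a": "\"12\"","b": "13"}
print(json.loads(fix_single_quote_before_comma(broken)))  # {'a': '"12"', 'b': '13'}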