@@ -1,6 +1,8 @@
 import asyncio
 import random
 import httpx
+from .types import ResultDict
+from typing import List
 from warnings import warn
 from typing import Union
 from bs4 import BeautifulSoup
@@ -19,7 +21,7 @@ def __init__(self, proxies: Union[list, str] = None, default_user_agents: Union[


 class Client(BaseClient):
-    def search(self, query: str, exact_match: bool = False, **kwargs):
+    def search(self, query: str, exact_match: bool = False, **kwargs) -> List[ResultDict]:
         if exact_match:
             query = '"%s"' % query

@@ -49,7 +51,7 @@ def __init__(self, proxies: Union[list, str] = None, default_user_agents: Union[
         self.loop = asyncio.get_event_loop()
         super().__init__(proxies=proxies, default_user_agents=default_user_agents, random_ua=random_ua)

-    async def search(self, query: str, exact_match: bool = False, **kwargs):
+    async def search(self, query: str, exact_match: bool = False, **kwargs) -> List[ResultDict]:
         if exact_match:
             query = '"%s"' % query

@@ -74,7 +76,7 @@ async def search(self, query: str, exact_match: bool = False, **kwargs):
         return await self.loop.run_in_executor(None, parse_page, data)


-def parse_page(html: Union[str, bytes]):
+def parse_page(html: Union[str, bytes]) -> List[ResultDict]:
     soup = BeautifulSoup(html, "html.parser")
     results = []
     for i in soup.find_all('div', {'class': 'links_main'}):
@@ -84,7 +86,7 @@ def parse_page(html: Union[str, bytes]):
             title = i.h2.a.text
             description = i.find('a', {'class': 'result__snippet'}).text
             url = i.find('a', {'class': 'result__url'}).get('href')
-            results.append(dict(title=title, description=description, url=url))
+            results.append(ResultDict(title=title, description=description, url=url))
         except AttributeError:
             pass
     return results
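
The .types module that defines ResultDict is not included in this diff. A minimal sketch of what it might contain, assuming ResultDict is a typing.TypedDict covering the three fields assembled in parse_page (the actual definition may differ):

from typing import TypedDict

class ResultDict(TypedDict):
    # One parsed search result, matching the fields built in parse_page().
    title: str        # text of the result heading
    description: str  # text of the result__snippet element
    url: str          # href of the result__url element

Under that assumption, ResultDict(title=..., description=..., url=...) constructs a plain dict at runtime, so the new append call behaves exactly like the old dict(...) call while giving type checkers the field names and types behind the List[ResultDict] return annotations.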