example2.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <[email protected]> <https://hanxiao.github.io>
# NOTE: First install bert-as-service via
# $
# $ pip install bert-serving-server
# $ pip install bert-serving-client
# $
# using BertClient in an asynchronous, non-blocking way
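#
# how to run this example (a sketch: the model path is only a placeholder and
# the flag names / default ports follow the bert-as-service README):
#
# $ bert-serving-start -model_dir /tmp/english_L-12_H-768_A-12/ -num_worker=2 -port=5555 -port_out=5556
# $ python example2.py 5555 5556
#
# note: the script reads README.md from the current working directory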
import sys
from bert_serving.client import BertClient


def send_without_block(bc, data, repeat=10):
    # encoding without blocking:
    print('sending all data without blocking...')
    for _ in range(repeat):
        bc.encode(data, blocking=False)
    print('all sent!')
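

# encode(..., blocking=False) returns immediately without waiting for the
# result; the vectors are collected later via fetch()/fetch_all(), which is
# what the main block below walks through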
if __name__ == '__main__':
    bc = BertClient(port=int(sys.argv[1]), port_out=int(sys.argv[2]))

    num_repeat = 20

    with open('README.md') as fp:
        data = [v for v in fp if v.strip()]

    send_without_block(bc, data, num_repeat)

    num_expect_vecs = len(data) * num_repeat

    # then fetch all
    print('now waiting until all results are available...')
    vecs = bc.fetch_all(concat=True)
    print('received %s, expected: %d' % (vecs.shape, num_expect_vecs))
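
    # note: fetch_all() waits until every pending request sent above has come
    # back; with concat=True the per-request results are stacked into a single
    # array, so vecs.shape[0] is expected to equal num_expect_vecs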

    # now send it again
    send_without_block(bc, data, num_repeat)

    # this time fetch them one by one; due to the async encoding and server
    # scheduling, the sending order is NOT preserved!
    for v in bc.fetch():
        print('received %s, shape %s' % (v.id, v.content.shape))
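
    # each fetched result carries the id of the request it belongs to (printed
    # above), so results can still be matched back to the original encode()
    # calls even though they arrive out of order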

    # finally, let's encode and fetch at the same time in async mode;
    # we do that by building an endless data stream that generates data
    # extremely fast
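    # (max_num_batch below limits how many batches encode_async() pulls from
    # the generator, so the loop terminates even though text_gen() never does)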
    def text_gen():
        while True:
            yield data

    for j in bc.encode_async(text_gen(), max_num_batch=20):
        print('received %d : %s' % (j.id, j.content))