Skip to content

Commit 0c46318

Browse files
authored
【Hackathon 9th No.22】add unit tests for share_external_data (#3744)
1 parent 9ead10e commit 0c46318

File tree

1 file changed

+131
-0
lines changed

1 file changed

+131
-0
lines changed
Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import multiprocessing as mp
16+
import os
17+
import queue
18+
import unittest
19+
from multiprocessing import Process, Queue
20+
21+
import numpy as np
22+
import paddle
23+
24+
from fastdeploy.model_executor.ops.gpu import set_data_ipc, share_external_data
25+
26+
27+
def _create_test_tensor(shape, dtype):
    """Create a random paddle tensor for IPC round-trip testing.

    Args:
        shape: Tensor shape (list/tuple of ints).
        dtype: Paddle dtype; its string form selects the sampling strategy.

    Returns:
        A random tensor on the current device: uniform floats for float
        dtypes, values in [-100, 100) for int dtypes, or a boolean mask
        for bool dtypes.

    Raises:
        ValueError: If the dtype is not a float/int/bool family type
            (the original silently returned None here, which would fail
            later with a confusing error in the producer process).
    """
    dtype_name = str(dtype)
    if "float" in dtype_name:
        return paddle.rand(shape=shape, dtype=dtype)
    if "int" in dtype_name:
        return paddle.randint(-100, 100, shape=shape, dtype=dtype)
    if "bool" in dtype_name:
        # paddle.rand only supports floating-point dtypes, so sample
        # floats at the default dtype and threshold them into a bool mask.
        return paddle.rand(shape=shape) > 0.5
    raise ValueError(f"Unsupported dtype for test tensor: {dtype}")
34+
35+
36+
def _producer_proc(shm_name, shape, dtype, ready_q, done_q, error_q):
    """Producer side of the IPC test.

    Builds a random tensor on gpu:0, registers it under ``shm_name`` via
    ``set_data_ipc``, publishes the reference data on ``ready_q``, then
    blocks on ``done_q`` so the shared allocation stays alive while the
    consumer maps it. Any failure is reported through ``error_q``.
    """
    try:
        paddle.device.set_device("gpu:0")
        tensor = _create_test_tensor(shape, dtype)
        set_data_ipc(tensor, shm_name)
        # Ship the expected values to the parent for later comparison.
        ready_q.put(("ready", tensor.numpy().tolist()))
        # Keep this process (and the shared memory) alive until signalled.
        _ = done_q.get(timeout=20)
    except Exception as exc:
        error_q.put(("producer_error", str(exc)))
46+
47+
48+
def _consumer_proc(shm_name, shape, dtype, result_q, error_q):
    """Consumer side of the IPC test.

    Maps the tensor published by the producer under ``shm_name`` using
    ``share_external_data`` and reports the observed values on
    ``result_q``. Any failure is reported through ``error_q``.
    """
    try:
        paddle.device.set_device("gpu:0")
        # share_external_data needs a placeholder tensor to attach to.
        placeholder = paddle.zeros(shape, dtype=dtype)
        mapped = share_external_data(placeholder, shm_name, shape)
        result_q.put(("ok", mapped.numpy().tolist()))
    except Exception as exc:
        error_q.put(("consumer_error", str(exc)))
57+
58+
59+
# Use spawn to avoid forking CUDA contexts: a forked child inherits the
# parent's CUDA state, which is unsupported and can deadlock or crash.
try:
    mp.set_start_method("spawn", force=True)
except RuntimeError:
    # Defensive: keep going with whatever start method is already in
    # effect if it cannot be changed at this point.
    pass
64+
65+
66+
class TestShareExternalData(unittest.TestCase):
    """Cross-process test for ``share_external_data``.

    A producer process registers a GPU tensor via ``set_data_ipc``; a
    consumer process maps it with ``share_external_data``; the parent
    compares the values observed on both sides.
    """

    def setUp(self):
        paddle.seed(2024)
        np.random.seed(42)

        if not paddle.device.is_compiled_with_cuda():
            self.skipTest("CUDA not available, skipping GPU tests")

        # Set device to GPU
        paddle.device.set_device("gpu:0")

        self.test_shape = [4, 8]
        self.dtype = paddle.float32
        # PID-scoped name avoids collisions between concurrent test runs.
        self.shm_prefix = f"test_share_external_{os.getpid()}"
        # Track spawned processes so tearDown can reap stragglers even
        # when an assertion fails mid-test.
        self._procs = []

    def _run_minimal_cross_process(self):
        ready_q = Queue()
        result_q = Queue()
        error_q = Queue()
        done_q = Queue()

        p = Process(
            target=_producer_proc, args=(self.shm_prefix, self.test_shape, self.dtype, ready_q, done_q, error_q)
        )
        self._procs.append(p)
        p.start()

        # Wait until the producer has registered the shared tensor.
        try:
            status, original_data = ready_q.get(timeout=20)
            self.assertEqual(status, "ready")
        except Exception:
            p.terminate()
            # Join after terminate so the producer does not linger as a
            # zombie (the original skipped this join).
            p.join(timeout=5)
            self.fail("Producer did not become ready in time")

        c = Process(target=_consumer_proc, args=(self.shm_prefix, self.test_shape, self.dtype, result_q, error_q))
        self._procs.append(c)
        c.start()
        c.join(timeout=30)

        # Signal producer to exit now that the consumer is done.
        done_q.put("done")
        p.join(timeout=30)

        # Check errors first (non-blocking drain of the error queue).
        errors = []
        try:
            while True:
                errors.append(error_q.get_nowait())
        except queue.Empty:
            pass
        self.assertFalse(errors, f"Errors occurred: {errors}")

        # Verify the consumer observed exactly the producer's data.
        self.assertFalse(result_q.empty(), "No result from consumer")
        status, shared_data = result_q.get()
        self.assertEqual(status, "ok")
        np.testing.assert_allclose(np.array(original_data), np.array(shared_data), rtol=1e-5)

    def test_producer_consumer_processes(self):
        self._run_minimal_cross_process()

    def tearDown(self):
        # Reap any child process that outlived the test (e.g. after an
        # assertion failure) so a hung producer cannot block the runner.
        for proc in getattr(self, "_procs", []):
            if proc.is_alive():
                proc.terminate()
                proc.join(timeout=5)
        paddle.device.cuda.empty_cache()

0 commit comments

Comments
 (0)