@@ -65,19 +65,21 @@ def test_simulate_expectation_cpu_vs_cuda(self):
                 circuit_batch_tensor,
                 symbol_names, symbol_values_array.astype(np.float64),
                 pauli_sums_tensor),
-            "CPU"
+            "CPU",
+            num_samples=100,
         )
 
         cuda_avg_time, res_cuda = measure_average_runtime(
             lambda: tfq_simulate_ops_cuda.tfq_simulate_expectation(
                 circuit_batch_tensor,
                 symbol_names, symbol_values_array.astype(np.float64),
                 pauli_sums_tensor),
-            "CUDA"
+            "CUDA",
+            num_samples=100,
         )
 
         # The result should be the similar within a tolerance.
-        np.testing.assert_allclose(res_cpu, res_cuda, atol=1e-5)
+        np.testing.assert_allclose(res_cpu, res_cuda, atol=1e-4)
 
         # CUDA op should be faster than CPU op.
         self.assertGreater(cpu_avg_time, cuda_avg_time)
@@ -107,19 +109,22 @@ def test_simulate_expectation_cpu_vs_cuquantum(self):
                 circuit_batch_tensor,
                 symbol_names, symbol_values_array.astype(np.float64),
                 pauli_sums_tensor),
-            "CPU"
-            )
+            "CPU",
+            num_samples=100,
 
+        )
+
         cuda_avg_time, res_cuda = measure_average_runtime(
             lambda: tfq_simulate_ops_cuquantum.tfq_simulate_expectation(
                 circuit_batch_tensor,
                 symbol_names, symbol_values_array.astype(np.float64),
                 pauli_sums_tensor),
-            "cuQuantum"
+            "cuQuantum",
+            num_samples=100,
         )
 
         # The result should be the similar within a tolerance.
-        np.testing.assert_allclose(res_cpu, res_cuda, atol=1e-5)
+        np.testing.assert_allclose(res_cpu, res_cuda, atol=1e-4)
 
         # cuQuantum op should be faster than CPU op.
         self.assertGreater(cpu_avg_time, cuda_avg_time)
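
For context, the `measure_average_runtime` helper called above is defined elsewhere in the test module and is not part of this diff. The sketch below is only an assumed shape for it, inferred from the call sites (a callable, a label string, and the `num_samples` keyword that these changes now pass); the actual helper may differ.

import time
import numpy as np

def measure_average_runtime(fn, tag, num_samples=10):
    """Run `fn` num_samples times; return (average seconds, last result)."""
    runtimes = []
    result = None
    for _ in range(num_samples):
        start = time.perf_counter()
        result = fn()
        runtimes.append(time.perf_counter() - start)
    avg = float(np.mean(runtimes))
    print(f"\n\t{tag} average runtime: {avg} seconds over {num_samples} runs.")
    return avg, result

Under that assumption, raising `num_samples` to 100 averages away per-run timing noise, which makes the `assertGreater(cpu_avg_time, cuda_avg_time)` comparison less flaky at the cost of a longer test.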