3 changes: 3 additions & 0 deletions pyproject.toml
@@ -0,0 +1,3 @@
+[tool.black]
+line-length = 100
+target-version = ['py36', 'py37', 'py38', 'py39']
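The [tool.black] table above is what drives every reformatting hunk below: Black's default line length is 88, and raising it to 100 is why many previously wrapped expressions now fit on one line. A minimal sketch of applying the same settings through Black's Python API (Mode and format_str are assumptions about the installed Black version, not part of this PR):

import black

# Mirror the pyproject.toml settings above.
mode = black.Mode(
    line_length=100,
    target_versions={black.TargetVersion.PY36, black.TargetVersion.PY39},
)
snippet = (
    "raise QiskitMachineLearningError(\n"
    '    "Incompatible num_qubits and feature_map!"\n'
    ")\n"
)
# At line-length = 100 the wrapped raise collapses onto a single line.
print(black.format_str(snippet, mode=mode))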
@@ -175,10 +175,7 @@ def objective_grad(w):
     # TODO: do batch eval
     y_predict = self._neural_network.forward(x, w)
     _, weight_prob_grad = self._neural_network.backward(x, w)
-    grad += (
-        self._loss.gradient(y_predict[0], y_target)
-        @ weight_prob_grad[0, :]
-    )
+    grad += self._loss.gradient(y_predict[0], y_target) @ weight_prob_grad[0, :]
     return grad

 else:
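The collapsed line above is a chain-rule step: the gradient of the loss with respect to the network output is contracted with the Jacobian of that output with respect to the weights. A shape-level sketch with illustrative values (names and shapes are assumptions, not taken from the repository):

import numpy as np

# dL/dw = (dL/dy) @ (dy/dw): a (1,) loss gradient contracted with a
# (1, num_weights) Jacobian yields a (num_weights,) weight gradient.
dloss_dy = np.array([0.5])  # illustrative loss gradient w.r.t. the single output
dy_dw = np.ones((1, 8))     # illustrative Jacobian w.r.t. 8 weights
grad = dloss_dy @ dy_dw     # shape (8,), as accumulated by grad += ... above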
@@ -198,9 +195,9 @@ def objective_grad(w):
     # TODO: do batch eval
     _, weight_prob_grad = self._neural_network.backward(x, w)
     for i in range(num_classes):
-        grad += weight_prob_grad[0, i, :].reshape(
-            grad.shape
-        ) * self._loss(i, y_target)
+        grad += weight_prob_grad[0, i, :].reshape(grad.shape) * self._loss(
+            i, y_target
+        )
     return grad

 if self._warm_start and self._fit_result is not None:
@@ -228,9 +225,7 @@ def predict(self, X: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name
         The predicted classes.
     """
     if self._fit_result is None:
-        raise QiskitMachineLearningError(
-            "Model needs to be fit to some training data first!"
-        )
+        raise QiskitMachineLearningError("Model needs to be fit to some training data first!")
     if self._neural_network.output_shape == (1,):
         predict = np.sign(self._neural_network.forward(X, self._fit_result[0]))
     else:
12 changes: 3 additions & 9 deletions qiskit_machine_learning/algorithms/classifiers/vqc.py
@@ -66,26 +66,20 @@ def __init__(
     num_qubits_ = num_qubits
     if feature_map:
         if feature_map.num_qubits != num_qubits:
-            raise QiskitMachineLearningError(
-                "Incompatible num_qubits and feature_map!"
-            )
+            raise QiskitMachineLearningError("Incompatible num_qubits and feature_map!")
         feature_map_ = feature_map
     else:
         feature_map_ = ZZFeatureMap(num_qubits)
     if ansatz:
         if ansatz.num_qubits != num_qubits:
-            raise QiskitMachineLearningError(
-                "Incompatible num_qubits and ansatz!"
-            )
+            raise QiskitMachineLearningError("Incompatible num_qubits and ansatz!")
         ansatz_ = ansatz
     else:
         ansatz_ = RealAmplitudes(num_qubits)
 else:
     if feature_map and ansatz:
         if feature_map.num_qubits != ansatz.num_qubits:
-            raise QiskitMachineLearningError(
-                "Incompatible feature_map and ansatz!"
-            )
+            raise QiskitMachineLearningError("Incompatible feature_map and ansatz!")
         feature_map_ = feature_map
         ansatz_ = ansatz
         num_qubits_ = feature_map.num_qubits
@@ -60,17 +60,11 @@ def __init__(self, n_features=1, n_out=1):
     activ_function_curr = layer["activation"]
     layer_input_size = layer["input_dim"]
     layer_output_size = layer["output_dim"]
-    params_layer = algorithm_globals.random.random(
-        layer_output_size * layer_input_size
-    )
+    params_layer = algorithm_globals.random.random(layer_output_size * layer_input_size)
     if activ_function_curr == "leaky_relu":
-        params_layer = (
-            params_layer * 2 - np.ones(np.shape(params_layer))
-        ) * 0.7
+        params_layer = (params_layer * 2 - np.ones(np.shape(params_layer))) * 0.7
     elif activ_function_curr == "sigmoid":
-        params_layer = (
-            params_layer * 2 - np.ones(np.shape(params_layer))
-        ) * 0.2
+        params_layer = (params_layer * 2 - np.ones(np.shape(params_layer))) * 0.2
     else:
         params_layer = params_layer * 2 - np.ones(np.shape(params_layer))
     self.parameters = np.append(self.parameters, params_layer)
@@ -123,9 +117,7 @@ def single_layer_forward_propagation(x_old, w_new, activation="leaky_relu"):
     w_curr = self.parameters[pointer:pointer_next]
     w_curr = np.reshape(w_curr, (layer_output_size, layer_input_size))
     pointer = pointer_next
-    x_new, z_curr = single_layer_forward_propagation(
-        x_old, w_curr, activ_function_curr
-    )
+    x_new, z_curr = single_layer_forward_propagation(x_old, w_curr, activ_function_curr)

     self.memory["a" + str(idx)] = x_old
     self.memory["z" + str(layer_idx)] = z_curr
@@ -290,9 +282,7 @@ def load_model(self, load_dir):
     self._discriminator.architecture = np.load(
         os.path.join(load_dir, "np_discriminator_architecture.csv")
     )
-    self._discriminator.memory = np.load(
-        os.path.join(load_dir, "np_discriminator_memory.csv")
-    )
+    self._discriminator.memory = np.load(os.path.join(load_dir, "np_discriminator_memory.csv"))
     self._discriminator.parameters = np.load(
         os.path.join(load_dir, "np_discriminator_params.csv")
     )
@@ -312,9 +302,7 @@ def discriminator_net(self):
 def discriminator_net(self, net):
     self._discriminator = net

-def get_label(
-    self, x, detach=False
-): # pylint: disable=arguments-differ,unused-argument
+def get_label(self, x, detach=False): # pylint: disable=arguments-differ,unused-argument
     """
     Get data sample labels, i.e. true or fake.

@@ -346,11 +334,7 @@ def loss(self, x, y, weights=None):
     np.multiply(y, np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x)))
     + np.multiply(
         np.ones(np.shape(y)) - y,
-        np.log(
-            np.maximum(
-                np.ones(np.shape(x)) * 1e-4, np.ones(np.shape(x)) - x
-            )
-        ),
+        np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, np.ones(np.shape(x)) - x)),
     ),
     weights,
 )
@@ -360,11 +344,7 @@ def loss(self, x, y, weights=None):
     np.multiply(y, np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, x)))
     + np.multiply(
         np.ones(np.shape(y)) - y,
-        np.log(
-            np.maximum(
-                np.ones(np.shape(x)) * 1e-4, np.ones(np.shape(x)) - x
-            )
-        ),
+        np.log(np.maximum(np.ones(np.shape(x)) * 1e-4, np.ones(np.shape(x)) - x)),
     )
 )

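Both loss hunks above reformat the same pattern: a binary cross-entropy whose predicted probabilities are clipped below at 1e-4 so np.log never sees zero. A compact sketch of that pattern (the sign, reduction, and weighting of the full method are only partially visible in these hunks, so this is illustrative, not the PR's code):

import numpy as np

def clipped_bce_terms(x, y, eps=1e-4):
    # Per-sample cross-entropy terms with probabilities clipped at eps,
    # mirroring the np.maximum(np.ones(np.shape(x)) * 1e-4, ...) calls above.
    return y * np.log(np.maximum(eps, x)) + (1 - y) * np.log(np.maximum(eps, 1 - x))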
@@ -388,9 +368,7 @@ def objective_function(params):
     self._discriminator.parameters = params
     # Train on Real Data
     prediction_real = self.get_label(real_batch)
-    loss_real = self.loss(
-        prediction_real, np.ones(np.shape(prediction_real)), real_prob
-    )
+    loss_real = self.loss(prediction_real, np.ones(np.shape(prediction_real)), real_prob)
     prediction_fake = self.get_label(generated_batch)
     loss_fake = self.loss(
         prediction_fake, np.zeros(np.shape(prediction_fake)), generated_prob
@@ -65,9 +65,7 @@ def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
     self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
     # optimizer: torch.optim.Optimizer or None, Optimizer initialized w.r.t
     # discriminator network parameters.
-    self._optimizer = optim.Adam(
-        self._discriminator.parameters(), lr=1e-5, amsgrad=True
-    )
+    self._optimizer = optim.Adam(self._discriminator.parameters(), lr=1e-5, amsgrad=True)

     self._ret = {}  # type: Dict[str, Any]

@@ -179,9 +177,9 @@ def gradient_penalty(self, x, lambda_=5.0, k=0.01, c=1.0):
     z = Variable(x + delta_, requires_grad=True)
     o_l = self.get_label(z)
     # pylint: disable=no-member
-    d_g = torch.autograd.grad(
-        o_l, z, grad_outputs=torch.ones(o_l.size()), create_graph=True
-    )[0].view(z.size(0), -1)
+    d_g = torch.autograd.grad(o_l, z, grad_outputs=torch.ones(o_l.size()), create_graph=True)[
+        0
+    ].view(z.size(0), -1)

     return lambda_ * ((d_g.norm(p=2, dim=1) - k) ** 2).mean()

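At 100 columns Black must break this chained expression after the subscript, stranding the 0 on its own line. An equivalent restructuring (a sketch of an alternative, not what the PR does) binds the torch.autograd.grad result first:

# torch.autograd.grad returns a tuple; take its first element before reshaping.
grads = torch.autograd.grad(o_l, z, grad_outputs=torch.ones(o_l.size()), create_graph=True)
d_g = grads[0].view(z.size(0), -1)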
@@ -229,15 +227,11 @@ def train(
     prediction_real = self.get_label(real_batch)

     # Calculate error and back propagate
-    error_real = self.loss(
-        prediction_real, torch.ones(len(prediction_real), 1), real_prob
-    )
+    error_real = self.loss(prediction_real, torch.ones(len(prediction_real), 1), real_prob)
     error_real.backward()

     # Train on Generated Data
-    generated_batch = np.reshape(
-        generated_batch, (len(generated_batch), self._n_features)
-    )
+    generated_batch = np.reshape(generated_batch, (len(generated_batch), self._n_features))
     generated_prob = np.reshape(generated_prob, (len(generated_prob), 1))
     generated_prob = torch.tensor(generated_prob, dtype=torch.float32)
     prediction_fake = self.get_label(generated_batch)
@@ -179,7 +179,7 @@ def random(self):
 def run(
     self,
     quantum_instance: Optional[Union[QuantumInstance, Backend, BaseBackend]] = None,
-    **kwargs
+    **kwargs,
 ) -> Dict:
     """Execute the algorithm with selected backend.
     Args:
@@ -193,8 +193,7 @@ def run(
     """
     if quantum_instance is None and self.quantum_instance is None:
         raise QiskitMachineLearningError(
-            "A QuantumInstance or Backend "
-            "must be supplied to run the quantum algorithm."
+            "A QuantumInstance or Backend must be supplied to run the quantum algorithm."
         )
     if isinstance(quantum_instance, (BaseBackend, Backend)):
         self.set_backend(quantum_instance, **kwargs)
@@ -386,9 +385,7 @@ def train(self):
         items in the truncated data set
     """
     if self._snapshot_dir is not None:
-        with open(
-            os.path.join(self._snapshot_dir, "output.csv"), mode="w"
-        ) as csv_file:
+        with open(os.path.join(self._snapshot_dir, "output.csv"), mode="w") as csv_file:
             fieldnames = [
                 "epoch",
                 "loss_discriminator",
@@ -424,9 +421,7 @@ def train(self):

     # 2. Train Generator
     self._generator.discriminator = self._discriminator
-    ret_g = self._generator.train(
-        self._quantum_instance, shots=self._batch_size
-    )
+    ret_g = self._generator.train(self._quantum_instance, shots=self._batch_size)
     g_loss_min = ret_g["loss"]

     self._d_loss.append(np.around(float(d_loss_min), 4))
@@ -465,9 +460,7 @@ def _run(self):
     Raises:
         QiskitMachineLearningError: invalid backend
     """
-    if self._quantum_instance.backend_name == (
-        "unitary_simulator" or "clifford_simulator"
-    ):
+    if self._quantum_instance.backend_name == ("unitary_simulator" or "clifford_simulator"):
         raise QiskitMachineLearningError(
             "Chosen backend not supported - "
             "Set backend either to statevector_simulator, qasm_simulator"
@@ -80,22 +80,17 @@ def __init__(
     if generator_circuit is None:
         circuit = QuantumCircuit(sum(num_qubits))
         circuit.h(circuit.qubits)
-        ansatz = TwoLocal(
-            sum(num_qubits), "ry", "cz", reps=1, entanglement="circular"
-        )
+        ansatz = TwoLocal(sum(num_qubits), "ry", "cz", reps=1, entanglement="circular")
         circuit.compose(ansatz, inplace=True)

     # Set generator circuit
     self.generator_circuit = circuit

-    self._free_parameters = sorted(
-        self.generator_circuit.parameters, key=lambda p: p.name
-    )
+    self._free_parameters = sorted(self.generator_circuit.parameters, key=lambda p: p.name)

     if init_params is None:
         init_params = (
-            algorithm_globals.random.random(self.generator_circuit.num_parameters)
-            * 2e-2
+            algorithm_globals.random.random(self.generator_circuit.num_parameters) * 2e-2
         )

     self._bound_parameters = init_params
@@ -223,8 +218,7 @@ def optimizer(self, optimizer: Optional[Optimizer] = None) -> None:
         self._optimizer = optimizer
     else:
         raise QiskitMachineLearningError(
-            "Please provide an Optimizer object to use"
-            "as the generator optimizer."
+            "Please provide an Optimizer object to use as the generator optimizer."
         )
 else:
     self._optimizer = ADAM(
@@ -391,9 +385,7 @@ def objective_function(params):

     return objective_function

-def _convert_to_gradient_function(
-    self, gradient_object, quantum_instance, discriminator
-):
+def _convert_to_gradient_function(self, gradient_object, quantum_instance, discriminator):
     """
     Convert to gradient function

@@ -427,12 +419,8 @@ def gradient_function(current_point):
     prediction_generated = discriminator.get_label(generated_data, detach=True)
     op = ~CircuitStateFn(primitive=self.generator_circuit)
     grad_object = gradient_object.convert(operator=op, params=free_params)
-    value_dict = {
-        free_params[i]: current_point[i] for i in range(len(free_params))
-    }
-    analytical_gradients = np.array(
-        grad_object.assign_parameters(value_dict).eval()
-    )
+    value_dict = {free_params[i]: current_point[i] for i in range(len(free_params))}
+    analytical_gradients = np.array(grad_object.assign_parameters(value_dict).eval())
     loss_gradients = self.loss(
         prediction_generated, np.transpose(analytical_gradients)
     ).real
@@ -160,9 +160,9 @@ def objective_grad(w):
     # TODO: do batch eval
     _, weight_prob_grad = self._neural_network.backward(x, w)
     for i in range(num_classes):
-        grad += weight_prob_grad[0, i, :].reshape(
-            grad.shape
-        ) * self._loss(i, y_target)
+        grad += weight_prob_grad[0, i, :].reshape(grad.shape) * self._loss(
+            i, y_target
+        )
     return grad

 if self._warm_start and self._fit_result is not None:
@@ -191,9 +191,7 @@ def predict(self, X: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name
     """

     if self._fit_result is None:
-        raise QiskitMachineLearningError(
-            "Model needs to be fit to some training data first!"
-        )
+        raise QiskitMachineLearningError("Model needs to be fit to some training data first!")

     # TODO: proper handling of batching
     return self._neural_network.forward(X, self._fit_result[0])
@@ -92,9 +92,7 @@ def _check_configuration(self, raise_on_failure=True):
     self._ordered_parameters.resize(self.feature_dimension)
 elif len(self._ordered_parameters) != self.feature_dimension:
     if raise_on_failure:
-        raise ValueError(
-            "Mismatching number of parameters and feature dimension."
-        )
+        raise ValueError("Mismatching number of parameters and feature dimension.")
     return False
 return True

@@ -169,9 +167,7 @@ def _define(self):
     if len(param.parameters) == 0:
         cleaned_params.append(complex(param))
     else:
-        raise QiskitError(
-            "Cannot define a ParameterizedInitialize with unbound parameters"
-        )
+        raise QiskitError("Cannot define a ParameterizedInitialize with unbound parameters")

 # normalize
 normalized = np.array(cleaned_params) / np.linalg.norm(cleaned_params)