torchdrug: [Bug] AttributeError: can't set attribute

In the retrosynthesis tutorial, the following synthon completion code

synthon_optimizer = torch.optim.Adam(synthon_task.parameters(), lr=1e-3)
synthon_solver = core.Engine(synthon_task, synthon_train, synthon_valid,
                             synthon_test, synthon_optimizer,
                             gpus=[0], batch_size=128)
synthon_solver.train(num_epoch=1)
synthon_solver.evaluate("valid")
synthon_solver.save("g2gs_synthon_model.pth")

fails with the error below:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Input In [13], in <cell line: 11>()
      7 synthon_optimizer = torch.optim.Adam(synthon_task.parameters(), lr=1e-3)
      8 synthon_solver = core.Engine(synthon_task, synthon_train, synthon_valid,
      9                              synthon_test, synthon_optimizer,
     10                              gpus=[0], batch_size=128)
---> 11 synthon_solver.train(num_epoch=1)
     12 synthon_solver.evaluate("valid")
     13 synthon_solver.save("g2gs_synthon_model.pth")

File /usr/local/lib/python3.9/dist-packages/torchdrug/core/engine.py:155, in Engine.train(self, num_epoch, batch_per_epoch)
    152 if self.device.type == "cuda":
    153     batch = utils.cuda(batch, device=self.device)
--> 155 loss, metric = model(batch)
    156 if not loss.requires_grad:
    157     raise RuntimeError("Loss doesn't require grad. Did you define any loss in the task?")

File /usr/local/lib/python3.9/dist-packages/torch/nn/modules/module.py:1130, in Module._call_impl(self, *input, **kwargs)
   1126 # If we don't have any hooks, we want to skip the rest of the logic in
   1127 # this function, and just call forward.
   1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130     return forward_call(*input, **kwargs)
   1131 # Do not call functions when jit is used
   1132 full_backward_hooks, non_full_backward_hooks = [], []

File /usr/local/lib/python3.9/dist-packages/torchdrug/tasks/retrosynthesis.py:592, in SynthonCompletion.forward(self, batch)
    589 all_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
    590 metric = {}
--> 592 pred, target = self.predict_and_target(batch, all_loss, metric)
    593 node_in_pred, node_out_pred, bond_pred, stop_pred = pred
    594 node_in_target, node_out_target, bond_target, stop_target, size = target

File /usr/local/lib/python3.9/dist-packages/torchdrug/tasks/retrosynthesis.py:984, in SynthonCompletion.predict_and_target(self, batch, all_loss, metric)
    981 with reactant.graph():
    982     reactant.reaction = batch["reaction"]
--> 984 graph1, node_in_target1, node_out_target1, bond_target1, stop_target1 = self.all_edge(reactant, synthon)
    985 graph2, node_in_target2, node_out_target2, bond_target2, stop_target2 = self.all_stop(reactant, synthon)
    987 graph = self._cat([graph1, graph2])

File /usr/local/lib/python3.9/dist-packages/torch/autograd/grad_mode.py:27, in _DecoratorContextManager.__call__.<locals>.decorate_context(*args, **kwargs)
     24 @functools.wraps(func)
     25 def decorate_context(*args, **kwargs):
     26     with self.clone():
---> 27         return func(*args, **kwargs)

File /usr/local/lib/python3.9/dist-packages/torchdrug/tasks/retrosynthesis.py:557, in SynthonCompletion.all_edge(self, reactant, synthon)
    555 assert (graph.num_edges % 2 == 0).all()
    556 # node / edge features may change because we mask some nodes / edges
--> 557 graph, feature_valid = self._update_molecule_feature(graph)
    559 return graph[feature_valid], node_in_target[feature_valid], node_out_target[feature_valid], \
    560        bond_target[feature_valid], stop_target[feature_valid]

File /usr/local/lib/python3.9/dist-packages/torchdrug/tasks/retrosynthesis.py:398, in SynthonCompletion._update_molecule_feature(self, graphs)
    395 bond_type[edge_mask] = new_graphs.bond_type.to(device=graphs.device)
    397 with graphs.node():
--> 398     graphs.node_feature = node_feature
    399 with graphs.edge():
    400     graphs.edge_feature = edge_feature

File /usr/local/lib/python3.9/dist-packages/torchdrug/data/graph.py:160, in Graph.__setattr__(self, key, value)
    158 if hasattr(self, "meta_dict"):
    159     self._check_attribute(key, value)
--> 160 super(Graph, self).__setattr__(key, value)

File /usr/local/lib/python3.9/dist-packages/torchdrug/core/core.py:84, in _MetaContainer.__setattr__(self, key, value)
     82     if types:
     83         self.meta_dict[key] = types.copy()
---> 84 self._setattr(key, value)

File /usr/local/lib/python3.9/dist-packages/torchdrug/core/core.py:93, in _MetaContainer._setattr(self, key, value)
     92 def _setattr(self, key, value):
---> 93     return super(_MetaContainer, self).__setattr__(key, value)

AttributeError: can't set attribute

About this issue

  • State: closed
  • Created 2 years ago
  • Comments: 19 (3 by maintainers)

Most upvoted comments

File /usr/local/lib/python3.9/dist-packages/torchdrug/core/core.py:93, in _MetaContainer._setattr(self, key, value)
     92 def _setattr(self, key, value):
---> 93     return super(_MetaContainer, self).__setattr__(key, value)

AttributeError: can't set attribute

This is due to a mistake in a recent commit. @DimGorr, your solution is right. I just fixed it in d282313.
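
If you would rather pick up the fix right away instead of waiting for a release, reinstalling torchdrug from the GitHub source should include this commit (a hedged suggestion; the exact commands depend on your environment):

pip uninstall torchdrug
pip install git+https://github.com/DeepGraphLearning/torchdrug.git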

I also found another bug in the dataset: you may get an incorrect node feature shape for the synthon solver. This is fixed in a1df0a2. If you are using an older commit, you can construct the dataset with the following lines instead

synthon_dataset = datasets.USPTO50k("~/molecule-datasets/", as_synthon=True,
                                    atom_feature="synthon_completion",
                                    kekulize=True)

This replaces the node_feature argument with atom_feature.
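
For reference, here is a hedged sketch of how the corrected dataset call fits into the rest of the synthon completion pipeline from the tutorial (the RGCN layer sizes and task arguments are illustrative and may differ from the tutorial's exact values; check them against your torchdrug version):

import torch
from torchdrug import core, datasets, models, tasks

# Dataset built with the renamed argument (atom_feature instead of node_feature).
synthon_dataset = datasets.USPTO50k("~/molecule-datasets/", as_synthon=True,
                                    atom_feature="synthon_completion",
                                    kekulize=True)
synthon_train, synthon_valid, synthon_test = synthon_dataset.split()

# Graph encoder and synthon completion task; hidden layer sizes are illustrative.
synthon_model = models.RGCN(input_dim=synthon_dataset.node_feature_dim,
                            hidden_dims=[256, 256, 256, 256, 256, 256],
                            num_relation=synthon_dataset.num_bond_type,
                            concat_hidden=True)
synthon_task = tasks.SynthonCompletion(synthon_model, feature=("graph",))

# Optimizer and solver, as in the original snippet above.
synthon_optimizer = torch.optim.Adam(synthon_task.parameters(), lr=1e-3)
synthon_solver = core.Engine(synthon_task, synthon_train, synthon_valid,
                             synthon_test, synthon_optimizer,
                             gpus=[0], batch_size=128)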