Neural Prophet
zenml.integrations.neural_prophet
special
Initialization of the Neural Prophet integration.
NeuralProphetIntegration (Integration)
Definition of NeuralProphet integration for ZenML.
Source code in zenml/integrations/neural_prophet/__init__.py

```python
class NeuralProphetIntegration(Integration):
    """Definition of NeuralProphet integration for ZenML."""

    NAME = NEURAL_PROPHET
    REQUIREMENTS = ["neuralprophet>=0.3.2"]

    @classmethod
    def activate(cls) -> None:
        """Activates the integration."""
        from zenml.integrations.neural_prophet import materializers  # noqa
```
activate()
classmethod
Activates the integration.
Source code in zenml/integrations/neural_prophet/__init__.py

```python
@classmethod
def activate(cls) -> None:
    """Activates the integration."""
    from zenml.integrations.neural_prophet import materializers  # noqa
```
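In normal use you install the integration's requirements through the ZenML CLI and the materializers are registered for you; calling `activate()` by hand is only needed in custom setups. A minimal sketch (the manual call mirrors what the CLI-driven activation does):

```python
# Install the pinned requirements first, e.g. via the ZenML CLI:
#   zenml integration install neural_prophet
from zenml.integrations.neural_prophet import NeuralProphetIntegration

# Importing the materializers module (which activate() does internally)
# registers the NeuralProphet materializer with ZenML.
NeuralProphetIntegration.activate()
```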
materializers
special
Initialization of the Neural Prophet materializer.
neural_prophet_materializer
Implementation of the Neural Prophet materializer.
NeuralProphetMaterializer (BaseMaterializer)
Materializer to read/write NeuralProphet models.
Source code in zenml/integrations/neural_prophet/materializers/neural_prophet_materializer.py

```python
class NeuralProphetMaterializer(BaseMaterializer):
    """Materializer to read/write NeuralProphet models."""

    ASSOCIATED_TYPES = (NeuralProphet,)
    ASSOCIATED_ARTIFACT_TYPES = (ModelArtifact,)

    def handle_input(self, data_type: Type[Any]) -> NeuralProphet:
        """Reads and returns a NeuralProphet model.

        Args:
            data_type: The type of the model to load.

        Returns:
            A loaded NeuralProphet model.
        """
        super().handle_input(data_type)
        return torch.load(  # type: ignore[no-untyped-call]
            os.path.join(self.artifact.uri, DEFAULT_FILENAME)
        )  # noqa

    def handle_return(self, model: NeuralProphet) -> None:
        """Writes a NeuralProphet model.

        Args:
            model: A NeuralProphet model object.
        """
        super().handle_return(model)
        torch.save(model, os.path.join(self.artifact.uri, DEFAULT_FILENAME))
```
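This materializer is picked up automatically whenever a pipeline step returns or consumes a NeuralProphet object. A minimal sketch of a producing step (the step name, DataFrame columns, and training arguments are illustrative, not part of this module):

```python
import pandas as pd
from neuralprophet import NeuralProphet
from zenml.steps import step


@step
def trainer(df: pd.DataFrame) -> NeuralProphet:
    """Fits a model; the returned object is persisted by handle_return."""
    model = NeuralProphet()
    # NeuralProphet expects a DataFrame with `ds` (dates) and `y` (values).
    model.fit(df, freq="D")
    return model
```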
handle_input(self, data_type)
Reads and returns a NeuralProphet model.
Parameters:

Name | Type | Description | Default |
---|---|---|---|
data_type | Type[Any] | The type of the model to load. | required |

Returns:

Type | Description |
---|---|
NeuralProphet | A loaded NeuralProphet model. |
Source code in zenml/integrations/neural_prophet/materializers/neural_prophet_materializer.py

```python
def handle_input(self, data_type: Type[Any]) -> NeuralProphet:
    """Reads and returns a NeuralProphet model.

    Args:
        data_type: The type of the model to load.

    Returns:
        A loaded NeuralProphet model.
    """
    super().handle_input(data_type)
    return torch.load(  # type: ignore[no-untyped-call]
        os.path.join(self.artifact.uri, DEFAULT_FILENAME)
    )  # noqa
```
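ZenML calls handle_input behind the scenes when a downstream step declares a NeuralProphet input. A minimal consuming-step sketch (the step name and forecast horizon are illustrative):

```python
import pandas as pd
from neuralprophet import NeuralProphet
from zenml.steps import step


@step
def predictor(model: NeuralProphet, df: pd.DataFrame) -> pd.DataFrame:
    """Consumes a model that ZenML loaded via handle_input and forecasts."""
    future = model.make_future_dataframe(df, periods=30)
    return model.predict(future)
```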
handle_return(self, model)
Writes a NeuralProphet model.
Parameters:

Name | Type | Description | Default |
---|---|---|---|
model | NeuralProphet | A NeuralProphet model object. | required |
Source code in zenml/integrations/neural_prophet/materializers/neural_prophet_materializer.py

```python
def handle_return(self, model: NeuralProphet) -> None:
    """Writes a NeuralProphet model.

    Args:
        model: A NeuralProphet model object.
    """
    super().handle_return(model)
    torch.save(model, os.path.join(self.artifact.uri, DEFAULT_FILENAME))
```
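Because handle_return stores the model as a single torch checkpoint inside the artifact directory, a model written this way can also be reloaded outside of a pipeline. A minimal sketch, assuming you know the artifact's URI and that it is reachable as a local path (the path below is a placeholder):

```python
import os

import torch
from zenml.integrations.neural_prophet.materializers.neural_prophet_materializer import (
    DEFAULT_FILENAME,
)

# Placeholder: look up the real URI in your artifact store / ZenML metadata.
# For remote artifact stores you would need to fetch the file locally first.
artifact_uri = "/path/to/model/artifact"
model = torch.load(os.path.join(artifact_uri, DEFAULT_FILENAME))
```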