25 changes: 24 additions & 1 deletion bindings/pyroot/pythonizations/python/CMakeLists.txt
@@ -58,7 +58,30 @@ if(tmva)
ROOT/_pythonization/_tmva/_rtensor.py
ROOT/_pythonization/_tmva/_tree_inference.py
ROOT/_pythonization/_tmva/_utils.py
ROOT/_pythonization/_tmva/_gnn.py)
ROOT/_pythonization/_tmva/_gnn.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/__init__.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/parser.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/__init__.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/batchnorm.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/binary.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/concat.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/conv.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/dense.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/elu.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/flatten.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/identity.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/layernorm.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/leaky_relu.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/permute.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/pooling.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/reshape.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/relu.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/rnn.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/selu.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/sigmoid.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/softmax.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/swish.py
ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/tanh.py)
if(dataframe)
list(APPEND PYROOT_EXTRA_PYTHON_SOURCES
ROOT/_pythonization/_tmva/_batchgenerator.py)
2 changes: 2 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_facade.py
@@ -410,8 +410,10 @@ def TMVA(self):
# This line is needed to import the pythonizations in _tmva directory.
# The comment suppresses linter errors about unused imports.
from ._pythonization import _tmva # noqa: F401
from ._pythonization._tmva._sofie._parser._keras.parser import PyKeras

ns = self._fallback_getattr("TMVA")
setattr(ns.Experimental.SOFIE, "PyKeras", PyKeras)
hasRDF = "dataframe" in self.gROOT.GetConfigFeatures()
if hasRDF:
try:
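With this hunk applied, the Keras parser becomes reachable directly on the SOFIE namespace. A minimal sketch of the resulting access path (the call signature of PyKeras is defined in parser.py and is not shown in this diff; the attribute access below is all this hunk guarantees):

```python
import ROOT

# Accessing ROOT.TMVA triggers the facade's TMVA pythonization, which now
# attaches the Python Keras parser to the Experimental.SOFIE namespace.
PyKeras = ROOT.TMVA.Experimental.SOFIE.PyKeras
```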
5 changes: 5 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/__init__.py
@@ -0,0 +1,5 @@
def get_keras_version() -> str:
    """Return the installed Keras version string, e.g. '2.15.0'."""
    import keras

    return keras.__version__
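The callers below compare this version string lexicographically against '2.16'. That is adequate for the 2.1x series but misorders versions such as '2.9'. A hypothetical, more robust comparison (not part of this PR) would parse the numeric components first:

```python
# Hypothetical helper, not part of this PR: compare parsed version components
# instead of raw strings.
def keras_version_tuple() -> tuple:
    import keras

    return tuple(int(p) for p in keras.__version__.split(".")[:2])

# keras_version_tuple() < (2, 16) correctly flags 2.9 as older than 2.16,
# whereas the string comparison '2.9' < '2.16' evaluates to False.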
57 changes: 57 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/batchnorm.py
@@ -0,0 +1,57 @@
from cppyy import gbl as gbl_namespace

from .. import get_keras_version


def MakeKerasBatchNorm(layer):
"""
Create a Keras-compatible batch normalization operation using SOFIE framework.

This function takes a dictionary representing a batch normalization layer and its
attributes and constructs a Keras-compatible batch normalization operation using
the SOFIE framework. Batch normalization is used to normalize the activations of
a neural network, typically applied after the convolutional or dense layers.

Parameters:
layer (dict): A dictionary containing layer information including input, output,
gamma, beta, moving mean, moving variance, epsilon,
momentum, data type (assumed to be float), and other relevant information.

Returns:
ROperator_BatchNormalization: A SOFIE framework operator representing the batch normalization operation.
"""

keras_version = get_keras_version()

finput = layer['layerInput']
foutput = layer['layerOutput']
attributes = layer['layerAttributes']
gamma = attributes["gamma"]
beta = attributes["beta"]
moving_mean = attributes["moving_mean"]
moving_variance = attributes["moving_variance"]
fLayerDType = layer["layerDType"]
fNX = str(finput[0])
fNY = str(foutput[0])

if keras_version < '2.16':
fNScale = gamma.name
fNB = beta.name
fNMean = moving_mean.name
fNVar = moving_variance.name
else:
fNScale = gamma.path
fNB = beta.path
fNMean = moving_mean.path
fNVar = moving_variance.path

epsilon = attributes["epsilon"]
momentum = attributes["momentum"]

if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BatchNormalization('float')(epsilon, momentum, 0, fNX, fNScale, fNB, fNMean, fNVar, fNY)
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator BatchNormalization does not yet support input type " + fLayerDType
)
return op
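For reference, a hypothetical layer record in the shape MakeKerasBatchNorm expects; all names and values here are illustrative, not taken from the PR:

```python
from types import SimpleNamespace

# Illustrative only: SimpleNamespace stands in for a Keras weight variable,
# which exposes .name (Keras < 2.16) or .path (Keras >= 2.16).
def weight(n):
    return SimpleNamespace(name=n, path=n)

layer = {
    "layerInput": ["input0"],
    "layerOutput": ["batchnorm0"],
    "layerDType": "float",
    "layerAttributes": {
        "gamma": weight("bn/gamma"),
        "beta": weight("bn/beta"),
        "moving_mean": weight("bn/moving_mean"),
        "moving_variance": weight("bn/moving_variance"),
        "epsilon": 1e-3,
        "momentum": 0.99,
    },
}
op = MakeKerasBatchNorm(layer)
```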
24 changes: 24 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/binary.py
@@ -0,0 +1,24 @@
from cppyy import gbl as gbl_namespace


def MakeKerasBinary(layer):
    """
    Create a SOFIE elementwise binary operator from a Keras Add, Subtract, or Multiply layer.

    Layers of type "Add" and "Subtract" map to the corresponding SOFIE binary
    operators; any other layer type handled here is treated as "Multiply".

    Parameters:
        layer (dict): A dictionary containing layer information including the two
                      input tensor names, the output tensor name, the layer type,
                      and the data type (which must be float).

    Returns:
        ROperator_BasicBinary: A SOFIE operator representing the binary operation.
    """
    finput = layer['layerInput']
    foutput = layer['layerOutput']
    fLayerType = layer['layerType']
    fLayerDType = layer['layerDType']
    fX1 = finput[0]
    fX2 = finput[1]
    fY = foutput[0]
    op = None
    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
        if fLayerType == "Add":
            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary('float', gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Add)(fX1, fX2, fY)
        elif fLayerType == "Subtract":
            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary('float', gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Sub)(fX1, fX2, fY)
        else:
            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary('float', gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Mul)(fX1, fX2, fY)
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator BasicBinary does not yet support input type " + fLayerDType
)
return op
18 changes: 18 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/concat.py
@@ -0,0 +1,18 @@
from cppyy import gbl as gbl_namespace


def MakeKerasConcat(layer):
    """
    Create a SOFIE concatenation operator from a Keras Concatenate layer.

    Parameters:
        layer (dict): Layer information including input tensor names, output tensor
                      name, the concatenation axis, and the data type (must be float).

    Returns:
        ROperator_Concat: A SOFIE operator representing the concatenation.
    """
    finput = layer['layerInput']
    foutput = layer['layerOutput']
    fLayerDType = layer["layerDType"]
    attributes = layer['layerAttributes']
    inputs = [str(i) for i in finput]
    output = str(foutput[0])
    axis = int(attributes["axis"])
    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Concat(inputs, axis, 0, output)
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator Concat does not yet support input type " + fLayerDType
)
return op
76 changes: 76 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/conv.py
@@ -0,0 +1,76 @@
import math

from cppyy import gbl as gbl_namespace

from .. import get_keras_version


def MakeKerasConv(layer):
"""
Create a Keras-compatible convolutional layer operation using SOFIE framework.

This function takes a dictionary representing a convolutional layer and its attributes and
constructs a Keras-compatible convolutional layer operation using the SOFIE framework.
A convolutional layer applies a convolution operation between the input tensor and a set
of learnable filters (kernels).

Parameters:
layer (dict): A dictionary containing layer information including input, output,
data type (must be float), weight and bias name, kernel size, dilations, padding and strides.
When padding is same (keep in the same dimensions), the padding shape is calculated.

Returns:
ROperator_Conv: A SOFIE framework operator representing the convolutional layer operation.
"""

keras_version = get_keras_version()

finput = layer['layerInput']
foutput = layer['layerOutput']
fLayerDType = layer['layerDType']
fLayerInputName = finput[0]
fLayerOutputName = foutput[0]
attributes = layer['layerAttributes']
fWeightNames = layer["layerWeight"]
fKernelName = fWeightNames[0]
fBiasName = fWeightNames[1]
fAttrDilations = attributes["dilation_rate"]
fAttrGroup = int(attributes["groups"])
fAttrKernelShape = attributes["kernel_size"]
fKerasPadding = str(attributes["padding"])
fAttrStrides = attributes["strides"]
fAttrPads = []

if fKerasPadding == 'valid':
fAttrAutopad = 'VALID'
elif fKerasPadding == 'same':
fAttrAutopad = 'NOTSET'
if keras_version < '2.16':
fInputShape = attributes['_build_input_shape']
else:
fInputShape = attributes['_build_shapes_dict']['input_shape']
inputHeight = fInputShape[1]
inputWidth = fInputShape[2]
outputHeight = math.ceil(float(inputHeight) / float(fAttrStrides[0]))
outputWidth = math.ceil(float(inputWidth) / float(fAttrStrides[1]))
padding_height = max((outputHeight - 1) * fAttrStrides[0] + fAttrKernelShape[0] - inputHeight, 0)
padding_width = max((outputWidth - 1) * fAttrStrides[1] + fAttrKernelShape[1] - inputWidth, 0)
padding_top = math.floor(padding_height / 2)
padding_bottom = padding_height - padding_top
padding_left = math.floor(padding_width / 2)
padding_right = padding_width - padding_left
fAttrPads = [padding_top, padding_bottom, padding_left, padding_right]
else:
raise RuntimeError(
"TMVA::SOFIE - RModel Keras Parser doesn't yet supports Convolution layer with padding " + fKerasPadding
)
if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Conv['float'](fAttrAutopad, fAttrDilations, fAttrGroup,
fAttrKernelShape, fAttrPads, fAttrStrides,
fLayerInputName, fKernelName, fBiasName,
fLayerOutputName)
return op
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + fLayerDType
)
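A worked instance of the 'same' padding arithmetic above, with hypothetical shapes:

```python
import math

# Hypothetical: 28x28 input, 3x3 kernel, stride (1, 1).
inputHeight, inputWidth = 28, 28
strides, kernel = (1, 1), (3, 3)

outputHeight = math.ceil(inputHeight / strides[0])  # 28
padding_height = max((outputHeight - 1) * strides[0] + kernel[0] - inputHeight, 0)  # 2
padding_top = padding_height // 2                   # 1
padding_bottom = padding_height - padding_top       # 1
# By symmetry the width padding is also (1, 1), so fAttrPads == [1, 1, 1, 1]
# and a 3x3, stride-1 convolution preserves the 28x28 spatial size.
```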
38 changes: 38 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/dense.py
@@ -0,0 +1,38 @@
from cppyy import gbl as gbl_namespace


def MakeKerasDense(layer):
"""
Create a Keras-compatible dense (fully connected) layer operation using SOFIE framework.

This function takes a dictionary representing a dense layer and its attributes and
constructs a Keras-compatible dense (fully connected) layer operation using the SOFIE framework.
A dense layer applies a matrix multiplication between the input tensor and weight matrix,
and adds a bias term.

Parameters:
layer (dict): A dictionary containing layer information including input, output,
layer weight names, and data type - must be float.

Returns:
ROperator_Gemm: A SOFIE framework operator representing the dense layer operation.
"""
finput = layer['layerInput']
foutput = layer['layerOutput']
fLayerDType = layer['layerDType']
fLayerInputName = finput[0]
fLayerOutputName = foutput[0]
fWeightNames = layer["layerWeight"]
fKernelName = fWeightNames[0]
fBiasName = fWeightNames[1]
attr_alpha = 1.0
attr_beta = 1.0
attr_transA = 0
attr_transB = 0
if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Gemm['float'](attr_alpha, attr_beta, attr_transA, attr_transB, fLayerInputName, fKernelName, fBiasName, fLayerOutputName)
return op
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + fLayerDType
)
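With attr_alpha = attr_beta = 1 and no transposes, the emitted Gemm computes Y = X·W + B, the standard Dense forward pass; activations are presumably handled by the separate activation modules added in this PR (relu, sigmoid, softmax, and so on). A NumPy sketch of the equivalence, with hypothetical shapes:

```python
import numpy as np

# Hypothetical shapes: batch of 1, 4 input features, 3 output units.
X = np.random.rand(1, 4).astype(np.float32)  # input
W = np.random.rand(4, 3).astype(np.float32)  # kernel
B = np.random.rand(3).astype(np.float32)     # bias

# Gemm with alpha = beta = 1, transA = transB = 0:
Y = 1.0 * (X @ W) + 1.0 * B
```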
36 changes: 36 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/elu.py
@@ -0,0 +1,36 @@
from cppyy import gbl as gbl_namespace


def MakeKerasELU(layer):
"""
Create a Keras-compatible exponential linear Unit (ELU) activation operation using SOFIE framework.

This function takes a dictionary representing a layer and its attributes and
constructs a Keras-compatible ELU activation operation using the SOFIE framework.
ELU is an activation function that modifies only the negative part of ReLU by
applying an exponential curve. It allows small negative values instead of zeros.

Parameters:
layer (dict): A dictionary containing layer information including input, output,
and data type, which must be float.

Returns:
ROperator_Elu: A SOFIE framework operator representing the ELU activation operation.
"""
finput = layer['layerInput']
foutput = layer['layerOutput']
fLayerDType = layer['layerDType']
fLayerInputName = finput[0]
fLayerOutputName = foutput[0]
attributes = layer['layerAttributes']
    if 'alpha' in attributes:
fAlpha = attributes['alpha']
else:
fAlpha = 1.0
if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Elu('float')(fAlpha, fLayerInputName, fLayerOutputName)
return op
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType
)
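For reference, the function the operator implements (alpha defaults to 1.0 when the Keras layer does not provide one):

```python
import math

# Reference ELU: identity for positive inputs, scaled exponential below zero,
# so small negative values pass through instead of being clipped to zero.
def elu(x: float, alpha: float = 1.0) -> float:
    return x if x > 0 else alpha * (math.exp(x) - 1.0)
```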
38 changes: 38 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/flatten.py
@@ -0,0 +1,38 @@
from cppyy import gbl as gbl_namespace

from .. import get_keras_version


def MakeKerasFlatten(layer):
"""
Create a Keras-compatible flattening operation using SOFIE framework.

This function takes a dictionary representing a layer and its attributes and
constructs a Keras-compatible flattening operation using the SOFIE framework.
Flattening is the process of converting a multi-dimensional tensor into a
one-dimensional tensor. Assumes layerDtype is float.

Parameters:
layer (dict): A dictionary containing layer information including input, output,
name, data type, and other relevant information.

Returns:
ROperator_Reshape: A SOFIE framework operator representing the flattening operation.
"""

keras_version = get_keras_version()

finput = layer['layerInput']
foutput = layer['layerOutput']
attributes = layer['layerAttributes']
if keras_version < '2.16':
flayername = attributes['_name']
else:
flayername = attributes['name']
fOpMode = gbl_namespace.TMVA.Experimental.SOFIE.ReshapeOpMode.Flatten
fNameData = finput[0]
fNameOutput = foutput[0]
fNameShape = flayername + "_shape"
op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput)
return op
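Flatten is emitted as a pure reshape: (N, d1, ..., dk) becomes (N, d1*...*dk). A NumPy sketch with hypothetical shapes:

```python
import numpy as np

# Hypothetical (2, 3, 4) tensor: the batch axis is kept, the rest collapses.
x = np.zeros((2, 3, 4), dtype=np.float32)
y = x.reshape(x.shape[0], -1)
assert y.shape == (2, 12)
```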
16 changes: 16 additions & 0 deletions bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/identity.py
@@ -0,0 +1,16 @@
from cppyy import gbl as gbl_namespace


def MakeKerasIdentity(layer):
    """
    Create a SOFIE identity operator, which passes its input through unchanged.
    Expects input and output tensor names and a float data type in `layer`.
    """
    finput = layer['layerInput']
    foutput = layer['layerOutput']
    fLayerDType = layer['layerDType']
    fLayerInputName = finput[0]
    fLayerOutputName = foutput[0]
    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Identity('float')(fLayerInputName, fLayerOutputName)
return op
else:
raise RuntimeError(
"TMVA::SOFIE - Unsupported - Operator Identity does not yet support input type " + fLayerDType
)