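Diff of the TMVA SOFIE Keras-parser Python helpers. The change is the same across every file: the module-level `from cppyy import gbl as gbl_namespace` access is replaced by a function-local `from ROOT.TMVA.Experimental import SOFIE` import, string literals are normalized to double quotes, and long constructor calls and error messages are reflowed.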
File: get_keras_version
@@ -1,5 +1,5 @@
 def get_keras_version() -> str:
 
     import keras
-    return keras.__version__
 
+    return keras.__version__
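One caveat worth noting for the hunks below: the parser compares this version string lexicographically (`keras_version < "2.16"`), which misorders double-digit components — as a string, `"2.9" < "2.16"` is False because `"9" > "1"`. A minimal sketch of a numeric comparison, using a hypothetical helper name that is not part of this PR:

def keras_version_tuple() -> tuple:
    # Hypothetical alternative to get_keras_version(): compare versions as
    # integer tuples instead of strings, so (2, 9) < (2, 16) holds.
    import keras

    return tuple(int(part) for part in keras.__version__.split(".")[:2])

# Usage sketch: replace `keras_version < "2.16"` with
# `keras_version_tuple() < (2, 16)`.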
File: MakeKerasBatchNorm
@@ -1,9 +1,7 @@
-from cppyy import gbl as gbl_namespace
-
 from .. import get_keras_version
 
 
-def MakeKerasBatchNorm(layer):
+def MakeKerasBatchNorm(layer):
     """
     Create a Keras-compatible batch normalization operation using SOFIE framework.
 
@@ -20,21 +18,22 @@ def MakeKerasBatchNorm(layer):
     Returns:
         ROperator_BatchNormalization: A SOFIE framework operator representing the batch normalization operation.
     """
 
+    from ROOT.TMVA.Experimental import SOFIE
 
     keras_version = get_keras_version()
-    finput = layer['layerInput']
-    foutput = layer['layerOutput']
-    attributes = layer['layerAttributes']
-
+    finput = layer["layerInput"]
+    foutput = layer["layerOutput"]
+    attributes = layer["layerAttributes"]
     gamma = attributes["gamma"]
     beta = attributes["beta"]
     moving_mean = attributes["moving_mean"]
     moving_variance = attributes["moving_variance"]
     fLayerDType = layer["layerDType"]
     fNX = str(finput[0])
     fNY = str(foutput[0])
-    if keras_version < '2.16':
+
+    if keras_version < "2.16":
         fNScale = gamma.name
         fNB = beta.name
         fNMean = moving_mean.name
@@ -44,14 +43,14 @@ def MakeKerasBatchNorm(layer):
         fNB = beta.path
         fNMean = moving_mean.path
         fNVar = moving_variance.path
 
     epsilon = attributes["epsilon"]
     momentum = attributes["momentum"]
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
-        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BatchNormalization('float')(epsilon, momentum, 0, fNX, fNScale, fNB, fNMean, fNVar, fNY)
 
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
+        op = SOFIE.ROperator_BatchNormalization("float")(epsilon, momentum, 0, fNX, fNScale, fNB, fNMean, fNVar, fNY)
     else:
         raise RuntimeError(
             "TMVA::SOFIE - Unsupported - Operator BatchNormalization does not yet support input type " + fLayerDType
         )
-    return op
+    return op
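For orientation, a minimal sketch of the `layer` dict that `MakeKerasBatchNorm` consumes, with the keys inferred from the reads above. The tensor names are made up, and actually constructing the operator requires a ROOT build with TMVA SOFIE enabled:

import keras

# Build a real BatchNormalization layer so the weight objects expose
# .name / .path as the parser expects.
bn = keras.layers.BatchNormalization()
bn.build((None, 4))
gamma, beta, moving_mean, moving_variance = bn.weights

layer = {
    "layerInput": ["bn_input"],        # illustrative tensor names
    "layerOutput": ["bn_output"],
    "layerDType": "float32",
    "layerAttributes": {
        "gamma": gamma,
        "beta": beta,
        "moving_mean": moving_mean,
        "moving_variance": moving_variance,
        "epsilon": bn.epsilon,
        "momentum": bn.momentum,
    },
}
op = MakeKerasBatchNorm(layer)  # SOFIE ROperator_BatchNormalization("float")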
File: MakeKerasBinary
@@ -1,24 +1,23 @@
-from cppyy import gbl as gbl_namespace
-
-
 def MakeKerasBinary(layer):
-    input = layer['layerInput']
-    output = layer['layerOutput']
-    fLayerType = layer['layerType']
-    fLayerDType = layer['layerDType']
+    from ROOT.TMVA.Experimental import SOFIE
+
+    input = layer["layerInput"]
+    output = layer["layerOutput"]
+    fLayerType = layer["layerType"]
+    fLayerDType = layer["layerDType"]
     fX1 = input[0]
     fX2 = input[1]
     fY = output[0]
     op = None
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
         if fLayerType == "Add":
-            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary(float, gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Add)(fX1, fX2, fY)
+            op = SOFIE.ROperator_BasicBinary(float, SOFIE.EBasicBinaryOperator.Add)(fX1, fX2, fY)
         elif fLayerType == "Subtract":
-            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary(float, gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Sub)(fX1, fX2, fY)
+            op = SOFIE.ROperator_BasicBinary(float, SOFIE.EBasicBinaryOperator.Sub)(fX1, fX2, fY)
         else:
-            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary(float, gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Mul)(fX1, fX2, fY)
+            op = SOFIE.ROperator_BasicBinary(float, SOFIE.EBasicBinaryOperator.Mul)(fX1, fX2, fY)
     else:
         raise RuntimeError(
             "TMVA::SOFIE - Unsupported - Operator BasicBinary does not yet support input type " + fLayerDType
         )
-    return op
+    return op
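Note the dispatch above: `"Add"` and `"Subtract"` map to SOFIE's `Add` and `Sub`, and every other layer type silently falls through to `Mul`. A hedged sketch of an explicit table (not part of this PR) that would reject unknown types instead:

def binary_operator_kind(fLayerType, SOFIE):
    # Hypothetical helper: mirrors the if/elif/else chain above but raises
    # on anything that is not an explicit Add/Subtract/Multiply layer.
    table = {
        "Add": SOFIE.EBasicBinaryOperator.Add,
        "Subtract": SOFIE.EBasicBinaryOperator.Sub,
        "Multiply": SOFIE.EBasicBinaryOperator.Mul,
    }
    if fLayerType not in table:
        raise RuntimeError("TMVA::SOFIE - Unsupported binary layer type " + fLayerType)
    return table[fLayerType]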
File: MakeKerasConcat
@@ -1,18 +1,15 @@
-from cppyy import gbl as gbl_namespace
-
-
 def MakeKerasConcat(layer):
-    finput = layer['layerInput']
-    foutput = layer['layerOutput']
+    from ROOT.TMVA.Experimental import SOFIE
+
+    finput = layer["layerInput"]
+    foutput = layer["layerOutput"]
     fLayerDType = layer["layerDType"]
-    attributes = layer['layerAttributes']
+    attributes = layer["layerAttributes"]
     input = [str(i) for i in finput]
     output = str(foutput[0])
     axis = int(attributes["axis"])
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
-        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Concat(input, axis, 0, output)
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
+        op = SOFIE.ROperator_Concat(input, axis, 0, output)
     else:
-        raise RuntimeError(
-            "TMVA::SOFIE - Unsupported - Operator Concat does not yet support input type " + fLayerDType
-        )
-    return op
+        raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Concat does not yet support input type " + fLayerDType)
+    return op
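A usage sketch for `MakeKerasConcat`, with made-up tensor names. Unlike the other operators here, `ROperator_Concat` is constructed without a `'float'` template argument; the dtype check happens only on the Python side. Running this requires ROOT with TMVA SOFIE:

layer = {
    "layerInput": ["branch_a", "branch_b"],   # illustrative tensor names
    "layerOutput": ["concat_out"],
    "layerDType": "float32",
    "layerAttributes": {"axis": 1},           # concatenate along axis 1
}
op = MakeKerasConcat(layer)  # ROperator_Concat(["branch_a", "branch_b"], 1, 0, "concat_out")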
File: MakeKerasConv
@@ -1,11 +1,9 @@
 import math
 
-from cppyy import gbl as gbl_namespace
-
 from .. import get_keras_version
 
 
-def MakeKerasConv(layer):
+def MakeKerasConv(layer):
     """
     Create a Keras-compatible convolutional layer operation using SOFIE framework.
 
@@ -16,21 +14,22 @@ def MakeKerasConv(layer):
 
     Parameters:
         layer (dict): A dictionary containing layer information including input, output,
-            data type (must be float), weight and bias name, kernel size, dilations, padding and strides.
+            data type (must be float), weight and bias name, kernel size, dilations, padding and strides.
             When padding is same (keep in the same dimensions), the padding shape is calculated.
 
     Returns:
         ROperator_Conv: A SOFIE framework operator representing the convolutional layer operation.
     """
 
+    from ROOT.TMVA.Experimental import SOFIE
+
     keras_version = get_keras_version()
-    finput = layer['layerInput']
-    foutput = layer['layerOutput']
-    fLayerDType = layer['layerDType']
-
+    finput = layer["layerInput"]
+    foutput = layer["layerOutput"]
+    fLayerDType = layer["layerDType"]
     fLayerInputName = finput[0]
     fLayerOutputName = foutput[0]
-    attributes = layer['layerAttributes']
+    attributes = layer["layerAttributes"]
     fWeightNames = layer["layerWeight"]
     fKernelName = fWeightNames[0]
     fBiasName = fWeightNames[1]
@@ -40,15 +39,15 @@ def MakeKerasConv(layer):
     fKerasPadding = str(attributes["padding"])
     fAttrStrides = attributes["strides"]
     fAttrPads = []
-    if fKerasPadding == 'valid':
-        fAttrAutopad = 'VALID'
-    elif fKerasPadding == 'same':
-        fAttrAutopad = 'NOTSET'
-        if keras_version < '2.16':
-            fInputShape = attributes['_build_input_shape']
 
+    if fKerasPadding == "valid":
+        fAttrAutopad = "VALID"
+    elif fKerasPadding == "same":
+        fAttrAutopad = "NOTSET"
+        if keras_version < "2.16":
+            fInputShape = attributes["_build_input_shape"]
         else:
-            fInputShape = attributes['_build_shapes_dict']['input_shape']
+            fInputShape = attributes["_build_shapes_dict"]["input_shape"]
     inputHeight = fInputShape[1]
     inputWidth = fInputShape[2]
     outputHeight = math.ceil(float(inputHeight) / float(fAttrStrides[0]))
@@ -64,13 +63,19 @@ def MakeKerasConv(layer):
         raise RuntimeError(
             "TMVA::SOFIE - RModel Keras Parser doesn't yet supports Convolution layer with padding " + fKerasPadding
         )
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
-        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Conv['float'](fAttrAutopad, fAttrDilations, fAttrGroup,
-                                                                           fAttrKernelShape, fAttrPads, fAttrStrides,
-                                                                           fLayerInputName, fKernelName, fBiasName,
-                                                                           fLayerOutputName)
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
+        op = SOFIE.ROperator_Conv["float"](
+            fAttrAutopad,
+            fAttrDilations,
+            fAttrGroup,
+            fAttrKernelShape,
+            fAttrPads,
+            fAttrStrides,
+            fLayerInputName,
+            fKernelName,
+            fBiasName,
+            fLayerOutputName,
+        )
         return op
     else:
-        raise RuntimeError(
-            "TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + fLayerDType
-        )
+        raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + fLayerDType)
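The `'same'` branch above computes the output size as `ceil(input / stride)`; the explicit pad amounts that go into `fAttrPads` are computed in lines elided between the hunks. For orientation, the standard TensorFlow SAME-padding arithmetic, which is presumably what the elided code implements, as a sketch:

import math


def same_pads_1d(in_size, kernel, stride, dilation=1):
    # TensorFlow-style SAME padding for one spatial axis: pick the total pad
    # so that out_size == ceil(in_size / stride), splitting the remainder
    # toward the end.
    out_size = math.ceil(in_size / stride)
    effective_kernel = (kernel - 1) * dilation + 1
    total = max((out_size - 1) * stride + effective_kernel - in_size, 0)
    return total // 2, total - total // 2


# 28-pixel axis, 3-wide kernel, stride 1 -> pads (1, 1), output stays 28.
assert same_pads_1d(28, 3, 1) == (1, 1)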
File: MakeKerasDense
@@ -1,6 +1,3 @@
-from cppyy import gbl as gbl_namespace
-
-
 def MakeKerasDense(layer):
     """
     Create a Keras-compatible dense (fully connected) layer operation using SOFIE framework.
@@ -16,23 +13,25 @@ def MakeKerasDense(layer):
 
     Returns:
         ROperator_Gemm: A SOFIE framework operator representing the dense layer operation.
-    """
-    finput = layer['layerInput']
-    foutput = layer['layerOutput']
-    fLayerDType = layer['layerDType']
+    """
+    from ROOT.TMVA.Experimental import SOFIE
+
+    finput = layer["layerInput"]
+    foutput = layer["layerOutput"]
+    fLayerDType = layer["layerDType"]
     fLayerInputName = finput[0]
     fLayerOutputName = foutput[0]
     fWeightNames = layer["layerWeight"]
     fKernelName = fWeightNames[0]
     fBiasName = fWeightNames[1]
     attr_alpha = 1.0
-    attr_beta = 1.0
+    attr_beta = 1.0
     attr_transA = 0
     attr_transB = 0
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
-        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Gemm['float'](attr_alpha, attr_beta, attr_transA, attr_transB, fLayerInputName, fKernelName, fBiasName, fLayerOutputName)
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
+        op = SOFIE.ROperator_Gemm["float"](
+            attr_alpha, attr_beta, attr_transA, attr_transB, fLayerInputName, fKernelName, fBiasName, fLayerOutputName
+        )
        return op
     else:
-        raise RuntimeError(
-            "TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + fLayerDType
-        )
+        raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + fLayerDType)
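The Dense layer is lowered to a Gemm with `alpha = beta = 1.0` and no transposition, i.e. `Y = X @ W + b`. A quick NumPy restatement of that arithmetic, with illustrative shapes only:

import numpy as np

X = np.ones((2, 3), dtype=np.float32)   # batch of 2, 3 input features
W = np.ones((3, 4), dtype=np.float32)   # Dense kernel
b = np.zeros(4, dtype=np.float32)       # Dense bias
Y = 1.0 * (X @ W) + 1.0 * b             # alpha * (X @ W) + beta * b
assert Y.shape == (2, 4)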
File: MakeKerasELU
@@ -1,13 +1,10 @@
-from cppyy import gbl as gbl_namespace
-
-
 def MakeKerasELU(layer):
     """
     Create a Keras-compatible exponential linear Unit (ELU) activation operation using SOFIE framework.
 
     This function takes a dictionary representing a layer and its attributes and
     constructs a Keras-compatible ELU activation operation using the SOFIE framework.
-    ELU is an activation function that modifies only the negative part of ReLU by
+    ELU is an activation function that modifies only the negative part of ReLU by
     applying an exponential curve. It allows small negative values instead of zeros.
 
     Parameters:
@@ -17,20 +14,20 @@ def MakeKerasELU(layer):
     Returns:
         ROperator_Elu: A SOFIE framework operator representing the ELU activation operation.
     """
-    finput = layer['layerInput']
-    foutput = layer['layerOutput']
-    fLayerDType = layer['layerDType']
+    from ROOT.TMVA.Experimental import SOFIE
+
+    finput = layer["layerInput"]
+    foutput = layer["layerOutput"]
+    fLayerDType = layer["layerDType"]
     fLayerInputName = finput[0]
     fLayerOutputName = foutput[0]
-    attributes = layer['layerAttributes']
-    if 'alpha' in attributes.keys():
-        fAlpha = attributes['alpha']
+    attributes = layer["layerAttributes"]
+    if "alpha" in attributes.keys():
+        fAlpha = attributes["alpha"]
     else:
         fAlpha = 1.0
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
-        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Elu('float')(fAlpha, fLayerInputName, fLayerOutputName)
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
+        op = SOFIE.ROperator_Elu("float")(fAlpha, fLayerInputName, fLayerOutputName)
         return op
     else:
-        raise RuntimeError(
-            "TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType
-        )
+        raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType)
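As the docstring says, ELU keeps the identity for positive inputs and bends the negative part onto an exponential that saturates at `-alpha`. A reference implementation of that definition:

import math


def elu_reference(x, alpha=1.0):
    # f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise; saturates at -alpha.
    return x if x > 0 else alpha * (math.exp(x) - 1.0)


assert elu_reference(2.0) == 2.0
assert abs(elu_reference(-1.0) - (math.exp(-1.0) - 1.0)) < 1e-12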
File: MakeKerasFlatten
@@ -1,5 +1,3 @@
-from cppyy import gbl as gbl_namespace
-
 from .. import get_keras_version
 
 
@@ -19,20 +17,20 @@ def MakeKerasFlatten(layer):
     Returns:
         ROperator_Reshape: A SOFIE framework operator representing the flattening operation.
     """
+    from ROOT.TMVA.Experimental import SOFIE
 
     keras_version = get_keras_version()
 
-    finput = layer['layerInput']
-    foutput = layer['layerOutput']
-    attributes = layer['layerAttributes']
-    if keras_version < '2.16':
-        flayername = attributes['_name']
+    finput = layer["layerInput"]
+    foutput = layer["layerOutput"]
+    attributes = layer["layerAttributes"]
+    if keras_version < "2.16":
+        flayername = attributes["_name"]
     else:
-        flayername = attributes['name']
-    fOpMode = gbl_namespace.TMVA.Experimental.SOFIE.ReshapeOpMode.Flatten
-    fLayerDType = layer['layerDType']
+        flayername = attributes["name"]
+    fOpMode = SOFIE.ReshapeOpMode.Flatten
     fNameData = finput[0]
     fNameOutput = foutput[0]
     fNameShape = flayername + "_shape"
-    op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput)
-    return op
+    op = SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput)
+    return op
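`Flatten` is lowered to a SOFIE Reshape in `Flatten` mode; the shape tensor the operator reads is named after the Keras layer plus a `"_shape"` suffix. A usage sketch (made-up names, Keras >= 2.16 path, requires ROOT with TMVA SOFIE and an importable `keras`):

layer = {
    "layerInput": ["conv_out"],                # illustrative tensor names
    "layerOutput": ["flat_out"],
    "layerDType": "float32",
    "layerAttributes": {"name": "flatten_1"},  # "_name" on Keras < 2.16
}
op = MakeKerasFlatten(layer)  # ROperator_Reshape reading "flatten_1_shape"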
File: MakeKerasIdentity
@@ -1,14 +1,13 @@
-from cppyy import gbl as gbl_namespace
-
-
 def MakeKerasIdentity(layer):
-    input = layer['layerInput']
-    output = layer['layerOutput']
-    fLayerDType = layer['layerDType']
+    from ROOT.TMVA.Experimental import SOFIE
+
+    input = layer["layerInput"]
+    output = layer["layerOutput"]
+    fLayerDType = layer["layerDType"]
     fLayerInputName = input[0]
     fLayerOutputName = output[0]
-    if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
-        op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Identity('float')(fLayerInputName, fLayerOutputName)
+    if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT:
+        op = SOFIE.ROperator_Identity("float")(fLayerInputName, fLayerOutputName)
         return op
     else:
         raise RuntimeError(