diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/__init__.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/__init__.py index 5f48c83e89aa1..a8cc4b78313aa 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/__init__.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/__init__.py @@ -1,5 +1,5 @@ def get_keras_version() -> str: - + import keras - - return keras.__version__ \ No newline at end of file + + return keras.__version__ diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/batchnorm.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/batchnorm.py index 11110851342e3..9c8063cd23e0a 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/batchnorm.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/batchnorm.py @@ -1,9 +1,7 @@ -from cppyy import gbl as gbl_namespace - from .. import get_keras_version -def MakeKerasBatchNorm(layer): +def MakeKerasBatchNorm(layer): """ Create a Keras-compatible batch normalization operation using SOFIE framework. @@ -20,12 +18,13 @@ def MakeKerasBatchNorm(layer): Returns: ROperator_BatchNormalization: A SOFIE framework operator representing the batch normalization operation. """ - + from ROOT.TMVA.Experimental import SOFIE + keras_version = get_keras_version() - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - attributes = layer['layerAttributes'] + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + attributes = layer["layerAttributes"] gamma = attributes["gamma"] beta = attributes["beta"] moving_mean = attributes["moving_mean"] @@ -33,8 +32,8 @@ def MakeKerasBatchNorm(layer): fLayerDType = layer["layerDType"] fNX = str(finput[0]) fNY = str(foutput[0]) - - if keras_version < '2.16': + + if keras_version < "2.16": fNScale = gamma.name fNB = beta.name fNMean = moving_mean.name @@ -44,14 +43,14 @@ def MakeKerasBatchNorm(layer): fNB = beta.path fNMean = moving_mean.path fNVar = moving_variance.path - + epsilon = attributes["epsilon"] momentum = attributes["momentum"] - - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BatchNormalization('float')(epsilon, momentum, 0, fNX, fNScale, fNB, fNMean, fNVar, fNY) + + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_BatchNormalization("float")(epsilon, momentum, 0, fNX, fNScale, fNB, fNMean, fNVar, fNY) else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator BatchNormalization does not yet support input type " + fLayerDType ) - return op \ No newline at end of file + return op diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/binary.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/binary.py index 9aa6324af27eb..ba28e1f8f6089 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/binary.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/binary.py @@ -1,24 +1,23 @@ -from cppyy import gbl as gbl_namespace - - def 
MakeKerasBinary(layer): - input = layer['layerInput'] - output = layer['layerOutput'] - fLayerType = layer['layerType'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + input = layer["layerInput"] + output = layer["layerOutput"] + fLayerType = layer["layerType"] + fLayerDType = layer["layerDType"] fX1 = input[0] fX2 = input[1] fY = output[0] op = None - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: if fLayerType == "Add": - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary(float, gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Add)(fX1, fX2, fY) + op = SOFIE.ROperator_BasicBinary(float, SOFIE.EBasicBinaryOperator.Add)(fX1, fX2, fY) elif fLayerType == "Subtract": - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary(float, gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Sub)(fX1, fX2, fY) + op = SOFIE.ROperator_BasicBinary(float, SOFIE.EBasicBinaryOperator.Sub)(fX1, fX2, fY) else: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_BasicBinary(float, gbl_namespace.TMVA.Experimental.SOFIE.EBasicBinaryOperator.Mul)(fX1, fX2, fY) + op = SOFIE.ROperator_BasicBinary(float, SOFIE.EBasicBinaryOperator.Mul)(fX1, fX2, fY) else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator BasicBinary does not yet support input type " + fLayerDType ) - return op \ No newline at end of file + return op diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/concat.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/concat.py index 013afe831585e..2d3fa06d4190a 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/concat.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/concat.py @@ -1,18 +1,15 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasConcat(layer): - finput = layer['layerInput'] - foutput = layer['layerOutput'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] fLayerDType = layer["layerDType"] - attributes = layer['layerAttributes'] + attributes = layer["layerAttributes"] input = [str(i) for i in finput] output = str(foutput[0]) axis = int(attributes["axis"]) - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Concat(input, axis, 0, output) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Concat(input, axis, 0, output) else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Concat does not yet support input type " + fLayerDType - ) - return op \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Concat does not yet support input type " + fLayerDType) + return op diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/conv.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/conv.py index 047e9d52603e0..2a12fc9a832f6 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/conv.py +++ 
b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/conv.py @@ -1,11 +1,9 @@ import math -from cppyy import gbl as gbl_namespace - from .. import get_keras_version -def MakeKerasConv(layer): +def MakeKerasConv(layer): """ Create a Keras-compatible convolutional layer operation using SOFIE framework. @@ -16,21 +14,22 @@ def MakeKerasConv(layer): Parameters: layer (dict): A dictionary containing layer information including input, output, - data type (must be float), weight and bias name, kernel size, dilations, padding and strides. + data type (must be float), weight and bias name, kernel size, dilations, padding and strides. When padding is same (keep in the same dimensions), the padding shape is calculated. Returns: ROperator_Conv: A SOFIE framework operator representing the convolutional layer operation. """ - + from ROOT.TMVA.Experimental import SOFIE + keras_version = get_keras_version() - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - attributes = layer['layerAttributes'] + attributes = layer["layerAttributes"] fWeightNames = layer["layerWeight"] fKernelName = fWeightNames[0] fBiasName = fWeightNames[1] @@ -40,15 +39,15 @@ def MakeKerasConv(layer): fKerasPadding = str(attributes["padding"]) fAttrStrides = attributes["strides"] fAttrPads = [] - - if fKerasPadding == 'valid': - fAttrAutopad = 'VALID' - elif fKerasPadding == 'same': - fAttrAutopad = 'NOTSET' - if keras_version < '2.16': - fInputShape = attributes['_build_input_shape'] + + if fKerasPadding == "valid": + fAttrAutopad = "VALID" + elif fKerasPadding == "same": + fAttrAutopad = "NOTSET" + if keras_version < "2.16": + fInputShape = attributes["_build_input_shape"] else: - fInputShape = attributes['_build_shapes_dict']['input_shape'] + fInputShape = attributes["_build_shapes_dict"]["input_shape"] inputHeight = fInputShape[1] inputWidth = fInputShape[2] outputHeight = math.ceil(float(inputHeight) / float(fAttrStrides[0])) @@ -64,13 +63,19 @@ def MakeKerasConv(layer): raise RuntimeError( "TMVA::SOFIE - RModel Keras Parser doesn't yet supports Convolution layer with padding " + fKerasPadding ) - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Conv['float'](fAttrAutopad, fAttrDilations, fAttrGroup, - fAttrKernelShape, fAttrPads, fAttrStrides, - fLayerInputName, fKernelName, fBiasName, - fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Conv["float"]( + fAttrAutopad, + fAttrDilations, + fAttrGroup, + fAttrKernelShape, + fAttrPads, + fAttrStrides, + fLayerInputName, + fKernelName, + fBiasName, + fLayerOutputName, + ) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + fLayerDType - ) \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Conv does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/dense.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/dense.py index cfcd079dc8909..33cea69476bfd 100644 --- 
a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/dense.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/dense.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasDense(layer): """ Create a Keras-compatible dense (fully connected) layer operation using SOFIE framework. @@ -16,23 +13,25 @@ def MakeKerasDense(layer): Returns: ROperator_Gemm: A SOFIE framework operator representing the dense layer operation. - """ - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + """ + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] fWeightNames = layer["layerWeight"] fKernelName = fWeightNames[0] fBiasName = fWeightNames[1] attr_alpha = 1.0 - attr_beta = 1.0 + attr_beta = 1.0 attr_transA = 0 attr_transB = 0 - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Gemm['float'](attr_alpha, attr_beta, attr_transA, attr_transB, fLayerInputName, fKernelName, fBiasName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Gemm["float"]( + attr_alpha, attr_beta, attr_transA, attr_transB, fLayerInputName, fKernelName, fBiasName, fLayerOutputName + ) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + fLayerDType - ) \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Gemm does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/elu.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/elu.py index 6d8c1eccbd985..ca7b0f89a3c35 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/elu.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/elu.py @@ -1,13 +1,10 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasELU(layer): """ Create a Keras-compatible exponential linear Unit (ELU) activation operation using SOFIE framework. This function takes a dictionary representing a layer and its attributes and constructs a Keras-compatible ELU activation operation using the SOFIE framework. - ELU is an activation function that modifies only the negative part of ReLU by + ELU is an activation function that modifies only the negative part of ReLU by applying an exponential curve. It allows small negative values instead of zeros. Parameters: @@ -17,20 +14,20 @@ def MakeKerasELU(layer): Returns: ROperator_Elu: A SOFIE framework operator representing the ELU activation operation. 
""" - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - attributes = layer['layerAttributes'] - if 'alpha' in attributes.keys(): - fAlpha = attributes['alpha'] + attributes = layer["layerAttributes"] + if "alpha" in attributes.keys(): + fAlpha = attributes["alpha"] else: fAlpha = 1.0 - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Elu('float')(fAlpha, fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Elu("float")(fAlpha, fLayerInputName, fLayerOutputName) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType - ) + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/flatten.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/flatten.py index 1fd5042b6650a..b05b1687123f4 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/flatten.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/flatten.py @@ -1,5 +1,3 @@ -from cppyy import gbl as gbl_namespace - from .. import get_keras_version @@ -19,20 +17,20 @@ def MakeKerasFlatten(layer): Returns: ROperator_Reshape: A SOFIE framework operator representing the flattening operation. 
""" + from ROOT.TMVA.Experimental import SOFIE keras_version = get_keras_version() - finput = layer['layerInput'] - foutput = layer['layerOutput'] - attributes = layer['layerAttributes'] - if keras_version < '2.16': - flayername = attributes['_name'] + finput = layer["layerInput"] + foutput = layer["layerOutput"] + attributes = layer["layerAttributes"] + if keras_version < "2.16": + flayername = attributes["_name"] else: - flayername = attributes['name'] - fOpMode = gbl_namespace.TMVA.Experimental.SOFIE.ReshapeOpMode.Flatten - fLayerDType = layer['layerDType'] + flayername = attributes["name"] + fOpMode = SOFIE.ReshapeOpMode.Flatten fNameData = finput[0] fNameOutput = foutput[0] fNameShape = flayername + "_shape" - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput) - return op \ No newline at end of file + op = SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput) + return op diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/identity.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/identity.py index fb3ba6783b8b0..5ee3fd8d43caf 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/identity.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/identity.py @@ -1,14 +1,13 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasIdentity(layer): - input = layer['layerInput'] - output = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + input = layer["layerInput"] + output = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = input[0] fLayerOutputName = output[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Identity('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Identity("float")(fLayerInputName, fLayerOutputName) return op else: raise RuntimeError( diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/layernorm.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/layernorm.py index 55b7039ee0e4c..b7cd97764b198 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/layernorm.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/layernorm.py @@ -1,5 +1,3 @@ -from cppyy import gbl as gbl_namespace - from .. import get_keras_version @@ -9,31 +7,32 @@ def MakeKerasLayerNorm(layer): This function takes a dictionary representing a layer normalization layer and its attributes and constructs a Keras-compatible layer normalization operation using - the SOFIE framework. Unlike Batch normalization, Layer normalization used to normalize - the activations of a layer across the entire layer, independently for each sample in + the SOFIE framework. Unlike Batch normalization, Layer normalization used to normalize + the activations of a layer across the entire layer, independently for each sample in the batch. 
Parameters: layer (dict): A dictionary containing layer information including input, output, - gamma, beta, epsilon, data type (assumed to be float), and other + gamma, beta, epsilon, data type (assumed to be float), and other relevant information. Returns: ROperator_BatchNormalization: A SOFIE framework operator representing the layer normalization operation. """ - + from ROOT.TMVA.Experimental import SOFIE + keras_version = get_keras_version() - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - attributes = layer['layerAttributes'] + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + attributes = layer["layerAttributes"] gamma = attributes["gamma"] beta = attributes["beta"] - axes = attributes['axis'] - if '_build_input_shape' in attributes.keys(): - num_input_shapes = len(attributes['_build_input_shape']) - elif '_build_shapes_dict' in attributes.keys(): - num_input_shapes = len(list(attributes['_build_shapes_dict']['input_shape'])) + axes = attributes["axis"] + if "_build_input_shape" in attributes.keys(): + num_input_shapes = len(attributes["_build_input_shape"]) + elif "_build_shapes_dict" in attributes.keys(): + num_input_shapes = len(list(attributes["_build_shapes_dict"]["input_shape"])) if len(axes) == 1: axis = axes[0] if axis < 0: @@ -42,23 +41,23 @@ def MakeKerasLayerNorm(layer): raise Exception("TMVA.SOFIE - LayerNormalization layer - parsing different axes at once is not supported") fLayerDType = layer["layerDType"] fNX = str(finput[0]) - fNY = str(foutput[0]) - - if keras_version < '2.16': + fNY = str(foutput[0]) + + if keras_version < "2.16": fNScale = gamma.name fNB = beta.name else: fNScale = gamma.path fNB = beta.path - + epsilon = attributes["epsilon"] fNInvStdDev = [] - - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_LayerNormalization('float')(axis, epsilon, 1, fNX, fNScale, fNB, fNY, "", fNInvStdDev) + + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_LayerNormalization("float")(axis, epsilon, 1, fNX, fNScale, fNB, fNY, "", fNInvStdDev) else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator BatchNormalization does not yet support input type " + fLayerDType ) - - return op \ No newline at end of file + + return op diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/leaky_relu.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/leaky_relu.py index 4eef107d3e5f3..0b56e23ea84a4 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/leaky_relu.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/leaky_relu.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasLeakyRelu(layer): """ Create a Keras-compatible Leaky ReLU activation operation using SOFIE framework. @@ -17,29 +14,28 @@ def MakeKerasLeakyRelu(layer): Returns: ROperator_LeakyRelu: A SOFIE framework operator representing the Leaky ReLU activation operation. 
""" - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - attributes = layer['layerAttributes'] - - if 'alpha' in attributes.keys(): + attributes = layer["layerAttributes"] + + if "alpha" in attributes.keys(): fAlpha = float(attributes["alpha"]) - elif 'negative_slope' in attributes.keys(): - fAlpha = float(attributes['negative_slope']) - elif 'activation' in attributes.keys(): + elif "negative_slope" in attributes.keys(): + fAlpha = float(attributes["negative_slope"]) + elif "activation" in attributes.keys(): fAlpha = 0.2 else: - raise RuntimeError ( - "Failed to extract alpha value from LeakyReLU" - ) - - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_LeakyRelu('float')(fAlpha, fLayerInputName, fLayerOutputName) + raise RuntimeError("Failed to extract alpha value from LeakyReLU") + + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_LeakyRelu("float")(fAlpha, fLayerInputName, fLayerOutputName) return op else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator LeakyRelu does not yet support input type " + fLayerDType - ) \ No newline at end of file + ) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/permute.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/permute.py index 04daea02235a3..f08f9373d12fd 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/permute.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/permute.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasPermute(layer): """ Create a Keras-compatible permutation operation using SOFIE framework. @@ -17,21 +14,25 @@ def MakeKerasPermute(layer): Returns: ROperator_Transpose: A SOFIE framework operator representing the permutation operation. 
""" - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - attributes = layer['layerAttributes'] + attributes = layer["layerAttributes"] fAttributePermute = list(attributes["dims"]) - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - if len(fAttributePermute) > 0: - fAttributePermute = [0] + fAttributePermute # for the batch dimension from the input - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')(fAttributePermute, fLayerInputName, fLayerOutputName) #gbl_namespace.TMVA.Experimental.SOFIE.fPermuteDims - else: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + if len(fAttributePermute) > 0: + fAttributePermute = [0] + fAttributePermute # for the batch dimension from the input + op = SOFIE.ROperator_Transpose("float")( + fAttributePermute, fLayerInputName, fLayerOutputName + ) # SOFIE.fPermuteDims + else: + op = SOFIE.ROperator_Transpose("float")(fLayerInputName, fLayerOutputName) return op else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator Transpose does not yet support input type " + fLayerDType - ) \ No newline at end of file + ) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/pooling.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/pooling.py index 8d08104cec743..9c8656846feb0 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/pooling.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/pooling.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasPooling(layer): """ Create a Keras-compatible pooling layer operation using SOFIE framework. @@ -17,36 +14,37 @@ def MakeKerasPooling(layer): Returns: ROperator_Pool: A SOFIE framework operator representing the pooling layer operation. 
""" - + from ROOT.TMVA.Experimental import SOFIE + # Extract attributes from layer data - fLayerDType = layer['layerDType'] - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerType = layer['layerType'] + fLayerDType = layer["layerDType"] + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerType = layer["layerType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - pool_atrr = gbl_namespace.TMVA.Experimental.SOFIE.RAttributes_Pool() - attributes = layer['layerAttributes'] + pool_atrr = SOFIE.RAttributes_Pool() + attributes = layer["layerAttributes"] # Set default values for GlobalAveragePooling2D fAttrKernelShape = [] - fKerasPadding = 'valid' + fKerasPadding = "valid" fAttrStrides = [] - if fLayerType != 'GlobalAveragePooling2D': + if fLayerType != "GlobalAveragePooling2D": fAttrKernelShape = attributes["pool_size"] fKerasPadding = str(attributes["padding"]) fAttrStrides = attributes["strides"] - + # Set default values - fAttrDilations = (1,1) - fpads = [0,0,0,0,0,0] + fAttrDilations = (1, 1) + fpads = [0, 0, 0, 0, 0, 0] pool_atrr.ceil_mode = 0 pool_atrr.count_include_pad = 0 pool_atrr.storage_order = 0 - - if fKerasPadding == 'valid': - fAttrAutopad = 'VALID' - elif fKerasPadding == 'same': - fAttrAutopad = 'NOTSET' + + if fKerasPadding == "valid": + fAttrAutopad = "VALID" + elif fKerasPadding == "same": + fAttrAutopad = "NOTSET" else: raise RuntimeError( "TMVA::SOFIE - RModel Keras Parser doesn't yet support Pooling layer with padding " + fKerasPadding @@ -55,25 +53,25 @@ def MakeKerasPooling(layer): pool_atrr.strides = list(fAttrStrides) pool_atrr.pads = fpads pool_atrr.kernel_shape = list(fAttrKernelShape) - pool_atrr.auto_pad = fAttrAutopad - + pool_atrr.auto_pad = fAttrAutopad + # Choose pooling type - if 'Max' in fLayerType: - PoolMode = gbl_namespace.TMVA.Experimental.SOFIE.PoolOpMode.MaxPool - elif 'AveragePool' in fLayerType: - PoolMode = gbl_namespace.TMVA.Experimental.SOFIE.PoolOpMode.AveragePool - elif 'GlobalAverage' in fLayerType: - PoolMode = gbl_namespace.TMVA.Experimental.SOFIE.PoolOpMode.GloabalAveragePool + if "Max" in fLayerType: + PoolMode = SOFIE.PoolOpMode.MaxPool + elif "AveragePool" in fLayerType: + PoolMode = SOFIE.PoolOpMode.AveragePool + elif "GlobalAverage" in fLayerType: + PoolMode = SOFIE.PoolOpMode.GloabalAveragePool else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator poolong does not yet support pooling type " + fLayerType ) - + # Create operator - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Pool['float'](PoolMode, pool_atrr, fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Pool["float"](PoolMode, pool_atrr, fLayerInputName, fLayerOutputName) return op else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator Pooling does not yet support input type " + fLayerDType - ) \ No newline at end of file + ) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/relu.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/relu.py index 24419da59396e..454da16a2b766 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/relu.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/relu.py @@ 
-1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasReLU(layer): """ Create a Keras-compatible rectified linear unit (ReLU) activation operation using SOFIE framework. @@ -17,15 +14,15 @@ def MakeKerasReLU(layer): Returns: ROperator_Relu: A SOFIE framework operator representing the ReLU activation operation. """ - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Relu('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Relu("float")(fLayerInputName, fLayerOutputName) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType - ) \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Relu does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/reshape.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/reshape.py index 5d77978be54c5..ce6901bc4e047 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/reshape.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/reshape.py @@ -1,5 +1,3 @@ -from cppyy import gbl as gbl_namespace - from .. import get_keras_version @@ -17,20 +15,20 @@ def MakeKerasReshape(layer): Returns: ROperator_Reshape: A SOFIE framework operator representing the reshaping operation. 
""" + from ROOT.TMVA.Experimental import SOFIE keras_version = get_keras_version() - finput = layer['layerInput'] - foutput = layer['layerOutput'] - attributes = layer['layerAttributes'] - if keras_version < '2.16': - flayername = attributes['_name'] + finput = layer["layerInput"] + foutput = layer["layerOutput"] + attributes = layer["layerAttributes"] + if keras_version < "2.16": + flayername = attributes["_name"] else: - flayername = attributes['name'] - fOpMode = gbl_namespace.TMVA.Experimental.SOFIE.ReshapeOpMode.Reshape - fLayerDType = layer['layerDType'] + flayername = attributes["name"] + fOpMode = SOFIE.ReshapeOpMode.Reshape fNameData = finput[0] fNameOutput = foutput[0] fNameShape = flayername + "_shape" - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput) - return op \ No newline at end of file + op = SOFIE.ROperator_Reshape(fOpMode, 0, fNameData, fNameShape, fNameOutput) + return op diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/rnn.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/rnn.py index 3902d501432f0..8d1326e3e5aaa 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/rnn.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/rnn.py @@ -1,7 +1,4 @@ -from cppyy import gbl as gbl_namespace - - -def MakeKerasRNN(layer): +def MakeKerasRNN(layer): """ Create a Keras-compatible RNN (Recurrent Neural Network) layer operation using SOFIE framework. @@ -17,13 +14,14 @@ def MakeKerasRNN(layer): Returns: ROperator_RNN: A SOFIE framework operator representing the RNN layer operation. 
""" - + from ROOT.TMVA.Experimental import SOFIE + # Extract required information from the layer dictionary - fLayerDType = layer['layerDType'] - finput = layer['layerInput'] - foutput = layer['layerOutput'] - attributes = layer['layerAttributes'] - direction = attributes['direction'] + fLayerDType = layer["layerDType"] + finput = layer["layerInput"] + foutput = layer["layerOutput"] + attributes = layer["layerAttributes"] + direction = attributes["direction"] hidden_size = attributes["hidden_size"] layout = int(attributes["layout"]) nameX = finput[0] @@ -34,60 +32,128 @@ def MakeKerasRNN(layer): nameB = layer["layerWeight"][2] else: nameB = "" - + # Check if the provided activation function is supported - fPActivation = attributes['activation'] - if fPActivation.__name__ not in ['relu', 'sigmoid', 'tanh', 'softsign', 'softplus']: #avoiding functions with parameters + fPActivation = attributes["activation"] + if fPActivation.__name__ not in [ + "relu", + "sigmoid", + "tanh", + "softsign", + "softplus", + ]: # avoiding functions with parameters raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator RNN does not yet support activation function " + fPActivation.__name__ ) - - activations = [fPActivation.__name__[0].upper()+fPActivation.__name__[1:]] - #set default values + activations = [fPActivation.__name__[0].upper() + fPActivation.__name__[1:]] + + # set default values activation_alpha = [] activation_beta = [] clip = 0.0 nameY_h = "" nameInitial_h = "" name_seq_len = "" - - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - if layer['layerType'] == "SimpleRNN": - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_RNN['float'](activation_alpha, activation_beta, activations, clip, direction, hidden_size, layout, nameX, nameW, nameR, nameB, name_seq_len, nameInitial_h, nameY, nameY_h) - - elif layer['layerType'] == "GRU": - #an additional activation function is required, given by the user - activations.insert(0, attributes['recurrent_activation'].__name__[0].upper() + attributes['recurrent_activation'].__name__[1:]) - - #new variable needed: - linear_before_reset = attributes['linear_before_reset'] - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_GRU['float'](activation_alpha, activation_beta, activations, clip, direction, hidden_size, layout, linear_before_reset, nameX, nameW, nameR, nameB, name_seq_len, nameInitial_h, nameY, nameY_h) - - elif layer['layerType'] == "LSTM": - #an additional activation function is required, the first given by the user, the second set to tanh as default - fPRecurrentActivation = attributes['recurrent_activation'] - if fPActivation.__name__ not in ['relu', 'sigmoid', 'tanh', 'softsign', 'softplus']: #avoiding functions with parameters + + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + if layer["layerType"] == "SimpleRNN": + op = SOFIE.ROperator_RNN["float"]( + activation_alpha, + activation_beta, + activations, + clip, + direction, + hidden_size, + layout, + nameX, + nameW, + nameR, + nameB, + name_seq_len, + nameInitial_h, + nameY, + nameY_h, + ) + + elif layer["layerType"] == "GRU": + # an additional activation function is required, given by the user + activations.insert( + 0, + attributes["recurrent_activation"].__name__[0].upper() + + attributes["recurrent_activation"].__name__[1:], + ) + + # new variable needed: + linear_before_reset = attributes["linear_before_reset"] + op = SOFIE.ROperator_GRU["float"]( + activation_alpha, + 
activation_beta, + activations, + clip, + direction, + hidden_size, + layout, + linear_before_reset, + nameX, + nameW, + nameR, + nameB, + name_seq_len, + nameInitial_h, + nameY, + nameY_h, + ) + + elif layer["layerType"] == "LSTM": + # an additional activation function is required, the first given by the user, the second set to tanh as default + fPRecurrentActivation = attributes["recurrent_activation"] + if fPRecurrentActivation.__name__ not in [ + "relu", + "sigmoid", + "tanh", + "softsign", + "softplus", + ]: # avoiding functions with parameters raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator RNN does not yet support recurrent activation function " + fPActivation.__name__ + "TMVA::SOFIE - Unsupported - Operator RNN does not yet support recurrent activation function " + + fPRecurrentActivation.__name__ ) - fPRecurrentActivationName = fPRecurrentActivation.__name__[0].upper()+fPRecurrentActivation.__name__[1:] - activations.insert(0,fPRecurrentActivationName) - activations.insert(2,'Tanh') - - #new variables needed: + fPRecurrentActivationName = fPRecurrentActivation.__name__[0].upper() + fPRecurrentActivation.__name__[1:] + activations.insert(0, fPRecurrentActivationName) + activations.insert(2, "Tanh") + + # new variables needed: input_forget = 0 nameInitial_c = "" - nameP = "" #No peephole connections in keras LSTM model + nameP = "" # No peephole connections in keras LSTM model nameY_c = "" - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_LSTM['float'](activation_alpha, activation_beta, activations, clip, direction, hidden_size, input_forget, layout, nameX, nameW, nameR, nameB, name_seq_len, nameInitial_h, nameInitial_c, nameP, nameY, nameY_h, nameY_c) - - else: + op = SOFIE.ROperator_LSTM["float"]( + activation_alpha, + activation_beta, + activations, + clip, + direction, + hidden_size, + input_forget, + layout, + nameX, + nameW, + nameR, + nameB, + name_seq_len, + nameInitial_h, + nameInitial_c, + nameP, + nameY, + nameY_h, + nameY_c, + ) + + else: raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator RNN does not yet support operator type " + layer['layerType'] - ) + "TMVA::SOFIE - Unsupported - Operator RNN does not yet support operator type " + layer["layerType"] + ) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator RNN does not yet support input type " + fLayerDType - ) + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator RNN does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/selu.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/selu.py index 62c386b7e6363..8beacb928dc24 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/selu.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/selu.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasSeLU(layer): """ Create a Keras-compatible scaled exponential linear unit (SeLU) activation operation using SOFIE framework. @@ -17,16 +14,15 @@ def MakeKerasSeLU(layer): Returns: ROperator_Selu: A SOFIE framework operator representing the SeLU activation operation.
""" - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Selu('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Selu("float")(fLayerInputName, fLayerOutputName) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Selu does not yet support input type " + fLayerDType - ) \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Selu does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/sigmoid.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/sigmoid.py index 92e3159822393..a77d2ec05cc61 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/sigmoid.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/sigmoid.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasSigmoid(layer): """ Create a Keras-compatible sigmoid activation operation using SOFIE framework. @@ -17,16 +14,17 @@ def MakeKerasSigmoid(layer): Returns: ROperator_Sigmoid: A SOFIE framework operator representing the sigmoid activation operation. """ - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Sigmoid('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Sigmoid("float")(fLayerInputName, fLayerOutputName) return op else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator Sigmoid does not yet support input type " + fLayerDType - ) \ No newline at end of file + ) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/softmax.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/softmax.py index f23b1f46f6a6d..650154eb77385 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/softmax.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/softmax.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasSoftmax(layer): """ Create a Keras-compatible softmax activation operation using SOFIE framework. @@ -18,16 +15,17 @@ def MakeKerasSoftmax(layer): Returns: ROperator_Softmax: A SOFIE framework operator representing the softmax activation operation. 
""" - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Softmax('float')(-1, fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Softmax("float")(-1, fLayerInputName, fLayerOutputName) return op else: raise RuntimeError( "TMVA::SOFIE - Unsupported - Operator Softmax does not yet support input type " + fLayerDType - ) \ No newline at end of file + ) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/swish.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/swish.py index db683b9f5f393..287457f208ed4 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/swish.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/swish.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasSwish(layer): """ Create a Keras-compatible swish activation operation using SOFIE framework. @@ -17,16 +14,15 @@ def MakeKerasSwish(layer): Returns: ROperator_Swish: A SOFIE framework operator representing the swish activation operation. """ - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Swish('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Swish("float")(fLayerInputName, fLayerOutputName) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Swish does not yet support input type " + fLayerDType - ) \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Swish does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/tanh.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/tanh.py index 35020d6c6da76..64ca3578a686f 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/tanh.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/layers/tanh.py @@ -1,6 +1,3 @@ -from cppyy import gbl as gbl_namespace - - def MakeKerasTanh(layer): """ Create a Keras-compatible hyperbolic tangent (tanh) activation operation using SOFIE framework. @@ -17,16 +14,15 @@ def MakeKerasTanh(layer): Returns: ROperator_Tanh: A SOFIE framework operator representing the tanh activation operation. 
""" - - finput = layer['layerInput'] - foutput = layer['layerOutput'] - fLayerDType = layer['layerDType'] + from ROOT.TMVA.Experimental import SOFIE + + finput = layer["layerInput"] + foutput = layer["layerOutput"] + fLayerDType = layer["layerDType"] fLayerInputName = finput[0] fLayerOutputName = foutput[0] - if gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fLayerDType) == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT: - op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Tanh('float')(fLayerInputName, fLayerOutputName) + if SOFIE.ConvertStringToType(fLayerDType) == SOFIE.ETensorType.FLOAT: + op = SOFIE.ROperator_Tanh("float")(fLayerInputName, fLayerOutputName) return op else: - raise RuntimeError( - "TMVA::SOFIE - Unsupported - Operator Tanh does not yet support input type " + fLayerDType - ) \ No newline at end of file + raise RuntimeError("TMVA::SOFIE - Unsupported - Operator Tanh does not yet support input type " + fLayerDType) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/parser.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/parser.py index 585af3f9da04c..74d49978bc9b5 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/parser.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_sofie/_parser/_keras/parser.py @@ -1,8 +1,6 @@ import os import time -from cppyy import gbl as gbl_namespace - from . import get_keras_version from .layers.batchnorm import MakeKerasBatchNorm from .layers.binary import MakeKerasBinary @@ -25,8 +23,8 @@ def MakeKerasActivation(layer): - attributes = layer['layerAttributes'] - activation = attributes['activation'] + attributes = layer["layerAttributes"] + activation = attributes["activation"] fLayerActivation = str(activation.__name__) if fLayerActivation in mapKerasLayer.keys(): @@ -34,41 +32,44 @@ def MakeKerasActivation(layer): else: raise Exception("TMVA.SOFIE - parsing keras activation layer " + fLayerActivation + " is not yet supported") + # Set global dictionaries, mapping layers to corresponding functions that create their ROperator instances -mapKerasLayer = {"Activation": MakeKerasActivation, - "Permute": MakeKerasPermute, - "BatchNormalization": MakeKerasBatchNorm, - "LayerNormalization": MakeKerasLayerNorm, - "Reshape": MakeKerasReshape, - "Flatten": MakeKerasFlatten, - "Concatenate": MakeKerasConcat, - "swish": MakeKerasSwish, - "silu": MakeKerasSwish, - "Add": MakeKerasBinary, - "Subtract": MakeKerasBinary, - "Multiply": MakeKerasBinary, - "Softmax": MakeKerasSoftmax, - "tanh": MakeKerasTanh, - # "Identity": MakeKerasIdentity, - # "Dropout": MakeKerasIdentity, - "ReLU": MakeKerasReLU, - "relu": MakeKerasReLU, - "ELU": MakeKerasELU, - "elu": MakeKerasELU, - "selu": MakeKerasSeLU, - "sigmoid": MakeKerasSigmoid, - "LeakyReLU": MakeKerasLeakyRelu, - "leaky_relu": MakeKerasLeakyRelu, - "softmax": MakeKerasSoftmax, - "MaxPooling2D": MakeKerasPooling, - "AveragePooling2D": MakeKerasPooling, - "GlobalAveragePooling2D": MakeKerasPooling, - # "SimpleRNN": MakeKerasRNN, - # "GRU": MakeKerasRNN, - # "LSTM": MakeKerasRNN, - } - -mapKerasLayerWithActivation = {"Dense": MakeKerasDense,"Conv2D": MakeKerasConv} +mapKerasLayer = { + "Activation": MakeKerasActivation, + "Permute": MakeKerasPermute, + "BatchNormalization": MakeKerasBatchNorm, + "LayerNormalization": MakeKerasLayerNorm, + "Reshape": MakeKerasReshape, + "Flatten": MakeKerasFlatten, + "Concatenate": MakeKerasConcat, + 
"swish": MakeKerasSwish, + "silu": MakeKerasSwish, + "Add": MakeKerasBinary, + "Subtract": MakeKerasBinary, + "Multiply": MakeKerasBinary, + "Softmax": MakeKerasSoftmax, + "tanh": MakeKerasTanh, + # "Identity": MakeKerasIdentity, + # "Dropout": MakeKerasIdentity, + "ReLU": MakeKerasReLU, + "relu": MakeKerasReLU, + "ELU": MakeKerasELU, + "elu": MakeKerasELU, + "selu": MakeKerasSeLU, + "sigmoid": MakeKerasSigmoid, + "LeakyReLU": MakeKerasLeakyRelu, + "leaky_relu": MakeKerasLeakyRelu, + "softmax": MakeKerasSoftmax, + "MaxPooling2D": MakeKerasPooling, + "AveragePooling2D": MakeKerasPooling, + "GlobalAveragePooling2D": MakeKerasPooling, + # "SimpleRNN": MakeKerasRNN, + # "GRU": MakeKerasRNN, + # "LSTM": MakeKerasRNN, +} + +mapKerasLayerWithActivation = {"Dense": MakeKerasDense, "Conv2D": MakeKerasConv} + def add_layer_into_RModel(rmodel, layer_data): """ @@ -89,55 +90,53 @@ def add_layer_into_RModel(rmodel, layer_data): Raises exception: If the provided layer type or activation function is not supported. """ - import numpy as np + from ROOT.TMVA.Experimental import SOFIE keras_version = get_keras_version() - fLayerType = layer_data['layerType'] + fLayerType = layer_data["layerType"] # reshape and flatten layers don't have weights, but they need constant tensor for the shape if fLayerType == "Reshape" or fLayerType == "Flatten": - Attributes = layer_data['layerAttributes'] - if keras_version < '2.16': - LayerName = Attributes['_name'] + Attributes = layer_data["layerAttributes"] + if keras_version < "2.16": + LayerName = Attributes["_name"] else: - LayerName = Attributes['name'] + LayerName = Attributes["name"] if fLayerType == "Reshape": - TargetShape = np.asarray(Attributes['target_shape']).astype("int64") - TargetShape = np.insert(TargetShape,0,1) + TargetShape = np.asarray(Attributes["target_shape"]).astype("int64") + TargetShape = np.insert(TargetShape, 0, 1) else: - if '_build_input_shape' in Attributes.keys(): - input_shape = Attributes['_build_input_shape'] - elif '_build_shapes_dict' in Attributes.keys(): - input_shape = list(Attributes['_build_shapes_dict']['input_shape']) + if "_build_input_shape" in Attributes.keys(): + input_shape = Attributes["_build_input_shape"] + elif "_build_shapes_dict" in Attributes.keys(): + input_shape = list(Attributes["_build_shapes_dict"]["input_shape"]) else: - raise RuntimeError ( - "Failed to extract build input shape from " + fLayerType + " layer" - ) - TargetShape = [ gbl_namespace.TMVA.Experimental.SOFIE.ConvertShapeToLength(input_shape[1:])] + raise RuntimeError("Failed to extract build input shape from " + fLayerType + " layer") + TargetShape = [SOFIE.ConvertShapeToLength(input_shape[1:])] TargetShape = np.asarray(TargetShape) # since the AddInitializedTensor method in RModel requires unique pointer, we call a helper function # in c++ that does the conversion from a regular pointer to unique one in c++ - #print('adding initialized tensor..',LayerName, TargetShape) + # print('adding initialized tensor..',LayerName, TargetShape) shape_tensor_name = LayerName + "_shape" shape_data = TargetShape.data print(TargetShape, shape_data) print(len(TargetShape)) - rmodel.AddInitializedTensor['int64_t'](shape_tensor_name, [len(TargetShape)], shape_data) + rmodel.AddInitializedTensor["int64_t"](shape_tensor_name, [len(TargetShape)], shape_data) # These layers only have one operator - excluding the recurrent layers, in which the activation function(s) # are included in the recurrent operator if fLayerType in mapKerasLayer.keys(): - Attributes = 
layer_data['layerAttributes']
-    inputs = layer_data['layerInput']
-    outputs = layer_data['layerOutput']
-    if keras_version < '2.16':
-        LayerName = Attributes['_name']
+    Attributes = layer_data["layerAttributes"]
+    inputs = layer_data["layerInput"]
+    outputs = layer_data["layerOutput"]
+    if keras_version < "2.16":
+        LayerName = Attributes["_name"]
     else:
-        LayerName = Attributes['name']
+        LayerName = Attributes["name"]

     # Convolution/Pooling layers in keras by default assume the channels dimension is the
     # last one, while in onnx (and SOFIE's RModel) it is the first one (other than batch
@@ -151,59 +150,50 @@ def add_layer_into_RModel(rmodel, layer_data):
         # transpose layer layer_name + PostTrans output_layer_name
         fLayerOutput = outputs[0]
-        if fLayerType == 'GlobalAveragePooling2D':
-            if layer_data['channels_last']:
-                op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0, 3, 1, 2], inputs[0], LayerName+"PreTrans")
+        if fLayerType == "GlobalAveragePooling2D":
+            if layer_data["channels_last"]:
+                op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
                 rmodel.AddOperatorReference(op)
-                inputs[0] = LayerName+"PreTrans"
-                outputs[0] = LayerName+"Squeeze"
+                inputs[0] = LayerName + "PreTrans"
+                outputs[0] = LayerName + "Squeeze"
             rmodel.AddOperatorReference(mapKerasLayer[fLayerType](layer_data))
-            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Reshape(
-                gbl_namespace.TMVA.Experimental.SOFIE.ReshapeOpMode.Squeeze,
-                [2, 3],
-                LayerName + "Squeeze",
-                fLayerOutput
-            )
+            op = SOFIE.ROperator_Reshape(SOFIE.ReshapeOpMode.Squeeze, [2, 3], LayerName + "Squeeze", fLayerOutput)
             rmodel.AddOperatorReference(op)

         # A similar case arises with BatchNormalization: ONNX assumes that the 'axis' is always 1, but Keras
         # gives the user the choice of specifying it. So, we have to transpose the input so that
         # 'axis' becomes the first dimension, apply the BatchNormalization operator and then
         # transpose it again to bring back the original dimensions
-        elif fLayerType == 'BatchNormalization':
-            if '_build_input_shape' in Attributes.keys():
-                num_input_shapes = len(Attributes['_build_input_shape'])
-            elif '_build_shapes_dict' in Attributes.keys():
-                num_input_shapes = len(list(Attributes['_build_shapes_dict']['input_shape']))
+        elif fLayerType == "BatchNormalization":
+            if "_build_input_shape" in Attributes.keys():
+                num_input_shapes = len(Attributes["_build_input_shape"])
+            elif "_build_shapes_dict" in Attributes.keys():
+                num_input_shapes = len(list(Attributes["_build_shapes_dict"]["input_shape"]))

-            axis = Attributes['axis']
+            axis = Attributes["axis"]
             axis = axis[0] if isinstance(axis, list) else axis
             if axis < 0:
                 axis += num_input_shapes
             fAttrPerm = list(range(0, num_input_shapes))
             fAttrPerm[1] = axis
             fAttrPerm[axis] = 1
-            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')(fAttrPerm, inputs[0],
-                                                                                    LayerName+"PreTrans")
+            op = SOFIE.ROperator_Transpose("float")(fAttrPerm, inputs[0], LayerName + "PreTrans")
             rmodel.AddOperatorReference(op)
             inputs[0] = LayerName + "PreTrans"
             outputs[0] = LayerName + "PostTrans"
             rmodel.AddOperatorReference(mapKerasLayer[fLayerType](layer_data))
-            op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')(fAttrPerm, LayerName+"PostTrans",
-                                                                                    fLayerOutput)
+            op = SOFIE.ROperator_Transpose("float")(fAttrPerm, LayerName + "PostTrans", fLayerOutput)
             rmodel.AddOperatorReference(op)

-        elif fLayerType == 'MaxPooling2D' or fLayerType == 'AveragePooling2D':
-            if layer_data['channels_last']:
-                op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0,3,1,2], inputs[0],
-                                                                                        LayerName+"PreTrans")
+        elif fLayerType == "MaxPooling2D" or fLayerType == "AveragePooling2D":
+            if layer_data["channels_last"]:
+                op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
                 rmodel.AddOperatorReference(op)
-                inputs[0] = LayerName+"PreTrans"
-                outputs[0] = LayerName+"PostTrans"
+                inputs[0] = LayerName + "PreTrans"
+                outputs[0] = LayerName + "PostTrans"
             rmodel.AddOperatorReference(mapKerasLayer[fLayerType](layer_data))
-            if layer_data['channels_last']:
-                op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0,2,3,1],
-                                                                                        LayerName+"PostTrans", fLayerOutput)
+            if layer_data["channels_last"]:
+                op = SOFIE.ROperator_Transpose("float")([0, 2, 3, 1], LayerName + "PostTrans", fLayerOutput)
                 rmodel.AddOperatorReference(op)

         else:
@@ -213,74 +203,78 @@ def add_layer_into_RModel(rmodel, layer_data):

     # These layers require two operators - dense/conv and their activation function
     elif fLayerType in mapKerasLayerWithActivation.keys():
-        Attributes = layer_data['layerAttributes']
-        if keras_version < '2.16':
-            LayerName = Attributes['_name']
+        Attributes = layer_data["layerAttributes"]
+        if keras_version < "2.16":
+            LayerName = Attributes["_name"]
         else:
-            LayerName = Attributes['name']
-        fPActivation = Attributes['activation']
+            LayerName = Attributes["name"]
+        fPActivation = Attributes["activation"]
         LayerActivation = fPActivation.__name__
-        if LayerActivation in ['selu', 'sigmoid']:
+        if LayerActivation in ["selu", "sigmoid"]:
             rmodel.AddNeededStdLib("cmath")

         # if there is an activation function after the layer
-        if LayerActivation != 'linear':
+        if LayerActivation != "linear":
             if LayerActivation not in mapKerasLayer.keys():
-                raise Exception("TMVA.SOFIE - parsing keras activation function " + LayerActivation + " is not yet supported")
-            outputs = layer_data['layerOutput']
-            inputs = layer_data['layerInput']
+                raise Exception(
+                    "TMVA.SOFIE - parsing keras activation function " + LayerActivation + " is not yet supported"
+                )
+            outputs = layer_data["layerOutput"]
+            inputs = layer_data["layerInput"]
             fActivationLayerOutput = outputs[0]
             # like pooling, convolutional layers from keras require a transpose before and after to match
             # the onnx format
             # if the data format is channels last (can be set to channels first by the user).
-            if fLayerType == 'Conv2D':
-                if layer_data['channels_last']:
-                    op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0,3,1,2], inputs[0], LayerName+"PreTrans")
+            if fLayerType == "Conv2D":
+                if layer_data["channels_last"]:
+                    op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
                     rmodel.AddOperatorReference(op)
-                    inputs[0] = LayerName+"PreTrans"
+                    inputs[0] = LayerName + "PreTrans"
                     layer_data["layerInput"] = inputs
-            outputs[0] = LayerName+fLayerType
-            layer_data['layerOutput'] = outputs
+            outputs[0] = LayerName + fLayerType
+            layer_data["layerOutput"] = outputs
             op = mapKerasLayerWithActivation[fLayerType](layer_data)
             rmodel.AddOperatorReference(op)

-            Activation_layer_input = LayerName+fLayerType
-            if fLayerType == 'Conv2D':
-                if layer_data['channels_last']:
-                    op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0,2,3,1], LayerName+fLayerType, LayerName+"PostTrans")
+            Activation_layer_input = LayerName + fLayerType
+            if fLayerType == "Conv2D":
+                if layer_data["channels_last"]:
+                    op = SOFIE.ROperator_Transpose("float")(
+                        [0, 2, 3, 1], LayerName + fLayerType, LayerName + "PostTrans"
+                    )
                     rmodel.AddOperatorReference(op)
                     Activation_layer_input = LayerName + "PostTrans"

             # Adding the activation function
             inputs[0] = Activation_layer_input
             outputs[0] = fActivationLayerOutput
-            layer_data['layerInput'] = inputs
-            layer_data['layerOutput'] = outputs
+            layer_data["layerInput"] = inputs
+            layer_data["layerOutput"] = outputs

             rmodel.AddOperatorReference(mapKerasLayer[LayerActivation](layer_data))

-        else: # if layer is conv and the activation is linear, we need to add transpose before and after
-            if fLayerType == 'Conv2D':
-                inputs = layer_data['layerInput']
-                outputs = layer_data['layerOutput']
+        else:  # if layer is conv and the activation is linear, we need to add transpose before and after
+            if fLayerType == "Conv2D":
+                inputs = layer_data["layerInput"]
+                outputs = layer_data["layerOutput"]
                 fLayerOutput = outputs[0]
-                if layer_data['channels_last']:
-                    op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0,3,1,2], inputs[0], LayerName+"PreTrans")
+                if layer_data["channels_last"]:
+                    op = SOFIE.ROperator_Transpose("float")([0, 3, 1, 2], inputs[0], LayerName + "PreTrans")
                     rmodel.AddOperatorReference(op)
-                    inputs[0] = LayerName+"PreTrans"
-                    layer_data['layerInput'] = inputs
-                outputs[0] = LayerName+"PostTrans"
+                    inputs[0] = LayerName + "PreTrans"
+                    layer_data["layerInput"] = inputs
+                outputs[0] = LayerName + "PostTrans"
             rmodel.AddOperatorReference(mapKerasLayerWithActivation[fLayerType](layer_data))
-            if fLayerType == 'Conv2D':
-                if layer_data['channels_last']:
-                    op = gbl_namespace.TMVA.Experimental.SOFIE.ROperator_Transpose('float')([0,2,3,1], LayerName+"PostTrans", fLayerOutput)
+            if fLayerType == "Conv2D":
+                if layer_data["channels_last"]:
+                    op = SOFIE.ROperator_Transpose("float")([0, 2, 3, 1], LayerName + "PostTrans", fLayerOutput)
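+                    # Note: [0, 3, 1, 2] and [0, 2, 3, 1] are inverse permutations mapping
+                    # Keras' channels-last (NHWC) layout to the channels-first (NCHW) layout
+                    # used by ONNX and SOFIE, and back. A minimal numpy sanity check
+                    # (illustrative sketch only, with a hypothetical 28x28 RGB input):
+                    #     x = np.zeros((1, 28, 28, 3))                            # NHWC
+                    #     assert x.transpose(0, 3, 1, 2).shape == (1, 3, 28, 28)  # NCHW
+                    #     assert x.transpose(0, 3, 1, 2).transpose(0, 2, 3, 1).shape == x.shape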
                     rmodel.AddOperatorReference(op)
         return rmodel

     else:
         raise Exception("TMVA.SOFIE - parsing keras layer " + fLayerType + " is not yet supported")


-class PyKeras:
+class PyKeras:
     def Parse(filename, batch_size=1):  # If a model does not have a defined batch size, assume it is 1
         # TensorFlow/Keras is too fragile to import unconditionally. As its presence might break several ROOT
@@ -292,10 +286,11 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s
         import keras
         import numpy as np
+        from ROOT.TMVA.Experimental import SOFIE

         keras_version = get_keras_version()

-        #Check if file exists
+        # Check if file exists
         if not os.path.exists(filename):
             raise RuntimeError("Model file {} not found!".format(filename))
@@ -304,22 +299,22 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s
         keras_model.load_weights(filename)

         # create new RModel object
-        sep = '/'
-        if os.name == 'nt':
-            sep = '\\'
+        sep = "/"
+        if os.name == "nt":
+            sep = "\\"

         isep = filename.rfind(sep)
         filename_nodir = filename
         if isep != -1:
-            filename_nodir = filename[isep+1:]
+            filename_nodir = filename[isep + 1 :]

         ttime = time.time()
         gmt_time = time.gmtime(ttime)
         parsetime = time.asctime(gmt_time)

-        rmodel = gbl_namespace.TMVA.Experimental.SOFIE.RModel.RModel(filename_nodir, parsetime)
+        rmodel = SOFIE.RModel.RModel(filename_nodir, parsetime)

-        print("PyKeras: parsing model ",filename)
+        print("PyKeras: parsing model ", filename)

         # iterate over the layers and add them to the RModel

         # in case of keras 3.x (particularly in sequential models), the layer input and output name conventions are
@@ -343,77 +338,85 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s
         # hence, we need to add a custom layer iterator, which would replace the suffix of the layer's input
         # and output names
         layer_iter = 0
-        is_functional_model = True if keras_model.__class__.__name__ == 'Functional' else False
+        is_functional_model = True if keras_model.__class__.__name__ == "Functional" else False

         for layer in keras_model.layers:
-            layer_data={}
-            layer_data['layerType']=layer.__class__.__name__
-            layer_data['layerAttributes']=layer.__dict__
-            #get input names for layer
-            if keras_version < '2.16' or is_functional_model:
-                if 'input_layer' in layer.name:
-                    layer_data['layerInput'] = layer.name
+            layer_data = {}
+            layer_data["layerType"] = layer.__class__.__name__
+            layer_data["layerAttributes"] = layer.__dict__
+            # get input names for layer
+            if keras_version < "2.16" or is_functional_model:
+                if "input_layer" in layer.name:
+                    layer_data["layerInput"] = layer.name
                 else:
-                    layer_data['layerInput']=[x.name for x in layer.input] if isinstance(layer.input,list) else [layer.input.name]
+                    layer_data["layerInput"] = (
+                        [x.name for x in layer.input] if isinstance(layer.input, list) else [layer.input.name]
+                    )
             else:
-                #case of Keras3 Sequential model : in this case output of layer is input to following one, but names can be different
-                if 'input_layer' in layer.input.name:
-                    layer_data['layerInput'] = [layer.input.name]
+                # case of a Keras 3 Sequential model: the output of one layer is the input of the following one, but the names can differ
+                if "input_layer" in layer.input.name:
+                    layer_data["layerInput"] = [layer.input.name]
                 else:
-                    if (layer_iter == 0) :
+                    if layer_iter == 0:
                         input_layer_name = "tensor_input_" + layer.name
-                    else :
-                        input_layer_name = "tensor_output_" + keras_model.layers[layer_iter-1].name
-                    layer_data['layerInput'] = [input_layer_name]
-            #get output names of layer
-            if keras_version < '2.16' or is_functional_model:
-                layer_data['layerOutput']=[x.name for x in layer.output] if isinstance(layer.output,list) else [layer.output.name]
+                    else:
+                        input_layer_name = "tensor_output_" + keras_model.layers[layer_iter - 1].name
+                    layer_data["layerInput"] = [input_layer_name]
+            # get output names of layer
+            if keras_version < "2.16" or is_functional_model:
+                layer_data["layerOutput"] = (
+                    [x.name for x in layer.output] if isinstance(layer.output, list) else [layer.output.name]
+                )
             else:
-                #sequential model in Keras3
+                # sequential model in Keras3
                 output_layer_name = "tensor_output_" + layer.name
-                layer_data['layerOutput']=[x.name for x in layer.output] if isinstance(layer.output,list) else [output_layer_name]
+                layer_data["layerOutput"] = (
+                    [x.name for x in layer.output] if isinstance(layer.output, list) else [output_layer_name]
                )

             layer_iter += 1

-            fLayerType = layer_data['layerType']
-            layer_data['layerDType'] = layer.dtype
+            fLayerType = layer_data["layerType"]
+            layer_data["layerDType"] = layer.dtype

             if len(layer.weights) > 0:
-                if keras_version < '2.16':
-                    layer_data['layerWeight'] = [x.name for x in layer.weights]
+                if keras_version < "2.16":
+                    layer_data["layerWeight"] = [x.name for x in layer.weights]
                 else:
-                    layer_data['layerWeight'] = [x.path for x in layer.weights]
+                    layer_data["layerWeight"] = [x.path for x in layer.weights]
             else:
-                layer_data['layerWeight'] = []
+                layer_data["layerWeight"] = []

             # for convolutional and pooling layers we need to know the format of the data
-            if layer_data['layerType'] in ['Conv2D', 'MaxPooling2D', 'AveragePooling2D', 'GlobalAveragePooling2D']:
-                layer_data['channels_last'] = True if layer.data_format == 'channels_last' else False
+            if layer_data["layerType"] in ["Conv2D", "MaxPooling2D", "AveragePooling2D", "GlobalAveragePooling2D"]:
+                layer_data["channels_last"] = True if layer.data_format == "channels_last" else False

             # for recurrent-type layers we need to extract additional unique information
-            if layer_data['layerType'] in ["SimpleRNN", "LSTM", "GRU"]:
-                layer_data['layerAttributes']['activation'] = layer.activation
-                layer_data['layerAttributes']['direction'] = 'backward' if layer.go_backwards else 'forward'
-                layer_data['layerAttributes']["units"] = layer.units
-                layer_data['layerAttributes']["layout"] = layer.input.shape[0] is None
-                layer_data['layerAttributes']["hidden_size"] = layer.output.shape[-1]
+            if layer_data["layerType"] in ["SimpleRNN", "LSTM", "GRU"]:
+                layer_data["layerAttributes"]["activation"] = layer.activation
+                layer_data["layerAttributes"]["direction"] = "backward" if layer.go_backwards else "forward"
+                layer_data["layerAttributes"]["units"] = layer.units
+                layer_data["layerAttributes"]["layout"] = layer.input.shape[0] is None
+                layer_data["layerAttributes"]["hidden_size"] = layer.output.shape[-1]

                 # for GRU and LSTM we need to extract an additional activation function
-                if layer_data['layerType'] != "SimpleRNN":
-                    layer_data['layerAttributes']['recurrent_activation'] = layer.recurrent_activation
+                if layer_data["layerType"] != "SimpleRNN":
+                    layer_data["layerAttributes"]["recurrent_activation"] = layer.recurrent_activation

                 # for GRU there are two variants of the reset gate location, we need to know which one it is
-                if layer_data['layerType'] == "GRU":
-                    layer_data['layerAttributes']['linear_before_reset'] = 1 if layer.reset_after and layer.recurrent_activation.__name__ == "sigmoid" else 0
+                if layer_data["layerType"] == "GRU":
+                    layer_data["layerAttributes"]["linear_before_reset"] = (
+                        1 if layer.reset_after and layer.recurrent_activation.__name__ == "sigmoid" else 0
+                    )

             # Ignoring the input layer of the model
-            if(fLayerType == "InputLayer"):
+            if fLayerType == "InputLayer":
                 continue

             # Adding any required routines depending on the layer types for generating inference code.
-            if (fLayerType == "Dense"):
+            if fLayerType == "Dense":
                 rmodel.AddBlasRoutines({"Gemm", "Gemv"})
-            elif (fLayerType == "BatchNormalization"):
+            elif fLayerType == "BatchNormalization":
                 rmodel.AddBlasRoutines({"Copy", "Axpy"})
-            elif (fLayerType == "Conv1D" or fLayerType == "Conv2D" or fLayerType == "Conv3D"):
+            elif fLayerType == "Conv1D" or fLayerType == "Conv2D" or fLayerType == "Conv3D":
                 rmodel.AddBlasRoutines({"Gemm", "Axpy"})
             rmodel = add_layer_into_RModel(rmodel, layer_data)
@@ -421,28 +424,32 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s
         weight = []
         for idx in range(len(keras_model.get_weights())):
             weightProp = {}
-            if keras_version < '2.16':
-                weightProp['name'] = keras_model.weights[idx].name
+            if keras_version < "2.16":
+                weightProp["name"] = keras_model.weights[idx].name
             else:
-                weightProp['name'] = keras_model.weights[idx].path
-            weightProp['dtype'] = keras_model.get_weights()[idx].dtype.name
-            if 'conv' in weightProp['name'] and keras_model.weights[idx].shape.ndims == 4:
-                weightProp['value'] = keras_model.get_weights()[idx].transpose((3, 2, 0, 1)).copy()
+                weightProp["name"] = keras_model.weights[idx].path
+            weightProp["dtype"] = keras_model.get_weights()[idx].dtype.name
+            if "conv" in weightProp["name"] and keras_model.weights[idx].shape.ndims == 4:
+                weightProp["value"] = keras_model.get_weights()[idx].transpose((3, 2, 0, 1)).copy()
             else:
-                weightProp['value'] = keras_model.get_weights()[idx]
+                weightProp["value"] = keras_model.get_weights()[idx]
             weight.append(weightProp)

         # Traversing through all the weight tensors
         for weightIter in range(len(weight)):
             fWeightTensor = weight[weightIter]
-            fWeightName = fWeightTensor['name']
-            fWeightDType = gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fWeightTensor['dtype'])
-            fWeightTensorValue = fWeightTensor['value']
+            fWeightName = fWeightTensor["name"]
+            fWeightDType = SOFIE.ConvertStringToType(fWeightTensor["dtype"])
+            fWeightTensorValue = fWeightTensor["value"]
             fWeightTensorSize = 1
             fWeightTensorShape = []

-            #IS IT BATCH SIZE? CHECK ONNX
-            if 'simple_rnn' in fWeightName or 'lstm' in fWeightName or ('gru' in fWeightName and 'bias' not in fWeightName):
+            # IS IT BATCH SIZE? CHECK ONNX
+            if (
+                "simple_rnn" in fWeightName
+                or "lstm" in fWeightName
+                or ("gru" in fWeightName and "bias" not in fWeightName)
+            ):
                 fWeightTensorShape.append(1)

             # Building the shape vector and finding the tensor size
@@ -450,35 +457,33 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s
                 fWeightTensorShape.append(fWeightTensorValue.shape[j])
                 fWeightTensorSize *= fWeightTensorValue.shape[j]

-            if fWeightDType == gbl_namespace.TMVA.Experimental.SOFIE.ETensorType.FLOAT:
+            if fWeightDType == SOFIE.ETensorType.FLOAT:
                 fWeightArray = fWeightTensorValue

                 # weights conversion format between keras and onnx for lstm: the order of the different
                 # elements (input, output, forget, cell) inside the vector/matrix is different
-                if 'lstm' in fWeightName:
-                    if 'kernel' in fWeightName:
-                        units = int(fWeightArray.shape[1]/4)
-                        W_i = fWeightArray[:, :units].copy()
-                        W_f = fWeightArray[:, units: units * 2].copy()
-                        W_c = fWeightArray[:, units * 2: units * 3].copy()
-                        W_o = fWeightArray[:, units * 3:].copy()
-                        fWeightArray[:, units: units * 2] = W_o
-                        fWeightArray[:, units * 2: units * 3] = W_f
-                        fWeightArray[:, units * 3:] = W_c
-                    else: #bias
-                        units = int(fWeightArray.shape[0]/4)
-                        W_i = fWeightArray[:units].copy()
-                        W_f = fWeightArray[units: units * 2].copy()
-                        W_c = fWeightArray[units * 2: units * 3].copy()
-                        W_o = fWeightArray[units * 3:].copy()
-                        fWeightArray[units: units * 2] = W_o
-                        fWeightArray[units * 2: units * 3] = W_f
-                        fWeightArray[units * 3:] = W_c
+                if "lstm" in fWeightName:
+                    if "kernel" in fWeightName:
+                        units = int(fWeightArray.shape[1] / 4)
+                        W_f = fWeightArray[:, units : units * 2].copy()
+                        W_c = fWeightArray[:, units * 2 : units * 3].copy()
+                        W_o = fWeightArray[:, units * 3 :].copy()
+                        fWeightArray[:, units : units * 2] = W_o
+                        fWeightArray[:, units * 2 : units * 3] = W_f
+                        fWeightArray[:, units * 3 :] = W_c
+                    else:  # bias
+                        units = int(fWeightArray.shape[0] / 4)
+                        W_f = fWeightArray[units : units * 2].copy()
+                        W_c = fWeightArray[units * 2 : units * 3].copy()
+                        W_o = fWeightArray[units * 3 :].copy()
+                        fWeightArray[units : units * 2] = W_o
+                        fWeightArray[units * 2 : units * 3] = W_f
+                        fWeightArray[units * 3 :] = W_c

                 # need to make specific adjustments for recurrent weights and biases
-                if ('simple_rnn' in fWeightName or 'lstm' in fWeightName or 'gru' in fWeightName):
+                if "simple_rnn" in fWeightName or "lstm" in fWeightName or "gru" in fWeightName:
                     # reshaping weight matrices for recurrent layers due to keras-onnx inconsistencies
-                    if 'kernel' in fWeightName:
+                    if "kernel" in fWeightName:
                         fWeightArray = np.transpose(fWeightArray)
                         fWeightTensorShape[1], fWeightTensorShape[2] = fWeightTensorShape[2], fWeightTensorShape[1]
@@ -486,24 +491,26 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s

                     # the recurrent bias and the cell bias can be the same, in which case we need to add a
                     # vector of zeros for the recurrent bias
-                    if 'bias' in fWeightName and len(fData.shape) == 1:
+                    if "bias" in fWeightName and len(fData.shape) == 1:
                         fWeightTensorShape[1] *= 2
-                        fRbias = fData.copy()*0
-                        fData = np.concatenate((fData,fRbias))
+                        fRbias = fData.copy() * 0
+                        fData = np.concatenate((fData, fRbias))
                 else:
                     fData = fWeightArray.flatten()

-                rmodel.AddInitializedTensor['float'](fWeightName, fWeightTensorShape, fData)
+                rmodel.AddInitializedTensor["float"](fWeightName, fWeightTensorShape, fData)
             else:
                 raise TypeError("Type error: TMVA SOFIE does not yet support data layer type: " + fWeightDType)

         # Extracting input tensor info
-        if keras_version < '2.16':
+        if keras_version < "2.16":
             fPInputs = keras_model.input_names
         else:
             fPInputs = [x.name for x in keras_model.inputs]

-        fPInputShape = keras_model.input_shape if isinstance(keras_model.input_shape, list) else [keras_model.input_shape]
+        fPInputShape = (
+            keras_model.input_shape if isinstance(keras_model.input_shape, list) else [keras_model.input_shape]
+        )
         fPInputDType = []
         for idx in range(len(keras_model.inputs)):
             dtype = keras_model.inputs[idx].dtype.__str__()
@@ -514,10 +521,10 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s

         if len(fPInputShape) == 1:
             inputName = fPInputs[0]
-            inputDType = gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(fPInputDType[0])
+            inputDType = SOFIE.ConvertStringToType(fPInputDType[0])
             # convert to a list to modify batch size
             inputShape = list(fPInputShape[0])
-            #set the batch size in case of -1 or None as first value
+            # set the batch size in case the first value is -1 or None
             if inputShape[0] is None or inputShape[0] <= 0:
                 inputShape[0] = batch_size
             rmodel.AddInputTensorInfo(inputName, inputDType, inputShape)
@@ -525,16 +532,16 @@ def Parse(filename, batch_size=1): # If a model does not have a defined batch s
         else:
             # Iterating through multiple input tensors
             for inputName, inputDType, inputShapeTuple in zip(fPInputs, fPInputDType, fPInputShape):
-                inputDType = gbl_namespace.TMVA.Experimental.SOFIE.ConvertStringToType(inputDType)
+                inputDType = SOFIE.ConvertStringToType(inputDType)
                 inputShape = list(inputShapeTuple)
                 if inputShape[0] is None or inputShape[0] <= 0:
                     inputShape[0] = batch_size
-                 rmodel.AddInputTensorInfo(inputName, inputDType, inputShape)
+                rmodel.AddInputTensorInfo(inputName, inputDType, inputShape)
                 rmodel.AddInputTensorName(inputName)

         # Adding OutputTensorInfos
         outputNames = []
-        if keras_version < '2.16' or is_functional_model:
+        if keras_version < "2.16" or is_functional_model:
             for layerName in keras_model.output_names:
                 final_layer = keras_model.get_layer(layerName)
                 output_layer_name = final_layer.output.name
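+                # The input-shape handling above replaces an undefined leading batch dimension
+                # (None or a non-positive value) with the caller-supplied batch_size. A minimal
+                # sketch of that normalization (illustrative only; the helper name and shapes
+                # are hypothetical):
+                #     def normalize_batch_dim(shape, batch_size=1):
+                #         shape = list(shape)
+                #         if shape[0] is None or shape[0] <= 0:
+                #             shape[0] = batch_size
+                #         return shape
+                #     assert normalize_batch_dim((None, 28, 28, 1)) == [1, 28, 28, 1]
+                #     assert normalize_batch_dim((-1, 784), batch_size=32) == [32, 784]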
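+                # Note on the LSTM weight shuffling a few hunks above: Keras concatenates the
+                # gates in i, f, c, o order, while ONNX (and therefore SOFIE) expects i, o, f, c.
+                # A minimal numpy sketch of that reordering (illustrative only; the toy kernel
+                # and 'units' value are hypothetical):
+                #     units = 2
+                #     k = np.repeat([0, 1, 2, 3], units)[np.newaxis, :]  # columns tagged i,f,c,o
+                #     onnx = k.copy()
+                #     onnx[:, units : units * 2] = k[:, units * 3 :]            # o
+                #     onnx[:, units * 2 : units * 3] = k[:, units : units * 2]  # f
+                #     onnx[:, units * 3 :] = k[:, units * 2 : units * 3]        # c
+                #     assert (onnx == np.repeat([0, 3, 1, 2], units)).all()     # i, o, f, c order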