Keras Tuner error: NotImplementedError: numpy() is only available when eager execution is enabled

I'm trying to use Keras Tuner. I've already simplified the code as much as possible, but it still throws an internal error inside Keras, even though I did enable eager execution in TensorFlow... Please advise.

My environment:
Platform: linux
Python: 3.12.2
Pandas: 2.2.2
Numpy: 2.2.3
Tensorflow: 2.18.0
Keras: 3.8.0
Keras_tuner: 1.4.7

# Main
import platform
import os
from sys import platform as pltf
import getpass

import pandas as pd
from pandas import read_csv, DataFrame

import warnings
warnings.filterwarnings("ignore")
pd.options.display.max_rows = 100
#pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_columns', None)


# Tensorflow
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["TF_USE_LEGACY_KERAS"] = '0'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# https://www.omi.me/blogs/tensorflow-errors/eager-execution-not-enabled-in-tensorflow-causes-and-how-to-fix
# https://www.thecoderscamp.com/numpy-is-only-available-when-eager-execution-is-enabled/
# Eager execution is a TensorFlow mode in which operations run immediately, making
# TensorFlow feel more like ordinary Python programming. In TensorFlow 2.0 and above
# eager execution is actually enabled by default (it was opt-in only in TF 1.x).
# To get rid of this error and use NumPy with TensorFlow, the articles above suggest
# enabling eager execution by calling tf.compat.v1.enable_eager_execution().
# Once eager execution is enabled, you can import and use NumPy without hitting
# the error mentioned above.
import numpy as np
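
# Quick sanity check (a debugging sketch, not part of the original pipeline):
# in TF 2.x eager mode is on by default, so this should print True even
# without the compat call above.
print('Eager execution enabled:', tf.executing_eagerly())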

# Keras
#from tensorflow import keras
import keras
from keras.models import Sequential, Model
from keras import layers#, BatchNormalization
#from layers import Dense, LSTM, Bidirectional
from keras import Input
from keras import optimizers
import keras_tuner as kt

tf.config.list_physical_devices('GPU')

if pltf == "linux":    
    my_path_data_dir = os.path.join(os.getcwd(), 'tmp')

finish_the_best_df = ['ahпс_Recurrent_one_hat_bidirect']

VERBOSE=2
ACTIVATION='linear'
OPTIMIZER=keras.optimizers.Adam()
LOSS='mae' # Loss function to minimize (the original trailing comma made LOSS a tuple)
BATCH_SIZE=200

#ACCURACY=True
ACCURACY=False

if ACCURACY:
    # If tracking accuracy
    METRICS=['accuracy'] # List of metrics to monitor
    MONITOR='accuracy'
    HISTORY='accuracy'
    VAL_HISTORY='val_accuracy'
    MODE='max'
    TR_LABEL='Training accuracy'
    VAL_LABEL='Validation accuracy'
    Y_LABEL='Accuracy'
else:
    # If tracking loss
    METRICS=['loss'] # List of metrics to monitor
    MONITOR='loss'
    MODE='min' # loss is minimized, so 'min' rather than 'max'
    HISTORY='loss'
    VAL_HISTORY='val_loss'
    TR_LABEL='Training loss'
    VAL_LABEL='Validation loss'
    Y_LABEL='Loss'

current_parametr_name = current_model_name = ''

%%time

debug=True
#debug=False

# Iterate over all the best models
for i in range(0, len(finish_the_best_df)):
    # "Разбираем" имя модели и получаем имя параметра + имя алгоритма
    name = finish_the_best_df[i]
    #print(name)
    indx_1 = name.find('_')
    indx_2 = name.find('_', indx_1+1)
    current_parametr_name = name[0: indx_1]
    # Since the baseline algorithm's name contains the word "Baseline", tell the baseline apart from all the others...
    if name.find('Baseline') == -1:
        algoritm = name[indx_1+1:] # Substring not found
    else:
        algoritm = name[indx_1+1: indx_2] # Substring 'Baseline' found
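    # e.g. 'ahпс_Recurrent_one_hat_bidirect' -> parameter 'ahпс', algorithm 'Recurrent_one_hat_bidirect'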
        
    print()
    print('***')
    print('current_parametr_name=', current_parametr_name, 'algoritm=', algoritm)
    print('***')
    print()

    match algoritm:
        case 'LinearRegression':
            pass
        case 'SGDRegressor':
            pass
        case 'GradientBoostingRegressor':
            pass
        case 'RandomForestRegressor':
            pass
        case 'Simple_dense_autoencoder':
            pass
        case 'Simple_dense_autoencoder_full_data':
            pass
        case 'Recurrent_autoencoder':
            pass
        case 'Recurrent_one_hat_bidirect':
            # I don't yet understand why, but this has to be re-enabled before every neural-network run...
            # https://www.omi.me/blogs/tensorflow-errors/eager-execution-not-enabled-in-tensorflow-causes-and-how-to-fix
            #tf.compat.v1.enable_eager_execution()
            
            model_name = current_parametr_name+'_Recurrent_one_hat_bidirect_KerasTuner'

            X_train_data = np.zeros((10000,16,45))
            Y_train_data = np.zeros((10000))
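            # (the zero arrays above are dummy stand-ins just to reproduce the error:
            #  10000 samples, 16 time steps, 45 features)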

            # Define the model-building function
            def build_model_Recurrent_Bidirectional_One_Hat(hp):
                # Tuning your model using Keras Tuner
                # Tune the number of units in the first Dense layer
                hp_units = hp.Int("units", min_value=16, max_value=32, step=8)            
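                # (with min_value=16, max_value=32, step=8 the tuner samples units from {16, 24, 32})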
                #hp_units2 = hp.Int("units2", min_value=QTY_PARAMETRS, max_value=QTY_PARAMETRS*2, step=QTY_PARAMETRS)            
                # Between mae & mse
                #hp_loss = hp.Choice("loss", values=["mae", "mse"])

                # Build the model from the shape of X_train_data
                units = X_train_data.shape[-1]
                input_shape = (X_train_data.shape[-2], X_train_data.shape[-1])

    
                input_tensor = Input(shape=input_shape)
                x = keras.layers.Bidirectional(keras.layers.LSTM(units=units, activation=ACTIVATION, return_sequences=True))(input_tensor)
                x = keras.layers.Bidirectional(keras.layers.LSTM(units=hp_units, activation=ACTIVATION, return_sequences=True))(x)
                #x = keras.layers.Bidirectional(layers.LSTM(units=hp_units2, activation=ACTIVATION, return_sequences=True))(x)
                x = keras.layers.Bidirectional(keras.layers.LSTM(units=8, activation=ACTIVATION, return_sequences=True))(x)
                x = keras.layers.Bidirectional(keras.layers.LSTM(units=4, activation=ACTIVATION, return_sequences=False))(x)
                output_tensor = keras.layers.Dense(units=1, activation=ACTIVATION)(x)

                model = Model(input_tensor, output_tensor)
                #model.compile(optimizer=OPTIMIZER, loss=hp_loss, metrics=METRICS) # noticeably worse with mse...
                model.compile(optimizer=OPTIMIZER, loss='mse', metrics=METRICS) # noticeably worse with mse...
                #model.summary()
                
                # Raise an error when the model is too large
                num_params = model.count_params()
                if num_params > 120000:
                    raise ValueError(f"Model too large! It contains {num_params} params.")
    
                return model
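
            # Sanity-check sketch (smoke_model is a hypothetical debugging helper,
            # not part of the pipeline): building the hypermodel once with default
            # hyperparameters outside the tuner should succeed; if this already
            # raises, the problem is in the model itself, not in Keras Tuner.
            smoke_model = build_model_Recurrent_Bidirectional_One_Hat(kt.HyperParameters())
            smoke_model.summary()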

            # Check whether the model already exists...
            #have_you_model = adt.do_you_have_the_model(my_path_data_dir, model_name, keras_mark=True, debug=True)
            # Stub for now...
            model_file=os.path.join(my_path_data_dir, model_name+'.keras')
            have_you_model = False, model_file

            if not have_you_model[0]: # If there is no model, create one...
                print()
                print('No model found, creating the RandomSearch space...')
                print()
                # Instantiate the tuner build_model_Recurrent_Bidirectional_One_Hat
                tuner = kt.RandomSearch(hypermodel=build_model_Recurrent_Bidirectional_One_Hat,# the hypermodel
                                        objective='accuracy', # MONITOR
                                        #objective=MONITOR, # MONITOR
                                        max_trials=3, # The total number of trials to run during the search.
                                        #max_trials=1, # The total number of trials to run during the search.
                                        executions_per_trial=2,
                                        #executions_per_trial=1,
                                        #max_retries_per_trial=3,# !
                                        max_retries_per_trial=1,# !
                #                        max_consecutive_failed_trials=8, # !
                                        overwrite=True,
                                        #overwrite=False,
                                        directory=my_path_data_dir, # directory to save logs 
                                        project_name=model_name)

                # hypertuning settings
                # print('Created the tuning search space...')
                tuner.search_space_summary()
                # print()

                # I don't yet understand why, but this has to be re-enabled before every neural-network run...
                # https://www.omi.me/blogs/tensorflow-errors/eager-execution-not-enabled-in-tensorflow-causes-and-how-to-fix
                tf.compat.v1.enable_eager_execution()
                
                # Searching the best hyperparameter
                # Perform hypertuning
                tuner.search(X_train_data,#X_train_recurrent_data,
                             Y_train_data,#Y_train_recurrent_data,
                             epochs=200,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             validation_split=0.25,
                             #callbacks=callbacks_list,
                             verbose=VERBOSE)

            #     # Get the top 2 models.
            #     models = tuner.get_best_models(num_models=2)
            #     best_model = models[0]

            #     print()
            #     print('Models...\n', models)
            #     print('After tuner.search...')
               
            #     tuner.results_summary()
            #     print()
            #     print('best_model.summary()...')
            #     best_model.summary()
            #     print()


        case 'Recurrent_one_hat':
            print('Recurrent_one_hat')

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
File :123

File ~/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/base_tuner.py:235, in BaseTuner.search(self, *fit_args, **fit_kwargs)
    233     self.on_trial_begin(trial)
    234     self._try_run_and_update_trial(trial, *fit_args, **fit_kwargs)
--> 235     self.on_trial_end(trial)
    236 self.on_search_end()

File ~/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/base_tuner.py:339, in BaseTuner.on_trial_end(self, trial)
    333 def on_trial_end(self, trial):
    334     """Called at the end of a trial.
    335
    336     Args:
    337         trial: A Trial instance.
    338     """
--> 339     self.oracle.end_trial(trial)
    340     self.save()

File ~/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/oracle.py:108, in synchronized.<locals>.wrapped_func(*args, **kwargs)
    106 LOCKS[oracle].acquire()
    107 THREADS[oracle] = thread_name
--> 108 ret_val = func(*args, **kwargs)
    109 if need_acquire:
    110     THREADS[oracle] = None

File ~/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/oracle.py:588, in Oracle.end_trial(self, trial)
    586 if not self._retry(trial):
    587     self.end_order.append(trial.trial_id)
--> 588     self._check_consecutive_failures()
    590 self._save_trial(trial)
    591 self.save()

File ~/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/oracle.py:545, in Oracle._check_consecutive_failures(self)
    543     consecutive_failures = 0
    544 if consecutive_failures == self.max_consecutive_failed_trials:
--> 545     raise RuntimeError(
    546         "Number of consecutive failures exceeded the limit "
    547         f"of {self.max_consecutive_failed_trials}.\n"
    548         + (trial.message or "")
    549     )

RuntimeError: Number of consecutive failures exceeded the limit of 3.
Traceback (most recent call last):
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/base_tuner.py", line 274, in _try_run_and_update_trial
    self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/base_tuner.py", line 239, in _run_and_update_trial
    results = self.run_trial(trial, *fit_args, **fit_kwargs)
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
    obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/tuner.py", line 233, in _build_and_fit_model
    results = self.hypermodel.fit(hp, model, *args, **kwargs)
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras_tuner/src/engine/hypermodel.py", line 149, in fit
    return model.fit(*args, **kwargs)
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras/src/utils/traceback_utils.py", line 122, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/home/sergey/miniconda3/envs/P312TF/lib/python3.12/site-packages/keras/src/backend/tensorflow/core.py", line 155, in convert_to_numpy
    return np.array(x)
NotImplementedError: numpy() is only available when eager execution is enabled.

