Neural networks. raise ValueError("The passed save_path is not a valid checkpoint: " + ValueError: The passed save_path is not

11.04.2023, 16:46
Hello! I've run into a problem I can't solve. Could you please tell me what might be causing it and how to fix it?

mit_data_preprocessing.py
Python
import cv2
import glob
import numpy as np

# Output directory for the .npy arrays and input glob for the face dataset
save_to = 'C:\\Users\\wefy2\\PycharmProjects\\CNN-Facial-Recognition-master1\\data'
all_faces = [img for img in glob.glob('C:\\Users\\wefy2\\PycharmProjects\\CNN-Facial-Recognition-master1\\data\\gt_db\\s*\\*.jpg')]

faces_x = []
faces_y = []

# Haar cascade for frontal-face detection
faceCascade = cv2.CascadeClassifier('data\\haarcascade_frontalface.xml')

for i, face in enumerate(all_faces):
    image = cv2.imread(face)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)

    # Keep only images in which exactly one face was detected
    if len(faces) == 1:
        x, y, w, h = faces[0]
        cropped_img = image[y:y + h, x:x + w]

        faces_x.append(cv2.resize(cropped_img, (128, 128)))
        # The label is the subject folder name 's<N>' with the leading 's' stripped
        faces_y.append(int(face.split('\\')[-2][1:]))

    print('Finished: ', i, ' Out of: ', len(all_faces))


faces_x, faces_y = np.array(faces_x), np.array(faces_y)

np.save(save_to + 'x_train', faces_x)
np.save(save_to + 'y_train', faces_y)
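
A side note on the save paths: save_to + 'x_train' joins the strings without a path separator, so the arrays are written as datax_train.npy and datay_train.npy in the project root rather than inside the data folder. That happens to match what train_face_id.py loads below, but if files inside data are actually intended, the usual approach is os.path.join. A minimal sketch under that assumption (the placeholder arrays only stand in for the lists built above):

Python
import os
import numpy as np

save_to = 'C:\\Users\\wefy2\\PycharmProjects\\CNN-Facial-Recognition-master1\\data'
faces_x = np.zeros((1, 128, 128, 3), dtype=np.uint8)   # placeholder for the cropped faces
faces_y = np.zeros((1,), dtype=np.int64)               # placeholder for the labels

# os.path.join inserts the separator, producing ...\data\x_train.npy and ...\data\y_train.npy
np.save(os.path.join(save_to, 'x_train'), faces_x)
np.save(os.path.join(save_to, 'y_train'), faces_y)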
train_face_id.py
Python
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
 
# loading data
faces_x = np.load('datax_train.npy')
faces_y = np.load('datay_train.npy')
faces_x = tf.expand_dims(faces_x, axis=0)
faces_y = tf.expand_dims(faces_y, axis=0)
train_dataset = tf.data.Dataset.from_tensor_slices((faces_x, faces_y))
print('Faces were loaded successfully.')
 
 
# Build the convolutional network that maps a face crop to a 128-d embedding ("hash")
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
                           activation='relu', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
                           activation='relu', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(filters=32, kernel_size=2,
                           padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Conv2D(filters=32, kernel_size=2,
                           padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(128, activation='sigmoid')
])
 
 
# Compile the model
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tfa.losses.TripletSemiHardLoss(margin=3.0))
print(model.summary())
print('Model Compiled Successfully.')
 
 
# Train the model
print('Training has started.')
history = model.fit(train_dataset, epochs=10, verbose=1)
 
 
# Save the model
model.save('models/face_id_model')
print('Training is finished.')
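
For reference, model.save('models/face_id_model') writes a Keras SavedModel directory (saved_model.pb plus a variables/ subfolder), and the matching way to load it back in TF2 is tf.keras.models.load_model, not a TF1 tf.train.Saver pointed at the variables files. A minimal sketch, assuming this script has already been run so that models/face_id_model exists; compile=False skips restoring the custom TripletSemiHardLoss, which is not needed for inference:

Python
import numpy as np
import tensorflow as tf

# Load the SavedModel written by model.save() above.
face_id_model = tf.keras.models.load_model('models/face_id_model', compile=False)

# Embed one 128x128 face crop (dummy data here, only to show the shapes).
face = np.zeros((1, 128, 128, 3), dtype=np.float32)
embedding = face_id_model.predict(face)
print(embedding.shape)   # (1, 128)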
test_face_id.py
Python
import numpy as np
import cv2
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
 
class FaceID:
    def __init__(self):
        # Keras MobileNet backbone used only as a feature extractor
        model = tf.keras.Sequential()
        net = tf.keras.applications.MobileNet(input_shape=(128, 128, 3), weights='imagenet', include_top=False)
        model.add(net)
        model.add(tf.keras.layers.GlobalAveragePooling2D())
        self.features_extractor = model

        # TF1-style graph: two dense layers mapping 1024-d features to the 128-d hash
        self.x_holder = tf.placeholder(shape=[None, 1024], dtype=tf.float32)
        fc_1 = tf.layers.Dense(units=512, activation=tf.nn.relu)(self.x_holder)
        fc_2 = tf.layers.Dense(units=128, activation=tf.nn.sigmoid)(fc_1)

        self.face_id = fc_2

        self.sess = None

    def load_network(self, path='data\\models\\variables\\variables'):
        # tf.train.Saver expects a TF1 name-based checkpoint prefix at `path`
        saver = tf.train.Saver()
        self.sess = tf.Session()
        saver.restore(self.sess, path)

    def get_id(self, imgs):
        # Extract MobileNet features, then run the dense layers to get the embedding
        imgs = imgs.reshape((-1, 128, 128, 3))
        features = self.features_extractor.predict(imgs)
        embeds = self.sess.run([self.face_id], feed_dict={self.x_holder: features})

        return embeds[0]
 
 
class FaceExtractor:
    def __init__(self, cascade_path='data\\haarcascade_frontalface.xml'):
        self.faceCascade = cv2.CascadeClassifier(cascade_path)
 
    def extract_single_face_from_path(self, img_path):
        image = cv2.imread(img_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.faceCascade.detectMultiScale(gray, 1.3, 5)
 
        if len(faces) == 1:
            x, y, w, h = faces[0]
            cropped_img = image[y:y + h, x:x + w]
            return cv2.resize(cropped_img, (128, 128))
        else:
            faces = self.faceCascade.detectMultiScale(gray, 1.3, 10)
            if len(faces) == 1:
                x, y, w, h = faces[0]
                cropped_img = image[y:y + h, x:x + w]
                return cv2.resize(cropped_img, (128, 128))
 
        return None
 
    def faces_from_image(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return self.faceCascade.detectMultiScale(gray, 1.3, 5)
 
test_nn = FaceID()
face_ex = FaceExtractor()
test_nn.load_network()
ref_face = face_ex.extract_single_face_from_path("ref.jpg")
ref_face_hash = test_nn.get_id(ref_face)[0]
cap = cv2.VideoCapture(0)
 
while True:
    ret, frame = cap.read()
    faces = face_ex.faces_from_image(frame)
 
    for face in faces:
        x, y, w, h = face
        cropped_face = cv2.resize(frame[y:y + h, x:x + w], (128, 128))
        cropped_hash = test_nn.get_id(cropped_face)[0]
 
        cv2.rectangle(frame, (x, y), (x + w, y + h), 1, 3)
 
        distance_1 = np.sum(np.power(ref_face_hash - cropped_hash, 2))
 
        if distance_1 <= 3:
            cv2.putText(frame, 'ref ', (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 1, 2, cv2.LINE_AA)
        else:
            cv2.putText(frame, 'Nan ', (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 1, 2, cv2.LINE_AA)
 
    cv2.imshow('My FaceID', frame)
 
    if cv2.waitKey(1) & 0xFF == ord('q'):
        ret, frame = cap.read()
        break
The error occurs in test_face_id.py:
C:\Users\wefy2\AppData\Local\Programs\Python\Python310\python.exe C:/Users/wefy2/PycharmProjects/CNN-Facial-Recognition-master1/test_face_id.py
WARNING:tensorflow:From C:\Users\wefy2\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorflow\python\compat\v2_compat.py:107: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
WARNING:tensorflow:From C:\Users\wefy2\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\layers\normalization\batch_normalization.py:581: _colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
2023-04-11 14:48:36.671960: W tensorflow/c/c_api.cc:300] Operation '{name:'conv_pw_8_bn/beta/Assign' id:1360 op device:{requested: '', assigned: ''} def:{{{node conv_pw_8_bn/beta/Assign}} = AssignVariableOp[_has_manual_control_dependencies=true, dtype=DT_FLOAT, validate_shape=false](conv_pw_8_bn/beta, conv_pw_8_bn/beta/Initializer/zeros)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
Traceback (most recent call last):
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 63, in <module>
test_nn.load_network()
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 25, in load_network
saver.restore(self.sess, path)
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 1410, in restore
raise ValueError("The passed save_path is not a valid checkpoint: " +
ValueError: The passed save_path is not a valid checkpoint: data\models\variables\variables

Process finished with exit code 1


Added 22 minutes later
Python 3.7.6
tensorflow - Version: 2.11.0
tensorflow-addons - Version: 0.19.0
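
Two things stand out in this traceback. The prefix passed to the Saver, data\models\variables\variables, is not a location the training script ever wrote to: train_face_id.py saved to models/face_id_model, so the weight files sit under models/face_id_model/variables/variables. And even with the right prefix, tf.train.Saver restores by TF1 variable names, while the SavedModel checkpoint is object-based. Before restoring anything, it may help to check what is actually on disk; a minimal diagnostic sketch (path adjusted to the training script above):

Python
import os
import tensorflow as tf

prefix = 'models/face_id_model/variables/variables'

# A readable checkpoint prefix has a matching .index file next to it.
print('index file exists:', os.path.exists(prefix + '.index'))

# List the keys and shapes stored under that prefix, if it is there.
if os.path.exists(prefix + '.index'):
    for name, shape in tf.train.list_variables(prefix):
        print(name, shape)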

Added 1 hour 9 minutes later
There is also this variant of the code, for a different version of the library:
Python
import tensorflow as tf
import numpy as np
import cv2
import os
 
class FaceID(tf.keras.Model):
    def __init__(self):
        super(FaceID, self).__init__()
        net = tf.keras.applications.MobileNet(input_shape=(128, 128, 3), weights='imagenet', include_top=False)
        self.features_extractor = tf.keras.Sequential([net, tf.keras.layers.GlobalAveragePooling2D()])
        self.fc_1 = tf.keras.layers.Dense(units=512, activation=tf.nn.relu)
        self.fc_2 = tf.keras.layers.Dense(units=128, activation=tf.nn.sigmoid)
 
    def call(self, inputs):
        x = self.features_extractor(inputs)
        x = self.fc_1(x)
        x = self.fc_2(x)
        return x
 
class FaceExtractor:
    def __init__(self, cascade_path='data\\haarcascade_frontalface.xml'):
        self.faceCascade = cv2.CascadeClassifier(cascade_path)
 
    def extract_single_face_from_path(self, img_path):
        image = cv2.imread(img_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.faceCascade.detectMultiScale(gray, 1.3, 5)
 
        if len(faces) == 1:
            x, y, w, h = faces[0]
            cropped_img = image[y:y + h, x:x + w]
            return cv2.resize(cropped_img, (128, 128))
        else:
            faces = self.faceCascade.detectMultiScale(gray, 1.3, 10)
            if len(faces) == 1:
                x, y, w, h = faces[0]
                cropped_img = image[y:y + h, x:x + w]
                return cv2.resize(cropped_img, (128, 128))
 
        return None
 
    def faces_from_image(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return self.faceCascade.detectMultiScale(gray, 1.3, 5)
 
 
test_nn = FaceID()
face_ex = FaceExtractor()
test_nn.load_weights('models\\face_id_model\\variables\\variables')
checkpoint = tf.train.Checkpoint(model=test_nn)
checkpoint.restore(tf.train.latest_checkpoint('models\\face_id_model\\variables\\variables'))
 
ref_face = face_ex.extract_single_face_from_path("data\\model\\me.jpg")
ref_face_hash = test_nn(tf.expand_dims(ref_face, axis=0)).numpy()[0]
 
cap = cv2.VideoCapture(0)
 
while True:
    ret, frame = cap.read()
 
    if not ret:
        print("Error reading camera feed")
        break
 
    faces = face_ex.faces_from_image(frame)
 
    for face in faces:
        x, y, w, h = face
        cropped_face = cv2.resize(frame[y:y + h, x:x + w], (128, 128))
        cropped_hash = test_nn(tf.expand_dims(cropped_face, axis=0)).numpy()[0]
 
        cv2.rectangle(frame, (x, y), (x + w, y + h), 1, 3)
 
        distance_1 = np.sum(np.power(ref_face_hash - cropped_hash, 2))
 
        if distance_1 <= 3:
            cv2.putText(frame, 'ref ', (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
        else:
            cv2.putText(frame, 'Nan ', (x, y + h + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)

    cv2.imshow('My FaceID', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        ret, frame = cap.read()
        break
But here the error is the following:
File "C:\Users\wefy2\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorflow\python\checkpoint\checkpoint.py", line 875, in assert_nontrivial_match
raise AssertionError(
AssertionError: Nothing except the root object matched a checkpointed value. Typically this means that the checkpoint does not match the Python program. The following objects have no matching checkpointed value: [<tf.Variable 'conv_pw_7_bn/gamma:0' shape=(512,) dtype=float32, numpy=
array([1.2445787 , 1.856859 , 0.9711799 , 1.2919407 , 0.46042076, ............. ])
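
This AssertionError means that nothing in the model matched a value stored in the checkpoint: the files under models\face_id_model\variables were written by the Sequential CNN from train_face_id.py, while this FaceID class is built around MobileNet, so no variable can line up (and calling load_weights and then Checkpoint.restore on the same prefix just attempts the same restore twice). A minimal sketch of surfacing the mismatch explicitly through the restore-status API; it assumes the FaceID class above and the SavedModel on disk:

Python
import tensorflow as tf

test_nn = FaceID()   # the MobileNet-based model defined above
ckpt = tf.train.Checkpoint(model=test_nn)

try:
    status = ckpt.restore('models\\face_id_model\\variables\\variables')
    # Either restore() or this assertion raises AssertionError when none of the
    # model's existing variables can be found in the checkpoint.
    status.assert_existing_objects_matched()
except AssertionError as err:
    print('checkpoint does not match this model:', err)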


Added 22 minutes later
I got the file path wrong in the first error; here is the actual error:

C:\Users\wefy2\AppData\Local\Programs\Python\Python310\python.exe C:/Users/wefy2/PycharmProjects/CNN-Facial-Recognition-master1/test_face_id.py
WARNING:tensorflow:From C:\Users\wefy2\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorflow\python\compat\v2_compat.py:107: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
WARNING:tensorflow:From C:\Users\wefy2\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\layers\normalization\batch_normalization.py:581: _colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
2023-04-11 16:44:46.546199: W tensorflow/c/c_api.cc:300] Operation '{name:'conv_dw_12_bn/gamma/Assign' id:1939 op device:{requested: '', assigned: ''} def:{{{node conv_dw_12_bn/gamma/Assign}} = AssignVariableOp[_has_manual_control_dependencies=true, dtype=DT_FLOAT, validate_shape=false](conv_dw_12_bn/gamma, conv_dw_12_bn/gamma/Initializer/ones)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
2023-04-11 16:44:47.776006: W tensorflow/core/framework/op_kernel.cc:1830] OP_REQUIRES failed at save_restore_v2_ops.cc:228 : NOT_FOUND: Key conv1/kernel not found in checkpoint
WARNING:tensorflow:Restoring an object-based checkpoint using a name-based saver. This may be somewhat fragile, and will re-build the Saver. Instead, consider loading object-based checkpoints using tf.train.Checkpoint().
Traceback (most recent call last):
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 1378, in _do_call
return fn(*args)
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 1361, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 1454, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._s ession, options, feed_dict,
tensorflow.python.framework.errors_impl. NotFoundError: Key conv1/kernel not found in checkpoint
[[{{node save/RestoreV2}}]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 1418, in restore
sess.run(self.saver_def.restore_op_name,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 968, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 1191, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 1371, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\client\sessio n.py", line 1397, in _do_call
raise type(e)(node_def, op, message) # pylint: disable=no-value-for-parameter
tensorflow.python.framework.errors_impl. NotFoundError: Graph execution error:

Detected at node 'save/RestoreV2' defined at (most recent call last):
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 66, in <module>
test_nn.load_network()
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 25, in load_network
saver = tf.train.Saver()
Node: 'save/RestoreV2'
Key conv1/kernel not found in checkpoint
[[{{node save/RestoreV2}}]]

Original stack trace for 'save/RestoreV2':
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 66, in <module>
test_nn.load_network()
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 25, in load_network
saver = tf.train.Saver()
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 934, in __init__
self.build()
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 946, in build
self._build(self._filename, build_save=True, build_restore=True)
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 974, in _build
self.saver_def = self._builder._build_internal( # pylint: disable=protected-access
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 543, in _build_internal
restore_op = self._AddRestoreOps(filename_tensor, saveables,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 360, in _AddRestoreOps
all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 611, in bulk_restore
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\ops\gen_io_op s.py", line 1604, in restore_v2
_, _, _op, _outputs = _op_def_library._apply_op_helper(
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\framework\op_ def_library.py", line 795, in _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\framework\ops .py", line 3814, in _create_op_internal
ret = Operation(


During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 66, in <module>
test_nn.load_network()
File "C:\Users\wefy2\PycharmProjects\CNN-Facial-Recognition-master1\test_face_id.py", line 27, in load_network
saver.restore(self.sess, path)
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 1444, in restore
self._object_restore_saver = saver_from_object_based_checkpoint(
File "C:\Users\wefy2\AppData\Local\Programs\P ython\Python310\lib\site-packages\tensorflow\python\training\save r.py", line 1826, in saver_from_object_based_checkpoint
raise errors.NotFoundError(
tensorflow.python.framework.errors_impl. NotFoundError:

Existing variables not in the checkpoint: conv1/kernel, conv1_bn/beta, conv1_bn/gamma, conv1_bn/moving_mean, conv1_bn/moving_variance, conv_dw_1/depthwise_kernel, conv_dw_10/depthwise_kernel, conv_dw_10_bn/beta, conv_dw_10_bn/gamma, conv_dw_10_bn/moving_mean, conv_dw_10_bn/moving_variance, conv_dw_11/depthwise_kernel, conv_dw_11_bn/beta, conv_dw_11_bn/gamma, conv_dw_11_bn/moving_mean, conv_dw_11_bn/moving_variance, conv_dw_12/depthwise_kernel, conv_dw_12_bn/beta, conv_dw_12_bn/gamma, conv_dw_12_bn/moving_mean, conv_dw_12_bn/moving_variance, conv_dw_13/depthwise_kernel, conv_dw_13_bn/beta, conv_dw_13_bn/gamma, conv_dw_13_bn/moving_mean, conv_dw_13_bn/moving_variance, conv_dw_1_bn/beta, conv_dw_1_bn/gamma, conv_dw_1_bn/moving_mean, conv_dw_1_bn/moving_variance, conv_dw_2/depthwise_kernel, conv_dw_2_bn/beta, conv_dw_2_bn/gamma, conv_dw_2_bn/moving_mean, conv_dw_2_bn/moving_variance, conv_dw_3/depthwise_kernel, conv_dw_3_bn/beta, conv_dw_3_bn/gamma, conv_dw_3_bn/moving_mean, conv_dw_3_bn/moving_variance, conv_dw_4/depthwise_kernel, conv_dw_4_bn/beta, conv_dw_4_bn/gamma, conv_dw_4_bn/moving_mean, conv_dw_4_bn/moving_variance, conv_dw_5/depthwise_kernel, conv_dw_5_bn/beta, conv_dw_5_bn/gamma, conv_dw_5_bn/moving_mean, conv_dw_5_bn/moving_variance, conv_dw_6/depthwise_kernel, conv_dw_6_bn/beta, conv_dw_6_bn/gamma, conv_dw_6_bn/moving_mean, conv_dw_6_bn/moving_variance, conv_dw_7/depthwise_kernel, conv_dw_7_bn/beta, conv_dw_7_bn/gamma, conv_dw_7_bn/moving_mean, conv_dw_7_bn/moving_variance, conv_dw_8/depthwise_kernel, conv_dw_8_bn/beta, conv_dw_8_bn/gamma, conv_dw_8_bn/moving_mean, conv_dw_8_bn/moving_variance, conv_dw_9/depthwise_kernel, conv_dw_9_bn/beta, conv_dw_9_bn/gamma, conv_dw_9_bn/moving_mean, conv_dw_9_bn/moving_variance, conv_pw_1/kernel, conv_pw_10/kernel, conv_pw_10_bn/beta, conv_pw_10_bn/gamma, conv_pw_10_bn/moving_mean, conv_pw_10_bn/moving_variance, conv_pw_11/kernel, conv_pw_11_bn/beta, conv_pw_11_bn/gamma, conv_pw_11_bn/moving_mean, conv_pw_11_bn/moving_variance, conv_pw_12/kernel, conv_pw_12_bn/beta, conv_pw_12_bn/gamma, conv_pw_12_bn/moving_mean, conv_pw_12_bn/moving_variance, conv_pw_13/kernel, conv_pw_13_bn/beta, conv_pw_13_bn/gamma, conv_pw_13_bn/moving_mean, conv_pw_13_bn/moving_variance, conv_pw_1_bn/beta, conv_pw_1_bn/gamma, conv_pw_1_bn/moving_mean, conv_pw_1_bn/moving_variance, conv_pw_2/kernel, conv_pw_2_bn/beta, conv_pw_2_bn/gamma, conv_pw_2_bn/moving_mean, conv_pw_2_bn/moving_variance, conv_pw_3/kernel, conv_pw_3_bn/beta, conv_pw_3_bn/gamma, conv_pw_3_bn/moving_mean, conv_pw_3_bn/moving_variance, conv_pw_4/kernel, conv_pw_4_bn/beta, conv_pw_4_bn/gamma, conv_pw_4_bn/moving_mean, conv_pw_4_bn/moving_variance, conv_pw_5/kernel, conv_pw_5_bn/beta, conv_pw_5_bn/gamma, conv_pw_5_bn/moving_mean, conv_pw_5_bn/moving_variance, conv_pw_6/kernel, conv_pw_6_bn/beta, conv_pw_6_bn/gamma, conv_pw_6_bn/moving_mean, conv_pw_6_bn/moving_variance, conv_pw_7/kernel, conv_pw_7_bn/beta, conv_pw_7_bn/gamma, conv_pw_7_bn/moving_mean, conv_pw_7_bn/moving_variance, conv_pw_8/kernel, conv_pw_8_bn/beta, conv_pw_8_bn/gamma, conv_pw_8_bn/moving_mean, conv_pw_8_bn/moving_variance, conv_pw_9/kernel, conv_pw_9_bn/beta, conv_pw_9_bn/gamma, conv_pw_9_bn/moving_mean, conv_pw_9_bn/moving_variance

Variables names when this checkpoint was written which don't exist now: Adam/m/conv2d/bias, Adam/m/conv2d/kernel, Adam/m/conv2d_1/bias, Adam/m/conv2d_1/kernel, Adam/m/conv2d_2/bias, Adam/m/conv2d_2/kernel, Adam/m/conv2d_3/bias, Adam/m/conv2d_3/kernel, Adam/m/dense/bias, Adam/m/dense/kernel, Adam/m/dense_1/bias, Adam/m/dense_1/kernel, Adam/v/conv2d/bias, Adam/v/conv2d/kernel, Adam/v/conv2d_1/bias, Adam/v/conv2d_1/kernel, Adam/v/conv2d_2/bias, Adam/v/conv2d_2/kernel, Adam/v/conv2d_3/bias, Adam/v/conv2d_3/kernel, Adam/v/dense/bias, Adam/v/dense/kernel, Adam/v/dense_1/bias, Adam/v/dense_1/kernel, conv2d/bias, conv2d/kernel, conv2d_1/bias, conv2d_1/kernel, conv2d_2/bias, conv2d_2/kernel, conv2d_3/bias, conv2d_3/kernel, count, iteration, learning_rate, total

(4 variable name(s) did match)

Could not find some variables in the checkpoint (see names above). Saver was attempting to load an object-based checkpoint (saved using tf.train.Checkpoint or tf.keras.Model.save_weights) using variable names. If the checkpoint was written with eager execution enabled, it's possible that variable names have changed (for example missing a '_1' suffix). It's also possible that there are new variables which did not exist when the checkpoint was written. You can construct a Saver(var_list=...) with only the variables which previously existed, and if variable names have changed you may need to make this a dictionary with the old names as keys. If you're using an Estimator, you'll need to return a tf.train.Saver inside a tf.train.Scaffold from your model_fn.

Process finished with exit code 1
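
The tail of this traceback spells out the mismatch: the checkpoint holds the variables of the Sequential CNN from train_face_id.py (conv2d*/dense* weights plus Adam slots), while the graph trying to restore them expects MobileNet variables (conv1/kernel, conv_dw_*, conv_pw_*). The weights can only go back into the same architecture that wrote them. One route, sketched after train_face_id.py above, is to load the whole SavedModel with tf.keras.models.load_model. Another, if re-saving is an option, is to write the weights as a plain TF checkpoint with save_weights and reload them into an identically built model. A minimal sketch under that assumption; build_face_id_model and the models/face_id_weights path are mine, not from the post:

Python
import tensorflow as tf

def build_face_id_model():
    # Must recreate exactly the Sequential architecture from train_face_id.py.
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu',
                               input_shape=(128, 128, 3)),
        tf.keras.layers.MaxPooling2D(2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
        tf.keras.layers.MaxPooling2D(2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Conv2D(32, 2, padding='same', activation='relu'),
        tf.keras.layers.MaxPooling2D(2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Conv2D(32, 2, padding='same', activation='relu'),
        tf.keras.layers.MaxPooling2D(2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Dense(128, activation='sigmoid'),
    ])

# In train_face_id.py, after model.fit(...):
#     model.save_weights('models/face_id_weights')   # writes .index/.data files
#
# At test time, rebuild the same architecture and load those weights:
test_model = build_face_id_model()
test_model.load_weights('models/face_id_weights')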