diff --git a/checkLora.py b/checkLora.py
index fb4e8a08efe371917263cf655cc6e6464d46d9a5..10ddf8ee678b31f9ef467e7bad1d2308edcad308 100644
--- a/checkLora.py
+++ b/checkLora.py
@@ -1,25 +1,5 @@
-from transformers import BertForSequenceClassification, BertTokenizer
-from peft import PeftModel
-import torch
+from peft import PeftConfig
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Load the base model (not BertModel, but BertForSequenceClassification!)
-base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased").to(device)
-
-# Load the LoRA adapter
-model = PeftModel.from_pretrained(base_model, "./fine-tuned-bert-lora").to(device)
-
-# Load the tokenizer
-tokenizer = BertTokenizer.from_pretrained("./fine-tuned-bert-lora")
-
-print("LoRA успешно загружена!")
-
-text = "This is a test prompt."
-inputs = tokenizer(text, return_tensors="pt").to(device)
-
-# Run the model
-with torch.no_grad():
-    outputs = model(**inputs)
-
-print(outputs)
\ No newline at end of file
+config = PeftConfig.from_pretrained("./fine-tuned-bert-lora")
+print("Базовая модель:", config.base_model_name_or_path)
+print("LoRA config:", config)
\ No newline at end of file
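
Note: the new script only inspects the adapter metadata; it no longer runs inference. If the full model is still needed, the base checkpoint can be resolved from the adapter config instead of being hard-coded, as the removed code did with "bert-base-uncased". A minimal sketch, assuming the adapter in ./fine-tuned-bert-lora was saved together with its tokenizer (as in the removed code):

from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftConfig, PeftModel

# Read the adapter config to find which base checkpoint it was trained on
config = PeftConfig.from_pretrained("./fine-tuned-bert-lora")

# Load the base model from that checkpoint rather than a hard-coded name
base = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter weights on top of the base model
model = PeftModel.from_pretrained(base, "./fine-tuned-bert-lora")
tokenizer = AutoTokenizer.from_pretrained("./fine-tuned-bert-lora")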