From dd1d51ae829afa68470bad256c507c88929f7c2e Mon Sep 17 00:00:00 2001
From: Pavel Dmitriev <162866971+kommentlezz@users.noreply.github.com>
Date: Tue, 24 Dec 2024 16:57:38 +0500
Subject: [PATCH] Fix typo: language

---
 labml_nn/lora/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labml_nn/lora/__init__.py b/labml_nn/lora/__init__.py
index 47d3c457..bd3c42c7 100644
--- a/labml_nn/lora/__init__.py
+++ b/labml_nn/lora/__init__.py
@@ -14,7 +14,7 @@
 
 Low-Rank Adaptation (LoRA) freezes pre-trained model weights and injects
 trainable rank decomposition matrices into each layer of the transformer.
-This makes it possible to efficiently fine-tune large langauge models by
+This makes it possible to efficiently fine-tune large language models by
 reducing trainable parameters by a large factor.
 
 Here's [the training code](experiment.html) for training a GPT2 model with LoRA
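
For context on the docstring being patched: it describes freezing the pre-trained weights and injecting trainable rank decomposition matrices. Below is a minimal PyTorch sketch of that idea, not the labml_nn implementation; the module name `LoRALinear` and the hyperparameters `r` and `alpha` are illustrative assumptions. The frozen weight W is augmented with a trainable low-rank update BA scaled by alpha/r.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class LoRALinear(nn.Module):
    """Hypothetical sketch: a frozen linear layer plus a trainable low-rank update."""

    def __init__(self, in_features: int, out_features: int, r: int = 8, alpha: int = 16):
        super().__init__()
        # Pre-trained weight and bias are frozen; in practice they would be
        # copied from the pre-trained model rather than randomly initialized.
        self.weight = nn.Parameter(torch.randn(out_features, in_features), requires_grad=False)
        self.bias = nn.Parameter(torch.zeros(out_features), requires_grad=False)
        # Trainable rank-r decomposition delta_W = B @ A. B starts at zero so the
        # adapted layer initially behaves exactly like the pre-trained one.
        self.lora_a = nn.Parameter(torch.randn(r, in_features) * 0.01)
        self.lora_b = nn.Parameter(torch.zeros(out_features, r))
        self.scaling = alpha / r

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Frozen path plus the scaled low-rank update (x @ A^T @ B^T).
        frozen = F.linear(x, self.weight, self.bias)
        update = F.linear(F.linear(x, self.lora_a), self.lora_b)
        return frozen + self.scaling * update
```

With this decomposition only `r * (in_features + out_features)` parameters are trained per layer instead of `in_features * out_features`, which is the "reducing trainable parameters by a large factor" mentioned in the docstring.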