This repository was archived by the owner on Oct 25, 2024. It is now read-only.

Commit 2dfe386

update llava model import path. (#819)
1 parent 6bc9382 commit 2dfe386

4 files changed: +4 −5 lines changed


intel_extension_for_transformers/neural_chat/examples/finetuning/multi_modal/train.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@

 from transformers import AutoTokenizer, set_seed, BitsAndBytesConfig
 from transformers.integrations.deepspeed import is_deepspeed_available
-from intel_extension_for_transformers.transformers.modeling import LlavaMistralForCausalLM
+from intel_extension_for_transformers.transformers.modeling.llava_models import LlavaMistralForCausalLM
 from llava_utils import *

 if is_optimum_habana_available():

intel_extension_for_transformers/transformers/modeling/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -22,4 +22,3 @@
 from .modeling_auto import (AutoModel, AutoModelForCausalLM,
                             AutoModelForSeq2SeqLM, GPTBigCodeForCausalLM)

-from .llava_models.llava_mistral import LlavaMistralForCausalLM

intel_extension_for_transformers/transformers/modeling/llava_models/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -14,3 +14,5 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from .llava_mistral import LlavaMistralForCausalLM
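
With this re-export in place, downstream code imports the model class from the llava_models subpackage rather than from the top-level modeling package, whose re-export was removed above. A minimal sketch of the old versus new import path (surrounding usage is illustrative only):

    # Old path (removed in this commit):
    # from intel_extension_for_transformers.transformers.modeling import LlavaMistralForCausalLM

    # New path introduced by this commit:
    from intel_extension_for_transformers.transformers.modeling.llava_models import LlavaMistralForCausalLM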

tests/CI/test_llava.py

Lines changed: 1 addition & 3 deletions
@@ -1,8 +1,6 @@
-import sys
-sys.path.append("/data2/lkk/intel-extension-for-transformers")
 import os
 import unittest
-from intel_extension_for_transformers.transformers.modeling import LlavaMistralForCausalLM
+from intel_extension_for_transformers.transformers.modeling.llava_models import LlavaMistralForCausalLM
 from accelerate import init_empty_weights
 from transformers import AutoConfig, AutoTokenizer
 import torch
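
The updated CI test drops the hard-coded local sys.path entry and uses the same subpackage import. A minimal sketch of how these imports fit together, assuming LlavaMistralForCausalLM follows the usual Hugging Face from_config/init_empty_weights pattern suggested by the test's imports; the config repo id below is hypothetical and not taken from the actual test:

    import unittest

    from accelerate import init_empty_weights
    from transformers import AutoConfig

    from intel_extension_for_transformers.transformers.modeling.llava_models import LlavaMistralForCausalLM


    class TestLlavaImport(unittest.TestCase):
        def test_empty_weight_construction(self):
            # Hypothetical config id, used only to illustrate the pattern.
            config = AutoConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
            # Build the model skeleton on the meta device so no weights are
            # allocated or downloaded.
            with init_empty_weights():
                model = LlavaMistralForCausalLM(config)
            self.assertIsNotNone(model)


    if __name__ == "__main__":
        unittest.main()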
