Yuto-24 committed
Commit 330d5ef · 1 Parent(s): b8385c9

Update README.md

When I run the sample code, I get a 404 error because the model name on the Hub is different.

I added `-Chat` to the model name.
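
A quick way to confirm which repo id actually resolves on the Hub (a minimal sketch, assuming a `huggingface_hub` version recent enough to ship `repo_exists`):

```python
from huggingface_hub import repo_exists

# The id in the original README did not resolve at the time of this
# commit, hence the 404 in the traceback below.
print(repo_exists("OrionStarAI/Orion-14B"))       # False
print(repo_exists("OrionStarAI/Orion-14B-Chat"))  # True
```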

## Error Message

```sh
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
File /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py:286, in hf_raise_for_status(response, endpoint_name)
285 try:
--> 286 response.raise_for_status()
287 except HTTPError as e:

File /usr/local/lib/python3.10/dist-packages/requests/models.py:1021, in Response.raise_for_status(self)
1020 if http_error_msg:
-> 1021 raise HTTPError(http_error_msg, response=self)

HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/OrionStarAI/Orion-14B/resolve/main/tokenizer_config.json

The above exception was the direct cause of the following exception:

RepositoryNotFoundError Traceback (most recent call last)
File /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py:398, in cached_file(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash, **deprecated_kwargs)
396 try:
397 # Load from URL or cache if already cached
--> 398 resolved_file = hf_hub_download(
399 path_or_repo_id,
400 filename,
401 subfolder=None if len(subfolder) == 0 else subfolder,
402 repo_type=repo_type,
403 revision=revision,
404 cache_dir=cache_dir,
405 user_agent=user_agent,
406 force_download=force_download,
407 proxies=proxies,
408 resume_download=resume_download,
409 token=token,
410 local_files_only=local_files_only,
411 )
412 except GatedRepoError as e:

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py:118, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
116 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 118 return fn(*args, **kwargs)

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1368, in hf_hub_download(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, legacy_cache_layout, endpoint)
1366 elif isinstance(head_call_error, RepositoryNotFoundError) or isinstance(head_call_error, GatedRepoError):
1367 # Repo not found => let's raise the actual error
-> 1368 raise head_call_error
1369 else:
1370 # Otherwise: most likely a connection issue or Hub downtime => let's warn the user

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1238, in hf_hub_download(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, legacy_cache_layout, endpoint)
1237 try:
-> 1238 metadata = get_hf_file_metadata(
1239 url=url,
1240 token=token,
1241 proxies=proxies,
1242 timeout=etag_timeout,
1243 library_name=library_name,
1244 library_version=library_version,
1245 user_agent=user_agent,
1246 )
1247 except EntryNotFoundError as http_error:
1248 # Cache the non-existence of the file and raise

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py:118, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
116 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 118 return fn(*args, **kwargs)

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1631, in get_hf_file_metadata(url, token, proxies, timeout, library_name, library_version, user_agent)
1630 # Retrieve metadata
-> 1631 r = _request_wrapper(
1632 method="HEAD",
1633 url=url,
1634 headers=headers,
1635 allow_redirects=False,
1636 follow_relative_redirects=True,
1637 proxies=proxies,
1638 timeout=timeout,
1639 )
1640 hf_raise_for_status(r)

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:385, in _request_wrapper(method, url, follow_relative_redirects, **params)
384 if follow_relative_redirects:
--> 385 response = _request_wrapper(
386 method=method,
387 url=url,
388 follow_relative_redirects=False,
389 **params,
390 )
392 # If redirection, we redirect only relative paths.
393 # This is useful in case of a renamed repository.

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:409, in _request_wrapper(method, url, follow_relative_redirects, **params)
408 response = get_session().request(method=method, url=url, **params)
--> 409 hf_raise_for_status(response)
410 return response

File /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py:323, in hf_raise_for_status(response, endpoint_name)
315 message = (
316 f"{response.status_code} Client Error."
317 + "\n\n"
(...)
321 " make sure you are authenticated."
322 )
--> 323 raise RepositoryNotFoundError(message, response) from e
325 elif response.status_code == 400:

RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-6603a669-3c6b0736481796cc0aa2d220;6e11eeab-cb50-48a6-a37e-78204446bd12)

Repository Not Found for url: https://huggingface.co/OrionStarAI/Orion-14B/resolve/main/tokenizer_config.json.
Please make sure you specified the correct `repo_id` and `repo_type`.
If you are trying to access a private or gated repo, make sure you are authenticated.

The above exception was the direct cause of the following exception:

OSError Traceback (most recent call last)
Cell In[2], line 6
3 from transformers.generation.utils import GenerationConfig
5 model_name = "OrionStarAI/Orion-14B"
----> 6 tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, trust_remote_code=True)
7 model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto",
8 torch_dtype=torch.bfloat16, trust_remote_code=True)
10 model.generation_config = GenerationConfig.from_pretrained("OrionStarAI/Orion-14B")

File /usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py:779, in AutoTokenizer.from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs)
776 return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
778 # Next, let's try to use the tokenizer_config file to get the tokenizer class.
--> 779 tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
780 if "_commit_hash" in tokenizer_config:
781 kwargs["_commit_hash"] = tokenizer_config["_commit_hash"]

File /usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py:612, in get_tokenizer_config(pretrained_model_name_or_path, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, **kwargs)
609 token = use_auth_token
611 commit_hash = kwargs.get("_commit_hash", None)
--> 612 resolved_config_file = cached_file(
613 pretrained_model_name_or_path,
614 TOKENIZER_CONFIG_FILE,
615 cache_dir=cache_dir,
616 force_download=force_download,
617 resume_download=resume_download,
618 proxies=proxies,
619 token=token,
620 revision=revision,
621 local_files_only=local_files_only,
622 subfolder=subfolder,
623 _raise_exceptions_for_gated_repo=False,
624 _raise_exceptions_for_missing_entries=False,
625 _raise_exceptions_for_connection_errors=False,
626 _commit_hash=commit_hash,
627 )
628 if resolved_config_file is None:
629 logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.")

File /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py:421, in cached_file(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash, **deprecated_kwargs)
416 raise EnvironmentError(
417 "You are trying to access a gated repo.\nMake sure to have access to it at "
418 f"https://huggingface.co/{path_or_repo_id}.\n{str(e)}"
419 ) from e
420 except RepositoryNotFoundError as e:
--> 421 raise EnvironmentError(
422 f"{path_or_repo_id} is not a local folder and is not a valid model identifier "
423 "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token "
424 "having permission to this repo either by logging in with `huggingface-cli login` or by passing "
425 "`token=<your_token>`"
426 ) from e
427 except RevisionNotFoundError as e:
428 raise EnvironmentError(
429 f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
430 "for this model name. Check the model page at "
431 f"'https://huggingface.co/{path_or_repo_id}' for available revisions."
432 ) from e

OSError: OrionStarAI/Orion-14B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'
If this is a private repository, make sure to pas
```

Files changed (1)
  1. README.md +3 -3
README.md CHANGED

```diff
@@ -245,11 +245,11 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation.utils import GenerationConfig
 
-tokenizer = AutoTokenizer.from_pretrained("OrionStarAI/Orion-14B", use_fast=False, trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("OrionStarAI/Orion-14B", device_map="auto",
+tokenizer = AutoTokenizer.from_pretrained("OrionStarAI/Orion-14B-Chat", use_fast=False, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("OrionStarAI/Orion-14B-Chat", device_map="auto",
                                              torch_dtype=torch.bfloat16, trust_remote_code=True)
 
-model.generation_config = GenerationConfig.from_pretrained("OrionStarAI/Orion-14B")
+model.generation_config = GenerationConfig.from_pretrained("OrionStarAI/Orion-14B-Chat")
 messages = [{"role": "user", "content": "Hello, what is your name? "}]
 response = model.chat(tokenizer, messages, streaming=False)
 print(response)
```
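
For reference, the updated sample reads as one runnable block like this (the `model_name` variable and the continuation-line indentation are mine, following the cell shown in the traceback above):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig

# Repo id with the `-Chat` suffix, as fixed in this commit.
model_name = "OrionStarAI/Orion-14B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto",
                                             torch_dtype=torch.bfloat16, trust_remote_code=True)

model.generation_config = GenerationConfig.from_pretrained(model_name)
messages = [{"role": "user", "content": "Hello, what is your name? "}]
# `model.chat` is provided by the model's remote code (trust_remote_code=True).
response = model.chat(tokenizer, messages, streaming=False)
print(response)
```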