import argparse
import re
import time
import json
import torch
from transformers import AutoConfig, AutoTokenizer
from transformers.generation import GenerationConfig
import intel_extension_for_pytorch as ipex
from intel_extension_for_transformers.transformers import AutoModelForCausalLM, WeightOnlyQuantConfig
from intel_extension_for_transformers.llm.quantization.utils import convert_dtype_str2torch
from transformers.utils import check_min_version

parser = argparse.ArgumentParser()
parser.add_argument(
    "--model", nargs="?", default="Qwen/Qwen-7B-Chat", const="Qwen/Qwen-7B-Chat"
)
parser.add_argument("--revision", default=None, type=str)
parser.add_argument("--trust_remote_code", default=True)
parser.add_argument(
    "--dataset", nargs="?", default="NeelNanda/pile-10k", const="NeelNanda/pile-10k"
)
parser.add_argument(
    "--max-new-tokens", default=32, type=int, help="maximum number of new tokens to generate"
)
parser.add_argument(
    "--num_beams", default=1, type=int, help="number of beams"
)
parser.add_argument("--output_dir", nargs="?", default="./saved_results")
parser.add_argument("--int8", action="store_true")
parser.add_argument(
    "--int8_bf16_mixed",
    action="store_true",
    help="int8 is mixed with fp32 by default; enable this to mix int8 with bf16 AMP instead (works on platforms such as SPR)",
)
parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of the PEFT model")
# ============Benchmark configs==============
parser.add_argument("--benchmark", action="store_true")
parser.add_argument("--do_profiling", action="store_true")
parser.add_argument("--profile_token_latency", action="store_true")
parser.add_argument("--iters", default=10, type=int, help="number of benchmark iterations")
parser.add_argument("--num_warmup", default=3, type=int, help="number of warmup iterations")
# ============Accuracy configs==============
parser.add_argument("--accuracy", action="store_true")
parser.add_argument("--batch_size", default=1, type=int,
                    help="batch size")
parser.add_argument("--save_accuracy_path", default=None,
                    help="path to save accuracy results")
parser.add_argument("--tasks", nargs='+', default=["lambada_openai"], type=str,
                    help="task list for accuracy validation")
# ============WeightOnlyQuant configs===============
parser.add_argument("--woq", action="store_true")
parser.add_argument("--woq_algo", default="RTN", choices=['RTN'],
                    help="weight-only quantization algorithm")
parser.add_argument("--woq_dtype", type=str, default="int4_fullrange",
                    choices=["int4_fullrange"])
parser.add_argument("--woq_group_size", type=int, default=32)
parser.add_argument("--woq_scheme", default="sym")
parser.add_argument("--woq_enable_mse_search", action="store_true")
parser.add_argument("--device", default="xpu")
parser.add_argument("--compute_dtype", default="fp16")
# ============BitsAndBytes configs==============
parser.add_argument("--bitsandbytes", action="store_true")
# NOTE: argparse's type=bool treats any non-empty string (including "False") as True,
# so these are plain on/off flags instead.
parser.add_argument("--load_in_4bit", action="store_true")
parser.add_argument("--load_in_8bit", action="store_true")
# =======================================
args = parser.parse_args()
torch_dtype = convert_dtype_str2torch(args.compute_dtype)

# transformers >= 4.32.0 ships the MPT modeling definition:
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/mpt/modeling_mpt.py
check_min_version("4.32.0")

# get model config
config = AutoConfig.from_pretrained(
    args.model,
    use_cache=True,  # use the KV cache
    trust_remote_code=args.trust_remote_code,
    revision=args.revision,
)
generation_config = GenerationConfig.from_pretrained(args.model, trust_remote_code=args.trust_remote_code)
generation_config.do_sample = False
user_model = None

# tokenizer
if config.model_type == "llama":
    from transformers import LlamaTokenizer
    tokenizer = LlamaTokenizer.from_pretrained(args.model)
else:
    tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=args.trust_remote_code)

quantization_config = None
if args.woq:
    quantization_config = WeightOnlyQuantConfig(
        compute_dtype=args.compute_dtype, weight_dtype=args.woq_dtype,
        group_size=args.woq_group_size, scale_dtype=args.compute_dtype
    )  # with the script defaults: fp16 compute (A16), int4 weights (W4), group size 32

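# Weight-only quantization (configured above) stores the weights in int4 while
# activations and compute stay in fp16, which mainly reduces memory footprint and
# bandwidth on the target device.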
# get model
if quantization_config is not None:
    user_model = AutoModelForCausalLM.from_pretrained(args.model,
                                                      device_map=args.device,
                                                      quantization_config=quantization_config,
                                                      trust_remote_code=args.trust_remote_code,
                                                      fp16=True,
                                                      use_llm_runtime=False
                                                      )
elif args.load_in_4bit or args.load_in_8bit:
    # 4-bit/8-bit loading is handled by intel-extension-for-transformers.
    user_model = AutoModelForCausalLM.from_pretrained(args.model,
                                                      device_map=args.device,
                                                      load_in_4bit=args.load_in_4bit,
                                                      load_in_8bit=args.load_in_8bit,
                                                      use_llm_runtime=False
                                                      )
if user_model is not None:
    user_model.save_pretrained(args.output_dir)
    tokenizer.save_pretrained(args.output_dir)

if args.benchmark:
    # Chinese benchmark prompt; roughly: "It's done, and it's submitted. You can play
    # Delicious Survival on Android and on the web. Playing on the web works, but you
    # have to simulate multi-touch to move the table."
    prompt = "它完成了,并提交了。你可以在Android和网络上玩美味生存。在网络上玩是有效的,但你必须模拟多次触摸才能移动桌子."

    input_size = tokenizer(prompt, return_tensors="pt").input_ids.size(dim=1)
    print("---- Prompt size:", input_size)

    user_model = AutoModelForCausalLM.from_pretrained(
        args.model, trust_remote_code=args.trust_remote_code, device_map=args.device, torch_dtype=torch_dtype) \
        if user_model is None else user_model
    # apply IPEX transformer-specific optimizations for the target device (weight-only-quant aware)
    user_model = ipex.optimize_transformers(
        user_model.eval(), device=args.device, inplace=True, woq=True, dtype=torch_dtype)
    # start
    num_iter = args.iters
    num_warmup = args.num_warmup
    prompt = [prompt] * args.batch_size
    amp_enabled = True
    amp_dtype = torch_dtype

    # temperature has no effect when do_sample=False (greedy / beam search)
    generate_kwargs = dict(do_sample=False, temperature=0.9, num_beams=args.num_beams)
    if args.profile_token_latency:
        generate_kwargs["token_latency"] = True

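    # With token_latency enabled, the ipex-optimized generate() is expected to return
    # a tuple of (generated_ids, per-token latencies); the output[0] / output[1]
    # indexing below relies on that shape.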
    total_time = 0.0
    total_list = []
    with torch.inference_mode(), torch.no_grad(), torch.autocast(
        device_type=args.device,
        enabled=amp_enabled,
        dtype=amp_dtype if amp_enabled else None,
    ):
        for i in range(num_iter + num_warmup):
            with torch.autograd.profiler_legacy.profile(enabled=args.do_profiling, use_xpu=(args.device=="xpu"), record_shapes=False) as prof:
                input_ids = tokenizer(
                    prompt, return_tensors="pt").input_ids.to(args.device)
                tic = time.time()
                output = user_model.generate(
                    input_ids, max_new_tokens=int(args.max_new_tokens), **generate_kwargs
                )
                toc = time.time()
                gen_ids = output[0] if args.profile_token_latency else output
                gen_text = tokenizer.batch_decode(
                    gen_ids, skip_special_tokens=True)
                if args.device == "xpu":
                    torch.xpu.synchronize()
            if args.do_profiling and i >= num_warmup and (i == num_warmup or i == num_iter + num_warmup - 1):
                print(f"Save pt for iter {i}")
                torch.save(prof.key_averages().table(
                    sort_by="self_xpu_time_total"), f"./profile_{i}.pt")
                # torch.save(prof.table(sort_by="id", row_limit=-1),
                #            './profile_id.pt')
                # torch.save(prof.key_averages(
                #            group_by_input_shape=True).table(), "./profile_detail.pt")
                prof.export_chrome_trace(f"./trace_{i}.json")
            input_tokens_lengths = [x.shape[0] for x in input_ids]
            output_tokens_lengths = [x.shape[0] for x in gen_ids]
            total_new_tokens = [
                o - i if user_model.config.model_type != "t5" else o
                for i, o in zip(input_tokens_lengths, output_tokens_lengths)
            ]
            print(gen_text, total_new_tokens, flush=True)
            print("Iteration: %d, Time: %.6f sec" % (i, toc - tic), flush=True)
            if i >= num_warmup:
                total_time += toc - tic
                if args.profile_token_latency:
                    total_list.append(output[1])

    print("\n", "-" * 10, "Summary:", "-" * 10)
    # the loop above runs num_warmup + num_iter iterations and only the last num_iter
    # of them are accumulated into total_time
    latency = total_time / num_iter
    print("Inference latency: %.5f sec." % latency)
    throughput = (args.max_new_tokens + input_size) / latency
    print("Average throughput: {} tokens/sec".format(throughput))

    if args.profile_token_latency:
        import numpy as np
        from itertools import chain

        first_latency = np.mean([x[0] for x in total_list])
        average_2n = list(chain(*[x[1:] for x in total_list]))
        average_2n.sort()
        average_2n_latency = np.mean(average_2n)
        print("First token average latency: %.5f sec." % first_latency)
        print("Average 2nd+ token latency: %.5f sec." % average_2n_latency)
        print(total_list)


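# Accuracy evaluation goes through the lm-eval wrapper bundled with
# intel-extension-for-transformers; the ipex-optimized user_model below is passed in
# directly, so it is evaluated as-is rather than being reloaded from model_args.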
if args.accuracy:
    from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
    user_model = AutoModelForCausalLM.from_pretrained(
        args.model, trust_remote_code=args.trust_remote_code, device_map=args.device, torch_dtype=torch_dtype) \
        if user_model is None else user_model
    user_model = ipex.optimize_transformers(
        user_model.eval(), device=args.device, inplace=True, woq=True, dtype=torch_dtype)
    results = evaluate(
        model="hf-causal",
        model_args='pretrained=' + args.model + ',tokenizer=' + args.model + ',dtype=float32',
        user_model=user_model,
        batch_size=args.batch_size,
        tasks=args.tasks,
        device=args.device
    )
    dumped = json.dumps(results, indent=2)
    if args.save_accuracy_path:
        with open(args.save_accuracy_path, "w") as f:
            f.write(dumped)
    for task_name in args.tasks:
        if task_name == "wikitext":
            print("Accuracy for %s is: %s" % (task_name, results["results"][task_name]["word_perplexity"]))
        else:
            print("Accuracy for %s is: %s" % (task_name, results["results"][task_name]["acc"]))

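# Example invocations (the script filename is illustrative):
#   python run_generation_gpu_woq.py --model Qwen/Qwen-7B-Chat --woq --benchmark
#   python run_generation_gpu_woq.py --model Qwen/Qwen-7B-Chat --woq --accuracy --tasks lambada_openai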