From 58c7849f55bdba23d2c9ab7184bec746d2ab1f67 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Sun, 25 May 2025 22:43:44 +0200 Subject: [PATCH 01/16] mtmd : allow multiple modalities at the same time --- convert_hf_to_gguf.py | 103 ++++-- gguf-py/gguf/constants.py | 1 + gguf-py/gguf/tensor_mapping.py | 5 + tools/mtmd/clip.cpp | 608 ++++++++++++++++++--------------- tools/mtmd/clip.h | 10 +- tools/mtmd/mtmd.cpp | 108 ++++-- 6 files changed, 493 insertions(+), 342 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 91af508a2fb28..27eb2a8f1cf22 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -432,6 +432,9 @@ def load_hparams(dir_model: Path): if "llm_config" in config: # rename for InternVL config["text_config"] = config["llm_config"] + if "thinker_config" in config: + # rename for Qwen2.5-Omni + config["text_config"] = config["thinker_config"]["text_config"] return config @classmethod @@ -1124,15 +1127,16 @@ class MmprojModel(ModelBase): has_vision_encoder: bool = True # by default has_audio_encoder: bool = False + # for models having multiple encoders, we need to separate their hparams + hparams_vision: dict[str, Any] | None = None + hparams_audio: dict[str, Any] | None = None + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.model_arch != gguf.MODEL_ARCH.MMPROJ: raise TypeError("MmprojModel must be subclassed with model_arch = gguf.MODEL_ARCH.MMPROJ") - if self.has_vision_encoder and self.has_audio_encoder: - raise NotImplementedError("both vision + audio not supported yet") - # get n_embd of the text model if "text_config" not in self.hparams: self.hparams["text_config"] = {} @@ -1143,22 +1147,33 @@ def __init__(self, *args, **kwargs): assert self.n_embd_text > 0, "n_embd not found in hparams" # move vision config to the top level, while preserving the original hparams in global_config - self.global_config = self.hparams - - if "vision_config" in self.hparams: - self.hparams = self.hparams["vision_config"] - elif "audio_config" in self.hparams: - self.hparams = self.hparams["audio_config"] - else: + import copy + self.global_config = copy.deepcopy(self.hparams) + self.hparams_vision = self.get_vision_config() + self.hparams_audio = self.get_audio_config() + + if self.hparams_vision is None and self.hparams_audio is None: raise ValueError("vision_config / audio_config not found in hparams") - self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"]) + # for compat with vision-only models + self.hparams = self.hparams_vision or self.hparams_audio or self.hparams + + # TODO @ngxson : this is a hack to support both vision and audio encoders + have_multiple_encoders = self.has_audio_encoder and self.has_vision_encoder + self.block_count = 128 if have_multiple_encoders else \ + self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"], True) self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count) # load preprocessor config with open(self.dir_model / "preprocessor_config.json", "r", encoding="utf-8") as f: self.preprocessor_config = json.load(f) + def get_vision_config(self) -> dict[str, Any] | None: + return self.global_config.get("vision_config") + + def get_audio_config(self) -> dict[str, Any] | None: + return self.global_config.get("audio_config") + def set_type(self): self.gguf_writer.add_type(gguf.GGUFType.MMPROJ) @@ -2674,7 +2689,12 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> 
Iter
         yield from super().modify_tensors(data_torch, name, bid)
 
 
-@ModelBase.register("Qwen2VLModel", "Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
+@ModelBase.register(
+    "Qwen2VLModel",
+    "Qwen2VLForConditionalGeneration",
+    "Qwen2_5_VLForConditionalGeneration",
+    "Qwen2_5OmniModel",
+)
 class Qwen2VLModel(TextModel):
     model_arch = gguf.MODEL_ARCH.QWEN2VL
 
@@ -2692,8 +2712,11 @@ def set_vocab(self):
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid # unused
-        if name.startswith("visual."):
-            # skip visual tensors
+        if name.startswith("thinker."):
+            name = name.replace("thinker.", "")
+        if name.startswith("visual") or name.startswith("audio") or \
+                name.startswith("talker") or name.startswith("token2wav"):
+            # skip multimodal tensors
             return []
         return [(self.map_tensor_name(name), data_torch)]
 
 
@@ -2702,21 +2725,27 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
 class Qwen2VLVisionModel(MmprojModel):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.hparams["image_size"] = self.hparams.get("image_size", 560)
+        assert self.hparams_vision is not None
+        self.hparams_vision["image_size"] = self.hparams_vision.get("image_size", 560)
         # rename config.json values
-        self.hparams["num_attention_heads"] = self.hparams.get("num_heads")
-        self.hparams["num_hidden_layers"] = self.hparams.get("depth")
-        if "embed_dim" in self.hparams: # qwen2vl
-            self.hparams["intermediate_size"] = self.hparams.get("hidden_size")
-            self.hparams["hidden_size"] = self.hparams.get("embed_dim")
+        self.hparams_vision["num_attention_heads"] = self.hparams_vision.get("num_heads")
+        self.hparams_vision["num_hidden_layers"] = self.hparams_vision.get("depth")
+        if "embed_dim" in self.hparams_vision: # qwen2vl
+            self.hparams_vision["intermediate_size"] = self.hparams_vision.get("hidden_size")
+            self.hparams_vision["hidden_size"] = self.hparams_vision.get("embed_dim")
 
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
-        hparams = self.hparams
-        if self.global_config['model_type'] == 'qwen2_vl':
+        assert self.hparams_vision is not None
+        hparams = self.hparams_vision
+        model_type = self.global_config['model_type']
+        if model_type == 'qwen2_vl':
             self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL)
-        elif self.global_config['model_type'] == 'qwen2_5_vl':
-            self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL)
+        elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni':
+            if model_type == 'qwen2_5_omni':
+                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O)
+            else:
+                self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL)
             self.gguf_writer.add_vision_use_silu(True)
             # find n_wa_pattern (window attention pattern)
             fullatt_block_indexes = hparams.get("fullatt_block_indexes")
@@ -2774,6 +2803,32 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         return [] # skip other tensors
 
 
+@ModelBase.register("Qwen2_5OmniModel")
+class Qwen25OmniModel(Qwen2VLVisionModel):
+    has_vision_encoder = True
+    has_audio_encoder = True
+
+    def get_vision_config(self) -> dict[str, Any] | None:
+        return self.global_config["thinker_config"].get("vision_config")
+
+    def get_audio_config(self) -> dict[str, Any] | None:
+        return self.global_config["thinker_config"].get("audio_config")
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, 
Tensor]]: + if name.startswith("thinker."): + name = name.replace("thinker.", "") + + if name.startswith("audio_tower"): + # process audio tensors + if "audio_bos_eos_token" in name: + # this tensor is left unused in transformers code + # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809 + return [] + return [(self.map_tensor_name(name), data_torch)] + + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("InternVisionModel") class InternVisionModel(MmprojModel): def set_gguf_parameters(self): diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index c6255d6867a15..31163effad8f2 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -2260,6 +2260,7 @@ class VisionProjectorType: ULTRAVOX = "ultravox" INTERNVL = "internvl" QWEN2A = "qwen2a" # audio + QWEN25O = "qwen2.5o" # omni # Items here are (block size, type size) diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 4a0615b656812..000ffd00615b5 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -1125,6 +1125,7 @@ class TensorNameMap: MODEL_TENSOR.A_POST_NORM: ( "audio_tower.layer_norm", # ultravox + "audio_tower.ln_post", # qwen2omni ), MODEL_TENSOR.A_ENC_ATTN_Q: ( @@ -1161,12 +1162,16 @@ class TensorNameMap: "audio_tower.layers.{bid}.fc2", # ultravox ), + # note: some tensors below has "audio." pseudo-prefix, to prevent conflicts with vision tensors + # this prefix is added in the conversion code in modify_tensors() + MODEL_TENSOR.A_MMPROJ: ( "audio.multi_modal_projector.linear_{bid}", # ultravox ), MODEL_TENSOR.A_MMPROJ_FC: ( "audio.multi_modal_projector.linear", # qwen2audio + "audio_tower.proj", # qwen2omni ), MODEL_TENSOR.A_MM_NORM_PRE: ( diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 6205dad5ae262..290ee33ae465a 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -166,9 +166,6 @@ enum patch_merge_type { }; struct clip_hparams { - bool has_vision = false; - bool has_audio = false; - int32_t image_size; int32_t patch_size; int32_t n_embd; @@ -178,9 +175,13 @@ struct clip_hparams { int32_t n_layer; int32_t proj_scale_factor = 0; // idefics3 + float image_mean[3]; + float image_std[3]; + // for models using dynamic image size, we need to have a smaller image size to warmup // otherwise, user will get OOM everytime they load the model int32_t warmup_image_size = 0; + int32_t warmup_audio_size = 3000; ffn_op_type ffn_op = FFN_GELU; @@ -199,6 +200,10 @@ struct clip_hparams { // audio int32_t n_mel_bins = 0; // whisper preprocessor int32_t proj_stack_factor = 0; // ultravox + + // legacy + bool has_llava_projector = false; + int minicpmv_version = 0; }; struct clip_layer { @@ -236,8 +241,10 @@ struct clip_layer { ggml_tensor * ls_2_w = nullptr; }; -struct clip_vision_model { - struct clip_hparams hparams; +struct clip_model { + clip_modality modality = CLIP_MODALITY_VISION; + projector_type proj_type = PROJECTOR_TYPE_MLP; + clip_hparams hparams; // embeddings ggml_tensor * class_embedding = nullptr; @@ -353,14 +360,7 @@ struct clip_vision_model { }; struct clip_ctx { - bool has_llava_projector = false; - int minicpmv_version = 0; - - struct clip_vision_model vision_model; - projector_type proj_type = PROJECTOR_TYPE_MLP; - - float image_mean[3]; - float image_std[3]; + clip_model model; gguf_context_ptr ctx_gguf; ggml_context_ptr ctx_data; @@ -414,11 +414,15 @@ struct clip_ctx { 
ggml_backend_free(backend_cpu); } } + + projector_type proj_type() const { + return model.proj_type; + } }; struct clip_graph { clip_ctx * ctx; - const clip_vision_model & model; + const clip_model & model; const clip_hparams & hparams; // we only support single image per batch @@ -441,7 +445,7 @@ struct clip_graph { clip_graph(clip_ctx * ctx, const clip_image_f32 & img) : ctx(ctx), - model(ctx->vision_model), + model(ctx->model), hparams(model.hparams), img(img), patch_size(hparams.patch_size), @@ -473,7 +477,7 @@ struct clip_graph { model.position_embeddings, nullptr); - if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { + if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) { const int batch_size = 1; GGML_ASSERT(n_patches_x == n_patches_y); const int patches_per_image = n_patches_x; @@ -496,7 +500,7 @@ struct clip_graph { ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)), cur); - } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) { // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578 const int scale_factor = model.hparams.proj_scale_factor; @@ -630,7 +634,7 @@ struct clip_graph { const int n_pos = n_patches; const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position - norm_type norm_t = ctx->proj_type == PROJECTOR_TYPE_QWEN25VL + norm_type norm_t = ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL ? NORM_TYPE_RMS // qwen 2.5 vl : NORM_TYPE_NORMAL; // qwen 2 vl @@ -846,11 +850,11 @@ struct clip_graph { const int d_head = 128; int n_head = n_embd/d_head; int num_query = 96; - if (ctx->minicpmv_version == 2) { + if (ctx->model.hparams.minicpmv_version == 2) { num_query = 96; - } else if (ctx->minicpmv_version == 3) { + } else if (ctx->model.hparams.minicpmv_version == 3) { num_query = 64; - } else if (ctx->minicpmv_version == 4) { + } else if (ctx->model.hparams.minicpmv_version == 4) { num_query = 64; } @@ -1067,7 +1071,7 @@ struct clip_graph { int il_last = hparams.n_layer - 1; int deepest_feature_layer = -1; - if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) { + if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) { il_last += 1; } @@ -1201,7 +1205,7 @@ struct clip_graph { } // llava projector (also used by granite) - if (ctx->has_llava_projector) { + if (ctx->model.hparams.has_llava_projector) { embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]); ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches); @@ -1215,7 +1219,7 @@ struct clip_graph { // print_tensor_info(embeddings, "embeddings"); // llava projector - if (ctx->proj_type == PROJECTOR_TYPE_MLP) { + if (ctx->proj_type() == PROJECTOR_TYPE_MLP) { embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); @@ -1225,7 +1229,7 @@ struct clip_graph { embeddings = ggml_add(ctx0, embeddings, model.mm_2_b); } } - else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) { + else if (ctx->proj_type() == PROJECTOR_TYPE_MLP_NORM) { embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false); @@ -1246,7 +1250,7 @@ struct clip_graph { embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w), model.mm_4_b); } - else if (ctx->proj_type == PROJECTOR_TYPE_LDP) { + else 
if (ctx->proj_type() == PROJECTOR_TYPE_LDP) { // MobileVLM projector int n_patch = 24; ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings); @@ -1356,7 +1360,7 @@ struct clip_graph { } embeddings = block_1; } - else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) + else if (ctx->proj_type() == PROJECTOR_TYPE_LDPV2) { int n_patch = 24; ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings); @@ -1386,7 +1390,7 @@ struct clip_graph { } // glm projector - else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) { + else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) { size_t gridsz = (size_t)sqrt(embeddings->ne[1]); embeddings = ggml_cont(ctx0, ggml_permute(ctx0,embeddings,1,0,2,3)); embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]); @@ -1473,7 +1477,7 @@ struct clip_graph { cb(cur, "after_transformer", -1); - if (ctx->proj_type == PROJECTOR_TYPE_ULTRAVOX) { + if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) { // StackAudioFrames // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py { @@ -1518,7 +1522,7 @@ struct clip_graph { cur = ggml_mul_mat(ctx0, model.mm_2_w, cur); } - } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2A) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) { // projector cur = ggml_mul_mat(ctx0, model.mm_fc_w, cur); cur = ggml_add(ctx0, cur, model.mm_fc_b); @@ -1668,7 +1672,7 @@ struct clip_graph { } // TODO @ngxson : find a way to move this outside - if (ctx->proj_type == PROJECTOR_TYPE_QWEN2A) { + if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) { ggml_tensor * cur = inpL; cur = ggml_transpose(ctx0, cur); cur = ggml_cont(ctx0, cur); @@ -1947,7 +1951,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 ggml_cgraph * res; - switch (ctx->proj_type) { + switch (ctx->proj_type()) { case PROJECTOR_TYPE_GEMMA3: case PROJECTOR_TYPE_IDEFICS3: { @@ -1991,13 +1995,15 @@ struct clip_model_loader { ggml_context_ptr ctx_meta; gguf_context_ptr ctx_gguf; - clip_ctx & ctx_clip; std::string fname; size_t model_size = 0; // in bytes - // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model - clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) { + bool has_vision = false; + bool has_audio = false; + + // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model + clip_model_loader(const char * fname) : fname(fname) { struct ggml_context * meta = nullptr; struct gguf_init_params params = { @@ -2029,6 +2035,19 @@ struct clip_model_loader { LOG_INF("\n"); } + // modalities + { + get_bool(KEY_HAS_VISION_ENC, has_vision, false); + get_bool(KEY_HAS_AUDIO_ENC, has_audio, false); + + if (has_vision) { + LOG_INF("%s: has vision encoder\n", __func__); + } + if (has_audio) { + LOG_INF("%s: has audio encoder\n", __func__); + } + } + // tensors { for (int i = 0; i < n_tensors; ++i) { @@ -2044,28 +2063,37 @@ struct clip_model_loader { } } - void load_hparams() { - auto & hparams = ctx_clip.vision_model.hparams; + void load_hparams(clip_model & model, clip_modality modality) { + auto & hparams = model.hparams; std::string log_ffn_op; // for logging + // sanity check + if (modality == CLIP_MODALITY_VISION) { + GGML_ASSERT(has_vision); + } else if (modality == CLIP_MODALITY_AUDIO) { + GGML_ASSERT(has_audio); + } + model.modality = modality; + + // projector type std::string proj_type; { get_string(KEY_PROJ_TYPE, proj_type, false); if (!proj_type.empty()) { - ctx_clip.proj_type = 
clip_projector_type_from_string(proj_type); + model.proj_type = clip_projector_type_from_string(proj_type); } - if (ctx_clip.proj_type == PROJECTOR_TYPE_UNKNOWN) { + if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) { throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str())); } } + const bool is_vision = model.modality == CLIP_MODALITY_VISION; + const bool is_audio = model.modality == CLIP_MODALITY_AUDIO; + // other hparams { - get_bool(KEY_HAS_AUDIO_ENC, hparams.has_audio, false); - get_bool(KEY_HAS_VISION_ENC, hparams.has_vision, false); - - const char * prefix = hparams.has_vision ? "vision" : "audio"; + const char * prefix = is_vision ? "vision" : "audio"; get_u32(string_format(KEY_N_EMBD, prefix), hparams.n_embd); get_u32(string_format(KEY_N_HEAD, prefix), hparams.n_head); get_u32(string_format(KEY_N_FF, prefix), hparams.n_ff); @@ -2073,27 +2101,27 @@ struct clip_model_loader { get_u32(string_format(KEY_PROJ_DIM, prefix), hparams.projection_dim); get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps); - if (hparams.has_vision) { + if (is_vision) { get_u32(KEY_IMAGE_SIZE, hparams.image_size); get_u32(KEY_PATCH_SIZE, hparams.patch_size); get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false); get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints, false); - get_i32(KEY_MINICPMV_VERSION, ctx_clip.minicpmv_version, false); // legacy + get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy - } else if (hparams.has_audio) { + } else if (is_audio) { get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins); } else { - throw std::runtime_error(string_format("%s: neither vision nor audio encoder is present\n", __func__)); + GGML_ASSERT(false && "unknown modality"); } // default warmup value hparams.warmup_image_size = hparams.image_size; - ctx_clip.has_llava_projector = ctx_clip.proj_type == PROJECTOR_TYPE_MLP - || ctx_clip.proj_type == PROJECTOR_TYPE_MLP_NORM - || ctx_clip.proj_type == PROJECTOR_TYPE_LDP - || ctx_clip.proj_type == PROJECTOR_TYPE_LDPV2; + hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP + || model.proj_type == PROJECTOR_TYPE_MLP_NORM + || model.proj_type == PROJECTOR_TYPE_LDP + || model.proj_type == PROJECTOR_TYPE_LDPV2; { bool use_gelu = false; @@ -2123,7 +2151,7 @@ struct clip_model_loader { } } - if (hparams.has_vision) { + if (is_vision) { int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN); int idx_std = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD); GGML_ASSERT(idx_mean >= 0 && "image_mean not found"); @@ -2131,8 +2159,8 @@ struct clip_model_loader { const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean); const float * std_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std); for (int i = 0; i < 3; ++i) { - ctx_clip.image_mean[i] = mean_data[i]; - ctx_clip.image_std[i] = std_data[i]; + hparams.image_mean[i] = mean_data[i]; + hparams.image_std[i] = std_data[i]; } } @@ -2149,11 +2177,11 @@ struct clip_model_loader { } // model-specific params - switch (ctx_clip.proj_type) { + switch (model.proj_type) { case PROJECTOR_TYPE_MINICPMV: { - if (ctx_clip.minicpmv_version == 0) { - ctx_clip.minicpmv_version = 2; // default to 2 if not set + if (hparams.minicpmv_version == 0) { + hparams.minicpmv_version = 2; // default to 2 if not set } } break; case PROJECTOR_TYPE_IDEFICS3: @@ -2212,7 +2240,7 @@ struct clip_model_loader { case PROJECTOR_TYPE_ULTRAVOX: case PROJECTOR_TYPE_QWEN2A: { - bool require_stack = 
ctx_clip.proj_type == PROJECTOR_TYPE_ULTRAVOX;
+                    bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX;
                     get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
                     if (hparams.n_mel_bins != 128) {
                         throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
@@ -2225,23 +2253,22 @@ struct clip_model_loader {
             }
 
             LOG_INF("%s: projector: %s\n", __func__, proj_type.c_str());
-            LOG_INF("%s: has_vision_encoder: %d\n", __func__, hparams.has_vision);
-            LOG_INF("%s: has_audio_encoder: %d\n", __func__, hparams.has_audio);
             LOG_INF("%s: n_embd: %d\n", __func__, hparams.n_embd);
             LOG_INF("%s: n_head: %d\n", __func__, hparams.n_head);
             LOG_INF("%s: n_ff: %d\n", __func__, hparams.n_ff);
             LOG_INF("%s: n_layer: %d\n", __func__, hparams.n_layer);
             LOG_INF("%s: ffn_op: %s\n", __func__, log_ffn_op.c_str());
             LOG_INF("%s: projection_dim: %d\n", __func__, hparams.projection_dim);
-            LOG_INF("\n");
-            if (hparams.has_vision) {
+            if (is_vision) {
+                LOG_INF("\n--- vision hparams ---\n");
                 LOG_INF("%s: image_size: %d\n", __func__, hparams.image_size);
                 LOG_INF("%s: patch_size: %d\n", __func__, hparams.patch_size);
-                LOG_INF("%s: has_llava_proj: %d\n", __func__, ctx_clip.has_llava_projector);
-                LOG_INF("%s: minicpmv_version: %d\n", __func__, ctx_clip.minicpmv_version);
+                LOG_INF("%s: has_llava_proj: %d\n", __func__, hparams.has_llava_projector);
+                LOG_INF("%s: minicpmv_version: %d\n", __func__, hparams.minicpmv_version);
                 LOG_INF("%s: proj_scale_factor: %d\n", __func__, hparams.proj_scale_factor);
                 LOG_INF("%s: n_wa_pattern: %d\n", __func__, hparams.n_wa_pattern);
-            } else if (hparams.has_audio) {
+            } else if (is_audio) {
+                LOG_INF("\n--- audio hparams ---\n");
                 LOG_INF("%s: n_mel_bins: %d\n", __func__, hparams.n_mel_bins);
                 LOG_INF("%s: proj_stack_factor: %d\n", __func__, hparams.proj_stack_factor);
             }
@@ -2251,13 +2278,14 @@ struct clip_model_loader {
         }
     }
 
-    void load_tensors() {
-        auto & hparams = ctx_clip.vision_model.hparams;
+    void load_tensors(clip_ctx & ctx_clip) {
+        auto & model = ctx_clip.model;
+        auto & hparams = model.hparams;
         std::map<std::string, size_t> tensor_offset;
        std::vector<std::string> tensors_to_load;
 
         // TODO @ngxson : support both audio and video in the future
-        const char * prefix = hparams.has_audio ? "a" : "v";
+        const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? 
"a" : "v"; // get offsets for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) { @@ -2292,26 +2320,24 @@ struct clip_model_loader { return cur; }; - auto & vision_model = ctx_clip.vision_model; // TODO: rename this to just "model" + model.class_embedding = get_tensor(TN_CLASS_EMBD, false); - vision_model.class_embedding = get_tensor(TN_CLASS_EMBD, false); + model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false); + model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false); - vision_model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false); - vision_model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false); + model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false); + model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"), false); - vision_model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false); - vision_model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"), false); + model.patch_bias = get_tensor(TN_PATCH_BIAS, false); + model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false); + model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false); - vision_model.patch_bias = get_tensor(TN_PATCH_BIAS, false); - vision_model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD, false); - vision_model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false); - - vision_model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false); + model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false); // layers - vision_model.layers.resize(hparams.n_layer); + model.layers.resize(hparams.n_layer); for (int il = 0; il < hparams.n_layer; ++il) { - auto & layer = vision_model.layers[il]; + auto & layer = model.layers[il]; layer.k_w = get_tensor(string_format(TN_ATTN_K, prefix, il, "weight")); layer.q_w = get_tensor(string_format(TN_ATTN_Q, prefix, il, "weight")); layer.v_w = get_tensor(string_format(TN_ATTN_V, prefix, il, "weight")); @@ -2352,166 +2378,166 @@ struct clip_model_loader { } } - switch (ctx_clip.proj_type) { + switch (model.proj_type) { case PROJECTOR_TYPE_MLP: case PROJECTOR_TYPE_MLP_NORM: { // LLaVA projection - vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false); - vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false); + model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false); + model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false); // Yi-type llava - vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false); - vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false); + model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false); + model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false); // missing in Yi-type llava - vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false); - vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false); + model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false); + model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false); // Yi-type llava - vision_model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false); - vision_model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false); - vision_model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false); - 
vision_model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false); - if (vision_model.mm_3_w) { + model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false); + model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false); + model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false); + model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false); + if (model.mm_3_w) { // TODO: this is a hack to support Yi-type llava - ctx_clip.proj_type = PROJECTOR_TYPE_MLP_NORM; + model.proj_type = PROJECTOR_TYPE_MLP_NORM; } - vision_model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false); + model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false); } break; case PROJECTOR_TYPE_LDP: { // MobileVLM projection - vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight")); - vision_model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias")); - vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight")); - vision_model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias")); - vision_model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight")); - vision_model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight")); - vision_model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias")); - vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight")); - vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias")); - vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight")); - vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias")); - vision_model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight")); - vision_model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight")); - vision_model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias")); - vision_model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight")); - vision_model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight")); - vision_model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias")); - vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight")); - vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias")); - vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight")); - vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias")); - vision_model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight")); - vision_model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight")); - vision_model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias")); + model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight")); + model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias")); + 
model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight")); + model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias")); + model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight")); + model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight")); + model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias")); + model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight")); + model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias")); + model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight")); + model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias")); + model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight")); + model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight")); + model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias")); + model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight")); + model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight")); + model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias")); + model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight")); + model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias")); + model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight")); + model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias")); + model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight")); + model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight")); + model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias")); } break; case PROJECTOR_TYPE_LDPV2: { // MobilVLM_V2 projection - vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight")); - vision_model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias")); - vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight")); - vision_model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias")); - vision_model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight")); - vision_model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias")); + model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight")); + model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias")); + model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight")); + model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias")); + model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight")); + model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias")); } break; case PROJECTOR_TYPE_MINICPMV: { - // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD); - vision_model.mm_model_pos_embed_k = 
get_tensor(TN_MINICPMV_POS_EMBD_K); - vision_model.mm_model_query = get_tensor(TN_MINICPMV_QUERY); - vision_model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ); - vision_model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ); - vision_model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight")); - vision_model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight")); - vision_model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight")); - vision_model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias")); - vision_model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias")); - vision_model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias")); - vision_model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight")); - vision_model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias")); - vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight")); - vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias")); - vision_model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight")); - vision_model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias")); - vision_model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight")); - vision_model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias")); + // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD); + model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K); + model.mm_model_query = get_tensor(TN_MINICPMV_QUERY); + model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ); + model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ); + model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight")); + model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight")); + model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight")); + model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias")); + model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias")); + model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias")); + model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight")); + model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias")); + model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight")); + model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias")); + model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight")); + model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias")); + model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight")); + model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias")); } break; case PROJECTOR_TYPE_GLM_EDGE: { - vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight")); - vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias")); - vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight")); - vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight")); - vision_model.mm_model_ln_q_b = 
get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias")); - vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight")); - vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight")); - vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight")); - vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight")); - vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight")); + model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight")); + model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias")); + model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight")); + model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight")); + model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias")); + model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight")); + model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight")); + model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight")); + model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight")); + model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight")); } break; case PROJECTOR_TYPE_QWEN2VL: case PROJECTOR_TYPE_QWEN25VL: { - vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight")); - vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias")); - vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight")); - vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias")); + model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight")); + model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias")); + model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight")); + model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias")); } break; case PROJECTOR_TYPE_GEMMA3: { - vision_model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ); - vision_model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N); + model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ); + model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N); } break; case PROJECTOR_TYPE_IDEFICS3: { - vision_model.projection = get_tensor(TN_MM_PROJECTOR); + model.projection = get_tensor(TN_MM_PROJECTOR); } break; case PROJECTOR_TYPE_PIXTRAL: { - vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight")); - vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false); - vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight")); - vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false); + model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight")); + model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false); + model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight")); + model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false); // [IMG_BREAK] token embedding - vision_model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK); + model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK); // for mistral small 3.1 - vision_model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false); - vision_model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false); + model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false); + model.mm_patch_merger_w = 
get_tensor(TN_MM_PATCH_MERGER, false); } break; case PROJECTOR_TYPE_ULTRAVOX: { - vision_model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight")); - vision_model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias")); - vision_model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight")); - vision_model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias")); - vision_model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight")); - vision_model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight")); - vision_model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight")); - vision_model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight")); + model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight")); + model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias")); + model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight")); + model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias")); + model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight")); + model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight")); + model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight")); + model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight")); } break; case PROJECTOR_TYPE_QWEN2A: { - vision_model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight")); - vision_model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias")); - vision_model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight")); - vision_model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias")); - vision_model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight")); - vision_model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias")); + model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight")); + model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias")); + model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight")); + model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias")); + model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight")); + model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias")); } break; case PROJECTOR_TYPE_INTERNVL: { - vision_model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight")); - vision_model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias")); - vision_model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight")); - vision_model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias")); - vision_model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight")); - vision_model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias")); + model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight")); + model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias")); + model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight")); + model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias")); + model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight")); + model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias")); } break; case PROJECTOR_TYPE_LLAMA4: { - vision_model.mm_model_proj = get_tensor(TN_MM_PROJECTOR); - vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight")); - vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight")); + model.mm_model_proj = get_tensor(TN_MM_PROJECTOR); + 
model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                 } break;
             default:
                 GGML_ASSERT(false && "unknown projector type");
@@ -2554,18 +2580,18 @@ struct clip_model_loader {
         }
     }
 
-    void alloc_compute_meta() {
-        const auto & hparams = ctx_clip.vision_model.hparams;
+    void alloc_compute_meta(clip_ctx & ctx_clip) {
+        const auto & hparams = ctx_clip.model.hparams;
         ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());
 
         // create a fake batch
         clip_image_f32_batch batch;
         clip_image_f32_ptr img(clip_image_f32_init());
-        if (hparams.has_vision) {
+        if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
             img->nx = hparams.warmup_image_size;
             img->ny = hparams.warmup_image_size;
         } else {
-            img->nx = 1024; // TODO @ngxson : use a better default
+            img->nx = hparams.warmup_audio_size;
             img->ny = hparams.n_mel_bins;
         }
         img->buf.resize(img->nx * img->ny * 3);
@@ -2646,23 +2672,40 @@ struct clip_model_loader {
     }
 };
 
-struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) {
+std::pair<clip_ctx *, clip_ctx *> clip_init(const char * fname, struct clip_context_params ctx_params) {
     g_logger_state.verbosity_thold = ctx_params.verbosity;
-    clip_ctx * ctx_clip = nullptr;
+    clip_ctx * ctx_vision = nullptr;
+    clip_ctx * ctx_audio = nullptr;
 
     try {
-        ctx_clip = new clip_ctx(ctx_params);
-        clip_model_loader loader(fname, *ctx_clip);
-        loader.load_hparams();
-        loader.load_tensors();
-        loader.alloc_compute_meta();
+        clip_model_loader loader(fname);
+
+        if (loader.has_vision) {
+            ctx_vision = new clip_ctx(ctx_params);
+            loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
+            loader.load_tensors(*ctx_vision);
+            loader.alloc_compute_meta(*ctx_vision);
+        }
+
+        if (loader.has_audio) {
+            ctx_audio = new clip_ctx(ctx_params);
+            loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
+            loader.load_tensors(*ctx_audio);
+            loader.alloc_compute_meta(*ctx_audio);
+        }
+
    } catch (const std::exception & e) {
         LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
-        delete ctx_clip;
-        return nullptr;
+        if (ctx_vision) {
+            delete ctx_vision;
+        }
+        if (ctx_audio) {
+            delete ctx_audio;
+        }
+        return {nullptr, nullptr};
     }
 
-    return ctx_clip;
+    return {ctx_vision, ctx_audio};
 }
 
 struct clip_image_size * clip_image_size_init() {
@@ -3023,12 +3066,12 @@ struct llava_uhd {
         const float ratio = (float)original_width * original_height / (slice_size * slice_size);
         const int multiple = fmin(ceil(ratio), max_slice_nums);
         const bool has_slices = (multiple > 1);
-        const bool has_pinpoints = !ctx->vision_model.hparams.image_grid_pinpoints.empty();
+        const bool has_pinpoints = !ctx->model.hparams.image_grid_pinpoints.empty();
 
         if (has_pinpoints) {
             // has pinpoints, use them to calculate the grid size (e.g. 
llava-1.6) auto refine_size = llava_uhd::select_best_resolution( - ctx->vision_model.hparams.image_grid_pinpoints, + ctx->model.hparams.image_grid_pinpoints, original_size); res.overview_size = clip_image_size{slice_size, slice_size}; res.refined_size = refine_size; @@ -3250,7 +3293,7 @@ struct llava_uhd { bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) { clip_image_size original_size{img->nx, img->ny}; bool pad_to_square = true; - auto & params = ctx->vision_model.hparams; + auto & params = ctx->model.hparams; // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) { pad_to_square = false; @@ -3263,7 +3306,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str for (size_t i = 0; i < imgs.size(); ++i) { // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp"); clip_image_f32_ptr res(clip_image_f32_init()); - normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std); + normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std); res_imgs->entries.push_back(std::move(res)); } @@ -3271,7 +3314,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str res_imgs->grid_y = inst.grid_size.height; return true; - } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) { clip_image_u8 resized; auto patch_size = params.patch_size * 2; auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size); @@ -3279,42 +3322,42 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str clip_image_f32_ptr img_f32(clip_image_f32_init()); // clip_image_f32_ptr res(clip_image_f32_init()); - normalize_image_u8_to_f32(resized, *img_f32, ctx->image_mean, ctx->image_std); + normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std); // res_imgs->data[0] = *res; res_imgs->entries.push_back(std::move(img_f32)); return true; } - else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE - || ctx->proj_type == PROJECTOR_TYPE_GEMMA3 - || ctx->proj_type == PROJECTOR_TYPE_IDEFICS3 - || ctx->proj_type == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution + else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE + || ctx->proj_type() == PROJECTOR_TYPE_GEMMA3 + || ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3 + || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution ) { clip_image_u8 resized_image; int sz = params.image_size; image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz}); clip_image_f32_ptr img_f32(clip_image_f32_init()); //clip_image_save_to_bmp(resized_image, "resized.bmp"); - normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std); + normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std); res_imgs->entries.push_back(std::move(img_f32)); return true; - } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL) { clip_image_u8 resized_image; auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size); 
image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
 
         clip_image_f32_ptr img_f32(clip_image_f32_init());
-        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
+        normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
         res_imgs->entries.push_back(std::move(img_f32));
         return true;
 
-    } else if (ctx->proj_type == PROJECTOR_TYPE_LLAMA4) {
+    } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) {
         GGML_ASSERT(!params.image_grid_pinpoints.empty());
         auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
         std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
 
         for (size_t i = 0; i < imgs.size(); ++i) {
             clip_image_f32_ptr res(clip_image_f32_init());
-            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
+            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
             res_imgs->entries.push_back(std::move(res));
         }
@@ -3344,7 +3387,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);
 
         clip_image_f32_ptr res(clip_image_f32_init());
-        normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
+        normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
         res_imgs->entries.push_back(std::move(res));
         return true;
 
@@ -3356,7 +3399,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         for (size_t i = 0; i < imgs.size(); ++i) {
             // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
             clip_image_f32_ptr res(clip_image_f32_init());
-            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
+            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
             res_imgs->entries.push_back(std::move(res));
         }
 
@@ -3368,7 +3411,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
 }
 
 ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
-    return ctx->vision_model.image_newline;
+    return ctx->model.image_newline;
 }
 
 void clip_free(clip_ctx * ctx) {
@@ -3380,8 +3423,8 @@ void clip_free(clip_ctx * ctx) {
 
 // deprecated
 size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
-    const int32_t nx = ctx->vision_model.hparams.image_size;
-    const int32_t ny = ctx->vision_model.hparams.image_size;
+    const int32_t nx = ctx->model.hparams.image_size;
+    const int32_t ny = ctx->model.hparams.image_size;
     return clip_embd_nbytes_by_img(ctx, nx, ny);
 }
 
@@ -3393,99 +3436,99 @@ size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h
 }
 
 int32_t clip_get_image_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.image_size;
+    return ctx->model.hparams.image_size;
 }
 
 int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.patch_size;
+    return ctx->model.hparams.patch_size;
 }
 
 int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.n_embd;
+    return ctx->model.hparams.n_embd;
 }
 
 const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
+    return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? 
"spatial_unpad" : "flat"; } const int32_t * clip_image_grid(const struct clip_ctx * ctx) { - if (ctx->vision_model.hparams.image_grid_pinpoints.size()) { - return &ctx->vision_model.hparams.image_grid_pinpoints.front(); + if (ctx->model.hparams.image_grid_pinpoints.size()) { + return &ctx->model.hparams.image_grid_pinpoints.front(); } return nullptr; } size_t get_clip_image_grid_size(const struct clip_ctx * ctx) { - return ctx->vision_model.hparams.image_grid_pinpoints.size(); + return ctx->model.hparams.image_grid_pinpoints.size(); } int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) { - const auto & params = ctx->vision_model.hparams; + const auto & params = ctx->model.hparams; const int n_total = clip_n_output_tokens(ctx, img); - if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) { + if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) { return img->nx / (params.patch_size * 2) + (int)(img->nx % params.patch_size > 0); } return n_total; } int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) { - const auto & params = ctx->vision_model.hparams; - if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) { + const auto & params = ctx->model.hparams; + if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) { return img->ny / (params.patch_size * 2) + (int)(img->ny % params.patch_size > 0); } return 1; } int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) { - const auto & params = ctx->vision_model.hparams; + const auto & params = ctx->model.hparams; int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size); - int scale_factor = ctx->vision_model.hparams.proj_scale_factor; + int scale_factor = ctx->model.hparams.proj_scale_factor; - if (ctx->proj_type == PROJECTOR_TYPE_LDP - || ctx->proj_type == PROJECTOR_TYPE_LDPV2 - || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) { + if (ctx->proj_type() == PROJECTOR_TYPE_LDP + || ctx->proj_type() == PROJECTOR_TYPE_LDPV2 + || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) { n_patches /= 4; - if (ctx->vision_model.mm_glm_tok_boi) { + if (ctx->model.mm_glm_tok_boi) { n_patches += 2; // for BOI and EOI token embeddings } - } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) { - if (ctx->minicpmv_version == 2) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) { + if (params.minicpmv_version == 2) { n_patches = 96; } - else if (ctx->minicpmv_version == 3) { + else if (params.minicpmv_version == 3) { n_patches = 64; } - else if (ctx->minicpmv_version == 4) { + else if (params.minicpmv_version == 4) { n_patches = 64; } else { GGML_ABORT("Unknown minicpmv version"); } - } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) { int patch_size = params.patch_size * 2; int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0); int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0); n_patches = x_patch * y_patch; - } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) { int n_per_side = params.image_size / params.patch_size; int n_per_side_2d_pool = n_per_side / params.proj_scale_factor; n_patches = n_per_side_2d_pool * n_per_side_2d_pool; - } else 
if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3 || ctx->proj_type == PROJECTOR_TYPE_INTERNVL) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3 || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL) { // both W and H are divided by proj_scale_factor n_patches /= (params.proj_scale_factor * params.proj_scale_factor); - } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL) { int n_merge = params.spatial_merge_size; int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1); int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1); n_patches = n_patches_y*n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row - } else if (ctx->proj_type == PROJECTOR_TYPE_LLAMA4) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) { n_patches /= (scale_factor * scale_factor); - } else if (ctx->proj_type == PROJECTOR_TYPE_ULTRAVOX) { - const int proj_stack_factor = ctx->vision_model.hparams.proj_stack_factor; + } else if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) { + const int proj_stack_factor = ctx->model.hparams.proj_stack_factor; const int n_len = CLIP_ALIGN(img->nx, proj_stack_factor); n_patches = n_len / proj_stack_factor / 2; - } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2A) { + } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) { // divide by 2 because of whisper // another divide by 2 because of nn.AvgPool1d(2, stride=2) n_patches = img->nx / 4; @@ -3606,7 +3649,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima ggml_backend_sched_alloc_graph(ctx->sched.get(), gf); // set inputs - const auto & model = ctx->vision_model; + const auto & model = ctx->model; const auto & hparams = model.hparams; const int image_size_width = imgs.entries[0]->nx; @@ -3696,7 +3739,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima } // set input per projector - switch (ctx->proj_type) { + switch (ctx->proj_type()) { case PROJECTOR_TYPE_MINICPMV: { // inspired from siglip: @@ -3961,80 +4004,81 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima } int clip_n_mmproj_embd(const struct clip_ctx * ctx) { - switch (ctx->proj_type) { + const auto & hparams = ctx->model.hparams; + switch (ctx->proj_type()) { case PROJECTOR_TYPE_LDP: - return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0]; + return ctx->model.mm_model_block_1_block_2_1_b->ne[0]; case PROJECTOR_TYPE_LDPV2: - return ctx->vision_model.mm_model_peg_0_b->ne[0]; + return ctx->model.mm_model_peg_0_b->ne[0]; case PROJECTOR_TYPE_MLP: case PROJECTOR_TYPE_PIXTRAL: - return ctx->vision_model.mm_2_w->ne[1]; + return ctx->model.mm_2_w->ne[1]; case PROJECTOR_TYPE_MLP_NORM: - return ctx->vision_model.mm_3_b->ne[0]; + return ctx->model.mm_3_b->ne[0]; case PROJECTOR_TYPE_MINICPMV: - if (ctx->minicpmv_version == 2) { + if (hparams.minicpmv_version == 2) { return 4096; - } else if (ctx->minicpmv_version == 3) { + } else if (hparams.minicpmv_version == 3) { return 3584; - } else if (ctx->minicpmv_version == 4) { + } else if (hparams.minicpmv_version == 4) { return 3584; } GGML_ABORT("Unknown minicpmv version"); case PROJECTOR_TYPE_GLM_EDGE: - return ctx->vision_model.mm_model_mlp_3_w->ne[1]; + return ctx->model.mm_model_mlp_3_w->ne[1]; case PROJECTOR_TYPE_QWEN2VL: case PROJECTOR_TYPE_QWEN25VL: - return ctx->vision_model.mm_1_b->ne[0]; + return ctx->model.mm_1_b->ne[0]; case PROJECTOR_TYPE_GEMMA3: - return ctx->vision_model.mm_input_proj_w->ne[0]; + 
return ctx->model.mm_input_proj_w->ne[0]; case PROJECTOR_TYPE_IDEFICS3: - return ctx->vision_model.projection->ne[1]; + return ctx->model.projection->ne[1]; case PROJECTOR_TYPE_ULTRAVOX: - return ctx->vision_model.mm_2_w->ne[1]; + return ctx->model.mm_2_w->ne[1]; case PROJECTOR_TYPE_INTERNVL: - return ctx->vision_model.mm_3_w->ne[1]; + return ctx->model.mm_3_w->ne[1]; case PROJECTOR_TYPE_LLAMA4: - return ctx->vision_model.mm_model_proj->ne[1]; + return ctx->model.mm_model_proj->ne[1]; case PROJECTOR_TYPE_QWEN2A: - return ctx->vision_model.mm_fc_w->ne[1]; + return ctx->model.mm_fc_w->ne[1]; default: GGML_ABORT("Unknown projector type"); } } int clip_is_minicpmv(const struct clip_ctx * ctx) { - if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) { - return ctx->minicpmv_version; + if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) { + return ctx->model.hparams.minicpmv_version; } return 0; } bool clip_is_glm(const struct clip_ctx * ctx) { - return ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE; + return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE; } bool clip_is_qwen2vl(const struct clip_ctx * ctx) { - return ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL; + return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL; } bool clip_is_llava(const struct clip_ctx * ctx) { - return ctx->has_llava_projector; + return ctx->model.hparams.has_llava_projector; } bool clip_is_gemma3(const struct clip_ctx * ctx) { - return ctx->proj_type == PROJECTOR_TYPE_GEMMA3; + return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3; } bool clip_has_vision_encoder(const struct clip_ctx * ctx) { - return ctx->vision_model.hparams.has_vision; + return ctx->model.modality == CLIP_MODALITY_VISION; } bool clip_has_audio_encoder(const struct clip_ctx * ctx) { - return ctx->vision_model.hparams.has_audio; + return ctx->model.modality == CLIP_MODALITY_AUDIO; } bool clip_has_whisper_encoder(const struct clip_ctx * ctx) { - return ctx->proj_type == PROJECTOR_TYPE_ULTRAVOX || ctx->proj_type == PROJECTOR_TYPE_QWEN2A; + return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A; } bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) { @@ -4055,7 +4099,7 @@ bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, // projector_type clip_get_projector_type(const struct clip_ctx * ctx) { - return ctx->proj_type; + return ctx->proj_type(); } void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
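Note: in the Qwen2-VL/Qwen2.5-VL branch of clip_n_output_tokens above, tokens are counted on a ceil-divided grid of 2x2-merged patches. A worked sketch of that arithmetic, assuming an illustrative patch_size of 14 and a 448x448 input (neither value is taken from this patch):

    // illustrative arithmetic for the PROJECTOR_TYPE_QWEN2VL path
    int patch_size = 14 * 2;                                      // params.patch_size * 2
    int x_patch = 448 / patch_size + (int)(448 % patch_size > 0); // = 16
    int y_patch = 448 / patch_size + (int)(448 % patch_size > 0); // = 16
    int n_patches = x_patch * y_patch;                            // = 256 output tokens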
diff --git a/tools/mtmd/clip.h b/tools/mtmd/clip.h index 5abfcd1a3c418..c02cb2cbb983c 100644 --- a/tools/mtmd/clip.h +++ b/tools/mtmd/clip.h @@ -3,6 +3,7 @@ #include "ggml.h" #include <stddef.h> #include <stdint.h> +#include <utility> // !!! Internal header, to be used by mtmd only !!! @@ -17,12 +18,19 @@ struct clip_image_f32; struct clip_image_u8_batch; struct clip_image_f32_batch; +enum clip_modality { + CLIP_MODALITY_VISION, + CLIP_MODALITY_AUDIO, +}; + struct clip_context_params { bool use_gpu; + enum clip_modality modality; enum ggml_log_level verbosity; }; -struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params); +// returns pair of contexts +std::pair<clip_ctx *, clip_ctx *> clip_init(const char * fname, struct clip_context_params ctx_params); void clip_free(struct clip_ctx * ctx);
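Note: a minimal sketch of how a caller consumes the pair-returning clip_init, mirroring the mtmd.cpp changes below. The model file name is hypothetical, and the sketch assumes clip_free tolerates a null pointer (the mtmd_context destructor below relies on the same behavior); the new modality field is not set here, since mtmd.cpp below also leaves it to the loader:

    clip_context_params cparams;
    cparams.use_gpu   = true;
    cparams.verbosity = GGML_LOG_LEVEL_INFO;
    auto res = clip_init("mmproj.gguf", cparams); // hypothetical file name
    clip_ctx * ctx_v = res.first;                 // vision context, may be null
    clip_ctx * ctx_a = res.second;                // audio context, may be null
    if (!ctx_v && !ctx_a) {
        // neither modality could be loaded from the GGUF
    }
    // ... encode images with ctx_v, audio with ctx_a ...
    clip_free(ctx_v);
    clip_free(ctx_a);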
diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index c3be91265f331..c78c98d40e122 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -95,15 +95,14 @@ mtmd_context_params mtmd_context_params_default() { } struct mtmd_context { - struct clip_ctx * ctx_clip; + struct clip_ctx * ctx_v; // vision + struct clip_ctx * ctx_a; // audio const struct llama_model * text_model; std::vector<float> image_embd_v; // image embedding vector bool print_timings; int n_threads; std::string media_marker; - bool has_vision; - bool has_audio; // for llava-uhd style models, we need special tokens in-between slices // minicpmv calls them "slices", llama 4 calls them "tiles" mtmd_slice_tmpl slice_tmpl = MTMD_SLICE_TMPL_NONE; @@ -141,11 +140,14 @@ struct mtmd_context { clip_context_params ctx_clip_params; ctx_clip_params.use_gpu = ctx_params.use_gpu; ctx_clip_params.verbosity = ctx_params.verbosity; - ctx_clip = clip_init(mmproj_fname, ctx_clip_params); - if (!ctx_clip) { + auto res = clip_init(mmproj_fname, ctx_clip_params); + ctx_v = res.first; + ctx_a = res.second; + if (!ctx_v && !ctx_a) { throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname)); } + clip_ctx * ctx_clip = get_clip_ctx(); if (llama_model_n_embd(text_model) != clip_n_mmproj_embd(ctx_clip)) { throw std::runtime_error(string_format( "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n" @@ -153,9 +155,7 @@ struct mtmd_context { llama_model_n_embd(text_model), clip_n_mmproj_embd(ctx_clip))); } - has_vision = clip_has_vision_encoder(ctx_clip); - has_audio = clip_has_audio_encoder(ctx_clip); - use_mrope = clip_is_qwen2vl(ctx_clip); + use_mrope = clip_is_qwen2vl(ctx_clip); projector_type proj = clip_get_projector_type(ctx_clip); int minicpmv_version = clip_is_minicpmv(ctx_clip); @@ -203,7 +203,7 @@ struct mtmd_context { ov_img_first = false; // overview image is last } - if (clip_has_whisper_encoder(ctx_clip)) { + if (ctx_a && clip_has_whisper_encoder(ctx_a)) { // TODO @ngxson : check if model n_mel is 128 or 80 w_filters = whisper_precalc_filters::get_128_bins(); } @@ -213,14 +213,40 @@ struct mtmd_context { LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n" " https://github.com/ggml-org/llama.cpp/pull/13282\n", __func__); } - if (has_audio) { + if (ctx_a) { LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n" " https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__); } } + // get the main clip ctx + clip_ctx * get_clip_ctx() const { + return ctx_v ? ctx_v : ctx_a; + } + + // get clip ctx based on chunk type + clip_ctx * get_clip_ctx(const mtmd_input_chunk * chunk) const { + if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) { + return ctx_v; + } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) { + return ctx_a; + } + GGML_ABORT("unknown chunk type"); + } + + // both audio and vision contexts have the same projector type + projector_type proj_type() const { + return clip_get_projector_type(get_clip_ctx()); + } + + // both audio and vision contexts have the same n_embd output dimension + int n_embd_projected() const { + return clip_n_mmproj_embd(get_clip_ctx()); + } + ~mtmd_context() { - clip_free(ctx_clip); + clip_free(ctx_a); + clip_free(ctx_v); } private: @@ -296,14 +322,14 @@ int32_t mtmd_tokenize(mtmd_context * ctx, std::string prompt_modified(text->text); std::string marker_modified(ctx->media_marker); - projector_type proj_type = clip_get_projector_type(ctx->ctx_clip); + projector_type proj_type = ctx->proj_type(); // for compatibility, we convert image marker to media marker string_replace_all(prompt_modified, MTMD_DEFAULT_IMAGE_MARKER, ctx->media_marker); // a bit hacky here, but works for now // for some models, we need to add prefix and suffix to the image embeddings - if (clip_is_gemma3(ctx->ctx_clip)) { + if (proj_type == PROJECTOR_TYPE_GEMMA3) { // gemma 3 // <start_of_image> ... (image embeddings) ... <end_of_image> marker_modified = "<start_of_image>" + ctx->media_marker + "<end_of_image>"; @@ -362,12 +388,12 @@ int32_t mtmd_tokenize(mtmd_context * ctx, }; // utility for splitting batch of multiple images into chunks of batch having single images - auto split_batch_to_chunk = [&ctx](clip_image_f32_batch && batch_f32, const std::string & id) { + auto split_batch_to_chunk = [](clip_ctx * ctx_clip, clip_image_f32_batch && batch_f32, const std::string & id) { std::vector<mtmd_input_chunk> chunks; for (auto & entry : batch_f32.entries) { mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); - image_tokens->nx = clip_n_output_tokens(ctx->ctx_clip, entry.get()); + image_tokens->nx = clip_n_output_tokens(ctx_clip, entry.get()); image_tokens->ny = 1; image_tokens->batch_f32.entries.push_back(std::move(entry)); image_tokens->id = id; @@ -413,7 +439,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, return 1; } - if (!ctx->has_vision) { + if (!ctx->ctx_v) { LOG_ERR("%s: error: model does not support vision input\n", __func__); return 2; } @@ -427,7 +453,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, // preprocess image clip_image_f32_batch batch_f32; - bool ok = clip_image_preprocess(ctx->ctx_clip, img_u8.get(), &batch_f32); + bool ok = clip_image_preprocess(ctx->ctx_v, img_u8.get(), &batch_f32); if (!ok) { LOG_ERR("Unable to preprocess image\n"); return 2; } @@ -440,7 +466,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, || ctx->slice_tmpl == MTMD_SLICE_TMPL_LLAMA4 ) { // split batch into chunks of single images - auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmaps[i_bm]->id); + auto chunks = split_batch_to_chunk(ctx->ctx_v, std::move(batch_f32), bitmaps[i_bm]->id); GGML_ASSERT(chunks.size() > 0); auto ov_chunk = std::move(chunks.front()); @@ -501,14 +527,14 @@ int32_t mtmd_tokenize(mtmd_context * ctx, } else { size_t n_tokens = 0; for (const auto & entry : batch_f32.entries) { - n_tokens += clip_n_output_tokens(ctx->ctx_clip, entry.get()); + n_tokens += clip_n_output_tokens(ctx->ctx_v, entry.get()); } mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); if (ctx->use_mrope) { // for Qwen2VL, we need this information for M-RoPE decoding positions - image_tokens->nx = 
clip_n_output_tokens_x(ctx->ctx_clip, batch_f32.entries[0].get()); - image_tokens->ny = clip_n_output_tokens_y(ctx->ctx_clip, batch_f32.entries[0].get()); + image_tokens->nx = clip_n_output_tokens_x(ctx->ctx_v, batch_f32.entries[0].get()); + image_tokens->ny = clip_n_output_tokens_y(ctx->ctx_v, batch_f32.entries[0].get()); image_tokens->use_mrope_pos = true; } else { // other models, we only need the total number of tokens @@ -542,7 +568,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, return 1; } - if (!ctx->has_audio) { + if (!ctx->ctx_a) { LOG_ERR("%s: error: model does not support audio input\n", __func__); return 2; } @@ -570,7 +596,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, mel_f32->nx = mel_spec.n_len; mel_f32->ny = mel_spec.n_mel; mel_f32->buf = std::move(mel_spec.data); - size_t n_tokens = clip_n_output_tokens(ctx->ctx_clip, mel_f32.get()); + size_t n_tokens = clip_n_output_tokens(ctx->ctx_a, mel_f32.get()); clip_image_f32_batch batch_f32; batch_f32.is_audio = true; @@ -605,41 +631,54 @@ int32_t mtmd_encode_chunk(mtmd_context * ctx, const mtmd_input_chunk * chunk) { LOG_WRN("mtmd_encode_chunk has no effect for text chunks\n"); return 0; } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) { + if (!ctx->ctx_v) { + LOG_ERR("%s: model does not support vision input\n", __func__); + return 1; + } return mtmd_encode(ctx, chunk->tokens_image.get()); } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) { - int n_mmproj_embd = clip_n_mmproj_embd(ctx->ctx_clip); + if (!ctx->ctx_a) { + LOG_ERR("%s: model does not support audio input\n", __func__); + return 1; + } + int n_mmproj_embd = ctx->n_embd_projected(); ctx->image_embd_v.resize(chunk->tokens_audio->n_tokens * n_mmproj_embd); bool ok = clip_image_batch_encode( - ctx->ctx_clip, + ctx->ctx_a, ctx->n_threads, &chunk->tokens_audio->batch_f32, ctx->image_embd_v.data()); return ok ? 
0 : 1; } - LOG_ERR("mtmd_encode_chunk: unknown chunk type %d\n", (int)chunk->type); + LOG_ERR("%s: unknown chunk type %d\n", __func__, (int)chunk->type); return 1; } int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) { - int n_mmproj_embd = clip_n_mmproj_embd(ctx->ctx_clip); + clip_ctx * ctx_clip = ctx->ctx_v; + if (!ctx_clip) { + LOG_ERR("%s: this API does not support non-vision input, please use mtmd_encode_chunk instead\n", __func__); + return 1; + } + int n_mmproj_embd = clip_n_mmproj_embd(ctx_clip); ctx->image_embd_v.resize(image_tokens->n_tokens() * n_mmproj_embd); bool ok = false; - if (clip_is_llava(ctx->ctx_clip) || clip_is_minicpmv(ctx->ctx_clip) || clip_is_glm(ctx->ctx_clip)) { + if (clip_is_llava(ctx_clip) || clip_is_minicpmv(ctx_clip) || clip_is_glm(ctx_clip)) { // TODO @ngxson : llava does not support batched encoding ; this should be fixed inside clip_image_batch_encode() const auto & entries = image_tokens->batch_f32.entries; for (size_t i = 0; i < entries.size(); i++) { - int n_tokens_per_image = clip_n_output_tokens(ctx->ctx_clip, entries[i].get()); + int n_tokens_per_image = clip_n_output_tokens(ctx_clip, entries[i].get()); ok = clip_image_encode( - ctx->ctx_clip, + ctx_clip, ctx->n_threads, entries[i].get(), ctx->image_embd_v.data() + i*n_mmproj_embd*n_tokens_per_image); } } else { ok = clip_image_batch_encode( - ctx->ctx_clip, + ctx_clip, ctx->n_threads, &image_tokens->batch_f32, ctx->image_embd_v.data()); @@ -653,8 +692,7 @@ float * mtmd_get_output_embd(mtmd_context * ctx) { } bool mtmd_decode_use_non_causal(mtmd_context * ctx) { - projector_type proj_type = clip_get_projector_type(ctx->ctx_clip); - if (proj_type == PROJECTOR_TYPE_GEMMA3) { + if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) { return true; } return false; @@ -665,11 +703,11 @@ bool mtmd_decode_use_mrope(mtmd_context * ctx) { } bool mtmd_support_vision(mtmd_context * ctx) { - return ctx->has_vision; + return ctx->ctx_v != nullptr; } bool mtmd_support_audio(mtmd_context * ctx) { - return ctx->has_audio; + return ctx->ctx_a != nullptr; } // these 2 helpers below use internal clip_image_u8_ptr, From 2782a583dfd3131e98be1056d2f42004ef2210cc Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 26 May 2025 00:54:06 +0200 Subject: [PATCH 02/16] refactor mtmd tokenizer --- tools/mtmd/mtmd-cli.cpp | 4 +- tools/mtmd/mtmd.cpp | 392 +++++++++++++++++++++++----------------- tools/mtmd/test-2.mp3 | Bin 0 -> 140060 bytes tools/mtmd/tests.sh | 76 ++++---- 4 files changed, 270 insertions(+), 202 deletions(-) create mode 100644 tools/mtmd/test-2.mp3 diff --git a/tools/mtmd/mtmd-cli.cpp b/tools/mtmd/mtmd-cli.cpp index 0f8bb0cdc42dc..a70f11ca9d718 100644 --- a/tools/mtmd/mtmd-cli.cpp +++ b/tools/mtmd/mtmd-cli.cpp @@ -284,7 +284,9 @@ int main(int argc, char ** argv) { if (is_single_turn) { g_is_generating = true; if (params.prompt.find(mtmd_default_marker()) == std::string::npos) { - params.prompt += mtmd_default_marker(); + for (size_t i = 0; i < params.image.size(); i++) { + params.prompt += mtmd_default_marker(); + } } common_chat_msg msg; msg.role = "user"; diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index c78c98d40e122..b3d5391c3e5c3 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -104,6 +104,11 @@ struct mtmd_context { int n_threads; std::string media_marker; + std::string img_beg; + std::string img_end; + std::string aud_beg; + std::string aud_end; + // for llava-uhd style models, we need special tokens in-between slices // minicpmv calls them "slices", llama 4 
calls them "tiles" mtmd_slice_tmpl slice_tmpl = MTMD_SLICE_TMPL_NONE; @@ -137,6 +142,10 @@ struct mtmd_context { throw std::runtime_error("custom image_marker is not supported anymore, use media_marker instead"); } + if (media_marker.empty()) { + throw std::runtime_error("media_marker must not be empty"); + } + clip_context_params ctx_clip_params; ctx_clip_params.use_gpu = ctx_params.use_gpu; ctx_clip_params.verbosity = ctx_params.verbosity; @@ -208,6 +217,44 @@ struct mtmd_context { w_filters = whisper_precalc_filters::get_128_bins(); } + // set boi/eoi + projector_type pt = proj_type(); + if (pt == PROJECTOR_TYPE_GEMMA3) { + // <start_of_image> ... (image embeddings) ... <end_of_image> + img_beg = "<start_of_image>"; + img_end = "<end_of_image>"; + + } else if (pt == PROJECTOR_TYPE_IDEFICS3) { + // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215 + img_beg = "<fake_token_around_image><global-img>"; + img_end = "<fake_token_around_image>"; + + } else if (pt == PROJECTOR_TYPE_PIXTRAL) { + // https://github.com/huggingface/transformers/blob/1cd110c6cb6a6237614130c470e9a902dbc1a4bd/docs/source/en/model_doc/pixtral.md + img_end = "[IMG_END]"; + + } else if (pt == PROJECTOR_TYPE_QWEN2VL || pt == PROJECTOR_TYPE_QWEN25VL) { + // <|vision_start|> ... (image embeddings) ... <|vision_end|> + img_beg = "<|vision_start|>"; + img_end = "<|vision_end|>"; + + } else if (pt == PROJECTOR_TYPE_LLAMA4) { + // (more details in mtmd_context constructor) + img_beg = "<|image_start|>"; + img_end = "<|image_end|>"; + + } else if (pt == PROJECTOR_TYPE_INTERNVL) { + // <img> ... (image embeddings) ... </img> + img_beg = "<img>"; + img_end = "</img>"; + + } else if (pt == PROJECTOR_TYPE_QWEN2A) { + // <|audio_bos|> ... (embeddings) ... <|audio_eos|> + aud_beg = "<|audio_bos|>"; + aud_end = "<|audio_eos|>"; + + } +
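Note: to illustrate the img_beg/img_end values set above, a sketch of what the refactored tokenizer (below) produces for a Qwen2-VL model; the prompt string is hypothetical:

    // tokenizing "describe <__media__> please" with PROJECTOR_TYPE_QWEN2VL yields:
    //   text  chunk: tokens of "describe <|vision_start|>"
    //   image chunk: the image embeddings for the bitmap
    //   text  chunk: tokens of "<|vision_end|> please"
    // img_beg/img_end are added via add_text(), so they merge into the
    // neighboring text chunks instead of creating extra chunk entries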
// warning messages if (proj == PROJECTOR_TYPE_LLAMA4) { LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n" @@ -293,163 +340,107 @@ void mtmd_free(mtmd_context * ctx) { } } -// copied from common_tokenize -static std::vector<llama_token> mtmd_tokenize_text_internal( - const struct llama_vocab * vocab, - const std::string & text, - bool add_special, - bool parse_special) { - // upper limit for the number of tokens - int n_tokens = text.length() + 2 * add_special; - std::vector<llama_token> result(n_tokens); - n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); - if (n_tokens < 0) { - result.resize(-n_tokens); - int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); - GGML_ASSERT(check == -n_tokens); - } else { - result.resize(n_tokens); - } - return result; -} +struct mtmd_tokenizer { + mtmd_context * ctx; + std::vector<const mtmd_bitmap *> bitmaps; -int32_t mtmd_tokenize(mtmd_context * ctx, - mtmd_input_chunks * output, + std::string input_text; + bool add_special; + bool parse_special; + const llama_vocab * vocab; + + mtmd_input_chunks cur; + + mtmd_tokenizer(mtmd_context * ctx, const mtmd_input_text * text, const mtmd_bitmap ** bitmaps, - size_t n_bitmaps) { - auto vocab = llama_model_get_vocab(ctx->text_model); - - std::string prompt_modified(text->text); - std::string marker_modified(ctx->media_marker); - projector_type proj_type = ctx->proj_type(); - - // for compatibility, we convert image marker to media marker - string_replace_all(prompt_modified, MTMD_DEFAULT_IMAGE_MARKER, ctx->media_marker); - - // a bit hacky here, but works for now - // for some models, we need to add prefix and suffix to the image embeddings - if (proj_type == PROJECTOR_TYPE_GEMMA3) { - // gemma 3 - // <start_of_image> ... (image embeddings) ... <end_of_image> - marker_modified = "<start_of_image>" + ctx->media_marker + "<end_of_image>"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } else if (proj_type == PROJECTOR_TYPE_IDEFICS3) { - // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215 - marker_modified = "<fake_token_around_image><global-img>" + ctx->media_marker + "<fake_token_around_image>"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } else if (proj_type == PROJECTOR_TYPE_PIXTRAL) { - // https://github.com/huggingface/transformers/blob/1cd110c6cb6a6237614130c470e9a902dbc1a4bd/docs/source/en/model_doc/pixtral.md - marker_modified = ctx->media_marker + "[IMG_END]"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } else if (proj_type == PROJECTOR_TYPE_QWEN2VL || proj_type == PROJECTOR_TYPE_QWEN25VL) { - // <|vision_start|> ... (image embeddings) ... <|vision_end|> - marker_modified = "<|vision_start|>" + ctx->media_marker + "<|vision_end|>"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } else if (proj_type == PROJECTOR_TYPE_LLAMA4) { - // (more details in mtmd_context constructor) - marker_modified = "<|image_start|>" + ctx->media_marker + "<|image_end|>"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } else if (proj_type == PROJECTOR_TYPE_INTERNVL) { - // <img> ... (image embeddings) ... </img> - marker_modified = "<img>" + ctx->media_marker + "</img>"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } else if (proj_type == PROJECTOR_TYPE_QWEN2A) { - // <|audio_bos|> ... (embeddings) ... <|audio_eos|> - marker_modified = "<|audio_bos|>" + ctx->media_marker + "<|audio_eos|>"; - string_replace_all(prompt_modified, ctx->media_marker, marker_modified); - - } - - // llava-1.5, llava-1.6, Yi-VL, Yi-34B, granite: don't need to add prefix and suffix - // for glm-edge, BOI and EOI token's embeddings are not present in the text model - - std::vector<std::string> parts = string_split_str(prompt_modified, ctx->media_marker); - output->entries.clear(); - output->entries.reserve(parts.size()); - - size_t i_bm = 0; - - // utility for adding raw tokens - auto add_text_chunk = [&output](std::vector<llama_token> && tokens) { - mtmd_input_chunk chunk{ - MTMD_INPUT_CHUNK_TYPE_TEXT, - std::move(tokens), - nullptr, // image tokens - nullptr, // audio tokens - }; - output->entries.emplace_back(std::move(chunk)); - }; - - // utility for splitting batch of multiple images into chunks of batch having single images - auto split_batch_to_chunk = [](clip_ctx * ctx_clip, clip_image_f32_batch && batch_f32, const std::string & id) { - std::vector<mtmd_input_chunk> chunks; + size_t n_bitmaps) : ctx(ctx), bitmaps(bitmaps, bitmaps + n_bitmaps) { + add_special = text->add_special; + parse_special = text->parse_special; + input_text = text->text; + vocab = llama_model_get_vocab(ctx->text_model); + + // for compatibility, we convert image marker to media marker + string_replace_all(input_text, MTMD_DEFAULT_IMAGE_MARKER, ctx->media_marker); + } - for (auto & entry : batch_f32.entries) { - mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); - image_tokens->nx = clip_n_output_tokens(ctx_clip, entry.get()); - image_tokens->ny = 1; - image_tokens->batch_f32.entries.push_back(std::move(entry)); - image_tokens->id = id; + int32_t tokenize(mtmd_input_chunks * output) { + cur.entries.clear(); + std::vector<std::string> parts = split_text(input_text, ctx->media_marker); + size_t i_bm = 
0; // index of the current bitmap + for (auto & part : parts) { + if (part == ctx->media_marker) { + // this is a marker, we should add the next bitmap + if (i_bm >= bitmaps.size()) { + LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n", + __func__, bitmaps.size(), parts.size() - 1); + return 1; + } + const mtmd_bitmap * bitmap = bitmaps[i_bm++]; + int32_t res = add_media(bitmap); + if (res != 0) { + return res; + } + } else { + // this is a text part, we should add it as text + add_text(part, add_special, parse_special); + } + } - for (auto & entry : batch_f32.entries) { - mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); - image_tokens->nx = clip_n_output_tokens(ctx_clip, entry.get()); - image_tokens->ny = 1; - image_tokens->batch_f32.entries.push_back(std::move(entry)); - image_tokens->id = id; + if (i_bm != bitmaps.size()) { + LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n", + __func__, bitmaps.size(), parts.size() - 1); + return 1; } - mtmd_input_chunk chunk{ - MTMD_INPUT_CHUNK_TYPE_IMAGE, - {}, // text tokens - std::move(image_tokens), - nullptr, // audio tokens - }; - chunks.emplace_back(std::move(chunk)); + *output = std::move(cur); + + return 0; + } - return chunks; - }; + void add_text(const std::string & txt, bool add_special, bool parse_special) { + auto tokens = mtmd_tokenize_text_internal(vocab, txt, add_special, parse_special); + add_text(tokens); + } + + void add_text(const std::vector<llama_token> & tokens) { if (tokens.empty()) { - continue; + return; } - mtmd_input_chunk chunk{ - MTMD_INPUT_CHUNK_TYPE_TEXT, - std::move(tokens), - nullptr, // image tokens - nullptr, // audio tokens - }; - output->entries.emplace_back(std::move(chunk)); - - // only add image/audio tokens to middle of 2 parts - // therefore, we skip handling image/audio if this is the last part - if (&parts.back() == &part) { - continue; + // if last entry is also a text chunk, add tokens to it instead of creating new chunk + if (!cur.entries.empty() && cur.entries.back().type == MTMD_INPUT_CHUNK_TYPE_TEXT) { + cur.entries.back().tokens_text.insert( + cur.entries.back().tokens_text.end(), + tokens.begin(), + tokens.end()); + } else { + mtmd_input_chunk chunk{ + MTMD_INPUT_CHUNK_TYPE_TEXT, + tokens, + nullptr, // image tokens + nullptr, // audio tokens + }; + cur.entries.emplace_back(std::move(chunk)); } + }
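Note: the two add_text overloads above fold consecutive text runs into one chunk; a sketch of the observable effect, assuming a fresh tokenizer:

    // add_text("hello ", ...); add_text("world", ...);
    // -> cur.entries holds ONE text chunk containing the tokens of both calls;
    //    a new text chunk is only opened after a non-text (image/audio) chunk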
- if (!bitmaps[i_bm]->is_audio) { + int32_t add_media(const mtmd_bitmap * bitmap) { + if (!bitmap->is_audio) { // handle image - if (i_bm >= n_bitmaps) { - LOG_ERR("%s: error: not enough images for %d parts\n", __func__, (int)parts.size()); - return 1; - } - if (!ctx->ctx_v) { LOG_ERR("%s: error: model does not support vision input\n", __func__); return 2; } + add_text(ctx->img_beg, false, true); // add image begin token + // convert mtmd_bitmap to clip_image_u8 clip_image_u8_ptr img_u8(clip_image_u8_init()); - img_u8->nx = bitmaps[i_bm]->nx; - img_u8->ny = bitmaps[i_bm]->ny; - img_u8->buf.resize(bitmaps[i_bm]->data.size()); - std::memcpy(img_u8->buf.data(), bitmaps[i_bm]->data.data(), img_u8->nx * img_u8->ny * 3); + img_u8->nx = bitmap->nx; + img_u8->ny = bitmap->ny; + img_u8->buf.resize(bitmap->data.size()); + std::memcpy(img_u8->buf.data(), bitmap->data.data(), img_u8->nx * img_u8->ny * 3); // preprocess image clip_image_f32_batch batch_f32; @@ -466,7 +457,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, || ctx->slice_tmpl == MTMD_SLICE_TMPL_LLAMA4 ) { // split batch into chunks of single images - auto chunks = split_batch_to_chunk(ctx->ctx_v, std::move(batch_f32), bitmaps[i_bm]->id); + auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmap->id); GGML_ASSERT(chunks.size() > 0); auto ov_chunk = std::move(chunks.front()); @@ -475,11 +466,11 @@ int32_t mtmd_tokenize(mtmd_context * ctx, // add overview image (first) if (ctx->ov_img_first) { if (ctx->tok_ov_img_start != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_ov_img_start}); + add_text({ctx->tok_ov_img_start}); } - output->entries.emplace_back(std::move(ov_chunk)); + cur.entries.emplace_back(std::move(ov_chunk)); if (ctx->tok_ov_img_end != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_ov_img_end}); + add_text({ctx->tok_ov_img_end}); } } @@ -488,39 +479,39 @@ int32_t mtmd_tokenize(mtmd_context * ctx, const int n_col = batch_f32.grid_x; const int n_row = batch_f32.grid_y; if (ctx->tok_slices_start != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_slices_start}); + add_text({ctx->tok_slices_start}); } for (int y = 0; y < n_row; y++) { for (int x = 0; x < n_col; x++) { const bool is_last_in_row = (x == n_col - 1); if (ctx->tok_sli_img_start != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_sli_img_start}); + add_text({ctx->tok_sli_img_start}); } - output->entries.emplace_back(std::move(chunks[y * n_col + x])); + cur.entries.emplace_back(std::move(chunks[y * n_col + x])); if (ctx->tok_sli_img_end != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_sli_img_end}); + add_text({ctx->tok_sli_img_end}); } if (!is_last_in_row && ctx->tok_sli_img_mid != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_sli_img_mid}); + add_text({ctx->tok_sli_img_mid}); } } if ((y != n_row - 1 || ctx->tok_row_end_trail) && ctx->tok_row_end != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_row_end}); + add_text({ctx->tok_row_end}); } } if (ctx->tok_slices_end != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_slices_end}); + add_text({ctx->tok_slices_end}); } } // add overview image (last) if (!ctx->ov_img_first) { if (ctx->tok_ov_img_start != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_ov_img_start}); + add_text({ctx->tok_ov_img_start}); } - output->entries.emplace_back(std::move(ov_chunk)); + cur.entries.emplace_back(std::move(ov_chunk)); if (ctx->tok_ov_img_end != LLAMA_TOKEN_NULL) { - add_text_chunk({ctx->tok_ov_img_end}); + add_text({ctx->tok_ov_img_end}); } } @@ -542,7 +533,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, image_tokens->ny = 1; } image_tokens->batch_f32 = std::move(batch_f32); - image_tokens->id = bitmaps[i_bm]->id; // optional + image_tokens->id = bitmap->id; // optional LOG_DBG("image_tokens->nx = %d\n", image_tokens->nx); LOG_DBG("image_tokens->ny = %d\n", image_tokens->ny); @@ -554,35 +545,31 @@ int32_t mtmd_tokenize(mtmd_context * ctx, std::move(image_tokens), nullptr, // audio tokens }; - output->entries.emplace_back(std::move(chunk)); + cur.entries.emplace_back(std::move(chunk)); } - i_bm++; // move to next image - continue; + add_text(ctx->img_end, false, true); // add image end token } else { // handle audio - if (i_bm >= n_bitmaps) { - LOG_ERR("%s: error: not enough images for %d parts\n", __func__, (int)parts.size()); - return 1; - } - if (!ctx->ctx_a) { LOG_ERR("%s: error: model does not support audio input\n", __func__); return 2; } - if (bitmaps[i_bm]->data.size() == 0) { + if (bitmap->data.size() == 0) { LOG_ERR("%s: error: empty audio data\n", __func__); return 2; } + add_text(ctx->aud_beg, false, true); // add audio begin token + // preprocess audio GGML_ASSERT(ctx->w_filters.n_mel); // make sure we have filter preloaded std::vector<whisper_preprocessor::whisper_mel> mel_spec_chunks; - const float * samples = (const 
float *)bitmaps[i_bm]->data.data(); - size_t n_samples = bitmaps[i_bm]->data.size() / sizeof(float); + const float * samples = (const float *)bitmap->data.data(); + size_t n_samples = bitmap->data.size() / sizeof(float); bool ok = whisper_preprocessor::preprocess_audio(samples, n_samples, ctx->w_filters, mel_spec_chunks); if (!ok) { LOG_ERR("Unable to preprocess audio\n"); @@ -605,7 +592,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx, mtmd_audio_tokens_ptr audio_tokens(new mtmd_audio_tokens); audio_tokens->n_tokens = n_tokens; audio_tokens->batch_f32 = std::move(batch_f32); - audio_tokens->id = bitmaps[i_bm]->id; // optional + audio_tokens->id = bitmap->id; // optional LOG_DBG("audio_tokens->n_tokens = %d\n", audio_tokens->n_tokens); @@ -615,15 +602,86 @@ int32_t mtmd_tokenize(mtmd_context * ctx, nullptr, // image tokens std::move(audio_tokens), }; - output->entries.emplace_back(std::move(chunk)); + cur.entries.emplace_back(std::move(chunk)); } - i_bm++; - continue; + add_text(ctx->aud_end, false, true); // add audio end token } + + return 0; } - return 0; + std::vector<mtmd_input_chunk> split_batch_to_chunk(clip_image_f32_batch && batch_f32, const std::string & id) { + std::vector<mtmd_input_chunk> chunks; + + for (auto & entry : batch_f32.entries) { + mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); + image_tokens->nx = clip_n_output_tokens(ctx->ctx_v, entry.get()); + image_tokens->ny = 1; + image_tokens->batch_f32.entries.push_back(std::move(entry)); + image_tokens->id = id; + + mtmd_input_chunk chunk{ + MTMD_INPUT_CHUNK_TYPE_IMAGE, + {}, // text tokens + std::move(image_tokens), + nullptr, // audio tokens + }; + chunks.emplace_back(std::move(chunk)); + } + + return chunks; + } + + // for example: "a <__media__> b <__media__> c" --> "a", "<__media__>", "b", "<__media__>", "c" + static std::vector<std::string> split_text(const std::string & input, const std::string & delimiter) { + std::vector<std::string> result; + if (input.empty()) { + return result; + } + size_t start = 0; + size_t pos = 0; + while ((pos = input.find(delimiter, start)) != std::string::npos) { + if (pos > start) { + result.push_back(input.substr(start, pos - start)); + } + result.push_back(delimiter); + start = pos + delimiter.length(); + } + if (start < input.length()) { + result.push_back(input.substr(start)); + } + return result; + } + + // copied from common_tokenize + static std::vector<llama_token> mtmd_tokenize_text_internal( + const struct llama_vocab * vocab, + const std::string & text, + bool add_special, + bool parse_special) { + // upper limit for the number of tokens + int n_tokens = text.length() + 2 * add_special; + std::vector<llama_token> result(n_tokens); + n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + if (n_tokens < 0) { + result.resize(-n_tokens); + int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + GGML_ASSERT(check == -n_tokens); + } else { + result.resize(n_tokens); + } + return result; + } +}; + +int32_t mtmd_tokenize(mtmd_context * ctx, + mtmd_input_chunks * output, + const mtmd_input_text * text, + const mtmd_bitmap ** bitmaps, + size_t n_bitmaps) { + mtmd_tokenizer tokenizer(ctx, text, bitmaps, n_bitmaps); + return tokenizer.tokenize(output); } int32_t mtmd_encode_chunk(mtmd_context * ctx, const mtmd_input_chunk * chunk) {
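Note: a short usage sketch of the refactored tokenizer through the public entry point. bmp_a/bmp_b stand for bitmaps created elsewhere and are hypothetical; the rest follows the mtmd API as declared in mtmd.h:

    mtmd_input_text text;
    text.text          = "compare <__media__> and <__media__>"; // one marker per bitmap
    text.add_special   = true;
    text.parse_special = true;

    const mtmd_bitmap * bitmaps[2] = { bmp_a, bmp_b };
    mtmd_input_chunks * chunks = mtmd_input_chunks_init();
    // bitmaps are consumed in order, one per marker; a count mismatch in either
    // direction now fails with return value 1 instead of being silently accepted
    int32_t res = mtmd_tokenize(ctx, chunks, &text, bitmaps, 2);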
diff --git a/tools/mtmd/test-2.mp3 b/tools/mtmd/test-2.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..aa9d7ec2c1dde062a3124d68971eb1b2b3dfe563 GIT binary patch literal 140060
[base85-encoded binary payload of tools/mtmd/test-2.mp3 (140060 bytes, truncated in this copy) omitted]
zotARz5qG3-eV7sjQAdP@d_KBfJiRjgg)WE|Q?M3RfB;|s&p>d8Mu|9!UqbNv`jju+ za03sFM4?&!;W5l0T6E?kg* zsufMtI0@doRR1)Ol_MYP6*X#@#BJSn8iQPFDvvv`{buRdOka3$DE)$)SQFg@`>Cxx z>%p-1Pg z$PVUcwipVpBY%O3XFp64ip8^r3Rl8lwAG=LOMljHDoXBp9t35!&W$FOOEo{L6m2OK z{qY{6_dkZ8&C1*gF-Bt~y|2=UGk2zqrcF4k2zYf;T6(oK-_I6jwlu2;pszCMnf^qi zq(;Obatz1NcK-Qx!8IqdV4^S@p)#TMM3|vV zA^)_Xy3Rs}Ra*5<*9^0(%1H%suqX0HYuWh=VZzBX&^&yWW7iR$E?dt>=TLVMX{m^Q zR1bc(u8`VjX?52yz>bdgB2cts!i$|Q8L#eUDSd6%$0IS;%{&<)?YKyTMgNs?w7gl| z3I~@OY3%3wN7OzsbJ0K=1ZR=jV_tZ`uYw8Moh2TNL%`S+@xg+|PEGB7B^4*R#A}s@ zO2gehJmk@`18qU;D`KqEfY|E+CGwYK5L+Bs!KC=OSI!gTB7;xKDxxfRJusPqw#iyq z*s=XFayGf8K-IhJ&jgB<3=*7JH3M0Y2)pIN*=L{~C^%Zcr0fT^sC)jeri(~Al}T2g z&zaMBnR{PtUGpaKkR+_g zDE;)}uaQ+5TQKP~NZ0B@htgdj$Wa)`v}% z&t{jJSkZKJ^z^vl^nwEkfD$6Hz+iOt0nVQJXP^TpdU}~Yytm!!K?uL_O3B`_fZeO@ z&50-^XWAw-fN6VA95EZ4cyl9W4qwe@w_%_bh}4mYbT9aBgcz9GI91iOBQu`*dq^6y z(|(2Bn#pU4o!_W;=gw$H|= ztzz@1L-5Z;2}8wY*`UDTA#;27YW_5IK7ifpJ@@M6cxZeV(?b`8sS2hc_O|t@mA|U_ zz|>FHaIMfaK{ebBU6m)D9KHe*S3oST(GSl+hXDFWt!4~8MuBY#3*5ou7s2pW#g+Km z6LUa=iuk3As4Fr<;Ro?q9b$~XqF!xfXloX{tO7`0?{R5P8Wui!_r)pV46@hrbz7MP z^~!kmnt;P7S-gkSX^@o)9E508X{gE}9Qz_PDjJNK9vZ=k`gy5e*1{`%YykMVfv^{+ zV$}DWTof%l0Gx%+)k=us`xk=o*eRUW!=2fx2F6B5E=s&xJy6vk@^Q>0azaR_M(38U zt`?c2XDqKs77GqXp}gq&fkPbkalcyiKiT#XfI)ttqDZ3B8^}=e`#Rzsnmnba(ly6I z8!+}D%iD0?{8it=9ttplhC?KaMe+$H%$3rwBSKh1Nk!|DP1S=*F>{c$P}lBD%D z?zL6*Se0+2I71~s;D09Xu;%E{u)>1+T%n%;dh(ThS+SytPuwr2P=byZ&eGOnYb8&%dW|-T8jHW`oFJ5EJ<93%o<-uU${Cxu&>M1@ zM`RlE^4}6H4%Rz6u)6xYwXQF8!pn5|3A1pE?IYYC+q&{$<^dQCNCIBJzRpl)Y6oGr>Y$UELH^V>2 z;OtORu&~yB$&V1n%dEoFG~`J!uuCo6l`+2p0w!Z+rVn1Ag#%3}IG z`|F-gF~qpjJi9m%M7CXE=}ARf|2awZ!EYa^4cp70xJoZA8yi%BcGQX5R{t3`swt^P znJuxm%7r*Lw>ix9>wpN`c!-i0b73sjz2wjc52VXOV5v|Lw}FD75zLX`fv9=f2pwV42BUyO(y7gAx7`^ zep={7Q5b#8c%{td?asu%GvU0{hRW2S_|>D~+CkG7*H(e%BN=ZnbRAVXo{|ggZswS_ zLwPctI|d@y;fJ*`M#2&6z7&u9U&dMm0tVw6Pc?g0fK~qMn_{%X)8eM$q>d~_V($Lb zK&_0pmr9;tT9yea+e|r1Q+Wy9jsZD}SP0NI!vhd}@_=!LPRXbHl{HvG6uNk?avpWE>-BY+_Ys+F(HD-)~}99zcj40=Lst~SDckj)F%1zWeY`> zPy1ceRxi8ObQAZ4ppHtL6-8P+1L%rx zP;`qn8)EZm+&(^NL%L~l4c%Sbnkgce#SbX?OuW?OUWK8H6u*Di`pCxOw;zvD#_Xe_ znvybKNm~=%P@yFrt}+@RKjoIDNwuD-^q~GuchLHGr~pdAri4&4IZ2`of|L_tB9T(yf@SQ= zv+(9-ud35)uu?jAC{{i1eO=0G^{vVxE5A){r2Zj27{3ni)|g-<9BAsi9bvpuYZWUH z&^5N+otl3d?o_0`o7TWLZIzpgNgiSi7i-DHBnRs))k`wHd=-_S76Y~$qTotF!)LpKRY8O2{kUNW&Gshl_(4f(%bb z-;6N~158h3c*K$~3qBF(h@!YOL!`6BU&JVV=42nC`^SyPfPsc8`SZ7%juV(yKA>n} z?+bmw1&dJ9#Lb+hbYIb~`|lerTQ;SiYIV5kmEaB;G6?;;aLf(Q;_<7s!9fEQaL#-k zUF4&!oJ9P`Z zpok~Nj|T^tgUMC$*+^c0c!19osthQ={r{Y0*sN(mVhJ@(SC~VGts0fdB-u8Kn)ssg zc1+xaH^Dv1O&v`&i*dgD<#SbQ9VfZ4n3nnjUAn`%P>MoScN~7Ym-PhSMo4am5RuX) zR+DMplkRAhmqvd`e(mz57vw}Wj6f`;d5S@|XVd!+>Q8kXl_uo>n2%I!*tX);)+3?9 zJ3dSAVi%iTp+qcv4Et3+g3h2&1tENQHVN}vP>`_Tuj^%ti$75hH2y>wu*l6KI#WDr zJ5P;`TC*PBqtaNZawe;&h}_5nkTo8K323P#B5OXh0|d zG`>JzV_*HAL=^YHoOt_Yygedk5^Rz|mFQ`)-7#AcCICy=i3~Hffu>W7#Ij+5@lDhY z(}F1oPW>eb0`dst4lQ)FJTznysgj8hoi1rIF8*HE0~-lAmTg6&c&55_u|qSn>SZXS zGfaHauIgA~d*h)F(3LSeQVP&x#G7!jY|$H$0zH0o=v!Hj$5dR2Rc))>`m z14si4Vni61j5Y%#1&sbh{Tb*3TrSsdvKQ7i_YC<`@C&-kzTCv1+-C~r{4z^9~F9G+@1=8UVK9{ zDnpnN@>EhT!nnx%gGKY7QLrrl1_I1}lGuYJqFIESVyJv|hA3zAM&t+%VDT@Ah$$K2 ziUYW!@UUx(h>%TR1tEjgn93XyX_@FiNR70tVv|)ou$oyJWp3HeAYR16Ai61L)cYe@dsa(mz1?MwykQEACKG?0Cp2(Lc&24 zP7SIli*f|t7vL-L1`QWV!Yo>P{sz+m1z8=*3){{btWOIyG5lyzGTG-3%AyS~##>lZ z5m_sA*_nv%Z2m&P?V`^W@&dpbS7B#$74>pB%r|KfjMZ|UhYDvB1b7It-!@6~oH&e0 zSS;waog(ROh0t`l&`LrzL^KG$S%kBT>s0C|oBsM@>%Yi>sSF<>wJ2yu4DZmai%|K* z(&@aF{f<##{+dOf@9urc9{~Ut18Bl?G2TZ;Qo;pM$reT;>RS)|EiT7hEsz^3OfzgS z4bO7Nm#$apIwjJ8)v9nNZS7wrG7&HDF2Gjet{o@3D;A!*TTI_wi-s}HE#SHR88`PB 
z+8#Y56vSQP*mV9P^WRfD2rl&r(5)^$5|zNQ&WWXBH1KhEA=R*ok$si#M#^zTyS$^m zq2t{12gj2VYuM?KSK>bLq73~V*62-&v7L)%f8o}#uPQql4DE^#jPyiV=>U5Svy9kKR7@%*kXY2r?2=Lr7U1p*gG*FDcDI*w6?LIuHzd%dm zVh-;Jl$8@MqFn4wT;?}dl_fVl00Y@4@r7B~)b4LfPE2vO10)?_4748ua-;^N`WzlH+E?Z-#%DwXWkHn8*mY|ocB&Jjac_QpBm6|b z+n~fycXk(?xuv%JpN2LU3br$G4U5*|kc_JwOeb z=m?A(1#o1-!oD-Rm}e0DAX0M2Q3UI9@Fw#kh`wLxoDxdVnxE+dO*+DQ6u;p?0X^$6 zQuFDD8HUkSTFmDPRl=n{31(%*6{1qO)z?>;^9tp6_z$5g>Pm$=7t#&}_8&6*o6c{! z<~JSf?qF6^_hv|B8A1vZ(9odVri3qR-i(tjR89lo0_nMBt3U0|ziY2pxeE}+ivc`_ zNYrs`JKVXcqU#zDkHKNvIBM*obU2uijXMR5^h9dIIB#^xX2?%1=kbFS>6C z0NG6(btCY3*y-}ONQf(`~ik>bZbui*f9~y39kG2)#{+fS`$k@wN#a|KksZilb z0H|t{>m-bBfMIkgmJ(PEhoShJvp$iK+XPz!p*CA(d5Q`VN@b8L4)+BkPYhpGyui4khhXfS6)#C+uekv+hsW^c8tUDX&rEg*`2G5oN(OvJh^B*uBp!* zU+=%ofvM5>$-*8FmG?@;bAxXTvHS)G`yFuX0Kq@x8&t}u#oaO+Ep$KHP^z3@xMiRO zX{v?bp*$3?Z{=~ksv(xf8WonT+R78ng0kS|y6)sIv|W5=H0p=i_I5>mG>nn*y0lO5jlT}wc3X6JRM-%EH*~MliKnk&JlP-`nB?R zx71(9EkzOTv+l{1NIViLoES1?#k~pywe)uD+ zOHsXgvbNsx27Umxq-2)#w*r~kwe(NYCqPrq$JFcyDp-|wk~dzRx`-q%!L?0BuICGGjxzt~hidHZ*0 z^BHIet}biTtu9fTF)V~wmlFEa9b@fue}5vm$r6OL6H*O+89<7Sg|(Uo;PIx+6JyFb zbT{ZsaO5$RH^?#5gq}!)h{$K{2kMGaapb@AZv1IkTTOOHCU&(qef3layO4ZP;0|15 z11E~B5EJN6_V1%ulV74$&F$Q)V9XL`sNks6OK?|WU3pD9ylS$=FGSTD*kSmkwL0im z&YT+J_vLM+P0%vd*z4L;wPLF3nwE_Bk2cL(lqw8+FdTN3iVK|I`J5W@DYPL9T}TA{ z&p_hbCWsxs5UMF+RC}@AWCs31Un^Rc@QPMmXnJxV` zcZkNxk*RkiQHey(mB%IKW$!F4wdL@*Bw7x+`tU)h{Ue0?Q;n8C+yD|bs&9Av^kqtH z=J39~*`nsOo%>5DzuB=iMva^19J0km&4TB|OlNEm2+=ak93YD97HSy8CNH!T)2ih& z$>w1ktLXCsu|ntsj_pSh$L@s61$1)nA#i_nH(AZ6cmarqFoo9on`e5feWH zH3R4}o*%lBp`b9b)6VNuEV2IKUy^uJSi|*?pW1vGU%KjvUr5laaLIitHKnn4{86GQ z9L_Z_Nv_Tf8w){J#HQU6(z{tR2?P4kfkJ4Gk*_K;#N@E2&z>Z6K-+zv{ zRpJ8m+)wnzfH?q*nj>0*8Ukx0IAitcFAt3jVkzFUkOvPb4o>1S7$uZfO zlnwyRTQv=JJ7>fEEZJ`T{pJ2?-A!J2U&#+e*%WeL7(mU`-y8uTNIp9o40;P95STP!sb8O&gX%NY( zMm9DovOe0LRkc4{FE39A8tpz?HUe5?u;3GcvqGviYioui$arM zgAooZf_}OThV7Xxvgax^PVXlCacExLdYXS~zF)8guih0s@NE1$zUSZ-z@8WI5ty61S5CMCc%wt;UPoOk;PuW)w_Vl^o&5EVL&d*xQ^UB%N)0P!npae$+ zC4QW8z)`TN^`s`7cBh4eoWGu$%AXAZ=^KbKsyBc1`<9?nTvQLa>GHR2U3mf<5(HB9 zK0C3%Ud^gltrjlWht{g)E(|7BmJw?jAehd%JI8@WObRczBb1;2?bsct^0`8(0G6}9 z?Y___Sss+L&g%?3jI;3!JE3&c7cSWjr0C5=V)kHa8$zeVBz&B%l8IF8WIK@#MtraQatzT* zUd7w{C4Gn>1=plFxFT>(6E}1-+JNtGxiQPs{Q$+w5Y-2?V$DD9nO16rCru zBVx4RX*)x-(Vz;E<$1KG{B<$A(nj7M#ADK%cl$3)U*Lx&u2P~?yjX3ieg;Z{qWyC8 ztY)`KP(o+vs>ax6LBP$yAXK<*bk_RQQAp>YyB31E1<9nK!>6O6!bc3xB?a-5%F2E# zO&=$m-7$DycW>(c2t&9-88oeiHU+eDOKYd-Gp$14DDyMoQ{^jX&&h^LDHz zA)(u|zGuia>(L*67K{eaX%F`Qz3$R|SM^Ei*RTozNLA+QYtmb4M;>`+9NrXGoiSk$ zDiyuKJD+kux@eM}Bp> zUcz3a3`(Hc{-HHE?4QL>ZnHb9)hk(-@%sH?_2#Le$m{a%HULxKo|*yGhFTc0E@{~l z-YTo($C&d1n+AWJRPbNEkXdOdWz_jY@j_H2k)>1B=AMbkiQxM^(Q}2;0dVxK#bXeZ zS}eDg8~%St_BA>>@_=(BIkj2OqtTyU+SRjQo+KwVTJ7){!+JghA~6=lFddfU;jEos zs!aiOUfc7u1cd~{Y*NC8zMcQ>8`G+StZFa3YUomNs1O~3$&XGiM+`x@jMiA}<2WY4iI3-5D1_!74!})y49- zf~eI^*D+dkWBS{`wW%rECz29Ujm=nduCS> z501lbZ$({3V%;M%{JHW2E;<8#hSq!fywiQZ`i&914`o;n=+zbrFlfz%6C zj!L|jH|qlmd;H!9=ylE$e;l#$-yjXM@zMBFoyId3N`m4H9S-IgArRt>W$R7;$oCxdK4dj$@IwJbA{>wv}%9%gAy=@ zcuLK`>y%tuAJYM;mv&DzFiZ{towJPH##P^k&+)r9EHe3p<0DX+YNRFlNHA#Y+pW~C z=jQ@`i=qHF8bHwp>ZCPdX^>CZLKnI>PFlRGH3Ou{ja^Le$e+wg!(t$V%6z z$~D6G^wz=AU@-iE!Kk6{^GH3G@Q&qqIzin*aYpCMwVh8V`hTmvo`HIxX#WSdDmVR%|yP;Q`3@6AI{13Qokwyf_Q!~ z#(^K8c4dg@37^^wwqJ^Ju0=(K_BZ_dlR3x=t_e5KQL=rS3PY&0H4yQ+^}za-{Z#{* z@dOKsAdjwG<%%rZUCWX`UsE;(kFS$qsP0c=W?Y--^<(^S_tEeDGf)E*pHSHkKAb`- zhr{AuR|n9&ouhGb_%T?Ci0I<#DWDWWq$oD<4WX- z#E$k`vJ?Twd)6kP zK=Xh1F$M-W^u>_uQ1!tG#Qlp`#g+L30w8u(XuJxVTl=X-M(tB^qg{qcjkryvLSvSf 
zDGw!~=t&s1DmC^rg;FlZL1qM@xqm$XZb=4SB)0=@YJc~sPOBs8$8aPoFQSl}wFq9$$h0Cs7qL#{cB!8ipu`^O@p^IOH5*~* zU7<^3D)bo!9`9@1OY4N~4hT&D6%BP>O>BLQ6=DT{S-F1(YJm^yyTTnw%mU-Z%NNzRyio3yp<)-*;t*=Y;|`BtN_YQpc=&Go ziwJXeDnU+ng0}^=D@Gz^654(M9z&)~2gB@(lQ3!G73Y{zRx`i*+P?>aW>eknoU`(p zcnPePn$CCeNjEMpxi4mfCOj>I`n~cjtan%592gc~n{tfuK1s1J(fG)&K(07cL%J-+4qjfl_& z6$vh{4nj>{{;EW$tzLOKIypw)_T-+-k51%mDZZddtIvg+KS)TZvc!0?j-JAuXV8L4 zA#2*?S7y-@CZO(yei{GC(Nl;RAcJ>1G2Iwg%?r;B!Pec_w z#RNz5=}sFYFKZln!JFwhgOuLoF@oSBVW~ig!f>gIw;z5jhm>plE`+Jhm-D8~`w^7fZ;U$NIu;QCCqjTTX=wGg-BjV2c1>@DmX7409P#(s`z4E(NrL{O z%OwV#B!>jw?w!6koo*z8q8V}jBaG4nG{ny&CO?t2ldem9)SGd*3Di3BOjVFyb5qmX zC4F>_(i)#q{99}aF%jI(o9!wQ!FySmS1ea58(ukG@ne?tJ`~~HB+FIR#m{Zy9q_(2C$X^@M*73 z`EOOumlT|CdY0C%sprc7E)(r>khC%nxMRd|Mf*|)J;iajPABs2e`Xpj zF$;Zod?jJo^=kz7uRd$O726^>P(qO&1~HT``TAy_};Hc3=;3A=#Lqqs;We zhOlkKWtD>Q0~t>#PC2`(gv^nTy(X8}fjYY~*`4VJRav#6FmdhvK zqqk-X`wUf^2FXGfU}ijF@?$8q8SdqP5d-K)2*}Bq(>lm{;*sxSl#5=fn4}DNl!b z0OvwwLsO4deR*PqWR35ZYjSjiLiN_#y;cPW+1XbJL=O|mq~7<@eOLz~0w5$3{aVl` zRp%5AS5#>x)=OLVbLVNm+C9Tzqrg|E6>nsdB&-d=7pJXN3xUAclG_a^Y?UQZ1BM{5 zuJ=!2qO-U6DSqcQBm^2L%89Ua;J+m|f`C>RV^+;Bm+&sR+U<&P72RR^IDFXHeA$|M zhH5_{IX}9L$6{g0^o5IffhDDJk(FmV1HDjBMX(LOTrGG)HkuOk+Ip^cUBD=iYQ)wwWvC5)b+HZLgWK~;4ZHIwfeBDX-m{7 z_GS3%FXPO7KC>5r$^`jHg8zxJ_Mu>^ea-4Rx!#=D$L_@`#rYhXATI)XDgjrKU>dWj zyMpmj1pkB|oJ|4ZP#RLTRvu)z9@FHLW@#(&Z*@@NNMgQr^72U0g$?D({5qaW*pvT* zW=fQM$GyEGAq%x#oprHPnB+Ph8W~G>h`HP)Bk|cRLL1_Xa5Gb+xo@gm?ActbVqA#g zb4cIRQNH1PgC--J|6VgU3I>p}kucwjhzOyBs8vlsh;uI@NzH9oEGwLyd7WCW9`!AT zH|0IuuBVR_?+UrQsE`Q={+n1k0MNfxsn*a{ft)LCZqkyggBV?MNK*YzRZ$_y5V4mP z;(1HQ?awb!HRUcw&U@^ZS6IDSIsl>E}Z2_a%Qw@E_TP~|Ll1``r(n+L87r30#n0ve55ezVa^phZ8v+NY+B1nIw% zNenV+FD^D^am~t5IXETX?~%J;;~lwdfk#sD<-Kvf@!MCxI-?RSFE|6ZQkC5 z*cLdu5m5bL_hI84?~k^`{;KWQ+jc{Ndt-Q1A}DInx&(t6!dHUOn#MZT1TirN{u>_^ z0Q)hd=o=8DM=C;NfS?#Ftgp6$kxXJ99}zfCk@0>O-3(S!szWL zD$fF58q^^=P8<=*leCJ zm)WkV76fN@89izOd~~tQab~)-vQQBKASZS+MC0e#gb3>C_eA>Q?V|d)!kR% z+Dt4qn;WEr*&1SsG4l)rzwjEl^6ne}IFzERghPs$pED!a1h?!Be(qW3JCTJ{?=9Hglkw@8jJCz#T?Oq_RZXHahLp;i<{qi zEv`s@e_XQAosNQ(aKHaZ+2%bGaK+X-z@eq$0%mntfSTQg`=?PCnUp}$Fvq1Ddj%Us z9RT3Xd$Qd{Qx%7j5@92iz|imqohi@V!*OnJZoP|*H&MehL+Ucw@^9k4T*Y!Fo~=6= zo~!2`qvYgJVl$k!_;&eUf|afFmQa224SY7)A4T{&b5V8G8fo!06TTq~^DtDF?1O8c zb=%3D1ll1e0A^JMu53Eo!GV8+#uW(e$m2ixkTsQ)Amtx`ZW9vRG`z*%>ON%`%n5#; zg_5G2(ykj1`zl%EQ^X;-l+MV#k4`Axw2B9{LSopM?*o00s=;uc4PaGTf-XBd&oQGY zS9nQ1@-_m9aVlF%ExR88M<3QWbUmtcTtk`a5<+{?DlcY24h@Rrk@MB;e!VK1Lp>v& zsi&@p+Ox7aQ96v7jsuIkKJ3RzQuZ!(^q)r{_-tLJBwGh`Hh-HYbS!T^;-)-FTNiqM zt4u*gtA;!XDnkI&@T_>R6)J#WPF)E1poj9eD=gb zqAgxyaDbt~r~$d6VifXWsZyp0UEMYpe(B#+U-*ggQbj##p7Z);@iQc}2$WE>z>K9{ zM1D4v*iHD(nh|}U`hy#_1Vba0d7{&tenN%b)+f}x)Gle5iu}zKj9Y5U`E8BU*yaD% z(-)QmzHcgI6kNv$nQ6eK2C$-kx89X02`8nMi;HiS$$+c<1<+QS(&@rEk3)OM%IvPC z>TkXa{`DR1ea5ea-#tdJfYJb1#E)91O-`Ze%q;vPi3cMYH>3=lZ&>0^C-T)kd{iMb zdn&`TfKO)7ItClgV^S~X^&QDJSCkul*FjkYvjtx!1=4}^9H^B*Uqf>rPmBEkaLk=- z5i+q9rjE{JExZcvu#lOc(m+gu!16CAIVq8H$cr+iXJ-5We6p)53?QDQW|2IFz}TRs z_t&@HQhH>5RjOU_`o0QY?)VHtA1~_y{7shVLes;6Z#(8fL_zf1o~h5CzA}V~=xQw? zB%BK3cYa=fBLp~Wn_%Iknj`N+Fxb|uRJm$XG~(*2Bg;bdm1|;cq0l3{M)CD? 
z0V1RZr@ju8fdrbG&UE3)niA$+?zB2SH4ajMUPvWtvLQu@)*taRQeK*t*!WNOZd!mp z0hKH>NBKaX#V|O^it50r7Q^_y+43*Y5TeaD8O!rbB4juLIRTa!V{fS@>!M)2xn5astOgq_>VFH_-B?Z<_ z=<_fsgkD%-BBp{JncQCsqzjW1<5loYeoXO%6y}>Vv*g!1g@PH)FzZEMw)b&@cUbQ8Ly=CeDRP-ut=Y+7HDw zJP3U3fvYfx1TQ{4{C$-U)$;JPzfgdpFH?tYm&p|%BKHcgf)MnXM0%aYEYoki39nORbO1Gt(MYR6f)s^SqppZ z0nm_giqt)r_CG>bpwoIdyf!LKNN#-J5K5Yk2Y-z}W=HWOme4t+k*QW4b6v&GWFWom z^w6Vod7Gz3(P~syH5P6`gILWi&$rL{tquWa3Jw^OUE9fN`(H4*J6ks52v-q^A}Kj5 zUvBWXfRSuZogtUB{`NYTc|QRzGI)RAo&KPxLIEGUi*GcZGnBvDDyRH#@rImN^5cS% zmR?UQFE39z*+0=|3Q zHvEyYbDU6DDnYX1Rzcte-Vz8~8ljTY43DA7`Gu_SN@ z$*q<7llgD?5DY$3JfHNQ7;XOP2J zpk`$**BIw@pE&x;heGO<;dPXS{H0YM@4s;!`mi9 zI{$&_Mq35~a<-sg8LHIa0A~a8=;u@7<9Ds58p7uf{e|gMp)WtC+qN2Xi9=-jn>+9> zOU`|lHeTlWqIBot{gZ3Hn{MT3;R>TDvFO4_pNB^QQ#F;x-NR^qBfU9%HVgE3Yy9!uCsMHaV0xZC!D~z{yc_R5fC4*q96cJ4; zYTr;SOJ{26gVxnjRZUckzKRQgia=(@9b4L6=s(0A%Y^p!QPJcm)P&y0pn(2!VykaK z+6%VBh*SkBymixxe|bQ#6`IjStrT6N^4m35MCImKtWJXROFhbfB|dvJDJl451T?;t zKkH0QBLW4a@%cLYNN`-1gNmpM(HP9pC?61-?BOF8MAJJRu6@kZ2DOCm{>lSD!}ppDXC7E~nG?4hcX}|A{uUrB=0Tfypd`(aVyW){&X=a;~19A>|l+hD$yO4i^3D5FD+Da2>#w#IwICdms>QJ?yjADDP(tT0j;lrb1g zmAP-BFXxK`irMjW(w^hqT;XtZnPqLv0+unKeatJw6bF(9oFzUG67}j62^bpK$iMl0 z4OC=foz(lVk4-PbU5>Y!gz)x5+49yQdD~m|bu24GDFm7VNhoY1aMm%_a{e5!1oH}k zqiX#13aAQ#Mm}t_AHF=O8pSGHcmO!BI;5Kjd8@?^^sAxI@%AWCtPu#cDlO5z*67m1 znueKLLyw$97@?Mw_t`uRAG#^-aT7xx*TkXFfvc;xIQuWh92aP7mSLu^z82c|cy&~tyg3I_v7WM4tUFcHC;2!=9WPsr zRYQ+Vp0w^3g^RdI5S^p~uO?P`YDb4j2f)!pOjVa39n}}*=f%Y0w`u&&7;;N$j>Z#1 zdvur2!AP-&UHBa{?ZY2(v*~^6lPKJCXy*erH@CN)!cy(yL!+4e$6ZxaTwEdMdR5wo z*^FLo^R@;RFs%cXiu4tZJlB|?x~+b(+;2x+>x1wXUkA{fpUM(ngkEWX^yUkfp*dHp z*1yqqKLm}h6gDGq9Mr^O^A0a&$%A?@JJM+4>+kM=LBpYQk;*`Hm(%CFH28alDW+=y zgh#vOOoAB>q=x08!Qr~gMkCqD714kw2*B-{I!1<7PnZkvybIYgC&cK_ZQN3_q+|X` z&`V)enDdgi%vTK~#naz%ilDadCaB1A@`go?$3XY8Xn^qhoUo7=%c!DsdxPuGHRg)& z=wWG5_($~dNUQ=*$#}RC7!%04>U`6?1X|kIn8sv$9CG#BwUsw3r=EhQ&t}l|_KD}@ z*9uL-%(lB+>bR=iRVfay(+lX}KFo`aRw2iNm%2~ng(#&sE=LIcBVDGF*~{`V>XGaW zegu0|7c-PQr92M%dY@@`CT0{qc{0%Qw^zvD@2a6AfJ6z=nYy^DO{%~iy5ZW-2y*-&JJw6r` zcDV3Q5R^AGDdz6QUL%tr6gXJ~!0(C0t@%Ne8u0Sj$0705+t@Gb6V(v+6JaJM?6`j` z)+GQMSEF8KSwpitfgOJ&%0a=b5h8vH7I8t^;T1^?qD4p9w`uL6EMCJKwXUloA%^J$ z6}XuKjwKu3oG1t?B1`(uwu(2%K2tjWhW?q2s{=v^@uu{b9_7Z7HhGT$Xt8vn$?N=M z?Ga}lW1S7pMu?K0k;i&p4S&P#!oRmzaw{ag!oF|5eyaJ$-;X!SnjO6+sWBRi^hFD-p~896&uqij)YNincZEE>l@_KkqJrpX4DMow)anRTsbH@&C0hI=l5B;d zs=891G}%iBke5W1#s5l0d`DrBv6z`cD}_c!tU*mvZoPp^OtzzZnYlpE=Y<3 zf!HF%+MQh1W&u|j36F&DOtoI(@1tenf6%$vSyIxNgW&npIseY*?9w7te@4V;;V>lL zKS=zww@VL9)FE6}DGIcg<%ZMkCr z`l=1gZw3NvEN7SGO$KT;)1HgxQcY$+P1nL%Q<7x(S)aFl=f$9oBJl-?Aob~vDbSuP z!C%kn4oNKjAthJ@IIQ@1r1e_gTgfpZ?iwo00C4|EARsqbS4Uh@i>B{KD3gc#QYpZn zSTaRFhV#iO3FC;0^24DsR9@R@2Gm*Ao8_xuAmd)5Hi8fWN0h@4RR zDAUew?F}M%h}Xr%zR=eg71AuO>Jl z4*7!+ZY6S7{xDRx$oKtF-{4Xa8oIY~zhk9LMY@KFf)*X2heh1v5CCQK(~WAx^xz6> zD5X)KeW*;+by-X4-<=%CALo)^;sKM0BvLgJtb08mgOnX!7HkllY#5kaXvA(CsOsTA zXCl`PEu~+{KdZ@1-dmG+q_rwp7|fe1mN<94Vlk&nw+AXabj1yEv4Pd)mb9?rC1R(G z_@3tCBt1U_uCT*^EHvQEkp$X9_ z5@{$i`8uw*-%WA8b98Mr8^?qIiaRQ^P$W6*XUQN(vEJ+>AF75u243DT-&@&EdT7c& zH65iAjjpKajTn7g>e!vw2(7z0$eHC;Es&a}*C4XIE0jmZ+*%kPPKZb^DK2~$kezCL zyWX9`M720wd<3sOTdd_A^232zHk z4uO=N@MmOjvG<;wE;0dr0?_Jw2Evhr3dy--lG%t*;ZQ=-(u(Pn7)Iv2APKo}$yn0N zi1I_q^|^{WCDAE`%3B=D9W|ZMz0TW+apwaT-RZQkd9e63yoQJuivg7ieRFh-qah0r z2gPF&0D$Fi*k(eI2#K|=Kg{K%501HD0OBGEiPHc$A1Nb02@$>@!Qt#svTG)Xtx5Sh zb9|ubEpr9&_wPD`D|74=By9>;b~mKW7$jp@cI$tB9lal_Z>lYR1+)P`^Q*M06D!x! 
zE<3)iK)G}0tlymRj*-Ht_xZL^`(SEq(F6jp&!*Z~l|VFC{t%=I;kriKzXNFL?;TJ)?AZ$|(hd!yi!`_X23sq>taanbEoIvAbqnd*(dXMs@#?<<+Jm4Q!cdnvyZfXZ z+H?@FDq&w+Jlxj{L>c4umyW`Be66#%0;xq8aJ`9~k&8Et`CI)#UCw+W13fB{mR)3o zl1q+>LzXT_Ag|1yZYE>AD|Ngj&7^x;KZB-wxs+$2&})|ClT{ynycPqYM#|28*Updm zP}jXGhPYF?4SBr`UU``^&~s*ocHY9ZflE6nb2s6Aj`Ul-1hz+Sf6r7fPbd(NOaBcx!GN0paX24P8V$&rd$HyojL z%hzGMOczxARkp_M+3MwbNla2zG5QTzy$pjVU*9hYs$JLr^iV?(+|eNa&vyV4(Rf$M zLN4H(V)`k7sbdV2l5L18LQC@NCTkB-{eKyqYN~qwt~M>_i%R; zn-4Xn4M#!D@80$%)@U%ngRkhG3XDem{A+=kl{kY9*n9^38H+d{(H@UgL~s*#bdxs< zuZ&w`VJDKnGCZJ|N}&duD>9tX!)v*Z{RZ>ao#2xE)Jv?vlhKre~`(^*$HYf^+7CbCD z9+n~z17f^%cwy6HX#aEJ*|pqr!)9(K%~&+K#I5KpfbVLhPQ#O|I?;-#Ig35p701zrOr`dRqrAOxIarHCkV)m!I7*(Z! z4}$?&!9eI2o-#faNs=0?hsVpCm*0V7V;^&5SsZw+-rZq{$GytavX4anf zxrq~lED22#Na6m7M(Jh%rXYOiQO!a}r`|RkMNb52H<^Dp{!wtIluLN%uC1;Ogy*1= za*F+|dXr2=%s_5lAA~-DJDARfOBIK$eaY@{)hNyT&*gLifQJ953wsF|lxA>-#pOPN zhPtKD4iL)8eBr5<<%+&`w@LnoC*=m3{}V7MZrDd#OubXIl=X<9 zbVY=`=IJm?9510rhOeh;JZq(`%8KB>q#r(FrOFxxOK|Xg^(?Popuay5*1?-T0;7Je zX(ow zI=oT7`~gq_fObrSwCP(a<#F{#=BHZx5jcLNZVbq{$#0}CBEH5UD}@wk`hkn7ZYbnT zzI60kawUqGR=zB7&APGJBW@G=5ZYz!7hb%%%moGdUM#QSGxSfC-e_)GWlb@PJ0Ntu zEzzbGwFMJDA<~&oPh4{FVpygK8dHCM{~ifK&E<5qt0T%wWaGo=G+LJOHr;8M)1zZ2 zjM2aA)AWJ?FT%uxsm=)WCEbN?Zn1r1yw5CP7;t_2~6$PjkR>Hj&&?UbkILX64*x4Z38xFY}5)NW33KPaF61i zdJ1K&EDDtZFj+5vKgP`ua;hrFN~Ly<8UedrF0}i3;qmx)jh$u0GJ`?3(^2dU@AU!^_qq9{fy!e`L+OJkUr(569#;_g@0GvET!dF5p8s z8dXecnfd&lv{K=ZO?s|?w+w@v@0+Sf>nNDhY}R}PI|i-$VmDD@B`_5M1 zHU*pT8Vq=p5Bk^p8r9E>`%m}0QCj6T3{9w58c1F%1OwE2`_`FQf6xt|-TH4X=Q0IP zERmx*EY@`=O)?F)Xn?^T-lU5##_?~G1U^an4+h)-deW4#_>^$%QvMfRdkf_(WF2Iz zA1BzdcT?alH|UP?e|_YwZ5PrQ3WkSY@RI~+iTuVHc7DQuhH?2 z!5tV4X9n}B{P%uyF_kx8f&XV3j91p8Y<_g#^9l%dvEDfuI@7s@_?6YcY>LFogUAUo-DE*#uS-%@{XxmCy=OWawpmE*MdIl-q|jd1ZK zk%2YCQ6VvG7x+i9hWab-=Yt4~*!FcO`1>;n(aY3L;j@z4dyBo*(KCz24OoV3z~Z(w zPVR2tc;c6Ni-W3J+nqUYhsxp^$j_hVLgb z6f?$(7H3nj21F_nNzcp=Gn>z;mDuBT(+5%i;&NdXN)fkE#nAB&$aM4$PZ)pR>+BJq z0RRBB&2qCI=9$r+x-x>zNF6A{?ws+*h`|-62hh)aQ>41j+^8QfWtUPi_4ZSWS|jCj zVTpq!F*iKU2UN8(XFk(DJ+LUil6ODvkAL7C9C)qL>Bno#v$C5aZX5yNunov{}3!DuhlzU2ht*i-Y}WalUHlz>zY2)c-}Lm8s}T@rEWWarr)IXi#2fZX=Y?F$X87 z*g7*^bU#Z2>bhV`-v5ZiP2tWo`8CJGgZ}?pu5Qz+GF$uJo373~42{$HSSBJgVLzp` z4ERA$M#_QOkMxHfPH;M^t3XXT0=T3n$_DrR36a!+gvNzL0hj)ZTE(gErO0K%V~?%2 zo`!^b?c12X+E?Y>(G-EZS*ns_RGSEcpf=71ZjA?`Y`TrSC4zMX zSN+dJb18-ykqd=r_Xr7%U*lT*iyrTU=3@nYZ}^(Y4>0Dg07tZP=2j3wdBArnljyAg zlmp|DWZIp3xBnD!qLBDb5oc(KZz}%*f}j^*JJeyc`1_UBw1>nRIxHa3BIpO1p(!-L zVt2mcZ5&-(zxdyI^Gwk7=*)la;11C7PwLM%aN~uG$@~DWsJmGSgKj=TZ|B?=*qgor z75x{Rxdy2@k8r~Ao5J)Vh$0X;vfy40{_>@g}FL zyZKzP#)}7-*ziwAyyM>doqfKwif1K3<pWCQ_#5DL8oPsL{)$cCsxq7EhCs(F_5l@3b zwu@h5q6|Q#j|bq!f8%1(Z27m<-vi*A^6K12LExlt96OsJfI4TX!qTaTE}_yGViK$w zJjJSf_wY}(7VXS0xLGpXi_(=?e!H`ke|oa><9N9#6tV;0B|hhaJpl$jw2{l+&c2kW9j-}g5dQ_B}@zD zDx0^wT+2wCN~8p}_Or=*cDOqjwC&YQNm?BFTh6sY#@{rno4qgi$=Qa*+i=~Awm{2> zpz@F(T2AfXPaGu)6l7!j$|57mX2TZ9K6?G5t-Rg&29d;^4)K&UOj#0gE{CfPR!6~n z_TcifA4=A+79mgc5LrHHFv!kOpUzTD*AFd2PBp`bt7k`}j^@Xz6l((DK7(edT$Kaw zyl|~hWMWC>DhoZfhtyL!A08*HI^^Ssyvq8Ij|dc~XA|{nbZ1&AJ0;yhON!P-PdD09rVN(|!(oft%y+xQ-!1esbog5S=L*gc^oL zWVz2d25_i&jsu96OkP(=$^Mnz{$mDSNKf316ns=le$GvAYv)J;w|@piKB@t9V{}T) ziRFgf+0Se-iGSy5Je_pB0_uV!aP3Mah*b|(=h&TQry~3gF;SbHmszJ&gHOxwMa&*q9l?JK-4YG~Wl(c- zZL{}&st#7b8cH`;we6qRmnO8B%FJtSxCPr)t7(Vlt7y@0YuBxs)N2=)s7@Q_vh~q5 z%}e1Az-5>tntVo~ar7i^K-i#*;6nLzO9cH_$36y3re3cJj8M@LM zES`$H-BX@L(t!SrHW#WmlMdpBx z!l}{ec-RG+)a%IKU#MwJ{^TEyaU+elfu{Un7A~~Nu`Z8^k+FOnvciwCgQY-}-ku#V z863PMEx(rnvKgom!8TejBPkfwrxsjCS1)Z2JG!nisyDe%wVSqyX>T&eE^lAH_y47c zRztQaiOYhSNYBp>OYw@FHQ6Qzk2;$PNmR~V-*7B~D3^p(ZIozGQAdD2<)PfkI7L0_ 
z-x`vxy#s@xlIKF0FAtn8KcQXnhkr$CJZxL-aYxJKgn_~8V`rCA~4j=!E!Gt zO%biFaU6xioH4{`r@ltTv3t}RC5`KY{e?Mt$%T+$Y^F=i-`GblGwtoUEwTIMj>zO< zef!V@Y5eMQOJZjUTvB&y#CdZWEi_oD`atL#o69POnxok@x{ZyM#?!ZhJgoAxII5R- z6Z}YudwAhq&O`RL)SbH1t5_tzVn|qgPQcn3Fvlk>>z@o-DkTQ>ax>rn>Q1Ee$dSYStc%s>u3I9Sf1OS)f^lzQK{(AwdK+aTLT3h{kh2A^Pn5mdfb(*{* z@HNz~2-Z%pp$j=ID7TlLn2iI9k0Q^({R#N)^UYhpV>b||rn~wfOyG=^!gr*>5YeSjN zw)m8pCMROFLPHaWKZsevGb+O-sV~DL^*uF!w|Uh>Hqt*(cW~ct#^~Nf`&8&jt+*pO zlJ>{6h)zc;;l$^Er`$0_C8!m@3c+{Khj)7y67Nqck*p6Oa;wLSF%2wUJMK=+wG*$& z5k`zoxoJ%05#3F>rBD$yYgSK|wx=48X^E_gRcsx1{qkRInQ_LSQL_p;6lR}ppji1S zZ->8t##0dTB$=+2h~qLeU(Y$xPi_(*SOgFaxzbgJUP;~rIdja|8T}r5mU^DT)!*U4{|2ck?Jp>#F=r3}jGTE>4hCX*?_HK|T zw0?;d0A;@W+jTUJA=yu*!s-UPor00n38(A8PhZj z(i7n!dalTLpBHeS>))x#Y@R>)zh={a+1>`QpjtB1rgbO7&+?LYo3usrn%7zERUZdP zu3v3kn`~CpSOWk=&g}cs)lltEx3?WRwoTKD3p~q5a(^l^H)*Xc7rUj$&5$lWt$-Gj z@$u}To)&&}Kg1~Aw`w$|sp~L$;?hIyW3|}0hMm-Bl3j;IH(ea9FROeu%KO^+6|fNN zpuCYI6d>FQhSz842&gKZMm~Y>IpIb9dzU-c{t0EF4t;w9`GFUU<4_BN(8+^nPIlk< z(hn6L8=uXsPP6w5HPF|m%$hz6*o5wIM&&qOaS7P((8+r`Q)}O(l|3!l$rsA^%W-Fs zMK1Cj2`3;F%0jOKPUzn~9Ek$}ct!G_Hgg)M&)CqLz`g|~K{a~1Jep%$8`hG3o5pG< zfeIIro4i!P$%IR*yz!K^we_68i}}HshN<5%h?B#7z(Xwrw>X*h?$qb}(+mko2A={( z^Uros?QGWgUI86I(1i(TmEsqq6=>+Lsp9X>M!KN-ZyV}2YBCPm*^yL!+K44UnxuOE zdz}axz3*j6E*58}|B2g!V}L`^-_}L_>3O!3om1{@ZEbGPOHH{@p&9NfR^9P~+u@zE z5w5{8s>ZDZ(-#+ER!$K^Bk#0_)r|UN^BA%h`B9>4;kJONG;XnNo&@z zx%4PGN#;5p7)HNWal7z-NS3*-&_Pdg-6t4}eD0dh?$5?&xq8#FGKLe8@`W53^G^Ti z?I$tq^6NL~X!OAP5K|-u7nPsH)eB<{pfu9srJ6`9f%X;?Z(|%4%02)fTu6^3rjPQ= zsZ&4n{N15ex(x~yD{;YGT77&ixWoDO(tvkvazXX^NAg1Q{9@v}LYsE81n~pCRAe1u z|2OdBsxVVf&5`3Ub^cG3O1uUjF*b zdlYNvUM&CGalb=VQIV;YG1H`Ti+`_4f3X*L{ECF1(7g6LSM_fk&a6Uf{=tf)gSMMl z8qaN@9)X1ZZScdJm?bm004b^UZWJS-ujtA7LjQgFd%Wo@aeUTv z@q2iL%C_FOz2CGVF-Q*wf2g}ZnCk24g`1m&px_oHP5ryJ9?9jUbl|`kV{^Sb%2iMLOI>YVbuc zrWi0RvEx6#0{RKS#Nnh=Mm-;NFQTiK<@rS(0uhZYz%H#yIDCgn0oVbOj=)7J-1WMc z_P9)S{EEbW9_!%>wTvNU$lKaGGN?F!$$JEYBt)YVMP$CbY&ct)E!W;vtr^Jw5FzbB zjlx`@aAZ;D7FImP2$?{U@3CZtmoDAB3xw_TRsVJanc=iCsO0~w4h3Lv>liLBo~arR znt%0!$3ngYsBRw3QDb%a4G$rfp#}dLded$(@X8cyE`#sFFx4ftT_)Z_C0ot| zlBkrmr9yZ>2*daXeth(?;85Y%fw@lT9W0Sg2imUV2p|SbC}R%8qy6^rrcHv-I^xO# zhf9fvy#!S%n))_w>KPlRO_$QKa^4z0f2#dxvmmt%fOXGZZC(#Osrwk%0Gwn>JVPU+ z^lX|eUFNA^COTiP42qtxzT&fxksbb>J&8bG8GIrNeKv+b{-6*m0VmR&EppBPG0DPs z#EbBNS3naGEtGrOhF!Z*xDa3D^*Rwixp*saqGbLNd@h=SXD_P@J}-1MWm!F`3!Y~!mrF7788PYTveOEe}g zCz{P5865Z#pVJ@Xo&k6Wh*T(2NPBNUiOMv4h}JZgC-4<0MIy)p_KI+cxI`+#!JHl5 z-Ba<|1Ur1vd$FAc1(RBC7SbSOBqb8V=*PTv&*~#gd^a~kF+5L>Bc8};OR=Hb$D(LT z{X_3pK!p%Y`)6+TU*&_#Mf66eR9#bTC!d0!le(D^-=d=a*5DkDo{@egCGC zW24thT5PEk$s%^|rde|v15kjwvKTl>@gKZ3%0xoZ)A-@t+(fw15%d&n>VI8;8M2HY zdH*TF;{n+DP>%l`5C<3DR5~mW7P;$oxR+}5)OS;k0^?X8{6(idkAd0i?j^q$uZiEJ zTzZg`V#h^lP&zHaeoFMwnVinUpD)5Wu?Nv4G7@^5Lvq>FBZ;2cPBCr!8a2?p?Y`uw z_U0Q~$`N=pMJ7D9fsxGr79B~0NO889~dkf;E{ zqin;AqL1d!Tn2JdqEU9e3EHVOHOJ#>`35ZDU&vT#U&CPGJ z^H)G%0PYsQSsmBvHYepVe_ADAb@g*xoCEWuA=}2K;s8Q4KwetvOC{5d;~UHjMWJB> z8PZTy3&lWqLFNY{i;Wl>lPS!Jr_l%=1LP!hG1FQCxC?p5={%cAo1|~%@6{5@sYDQf zkb%|b#~lv7?q`rGPS-Fkc55Mhn>#td{EBv;t<^|(`&$>*2^ujI$~yW_4rR5*Zg0=v zF(qn2#U4_FqQ{RDdzqS*U4x_m1Z{Q0G8FDYX0EKUhrBW#X{Vit0%2!GItC zM4vbi4D)*@g&f?u#KJSaD>NQ2B{=G|f|VqJ3qTQ~6ZF$Z!njI$g>R)A{}B-lyWj+$ zv~xXg;?GEZk9*c>Z5F2Hq(c@tzYaPqYssSmswhoG5iNUa)!`&(dtdgX5wn|Mm4BJrSeAqqS`d30Gm+!toVt(R!W$82-8A~| zsL|PWGmfXiY1&{J0iy*MP=<;aZ~lB-(j&2iP{Y#zfLiRrF)SHDC7?TOH@P%$m`wL; zg)jgrsw?jMO?Go8DQDc_i8P>xp~mKPm6$nR{H4T86Wav_yV^JKCe%;|(ON_VS45tD z=!>&TPQyj+;X{gGBv2xYKvOBZUR7R}MtA}mn3dHhL{(#*xmG2qXF)*3Q#z?GXT-~3 
zA2gb3n4RwNEj9?n&U$8ET#mt4V}8ooZeG3y&)2Og_;I-fu?BQn^j3pw?zANzG;#)bVY9b_WEN>bP?*e8dueGH&>yEv3|k z#@yq3sf3v?L5-6_nqIuiG2ir6Q^6Lf)Cm>#GVsu$a74K?Y;378EdU4*?mviL02n>o zlKbj$zZ4m?-ix{Xaq)5tt?88S&lp9WWBUyd<@ZmUc8#MFX{KF?Jx{5Wl3l^q3Q@uU zt!N}XecK4M#E;&6dt(Yk)JoMSG%>CnV#+8*mWTngSytACWj4*EKTennqAT zwpRvHcJFTB!SXW80InjO2)yzZHXj@b*X)8dmC8Mpg>wZ5%w(cMXxQ@V&#pLMxM|^q zN{lbuZdYW}>0#xOO#cg&N9o#G;aNEpde4@zTw`snMO_j;6U$)JWfFLG`wEB!g4Ts> zR8Q43C{V~!UP0hc0RSPxQ)P-+w#he&mJnDV*da-^9N704UxK9+fAahtYGRu}Wz@ce zcDyZ^j3Pdlyi0L$t^UEZLma%^LE%-|vH9G+6hO7Wq42S#-K?Do7Wf%EH|9=atDu0b zY?z6j(1jo*23NeAys(Xvj=z)AvUlo|m+Sc1S@Fy1eI-xHdG!?qOZDSAtt^LQVT9BU zEJH~bC6TCkpd^Yim_g<@B9k=X5`(rf+Vse^rGG@xMA$xDWYJ9c`4c|XIg^?&KxhVbYtzN*-kazE>6*KOYuN&B z8jam$GLtfVCo7;+r&-={O-!kr+#W9%@(^!I417|KBZc$+^!j-5Y;bg(n*w* zgzV3E!L_O(AtA#z12f^-B2y5eB0)vCjR*!$F7Vcg%NSDoc56#0V3k)u(lGC`K~uIS zg$uP$M-FAtFE6{iIr*c^z5OPFJVvwBF(&HMFELENa?;D(45LvGj;YE`zpK^9z=e{J zWN?CeP6Gvyjn+&xz!W`epUb&s=^&{e&vsMLXf(FE2-wvYtdgq(S@M`=cd?5LBJ>6< zMQ(fRO?SB8NI>WHiC_Gvem`#F0_fR$b>t^3LDVwMxZR{G;_L2=3jq-C@E z{0OVyYq6JfOPnJO_LL)?lyVEy5EQG0#E+Vln&B93AdJP7NkhzltXDwl0JOKK7Inmp z!u)BqZCB}MN*S-pbDRpOQi*wS8Y)v970pNs(Fh5g+#K4mP#5vyE}m)&?+y%xI9kNZtHi9ZPUTeEJWhv>FTDvS0@?VIZ#uv6}LoYffg1DNdJ+L zfXWD*4LqE@O*tm59Ls-yGeAH_dpiX_)d~p=PaA9Xj1tqfg9-mvKqe5>#R6E%RbD9I zv3RE!eTPv`0;uEV0?*)!Qc{;gk07*;P@72JvCEmOsmV{lX64eZ^i*G6>xRQ_EMQqXjNrevuaw zYZRpKW3JJl4gr>BtA1e#Ztx@-3IF-P4+2a5p{2Q$yGv9;Mcu=qCN+q>^-9vRct9pl zA*P?vA#W)+tK!_Q3}(_FWwUEr%cjgfIl1mhIB71A)3 z5dZ$(QZ%0u3R?NbXA%kwPeOgeqxSFZM)?SyeYy92ea5d9f9)EZ{_(DE)cVbqUaN>#ef+Z!?pb zs595E%zA0HbSAr&k@Y2=5M0F*#tnLoJ+2~CKT6EidwD!FVF}|f=Dy1^*MMdq!midL z0B%aE6rS5q%8op^{S$X+ZFQ9k;z&zG-WMi%4H4viSt&D;0=Ba!z1TTAL2m{EItyL0 zqq2i$G?C|bLo>*F{>WhaD@PaGoUGDQX(Kwo1M z3EbmOcHs1*?XgFhu()GY&9QTsp?D>t3o|4v=o1aHfg{w{ZLi0gGW80Y3zZBLwafe0 z{?2jy9WRd+fqq^6z^178JK+r;`&SjPti*rW7*sqivN1w;^@5+xTfcNm1Ay1~Qc4$0 zLHPB#U$=2q=;t%rvfU4`bU56$=q3syaKR@_M*4&v>2rRzP3xcbFGFPDsr-@{BQ@qJ(h$oq~tcBVJDzvut)(4&Fz2-T)_^;i>XDYe&OlvK$6 z&b07Ksl>+?bS)IDZ0)G$u9kHlz92{Ts4O1_}@n>VX26?4SAjzAl{yWMwmiGctm(9O@(qp1Hhs-yOd& zQFz-l=7h}_qxS7`1}~~ixJ2f%9)Nn1U{XiyEUij()OLe$q!s_G zLfl)N29J~RsLnbxL8v zh}Kzz2w@P^S{FW`@Zx~flpU2XG(SC)AwvG|V$Fs*lGKVuH91#{rB!fUVorQ)G^HiV zq2jjL`^4K-^`?c1%wrftaI5BZZk{w?;;Hj7_9Q{_~ zK>cwG^gWKp!PlQDU0vmH|9D}&IO^zm?ft)8jtUsD{+K-jCQh*+p}*z&12_}?FYi<1 z$I<`G;{1KgTwLVijpX`;^WwMieQ}Kc zv2+!FO}Afrj2hh?(%s#R9^DPn-4Y@QjP7ovySq_oq)SRdLXZ*=B_#Kb=l$(JxIX8; zzdQFiS0MSj&d#h8<=IMp8e<@2Kwa8Z@{^E9BaI$qK(ZVV;NY6~OkqHA*w8Uy3cf56 z3VWO6y|8MFb24E3L6Vt`vGUS4FbW5?qT~N*xg3u~m2` ztTiTcnEhMnq)kp)D&)!S@?%uIz7RS&i?SmK5g~^b<7(j!mFr+4j#a=dG@cCn`CGY8 zK;tnD&WxI84tNOWZZ%=U-3%$-+VX>Wr=Rz_A=B(HfLx)8qz}4klBk1k3mjXr6aMi; zN_ohBbeQz)2TUlsl{1ssDL+J5raiHZe}h{(BC~&+@y6)phzUz*r}1$>N2kTYKjrwr zFj2rji78PV*V|I{eI4-cLZt6{@B@w8+#R;~!*aR~@A3mg4`6#){#K1J`o4Z& zv~qjNwS%`d2q@jER#=J{JV+S~v2sUw_9*SnfU(sVKn?&5i8`fv2G>yMB$n&wT)bCC zj$e|qxlP})v?0W5t~+NayFWOe>$zSquFLZXAw|@DR|*UeIDM|xRB^9D@tSQ^@a~(y z<{*VC{LD}IjPoyKJ#FLY6roRshYK-MyEv%Z8KaShH9YF@Aix{}J-=&P-X>=ZB8v9q z@9o2r+7CO8=ZeTf9AchdHMYS!zF+S0_2Fah^Q&{olP%jFXS}r>BYC;a-OvNN0Y@c6 z4(Y{Y5)mImJ>$D9uJ;a`l+Im_KVhciu>(5@7DfLtG@ar5EthanY zF=g0(4(0sf@5)6HsEDC}pYoZG)m_D()EaM0Hej&E(A|h@eCcj)dCy$m=9v z23qKzy#^XJ*{L*PeeYgjo6apl5mGmo!DQQDj^cdk?-IolTOxy@BKx;7`e{{vfxd%& zKJj_YQ~7d$NqOpb*F>OM88S)5M(a5Gs9203+W$DTwoq(Z1cwHR=7ntPlaA~}PkW2! 
z8VA^?f79pzs~(L5$iHIH-(u#uwY1;j-)3h1|^sU4<~hl}!|&e%}vz89-$ zsKMz|(D!ZgGc}*`Q+m7D!6txm#d}xN6+nCq^J1aQt?E%_!=wso&D&)tv5l42hDcHl zIkYA#^(3aD5nf4f+a6CZ&`a$OehkpI5wY6Z+0^PjI7W$N6tXSraeSVQPbs&d{g$`o zcdYqTr)8UN6$uz#WI3xHkge7O(kSw!<}~D(|Hq6mge#}7&DChsWrV2DeJ+WaJejqR zi$5VLv{038hR2EJacthOYkxKtDK@;&RDF2;JC!@CQD$RLYTu9tx=k@^(XZhpgiA5n}vPog{G| z!E)1OdB4Zw^jf9=^lFTd{u)nK`~YhUIavs{3Og^Yk{R^Xhla_yRp3FIhboy!`HdPq z$$!f`&@;~2{pBoy1T~UZwz@j{*O=R=2#t8(zuX}^06H440|%)lFMpvM&t)8!@>5dG z1VX5hHc$u8^Aey()CMA^bgn+2WLh_Y{$W)ch;;GlF9YJCb>2@QRU<9*SFpHi(JBlK zPuBPEl0BqHf zeny{pCZ6?Bn3`b#A%z&If`%xIk5tf&L(GN+swuwz!DIx+Ae^rG=fo0+kE8ODe)i}P z=wqIGTl7^>#m{3U(a^_o*Al2O9Lt@rAwqBY0>}q|E)wlkRqTE#UUI`(Hic+t_A zx8{hPrs`6~D3)Q}3tyF30E2K~kNN!|a6Nxw(f&Oe-WOcWw`!&udvDq+5Xq2ew8PFW zp6i}B)s}N1D~oLCJn6E$c={dtGu;EJDlR6A8xGdVgK~T>SIfn4(MAQD`g}E`m99@p zSp-g4@3@)G)kAr`YPjiY#?{zO8x9SEyOBP<@@bu`tMKjdyGU2L78PlkGMT)=XxgSX zK7xf69g&?V)A#{_?E`ec4y>869ODd)H;QO4fS~Y*wFVnC2_-zmjK-_$#1%Gx_Y?}! zX=Wyaeu;MXkzHJFE2a8-jMAX3b3$8Q6$?$ikR!|CjQZr6Y~iKZ8s}QpECYO49ntzm z?pFg(O){^=zL7})G7SI}s^Z0s2S7`s5%n!Yri%}w)zRJ`qy1MeuE@&k^yb*>#}^(< z%;aTvw-8z3jT)uWtYus0kY^uC9Dvza&yz|_7C)WUM14J$6*4C|6fiynq`}A+W$YE_ zC*&d}iLy7fx&-0BlE*G>OMwdCI>c^g{HOX%fM?r8!#!3!Qk+iIG$A9&bTfq9*$QX&et4#xqw2PbH4^~un=6aI(eg*W=&;i*|-b2EywP*B( zkVq)(LJRR@bQK+U6vz;60}&TP2qYNHNH$e$|6Pd?l46t|4tn*CFYRN|nX;Pzb5Clt`~qt$n_%p0{TI_? zb=$kea0;n}C{CDC&Y3wC01_9HnK?Q4QlVV9ZEL@FLvxjkN}khod(3axHGcIjy z{ZAon9TRs++O~H|x}$`~E6e^X4zP9St?}$K^N^fmy$lx8(&3QhE1t6zqb;ZMos_aW z*`^Z8SoY>H<|1+kpbTKGf~NasHl>qQy5`mG;gF2R?fp2#;}owsa!CaZSjz&oTx~Op zyOT5hL>m+(_&3z0K(6ftPZ5+%{c!Ig?X`Vc+wf|@>ac}?5mv@~Z&qfgt86&o*ANH$}? za{FPscGczC7&oS@T2T?IBP6grTg6th^6$Q`M!F^2E|6?t>|Q#?!7 zn+DXJEpIh2U17mHyo~3ERQ<66p}v`or=TZs`lh=)t~p$Kbc*=WE!mSo69iGS2H1w3 z-ZKier94aez@yw7MrA}WXy?&D@TEena9u9U3Lf>@qE5r<8xiZyC?@@RI-?n$r_wmu z)`t*!D)4byZ+5X*?!DIKY{aMVS?RD zL=ZLm#K#jF1>D1_r0)@6;X?W1)66!$f{4HUg0SVIIbYuhmI!nn!bO_ksC^ti1x|_ zNZ%4#hT3w2OcZWqD_4(1>9)}|d&!8ep^^V)5b%>a z(@rLg-XfPSgBbiJCJTDx(XsV2uS!a9=4S?okw_jlqnwzPYaHuic`;vsFpvAM56FZ} z$Wc0la#Ovs_dX^GzYN(@r?nY~VAJ|oB-ouNHTfc#3gq8%E<}wN0@{4O%`%9}MWk>B zA3Xol?=SdU9t3-ZMcm}oeExFe#-LanPx=i!je{Yt>R78X8t!gSWC376EP9Nu2ibrV zL7o^TST!vdL=hyktD^-Gn~C9`IyjXN99zpp*{q!w*xMU{wTDN5!K;jZ)$?T0#+f6` zshrRCld@uEdZ0l*bWrQ5vxE0mLFRg|gV8cQo>GKnz6!Nc5(tQk9!G=;m=T*8^DR4$ zNJyS)K2cc7OG%I-yK-l$4mXJD9Sfi%WDqo#UI2vsp}SvDqHjql{X!~290I##_z@U9 zHI^Nl_ua(2GS%bz#nTt~Yv9F#A2Yu~Q+;PNG@RTSt37j}&pC3X>jo+xR@0se9{e(m zs2T_He`OyvzJj9|4sA~vKlzMEKuoAK-`!Ri@7=l$Kw)Qxt${ITUu%rw6PEF%DN!4C zNT)B55$Z|NNc?E9VsvBbqO>MiOY54i1%on`8PdaJKRX~=3VPwC{UOpg>5nsHROyCb z3<5_p3Fo=E_Jew536R8Ld*EmiI+z@Y7eh40OTQO(BioozDWa7!_=`Kp^!U82;(_@4 zrJBL#6Qs)HT2mh5jI!nzK)s=ugUs6M<&B{>1Z-4oL<_XF1KeS8iJ&lqa(-r8t|+qP zfT$0>8;(`LhW;gOW7zteSeD`&qGWh3u1{)B4NAtlFLb%JGlp;ymUfiktt0pJ&x4hw zrg`*_HB5{C`cxF!4Dzv1Ui&O{wvzN{8~QSENfuS?#imcr?S;eHJ5TIrSY7QajRLZ~ zs09qex3E&al?3nk2H2C6?WcaIIlRlC{@F2@l>rK1rA30-#y0|#!mUiNH!EqBgN z{fn{~DaP8`vj%L!F!Q|`w}wCkSIZ0*LlcX=Vu8)f`7lzmDJw+ca$dzkOM?gj*i#9`byoJGrWRcLEbgCrkdM_gu?e%6;g)_Um$DZywQ zo408`V<1-d-^q+tLoa{^0q6pMU6{)&?i3i|?sA0ptN+aSZXIO$+lg?Z2Lb%2r;r5+ z^VS^>C{nue;BIVeB2w-zK5k^9g)CHfAeBN%ajO>d-oNxhRPM2hkq4~CSz;ync6v6} z4DG@#4W-wH<6u8c*g~}Gsv+&!bKDq!*q$^kp6o}&YEcJ*FewcYWDMa^NV5c!tjcQa zbED|2p=#+YAfUg^#khMgJnaGn1YqM!5>(ld*@KAB}`7G9Fl~^Hs z)Pz_vVt!_Ir_S~^F;q0Ot*r627pNePx+c970E45QE)l(3w`j)uVr+=2eZiDg{m49# z4i~>kT}E%rsi!Lkt6M6S9Zw4|xBY8x0+|vG03gFVzqg!j@$9)bFndqVm~d*bOnB}Y z1JsSM>0HP_Ie6j!eFLClmN+pNdw6LValm`Idw?TD-gCeXU>_2d!YIb7k^X0tSG*cA zMs&KB2oXm>;1z0b3Z#i>(*k%z!dI+BND_U}*1s+Es}BoUbbCE*NrYH79Cn~R{)VvV 
z$!vrKU!*PQeCoN@fsc}5$CgNFRV40Eqpuc_2@V5jAQ?#Hgwx-N@1^4+eXvlambZ?Z zfAy-Cj8Sv^0PELpV|$Ko@cie1TiymYYYm38s%tiSGF%Rf4c^?4`#ria&3*dDG44~o zxL9#CN&&s=mlL-SK%0lzMJH(Y6cw>vR}$XoXw{~2s?!u(q9Ldu{^i!-oeG==QjsH; z@Z#kyDUT*h%gryTm~w)`SG@>R$DlD$5HSp8mDNr~NHHy8kRZIS0NvYnul$5F(GGSDUg5TlIf+|!`H!PL0GTy!KA z8WA@rKFJFpxIIK7g2!Pqw~oV$Eran9TRys6d(kqaqYq7vq;|%X8Y3{zCT(!5Mv__Q!OJxd%M_b zHq=U41cSBaA5GDYrZuLIH=u(-dTC?Y2m?7u zu7B#Q*{-W=xwCR~fKn`n6$p<@JI+5D^}Z5f(}kxF5?7az2mjOP<5=-5GqO4QL}MBm z+6bFG6BUZ z|EY(5LJ1Y5^?PEWcpAhFUClf|RS_dS)&%SNkTyyb-bkLIk1J%0E?sM$upUAB&twBi zvYp}&=ZB_ie&q8qyV|DLEwrqsZgUUSJkix=88s0-K2h&+hEYk1QV>Gj9x#j|*pnD6 z#<#ge^@*Hq%Jud>DIaZzfV<$%b(U^Me{`16^|?AM^!wFQL%KdIiB=|^T0Hc_h!7;f zfYw}h@bFTD&u5>6JhU8B4+-pKo*XzcbEq9rbZ~K^g(i;XrW*YB zzd8*!W2=H2*4quL#@999kdf5h-o*XI<@!Ti_S&SwxBLg~{DjHy?bOq%>0D^_f|WW} zl0@Lvfz)wah{t7JP@sziuY_j7`~Fn;)#a30a)i?z!H6}K^4*|LZD{q@&u50x(FNJ{ z#tzahI}W6lFuSr#4b1R!dz5jtd8^ME52atlDZU5xhx5feku6#`9M+ScWhN99sCkxl zu^Xqq8<}h`;8iYYW?K>u{J8L0uuHYosgmNsVQ6K~{`Qmg&%6-)Ig9bPlnPRQ2?Mz<;h!kBH4KDc4uX1Rl1Adm+!||H|*5!w*m;L)c|1hsMvNHt#64( zkzrycRbJmah~yj(5?HJl;Q@T}=033C+KxJN8AE50nlu?_@=Cgnu5~xhzJ(0F#M;oB zb_?m(leai{m*h^Z#B;-)X0eSfdscWhZYG3HDzqInDsk$JEpU!~FNgIG0{ZUdJICZB z=Dko*wXs||2VyvprDK#l=5Z>2Ainf|17yyrhg&r|btZj(g9aaUxU}(246>!(W#kJW z_^xw8Sog(-ETx29xDX16gYHLV`7Wx4yK_XZHKY zKRljBHO%YULdRnPFhsYcif{*1#iUL~Gqnqix9ZY8Eb{qXk%5PaxU zU1MI=Y!&Jm?;_M`iSUGqZLB8XLRdEgY(Em5Mbl>qD6w0Di)OF=YIG$9IwRNnD$^Ui z1Z&?H;qAeyvpGCmBF)LrbqcW6QHwU!*VZQs`N}r&tAp zZJ(;}+UnWsn%^NI+YxhLi1+C0JQGGu>0q-S;i)yJeIg;xqI~6cNk9O}{_lMk2_=wt z(1%B`p&6H)RTX-oX2exXz4s5F#60QF33z7`@MCQmq%7$4i9F;@mv5*-c*n&zss0A1 zKPdr!gZH|xl5Ak+lMU0j8UiXziksoNEn|P7A#7;Nnj$Q|CIsYWY`F#OdyO5ybCEZ5 zlk%0WuB^=h{L_E94WcLe6 zW7DgyU}a~?@M=p?J*bYh_VxU;M08BPBnN?=+4sB=us%XHUpK8TH#b{j*h__6;53E5 z9!kYcJ*tInSMXM@(%CuWDLG#v!EHN}VL3X+2p3fuv1BGqU*8NlP38%dO{s#FtwW8! zgv*5Trr2G{R@Rn*y%2pOUjb5%i0t^4u_9w{O!C4Pnh-NSmYrP#>)PHf28^@j z^xFtF6PIC8U=|5%uHC#r!h=EfA7+=i4hA-or3|TpOuij9c$qPXc8My!S>tHz(|PHy zy#$3459uZN-bJDdC?HV)_)C0ml~s!@>VvnlNEy=N3!t}9bU+>4 zSl6yaDW8+&hVbWiC}JuuzRs8tDGP^LrJ#nst)4Td?JtF-Qjs9VbQ|f9J)5}dWr?`P z9aO=S#5&^ly;S3knZ5-e)zoA|*kNm*k6dn4HsEVyd<6JmnChC00reFc*jEvgrtdU0 zU=U9inR36Hcu7EDbWJ$+UV$gtu8IAZ7up~4H*F2vG?~h!j+$v4{)sXAxM;(GP(h7E zT*fqO1TpD5UK$mQ+9<((7nn*MobhGH|{YR`U_W<TIzmI~Wfi)S9<)a!l-L1E?=OdY$0?>I_3>Yw7O*G5FyjQ9TywG;drw2$P`X&nuz&Kok^b#vBW~=pXvX%%P&Q?OevM>hbTUeF`~7U)r@}QDue!*IKe|N??5Ls=8^)`>r*V^{H%^Lsdv1UCEpE)8?$n>|h z!%%n*1qXvZ#ge*6iC;X=xhYLUcAXIr0f#%=n-xVSI=AdFmsRJ91$uaQX0V~-3f280vC_1vFdIE!7s0@+& zb$RShGe9NEZ~9!X0c1jJPIN%D>JyeI zZ%aiwQkY6wrihsK4^;!pb!fNMa&4x*oHpBm-s-8h&c8a?SuPLXlqv9rwnrZ!663uS zjX`tvoHgCxN+}D1>#ORdl?{MAy34;x9ycdH`m*A|SMN9(P;MqJhJGFh;ZJ38OC6MA z1_-A!lw{r(Bu4?KNp6B42a>Px)kkRvq`_`3C@*n*@g zOXV-v@Oxs3hTpZUNNqNe3V+SMWUug-*?cB=rVikQ?a%H?fk``p2M|JiU;Tnw?OmSp zl=A1sLelo|;;>kAE!2C`j(3eShe# zh?myA4ZL`ut}gX^^u3l^6*gN30BwE0(18IeJJPYgWnb=40~B4N0iFcoK&wq$-O+}% zX@mgUKu8AZ1lr5#RfriMO{hjp8$SQ_pn9uXMJ&UprG6a_iP<&S8Tjg zKA+ilO^(LDyE@VPfx3VAwxbzC{@JHzk%v6^ZqusXOL4<=V7I}OD99lf#n>pi{@oEk z6_?g3X2#6q`w;Ipp|TEjsV7F4BGu%#+1&Z*PP~aM-`-^dkgWsA z|7BstQ7_|Wa`wX(1c&X$tI{wr1LodtvvTYJc!b}or> zb(d|QEU{kN?kXweR*M8Aap_u`|MBB-n)5`0cbhoEONE93Xafq|jXd_pVpJ7^Gnh0? 
zpE5_?xBpgBxg869?OM$2K(GWLB!{Y^rbb2L;2TZbc}@=vfX6Jmei$;Hzd@y(el;V( zilp!bF|8RRLdLO3LLDI)h?a;bl`yMFPEN^{#KTWJ;5bfBuwxld6l=}1Ag_*e3S1ld zj=m3)iCs!VtFCqr4fT=o{5Q^vLkvm&25~aK)svOMz(E2Sgj2tUJnd5v^kInQIhfFyLHi( zoos(g1uX>#Q}_dbo8_WeNIaNhQ2i7hAR)xc&8$UIhPjm0YA7o&(W!VF5=@>@Um;)j z5k;`(n7r)Q`4@Wqb1S;G&;5QS7*7#~ZHFHCm6T9SJ%fv1!I=OAhGyVKk`)5&EHVvJ z%BBd_md2siqlY>B@6G+X1a{duVOTA8-DmG3@PF*GT>v`68D~c-6g{r;@U1WyffNx0 z0xiOBayB}nO!~iTm5hJrdi+wxToJPRu`B_x2o@5X1wfdp1(e2xK106kPZ|LF$%biw zV8RSXH6ea9gHDsxirVis(MjijZPSn`6Wkt?ZR?g{`glaaMfb+=)FLGXL1CS|-Gv{c z7(8Es{{E30$%99#ReS^Bfd|DtTaD@%a~$E5Hh(=L<3t^6#2Dug>iK57CHFe4l2D<+ zHEhUIJ5uSn`lwz{q$Z>AM55CZ$?YHk z@&7{xThP91=%}J834xW?^2#A-_{n87h^68PglpOK?6GS;h_iwLo4vRnk}OBb`M&xL zWLK@3D}cvyj?~~;ljhUDD4Y^j2T{Jxmjv%3Ip!wKeARH`jd{O3o!>D)b-qwDXE(jm zN{~OPSPyxKtgH)=S^D4&BavkIWK=L^ML_hQ|KB3K+rGA|r_o$cD1@i_5&m2aG{R0V z$8dk9TT~c;FM6tkl#5F|#wx1@v)?ZUOw8Lf}>wJ6j@mZ8%$tV+5B?AxAvL1h1w^$gl(7B&oOh;xOKprL8C6 zZ5+f}k@?}G02D3ngOV}H9^%YfDYLh`@?Ueh4JVR}+QIF^c~xmR1tT(rGRtp`pp0q= zU;3|^Hs7FmE4r`H%YB6ifx75Du;2K;<9NTLtc8emLQnMUmNU}CJ#GFN1Gzet>~bDk zgA`1zoz&jB8doCSif!*t?>}}9?1n26Z%gyj?eo!d|GgHWJU^wP0T4C=l-;!vKNn{- z@9Kk?%ECIcvG^t0f3`@q3IEJ{)x_~n>@BR3#U>Lzk!AdosB+c6wS95E2F{O8UH^`| zdgA+yC>N`C&dZST{_(&ViDYN5%J(fqP678rY zip*{>UfF{=;n$M_S8-FbUs|5W2-|GRBPpN%?Y{7f3H>wtj^3%>_LyJchUfTe?io5U-@a;COYXMDWj?GH3X+ z2F&8aTp*<5pYg%%WAZZz2NNIh(C(+x>K8z8yS$~pruD^53tn_?9pPBdZ_|*eQ|LZH zNC+2M4RaU~0N94OL4Xk~0YTScJ1XS+=n8hDA|1ICL=_q`0nQP3DdLm@K4_AYG3Qo2HM2@wp;}vLP zAY&#pxP3&9hF&&+Z(=T44LD(WsLNPDQ5^nQ3@d(b7{sRQX#3C?kAl24(t8n9uh$Hz4s?YSCav^Xt zG9yHfkb3vrg?;-p+iW4~yy~?@M!V;)%^aI=ou)WlV9gOE{pli6A^1;Au!qxSRbFs? zpLv&~)_eV7*LQBl9+alA#|n_L%?F{Jz!TE2Vmyc9ra`o&ybi*JgpO*P+BGC$G!ra->g+R|@YZz}JOArMCjRhLEI3kbgf|BNGWBgWZg1}#Ug6#qdH@3j z#m(FjKfmPJQ?~QhS^MR&Szd>?BO%XM);?`B6=>VIK+GgiOqJONcZ7*1Gp_QUuKjFL zttOcsI=;NDEEA{shfY5e^R8b&JqF*G3c*LgiQ>jR(NH`cB95*?D8i(O(+6K!#PNi+ zV~;-$DnAjUnK5eeeC_1zx^jKXR}^)2#|qa&N07k)&VQiHAxT~UjL|C@-@03VEI`qonsungv6RWeZ|H>6ZL)&%I#5Na$aJxB);(X(lAR# zI6q01qcC^Zms_!3UC?MOOhbL{k{`Q_CY#OF5WWLa$(dXXTB~?nSB08#{*xM`JR%BL zn{rj%+wR6o6K#kzf=x-6tAoXE#sl^OXdHmYq{A}Ta89G!#|q+*OSDY5FZG7J%nAV1n#X{$Xu&2{n6O|ncjIcQ&4EJ%l(IM+AW zD>e&-M}^FuZ`5qe1x&~bd$N-oHK+`0y^3}wBW9x&#WL{cVl4i^r#atK7)CR;l3#F| z#Yh%s*@IvDkpb-aVC#2$xeJjeBqlWsFVSlJm*nR$R3*wO7@Scj18H#io?%ECe+xTG zv|Be`Qy3aoCI95$Bi>N7pgqe+^8)BAyg6+!RpW6O)FxuPjE?zdYkS;T8Kb3K8UxD8 z$Fss;wv!(VW@LXB8m_m&&IGEN3k@8u*@%M*jC-#5w$uq4wk+C zfPd{r6qe-NpACZ@IvjL)+ikaqSjfiG;%9{XaWD6h-+Z>}Z}OeardQ8cOdOau0}y?W zVkj9=Vh*5boO@kdeCgj6v*Q$<~=eVpqYR2$CWcdPWd zh28%n8)WWz9gWP=6ryH;Rafv|h2VbZcsSnC3A$eDnH<+qnDvj8>`j}m4AOuQKDH_J zPztP3V^WmUg;E2porbhlZZ^W+w}SGmKCeQoKA_Z)q*AON_vyp>COoy6_TaQxtmszqj{+&$uP?DAKY*% zEE-@1^z0oKD+523oI#j2$jmPlItE}|@_W7HLxoqmcSIyIPi1JihTYOZ0bm+j!4yC# zFqIRt9B3ysV(8kil{IFaLTLWV3JQE@?NDNmp_uB3&bTBxIiaKz8wQZw{;3_h>wt#H zs%2v5<9T$r@i^^Z=Mm7BNOVnw9hDm~Afx-em__;5RV@H%jf^nPaUg%k(qWYE+-hPN zlA}?Uaj#UpRh7p}5dscJA3{dx&1|Hpu=&Q*0pY3Wv-^EQ{>u-MK=(KuuAIHk6E0xC zBTo-oYZDYmT*giHJSoii|BnKO|5;#Fxs&KS!n5Y9+&M}uz2;0C%Zw8M{>nkq?NtZl z%pHsiKmdW7kt07NRm&q32XqtfN^0b2#?++VYb+*!(ITih+@vUUEi=MZwD#4a^mC~> z3ru3hb(&{*!Nn-2UNFSaK;l2b)=5OvX=8Stf=nrk^oTS4zHGXSQ!LVA*xOBN!)qUI zrl-CnAW~(S)~Vwse%&Z)h(~p~y;l3BApL1o7=WmRg?!b{hN| zVDbx1K9`qt@y!Gs81V(rArzzguT_1$Hp08Slgo(AHR8waD1-^nS3wZ+(EyHd4T}ws zfXORkM4r+J31x|c_)=S7?lrnDF6NvE+9M|NojZ5U%7hzc+GiWJ+|sl{QEih(S`(%D z;x;nnwTkW%lpog^M~TP&9AgsxTF8I-e+vA~J0y5*HjtGh9`ytHlUYTIrv0V(x=dVTn;QNcLnAuv@{W3ir#z6qq&S6_Ld0xdnfkc z8;c`cPNeBj!6$kK`_D)U?n8U@RjH!{N|*Mk=9V7{#4{-(s(LeHLu^%g-k(${64TYy zdv_0O>>c|?cs2L^LYuaOM&)O(;=1x+7&5P-{}66exp8?I2WsRlb)9sj9SejJ$A^8=Sf;Z{%VG9V}X4G;5SKtxDE 
zi=?KbPIjEMFAwEF?oObW5k^bMjj2c*jm{{1zevnv8A)ZJUz;kL$mv_HpQX@B+FmNY zK@WH7>4e>nRiR0>j=WT87(gJ2tl3iqMNpwW<1d7wge#$*FfRe+mOgA(G)=I{LkUwE zw`}qL`PHQ$EBC%8!fEexOjA&<{8l{RAKO@S!3Tk?K65xiA#)F5LI=NP!JQ|~Xf@<2?h@8Q+k4&tOB@6m7&_o$} zzssw|Fu%)=Z!OO6k@+Z|7zlW0qdguQ{ZkM72d^%6Nc`nJDH$>o@WXeVi{5D-RLN58Z~ zD4D9Lx#UQH7#W>JdOXgK+j_%pZ2j9!?H*xhrJR^c6mxf9Yv!JDx#$y2FRTD50AMFN zs}e_^(Pj!@#3`&!-ZvnrdW+3v9AI0&ptf`1KT7GflQpP@Wfb|z&>p`gKa31_Dy8u| zelH&$0%iR-u%M5O93Vo$8AD-<_+ML({5Lf(kEfD z6Dm%#(XF$;JD=`e0JXzs+t(h^IgKqs)W7!s68+qseWH_xS*&cEq~3HToQ7GVt;L`Tcsi_y=)h82X+~N~dz9KTFELdUW-(VdKS4Y!`H8$r>!h`|f@_gU^vB zpM!#4-?}8n1v%AR`8j$?dA}_ZYx^R-ba9Nl?`WEOvb(vQT<4$h<#whH`~EX0a_X-? zGU>#L5Hc;x07&=?pb;o~LY0Yc7!>_Ab3J?|39MH(e!em4@Bwg)U;fhpY$8@as$=gX zBt2pC5e?bCA$emh5`nh%+s`WYPNHT#PZo(Lr)t5Y1BJ_8Jv^w=QD{~C`Er`j*`1j}@#{tpQ!zAov0)&os*@pbD(Pr>6xgOFd1Y{tCrE(AmWzB{aReG17XrLP8ia2@@^{%7R8`zP1>Xoy zB%cfP_n%vQ4k8xsH$`95Z?W$%b~n1X*nV``oXS}pJE*@_0sY%WUA%lG!NSvrO!3|{dc5~W&vW+9VH+g(x{H!mb6B4=ovb?Lliv?oMNLlS zNS`e5B9c!BD>(Hqs3EdD4XksZI9FFNxrFA4y4$ef1TpHJ$R9QPv#~PsM9WL`()#F( zeRFjbVujt6SzUg~Yk+NWRkF<-i+s^kt(aA`I2m5%P%-lmJ0nfRBF*@a)Cy&5Pshc$ z^JM<=Z%1N+Mz>Qdubw_yvRF^Yaj1=o$<^z3-fdt2?rmi zKZ&9P3L)1tq|Cms`2)W6XT3L1T0`nKgw_2^Nm^q)*6M8>kau{;@W^j4;%0W_EP~ct zidR8vXV#5|dr-st}tieR}mPtf_fIzpzkE2HvbF z!XG#UiSuqRfFz;lkVs?w2msKD(XERgidZ8*dQ_l3|F!>S|6EZzoT1Mk06FWcTv`wE zO*Q=LPrA6b5HY4|nqKEBg68~b3^GPfNFs|}=j?20n*0^wnhBfJ`LO3O%tY;*$f#sgUM zI`I?eLg}-b+`ELFo1tX`blG}}lUQXO^ov|TEfs_nCbZTTY)8VdbGtHq*&WReq|$^l z$Zx(&81D-pMgRetw4L$Gdx)I-ZJ{Shez1EKW3W$gr{q%1`k_&)4)asXslw4_kEv_? zUvxrZOo!#?wN(t#X|&^0ym*+!+uwyD0YBdloMel9nEK%IODZiTSQyoJi(eq5&z|e6 zhB>-gpN;1m8s#s$S>{P{#x&!>%~S{L_3KJF&63)s$=Dc)Tgf)+;@^J^c}^HQC4ST1 zoP?cJSbtS>fvpc8mC0O_C+Dy{mB+Dns?lVApH|Z{L4wQhc=GKAL-&RCFl-q`lKA9A z{Z7Wx5?=seLea@^T&iNc@3gRwrJA9DU&!LO_1Ico6o~)QiYqKiGem~MVt?wL7(BrQ zDwM8|7Z$GgmpVM2{T=j#R%&^4stop%SGb~L0P3^}0MZbbZnj7ngxTm^j#?aOK|Ph| z#NAC7OJOTbOa%ree%N9X%)a*n%*0)#N1?%JRv|KChy=g7V_2`(RVVtw*F>XR;f>aT zIo;0qf&teXfXRka4Skxqt6O<24RYKRi*2i;Q~1Xp)n-TkJ~1>gn!3!QOObf;jaa@d z0`3ID*C|>fqyXGVUge%BD47p!y)bn#z+X+tii2`cj6N%kk&d=`m+D}s?38+h?r4g& zk|%68SN=2Q&H<``C?d9;gbquk0(eI$~sXtvtE-g{l+{%djXFd@P#vnbR0Skfgj6GL-+`J6ZO4I!9wH2 zP~88P4@gjSUkv?^mjE<{Ot!15#2kjP`xa<_FJmGkLr`|_%CBPv#7;Vj844JG%Y~^+ znJ#HA39=Qzrz{~o4`It+(RTN)1dH+E6$HA${%{`=;e9n#qj5mSP)~dO+CvOXZMI>J zx)S7C31(TYmQ4(^W3c650cdjoA>TNlk;l|_)4SKC;YY`2%=9@lpw0|?!9`em2;3e}?8FQVTH@#iz6kfaFbsz6%m%ixg@1K*tzf|h9 z`sU`Z+oSpD8xab18_T@M(StKvaQa}d(?>)B-!TuBc3BvClhnM^J}3qGHF&=9#_tCg zeRISRBWQO_0Mk-&sv}8u4`C>gA853NOv=)0%V1t`;W&qht)xIn7teZ({lK&XX zaW~9J&U7ePf4yRxE8o|5qJR-V?4TORk||p@_4a|@RCu;9yaIrb6jH?lnI({kr4|6c z073e!6Q=C3NMMROUTz*t5GCz;gA$3qW^NU5Kt z6+LvvkwPJ7Gc|U;5zfS4Iq~V$1q~0f)~6Dzc|Ry6>8K5#?esOus}; z2NGfG94t^4;2pqi;gm?3c(c3YBS|Snt{@}EPZ-}&G$)a?3HX6}-(D9;Iz30y{`Zli z2cT`9S;8GjWFXYPsxM>rY-hb(fiYC|*Z^f(q{@6^l;bf~2O&h(39Nb{BTf|ZX|wJ$ zAIHnMv^e*bSyFrcwtzX~xe1$|E&Qf3dJtn@L6vwZ!P9rm^KmfRkZ^RpEQ?f&={^EL zO`6FW(BsbeN;oKj!BG=7@YlMrLaDeEZJIMxs0dw1@z4W1UeRZ5F*cM%1!tk%hTe48va_Y!?QDx4~bpW6;zpO-u%chm_r9wPV zbZtCK)JW7piTaCnPhvX!PZl~V^5PzCIK+~8=U=S%1`Gs9Cx@K*x_lkl#2IGmYh7r^ zwd+UpSviyqlI>_0#8Z0BIyd)~h-wYOiHnP8ERxL}c+u7=ElFBhC!GJ-V;~e_Z>q(qolTNwjtpcK=4Mv zDBA6?xolAJmD}SOrik=kc_jXbP)SIyrX%8V%$ibF7rw2=)mH)jEY7vB4Vm6n`P9vn ziH+9!A7?H$@&W+>CTy_LFDU|mWfThHayhW3R8JH5CWV5L0c)&q{MEKY8?4zV2=@Gx zz>M$vovYvfu0iVQMil^_bd%Ao+hwZtJreN8V4JM`^oC=YAm_3};P)JW>C~&Is~GHD zorew5mZ7H*DszW#Ei@V*#&3v^_{NLnxoBy@T((i6o2{-g`0rc6F)x6W0qAOF z-dDo_pkt+L2tO`D>d=l4iV`O9XQFDjlsuN6Pbg6@NzI#HdOlYzsS_?^B+Nu&M|UF; z18=%_1n0|T%5J#~J?|W{UKd-YeIr+EKU%O`$rV!jK#-KU6npBPx2R&_G~&?A%q}+y 
zG#9^JrFU1{NG!)>LhVO^N4F~$PYB%Ne-ddP!*D0 zDeYQfT`z@Z288)uNrZOAlc8Qc`~dfhaRKj-Hv&yjq~86tO&;_=iG0!iwFZV+1|GE0BAJh-B5~`Ef$&B_g07s--qq@Mnbp4V@Gm@Nx3)xTOS(-QR0 z`IW-yIA%>7Hc;bb#j$z{Kpy}Az9v~7N0!FxL`D;H^7%@uh~wxis5lm^GLA+opc5M` zSQ-@zyer{JS4HFk?2f-i(MfLM*#a8~jiIULY2BtC3Zl=6#MS%h4KMfmssK>I2u$JD zf#j12C^Hi%uS;pm==d?ht5GURQcB~@-K_M^c=o*D<2Sc@E$><4=a&kR!X5ZyH4}O4 zN;FcBYbrtAo_)%4shKJ?xp+JZhp?VvDAvH#D$%QLj69W_WB)@KHR1pQE*UrWV1JIp zZx6ILDyZRh45~EcP|iWGKZx^mw(YM#7?fC8Qf9mhMiaySux)Q;`Mf?(XhJL>iF>X{1X~0qKz3Z+U;;{s(7g?sMO+o>P2S~d8K&Nag~oR0vzF7=~@TR)knK@axS>mFE^*9PJTvM8Z2(Fe!Kvp4FU0f zRjU`3DHf(-o(hZly@fPqjbXviW(U>BAmhjNI7EI=0`}*Hi;dv7d z<4u)~!twKCan(p1ut(TA-491WEK4W6p{4O29>aDnA7ZAfOy~3L!>VoTYILCU?9QsMh$ooE zr72J~k+53v&$88fF+Ir@R)(G>-T6%Mu#hy)4o}Dye^6}fb|r;5RqcH1!nTQvql%F~ zJzUio&H+iabU_toh0%nw<#4Uym7`Uk(4Jg76d?deavy*9$Q#!g-Wez8mB3gCkD|U~ zsZr>}zfPqG;FBKQZtQZ;d;MYImXmiH@YD?$Z_>s1)%7#YN<)Jfo&WzUC%1K_N|KKW$0Lel$vyR0^}3w$g>!-3GsAnj-a!1T$w7F~27D za2Na(Wc@Pb3^1+LA8abF1pHMsm&0T3w>54quLuJlJ7RE(@fsAw+GVh3Oy8}2!N^01 zCASQ|t+-&UEPQTrowt$TIi{Hv;tIi%x=j*H!@gk_7r<82oVWf@%K>wd#VGfwY}!AU zB5i0pFVIDe&rBOg`}cbM!gDoZNxuTm4wws9!V`dsHYgUDJmV{fM>-j0F@B7`Wv3G+ zz(8 z&c}1JB)W=wBi<8^9_s!r9)yFZNLp$oCEl0I^;^^Bw<7K^yHY#&1HH~M3g}xbDC0v- z@RQO;!s_-A2GG@s3_2Di6fqp#n{e+(47IO(PBI5=IR9n@o(#N1$P&iT3~;L~Q$Clk zJihvbD%Ougfa5F`X}h&oZQuBFAtdzCr4CD{&QBnSTk|n+OqD?=lAZW%5Wh_9_jkPa zZ2SXO34$dq8$KD!Z4WLkb2k^-z&BI_LVPsWx!>vzG`OQ^LMnEqQabO*TJ-7!Y1z#c zQ`{RF$(Qv^@?5s{84$PUc~fJ@3Al;FBDQ5QB5OZ?r>ezu{EW*k1&6xyUUV}(!(Tgv z$S+si%kgfj*TYFBT3#KI(+-V#yU{%uIQgEs~ zy5+o(tR{HDAlZw>b{)&}S$F$1W84m=_}%uxqGfg>{_1~Z!DXw2-c+OtGa zc`&OovZNftO>fAlFOh0am^$aXbv9_1L~tIgWk+nesAyT3T`k{P2?tcyt4&lhFztIN zmtX4A<=1AOGJQC-D&$I^56Z!Ewe=lg)BnY>G;w|^mSyqJDQ8o3d40O^G&u9}A(E%L z`(ZQql8p41Kpiyr8q%efb*I_zSS>C<1pp??XhUMccfzwT5z>O-5pbyKKLT*7X-6F` z5I|Sx-6>I-CtY$RjRAK4!d?OTw{Pnv>*^s=I5^Cw&-fz9A^Qx_iM1{0<^4T}$T%@0 zaT5hmhF1Bq#O@So;v zLQ_5C)8OW=Sn6nPf5z0XjkcyQWkx%#oH3x8?jXS>r2l6=AOfIW6&telpbw}})?b}x z;GV$d0AEWAXH5F7YHRE9eEe&LEa#~j7C6(r#kzR%GX9?8dsKYiqHiojjMf5Vv%uJJ zXgjmRK6In0j0jf1n8;Ew4&xd;?JV!71vlX~DvC#__!%`4q- z4Xw>TT8%mO-s+9)F2goIcfXGb5tHmU{9UF!-r0Z5P9Tkh7Gl9E8dOE0OA_NEIyS3# z`%rHZ{G$O8^)op1_@`+g0rBXSn;&0Zn)6X@QBy+QmzN01!Q2$?Eb74J1FHF_?kNdk zEje-Q>Tx)6*#1rXUMK5QeXs5f{ptXeNN-zan1|D$=#aYnam?s8dBSME2MOzbY`sk( zH@7baqy4_=T*BJh1_=w$G%?s9=`4vpDGHZwlqhPNl%7|0U};N973;m{F%&wU5T1t^ zuj9gg2LX7+;jz9TD62UN>Ff`cTIMWv>uv^FNO@zE#E?AB83zB_zGfUoz^sV-eEo@L z@{}9u^}TH}L`qTWHJBVV!*Q%L-fn6}9Kx^sS_MWFdr3Li(fjav5A6DiAjNM=c-*F219pM9HZ zfH6syfmJ*QF`9r9S2I$%9#1DOTA|d$VvhZ|`RE_4{ENTFBfaBj+l`fc{i4H4aHGq$GK2C%htn9o1G6O*Reh$CN?9h}+Yxv6IiM>f8 zWgtN*2C@7hk?)F4>&gNhCYn}$U|1C;H@pJ3m29m^w$Uj+K1>NAL>noLmN_jJO8P+X zlG@P2%8HCDOn(HaIs#>baMnqsIO!)0+Jgxs#Kgo>UOYF1QuM*^gnXdyEiC&XGKcQe zl&Bb}tee)k!w(Bl%OUE_Q;{u{g=L@l(DFY&WVEAV!mH<3Fv-GmaPeJ7%5~xvR;KNW zehS9LW6tmIEEOjhd^_R5{%RMu|6K|31NYyw|GCRK0%&DYW^p@^W|deLzB*1sN&F!i zny<_CHHL~K@B~H?NR*gm+2#S8*qUPZ!J1-%?GXGqT)mUlO!ZuNYrB4(mSX2I0!v%@ zp(@Ev)HrGy3&vJwh4@Zm-A|cBZD+L44@KuwY`tyXT5QJCi7Jd1NQyp5{7gv~+m>(l zHgmKYXSa323B(l|kZ{Nx#TRIHR^P|>WGU_wIapX8RdWA}gQf-i3#9q@sdn=4tBt2s z=+BH+yX2*xp|BR$yT8^+F&8cpM0p(m5_p4}M(!Qr; z(^EXeovtXX6;wnf@VX?1x-yM~fsjbnwoqMu!C!pilTJ4X);Dc$zt!9@#9@nQC~FC7 zMs7TPkd+_wS(~u{c_#nxHq!riK76uS+4ehn*c>6}dlR@`0AA4(ROKy*W(?6J8*xoB zArO~uflQ*(AA%^c6aQUY$UKTRXz2_`BO&ZG;lFhZ(j28T5nD>50VVh%(&yJxVS%Y| zco)ki-x&^W%LE(E5RMK2@TqCCx?Dm>Ov*VgDHj3(F^RZU!scCX@tv>ENoh>~jk_ru zWgSqIWS`VtXapNz;{?+iBa=RxzR4{F?ug4q5U=)6B>@2`Sh-9yOBFi$ftub)leN)j$>o{{V`S_2(47-eq}1TruTtCg&$XDt zbfs{dS9iwm_@pS=nHbhMbN%FB05%o2zUfG>6r%Y@Su=zP(b5P7fz8hVI7x>Q@#Bn( 
zRrYCqIb|USRPFE9()XK|=JnX4-+K9S&7?1Y5@GzmauemU>RE*f=W9rU=WDA@L|F(R zNiJ{vHcZKK^1$`GBQrsJ@S8u2VU=26P(!|&>lDgPf6=vz&a^dLw`Wj>@7GEkv|XR^ z$wrxZwh?Ydrq|$yv~jc?xR^$DWLB-y1?)Rd*Jq0>t-N07t$(bGm>K4QdD0PtLYlne z-j&NgxAt97)y7%^DzkM?LkzMgLzsBY|`@ZIN&h)YGhI9J{|C)$( z@)tyaa-wnem=giwvc}%SRu!D#xJ=VYQp)+&0A^m>8%3^(8J~r(2qq7t{dfX5!Vn7< z^v1T7&bMTimT2o2{Hgzu2-!L&Jvt4={@)9rDgY#X{q9$m8jfyfzp0Bm5_n+5 zbvTYJJh1#M?h3c1E=K|!Q;KJa*JzplP)qv4@wCL$dav884_jfYgYA*>WNi9$<`fhP z<;eXC@Y9I9K&Yrln9|l$_;(=)1z4#1oGIE%D1J?}GL$&Tk$_BCjKdEA5yV?R@6w5k zwX69LdEgT!4aV?2uf>7So_eB%=j5F2Q?sCC^qh&PV9&0VNV00-{M7~iX8G3}{f=H@ zQ54Y4uf*8Z*cPCqd zTf+kN_fLO1UHOXseaYxM5`#iR`0U235jG)`6^=JW0Nf6%7qcWOCmKY6Cgx<$YclJM z=Fn3=%;LaB!`KKD_aA9#GHDe!=Hl3whcrT>osd*QJCdztHNh{2ch3tmarlp=N!qNA z%WgCmj^Y%F21Py|k#d%buyeK_&QC^ng4~v+Y^}Hkfn*24>nj55)VcfV+x!W-(?13z z?f#T4nkJ{gQ0UcWZ!JvL!GbKvmMZrGC>Ox9;8E%9fWRBk{juXegEJ7YmLm0K!V{9U zL2!cXIqIb3)p4bm_!H07W~?hXfo!*T<&Twl5o!abQR$TNLPF*!C7X*%Sw9%d;dE9z z_|K#iI?T%Sjvwv6!8D2IpNr|(iEIAOdbDqaskxFN&9az2%yU4**&LXgrF`hwTen)4 zruqH3#xLmWL)yZ3kBRe#wk0u9c2%>3pwnCE1N0_&-D2Mq;SoUM-^b=suUjz_DKWBX znx8)IXTR)2ku7gLkp1R>@e5MP1%3f!4&z;rsFQR9aLIIaM@=*ElkzB-%B;Vu2K{%u zKbuJ41v)_u@_FNzLX|UOX$@+*^W__EHoQ}1W__-Fs2>cB@blVJN~TVI^qzzsen#s6 z@S(*z%2|0xrq&wfvtENVo{IhXgcU0;d9@8wstc}b!vkGFBFQHp9wu8 zkpLQfBpEe1I+pY?D2U*S5O|4|&jYwTW?)?#F90F#u+ z!5fEU(8-FGgoen0z@9|OiPXsl5EQ!jwXARz5Zk-9RwYQ+-94^8Swr%;Ri`BIdOk!=z7Q@I8M z=&fYQ#y>%kjmA8GpiQ)f^pA5mgxC7Ur>f-S7>=b+@9oHLYg&NZM}$zx&|S%0TBOJ? zln%o+Y2M$Zl2^XhmXmONVY{beajx>s2ItzDb@Y9z3+&R!`{S@1-@BV`4mNaP+;7oO zhF!$~f(0QyGr#+_v>&xny@s+9i$b;K^b?z3`!x|o%j>+)_F}mClz6BNv{r*n89dB_ zVxL__WyKOlQ56!WS|;?>rkVb6SX#=&WMzttzRZ2!H1Q^grFT@Ze)G4ZV%jY9Zw%GTn8_`>=xywf zIPj_i-=Y8ejJ3lz-BHtDYwO^zq^1nhk22+tuKx+2D9?nZe_ z&(PaS%VP0ACAj{du*fIMKdG#xbb(l;}DmlLeKSf5= ziwXw4GCSzX<={ILRn*o^4y%QEOWc8qIsN;wCvf6Fd%W`4Tr@ff&b z>`wDEn13U5Ylgd!6cqR>_h}?u+F0cGm7Lf;T~i!uC8c~;m_hjJfCt$lyEq55N#2;8 z(Z-VeGf0ErMlOR zo>I4cr5lt*s2rc}zcch20EsS})WxaK%2K+tT}3xQl`<0M5kdI4Wc3y@9MBF)CdCCf z`IJweq+_h;q|40n5Y}e%f0#hk%ZMGbP=G}G+J}A;rbqF*Q$6&XPjR3_jOGc z=wM&#N_cHIkiAZ2;&W7_hwH6TGR{+b7k{#rt5qwp?AHB4%`dOMo9CsP#n}y4R)5`t zLpj7ZHuIWT5ad_?O&?fbPoHxbT~5P8G=_OUIoq7}5|Lkv={+xuvwDEBRO(FeV;~AW ze??5G_Y>uN9LjAEAANR(lEG8WYENVlS&b>fkvg3v9IE%PDFbbWFfLDF!tN<2s9neg-B>~p3!_@Z)HugR`-#%TZS4Q5~%oJ=J$lk}-$0miht;yGB zQ0UDkFVmB`6@%;zj>WDc6-|e(r?~GQPV6u;Mhw68lF-yEeHGaHfu1DOtzJfiqECta zQ9{F90Vlx$3!L|#E>{o$(ODVQaY~KJN3p)YigVBP5kW@y7@>)WiXO_ad9%t+ghjMl zq)z$wj$@9COV1Tu)oJ$n^8Lt{4)j5}Tj-B0z zq{YFWoulifU{5Vxl6B+cAZik#euqfP(Dfk3&SHdwIO!0ph6GIRZTUC1yi_BHJ1>{K ztH&8ioODij+S>%LT$Qnk^v(lLpv zz9V-n8V%0Fi7x_Q?h^EAJTj=F2aQ&1X<)sW1a(@#%Lv+fUqxbx(W+wDtv;AiVw_ru z#)3%aruD7VHO5JCV*N8MS|^sL9z&e|ZVRo?dh<3v(cq8SK`38a~TPRLe}pqWl?sSfDz0UIXy@9zmL4yIPP+yint0Xr%7t_ z_@L1vwV7j0C`c*J5d8)FS5Sek?BkU|z%X)D=uOvX(^>q4BLbw@Z8Zc&u!v)quxhov z1!!_KE~j2s4!!^~f`EvtEso1HL*&zrt|QYKnQ9mcYFvxG;MD5GuoU&FOc3ucF2;U! 
zDu0fKhp+ApTF|1_%6Db`MCLYn^6rfDY&(|4p# z1Cc9@ga6VLmibe;ei=cCtzgxB7$On^90FFO3atWvo-kDvzVgV$muPxhs(2|AAY%xk zwfRnWO2_RoPSXgtiI+cBirnlVav_~Kh4;RpY!^k>XhXx7cIY%EGd6%|sQuqIfe zsgRS|iEaK;>F;xtAJBNYZZjQkR$&d6G6*Lf@f9x7?=H9q_kB6jS;Nf1;GxUyKvM8+ z3gW3~H-V{BV1T?}_t2d~Wt>K~A_fy*WR|#GUSBR$sb=1V#JZwQ|egQR6H&saQ@7?iGG{imAA@PxY*WT zOZX{K_A^5=O-?8oE0977X{LaLp*Dug513aYM2|!L-fl8CFEBv~!nStuF8AxJc|0{3 z=3Iyn?6x+KZj29tZGn2|ct*};w(D6jZzm`)xy8k_KB>!nt81GM6*@~Kr{0ev_H}kue$!cFV zn{M7x+i~=$BFI}vu*#&+ZjiIY$lAR_r+Z!>+y9|BPThcgeA-73{Ow8pw>2ev3`I<@ zpPls!AR7p7vxQw}G(e&OZV)I?N45&I(f&~vqe;F)}#OO54hXa+CCwQUINZU~A`Oovj*Mp5d`Ap^Nff zE;q>wAXNxP(}PJpaZ9yvas5q`I&wOGf?-v&aUWFaL(F%pF4tAsgr;-DTM={}|Ex!u zszm@72ZtIrI@1pdJ()8yhof2T4*BMy^&?IbDL-{%yqeADd|kuBtx{KY@;4)hf&iXi zU32eWNh5etL1<==uzSORFK;U_W|p;OU&3uF12k=>7@>C9)~BQnn^ay>dVB3sY;4U4ep zMCTg!gOH()uY%sg_s$Omf1T#gZfX0{$5+##mwg#(u4 z6Z(bw#B}l4ZBA3K4!J-Q5nO{B|7p#|DT5aR08T||h<(TjZh&=>3~Ln_sR66PR`$ON z92a#eP=eXFsXUb9=_ zkdvNFsm5MIEN-f!)8%iGz7?SLG^E4K63`F}Kcd6JH>OfriJ%|@&b6 z5(K?$!r}8R1m}HzwO;`Q9sI8Akr5hkuI>Tc<*R6r~lHSbl{mtBHP^WDM-1%E-t_R5y!a zvYJ2k?G@-acIs5PI2{H?Bx4s$}og8gS8pXtzAox9VO4P6hnn=m7 zOw5Os2h=E;6Ujs@J`0r7;I4QJ&EkUk*C9U z3(GozZm)dmoT$y}^*x+CPBV6R{*j*!-7Dsuz8U0}`hv=PLabnm3!n?F|1UzIs71A& zGyqPTg3%{4>aNImrH%(9S!qijzIF>k&a`SBQP3XpIyM@y;%s0>8M#?ErDwb-j*3Jh zzTxtLc#_Sx<9=?=jO!70Nj&l(I1rGJx}Rm@rXq*Hed$NUv+AdmYSq;tg$x00lo@l~ zb-ydXLMALebh->3ua188ldH5&C@rUi^otLPjhf?2YQ&xHE9C3MUX?CPk|?*5D{NV2 zKm0~5qhdck#NccO0C|9w8PuRd2{CjKmKHM;!auIoKI}Z~!jMz{&oc%acCUWqihmP6 zAQ)?H6GjX;>VB0pD3{VM#mBUnHWq&JT}n+#H~Vn#bItgYnYsL~I)S{o4CfOjL8N@l zej?injW`WAt|pbvZ+HY1PG$v%8bz-O)o5|XSR$e?{j{@{-&HT}DhA8MWh6Ka)Cj++ z_p7HOqSCM?TZ(%pdU@#S$M%)lK(B+X8j{vcXbyGn;iL^=8<%d=F^{Xt3>Q0KHa=#~ z(WTtv*i&OgB-tU_+Tz^&G15GCo~jLZ4g$=JH+=`^eCIRlscGmT?OOSn?u(x* zCzxJ5+c83sw#s^BLsIudtWe^{uf$LSm!e$GXZY&Sg zri_}+RUF`D2=h0S;PC#^TN_-fqi~X|r&~thMMZuPt(Vw`qsDGr72YE{tBn-9M(rw# z^*-Q+!|bo|Lho31vir`t(*R3QbPN+tgzRf(0-3fVzv8k&8R$dfe_brrj|$%!Nfi;_ zDTwmZqERK>9bP%M+g1PqBsjxy4Y1`Kk^HFTpX?dAoMIyabR2)oP7dU||2*vsxKk2K z+UiS0eNbF3d__25K=01T+lj1-ramApTjKx#Bm*VZG4&mKu23k$@U~_w$tPr)PFv0N zT)Fj%MKYo6oCD{{(HI@orl(oAVTR ze|sbLlV!k!OWftY8G09mXO{Qg!ke)gC~_Xw;TYgwZ!Xt2${#oWc_G6%e%s zSjoT+R2cq8w&&ktx4xAb5;IXTG^sLonQEBqQxr>6X7(BVknIUKr=i8j?c`&Gl@~@} zmm;`G0ocp+cIqtC@haih#tia@r1e3JRCMm*PFuBk~S@>vW5*+FWLYGaCz- zdHv7m6w9tFr4?dU*9=jhVz_hUF`qpG^d_1fpo2mmTJH@Hw08pD4gJ#`f`&n@@H4ijU<9d#R_?xA4fSsz&9q=bThpGevj-qAKr4AjWw6{ zRo!F|+UfZD{i(Y8cudV(aF6&A0YpMc9@m<`czbALW9tuq-4ngc{+<4c^==ABRO_#= z`VLQC4tIpuUI`L8;q~1+zeuQ!U{`0CZ&15AKyY}@!#hIGSSj@aNDl%+W6&tgQkrE; zsQ2d0=EgO{s1A=jkf%i&!)8pPrr~~1tBRRC;efx^hsrI{i*B`G@b)kl=RKFnO0Tsp z>nt|v)~#Lj3C;L`5oYE1av3W2t?wQ9BVsZiX5JnN;w&9@o1Q%-yt1S@KBv1iutnt< zKxJj5$1aOMTpRP)AQ*1z{BP>oHRBxIwvx)RA7ZQUtG)WmgJk9}qLCY@-zG)pGlcD3s#S~vA#yN6}cyW8u!^S93*xOY>~j!$oyV9g=`)%FFD-3y1-?&=Bx_k+rj zmiG+k>KcVk`L?K?0Tz2tA+Kx`cZSI)bHrnsqPK?-J~H>)A>R`Yh{7qg%@8XvGOU09 zqb5EDj;J-V-!YA8`WL7m8%2%E%GOln8ox~E9k6RKc!u-^kAEG!`6jPoQqURc!Yo$4 zKGN{3^5@yz-fOGOrong+Rx{fwnQhspk%sCRvB-SGo2mD1e~khkH@Eb^-9ARVv4(Z! 
[GIT binary patch data omitted: unreadable base85-encoded media blob; per the tests.sh changes below, this is the new test-2.mp3 audio sample added for the audio tests]
literal 0
HcmV?d00001

diff --git a/tools/mtmd/tests.sh b/tools/mtmd/tests.sh
index 15a37b0d22bb4..cb1b99b63e09c 100755
--- a/tools/mtmd/tests.sh
+++ b/tools/mtmd/tests.sh
@@ -30,46 +30,53 @@ fi

 ###############

-arr_bin=()
 arr_hf=()
 arr_tmpl=() # chat template
+arr_file=()

-add_test() {
-    local bin=$1
-    local hf=$2
-    local tmpl=${3:-""} # default to empty string if not provided
-    arr_bin+=("$bin")
+add_test_vision() {
+    local hf=$1
+    local tmpl=${2:-""} # default to empty string if not provided
     arr_hf+=("$hf")
     arr_tmpl+=("$tmpl")
+    arr_file+=("test-1.jpeg")
 }

-add_test "llama-mtmd-cli" "ggml-org/SmolVLM-500M-Instruct-GGUF:Q8_0"
-add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
-add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
-add_test "llama-mtmd-cli" "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M"
-add_test "llama-mtmd-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M"
-add_test "llama-mtmd-cli" "second-state/Llava-v1.5-7B-GGUF:Q2_K" "vicuna"
-add_test "llama-mtmd-cli" "cjpais/llava-1.6-mistral-7b-gguf:Q3_K_M" "vicuna"
-add_test "llama-mtmd-cli" "ibm-research/granite-vision-3.2-2b-GGUF:Q4_K_M"
-add_test "llama-mtmd-cli" "second-state/MiniCPM-Llama3-V-2_5-GGUF:Q2_K" # model from openbmb is corrupted
-add_test "llama-mtmd-cli" "openbmb/MiniCPM-V-2_6-gguf:Q2_K"
-add_test "llama-mtmd-cli" "openbmb/MiniCPM-o-2_6-gguf:Q4_0"
-add_test "llama-mtmd-cli" "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
-add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
-add_test "llama-mtmd-cli" "ggml-org/InternVL2_5-1B-GGUF:Q8_0"
-add_test "llama-mtmd-cli" "ggml-org/InternVL3-1B-Instruct-GGUF:Q8_0"
+add_test_audio() {
+    local hf=$1
+    arr_hf+=("$hf")
+    arr_tmpl+=("") # no need for chat tmpl
+    arr_file+=("test-2.mp3")
+}
+
+add_test_vision "ggml-org/SmolVLM-500M-Instruct-GGUF:Q8_0"
+add_test_vision "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
+add_test_vision "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
+add_test_vision "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M"
+add_test_vision "THUDM/glm-edge-v-5b-gguf:Q4_K_M"
+add_test_vision "second-state/Llava-v1.5-7B-GGUF:Q2_K" "vicuna"
+add_test_vision "cjpais/llava-1.6-mistral-7b-gguf:Q3_K_M" "vicuna"
+add_test_vision 
"ibm-research/granite-vision-3.2-2b-GGUF:Q4_K_M" +add_test_vision "second-state/MiniCPM-Llama3-V-2_5-GGUF:Q2_K" # model from openbmb is corrupted +add_test_vision "openbmb/MiniCPM-V-2_6-gguf:Q2_K" +add_test_vision "openbmb/MiniCPM-o-2_6-gguf:Q4_0" +add_test_vision "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M" +add_test_vision "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M" +add_test_vision "ggml-org/InternVL2_5-1B-GGUF:Q8_0" +add_test_vision "ggml-org/InternVL3-1B-Instruct-GGUF:Q8_0" +add_test_audio "ggml-org/ultravox-v0_5-llama-3_2-1b-GGUF:Q8_0" # to test the big models, run: ./tests.sh big if [ "$RUN_BIG_TESTS" = true ]; then - add_test "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M" - add_test "llama-mtmd-cli" "ggml-org/Mistral-Small-3.1-24B-Instruct-2503-GGUF" "mistral-v7" - add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M" - add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-7B-Instruct-GGUF:Q4_K_M" - add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M" - add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M" - add_test "llama-mtmd-cli" "ggml-org/InternVL3-8B-Instruct-GGUF:Q4_K_M" - add_test "llama-mtmd-cli" "ggml-org/InternVL3-14B-Instruct-GGUF:Q4_K_M" - # add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-32B-Instruct-GGUF:Q4_K_M" # does not work on my mac M3 Ultra + add_test_vision "ggml-org/pixtral-12b-GGUF:Q4_K_M" + add_test_vision "ggml-org/Mistral-Small-3.1-24B-Instruct-2503-GGUF" "mistral-v7" + add_test_vision "ggml-org/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M" + add_test_vision "ggml-org/Qwen2-VL-7B-Instruct-GGUF:Q4_K_M" + add_test_vision "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M" + add_test_vision "ggml-org/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M" + add_test_vision "ggml-org/InternVL3-8B-Instruct-GGUF:Q4_K_M" + add_test_vision "ggml-org/InternVL3-14B-Instruct-GGUF:Q4_K_M" + # add_test_vision "ggml-org/Qwen2.5-VL-32B-Instruct-GGUF:Q4_K_M" # does not work on my mac M3 Ultra fi # to test the huge models, run: ./tests.sh huge @@ -91,14 +98,15 @@ fi ############### -cmake --build build -j --target "${arr_bin[@]}" +cmake --build build -j --target llama-mtmd-cli arr_res=() -for i in "${!arr_bin[@]}"; do - bin="${arr_bin[$i]}" +for i in "${!arr_hf[@]}"; do + bin="llama-mtmd-cli" hf="${arr_hf[$i]}" tmpl="${arr_tmpl[$i]}" + inp_file="${arr_file[$i]}" echo "Running test with binary: $bin and HF model: $hf" echo "" @@ -107,7 +115,7 @@ for i in "${!arr_bin[@]}"; do output=$(\ "$PROJ_ROOT/build/bin/$bin" \ -hf "$hf" \ - --image $SCRIPT_DIR/test-1.jpeg \ + --image $SCRIPT_DIR/$inp_file \ -p "what is the publisher name of the newspaper?" 
\ --temp 0 -n 128 \ ${tmpl:+--chat-template "$tmpl"} \ From bb92d1d0077b9fc90f6aa0b97e91fa9e03ac7ada Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 26 May 2025 00:56:31 +0200 Subject: [PATCH 03/16] fix compile --- convert_hf_to_gguf.py | 2 +- tools/mtmd/clip.cpp | 2 +- tools/mtmd/clip.h | 8 ++++++-- tools/mtmd/mtmd.cpp | 4 ++-- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 27eb2a8f1cf22..b350dcae64bd5 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1151,7 +1151,7 @@ def __init__(self, *args, **kwargs): self.global_config = copy.deepcopy(self.hparams) self.hparams_vision = self.get_vision_config() self.hparams_audio = self.get_audio_config() - + if self.hparams_vision is None and self.hparams_audio is None: raise ValueError("vision_config / audio_config not found in hparams") diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 290ee33ae465a..df84a00a82e24 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -2672,7 +2672,7 @@ struct clip_model_loader { } }; -std::pair clip_init(const char * fname, struct clip_context_params ctx_params) { +struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) { g_logger_state.verbosity_thold = ctx_params.verbosity; clip_ctx * ctx_vision = nullptr; clip_ctx * ctx_audio = nullptr; diff --git a/tools/mtmd/clip.h b/tools/mtmd/clip.h index c02cb2cbb983c..7bdc3157cf268 100644 --- a/tools/mtmd/clip.h +++ b/tools/mtmd/clip.h @@ -29,8 +29,12 @@ struct clip_context_params { enum ggml_log_level verbosity; }; -// returns pair of contexts -std::pair clip_init(const char * fname, struct clip_context_params ctx_params); +struct clip_init_result { + struct clip_ctx * ctx_v; // vision context + struct clip_ctx * ctx_a; // audio context +}; + +struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params); void clip_free(struct clip_ctx * ctx); diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index b3d5391c3e5c3..1543927348dfd 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -150,8 +150,8 @@ struct mtmd_context { ctx_clip_params.use_gpu = ctx_params.use_gpu; ctx_clip_params.verbosity = ctx_params.verbosity; auto res = clip_init(mmproj_fname, ctx_clip_params); - ctx_v = res.first; - ctx_a = res.second; + ctx_v = res.ctx_v; + ctx_a = res.ctx_a; if (!ctx_v && !ctx_a) { throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname)); } From 8b51e7fabf7a96dd020893b051bc6a3eb19924db Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 26 May 2025 01:27:24 +0200 Subject: [PATCH 04/16] ok, missing SinusoidsPositionEmbedding --- convert_hf_to_gguf.py | 62 ++++++++++++++++++++++++++++++++---------- tools/mtmd/clip-impl.h | 2 ++ tools/mtmd/clip.cpp | 11 +++++++- tools/mtmd/mtmd.cpp | 1 + 4 files changed, 60 insertions(+), 16 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index b350dcae64bd5..93828f1b7df22 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1124,6 +1124,8 @@ class MmprojModel(ModelBase): preprocessor_config: dict[str, Any] global_config: dict[str, Any] + n_block_keys = ["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"] + has_vision_encoder: bool = True # by default has_audio_encoder: bool = False @@ -1160,8 +1162,7 @@ def __init__(self, *args, **kwargs): # TODO @ngxson : this is a hack to support both vision and audio encoders have_multiple_encoders = self.has_audio_encoder and 
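Note on PATCH 03 above: clip_init now returns a named struct instead of a std::pair, which makes call sites self-documenting. A caller-side sketch (the file name is hypothetical), mirroring what mtmd.cpp does:

    clip_init_result res = clip_init("mmproj.gguf", ctx_clip_params);
    clip_ctx * ctx_v = res.ctx_v; // vision encoder, may be nullptr
    clip_ctx * ctx_a = res.ctx_a; // audio encoder, may be nullptr
    if (!ctx_v && !ctx_a) {
        // neither modality could be loaded from the GGUF
    }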
self.has_vision_encoder - self.block_count = 128 if have_multiple_encoders else \ - self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers", "depth"], True) + self.block_count = 128 if have_multiple_encoders else self.find_hparam(self.n_block_keys, True) self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count) # load preprocessor config @@ -1185,26 +1186,26 @@ def set_gguf_parameters(self): self.gguf_writer.add_vision_projection_dim(self.n_embd_text) # vision config - self.gguf_writer.add_vision_image_size(self.find_hparam(["image_size"])) - self.gguf_writer.add_vision_patch_size(self.find_hparam(["patch_size"])) - self.gguf_writer.add_vision_embedding_length(self.find_hparam(["hidden_size"])) - self.gguf_writer.add_vision_feed_forward_length(self.find_hparam(["intermediate_size"])) - self.gguf_writer.add_vision_block_count(self.block_count) - self.gguf_writer.add_vision_head_count(self.find_hparam(["num_attention_heads"])) + self.gguf_writer.add_vision_image_size(self.find_vparam(["image_size"])) + self.gguf_writer.add_vision_patch_size(self.find_vparam(["patch_size"])) + self.gguf_writer.add_vision_embedding_length(self.find_vparam(["hidden_size"])) + self.gguf_writer.add_vision_feed_forward_length(self.find_vparam(["intermediate_size"])) + self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys)) + self.gguf_writer.add_vision_head_count(self.find_vparam(["num_attention_heads"])) # preprocessor config self.gguf_writer.add_vision_image_mean(self.preprocessor_config["image_mean"]) self.gguf_writer.add_vision_image_std(self.preprocessor_config["image_std"]) - elif self.has_audio_encoder: + if self.has_audio_encoder: self.gguf_writer.add_clip_has_audio_encoder(True) self.gguf_writer.add_audio_projection_dim(self.n_embd_text) # audio config - self.gguf_writer.add_audio_embedding_length(self.find_hparam(["hidden_size"])) - self.gguf_writer.add_audio_feed_forward_length(self.find_hparam(["intermediate_size"])) - self.gguf_writer.add_audio_block_count(self.block_count) - self.gguf_writer.add_audio_head_count(self.find_hparam(["num_attention_heads"])) + self.gguf_writer.add_audio_embedding_length(self.find_aparam(["hidden_size"])) + self.gguf_writer.add_audio_feed_forward_length(self.find_aparam(["intermediate_size"])) + self.gguf_writer.add_audio_block_count(self.find_aparam(self.n_block_keys)) + self.gguf_writer.add_audio_head_count(self.find_aparam(["num_attention_heads"])) else: raise ValueError("MmprojModel must have either vision or audio encoder") @@ -1212,6 +1213,24 @@ def set_gguf_parameters(self): def write_vocab(self): raise ValueError("MmprojModel does not support vocab writing") + def find_vparam(self, keys: Iterable[str], optional: bool = False) -> Any: + key = next((k for k in keys if k in self.hparams), None) + assert self.hparams_vision is not None + return self._find_param(self.hparams_vision, keys, optional) + + def find_aparam(self, keys: Iterable[str], optional: bool = False) -> Any: + key = next((k for k in keys if k in self.hparams), None) + assert self.hparams_audio is not None + return self._find_param(self.hparams_audio, keys, optional) + + def _find_param(self, obj: dict[str, Any], keys: Iterable[str], optional: bool = False) -> Any: + key = next((k for k in keys if k in obj), None) + if key is not None: + return obj[key] + if optional: + return None + raise KeyError(f"could not find any of: {keys}") + @ModelBase.register("GPTNeoXForCausalLM") class GPTNeoXModel(TextModel): @@ -2743,9 +2762,9 @@ def 
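Note: find_vparam and find_aparam above are thin wrappers over _find_param, which scans a list of candidate keys against the per-encoder hparams dict. Illustrative values: with hparams_audio = {"d_model": 1280}, find_aparam(["hidden_size", "d_model"]) returns 1280, find_aparam(["missing"], optional=True) returns None, and the non-optional form raises KeyError.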
set_gguf_parameters(self): self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN2VL) elif model_type == 'qwen2_5_vl' or model_type == 'qwen2_5_omni': if model_type == 'qwen2_5_omni': - self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL) - else: self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O) + else: + self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25VL) self.gguf_writer.add_vision_use_silu(True) # find n_wa_pattern (window attention pattern) fullatt_block_indexes = hparams.get("fullatt_block_indexes") @@ -2808,6 +2827,19 @@ class Qwen25OmniModel(Qwen2VLVisionModel): has_vision_encoder = True has_audio_encoder = True + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + assert self.hparams_audio is not None + self.hparams_audio["hidden_size"] = self.hparams_audio["d_model"] + self.hparams_audio["intermediate_size"] = self.hparams_audio["encoder_ffn_dim"] + self.hparams_audio["num_attention_heads"] = self.hparams_audio["encoder_attention_heads"] + + def set_gguf_parameters(self): + super().set_gguf_parameters() + assert self.hparams_audio is not None + self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio["num_mel_bins"]) + self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5)) + def get_vision_config(self) -> dict[str, Any] | None: return self.global_config["thinker_config"].get("vision_config") diff --git a/tools/mtmd/clip-impl.h b/tools/mtmd/clip-impl.h index 27ce8c43f678c..8280bce8b93f8 100644 --- a/tools/mtmd/clip-impl.h +++ b/tools/mtmd/clip-impl.h @@ -130,6 +130,7 @@ enum projector_type { PROJECTOR_TYPE_INTERNVL, PROJECTOR_TYPE_LLAMA4, PROJECTOR_TYPE_QWEN2A, + PROJECTOR_TYPE_QWEN25O, PROJECTOR_TYPE_UNKNOWN, }; @@ -148,6 +149,7 @@ static std::map PROJECTOR_TYPE_NAMES = { { PROJECTOR_TYPE_INTERNVL, "internvl"}, { PROJECTOR_TYPE_LLAMA4, "llama4"}, { PROJECTOR_TYPE_QWEN2A, "qwen2a"}, + { PROJECTOR_TYPE_QWEN25O, "qwen2.5o"}, }; static projector_type clip_projector_type_from_string(const std::string & str) { diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index df84a00a82e24..8a5b774c05663 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -415,6 +415,7 @@ struct clip_ctx { } } + // this function is added so that we don't change too much of the existing code projector_type proj_type() const { return model.proj_type; } @@ -2086,6 +2087,13 @@ struct clip_model_loader { if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) { throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str())); } + + // correct arch for multimodal models + if (model.proj_type == PROJECTOR_TYPE_QWEN25O) { + model.proj_type = modality == CLIP_MODALITY_VISION + ? 
PROJECTOR_TYPE_QWEN25VL + : PROJECTOR_TYPE_QWEN2A; + } } const bool is_vision = model.modality == CLIP_MODALITY_VISION; @@ -4078,7 +4086,8 @@ bool clip_has_audio_encoder(const struct clip_ctx * ctx) { } bool clip_has_whisper_encoder(const struct clip_ctx * ctx) { - return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A; + return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX + || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A; } bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) { diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index 1543927348dfd..a9ff5b5c84e51 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -104,6 +104,7 @@ struct mtmd_context { int n_threads; std::string media_marker; + // these are not token, but strings used to mark the beginning and end of image/audio embeddings std::string img_beg; std::string img_end; std::string aud_beg; From 24ec43ebec4209ee8fd6a0eed20022933f0532a0 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 26 May 2025 09:41:56 +0200 Subject: [PATCH 05/16] first working version --- convert_hf_to_gguf.py | 22 +++++++++ tools/mtmd/clip.cpp | 3 +- tools/mtmd/clip.h | 1 - tools/mtmd/mtmd-helper.cpp | 44 +++++++++++++----- tools/mtmd/mtmd.cpp | 92 ++++++++++++++++++++++---------------- 5 files changed, 110 insertions(+), 52 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 93828f1b7df22..2da348f0ca5a4 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2845,6 +2845,25 @@ def get_vision_config(self) -> dict[str, Any] | None: def get_audio_config(self) -> dict[str, Any] | None: return self.global_config["thinker_config"].get("audio_config") + + + def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: + # SinusoidsPositionEmbedding + assert self.hparams_audio is not None + max_timescale = 10000 + length = 1500 + channels = self.hparams_audio["hidden_size"] + log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) + inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float()) + scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] + pos_embd = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1).to(dtype=torch.float32) + yield ("audio_tower.embed_positions.weight", pos_embd) + + def tensor_force_quant(self, name, new_name, bid, n_dims): + del bid, new_name, n_dims # unused + if ".conv" in name and ".weight" in name: + return gguf.GGMLQuantizationType.F16 + return False def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: if name.startswith("thinker."): @@ -2852,6 +2871,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter if name.startswith("audio_tower"): # process audio tensors + if "conv1.bias" in name or "conv2.bias" in name: + # transpose conv1 and conv2 bias + data_torch = data_torch.unsqueeze(-1) if "audio_bos_eos_token" in name: # this tensor is left unused in transformers code # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809 diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 8a5b774c05663..3fafa8ba19a95 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -4066,7 +4066,8 @@ bool clip_is_glm(const struct clip_ctx * ctx) { } bool clip_is_qwen2vl(const struct clip_ctx * ctx) { - return 
ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL; + return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL + || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL; } bool clip_is_llava(const struct clip_ctx * ctx) { diff --git a/tools/mtmd/clip.h b/tools/mtmd/clip.h index 7bdc3157cf268..ab949b9f0a502 100644 --- a/tools/mtmd/clip.h +++ b/tools/mtmd/clip.h @@ -3,7 +3,6 @@ #include "ggml.h" #include #include -#include // !!! Internal header, to be used by mtmd only !!! diff --git a/tools/mtmd/mtmd-helper.cpp b/tools/mtmd/mtmd-helper.cpp index b79094c0a48b6..414fdad1b3d87 100644 --- a/tools/mtmd/mtmd-helper.cpp +++ b/tools/mtmd/mtmd-helper.cpp @@ -66,7 +66,8 @@ struct decode_embd_batch { } } - void set_position_mrope(llama_pos pos_0, int nx, int ny, llama_seq_id seq_id) { + // M-RoPE for image + void set_position_mrope_2d(llama_pos pos_0, int nx, int ny, llama_seq_id seq_id) { GGML_ASSERT(n_pos_per_embd == 4); seq_id_0[0] = seq_id; for (int y = 0; y < ny; y++) { @@ -85,6 +86,23 @@ struct decode_embd_batch { } } + // M-RoPE for audio + void set_position_mrope_1d(llama_pos pos_0, int32_t n_tokens, llama_seq_id seq_id) { + GGML_ASSERT(n_pos_per_embd == 4); + seq_id_0[0] = seq_id; + for (int i = 0; i < n_tokens; i++) { + pos[i ] = pos_0 + i; + pos[i + batch.n_tokens ] = pos_0 + i; + pos[i + batch.n_tokens * 2] = pos_0 + i; + pos[i + batch.n_tokens * 3] = 0; // last pos dim is unused + } + for (int i = 0; i < batch.n_tokens; i++) { + batch.n_seq_id[i] = 1; + batch.seq_id [i] = seq_id_0.data(); + batch.logits [i] = false; + } + } + llama_batch get_view(int offset, int n_tokens) { llama_pos * pos_ptr; pos_view.clear(); @@ -146,18 +164,20 @@ int32_t mtmd_helper_decode_image_chunk( decode_embd_batch batch_embd(encoded_embd, n_tokens, n_pos_per_embd, n_mmproj_embd); if (mtmd_decode_use_mrope(ctx)) { - const auto image_tokens = mtmd_input_chunk_get_tokens_image(chunk); - if (chunk_type != MTMD_INPUT_CHUNK_TYPE_IMAGE) { - LOG_ERR("failed to decode chunk: M-RoPE only accepts image chunk\n"); - return -1; - } - if (!image_tokens) { - LOG_ERR("failed to decode chunk: image tokens are null\n"); - return -1; + if (chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE) { + const auto image_tokens = mtmd_input_chunk_get_tokens_image(chunk); + if (!image_tokens) { + LOG_ERR("failed to decode chunk: image tokens are null\n"); + return -1; + } + const int nx = mtmd_image_tokens_get_nx(image_tokens); + const int ny = mtmd_image_tokens_get_ny(image_tokens); + batch_embd.set_position_mrope_2d(n_past, nx, ny, seq_id); + } else if (chunk_type == MTMD_INPUT_CHUNK_TYPE_AUDIO) { + batch_embd.set_position_mrope_1d(n_past, n_tokens, seq_id); + } else { + GGML_ABORT("invalid chunk type for M-RoPE"); } - const int nx = mtmd_image_tokens_get_nx(image_tokens); - const int ny = mtmd_image_tokens_get_ny(image_tokens); - batch_embd.set_position_mrope(n_past, nx, ny, seq_id); } else { batch_embd.set_position_normal(n_past, seq_id); } diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index a9ff5b5c84e51..641c1156199d5 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -157,18 +157,26 @@ struct mtmd_context { throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname)); } - clip_ctx * ctx_clip = get_clip_ctx(); - if (llama_model_n_embd(text_model) != clip_n_mmproj_embd(ctx_clip)) { + if (llama_model_n_embd(text_model) != n_embd_projected()) { throw std::runtime_error(string_format( "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n" "hint: you may be 
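Note: both M-RoPE helpers above fill one flat pos buffer holding n_pos_per_embd = 4 position streams of batch.n_tokens entries each. A worked sketch of the resulting layout (values illustrative; the 2-D per-patch values follow the pre-existing loop body, which the hunk above truncates):

    // set_position_mrope_1d: 3 audio tokens, pos_0 = 10
    //   stream 0: 10 11 12   // all three RoPE dims advance together
    //   stream 1: 10 11 12
    //   stream 2: 10 11 12
    //   stream 3:  0  0  0   // last pos dim is unused
    // set_position_mrope_2d: 2x2 image patches, pos_0 = 10
    //   stream 0: 10 10 10 10   // temporal dim, constant within one image
    //   stream 1: 10 10 11 11   // y coordinate of each patch
    //   stream 2: 10 11 10 11   // x coordinate of each patch
    //   stream 3:  0  0  0  0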
using wrong mmproj\n", - llama_model_n_embd(text_model), clip_n_mmproj_embd(ctx_clip))); + llama_model_n_embd(text_model), n_embd_projected())); } + if (ctx_v) { + init_vision(); + } + if (ctx_a) { + init_audio(); + } + } - use_mrope = clip_is_qwen2vl(ctx_clip); + void init_vision() { + GGML_ASSERT(ctx_v != nullptr); + use_mrope = clip_is_qwen2vl(ctx_v); - projector_type proj = clip_get_projector_type(ctx_clip); - int minicpmv_version = clip_is_minicpmv(ctx_clip); + projector_type proj = clip_get_projector_type(ctx_v); + int minicpmv_version = clip_is_minicpmv(ctx_v); if (minicpmv_version == 2) { // minicpmv 2.5 format: // (overview) (slice) (slice) \n ... @@ -219,57 +227,53 @@ struct mtmd_context { } // set boi/eoi - projector_type pt = proj_type(); - if (pt == PROJECTOR_TYPE_GEMMA3) { + if (proj == PROJECTOR_TYPE_GEMMA3) { // ... (image embeddings) ... img_beg = ""; img_end = ""; - } else if (pt == PROJECTOR_TYPE_IDEFICS3) { + } else if (proj == PROJECTOR_TYPE_IDEFICS3) { // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215 img_beg = ""; img_end = ""; - } else if (pt == PROJECTOR_TYPE_PIXTRAL) { + } else if (proj == PROJECTOR_TYPE_PIXTRAL) { // https://github.com/huggingface/transformers/blob/1cd110c6cb6a6237614130c470e9a902dbc1a4bd/docs/source/en/model_doc/pixtral.md img_end = "[IMG_END]"; - } else if (pt == PROJECTOR_TYPE_QWEN2VL || pt == PROJECTOR_TYPE_QWEN25VL) { + } else if (proj == PROJECTOR_TYPE_QWEN2VL || proj == PROJECTOR_TYPE_QWEN25VL) { // <|vision_start|> ... (image embeddings) ... <|vision_end|> img_beg = "<|vision_start|>"; img_end = "<|vision_end|>"; - } else if (pt == PROJECTOR_TYPE_LLAMA4) { + } else if (proj == PROJECTOR_TYPE_LLAMA4) { // (more details in mtmd_context constructor) img_beg = "<|image_start|>"; img_end = "<|image_end|>"; + LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n" + " https://github.com/ggml-org/llama.cpp/pull/13282\n", __func__); - } else if (pt == PROJECTOR_TYPE_INTERNVL) { + } else if (proj == PROJECTOR_TYPE_INTERNVL) { // ... (image embeddings) ... img_beg = ""; img_end = ""; - } else if (pt == PROJECTOR_TYPE_QWEN2A) { + } + } + + void init_audio() { + GGML_ASSERT(ctx_a != nullptr); + projector_type proj = clip_get_projector_type(ctx_a); + + LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n" + " https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__); + + if (proj == PROJECTOR_TYPE_QWEN2A) { // <|audio_bos|> ... (embeddings) ... <|audio_eos|> aud_beg = "<|audio_bos|>"; aud_end = "<|audio_eos|>"; } - - // warning messages - if (proj == PROJECTOR_TYPE_LLAMA4) { - LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n" - " https://github.com/ggml-org/llama.cpp/pull/13282\n", __func__); - } - if (ctx_a) { - LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n" - " https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__); - } - } - - // get the main clip ctx - clip_ctx * get_clip_ctx() const { - return ctx_v ? ctx_v : ctx_a; } // get clip ctx based on chunk type @@ -282,14 +286,17 @@ struct mtmd_context { GGML_ABORT("unknown chunk type"); } - // both audio and vision contexts have the same projector type - projector_type proj_type() const { - return clip_get_projector_type(get_clip_ctx()); + projector_type proj_type_v() const { + return ctx_v ? 
clip_get_projector_type(ctx_v) : PROJECTOR_TYPE_UNKNOWN; + } + + projector_type proj_type_a() const { + return ctx_a ? clip_get_projector_type(ctx_a) : PROJECTOR_TYPE_UNKNOWN; } // both audio and vision contexts have the n_embd output dimension int n_embd_projected() const { - return clip_n_mmproj_embd(get_clip_ctx()); + return clip_n_mmproj_embd(ctx_v ? ctx_v : ctx_a); } ~mtmd_context() { @@ -400,6 +407,7 @@ struct mtmd_tokenizer { } void add_text(const std::string & txt, bool add_special, bool parse_special) { + LOG_DBG("%s: %s\n", __func__, txt.c_str()); auto tokens = mtmd_tokenize_text_internal(vocab, txt, add_special, parse_special); add_text(tokens); } @@ -434,7 +442,9 @@ struct mtmd_tokenizer { return 2; } - add_text(ctx->img_beg, false, true); // add image begin token + if (!ctx->img_beg.empty()) { + add_text(ctx->img_beg, false, true); // add image begin token + } // convert mtmd_bitmap to clip_image_u8 clip_image_u8_ptr img_u8(clip_image_u8_init()); @@ -549,7 +559,9 @@ struct mtmd_tokenizer { cur.entries.emplace_back(std::move(chunk)); } - add_text(ctx->img_end, false, true); // add image end token + if (!ctx->img_end.empty()) { + add_text(ctx->img_end, false, true); // add image end token + } } else { // handle audio @@ -564,7 +576,9 @@ struct mtmd_tokenizer { return 2; } - add_text(ctx->aud_beg, false, true); // add audio begin token + if (!ctx->aud_beg.empty()) { + add_text(ctx->aud_beg, false, true); // add audio begin token + } // preprocess audio GGML_ASSERT(ctx->w_filters.n_mel); // make sure we have filter preloaded @@ -606,7 +620,9 @@ struct mtmd_tokenizer { cur.entries.emplace_back(std::move(chunk)); } - add_text(ctx->aud_end, false, true); // add audio end token + if (!ctx->aud_end.empty()) { + add_text(ctx->aud_end, false, true); // add audio end token + } } return 0; @@ -751,7 +767,7 @@ float * mtmd_get_output_embd(mtmd_context * ctx) { } bool mtmd_decode_use_non_causal(mtmd_context * ctx) { - if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) { + if (ctx->ctx_v && clip_get_projector_type(ctx->ctx_v) == PROJECTOR_TYPE_GEMMA3) { return true; } return false; From 1ac73f4070ba1bf403d3c34dd911ca919ca4b9e9 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 26 May 2025 09:43:19 +0200 Subject: [PATCH 06/16] fix style --- convert_hf_to_gguf.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 2da348f0ca5a4..a015ecee08328 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1214,12 +1214,10 @@ def write_vocab(self): raise ValueError("MmprojModel does not support vocab writing") def find_vparam(self, keys: Iterable[str], optional: bool = False) -> Any: - key = next((k for k in keys if k in self.hparams), None) assert self.hparams_vision is not None return self._find_param(self.hparams_vision, keys, optional) def find_aparam(self, keys: Iterable[str], optional: bool = False) -> Any: - key = next((k for k in keys if k in self.hparams), None) assert self.hparams_audio is not None return self._find_param(self.hparams_audio, keys, optional) @@ -2845,8 +2843,7 @@ def get_vision_config(self) -> dict[str, Any] | None: def get_audio_config(self) -> dict[str, Any] | None: return self.global_config["thinker_config"].get("audio_config") - - + def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: # SinusoidsPositionEmbedding assert self.hparams_audio is not None From 9013245317b8a1e169dd4693372e763b63698a60 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Mon, 26 May 2025 10:31:50 +0200 
Subject: [PATCH 07/16] more strict validate of n_embd

---
 tools/mtmd/mtmd.cpp | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp
index 641c1156199d5..a0813713d987e 100644
--- a/tools/mtmd/mtmd.cpp
+++ b/tools/mtmd/mtmd.cpp
@@ -103,6 +103,7 @@ struct mtmd_context {
     bool print_timings;
     int n_threads;
     std::string media_marker;
+    const int n_embd_text;

     // these are not token, but strings used to mark the beginning and end of image/audio embeddings
     std::string img_beg;
@@ -137,7 +138,8 @@ struct mtmd_context {
         text_model   (text_model),
         print_timings(ctx_params.print_timings),
         n_threads    (ctx_params.n_threads),
-        media_marker (ctx_params.media_marker)
+        media_marker (ctx_params.media_marker),
+        n_embd_text  (llama_model_n_embd(text_model))
     {
         if (std::string(ctx_params.image_marker) != MTMD_DEFAULT_IMAGE_MARKER) {
             throw std::runtime_error("custom image_marker is not supported anymore, use media_marker instead");
@@ -156,12 +158,26 @@ struct mtmd_context {
         if (!ctx_v && !ctx_a) {
             throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
         }
+
+        // if both vision and audio mmproj are present, we need to validate their n_embd
+        if (ctx_v && ctx_a) {
+            int n_embd_v = clip_n_mmproj_embd(ctx_v);
+            int n_embd_a = clip_n_mmproj_embd(ctx_a);
+            if (n_embd_v != n_embd_a) {
+                throw std::runtime_error(string_format(
+                    "mismatch between vision and audio mmproj (n_embd_v = %d, n_embd_a = %d)\n",
+                    n_embd_v, n_embd_a));
+            }
+        }

-        if (llama_model_n_embd(text_model) != n_embd_projected()) {
+        // since we already validate n_embd of vision and audio mmproj,
+        // we can safely assume that they are the same
+        int n_embd_clip = clip_n_mmproj_embd(ctx_v ? ctx_v : ctx_a);
+        if (n_embd_text != n_embd_clip) {
             throw std::runtime_error(string_format(
                 "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n"
                 "hint: you may be using wrong mmproj\n",
-                llama_model_n_embd(text_model), n_embd_projected()));
+                n_embd_text, n_embd_clip));
         }
         if (ctx_v) {
             init_vision();
@@ -294,11 +310,6 @@ struct mtmd_context {
         return ctx_a ? clip_get_projector_type(ctx_a) : PROJECTOR_TYPE_UNKNOWN;
     }

-    // both audio and vision contexts have the n_embd output dimension
-    int n_embd_projected() const {
-        return clip_n_mmproj_embd(ctx_v ? 
From 346d252227f095788eac30030812ebc0f5219658 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:07:13 +0200
Subject: [PATCH 08/16] refactor if..else to switch

---
 tools/mtmd/clip.cpp | 131 ++++++++++++++++++++++++++------------------
 tools/mtmd/mtmd.cpp |   2 +-
 2 files changed, 79 insertions(+), 54 deletions(-)

diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 3fafa8ba19a95..753400d47f17d 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -3490,59 +3490,84 @@ int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 *
 
 int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
     const auto & params = ctx->model.hparams;
 
-    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
-    int scale_factor = ctx->model.hparams.proj_scale_factor;
-
-    if (ctx->proj_type() == PROJECTOR_TYPE_LDP
-            || ctx->proj_type() == PROJECTOR_TYPE_LDPV2
-            || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
-        n_patches /= 4;
-        if (ctx->model.mm_glm_tok_boi) {
-            n_patches += 2; // for BOI and EOI token embeddings
-        }
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
-        if (params.minicpmv_version == 2) {
-            n_patches = 96;
-        }
-        else if (params.minicpmv_version == 3) {
-            n_patches = 64;
-        }
-        else if (params.minicpmv_version == 4) {
-            n_patches = 64;
-        }
-        else {
-            GGML_ABORT("Unknown minicpmv version");
-        }
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
-        int patch_size = params.patch_size * 2;
-        int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
-        int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
-        n_patches = x_patch * y_patch;
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) {
-        int n_per_side = params.image_size / params.patch_size;
-        int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
-        n_patches = n_per_side_2d_pool * n_per_side_2d_pool;
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3 || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL) {
-        // both W and H are divided by proj_scale_factor
-        n_patches /= (params.proj_scale_factor * params.proj_scale_factor);
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL) {
-        int n_merge = params.spatial_merge_size;
-        int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
-        int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
-        n_patches = n_patches_y*n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) {
-        n_patches /= (scale_factor * scale_factor);
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) {
-        const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
-        const int n_len = CLIP_ALIGN(img->nx, proj_stack_factor);
-        n_patches = n_len / proj_stack_factor / 2;
-    } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
-        // divide by 2 because of whisper
-        // another divide by 2 because of nn.AvgPool1d(2, stride=2)
-        n_patches = img->nx / 4;
+    // only for models using fixed size square images
+    int n_patches_sq = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
+
+    projector_type proj = ctx->proj_type();
+
+    switch (proj) {
+        case PROJECTOR_TYPE_LDP:
+        case PROJECTOR_TYPE_LDPV2:
+        case PROJECTOR_TYPE_GLM_EDGE:
+            {
+                n_patches_sq /= 4;
+                if (ctx->model.mm_glm_tok_boi) {
+                    n_patches_sq += 2; // for BOI and EOI token embeddings
+                }
+            } break;
+        case PROJECTOR_TYPE_MINICPMV:
+            {
+                if (params.minicpmv_version == 2) {
+                    n_patches_sq = 96;
+                } else if (params.minicpmv_version == 3) {
+                    n_patches_sq = 64;
+                } else if (params.minicpmv_version == 4) {
+                    n_patches_sq = 64;
+                } else {
+                    GGML_ABORT("Unknown minicpmv version");
+                }
+            } break;
+        case PROJECTOR_TYPE_QWEN2VL:
+        case PROJECTOR_TYPE_QWEN25VL:
+            {
+                // dynamic size
+                int patch_size = params.patch_size * 2;
+                int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
+                int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
+                n_patches_sq = x_patch * y_patch;
+            } break;
+        case PROJECTOR_TYPE_GEMMA3:
+            {
+                int n_per_side = params.image_size / params.patch_size;
+                int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
+                n_patches_sq = n_per_side_2d_pool * n_per_side_2d_pool;
+            } break;
+        case PROJECTOR_TYPE_IDEFICS3:
+        case PROJECTOR_TYPE_INTERNVL:
+            {
+                // both W and H are divided by proj_scale_factor
+                n_patches_sq /= (params.proj_scale_factor * params.proj_scale_factor);
+            } break;
+        case PROJECTOR_TYPE_PIXTRAL:
+            {
+                // dynamic size
+                int n_merge = params.spatial_merge_size;
+                int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
+                int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
+                n_patches_sq = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
+            } break;
+        case PROJECTOR_TYPE_LLAMA4:
+            {
+                int scale_factor = ctx->model.hparams.proj_scale_factor;
+                n_patches_sq /= (scale_factor * scale_factor);
+            } break;
+        case PROJECTOR_TYPE_ULTRAVOX:
+            {
+                const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
+                const int n_len = CLIP_ALIGN(img->nx, proj_stack_factor);
+                n_patches_sq = n_len / proj_stack_factor / 2;
+            } break;
+        case PROJECTOR_TYPE_QWEN2A:
+            {
+                // divide by 2 because of whisper
+                // another divide by 2 because of nn.AvgPool1d(2, stride=2)
+                n_patches_sq = img->nx / 4;
+            } break;
+        default:
+            GGML_ABORT("unsupported projector type");
     }
 
-    return n_patches;
+    return n_patches_sq;
 }
 
 static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
@@ -3747,7 +3772,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     }
 
     // set input per projector
-    switch (ctx->proj_type()) {
+    switch (ctx->model.proj_type) {
        case PROJECTOR_TYPE_MINICPMV:
            {
                // inspired from siglip:
@@ -4013,7 +4038,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
 
 int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
     const auto & hparams = ctx->model.hparams;
-    switch (ctx->proj_type()) {
+    switch (ctx->model.proj_type) {
         case PROJECTOR_TYPE_LDP:
             return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
         case PROJECTOR_TYPE_LDPV2:
diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp
index a0813713d987e..ee9830882e184 100644
--- a/tools/mtmd/mtmd.cpp
+++ b/tools/mtmd/mtmd.cpp
@@ -158,7 +158,7 @@ struct mtmd_context {
         if (!ctx_v && !ctx_a) {
             throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
         }
-        
+
         // if both vision and audio mmproj are present, we need to validate their n_embd
         if (ctx_v && ctx_a) {
             int n_embd_v = clip_n_mmproj_embd(ctx_v);
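
To make the per-projector arithmetic above concrete, here is a tiny self-contained program that reproduces four of the branches with typical but assumed hyperparameters (patch_size = 14 for Qwen2-VL, patch_size = 16 and spatial_merge_size = 2 for Pixtral, proj_stack_factor = 8 for Ultravox; all values are illustrative, not read from a real GGUF):

```cpp
#include <cstdio>

// round x up to a multiple of n, same idea as CLIP_ALIGN in clip-impl.h
static int align_up(int x, int n) { return (x + n - 1) / n * n; }

int main() {
    // Qwen2-VL (dynamic size): 2x2 patch merging -> effective cell of patch_size * 2
    int cell = 14 * 2;
    int nx = 448, ny = 336;
    int x_patch = nx / cell + (nx % cell > 0);
    int y_patch = ny / cell + (ny % cell > 0);
    printf("qwen2vl : %d tokens\n", x_patch * y_patch);             // 16 * 12 = 192

    // Pixtral (dynamic size): one [IMG_BREAK] token per row except the last
    int n_merge = 2, patch = 16;
    int px = 512 / patch / n_merge;                                 // 16
    int py = 256 / patch / n_merge;                                 // 8
    printf("pixtral : %d tokens\n", py * px + py - 1);              // 135

    // Ultravox (audio): mel frames stacked 8 at a time, then halved
    int n_frames = 1000;                                            // ~10 s of mel frames
    printf("ultravox: %d tokens\n", align_up(n_frames, 8) / 8 / 2); // 62

    // Qwen2-Audio: /2 from the whisper conv stride, /2 from nn.AvgPool1d(2)
    printf("qwen2a  : %d tokens\n", n_frames / 4);                  // 250
    return 0;
}
```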
From 6e65e0c5eec400388e20a59ed6c553ae54456390 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:25:41 +0200
Subject: [PATCH 09/16] fix regression

---
 tools/mtmd/clip.cpp |  5 +++++
 tools/mtmd/mtmd.cpp | 12 ++++++------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 753400d47f17d..727b6ce429f3b 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -3496,6 +3496,11 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
     projector_type proj = ctx->proj_type();
 
     switch (proj) {
+        case PROJECTOR_TYPE_MLP:
+        case PROJECTOR_TYPE_MLP_NORM:
+            {
+                // do nothing
+            } break;
         case PROJECTOR_TYPE_LDP:
         case PROJECTOR_TYPE_LDPV2:
         case PROJECTOR_TYPE_GLM_EDGE:
diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp
index ee9830882e184..bd58587d21f52 100644
--- a/tools/mtmd/mtmd.cpp
+++ b/tools/mtmd/mtmd.cpp
@@ -103,7 +103,7 @@ struct mtmd_context {
     bool print_timings;
     int n_threads;
     std::string media_marker;
-    const bool n_embd_text;
+    const int n_embd_text;
 
     // these are not token, but strings used to mark the beginning and end of image/audio embeddings
     std::string img_beg;
@@ -237,11 +237,6 @@ struct mtmd_context {
             ov_img_first = false; // overview image is last
         }
 
-        if (ctx_a && clip_has_whisper_encoder(ctx_a)) {
-            // TODO @ngxson : check if model n_mel is 128 or 80
-            w_filters = whisper_precalc_filters::get_128_bins();
-        }
-
         // set boi/eoi
         if (proj == PROJECTOR_TYPE_GEMMA3) {
             // <start_of_image> ... (image embeddings) ... <end_of_image>
@@ -281,6 +276,11 @@ struct mtmd_context {
         GGML_ASSERT(ctx_a != nullptr);
         projector_type proj = clip_get_projector_type(ctx_a);
 
+        if (clip_has_whisper_encoder(ctx_a)) {
+            // TODO @ngxson : check if model n_mel is 128 or 80
+            w_filters = whisper_precalc_filters::get_128_bins();
+        }
+
         LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n"
                 "    https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__);
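
One of the regressions fixed here is easy to miss in review: patch 07 declared `n_embd_text` as `const bool`, so any real hidden size collapsed to 1 and the `n_embd_text != n_embd_clip` check would reject every model. A minimal, standalone reproduction of the implicit conversion (illustrative values):

```cpp
// Why "const bool n_embd_text" broke the n_embd check: any nonzero
// embedding size degrades to true (i.e. 1) when stored in a bool.
#include <cstdio>

int main() {
    const bool n_embd_wrong = 2048;   // implicit conversion: nonzero -> true
    const int  n_embd_right = 2048;
    printf("%d vs %d\n", (int) n_embd_wrong, n_embd_right); // prints "1 vs 2048"
    return 0;
}
```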
From 235fbdbf4bac88ba5b721daba0516890905ff35d Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:25:49 +0200
Subject: [PATCH 10/16] add test for 3B

---
 tools/mtmd/tests.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tools/mtmd/tests.sh b/tools/mtmd/tests.sh
index cb1b99b63e09c..873f7587928ca 100755
--- a/tools/mtmd/tests.sh
+++ b/tools/mtmd/tests.sh
@@ -64,7 +64,10 @@ add_test_vision "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
 add_test_vision "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
 add_test_vision "ggml-org/InternVL2_5-1B-GGUF:Q8_0"
 add_test_vision "ggml-org/InternVL3-1B-Instruct-GGUF:Q8_0"
+add_test_vision "ggml-org/Qwen2.5-Omni-3B-GGUF:Q4_K_M"
+
 add_test_audio "ggml-org/ultravox-v0_5-llama-3_2-1b-GGUF:Q8_0"
+add_test_audio "ggml-org/Qwen2.5-Omni-3B-GGUF:Q4_K_M"
 
 # to test the big models, run: ./tests.sh big
 if [ "$RUN_BIG_TESTS" = true ]; then

From bf34f38fcfb62f8f75dbe97ac08847ef19050296 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:26:06 +0200
Subject: [PATCH 11/16] update docs

---
 docs/multimodal.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/docs/multimodal.md b/docs/multimodal.md
index 3a0994a279ae8..e849c2a0b8ba1 100644
--- a/docs/multimodal.md
+++ b/docs/multimodal.md
@@ -98,3 +98,12 @@ NOTE: some models may require large context window, for example: `-c 8192`
 # note: no pre-quantized GGUF this model, as they have very poor result
 # ref: https://github.com/ggml-org/llama.cpp/pull/13760
 ```
+
+**Mixed modalities**:
+
+```sh
+# Qwen2.5 Omni
+# Capabilities: audio input, vision input
+(tool_name) -hf ggml-org/Qwen2.5-Omni-3B-GGUF
+(tool_name) -hf ggml-org/Qwen2.5-Omni-7B-GGUF
+```

From d03c2407abb939a3f0036ca39439c9c5eb2ebe06 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:43:39 +0200
Subject: [PATCH 12/16] fix tokenizing with add_special

---
 tools/mtmd/clip-impl.h |  2 +-
 tools/mtmd/mtmd.cpp    | 37 ++++++++++++++++++++++++++++++-------
 tools/mtmd/tests.sh    |  8 ++++++--
 3 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/tools/mtmd/clip-impl.h b/tools/mtmd/clip-impl.h
index 8280bce8b93f8..62c936ed00f77 100644
--- a/tools/mtmd/clip-impl.h
+++ b/tools/mtmd/clip-impl.h
@@ -130,7 +130,7 @@ enum projector_type {
     PROJECTOR_TYPE_INTERNVL,
     PROJECTOR_TYPE_LLAMA4,
     PROJECTOR_TYPE_QWEN2A,
-    PROJECTOR_TYPE_QWEN25O,
+    PROJECTOR_TYPE_QWEN25O, // will be replaced by QWEN2A or QWEN25VL depending on clip_ctx
     PROJECTOR_TYPE_UNKNOWN,
 };
diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp
index bd58587d21f52..52bf71e2c9dc0 100644
--- a/tools/mtmd/mtmd.cpp
+++ b/tools/mtmd/mtmd.cpp
@@ -402,10 +402,33 @@ struct mtmd_tokenizer {
                 }
 
             } else {
                 // this is a text part, we should add it as text
-                add_text(part, add_special, parse_special);
+                add_text(part, parse_special);
             }
         }
 
+        if (add_special && llama_vocab_get_add_bos(vocab)) {
+            // if first chunk is text, we add BOS token to first text chunk
+            // otherwise, create a new text chunk with BOS token
+            if (!cur.entries.empty() && cur.entries[0].type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+                // add BOS token to the beginning of first text chunk
+                cur.entries[0].tokens_text.insert(cur.entries[0].tokens_text.begin(), llama_vocab_bos(vocab));
+            } else {
+                // create a new text chunk with BOS token at the beginning
+                mtmd_input_chunk bos_chunk{
+                    MTMD_INPUT_CHUNK_TYPE_TEXT,
+                    {llama_vocab_bos(vocab)},
+                    nullptr, // image tokens
+                    nullptr, // audio tokens
+                };
+                cur.entries.insert(cur.entries.begin(), std::move(bos_chunk));
+            }
+        }
+
+        if (add_special && llama_vocab_get_add_eos(vocab)) {
+            // if last chunk is text, we add EOS token to it
+            add_text({llama_vocab_eos(vocab)});
+        }
+
         if (i_bm != bitmaps.size()) {
             LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
                     __func__, bitmaps.size(), parts.size() - 1);
@@ -417,9 +440,9 @@ struct mtmd_tokenizer {
         return 0;
     }
 
-    void add_text(const std::string & txt, bool add_special, bool parse_special) {
+    void add_text(const std::string & txt, bool parse_special) {
         LOG_DBG("%s: %s\n", __func__, txt.c_str());
-        auto tokens = mtmd_tokenize_text_internal(vocab, txt, add_special, parse_special);
+        auto tokens = mtmd_tokenize_text_internal(vocab, txt, /* add_special */ false, parse_special);
         add_text(tokens);
     }
@@ -454,7 +477,7 @@ struct mtmd_tokenizer {
         }
 
         if (!ctx->img_beg.empty()) {
-            add_text(ctx->img_beg, false, true); // add image begin token
+            add_text(ctx->img_beg, true); // add image begin token
         }
 
         // convert mtmd_bitmap to clip_image_u8
@@ -571,7 +594,7 @@ struct mtmd_tokenizer {
             }
 
             if (!ctx->img_end.empty()) {
-                add_text(ctx->img_end, false, true); // add image end token
+                add_text(ctx->img_end, true); // add image end token
             }
 
         } else {
@@ -588,7 +611,7 @@ struct mtmd_tokenizer {
             }
 
             if (!ctx->aud_beg.empty()) {
-                add_text(ctx->aud_beg, false, true); // add audio begin token
+                add_text(ctx->aud_beg, true); // add audio begin token
             }
 
             // preprocess audio
@@ -632,7 +655,7 @@ struct mtmd_tokenizer {
             }
 
             if (!ctx->aud_end.empty()) {
-                add_text(ctx->aud_end, false, true); // add audio end token
+                add_text(ctx->aud_end, true); // add audio end token
             }
         }
diff --git a/tools/mtmd/tests.sh b/tools/mtmd/tests.sh
index 873f7587928ca..a3efb06afb893 100755
--- a/tools/mtmd/tests.sh
+++ b/tools/mtmd/tests.sh
@@ -30,6 +30,7 @@ fi
 
 ###############
 
+arr_prefix=()
 arr_hf=()
 arr_tmpl=() # chat template
 arr_file=()
@@ -37,6 +38,7 @@ arr_file=()
 add_test_vision() {
     local hf=$1
     local tmpl=${2:-""} # default to empty string if not provided
+    arr_prefix+=("[vision]")
     arr_hf+=("$hf")
     arr_tmpl+=("$tmpl")
     arr_file+=("test-1.jpeg")
@@ -44,6 +46,7 @@ add_test_vision() {
 
 add_test_audio() {
     local hf=$1
+    arr_prefix+=("[audio] ")
     arr_hf+=("$hf")
     arr_tmpl+=("") # no need for chat tmpl
     arr_file+=("test-2.mp3")
@@ -107,6 +110,7 @@ arr_res=()
 
 for i in "${!arr_hf[@]}"; do
     bin="llama-mtmd-cli"
+    prefix="${arr_prefix[$i]}"
     hf="${arr_hf[$i]}"
     tmpl="${arr_tmpl[$i]}"
     inp_file="${arr_file[$i]}"
@@ -127,9 +131,9 @@ for i in "${!arr_hf[@]}"; do
 
     echo "$output" > $SCRIPT_DIR/output/$bin-$(echo "$hf" | tr '/' '-').log
 
     if echo "$output" | grep -iq "new york"; then
-        result="\033[32mOK\033[0m:   $bin $hf"
+        result="$prefix \033[32mOK\033[0m:   $bin $hf"
     else
-        result="\033[31mFAIL\033[0m: $bin $hf"
+        result="$prefix \033[31mFAIL\033[0m: $bin $hf"
     fi
     echo -e "$result"
     arr_res+=("$result")
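
The BOS rule added above has one non-obvious case: when the prompt starts with the media marker there is no leading text chunk to prepend into, so a dedicated one-token text chunk is created. A standalone sketch of the rule, with a plain struct standing in for `mtmd_input_chunk` (all types and token values here are illustrative):

```cpp
#include <cstdio>
#include <string>
#include <vector>

struct chunk { bool is_text; std::string tokens; };

int main() {
    // media-first prompt: "<__media__> describe this"
    std::vector<chunk> chunks = { {false, "(image embeddings)"}, {true, " describe this"} };
    const bool add_bos = true; // stands in for llama_vocab_get_add_bos(vocab)

    if (add_bos) {
        if (!chunks.empty() && chunks.front().is_text) {
            chunks.front().tokens.insert(0, "<bos>");       // prepend into the first text chunk
        } else {
            chunks.insert(chunks.begin(), {true, "<bos>"}); // media-first: new text chunk
        }
    }
    for (const auto & c : chunks) {
        printf("[%s] %s\n", c.is_text ? "text" : "media", c.tokens.c_str());
    }
    return 0;
}
```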
From ef48e8f260d13bf82aa9b4d3a21f55b7a6ce1235 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:54:29 +0200
Subject: [PATCH 13/16] add more tests

---
 tools/mtmd/tests.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tools/mtmd/tests.sh b/tools/mtmd/tests.sh
index a3efb06afb893..721b6826463e0 100755
--- a/tools/mtmd/tests.sh
+++ b/tools/mtmd/tests.sh
@@ -82,7 +82,11 @@ if [ "$RUN_BIG_TESTS" = true ]; then
     add_test_vision "ggml-org/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M"
     add_test_vision "ggml-org/InternVL3-8B-Instruct-GGUF:Q4_K_M"
     add_test_vision "ggml-org/InternVL3-14B-Instruct-GGUF:Q4_K_M"
+    add_test_vision "ggml-org/Qwen2.5-Omni-7B-GGUF:Q4_K_M"
     # add_test_vision "ggml-org/Qwen2.5-VL-32B-Instruct-GGUF:Q4_K_M" # does not work on my mac M3 Ultra
+
+    add_test_audio "ggml-org/ultravox-v0_5-llama-3_1-8b-GGUF:Q4_K_M"
+    add_test_audio "ggml-org/Qwen2.5-Omni-7B-GGUF:Q4_K_M"
 fi
 
 # to test the huge models, run: ./tests.sh huge

From 94d893d6435e77d70ce22a6333b91c5905ac503d Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 26 May 2025 11:56:37 +0200
Subject: [PATCH 14/16] fix test case "huge"

---
 tools/mtmd/tests.sh | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/tools/mtmd/tests.sh b/tools/mtmd/tests.sh
index 721b6826463e0..aa0019893283e 100755
--- a/tools/mtmd/tests.sh
+++ b/tools/mtmd/tests.sh
@@ -25,7 +25,7 @@ RUN_HUGE_TESTS=false
 if [ "${1:-}" = "huge" ]; then
     RUN_HUGE_TESTS=true
     RUN_BIG_TESTS=true
-    echo "Include BIG models..."
+    echo "Include BIG and HUGE models..."
 fi
 
 ###############
@@ -93,18 +93,18 @@ fi
 # this will run both the big and huge models
 # huge models are > 32B parameters
 if [ "$RUN_HUGE_TESTS" = true ]; then
-    add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-72B-Instruct-GGUF:Q4_K_M"
-    add_test "llama-mtmd-cli" "ggml-org/Llama-4-Scout-17B-16E-Instruct-GGUF:IQ1_S"
+    add_test_vision "ggml-org/Qwen2.5-VL-72B-Instruct-GGUF:Q4_K_M"
+    add_test_vision "ggml-org/Llama-4-Scout-17B-16E-Instruct-GGUF:IQ1_S"
 fi
 
 # these models always give the wrong answer, not sure why
-# add_test "llama-mtmd-cli" "ggml-org/SmolVLM-Instruct-GGUF:Q4_K_M"
-# add_test "llama-mtmd-cli" "ggml-org/SmolVLM-256M-Instruct-GGUF:Q8_0"
-# add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-256M-Video-Instruct-GGUF:Q8_0"
+# add_test_vision "ggml-org/SmolVLM-Instruct-GGUF:Q4_K_M"
+# add_test_vision "ggml-org/SmolVLM-256M-Instruct-GGUF:Q8_0"
+# add_test_vision "ggml-org/SmolVLM2-256M-Video-Instruct-GGUF:Q8_0"
 
 # this model has broken chat template, not usable
-# add_test "llama-mtmd-cli" "cmp-nct/Yi-VL-6B-GGUF:Q5_K"
-# add_test "llama-mtmd-cli" "guinmoon/MobileVLM-3B-GGUF:Q4_K_M" "deepseek"
+# add_test_vision "cmp-nct/Yi-VL-6B-GGUF:Q5_K"
+# add_test_vision "guinmoon/MobileVLM-3B-GGUF:Q4_K_M" "deepseek"
 
 ###############

From 05310968743ab1777719eb3e3f6f28fafc8fa620 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Tue, 27 May 2025 10:11:28 +0200
Subject: [PATCH 15/16] rm redundant code

---
 tools/mtmd/clip.cpp | 1 -
 tools/mtmd/clip.h   | 1 -
 2 files changed, 2 deletions(-)

diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 727b6ce429f3b..6ae2c2ce46fd2 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -2602,7 +2602,6 @@ struct clip_model_loader {
                 img->nx = hparams.warmup_audio_size;
                 img->ny = hparams.n_mel_bins;
             }
-            img->buf.resize(img->nx * img->ny * 3);
             batch.entries.push_back(std::move(img));
 
             ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
diff --git a/tools/mtmd/clip.h b/tools/mtmd/clip.h
index ab949b9f0a502..cb2eb261fe2e8 100644
--- a/tools/mtmd/clip.h
+++ b/tools/mtmd/clip.h
@@ -24,7 +24,6 @@ enum clip_modality {
 
 struct clip_context_params {
     bool use_gpu;
-    enum clip_modality modality;
     enum ggml_log_level verbosity;
 };
From 27a8f26685e61e039e0c18aa987d94d980ef0ef4 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Tue, 27 May 2025 10:13:25 +0200
Subject: [PATCH 16/16] set_position_mrope_1d rm n_tokens

---
 tools/mtmd/mtmd-helper.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/mtmd/mtmd-helper.cpp b/tools/mtmd/mtmd-helper.cpp
index 414fdad1b3d87..e6c926080cde3 100644
--- a/tools/mtmd/mtmd-helper.cpp
+++ b/tools/mtmd/mtmd-helper.cpp
@@ -87,10 +87,10 @@ struct decode_embd_batch {
     }
 
     // M-RoPE for audio
-    void set_position_mrope_1d(llama_pos pos_0, int32_t n_tokens, llama_seq_id seq_id) {
+    void set_position_mrope_1d(llama_pos pos_0, llama_seq_id seq_id) {
         GGML_ASSERT(n_pos_per_embd == 4);
         seq_id_0[0] = seq_id;
-        for (int i = 0; i < n_tokens; i++) {
+        for (int i = 0; i < batch.n_tokens; i++) {
             pos[i                     ] = pos_0 + i;
             pos[i + batch.n_tokens    ] = pos_0 + i;
             pos[i + batch.n_tokens * 2] = pos_0 + i;
@@ -174,7 +174,7 @@ int32_t mtmd_helper_decode_image_chunk(
         const int ny = mtmd_image_tokens_get_ny(image_tokens);
         batch_embd.set_position_mrope_2d(n_past, nx, ny, seq_id);
     } else if (chunk_type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
-        batch_embd.set_position_mrope_1d(n_past, n_tokens, seq_id);
+        batch_embd.set_position_mrope_1d(n_past, seq_id);
     } else {
        GGML_ABORT("invalid chunk type for M-RoPE");
    }
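
For reference, `set_position_mrope_1d()` lays out the position buffer as `n_pos_per_embd` contiguous sections of `n_tokens` entries each, writing the same 1-D position into the first three sections (audio has no height/width split). A standalone illustration of the resulting layout for a 4-token chunk starting at `n_past = 7` (the fourth section is outside the hunk's context and is assumed zeroed here):

```cpp
#include <cstdio>
#include <vector>

int main() {
    const int n_tokens = 4, n_pos_per_embd = 4;
    const int pos_0 = 7; // n_past
    std::vector<int> pos(n_tokens * n_pos_per_embd, 0);
    for (int i = 0; i < n_tokens; i++) {
        pos[i                 ] = pos_0 + i; // section 0
        pos[i + n_tokens      ] = pos_0 + i; // section 1
        pos[i + n_tokens * 2  ] = pos_0 + i; // section 2
        // section 3 left as 0 (assumption; not shown in the hunk)
    }
    for (int s = 0; s < n_pos_per_embd; s++) {
        for (int i = 0; i < n_tokens; i++) {
            printf("%d ", pos[s * n_tokens + i]);
        }
        printf("\n"); // rows: 7 8 9 10 / 7 8 9 10 / 7 8 9 10 / 0 0 0 0
    }
    return 0;
}
```

Reading the count from `batch.n_tokens` instead of a parameter removes a value that could silently disagree with the batch it indexes, which is the point of this final cleanup.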