---
base_model: sentence-transformers/all-mpnet-base-v2
library_name: sentence-transformers
metrics:
  - cosine_accuracy@1
  - cosine_accuracy@3
  - cosine_accuracy@5
  - cosine_accuracy@10
  - cosine_precision@1
  - cosine_precision@3
  - cosine_precision@5
  - cosine_precision@10
  - cosine_recall@1
  - cosine_recall@3
  - cosine_recall@5
  - cosine_recall@10
  - cosine_ndcg@10
  - cosine_mrr@10
  - cosine_map@100
pipeline_tag: sentence-similarity
tags:
  - sentence-transformers
  - sentence-similarity
  - feature-extraction
  - generated_from_trainer
  - dataset_size:505654
  - loss:MultipleNegativesRankingLoss
widget:
  - source_sentence: >-
      module: stationery & printed material & services group: stationery &
      printed material & services supergroup: stationery & printed material &
      services example descriptions: munchkin crayons hween printedsheet mask 2
      pk printed tape tour os silver butterfly relax with art m ab
      hardbacknotebook stickers p val youmeyou text heat w mandalorian a 5 nbook
      nediun bubble envelopes 6 pk whs pastel expan org p poll decoration 1
      airtricity payasyoug
    sentences:
      - 'retailer: groveify description: rainbow magicbooks'
      - 'retailer: crispcorner description: glazed k kreme'
      - 'retailer: vitalveg description: may held aop fl'
  - source_sentence: >-
      module: flavoured drinks carbonated cola group: drinks flavoured rtd
      supergroup: beverages non alcoholic example descriptions: cola w xcoke
      zero 15 oml pepsi 240 k coke zero 500 ml d lepsi max chry 600 coke cherry
      can 009500 pepsi max 500 ml tuo diet coke cf kloke zero coke zero 250 ml
      diet coke nin 15 cocac 3 a 250 ml coca cola 330 ml 10 px coke 125 lzero
      coke 250 mlreg pmpg 5 p
    sentences:
      - 'retailer: vitalveg description: coke 240 k'
      - 'retailer: vitalveg description: tala silicone icing'
      - 'retailer: bountify description: pah antibac wood 10 l'
  - source_sentence: >-
      module: skin conditioning moisturising group: skin conditioning
      moisturising supergroup: personal care example descriptions: ss crmy bdy
      oil dove dm spa sr f m 7 nivea creme 50 carmex lime stick talc powder bo
      dry skn gel garnier milk bld lpblm orgnl vit a serum nv cr gran oh olay
      bright eye crm bio oil 2 x 200 ml nvfc srm q 10 prlbst sf aa nt crm 50
      aveeno cream 500 ml
    sentences:
      - 'retailer: wilko description: radiator m key'
      - 'retailer: nourify description: okf lprp tblpbl un'
      - 'retailer: crispcorner description: 065 each fredflo 60 biodegradable'
  - source_sentence: >-
      module: cakes gateaux ambient group: cakes gateaux ambient supergroup:
      food ambient example descriptions: x 20 pkmcvitiesjaffacakes 1 srn ban
      lunchbx js angel slices x 6 spk mr kipling frosty fancies plantastic
      cherry choc fl hr kipling angel slices 10 pk brompton choc brownies
      jschocchunknuffin loaded drip cake hobnbchoc fjack oreo muffins x 2 mr
      kipling victoria slices 6 pack mk kip choc rdsugar m the best brownies
      odby 5 choc mini
    sentences:
      - 'retailer: flavorful description: nr choc brownies'
      - 'retailer: producify description: dettol srfc wipe'
      - 'retailer: noshify description: garden wheels plate'
  - source_sentence: >-
      module: bread ambient group: bread ambient supergroup: food ambient
      example descriptions: 1 war 3 toastie 400 g cc 90 varburtons bread tovis
      snelwrspmpkin 800 g warbutons medium bread spk giant crumpets z hovis med
      wht 600 g sandwich thins 5 pk warb pk crumpets mission plain tortilla 25
      cm warburtons 4 protein thin bagels hovis soft wet med hovis wholemefl
      pataks pappadums 6 pk warb so bth disc pappajuns
    sentences:
      - 'retailer: greenly description: pomodoro sauce'
      - 'retailer: crispcorner description: kingsmill 5050 medius bread 800 g'
      - 'retailer: vitalveg description: ready to eat prun'
model-index:
  - name: SentenceTransformer based on sentence-transformers/all-mpnet-base-v2
    results:
      - task:
          type: information-retrieval
          name: Information Retrieval
        dataset:
          name: sentence transformers/all mpnet base v2
          type: sentence-transformers/all-mpnet-base-v2
        metrics:
          - type: cosine_accuracy@1
            value: 0.498812351543943
            name: Cosine Accuracy@1
          - type: cosine_accuracy@3
            value: 0.6342042755344418
            name: Cosine Accuracy@3
          - type: cosine_accuracy@5
            value: 0.7102137767220903
            name: Cosine Accuracy@5
          - type: cosine_accuracy@10
            value: 0.7838479809976246
            name: Cosine Accuracy@10
          - type: cosine_precision@1
            value: 0.498812351543943
            name: Cosine Precision@1
          - type: cosine_precision@3
            value: 0.21140142517814728
            name: Cosine Precision@3
          - type: cosine_precision@5
            value: 0.14204275534441804
            name: Cosine Precision@5
          - type: cosine_precision@10
            value: 0.07838479809976245
            name: Cosine Precision@10
          - type: cosine_recall@1
            value: 0.498812351543943
            name: Cosine Recall@1
          - type: cosine_recall@3
            value: 0.6342042755344418
            name: Cosine Recall@3
          - type: cosine_recall@5
            value: 0.7102137767220903
            name: Cosine Recall@5
          - type: cosine_recall@10
            value: 0.7838479809976246
            name: Cosine Recall@10
          - type: cosine_ndcg@10
            value: 0.6324346540369431
            name: Cosine Ndcg@10
          - type: cosine_mrr@10
            value: 0.5850111224220487
            name: Cosine Mrr@10
          - type: cosine_map@100
            value: 0.5910447073012788
            name: Cosine Map@100
---

SentenceTransformer based on sentence-transformers/all-mpnet-base-v2

This is a sentence-transformers model finetuned from sentence-transformers/all-mpnet-base-v2 on the csv dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

Model Details

Model Description

  • Model Type: Sentence Transformer
  • Base model: sentence-transformers/all-mpnet-base-v2
  • Maximum Sequence Length: 384 tokens
  • Output Dimensionality: 768 dimensions
  • Similarity Function: Cosine Similarity
  • Training Dataset:
    • csv

Model Sources

  • Documentation: Sentence Transformers Documentation (https://www.sbert.net)
  • Repository: Sentence Transformers on GitHub (https://github.com/UKPLab/sentence-transformers)
  • Hugging Face: Sentence Transformers on Hugging Face (https://huggingface.co/models?library=sentence-transformers)

Full Model Architecture

SentenceTransformer(
  (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel 
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
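
For reference, the three modules above correspond to: contextual token embeddings from MPNet (inputs truncated at 384 tokens), mean pooling over non-padding tokens, and L2 normalization. Below is a minimal sketch of the same pipeline in plain transformers; it is illustrative, not part of the original card, and uses the model ID from the Usage section below.

import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("carnival13/all-mpnet-base-v2-modulepred")
mpnet = AutoModel.from_pretrained("carnival13/all-mpnet-base-v2-modulepred")

batch = tokenizer(
    ["retailer: vitalveg description: coke 240 k"],
    padding=True, truncation=True, max_length=384, return_tensors="pt",
)
with torch.no_grad():
    token_embeddings = mpnet(**batch).last_hidden_state     # (0) Transformer

mask = batch["attention_mask"].unsqueeze(-1).float()
embedding = (token_embeddings * mask).sum(1) / mask.sum(1)   # (1) mean Pooling
embedding = torch.nn.functional.normalize(embedding, dim=1)  # (2) Normalize
print(embedding.shape)  # torch.Size([1, 768])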

Usage

Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

pip install -U sentence-transformers

Then you can load this model and run inference.

from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("carnival13/all-mpnet-base-v2-modulepred")
# Run inference
sentences = [
    'module: bread ambient group: bread ambient supergroup: food ambient example descriptions: 1 war 3 toastie 400 g cc 90 varburtons bread tovis snelwrspmpkin 800 g warbutons medium bread spk giant crumpets z hovis med wht 600 g sandwich thins 5 pk warb pk crumpets mission plain tortilla 25 cm warburtons 4 protein thin bagels hovis soft wet med hovis wholemefl pataks pappadums 6 pk warb so bth disc pappajuns',
    'retailer: crispcorner description: kingsmill 5050 medius bread 800 g',
    'retailer: vitalveg description: ready to eat prun',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
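
Because the embeddings are L2-normalized, these similarity scores are cosine similarities, so ranking candidate module documents for a retailer description reduces to a single matrix product. A short illustrative sketch (strings shortened from the examples above; `model` is the instance loaded earlier):

# Rank candidate module documents for one retailer description
query = "retailer: crispcorner description: kingsmill 5050 medius bread 800 g"
candidates = [
    "module: bread ambient group: bread ambient supergroup: food ambient",
    "module: cakes gateaux ambient group: cakes gateaux ambient supergroup: food ambient",
    "module: skin conditioning moisturising group: skin conditioning moisturising supergroup: personal care",
]

query_emb = model.encode([query])
cand_embs = model.encode(candidates)

scores = model.similarity(query_emb, cand_embs)  # shape: (1, 3), cosine scores
best = int(scores.argmax())
print(candidates[best])  # expected: the bread ambient module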

Evaluation

Metrics

Information Retrieval

Metric Value
cosine_accuracy@1 0.4988
cosine_accuracy@3 0.6342
cosine_accuracy@5 0.7102
cosine_accuracy@10 0.7838
cosine_precision@1 0.4988
cosine_precision@3 0.2114
cosine_precision@5 0.142
cosine_precision@10 0.0784
cosine_recall@1 0.4988
cosine_recall@3 0.6342
cosine_recall@5 0.7102
cosine_recall@10 0.7838
cosine_ndcg@10 0.6324
cosine_mrr@10 0.585
cosine_map@100 0.591
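
These are the standard outputs of sentence-transformers' InformationRetrievalEvaluator. The card does not ship the evaluation split, but as a hedged sketch, numbers of this form can be reproduced as below, with a toy queries/corpus invented purely for illustration:

from sentence_transformers.evaluation import InformationRetrievalEvaluator

# Hypothetical toy data; the real eval split is not included in the card
queries = {"q1": "retailer: vitalveg description: coke 240 k"}
corpus = {
    "d1": "module: flavoured drinks carbonated cola group: drinks flavoured rtd supergroup: beverages non alcoholic",
    "d2": "module: bread ambient group: bread ambient supergroup: food ambient",
}
relevant_docs = {"q1": {"d1"}}  # correct corpus ids for each query

evaluator = InformationRetrievalEvaluator(
    queries=queries,
    corpus=corpus,
    relevant_docs=relevant_docs,
    name="sentence-transformers/all-mpnet-base-v2",
)
results = evaluator(model)  # `model` from the Usage section
print(results)  # cosine_accuracy@k, cosine_precision@k, ..., cosine_map@100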

Training Details

Training Dataset

csv

  • Dataset: csv
  • Size: 505,654 training samples
  • Columns: query and full_doc
  • Approximate statistics based on the first 1000 samples:
    • query: string; min: 10 tokens, mean: 14.8 tokens, max: 23 tokens
    • full_doc: string; min: 83 tokens, mean: 115.71 tokens, max: 176 tokens
  • Samples:
    • query: retailer: vitalveg description: twin xira
      full_doc: module: chocolate single variety group: chocolate chocolate substitutes supergroup: biscuits & confectionery & snacks example descriptions: milky way twin 43 crml prtzlarum rai galaxy mnstr pipnut 34 g dark pb cup nest mnch foge p nestle smarties shar dark choc chun x 10 pk kinder bueno 1 dr oetker 72 da poppets choc offee pouch yorkie biscuit zpk haltesers truffles bog cadbury mini snowballs p terrys choc orange 3435 g galaxy fusion dark 704 100 g
    • query: retailer: freshnosh description: mab pop sockt
      full_doc: module: clothing & personal accessories group: clothing & personal accessories supergroup: clothing & personal accessories example descriptions: pk blue trad ging 40 d 3 pk opaque tight t 74 green cali jogger ss animal swing yb denim stripe pump aw 21 ff vest aw 21 girls 5 pk lounge toplo sku 1 pk fleecy tight knitted pom hat pk briefs timeless double pom pomkids hat cute face twosie sku coral jersey str pun faded petrol t 32 seamfree waist c
    • query: retailer: nourify description: bts prwn ckt swch
      full_doc: module: bread sandwiches filled rolls wraps group: bread fresh fixed weight supergroup: food perishable example descriptions: us chicken may hamche sw jo dbs allbtr pp st 4 js baconfree ran posh cheesy bea naturify cb swich sp eggcress f cpdfeggbacon js cheeseonion sv duck wrap reduced price takeout egg mayo sandwich 7 takeout cheeseonion s wich 2 ad leicester plough bts cheese pman 2 1 cp bacon chese s
  • Loss: MultipleNegativesRankingLoss with these parameters:
    {
        "scale": 20.0,
        "similarity_fct": "cos_sim"
    }
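
With this loss, each (query, full_doc) pair in a batch treats the full_docs of the other pairs as in-batch negatives, and cosine similarities are multiplied by the scale (20.0) before the softmax cross-entropy. A sketch of the equivalent construction, assuming only the parameters shown above:

from sentence_transformers import SentenceTransformer, losses, util

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
# scale=20.0 sharpens the softmax; util.cos_sim matches "cos_sim" above
loss = losses.MultipleNegativesRankingLoss(model, scale=20.0, similarity_fct=util.cos_sim)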
    

Training Hyperparameters

Non-Default Hyperparameters

  • eval_strategy: steps
  • per_device_train_batch_size: 4
  • per_device_eval_batch_size: 16
  • learning_rate: 2e-05
  • num_train_epochs: 1
  • warmup_ratio: 0.1
  • fp16: True
  • batch_sampler: no_duplicates
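
Putting these together, the run can be reconstructed roughly as follows. This is a hedged sketch, not the author's exact script: the CSV path and the eval holdout are assumptions, while the column names (query, full_doc) come from the Training Dataset section above.

from datasets import load_dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from sentence_transformers.training_args import BatchSamplers, SentenceTransformerTrainingArguments

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)

# Assumed: a local CSV with `query` and `full_doc` columns; the real eval
# split is undocumented, so a small holdout is carved out for eval_strategy="steps"
dataset = load_dataset("csv", data_files="train.csv", split="train")
dataset = dataset.train_test_split(test_size=0.01, seed=42)

args = SentenceTransformerTrainingArguments(
    output_dir="all-mpnet-base-v2-modulepred",
    eval_strategy="steps",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=16,
    learning_rate=2e-5,
    num_train_epochs=1,
    warmup_ratio=0.1,
    fp16=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,  # avoids duplicate in-batch negatives
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    loss=loss,
)
trainer.train()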

All Hyperparameters

  • overwrite_output_dir: False
  • do_predict: False
  • eval_strategy: steps
  • prediction_loss_only: True
  • per_device_train_batch_size: 4
  • per_device_eval_batch_size: 16
  • per_gpu_train_batch_size: None
  • per_gpu_eval_batch_size: None
  • gradient_accumulation_steps: 1
  • eval_accumulation_steps: None
  • torch_empty_cache_steps: None
  • learning_rate: 2e-05
  • weight_decay: 0.0
  • adam_beta1: 0.9
  • adam_beta2: 0.999
  • adam_epsilon: 1e-08
  • max_grad_norm: 1.0
  • num_train_epochs: 1
  • max_steps: -1
  • lr_scheduler_type: linear
  • lr_scheduler_kwargs: {}
  • warmup_ratio: 0.1
  • warmup_steps: 0
  • log_level: passive
  • log_level_replica: warning
  • log_on_each_node: True
  • logging_nan_inf_filter: True
  • save_safetensors: True
  • save_on_each_node: False
  • save_only_model: False
  • restore_callback_states_from_checkpoint: False
  • no_cuda: False
  • use_cpu: False
  • use_mps_device: False
  • seed: 42
  • data_seed: None
  • jit_mode_eval: False
  • use_ipex: False
  • bf16: False
  • fp16: True
  • fp16_opt_level: O1
  • half_precision_backend: auto
  • bf16_full_eval: False
  • fp16_full_eval: False
  • tf32: None
  • local_rank: 0
  • ddp_backend: None
  • tpu_num_cores: None
  • tpu_metrics_debug: False
  • debug: []
  • dataloader_drop_last: False
  • dataloader_num_workers: 0
  • dataloader_prefetch_factor: None
  • past_index: -1
  • disable_tqdm: False
  • remove_unused_columns: True
  • label_names: None
  • load_best_model_at_end: False
  • ignore_data_skip: False
  • fsdp: []
  • fsdp_min_num_params: 0
  • fsdp_config: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
  • fsdp_transformer_layer_cls_to_wrap: None
  • accelerator_config: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
  • deepspeed: None
  • label_smoothing_factor: 0.0
  • optim: adamw_torch
  • optim_args: None
  • adafactor: False
  • group_by_length: False
  • length_column_name: length
  • ddp_find_unused_parameters: None
  • ddp_bucket_cap_mb: None
  • ddp_broadcast_buffers: False
  • dataloader_pin_memory: True
  • dataloader_persistent_workers: False
  • skip_memory_metrics: True
  • use_legacy_prediction_loop: False
  • push_to_hub: False
  • resume_from_checkpoint: None
  • hub_model_id: None
  • hub_strategy: every_save
  • hub_private_repo: False
  • hub_always_push: False
  • gradient_checkpointing: False
  • gradient_checkpointing_kwargs: None
  • include_inputs_for_metrics: False
  • eval_do_concat_batches: True
  • fp16_backend: auto
  • push_to_hub_model_id: None
  • push_to_hub_organization: None
  • mp_parameters:
  • auto_find_batch_size: False
  • full_determinism: False
  • torchdynamo: None
  • ray_scope: last
  • ddp_timeout: 1800
  • torch_compile: False
  • torch_compile_backend: None
  • torch_compile_mode: None
  • dispatch_batches: None
  • split_batches: None
  • include_tokens_per_second: False
  • include_num_input_tokens_seen: False
  • neftune_noise_alpha: None
  • optim_target_modules: None
  • batch_eval_metrics: False
  • eval_on_start: False
  • eval_use_gather_object: False
  • batch_sampler: no_duplicates
  • multi_dataset_batch_sampler: proportional

Training Logs

Epoch Step Training Loss sentence-transformers/all-mpnet-base-v2_cosine_map@100
0.0016 100 1.6195 0.2567
0.0032 200 1.47 0.3166
0.0047 300 1.2703 0.3814
0.0063 400 1.1335 0.4495
0.0079 500 0.9942 0.4827
0.0095 600 0.9004 0.5058
0.0111 700 0.8838 0.5069
0.0016 100 0.951 0.5197
0.0032 200 0.9597 0.5323
0.0047 300 0.9241 0.5406
0.0063 400 0.8225 0.5484
0.0079 500 0.7961 0.5568
0.0095 600 0.7536 0.5621
0.0111 700 0.7387 0.5623
0.0127 800 0.7716 0.5746
0.0142 900 0.7921 0.5651
0.0158 1000 0.7744 0.5707
0.0174 1100 0.8021 0.5770
0.0190 1200 0.732 0.5756
0.0206 1300 0.764 0.5798
0.0221 1400 0.7726 0.5873
0.0237 1500 0.6676 0.5921
0.0253 1600 0.6851 0.5841
0.0269 1700 0.7404 0.5964
0.0285 1800 0.6798 0.5928
0.0301 1900 0.6485 0.5753
0.0316 2000 0.649 0.5839
0.0332 2100 0.6739 0.5891
0.0348 2200 0.6616 0.6045
0.0364 2300 0.6287 0.5863
0.0380 2400 0.6602 0.5898
0.0396 2500 0.5667 0.5910

Framework Versions

  • Python: 3.10.14
  • Sentence Transformers: 3.1.1
  • Transformers: 4.44.2
  • PyTorch: 2.4.0+cu124
  • Accelerate: 0.33.0
  • Datasets: 2.21.0
  • Tokenizers: 0.19.1
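
To approximate this environment, the listed versions can be pinned directly (a convenience note, not part of the original card):

pip install sentence-transformers==3.1.1 transformers==4.44.2 accelerate==0.33.0 datasets==2.21.0 tokenizers==0.19.1
pip install torch==2.4.0 --index-url https://download.pytorch.org/whl/cu124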

Citation

BibTeX

Sentence Transformers

@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}

MultipleNegativesRankingLoss

@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}