# %%
import torch
import json
import random
import numpy as np
from transformers import AutoTokenizer
from transformers import AutoModel
from loss import batch_all_triplet_loss, batch_hard_triplet_loss
from sklearn.neighbors import KNeighborsClassifier
from tqdm import tqdm

# %%
def generate_train_entity_sets(entity_id_mentions, entity_id_name, group_size, anchor=True):
    # split each entity's mentions into groups of at most group_size;
    # entity_id_mentions maps entity_id -> list of mentions,
    # entity_id_name maps entity_id -> canonical entity name
    entity_sets = []
    if anchor:
        # anchor=True: the canonical entity_name is prepended to every group
        # so it can act as the anchor for the triplet loss
        for id, mentions in entity_id_mentions.items():
            random.shuffle(mentions)
            # chunk the shuffled mentions into groups of at most group_size
            positives = [mentions[i:i + group_size] for i in range(0, len(mentions), group_size)]
            # prepend the entity_name; the caller passes
            # group_size = num_sample_per_class - 1, so each group ends up
            # with at most num_sample_per_class strings
            anchor_positive = [([entity_id_name[id]] + p, id) for p in positives]
            entity_sets.extend(anchor_positive)
    else:
        # anchor=False: the entity_name is mixed into the mentions and shuffled
        # rather than fixed as the first element of each group
        for id, mentions in entity_id_mentions.items():
            group = list(set([entity_id_name[id]] + mentions))
            random.shuffle(group)
            positives = [(group[i:i + group_size], id) for i in range(0, len(group), group_size)]
            entity_sets.extend(positives)
    return entity_sets

# yields batch_size groups at a time; each group is a tuple
# (list of up to num_sample_per_class mention strings, entity_id), so a yielded
# batch contains up to batch_size * num_sample_per_class mentions
def batchGenerator(data, batch_size):
    for i in range(0, len(data), batch_size):
        batch = data[i:i + batch_size]
        x, y = [], []
        for t in batch:
            x.extend(t[0])                # the group's mention strings
            y.extend([t[1]] * len(t[0]))  # repeat the entity_id once per mention
        yield x, y

# %%
with open('../esAppMod/tca_entities.json', 'r') as file:
    entities = json.load(file)
all_entity_id_name = {entity['entity_id']: entity['entity_name'] for _, entity in entities['data'].items()}

with open('../esAppMod/train.json', 'r') as file:
    train = json.load(file)
train_entity_id_mentions = {item['entity_id']: item['mentions'] for _, item in train['data'].items()}
train_entity_id_name = {item['entity_id']: all_entity_id_name[item['entity_id']] for _, item in train['data'].items()}

# %%
num_sample_per_class = 10  # strings per group (anchor + positives)
batch_size = 16  # groups per batch; effective batch size for the triplet loss is batch_size * num_sample_per_class
margin = 2
epochs = 200
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
MODEL_NAME = 'prajjwal1/bert-small'  # alternatives: 'bert-base-cased', 'distilbert-base-cased'

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
model.to(DEVICE)
model.train()

losses = []
# pass num_sample_per_class - 1 so that prepending the entity_name keeps each
# group at num_sample_per_class strings
data = generate_train_entity_sets(train_entity_id_mentions, train_entity_id_name, num_sample_per_class - 1, anchor=True)

# %%
random.shuffle(data)
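
# %%
# Quick sanity check on toy data (illustrative only, not part of the original
# pipeline): shows the group and batch shapes the two helpers above produce.
toy_mentions = {1: ['pytorch', 'torch', 'py-torch'], 2: ['numpy', 'np']}
toy_names = {1: 'PyTorch', 2: 'NumPy'}
toy_sets = generate_train_entity_sets(toy_mentions, toy_names, group_size=2, anchor=True)
# each element is (['EntityName', mention, ...], entity_id),
# e.g. (['PyTorch', 'torch', 'py-torch'], 1)
for toy_x, toy_y in batchGenerator(toy_sets, batch_size=2):
    print(len(toy_x), toy_y)  # flattened mention strings, one label per mention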
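
# %%
# Sketch of one training epoch (an assumption, not the original training loop):
# it presumes loss.py follows the common batch-all triplet-loss signature
# batch_all_triplet_loss(labels, embeddings, margin, squared=False) returning
# (loss, fraction_of_positive_triplets); adjust to whatever loss.py defines.
def train_one_epoch_sketch():
    random.shuffle(data)
    for x, y in batchGenerator(data, batch_size):
        optimizer.zero_grad()
        inputs = tokenizer(x, padding=True, truncation=True, max_length=64, return_tensors='pt').to(DEVICE)
        outputs = model(**inputs)
        # mean-pool the token embeddings, masking out padding positions
        mask = inputs['attention_mask'].unsqueeze(-1)
        embeddings = (outputs.last_hidden_state * mask).sum(1) / mask.sum(1)
        labels = torch.tensor(y, device=DEVICE)
        loss, _ = batch_all_triplet_loss(labels, embeddings, margin, squared=False)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())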
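
# %%
# Hypothetical use of the KNeighborsClassifier import (the evaluation code is
# not in this section): fit a 1-NN classifier on embedded training mentions,
# then classify embedded test mentions by their nearest training neighbour.
def knn_eval_sketch(train_embeddings, train_labels, test_embeddings, test_labels):
    knn = KNeighborsClassifier(n_neighbors=1, metric='cosine')
    knn.fit(train_embeddings, train_labels)
    return knn.score(test_embeddings, test_labels)  # top-1 accuracy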