Compare commits

2 Commits

737c86bc2e ... b01ca4f395
| Author | SHA1 | Date |
|---|---|---|
| | b01ca4f395 | |
| | 446ed1429c | |
@@ -0,0 +1 @@
*.zip
@@ -0,0 +1,5 @@
# End to End deployment code

This code takes in an input dataframe (assuming that the server code has already
processed the incoming JSON into a pandas dataframe), and applies our mapping +
threshold + de_duplicate code.
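For orientation, a minimal sketch of how the pieces appear to fit together, based on the run script later in this diff; the module and checkpoint names (preprocess, mapper, deduplication, models/mapping_model, train_all.csv) are taken from that script, and the one-row dataframe is a hypothetical stand-in for a server payload:

import pandas as pd
from preprocess import Abbreviator
from mapper import Mapper
from deduplication import run_deduplication

# hypothetical stand-in for the dataframe the server builds from incoming JSON
df = pd.DataFrame({
    'ships_idx': [1000],
    'tag_description': ['M/E C.F.W TEMP'],
    'unit': ['degC'],
})

df = Abbreviator(df).run()                       # normalize abbreviations
mapper = Mapper('models/mapping_model')          # seq2seq thing/property mapper
mapper.prepare_dataloader(df, batch_size=512, max_length=128)
df['p_thing'], df['p_property'] = mapper.generate()

train_df = pd.read_csv('train_all.csv', skipinitialspace=True)
train_df['mapping'] = train_df['thing'] + ' ' + train_df['property']
df = run_deduplication(test_df=df, train_df=train_df, threshold=0.85)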
@@ -0,0 +1,5 @@
models
raw_data.csv
train_all.csv
__pycache__
*.pt
@@ -0,0 +1,433 @@

# %%
import pandas as pd
import os
import glob
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix
import numpy as np
from tqdm import tqdm

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoModelForSeq2SeqLM,
)

##################
# global parameters
##################



class BertEmbedder:
    def __init__(self, input_texts, model_checkpoint):
        # we need to generate the embedding from a list of input strings
        self.embeddings = []
        self.inputs = input_texts
        self.tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="pt", clean_up_tokenization_spaces=True)

        model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # device = "cpu"
        self.model = model.to(self.device)
        self.model = self.model.eval()
        # self.model = torch.compile(self.model)


    def make_embedding(self, batch_size=128):
        all_embeddings = self.embeddings
        input_texts = self.inputs

        for i in range(0, len(input_texts), batch_size):
            batch_texts = input_texts[i:i+batch_size]
            # Tokenize the input text
            inputs = self.tokenizer(batch_texts, return_tensors="pt", padding=True, truncation=True, max_length=120)
            input_ids = inputs.input_ids.to(self.device)
            attention_mask = inputs.attention_mask.to(self.device)


            # Pass the input through the encoder and retrieve the embeddings
            with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
                with torch.no_grad():
                    encoder_outputs = self.model(input_ids, attention_mask=attention_mask, output_hidden_states=True)
                    # get last layer
                    embeddings = encoder_outputs.hidden_states[-1]
                    # get cls token embedding
                    cls_embeddings = embeddings[:, 0, :]  # Shape: (batch_size, hidden_size)
                    all_embeddings.append(cls_embeddings)

        # collapse the list of per-batch tensors into a single large tensor; dim=0 stacks row-wise
        all_embeddings = torch.cat(all_embeddings, dim=0)

        self.embeddings = all_embeddings

class T5Embedder:
    def __init__(self, input_texts, model_checkpoint):
        # we need to generate the embedding from a list of input strings
        self.embeddings = []
        self.inputs = input_texts
        self.tokenizer = AutoTokenizer.from_pretrained("t5-base", return_tensors="pt", clean_up_tokenization_spaces=True)
        # define additional special tokens
        additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
        # add the additional special tokens to the tokenizer
        self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

        model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # device = "cpu"
        model.to(self.device)
        self.model = model.eval()
        self.model = torch.compile(self.model)




    def make_embedding(self, batch_size=128):
        all_embeddings = self.embeddings
        input_texts = self.inputs

        for i in range(0, len(input_texts), batch_size):
            batch_texts = input_texts[i:i+batch_size]
            # Tokenize the input text
            inputs = self.tokenizer(batch_texts, return_tensors="pt", padding=True, truncation=True, max_length=128)
            input_ids = inputs.input_ids.to(self.device)
            attention_mask = inputs.attention_mask.to(self.device)


            # Pass the input through the encoder and retrieve the embeddings

            with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
                with torch.no_grad():
                    encoder_outputs = self.model.encoder(input_ids, attention_mask=attention_mask)
                    embeddings = encoder_outputs.last_hidden_state

            # Compute the mean pooling of the token embeddings
            # mean_embedding = embeddings.mean(dim=1)
            mean_embedding = (embeddings * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(dim=1, keepdim=True)
            all_embeddings.append(mean_embedding)

        # collapse the list of per-batch tensors into a single large tensor; dim=0 stacks row-wise
        all_embeddings = torch.cat(all_embeddings, dim=0)

        self.embeddings = all_embeddings


def cosine_similarity_chunked(batch1, batch2, chunk_size=1024):
    device = 'cuda'
    batch1_size = batch1.size(0)
    batch2_size = batch2.size(0)
    # Tensor.to is not in-place: the moved tensor must be re-assigned
    batch2 = batch2.to(device)

    # Prepare an empty tensor to store results
    cos_sim = torch.empty(batch1_size, batch2_size, device=device)

    # batch2 norms do not change across chunks, so compute them once
    batch2_norms = batch2.norm(dim=1, keepdim=True)

    # Process batch1 in chunks
    for i in range(0, batch1_size, chunk_size):
        batch1_chunk = batch1[i:i + chunk_size].to(device)  # Get chunk of batch1 on the device

        # Expand batch1 chunk and entire batch2 for comparison
        # batch1_chunk_exp = batch1_chunk.unsqueeze(1)  # Shape: (chunk_size, 1, seq_len)
        # batch2_exp = batch2.unsqueeze(0)  # Shape: (1, batch2_size, seq_len)

        # Compute cosine similarity for the chunk and store it in the final tensor
        # cos_sim[i:i + chunk_size] = F.cosine_similarity(batch1_chunk_exp, batch2_exp, dim=-1)

        # Compute cosine similarity by matrix multiplication and normalizing
        sim_chunk = torch.mm(batch1_chunk, batch2.T) / (batch1_chunk.norm(dim=1, keepdim=True) * batch2_norms.T + 1e-8)

        # Store the results in the appropriate part of the final tensor
        cos_sim[i:i + chunk_size] = sim_chunk

    return cos_sim




###################
# helper functions
class Embedder():
    input_df: pd.DataFrame
    fold: int
    batch_size: int

    def __init__(self, input_df, batch_size):
        self.input_df = input_df
        self.batch_size = batch_size


    def make_embedding(self, checkpoint_path):

        def generate_input_list(df):
            input_list = []
            for _, row in df.iterrows():
                desc = f"<DESC>{row['tag_description']}<DESC>"
                unit = f"<UNIT>{row['unit']}<UNIT>"
                # name = f"<NAME>{row['tag_name']}<NAME>"
                element = f"{desc}{unit}"
                input_list.append(element)
            return input_list

        # prepare reference embed
        train_data = list(generate_input_list(self.input_df))
        # Define the directory and the pattern
        # embedder = T5Embedder(train_data, checkpoint_path)
        embedder = BertEmbedder(train_data, checkpoint_path)
        embedder.make_embedding(batch_size=self.batch_size)
        return embedder.embeddings




# the selection function takes in the full cos_sim_matrix, then subsets the
# matrix according to the test_candidates_mask and train_candidates_mask that we
# give it
# it returns the most likely source candidate index and score among the source
# candidate list
# we then map the local idx back to the ship-level idx
def selection(cos_sim_matrix, source_mask, target_mask):
    # subset_matrix = cos_sim_matrix[condition_source]
    # except we are subsetting a 2D matrix (row, column)
    subset_matrix = cos_sim_matrix[np.ix_(source_mask, target_mask)]
    # we select top-k here
    # Get the indices of the top-k maximum values along axis 1
    top_k = 1
    # returns a potential 2d matrix of which columns have the highest values
    # top_k_indices = np.argsort(subset_matrix, axis=1)[:, -top_k:]  # Get indices of top k values
    # argpartition partially sorts, ensuring only the top_k entries are correctly placed
    top_k_indices = np.argpartition(subset_matrix, -top_k, axis=1)[:, -top_k:]

    # Get the values of the top-k maximum scores
    top_k_values = np.take_along_axis(subset_matrix, top_k_indices, axis=1)

    # Calculate the average of the top-k scores along axis 1
    y_scores = np.mean(top_k_values, axis=1)
    max_idx = np.argmax(y_scores)
    max_score = y_scores[max_idx]

    # convert the boolean mask to indices
    condition_indices = np.where(source_mask)[0]
    max_idx = condition_indices[max_idx]

    return max_idx, max_score



####################
# global level
# obtain the full mdm_list

#####################
# fold level

def run_deduplication(
    test_df,
    train_df,
    batch_size=1024,
    threshold=0.9,
    diagnostic=False
    ):

    # TODO: replace this with a list of values to import
    # too wasteful to just import everything
    data_path = '../data_import/exports/data_mapping_mdm.csv'
    full_df = pd.read_csv(data_path, skipinitialspace=True)
    full_df['mapping'] = full_df['thing'] + ' ' + full_df['property']
    full_mdm_mapping_list = sorted(set(full_df['mapping']))

    # set the fold
    # import test data
    df = test_df
    df['p_mapping'] = df['p_thing'] + " " + df['p_property']

    # target data comes from the caller-supplied train_df; build its mapping
    # column here instead of re-reading train_all.csv and shadowing the argument
    train_df['mapping'] = train_df['thing'] + " " + train_df['property']

    # generate your embeddings
    checkpoint_path = 'models/bert_model'

    # cache embeddings
    file_path = "train_embeds.pt"
    if os.path.exists(file_path):
        # Load the tensor if the file exists
        tensor = torch.load(file_path, weights_only=True)
        print("Loaded tensor")
    else:
        # Create and save the tensor if the file doesn't exist
        print('generate train embeddings')
        train_embedder = Embedder(input_df=train_df, batch_size=batch_size)
        tensor = train_embedder.make_embedding(checkpoint_path)
        # note: weights_only is a torch.load argument, not a torch.save one
        torch.save(tensor, file_path)
        print("Tensor saved to file.")

    train_embeds = tensor


    # if we can, we can cache the train embeddings and load directly
    # we can generate the train embeddings once and re-use them for every ship

    # generate new embeddings for each ship
    print('generate test embeddings')
    test_embedder = Embedder(input_df=df, batch_size=batch_size)
    global_test_embeds = test_embedder.make_embedding(checkpoint_path)


    # create global_answer array
    # the purpose of this array is to track the classification state at the
    # global level
    global_answer = np.zeros(len(df), dtype=bool)

    #############################
    # ship level
    # we have to split into per-ship analysis
    ships_list = sorted(set(df['ships_idx']))

    for ship_idx in tqdm(ships_list):
        # ship_df = df[df['ships_idx'] == ship_idx]
        # required to map the local ship_answer array to the global_answer array
        # map_local_index_to_global_index = ship_df.index.to_numpy()

        # we want to subset the ship and only p_mdm values
        ship_mask = df['ships_idx'] == ship_idx
        map_local_index_to_global_index = np.where(ship_mask)[0]
        ship_df = df[ship_mask].reset_index(drop=True)

        # subset the test embeds
        test_embeds = global_test_embeds[map_local_index_to_global_index]

        # generate the cosine sim matrix for the ship level
        cos_sim_matrix = cosine_similarity_chunked(test_embeds, train_embeds, chunk_size=1024).cpu().numpy()

        ##############################
        # selection level
        # The general idea:
        # step 1: keep only pattern generations that belong to the mdm list
        # -> this removes totally wrong datasets that mapped to totally wrong things
        # step 2: loop through the mdm list and isolate data in both train and test that
        # belong to the same pattern class
        # -> this is more tricky, because we have non-mdm mapping to correct classes
        # -> so we have to find which candidate is most similar to the training data

        # it is very tricky to keep track of classification across multiple stages, so we
        # will use a boolean answer list to map answers back to the global answer list

        # initialize the local answer list
        ship_answer_list = np.ones(len(ship_df), dtype=bool)

        ###########
        # STEP 1A: ensure that the predicted mapping labels are valid
        # isin is the vectorized equivalent of an apply/lambda membership test
        pattern_match_mask = ship_df['p_mapping'].isin(full_mdm_mapping_list).to_numpy()
        pattern_match_mask = pattern_match_mask.astype(bool)
        # anything not in the pattern_match_mask is a hallucination
        # this has the same effect as setting any wrong generations as non-mdm
        ship_answer_list[~pattern_match_mask] = False

        # # STEP 1B: subset our de-duplication to use only predicted_mdm labels
        # p_mdm_mask = ship_df['p_mdm']
        # # assign false to any non p_mdm entries
        # ship_answer_list[~p_mdm_mask] = False
        # # modify pattern_match_mask to remove any non p_mdm values
        # pattern_match_mask = pattern_match_mask & p_mdm_mask

        ###########
        # STEP 2
        # we now go through each class found in our generated set

        # we want to identify per-ship mdm classes
        ship_predicted_classes = sorted(set(ship_df['p_mapping'][pattern_match_mask].to_list()))

        # this function performs the selection given a class
        # it takes in the cos_sim_matrix
        # it returns the selection by mutating the answer_list
        # it sets all relevant idxs to False initially, then sets the selected values to True
        def selection_for_class(select_class, cos_sim_matrix, answer_list):

            # create a local copy of answer_list
            ship_answer_list = answer_list.copy()
            # sample_df = ship_df[ship_df['p_mapping'] == select_class]

            # we need to set all idx of chosen entries as False in answer_list -> assume wrong by default
            # selected_idx_list = sample_df.index.to_numpy()
            selected_idx_list = np.where(ship_df['p_mapping'] == select_class)[0]

            # basic assumption check

            # generate the masking arrays for both test and train embeddings
            # we select a tuple from each group, and use that as a candidate for selection
            test_candidates_mask = ship_df['p_mapping'] == select_class
            # the candidates to compare against are the training data sharing the same class
            train_candidates_mask = train_df['mapping'] == select_class

            if sum(train_candidates_mask) == 0:
                # it can be the case that the mdm-valid mapping class is not found in the training data
                # print("not found in training data", select_class)
                ship_answer_list[selected_idx_list] = False
                return ship_answer_list

            # perform selection
            # max_idx is the ship-level index of the winning candidate
            max_idx, max_score = selection(cos_sim_matrix, test_candidates_mask, train_candidates_mask)


            # set the duplicate entries to False
            ship_answer_list[selected_idx_list] = False
            # then only set the one unique chosen value as True
            if max_score > threshold:
                ship_answer_list[max_idx] = True

            return ship_answer_list

        # we choose one mdm class at a time
        for select_class in ship_predicted_classes:
            # this resulted in a big improvement
            if (sum(ship_df['p_mapping'] == select_class)) > 0:
                ship_answer_list = selection_for_class(select_class, cos_sim_matrix, ship_answer_list)

        # we want to write back to global_answer
        # first we convert local indices to global indices
        ship_local_indices = np.where(ship_answer_list)[0]
        ship_global_indices = map_local_index_to_global_index[ship_local_indices]
        global_answer[ship_global_indices] = True

    # we set all unselected values to None
    df.loc[~global_answer, 'p_thing'] = None
    df.loc[~global_answer, 'p_property'] = None


    if diagnostic:
        print(80 * '*')

        y_true = df['MDM'].to_list()
        y_pred = global_answer

        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
        print(f"tp: {tp}")
        print(f"tn: {tn}")
        print(f"fp: {fp}")
        print(f"fn: {fn}")

        # compute metrics
        accuracy = accuracy_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)

        # print the results
        print(f'accuracy: {accuracy:.5f}')
        print(f'f1 score: {f1:.5f}')
        print(f'Precision: {precision:.5f}')
        print(f'Recall: {recall:.5f}')


    return df

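For reference, the normalized matrix product used in cosine_similarity_chunked above is algebraically the pairwise cosine similarity; a quick standalone sanity check (hypothetical, re-implementing one chunk's arithmetic on random data rather than importing the module):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
a = torch.randn(4, 8)   # stand-in test embeddings
b = torch.randn(6, 8)   # stand-in train embeddings

# chunked formula: dot products divided by the outer product of row norms
sim = torch.mm(a, b.T) / (a.norm(dim=1, keepdim=True) * b.norm(dim=1, keepdim=True).T + 1e-8)

# reference: pairwise cosine similarity via broadcasting
ref = F.cosine_similarity(a.unsqueeze(1), b.unsqueeze(0), dim=-1)

assert torch.allclose(sim, ref, atol=1e-5)
print(sim.shape)  # torch.Size([4, 6])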
@@ -0,0 +1,214 @@
import torch
from torch.utils.data import DataLoader
from transformers import (
    T5TokenizerFast,
    AutoModelForSeq2SeqLM,
    StoppingCriteria,
    StoppingCriteriaList,
)
import os
from tqdm import tqdm
from datasets import Dataset
import numpy as np
from torch.nn.utils.rnn import pad_sequence

os.environ['TOKENIZERS_PARALLELISM'] = 'false'


class Mapper():
    tokenizer: T5TokenizerFast
    model: torch.nn.Module
    dataloader: DataLoader

    def __init__(self, checkpoint_path):
        self._create_tokenizer()
        self._load_model(checkpoint_path)


    def _create_tokenizer(self):
        # %%
        # load tokenizer
        self.tokenizer = T5TokenizerFast.from_pretrained("t5-small", return_tensors="pt", clean_up_tokenization_spaces=True)
        # Define additional special tokens
        # keep these consistent with the bracketed <DESC>/<UNIT> markers used when formatting inputs below
        additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
        # Add the additional special tokens to the tokenizer
        self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

    def _load_model(self, checkpoint_path: str):
        # load model
        # Define the directory and the pattern
        model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint_path)
        # set model to eval
        self.model = model.eval()
        self.model.cuda()
        # self.model = torch.compile(self.model)




    def prepare_dataloader(self, input_df, batch_size, max_length):
        """
        *arguments*
        - input_df: input dataframe containing fields 'tag_description', 'thing', 'property'
        - batch_size: the batch size of dataloader output
        - max_length: length of tokenizer output
        """
        print("preparing dataloader")
        # convert each dataframe row into a dictionary
        # outputs a list of dictionaries

        def _process_df(df):
            output_list = []
            for _, row in df.iterrows():
                desc = f"<DESC>{row['tag_description']}<DESC>"
                unit = f"<UNIT>{row['unit']}<UNIT>"
                element = {
                    'input' : f"{desc}{unit}"
                    # 'output': f"<THING_START>{row['thing']}<THING_END><PROPERTY_START>{row['property']}<PROPERTY_END>",
                }
                output_list.append(element)

            return output_list

        def _preprocess_function(example):
            input = example['input']
            # target = example['output']
            # text_target sets the corresponding label to inputs
            # there is no need to create a separate 'labels'
            model_inputs = self.tokenizer(
                input,
                # text_target=target,
                # max_length=max_length,
                padding=True,
                truncation=True,
                return_tensors="pt",
            )
            return model_inputs

        test_dataset = Dataset.from_list(_process_df(input_df))


        # map applies the function to each batch of rows in the dataset
        datasets = test_dataset.map(
            _preprocess_function,
            batched=True,
            num_proc=1,
            remove_columns=test_dataset.column_names,
        )
        # datasets = _preprocess_function(test_dataset)
        datasets.set_format(type='torch', columns=['input_ids', 'attention_mask'])

        def _custom_collate_fn(batch):
            # Extract data and targets separately if needed
            inputs = [item['input_ids'] for item in batch]
            attention_masks = [item['attention_mask'] for item in batch]

            # Pad data to the same length
            padded_inputs = pad_sequence(inputs, batch_first=True)
            padded_attention_masks = pad_sequence(attention_masks, batch_first=True)

            return {'input_ids': padded_inputs, 'attention_mask': padded_attention_masks}


        # create dataloader
        self.dataloader = DataLoader(
            datasets,
            batch_size=batch_size,
            collate_fn=_custom_collate_fn
        )


    def generate(self):
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        MAX_GENERATE_LENGTH = 120

        pred_generations = []
        # pred_labels = []
        # self.model already assigned to device
        # self.model.cuda()

        # introduce early stopping so that it doesn't have to generate max length
        class StopOnEndToken(StoppingCriteria):
            def __init__(self, end_token_id):
                self.end_token_id = end_token_id

            def __call__(self, input_ids, scores, **kwargs):
                # Check if the last token in any sequence is the end token
                batch_stopped = input_ids[:, -1] == self.end_token_id
                # only stop if all sequences have reached the end token
                if batch_stopped.all():
                    return True  # Stop generation for the entire batch
                return False

        # Define the end token ID
        end_token_id = 32103  # property end token

        # Use the stopping criteria
        stopping_criteria = StoppingCriteriaList([StopOnEndToken(end_token_id)])

        print("start generation")
        for batch in tqdm(self.dataloader):
            # Inference in batches
            input_ids = batch['input_ids']
            attention_mask = batch['attention_mask']
            # save labels too
            # pred_labels.extend(batch['labels'])

            # Move to GPU if available
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)

            # Perform inference
            # disable autocast if running on GPUs without tensor cores
            with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
                with torch.no_grad():
                    outputs = self.model.generate(input_ids,
                                            attention_mask=attention_mask,
                                            max_length=MAX_GENERATE_LENGTH,
                                            # eos_token_id=32103,
                                            # early_stopping=True,
                                            use_cache=True,
                                            stopping_criteria=stopping_criteria)

                    # Decode the output and collect the results
                    pred_generations.extend(outputs.to("cpu"))



        # %%
        # extract sequence and decode
        def extract_seq(tokens, start_value, end_value):
            if start_value not in tokens or end_value not in tokens:
                return None  # Or handle this case according to your requirements
            start_id = np.where(tokens == start_value)[0][0]
            end_id = np.where(tokens == end_value)[0][0]

            return tokens[start_id+1:end_id]


        def process_tensor_output(tokens):
            thing_seq = extract_seq(tokens, 32100, 32101)  # 32100 = <THING_START>, 32101 = <THING_END>
            property_seq = extract_seq(tokens, 32102, 32103)  # 32102 = <PROPERTY_START>, 32103 = <PROPERTY_END>
            p_thing = None
            p_property = None
            if (thing_seq is not None):
                p_thing = self.tokenizer.decode(thing_seq, skip_special_tokens=False)
            if (property_seq is not None):
                p_property = self.tokenizer.decode(property_seq, skip_special_tokens=False)
            return p_thing, p_property

        # decode prediction labels
        def decode_preds(tokens_list):
            thing_prediction_list = []
            property_prediction_list = []
            for tokens in tokens_list:
                p_thing, p_property = process_tensor_output(tokens)
                thing_prediction_list.append(p_thing)
                property_prediction_list.append(p_property)
            return thing_prediction_list, property_prediction_list

        thing_prediction_list, property_prediction_list = decode_preds(pred_generations)
        return thing_prediction_list, property_prediction_list
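To make the token-window extraction concrete, a small illustration with a hypothetical generated sequence (marker IDs as in the comments above; the payload IDs 17, 23, 41 are made up):

import numpy as np

def extract_seq(tokens, start_value, end_value):
    # same logic as the helper in Mapper.generate: slice strictly between the markers
    if start_value not in tokens or end_value not in tokens:
        return None
    start_id = np.where(tokens == start_value)[0][0]
    end_id = np.where(tokens == end_value)[0][0]
    return tokens[start_id+1:end_id]

# hypothetical output: <pad> <THING_START> t1 t2 <THING_END> <PROPERTY_START> p1 <PROPERTY_END>
tokens = np.array([0, 32100, 17, 23, 32101, 32102, 41, 32103])
print(extract_seq(tokens, 32100, 32101))  # [17 23] -> decoded into p_thing
print(extract_seq(tokens, 32102, 32103))  # [41]    -> decoded into p_property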
@@ -0,0 +1,77 @@
# %%
import re
from replacement_dict import desc_replacement_dict, unit_replacement_dict

class Abbreviator:

    def __init__(self, df):
        self.df = df

    def _count_abbreviation_occurrences(self, tag_descriptions, abbreviation):
        """Count the number of occurrences of the abbreviation in the list of machine descriptions."""
        pattern = re.compile(abbreviation)
        count = sum(len(pattern.findall(description)) for description in tag_descriptions)
        return count

    def _replace_abbreviations(self, tag_descriptions, abbreviations):
        """Replace the abbreviations according to the key-value pairs provided."""
        replaced_descriptions = []
        for description in tag_descriptions:
            for abbreviation, replacement in abbreviations.items():
                description = re.sub(abbreviation, replacement, description)

            replaced_descriptions.append(description)
        return replaced_descriptions

    def _cleanup_spaces(self, tag_descriptions):
        # Replace all whitespace with a single space
        replaced_descriptions = []
        for description in tag_descriptions:
            description_clean = re.sub(r'\s+', ' ', description)
            replaced_descriptions.append(description_clean)
        return replaced_descriptions

    # remove all dots
    def _cleanup_dots(self, tag_descriptions):
        replaced_descriptions = []
        for description in tag_descriptions:
            description_clean = re.sub(r'\.', '', description)
            replaced_descriptions.append(description_clean)
        return replaced_descriptions


    def run(self):
        df = self.df

        # %%
        # Replace abbreviations
        print("running substitution for descriptions")
        # strip leading and trailing whitespace, then normalize to uppercase
        df['tag_description'] = df['tag_description'].str.strip()
        df['tag_description'] = df['tag_description'].str.upper()
        # Replace missing or whitespace-only entries with "NOVALUE"
        # note that "N/A" can be read as nan
        df['tag_description'] = df['tag_description'].fillna("NOVALUE")
        df['tag_description'] = df['tag_description'].replace(r'^\s*$', 'NOVALUE', regex=True)

        # perform the actual substitution
        tag_descriptions = df['tag_description']
        replaced_descriptions = self._replace_abbreviations(tag_descriptions, desc_replacement_dict)
        replaced_descriptions = self._cleanup_spaces(replaced_descriptions)
        replaced_descriptions = self._cleanup_dots(replaced_descriptions)
        df["tag_description"] = replaced_descriptions
        # print("Descriptions after replacement:", replaced_descriptions)

        # %%
        print("running substitutions for units")
        df['unit'] = df['unit'].fillna("NOVALUE")
        df['unit'] = df['unit'].replace(r'^\s*$', 'NOVALUE', regex=True)
        unit_list = df['unit']
        new_unit = self._replace_abbreviations(unit_list, unit_replacement_dict)
        new_unit = self._cleanup_spaces(new_unit)
        df['unit'] = new_unit

        return df

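A small, hypothetical demonstration of the Abbreviator on one row; the expansions follow the entries in the replacement dictionaries shown in the next file:

import pandas as pd
from preprocess import Abbreviator

df = pd.DataFrame({
    'tag_description': ['m/e c.f.w temp'],  # lowercase, abbreviated
    'unit': ['degC'],
})
df = Abbreviator(df).run()
# after upper-casing and substitution:
print(df.loc[0, 'tag_description'])  # 'MAIN_ENGINE COOLING FEED WATER TEMPERATURE'
print(df.loc[0, 'unit'])             # 'TEMPERATURE'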
@@ -0,0 +1,291 @@
# substitution mapping for descriptions
# Abbreviations and their replacements
desc_replacement_dict = {
    r'\bLIST\b': 'LIST',
    # exhaust gas
    r'\bE\. GAS\b': 'EXHAUST GAS',
    r'\bEXH\.\b': 'EXHAUST',
    r'\bEXH\b': 'EXHAUST',
    r'\bEXHAUST\.\b': 'EXHAUST',
    r'\bEXHAUST\b': 'EXHAUST',
    r'\bBLR\.EXH\.\b': 'BOILER EXHAUST',
    # temperature
    r'\bTEMP\.\b': 'TEMPERATURE',
    r'\bTEMP\b': 'TEMPERATURE',
    r'\bTEMPERATURE\.\b': 'TEMPERATURE',
    r'\bTEMPERATURE\b': 'TEMPERATURE',
    # cylinder
    r'\bCYL(\d+)\b': r'CYLINDER\1',
    r'\bCYL\.(\d+)\b': r'CYLINDER\1',
    r'\bCYL(?=\d|\W|$)\b': 'CYLINDER',
    r'\bCYL\.\b': 'CYLINDER',
    r'\bCYL\b': 'CYLINDER',
    # cooling
    r'\bCOOL\.\b': 'COOLING',
    r'\bCOOLING\b': 'COOLING',
    r'\bCOOLER\b': 'COOLER',
    r'\bCW\b': 'COOLING WATER',
    r'\bC\.W\b': 'COOLING WATER',
    r'\bJ\.C\.F\.W\b': 'JACKET COOLING FEED WATER',
    r'\bJ\.C F\.W\b': 'JACKET COOLING FEED WATER',
    r'\bJACKET C\.F\.W\b': 'JACKET COOLING FEED WATER',
    r'\bCOOL\. F\.W\b': 'COOLING FEED WATER',
    r'\bC\.F\.W\b': 'COOLING FEED WATER',
    # sea water
    r'\bC\.S\.W\b': 'COOLING SEA WATER',
    r'\bCSW\b': 'COOLING SEA WATER',
    r'\bC.S.W\b': 'COOLING SEA WATER',
    # water
    r'\bFEED W\.\b': 'FEED WATER',
    r'\bFEED W\b': 'FEED WATER',
    r'\bF\.W\b': 'FEED WATER',
    r'\bF\.W\.\b': 'FEED WATER',
    r'\bFW\b': 'FEED WATER',
    # r'\bWATER\b': 'WATER',
    r'\bSCAV\.\b': 'SCAVENGE',
    r'\bSCAV\b': 'SCAVENGE',
    r'\bINL\.\b': 'INLET',
    r'\bINLET\b': 'INLET',
    r'\bOUT\.\b': 'OUTLET',
    r'\bOUTL\.\b': 'OUTLET',
    r'\bOUTLET\b': 'OUTLET',
    # tank
    r'\bSTOR\.TK\b': 'STORAGE TANK',
    r'\bSTOR\. TK\b': 'STORAGE TANK',
    r'\bSERV\. TK\b': 'SERVICE TANK',
    r'\bSETT\. TK\b': 'SETTLING TANK',
    r'\bBK\b': 'BUNKER',
    r'\bTK\b': 'TANK',
    # PRESSURE
    r'\bPRESS\b': 'PRESSURE',
    r'\bPRESS\.\b': 'PRESSURE',
    r'\bPRESSURE\b': 'PRESSURE',
    r'PRS\b': 'PRESSURE',  # this is a special replacement - it is safe to replace PRS w/o checks
    # ENGINE
    r'\bENG\.\b': 'ENGINE',
    r'\bENG\b': 'ENGINE',
    r'\bENGINE\b': 'ENGINE',
    r'\bENGINE SPEED\b': 'ENGINE SPEED',
    r'\bENGINE RUNNING\b': 'ENGINE RUNNING',
    r'\bENGINE RPM PICKUP\b': 'ENGINE RPM PICKUP',
    r'\bENGINE ROOM\b': 'ENGINE ROOM',
    r'\bE/R\b': 'ENGINE ROOM',
    # MAIN ENGINE
    r'\bM/E NO.(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bM/E NO(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bM/E  NO.(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bME NO.(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bM/E\b': 'MAIN_ENGINE',
    r'\bM/E(.)\b': r'MAIN_ENGINE \1', # M/E(S/P)
    r'\bME(.)\b': r'MAIN_ENGINE \1', # ME(S/P)
    r'\bM_E\b': 'MAIN_ENGINE',
    r'\bME(?=\d|\W|$)\b': 'MAIN_ENGINE',
    r'\bMAIN ENGINE\b': 'MAIN_ENGINE',
    # ENGINE variants
    r'\bM_E_RPM\b': 'MAIN ENGINE RPM',
    r'\bM/E_M\.G\.O\.\b': 'MAIN ENGINE MARINE GAS OIL',
    r'\bM/E_H\.F\.O\.\b': 'MAIN ENGINE HEAVY FUEL OIL',
    # GENERATOR ENGINE
    r'\bGEN(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bGE(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    # ensure that we substitute only for terms where the following GE is num or special
    r'\bGE(?=\d|\W|$)\b': 'GENERATOR_ENGINE',
    r'\bG/E(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bG/E\b': r'GENERATOR_ENGINE',
    r'\bG_E(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bG_E\b': 'GENERATOR_ENGINE',
    r'\bGENERATOR ENGINE\b': 'GENERATOR_ENGINE',
    r'\bG/E_M\.G\.O\b': 'GENERATOR_ENGINE MARINE GAS OIL',
    # DG
    r'\bDG(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bDG\b': 'GENERATOR_ENGINE',
    r'\bD/G\b': 'GENERATOR_ENGINE',
    r'\bDG(\d+)\((.)\)\b': r'NO\1\2 GENERATOR_ENGINE', # handle DG2(A)
    r'\bDG(\d+[A-Za-z])\b': r'NO\1 GENERATOR_ENGINE', # handle DG2A
    # DG variants
    r'\bDG_CURRENT\b': 'GENERATOR_ENGINE CURRENT',
    r'\bDG_LOAD\b': 'GENERATOR_ENGINE LOAD',
    r'\bDG_FREQUENCY\b': 'GENERATOR_ENGINE FREQUENCY',
    r'\bDG_VOLTAGE\b': 'GENERATOR_ENGINE VOLTAGE',
    r'\bDG_CLOSED\b': 'GENERATOR_ENGINE CLOSED',
    r'\bD/G_CURRENT\b': 'GENERATOR_ENGINE CURRENT',
    r'\bD/G_LOAD\b': 'GENERATOR_ENGINE LOAD',
    r'\bD/G_FREQUENCY\b': 'GENERATOR_ENGINE FREQUENCY',
    r'\bD/G_VOLTAGE\b': 'GENERATOR_ENGINE VOLTAGE',
    r'\bD/G_CLOSED\b': 'GENERATOR_ENGINE CLOSED',
    # MGE
    r'\b(\d+)MGE\b': r'NO\1 MAIN_GENERATOR_ENGINE',
    # generator engine and mgo
    r'\bG/E_M\.G\.O\.\b': r'GENERATOR_ENGINE MARINE GAS OIL',
    r'\bG/E_H\.F\.O\.\b': r'GENERATOR_ENGINE HEAVY FUEL OIL',
    # ultra low sulfur fuel oil
    r'\bU\.L\.S\.F\.O\b': 'ULTRA LOW SULFUR FUEL OIL',
    r'\bULSFO\b': 'ULTRA LOW SULFUR FUEL OIL',
    # marine gas oil
    r'\bM\.G\.O\b': 'MARINE GAS OIL',
    r'\bMGO\b': 'MARINE GAS OIL',
    r'\bMDO\b': 'MARINE DIESEL OIL',
    # light fuel oil
    r'\bL\.F\.O\b': 'LIGHT FUEL OIL',
    r'\bLFO\b': 'LIGHT FUEL OIL',
    # heavy fuel oil
    r'\bHFO\b': 'HEAVY FUEL OIL',
    r'\bH\.F\.O\b': 'HEAVY FUEL OIL',
    # piston cooling oil
    r'\bPCO\b': 'PISTON COOLING OIL',
    r'\bP\.C\.O\.\b': 'PISTON COOLING OIL',
    r'\bP\.C\.O\b': 'PISTON COOLING OIL',
    r'PISTION C.O': 'PISTON COOLING OIL',
    # diesel oil
    r'\bD.O\b': 'DIESEL OIL',
    # for remaining fuel oil that couldn't be substituted
    r'\bF\.O\b': 'FUEL OIL',
    r'\bFO\b': 'FUEL OIL',
    # lubricant
    r'\bLUB\.\b': 'LUBRICANT',
    r'\bLUBE\b': 'LUBRICANT',
    r'\bLUBR\.\b': 'LUBRICANT',
    r'\bLUBRICATING\.\b': 'LUBRICANT',
    r'\bLUBRICATION\.\b': 'LUBRICANT',
    # lubricating oil
    r'\bL\.O\b': 'LUBRICATING OIL',
    r'\bLO\b': 'LUBRICATING OIL',
    # lubricating oil pressure
    r'\bLO_PRESS\b': 'LUBRICATING OIL PRESSURE',
    r'\bLO_PRESSURE\b': 'LUBRICATING OIL PRESSURE',
    # temperature
    r'\bL\.T\b': 'LOW TEMPERATURE',
    r'\bLT\b': 'LOW TEMPERATURE',
    r'\bH\.T\b': 'HIGH TEMPERATURE',
    r'\bHT\b': 'HIGH TEMPERATURE',
    # BOILER
    # auxiliary boiler
    # replace these first before replacing AUXILIARY only
    r'\bAUX\.BOILER\b': 'AUXILIARY BOILER',
    r'\bAUX\. BOILER\b': 'AUXILIARY BOILER',
    r'\bAUX BLR\b': 'AUXILIARY BOILER',
    r'\bAUX\.\b': 'AUXILIARY',
    r'\bAUX\b': 'AUXILIARY',
    # composite boiler
    r'\bCOMP\. BOILER\b': 'COMPOSITE BOILER',
    r'\bCOMP\.BOILER\b': 'COMPOSITE BOILER',
    r'\bCOMP BOILER\b': 'COMPOSITE BOILER',
    r'\bCOMP\b': 'COMPOSITE',
    r'\bCMPS\b': 'COMPOSITE',
    # any other boiler
    r'\bBLR\.\b': 'BOILER',
    r'\bBLR\b': 'BOILER',
    r'\bBOILER W.CIRC.P/P\b': 'BOILER WATER CIRC P/P',
    # winding
    r'\bWIND\.\b': 'WINDING',
    r'\bWINDING\b': 'WINDING',
    # VOLTAGE/FREQ/CURRENT
    r'\bVLOT\.': 'VOLTAGE', # correct spelling
    r'\bVOLT\.': 'VOLTAGE',
    r'\bVOLTAGE\b': 'VOLTAGE',
    r'\bFREQ\.': 'FREQUENCY',
    r'\bFREQUENCY\b': 'FREQUENCY',
    r'\bCURR\.': 'CURRENT',
    r'\bCURRENT\b': 'CURRENT',
    # TURBOCHARGER
    r'\bTCA\b': 'TURBOCHARGER',
    r'\bTCB\b': 'TURBOCHARGER',
    r'\bT/C\b': 'TURBOCHARGER',
    r'\bT_C\b': 'TURBOCHARGER',
    r'\bT/C_RPM\b': 'TURBOCHARGER RPM',
    r'\bTC(\d+)\b': r'TURBOCHARGER\1',
    r'\bT/C(\d+)\b': r'TURBOCHARGER\1',
    r'\bTC(?=\d|\W|$)\b': 'TURBOCHARGER',
    r'\bTURBOCHAGER\b': 'TURBOCHARGER',
    r'\bTURBOCHARGER\b': 'TURBOCHARGER',
    r'\bTURBOCHG\b': 'TURBOCHARGER',
    # misc spelling errors
    r'\bOPERATOIN\b': 'OPERATION',
    # wrongly attached terms
    r'\bBOILERMGO\b': 'BOILER MGO',
    # additional standardizing replacement
    # replace # followed by a number with NO
    r'#(?=\d)\b': 'NO',
    r'\bNO\.(?=\d)\b': 'NO',
    r'\bNO\.\.(?=\d)\b': 'NO',
    # others:
    # generator
    r'\bGEN\.\b': 'GENERATOR',
    # others
    r'\bGEN\.WIND\.TEMP\b': 'GENERATOR WINDING TEMPERATURE',
    r'\bFLTR\b': 'FILTER',
    r'\bCLR\b': 'CLEAR',
}

# substitution mapping for units
# Abbreviations and their replacements
unit_replacement_dict = {
    r'\b%\b': 'PERCENT',
    r'\b-\b': '',
    r'\b-  \b': '',
    # ensure no character after A
    r'\bA(?!\w|/)': 'CURRENT',
    r'\bAmp(?!\w|/)': 'CURRENT',
    r'\bHz\b': 'HERTZ',
    r'\bKG/CM2\b': 'PRESSURE',
    r'\bKG/H\b': 'KILOGRAM PER HOUR',
    r'\bKNm\b': 'RPM',
    r'\bKW\b': 'POWER',
    r'\bKg(?!\w|/)': 'MASS',
    r'\bKw\b': 'POWER',
    r'\bL(?!\w|/)': 'VOLUME',
    r'\bMT/h\b': 'METRIC TONNES PER HOUR',
    r'\bMpa\b': 'PRESSURE',
    r'\bPF\b': 'POWER FACTOR',
    r'\bRPM\b': 'RPM',
    r'\bV(?!\w|/)': 'VOLTAGE',
    r'\bbar(?!\w|/)': 'PRESSURE',
    r'\bbarA\b': 'SCAVENGE PRESSURE',
    r'\bcST\b': 'VISCOSITY',
    r'\bcSt\b': 'VISCOSITY',
    r'\bcst\b': 'VISCOSITY',
    r'\bdeg(?!\w|/|\.)': 'DEGREE',
    r'\bdeg.C\b': 'TEMPERATURE',
    r'\bdegC\b': 'TEMPERATURE',
    r'\bdegree\b': 'DEGREE',
    r'\bdegreeC\b': 'TEMPERATURE',
    r'\bhPa\b': 'PRESSURE',
    r'\bhours\b': 'HOURS',
    r'\bkN\b': 'THRUST',
    r'\bkNm\b': 'TORQUE',
    r'\bkW\b': 'POWER',
    # ensure that kg is not followed by anything
    r'\bkg(?!\w|/)': 'FLOW', # somehow in the data it's flow
    r'\bkg/P\b': 'MASS FLOW',
    r'\bkg/cm2\b': 'PRESSURE',
    r'\bkg/cm²\b': 'PRESSURE',
    r'\bkg/h\b': 'MASS FLOW',
    r'\bkg/hr\b': 'MASS FLOW',
    r'\bkg/pulse\b': '',
    r'\bkgf/cm2\b': 'PRESSURE',
    r'\bkgf/cm²\b': 'PRESSURE',
    r'\bkgf/㎠\b': 'PRESSURE',
    r'\bknots\b': 'SPEED',
    r'\bkw\b': 'POWER',
    r'\bl/Hr\b': 'VOLUME FLOW',
    r'\bl/h\b': 'VOLUME FLOW',
    r'\bl_Hr\b': 'VOLUME FLOW',
    r'\bl_hr\b': 'VOLUME FLOW',
    r'\bM\b': 'DRAFT', # for wind draft
    r'm': 'm', # wind draft and trim - not useful
    r'\bm/s\b': 'SPEED',
    r'\bm3\b': 'VOLUME',
    r'\bmH2O\b': 'DRAFT',
    r'\bmWC\b': 'DRAFT',
    r'\bmbar\b': 'PRESSURE',
    r'\bmg\b': 'ACCELERATION',
    r'\bmin-¹\b': '', # data too varied
    r'\bmm\b': '', # data too varied
    r'\bmmH2O\b': 'WATER DRUM LEVEL',
    r'\brev\b': 'RPM',
    r'\brpm\b': 'RPM',
    r'\bx1000min-¹\b': '',
    r'\b°C\b': 'TEMPERATURE',
    r'\bºC\b': 'TEMPERATURE',
    r'\b℃\b': 'TEMPERATURE'
}
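To illustrate how ordering matters in these dictionaries (longer, more specific patterns are listed before their bare prefixes), a hypothetical pass over one description; the loop mirrors _replace_abbreviations and relies on dicts preserving insertion order:

import re
from replacement_dict import desc_replacement_dict

text = 'AUX. BOILER E. GAS TEMP'
for pattern, repl in desc_replacement_dict.items():
    text = re.sub(pattern, repl, text)
print(text)  # 'AUXILIARY BOILER EXHAUST GAS TEMPERATURE'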
| 
						 | 
				
			
@@ -0,0 +1,65 @@
# %%
import pandas as pd
import os
import glob
from mapper import Mapper
from preprocess import Abbreviator
from deduplication import run_deduplication

# global config
BATCH_SIZE = 512
SHIPS_LIST = [1000,1001,1003,1004]

# %%
# START: we import the raw data csv and extract only a few ships from it to simulate incoming json
data_path = 'raw_data.csv'
full_df = pd.read_csv(data_path, skipinitialspace=True)
# subset to only the ships found in SHIPS_LIST
df = full_df[full_df['ships_idx'].isin(SHIPS_LIST)].reset_index(drop=True)

# test parameters
num_rows = len(df) - 1
df = df[:num_rows]
print(len(df))

# pre-process data
abbreviator = Abbreviator(df)
df = abbreviator.run()

# %%
##########################################
# run mapping
# checkpoint
checkpoint_path = 'models/mapping_model'
mapper = Mapper(checkpoint_path)
mapper.prepare_dataloader(df, batch_size=BATCH_SIZE, max_length=128)
thing_prediction_list, property_prediction_list = mapper.generate()

# add labels too
# thing_actual_list, property_actual_list = decode_preds(pred_labels)
# Convert the lists to a Pandas DataFrame
df_out = pd.DataFrame({
    'p_thing': thing_prediction_list,
    'p_property': property_prediction_list
})
# df_out['p_thing_correct'] = df_out['p_thing'] == df_out['thing']
# df_out['p_property_correct'] = df_out['p_property'] == df_out['property']
df = pd.concat([df, df_out], axis=1)


# %%
####################################
# run de_duplication with thresholding
data_path = "train_all.csv"
train_df = pd.read_csv(data_path, skipinitialspace=True)
train_df['mapping'] = train_df['thing'] + " " + train_df['property']

df = run_deduplication(
    test_df=df,
    train_df=train_df,
    batch_size=BATCH_SIZE,
    threshold=0.85,
    diagnostic=True)

# %%
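
# A minimal sketch of the thresholding idea behind run_deduplication; its
# internals are not shown in this diff, so the function and argument names
# below are assumptions. Predictions are kept only when their embedding is
# close enough to a trusted train-set mapping.
import torch.nn.functional as F

def threshold_filter(test_emb, train_emb, threshold=0.85):
    # cosine similarity of every test embedding against every train embedding
    sim = F.cosine_similarity(test_emb.unsqueeze(1), train_emb.unsqueeze(0), dim=-1)
    best, _ = sim.max(dim=1)  # closest trusted mapping per prediction
    return best >= threshold  # boolean mask of predictions to keep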
@@ -3,9 +3,7 @@ from transformers import (
     AutoTokenizer,
     AutoModelForSequenceClassification,
-    AutoModelForSeq2SeqLM,
-    DataCollatorWithPadding,
 )
 import torch.nn.functional as F


@@ -24,7 +22,7 @@ class BertEmbedder:
         self.model = model.eval()


-    def make_embedding(self, batch_size=64):
+    def make_embedding(self, batch_size=128):
         all_embeddings = self.embeddings
         input_texts = self.inputs

@@ -0,0 +1,2 @@
checkpoint*
tensorboard-log
@@ -0,0 +1 @@
__pycache__
@@ -0,0 +1,125 @@
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers import (
    T5PreTrainedModel,
    T5Model,
)

from transformers.modeling_outputs import (
    SequenceClassifierOutput,
)

def mean_pooling(encoder_outputs, attention_mask):
    """
    Perform mean pooling over encoder outputs, considering the attention mask.
    """
    hidden_states = encoder_outputs.last_hidden_state  # Shape: (batch_size, seq_length, hidden_size)
    mask = attention_mask.unsqueeze(-1)  # Shape: (batch_size, seq_length, 1)
    masked_hidden_states = hidden_states * mask  # Zero out padding tokens
    sum_hidden_states = masked_hidden_states.sum(dim=1)  # Sum over sequence length
    sum_mask = mask.sum(dim=1)  # Sum the mask (number of non-padding tokens)
    return sum_hidden_states / sum_mask  # Mean pooling


class T5EncoderForSequenceClassification(T5PreTrainedModel):

    def __init__(self, checkpoint, tokenizer, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.config = config

        # we force the loading of a pre-trained model here
        self.t5 = T5Model.from_pretrained(checkpoint)
        self.t5.resize_token_embeddings(len(tokenizer))
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)


    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # encoder_outputs = self.t5.encoder(
        #     input_ids,
        #     attention_mask=attention_mask,
        #     head_mask=head_mask,
        #     inputs_embeds=inputs_embeds,
        #     output_attentions=output_attentions,
        #     output_hidden_states=output_hidden_states,
        #     return_dict=return_dict,
        # )

        encoder_outputs = self.t5.encoder(input_ids, attention_mask=attention_mask)
        # last_hidden_state = encoder_outputs.last_hidden_state
        # use mean of hidden state
        # pooled_output = mean_pooling(encoder_outputs, attention_mask)

        # Use the hidden state of the first token as the sequence representation
        pooled_output = encoder_outputs.last_hidden_state[:, 0, :]  # Shape: (batch_size, hidden_size)

        # pooled_output = encoder_outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + encoder_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
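
# Construction sketch, mirroring how the classification training script later
# in this diff instantiates the model. The label count is illustrative, and
# this assumes a transformers version whose T5Config defines classifier_dropout.
if __name__ == "__main__":
    from transformers import AutoTokenizer, T5Config
    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = T5EncoderForSequenceClassification(
        checkpoint="t5-small",
        tokenizer=tokenizer,
        config=T5Config.from_pretrained("t5-small"),
        num_labels=100,  # illustrative label count
    )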
@@ -0,0 +1,2 @@
__pycache__
exports/
@@ -0,0 +1,168 @@
import torch
from torch.utils.data import DataLoader
from transformers import (
    T5TokenizerFast,
    AutoModelForSeq2SeqLM,
)
import os
from tqdm import tqdm
from datasets import Dataset
import numpy as np

os.environ['TOKENIZERS_PARALLELISM'] = 'false'


class Inference():
    tokenizer: T5TokenizerFast
    model: torch.nn.Module
    dataloader: DataLoader

    def __init__(self, checkpoint_path):
        self._create_tokenizer()
        self._load_model(checkpoint_path)


    def _create_tokenizer(self):
        # %%
        # load tokenizer
        self.tokenizer = T5TokenizerFast.from_pretrained("t5-small", return_tensors="pt", clean_up_tokenization_spaces=True)
        # Define additional special tokens
        # these must match the special tokens registered at training time
        additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
        # Add the additional special tokens to the tokenizer
        self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

    def _load_model(self, checkpoint_path: str):
        # load model
        model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint_path)
        model = torch.compile(model)
        # set model to eval
        self.model = model.eval()


    def prepare_dataloader(self, input_df, batch_size, max_length):
        """
        *arguments*
        - input_df: input dataframe containing fields 'tag_description', 'thing', 'property'
        - batch_size: the batch size of dataloader output
        - max_length: length of tokenizer output
        """
        print("preparing dataloader")
        # convert each dataframe row into a dictionary
        # outputs a list of dictionaries

        def _process_df(df):
            output_list = []
            for _, row in df.iterrows():
                desc = f"<DESC>{row['tag_description']}<DESC>"
                unit = f"<UNIT>{row['unit']}<UNIT>"
                element = {
                    'input' : f"{desc}{unit}",
                    'output': f"<THING_START>{row['thing']}<THING_END><PROPERTY_START>{row['property']}<PROPERTY_END>",
                }
                output_list.append(element)

            return output_list

        def _preprocess_function(example):
            input = example['input']
            target = example['output']
            # text_target sets the corresponding label to inputs
            # there is no need to create a separate 'labels'
            model_inputs = self.tokenizer(
                input,
                text_target=target,
                max_length=max_length,
                return_tensors="pt",
                padding="max_length",
                truncation=True,
            )
            return model_inputs

        test_dataset = Dataset.from_list(_process_df(input_df))

        # map maps function to each "row" in the dataset
        # aka the data in the immediate nesting
        datasets = test_dataset.map(
            _preprocess_function,
            batched=True,
            num_proc=1,
            remove_columns=test_dataset.column_names,
        )
        # datasets = _preprocess_function(test_dataset)
        datasets.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

        # create dataloader
        self.dataloader = DataLoader(datasets, batch_size=batch_size)


    def generate(self):
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        MAX_GENERATE_LENGTH = 128

        pred_generations = []
        pred_labels = []

        print("start generation")
        for batch in tqdm(self.dataloader):
            # Inference in batches
            input_ids = batch['input_ids']
            attention_mask = batch['attention_mask']
            # save labels too
            pred_labels.extend(batch['labels'])

            # Move to GPU if available
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            self.model.to(device)

            # Perform inference
            with torch.no_grad():
                outputs = self.model.generate(input_ids,
                                              attention_mask=attention_mask,
                                              max_length=MAX_GENERATE_LENGTH)

                # Decode the output and collect the results
                pred_generations.extend(outputs.to("cpu"))


        # %%
        # extract sequence and decode
        def extract_seq(tokens, start_value, end_value):
            if start_value not in tokens or end_value not in tokens:
                return None  # Or handle this case according to your requirements
            start_id = np.where(tokens == start_value)[0][0]
            end_id = np.where(tokens == end_value)[0][0]

            return tokens[start_id+1:end_id]


        def process_tensor_output(tokens):
            thing_seq = extract_seq(tokens, 32100, 32101) # 32100 = <THING_START>, 32101 = <THING_END>
            property_seq = extract_seq(tokens, 32102, 32103) # 32102 = <PROPERTY_START>, 32103 = <PROPERTY_END>
            p_thing = None
            p_property = None
            if (thing_seq is not None):
                p_thing = self.tokenizer.decode(thing_seq, skip_special_tokens=False)
            if (property_seq is not None):
                p_property = self.tokenizer.decode(property_seq, skip_special_tokens=False)
            return p_thing, p_property

        # decode prediction labels
        def decode_preds(tokens_list):
            thing_prediction_list = []
            property_prediction_list = []
            for tokens in tokens_list:
                p_thing, p_property = process_tensor_output(tokens)
                thing_prediction_list.append(p_thing)
                property_prediction_list.append(p_property)
            return thing_prediction_list, property_prediction_list

        thing_prediction_list, property_prediction_list = decode_preds(pred_generations)
        return thing_prediction_list, property_prediction_list
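
# Worked example of the span extraction above, with illustrative token values:
# for tokens = np.array([32100, 101, 102, 32101, 32102, 201, 32103]),
# extract_seq(tokens, 32100, 32101) returns [101, 102] (decoded as p_thing) and
# extract_seq(tokens, 32102, 32103) returns [201] (decoded as p_property).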
@@ -0,0 +1,6 @@

Accuracy for fold 1: 0.9427354472314246
Accuracy for fold 2: 0.8859813084112149
Accuracy for fold 3: 0.9683734939759037
Accuracy for fold 4: 0.9762131303520457
Accuracy for fold 5: 0.907924874026569
@@ -0,0 +1,73 @@

import pandas as pd
import os
import glob
from inference import Inference

checkpoint_directory = '../'

BATCH_SIZE = 512

def infer_and_select(fold):
    print(f"Inference for fold {fold}")
    # import test data
    data_path = f"../../../data_preprocess/exports/dataset/group_{fold}/test_all.csv"
    df = pd.read_csv(data_path, skipinitialspace=True)

    # get target data
    data_path = f"../../../data_preprocess/exports/dataset/group_{fold}/train_all.csv"
    train_df = pd.read_csv(data_path, skipinitialspace=True)
    # processing to help with selection later
    train_df['thing_property'] = train_df['thing'] + " " + train_df['property']


    ##########################################
    # run inference
    # checkpoint
    directory = os.path.join(checkpoint_directory, f'checkpoint_fold_{fold}b')
    # Use glob to find matching paths
    # path is usually checkpoint_fold_1/checkpoint-<step number>
    # we are guaranteed to save only 1 checkpoint from training
    pattern = 'checkpoint-*'
    checkpoint_path = glob.glob(os.path.join(directory, pattern))[0]


    infer = Inference(checkpoint_path)
    infer.prepare_dataloader(df, batch_size=BATCH_SIZE, max_length=128)
    thing_prediction_list, property_prediction_list = infer.generate()

    # add labels too
    # thing_actual_list, property_actual_list = decode_preds(pred_labels)
    # Convert the lists to a Pandas DataFrame
    df_out = pd.DataFrame({
        'p_thing': thing_prediction_list,
        'p_property': property_prediction_list
    })
    # df_out['p_thing_correct'] = df_out['p_thing'] == df_out['thing']
    # df_out['p_property_correct'] = df_out['p_property'] == df_out['property']
    df = pd.concat([df, df_out], axis=1)

    # we can save the t5 generation output here
    df.to_csv(f"exports/result_group_{fold}.csv", index=False)

    # here we want to evaluate mapping accuracy within the valid in-MDM data only
    in_mdm = df['MDM']
    condition_correct_thing = df['p_thing'] == df['thing']
    condition_correct_property = df['p_property'] == df['property']
    prediction_mdm_correct = sum(condition_correct_thing & condition_correct_property & in_mdm)
    pred_correct_proportion = prediction_mdm_correct / sum(in_mdm)

    # write output to file output.txt
    with open("output.txt", "a") as f:
        print(f'Accuracy for fold {fold}: {pred_correct_proportion}', file=f)

###########################################
# Execute for all folds

# reset file before writing to it
with open("output.txt", "w") as f:
    print('', file=f)

for fold in [1,2,3,4,5]:
    infer_and_select(fold)
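
# Note: accuracy here is strict pair accuracy restricted to in-MDM rows; a row
# counts as correct only when both p_thing and p_property match the ground
# truth, and the denominator is sum(in_mdm). These are the per-fold numbers
# recorded in output.txt above.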
@@ -0,0 +1,227 @@
# %%

# from datasets import load_from_disk
import os
import glob

os.environ['NCCL_P2P_DISABLE'] = '1'
os.environ['NCCL_IB_DISABLE'] = '1'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"

import torch
from custom_t5.modeling_t5 import T5EncoderForSequenceClassification

from safetensors.torch import load_file
from transformers import (
    T5Config,
    T5TokenizerFast,
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    EarlyStoppingCallback,
    Seq2SeqTrainingArguments,
    T5ForConditionalGeneration,
    T5Model
)
import evaluate
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
from datasets import Dataset, DatasetDict


torch.set_float32_matmul_precision('high')

# outputs a list of dictionaries
def process_df_to_dict(df):
    output_list = []
    for _, row in df.iterrows():
        desc = f"<DESC>{row['tag_description']}<DESC>"
        unit = f"<UNIT>{row['unit']}<UNIT>"
        element = {
            'input' : f"{desc}{unit}",
            'output': f"<THING_START>{row['thing']}<THING_END><PROPERTY_START>{row['property']}<PROPERTY_END>",
        }
        output_list.append(element)

    return output_list


def create_split_dataset(fold):
    # train
    data_path = f"../../data_preprocess/exports/dataset/group_{fold}/train_all.csv"
    train_df = pd.read_csv(data_path, skipinitialspace=True)

    # valid
    data_path = f"../../data_preprocess/exports/dataset/group_{fold}/valid.csv"
    validation_df = pd.read_csv(data_path, skipinitialspace=True)

    combined_data = DatasetDict({
        'train': Dataset.from_list(process_df_to_dict(train_df)),
        'validation' : Dataset.from_list(process_df_to_dict(validation_df)),
    })
    return combined_data


# function to perform training for a given fold
def train(fold):
    save_path = f'checkpoint_fold_{fold}b'
    split_datasets = create_split_dataset(fold)

    # prepare tokenizer
    model_checkpoint = "t5-small"
    tokenizer = T5TokenizerFast.from_pretrained(model_checkpoint, return_tensors="pt", clean_up_tokenization_spaces=True)
    # Define additional special tokens
    additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
    # Add the additional special tokens to the tokenizer
    tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

    max_length = 120

    # given a dataset entry, run it through the tokenizer
    def preprocess_function(example):
        input = example['input']
        target = example['output']
        # text_target sets the corresponding label to inputs
        # there is no need to create a separate 'labels'
        model_inputs = tokenizer(
            input,
            text_target=target,
            max_length=max_length,
            truncation=True,
            padding="max_length"
        )
        return model_inputs

    # map maps function to each "row" in the dataset
    # aka the data in the immediate nesting
    tokenized_datasets = split_datasets.map(
        preprocess_function,
        batched=True,
        num_proc=8,
        remove_columns=split_datasets["train"].column_names,
    )

    # https://github.com/huggingface/transformers/pull/28414
    # model_checkpoint = "google/t5-efficient-tiny"
    # device_map set to auto to force it to load contiguous weights
    # model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint, device_map='auto')

    directory = os.path.join(".", f'checkpoint_fold_{fold}a')
    # Use glob to find matching paths
    # path is usually checkpoint_fold_1/checkpoint-<step number>
    # we are guaranteed to save only 1 checkpoint from training
    pattern = 'checkpoint-*'
    prev_checkpoint = glob.glob(os.path.join(directory, pattern))[0]
    # t5_classify = T5Model.from_pretrained(prev_checkpoint)
    # Load the checkpoint
    checkpoint_path = f"{prev_checkpoint}/model.safetensors"
    checkpoint = load_file(checkpoint_path)
    # Filter out weights related to the classification head
    t5_weights = {key: value for key, value in checkpoint.items() if "classifier" not in key}


    model = T5ForConditionalGeneration.from_pretrained(model_checkpoint)
    model.load_state_dict(state_dict=t5_weights, strict=False)
    # important! after extending tokens vocab
    model.resize_token_embeddings(len(tokenizer))

    # Freeze the encoder
    for param in model.encoder.parameters():
        param.requires_grad = False

    # Freeze the shared embedding layer
    for param in model.shared.parameters():
        param.requires_grad = False
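
    # Note: this is stage (b) of a two-stage scheme. Stage (a), the
    # classification script later in this diff, trains the T5 encoder against
    # the MDM label space and saves checkpoint_fold_{fold}a; here those weights
    # are reloaded without the classifier head, and with the encoder and shared
    # embeddings frozen, training updates essentially only the decoder.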

    data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
    metric = evaluate.load("sacrebleu")


    def compute_metrics(eval_preds):
        preds, labels = eval_preds
        # In case the model returns more than the prediction logits
        if isinstance(preds, tuple):
            preds = preds[0]

        decoded_preds = tokenizer.batch_decode(preds,
                                               skip_special_tokens=False)

        # Replace -100s in the labels as we can't decode them
        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels,
                                                skip_special_tokens=False)

        # Remove <PAD> tokens from decoded predictions and labels
        decoded_preds = [pred.replace(tokenizer.pad_token, '').strip() for pred in decoded_preds]
        decoded_labels = [[label.replace(tokenizer.pad_token, '').strip()] for label in decoded_labels]

        # Some simple post-processing
        # decoded_preds = [pred.strip() for pred in decoded_preds]
        # decoded_labels = [[label.strip()] for label in decoded_labels]
        # print(decoded_preds, decoded_labels)

        result = metric.compute(predictions=decoded_preds, references=decoded_labels)
        return {"bleu": result["score"]}


    # Generation Config
    # from transformers import GenerationConfig
    gen_config = model.generation_config
    gen_config.max_length = 128

    # compile
    # model = torch.compile(model, backend="inductor", dynamic=True)


    # Trainer

    args = Seq2SeqTrainingArguments(
        f"{save_path}",
        # eval_strategy="epoch",
        eval_strategy="no",
        logging_dir="tensorboard-log",
        logging_strategy="epoch",
        # save_strategy="epoch",
        load_best_model_at_end=False,
        learning_rate=1e-3,
        per_device_train_batch_size=64,
        per_device_eval_batch_size=64,
        auto_find_batch_size=False,
        ddp_find_unused_parameters=False,
        weight_decay=0.01,
        save_total_limit=1,
        num_train_epochs=80,
        predict_with_generate=True,
        bf16=True,
        push_to_hub=False,
        generation_config=gen_config,
        remove_unused_columns=False,
    )


    trainer = Seq2SeqTrainer(
        model,
        args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        data_collator=data_collator,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
        # callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )

    # uncomment to load training from checkpoint
    # checkpoint_path = 'default_40_1/checkpoint-5600'
    # trainer.train(resume_from_checkpoint=checkpoint_path)

    trainer.train()

# execute training
for fold in [1,2,3,4,5]:
    print(fold)
    train(fold)
@@ -0,0 +1,228 @@
# %%

# from datasets import load_from_disk
import os

os.environ['NCCL_P2P_DISABLE'] = '1'
os.environ['NCCL_IB_DISABLE'] = '1'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"

import torch
from custom_t5.modeling_t5 import T5EncoderForSequenceClassification
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    DataCollatorWithPadding,
    Trainer,
    EarlyStoppingCallback,
    TrainingArguments,
    T5Config,
)
import evaluate
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
from datasets import Dataset, DatasetDict


torch.set_float32_matmul_precision('high')

# %%

# we need to create the mdm_list
# import the full mdm-only file
data_path = '../../data_import/exports/data_mapping_mdm.csv'
full_df = pd.read_csv(data_path, skipinitialspace=True)
mdm_list = sorted(list(set(full_df['pattern'])))

# # rather than use pattern, we use the real thing and property
# thing_property = full_df['thing'] + full_df['property']
# thing_property = thing_property.to_list()
# mdm_list = sorted(list(set(thing_property)))


# %%
id2label = {}
label2id = {}
for idx, val in enumerate(mdm_list):
    id2label[idx] = val
    label2id[val] = idx
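
# Example with illustrative values: if mdm_list were ['BOILER Pressure',
# 'COOLER Temperature'], then id2label == {0: 'BOILER Pressure',
# 1: 'COOLER Temperature'} and label2id inverts it, giving each pattern
# string an integer class index.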

# %%

# outputs a list of dictionaries
# processes dataframe into lists of dictionaries
# each element maps input to output
# input: tag_description
# output: class label
def process_df_to_dict(df, mdm_list):
    output_list = []
    for _, row in df.iterrows():
        desc = f"<DESC>{row['tag_description']}<DESC>"
        unit = f"<UNIT>{row['unit']}<UNIT>"
        # pattern = f"{row['thing'] + row['property']}"
        pattern = f"{row['thing_pattern'] + ' ' + row['property_pattern']}"
        try:
            index = mdm_list.index(pattern)
        except ValueError:
            print("Error: value not found in MDM list")
            index = -1
        element = {
            'text' : f"{desc}{unit}",
            'label': index,
        }
        output_list.append(element)

    return output_list


def create_split_dataset(fold, mdm_list):
    # train
    data_path = f"../../data_preprocess/exports/dataset/group_{fold}/train_all.csv"
    train_df = pd.read_csv(data_path, skipinitialspace=True)

    # valid
    data_path = f"../../data_preprocess/exports/dataset/group_{fold}/valid.csv"
    validation_df = pd.read_csv(data_path, skipinitialspace=True)

    combined_data = DatasetDict({
        'train': Dataset.from_list(process_df_to_dict(train_df, mdm_list)),
        'validation' : Dataset.from_list(process_df_to_dict(validation_df, mdm_list)),
    })
    return combined_data


# %%

# function to perform training for a given fold
def train(fold):

    save_path = f'checkpoint_fold_{fold}a'
    split_datasets = create_split_dataset(fold, mdm_list)

    # prepare tokenizer

    # model_checkpoint = "distilbert/distilbert-base-uncased"
    # model_checkpoint = 'google-bert/bert-base-cased'
    model_checkpoint = "t5-small"
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="pt", clean_up_tokenization_spaces=True)
    # Define additional special tokens
    additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
    # Add the additional special tokens to the tokenizer
    tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

    max_length = 120

    # given a dataset entry, run it through the tokenizer
    def preprocess_function(example):
        input = example['text']
        # text_target sets the corresponding label to inputs
        # there is no need to create a separate 'labels'
        model_inputs = tokenizer(
            input,
            max_length=max_length,
            truncation=True,
            padding="max_length"
        )
        return model_inputs

    # map maps function to each "row" in the dataset
    # aka the data in the immediate nesting
    tokenized_datasets = split_datasets.map(
        preprocess_function,
        batched=True,
        num_proc=8,
        remove_columns="text",
    )

    # %% temp
    # tokenized_datasets['train'].rename_columns()

    # %%
    # create data collator

    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    # %%
    # compute metrics
    metric = evaluate.load("accuracy")


    def compute_metrics(eval_preds):
        preds, labels = eval_preds
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=labels)

    # %%
    # create id2label and label2id


    # %%
    # model = AutoModelForSequenceClassification.from_pretrained(
    #     model_checkpoint,
    #     num_labels=len(mdm_list),
    #     id2label=id2label,
    #     label2id=label2id)
    model = T5EncoderForSequenceClassification(
        checkpoint=model_checkpoint,
        tokenizer=tokenizer,
        config=T5Config.from_pretrained(model_checkpoint),
        num_labels=len(mdm_list)
    )
    # important! after extending tokens vocab
    # model.t5.resize_token_embeddings(len(tokenizer))

    # model = torch.compile(model, backend="inductor", dynamic=True)


    # %%
    # Trainer

    training_args = TrainingArguments(
        output_dir=f"{save_path}",
        # eval_strategy="epoch",
        eval_strategy="no",
        logging_dir="tensorboard-log",
        logging_strategy="epoch",
        # save_strategy="epoch",
        load_best_model_at_end=False,
        learning_rate=1e-3,
        per_device_train_batch_size=128,
        per_device_eval_batch_size=128,
        auto_find_batch_size=False,
        ddp_find_unused_parameters=False,
        weight_decay=0.01,
        save_total_limit=1,
        num_train_epochs=80,
        bf16=True,
        push_to_hub=False,
        remove_unused_columns=False,
    )


    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
        # callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )

    # uncomment to load training from checkpoint
    # checkpoint_path = 'default_40_1/checkpoint-5600'
    # trainer.train(resume_from_checkpoint=checkpoint_path)

    trainer.train()

# execute training
for fold in [1,2,3,4,5]:
    print(fold)
    train(fold)


# %%