feat: end-to-end code needed for deployment that includes preprocess, mapping, post-process (de-duplication)
parent 737c86bc2e
commit 446ed1429c
@@ -0,0 +1,5 @@
# End to End deployment code

This code takes in an input dataframe (assuming that the server code has
already parsed the incoming JSON into a pandas dataframe) and applies our
mapping + threshold + de-duplication code.
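
A minimal usage sketch (the module names, checkpoint paths, and column layout
below follow the scripts in this commit; treat them as assumptions for your own
deployment):

```python
import pandas as pd
from preprocess import Abbreviator
from mapper import Mapper
from deduplication import run_deduplication

df = ...  # dataframe parsed from the incoming JSON by the server code

# preprocess: normalize descriptions and units
df = Abbreviator(df).run()

# mapping: predict thing/property for each row
mapper = Mapper('models/mapping_model')
mapper.prepare_dataloader(df, batch_size=1024, max_length=128)
df['p_thing'], df['p_property'] = mapper.generate()

# post-process: threshold + de-duplication against the training data
train_df = pd.read_csv('train_all.csv', skipinitialspace=True)
train_df['mapping'] = train_df['thing'] + ' ' + train_df['property']
df = run_deduplication(test_df=df, train_df=train_df,
                       batch_size=1024, threshold=0.9)
```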

@@ -0,0 +1,4 @@
models
raw_data.csv
train_all.csv
__pycache__

@@ -0,0 +1,415 @@
# %%
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoModelForSeq2SeqLM,
)

##################
# global parameters
##################


class BertEmbedder:
    def __init__(self, input_texts, model_checkpoint):
        # we need to generate the embedding from a list of input strings
        self.embeddings = []
        self.inputs = input_texts
        self.tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, clean_up_tokenization_spaces=True)

        model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(self.device)
        self.model = model.eval()

    def make_embedding(self, batch_size=128):
        all_embeddings = self.embeddings
        input_texts = self.inputs

        for i in range(0, len(input_texts), batch_size):
            batch_texts = input_texts[i:i+batch_size]
            # tokenize the input text
            inputs = self.tokenizer(batch_texts, return_tensors="pt", padding=True, truncation=True, max_length=120)
            input_ids = inputs.input_ids.to(self.device)
            attention_mask = inputs.attention_mask.to(self.device)

            # pass the input through the encoder and retrieve the embeddings
            # autocast follows the model's device so a cpu-only fallback still works
            with torch.amp.autocast(device_type=self.device.type, dtype=torch.bfloat16):
                with torch.no_grad():
                    encoder_outputs = self.model(input_ids, attention_mask=attention_mask, output_hidden_states=True)
                    # get the last layer
                    embeddings = encoder_outputs.hidden_states[-1]
                    # get the cls token embedding
                    cls_embeddings = embeddings[:, 0, :]  # shape: (batch_size, hidden_size)
                    all_embeddings.append(cls_embeddings)

        # concatenate the per-batch tensors into one large tensor; dim=0 grows row-wise
        all_embeddings = torch.cat(all_embeddings, dim=0)

        self.embeddings = all_embeddings


class T5Embedder:
    def __init__(self, input_texts, model_checkpoint):
        # we need to generate the embedding from a list of input strings
        self.embeddings = []
        self.inputs = input_texts
        self.tokenizer = AutoTokenizer.from_pretrained("t5-base", clean_up_tokenization_spaces=True)
        # define additional special tokens
        additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
        # add the additional special tokens to the tokenizer
        self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

        model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
        # note: pinned to the second gpu; falls back to cpu when cuda is absent
        self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
        model.to(self.device)
        self.model = model.eval()

    def make_embedding(self, batch_size=128):
        all_embeddings = self.embeddings
        input_texts = self.inputs

        for i in range(0, len(input_texts), batch_size):
            batch_texts = input_texts[i:i+batch_size]
            # tokenize the input text
            inputs = self.tokenizer(batch_texts, return_tensors="pt", padding=True, truncation=True, max_length=128)
            input_ids = inputs.input_ids.to(self.device)
            attention_mask = inputs.attention_mask.to(self.device)

            # pass the input through the encoder and retrieve the embeddings
            with torch.amp.autocast(device_type=self.device.type, dtype=torch.bfloat16):
                with torch.no_grad():
                    encoder_outputs = self.model.encoder(input_ids, attention_mask=attention_mask)
                    embeddings = encoder_outputs.last_hidden_state

            # mean-pool the token embeddings, ignoring padded positions via the attention mask
            mean_embedding = (embeddings * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(dim=1, keepdim=True)
            all_embeddings.append(mean_embedding)

        # concatenate the per-batch tensors into one large tensor; dim=0 grows row-wise
        all_embeddings = torch.cat(all_embeddings, dim=0)

        self.embeddings = all_embeddings


def cosine_similarity_chunked(batch1, batch2, chunk_size=1024):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    batch1_size = batch1.size(0)
    batch2_size = batch2.size(0)
    # .to() is not in-place: keep the returned tensor
    batch2 = batch2.to(device)

    # prepare an empty tensor to store results
    cos_sim = torch.empty(batch1_size, batch2_size, device=device)

    # norms of batch2 only need to be computed once, outside the chunk loop
    batch2_norms = batch2.norm(dim=1, keepdim=True)

    # process batch1 in chunks
    for i in range(0, batch1_size, chunk_size):
        batch1_chunk = batch1[i:i + chunk_size].to(device)  # get chunk of batch1

        # compute cosine similarity by matrix multiplication and normalizing
        sim_chunk = torch.mm(batch1_chunk, batch2.T) / (batch1_chunk.norm(dim=1, keepdim=True) * batch2_norms.T + 1e-8)

        # store the results in the appropriate part of the final tensor
        cos_sim[i:i + chunk_size] = sim_chunk

    return cos_sim
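
# sanity-check sketch (small random tensors assumed): the chunked result should
# agree with a dense torch.nn.functional.cosine_similarity computation, e.g.
#   a, b = torch.randn(4, 8), torch.randn(6, 8)
#   dense = torch.nn.functional.cosine_similarity(a.unsqueeze(1), b.unsqueeze(0), dim=-1)
#   assert torch.allclose(cosine_similarity_chunked(a, b).cpu(), dense, atol=1e-4)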


###################
# helper functions
class Embedder():
    input_df: pd.DataFrame
    batch_size: int

    def __init__(self, input_df, batch_size):
        self.input_df = input_df
        self.batch_size = batch_size

    def make_embedding(self, checkpoint_path):

        def generate_input_list(df):
            input_list = []
            for _, row in df.iterrows():
                desc = f"<DESC>{row['tag_description']}<DESC>"
                unit = f"<UNIT>{row['unit']}<UNIT>"
                # name = f"<NAME>{row['tag_name']}<NAME>"
                element = f"{desc}{unit}"
                input_list.append(element)
            return input_list

        # prepare the reference embeddings
        train_data = generate_input_list(self.input_df)
        # embedder = T5Embedder(train_data, checkpoint_path)
        embedder = BertEmbedder(train_data, checkpoint_path)
        embedder.make_embedding(batch_size=self.batch_size)
        return embedder.embeddings


# the selection function takes in the full cos_sim_matrix, then subsets the
# matrix according to the test_candidates_mask and train_candidates_mask that we
# give it
# it returns the most likely source candidate index and score among the source
# candidate list
# we then map the local idx back to the ship-level idx
def selection(cos_sim_matrix, source_mask, target_mask):
    # subset the 2D matrix by (row, column) boolean masks
    subset_matrix = cos_sim_matrix[np.ix_(source_mask, target_mask)]
    # we select top-k here
    top_k = 1
    # argpartition partial-sorts so that only the top_k entries are guaranteed
    # to be the largest; cheaper than a full argsort
    top_k_indices = np.argpartition(subset_matrix, -top_k, axis=1)[:, -top_k:]

    # get the values of the top-k maximum scores
    top_k_values = np.take_along_axis(subset_matrix, top_k_indices, axis=1)

    # average the top-k scores along axis 1
    y_scores = np.mean(top_k_values, axis=1)
    max_idx = np.argmax(y_scores)
    max_score = y_scores[max_idx]

    # convert the boolean mask back to indices so the winning row can be
    # addressed in the unsubset matrix
    condition_indices = np.where(source_mask)[0]
    max_idx = condition_indices[max_idx]

    return max_idx, max_score
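
# worked sketch (values assumed): with source_mask = [True, False, True] on a
# 3-row cos_sim_matrix, np.ix_ keeps rows 0 and 2 of the subset; if local row 1
# of the subset has the best averaged score, condition_indices = [0, 2] maps it
# back to global row index 2, which is returned alongside its score.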


####################
# global level
# obtain the full mdm_list

#####################
# fold level

def run_deduplication(
    test_df,
    train_df,
    batch_size=1024,
    threshold=0.9,
    diagnostic=False
):

    # TODO: replace this with a list of values to import
    # too wasteful to just import everything
    data_path = '../data_import/exports/data_mapping_mdm.csv'
    full_df = pd.read_csv(data_path, skipinitialspace=True)
    full_df['mapping'] = full_df['thing'] + ' ' + full_df['property']
    full_mdm_mapping_list = sorted(set(full_df['mapping']))

    # import test data
    df = test_df
    df['p_mapping'] = df['p_thing'] + " " + df['p_property']

    # target data: train_df is supplied by the caller and must already carry a
    # 'mapping' column (thing + ' ' + property); re-reading train_all.csv here
    # would silently discard the argument

    # generate your embeddings
    checkpoint_path = 'models/bert_model'
    # we can generate the train embeddings once and re-use them for every ship
    print('generate train embeddings')
    train_embedder = Embedder(input_df=train_df, batch_size=batch_size)
    train_embeds = train_embedder.make_embedding(checkpoint_path)

    # generate new embeddings for each ship
    print('generate test embeddings')
    test_embedder = Embedder(input_df=df, batch_size=batch_size)
    global_test_embeds = test_embedder.make_embedding(checkpoint_path)

    # create the global_answer array
    # the purpose of this array is to track the classification state at the
    # global level
    global_answer = np.zeros(len(df), dtype=bool)

    #############################
    # ship level
    # we have to split into per-ship analysis
    ships_list = sorted(set(df['ships_idx']))

    for ship_idx in tqdm(ships_list):
        # we want to subset the ship and only p_mdm values
        ship_mask = df['ships_idx'] == ship_idx
        # required to map the local ship_answer array back to the global_answer array
        map_local_index_to_global_index = np.where(ship_mask)[0]
        ship_df = df[ship_mask].reset_index(drop=True)

        # subset the test embeddings
        test_embeds = global_test_embeds[map_local_index_to_global_index]

        # generate the cosine similarity matrix at the ship level
        cos_sim_matrix = cosine_similarity_chunked(test_embeds, train_embeds, chunk_size=1024).cpu().numpy()

        ##############################
        # selection level
        # the general idea:
        # step 1: keep only pattern generations that belong to the mdm list
        # -> this removes totally wrong predictions that mapped to totally wrong things
        # step 2: loop through the mdm list and isolate data in both train and test that
        # belong to the same pattern class
        # -> this is more tricky, because we have non-mdm data mapping to correct classes
        # -> so we have to find which candidate is most similar to the training data

        # it is very tricky to keep track of classification across multiple stages, so we
        # use a boolean answer list to map answers back to the global answer list

        # initialize the local answer list
        ship_answer_list = np.ones(len(ship_df), dtype=bool)

        ###########
        # STEP 1A: ensure that the predicted mapping labels are valid
        pattern_match_mask = ship_df['p_mapping'].apply(lambda x: x in full_mdm_mapping_list).to_numpy()
        pattern_match_mask = pattern_match_mask.astype(bool)
        # anything not in the pattern_match_mask is a hallucination
        # this has the same effect as setting any wrong generation as non-mdm
        ship_answer_list[~pattern_match_mask] = False

        # # STEP 1B: subset our de-duplication to use only predicted_mdm labels
        # p_mdm_mask = ship_df['p_mdm']
        # # assign false to any non p_mdm entries
        # ship_answer_list[~p_mdm_mask] = False
        # # modify pattern_match_mask to remove any non p_mdm values
        # pattern_match_mask = pattern_match_mask & p_mdm_mask

        ###########
        # STEP 2
        # we now go through each class found in our generated set

        # we want to identify the per-ship mdm classes
        ship_predicted_classes = sorted(set(ship_df['p_mapping'][pattern_match_mask].to_list()))

        # this function performs the selection for a given class
        # it takes in the cos_sim_matrix
        # it returns the selection by producing a new answer list
        # it sets all relevant idxs to False initially, then sets the selected value to True
        def selection_for_class(select_class, cos_sim_matrix, answer_list):

            # create a local copy of answer_list
            ship_answer_list = answer_list.copy()

            # we need to set all idxs of chosen entries to False in the answer
            # list -> assume wrong by default
            selected_idx_list = np.where(ship_df['p_mapping'] == select_class)[0]

            # generate the masking arrays for both test and train embeddings
            # we select a tuple from each group and use that as a candidate for selection
            test_candidates_mask = ship_df['p_mapping'] == select_class
            # the candidates to compare against are the training rows sharing the same class
            train_candidates_mask = train_df['mapping'] == select_class

            if sum(train_candidates_mask) == 0:
                # it can happen that an mdm-valid mapping class is not found in the training data
                # print("not found in training data", select_class)
                ship_answer_list[selected_idx_list] = False
                return ship_answer_list

            # perform the selection
            # max_idx is the local row index of the winning candidate
            max_idx, max_score = selection(cos_sim_matrix, test_candidates_mask, train_candidates_mask)

            # set the duplicate entries to False
            ship_answer_list[selected_idx_list] = False
            # then set only the one unique chosen value to True
            if max_score > threshold:
                ship_answer_list[max_idx] = True

            return ship_answer_list

        # we process one mdm class at a time
        for select_class in ship_predicted_classes:
            # this check resulted in a big improvement
            if (sum(ship_df['p_mapping'] == select_class)) > 0:
                ship_answer_list = selection_for_class(select_class, cos_sim_matrix, ship_answer_list)

        # we want to write back to global_answer
        # first we convert local indices to global indices
        ship_local_indices = np.where(ship_answer_list)[0]
        ship_global_indices = map_local_index_to_global_index[ship_local_indices]
        global_answer[ship_global_indices] = True

    # we set all unselected values to None
    df.loc[~global_answer, 'p_thing'] = None
    df.loc[~global_answer, 'p_property'] = None

    if diagnostic:
        print(80 * '*')

        y_true = df['MDM'].to_list()
        y_pred = global_answer

        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
        print(f"tp: {tp}")
        print(f"tn: {tn}")
        print(f"fp: {fp}")
        print(f"fn: {fn}")

        # compute metrics
        accuracy = accuracy_score(y_true, y_pred)
        f1 = f1_score(y_true, y_pred)
        precision = precision_score(y_true, y_pred)
        recall = recall_score(y_true, y_pred)

        # print the results
        print(f'accuracy: {accuracy:.5f}')
        print(f'f1 score: {f1:.5f}')
        print(f'Precision: {precision:.5f}')
        print(f'Recall: {recall:.5f}')

    return df

@@ -0,0 +1,169 @@
import torch
from torch.utils.data import DataLoader
from transformers import (
    T5TokenizerFast,
    AutoModelForSeq2SeqLM,
)
import os
from tqdm import tqdm
from datasets import Dataset
import numpy as np

os.environ['TOKENIZERS_PARALLELISM'] = 'false'


class Mapper():
    tokenizer: T5TokenizerFast
    model: torch.nn.Module
    dataloader: DataLoader

    def __init__(self, checkpoint_path):
        self._create_tokenizer()
        self._load_model(checkpoint_path)

    def _create_tokenizer(self):
        # load the tokenizer
        self.tokenizer = T5TokenizerFast.from_pretrained("t5-small", clean_up_tokenization_spaces=True)
        # define additional special tokens; the bracketed forms match the
        # <DESC>/<UNIT> markers used when building the model inputs
        additional_special_tokens = ["<THING_START>", "<THING_END>", "<PROPERTY_START>", "<PROPERTY_END>", "<NAME>", "<DESC>", "<SIG>", "<UNIT>", "<DATA_TYPE>"]
        # add the additional special tokens to the tokenizer
        self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})

    def _load_model(self, checkpoint_path: str):
        # load the model
        model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint_path)
        model = torch.compile(model)
        # set the model to eval
        self.model = model.eval()

    def prepare_dataloader(self, input_df, batch_size, max_length):
        """
        *arguments*
        - input_df: input dataframe containing the fields 'tag_description', 'thing', 'property'
        - batch_size: the batch size of the dataloader output
        - max_length: length of the tokenizer output
        """
        print("preparing dataloader")

        # convert each dataframe row into a dictionary
        # outputs a list of dictionaries
        def _process_df(df):
            output_list = []
            for _, row in df.iterrows():
                desc = f"<DESC>{row['tag_description']}<DESC>"
                unit = f"<UNIT>{row['unit']}<UNIT>"
                element = {
                    'input': f"{desc}{unit}",
                    'output': f"<THING_START>{row['thing']}<THING_END><PROPERTY_START>{row['property']}<PROPERTY_END>",
                }
                output_list.append(element)

            return output_list
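
        # illustrative output element (values assumed): a row with
        # tag_description='MAIN_ENGINE EXHAUST GAS TEMPERATURE', unit='TEMPERATURE',
        # thing='ME1Cylinder1', property='ExhGasTemp' would yield
        #   {'input': '<DESC>MAIN_ENGINE EXHAUST GAS TEMPERATURE<DESC><UNIT>TEMPERATURE<UNIT>',
        #    'output': '<THING_START>ME1Cylinder1<THING_END><PROPERTY_START>ExhGasTemp<PROPERTY_END>'}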

        def _preprocess_function(example):
            input = example['input']
            target = example['output']
            # text_target sets the corresponding label for the inputs;
            # there is no need to create a separate 'labels' field
            model_inputs = self.tokenizer(
                input,
                text_target=target,
                max_length=max_length,
                return_tensors="pt",
                padding='max_length',
                truncation=True,
            )
            return model_inputs

        test_dataset = Dataset.from_list(_process_df(input_df))

        # map applies the function to each batch of rows in the dataset
        datasets = test_dataset.map(
            _preprocess_function,
            batched=True,
            num_proc=1,
            remove_columns=test_dataset.column_names,
        )
        datasets.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

        # create the dataloader
        self.dataloader = DataLoader(datasets, batch_size=batch_size)

    def generate(self):
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        MAX_GENERATE_LENGTH = 128

        pred_generations = []
        pred_labels = []
        # move the model to whichever device is available rather than assuming cuda
        self.model.to(device)

        print("start generation")
        for batch in tqdm(self.dataloader):
            # inference in batches
            input_ids = batch['input_ids']
            attention_mask = batch['attention_mask']
            # save the labels too
            pred_labels.extend(batch['labels'])

            # move to gpu if available
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)

            # perform inference
            # autocast follows the active device; disable bf16 on gpus without tensor cores
            with torch.amp.autocast(device_type=device.type, dtype=torch.bfloat16):
                with torch.no_grad():
                    outputs = self.model.generate(input_ids,
                                                  attention_mask=attention_mask,
                                                  max_length=MAX_GENERATE_LENGTH)

            # keep the generated token ids on cpu for post-processing
            pred_generations.extend(outputs.to("cpu"))

        # %%
        # extract the token span between a start and end marker, then decode it
        def extract_seq(tokens, start_value, end_value):
            if start_value not in tokens or end_value not in tokens:
                return None  # or handle this case according to your requirements
            start_id = np.where(tokens == start_value)[0][0]
            end_id = np.where(tokens == end_value)[0][0]

            return tokens[start_id+1:end_id]
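
        # tiny worked example (token ids assumed): for tokens = tensor([32100, 5, 6, 32101]),
        # extract_seq(tokens, 32100, 32101) returns tensor([5, 6]), the span strictly
        # between the <THING_START> and <THING_END> markers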

        def process_tensor_output(tokens):
            thing_seq = extract_seq(tokens, 32100, 32101)  # 32100 = <THING_START>, 32101 = <THING_END>
            property_seq = extract_seq(tokens, 32102, 32103)  # 32102 = <PROPERTY_START>, 32103 = <PROPERTY_END>
            p_thing = None
            p_property = None
            if thing_seq is not None:
                p_thing = self.tokenizer.decode(thing_seq, skip_special_tokens=False)
            if property_seq is not None:
                p_property = self.tokenizer.decode(property_seq, skip_special_tokens=False)
            return p_thing, p_property

        # decode the prediction labels
        def decode_preds(tokens_list):
            thing_prediction_list = []
            property_prediction_list = []
            for tokens in tokens_list:
                p_thing, p_property = process_tensor_output(tokens)
                thing_prediction_list.append(p_thing)
                property_prediction_list.append(p_property)
            return thing_prediction_list, property_prediction_list

        thing_prediction_list, property_prediction_list = decode_preds(pred_generations)
        return thing_prediction_list, property_prediction_list

@@ -0,0 +1,77 @@
# %%
import re
from replacement_dict import desc_replacement_dict, unit_replacement_dict

class Abbreviator:

    def __init__(self, df):
        self.df = df

    def _count_abbreviation_occurrences(self, tag_descriptions, abbreviation):
        """Count the number of occurrences of the abbreviation in the list of tag descriptions."""
        pattern = re.compile(abbreviation)
        count = sum(len(pattern.findall(description)) for description in tag_descriptions)
        return count

    def _replace_abbreviations(self, tag_descriptions, abbreviations):
        """Replace the abbreviations according to the key-value pairs provided."""
        replaced_descriptions = []
        for description in tag_descriptions:
            for abbreviation, replacement in abbreviations.items():
                description = re.sub(abbreviation, replacement, description)

            replaced_descriptions.append(description)
        return replaced_descriptions

    def _cleanup_spaces(self, tag_descriptions):
        # collapse all whitespace runs into a single space
        replaced_descriptions = []
        for description in tag_descriptions:
            description_clean = re.sub(r'\s+', ' ', description)
            replaced_descriptions.append(description_clean)
        return replaced_descriptions

    # remove all dots
    def _cleanup_dots(self, tag_descriptions):
        replaced_descriptions = []
        for description in tag_descriptions:
            description_clean = re.sub(r'\.', '', description)
            replaced_descriptions.append(description_clean)
        return replaced_descriptions


    def run(self):
        df = self.df

        # %%
        # replace abbreviations
        print("running substitution for descriptions")
        # normalize to uppercase and strip leading/trailing whitespace
        df['tag_description'] = df['tag_description'].str.strip()
        df['tag_description'] = df['tag_description'].str.upper()
        # replace missing and whitespace-only entries with "NOVALUE"
        # (note that "N/A" can be read in as NaN)
        df['tag_description'] = df['tag_description'].fillna("NOVALUE")
        df['tag_description'] = df['tag_description'].replace(r'^\s*$', 'NOVALUE', regex=True)

        # perform the actual substitution
        tag_descriptions = df['tag_description']
        replaced_descriptions = self._replace_abbreviations(tag_descriptions, desc_replacement_dict)
        replaced_descriptions = self._cleanup_spaces(replaced_descriptions)
        replaced_descriptions = self._cleanup_dots(replaced_descriptions)
        df["tag_description"] = replaced_descriptions

        # %%
        print("running substitutions for units")
        df['unit'] = df['unit'].fillna("NOVALUE")
        df['unit'] = df['unit'].replace(r'^\s*$', 'NOVALUE', regex=True)
        unit_list = df['unit']
        new_unit = self._replace_abbreviations(unit_list, unit_replacement_dict)
        new_unit = self._cleanup_spaces(new_unit)
        df['unit'] = new_unit

        return df
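
# usage sketch (input strings assumed): for a row with
# tag_description = 'M/E EXH. GAS TEMP' and unit = 'degC', run() yields
# tag_description = 'MAIN_ENGINE EXHAUST GAS TEMPERATURE' and unit = 'TEMPERATURE'
# after abbreviation replacement, whitespace cleanup, and dot removal.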

@@ -0,0 +1,291 @@
# substitution mapping for descriptions
# abbreviations and their replacements
desc_replacement_dict = {
    r'\bLIST\b': 'LIST',
    # exhaust gas
    r'\bE\. GAS\b': 'EXHAUST GAS',
    r'\bEXH\.\b': 'EXHAUST',
    r'\bEXH\b': 'EXHAUST',
    r'\bEXHAUST\.\b': 'EXHAUST',
    r'\bEXHAUST\b': 'EXHAUST',
    r'\bBLR\.EXH\.\b': 'BOILER EXHAUST',
    # temperature
    r'\bTEMP\.\b': 'TEMPERATURE',
    r'\bTEMP\b': 'TEMPERATURE',
    r'\bTEMPERATURE\.\b': 'TEMPERATURE',
    r'\bTEMPERATURE\b': 'TEMPERATURE',
    # cylinder
    r'\bCYL(\d+)\b': r'CYLINDER\1',
    r'\bCYL\.(\d+)\b': r'CYLINDER\1',
    r'\bCYL(?=\d|\W|$)\b': 'CYLINDER',
    r'\bCYL\.\b': 'CYLINDER',
    r'\bCYL\b': 'CYLINDER',
    # cooling
    r'\bCOOL\.\b': 'COOLING',
    r'\bCOOLING\b': 'COOLING',
    r'\bCOOLER\b': 'COOLER',
    r'\bCW\b': 'COOLING WATER',
    r'\bC\.W\b': 'COOLING WATER',
    r'\bJ\.C\.F\.W\b': 'JACKET COOLING FEED WATER',
    r'\bJ\.C F\.W\b': 'JACKET COOLING FEED WATER',
    r'\bJACKET C\.F\.W\b': 'JACKET COOLING FEED WATER',
    r'\bCOOL\. F\.W\b': 'COOLING FEED WATER',
    r'\bC\.F\.W\b': 'COOLING FEED WATER',
    # sea water
    r'\bC\.S\.W\b': 'COOLING SEA WATER',
    r'\bCSW\b': 'COOLING SEA WATER',
    # water
    r'\bFEED W\.\b': 'FEED WATER',
    r'\bFEED W\b': 'FEED WATER',
    r'\bF\.W\b': 'FEED WATER',
    r'\bF\.W\.\b': 'FEED WATER',
    r'\bFW\b': 'FEED WATER',
    # r'\bWATER\b': 'WATER',
    r'\bSCAV\.\b': 'SCAVENGE',
    r'\bSCAV\b': 'SCAVENGE',
    r'\bINL\.\b': 'INLET',
    r'\bINLET\b': 'INLET',
    r'\bOUT\.\b': 'OUTLET',
    r'\bOUTL\.\b': 'OUTLET',
    r'\bOUTLET\b': 'OUTLET',
    # tank
    r'\bSTOR\.TK\b': 'STORAGE TANK',
    r'\bSTOR\. TK\b': 'STORAGE TANK',
    r'\bSERV\. TK\b': 'SERVICE TANK',
    r'\bSETT\. TK\b': 'SETTLING TANK',
    r'\bBK\b': 'BUNKER',
    r'\bTK\b': 'TANK',
    # PRESSURE
    r'\bPRESS\b': 'PRESSURE',
    r'\bPRESS\.\b': 'PRESSURE',
    r'\bPRESSURE\b': 'PRESSURE',
    r'PRS\b': 'PRESSURE',  # this is a special replacement - it is safe to replace PRS without checks
    # ENGINE
    r'\bENG\.\b': 'ENGINE',
    r'\bENG\b': 'ENGINE',
    r'\bENGINE\b': 'ENGINE',
    r'\bENGINE SPEED\b': 'ENGINE SPEED',
    r'\bENGINE RUNNING\b': 'ENGINE RUNNING',
    r'\bENGINE RPM PICKUP\b': 'ENGINE RPM PICKUP',
    r'\bENGINE ROOM\b': 'ENGINE ROOM',
    r'\bE/R\b': 'ENGINE ROOM',
    # MAIN ENGINE
    r'\bM/E NO\.(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bM/E NO(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bME NO\.(\d+)\b': r'NO\1 MAIN_ENGINE',
    r'\bM/E\b': 'MAIN_ENGINE',
    r'\bM/E(.)\b': r'MAIN_ENGINE \1',  # M/E(S/P)
    r'\bME(.)\b': r'MAIN_ENGINE \1',  # ME(S/P)
    r'\bM_E\b': 'MAIN_ENGINE',
    r'\bME(?=\d|\W|$)\b': 'MAIN_ENGINE',
    r'\bMAIN ENGINE\b': 'MAIN_ENGINE',
    # ENGINE variants
    r'\bM_E_RPM\b': 'MAIN ENGINE RPM',
    r'\bM/E_M\.G\.O\.\b': 'MAIN ENGINE MARINE GAS OIL',
    r'\bM/E_H\.F\.O\.\b': 'MAIN ENGINE HEAVY FUEL OIL',
    # GENERATOR ENGINE
    r'\bGEN(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bGE(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    # ensure that we substitute only for terms where the character following GE is a number or special character
    r'\bGE(?=\d|\W|$)\b': 'GENERATOR_ENGINE',
    r'\bG/E(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bG/E\b': 'GENERATOR_ENGINE',
    r'\bG_E(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bG_E\b': 'GENERATOR_ENGINE',
    r'\bGENERATOR ENGINE\b': 'GENERATOR_ENGINE',
    r'\bG/E_M\.G\.O\b': 'GENERATOR_ENGINE MARINE GAS OIL',
    # DG
    r'\bDG(\d+)\b': r'NO\1 GENERATOR_ENGINE',
    r'\bDG\b': 'GENERATOR_ENGINE',
    r'\bD/G\b': 'GENERATOR_ENGINE',
    r'\bDG(\d+)\((.)\)\b': r'NO\1\2 GENERATOR_ENGINE',  # handle DG2(A)
    r'\bDG(\d+[A-Za-z])\b': r'NO\1 GENERATOR_ENGINE',  # handle DG2A
    # DG variants
    r'\bDG_CURRENT\b': 'GENERATOR_ENGINE CURRENT',
    r'\bDG_LOAD\b': 'GENERATOR_ENGINE LOAD',
    r'\bDG_FREQUENCY\b': 'GENERATOR_ENGINE FREQUENCY',
    r'\bDG_VOLTAGE\b': 'GENERATOR_ENGINE VOLTAGE',
    r'\bDG_CLOSED\b': 'GENERATOR_ENGINE CLOSED',
    r'\bD/G_CURRENT\b': 'GENERATOR_ENGINE CURRENT',
    r'\bD/G_LOAD\b': 'GENERATOR_ENGINE LOAD',
    r'\bD/G_FREQUENCY\b': 'GENERATOR_ENGINE FREQUENCY',
    r'\bD/G_VOLTAGE\b': 'GENERATOR_ENGINE VOLTAGE',
    r'\bD/G_CLOSED\b': 'GENERATOR_ENGINE CLOSED',
    # MGE
    r'\b(\d+)MGE\b': r'NO\1 MAIN_GENERATOR_ENGINE',
    # generator engine and mgo
    r'\bG/E_M\.G\.O\.\b': r'GENERATOR_ENGINE MARINE GAS OIL',
    r'\bG/E_H\.F\.O\.\b': r'GENERATOR_ENGINE HEAVY FUEL OIL',
    # ultra low sulfur fuel oil
    r'\bU\.L\.S\.F\.O\b': 'ULTRA LOW SULFUR FUEL OIL',
    r'\bULSFO\b': 'ULTRA LOW SULFUR FUEL OIL',
    # marine gas oil
    r'\bM\.G\.O\b': 'MARINE GAS OIL',
    r'\bMGO\b': 'MARINE GAS OIL',
    r'\bMDO\b': 'MARINE DIESEL OIL',
    # light fuel oil
    r'\bL\.F\.O\b': 'LIGHT FUEL OIL',
    r'\bLFO\b': 'LIGHT FUEL OIL',
    # heavy fuel oil
    r'\bHFO\b': 'HEAVY FUEL OIL',
    r'\bH\.F\.O\b': 'HEAVY FUEL OIL',
    # piston cooling oil
    r'\bPCO\b': 'PISTON COOLING OIL',
    r'\bP\.C\.O\.\b': 'PISTON COOLING OIL',
    r'\bP\.C\.O\b': 'PISTON COOLING OIL',
    r'PISTION C\.O': 'PISTON COOLING OIL',  # catches the common 'PISTION' misspelling
    # diesel oil
    r'\bD\.O\b': 'DIESEL OIL',
    # for remaining fuel oil that couldn't be substituted
    r'\bF\.O\b': 'FUEL OIL',
    r'\bFO\b': 'FUEL OIL',
    # lubricant
    r'\bLUB\.\b': 'LUBRICANT',
    r'\bLUBE\b': 'LUBRICANT',
    r'\bLUBR\.\b': 'LUBRICANT',
    r'\bLUBRICATING\.\b': 'LUBRICANT',
    r'\bLUBRICATION\.\b': 'LUBRICANT',
    # lubricating oil
    r'\bL\.O\b': 'LUBRICATING OIL',
    r'\bLO\b': 'LUBRICATING OIL',
    # lubricating oil pressure
    r'\bLO_PRESS\b': 'LUBRICATING OIL PRESSURE',
    r'\bLO_PRESSURE\b': 'LUBRICATING OIL PRESSURE',
    # temperature
    r'\bL\.T\b': 'LOW TEMPERATURE',
    r'\bLT\b': 'LOW TEMPERATURE',
    r'\bH\.T\b': 'HIGH TEMPERATURE',
    r'\bHT\b': 'HIGH TEMPERATURE',
    # BOILER
    # auxiliary boiler
    # replace these first before replacing AUXILIARY only
    r'\bAUX\.BOILER\b': 'AUXILIARY BOILER',
    r'\bAUX\. BOILER\b': 'AUXILIARY BOILER',
    r'\bAUX BLR\b': 'AUXILIARY BOILER',
    r'\bAUX\.\b': 'AUXILIARY',
    r'\bAUX\b': 'AUXILIARY',
    # composite boiler
    r'\bCOMP\. BOILER\b': 'COMPOSITE BOILER',
    r'\bCOMP\.BOILER\b': 'COMPOSITE BOILER',
    r'\bCOMP BOILER\b': 'COMPOSITE BOILER',
    r'\bCOMP\b': 'COMPOSITE',
    r'\bCMPS\b': 'COMPOSITE',
    # any other boiler
    r'\bBLR\.\b': 'BOILER',
    r'\bBLR\b': 'BOILER',
    r'\bBOILER W\.CIRC\.P/P\b': 'BOILER WATER CIRC P/P',
    # winding
    r'\bWIND\.\b': 'WINDING',
    r'\bWINDING\b': 'WINDING',
    # VOLTAGE/FREQ/CURRENT
    r'\bVLOT\.': 'VOLTAGE',  # fixes a common misspelling
    r'\bVOLT\.': 'VOLTAGE',
    r'\bVOLTAGE\b': 'VOLTAGE',
    r'\bFREQ\.': 'FREQUENCY',
    r'\bFREQUENCY\b': 'FREQUENCY',
    r'\bCURR\.': 'CURRENT',
    r'\bCURRENT\b': 'CURRENT',
    # TURBOCHARGER
    r'\bTCA\b': 'TURBOCHARGER',
    r'\bTCB\b': 'TURBOCHARGER',
    r'\bT/C\b': 'TURBOCHARGER',
    r'\bT_C\b': 'TURBOCHARGER',
    r'\bT/C_RPM\b': 'TURBOCHARGER RPM',
    r'\bTC(\d+)\b': r'TURBOCHARGER\1',
    r'\bT/C(\d+)\b': r'TURBOCHARGER\1',
    r'\bTC(?=\d|\W|$)\b': 'TURBOCHARGER',
    r'\bTURBOCHAGER\b': 'TURBOCHARGER',
    r'\bTURBOCHARGER\b': 'TURBOCHARGER',
    r'\bTURBOCHG\b': 'TURBOCHARGER',
    # misc spelling errors
    r'\bOPERATOIN\b': 'OPERATION',
    # wrongly attached terms
    r'\bBOILERMGO\b': 'BOILER MGO',
    # additional standardizing replacement
    # replace # followed by a number with NO
    r'#(?=\d)\b': 'NO',
    r'\bNO\.(?=\d)\b': 'NO',
    r'\bNO\.\.(?=\d)\b': 'NO',
    # others:
    # generator
    r'\bGEN\.\b': 'GENERATOR',
    # others
    r'\bGEN\.WIND\.TEMP\b': 'GENERATOR WINDING TEMPERATURE',
    r'\bFLTR\b': 'FILTER',
    r'\bCLR\b': 'CLEAR',
}
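
# quick demonstration of how these patterns behave (example strings assumed):
#   re.sub(r'\bCYL(\d+)\b', r'CYLINDER\1', 'CYL1 PRESS')  -> 'CYLINDER1 PRESS'
#   re.sub(r'\bDG(\d+)\b', r'NO\1 GENERATOR_ENGINE', 'DG2 VOLTAGE')
#       -> 'NO2 GENERATOR_ENGINE VOLTAGE'
# the \b anchors keep short keys like r'\bCYL\b' from matching inside 'CYLINDER'.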

# substitution mapping for units
# abbreviations and their replacements
unit_replacement_dict = {
    r'\b%\b': 'PERCENT',
    r'\b-\b': '',
    r'\b- \b': '',
    # ensure there is no word character after A
    r'\bA(?!\w|/)': 'CURRENT',
    r'\bAmp(?!\w|/)': 'CURRENT',
    r'\bHz\b': 'HERTZ',
    r'\bKG/CM2\b': 'PRESSURE',
    r'\bKG/H\b': 'KILOGRAM PER HOUR',
    r'\bKNm\b': 'RPM',
    r'\bKW\b': 'POWER',
    r'\bKg(?!\w|/)': 'MASS',
    r'\bKw\b': 'POWER',
    r'\bL(?!\w|/)': 'VOLUME',
    r'\bMT/h\b': 'METRIC TONNES PER HOUR',
    r'\bMpa\b': 'PRESSURE',
    r'\bPF\b': 'POWER FACTOR',
    r'\bRPM\b': 'RPM',
    r'\bV(?!\w|/)': 'VOLTAGE',
    r'\bbar(?!\w|/)': 'PRESSURE',
    r'\bbarA\b': 'SCAVENGE PRESSURE',
    r'\bcST\b': 'VISCOSITY',
    r'\bcSt\b': 'VISCOSITY',
    r'\bcst\b': 'VISCOSITY',
    r'\bdeg(?!\w|/|\.)': 'DEGREE',
    r'\bdeg\.C\b': 'TEMPERATURE',
    r'\bdegC\b': 'TEMPERATURE',
    r'\bdegree\b': 'DEGREE',
    r'\bdegreeC\b': 'TEMPERATURE',
    r'\bhPa\b': 'PRESSURE',
    r'\bhours\b': 'HOURS',
    r'\bkN\b': 'THRUST',
    r'\bkNm\b': 'TORQUE',
    r'\bkW\b': 'POWER',
    # ensure that kg is not followed by anything
    r'\bkg(?!\w|/)': 'FLOW',  # in this data it denotes flow
    r'\bkg/P\b': 'MASS FLOW',
    r'\bkg/cm2\b': 'PRESSURE',
    r'\bkg/cm²\b': 'PRESSURE',
    r'\bkg/h\b': 'MASS FLOW',
    r'\bkg/hr\b': 'MASS FLOW',
    r'\bkg/pulse\b': '',
    r'\bkgf/cm2\b': 'PRESSURE',
    r'\bkgf/cm²\b': 'PRESSURE',
    r'\bkgf/㎠\b': 'PRESSURE',
    r'\bknots\b': 'SPEED',
    r'\bkw\b': 'POWER',
    r'\bl/Hr\b': 'VOLUME FLOW',
    r'\bl/h\b': 'VOLUME FLOW',
    r'\bl_Hr\b': 'VOLUME FLOW',
    r'\bl_hr\b': 'VOLUME FLOW',
    r'\bM\b': 'DRAFT',  # for wind draft
    r'm': 'm',  # wind draft and trim - identity replacement, not useful
    r'\bm/s\b': 'SPEED',
    r'\bm3\b': 'VOLUME',
    r'\bmH2O\b': 'DRAFT',
    r'\bmWC\b': 'DRAFT',
    r'\bmbar\b': 'PRESSURE',
    r'\bmg\b': 'ACCELERATION',
    r'\bmin-¹\b': '',  # data too varied
    r'\bmm\b': '',  # data too varied
    r'\bmmH2O\b': 'WATER DRUM LEVEL',
    r'\brev\b': 'RPM',
    r'\brpm\b': 'RPM',
    r'\bx1000min-¹\b': '',
    r'\b°C\b': 'TEMPERATURE',
    r'\bºC\b': 'TEMPERATURE',
    r'\b℃\b': 'TEMPERATURE'
}
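
# note on the negative lookaheads (example strings assumed): r'\bA(?!\w|/)'
# matches the bare ampere unit 'A' but deliberately skips 'Amp' and 'A/B',
# leaving compound units to their own, more specific entries above.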

@@ -0,0 +1,64 @@
# %%
import pandas as pd
from mapper import Mapper
from preprocess import Abbreviator
from deduplication import run_deduplication

# global config
BATCH_SIZE = 1024
SHIPS_LIST = [1000, 1001, 1003]

# %%
# START: we import the raw data csv and extract only a few ships from it to simulate an incoming json
data_path = 'raw_data.csv'
full_df = pd.read_csv(data_path, skipinitialspace=True)
# subset to only the ships found in SHIPS_LIST
df = full_df[full_df['ships_idx'].isin(SHIPS_LIST)].reset_index(drop=True)

num_rows = 2000
df = df[:num_rows]
print(len(df))

# pre-process the data
abbreviator = Abbreviator(df)
df = abbreviator.run()

# %%
##########################################
# run mapping
checkpoint_path = 'models/mapping_model'
mapper = Mapper(checkpoint_path)
mapper.prepare_dataloader(df, batch_size=BATCH_SIZE, max_length=128)
thing_prediction_list, property_prediction_list = mapper.generate()

# convert the prediction lists to a pandas dataframe
# thing_actual_list, property_actual_list = decode_preds(pred_labels)
df_out = pd.DataFrame({
    'p_thing': thing_prediction_list,
    'p_property': property_prediction_list
})
# df_out['p_thing_correct'] = df_out['p_thing'] == df_out['thing']
# df_out['p_property_correct'] = df_out['p_property'] == df_out['property']
df = pd.concat([df, df_out], axis=1)

# %%
####################################
# run de-duplication with thresholding
data_path = "train_all.csv"
train_df = pd.read_csv(data_path, skipinitialspace=True)
train_df['mapping'] = train_df['thing'] + " " + train_df['property']

df = run_deduplication(
    test_df=df,
    train_df=train_df,
    batch_size=BATCH_SIZE,
    threshold=0.9,
    diagnostic=True)

# %%

@@ -3,9 +3,7 @@ from transformers import (
     AutoTokenizer,
     AutoModelForSequenceClassification,
     AutoModelForSeq2SeqLM,
-    DataCollatorWithPadding,
 )
-import torch.nn.functional as F
 
 
@@ -24,7 +22,7 @@ class BertEmbedder:
         self.model = model.eval()
 
 
-    def make_embedding(self, batch_size=64):
+    def make_embedding(self, batch_size=128):
         all_embeddings = self.embeddings
         input_texts = self.inputs