Fix Dockerfile build issue
autorag/nodes/passagereranker/tart/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .tart import Tart
autorag/nodes/passagereranker/tart/modeling_enc_t5.py (new file, 152 lines)
@@ -0,0 +1,152 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import copy

from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.models.t5.modeling_t5 import T5Config, T5PreTrainedModel, T5Stack
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

from autorag.utils.util import empty_cuda_cache


class EncT5ForSequenceClassification(T5PreTrainedModel):
    _keys_to_ignore_on_load_missing = [
        r"encoder\.embed_tokens\.weight",
    ]

    def __init__(self, config: T5Config, dropout=0.1):
        super().__init__(config)
        try:
            from torch import nn
        except ImportError:
            raise ImportError("Please install PyTorch to use TART reranker.")
        self.num_labels = config.num_labels
        self.config = config

        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)

        self.dropout = nn.Dropout(dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    def parallelize(self, device_map=None):
        try:
            import torch
        except ImportError:
            raise ImportError("Please install PyTorch to use TART reranker.")
        self.device_map = (
            get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.encoder.block))
        self.encoder.parallelize(self.device_map)
        self.classifier = self.classifier.to(self.encoder.first_device)
        self.model_parallel = True

    def deparallelize(self):
        self.encoder.deparallelize()
        self.encoder = self.encoder.to("cpu")
        self.model_parallel = False
        self.device_map = None
        empty_cuda_cache()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)

    def get_encoder(self):
        return self.encoder

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        try:
            import torch
            from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
        except ImportError:
            raise ImportError("Please install PyTorch to use TART reranker.")
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        pooled_output = hidden_states[:, 0, :]  # Take bos token (equiv. to <s>)

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
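For orientation: EncT5ForSequenceClassification is an encoder-only T5. It runs just the encoder stack, pools the hidden state of the first token (the <s> position), and pushes it through dropout and a linear head, so relevance scoring becomes a two-label classification. Below is a minimal sketch of scoring a single query-passage pair the way tart.py does further down; the model id and the softmax-over-two-labels readout come from this commit, while the example strings are invented.

import torch
import torch.nn.functional as F

from autorag.nodes.passagereranker.tart.modeling_enc_t5 import (
    EncT5ForSequenceClassification,
)
from autorag.nodes.passagereranker.tart.tokenization_enc_t5 import EncT5Tokenizer

model = EncT5ForSequenceClassification.from_pretrained("facebook/tart-full-flan-t5-xl")
tokenizer = EncT5Tokenizer.from_pretrained("facebook/tart-full-flan-t5-xl")
model.eval()

# TART pairs "instruction [SEP] query" with the candidate passage.
text = "Find passage to answer given question [SEP] What is the capital of France?"
passage = "Paris is the capital and most populous city of France."

feature = tokenizer(text, passage, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**feature).logits  # shape (1, 2)
score = float(F.softmax(logits, dim=1)[0][1])  # probability of the "relevant" label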
autorag/nodes/passagereranker/tart/tart.py (new file, 139 lines)
@@ -0,0 +1,139 @@
from itertools import chain
from typing import List, Tuple

import pandas as pd

from autorag.nodes.passagereranker.base import BasePassageReranker
from autorag.nodes.passagereranker.tart.modeling_enc_t5 import (
    EncT5ForSequenceClassification,
)
from autorag.nodes.passagereranker.tart.tokenization_enc_t5 import EncT5Tokenizer
from autorag.utils.util import (
    make_batch,
    sort_by_scores,
    flatten_apply,
    select_top_k,
    result_to_dataframe,
    empty_cuda_cache,
)


class Tart(BasePassageReranker):
    def __init__(self, project_dir: str, *args, **kwargs):
        super().__init__(project_dir)
        try:
            import torch
        except ImportError:
            raise ImportError(
                "torch is not installed. Please install torch first to use TART reranker."
            )
        model_name = "facebook/tart-full-flan-t5-xl"
        self.model = EncT5ForSequenceClassification.from_pretrained(model_name)
        self.tokenizer = EncT5Tokenizer.from_pretrained(model_name)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = self.model.to(self.device)

    def __del__(self):
        del self.model
        del self.tokenizer
        empty_cuda_cache()
        super().__del__()

    @result_to_dataframe(["retrieved_contents", "retrieved_ids", "retrieve_scores"])
    def pure(self, previous_result: pd.DataFrame, *args, **kwargs):
        queries, contents, _, ids = self.cast_to_run(previous_result)
        top_k = kwargs.pop("top_k")
        instruction = kwargs.pop("instruction", "Find passage to answer given question")
        batch = kwargs.pop("batch", 64)
        return self._pure(queries, contents, ids, top_k, instruction, batch)

    def _pure(
        self,
        queries: List[str],
        contents_list: List[List[str]],
        ids_list: List[List[str]],
        top_k: int,
        instruction: str = "Find passage to answer given question",
        batch: int = 64,
    ) -> Tuple[List[List[str]], List[List[str]], List[List[float]]]:
        """
        Rerank a list of contents based on their relevance to a query using TART,
        an instruction-aware reranker (https://github.com/facebookresearch/tart).
        You can steer the reranking by passing an instruction.
        The default model is facebook/tart-full-flan-t5-xl.

        :param queries: The list of queries to use for reranking
        :param contents_list: The list of lists of contents to rerank
        :param ids_list: The list of lists of ids retrieved from the initial ranking
        :param top_k: The number of passages to be retrieved
        :param instruction: The instruction for reranking.
            The default, "Find passage to answer given question", is the instruction
            used in the TART paper.
            Pass a different instruction through this parameter to change the reranking behavior.
        :param batch: The number of queries to be processed in a batch
        :return: Tuple of lists containing the reranked contents, ids, and scores
        """
        nested_list = [
            [["{} [SEP] {}".format(instruction, query)] for _ in contents]
            for query, contents in zip(queries, contents_list)
        ]

        rerank_scores = flatten_apply(
            tart_run_model,
            nested_list,
            model=self.model,
            batch_size=batch,
            tokenizer=self.tokenizer,
            device=self.device,
            contents_list=contents_list,
        )

        df = pd.DataFrame(
            {
                "contents": contents_list,
                "ids": ids_list,
                "scores": rerank_scores,
            }
        )
        df[["contents", "ids", "scores"]] = df.apply(
            sort_by_scores, axis=1, result_type="expand"
        )
        results = select_top_k(df, ["contents", "ids", "scores"], top_k)

        return (
            results["contents"].tolist(),
            results["ids"].tolist(),
            results["scores"].tolist(),
        )


def tart_run_model(
    input_texts, contents_list, model, batch_size: int, tokenizer, device
):
    try:
        import torch
        import torch.nn.functional as F
    except ImportError:
        raise ImportError(
            "torch is not installed. Please install torch first to use TART reranker."
        )
    flattened_texts = list(chain.from_iterable(input_texts))
    flattened_contents = list(chain.from_iterable(contents_list))
    batch_input_texts = make_batch(flattened_texts, batch_size)
    batch_contents_list = make_batch(flattened_contents, batch_size)
    results = []
    for batch_texts, batch_contents in zip(batch_input_texts, batch_contents_list):
        feature = tokenizer(
            batch_texts,
            batch_contents,
            padding=True,
            truncation=True,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            pred_scores = model(**feature).logits
            normalized_scores = [
                float(score[1]) for score in F.softmax(pred_scores, dim=1)
            ]
        results.extend(normalized_scores)
    return results
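A hypothetical end-to-end call of the node, for context. The previous_result columns sketched here are an assumption about the frame the upstream retrieval node produces (this diff only shows that cast_to_run consumes it and that the decorator emits retrieved_contents, retrieved_ids, and retrieve_scores); the project path is made up.

import pandas as pd

from autorag.nodes.passagereranker.tart import Tart

previous_result = pd.DataFrame(
    {
        "query": ["What is the capital of France?"],
        "retrieved_contents": [
            ["Berlin is the capital of Germany.", "Paris is the capital of France."]
        ],
        "retrieved_ids": [["doc-1", "doc-2"]],
        "retrieve_scores": [[0.7, 0.6]],
    }
)

reranker = Tart(project_dir="./example_project")  # hypothetical project dir
reranked = reranker.pure(
    previous_result,
    top_k=1,
    instruction="Find passage to answer given question",
)
# reranked is a DataFrame with retrieved_contents, retrieved_ids, and
# retrieve_scores, reordered so the passage TART scores highest comes first.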
autorag/nodes/passagereranker/tart/tokenization_enc_t5.py (new file, 112 lines)
@@ -0,0 +1,112 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from typing import Any, Dict, List, Optional

from transformers import T5Tokenizer


class EncT5Tokenizer(T5Tokenizer):
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            vocab_file=vocab_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=sp_model_kwargs,
            **kwargs,
        )

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        bos = [self.bos_token_id]
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(bos + token_ids_0 + eos) * [0]
        return len(bos + token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        else:
            return (
                [self.bos_token_id]
                + token_ids_0
                + [self.eos_token_id]
                + token_ids_1
                + [self.eos_token_id]
            )
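The three overrides give the encoder-only model RoBERTa-style <s> ... </s> framing; stock T5 tokenization appends only a trailing </s>. With made-up token ids, they behave like this (outputs derived by hand from the code above):

from autorag.nodes.passagereranker.tart.tokenization_enc_t5 import EncT5Tokenizer

tokenizer = EncT5Tokenizer.from_pretrained("facebook/tart-full-flan-t5-xl")
bos, eos = tokenizer.bos_token_id, tokenizer.eos_token_id

tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
# -> [bos, 10, 11, eos, 20, 21, eos], i.e. "<s> A </s> B </s>"

tokenizer.get_special_tokens_mask([10, 11], [20, 21])
# -> [1, 0, 0, 1, 0, 0, 1]  (1 marks the special tokens)

tokenizer.create_token_type_ids_from_sequences([10, 11], [20, 21])
# -> [0, 0, 0, 0, 0, 0, 0]  (T5 never uses token type ids)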