diff --git a/.drone.yml b/.drone.yml index 9bc4870..d58a24d 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,8 +1,6 @@ kind: pipeline name: update_website -when: - branch: - - main + steps: - name: fetch_webmentions image: python:3.7 @@ -22,7 +20,9 @@ steps: - name: store_webmentions image: appleboy/drone-git-push - + when: + branch: + - main settings: remote_name: origin branch: main diff --git a/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/explanation_example.png b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/explanation_example.png new file mode 100644 index 0000000..a955cc6 Binary files /dev/null and b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/explanation_example.png differ diff --git a/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/explanation_svm.png b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/explanation_svm.png new file mode 100644 index 0000000..0a8fbbf Binary files /dev/null and b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/explanation_svm.png differ diff --git a/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/weights.png b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/weights.png new file mode 100644 index 0000000..8b7b6f5 Binary files /dev/null and b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/images/weights.png differ diff --git a/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/index.md b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/index.md new file mode 100644 index 0000000..3212891 --- /dev/null +++ b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/index.md @@ -0,0 +1,437 @@ +--- +title: Painless Explainability for NLP/Text Models with LIME and ELI5 +type: post +description: An introduction to LIME ML model explainability in the context of NLP usage and how to use the ELI5 library - a painless way to use LIME local explainability for almost any model. +resources: + - name: feature + src: images/scrabble.jpg +date: 2022-01-13T07:47:11+00:00 +url: /2022/01/13/painless-explainability-for-text-models-with-eli5 +toc: true +tags: + - machine-learning + - work + - explainability +--- + + +## Introduction + + +Explainability of machine learning models is a hot topic right now - particularly in deep learning where models are that bit harder to reason about and understand. These models are often called 'black boxes' because you put something in, you get something out and you don't really know how that outcome was achieved. The ability to explain a machine learning model's decisions in terms of the features passed in is useful from a debugging standpoint (identifying features with weird weights), and with legislation like [GDPR's Right to an Explanation](https://www.privacy-regulation.eu/en/r71.htm) it is becoming important in a commercial setting to be able to explain why models behave a certain way. 
+ +In this post I will give a simplified overview of how LIME works (I may take some small technical liberties and manufacture some contrived examples to demonstrate some of these mechanisms and phenomena - apologies), then show how LIME can be applied first to a scikit-learn SVM-based text classification model and then to a huggingface/torch sentiment model.
+ + + +## Understanding LIME + +LIME stands for **L**ocal **I**nterpretable **M**odel-agnostic **E**xplanations and is a technique proposed by [Ribeiro et al.](https://arxiv.org/abs/1602.04938) in 2016. The basic premise is that for a given input example (in an image classifier we're talking 1 image, in a text classifier we're talking 1 unit of text e.g. a paragraph or a sentence, in a numerical model trained on tabular data we're talking 1 row from that table), LIME can approximate how much of an effect each of the features extracted from the input has on the final output (i.e. how important is a cluster of pixels in an image? How important are specific words/phrases in a sentence? How important is each column in that row of numbers?). + +For a given example both contributing and negating features are highlighted (reasons for and against that decision).
<!-- figure: a LIME explanation with contributing and negating words highlighted --> + + + +### Local + +The local aspect of LIME is described in [the paper](https://arxiv.org/abs/1602.04938): + + +> ...Although it is often impossible for an explanation to be completely faithful unless it is the complete description of the model itself, for an explanation to be meaningful it must at least be locally faithful, i.e. it must correspond to how the model behaves in the vicinity of the instance being predicted... +> + + +This is a really important constraint of LIME: it offers excellent example-specific explanations that work well for pockets of similar data points but these explanations can't necessarily be generalised for the whole of the model under examination. The authors of the paper also attempt to illustrate this limitation in a diagram:
<!-- figure: diagram from the LIME paper illustrating that explanations are only locally faithful --> + + + + +This is especially important in tasks that are highly context dependent (like text classification). Here's a contrived example of a spam detection use case. Take the words "7 million usd" as in: + +>Sir, +> +>I am a wealthy widow and if you help me I will pay you 7 million usd +> +>Best Regards + +and also + +>Kevin, +> +>the new term sheet from the investors is in, they're offering 7 million usd for 5% equity, +> +> Brian Smith
+> Head of Mergers & Acquisitions + +In the first example, the words "7 million usd" contribute to the suspicion that this is a scam in the presence of "wealthy widow" and "help me". In the second example the words "7 million usd" aren't as important; they're words that you'd probably expect in a legitimate email about an investment opportunity from your colleague in Mergers. + +The point I'm trying to make is that it's very difficult to come up with good general rules about which words are important without any context (and indeed if you can do that then you probably don't need machine learning, you can just build a rule-based system that checks for the presence or absence of words on a list). The overall decision function of "spam or not spam" is much more complicated than "these words are good and these words are bad" but for a certain set of "spammy" examples we can certainly say which words are more spammy and which words are less spammy. This is analogous to the concepts at play in LIME too. + +Therefore when we're using LIME, we should avoid saying things like "The model seems to consider the words 'million' and 'usd' spammy" and we should say things like "in cases similar to the widow email, it looks like the words 'million' and 'usd' contributed to the decision that this email was spam in the absence of any other redeeming words". + + +### Interpretable + +Some machine learning models like [linear models](https://scikit-learn.org/stable/modules/linear_model.html) and [Decision Trees](https://scikit-learn.org/stable/modules/tree.html) are inherently interpretable through being able to measure parameter coefficients (how big the weight of the feature is when calculating the decision boundary line) in the case of the former and how early on a feature appears in a decision tree (since decision trees use [information gain](https://en.wikipedia.org/wiki/Information_gain_in_decision_trees) to put features that tell us most about the final classification/decision near the top of the tree so that they impact more data points) in the case of the latter. + +LIME exploits these explainable models in order to explain the local context around a given input example. We perturb (slightly change) the input example and use the black-box model under analysis to make predictions. As words are added or removed from the input, the output from the black box model changes slightly (in the [contrived again] example below, removing the word 'love' from the movie review reduces the probability that the review is positive.)
<!-- figure: perturbed copies of a movie review and the black-box model's output for each --> + + +These perturbed inputs and the corresponding outputs from the 'black box' model that we're analysing are then used as a training set to train the local, interpretable model. + +For text models, LIME uses [Bag-of-Words](https://en.wikipedia.org/wiki/Bag-of-words_model) (BoW) representations of the perturbed input as the features for the local model. + +We can then use the interpretable information (parameter coefficients/feature position in decision tree) for the local model to approximately interpret the effect that the different words have on the bigger model since each word in the local BoW vocabulary will have an associated coefficient.
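+
+To make that loop concrete, here is a minimal hand-rolled sketch of the idea. This is my own illustration, *not* the actual LIME/ELI5 implementation (the real thing also distance-weights the samples and uses smarter sampling). It assumes `black_box_predict` is any function that maps a list of texts to an array of class probabilities, like the `pipe.predict_proba` and `model_adapter` functions later in this post.
+
+```python
+import random
+
+from sklearn.feature_extraction.text import CountVectorizer
+from sklearn.linear_model import Ridge
+
+def perturb(text, n_samples=1000, p_drop=0.3):
+    """Generate variants of `text` with ~30% of the words randomly removed."""
+    words = text.split()
+    samples = []
+    for _ in range(n_samples):
+        kept = [w for w in words if random.random() > p_drop]
+        samples.append(" ".join(kept) or text)  # avoid empty samples
+    return samples
+
+def explain_locally(text, black_box_predict, target_class):
+    samples = perturb(text)
+    # bag-of-words features for the local, interpretable model
+    vec = CountVectorizer(binary=True)
+    X = vec.fit_transform(samples)
+    # the black box provides the training signal: the probability
+    # of `target_class` for each perturbed sample
+    y = black_box_predict(samples)[:, target_class]
+    # fit a simple linear model on the perturbed neighbourhood
+    local_model = Ridge(alpha=1.0).fit(X, y)
+    # a word's coefficient approximates its local contribution
+    return sorted(zip(vec.get_feature_names_out(), local_model.coef_),
+                  key=lambda pair: -abs(pair[1]))
+```
+
+Once the scikit-learn pipeline below is trained, something like `explain_locally(doc, pipe.predict_proba, target_class=2)` would rank the words in `doc` by how strongly they push the `sci.med` probability up or down within the neighbourhood of `doc` - which is essentially the loop that ELI5's `TextExplainer` automates for us.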
+ + +### Model-Agnostic + +LIME's model agnosticism is one of its most useful attributes. As long as you know how to encode the input data and your model has the ability to provide probability distributions over its outputs, you can provide local explanations for any type of model. This is because the explanation comes from the local model and the BoW features therein rather than the black box model. + +In the section below I've provided some examples of how to use ELI5 with some different types of models. + +### Explanation + +Explanations that are produced by LIME for NLP models are expressed in terms of which words/phrases were considered as the biggest contributing factors towards a class decision by the model. + +If you look at the results in Jupyter you'll get red and green highlights over the text input showing the degree to which each word contributed (green) or reduced (red) the likelihood that the input example is from the class under the microscope. In the example below you can see that kidney stones and medication are keywords that the model has learned can be used to classify examples in this neighbourhood (remember these explanations don't apply globally) as medical and that the presence of these words detracts from the likelihood that the email is about religion or graphic design. + +{{<figure src="images/explanation_example.png">}} + +The `<BIAS>` contribution is the model's underlying bias towards or against a particular class - again ***within this neighbourhood***. The most intuitive way to think about this parameter is that it describes the model's perception that other examples, similar to this one, belong to the given class. The bias is usually a much smaller contributing factor than the actual features as we see in the example above. + +We can also inspect the weights/feature importances that the model has generated ***for the current local neighbourhood*** and see, for each class, what words or phrases the model thinks are predictive of a particular class.
{{<figure src="images/weights.png">}} + +This table can also be useful as it can highlight surprising or incorrect results, like the suggestion that "to be" or "do anything" might signal a post about atheism. It's always worth having a look, and if you see anything weird then also [check whether the model is trustworthy](#checking-whether-the-explanation-is-trustworthy) or whether your black-box model might be doing something strange. + + +## Usage Examples + +### Requirements and Setup + +In order to get any of the examples below running you will need a relatively recent version of Python 3 and the [eli5](https://eli5.readthedocs.io/en/latest/autodocs/lime.html#eli5.lime.lime.TextExplainer) library installed too (e.g. `pip install eli5`). You will probably want to run the example code in a [Jupyter Notebook](https://jupyter.org/) so that you can see the pretty graphical explanations. + +If you're not sure about which version of Python to install, you might want to have a quick look at [my opinionated guide to Python environment setup](/2021/04/01/opinionated-guide-to-virtualenvs/). + +All of these examples will work fine on machines without GPUs although the [transformer model](#eli5-and-transformershuggingface) is a little slow running on CPU (it takes about 60 seconds to run on my 2020 Dell XPS w/ i7, 16GB RAM). + +### ELI5 and Scikit-Learn + +[Scikit-Learn](https://scikit-learn.org/stable/) is one of the most widely used machine learning libraries used by data scientists everywhere. In this first example we're going to train a model in scikit-learn and then use ELI5 to get an explanation for it. Make sure you have your Python environment set up and [scikit-learn](https://scikit-learn.org/stable/) installed. + +If you recognise the following example that's because it is also the example that [ELI5 use in their documentation](https://eli5.readthedocs.io/en/latest/tutorials/black-box-text-classifiers.html#example-problem-lsa-svm-for-20-newsgroups-dataset) but I've added some commentary to what's happening in the code snippets. + +We are going to train a [Support Vector Machine (SVM)](https://en.wikipedia.org/wiki/Support-vector_machine) model to predict which newsgroup an email came from thanks to the [20 newsgroup](https://scikit-learn.org/stable/datasets/real_world.html#newsgroups-dataset) dataset. SVMs with a linear kernel do have feature coefficients which could be used to provide global feature importance. However, to make it harder we will be using an [RBF](https://en.wikipedia.org/wiki/Radial_basis_function_kernel) kernel and we will use [Latent Semantic Analysis](https://en.wikipedia.org/wiki/Latent_semantic_analysis) because that's the setup used in the example and it's a combination that cannot be explained simply without LIME. + +#### Why SVM and LSA? + +So why did they use RBF and LSA? Is it a contrived example just to show off LIME? + +Well, LSA is often used as a way to get more performance from an underlying [BoW](https://en.wikipedia.org/wiki/Bag-of-words_model) model by reducing dimensionality and combining commonly co-occurring words into a single feature (rather than having one feature per word). With LSA we might be able to do a better job of capturing some of the general 'topics' and themes that occur across a whole document rather than just tracking words and key phrases (n-grams). This could help with scenarios like the spammer above where LSA could put co-occurrences of 'widow', 'million' and 'usd' in one dimension and 'term sheet', 'million', 'usd' in another dimension, giving the SVM a bit of context for the words 'million' and 'usd'.
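+
+As a toy illustration of that idea (my own contrived example - not from the post's dataset or the ELI5 docs), here is roughly what the LSA step does: it squeezes a sparse TF-IDF matrix down to a handful of dense 'topic' dimensions in which frequently co-occurring terms share weight.
+
+```python
+from sklearn.decomposition import TruncatedSVD
+from sklearn.feature_extraction.text import TfidfVectorizer
+
+docs = [
+    "wealthy widow will pay you 7 million usd",
+    "help me and I will pay you 7 million usd",
+    "term sheet from the investors offering 7 million usd",
+    "the investors signed the term sheet",
+]
+
+tfidf = TfidfVectorizer().fit_transform(docs)  # sparse, one column per word
+lsa = TruncatedSVD(n_components=2, random_state=42)
+topics = lsa.fit_transform(tfidf)              # dense, one column per 'topic'
+print(topics.round(2))                         # each doc as a mix of 2 topics
+```
+
+You should see the widow-style documents and the term-sheet-style documents loading onto different components even though they share the words 'million' and 'usd' - that separation is the extra context the SVM gets.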
+ +RBF is an SVM kernel that can separate data that is not linearly separable and there's a great explanation of this [here](https://www.kdnuggets.com/2016/06/select-support-vector-machine-kernels.html). RBF is often cited as a [reasonable first choice](https://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf) of kernel for SVMs. However, NLP practitioners will generally [recommend a linear kernel for text classification](https://www.svm-tutorial.com/2014/10/svm-linear-kernel-good-text-classification/) as in practice, and in my experience, text is usually linearly separable. It will always depend on your dataset though, so do some visualisation during exploratory analysis to see if an RBF kernel is appropriate. + + +#### Training the Model + +First we are going to use scikit-learn's built-in [fetch_20newsgroups](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_20newsgroups.html#sklearn.datasets.fetch_20newsgroups) helper function to download some example emails from 4 newsgroups. There could reasonably be some serious overlap between the atheism and Christian boards so this might be where LSA and our RBF kernel come in handy. + +```python +from sklearn.datasets import fetch_20newsgroups + +categories = ['alt.atheism', 'soc.religion.christian', + 'comp.graphics', 'sci.med'] +twenty_train = fetch_20newsgroups( + subset='train', + categories=categories, + shuffle=True, + random_state=42, + remove=('headers', 'footers'), +) +twenty_test = fetch_20newsgroups( + subset='test', + categories=categories, + shuffle=True, + random_state=42, + remove=('headers', 'footers'), +) + +``` + +In the next code snippet we train the model. The [TFIDFVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) splits the texts into tokens and builds a bag-of-words representation of the text, with the addition of [TF-IDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) information to help us filter out words that don't give us any information. + +The [TruncatedSVD](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html) object is applied to the TFIDF vectorizer to give us our latent signals/categories. + +Then the [SVC](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) is fed the output of the SVD/LSA component. + +Each component is linked together into a [Pipeline](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html) object that basically provides syntactic sugar for us later and avoids us having to manually define an interface for ELI5 to call in order to use our model. + +Finally we call `pipe.fit()` on the training data to actually feed the pipeline and train the model and `pipe.score()` on the test set to give us a top-line accuracy (if we were doing a thorough job we should probably also look at [other appropriate metrics](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html)). + +`random_state` is simply a number that is used to seed Python's pseudo-random number generator which scikit-learn uses for pseudo-random operations. Setting random state explicitly is a good habit to get into in order to preserve the reproducibility of your models. + +Another key parameter set here is `probability=True` on the SVM. This will allow us to get the probability distributions across the classes that LIME will need later. If you don't set this then `predict_proba()` will fail at the next step. + + +```python +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.svm import SVC +from sklearn.decomposition import TruncatedSVD +from sklearn.pipeline import Pipeline, make_pipeline + +vec = TfidfVectorizer(min_df=3, stop_words='english', + ngram_range=(1, 2)) +svd = TruncatedSVD(n_components=100, n_iter=7, random_state=42) +lsa = make_pipeline(vec, svd) + +clf = SVC(C=150, gamma=2e-2, probability=True) +pipe = make_pipeline(lsa, clf) +pipe.fit(twenty_train.data, twenty_train.target) +pipe.score(twenty_test.data, twenty_test.target) + +``` + +#### Getting Some Predictions + +Now that the model is trained it is possible to run it on unseen data and get a prediction. In the tutorial the ELI5 authors provide a pretty printing function that shows the probability distribution of the labels for a given example. + +```python +def print_prediction(doc): + y_pred = pipe.predict_proba([doc])[0] + for target, prob in zip(twenty_train.target_names, y_pred): + print("{:.3f} {}".format(prob, target)) + +doc = twenty_test.data[0] +print_prediction(doc) +``` + +This is basically just predicting the classes for the given document, which is the first doc in the test set, and then combining the probabilities in the prediction (`y_pred`) with the class names (`twenty_train.target_names`). For the first test document you should see `sci.med` come out on top with a probability of around 0.99. + +#### Getting an Explanation + +Getting an explanation out of this model is relatively simple at this point. We simply import the [TextExplainer](https://eli5.readthedocs.io/en/latest/autodocs/lime.html#eli5.lime.lime.TextExplainer) class from ELI5 and `fit()` it to the document (the first one in the test set as per the above snippet). The TextExplainer will use the SVC pipeline `pipe` to make predictions for a bunch of perturbed examples and train its own model. The `show_prediction` function will then give a visualisation of the explanation. The `target_names=` parameter is used to pass the class names from our dataset to the text explainer so that they can be displayed nicely. + +```python +import eli5 +from eli5.lime import TextExplainer + +te = TextExplainer(random_state=42) +te.fit(doc, pipe.predict_proba) +te.show_prediction(target_names=twenty_train.target_names) +``` + +Et voilà! Hopefully you will get some output that looks like the below:
{{<figure src="images/explanation_svm.png">}} + +Finally, we can look at the model weights too: + +```python +te.explain_weights(target_names=twenty_train.target_names) +```
{{<figure src="images/weights.png">}} + + +### ELI5 and Transformers/Huggingface + +[Transformers](https://huggingface.co/docs/transformers/index) is an open-source library provided by HuggingFace which provides an easy-to-use wrapper around PyTorch and TensorFlow for transformer-based NLP models like BERT, RoBERTa etc. In order to use ELI5 with Transformers from huggingface, we need to have Python 3, [transformers](https://huggingface.co/docs/transformers/index) and a recent version of [pytorch](https://pytorch.org/) installed. + +This example will work on a machine without a GPU provided you aren't planning on training your transformer model from scratch. I am using [this sentiment model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) which evaluates the sentiment/rating of reviews from 1 to 5 in English, Dutch, German, French or Spanish. + +#### Why Transformers? + +Transformer-based models are, at the time of writing, **the in thing** for NLP models - they are a type of deep neural network that has contextual understanding of full sentences. If you're not familiar with them [this article](https://towardsdatascience.com/transformers-89034557de14) offers a fairly good introduction. + +There are good reasons for not using transformers - first and foremost is that they are very computationally expensive to train and somewhat computationally expensive during inference (as you will see if you run both the above SVM experiment and the below transformer experiment). If you find that a less powerful (both in terms of understanding and in terms of power consumption) model works for your use case then using that instead is probably a good move - it'll save you headaches later if you need to scale up your inference operation. + +#### Loading The Model + +The following snippet of code simply loads the model into memory and sets up the tokenizer ready for use with new text examples. + +```python + +from transformers import AutoModelForSequenceClassification +from transformers import AutoTokenizer +import numpy as np +import pandas as pd +from typing import List + +# this is the name of the model we want to evaluate - you can find +# others at huggingface.co/models or alternatively train your own +MODEL="nlptown/bert-base-multilingual-uncased-sentiment" + +tokenizer = AutoTokenizer.from_pretrained(MODEL) +model = AutoModelForSequenceClassification.from_pretrained(MODEL) +``` + +#### Defining the Interface with ELI5 + +This snippet of code defines the all-important `model_adapter` function which we use to interface between PyTorch and ELI5. + +ELI5 expects to be able to pass in a list of perturbed texts and get back a set of probability distributions (a matrix in the shape [NUM_EXAMPLES, NUM_CLASSES]). + +In our function we have to encode the text into a BERT-compatible input format using the [tokenizer](https://huggingface.co/transformers/main_classes/tokenizer.html). Then we pass the encoded input to the model and receive some predictions. + +Finally we use `softmax()` which will convert the raw *logits* generated by the model into nice smooth probability distributions that LIME is expecting to see. + +You may be wondering about the for loop and the batches. ELI5 tries to get results for 5000 samples at a time (by default) and that might be fine with a smaller, less powerful model, but with a transformer we can't fit all of those examples into memory. Therefore we split the samples into batches of 64 at a time so that we don't end up running out of RAM. + +```python +def model_adapter(texts: List[str]): + + all_scores = [] + + for i in range(0, len(texts), 64): + + batch = texts[i:i+64] + + # use bert encoder to tokenize text + encoded_input = tokenizer(batch, + return_tensors='pt', + padding=True, + truncation=True, + max_length=model.config.max_position_embeddings-2) + + # run the model + output = model(**encoded_input) + # by default this model gives raw logits rather + # than a nice smooth softmax so we apply it ourselves here + scores = output[0].softmax(1).detach().numpy() + + all_scores.extend(scores) + + return np.array(all_scores) + +``` + +#### Getting an Explanation + +The last piece in the puzzle is to actually run the model and get our explanation. Firstly we initialize our explainer object. `n_samples` gives the number of perturbed examples that LIME should generate in order to train the local model (more samples should give a more faithful local explanation at the cost of more compute/taking longer). Note that as above, we manually set `random_state` for reproducibility. + +Next we pass the text that we'd like to get an explanation for and the `model_adapter` function into `fit()` - this will trigger ELI5 to train a LIME model using our transformer model, which could take a few seconds or minutes depending on what sort of machine spec you have. + +Finally, we render the explanation using `te.explain_prediction()`. We pass `target_names=list(model.config.id2label.values())` which tells the `TextExplainer` what the class names from the BERT model are (class names are stored in `config.id2label` by convention in [Huggingface transformer configurations](https://huggingface.co/docs/transformers/main_classes/configuration) but this function will accept any list of strings that is the same length as the number of classes in the model). + +```python +from eli5.lime import TextExplainer + +te = TextExplainer(n_samples=5000, random_state=42) +te.fit("""The restaurant was amazing, the quality of their +food was exceptional. The waiters were so polite.""", model_adapter) +te.explain_prediction(target_names=list(model.config.id2label.values())) +``` + +Et voilà! Hopefully you will get some output that looks like the below (for this review, `5 stars` should come out as the most likely label):
<!-- figure: LIME explanation output for the transformer sentiment model --> + +You might also want to check the model weights with: + +```python +te.explain_weights(target_names=list(model.config.id2label.values())) +``` + + +### ELI5 and a Remotely Hosted Model / API + +This one is quite fun and exciting. Since LIME is model-agnostic, we can get an explanation for a remotely hosted model assuming we have access to the full probability distribution over its labels (and assuming you have enough API credits to train your local model). + +In this example I'm using Huggingface's [inference api](https://api-inference.huggingface.co/docs/python/html/quicktour.html) where they host transformer models on your behalf - you can pay to have your models run on GPUs for higher throughput. I made this guide with the free tier allowance which gives you 30k tokens per month - if you are using LIME with default settings you could easily eat through this whilst generating a single explanation so this is yet again a contrived example that gives you a taster of what is possible. + +#### Setting up + +For this part of the tutorial you will need the Python [requests](https://docs.python-requests.org/en/latest/) library and we are also going to make use of [scipy](https://docs.scipy.org) and pandas. You will also need a huggingface account and you will need to set up your API key as described in the [documentation](https://api-inference.huggingface.co/docs/python/html/quicktour.html). + +#### Building a Remote Model Adapter + +Firstly we need to build a model adapter function that allows ELI5 to interface with huggingface's models. Note the extra imports compared to the original snippet: `softmax` comes from scipy, and numpy and pandas are used to massage the API responses into the matrix that ELI5 expects. + +```python + +import json +from typing import List + +import numpy as np +import pandas as pd +import requests +from scipy.special import softmax + +MODEL="nlptown/bert-base-multilingual-uncased-sentiment" +API_TOKEN="YOUR API KEY HERE" +API_URL = f"https://api-inference.huggingface.co/models/{MODEL}" +headers = {"Authorization": f"Bearer {API_TOKEN}"} + +def query(payload): + data = json.dumps(payload) + response = requests.request("POST", API_URL, headers=headers, data=data) + return json.loads(response.content.decode("utf-8")) + +def result_to_df(result): + rows = [] + + for result_row in result: + row = {} + for lbl_score in result_row: + row[lbl_score['label']] = lbl_score['score'] + + rows.append(row) + + return pd.DataFrame(rows) + +def remote_model_adapter(texts: List[str]): + + all_scores = [] + + for text in texts: + + data = query(text) + all_scores.extend(result_to_df(data).values) + + return softmax(np.array(all_scores), axis=1) +```
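+
+With the adapter in place, wiring it into ELI5 looks just like the local examples. Here is a sketch (untested against the live API, so treat it as a starting point rather than a recipe) - keep `n_samples` small, because every perturbed sample is a separate call to the hosted model and eats into your token allowance.
+
+```python
+from eli5.lime import TextExplainer
+
+# the adapter's class order follows the columns that result_to_df
+# produces, so derive the display names the same way
+target_names = list(result_to_df(query("The food was great")).columns)
+
+te = TextExplainer(n_samples=500, random_state=42)
+te.fit("The restaurant was amazing, the quality of their food was exceptional.",
+       remote_model_adapter)
+te.explain_prediction(target_names=target_names)
+```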
+ +## Checking whether the explanation is trustworthy + +How do we know if our explanations are good? Like any other ML model, the models produced by LIME should be evaluated using a held-out test set of perturbed examples that the local model has not seen before. If the local model does well at predicting the black box's outputs for other, local examples that it's not seen yet, then we can assume that the model is a good fit (at least within the specific 'locality' under analysis). + +When we evaluate the local model against the black box model we want to know that, at the very least, the local model is making the same class predictions as the parent black-box model (do both the child model and parent model predict the same most likely class?). However, it is also useful to know precisely how similar those outputs are (given that both models predict the same 'most likely' class, what is the percentage difference in probability between the two predictions?). A good local model should produce a very similar probability distribution to the parent black-box model for the same inputs. Therefore we use [KL-Divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) as our performance metric in order to evaluate how well the model is performing. In a nutshell, KL-Divergence tells you how similar two probability distributions are (for discrete distributions it is `KL(P||Q) = sum(P(i) * log(P(i)/Q(i)))`) - and we want this number to be as small as possible (i.e. the probability distributions are pretty much the same). + +ELI5 provides this functionality all for free (it generates a test set of perturbed examples and evaluates the final model automatically) so all we need to do is look at the metrics and interpret them. For any of the above examples you should be able to run `te.metrics_` in Jupyter to get an output similar to the one below: + +``` +{'mean_KL_divergence': 0.01961629150756376, 'score': 0.9853027527973619} +``` + +The `score` metric is our local model accuracy which is 98.5% - that's quite reassuring. The mean KL Divergence is low at 0.0196 - this can be interpreted as a mean difference/divergence in the predictions of about 2% across the whole dataset, which seems acceptable. + +If the KL divergence is high or the score is low then you have a bad local model; it's worth checking to see why that might be the case and probably best not to trust the results. The [ELI5 Documentation](https://eli5.readthedocs.io/en/latest/tutorials/black-box-text-classifiers.html#should-we-trust-the-explanation) has some excellent information on specific cases where your NLP model might fail and how you might go about diagnosing these issues. + +## Conclusion + +In this post I have given you an insight into how LIME works under the covers and how it uses simple local models to offer explanations of more powerful black-box models. I've discussed some of the limitations of this approach and given some practical code examples for how you could apply LIME to commonly used frameworks in Python as well as a remote model API. + +If you enjoyed this article please take a moment to tweet, toot or send me a webmention. 
\ No newline at end of file diff --git a/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/test.ipynb b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/test.ipynb new file mode 100644 index 0000000..b4d31e6 --- /dev/null +++ b/brainsteam/content/posts/2022/01/13-01-painless-explainability-for-text-models-with-eli5/test.ipynb @@ -0,0 +1,1768 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Scikit Learn ELI5 Example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install scikit-learn" + ] + }, + { + "cell_type": "code", + "execution_count": 88, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.datasets import fetch_20newsgroups\n", + "\n", + "categories = ['alt.atheism', 'soc.religion.christian',\n", + " 'comp.graphics', 'sci.med']\n", + "twenty_train = fetch_20newsgroups(\n", + " subset='train',\n", + " categories=categories,\n", + " shuffle=True,\n", + " random_state=42,\n", + " remove=('headers', 'footers'),\n", + ")\n", + "twenty_test = fetch_20newsgroups(\n", + " subset='test',\n", + " categories=categories,\n", + " shuffle=True,\n", + " random_state=42,\n", + " remove=('headers', 'footers'),\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.8901464713715047" + ] + }, + "execution_count": 89, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from sklearn.feature_extraction.text import TfidfVectorizer\n", + "from sklearn.svm import SVC\n", + "from sklearn.decomposition import TruncatedSVD\n", + "from sklearn.pipeline import Pipeline, make_pipeline\n", + "\n", + "vec = TfidfVectorizer(min_df=3, stop_words='english',\n", + " ngram_range=(1, 2))\n", + "svd = TruncatedSVD(n_components=100, n_iter=7, random_state=42)\n", + "lsa = make_pipeline(vec, svd)\n", + "\n", + "clf = SVC(C=150, gamma=2e-2, probability=True)\n", + "pipe = make_pipeline(lsa, clf)\n", + "pipe.fit(twenty_train.data, twenty_train.target)\n", + "pipe.score(twenty_test.data, twenty_test.target)" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.001 alt.atheism\n", + "0.001 comp.graphics\n", + "0.995 sci.med\n", + "0.003 soc.religion.christian\n" + ] + } + ], + "source": [ + "def print_prediction(doc):\n", + " y_pred = pipe.predict_proba([doc])[0]\n", + " for target, prob in zip(twenty_train.target_names, y_pred):\n", + " print(\"{:.3f} {}\".format(prob, target))\n", + "\n", + "doc = twenty_test.data[0]\n", + "print_prediction(doc)" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + "\n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=alt.atheism\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.000, score -8.648)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.398\n", + " \n", + " <BIAS>\n", + "
\n", + " -8.249\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " as i recall from my bout with kidney stones, there isn't any\n", + "medication that can do anything about them except relieve the pain.\n", + "\n", + "either they pass, or they have to be broken up with sound, or they have\n", + "to be extracted surgically.\n", + "\n", + "when i was in, the x-ray tech happened to mention that she'd had kidney\n", + "stones and children, and the childbirth hurt less.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=comp.graphics\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.000, score -8.687)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.283\n", + " \n", + " <BIAS>\n", + "
\n", + " -8.404\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " as i recall from my bout with kidney stones, there isn't any\n", + "medication that can do anything about them except relieve the pain.\n", + "\n", + "either they pass, or they have to be broken up with sound, or they have\n", + "to be extracted surgically.\n", + "\n", + "when i was in, the x-ray tech happened to mention that she'd had kidney\n", + "stones and children, and the childbirth hurt less.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=sci.med\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.996, score 6.821)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " +6.883\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + " -0.061\n", + " \n", + " <BIAS>\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " as i recall from my bout with kidney stones, there isn't any\n", + "medication that can do anything about them except relieve the pain.\n", + "\n", + "either they pass, or they have to be broken up with sound, or they have\n", + "to be extracted surgically.\n", + "\n", + "when i was in, the x-ray tech happened to mention that she'd had kidney\n", + "stones and children, and the childbirth hurt less.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=soc.religion.christian\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.004, score -5.612)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.326\n", + " \n", + " <BIAS>\n", + "
\n", + " -5.286\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " as i recall from my bout with kidney stones, there isn't any\n", + "medication that can do anything about them except relieve the pain.\n", + "\n", + "either they pass, or they have to be broken up with sound, or they have\n", + "to be extracted surgically.\n", + "\n", + "when i was in, the x-ray tech happened to mention that she'd had kidney\n", + "stones and children, and the childbirth hurt less.\n", + "

\n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 91, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import eli5\n", + "from eli5.lime import TextExplainer\n", + "\n", + "te = TextExplainer(random_state=42)\n", + "te.fit(doc, pipe.predict_proba)\n", + "te.show_prediction(target_names=twenty_train.target_names)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Transformers ELI5 example\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install transformers\n", + "!pip install torch==1.9.1+cpu -f https://download.pytorch.org/whl/torch_stable.html" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading: 100%|██████████| 39.0/39.0 [00:00<00:00, 21.5kB/s]\n", + "Downloading: 100%|██████████| 953/953 [00:00<00:00, 492kB/s]\n", + "Downloading: 100%|██████████| 851k/851k [00:00<00:00, 933kB/s] \n", + "Downloading: 100%|██████████| 112/112 [00:00<00:00, 70.8kB/s]\n", + "Downloading: 100%|██████████| 638M/638M [00:50<00:00, 13.4MB/s] \n" + ] + } + ], + "source": [ + "from transformers import AutoModelForSequenceClassification\n", + "from transformers import AutoTokenizer\n", + "import numpy as np\n", + "import pandas as pd\n", + "from eli5.lime import TextExplainer\n", + "from typing import List\n", + "\n", + "\n", + "MODEL=\"nlptown/bert-base-multilingual-uncased-sentiment\"\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(MODEL)\n", + "model = AutoModelForSequenceClassification.from_pretrained(MODEL)" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "def model_adapter(texts: List[str]):\n", + " \n", + " all_scores = []\n", + "\n", + " for i in range(0, len(texts), 64):\n", + "\n", + " batch = texts[i:i+64]\n", + " \n", + " # use bert encoder to tokenize text \n", + " encoded_input = tokenizer(batch, return_tensors='pt', padding=True, truncation=True, max_length=model.config.max_position_embeddings-2)\n", + " # run the model\n", + " output = model(**encoded_input)\n", + " # by default this model gives raw logits rather than a nice smooth softmax \n", + " # so we apply it ourselves here\n", + " scores = output[0].softmax(1).detach().numpy()\n", + "\n", + " all_scores.extend(scores)\n", + "\n", + " return np.array(all_scores)" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/james/miniconda3/envs/pgesg/lib/python3.7/site-packages/sklearn/base.py:213: FutureWarning: From version 0.24, get_params will raise an AttributeError if a parameter cannot be retrieved as an instance attribute. 
Previously it would return None.\n", + " FutureWarning)\n" + ] + }, + { + "data": { + "text/plain": [ + "TextExplainer(char_based=False,\n", + " clf=SGDClassifier(alpha=0.001, loss='log', penalty='elasticnet',\n", + " random_state=RandomState(MT19937) at 0x7FE441F8F490),\n", + " random_state=42,\n", + " sampler=MaskingTextSamplers(random_state=RandomState(MT19937) at 0x7FE441F8F490,\n", + " sampler_params=None,\n", + " token_pattern='(?u)\\\\b\\\\w+\\\\b',\n", + " weights=array([0.7, 0.3])),\n", + " token_pattern='(?u)\\\\b\\\\w+\\\\b',\n", + " vec=CountVectorizer(ngram_range=(1, 2),\n", + " token_pattern='(?u)\\\\b\\\\w+\\\\b'))" + ] + }, + "execution_count": 84, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "te = TextExplainer(n_samples=5000, random_state=42)\n", + "te.fit(\"The restaurant was amazing, the quality of their food was exceptional. The waiters were so polite.\", model_adapter)" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + "\n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=1 star\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.001, score -6.834)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.468\n", + " \n", + " <BIAS>\n", + "
\n", + " -6.366\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " the restaurant was amazing, the quality of their food was exceptional. the waiters were so polite.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=2 stars\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.002, score -6.109)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.400\n", + " \n", + " <BIAS>\n", + "
\n", + " -5.709\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " the restaurant was amazing, the quality of their food was exceptional. the waiters were so polite.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=3 stars\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.013, score -4.219)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.397\n", + " \n", + " <BIAS>\n", + "
\n", + " -3.822\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " the restaurant was amazing, the quality of their food was exceptional. the waiters were so polite.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=4 stars\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.218, score -1.159)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " -0.539\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + " -0.621\n", + " \n", + " <BIAS>\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " the restaurant was amazing, the quality of their food was exceptional. the waiters were so polite.\n", + "

\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

\n", + " \n", + " \n", + " y=5 stars\n", + " \n", + "\n", + "\n", + " \n", + " (probability 0.766, score 1.648)\n", + "\n", + "top features\n", + "

\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + " Contribution?\n", + " Feature
\n", + " +2.169\n", + " \n", + " Highlighted in text (sum)\n", + "
\n", + " -0.521\n", + " \n", + " <BIAS>\n", + "
\n", + "\n", + " \n", + "\n", + "\n", + "\n", + "

\n", + " the restaurant was amazing, the quality of their food was exceptional. the waiters were so polite.\n", + "

\n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "Explanation(estimator=\"SGDClassifier(alpha=0.001, loss='log', penalty='elasticnet',\\n random_state=RandomState(MT19937) at 0x7FE441F8F5A0)\", description=None, error=None, method='linear model', is_regression=False, targets=[TargetExplanation(target='1 star', feature_weights=FeatureWeights(pos=[FeatureWeight(feature='were so', weight=0.4412142792788093, std=None, value=1.0), FeatureWeight(feature='food was', weight=0.26752107341372183, std=None, value=1.0), FeatureWeight(feature='restaurant was', weight=0.26656608799204584, std=None, value=1.0), FeatureWeight(feature='waiters were', weight=0.20671345225968554, std=None, value=1.0), FeatureWeight(feature='their food', weight=0.1922554265078954, std=None, value=1.0), FeatureWeight(feature='of their', weight=0.17089439768401465, std=None, value=1.0), FeatureWeight(feature='so polite', weight=0.16971856137145266, std=None, value=1.0), FeatureWeight(feature='the restaurant', weight=0.0939572854002312, std=None, value=1.0), FeatureWeight(feature='the quality', weight=0.062358167271686325, std=None, value=1.0), FeatureWeight(feature='was amazing', weight=0.04621754955053698, std=None, value=1.0), FeatureWeight(feature='quality of', weight=0.036982483291333176, std=None, value=1.0)], neg=[FeatureWeight(feature='exceptional', weight=-1.3083460073861424, std=None, value=1.0), FeatureWeight(feature='amazing', weight=-1.2489714603173, std=None, value=1.0), FeatureWeight(feature='quality', weight=-0.9791490505974166, std=None, value=1.0), FeatureWeight(feature='restaurant', weight=-0.7171043876100932, std=None, value=1.0), FeatureWeight(feature='food', weight=-0.6679038114428723, std=None, value=1.0), FeatureWeight(feature='their', weight=-0.5886911093430724, std=None, value=1.0), FeatureWeight(feature='waiters', weight=-0.562453762890923, std=None, value=1.0), FeatureWeight(feature='the', weight=-0.5612386460991848, std=None, value=3.0), FeatureWeight(feature='polite', weight=-0.4909223929633989, std=None, value=1.0), FeatureWeight(feature='so', weight=-0.4746325526619182, std=None, value=1.0), FeatureWeight(feature='', weight=-0.4681751172694132, std=None, value=1.0), FeatureWeight(feature='were', weight=-0.3105891970589915, std=None, value=1.0), FeatureWeight(feature='was', weight=-0.20789884677179024, std=None, value=2.0), FeatureWeight(feature='of', weight=-0.12298547156907864, std=None, value=1.0), FeatureWeight(feature='was exceptional', weight=-0.07927481966138608, std=None, value=1.0)], pos_remaining=0, neg_remaining=0), proba=0.0009819658649170206, score=-6.833937869621568, weighted_spans=WeightedSpans(docs_weighted_spans=[DocWeightedSpans(document='the restaurant was amazing, the quality of their food was exceptional. 
the waiters were so polite.', spans=[...], preserve_density=False, vec_name=None)], other=FeatureWeights(...)), heatmap=None), TargetExplanation(target='2 stars', feature_weights=FeatureWeights(...), proba=0.002025322078557236, score=-6.108865826275371, weighted_spans=WeightedSpans(...), heatmap=None), TargetExplanation(target='3 stars', feature_weights=FeatureWeights(...), proba=0.01323435654544038, score=-4.219398570676526, weighted_spans=WeightedSpans(...), heatmap=None), TargetExplanation(target='4 stars', feature_weights=FeatureWeights(...), proba=0.21802021364159152, score=-1.1593991392029817, weighted_spans=WeightedSpans(...), heatmap=None), TargetExplanation(target='5 stars', feature_weights=FeatureWeights(...), proba=0.7657381418694937, score=1.6481364378284238, weighted_spans=WeightedSpans(...), heatmap=None)], feature_importances=None, decision_tree=None, highlight_spaces=None, transition_features=None, image=None)"
+ ]
+ },
+ "execution_count": 85,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "te.explain_prediction(target_names=list(model.config.id2label.values()))"
+ ]
+ },
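+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The raw `Explanation` repr above is hard to read. As a minimal sketch (assuming an eli5 version that ships `format_as_text` and `format_as_dataframe`), the same object can be rendered as plain text or as a DataFrame of per-class feature weights:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch, not part of the original run: friendlier renderings of the\n",
+ "# Explanation object returned above.\n",
+ "import eli5\n",
+ "from eli5.formatters.as_dataframe import format_as_dataframe\n",
+ "\n",
+ "expl = te.explain_prediction(target_names=list(model.config.id2label.values()))\n",
+ "print(eli5.format_as_text(expl))  # console-friendly summary per class\n",
+ "format_as_dataframe(expl)  # one row per (target, feature) pair"
+ ]
+ },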
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div><table border=\"1\" class=\"dataframe\"><thead><tr style=\"text-align: right;\"><th></th><th>0</th><th>1</th><th>2</th></tr></thead><tbody><tr><th>0</th><td>{'label': 'LABEL_0', 'score': 0.00171170581597...</td><td>{'label': 'LABEL_1', 'score': 0.00916427746415...</td><td>{'label': 'LABEL_2', 'score': 0.9891239404678345}</td></tr></tbody></table></div>
" + ], + "text/plain": [ + " 0 ... 2\n", + "0 {'label': 'LABEL_0', 'score': 0.00171170581597... ... {'label': 'LABEL_2', 'score': 0.9891239404678345}\n", + "\n", + "[1 rows x 3 columns]" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pd.DataFrame(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.datasets import fetch_20newsgroups\n", + "\n", + "categories = ['alt.atheism', 'soc.religion.christian',\n", + " 'comp.graphics', 'sci.med']\n", + " \n", + "twenty_train = fetch_20newsgroups(\n", + " subset='train',\n", + " categories=categories,\n", + " shuffle=True,\n", + " random_state=42,\n", + " remove=('headers', 'footers'),\n", + ")\n", + "twenty_test = fetch_20newsgroups(\n", + " subset='test',\n", + " categories=categories,\n", + " shuffle=True,\n", + " random_state=42,\n", + " remove=('headers', 'footers'),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Remote API Explanation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "API_TOKEN=\"YOUR API KEY HERE\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "from scipy.special import softmax\n", + "import requests\n", + "\n", + "MODEL=\"nlptown/bert-base-multilingual-uncased-sentiment\"\n", + "\n", + "API_URL = f\"https://api-inference.huggingface.co/models/{MODEL}\"\n", + "headers = {\"Authorization\": f\"Bearer {API_TOKEN}\"}\n", + "\n", + "def query(payload):\n", + " data = json.dumps(payload)\n", + " response = requests.request(\"POST\", API_URL, headers=headers, data=data)\n", + " return json.loads(response.content.decode(\"utf-8\"))\n", + "\n", + "def result_to_df(result):\n", + " rows = []\n", + " \n", + " for result_row in result:\n", + " row = {}\n", + " for lbl_score in result_row:\n", + " row[lbl_score['label']] = lbl_score['score']\n", + "\n", + " rows.append(row)\n", + " \n", + " return pd.DataFrame(rows)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = query(\"This is very nice\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def remote_model_adapter(texts: List[str]):\n", + " \n", + " all_scores = []\n", + "\n", + " for text in texts:\n", + " \n", + " data = query(text)\n", + " all_scores.extend(result_to_df(data).values)\n", + "\n", + " return softmax(np.array(all_scores), axis=1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
<div><table border=\"1\" class=\"dataframe\"><thead><tr style=\"text-align: right;\"><th></th><th>1 star</th><th>2 stars</th><th>3 stars</th><th>4 stars</th><th>5 stars</th></tr></thead><tbody><tr><th>0</th><td>0.003129</td><td>0.003055</td><td>0.017689</td><td>0.194169</td><td>0.781958</td></tr></tbody></table></div>
" + ], + "text/plain": [ + " 1 star 2 stars 3 stars 4 stars 5 stars\n", + "0 0.003129 0.003055 0.017689 0.194169 0.781958" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "data = result_to_df(query('this is so much fun'))\n", + "data" + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + "\n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "

<div><p><b>y=1 star</b> (probability 0.001, score -7.683): Highlighted in text (sum) -7.608; &lt;BIAS&gt; -0.075</p>\n",
+ "<p><b>y=2 stars</b> (probability 0.021, score -3.995): Highlighted in text (sum) -3.689; &lt;BIAS&gt; -0.306</p>\n",
+ "<p><b>y=3 stars</b> (probability 0.858, score 0.965): Highlighted in text (sum) +1.079; &lt;BIAS&gt; -0.114</p>\n",
+ "<p><b>y=4 stars</b> (probability 0.016, score -4.281): Highlighted in text (sum) -3.987; &lt;BIAS&gt; -0.294</p>\n",
+ "<p><b>y=5 stars</b> (probability 0.104, score -2.343): Highlighted in text (sum) -2.170; &lt;BIAS&gt; -0.173</p>\n",
+ "<p>the restaurant was amazing, the quality of their food was exceptional. the waiters were so polite.</p></div>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "execution_count": 136,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "te = TextExplainer(n_samples=20, random_state=42)\n",
+ "te.fit(\"The restaurant was amazing, the quality of their food was exceptional. The waiters were so polite.\", remote_model_adapter)\n",
+ "te.show_prediction(target_names=list(data.columns))"
+ ]
+ },
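+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Every perturbed sample LIME generates becomes one remote call, which is why `n_samples` is kept so low above. As a minimal sketch (the `cached_model_adapter` helper below is an illustration, not part of the original run), responses can be memoised so duplicate perturbations don't hit the Inference API twice:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch of a hypothetical caching wrapper around the remote adapter:\n",
+ "# memoise per-text responses so LIME's repeated perturbations don't\n",
+ "# trigger duplicate Inference API calls.\n",
+ "response_cache = {}\n",
+ "\n",
+ "def cached_model_adapter(texts: List[str]):\n",
+ "    all_scores = []\n",
+ "    for text in texts:\n",
+ "        if text not in response_cache:\n",
+ "            response_cache[text] = result_to_df(query(text)).values\n",
+ "        all_scores.extend(response_cache[text])\n",
+ "    return softmax(np.array(all_scores), axis=1)"
+ ]
+ }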
+ ],
+ "metadata": {
+ "interpreter": {
+ "hash": "6f245f586ce379714bd6be03acc40f31e300ec6e2c664326cf81e22a8633b6cc"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.7.11 64-bit ('pgesg': conda)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.11"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/brainsteam/themes/hugo-ink b/brainsteam/themes/hugo-ink
index 44add7b..7525e6e 160000
--- a/brainsteam/themes/hugo-ink
+++ b/brainsteam/themes/hugo-ink
@@ -1 +1 @@
-Subproject commit 44add7b9d9cb2e062858cf1f58fa3cc23a01a258
+Subproject commit 7525e6e51ab7a9321c53b56d9f359cd4ca18ba13