
Simple MNL

import os

# Disable GPU use; this small model runs fine on CPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import sys

sys.path.append("../../")

import numpy as np

from choice_learn.models.simple_mnl import SimpleMNL
from choice_learn.data import ChoiceDataset
from choice_learn.datasets.base import load_heating

Let's recreate this tutorial by Yves Croissant for the mlogit R package.

It uses the Heating dataset, available in choice_learn.datasets, in which we try to predict which heating hardware a household will choose.

heating_df = load_heating(as_frame=True)
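The dataset comes in wide format: one row per household, with item-specific cost columns built from the 'ic.' and 'oc.' prefixes (e.g. ic.hp, oc.gc). A quick look, as a sketch:

print(heating_df.shape)
print(heating_df.columns.tolist())  # including depvar, income, agehed, rooms and the ic.*/oc.* columns
heating_df.head()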

shared_features_by_choice = ["income", "agehed", "rooms"]
choice = ["depvar"]
items_features_by_choice = ["ic.", "oc."]
items = ["hp", "gc", "gr", "ec", "er"]

choices = np.array([items.index(val) for val in heating_df[choice].to_numpy().ravel()])
shared_features_by_choice = heating_df[shared_features_by_choice].to_numpy().astype("float32")
items_features_by_choice = np.stack([heating_df[[feat + item for feat in items_features_by_choice]].to_numpy() for item in items], axis=1)
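As a sanity check, the arrays should have one row per choice situation, with items_features_by_choice of shape (n_choices, n_items, n_features):

print("choices:", choices.shape)                            # (n_choices,)
print("shared features:", shared_features_by_choice.shape)  # (n_choices, 3)
print("items features:", items_features_by_choice.shape)    # (n_choices, 5, 2): ic and oc per item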

The first part estimates a simple MNL, without intercept, from the 'ic' and 'oc' features. By default, SimpleMNL does not include any intercept, but you can also make it explicit with intercept=None.

dataset = ChoiceDataset(items_features_by_choice=items_features_by_choice,
                        choices=choices)
model = SimpleMNL(intercept=None)
history = model.fit(dataset, get_report=True, verbose=2)
print("Estimation Negative LogLikelihood:",
      model.evaluate(dataset) * len(dataset))
Estimation Negative LogLikelihood: tf.Tensor(1095.2418, shape=(), dtype=float32)
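As an additional check of the fitted model, we can look at the predicted choice probabilities. This is a sketch assuming the predict_probas method exposed by choice-learn models:

probas = model.predict_probas(dataset)
print(probas.shape)  # expected: (n_choices, n_items), each row summing to 1
print(probas[:2])    # probabilities for the first two choice situations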

Model analysis and comparison with R's mlogit package

model.report
   Coefficient Name          Coefficient Estimation  Std. Err     z_value  P(.>z)
0  Weights_items_features_0               -0.006232  0.000353  -17.665276     0.0
1  Weights_items_features_1               -0.004580  0.000322  -14.216597     0.0
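The display suggests model.report is a pandas DataFrame, in which case (an assumption worth checking against the library docs) the estimates can be pulled out directly for a side-by-side check with mlogit's output:

# Assuming model.report is a pandas DataFrame, as its display suggests
estimates = model.report["Coefficient Estimation"].to_numpy()
print(estimates)  # the ic and oc coefficients shown in the table above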

We reach very similar results. The second part models utility using the ic + oc/0.12 combination of the features. Here is how it can be done:

ratio_items_features = []
for case in range(items_features_by_choice.shape[0]):
    feat = []
    for item in range(items_features_by_choice.shape[1]):
        # Single feature per item: installation cost + operating cost / 0.12
        feat.append([items_features_by_choice[case, item, 0] + items_features_by_choice[case, item, 1] / 0.12])
    ratio_items_features.append(feat)
ratio_items_features = np.array(ratio_items_features)
ratio_items_features.shape
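Equivalently, the loop can be replaced by a vectorized expression over the (n_choices, n_items, 2) array; a sketch producing the same (n_choices, n_items, 1) result:

ratio_items_features = items_features_by_choice[..., :1] + items_features_by_choice[..., 1:] / 0.12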
ratio_dataset = ChoiceDataset(items_features_by_choice=ratio_items_features, choices=choices)
model = SimpleMNL()
history = model.fit(ratio_dataset, get_report=False)
print("Weights:", model.trainable_weights)
print("Estimation Negative LogLikelihood:", model.evaluate(ratio_dataset) * len(ratio_dataset))
Weights: [<tf.Variable 'Weights_items_features:0' shape=(1,) dtype=float32, numpy=array([-0.00071585], dtype=float32)>]
Estimation Negative LogLikelihood: tf.Tensor(1248.7051, shape=(), dtype=float32)

The constrained model reaches a higher negative log-likelihood (1248.7 vs. 1095.2), which is expected since it ties the two cost coefficients to a fixed ratio. Finally, to add item-wise intercepts for the last part, here is how it can be done:

model = SimpleMNL(intercept="item")
history = model.fit(dataset, get_report=True)
model.report
   Coefficient Name          Coefficient Estimation  Std. Err    z_value        P(.>z)
0  Weights_items_features_0               -0.001533  0.000621  -2.469419  1.353323e-02
1  Weights_items_features_1               -0.006996  0.001554  -4.501966  6.675720e-06
2  Intercept_0                             1.710970  0.226742   7.545891  0.000000e+00
3  Intercept_1                             0.308264  0.206592   1.492139  1.356628e-01
4  Intercept_2                             1.658846  0.448416   3.699350  2.161264e-04
5  Intercept_3                             1.853437  0.361952   5.120669  3.576279e-07
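Note that the report shows four intercepts for five items, which suggests one item acts as the reference alternative with its intercept fixed to zero. A hypothetical mapping, assuming the reference is the first item in the items list (an assumption; check the library docs to confirm which item is used):

# Hypothetical mapping, assuming items[0] ("hp") is the reference alternative
for i, item in enumerate(items[1:]):
    print(f"Intercept_{i} -> {item}")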