For the record, here’s the code that solved the issue:
import numpy as np
import pymc as pm
import aesara.tensor as at
# Assumed source of softmax; on older Aesara/Theano it lives in at.nnet instead
from aesara.tensor.special import softmax

# num_items, num_respondents, winloss_size, ind_list, choicedata and win_loss
# are defined earlier (data preparation).
basic_model = pm.Model()
with basic_model:
    # Priors for the unknown model parameters (population-level item utilities)
    alphas = pm.Normal("alphas", mu=0, sigma=1, shape=num_items)

    # Prior on the standard deviations fed to the LKJ prior. Other choices tried:
    # pm.Exponential.dist(100.0), pm.HalfNormal.dist(10),
    # pm.ChiSquared.dist(nu=1, shape=num_items), pm.HalfCauchy.dist(beta=1, shape=num_items).
    # A positive-support distribution (e.g. HalfNormal) is the more usual choice here.
    sd_distrib = pm.Normal.dist(mu=1, sigma=10, shape=num_items)

    # Cholesky factor of the covariance matrix, with an LKJ prior on the correlations
    L_sigma, corr, stds = pm.LKJCholeskyCov(
        "L_sigma", eta=1, n=num_items, sd_dist=sd_distrib,
        compute_corr=True, store_in_trace=False,
    )
    cov = pm.Deterministic("cov", L_sigma.dot(L_sigma.T))

    # Standardized respondent-level effects (non-centered parameterization)
    betas_init = pm.MvNormal(
        "betas_init", mu=np.zeros(num_items), cov=np.eye(num_items),
        shape=(num_respondents, num_items),
    )
    # Custom log-likelihood: per-respondent sum of the log-probabilities of the
    # observed best/worst choices (effects-coded multinomial logit).
    def logp(win_loss, alphas, betas_init, L_sigma):
        # The observed matrix stacks the "win" rows on top of the "loss" rows
        win = win_loss[:winloss_size]
        loss = win_loss[winloss_size:]
        # Respondent-level betas centered on alphas; the tile/reshape is
        # equivalent to broadcasting alphas across the respondent rows
        betas = at.reshape(at.tile(alphas.T, num_respondents), (num_respondents, num_items)) + at.dot(betas_init, L_sigma)
        # Effects coding: the omitted item's utility is minus the sum of the others
        betas_effects_coded = at.concatenate([betas, at.reshape(-at.sum(betas, axis=1), (num_respondents, 1))], axis=1)
        winners = at.sum(betas_effects_coded[ind_list] * win, axis=1)
        losers = at.sum(betas_effects_coded[ind_list] * loss, axis=1)
        shp = choicedata.shape[0]
        mat_tmp = at.concatenate([at.reshape(winners, (shp, 1)), at.reshape(losers, (shp, 1))], axis=1)
        prob_tmp = softmax(mat_tmp, axis=1)  # row-wise: [P(win), P(loss)] per comparison
        log_prob_tmp = at.log(prob_tmp)
        # bincount needs 1-D weights: keep the log-probability of the winning column
        # and sum it per respondent via ind_list
        return at.reshape(at.bincount(ind_list, weights=log_prob_tmp[:, 0]), (num_respondents, 1))
    likelihood = pm.DensityDist(
        "likelihood",
        alphas, betas_init, L_sigma,
        logp=logp,
        observed=win_loss,
    )
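
For completeness, here is a minimal sketch of how the model might then be sampled and inspected, assuming the usual PyMC v4 workflow (the draws/tune/target_accept values are illustrative, not tuned):

with basic_model:
    # NUTS sampling; a slightly higher target_accept often helps
    # hierarchical covariance models avoid divergences
    idata = pm.sample(draws=1000, tune=1000, target_accept=0.9)

import arviz as az
# Posterior summary for the population-level item utilities
az.summary(idata, var_names=["alphas"])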