functions {
  // Shared helper functions, factored out into reusable include files:
  // - confusion_model.stan: presumably defines confusion_model_lpmf,
  //   called via reduce_sum in the model block -- confirm in blocks/
  // - confusion_inverse_model.stan: presumably defines inverse_model_lpmf,
  //   called via reduce_sum in the model block -- confirm in blocks/
  // - behavior_model_truth.stan: behavior-model helpers (contents not
  //   visible from this file)
  #include "blocks/confusion_model.stan"
  #include "blocks/confusion_inverse_model.stan"
  #include "blocks/behavior_model_truth.stan"
}
// TODO: use the observed speech rates to set priors on truth_vocs
data {
  int<lower=1> n_classes; // number of speaker classes

  // analysis data block: automated counts over full recordings
  int<lower=1> n_recs;     // number of recordings
  int<lower=1> n_children; // number of children
  array[n_recs] int<lower=1> children;  // child index of each recording
  array[n_recs] real<lower=1> age;      // child age at recording time (units not shown here -- confirm)
  array[n_recs] int<lower=-1> siblings; // sibling count; -1 presumably marks missing (negatives are skipped when tallying in transformed data)
  array[n_recs, n_classes] int<lower=0> vocs; // vocalization counts per recording x class
  array[n_children] int<lower=1> corpus;      // corpus index of each child
  real<lower=0> recs_duration; // duration shared by all recordings

  // speaker confusion data block: clips with both algorithmic and human annotation
  int<lower=1> n_clips;   // number of clips
  int<lower=1> n_groups;  // number of groups
  int<lower=1> n_corpora; // number of corpora
  array[n_clips] int group;       // group index of each clip
  array[n_clips] int conf_corpus; // corpus index of each clip
  array[n_clips, n_classes] int<lower=0> algo_total;  // algo vocs attributed to specific speakers
  array[n_clips, n_classes] int<lower=0> truth_total; // human-annotated vocs per clip x class
  array[n_clips] real<lower=0> clip_duration; // clip durations
  array[n_clips] real<lower=0> clip_age;      // child age associated with each clip
  int<lower=0> n_validation; // number of validation clips -- not referenced in the blocks visible here; presumably used by an include

  // actual speech rates
  int<lower=1> n_rates;                // number of speech-rate observations
  int<lower=1> n_speech_rate_children; // number of children with speech-rate data
  array[n_rates, n_classes] int<lower=0> speech_rates; // counts per observation x class
  array[n_rates] int group_corpus;           // corpus index of each observation
  array[n_rates] real<lower=0> durations;    // observation durations
  array[n_rates] real<lower=0> speech_rate_age; // child age per observation
  array[n_rates] int<lower=-1> speech_rate_siblings; // sibling count; -1 presumably marks missing
  array[n_rates] int<lower=1, upper=n_speech_rate_children> speech_rate_child; // child index per observation

  // parallel processing
  int<lower=1> threads; // thread count; used to derive the reduce_sum grainsize in the model block
}
transformed data {
  // Age looked up per group, taken from the clips belonging to that group.
  vector<lower=0>[n_groups] recording_age;
  // Corpus and sibling information re-indexed by speech-rate child.
  array[n_speech_rate_children] int<lower=1> speech_rate_child_corpus;
  array[n_children] int<lower=-1> child_siblings;
  array[n_speech_rate_children] int<lower=-1> speech_rate_child_siblings;
  // Tallies of children with / without siblings; children whose sibling
  // count is negative (missing) are counted in neither.
  int no_siblings = 0;
  int has_siblings = 0;

  // Each clip stamps its age onto its group (if a group has several clips,
  // the last one seen wins).
  for (clip in 1:n_clips) {
    recording_age[group[clip]] = clip_age[clip];
  }

  // Scatter per-observation corpus and sibling data onto the child index,
  // in a single pass over the speech-rate observations.
  for (r in 1:n_rates) {
    speech_rate_child_corpus[speech_rate_child[r]] = group_corpus[r];
    speech_rate_child_siblings[speech_rate_child[r]] = speech_rate_siblings[r];
  }

  // Per-child sibling count, taken from that child's recordings.
  for (rec in 1:n_recs) {
    child_siblings[children[rec]] = siblings[rec];
  }

  // Count children with and without siblings.
  for (child in 1:n_children) {
    if (child_siblings[child] > 0) {
      has_siblings += 1;
    } else if (child_siblings[child] == 0) {
      no_siblings += 1;
    }
  }
}
parameters {
  // Per-child mean rates for the first n_classes-1 classes; the remaining
  // class is presumably handled by one of the included parameter blocks --
  // confirm in blocks/.
  matrix<lower=0>[n_children, n_classes-1] mu_child_level;
  // Per-child age deviation (unconstrained).
  vector[n_children] child_dev_age;
  // Latent "true" vocalization counts per recording x class.
  matrix<lower=0>[n_recs, n_classes] truth_vocs;
  // Per-recording speaker-confusion matrices (class x class, non-negative).
  array[n_recs] matrix<lower=0>[n_classes, n_classes] actual_confusion_baseline;
  // confusion parameters
  #include "blocks/confusion_model_parameters.stan"
  // behavior model parameters
  #include "blocks/behavior_model_parameters.stan"
  // parameters specific to human annotations
  #include "blocks/human_annotations_parameters.stan"
}
model {
  // Inverse confusion model: links the observed algorithm counts (vocs) to
  // the latent truth_vocs through the per-recording confusion matrices.
  // Grainsize 1 lets the reduce_sum scheduler choose chunk sizes
  // automatically (per the Stan reduce_sum documentation).
  // mus / alphas come from the included parameter blocks -- not visible here.
  target += reduce_sum(
    inverse_model_lpmf, children, 1,
    n_recs, n_classes, recs_duration,
    vocs, age,
    truth_vocs, actual_confusion_baseline, mus, alphas//, mus_fp, alphas_fp
  );
  // contribution of full recordings to the model of behavior
  #include "blocks/behavior_observations_model.stan"
  // Confusion model fitted on the annotated clips. Explicit grainsize
  // n_clips %/% (threads*4) (integer division) splits the clips into
  // roughly four chunks per thread.
  // lambda comes from the included parameter blocks -- not visible here.
  target += reduce_sum(
    confusion_model_lpmf, group, n_clips%/%(threads*4),
    n_classes,
    algo_total, truth_total, clip_duration, clip_age,
    lambda//, lambda_fp
  );
  // priors on the nuisance parameters of the confusion model
  #include "blocks/confusion_model_priors.stan"
  // priors on the hierarchical model of speech behavior
  #include "blocks/behavior_model_priors.stan"
  // human annotations contribution
  #include "blocks/human_annotations.stan"
}
|