[DATALAD] Recorded changes

Lucas Gautheron 10 hours ago
parent commit 548702f84b

+ 59 - 0
code/models/blocks/human_annotations_simple.stan

@@ -0,0 +1,59 @@
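+// Expected adult speech rate (mu_pop_level[3] + mu_pop_level[4]), marginalizing over
+// sibling status: the no-sibling case (weight 1 - p_sib) is scaled by exp(beta_sib_adu/10).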
+real mu_adu = p_sib*(mu_pop_level[3]+mu_pop_level[4])
+    + (1-p_sib)*(mu_pop_level[3]+mu_pop_level[4])*exp(beta_sib_adu/10.0);
+
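+// Observation model for the annotated clips: latent per-speaker rates are
+// gamma-distributed (gamma(a, a/mu) has mean mu), and the observed counts are
+// Poisson given the latent rate times the scaled clip duration.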
+for (g in 1:n_rates) {
+    real chi_mu = mu_pop_level[1]*exp(
+        (alpha_dev+sigma_dev*child_dev_speech_age[speech_rate_child[g]])*speech_rate_age[g]/12.0
+        + ((speech_rate_child_level[speech_rate_child[g],2]+speech_rate_child_level[speech_rate_child[g],3])/mu_adu-1.0)*(beta_dev*speech_rate_age[g]/12.0/10.0)
+        + (beta_direct/10.0)*(sum(speech_rate[3:,g])/(speech_rate_child_level[speech_rate_child[g],2]+speech_rate_child_level[speech_rate_child[g],3])-1.0)
+    );
+    speech_rate[1,g] ~ gamma(
+        alpha_child_level[1],
+        alpha_child_level[1]/chi_mu
+    );
+
+    speech_rate[2:,g] ~ gamma(
+        alpha_child_level[2:],
+        (alpha_child_level[2:]./(speech_rate_child_level[speech_rate_child[g],:]')) //'
+    );
+    speech_rates[g,:] ~ poisson(speech_rate[:,g]*durations[g]*1000);
+}
+
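+// Child-level mean rates: when sibling status is observed (coded >= 0) the matching
+// population-level distribution is used directly; when it is missing, the sibling
+// indicator is marginalized out via the mixture in the else branch.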
+for (c in 1:n_speech_rate_children) {
+    if (speech_rate_child_siblings[c] >= 0) {
+        int distrib = child_siblings[c]==0?2:1;
+
+        speech_rate_child_level[c,1] ~ gamma(
+            alpha_pop_level[distrib,1],
+            (alpha_pop_level[distrib,1]/(mu_pop_level[1]*exp(
+                speech_rate_child_siblings[c]==0?beta_sib_och:0
+            )))
+        );
+
+        speech_rate_child_level[c,2:] ~ gamma(
+            alpha_pop_level[distrib,2:],
+            (alpha_pop_level[distrib,2:]./(mu_pop_level[2:]*exp(
+                speech_rate_child_siblings[c]==0?beta_sib_adu/10.0:0
+            )))
+        );
+    }
+    else {
+        vector[2] ll_child;
+        for (sib in 0:1) {
+            int distrib = sib==0?2:1;
+            
+            ll_child[distrib] = sib==0?log1m(p_sib):log(p_sib);
+            ll_child[distrib] += gamma_lpdf(speech_rate_child_level[c,1] | alpha_pop_level[distrib,1],
+                (alpha_pop_level[distrib,1]/(mu_pop_level[1]*exp(
+                    sib==0?beta_sib_och:0
+                )))
+            );
+            ll_child[distrib] += gamma_lpdf(speech_rate_child_level[c,2:] | alpha_pop_level[distrib,2:],
+                (alpha_pop_level[distrib,2:]./(mu_pop_level[2:]*exp(
+                    sib==0?beta_sib_adu/10.0:0
+                )))
+            );
+        }
+        target += log_sum_exp(ll_child);
+    }
+
+}
+
+child_dev_speech_age ~ normal(0, 1);
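
In the else branch above, a child's unknown sibling status is marginalized out of the
likelihood. Writing lambda_c for the child-level rates speech_rate_child_level[c] and
theta for the remaining parameters, the accumulated term is

\[
p(\lambda_c \mid \theta)
  = (1 - p_{\mathrm{sib}})\, p(\lambda_c \mid \theta, \mathrm{sib} = 0)
  + p_{\mathrm{sib}}\, p(\lambda_c \mid \theta, \mathrm{sib} = 1),
\]

computed on the log scale: ll_child holds log(1 - p_sib) or log(p_sib) plus the
corresponding gamma log-densities, and log_sum_exp(ll_child) is added to the target.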

+ 127 - 0
code/models/dev_poisson_simple.stan

@@ -0,0 +1,127 @@
+functions {
+    #include "blocks/confusion_model.stan"
+    #include "blocks/confusion_inverse_model.stan"
+    #include "blocks/behavior_model_truth.stan"
+}
+
+// TODO
+// use speech rates to set priors on truth_vocs
+data {
+    int<lower=1> n_classes; // number of classes
+
+    // analysis data block
+    int<lower=1> n_recs;
+    int<lower=1> n_children;
+
+    array[n_recs] int<lower=1> children;
+    array[n_recs] real<lower=1> age;
+    array[n_recs] int<lower=-1> siblings;
+    array[n_recs, n_classes] int<lower=0> vocs;
+    array[n_children] int<lower=1> corpus;
+
+    real<lower=0> recs_duration;
+
+    // speaker confusion data block
+    int<lower=1> n_clips;   // number of clips
+    int<lower=1> n_groups; // number of groups
+    int<lower=1> n_corpora;
+    array [n_clips] int group;
+    array [n_clips] int conf_corpus;
+    array [n_clips,n_classes] int<lower=0> algo_total; // algo vocs attributed to specific speakers
+    array [n_clips,n_classes] int<lower=0> truth_total;
+    array [n_clips] real<lower=0> clip_duration;
+    array [n_clips] real<lower=0> clip_age;
+
+    int<lower=0> n_validation;
+
+    // actual speech rates
+    int<lower=1> n_rates;
+    int<lower=1> n_speech_rate_children;
+
+    array [n_rates,n_classes] int<lower=0> speech_rates;
+    array [n_rates] int group_corpus;
+    array [n_rates] real<lower=0> durations;
+    array [n_rates] real<lower=0> speech_rate_age;
+    array [n_rates] int<lower=-1> speech_rate_siblings;
+    array [n_rates] int<lower=1,upper=n_speech_rate_children> speech_rate_child;
+
+    // parallel processing
+    int<lower=1> threads;
+}
+
+transformed data {
+    vector<lower=0>[n_groups] recording_age;
+    array[n_speech_rate_children] int<lower=1> speech_rate_child_corpus;
+
+    array[n_children] int<lower=-1> child_siblings;
+    array[n_speech_rate_children] int<lower=-1> speech_rate_child_siblings;
+    int no_siblings = 0;
+    int has_siblings = 0;
+
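+    // Derive per-group recording ages, per-child corpus and sibling status, and
+    // counts of children with/without siblings from the clip- and recording-level inputs.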
+    for (c in 1:n_clips) {
+        recording_age[group[c]] = clip_age[c];
+    }
+
+    for (k in 1:n_rates) {
+        speech_rate_child_corpus[speech_rate_child[k]] = group_corpus[k];
+    }
+
+    for (k in 1:n_recs) {
+        child_siblings[children[k]] = siblings[k];
+    }
+
+    for (c in 1:n_children) {
+        if (child_siblings[c] == 0) {
+            no_siblings += 1;
+        }
+        else if (child_siblings[c] > 0) {
+            has_siblings += 1;
+        }
+    }
+
+    for (k in 1:n_rates) {
+        speech_rate_child_siblings[speech_rate_child[k]] = speech_rate_siblings[k];
+    }
+}
+
+parameters {
+    matrix<lower=0>[n_children,n_classes-1] mu_child_level;
+    vector [n_children] child_dev_age;
+    matrix<lower=0> [n_recs, n_classes] truth_vocs;
+    array [n_recs] matrix<lower=0>[n_classes,n_classes] actual_confusion_baseline;
+
+    // confusion parameters
+    #include "blocks/confusion_model_parameters.stan"
+
+    // behavior model parameters
+    #include "blocks/behavior_model_parameters_simple.stan"
+}
+
+model {
+    // inverse confusion model
+    target += reduce_sum(
+       inverse_model_lpmf, children, 1,
+       n_recs, n_classes, recs_duration,
+       vocs, age,
+       truth_vocs, actual_confusion_baseline, mus, alphas//, mus_fp, etas_fp
+    );
+
+    // contribution of full recordings to the model of behavior
+    #include "blocks/behavior_observations_model_simple.stan"
+
+    target += reduce_sum(
+        confusion_model_lpmf, group, 1,
+        n_classes,
+        algo_total, truth_total, clip_duration, clip_age, lambda
+    );
+
+    // priors on the nuisance parameters of the confusion model
+    #include "blocks/confusion_model_priors.stan"
+
+    // priors on the hierarchical model of speech behavior
+    #include "blocks/behavior_model_priors_simple.stan"
+}
+
+generated quantities {
+    #include "blocks/behavior_model_generated.stan"
+}
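
The model above is assembled from #include blocks and parallelizes its likelihood with
reduce_sum, so it must be compiled with threading support and with an include path that
resolves blocks/. Below is a minimal sketch of one way to do this with cmdstanpy; the
Stan file path comes from the diff, while the use of cmdstanpy, the data file name, and
the sampler settings are assumptions and not part of this commit.

# Sketch only: compile and sample dev_poisson_simple.stan with within-chain threading.
from cmdstanpy import CmdStanModel

model = CmdStanModel(
    stan_file="code/models/dev_poisson_simple.stan",
    stanc_options={"include-paths": ["code/models"]},  # so stanc can resolve the blocks/ includes
    cpp_options={"STAN_THREADS": True},                 # required for reduce_sum threading
)

# "speech_data.json" is a placeholder: a JSON file holding the entries of the data
# block (n_classes, n_recs, vocs, speech_rates, threads, ...), assembled elsewhere.
fit = model.sample(
    data="speech_data.json",
    chains=4,
    parallel_chains=4,
    threads_per_chain=4,  # within-chain threads used by reduce_sum
)
print(fit.summary())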

+ 99 - 0
code/models/human_simple.stan

@@ -0,0 +1,99 @@
+// TODO
+// use speech rates to set priors on truth_vocs
+data {
+    int<lower=1> n_classes; // number of classes
+
+    // analysis data block
+    int<lower=1> n_recs;
+    int<lower=1> n_children;
+
+    array[n_recs] int<lower=1> children;
+    array[n_recs] real<lower=1> age;
+    array[n_recs] int<lower=-1> siblings;
+    array[n_recs, n_classes] int<lower=0> vocs;
+    array[n_children] int<lower=1> corpus;
+
+    real<lower=0> recs_duration;
+
+    // speaker confusion data block
+    int<lower=1> n_clips;   // number of clips
+    int<lower=1> n_groups; // number of groups
+    int<lower=1> n_corpora;
+    array [n_clips] int group;
+    array [n_clips] int conf_corpus;
+    array [n_clips,n_classes] int<lower=0> algo_total; // algo vocs attributed to specific speakers
+    array [n_clips,n_classes] int<lower=0> truth_total;
+    array [n_clips] real<lower=0> clip_duration;
+    array [n_clips] real<lower=0> clip_age;
+
+    int<lower=0> n_validation;
+
+    // actual speech rates
+    int<lower=1> n_rates;
+    int<lower=1> n_speech_rate_children;
+
+    array [n_rates,n_classes] int<lower=0> speech_rates;
+    array [n_rates] int group_corpus;
+    array [n_rates] real<lower=0> durations;
+    array [n_rates] real<lower=0> speech_rate_age;
+    array [n_rates] int<lower=-1> speech_rate_siblings;
+    array [n_rates] int<lower=1,upper=n_speech_rate_children> speech_rate_child;
+
+    // parallel processing
+    int<lower=1> threads;
+}
+
+transformed data {
+    vector<lower=0>[n_groups] recording_age;
+    array[n_speech_rate_children] int<lower=1> speech_rate_child_corpus;
+
+    array[n_children] int<lower=-1> child_siblings;
+    array[n_speech_rate_children] int<lower=-1> speech_rate_child_siblings;
+    int no_siblings = 0;
+    int has_siblings = 0;
+
+    for (c in 1:n_clips) {
+        recording_age[group[c]] = clip_age[c];
+    }
+
+    for (k in 1:n_rates) {
+        speech_rate_child_corpus[speech_rate_child[k]] = group_corpus[k];
+    }
+
+    for (k in 1:n_recs) {
+        child_siblings[children[k]] = siblings[k];
+    }
+
+    for (c in 1:n_children) {
+        if (child_siblings[c] == 0) {
+            no_siblings += 1;
+        }
+        else if (child_siblings[c] > 0) {
+            has_siblings += 1;
+        }
+    }
+
+    for (k in 1:n_rates) {
+        speech_rate_child_siblings[speech_rate_child[k]] = speech_rate_siblings[k];
+    }
+}
+
+parameters {
+    // behavior model parameters
+    #include "blocks/behavior_model_parameters_simple.stan"
+
+    // parameters specific to human annotations
+    #include "blocks/human_annotations_parameters.stan"
+}
+
+model {
+    // priors on the hierarchical model of speech behavior
+    #include "blocks/behavior_model_priors_simple.stan"
+    
+    // human annotations contribution
+    #include "blocks/human_annotations_simple.stan"
+}
+
+generated quantities {
+    #include "blocks/behavior_model_generated.stan"
+}