        unsigned int            group_weight;
        struct sched_group_capacity *sgc;
        int                     asym_prefer_cpu;        /* CPU of highest priority in group */
+       int                     flags;                  /* Flags of the child scheduling domain */
 
        /*
         * The CPUs this group covers.
 
                tmp = sd;
                sd = sd->parent;
                destroy_sched_domain(tmp);
-               if (sd)
+               if (sd) {
+                       struct sched_group *sg = sd->groups;
+
+                       /*
+                        * sched groups hold the flags of the child sched
+                        * domain for convenience. Clear such flags since
+                        * the child is being destroyed.
+                        */
+                       do {
+                               sg->flags = 0;
+                               sg = sg->next;
+                       } while (sg != sd->groups);
+
                        sd->child = NULL;
+               }
        }
 
        for (tmp = sd; tmp; tmp = tmp->parent)
                return NULL;
 
        sg_span = sched_group_span(sg);
-       if (sd->child)
+       if (sd->child) {
                cpumask_copy(sg_span, sched_domain_span(sd->child));
-       else
+               sg->flags = sd->child->flags;
+       } else {
                cpumask_copy(sg_span, sched_domain_span(sd));
+       }
 
        atomic_inc(&sg->ref);
        return sg;
        if (child) {
                cpumask_copy(sched_group_span(sg), sched_domain_span(child));
                cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+               sg->flags = child->flags;
        } else {
                cpumask_set_cpu(cpu, sched_group_span(sg));
                cpumask_set_cpu(cpu, group_balance_mask(sg));