/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rt_set_overload(rq);
                        rq->rt.overloaded = 1;
                }
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
                struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

                enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}
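/*
 * Without CONFIG_RT_GROUP_SCHED there is a single rt_rq per runqueue,
 * funded by the global def_rt_bandwidth. As a worked example, with the
 * usual defaults (kernel.sched_rt_period_us = 1000000,
 * kernel.sched_rt_runtime_us = 950000) each CPU may spend at most 950ms
 * of every 1s period on realtime tasks before they are throttled,
 * though do_balance_runtime() below can shift runtime between CPUs.
 */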
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpus_weight(rd->span);

        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu_mask_nr(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                spin_lock(&iter->rt_runtime_lock);
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                spin_unlock(&iter->rt_runtime_lock);
        }
        spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}

static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_leaf_rt_rq(rt_rq, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                spin_lock(&rt_b->rt_runtime_lock);
                spin_lock(&rt_rq->rt_runtime_lock);
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                spin_unlock(&rt_rq->rt_runtime_lock);

                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                for_each_cpu_mask(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                spin_lock(&rt_rq->rt_runtime_lock);
                BUG_ON(want);
balanced:
                rt_rq->rt_runtime = RUNTIME_INF;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void disable_runtime(struct rq *rq)
{
        unsigned long flags;

        spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        spin_unlock_irqrestore(&rq->lock, flags);
}
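/*
 * __disable_runtime() above is the path taken when a runqueue goes
 * offline: it either reclaims runtime this rt_rq lent to its siblings
 * (want > 0) or hands back what it borrowed (want < 0), keeping the
 * root-domain total balanced, and then lets the departing rt_rq run
 * unconstrained (RUNTIME_INF). __enable_runtime() below is the matching
 * online path, resetting every leaf rt_rq to its bandwidth defaults.
 */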
static void __enable_runtime(struct rq *rq)
{
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_leaf_rt_rq(rt_rq, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                spin_lock(&rt_b->rt_runtime_lock);
                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void enable_runtime(struct rq *rq)
{
        unsigned long flags;

        spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1;
        cpumask_t span;

        if (rt_b->rt_runtime == RUNTIME_INF)
                return 1;

        span = sched_rt_period_mask();
        for_each_cpu_mask(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running)
                        idle = 0;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                spin_unlock(&rq->lock);
        }

        return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio;
#endif

        return rt_task_of(rt_se)->prio;
}
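/*
 * A worked example of the replenishment in do_sched_rt_period_timer()
 * above, assuming the usual 950ms runtime per 1s period and a single
 * overrun: a throttled rt_rq that accrued rt_time = 1000ms has
 * min(1000ms, 950ms) deducted, leaving rt_time = 50ms; since 50ms is
 * below the 950ms runtime, the rt_rq is unthrottled and re-enqueued
 * via sched_rt_rq_enqueue().
 */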
<?php $TRANSLATIONS = array(
"News" => "Uutiset",
"Can not add feed: Exists already" => "Syötettä ei voi lisätä: se on jo olemassa",
"Can not add feed: URL does not exist or has invalid xml" => "Syötettä ei voi lisätä: joko osoitetta ei ole olemassa tai siinä on virheellistä XML:ää",
"Articles without feed" => "Vailla syötettä olevat artikkelit",
"Can not add folder: Exists already" => "Kansiota ei voi lisätä: se on jo olemassa",
" " => " ",
"_" => "_",
"status" => "tila",
"Undo deletion of %s" => "Kumoa poisto: %s",
"Add Website" => "Lisää verkkosivusto",
"Error: address exists already!" => "Virhe: osoite on jo olemassa!",
"Error: folder exists already" => "Virhe: kansio on jo olemassa",
"Address" => "Osoite",
"Add" => "Lisää",
"New folder" => "Uusi kansio",
"Folder" => "Kansio",
"Choose folder" => "Valitse kansio",
"Folder name" => "Kansion nimi",
"Back to folder selection" => "Takaisin kansion valintaan",
"Create folder" => "Luo kansio",
"Starred" => "Suositut",
"Unread articles" => "Lukemattomat artikkelit",
"All articles" => "Kaikki artikkelit",
"Mark read" => "Merkitse luetuksi",
"Welcome to the ownCloud News app!" => "Tervetuloa ownCloudin syötesovellukseen!",
"read on website" => "lue verkkosivustolla",
"star" => "tähti",
"from" => "Syöte:",
"by" => " Kirjoittaja:",
"Download" => "Lataa",
"Keep unread" => "Pidä lukemattomana",
"Cancel" => "Peru",
"Save" => "Tallenna",
"Delete website" => "Poista verkkosivusto",
"Rename feed" => "Nimeä syöte uudelleen",
"Collapse" => "Kutista näkymää",
"Delete folder" => "Poista kansio",
"Rename folder" => "Muuta kansion nimeä",
"Settings" => "Asetukset",
"Use compact view" => "Käytä tiivistä näkymää",
"Subscriptions (OPML)" => "Tilaukset (OPML)",
"Import" => "Tuo",
"Export" => "Vie",
"Error when importing: file does not contain valid OPML" => "Virhe tuotaessa: tiedostossa ei ole kelvollista OPML:ää",
"Unread/Starred Articles" => "Lukemattomat/tähdellä varustetut artikkelit",
"Error when importing: file does not contain valid JSON" => "Virhe tuotaessa: tiedosto ei sisällä kelvollista JSONia",
"Show read articles" => "Näytä luetut artikkelit",
"Hide read articles" => "Piilota luetut artikkelit"
);