path: root/Cargo.toml
Age         Commit message (author: Clement Tsang throughout)
2021-01-30  uptick: 0.5.7 (#399)
2021-01-12  bug: Fix missing sorting arrow for non-% mem (#389)
2020-12-21  refactor: Cut out sysinfo from Linux builds (#368)
2020-12-18  other: Turn off debug and turn on LTO in release profile again (#367)
2020-12-17  uptick: 0.5.6 (#362)
2020-12-17  refactor: re-use heim for ARM targets if possible (#360)
2020-12-16  deps: Update various dependencies (#358)
2020-12-15  deps: Update to heim 0.1 (#354)
2020-12-15  uptick: 0.5.5 (#356)  [tag: 0.5.5]
2020-12-12  refactor: More minor optimization changes (#353)
2020-12-11  refactor: Use feature flags to avoid building with fern and log (#351)
2020-12-11  refactor: Another small optimization pass (#350)
2020-12-10  uptick: 0.5.4 (#348)  [tag: 0.5.4]
2020-12-10  bug: Fix some performance regressions (#344)
2020-12-03  refactor: Clean up some parts of data harvesting (#336)
2020-11-30  refactor: Simplify data harvesting (#335)
2020-11-28  deps: update dependencies (#334)
2020-11-26  uptick: 0.5.3  [tag: 0.5.3]
2020-11-25  uptick: 0.5.2
2020-11-22  other: Switch to once_cell (#324)
2020-11-22  ci: uptick to 0.5.1, fix ci
2020-11-20  docs: Update README and changelog for 0.5.0
2020-11-15  feature: default colour schemes (#296)
2020-11-02  deps: update anyhow, crossterm, futures, regex, thiserror, toml (#293)
2020-11-02  deps: Bump dependencies that require uom <0.29.0 (#274)
2020-11-01  deps: switch from dirs to dirs-next (#285)
2020-10-02  other: more traces to debug, update some deps (#264)
2020-09-28  feature: Add persistent search settings (#257)
2020-09-27  bug: fix chart and data point overlap (#256)
2020-09-26  refactor: tui-rs 0.11.0 refactor (#253)
2020-09-18  bug: Fix for index oob caused by terminal size mismatch (#238)
2020-09-06  feature: Adds tree view (#223)
2020-09-02  other: aarch64 support (#217)
2020-08-31  refactor: Update error messages w/ anyhow and thiserror (#216)
2020-08-31  other: Add autocomplete file generation (#213)
2020-08-29  deps: Update battery, sysinfo
2020-08-28  feature: Adaptive network widget (#206)
2020-08-26  uptick: 0.4.7  [tag: 0.4.7]
2020-08-25  uptick: 0.4.6 (#200)
2020-08-23  ci: switch to prepush rather than precommit
2020-08-23  ci: set up clippy pre-hook
2020-08-23  ci: Move wix output naming to Cargo
2020-08-22  ci: Add winget template generation (#199)
2020-08-21  feature: Add hook to properly clean up in the case of a kill call
2020-08-21  refactor: Remove ps calls
2020-08-19  refactor: Refactor code, add new tests
2020-08-17  feature: Add approx. total mem as an option for processes and basic mem
2020-08-16  refactor: Revert tui upgrade to 0.10
2020-08-15  feature: Allow sorting by any column
2020-08-11  deps: Update dependencies, drop MSRV
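
The remainder of the page is a C excerpt that appears to come from the Linux
kernel's DLM request-queue code (fs/dlm/requestqueue.c). The functions store
each saved message in a queue entry whose definition is not part of this
excerpt; the sketch below reconstructs it from the fields the code accesses
(e->list, e->nodeid, e->request), so treat the exact layout as an assumption:

        /* Sketch of the queue-entry type assumed by the functions below;
         * reconstructed from the fields they access, not copied from the
         * original header. */
        struct rq_entry {
                struct list_head list;        /* links entry into ls->ls_requestqueue */
                int nodeid;                   /* node the saved message arrived from */
                struct dlm_message request;   /* copy of the message; the variable-length
                                                 payload follows the struct inline, which
                                                 is why kmalloc() adds "length" below */
        };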
lass="o">= ms->m_header.h_length - sizeof(struct dlm_message); e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS); if (!e) { log_print("dlm_add_requestqueue: out of memory len %d", length); return; } e->nodeid = nodeid; memcpy(&e->request, ms, ms->m_header.h_length); mutex_lock(&ls->ls_requestqueue_mutex); list_add_tail(&e->list, &ls->ls_requestqueue); mutex_unlock(&ls->ls_requestqueue_mutex); } /* * Called by dlm_recoverd to process normal messages saved while recovery was * happening. Normal locking has been enabled before this is called. dlm_recv * upon receiving a message, will wait for all saved messages to be drained * here before processing the message it got. If a new dlm_ls_stop() arrives * while we're processing these saved messages, it may block trying to suspend * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue. In that * case, we don't abort since locking_stopped is still 0. If dlm_recv is not * waiting for us, then this processing may be aborted due to locking_stopped. */ int dlm_process_requestqueue(struct dlm_ls *ls) { struct rq_entry *e; int error = 0; mutex_lock(&ls->ls_requestqueue_mutex); for (;;) { if (list_empty(&ls->ls_requestqueue)) { mutex_unlock(&ls->ls_requestqueue_mutex); error = 0; break; } e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); mutex_unlock(&ls->ls_requestqueue_mutex); dlm_receive_message_saved(ls, &e->request); mutex_lock(&ls->ls_requestqueue_mutex); list_del(&e->list); kfree(e); if (dlm_locking_stopped(ls)) { log_debug(ls, "process_requestqueue abort running"); mutex_unlock(&ls->ls_requestqueue_mutex); error = -EINTR; break; } schedule(); } return error; } /* * After recovery is done, locking is resumed and dlm_recoverd takes all the * saved requests and processes them as they would have been by dlm_recv. At * the same time, dlm_recv will start receiving new requests from remote nodes. * We want to delay dlm_recv processing new requests until dlm_recoverd has * finished processing the old saved requests. We don't check for locking * stopped here because dlm_ls_stop won't stop locking until it's suspended us * (dlm_recv). 
*/ void dlm_wait_requestqueue(struct dlm_ls *ls) { for (;;) { mutex_lock(&ls->ls_requestqueue_mutex); if (list_empty(&ls->ls_requestqueue)) break; mutex_unlock(&ls->ls_requestqueue_mutex); schedule(); } mutex_unlock(&ls->ls_requestqueue_mutex); } static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) { uint32_t type = ms->m_type; /* the ls is being cleaned up and freed by release_lockspace */ if (!ls->ls_count) return 1; if (dlm_is_removed(ls, nodeid)) return 1; /* directory operations are always purged because the directory is always rebuilt during recovery and the lookups resent */ if (type == DLM_MSG_REMOVE || type == DLM_MSG_LOOKUP || type == DLM_MSG_LOOKUP_REPLY) return 1; if (!dlm_no_directory(ls)) return 0; /* with no directory, the master is likely to change as a part of recovery; requests to/from the defunct master need to be purged */ switch (type) { case DLM_MSG_REQUEST: case DLM_MSG_CONVERT: case DLM_MSG_UNLOCK: case DLM_MSG_CANCEL: /* we're no longer the master of this resource, the sender will resend to the new master (see waiter_needs_recovery) */ if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid()) return 1; break; case DLM_MSG_REQUEST_REPLY: case DLM_MSG_CONVERT_REPLY: case DLM_MSG_UNLOCK_REPLY: case DLM_MSG_CANCEL_REPLY: case DLM_MSG_GRANT: /* this reply is from the former master of the resource, we'll resend to the new master if needed */ if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid) return 1; break; } return 0; } void dlm_purge_requestqueue(struct dlm_ls *ls) { struct dlm_message *ms; struct rq_entry *e, *safe; mutex_lock(&ls->ls_requestqueue_mutex); list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ms = &e->request; if (purge_request(ls, ms, e->nodeid)) { list_del(&e->list); kfree(e); } } mutex_unlock(&ls->ls_requestqueue_mutex); }
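
The comments above describe a handoff between dlm_recoverd (replaying messages
saved during recovery) and dlm_recv (holding new messages until the backlog
drains). A minimal sketch of that interplay follows; recoverd_resume() and
recv_dispatch() are hypothetical illustrative callers, not the kernel's actual
entry points:

        /* Hypothetical callers illustrating the drain protocol described in
         * the comments above; these names are not from the original file. */

        /* dlm_recoverd side: after recovery re-enables locking, replay the
         * backlog saved by dlm_add_requestqueue(). */
        static void recoverd_resume(struct dlm_ls *ls)
        {
                if (dlm_process_requestqueue(ls) == -EINTR)
                        return;  /* a new dlm_ls_stop() interrupted the replay;
                                    the next recovery cycle picks it up again */
        }

        /* dlm_recv side: save messages while locking is stopped; otherwise
         * wait for the saved backlog to drain so ordering is preserved. */
        static void recv_dispatch(struct dlm_ls *ls, int nodeid,
                                  struct dlm_message *ms)
        {
                if (dlm_locking_stopped(ls)) {
                        /* recovery in progress: queue for later replay */
                        dlm_add_requestqueue(ls, nodeid, ms);
                        return;
                }
                /* let dlm_recoverd finish replaying saved messages first */
                dlm_wait_requestqueue(ls);
                /* ...then hand ms to the normal receive path... */
        }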