80 | // so manually allow this
81 | #[allow(clippy::mutex_atomic)]
82 | let kill_switch = Arc::new(Mutex::new(false));
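For context, `clippy::mutex_atomic` suggests replacing a mutex around a plain flag with an atomic when nothing ever waits on the flag. A minimal sketch of that suggested alternative, reusing the `kill_switch` name purely for illustration (this is not code from the repository above):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // An AtomicBool replaces Mutex<bool> when no Condvar ever waits on the flag.
    let kill_switch = Arc::new(AtomicBool::new(false));
    let worker_switch = Arc::clone(&kill_switch);

    let worker = thread::spawn(move || {
        // Spin until the switch is flipped; a real worker would do work here.
        while !worker_switch.load(Ordering::Relaxed) {
            thread::yield_now();
        }
    });

    kill_switch.store(true, Ordering::Relaxed);
    worker.join().unwrap();
}
```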
60 | #[test]
61 | #[allow(clippy::mutex_atomic)]
62 | fn test_sync() {

60 | #[test]
61 | #[cfg_attr(feature = "cargo-clippy", allow(mutex_atomic))]
62 | fn test_sync() {

127 | #[allow(clippy::mutex_atomic)]
128 | let healthy = Arc::new(tokio::sync::RwLock::new(false));

80 | #[allow(clippy::mutex_atomic)]
81 | impl RuSOEM {

14 | // This is used for the Condvar, which requires this kind of construction
15 | #[allow(clippy::mutex_atomic)]
16 | pub fn new(cb: Box<dyn Fn(T) + Send>) -> Self {

14 | // This is used for the Condvar, which requires this kind of construction
15 | #[allow(clippy::mutex_atomic)]
16 | pub fn new(cb: Box<dyn Fn(T) + Send>) -> Self {
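Many of the allows in this listing protect the standard condition-variable idiom: `Condvar::wait` consumes a `MutexGuard`, so the flag has to live inside a `Mutex` even though it is just a `bool`, which is exactly the shape this lint flags. A minimal sketch of that pattern, with illustrative names rather than code from any repository above:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    // clippy::mutex_atomic flags Mutex<bool>, but Condvar::wait needs a
    // MutexGuard, so an AtomicBool cannot be substituted here.
    #[allow(clippy::mutex_atomic)]
    let ready_pair = Arc::new((Mutex::new(false), Condvar::new()));
    let worker_pair = Arc::clone(&ready_pair);

    thread::spawn(move || {
        let (lock, cvar) = &*worker_pair;
        // Set the flag under the lock, then wake the waiting thread.
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*ready_pair;
    let mut ready = lock.lock().unwrap();
    // Loop to guard against spurious wakeups.
    while !*ready {
        ready = cvar.wait(ready).unwrap();
    }
}
```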
218 | // significantly more complicated for negligible benefit.
219 | #[allow(clippy::mutex_atomic)]
220 | impl CrossRequestRetryState {

650 | // the bool value
651 | #[allow(clippy::mutex_atomic)]
652 | impl CustomHandler {

49 | #[allow(clippy::mutex_atomic)]
50 | impl GlobalSystem {

12 | #[derive(Debug)]
13 | #[allow(clippy::mutex_atomic)]
14 | pub struct Sleepers {

25 | #[allow(clippy::mutex_atomic)]
26 | impl Default for Sleepers {

37 | #[allow(clippy::mutex_atomic)]
38 | impl Sleepers {

273 | impl WalletSync for CompactFiltersBlockchain {
274 | #[allow(clippy::mutex_atomic)] // Mutex is easier to understand than a CAS loop.
275 | fn wallet_setup<D: BatchDatabase>(

333 | #[allow(clippy::mutex_atomic)]
334 | let last_synced_block = Arc::new(Mutex::new(synced_height));

74 | // Create termination mutex and cvar
75 | #[allow(clippy::mutex_atomic)]
76 | let thread_termination_lock = Arc::new(Mutex::new(false));

204 | // It's in the Condvar docs, so this is the recommended way to do it.
205 | #[allow(clippy::mutex_atomic)]
206 | unsafe {
822 | let nursery = nursery.in_current_span();
823 | #[allow(clippy::mutex_atomic)]
824 | let ready_pair = Arc::new((Mutex::new(false), Condvar::new()));

944 | let nursery = nursery.in_current_span();
945 | #[allow(clippy::mutex_atomic)]
946 | let ready_pair = Arc::new((Mutex::new(false), Condvar::new()));

233 | let nursery = nursery.in_current_span();
234 | #[allow(clippy::mutex_atomic)]
235 | let ready_pair = Arc::new((Mutex::new(false), Condvar::new()));

13 | mod response_elements;
14 | #[allow(clippy::mutex_atomic)]
15 | // mod run_migrations;

145 | let queue = Arc::new(Mutex::new(Vec::new()));
146 | #[allow(clippy::mutex_atomic)]
147 | let shutdown = Arc::new((Mutex::new(false), Condvar::new()));

9 | #[derive(Debug)]
10 | #[allow(clippy::mutex_atomic)]
11 | pub struct ThreadPark {

16 | #[allow(clippy::mutex_atomic)]
17 | impl ThreadPark {

68 | /// Shim implementations are responsible for calling [`Self::signal`].
69 | #[allow(clippy::mutex_atomic)] // Condvar expected to be used with Mutex, not AtomicBool.
70 | #[derive(Default)]

73 | #[allow(clippy::mutex_atomic)]
74 | impl ExitSignal {

19 | impl Stats {
20 | #[allow(clippy::mutex_atomic)]
21 | pub fn new() -> Self {

669 | #[allow(clippy::mutex_atomic)]
670 | #[test]

856 | #[test]
857 | #[allow(clippy::mutex_atomic)]
858 | fn test_inner_stream_timeout() {

71 | block_opts.set_block_size(if opts.low_memory { 256 << 10 } else { 1 << 20 });
72 | #[allow(clippy::mutex_atomic)]
73 | let mut store = DbStore {
39 | impl VecEnv {
40 | #[allow(clippy::mutex_atomic)]
41 | pub fn new<T: Environment + Send + 'static>(

233 | #[allow(clippy::mutex_atomic)]
234 | unsafe extern "C" fn on_ping_end<F: Fn(&Summary, &Reply)>(

312 | impl<'a, F: Fn(&Summary, &Reply)> Tracker<'a, F> {
313 | #[allow(clippy::mutex_atomic)]
314 | pub fn new(reply_callback: Option<&'a F>) -> Self {

41 | #[allow(clippy::mutex_atomic, unused)]
42 | impl Semaphore {

172 | let extractor = FeatureExtractor::new(&self.config);
173 | #[allow(clippy::mutex_atomic)]
174 | let processed = Mutex::new(0usize);

179 | .map(|(i, doc)| {
180 | #[allow(clippy::mutex_atomic)]
181 | {

133 | let extractor = FeatureExtractor::new(&self.config);
134 | #[allow(clippy::mutex_atomic)]
135 | let processed = Mutex::new(0usize);

140 | .map(|(i, doc)| {
141 | #[allow(clippy::mutex_atomic)]
142 | {

145 | /// ```
146 | #[allow(clippy::mutex_atomic)]
147 | pub fn build(self) -> FdbResult<(NetworkRunner, NetworkWait)> {

930 | /// ```
931 | #[allow(clippy::mutex_atomic)]
932 | pub fn wait(&self) {

51 | #[allow(clippy::mutex_atomic)]
52 | impl<T: IndexValue> BucketMapHolder<T> {

24 | // See https://github.com/rust-lang/rust-clippy/issues/1516
25 | #[allow(clippy::mutex_atomic)]
26 | static TASK_CONDVAR: Lazy<Arc<(Mutex<bool>, Condvar)>> =
447 | #[allow(clippy::mutex_atomic)]
448 | let active_background_workers = Arc::new((Mutex::new(0), Condvar::new()));

476 | #[allow(clippy::mutex_atomic)]
477 | let active_background_workers = Arc::new((Mutex::new(0), Condvar::new()));

35 | // We need the `Mutex<bool>` to work in pair with `Condvar`.
36 | #[allow(clippy::mutex_atomic)]
37 | mod scheduler;

70 | #[allow(clippy::mutex_atomic)]
71 | #[derive(Debug)]

78 | #[allow(clippy::mutex_atomic)]
79 | impl InnerPool {

121 | #[allow(clippy::mutex_atomic)]
122 | impl Drop for InnerPool {

96 | // TODO: revisit this lint
97 | #[allow(clippy::mutex_atomic)]
98 | let available = Arc::new(Mutex::new(initial_available));

90 | // TODO: revisit this lint
91 | #[allow(clippy::mutex_atomic)]
92 | let available = Arc::new((Mutex::new(running), Condvar::new()));

209 | // TODO: revisit this lint
210 | #[allow(clippy::mutex_atomic)]
211 | let charging = Arc::new(Mutex::new(initial_charging));

217 | // TODO: revisit this lint
218 | #[allow(clippy::mutex_atomic)]
219 | let reachable = Arc::new(Mutex::new(initial_reachable));

60 | // TODO: revisit this lint
61 | #[allow(clippy::mutex_atomic)]
62 | let state = Arc::new(Mutex::new(initial_state as i64));

606 | // the thread and interrupt the ticker wait.
607 | #[allow(clippy::mutex_atomic)]
608 | let stopping = Arc::new((Mutex::new(false), Condvar::new()));
60 | impl Indexer {
61 | #[allow(clippy::mutex_atomic)]
62 | pub fn new(

336 | #[test]
337 | #[allow(clippy::mutex_atomic)]
338 | fn cache() {

79 | #[allow(clippy::mutex_atomic)]
80 | fn isla_main() -> i32 {

51 | #[allow(clippy::mutex_atomic)]
52 | fn isla_main() -> i32 {

51 | #[allow(clippy::mutex_atomic)]
52 | fn isla_main() -> i32 {

395 | #[allow(clippy::mutex_atomic)]
396 | let syncpair = Arc::new((Mutex::new(ShMemServiceStatus::Starting), Condvar::new()));

143 | let mut inner = instance.activate();
144 | #[allow(clippy::mutex_atomic)]
145 | let is_alive = Arc::new(Mutex::new(true));

17 | index: usize,
18 | #[ allow(clippy::mutex_atomic) ]
19 | next_compaction: Mutex<usize>,

27 | pub fn new(index: usize, data_blocks: Arc<DataBlocks>, params: Arc<Params>, manifest: Arc<Manifest>) -> Self {
28 | #[ allow(clippy::mutex_atomic) ]
29 | Self {

120 | pub async fn start_compaction(&self) -> (Vec<usize>, Vec<Arc<SortedTable>>) {
121 | #[ allow(clippy::mutex_atomic) ]
122 | let mut next_compaction = self.next_compaction.lock().await;

16 | #[allow(clippy::mutex_atomic)]
17 | impl ThreadPark {

9 | #[derive(Debug)]
10 | #[allow(clippy::mutex_atomic)]
11 | pub struct ThreadPark {

16 | #[allow(clippy::mutex_atomic)]
17 | impl ThreadPark {
475 | let closed = Arc::new(AtomicBool::new(false));
476 | #[allow(clippy::mutex_atomic)]
477 | let paused = Arc::new(Mutex::new(paused));
478 | #[allow(clippy::mutex_atomic)]
479 | let producer_paused = Arc::new(Mutex::new(producer_paused));

56 | impl Indexer {
57 | #[allow(clippy::mutex_atomic)]
58 | pub fn new(

15 | #[allow(clippy::mutex_atomic)]
16 | static SHOW_CONSOLE: Lazy<Arc<Mutex<bool>>> = Lazy::new(|| Arc::new(Mutex::new(false)));

577 | #[crate::rt_test]
578 | #[allow(clippy::mutex_atomic)]
579 | async fn basics() {

158 | #[allow(clippy::mutex_atomic)]
159 | fn get_blob(&self, blob_id: &str) -> LocalFsResult<Arc<dyn BlobReader>> {

113 | pub fn new(jobs_count: usize) -> Self {
114 | #[allow(clippy::mutex_atomic)]
115 | let notifier = Arc::new((Mutex::new(false), Condvar::new()));

102 | ) -> Option<Provider> {
103 | #[allow(clippy::mutex_atomic)]
104 | let pkcs11_provider = Provider {

4 | #[test]
5 | #[allow(clippy::mutex_atomic)]
6 | fn test_basic() {
18 | /// Collect the output from the remote end and return it
19 | #[allow(clippy::mutex_atomic)]
20 | pub(super) fn exec(
307 | running: Arc::new((
308 | #[allow(clippy::mutex_atomic)]
309 | Mutex::new(false),

33 | #[allow(clippy::mutex_atomic)]
34 | async fn run() -> Result<()> {

60 | #[test]
61 | #[cfg_attr(feature = "cargo-clippy", allow(mutex_atomic))]
62 | fn test_sync() {

7 | #[allow(clippy::mutex_atomic)]
8 | pub fn lossy_channel<T>(queue_size: usize) -> (LossySender<T>, LossyReceiver<T>) {

383 | #[cfg_attr(feature = "cargo-clippy", allow(mutex_atomic))]
384 | pub fn run_main() {

453 | test_with(|ctx| {
454 | #[allow(clippy::mutex_atomic)]
455 | let called = Arc::new(Mutex::new(false));
42 | impl InnerConnection {
43 | #[allow(clippy::mutex_atomic)]
44 | #[inline]

151 | #[allow(clippy::mutex_atomic)]
152 | pub fn close(&mut self) -> Result<()> {

15 | #[allow(clippy::mutex_atomic)]
16 | impl UnlockNotification {

44 | impl InnerConnection {
45 | #[allow(clippy::mutex_atomic)]
46 | #[inline]

153 | #[allow(clippy::mutex_atomic)]
154 | pub fn close(&mut self) -> Result<()> {

15 | #[allow(clippy::mutex_atomic)]
16 | impl UnlockNotification {

38 | impl InnerConnection {
39 | #[allow(clippy::mutex_atomic)]
40 | #[inline]

147 | #[allow(clippy::mutex_atomic)]
148 | pub fn close(&mut self) -> Result<()> {

19 | #[cfg(feature = "unlock_notify")]
20 | #[allow(clippy::mutex_atomic)]
21 | impl UnlockNotification {

66 | #[allow(clippy::mutex_atomic)]
67 | impl<T: IndexValue> BucketMapHolder<T> {

201 | fn new(next: usize) -> SyncWait {
202 | #[allow(clippy::mutex_atomic)]
203 | SyncWait {

211 | fn wait(&self) {
212 | #[allow(clippy::mutex_atomic)]
213 | let mut completed = unsafe { self.mutex.lock().unwrap_unchecked() };

220 | fn signal(&self) {
221 | #[allow(clippy::mutex_atomic)]
222 | let mut completed = unsafe { self.mutex.lock().unwrap_unchecked() };

60 | #[test]
61 | #[cfg_attr(feature = "cargo-clippy", allow(mutex_atomic))]
62 | fn test_sync() {
193 | let queue = Arc::new(Mutex::new(Default::default()));
194 | #[allow(clippy::mutex_atomic)]
195 | let shutdown = Arc::new((Mutex::new(false), Condvar::new()));

176 | /// Wrap a [Provider](trait.Provider.html).
177 | #[allow(clippy::mutex_atomic)]
178 | pub fn new(p: P) -> Self {

38 | ) -> Self {
39 | #[allow(clippy::mutex_atomic)] // mutex used in CondVar below
40 | let shutdown = Arc::new(Mutex::new(ShutdownState::Running));

50 | /// of `closelog` either clear the pointer or don't retain it at all.)
51 | #[allow(clippy::mutex_atomic)]
52 | static LAST_UNIQUE_IDENT: Lazy<Mutex<usize>> =

66 | #[allow(clippy::mutex_atomic)]
67 | impl<T: IndexValue> BucketMapHolder<T> {

11 | // Panic if the `interval.is_zero()` is `true`.
12 | #[allow(clippy::mutex_atomic)]
13 | #[must_use]

38 | impl Drop for PeriodicWorker {
39 | #[allow(clippy::mutex_atomic)]
40 | fn drop(&mut self) {

137 | #[allow(clippy::mutex_atomic)]
138 | fn spawn_reactor<T: StageReactor>(

106 | #[allow(clippy::mutex_atomic)]
107 | fn stop_system(&mut self) -> Loop {

22 | // for processes using a lot of files and using sysinfo at the same time.
23 | #[allow(clippy::mutex_atomic)]
24 | pub(crate) static mut REMAINING_FILES: once_cell::sync::Lazy<Arc<Mutex<isize>>> =
19 | /// Collect the output from the remote end and return it
20 | #[allow(clippy::mutex_atomic)]
21 | pub(super) fn exec(

80 | /// Execute the interactive ssh operations serially
81 | #[allow(clippy::mutex_atomic)]
82 | pub(super) fn exec_interactive(vm_conn_info: Vec<VmConnInfo>) -> ! {
517 | // TODO: explore supporting timeouts upstream in Socks5Proxy.
518 | #[allow(clippy::mutex_atomic)]
519 | let stream = if let Some(deadline) = deadline {

38 | ) -> Self {
39 | #[allow(clippy::mutex_atomic)] // mutex used in CondVar below
40 | let shutdown = Arc::new(Mutex::new(ShutdownState::Running));

77 | #[allow(clippy::mutex_atomic)]
78 | impl<T: IndexValue> BucketMapHolder<T> {

176 | {
177 | #[allow(clippy::mutex_atomic)]
178 | let finished_setter = Arc::new((Mutex::new(false), Condvar::new()));

928 | fn gpu_read(&self) -> MetalRwLockGpuReadGuard {
929 | #[allow(clippy::mutex_atomic)]
930 | let mut reader_count = self.inner.reader_count.lock().unwrap();

935 | fn cpu_write(&mut self) -> &mut T {
936 | #[allow(clippy::mutex_atomic)]
937 | let mut reader_count = self.inner.reader_count.lock().unwrap();

945 | #[derive(Default)]
946 | #[allow(clippy::mutex_atomic)]
947 | // This is a false positive since Clippy is unable to detect us

959 | fn drop(&mut self) {
960 | #[allow(clippy::mutex_atomic)]
961 | let mut reader_count = self.inner.reader_count.lock().unwrap();

930 | /// ```
931 | #[allow(clippy::mutex_atomic)]
932 | pub fn wait(&self) {

922 | fn gpu_read(&self) -> MetalRwLockGpuReadGuard {
923 | #[allow(clippy::mutex_atomic)]
924 | let mut reader_count = self.inner.reader_count.lock().unwrap();

929 | fn cpu_write(&mut self) -> &mut T {
930 | #[allow(clippy::mutex_atomic)]
931 | let mut reader_count = self.inner.reader_count.lock().unwrap();

939 | #[derive(Default)]
940 | #[allow(clippy::mutex_atomic)]
941 | // This is a false positive since Clippy is unable to detect us

953 | fn drop(&mut self) {
954 | #[allow(clippy::mutex_atomic)]
955 | let mut reader_count = self.inner.reader_count.lock().unwrap();

635 | #[allow(clippy::mutex_atomic)]
636 | let pair = Arc::new((Mutex::new(false), Condvar::new()));
150 | // I just don't want to think about ordering
151 | #![allow(clippy::mutex_atomic)]

281 | #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
282 | #![allow(clippy::mutex_atomic, clippy::module_inception)]
283 | #![doc(test(attr(deny(rust_2018_idioms, warnings))))]

1 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)]
2 | use std::io::prelude::*;

1 | // Necessary for using `Mutex<usize>` for conditional variables
2 | #![allow(clippy::mutex_atomic)]

51 | // Ignore the lazy_static warning about the mutex
52 | #![allow(clippy::mutex_atomic)]

12 | #![allow(clippy::mutex_atomic, clippy::redundant_clone)]

1 | #![allow(clippy::mutex_atomic, clippy::redundant_clone)]

1 | #![allow(clippy::mutex_atomic)]

243 | impl RebalanceCheck {
244 | #![allow(clippy::mutex_atomic)]
245 | pub fn new() -> Self {

1 | #![deny(clippy::all)]
2 | #![allow(clippy::mutex_atomic)]
3 | use anyhow::Result;

1 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)] //because I want the value AND the locking. I think :)

1 | #![allow(clippy::mutex_atomic)] //because I want the value AND the locking. actually, rethink this (TODO)

71 | mod tests {
72 | #![allow(unused_variables, clippy::mutex_atomic)]
4 | #![warn(missing_debug_implementations, rust_2018_idioms)]
5 | #![allow(clippy::mutex_atomic, clippy::module_inception)]
6 | #![doc(test(attr(deny(rust_2018_idioms, warnings))))]

1 | #![allow(clippy::mutex_atomic)] // prevent fp with idiomatic condvar code
2 | #[macro_use]

1 | #![allow(clippy::mutex_atomic)]
2 | #[macro_use] extern crate lazy_static;

1 | #![allow(clippy::mutex_atomic)] // Avoid clippy warning about JITS_MADE
2 | #![allow(clippy::new_without_default)] // Avoid clippy warning about Jit::new

1 | // background_load_in_progress used with condition variable
2 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)]
2 | use crate::codes::BinaryCode;

1 | #![allow(clippy::mutex_atomic)]

3 | // This lint never produces correct suggestions in our case.
4 | #![allow(clippy::mutex_atomic)]

1 | // Necessary for using `Mutex<usize>` for conditional variables
2 | #![allow(clippy::mutex_atomic)]

1 | // Allow using Mutex<bool> to support Mutex/Condvar pattern
2 | #![allow(clippy::mutex_atomic)]

12 | // See under "known problems" https://rust-lang.github.io/rust-clippy/master/index.html#mutex_atomic
13 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::blacklisted_name, clippy::mutex_atomic)]
2 | #![cfg(feature = "thread_safe")]

5 | clippy::unknown_clippy_lints,
6 | clippy::mutex_atomic
7 | )]
1 | #![allow(clippy::mutex_atomic)] // Mutex<bool> needed for condvar
2 | //! Installer of futures.

1 | // Necessary for using `Mutex<usize>` for conditional variables
2 | #![allow(clippy::mutex_atomic)]

66 | #![warn(missing_docs)]
67 | #![allow(clippy::mutex_atomic)]

3 | // We run into this here, so let's silence this lint for this file.
4 | #![allow(clippy::mutex_atomic)]

225 | #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
226 | #![allow(clippy::mutex_atomic, clippy::module_inception)]
227 | #![doc(test(attr(deny(rust_2018_idioms, warnings))))]

3 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)]

9 | mod inner {
10 | #![allow(clippy::cast_lossless, clippy::mutex_atomic)]
11 | extern crate clap;

1 | #![allow(clippy::mutex_atomic)]

33 | // TODO(Lytigas) re-architecht the Driverstation
34 | #![allow(clippy::mutex_atomic)]

1 | #![allow(clippy::mutex_atomic)]
2 | use std::sync::{Arc, Condvar, Mutex};