Cleanups for clippy
Clippy started being more picky about a bunch of things.
rdaum committed Dec 8, 2024
1 parent e614245 commit f734c0d
Showing 12 changed files with 65 additions and 61 deletions.
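Most of the edits below address stricter lints: hand-rolled PartialOrd/Ord impls replaced with derives, a tuple-valued map replaced with a named HostRecord struct, renamed error variants, a RefCell swapped for a Mutex in tests, and explicit #[allow(...)] attributes where a lint is intentional. A typical way to reproduce the warnings locally -- a sketch assuming a standard Cargo workspace, since the commit does not record the exact command used -- is:

    cargo clippy --workspace --all-targets -- -D warnings
    # optionally let clippy apply machine-applicable fixes in place
    cargo clippy --fix --allow-dirty --workspace --all-targets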
14 changes: 8 additions & 6 deletions crates/daemon/src/connections_fjall.rs
@@ -61,13 +61,15 @@ struct Inner {

impl ConnectionsFjall {
pub fn open(path: Option<&Path>) -> Self {
let (tmpdir, path) = if path.is_none() {
let tmpdir = tempfile::TempDir::new().unwrap();
let path = tmpdir.path().to_path_buf();
(Some(tmpdir), path)
} else {
(None, path.unwrap().to_path_buf())
let (tmpdir, path) = match path {
Some(path) => (None, path.to_path_buf()),
None => {
let tmpdir = tempfile::TempDir::new().unwrap();
let path = tmpdir.path().to_path_buf();
(Some(tmpdir), path)
}
};

info!("Opening connections database at {:?}", path);
let keyspace = Config::new(&path).open().unwrap();
let sequences_partition = keyspace
35 changes: 27 additions & 8 deletions crates/daemon/src/rpc_hosts.rs
@@ -20,8 +20,14 @@ use std::time::SystemTime;
use tracing::warn;

/// Manages the set of known hosts and the listeners they have registered.
struct HostRecord {
last_seen: SystemTime,
host_type: HostType,
listeners: Vec<(Obj, SocketAddr)>,
}

#[derive(Default)]
pub struct Hosts(HashMap<HostToken, (SystemTime, HostType, Vec<(Obj, SocketAddr)>)>);
pub struct Hosts(HashMap<HostToken, HostRecord>);

impl Hosts {
pub(crate) fn receive_ping(
@@ -32,14 +38,21 @@ impl Hosts {
) -> bool {
let now = SystemTime::now();
self.0
.insert(host_token, (now, host_type, listeners))
.insert(
host_token,
HostRecord {
last_seen: now,
host_type,
listeners,
},
)
.is_none()
}

pub(crate) fn ping_check(&mut self, timeout: std::time::Duration) {
let now = SystemTime::now();
let mut expired = vec![];
for (host_token, (last_seen, _, _)) in self.0.iter() {
for (host_token, HostRecord { last_seen, .. }) in self.0.iter() {
if now.duration_since(*last_seen).unwrap() > timeout {
warn!(
"Host {} has not responded in time: {:?}, removing its listeners from the list",
@@ -57,11 +70,17 @@
pub(crate) fn listeners(&self) -> Vec<(Obj, HostType, SocketAddr)> {
self.0
.values()
.flat_map(|(_, host_type, listeners)| {
listeners
.iter()
.map(move |(oid, addr)| (oid.clone(), *host_type, *addr))
})
.flat_map(
|HostRecord {
host_type,
listeners,
..
}| {
listeners
.iter()
.map(move |(oid, addr)| (oid.clone(), *host_type, *addr))
},
)
.collect()
}

20 changes: 10 additions & 10 deletions crates/db/src/fjall_provider.rs
@@ -34,15 +34,15 @@ where
{
let result: Bytes = user_value.into();
let ts = Timestamp(u64::from_le_bytes(result[0..8].try_into().unwrap()));
let codomain = Codomain::from_bytes(result.slice(8..)).map_err(|_| Error::EncodingError)?;
let codomain = Codomain::from_bytes(result.slice(8..)).map_err(|_| Error::EncodingFailure)?;
Ok((ts, codomain))
}

fn encode<Codomain>(ts: Timestamp, codomain: Codomain) -> Result<UserValue, Error>
where
Codomain: AsByteBuffer,
{
let as_bytes = codomain.as_bytes().map_err(|_| Error::EncodingError)?;
let as_bytes = codomain.as_bytes().map_err(|_| Error::EncodingFailure)?;
let mut result = Vec::with_capacity(8 + as_bytes.len());
result.extend_from_slice(&ts.0.to_le_bytes());
result.extend_from_slice(&as_bytes);
@@ -68,11 +68,11 @@
Codomain: Clone + Eq + PartialEq + AsByteBuffer,
{
fn get(&self, domain: &Domain) -> Result<Option<(Timestamp, Codomain)>, Error> {
let key = domain.as_bytes().map_err(|_| Error::EncodingError)?;
let key = domain.as_bytes().map_err(|_| Error::EncodingFailure)?;
let Some(result) = self
.fjall_partition
.get(key)
.map_err(|_| Error::RetrieveError)?
.map_err(|_| Error::RetrievalFailure)?
else {
return Ok(None);
};
@@ -81,19 +81,19 @@
}

fn put(&self, timestamp: Timestamp, domain: Domain, codomain: Codomain) -> Result<(), Error> {
let key = domain.as_bytes().map_err(|_| Error::EncodingError)?;
let key = domain.as_bytes().map_err(|_| Error::EncodingFailure)?;
let value = encode::<Codomain>(timestamp, codomain)?;
self.fjall_partition
.insert(key, value)
.map_err(|_| Error::StoreError)?;
.map_err(|_| Error::StorageFailure)?;
Ok(())
}

fn del(&self, _timestamp: Timestamp, domain: &Domain) -> Result<(), Error> {
let key = domain.as_bytes().map_err(|_| Error::EncodingError)?;
let key = domain.as_bytes().map_err(|_| Error::EncodingFailure)?;
self.fjall_partition
.remove(key)
.map_err(|_| Error::StoreError)?;
.map_err(|_| Error::StorageFailure)?;
Ok(())
}

@@ -103,8 +103,8 @@
{
let mut result = Vec::new();
for entry in self.fjall_partition.iter() {
let (key, value) = entry.map_err(|_| Error::RetrieveError)?;
let domain = Domain::from_bytes(key.into()).map_err(|_| Error::EncodingError)?;
let (key, value) = entry.map_err(|_| Error::RetrievalFailure)?;
let domain = Domain::from_bytes(key.into()).map_err(|_| Error::EncodingFailure)?;
let (ts, codomain) = decode::<Codomain>(value)?;
if predicate(&domain, &codomain) {
result.push((ts, domain, codomain));
1 change: 0 additions & 1 deletion crates/db/src/loader.rs
@@ -29,7 +29,6 @@ use moor_values::Var;

/// Interface exposed to be used by the textdump loader. Overlap of functionality with what
/// WorldState could provide, but potentially different constraints/semantics (e.g. no perms checks)
pub trait LoaderInterface: Send {
/// For reading textdumps...
fn create_object(&self, objid: Option<Obj>, attrs: &ObjAttrs) -> Result<Obj, WorldStateError>;
20 changes: 4 additions & 16 deletions crates/db/src/tx/mod.rs
@@ -18,21 +18,9 @@ mod tx_table;
pub use global_cache::{GlobalCache, Provider};
pub use tx_table::{TransactionalTable, WorkingSet};

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)]
pub struct Timestamp(pub u64);

impl PartialOrd for Timestamp {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.0.partial_cmp(&other.0)
}
}

impl Ord for Timestamp {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.0.cmp(&other.0)
}
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Tx {
pub(crate) ts: Timestamp,
@@ -45,9 +33,9 @@ pub enum Error {
#[error("Conflict detected")]
Conflict,
#[error("Retrieval error")]
RetrieveError,
RetrievalFailure,
#[error("Store error")]
StoreError,
StorageFailure,
#[error("Encoding error")]
EncodingError,
EncodingFailure,
}
24 changes: 9 additions & 15 deletions crates/db/src/tx/tx_table.rs
@@ -430,11 +430,10 @@ where
#[cfg(test)]
mod tests {
use super::*;
use std::cell::RefCell;
use std::sync::Arc;
use std::sync::{Arc, Mutex};

struct TestBackingStore {
store: RefCell<HashMap<u64, u64>>,
store: Arc<Mutex<HashMap<u64, u64>>>,
}

impl TestBackingStore {
@@ -444,27 +443,22 @@
store.insert(*k, *v);
}
TestBackingStore {
store: RefCell::new(store),
store: Arc::new(Mutex::new(store)),
}
}
}
impl Canonical<u64, u64> for TestBackingStore {
fn get(&self, domain: &u64) -> Result<Option<(Timestamp, u64)>, Error> {
Ok(self
.store
.borrow()
.get(domain)
.cloned()
.map(|v| (Timestamp(0), v)))
let store = self.store.lock().unwrap();
Ok(store.get(domain).cloned().map(|v| (Timestamp(0), v)))
}

fn scan<F: Fn(&u64, &u64) -> bool>(
&self,
predicate: &F,
) -> Result<Vec<(Timestamp, u64, u64)>, Error> {
Ok(self
.store
.borrow()
let store = self.store.lock().unwrap();
Ok(store
.iter()
.filter(|(k, v)| predicate(k, v))
.map(|(k, v)| (Timestamp(0), *k, *v))
Expand All @@ -474,7 +468,7 @@ mod tests {

impl TestBackingStore {
fn apply(&self, working_set: Vec<(u64, Op<u64>)>) {
let mut store = self.store.borrow_mut();
let mut store = self.store.lock().unwrap();
for op in working_set {
match op.1.to_type {
OpType::Insert => {
@@ -662,7 +656,7 @@ mod tests {
backing_store.apply(ws);

// And verify the contents of the backing store
let store = backing_store.store.borrow();
let store = backing_store.store.lock().unwrap();
assert_eq!(store.get(&1), Some(&456));
assert_eq!(store.get(&2), Some(&3));
assert_eq!(store.get(&3), Some(&3));
3 changes: 1 addition & 2 deletions crates/db/src/worldstate_transaction.rs
@@ -71,8 +71,8 @@ pub trait WorldStateTransaction: Send {

/// Destroy the given object, and restructure the property inheritance accordingly.
fn recycle_object(&self, obj: &Obj) -> Result<(), WorldStateError>;
/// Get the parent of the given object.

/// Get the parent of the given object.
fn get_object_parent(&self, obj: &Obj) -> Result<Obj, WorldStateError>;

/// Set the parent of the given object, and restructure the property inheritance accordingly.
@@ -123,7 +123,6 @@ pub trait WorldStateTransaction: Send {
) -> Result<(), WorldStateError>;

/// Define a new verb on the given object.
// Yes yes I know it's a lot of arguments, but wrapper object here is redundant.
#[allow(clippy::too_many_arguments)]
fn add_object_verb(
&self,
2 changes: 0 additions & 2 deletions crates/kernel/src/tasks/sessions.rs
@@ -32,15 +32,13 @@ use moor_values::{Error, Obj, SYSTEM_OBJECT};
/// memory mapped file, a full database, or a simple in-memory buffer.
///
/// Implementations would live in the 'server' host (e.g. websocket connections or repl loop)
///
// TODO: Fix up connected/reconnected/discconnected handling.
// Will probably deprecate MOO's concept of 'disconnected' and 'connected' players in the long
// run and emulate slack, discord, skype, etc which have a concept of 'presence' (online, offline,
// away, etc) but keep a persistent virtual history. Challenge would be around making this work
// nicely with existing MOO code.
// Right now the same user can connect multiple times and we output and input on all connections,
// which is different from MOO's "reconnected" handling, but probably preferable.

pub trait Session: Send + Sync {
/// Commit for current activity, called by the scheduler when a task commits and *after* the world
/// state has successfully been committed. This is the point at which the session should send
1 change: 1 addition & 0 deletions crates/kernel/src/tasks/task.rs
@@ -593,6 +593,7 @@ impl Task {
}
}

#[allow(clippy::type_complexity)]
fn find_verb_for_command(
player: &Obj,
player_location: &Obj,
1 change: 1 addition & 0 deletions crates/testing/load-tools/src/tx-list-append.rs
@@ -234,6 +234,7 @@ fn process_writes(write_log: &List) -> Vec<(usize, Vec<i64>)> {
appends
}

#[allow(clippy::too_many_arguments)]
async fn workload(
args: Args,
zmq_ctx: tmq::Context,
2 changes: 1 addition & 1 deletion crates/testing/load-tools/src/verb-dispatch-load-test.rs
@@ -108,6 +108,7 @@ const LOAD_TEST_VERB: &str = r#"
return 1;
"#;

#[allow(clippy::too_many_arguments)]
async fn workload(
args: Args,
zmq_ctx: tmq::Context,
@@ -200,7 +201,6 @@ async fn load_test_workload(
let kill_switch = kill_switch.clone();
let zmq_ctx = zmq_ctx.clone();
let rpc_address = args.client_args.rpc_address.clone();
let client_id = client_id;
let client_token = client_token.clone();
let connection_oid = connection_oid.clone();
tokio::spawn(async move {
3 changes: 3 additions & 0 deletions crates/web-host/src/host/mod.rs
@@ -102,6 +102,8 @@ pub fn var_as_json(v: &Var) -> serde_json::Value {
}
}

// Not used yet
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, thiserror::Error)]
pub enum JsonParseError {
#[error("Unknown type")]
@@ -112,6 +114,7 @@ pub enum JsonParseError {
InvalidRepresentation,
}

#[allow(dead_code)]
pub fn json_as_var(j: &serde_json::Value) -> Result<Var, JsonParseError> {
match j {
serde_json::Value::Null => Ok(v_none()),
