
Commit 40d2591

Add fs_type field to targets table
The fs_type data should be included in the targets table. It will be used when creating the ldev.conf file and likely for other things as well.

Signed-off-by: johnsonw <[email protected]>
1 parent eaa8533 commit 40d2591

File tree: 8 files changed (+376 −217 lines)
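
The diffs below import FsType from iml_wire_types and read the new column back as text, defaulting to "ldiskfs". The enum's definition is not part of this excerpt; what follows is a minimal illustrative sketch, with the Ldiskfs variant and the string conversion assumed rather than taken from the commit.

// Hypothetical sketch of iml_wire_types::FsType. Only FsType::Zfs and the
// "zfs"/"ldiskfs" text forms appear in this diff; everything else here is
// an assumption for illustration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FsType {
    Ldiskfs,
    Zfs,
}

impl From<FsType> for String {
    fn from(fs_type: FsType) -> Self {
        match fs_type {
            FsType::Ldiskfs => "ldiskfs",
            FsType::Zfs => "zfs",
        }
        .to_string()
    }
}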

iml-agent/src/action_plugins/ldev.rs (+26 −1)
@@ -60,148 +60,171 @@ pub async fn create(entries: Vec<LdevEntry>) -> Result<(), ImlAgentError> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use iml_wire_types::FsType;
 
     #[test]
     fn test_create() -> Result<(), ImlAgentError> {
-        // oss2 oss1 zfsmo-OST0013 zfs:ost19/ost19
         let entries = vec![
             LdevEntry {
                 primary: "mds1".into(),
                 failover: Some("mds2".into()),
                 label: "MGS".into(),
                 device: "zfs:mdt0/mdt0".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "mds1".into(),
                 failover: Some("mds2".into()),
                 label: "zfsmo-MDT0000".into(),
                 device: "zfs:mdt0/mdt0".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "mds2".into(),
                 failover: Some("mds1".into()),
                 label: "zfsmo-MDT0001".into(),
                 device: "zfs:mdt1/mdt1".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0000".into(),
                 device: "zfs:ost0/ost0".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0001".into(),
                 device: "zfs:ost1/ost1".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0002".into(),
                 device: "zfs:ost2/ost2".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0003".into(),
                 device: "zfs:ost3/ost3".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0004".into(),
                 device: "zfs:ost4/ost4".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0005".into(),
                 device: "zfs:ost5/ost5".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0006".into(),
                 device: "zfs:ost6/ost6".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0007".into(),
                 device: "zfs:ost7/ost7".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0008".into(),
                 device: "zfs:ost8/ost8".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss1".into(),
                 failover: Some("oss2".into()),
                 label: "zfsmo-OST0009".into(),
                 device: "zfs:ost9/ost9".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST000a".into(),
                 device: "zfs:ost10/ost10".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST000b".into(),
                 device: "zfs:ost11/ost11".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST000c".into(),
                 device: "zfs:ost12/ost12".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST000d".into(),
                 device: "zfs:ost13/ost13".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST000e".into(),
                 device: "zfs:ost14/ost14".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST000f".into(),
                 device: "zfs:ost15/ost15".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST0010".into(),
                 device: "zfs:ost16/ost16".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST00011".into(),
                 device: "zfs:ost17/ost17".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST00012".into(),
                 device: "zfs:ost18/ost18".into(),
+                fs_type: FsType::Zfs,
             },
             LdevEntry {
                 primary: "oss2".into(),
                 failover: Some("oss1".into()),
                 label: "zfsmo-OST00013".into(),
                 device: "zfs:ost19/ost19".into(),
+                fs_type: FsType::Zfs,
             },
         ]
         .into_iter()
@@ -221,12 +244,14 @@ mod tests {
             failover: None,
             label: "MGS".into(),
             device: "zfs:mdt0/mdt0".into(),
+            fs_type: FsType::Zfs,
         },
         LdevEntry {
             primary: "mds1".into(),
             failover: Some("mds2".into()),
             label: "zfsmo-MDT0000".into(),
             device: "zfs:mdt0/mdt0".into(),
+            fs_type: FsType::Zfs,
         },
     ]
     .into_iter()
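
The comment removed from test_create (oss2 oss1 zfsmo-OST0013 zfs:ost19/ost19) shows the ldev.conf line shape these entries serialize to: primary host, failover host, label, device. Below is a minimal sketch of that serialization, assuming a simplified entry type and a "-" placeholder for a missing failover host; the real formatting lives in LdevEntry's implementation, which is not shown in this diff.

// Simplified stand-in for LdevEntry; field names match the test above,
// but the "-" fallback for a missing failover host is an assumption.
struct LdevEntry {
    primary: String,
    failover: Option<String>,
    label: String,
    device: String,
}

// Render one ldev.conf line: "primary failover label device".
fn to_ldev_line(e: &LdevEntry) -> String {
    format!(
        "{} {} {} {}",
        e.primary,
        e.failover.as_deref().unwrap_or("-"),
        e.label,
        e.device
    )
}

fn main() {
    let e = LdevEntry {
        primary: "oss2".into(),
        failover: Some("oss1".into()),
        label: "zfsmo-OST0013".into(),
        device: "zfs:ost19/ost19".into(),
    };
    assert_eq!(to_ldev_line(&e), "oss2 oss1 zfsmo-OST0013 zfs:ost19/ost19");
}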

iml-api/src/graphql/mod.rs (+78 −42)
@@ -78,6 +78,8 @@ struct Target {
     uuid: String,
     /// Where this target is mounted
     mount_path: Option<String>,
+    /// The filesystem type associated with this target
+    fs_type: String,
 }
 
 #[derive(juniper::GraphQLObject)]
@@ -220,49 +222,15 @@ impl QueryRoot {
         fs_name: Option<String>,
         exclude_unmounted: Option<bool>,
     ) -> juniper::FieldResult<Vec<Target>> {
-        let dir = dir.unwrap_or_default();
-
-        let xs: Vec<Target> = sqlx::query_as!(
-            Target,
-            r#"
-                SELECT * from target t
-                ORDER BY
-                    CASE WHEN $3 = 'asc' THEN t.name END ASC,
-                    CASE WHEN $3 = 'desc' THEN t.name END DESC
-                OFFSET $1 LIMIT $2"#,
-            offset.unwrap_or(0) as i64,
-            limit.map(|x| x as i64),
-            dir.deref()
+        let xs = get_targets(
+            &context.pg_pool,
+            limit,
+            offset,
+            dir,
+            fs_name,
+            exclude_unmounted.unwrap_or_default(),
         )
-        .fetch_all(&context.pg_pool)
-        .await?
-        .into_iter()
-        .filter(|x| match &fs_name {
-            Some(fs) => x.filesystems.contains(&fs),
-            None => true,
-        })
-        .filter(|x| match exclude_unmounted {
-            Some(true) => x.state != "unmounted",
-            Some(false) | None => true,
-        })
-        .collect();
-
-        let target_resources = get_fs_target_resources(&context.pg_pool, None).await?;
-
-        let xs: Vec<Target> = xs
-            .into_iter()
-            .map(|mut x| {
-                let resource = target_resources
-                    .iter()
-                    .find(|resource| resource.name == x.name);
-
-                if let Some(resource) = resource {
-                    x.host_ids = resource.cluster_hosts.clone();
-                }
-
-                x
-            })
-            .collect();
+        .await?;
 
         Ok(xs)
     }
@@ -1193,3 +1161,71 @@ fn create_task_job<'a>(task_id: i32) -> SendJob<'a, HashMap<String, serde_json::
         .collect(),
     }
 }
+
+async fn get_targets(
+    pool: &PgPool,
+    limit: Option<i32>,
+    offset: Option<i32>,
+    dir: Option<SortDir>,
+    fs_name: Option<String>,
+    exclude_unmounted: bool,
+) -> juniper::FieldResult<Vec<Target>> {
+    let dir = dir.unwrap_or_default();
+
+    let xs: Vec<Target> = sqlx::query!(
+        r#"
+            SELECT state, name, active_host_id, host_ids, filesystems, uuid, mount_path, dev_path, fs_type::text from target t
+            ORDER BY
+                CASE WHEN $3 = 'asc' THEN t.name END ASC,
+                CASE WHEN $3 = 'desc' THEN t.name END DESC
+            OFFSET $1 LIMIT $2"#,
+        offset.unwrap_or(0) as i64,
+        limit.map(|x| x as i64),
+        dir.deref()
+    )
+    .fetch(pool)
+    .map_ok(|x| Target {
+        state: x.state,
+        name: x.name,
+        active_host_id: x.active_host_id,
+        host_ids: x.host_ids,
+        filesystems: x.filesystems,
+        uuid: x.uuid,
+        mount_path: x.mount_path,
+        dev_path: x.dev_path,
+        fs_type: x.fs_type.unwrap_or_else(|| "ldiskfs".to_string()).into(),
+    })
+    .try_collect::<Vec<Target>>()
+    .await?
+    .into_iter()
+    .filter(|x| match &fs_name {
+        Some(fs) => x.filesystems.contains(&fs),
+        None => true,
+    })
+    .filter(|x| match exclude_unmounted {
+        true => x.state != "unmounted",
+        false => true,
+    })
+    .collect();
+
+    let target_resources = get_fs_target_resources(&pool, None).await?;
+
+    let xs: Vec<Target> = xs
+        .into_iter()
+        .map(|mut x| {
+            let resource = target_resources
+                .iter()
+                .find(|resource| resource.name == x.name);
+
+            if let Some(resource) = resource {
+                x.host_ids = resource.cluster_hosts.clone();
+            }
+
+            x
+        })
+        .collect();
+
+    Ok(xs)
+}
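
Two details of get_targets are worth noting: the fs_type::text cast lets sqlx::query! return the Postgres enum column as plain text (sqlx treats a cast result as nullable, so the value arrives as an Option<String>), and the API fills a missing value with "ldiskfs". Below is a minimal sketch of that default-fill step in isolation; the helper name is hypothetical and not part of the commit.

// Hypothetical standalone version of the fallback used in get_targets:
// a NULL / absent fs_type surfaces to GraphQL clients as "ldiskfs".
fn fs_type_or_default(fs_type: Option<String>) -> String {
    fs_type.unwrap_or_else(|| "ldiskfs".to_string())
}

fn main() {
    assert_eq!(fs_type_or_default(None), "ldiskfs");
    assert_eq!(fs_type_or_default(Some("zfs".into())), "zfs");
}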
