@@ -51,13 +51,13 @@ class QemuConsole():
51
51
def __init__ (self , qemu_binary = None , pnor = None , skiboot = None ,
52
52
prompt = None , kernel = None , initramfs = None ,
53
53
block_setup_term = None , delaybeforesend = None ,
54
- logfile = sys .stdout , hda = None , cdrom = None ):
54
+ logfile = sys .stdout , disks = None , cdrom = None ):
55
55
self .qemu_binary = qemu_binary
56
56
self .pnor = pnor
57
57
self .skiboot = skiboot
58
58
self .kernel = kernel
59
59
self .initramfs = initramfs
60
- self .hda = hda
60
+ self .disks = disks
61
61
self .state = ConsoleState .DISCONNECTED
62
62
self .logfile = logfile
63
63
self .delaybeforesend = delaybeforesend
@@ -101,6 +101,10 @@ def disable_setup_term_quiet(self):
101
101
self .setup_term_quiet = 0
102
102
self .setup_term_disable = 0
103
103
104
def update_disks(self, disks):
    """Replace the list of disk images attached to this console.

    Kept on the console object because the QEMU command line is
    built here at connect() time.
    """
    self.disks = disks
104
108
def close (self ):
105
109
self .util .clear_state (self )
106
110
try :
@@ -141,22 +145,75 @@ def connect(self):
141
145
if self .initramfs is not None :
142
146
cmd = cmd + " -initrd %s" % (self .initramfs )
143
147
144
- if self .hda is not None :
145
- # Put the disk on the first PHB
146
- cmd = (cmd
147
- + " -drive file={},id=disk01,if=none" .format (self .hda )
148
- + " -device virtio-blk-pci,drive=disk01,id=virtio01,bus=pcie.0,addr=0"
149
- )
148
+ # So in the powernv QEMU model we have 3 PHBs with one slot free each.
149
+ # We can add a pcie bridge to each of these, and each bridge has 31
150
+ # slots.. if you see where I'm going..
151
+ cmd = (cmd
152
+ + " -device pcie-pci-bridge,id=pcie.3,bus=pcie.0,addr=0x0"
153
+ + " -device pcie-pci-bridge,id=pcie.4,bus=pcie.1,addr=0x0"
154
+ + " -device pcie-pci-bridge,id=pcie.5,bus=pcie.2,addr=0x0"
155
+ )
156
+
157
+ prefilled_slots = 0
150
158
if self .cdrom is not None :
151
- # Put the CDROM on the second PHB
159
+ # Put the CDROM in slot 2 of the second PHB (1 is reserved for later)
152
160
cmd = (cmd
153
161
+ " -drive file={},id=cdrom01,if=none,media=cdrom" .format (self .cdrom )
154
- + " -device virtio-blk-pci,drive=cdrom01,id=virtio02,bus=pcie.1 ,addr=0 "
162
+ + " -device virtio-blk-pci,drive=cdrom01,id=virtio02,bus=pcie.4 ,addr=2 "
155
163
)
164
+ prefilled_slots += 1
165
+
166
+ bridges = []
167
+ bridges .append ({'bus' : 3 , 'n_devices' : 0 , 'bridged' : False })
168
+ bridges .append ({'bus' : 4 , 'n_devices' : prefilled_slots , 'bridged' : False })
169
+ bridges .append ({'bus' : 5 , 'n_devices' : 0 , 'bridged' : False })
170
+
171
+ # For any amount of disks we have, start finding spots for them in the PHBs
172
+ if self .disks :
173
+ diskid = 0
174
+ bid = 0
175
+ for disk in self .disks :
176
+ bridge = bridges [bid ]
177
+ if bridge ['n_devices' ] >= 30 :
178
+ # This bridge is full
179
+ if bid == len (bridges ) - 1 :
180
+ # All bridges full, find one to extend
181
+ if [x for x in bridges if x ['bridged' ] == False ] == []:
182
+ # We messed up and filled up all our slots
183
+ raise OpTestError ("Oops! We ran out of slots!" )
184
+ for i in range (0 , bid ):
185
+ if not bridges [i ]['bridged' ]:
186
+ # We can add a bridge here
187
+ parent = bridges [i ]['bus' ]
188
+ new = bridges [- 1 ]['bus' ] + 1
189
+ print ("Adding new bridge {} on bridge {}" .format (new , parent ))
190
+ bridges .append ({'bus' : new , 'n_devices' : 0 , 'bridged' : False })
191
+ cmd = cmd + " -device pcie-pci-bridge,id=pcie.{},bus=pcie.{},addr=0x1" .format (new , parent )
192
+ bid = bid + 1
193
+ bridges [i ]['bridged' ] = True
194
+ bridge = bridges [bid ]
195
+ break
196
+ else :
197
+ # Just move to the next one, subsequent bridge should
198
+ # always have slots
199
+ bid = bid + 1
200
+ bridge = bridges [bid ]
201
+ if bridge ['n_devices' ] >= 30 :
202
+ raise OpTestError ("Lost track of our PCI bridges!" )
203
+
204
+ # Got a bridge, let's go!
205
+ # Valid bridge slots are 1..31, but keep 1 free for more bridges
206
+ addr = 2 + bridge ['n_devices' ]
207
+ print ("Adding disk {} on bus {} at address {}" .format (diskid , bridge ['bus' ], addr ))
208
+ cmd = cmd + " -drive file={},id=disk{},if=none" .format (disk .name , diskid )
209
+ cmd = cmd + " -device virtio-blk-pci,drive=disk{},id=virtio{},bus=pcie.{},addr={}" .format (diskid , diskid , bridge ['bus' ], hex (addr ))
210
+ diskid += 1
211
+ bridge ['n_devices' ] += 1
212
+
156
213
# typical host ip=10.0.2.2 and typical skiroot 10.0.2.15
157
214
# use skiroot as the source, no sshd in skiroot
215
+
158
216
fru_path = os .path .join (OpTestConfiguration .conf .basedir , "test_binaries" , "qemu_fru" )
159
- cmd = cmd + " -nic user,model=virtio-net-pci"
160
217
cmd = cmd + " -device ipmi-bmc-sim,id=bmc0,frudatafile=" + fru_path + " -device isa-ipmi-bt,bmc=bmc0,irq=10"
161
218
cmd = cmd + " -serial none -device isa-serial,chardev=s1 -chardev stdio,id=s1,signal=off"
162
219
print (cmd )
@@ -236,6 +293,7 @@ class OpTestQemu():
236
293
def __init__ (self , conf = None , qemu_binary = None , pnor = None , skiboot = None ,
237
294
kernel = None , initramfs = None , cdrom = None ,
238
295
logfile = sys .stdout ):
296
+ self .disks = []
239
297
# need the conf object to properly bind opened object
240
298
# we need to be able to cleanup/close the temp file in signal handler
241
299
self .conf = conf
@@ -267,31 +325,28 @@ def __init__(self, conf=None, qemu_binary=None, pnor=None, skiboot=None,
267
325
" and then retry." )
268
326
raise e
269
327
328
+ self .disks .append (self .conf .args .qemu_scratch_disk )
270
329
atexit .register (self .__del__ )
271
330
self .console = QemuConsole (qemu_binary = qemu_binary ,
272
331
pnor = pnor ,
273
332
skiboot = skiboot ,
274
333
kernel = kernel ,
275
334
initramfs = initramfs ,
276
335
logfile = logfile ,
277
- hda = self .conf .args .qemu_scratch_disk .name ,
278
- cdrom = cdrom )
336
+ disks = self .disks , cdrom = cdrom )
279
337
self .ipmi = QemuIPMI (self .console )
280
338
self .system = None
281
339
282
340
def __del__(self):
    """Close every scratch disk opened for this QEMU instance.

    Temporary disk files are deleted upon close (they were created
    with delete=True); the signal handler closes them too. Errors
    are logged and ignored so cleanup always runs to completion —
    a destructor must never raise.
    """
    for fd in self.disks:
        # Log the disk actually being cleaned up (the old message
        # always printed the original scratch disk, even for disks
        # added later via add_temporary_disk).
        log.debug("OpTestQemu cleaning up qemu_scratch_disk={}"
                  .format(fd))
        try:
            fd.close()
        except Exception as e:
            log.error("OpTestQemu cleanup, ignoring Exception={}"
                      .format(e))
    self.disks = []
295
350
296
351
def set_system (self , system ):
297
352
self .console .system = system
@@ -332,3 +387,12 @@ def supports_ipmi_dcmi(self):
332
387
333
388
def has_ipmi_sel(self):
    """Report whether this backend exposes an IPMI SEL (it does not)."""
    return False
390
def add_temporary_disk(self, size):
    """Create a temporary qcow2 disk of the given size and attach it.

    The console is closed first so that QEMU is relaunched with the
    new disk on its command line at the next connect.

    :param size: disk size argument understood by ``qemu-img create``
                 (e.g. "1G")
    :raises subprocess.CalledProcessError: if qemu-img fails
    """
    self.console.close()

    # delete=True: the backing file disappears when the fd is closed
    # during cleanup in __del__.
    fd = tempfile.NamedTemporaryFile(delete=True)
    self.disks.append(fd)
    # check_call returns 0 or raises, so its return value is not kept.
    subprocess.check_call(["qemu-img", "create",
                           "-f", "qcow2", fd.name, size])
    self.console.update_disks(self.disks)
0 commit comments