KVM: 0.930
user-level: 0.898
permissions: 0.895
virtual: 0.895
x86: 0.885
TCG: 0.882
risc-v: 0.879
performance: 0.879
VMM: 0.878
hypervisor: 0.874
boot: 0.874
register: 0.873
graphic: 0.870
ppc: 0.870
files: 0.865
device: 0.863
assembly: 0.858
architecture: 0.855
i386: 0.855
arm: 0.850
vnc: 0.847
debug: 0.843
semantic: 0.843
kernel: 0.842
socket: 0.840
network: 0.836
PID: 0.810
peripherals: 0.807
mistranslation: 0.787

9p virtual file system on qemu slow

Hi, 
The 9p virtual file system is slow. Several examples below: 
---------------------------------------------------------
Host for the first time: 
$ time ls bam.unsorted/
...........................
real    0m0.084s
user    0m0.000s
sys     0m0.028s
--------------------------------------------------
Host, second and following runs: 

real    0m0.009s
user    0m0.000s
sys     0m0.008s
------------------------------------------------------
VM for the first time: 
$ time ls bam.unsorted/
................................
real    0m23.141s
user    0m0.064s
sys     0m2.156s
------------------------------------------------------
VM for the second time: 
real    0m3.643s
user    0m0.024s
sys     0m0.424s
----------------------------------------------------

Copy on host: 
$ time cp 5173T.root.bak test.tmp
real    0m30.346s
user    0m0.004s
sys     0m5.324s

$ ls -lahs test.tmp
2.7G -rw------- 1 oneadmin cloud 2.7G Mar 26 21:47 test.tmp

---------------------------------------------------
Copy on VM for the same file: 

$ time cp 5173T.root.bak test.tmp

real    5m46.978s
user    0m0.352s
sys     1m38.962s
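(Rough arithmetic from the numbers above: 2.7 GiB in 30.3 s is ~95 MB/s on the host, versus 2.7 GiB in 347 s, ~8 MB/s, in the VM, so the 9p copy is roughly 11x slower.)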

Thanks for taking the time to report this bug.  Which release are you currently using?  Is this performance a regression over past releases?  Would it be possible for you to test with upstream qemu to see whether performance has improved?

Hi Serge, 

Here is the info: 

max@s0:/var/lib/one/var$ uname -a
Linux s0 3.2.0-20-generic #33-Ubuntu SMP Tue Mar 27 16:42:26 UTC 2012 x86_64 x86_64 x86_64 GNU/Linux


max@s0:/var/lib/one/var$ kvm --version
QEMU emulator version 1.0 (qemu-kvm-devel), Copyright (c) 2003-2008 Fabrice Bellard


$ ps aux | grep kvm | grep one-52
oneadmin 62687 28.9  0.3 25283292 839204 ?     Sl   01:31   2:22 /usr/bin/kvm -S -M pc-1.0 -enable-kvm -m 20040 -smp 60,sockets=60,cores=1,threads=1 -name one-52 -uuid 30c77a47-85c4-65dd-6c21-9d9b3e66cabe -nodefconfig -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/one-52.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown -drive file=/var/lib/one/var//52/images/disk.0,if=none,id=drive-virtio-disk0,format=raw,cache=writeback -device virtio-blk-pci,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -drive file=/var/lib/one/var//52/images/disk.1,if=none,id=drive-virtio-disk4,format=raw,cache=writeback -device virtio-blk-pci,bus=pci.0,addr=0x6,drive=drive-virtio-disk4,id=virtio-disk4 -fsdev local,security_model=mapped,id=fsdev-fs0,path=/tank/biouml-shared -device virtio-9p-pci,id=fs0,fsdev=fsdev-fs0,mount_tag=VirtFS,bus=pci.0,addr=0x3 -netdev tap,fd=21,id=hostnet0 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=02:00:c0:a8:02:1e,bus=pci.0,addr=0x4 -usb -device usb-mouse,id=input0 -vnc 0.0.0.0:52 -vga cirrus -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x7 -cpu host
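For context, a share exported with mount_tag=VirtFS as in the command line above is mounted on the guest side roughly like this (a minimal sketch; the /srv/shared mount point and options are taken from the mount output later in this thread):

$ mount -t 9p -o trans=virtio,version=9p2000.L VirtFS /srv/shared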


Thanks.

I'm posting a new package based on today's git head of upstream at ppa:ubuntu-virt/ppa.  When (and if) that builds, is it possible for you to try with that version?  Then we will know whether to file this bug upstream, or find and cherry-pick the fixes.

Hi Serge, 

Syslogs on the host are completely clean for all 3 of my bugs. No messages at all regarding any errors. 

I just added this repository. I can basically update daily now, as I have to reboot the server daily to keep kvm working. 
I installed qemu version 1.0+noroms-20120220-0ubuntu1. 



Quoting max (<email address hidden>):
> Hi Serge,
> 
> Syslogs on the host are completely clean for all 3 of my bugs. No messages
> at all regarding any errors.

Thanks.

> I just added this repository. I can basically update daily now, as I have to reboot the server daily to keep kvm working. 
> I installed qemu version 1.0+noroms-20120220-0ubuntu1.

1.0+noroms-20120330-0ubuntu1 should now be available, though of course
if the issue is fixed in 1.0+noroms-20120220-0ubuntu1 then that's even
more helpful :)


Hi Serge, 

I tried the new build. It is not working for me. I got this message: 

max@s0:/var/lib/one/var$ virsh create 52/deployment.0 
error: Failed to create domain from 52/deployment.0
error: internal error process exited while connecting to monitor: kvm: -fsdev local,security_model=mapped,id=fsdev-fs0,path=/tank/biouml-shared: there is no option group "fsdev"
fsdev is not supported by this qemu build.
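For what it's worth, the "there is no option group "fsdev"" error means this qemu binary was built without 9p/virtfs support, which is selected at configure time. A build meant to support -fsdev would be configured roughly like this (a sketch of the upstream build, assuming the virtfs dependencies such as libattr and libcap are installed):

$ ./configure --enable-virtfs --enable-kvm
$ make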


Drat.

I'll simply need to try to reproduce.

Hi Serge, 

qemu build 0330 does not support -fsdev. I was not able to start kvm with 9p at all. 

If you make another build, I will try it too. 

Thanks

Quoting max (<email address hidden>):
> Hi Serge,
> 
> qemu build 0330 does not support -fsdev. I was not able to start kvm
> with 9p at all.
> 
> If you make another build, I will try it too.

Thanks.  Sorry about that.  I'm hoping I just used an old packaging
tree as my starting base, will re-try, and post here when it has
built.


Hi,

Version 1.0+noroms-20120330-0ubuntu3 has been built.  Could you verify whether that fixes the issue (as well as your others)?

Thanks Serge, 

I installed it and am now testing. Let's wait several days. I will write if I have any issues.  

Hi Serge, 
The stability bugs were fixed in KVM, thanks!
Unfortunately this one is still here: 

----------------------------------------------------------------------------
VM: 

$ ls -las XXXXX.bam
14537970 -rw-r--r-- 1 10001 cloud 14885246140 Mar 23 12:19 XXXXX.bam


$ time cp XXXXX.bam test.tmp

real	14m45.580s
user	0m0.420s
sys	1m45.823s

~16 MB/s
--------------------------------------------------------------
Host:
$ ls -las YYYYY.bam
15220278 -rw-r--r-- 1 oneadmin cloud 15583853314 Mar 27 18:53 YYYYY.bam


$ time cp YYYYY.bam test.tmp

real    4m38.525s
user    0m0.048s
sys     1m11.124s

~53 MB/s

Thanks, Max.  Marked as affecting upstream QEMU per the last comment.

Can you try with security_model=passthrough?
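For reference, that would mean changing only the -fsdev line in the command quoted earlier, roughly (a sketch based on that command line; note that passthrough stores ownership directly on the host files, so qemu generally needs sufficient privilege, e.g. running as root, to chown them):

-fsdev local,security_model=passthrough,id=fsdev-fs0,path=/tank/biouml-shared -device virtio-9p-pci,id=fs0,fsdev=fsdev-fs0,mount_tag=VirtFS,bus=pci.0,addr=0x3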

One possible problem could be the block size. In this case I am using ZFS with a raidz 4+1 array (four data drives plus one parity drive). Each drive has a 4 KB block, so the optimal write size is 4 KB x 4 data drives = 16384 bytes. Tuning the block size can improve performance tenfold, but 9p consistently performs about ten times worse than native writes. 

Some extra tests: 
---------------------
VM - mapped

$ dd if=/dev/zero of=test count=100000
100000+0 records in
100000+0 records out
51200000 bytes (51 MB) copied, 20.7879 s, 2.5 MB/s

$ dd if=/dev/zero of=test count=100000  bs=16384
100000+0 records in
100000+0 records out
1638400000 bytes (1.6 GB) copied, 74.4378 s, 22.0 MB/s
------------------------------------------------------------
Host: 
$ dd if=/dev/zero of=test count=100000
100000+0 records in
100000+0 records out
51200000 bytes (51 MB) copied, 1.60118 s, 32.0 MB/s

$ dd if=/dev/zero of=test count=100000  bs=16384
100000+0 records in
100000+0 records out
1638400000 bytes (1.6 GB) copied, 4.89932 s, 334 MB/s


Iggy: I have an issue with permissions in passthrough mode. Can you give me an idea how to set up permissions in this mode? 

> Can you try with security_model=passthrough?

It provides the same results; see below: 

$ dd if=/dev/zero of=test count=100000
100000+0 records in
100000+0 records out
51200000 bytes (51 MB) copied, 19.8581 s, 2.6 MB/s


$ dd if=/dev/zero of=test count=100000 bs=16384
100000+0 records in
100000+0 records out
1638400000 bytes (1.6 GB) copied, 72.3009 s, 22.7 MB/s


Hi Max,

Could you try passing msize=262144 for the 9p mount point and post the results?
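For anyone following along, msize is a client-side 9p mount option (the maximum size of a single 9p request), so it is set on the guest at mount time, roughly like this (a sketch; the tag and mount point match the mount outputs elsewhere in this thread):

$ mount -t 9p -o trans=virtio,version=9p2000.L,msize=262144 VirtFS /srv/shared

or via /etc/fstab:

VirtFS  /srv/shared  9p  trans=virtio,version=9p2000.L,msize=262144  0  0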

Host:
[root@llm116 media]# ls -lhas file
1.1G -rw-r--r-- 1 root root 1.0G Apr 26 11:05 file

[root@llm116 media]# dd if=/dev/zero of=file bs=1M count=1024
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 0.700828 s, 1.5 GB/s

[root@llm116 media]# time cp file file2

real    0m6.353s
user    0m0.007s
sys     0m1.520s

VM:

[root@qemu-img-64 pass]# time cp file 9p_file

real    0m12.261s
user    0m0.154s
sys     0m2.582s

[root@qemu-img-64 pass]# dd if=/dev/zero of=file.9 bs=1M count=1024
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 2.07335 s, 518 MB/s

[root@qemu-img-64 pass]# mount
[snip]
v_pass on /pass type 9p (rw,trans=virtio,version=9p2000.L,msize=262144)
[/snip]

Hi Mohan, 
This parameter provides a significant improvement for big-file access/writes: 
--------------------------------------------------------------------------------------------
VirtFS on /srv/shared type 9p (rw,trans=virtio,version=9p2000.L,msize=262144)

$ dd if=/dev/zero of=test count=100000 bs=16384
100000+0 records in
100000+0 records out
1638400000 bytes (1.6 GB) copied, 36.3589 s, 45.1 MB/s

$ dd if=/dev/zero of=test count=100000 
100000+0 records in
100000+0 records out
51200000 bytes (51 MB) copied, 25.6597 s, 2.0 MB/s

$ dd if=/dev/zero of=test count=1024 bs=262144
1024+0 records in
1024+0 records out
268435456 bytes (268 MB) copied, 3.41936 s, 78.5 MB/s

Copy speed for a large file is ~45 MB/s (reading and writing on the same disk). 
---------------------------------------------------------------------------------------------
But on the host (a directory with many files): 
$ time ls -lahs bam.unsorted/
real    0m0.053s
user    0m0.004s
sys     0m0.036s

VM: 
real	0m4.449s
user	0m0.012s
sys	0m0.136s
--------------------------------------------------------
So we have delays on the first file access. 
Is it possible to resolve this issue? 
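One knob not tried in this thread that is aimed at exactly this kind of metadata-heavy workload is the 9p client cache mode; treat this as an untested suggestion, not a confirmed fix:

$ mount -t 9p -o trans=virtio,version=9p2000.L,msize=262144,cache=loose VirtFS /srv/shared

cache=loose enables client-side caching at the cost of strict coherence with the host, which can speed up repeated directory listings.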



Can you still reproduce this problem with the latest version of QEMU (currently version 2.9.0)?

[Expired for QEMU because there has been no activity for 60 days.]

[Expired for qemu-kvm (Ubuntu) because there has been no activity for 60 days.]