From 256709d2eb3fd80d768a99964be5caa61effa2a0 Mon Sep 17 00:00:00 2001
From: Christian Krinitsin
Date: Tue, 3 Jun 2025 12:04:13 +0000
Subject: add new classifier result

---
 results/classifier/001/instruction/11357571 | 47 +
 results/classifier/001/instruction/11933524 | 1125 +++
 results/classifier/001/instruction/24190340 | 2056 ++++++
 results/classifier/001/instruction/26095107 | 158 +
 results/classifier/001/instruction/33802194 | 4939 +++++++++++++
 results/classifier/001/instruction/42226390 | 187 +
 results/classifier/001/instruction/50773216 | 110 +
 results/classifier/001/instruction/51610399 | 308 +
 results/classifier/001/instruction/55961334 | 39 +
 results/classifier/001/instruction/62179944 | 31 +
 results/classifier/001/instruction/63565653 | 49 +
 results/classifier/001/instruction/70868267 | 40 +
 results/classifier/001/instruction/73660729 | 31 +
 results/classifier/001/mistranslation/14887122 | 258 +
 results/classifier/001/mistranslation/22219210 | 43 +
 results/classifier/001/mistranslation/23270873 | 692 ++
 results/classifier/001/mistranslation/24930826 | 33 +
 results/classifier/001/mistranslation/25842545 | 202 +
 results/classifier/001/mistranslation/26430026 | 165 +
 results/classifier/001/mistranslation/36568044 | 4581 ++++++++++++
 results/classifier/001/mistranslation/64322995 | 54 +
 results/classifier/001/mistranslation/70294255 | 1061 +++
 results/classifier/001/mistranslation/71456293 | 1486 ++++
 results/classifier/001/mistranslation/74466963 | 1878 +++++
 results/classifier/001/mistranslation/74545755 | 344 +
 results/classifier/001/mistranslation/80604314 | 1480 ++++
 results/classifier/001/mistranslation/80615920 | 348 +
 results/classifier/001/other/02364653 | 363 +
 results/classifier/001/other/02572177 | 421 ++
 results/classifier/001/other/04472277 | 576 ++
 results/classifier/001/other/12869209 | 88 +
 results/classifier/001/other/13442371 | 369 +
 results/classifier/001/other/14488057 | 711 ++
 results/classifier/001/other/16056596 | 98 +
 results/classifier/001/other/16201167 | 100 +
 results/classifier/001/other/16228234 | 1844 +++++
 results/classifier/001/other/17743720 | 771 ++
 results/classifier/001/other/21221931 | 328 +
 results/classifier/001/other/21247035 | 1321 ++++
 results/classifier/001/other/23300761 | 313 +
 results/classifier/001/other/23448582 | 265 +
 results/classifier/001/other/25892827 | 1077 +++
 results/classifier/001/other/31349848 | 154 +
 results/classifier/001/other/32484936 | 223 +
 results/classifier/001/other/35170175 | 521 ++
 results/classifier/001/other/42613410 | 149 +
 results/classifier/001/other/42974450 | 429 ++
 results/classifier/001/other/43643137 | 538 ++
 results/classifier/001/other/48245039 | 530 ++
 results/classifier/001/other/55247116 | 1310 ++++
 results/classifier/001/other/55367348 | 532 ++
 results/classifier/001/other/55753058 | 293 +
 results/classifier/001/other/56309929 | 180 +
 results/classifier/001/other/56937788 | 344 +
 results/classifier/001/other/57195159 | 315 +
 results/classifier/001/other/57231878 | 242 +
 results/classifier/001/other/57756589 | 1421 ++++
 results/classifier/001/other/59540920 | 376 +
 results/classifier/001/other/60339453 | 61 +
 results/classifier/001/other/64571620 | 785 +++
 results/classifier/001/other/65781993 | 2793 ++++++++
 results/classifier/001/other/66743673 | 364 +
 results/classifier/001/other/67821138 | 199 +
 results/classifier/001/other/68897003 | 716 ++
 results/classifier/001/other/70021271 | 7448 +++++++++++++++++++
 results/classifier/001/other/70416488 | 1179 ++++
 results/classifier/001/other/74715356 |
126 + results/classifier/001/other/79834768 | 409 ++ results/classifier/001/other/81775929 | 235 + results/classifier/001/other/85542195 | 120 + results/classifier/001/other/88225572 | 2900 ++++++++ results/classifier/001/other/88281850 | 281 + results/classifier/001/other/92957605 | 418 ++ results/classifier/001/other/95154278 | 155 + results/classifier/001/other/99674399 | 148 + results/classifier/001/semantic/05479587 | 83 + results/classifier/001/semantic/12360755 | 296 + results/classifier/001/semantic/28596630 | 113 + results/classifier/001/semantic/30680944 | 595 ++ results/classifier/001/semantic/46572227 | 406 ++ results/classifier/001/semantic/53568181 | 78 + results/classifier/001/semantic/80570214 | 400 ++ results/classifier/001/semantic/96782458 | 999 +++ .../001/semantic/gitlab_semantic_addsubps | 28 + .../classifier/001/semantic/gitlab_semantic_adox | 41 + .../classifier/001/semantic/gitlab_semantic_bextr | 30 + .../classifier/001/semantic/gitlab_semantic_blsi | 25 + .../classifier/001/semantic/gitlab_semantic_blsmsk | 32 + .../classifier/001/semantic/gitlab_semantic_bzhi | 43 + results/classifier/002/boot/42226390 | 188 + results/classifier/002/boot/51610399 | 309 + results/classifier/002/boot/60339453 | 62 + results/classifier/002/boot/67821138 | 200 + results/classifier/002/instruction/11357571 | 48 + results/classifier/002/instruction/11933524 | 1126 +++ results/classifier/002/instruction/24190340 | 2057 ++++++ results/classifier/002/instruction/26095107 | 159 + results/classifier/002/instruction/33802194 | 4940 +++++++++++++ results/classifier/002/instruction/50773216 | 111 + results/classifier/002/instruction/55961334 | 40 + results/classifier/002/instruction/62179944 | 32 + results/classifier/002/instruction/63565653 | 50 + results/classifier/002/instruction/70868267 | 41 + results/classifier/002/instruction/73660729 | 32 + results/classifier/002/mistranslation/14887122 | 259 + results/classifier/002/mistranslation/22219210 | 44 + results/classifier/002/mistranslation/23270873 | 693 ++ results/classifier/002/mistranslation/24930826 | 34 + results/classifier/002/mistranslation/25842545 | 203 + results/classifier/002/mistranslation/26430026 | 166 + results/classifier/002/mistranslation/36568044 | 4582 ++++++++++++ results/classifier/002/mistranslation/64322995 | 55 + results/classifier/002/mistranslation/70294255 | 1062 +++ results/classifier/002/mistranslation/71456293 | 1487 ++++ results/classifier/002/mistranslation/74466963 | 1879 +++++ results/classifier/002/mistranslation/74545755 | 345 + results/classifier/002/mistranslation/80604314 | 1481 ++++ results/classifier/002/mistranslation/80615920 | 349 + results/classifier/002/other/02364653 | 364 + results/classifier/002/other/02572177 | 422 ++ results/classifier/002/other/04472277 | 577 ++ results/classifier/002/other/12869209 | 89 + results/classifier/002/other/13442371 | 370 + results/classifier/002/other/14488057 | 712 ++ results/classifier/002/other/16056596 | 99 + results/classifier/002/other/16201167 | 101 + results/classifier/002/other/16228234 | 1845 +++++ results/classifier/002/other/17743720 | 772 ++ results/classifier/002/other/21221931 | 329 + results/classifier/002/other/21247035 | 1322 ++++ results/classifier/002/other/23300761 | 314 + results/classifier/002/other/23448582 | 266 + results/classifier/002/other/25892827 | 1078 +++ results/classifier/002/other/31349848 | 155 + results/classifier/002/other/32484936 | 224 + results/classifier/002/other/35170175 | 522 ++ results/classifier/002/other/42613410 
| 150 + results/classifier/002/other/42974450 | 430 ++ results/classifier/002/other/43643137 | 539 ++ results/classifier/002/other/48245039 | 531 ++ results/classifier/002/other/55247116 | 1311 ++++ results/classifier/002/other/55367348 | 533 ++ results/classifier/002/other/55753058 | 294 + results/classifier/002/other/56309929 | 181 + results/classifier/002/other/56937788 | 345 + results/classifier/002/other/57195159 | 316 + results/classifier/002/other/57231878 | 243 + results/classifier/002/other/57756589 | 1422 ++++ results/classifier/002/other/59540920 | 377 + results/classifier/002/other/64571620 | 786 +++ results/classifier/002/other/65781993 | 2794 ++++++++ results/classifier/002/other/66743673 | 365 + results/classifier/002/other/68897003 | 717 ++ results/classifier/002/other/70021271 | 7449 +++++++++++++++++++ results/classifier/002/other/70416488 | 1180 ++++ results/classifier/002/other/74715356 | 127 + results/classifier/002/other/79834768 | 410 ++ results/classifier/002/other/81775929 | 236 + results/classifier/002/other/85542195 | 121 + results/classifier/002/other/88225572 | 2901 ++++++++ results/classifier/002/other/88281850 | 282 + results/classifier/002/other/92957605 | 419 ++ results/classifier/002/other/95154278 | 156 + results/classifier/002/other/99674399 | 149 + results/classifier/002/semantic/05479587 | 84 + results/classifier/002/semantic/12360755 | 297 + results/classifier/002/semantic/28596630 | 114 + results/classifier/002/semantic/30680944 | 596 ++ results/classifier/002/semantic/46572227 | 407 ++ results/classifier/002/semantic/53568181 | 79 + results/classifier/002/semantic/80570214 | 401 ++ results/classifier/002/semantic/96782458 | 1000 +++ .../002/semantic/gitlab_semantic_addsubps | 29 + .../classifier/002/semantic/gitlab_semantic_adox | 42 + .../classifier/002/semantic/gitlab_semantic_bextr | 31 + .../classifier/002/semantic/gitlab_semantic_blsi | 26 + .../classifier/002/semantic/gitlab_semantic_blsmsk | 33 + .../classifier/002/semantic/gitlab_semantic_bzhi | 44 + results/classifier/003/KVM/04472277 | 579 ++ results/classifier/003/KVM/26430026 | 168 + results/classifier/003/KVM/33802194 | 4942 +++++++++++++ results/classifier/003/KVM/42613410 | 152 + results/classifier/003/KVM/43643137 | 541 ++ results/classifier/003/KVM/55961334 | 42 + results/classifier/003/KVM/71456293 | 1489 ++++ results/classifier/003/KVM/80615920 | 351 + results/classifier/003/boot/42226390 | 190 + results/classifier/003/boot/51610399 | 311 + results/classifier/003/boot/60339453 | 64 + results/classifier/003/boot/67821138 | 202 + results/classifier/003/instruction/11357571 | 50 + results/classifier/003/instruction/11933524 | 1128 +++ results/classifier/003/instruction/24190340 | 2059 ++++++ results/classifier/003/instruction/26095107 | 161 + results/classifier/003/instruction/50773216 | 113 + results/classifier/003/instruction/63565653 | 52 + results/classifier/003/instruction/70868267 | 43 + results/classifier/003/instruction/73660729 | 34 + results/classifier/003/mistranslation/14887122 | 261 + results/classifier/003/mistranslation/22219210 | 46 + results/classifier/003/mistranslation/23270873 | 695 ++ results/classifier/003/mistranslation/24930826 | 36 + results/classifier/003/mistranslation/25842545 | 205 + results/classifier/003/mistranslation/36568044 | 4584 ++++++++++++ results/classifier/003/mistranslation/64322995 | 57 + results/classifier/003/mistranslation/70294255 | 1064 +++ results/classifier/003/mistranslation/74466963 | 1881 +++++ 
results/classifier/003/mistranslation/74545755 | 347 + results/classifier/003/mistranslation/80604314 | 1483 ++++ results/classifier/003/network/05479587 | 86 + results/classifier/003/network/62179944 | 34 + results/classifier/003/other/02364653 | 366 + results/classifier/003/other/02572177 | 424 ++ results/classifier/003/other/12869209 | 91 + results/classifier/003/other/13442371 | 372 + results/classifier/003/other/14488057 | 714 ++ results/classifier/003/other/16056596 | 101 + results/classifier/003/other/16201167 | 103 + results/classifier/003/other/16228234 | 1847 +++++ results/classifier/003/other/17743720 | 774 ++ results/classifier/003/other/21221931 | 331 + results/classifier/003/other/21247035 | 1324 ++++ results/classifier/003/other/23300761 | 316 + results/classifier/003/other/23448582 | 268 + results/classifier/003/other/25892827 | 1080 +++ results/classifier/003/other/31349848 | 157 + results/classifier/003/other/32484936 | 226 + results/classifier/003/other/35170175 | 524 ++ results/classifier/003/other/42974450 | 432 ++ results/classifier/003/other/48245039 | 533 ++ results/classifier/003/other/55247116 | 1313 ++++ results/classifier/003/other/55367348 | 535 ++ results/classifier/003/other/55753058 | 296 + results/classifier/003/other/56309929 | 183 + results/classifier/003/other/56937788 | 347 + results/classifier/003/other/57195159 | 318 + results/classifier/003/other/57231878 | 245 + results/classifier/003/other/57756589 | 1424 ++++ results/classifier/003/other/59540920 | 379 + results/classifier/003/other/64571620 | 788 +++ results/classifier/003/other/65781993 | 2796 ++++++++ results/classifier/003/other/66743673 | 367 + results/classifier/003/other/68897003 | 719 ++ results/classifier/003/other/70021271 | 7451 +++++++++++++++++++ results/classifier/003/other/70416488 | 1182 ++++ results/classifier/003/other/74715356 | 129 + results/classifier/003/other/79834768 | 412 ++ results/classifier/003/other/81775929 | 238 + results/classifier/003/other/85542195 | 123 + results/classifier/003/other/88225572 | 2903 ++++++++ results/classifier/003/other/88281850 | 284 + results/classifier/003/other/92957605 | 421 ++ results/classifier/003/other/95154278 | 158 + results/classifier/003/other/99674399 | 151 + results/classifier/003/semantic/12360755 | 299 + results/classifier/003/semantic/28596630 | 116 + results/classifier/003/semantic/30680944 | 598 ++ results/classifier/003/semantic/46572227 | 409 ++ results/classifier/003/semantic/53568181 | 81 + results/classifier/003/semantic/80570214 | 403 ++ results/classifier/003/semantic/96782458 | 1002 +++ .../003/semantic/gitlab_semantic_addsubps | 31 + .../classifier/003/semantic/gitlab_semantic_adox | 44 + .../classifier/003/semantic/gitlab_semantic_bextr | 33 + .../classifier/003/semantic/gitlab_semantic_blsi | 28 + .../classifier/003/semantic/gitlab_semantic_blsmsk | 35 + .../classifier/003/semantic/gitlab_semantic_bzhi | 46 + results/classifier/004/KVM/04472277 | 584 ++ results/classifier/004/KVM/26430026 | 173 + results/classifier/004/KVM/43643137 | 546 ++ results/classifier/004/KVM/71456293 | 1494 ++++ results/classifier/004/KVM/80615920 | 356 + results/classifier/004/boot/51610399 | 316 + results/classifier/004/boot/60339453 | 69 + results/classifier/004/device/14488057 | 719 ++ results/classifier/004/device/24190340 | 2064 ++++++ results/classifier/004/device/24930826 | 41 + results/classifier/004/device/26095107 | 166 + results/classifier/004/device/28596630 | 121 + results/classifier/004/device/36568044 | 4589 ++++++++++++ 
results/classifier/004/device/42226390 | 195 + results/classifier/004/device/48245039 | 538 ++ results/classifier/004/device/57195159 | 323 + results/classifier/004/device/57231878 | 250 + results/classifier/004/device/67821138 | 207 + results/classifier/004/device/99674399 | 156 + results/classifier/004/graphic/22219210 | 51 + results/classifier/004/graphic/30680944 | 603 ++ results/classifier/004/graphic/46572227 | 414 ++ results/classifier/004/graphic/53568181 | 86 + results/classifier/004/graphic/55961334 | 47 + results/classifier/004/graphic/73660729 | 39 + results/classifier/004/instruction/11933524 | 1133 +++ results/classifier/004/instruction/50773216 | 118 + results/classifier/004/instruction/63565653 | 57 + results/classifier/004/instruction/70868267 | 48 + results/classifier/004/mistranslation/14887122 | 266 + results/classifier/004/mistranslation/23270873 | 700 ++ results/classifier/004/mistranslation/25842545 | 210 + results/classifier/004/mistranslation/64322995 | 62 + results/classifier/004/mistranslation/70294255 | 1069 +++ results/classifier/004/mistranslation/74466963 | 1886 +++++ results/classifier/004/mistranslation/74545755 | 352 + results/classifier/004/mistranslation/80604314 | 1488 ++++ results/classifier/004/network/05479587 | 91 + results/classifier/004/network/62179944 | 39 + results/classifier/004/other/02364653 | 371 + results/classifier/004/other/02572177 | 429 ++ results/classifier/004/other/12869209 | 96 + results/classifier/004/other/13442371 | 377 + results/classifier/004/other/16056596 | 106 + results/classifier/004/other/16201167 | 108 + results/classifier/004/other/16228234 | 1852 +++++ results/classifier/004/other/17743720 | 779 ++ results/classifier/004/other/21221931 | 336 + results/classifier/004/other/21247035 | 1329 ++++ results/classifier/004/other/23300761 | 321 + results/classifier/004/other/23448582 | 273 + results/classifier/004/other/25892827 | 1085 +++ results/classifier/004/other/31349848 | 162 + results/classifier/004/other/32484936 | 231 + results/classifier/004/other/35170175 | 529 ++ results/classifier/004/other/42974450 | 437 ++ results/classifier/004/other/55247116 | 1318 ++++ results/classifier/004/other/55367348 | 540 ++ results/classifier/004/other/55753058 | 301 + results/classifier/004/other/56309929 | 188 + results/classifier/004/other/56937788 | 352 + results/classifier/004/other/57756589 | 1429 ++++ results/classifier/004/other/59540920 | 384 + results/classifier/004/other/64571620 | 793 +++ results/classifier/004/other/65781993 | 2801 ++++++++ results/classifier/004/other/66743673 | 372 + results/classifier/004/other/68897003 | 724 ++ results/classifier/004/other/70021271 | 7456 ++++++++++++++++++++ results/classifier/004/other/70416488 | 1187 ++++ results/classifier/004/other/74715356 | 134 + results/classifier/004/other/79834768 | 417 ++ results/classifier/004/other/81775929 | 243 + results/classifier/004/other/85542195 | 128 + results/classifier/004/other/88225572 | 2908 ++++++++ results/classifier/004/other/88281850 | 289 + results/classifier/004/other/92957605 | 426 ++ results/classifier/004/other/95154278 | 163 + results/classifier/004/other/96782458 | 1007 +++ results/classifier/004/semantic/12360755 | 304 + .../004/semantic/gitlab_semantic_addsubps | 36 + .../classifier/004/semantic/gitlab_semantic_adox | 49 + .../classifier/004/semantic/gitlab_semantic_bextr | 38 + .../classifier/004/semantic/gitlab_semantic_blsi | 33 + .../classifier/004/semantic/gitlab_semantic_blsmsk | 40 + 
.../classifier/004/semantic/gitlab_semantic_bzhi | 51 + results/classifier/004/vnc/11357571 | 55 + results/classifier/004/vnc/33802194 | 4947 +++++++++++++ results/classifier/004/vnc/42613410 | 157 + results/classifier/004/vnc/80570214 | 408 ++ results/classifier/005/KVM/04472277 | 584 ++ results/classifier/005/KVM/26430026 | 173 + results/classifier/005/KVM/43643137 | 546 ++ results/classifier/005/KVM/71456293 | 1494 ++++ results/classifier/005/KVM/80615920 | 356 + results/classifier/005/assembly/48245039 | 538 ++ results/classifier/005/boot/51610399 | 316 + results/classifier/005/boot/60339453 | 69 + results/classifier/005/device/14488057 | 719 ++ results/classifier/005/device/24190340 | 2064 ++++++ results/classifier/005/device/24930826 | 41 + results/classifier/005/device/28596630 | 121 + results/classifier/005/device/42226390 | 195 + results/classifier/005/device/57195159 | 323 + results/classifier/005/device/57231878 | 250 + results/classifier/005/device/67821138 | 207 + results/classifier/005/device/99674399 | 156 + results/classifier/005/graphic/22219210 | 51 + results/classifier/005/graphic/30680944 | 603 ++ results/classifier/005/graphic/55961334 | 47 + results/classifier/005/graphic/73660729 | 39 + results/classifier/005/instruction/11933524 | 1133 +++ results/classifier/005/instruction/26095107 | 166 + results/classifier/005/instruction/50773216 | 118 + results/classifier/005/instruction/63565653 | 57 + results/classifier/005/instruction/70868267 | 48 + results/classifier/005/mistranslation/14887122 | 266 + results/classifier/005/mistranslation/23270873 | 700 ++ results/classifier/005/mistranslation/25842545 | 210 + results/classifier/005/mistranslation/36568044 | 4589 ++++++++++++ results/classifier/005/mistranslation/64322995 | 62 + results/classifier/005/mistranslation/70294255 | 1069 +++ results/classifier/005/mistranslation/74466963 | 1886 +++++ results/classifier/005/mistranslation/74545755 | 352 + results/classifier/005/mistranslation/80604314 | 1488 ++++ results/classifier/005/network/05479587 | 91 + results/classifier/005/network/62179944 | 39 + results/classifier/005/other/02364653 | 371 + results/classifier/005/other/02572177 | 429 ++ results/classifier/005/other/12869209 | 96 + results/classifier/005/other/13442371 | 377 + results/classifier/005/other/16056596 | 106 + results/classifier/005/other/16201167 | 108 + results/classifier/005/other/16228234 | 1852 +++++ results/classifier/005/other/17743720 | 779 ++ results/classifier/005/other/21221931 | 336 + results/classifier/005/other/21247035 | 1329 ++++ results/classifier/005/other/23300761 | 321 + results/classifier/005/other/23448582 | 273 + results/classifier/005/other/25892827 | 1085 +++ results/classifier/005/other/31349848 | 162 + results/classifier/005/other/32484936 | 231 + results/classifier/005/other/35170175 | 529 ++ results/classifier/005/other/42974450 | 437 ++ results/classifier/005/other/55247116 | 1318 ++++ results/classifier/005/other/55367348 | 540 ++ results/classifier/005/other/55753058 | 301 + results/classifier/005/other/56309929 | 188 + results/classifier/005/other/56937788 | 352 + results/classifier/005/other/57756589 | 1429 ++++ results/classifier/005/other/59540920 | 384 + results/classifier/005/other/64571620 | 793 +++ results/classifier/005/other/65781993 | 2801 ++++++++ results/classifier/005/other/66743673 | 372 + results/classifier/005/other/68897003 | 724 ++ results/classifier/005/other/70021271 | 7456 ++++++++++++++++++++ results/classifier/005/other/70416488 | 1187 ++++ 
results/classifier/005/other/74715356 | 134 + results/classifier/005/other/79834768 | 417 ++ results/classifier/005/other/81775929 | 243 + results/classifier/005/other/85542195 | 128 + results/classifier/005/other/88225572 | 2908 ++++++++ results/classifier/005/other/88281850 | 289 + results/classifier/005/other/92957605 | 426 ++ results/classifier/005/other/95154278 | 163 + results/classifier/005/semantic/12360755 | 304 + results/classifier/005/semantic/46572227 | 414 ++ results/classifier/005/semantic/53568181 | 86 + results/classifier/005/semantic/96782458 | 1007 +++ .../005/semantic/gitlab_semantic_addsubps | 36 + .../classifier/005/semantic/gitlab_semantic_adox | 49 + .../classifier/005/semantic/gitlab_semantic_bextr | 38 + .../classifier/005/semantic/gitlab_semantic_blsi | 33 + .../classifier/005/semantic/gitlab_semantic_blsmsk | 40 + .../classifier/005/semantic/gitlab_semantic_bzhi | 51 + results/classifier/005/vnc/11357571 | 55 + results/classifier/005/vnc/33802194 | 4947 +++++++++++++ results/classifier/005/vnc/42613410 | 157 + results/classifier/005/vnc/80570214 | 408 ++ results/classifier/01/instruction/11357571 | 47 - results/classifier/01/instruction/11933524 | 1125 --- results/classifier/01/instruction/24190340 | 2056 ------ results/classifier/01/instruction/26095107 | 158 - results/classifier/01/instruction/33802194 | 4939 ------------- results/classifier/01/instruction/42226390 | 187 - results/classifier/01/instruction/50773216 | 110 - results/classifier/01/instruction/51610399 | 308 - results/classifier/01/instruction/55961334 | 39 - results/classifier/01/instruction/62179944 | 31 - results/classifier/01/instruction/63565653 | 49 - results/classifier/01/instruction/70868267 | 40 - results/classifier/01/instruction/73660729 | 31 - results/classifier/01/mistranslation/14887122 | 258 - results/classifier/01/mistranslation/22219210 | 43 - results/classifier/01/mistranslation/23270873 | 692 -- results/classifier/01/mistranslation/24930826 | 33 - results/classifier/01/mistranslation/25842545 | 202 - results/classifier/01/mistranslation/26430026 | 165 - results/classifier/01/mistranslation/36568044 | 4581 ------------ results/classifier/01/mistranslation/64322995 | 54 - results/classifier/01/mistranslation/70294255 | 1061 --- results/classifier/01/mistranslation/71456293 | 1486 ---- results/classifier/01/mistranslation/74466963 | 1878 ----- results/classifier/01/mistranslation/74545755 | 344 - results/classifier/01/mistranslation/80604314 | 1480 ---- results/classifier/01/mistranslation/80615920 | 348 - results/classifier/01/other/02364653 | 363 - results/classifier/01/other/02572177 | 421 -- results/classifier/01/other/04472277 | 576 -- results/classifier/01/other/12869209 | 88 - results/classifier/01/other/13442371 | 369 - results/classifier/01/other/14488057 | 711 -- results/classifier/01/other/16056596 | 98 - results/classifier/01/other/16201167 | 100 - results/classifier/01/other/16228234 | 1844 ----- results/classifier/01/other/17743720 | 771 -- results/classifier/01/other/21221931 | 328 - results/classifier/01/other/21247035 | 1321 ---- results/classifier/01/other/23300761 | 313 - results/classifier/01/other/23448582 | 265 - results/classifier/01/other/25892827 | 1077 --- results/classifier/01/other/31349848 | 154 - results/classifier/01/other/32484936 | 223 - results/classifier/01/other/35170175 | 521 -- results/classifier/01/other/42613410 | 149 - results/classifier/01/other/42974450 | 429 -- results/classifier/01/other/43643137 | 538 -- 
results/classifier/01/other/48245039 | 530 -- results/classifier/01/other/55247116 | 1310 ---- results/classifier/01/other/55367348 | 532 -- results/classifier/01/other/55753058 | 293 - results/classifier/01/other/56309929 | 180 - results/classifier/01/other/56937788 | 344 - results/classifier/01/other/57195159 | 315 - results/classifier/01/other/57231878 | 242 - results/classifier/01/other/57756589 | 1421 ---- results/classifier/01/other/59540920 | 376 - results/classifier/01/other/60339453 | 61 - results/classifier/01/other/64571620 | 785 --- results/classifier/01/other/65781993 | 2793 -------- results/classifier/01/other/66743673 | 364 - results/classifier/01/other/67821138 | 199 - results/classifier/01/other/68897003 | 716 -- results/classifier/01/other/70021271 | 7448 ------------------- results/classifier/01/other/70416488 | 1179 ---- results/classifier/01/other/74715356 | 126 - results/classifier/01/other/79834768 | 409 -- results/classifier/01/other/81775929 | 235 - results/classifier/01/other/85542195 | 120 - results/classifier/01/other/88225572 | 2900 -------- results/classifier/01/other/88281850 | 281 - results/classifier/01/other/92957605 | 418 -- results/classifier/01/other/95154278 | 155 - results/classifier/01/other/99674399 | 148 - results/classifier/01/semantic/05479587 | 83 - results/classifier/01/semantic/12360755 | 296 - results/classifier/01/semantic/28596630 | 113 - results/classifier/01/semantic/30680944 | 595 -- results/classifier/01/semantic/46572227 | 406 -- results/classifier/01/semantic/53568181 | 78 - results/classifier/01/semantic/80570214 | 400 -- results/classifier/01/semantic/96782458 | 999 --- .../01/semantic/gitlab_semantic_addsubps | 28 - .../classifier/01/semantic/gitlab_semantic_adox | 41 - .../classifier/01/semantic/gitlab_semantic_bextr | 30 - .../classifier/01/semantic/gitlab_semantic_blsi | 25 - .../classifier/01/semantic/gitlab_semantic_blsmsk | 32 - .../classifier/01/semantic/gitlab_semantic_bzhi | 43 - results/classifier/02/boot/42226390 | 188 - results/classifier/02/boot/51610399 | 309 - results/classifier/02/boot/60339453 | 62 - results/classifier/02/boot/67821138 | 200 - results/classifier/02/instruction/11357571 | 48 - results/classifier/02/instruction/11933524 | 1126 --- results/classifier/02/instruction/24190340 | 2057 ------ results/classifier/02/instruction/26095107 | 159 - results/classifier/02/instruction/33802194 | 4940 ------------- results/classifier/02/instruction/50773216 | 111 - results/classifier/02/instruction/55961334 | 40 - results/classifier/02/instruction/62179944 | 32 - results/classifier/02/instruction/63565653 | 50 - results/classifier/02/instruction/70868267 | 41 - results/classifier/02/instruction/73660729 | 32 - results/classifier/02/mistranslation/14887122 | 259 - results/classifier/02/mistranslation/22219210 | 44 - results/classifier/02/mistranslation/23270873 | 693 -- results/classifier/02/mistranslation/24930826 | 34 - results/classifier/02/mistranslation/25842545 | 203 - results/classifier/02/mistranslation/26430026 | 166 - results/classifier/02/mistranslation/36568044 | 4582 ------------ results/classifier/02/mistranslation/64322995 | 55 - results/classifier/02/mistranslation/70294255 | 1062 --- results/classifier/02/mistranslation/71456293 | 1487 ---- results/classifier/02/mistranslation/74466963 | 1879 ----- results/classifier/02/mistranslation/74545755 | 345 - results/classifier/02/mistranslation/80604314 | 1481 ---- results/classifier/02/mistranslation/80615920 | 349 - results/classifier/02/other/02364653 | 
364 - results/classifier/02/other/02572177 | 422 -- results/classifier/02/other/04472277 | 577 -- results/classifier/02/other/12869209 | 89 - results/classifier/02/other/13442371 | 370 - results/classifier/02/other/14488057 | 712 -- results/classifier/02/other/16056596 | 99 - results/classifier/02/other/16201167 | 101 - results/classifier/02/other/16228234 | 1845 ----- results/classifier/02/other/17743720 | 772 -- results/classifier/02/other/21221931 | 329 - results/classifier/02/other/21247035 | 1322 ---- results/classifier/02/other/23300761 | 314 - results/classifier/02/other/23448582 | 266 - results/classifier/02/other/25892827 | 1078 --- results/classifier/02/other/31349848 | 155 - results/classifier/02/other/32484936 | 224 - results/classifier/02/other/35170175 | 522 -- results/classifier/02/other/42613410 | 150 - results/classifier/02/other/42974450 | 430 -- results/classifier/02/other/43643137 | 539 -- results/classifier/02/other/48245039 | 531 -- results/classifier/02/other/55247116 | 1311 ---- results/classifier/02/other/55367348 | 533 -- results/classifier/02/other/55753058 | 294 - results/classifier/02/other/56309929 | 181 - results/classifier/02/other/56937788 | 345 - results/classifier/02/other/57195159 | 316 - results/classifier/02/other/57231878 | 243 - results/classifier/02/other/57756589 | 1422 ---- results/classifier/02/other/59540920 | 377 - results/classifier/02/other/64571620 | 786 --- results/classifier/02/other/65781993 | 2794 -------- results/classifier/02/other/66743673 | 365 - results/classifier/02/other/68897003 | 717 -- results/classifier/02/other/70021271 | 7449 ------------------- results/classifier/02/other/70416488 | 1180 ---- results/classifier/02/other/74715356 | 127 - results/classifier/02/other/79834768 | 410 -- results/classifier/02/other/81775929 | 236 - results/classifier/02/other/85542195 | 121 - results/classifier/02/other/88225572 | 2901 -------- results/classifier/02/other/88281850 | 282 - results/classifier/02/other/92957605 | 419 -- results/classifier/02/other/95154278 | 156 - results/classifier/02/other/99674399 | 149 - results/classifier/02/semantic/05479587 | 84 - results/classifier/02/semantic/12360755 | 297 - results/classifier/02/semantic/28596630 | 114 - results/classifier/02/semantic/30680944 | 596 -- results/classifier/02/semantic/46572227 | 407 -- results/classifier/02/semantic/53568181 | 79 - results/classifier/02/semantic/80570214 | 401 -- results/classifier/02/semantic/96782458 | 1000 --- .../02/semantic/gitlab_semantic_addsubps | 29 - .../classifier/02/semantic/gitlab_semantic_adox | 42 - .../classifier/02/semantic/gitlab_semantic_bextr | 31 - .../classifier/02/semantic/gitlab_semantic_blsi | 26 - .../classifier/02/semantic/gitlab_semantic_blsmsk | 33 - .../classifier/02/semantic/gitlab_semantic_bzhi | 44 - results/classifier/03/KVM/04472277 | 579 -- results/classifier/03/KVM/26430026 | 168 - results/classifier/03/KVM/33802194 | 4942 ------------- results/classifier/03/KVM/42613410 | 152 - results/classifier/03/KVM/43643137 | 541 -- results/classifier/03/KVM/55961334 | 42 - results/classifier/03/KVM/71456293 | 1489 ---- results/classifier/03/KVM/80615920 | 351 - results/classifier/03/boot/42226390 | 190 - results/classifier/03/boot/51610399 | 311 - results/classifier/03/boot/60339453 | 64 - results/classifier/03/boot/67821138 | 202 - results/classifier/03/instruction/11357571 | 50 - results/classifier/03/instruction/11933524 | 1128 --- results/classifier/03/instruction/24190340 | 2059 ------ 
results/classifier/03/instruction/26095107 | 161 - results/classifier/03/instruction/50773216 | 113 - results/classifier/03/instruction/63565653 | 52 - results/classifier/03/instruction/70868267 | 43 - results/classifier/03/instruction/73660729 | 34 - results/classifier/03/mistranslation/14887122 | 261 - results/classifier/03/mistranslation/22219210 | 46 - results/classifier/03/mistranslation/23270873 | 695 -- results/classifier/03/mistranslation/24930826 | 36 - results/classifier/03/mistranslation/25842545 | 205 - results/classifier/03/mistranslation/36568044 | 4584 ------------ results/classifier/03/mistranslation/64322995 | 57 - results/classifier/03/mistranslation/70294255 | 1064 --- results/classifier/03/mistranslation/74466963 | 1881 ----- results/classifier/03/mistranslation/74545755 | 347 - results/classifier/03/mistranslation/80604314 | 1483 ---- results/classifier/03/network/05479587 | 86 - results/classifier/03/network/62179944 | 34 - results/classifier/03/other/02364653 | 366 - results/classifier/03/other/02572177 | 424 -- results/classifier/03/other/12869209 | 91 - results/classifier/03/other/13442371 | 372 - results/classifier/03/other/14488057 | 714 -- results/classifier/03/other/16056596 | 101 - results/classifier/03/other/16201167 | 103 - results/classifier/03/other/16228234 | 1847 ----- results/classifier/03/other/17743720 | 774 -- results/classifier/03/other/21221931 | 331 - results/classifier/03/other/21247035 | 1324 ---- results/classifier/03/other/23300761 | 316 - results/classifier/03/other/23448582 | 268 - results/classifier/03/other/25892827 | 1080 --- results/classifier/03/other/31349848 | 157 - results/classifier/03/other/32484936 | 226 - results/classifier/03/other/35170175 | 524 -- results/classifier/03/other/42974450 | 432 -- results/classifier/03/other/48245039 | 533 -- results/classifier/03/other/55247116 | 1313 ---- results/classifier/03/other/55367348 | 535 -- results/classifier/03/other/55753058 | 296 - results/classifier/03/other/56309929 | 183 - results/classifier/03/other/56937788 | 347 - results/classifier/03/other/57195159 | 318 - results/classifier/03/other/57231878 | 245 - results/classifier/03/other/57756589 | 1424 ---- results/classifier/03/other/59540920 | 379 - results/classifier/03/other/64571620 | 788 --- results/classifier/03/other/65781993 | 2796 -------- results/classifier/03/other/66743673 | 367 - results/classifier/03/other/68897003 | 719 -- results/classifier/03/other/70021271 | 7451 ------------------- results/classifier/03/other/70416488 | 1182 ---- results/classifier/03/other/74715356 | 129 - results/classifier/03/other/79834768 | 412 -- results/classifier/03/other/81775929 | 238 - results/classifier/03/other/85542195 | 123 - results/classifier/03/other/88225572 | 2903 -------- results/classifier/03/other/88281850 | 284 - results/classifier/03/other/92957605 | 421 -- results/classifier/03/other/95154278 | 158 - results/classifier/03/other/99674399 | 151 - results/classifier/03/semantic/12360755 | 299 - results/classifier/03/semantic/28596630 | 116 - results/classifier/03/semantic/30680944 | 598 -- results/classifier/03/semantic/46572227 | 409 -- results/classifier/03/semantic/53568181 | 81 - results/classifier/03/semantic/80570214 | 403 -- results/classifier/03/semantic/96782458 | 1002 --- .../03/semantic/gitlab_semantic_addsubps | 31 - .../classifier/03/semantic/gitlab_semantic_adox | 44 - .../classifier/03/semantic/gitlab_semantic_bextr | 33 - .../classifier/03/semantic/gitlab_semantic_blsi | 28 - 
.../classifier/03/semantic/gitlab_semantic_blsmsk | 35 - .../classifier/03/semantic/gitlab_semantic_bzhi | 46 - results/classifier/04/KVM/04472277 | 584 -- results/classifier/04/KVM/26430026 | 173 - results/classifier/04/KVM/43643137 | 546 -- results/classifier/04/KVM/71456293 | 1494 ---- results/classifier/04/KVM/80615920 | 356 - results/classifier/04/assembly/48245039 | 538 -- results/classifier/04/boot/51610399 | 316 - results/classifier/04/boot/60339453 | 69 - results/classifier/04/device/14488057 | 719 -- results/classifier/04/device/24190340 | 2064 ------ results/classifier/04/device/24930826 | 41 - results/classifier/04/device/28596630 | 121 - results/classifier/04/device/42226390 | 195 - results/classifier/04/device/57195159 | 323 - results/classifier/04/device/57231878 | 250 - results/classifier/04/device/67821138 | 207 - results/classifier/04/device/99674399 | 156 - results/classifier/04/graphic/22219210 | 51 - results/classifier/04/graphic/30680944 | 603 -- results/classifier/04/graphic/55961334 | 47 - results/classifier/04/graphic/73660729 | 39 - results/classifier/04/instruction/11933524 | 1133 --- results/classifier/04/instruction/26095107 | 166 - results/classifier/04/instruction/50773216 | 118 - results/classifier/04/instruction/63565653 | 57 - results/classifier/04/instruction/70868267 | 48 - results/classifier/04/mistranslation/14887122 | 266 - results/classifier/04/mistranslation/23270873 | 700 -- results/classifier/04/mistranslation/25842545 | 210 - results/classifier/04/mistranslation/36568044 | 4589 ------------ results/classifier/04/mistranslation/64322995 | 62 - results/classifier/04/mistranslation/70294255 | 1069 --- results/classifier/04/mistranslation/74466963 | 1886 ----- results/classifier/04/mistranslation/74545755 | 352 - results/classifier/04/mistranslation/80604314 | 1488 ---- results/classifier/04/network/05479587 | 91 - results/classifier/04/network/62179944 | 39 - results/classifier/04/other/02364653 | 371 - results/classifier/04/other/02572177 | 429 -- results/classifier/04/other/12869209 | 96 - results/classifier/04/other/13442371 | 377 - results/classifier/04/other/16056596 | 106 - results/classifier/04/other/16201167 | 108 - results/classifier/04/other/16228234 | 1852 ----- results/classifier/04/other/17743720 | 779 -- results/classifier/04/other/21221931 | 336 - results/classifier/04/other/21247035 | 1329 ---- results/classifier/04/other/23300761 | 321 - results/classifier/04/other/23448582 | 273 - results/classifier/04/other/25892827 | 1085 --- results/classifier/04/other/31349848 | 162 - results/classifier/04/other/32484936 | 231 - results/classifier/04/other/35170175 | 529 -- results/classifier/04/other/42974450 | 437 -- results/classifier/04/other/55247116 | 1318 ---- results/classifier/04/other/55367348 | 540 -- results/classifier/04/other/55753058 | 301 - results/classifier/04/other/56309929 | 188 - results/classifier/04/other/56937788 | 352 - results/classifier/04/other/57756589 | 1429 ---- results/classifier/04/other/59540920 | 384 - results/classifier/04/other/64571620 | 793 --- results/classifier/04/other/65781993 | 2801 -------- results/classifier/04/other/66743673 | 372 - results/classifier/04/other/68897003 | 724 -- results/classifier/04/other/70021271 | 7456 -------------------- results/classifier/04/other/70416488 | 1187 ---- results/classifier/04/other/74715356 | 134 - results/classifier/04/other/79834768 | 417 -- results/classifier/04/other/81775929 | 243 - results/classifier/04/other/85542195 | 128 - 
results/classifier/04/other/88225572 | 2908 -------- results/classifier/04/other/88281850 | 289 - results/classifier/04/other/92957605 | 426 -- results/classifier/04/other/95154278 | 163 - results/classifier/04/semantic/12360755 | 304 - results/classifier/04/semantic/46572227 | 414 -- results/classifier/04/semantic/53568181 | 86 - results/classifier/04/semantic/96782458 | 1007 --- .../04/semantic/gitlab_semantic_addsubps | 36 - .../classifier/04/semantic/gitlab_semantic_adox | 49 - .../classifier/04/semantic/gitlab_semantic_bextr | 38 - .../classifier/04/semantic/gitlab_semantic_blsi | 33 - .../classifier/04/semantic/gitlab_semantic_blsmsk | 40 - .../classifier/04/semantic/gitlab_semantic_bzhi | 51 - results/classifier/04/vnc/11357571 | 55 - results/classifier/04/vnc/33802194 | 4947 ------------- results/classifier/04/vnc/42613410 | 157 - results/classifier/04/vnc/80570214 | 408 -- results/classifier/05/KVM/04472277 | 584 -- results/classifier/05/KVM/26430026 | 173 - results/classifier/05/KVM/43643137 | 546 -- results/classifier/05/KVM/71456293 | 1494 ---- results/classifier/05/KVM/80615920 | 356 - results/classifier/05/boot/51610399 | 316 - results/classifier/05/boot/60339453 | 69 - results/classifier/05/device/14488057 | 719 -- results/classifier/05/device/24190340 | 2064 ------ results/classifier/05/device/24930826 | 41 - results/classifier/05/device/26095107 | 166 - results/classifier/05/device/28596630 | 121 - results/classifier/05/device/36568044 | 4589 ------------ results/classifier/05/device/42226390 | 195 - results/classifier/05/device/48245039 | 538 -- results/classifier/05/device/57195159 | 323 - results/classifier/05/device/57231878 | 250 - results/classifier/05/device/67821138 | 207 - results/classifier/05/device/99674399 | 156 - results/classifier/05/graphic/22219210 | 51 - results/classifier/05/graphic/30680944 | 603 -- results/classifier/05/graphic/46572227 | 414 -- results/classifier/05/graphic/53568181 | 86 - results/classifier/05/graphic/55961334 | 47 - results/classifier/05/graphic/73660729 | 39 - results/classifier/05/instruction/11933524 | 1133 --- results/classifier/05/instruction/50773216 | 118 - results/classifier/05/instruction/63565653 | 57 - results/classifier/05/instruction/70868267 | 48 - results/classifier/05/mistranslation/14887122 | 266 - results/classifier/05/mistranslation/23270873 | 700 -- results/classifier/05/mistranslation/25842545 | 210 - results/classifier/05/mistranslation/64322995 | 62 - results/classifier/05/mistranslation/70294255 | 1069 --- results/classifier/05/mistranslation/74466963 | 1886 ----- results/classifier/05/mistranslation/74545755 | 352 - results/classifier/05/mistranslation/80604314 | 1488 ---- results/classifier/05/network/05479587 | 91 - results/classifier/05/network/62179944 | 39 - results/classifier/05/other/02364653 | 371 - results/classifier/05/other/02572177 | 429 -- results/classifier/05/other/12869209 | 96 - results/classifier/05/other/13442371 | 377 - results/classifier/05/other/16056596 | 106 - results/classifier/05/other/16201167 | 108 - results/classifier/05/other/16228234 | 1852 ----- results/classifier/05/other/17743720 | 779 -- results/classifier/05/other/21221931 | 336 - results/classifier/05/other/21247035 | 1329 ---- results/classifier/05/other/23300761 | 321 - results/classifier/05/other/23448582 | 273 - results/classifier/05/other/25892827 | 1085 --- results/classifier/05/other/31349848 | 162 - results/classifier/05/other/32484936 | 231 - results/classifier/05/other/35170175 | 529 -- 
results/classifier/05/other/42974450 | 437 -- results/classifier/05/other/55247116 | 1318 ---- results/classifier/05/other/55367348 | 540 -- results/classifier/05/other/55753058 | 301 - results/classifier/05/other/56309929 | 188 - results/classifier/05/other/56937788 | 352 - results/classifier/05/other/57756589 | 1429 ---- results/classifier/05/other/59540920 | 384 - results/classifier/05/other/64571620 | 793 --- results/classifier/05/other/65781993 | 2801 -------- results/classifier/05/other/66743673 | 372 - results/classifier/05/other/68897003 | 724 -- results/classifier/05/other/70021271 | 7456 -------------------- results/classifier/05/other/70416488 | 1187 ---- results/classifier/05/other/74715356 | 134 - results/classifier/05/other/79834768 | 417 -- results/classifier/05/other/81775929 | 243 - results/classifier/05/other/85542195 | 128 - results/classifier/05/other/88225572 | 2908 -------- results/classifier/05/other/88281850 | 289 - results/classifier/05/other/92957605 | 426 -- results/classifier/05/other/95154278 | 163 - results/classifier/05/other/96782458 | 1007 --- results/classifier/05/semantic/12360755 | 304 - .../05/semantic/gitlab_semantic_addsubps | 36 - .../classifier/05/semantic/gitlab_semantic_adox | 49 - .../classifier/05/semantic/gitlab_semantic_bextr | 38 - .../classifier/05/semantic/gitlab_semantic_blsi | 33 - .../classifier/05/semantic/gitlab_semantic_blsmsk | 40 - .../classifier/05/semantic/gitlab_semantic_bzhi | 51 - results/classifier/05/vnc/11357571 | 55 - results/classifier/05/vnc/33802194 | 4947 ------------- results/classifier/05/vnc/42613410 | 157 - results/classifier/05/vnc/80570214 | 408 -- results/classifier/105/KVM/04472277 | 584 ++ results/classifier/105/KVM/1002 | 14 + results/classifier/105/KVM/1009 | 36 + results/classifier/105/KVM/1035 | 29 + results/classifier/105/KVM/1042561 | 50 + results/classifier/105/KVM/1045 | 39 + results/classifier/105/KVM/1063807 | 86 + results/classifier/105/KVM/1064 | 56 + results/classifier/105/KVM/1073952 | 47 + results/classifier/105/KVM/1093360 | 31 + results/classifier/105/KVM/110 | 14 + results/classifier/105/KVM/1135567 | 51 + results/classifier/105/KVM/1136 | 14 + results/classifier/105/KVM/1138 | 14 + results/classifier/105/KVM/1155 | 40 + results/classifier/105/KVM/1156632 | 49 + results/classifier/105/KVM/1173490 | 41 + results/classifier/105/KVM/1191326 | 242 + results/classifier/105/KVM/1198 | 66 + results/classifier/105/KVM/1201447 | 41 + results/classifier/105/KVM/1202289 | 103 + results/classifier/105/KVM/1203 | 58 + results/classifier/105/KVM/1215 | 85 + results/classifier/105/KVM/1224444 | 657 ++ results/classifier/105/KVM/1243639 | 127 + results/classifier/105/KVM/1253777 | 101 + results/classifier/105/KVM/1259499 | 85 + results/classifier/105/KVM/1268596 | 66 + results/classifier/105/KVM/1288259 | 65 + results/classifier/105/KVM/1294227 | 45 + results/classifier/105/KVM/1307281 | 28 + results/classifier/105/KVM/1307656 | 70 + results/classifier/105/KVM/1312668 | 71 + results/classifier/105/KVM/1344 | 14 + results/classifier/105/KVM/1352179 | 46 + results/classifier/105/KVM/1353149 | 52 + results/classifier/105/KVM/1383857 | 211 + results/classifier/105/KVM/139 | 14 + results/classifier/105/KVM/1398 | 19 + results/classifier/105/KVM/1399191 | 1189 ++++ results/classifier/105/KVM/1405385 | 278 + results/classifier/105/KVM/1408152 | 29 + results/classifier/105/KVM/1435359 | 61 + results/classifier/105/KVM/1441443 | 53 + results/classifier/105/KVM/1446726 | 52 + results/classifier/105/KVM/1456804 | 340 + 
results/classifier/105/KVM/1463143 | 126 + results/classifier/105/KVM/1465935 | 302 + results/classifier/105/KVM/1470481 | 27 + results/classifier/105/KVM/1481375 | 43 + results/classifier/105/KVM/1484 | 42 + results/classifier/105/KVM/1500935 | 32 + results/classifier/105/KVM/1502884 | 60 + results/classifier/105/KVM/1502934 | 42 + results/classifier/105/KVM/1518969 | 49 + results/classifier/105/KVM/1529187 | 56 + results/classifier/105/KVM/1532 | 516 ++ results/classifier/105/KVM/1545 | 22 + results/classifier/105/KVM/1547012 | 53 + results/classifier/105/KVM/1553999 | 40 + results/classifier/105/KVM/1557057 | 91 + results/classifier/105/KVM/1559 | 20 + results/classifier/105/KVM/1565 | 47 + results/classifier/105/KVM/1570 | 76 + results/classifier/105/KVM/1574246 | 93 + results/classifier/105/KVM/1576347 | 303 + results/classifier/105/KVM/1579645 | 31 + results/classifier/105/KVM/1583 | 32 + results/classifier/105/KVM/1583775 | 52 + results/classifier/105/KVM/1591628 | 193 + results/classifier/105/KVM/1597 | 69 + results/classifier/105/KVM/1626596 | 36 + results/classifier/105/KVM/1626972 | 3869 ++++++++++ results/classifier/105/KVM/1635339 | 826 +++ results/classifier/105/KVM/1637511 | 43 + results/classifier/105/KVM/1642011 | 36 + results/classifier/105/KVM/1652 | 45 + results/classifier/105/KVM/1663287 | 126 + results/classifier/105/KVM/1671876 | 195 + results/classifier/105/KVM/1673722 | 126 + results/classifier/105/KVM/1675458 | 94 + results/classifier/105/KVM/1677247 | 34 + results/classifier/105/KVM/1681688 | 224 + results/classifier/105/KVM/1682128 | 19 + results/classifier/105/KVM/1686350 | 64 + results/classifier/105/KVM/1688 | 46 + results/classifier/105/KVM/1699824 | 296 + results/classifier/105/KVM/1706058 | 110 + results/classifier/105/KVM/1706296 | 441 ++ results/classifier/105/KVM/1709784 | 346 + results/classifier/105/KVM/1715203 | 329 + results/classifier/105/KVM/1723731 | 41 + results/classifier/105/KVM/1723927 | 118 + results/classifier/105/KVM/1726 | 110 + results/classifier/105/KVM/1728615 | 538 ++ results/classifier/105/KVM/1728657 | 134 + results/classifier/105/KVM/1731957 | 56 + results/classifier/105/KVM/1752026 | 1262 ++++ results/classifier/105/KVM/1754038 | 354 + results/classifier/105/KVM/1756728 | 35 + results/classifier/105/KVM/1758819 | 351 + results/classifier/105/KVM/1769189 | 142 + results/classifier/105/KVM/1770724 | 93 + results/classifier/105/KVM/1775702 | 96 + results/classifier/105/KVM/1776920 | 470 ++ results/classifier/105/KVM/1778966 | 49 + results/classifier/105/KVM/1788582 | 87 + results/classifier/105/KVM/1792523 | 90 + results/classifier/105/KVM/1798057 | 55 + results/classifier/105/KVM/1807052 | 375 + results/classifier/105/KVM/1808 | 84 + results/classifier/105/KVM/1808928 | 126 + results/classifier/105/KVM/1814418 | 543 ++ results/classifier/105/KVM/1814420 | 45 + results/classifier/105/KVM/1821771 | 103 + results/classifier/105/KVM/1830821 | 47 + results/classifier/105/KVM/1833204 | 137 + results/classifier/105/KVM/1834051 | 40 + results/classifier/105/KVM/1835466 | 263 + results/classifier/105/KVM/1836763 | 132 + results/classifier/105/KVM/1838569 | 111 + results/classifier/105/KVM/1843073 | 225 + results/classifier/105/KVM/1847440 | 562 ++ results/classifier/105/KVM/1848244 | 29 + results/classifier/105/KVM/1851972 | 135 + results/classifier/105/KVM/1852781 | 84 + results/classifier/105/KVM/1853123 | 62 + results/classifier/105/KVM/1859310 | 36 + results/classifier/105/KVM/1860759 | 150 + results/classifier/105/KVM/1862874 | 118 + 
results/classifier/105/KVM/1863819 | 59 + results/classifier/105/KVM/1865160 | 92 + results/classifier/105/KVM/1873340 | 39 + results/classifier/105/KVM/1873344 | 34 + results/classifier/105/KVM/1874888 | 113 + results/classifier/105/KVM/1877052 | 61 + results/classifier/105/KVM/1878250 | 67 + results/classifier/105/KVM/1878642 | 70 + results/classifier/105/KVM/1878645 | 1074 +++ results/classifier/105/KVM/1879646 | 52 + results/classifier/105/KVM/1880355 | 148 + results/classifier/105/KVM/1880507 | 25 + results/classifier/105/KVM/1883732 | 134 + results/classifier/105/KVM/1883733 | 370 + results/classifier/105/KVM/1888601 | 329 + results/classifier/105/KVM/1889945 | 161 + results/classifier/105/KVM/1891354 | 406 ++ results/classifier/105/KVM/1892963 | 339 + results/classifier/105/KVM/1892966 | 202 + results/classifier/105/KVM/1897783 | 169 + results/classifier/105/KVM/1902612 | 63 + results/classifier/105/KVM/1906 | 47 + results/classifier/105/KVM/1908513 | 81 + results/classifier/105/KVM/1908832 | 267 + results/classifier/105/KVM/1910941 | 143 + results/classifier/105/KVM/1912224 | 384 + results/classifier/105/KVM/1914353 | 74 + results/classifier/105/KVM/1914696 | 137 + results/classifier/105/KVM/1914748 | 37 + results/classifier/105/KVM/1915539 | 120 + results/classifier/105/KVM/1919036 | 178 + results/classifier/105/KVM/1919169 | 34 + results/classifier/105/KVM/1921635 | 172 + results/classifier/105/KVM/1922430 | 84 + results/classifier/105/KVM/1924231 | 129 + results/classifier/105/KVM/1924914 | 103 + results/classifier/105/KVM/1926596 | 80 + results/classifier/105/KVM/1926782 | 76 + results/classifier/105/KVM/1926952 | 239 + results/classifier/105/KVM/1936 | 14 + results/classifier/105/KVM/1941 | 115 + results/classifier/105/KVM/1967814 | 438 ++ results/classifier/105/KVM/1977 | 43 + results/classifier/105/KVM/2041 | 40 + results/classifier/105/KVM/2046 | 14 + results/classifier/105/KVM/2060 | 16 + results/classifier/105/KVM/2071 | 125 + results/classifier/105/KVM/2110 | 24 + results/classifier/105/KVM/2165 | 81 + results/classifier/105/KVM/2265 | 61 + results/classifier/105/KVM/2313 | 30 + results/classifier/105/KVM/2321 | 53 + results/classifier/105/KVM/2325 | 24 + results/classifier/105/KVM/2334 | 265 + results/classifier/105/KVM/2363 | 14 + results/classifier/105/KVM/239 | 14 + results/classifier/105/KVM/2392 | 14 + results/classifier/105/KVM/2394 | 42 + results/classifier/105/KVM/2398 | 75 + results/classifier/105/KVM/2412 | 113 + results/classifier/105/KVM/2436 | 14 + results/classifier/105/KVM/2445 | 100 + results/classifier/105/KVM/2447 | 36 + results/classifier/105/KVM/252 | 14 + results/classifier/105/KVM/2548 | 419 ++ results/classifier/105/KVM/2566 | 132 + results/classifier/105/KVM/2567 | 91 + results/classifier/105/KVM/2573 | 22 + results/classifier/105/KVM/2589 | 69 + results/classifier/105/KVM/2631 | 94 + results/classifier/105/KVM/26430026 | 173 + results/classifier/105/KVM/2650 | 205 + results/classifier/105/KVM/2658 | 14 + results/classifier/105/KVM/2692 | 14 + results/classifier/105/KVM/2779 | 54 + results/classifier/105/KVM/2927 | 184 + results/classifier/105/KVM/2950 | 78 + results/classifier/105/KVM/391880 | 27 + results/classifier/105/KVM/412 | 14 + results/classifier/105/KVM/43643137 | 546 ++ results/classifier/105/KVM/498 | 55 + results/classifier/105/KVM/504368 | 191 + results/classifier/105/KVM/528 | 14 + results/classifier/105/KVM/530077 | 29 + results/classifier/105/KVM/563 | 14 + results/classifier/105/KVM/584514 | 49 + 
results/classifier/105/KVM/589231 | 27 + results/classifier/105/KVM/599574 | 30 + results/classifier/105/KVM/612297 | 31 + results/classifier/105/KVM/612452 | 140 + results/classifier/105/KVM/642304 | 59 + results/classifier/105/KVM/645524 | 25 + results/classifier/105/KVM/71456293 | 1494 ++++ results/classifier/105/KVM/721659 | 53 + results/classifier/105/KVM/73 | 14 + results/classifier/105/KVM/735 | 39 + results/classifier/105/KVM/735752 | 224 + results/classifier/105/KVM/747583 | 40 + results/classifier/105/KVM/748 | 14 + results/classifier/105/KVM/772275 | 57 + results/classifier/105/KVM/785668 | 420 ++ results/classifier/105/KVM/797905 | 65 + results/classifier/105/KVM/80615920 | 356 + results/classifier/105/KVM/809 | 14 + results/classifier/105/KVM/810588 | 52 + results/classifier/105/KVM/816860 | 44 + results/classifier/105/KVM/819 | 88 + results/classifier/105/KVM/902720 | 129 + results/classifier/105/KVM/903 | 368 + results/classifier/105/KVM/905095 | 391 + results/classifier/105/KVM/916 | 24 + results/classifier/105/KVM/920772 | 55 + results/classifier/105/KVM/922355 | 27 + results/classifier/105/KVM/954 | 1270 ++++ results/classifier/105/KVM/957 | 84 + results/classifier/105/KVM/961 | 14 + results/classifier/105/KVM/964 | 53 + results/classifier/105/KVM/965867 | 315 + results/classifier/105/KVM/966316 | 37 + results/classifier/105/KVM/992067 | 46 + results/classifier/105/assembly/1098729 | 177 + results/classifier/105/assembly/1396497 | 100 + results/classifier/105/assembly/1402755 | 89 + results/classifier/105/assembly/1490611 | 356 + results/classifier/105/assembly/1520 | 62 + results/classifier/105/assembly/1548170 | 152 + results/classifier/105/assembly/1605 | 51 + results/classifier/105/assembly/1612 | 64 + results/classifier/105/assembly/1620 | 107 + results/classifier/105/assembly/1649 | 30 + results/classifier/105/assembly/1662050 | 373 + results/classifier/105/assembly/1724570 | 95 + results/classifier/105/assembly/1772262 | 67 + results/classifier/105/assembly/1787002 | 37 + results/classifier/105/assembly/1806114 | 93 + results/classifier/105/assembly/1847793 | 265 + results/classifier/105/assembly/1850000 | 156 + results/classifier/105/assembly/1852196 | 95 + results/classifier/105/assembly/1862167 | 32 + results/classifier/105/assembly/1877136 | 77 + results/classifier/105/assembly/1882671 | 797 +++ results/classifier/105/assembly/1883784 | 61 + results/classifier/105/assembly/2013 | 91 + results/classifier/105/assembly/2180 | 49 + results/classifier/105/assembly/2186 | 47 + results/classifier/105/assembly/2303 | 84 + results/classifier/105/assembly/2463 | 22 + results/classifier/105/assembly/2677 | 14 + results/classifier/105/assembly/2871 | 14 + results/classifier/105/assembly/494 | 14 + results/classifier/105/assembly/536 | 14 + results/classifier/105/assembly/710 | 14 + results/classifier/105/assembly/811683 | 319 + results/classifier/105/assembly/884401 | 83 + results/classifier/105/assembly/904 | 29 + results/classifier/105/assembly/968 | 108 + results/classifier/105/boot/1018 | 36 + results/classifier/105/boot/1021649 | 125 + results/classifier/105/boot/1026176 | 39 + results/classifier/105/boot/1055090 | 39 + results/classifier/105/boot/1074 | 31 + results/classifier/105/boot/1089006 | 198 + results/classifier/105/boot/1101 | 25 + results/classifier/105/boot/1119281 | 826 +++ results/classifier/105/boot/1129957 | 66 + results/classifier/105/boot/1131 | 33 + results/classifier/105/boot/1212402 | 173 + results/classifier/105/boot/1221797 | 56 + 
results/classifier/105/boot/1256548 | 29 + results/classifier/105/boot/1273944 | 73 + results/classifier/105/boot/1280 | 21 + results/classifier/105/boot/1285508 | 53 + results/classifier/105/boot/1289898 | 130 + results/classifier/105/boot/1290558 | 212 + results/classifier/105/boot/1314667 | 126 + results/classifier/105/boot/1320 | 25 + results/classifier/105/boot/1348 | 14 + results/classifier/105/boot/1426472 | 51 + results/classifier/105/boot/1481750 | 49 + results/classifier/105/boot/1488212 | 29 + results/classifier/105/boot/1505062 | 37 + results/classifier/105/boot/1516 | 52 + results/classifier/105/boot/1522 | 53 + results/classifier/105/boot/1534978 | 39 + results/classifier/105/boot/1587535 | 52 + results/classifier/105/boot/1589 | 23 + results/classifier/105/boot/1589153 | 62 + results/classifier/105/boot/1589257 | 34 + results/classifier/105/boot/1595 | 42 + results/classifier/105/boot/1605045 | 29 + results/classifier/105/boot/1624726 | 53 + results/classifier/105/boot/1638 | 32 + results/classifier/105/boot/1652286 | 51 + results/classifier/105/boot/1688231 | 85 + results/classifier/105/boot/1689 | 24 + results/classifier/105/boot/1694808 | 56 + results/classifier/105/boot/1696 | 52 + results/classifier/105/boot/1718719 | 147 + results/classifier/105/boot/1732177 | 23 + results/classifier/105/boot/1734474 | 72 + results/classifier/105/boot/1745 | 80 + results/classifier/105/boot/1748296 | 95 + results/classifier/105/boot/1753314 | 67 + results/classifier/105/boot/1754656 | 187 + results/classifier/105/boot/1756538 | 38 + results/classifier/105/boot/1797262 | 54 + results/classifier/105/boot/1811 | 49 + results/classifier/105/boot/1823998 | 35 + results/classifier/105/boot/1826 | 42 + results/classifier/105/boot/1829498 | 62 + results/classifier/105/boot/1831115 | 163 + results/classifier/105/boot/1835694 | 419 ++ results/classifier/105/boot/1836136 | 25 + results/classifier/105/boot/1838390 | 97 + results/classifier/105/boot/1838658 | 177 + results/classifier/105/boot/1840920 | 27 + results/classifier/105/boot/1853429 | 41 + results/classifier/105/boot/1859106 | 65 + results/classifier/105/boot/1859254 | 73 + results/classifier/105/boot/1859916 | 79 + results/classifier/105/boot/1860742 | 75 + results/classifier/105/boot/1862110 | 111 + results/classifier/105/boot/1862619 | 94 + results/classifier/105/boot/1863508 | 52 + results/classifier/105/boot/1872644 | 72 + results/classifier/105/boot/1873338 | 84 + results/classifier/105/boot/1874264 | 423 ++ results/classifier/105/boot/1879590 | 86 + results/classifier/105/boot/1883593 | 67 + results/classifier/105/boot/1888417 | 50 + results/classifier/105/boot/1890290 | 151 + results/classifier/105/boot/1896754 | 53 + results/classifier/105/boot/1906181 | 59 + results/classifier/105/boot/1910 | 75 + results/classifier/105/boot/1914117 | 449 ++ results/classifier/105/boot/1921280 | 53 + results/classifier/105/boot/2034 | 21 + results/classifier/105/boot/2183 | 33 + results/classifier/105/boot/2193 | 43 + results/classifier/105/boot/2212 | 30 + results/classifier/105/boot/2337 | 75 + results/classifier/105/boot/2343 | 44 + results/classifier/105/boot/2360 | 43 + results/classifier/105/boot/2400 | 56 + results/classifier/105/boot/2557 | 14 + results/classifier/105/boot/2585 | 20 + results/classifier/105/boot/2620 | 22 + results/classifier/105/boot/2699 | 31 + results/classifier/105/boot/2705 | 30 + results/classifier/105/boot/2739 | 16 + results/classifier/105/boot/2754 | 39 + results/classifier/105/boot/2782 | 23 + 
results/classifier/105/boot/2788 | 26 + results/classifier/105/boot/2810 | 14 + results/classifier/105/boot/2863 | 14 + results/classifier/105/boot/2957 | 41 + results/classifier/105/boot/2959 | 90 + results/classifier/105/boot/2961 | 44 + results/classifier/105/boot/2984 | 66 + results/classifier/105/boot/436 | 14 + results/classifier/105/boot/475 | 14 + results/classifier/105/boot/499 | 14 + results/classifier/105/boot/51610399 | 316 + results/classifier/105/boot/586175 | 460 ++ results/classifier/105/boot/587 | 14 + results/classifier/105/boot/60339453 | 69 + results/classifier/105/boot/622 | 14 + results/classifier/105/boot/627982 | 44 + results/classifier/105/boot/660060 | 52 + results/classifier/105/boot/669 | 36 + results/classifier/105/boot/688052 | 58 + results/classifier/105/boot/692570 | 124 + results/classifier/105/boot/700276 | 49 + results/classifier/105/boot/708 | 23 + results/classifier/105/boot/744856 | 28 + results/classifier/105/boot/786 | 30 + results/classifier/105/boot/797 | 22 + results/classifier/105/boot/808737 | 138 + results/classifier/105/boot/822408 | 83 + results/classifier/105/boot/830833 | 127 + results/classifier/105/boot/836 | 98 + results/classifier/105/boot/87 | 14 + results/classifier/105/boot/886 | 29 + results/classifier/105/boot/888016 | 38 + results/classifier/105/boot/899961 | 170 + results/classifier/105/boot/907 | 20 + results/classifier/105/boot/973 | 32 + results/classifier/105/boot/985 | 72 + results/classifier/105/boot/997 | 30 + results/classifier/105/device/1001 | 14 + results/classifier/105/device/1006 | 16 + results/classifier/105/device/101 | 14 + results/classifier/105/device/1013 | 14 + results/classifier/105/device/1014 | 16 + results/classifier/105/device/1017793 | 21 + results/classifier/105/device/102 | 14 + results/classifier/105/device/1020309 | 170 + results/classifier/105/device/1021 | 22 + results/classifier/105/device/1024 | 23 + results/classifier/105/device/1026 | 129 + results/classifier/105/device/103 | 14 + results/classifier/105/device/1032 | 29 + results/classifier/105/device/1033 | 40 + results/classifier/105/device/1034980 | 35 + results/classifier/105/device/1039 | 14 + results/classifier/105/device/1042 | 33 + results/classifier/105/device/1043 | 23 + results/classifier/105/device/1044 | 14 + results/classifier/105/device/1047999 | 133 + results/classifier/105/device/1049 | 14 + results/classifier/105/device/1050823 | 35 + results/classifier/105/device/1054812 | 24 + results/classifier/105/device/1056 | 14 + results/classifier/105/device/106 | 14 + results/classifier/105/device/1066 | 45 + results/classifier/105/device/107 | 14 + results/classifier/105/device/1070 | 23 + results/classifier/105/device/1070762 | 59 + results/classifier/105/device/1072 | 37 + results/classifier/105/device/1073 | 42 + results/classifier/105/device/1076 | 25 + results/classifier/105/device/1077 | 14 + results/classifier/105/device/1078 | 57 + results/classifier/105/device/1079 | 45 + results/classifier/105/device/1080 | 14 + results/classifier/105/device/1083 | 14 + results/classifier/105/device/1087 | 14 + results/classifier/105/device/1088 | 14 + results/classifier/105/device/1090558 | 27 + results/classifier/105/device/1090602 | 31 + results/classifier/105/device/1091 | 26 + results/classifier/105/device/1094950 | 339 + results/classifier/105/device/1097 | 14 + results/classifier/105/device/1099 | 14 + results/classifier/105/device/1103 | 14 + results/classifier/105/device/1104 | 14 + results/classifier/105/device/1105 | 16 + 
results/classifier/105/device/1106 | 22 + results/classifier/105/device/1108 | 14 + results/classifier/105/device/1110 | 16 + results/classifier/105/device/1112 | 14 + results/classifier/105/device/1114 | 14 + results/classifier/105/device/112 | 14 + results/classifier/105/device/1124 | 14 + results/classifier/105/device/1125 | 16 + results/classifier/105/device/1127053 | 100 + results/classifier/105/device/1130 | 42 + results/classifier/105/device/1132 | 128 + results/classifier/105/device/1133 | 23 + results/classifier/105/device/1133668 | 24 + results/classifier/105/device/1134 | 16 + results/classifier/105/device/1135 | 25 + results/classifier/105/device/1137 | 48 + results/classifier/105/device/114 | 14 + results/classifier/105/device/1140 | 14 + results/classifier/105/device/1149 | 22 + results/classifier/105/device/115 | 14 + results/classifier/105/device/1154 | 14 + results/classifier/105/device/1155677 | 40 + results/classifier/105/device/116 | 14 + results/classifier/105/device/1160 | 14 + results/classifier/105/device/1163474 | 27 + results/classifier/105/device/1164 | 30 + results/classifier/105/device/1165 | 16 + results/classifier/105/device/1165383 | 29 + results/classifier/105/device/1167 | 14 + results/classifier/105/device/117 | 14 + results/classifier/105/device/1171 | 16 + results/classifier/105/device/118 | 14 + results/classifier/105/device/1181 | 14 + results/classifier/105/device/1181354 | 23 + results/classifier/105/device/1186 | 30 + results/classifier/105/device/1187 | 14 + results/classifier/105/device/1187529 | 30 + results/classifier/105/device/1188018 | 38 + results/classifier/105/device/119 | 14 + results/classifier/105/device/1190 | 14 + results/classifier/105/device/1190525 | 74 + results/classifier/105/device/1191 | 22 + results/classifier/105/device/1193 | 29 + results/classifier/105/device/1193564 | 21 + results/classifier/105/device/1194954 | 23 + results/classifier/105/device/1196 | 25 + results/classifier/105/device/1196426 | 63 + results/classifier/105/device/1198350 | 65 + results/classifier/105/device/120 | 14 + results/classifier/105/device/1200212 | 39 + results/classifier/105/device/1206 | 109 + results/classifier/105/device/1207228 | 31 + results/classifier/105/device/1208 | 22 + results/classifier/105/device/1209 | 18 + results/classifier/105/device/121 | 14 + results/classifier/105/device/1210 | 21 + results/classifier/105/device/1210212 | 40 + results/classifier/105/device/1211 | 20 + results/classifier/105/device/1213 | 56 + results/classifier/105/device/1214884 | 28 + results/classifier/105/device/122 | 14 + results/classifier/105/device/1221 | 42 + results/classifier/105/device/1224414 | 41 + results/classifier/105/device/1225 | 14 + results/classifier/105/device/1226 | 38 + results/classifier/105/device/1227 | 14 + results/classifier/105/device/1228 | 56 + results/classifier/105/device/1229 | 22 + results/classifier/105/device/123 | 14 + results/classifier/105/device/1230 | 36 + results/classifier/105/device/1232 | 26 + results/classifier/105/device/1233 | 14 + results/classifier/105/device/1234 | 16 + results/classifier/105/device/1237 | 14 + results/classifier/105/device/1237625 | 66 + results/classifier/105/device/1241569 | 25 + results/classifier/105/device/1242 | 14 + results/classifier/105/device/1245 | 14 + results/classifier/105/device/1246 | 14 + results/classifier/105/device/1248469 | 21 + results/classifier/105/device/125 | 14 + results/classifier/105/device/1250 | 16 + results/classifier/105/device/1252010 | 29 + 
results/classifier/105/device/1253 | 14 + results/classifier/105/device/1254443 | 25 + results/classifier/105/device/1258 | 14 + results/classifier/105/device/1259 | 14 + results/classifier/105/device/126 | 14 + results/classifier/105/device/1260 | 14 + results/classifier/105/device/1261743 | 33 + results/classifier/105/device/1262 | 14 + results/classifier/105/device/1263 | 14 + results/classifier/105/device/1263747 | 111 + results/classifier/105/device/1265 | 14 + results/classifier/105/device/1267520 | 43 + results/classifier/105/device/1268 | 14 + results/classifier/105/device/1273 | 14 + results/classifier/105/device/1274170 | 25 + results/classifier/105/device/1275 | 22 + results/classifier/105/device/1279257 | 29 + results/classifier/105/device/1280521 | 21 + results/classifier/105/device/1281 | 14 + results/classifier/105/device/1282 | 16 + results/classifier/105/device/1284 | 27 + results/classifier/105/device/1284090 | 27 + results/classifier/105/device/1287 | 23 + results/classifier/105/device/1289 | 14 + results/classifier/105/device/1290 | 14 + results/classifier/105/device/1292 | 16 + results/classifier/105/device/1293 | 14 + results/classifier/105/device/1294 | 14 + results/classifier/105/device/1296882 | 24 + results/classifier/105/device/1297218 | 436 ++ results/classifier/105/device/1298 | 28 + results/classifier/105/device/1300 | 24 + results/classifier/105/device/1300021 | 22 + results/classifier/105/device/1302 | 30 + results/classifier/105/device/1303 | 14 + results/classifier/105/device/1305 | 27 + results/classifier/105/device/1308 | 14 + results/classifier/105/device/131 | 14 + results/classifier/105/device/1311 | 14 + results/classifier/105/device/1312 | 24 + results/classifier/105/device/1313816 | 46 + results/classifier/105/device/1314293 | 26 + results/classifier/105/device/1315 | 14 + results/classifier/105/device/1315747 | 29 + results/classifier/105/device/1316 | 14 + results/classifier/105/device/1318 | 32 + results/classifier/105/device/1318746 | 47 + results/classifier/105/device/1319493 | 61 + results/classifier/105/device/132 | 14 + results/classifier/105/device/1320968 | 62 + results/classifier/105/device/1326986 | 32 + results/classifier/105/device/133 | 14 + results/classifier/105/device/1331334 | 23 + results/classifier/105/device/1332234 | 70 + results/classifier/105/device/1333216 | 79 + results/classifier/105/device/1333688 | 73 + results/classifier/105/device/1334397 | 28 + results/classifier/105/device/1335 | 14 + results/classifier/105/device/1336123 | 31 + results/classifier/105/device/1336192 | 23 + results/classifier/105/device/1336794 | 652 ++ results/classifier/105/device/134 | 14 + results/classifier/105/device/1342 | 37 + results/classifier/105/device/1346 | 48 + results/classifier/105/device/135 | 14 + results/classifier/105/device/1352 | 14 + results/classifier/105/device/1353346 | 28 + results/classifier/105/device/1354 | 14 + results/classifier/105/device/1354167 | 259 + results/classifier/105/device/1357 | 22 + results/classifier/105/device/1357175 | 38 + results/classifier/105/device/1357206 | 81 + results/classifier/105/device/1357440 | 50 + results/classifier/105/device/1358287 | 38 + results/classifier/105/device/1358722 | 35 + results/classifier/105/device/1359394 | 57 + results/classifier/105/device/1363 | 16 + results/classifier/105/device/1366 | 97 + results/classifier/105/device/1368791 | 28 + results/classifier/105/device/1369 | 14 + results/classifier/105/device/137 | 14 + results/classifier/105/device/1377163 | 35 + 
results/classifier/105/device/1379 | 14 + results/classifier/105/device/1382477 | 32 + results/classifier/105/device/1384 | 14 + results/classifier/105/device/1384892 | 223 + results/classifier/105/device/1385934 | 166 + results/classifier/105/device/1386478 | 23 + results/classifier/105/device/1387 | 22 + results/classifier/105/device/1388 | 27 + results/classifier/105/device/1390 | 14 + results/classifier/105/device/1391 | 41 + results/classifier/105/device/1392 | 27 + results/classifier/105/device/140 | 14 + results/classifier/105/device/1403 | 14 + results/classifier/105/device/1406 | 14 + results/classifier/105/device/1413 | 35 + results/classifier/105/device/142 | 14 + results/classifier/105/device/1423528 | 46 + results/classifier/105/device/1423668 | 27 + results/classifier/105/device/1426 | 51 + results/classifier/105/device/1434 | 16 + results/classifier/105/device/1437970 | 98 + results/classifier/105/device/1438 | 20 + results/classifier/105/device/144 | 14 + results/classifier/105/device/1443 | 14 + results/classifier/105/device/1445633 | 32 + results/classifier/105/device/1447 | 20 + results/classifier/105/device/14488057 | 719 ++ results/classifier/105/device/145 | 14 + results/classifier/105/device/1450 | 14 + results/classifier/105/device/1450891 | 44 + results/classifier/105/device/1452062 | 58 + results/classifier/105/device/1452742 | 48 + results/classifier/105/device/1453025 | 33 + results/classifier/105/device/1456 | 24 + results/classifier/105/device/1459 | 48 + results/classifier/105/device/1459622 | 41 + results/classifier/105/device/1461918 | 36 + results/classifier/105/device/1462949 | 51 + results/classifier/105/device/1463463 | 35 + results/classifier/105/device/1465 | 14 + results/classifier/105/device/1466 | 20 + results/classifier/105/device/1469978 | 53 + results/classifier/105/device/147 | 14 + results/classifier/105/device/1476 | 14 + results/classifier/105/device/1476183 | 35 + results/classifier/105/device/1476800 | 22 + results/classifier/105/device/1477538 | 155 + results/classifier/105/device/1478 | 79 + results/classifier/105/device/148 | 14 + results/classifier/105/device/1480562 | 66 + results/classifier/105/device/1481 | 14 + results/classifier/105/device/1481654 | 54 + results/classifier/105/device/1481990 | 55 + results/classifier/105/device/1483 | 14 + results/classifier/105/device/1485 | 25 + results/classifier/105/device/1485010 | 24 + results/classifier/105/device/1486 | 102 + results/classifier/105/device/1487 | 18 + results/classifier/105/device/1488363 | 272 + results/classifier/105/device/149 | 14 + results/classifier/105/device/1491 | 14 + results/classifier/105/device/1492 | 305 + results/classifier/105/device/1497 | 16 + results/classifier/105/device/150 | 14 + results/classifier/105/device/1501 | 14 + results/classifier/105/device/1502 | 14 + results/classifier/105/device/1502613 | 55 + results/classifier/105/device/1503 | 63 + results/classifier/105/device/1504528 | 31 + results/classifier/105/device/1505652 | 91 + results/classifier/105/device/1506 | 14 + results/classifier/105/device/1507 | 50 + results/classifier/105/device/1509336 | 80 + results/classifier/105/device/1512 | 14 + results/classifier/105/device/1513234 | 29 + results/classifier/105/device/1515 | 31 + results/classifier/105/device/1517 | 14 + results/classifier/105/device/1519 | 25 + results/classifier/105/device/1521 | 14 + results/classifier/105/device/1523 | 14 + results/classifier/105/device/1523246 | 108 + results/classifier/105/device/1523811 | 119 + 
results/classifier/105/device/1524 | 50 + results/classifier/105/device/1528 | 22 + results/classifier/105/device/153 | 14 + results/classifier/105/device/1530246 | 42 + results/classifier/105/device/1531352 | 25 + results/classifier/105/device/1533141 | 41 + results/classifier/105/device/1533848 | 19 + results/classifier/105/device/1535497 | 42 + results/classifier/105/device/1538 | 14 + results/classifier/105/device/1539 | 16 + results/classifier/105/device/1543057 | 41 + results/classifier/105/device/1546 | 14 + results/classifier/105/device/1547 | 25 + results/classifier/105/device/1548471 | 25 + results/classifier/105/device/155 | 14 + results/classifier/105/device/1550503 | 56 + results/classifier/105/device/1553760 | 21 + results/classifier/105/device/1555 | 45 + results/classifier/105/device/1557033 | 34 + results/classifier/105/device/1558 | 34 + results/classifier/105/device/1563 | 16 + results/classifier/105/device/1563612 | 71 + results/classifier/105/device/1563931 | 35 + results/classifier/105/device/1564 | 83 + results/classifier/105/device/1568107 | 29 + results/classifier/105/device/1568356 | 84 + results/classifier/105/device/1568589 | 64 + results/classifier/105/device/157 | 14 + results/classifier/105/device/1572 | 14 + results/classifier/105/device/1572959 | 26 + results/classifier/105/device/1574 | 102 + results/classifier/105/device/1575607 | 72 + results/classifier/105/device/1577937 | 44 + results/classifier/105/device/1578 | 16 + results/classifier/105/device/158 | 14 + results/classifier/105/device/1581308 | 38 + results/classifier/105/device/1581976 | 35 + results/classifier/105/device/1583420 | 32 + results/classifier/105/device/1583421 | 35 + results/classifier/105/device/1583784 | 34 + results/classifier/105/device/1585840 | 113 + results/classifier/105/device/1586613 | 27 + results/classifier/105/device/1587065 | 246 + results/classifier/105/device/1587970 | 25 + results/classifier/105/device/1588473 | 40 + results/classifier/105/device/1590796 | 60 + results/classifier/105/device/1591 | 14 + results/classifier/105/device/1592351 | 76 + results/classifier/105/device/1592590 | 35 + results/classifier/105/device/1594239 | 1701 +++++ results/classifier/105/device/1594861 | 357 + results/classifier/105/device/1596204 | 39 + results/classifier/105/device/1596579 | 113 + results/classifier/105/device/1597138 | 53 + results/classifier/105/device/1601 | 93 + results/classifier/105/device/1602 | 20 + results/classifier/105/device/1603779 | 40 + results/classifier/105/device/1603785 | 31 + results/classifier/105/device/161 | 14 + results/classifier/105/device/1610 | 14 + results/classifier/105/device/1616 | 14 + results/classifier/105/device/1616706 | 34 + results/classifier/105/device/1618 | 26 + results/classifier/105/device/1619438 | 31 + results/classifier/105/device/1619896 | 87 + results/classifier/105/device/162 | 14 + results/classifier/105/device/1620660 | 32 + results/classifier/105/device/1622 | 14 + results/classifier/105/device/1623998 | 83 + results/classifier/105/device/1629618 | 124 + results/classifier/105/device/1630527 | 31 + results/classifier/105/device/1630723 | 534 ++ results/classifier/105/device/1631 | 30 + results/classifier/105/device/1631625 | 34 + results/classifier/105/device/1634069 | 33 + results/classifier/105/device/1634852 | 62 + results/classifier/105/device/1635695 | 26 + results/classifier/105/device/1637693 | 66 + results/classifier/105/device/1637974 | 97 + results/classifier/105/device/1639 | 14 + 
results/classifier/105/device/1639791 | 46 + results/classifier/105/device/164 | 14 + results/classifier/105/device/1643 | 14 + results/classifier/105/device/1643342 | 39 + results/classifier/105/device/1645287 | 38 + results/classifier/105/device/1647 | 25 + results/classifier/105/device/1649040 | 142 + results/classifier/105/device/165 | 14 + results/classifier/105/device/1651 | 14 + results/classifier/105/device/1652459 | 38 + results/classifier/105/device/1653577 | 22 + results/classifier/105/device/1654137 | 226 + results/classifier/105/device/1655 | 14 + results/classifier/105/device/1656710 | 27 + results/classifier/105/device/1657010 | 34 + results/classifier/105/device/1658506 | 31 + results/classifier/105/device/1660 | 14 + results/classifier/105/device/1660035 | 51 + results/classifier/105/device/1661 | 24 + results/classifier/105/device/1661758 | 140 + results/classifier/105/device/1663 | 47 + results/classifier/105/device/1664 | 14 + results/classifier/105/device/1665 | 14 + results/classifier/105/device/1668041 | 102 + results/classifier/105/device/1669 | 24 + results/classifier/105/device/167 | 14 + results/classifier/105/device/1670 | 22 + results/classifier/105/device/1670509 | 60 + results/classifier/105/device/1672 | 21 + results/classifier/105/device/1673957 | 50 + results/classifier/105/device/1675 | 14 + results/classifier/105/device/1675332 | 19 + results/classifier/105/device/1675333 | 29 + results/classifier/105/device/1675549 | 32 + results/classifier/105/device/168 | 14 + results/classifier/105/device/1681404 | 39 + results/classifier/105/device/1682 | 16 + results/classifier/105/device/1685526 | 31 + results/classifier/105/device/1686364 | 39 + results/classifier/105/device/1687270 | 34 + results/classifier/105/device/1687309 | 188 + results/classifier/105/device/1687578 | 37 + results/classifier/105/device/1690 | 16 + results/classifier/105/device/1691109 | 55 + results/classifier/105/device/1696773 | 1335 ++++ results/classifier/105/device/1697 | 32 + results/classifier/105/device/1698 | 14 + results/classifier/105/device/1699628 | 35 + results/classifier/105/device/1703147 | 73 + results/classifier/105/device/1704186 | 25 + results/classifier/105/device/1706825 | 31 + results/classifier/105/device/1707587 | 27 + results/classifier/105/device/171 | 14 + results/classifier/105/device/1710 | 64 + results/classifier/105/device/1711 | 14 + results/classifier/105/device/1712 | 22 + results/classifier/105/device/1712027 | 66 + results/classifier/105/device/1712818 | 84 + results/classifier/105/device/1713434 | 487 ++ results/classifier/105/device/1714538 | 22 + results/classifier/105/device/1714750 | 130 + results/classifier/105/device/1718 | 60 + results/classifier/105/device/172 | 14 + results/classifier/105/device/1720969 | 25 + results/classifier/105/device/1723984 | 59 + results/classifier/105/device/1724 | 58 + results/classifier/105/device/1726733 | 27 + results/classifier/105/device/173 | 14 + results/classifier/105/device/1730099 | 27 + results/classifier/105/device/1731347 | 56 + results/classifier/105/device/1732981 | 33 + results/classifier/105/device/1733 | 16 + results/classifier/105/device/1733720 | 90 + results/classifier/105/device/1734792 | 33 + results/classifier/105/device/1735384 | 596 ++ results/classifier/105/device/1735576 | 45 + results/classifier/105/device/1735653 | 62 + results/classifier/105/device/1737194 | 642 ++ results/classifier/105/device/1737882 | 21 + results/classifier/105/device/1737883 | 24 + 
results/classifier/105/device/1738771 | 36 + results/classifier/105/device/174 | 14 + results/classifier/105/device/1741718 | 225 + results/classifier/105/device/1742 | 108 + results/classifier/105/device/1743 | 29 + results/classifier/105/device/1744654 | 20 + results/classifier/105/device/1745895 | 48 + results/classifier/105/device/1746 | 16 + results/classifier/105/device/1747 | 29 + results/classifier/105/device/1747393 | 22 + results/classifier/105/device/1748 | 69 + results/classifier/105/device/1749 | 33 + results/classifier/105/device/175 | 14 + results/classifier/105/device/1754 | 29 + results/classifier/105/device/1754597 | 48 + results/classifier/105/device/1757323 | 105 + results/classifier/105/device/1758091 | 40 + results/classifier/105/device/1759 | 23 + results/classifier/105/device/1759264 | 231 + results/classifier/105/device/1759337 | 35 + results/classifier/105/device/1759492 | 34 + results/classifier/105/device/176 | 14 + results/classifier/105/device/1760176 | 86 + results/classifier/105/device/1761 | 14 + results/classifier/105/device/1761535 | 68 + results/classifier/105/device/1764 | 14 + results/classifier/105/device/1766 | 16 + results/classifier/105/device/1767 | 16 + results/classifier/105/device/1769 | 14 + results/classifier/105/device/1769067 | 25 + results/classifier/105/device/1772 | 25 + results/classifier/105/device/1773 | 22 + results/classifier/105/device/1774830 | 118 + results/classifier/105/device/1776 | 14 + results/classifier/105/device/1776224 | 59 + results/classifier/105/device/1777226 | 35 + results/classifier/105/device/1777232 | 29 + results/classifier/105/device/1777235 | 29 + results/classifier/105/device/178 | 14 + results/classifier/105/device/1780815 | 26 + results/classifier/105/device/1784 | 26 + results/classifier/105/device/1784919 | 39 + results/classifier/105/device/1785 | 38 + results/classifier/105/device/1785902 | 54 + results/classifier/105/device/1785972 | 90 + results/classifier/105/device/1789 | 30 + results/classifier/105/device/179 | 14 + results/classifier/105/device/1790268 | 44 + results/classifier/105/device/1791763 | 42 + results/classifier/105/device/1793 | 48 + results/classifier/105/device/1793297 | 31 + results/classifier/105/device/1793539 | 29 + results/classifier/105/device/1793859 | 64 + results/classifier/105/device/1794 | 40 + results/classifier/105/device/1794187 | 43 + results/classifier/105/device/1794939 | 51 + results/classifier/105/device/1795 | 21 + results/classifier/105/device/1797 | 16 + results/classifier/105/device/1798 | 14 + results/classifier/105/device/1799766 | 255 + results/classifier/105/device/180 | 14 + results/classifier/105/device/1800088 | 26 + results/classifier/105/device/1800156 | 23 + results/classifier/105/device/1802 | 22 + results/classifier/105/device/1802150 | 75 + results/classifier/105/device/1804 | 24 + results/classifier/105/device/1804678 | 79 + results/classifier/105/device/1805697 | 72 + results/classifier/105/device/1806243 | 151 + results/classifier/105/device/1807073 | 102 + results/classifier/105/device/1808565 | 35 + results/classifier/105/device/1809 | 66 + results/classifier/105/device/181 | 14 + results/classifier/105/device/1810343 | 34 + results/classifier/105/device/1810590 | 43 + results/classifier/105/device/1810956 | 30 + results/classifier/105/device/1811499 | 47 + results/classifier/105/device/1811720 | 25 + results/classifier/105/device/1811758 | 37 + results/classifier/105/device/1811888 | 41 + results/classifier/105/device/1811916 | 27 + 
results/classifier/105/device/1812451 | 30 + results/classifier/105/device/1813045 | 37 + results/classifier/105/device/1813307 | 43 + results/classifier/105/device/1815263 | 121 + results/classifier/105/device/1815445 | 33 + results/classifier/105/device/1815889 | 927 +++ results/classifier/105/device/1816189 | 50 + results/classifier/105/device/1816805 | 37 + results/classifier/105/device/1817 | 14 + results/classifier/105/device/1818398 | 38 + results/classifier/105/device/1819 | 23 + results/classifier/105/device/1819343 | 54 + results/classifier/105/device/1819649 | 55 + results/classifier/105/device/182 | 14 + results/classifier/105/device/1820 | 23 + results/classifier/105/device/1821131 | 47 + results/classifier/105/device/1823831 | 54 + results/classifier/105/device/1824 | 14 + results/classifier/105/device/1824616 | 27 + results/classifier/105/device/1824744 | 29 + results/classifier/105/device/1824853 | 93 + results/classifier/105/device/1826200 | 55 + results/classifier/105/device/1826827 | 116 + results/classifier/105/device/1828 | 34 + results/classifier/105/device/183 | 14 + results/classifier/105/device/1831354 | 42 + results/classifier/105/device/1831362 | 49 + results/classifier/105/device/1831477 | 34 + results/classifier/105/device/1832 | 14 + results/classifier/105/device/1832916 | 32 + results/classifier/105/device/1835827 | 28 + results/classifier/105/device/1835839 | 282 + results/classifier/105/device/1836451 | 36 + results/classifier/105/device/1837049 | 197 + results/classifier/105/device/1837094 | 46 + results/classifier/105/device/1837851 | 40 + results/classifier/105/device/1838 | 14 + results/classifier/105/device/1838312 | 656 ++ results/classifier/105/device/1838465 | 46 + results/classifier/105/device/1839 | 54 + results/classifier/105/device/184 | 14 + results/classifier/105/device/1840865 | 57 + results/classifier/105/device/1842787 | 221 + results/classifier/105/device/1843 | 28 + results/classifier/105/device/1843205 | 119 + results/classifier/105/device/1843711 | 48 + results/classifier/105/device/1843795 | 68 + results/classifier/105/device/1844644 | 23 + results/classifier/105/device/1844814 | 43 + results/classifier/105/device/1844817 | 40 + results/classifier/105/device/1846816 | 499 ++ results/classifier/105/device/1847 | 14 + results/classifier/105/device/1849 | 84 + results/classifier/105/device/1851 | 448 ++ results/classifier/105/device/1853 | 14 + results/classifier/105/device/1853781 | 95 + results/classifier/105/device/1853898 | 53 + results/classifier/105/device/1854 | 31 + results/classifier/105/device/1854204 | 65 + results/classifier/105/device/1854878 | 61 + results/classifier/105/device/1855002 | 47 + results/classifier/105/device/1857 | 65 + results/classifier/105/device/1857269 | 45 + results/classifier/105/device/1858461 | 198 + results/classifier/105/device/1858488 | 52 + results/classifier/105/device/1859 | 21 + results/classifier/105/device/1860053 | 72 + results/classifier/105/device/1860914 | 67 + results/classifier/105/device/1861341 | 50 + results/classifier/105/device/1861458 | 43 + results/classifier/105/device/1861468 | 36 + results/classifier/105/device/1861551 | 68 + results/classifier/105/device/1861605 | 37 + results/classifier/105/device/1863441 | 44 + results/classifier/105/device/1864 | 34 + results/classifier/105/device/1864814 | 48 + results/classifier/105/device/1865048 | 129 + results/classifier/105/device/1865188 | 54 + results/classifier/105/device/1865350 | 59 + results/classifier/105/device/1866 | 14 + 
results/classifier/105/device/1866577 | 31 + results/classifier/105/device/1866792 | 91 + results/classifier/105/device/1869241 | 38 + results/classifier/105/device/1869426 | 97 + results/classifier/105/device/1869497 | 29 + results/classifier/105/device/187 | 14 + results/classifier/105/device/1870039 | 73 + results/classifier/105/device/1871 | 14 + results/classifier/105/device/1871005 | 34 + results/classifier/105/device/1872 | 14 + results/classifier/105/device/1872113 | 182 + results/classifier/105/device/1873335 | 111 + results/classifier/105/device/1873341 | 33 + results/classifier/105/device/1874486 | 127 + results/classifier/105/device/1875080 | 37 + results/classifier/105/device/1876373 | 89 + results/classifier/105/device/1876678 | 596 ++ results/classifier/105/device/1877 | 256 + results/classifier/105/device/1877716 | 145 + results/classifier/105/device/1878627 | 34 + results/classifier/105/device/1878628 | 32 + results/classifier/105/device/188 | 14 + results/classifier/105/device/1880 | 26 + results/classifier/105/device/1880822 | 285 + results/classifier/105/device/1881249 | 79 + results/classifier/105/device/1881645 | 22 + results/classifier/105/device/1882241 | 128 + results/classifier/105/device/1882350 | 108 + results/classifier/105/device/1882787 | 80 + results/classifier/105/device/1882851 | 505 ++ results/classifier/105/device/1884302 | 47 + results/classifier/105/device/1884982 | 43 + results/classifier/105/device/1885889 | 61 + results/classifier/105/device/1886076 | 65 + results/classifier/105/device/1886210 | 38 + results/classifier/105/device/1887 | 22 + results/classifier/105/device/1887820 | 29 + results/classifier/105/device/1888 | 25 + results/classifier/105/device/1888728 | 47 + results/classifier/105/device/1888971 | 51 + results/classifier/105/device/189 | 14 + results/classifier/105/device/1890157 | 54 + results/classifier/105/device/1890160 | 118 + results/classifier/105/device/1890775 | 70 + results/classifier/105/device/1891830 | 56 + results/classifier/105/device/1893040 | 310 + results/classifier/105/device/1893634 | 56 + results/classifier/105/device/1894818 | 322 + results/classifier/105/device/1895363 | 40 + results/classifier/105/device/1895895 | 61 + results/classifier/105/device/1896096 | 433 ++ results/classifier/105/device/1897680 | 131 + results/classifier/105/device/1898 | 45 + results/classifier/105/device/1900122 | 156 + results/classifier/105/device/1901068 | 58 + results/classifier/105/device/1901532 | 44 + results/classifier/105/device/1902262 | 66 + results/classifier/105/device/1902306 | 56 + results/classifier/105/device/1904490 | 70 + results/classifier/105/device/1906463 | 36 + results/classifier/105/device/1906608 | 63 + results/classifier/105/device/1906693 | 189 + results/classifier/105/device/1907042 | 65 + results/classifier/105/device/1909261 | 73 + results/classifier/105/device/1909418 | 438 ++ results/classifier/105/device/1911188 | 68 + results/classifier/105/device/1911797 | 40 + results/classifier/105/device/1912 | 14 + results/classifier/105/device/1912857 | 57 + results/classifier/105/device/1913344 | 26 + results/classifier/105/device/1913969 | 78 + results/classifier/105/device/1914 | 20 + results/classifier/105/device/1914667 | 86 + results/classifier/105/device/1916 | 41 + results/classifier/105/device/1917184 | 61 + results/classifier/105/device/192 | 14 + results/classifier/105/device/1920767 | 29 + results/classifier/105/device/1920784 | 978 +++ results/classifier/105/device/1921092 | 53 + 
results/classifier/105/device/1922102 | 95 +
results/classifier/105/device/1922252 | 37 +
results/classifier/105/device/1926111 | 212 +
results/classifier/105/device/1926995 | 38 +
results/classifier/105/device/1927408 | 70 +
results/classifier/105/device/1927530 | 77 +
results/classifier/105/device/1929 | 34 +
results/classifier/105/device/193 | 14 +
results/classifier/105/device/1932 | 25 +
results/classifier/105/device/1933 | 54 +
results/classifier/105/device/1936977 | 25 +
results/classifier/105/device/1939179 | 44 +
results/classifier/105/device/1945 | 14 +
results/classifier/105/device/1945540 | 188 +
results/classifier/105/device/1947933 | 32 +
results/classifier/105/device/1952448 | 52 +
results/classifier/105/device/1959 | 14 +
results/classifier/105/device/196 | 14 +
results/classifier/105/device/1960 | 35 +
results/classifier/105/device/1961 | 14 +
results/classifier/105/device/1964 | 20 +
results/classifier/105/device/1965 | 14 +
results/classifier/105/device/1968 | 14 +
results/classifier/105/device/1969 | 14 +
results/classifier/105/device/197 | 14 +
results/classifier/105/device/1973 | 14 +
results/classifier/105/device/1974 | 14 +
results/classifier/105/device/1979 | 44 +
results/classifier/105/device/1980 | 26 +
results/classifier/105/device/1984 | 14 +
results/classifier/105/device/1985 | 14 +
results/classifier/105/device/200 | 14 +
results/classifier/105/device/2000 | 58 +
results/classifier/105/device/2002 | 16 +
results/classifier/105/device/201 | 14 +
results/classifier/105/device/2011 | 14 +
results/classifier/105/device/2014 | 66 +
results/classifier/105/device/2018 | 31 +
results/classifier/105/device/202 | 14 +
results/classifier/105/device/2020 | 24 +
results/classifier/105/device/2021 | 14 +
results/classifier/105/device/2025586 | 57 +
results/classifier/105/device/2026 | 21 +
results/classifier/105/device/2027 | 246 +
results/classifier/105/device/2028 | 14 +
results/classifier/105/device/2032 | 44 +
results/classifier/105/device/2033 | 14 +
results/classifier/105/device/204 | 14 +
results/classifier/105/device/2040 | 37 +
results/classifier/105/device/2044 | 18 +
results/classifier/105/device/2045 | 14 +
results/classifier/105/device/2047 | 16 +
results/classifier/105/device/2048 | 14 +
results/classifier/105/device/2049 | 24 +
results/classifier/105/device/205 | 14 +
results/classifier/105/device/2051 | 14 +
results/classifier/105/device/2055 | 20 +
results/classifier/105/device/2065 | 14 +
results/classifier/105/device/2066 | 14 +
results/classifier/105/device/2067 | 14 +
results/classifier/105/device/206818 | 59 +
results/classifier/105/device/207 | 14 +
results/classifier/105/device/2072 | 14 +
results/classifier/105/device/2072564 | 340 +
results/classifier/105/device/2079 | 14 +
results/classifier/105/device/208 | 14 +
results/classifier/105/device/2080 | 14 +
results/classifier/105/device/2081 | 22 +
results/classifier/105/device/2086 | 28 +
results/classifier/105/device/2087 | 41 +
results/classifier/105/device/2093 | 16 +
results/classifier/105/device/2095 | 14 +
results/classifier/105/device/2096 | 14 +
results/classifier/105/device/2097 | 14 +
results/classifier/105/device/2098 | 14 +
results/classifier/105/device/2107 | 14 +
results/classifier/105/device/2108 | 14 +
results/classifier/105/device/211 | 14 +
results/classifier/105/device/2112 | 39 +
results/classifier/105/device/2119 | 14 +
results/classifier/105/device/212 | 14 +
results/classifier/105/device/2122 | 20 +
results/classifier/105/device/2124 | 14 +
results/classifier/105/device/2125 | 27 +
results/classifier/105/device/2126 | 14 +
results/classifier/105/device/2128 | 14 +
results/classifier/105/device/2129 | 14 +
results/classifier/105/device/213 | 14 +
results/classifier/105/device/2132 | 24 +
results/classifier/105/device/2135 | 37 +
results/classifier/105/device/2137 | 14 +
results/classifier/105/device/2148 | 22 +
results/classifier/105/device/215 | 14 +
results/classifier/105/device/2152 | 14 +
results/classifier/105/device/2153 | 14 +
results/classifier/105/device/2158 | 22 +
results/classifier/105/device/2161 | 14 +
results/classifier/105/device/2162 | 14 +
results/classifier/105/device/2164 | 20 +
results/classifier/105/device/2166 | 14 +
results/classifier/105/device/217 | 14 +
results/classifier/105/device/2172 | 14 +
results/classifier/105/device/2173 | 14 +
results/classifier/105/device/2174 | 14 +
results/classifier/105/device/2176 | 14 +
results/classifier/105/device/2179 | 64 +
results/classifier/105/device/2181 | 16 +
results/classifier/105/device/2188 | 23 +
results/classifier/105/device/219 | 14 +
results/classifier/105/device/2192 | 14 +
results/classifier/105/device/2195 | 52 +
results/classifier/105/device/2196 | 14 +
results/classifier/105/device/220 | 14 +
results/classifier/105/device/2201 | 24 +
results/classifier/105/device/2204 | 86 +
results/classifier/105/device/2205 | 63 +
results/classifier/105/device/221 | 14 +
results/classifier/105/device/2211 | 40 +
results/classifier/105/device/2213 | 28 +
results/classifier/105/device/2215 | 14 +
results/classifier/105/device/2218 | 25 +
results/classifier/105/device/2219 | 14 +
results/classifier/105/device/2221 | 14 +
results/classifier/105/device/2222 | 14 +
results/classifier/105/device/2229 | 18 +
results/classifier/105/device/223 | 14 +
results/classifier/105/device/2239 | 14 +
results/classifier/105/device/224 | 14 +
results/classifier/105/device/2241 | 14 +
results/classifier/105/device/2243 | 22 +
results/classifier/105/device/2245 | 14 +
results/classifier/105/device/2246 | 14 +
results/classifier/105/device/2247 | 19 +
results/classifier/105/device/2249 | 46 +
results/classifier/105/device/225 | 14 +
results/classifier/105/device/226 | 14 +
results/classifier/105/device/2266 | 82 +
results/classifier/105/device/2268 | 56 +
results/classifier/105/device/2270 | 14 +
results/classifier/105/device/2272 | 34 +
results/classifier/105/device/2275 | 22 +
results/classifier/105/device/2277 | 14 +
results/classifier/105/device/2278 | 14 +
results/classifier/105/device/2282 | 14 +
results/classifier/105/device/2284 | 14 +
results/classifier/105/device/2285 | 14 +
results/classifier/105/device/2286 | 14 +
results/classifier/105/device/2289 | 14 +
results/classifier/105/device/2294 | 14 +
results/classifier/105/device/2295 | 17 +
results/classifier/105/device/2301 | 14 +
results/classifier/105/device/2306 | 14 +
results/classifier/105/device/2307 | 52 +
results/classifier/105/device/2309 | 44 +
results/classifier/105/device/231 | 14 +
results/classifier/105/device/2310 | 14 +
results/classifier/105/device/2312 | 57 +
results/classifier/105/device/2314 | 29 +
results/classifier/105/device/2320 | 14 +
results/classifier/105/device/2322 | 14 +
results/classifier/105/device/2327 | 74 +
results/classifier/105/device/2329 | 14 +
results/classifier/105/device/233 | 14 +
results/classifier/105/device/2331 | 14 +
results/classifier/105/device/2336 | 36 +
results/classifier/105/device/2338 | 14 +
results/classifier/105/device/2339 | 14 +
results/classifier/105/device/2347 | 22 +
results/classifier/105/device/2348 | 20 +
results/classifier/105/device/2350 | 27 +
results/classifier/105/device/2351 | 28 +
results/classifier/105/device/2354 | 20 +
results/classifier/105/device/2356 | 28 +
results/classifier/105/device/2357 | 31 +
results/classifier/105/device/2359 | 45 +
results/classifier/105/device/236 | 14 +
results/classifier/105/device/2362 | 76 +
results/classifier/105/device/2368 | 14 +
results/classifier/105/device/237 | 14 +
results/classifier/105/device/2381 | 16 +
results/classifier/105/device/2383 | 16 +
results/classifier/105/device/2391 | 29 +
results/classifier/105/device/2396 | 14 +
results/classifier/105/device/240 | 14 +
results/classifier/105/device/2406 | 20 +
results/classifier/105/device/2416 | 52 +
results/classifier/105/device/2417 | 18 +
results/classifier/105/device/24190340 | 2064 ++++++
results/classifier/105/device/242 | 14 +
results/classifier/105/device/2426 | 14 +
results/classifier/105/device/243 | 14 +
results/classifier/105/device/2438 | 14 +
results/classifier/105/device/2443 | 31 +
results/classifier/105/device/245 | 14 +
results/classifier/105/device/2454 | 21 +
results/classifier/105/device/2456 | 14 +
results/classifier/105/device/2458 | 14 +
results/classifier/105/device/2464 | 24 +
results/classifier/105/device/2465 | 14 +
results/classifier/105/device/2468 | 46 +
results/classifier/105/device/2469 | 14 +
results/classifier/105/device/247 | 14 +
results/classifier/105/device/2471 | 14 +
results/classifier/105/device/2472 | 14 +
results/classifier/105/device/2473 | 16 +
results/classifier/105/device/2475 | 14 +
results/classifier/105/device/2477 | 14 +
results/classifier/105/device/2479 | 43 +
results/classifier/105/device/248 | 14 +
results/classifier/105/device/2480 | 42 +
results/classifier/105/device/24930826 | 41 +
results/classifier/105/device/250 | 14 +
results/classifier/105/device/2503 | 22 +
results/classifier/105/device/2505 | 14 +
results/classifier/105/device/2507 | 26 +
results/classifier/105/device/2508 | 14 +
results/classifier/105/device/251 | 14 +
results/classifier/105/device/2516 | 14 +
results/classifier/105/device/2517 | 14 +
results/classifier/105/device/2521 | 29 +
results/classifier/105/device/2527 | 14 +
results/classifier/105/device/253 | 14 +
results/classifier/105/device/2530 | 30 +
results/classifier/105/device/2533 | 14 +
results/classifier/105/device/2535 | 14 +
results/classifier/105/device/2536 | 14 +
results/classifier/105/device/2539 | 14 +
results/classifier/105/device/254 | 14 +
results/classifier/105/device/2541 | 14 +
results/classifier/105/device/2542 | 14 +
results/classifier/105/device/2544 | 14 +
results/classifier/105/device/2545 | 22 +
results/classifier/105/device/2546 | 14 +
results/classifier/105/device/2547 | 16 +
results/classifier/105/device/2549 | 16 +
results/classifier/105/device/256 | 14 +
results/classifier/105/device/2564 | 14 +
results/classifier/105/device/2568 | 14 +
results/classifier/105/device/257 | 14 +
results/classifier/105/device/2572 | 43 +
results/classifier/105/device/2575 | 14 +
results/classifier/105/device/2579 | 14 +
results/classifier/105/device/2586 | 16 +
results/classifier/105/device/2587 | 14 +
results/classifier/105/device/2588 | 56 +
results/classifier/105/device/259 | 14 +
results/classifier/105/device/2590 | 36 +
results/classifier/105/device/2596 | 14 +
results/classifier/105/device/2597 | 14 +
results/classifier/105/device/260 | 14 +
results/classifier/105/device/2605 | 14 +
results/classifier/105/device/26095107 | 166 +
results/classifier/105/device/261 | 14 +
results/classifier/105/device/2613 | 14 +
results/classifier/105/device/2615 | 23 +
results/classifier/105/device/262 | 14 +
results/classifier/105/device/2626 | 21 +
results/classifier/105/device/2627 | 14 +
results/classifier/105/device/2629 | 14 +
results/classifier/105/device/2636 | 14 +
results/classifier/105/device/2640 | 14 +
results/classifier/105/device/265 | 14 +
results/classifier/105/device/2651 | 21 +
results/classifier/105/device/2652 | 14 +
results/classifier/105/device/2653 | 14 +
results/classifier/105/device/2654 | 27 +
results/classifier/105/device/2659 | 14 +
results/classifier/105/device/2660 | 14 +
results/classifier/105/device/2661 | 46 +
results/classifier/105/device/2663 | 20 +
results/classifier/105/device/2664 | 22 +
results/classifier/105/device/2665 | 24 +
results/classifier/105/device/2666 | 35 +
results/classifier/105/device/2679 | 14 +
results/classifier/105/device/2681 | 14 +
results/classifier/105/device/2682 | 54 +
results/classifier/105/device/2689 | 14 +
results/classifier/105/device/269 | 14 +
results/classifier/105/device/2693 | 19 +
results/classifier/105/device/2695 | 16 +
results/classifier/105/device/2697 | 14 +
results/classifier/105/device/2698 | 22 +
results/classifier/105/device/270 | 14 +
results/classifier/105/device/2700 | 21 +
results/classifier/105/device/2701 | 14 +
results/classifier/105/device/2703 | 53 +
results/classifier/105/device/2707 | 23 +
results/classifier/105/device/2708 | 14 +
results/classifier/105/device/271 | 14 +
results/classifier/105/device/2711 | 14 +
results/classifier/105/device/2713 | 26 +
results/classifier/105/device/2714 | 20 +
results/classifier/105/device/2715 | 14 +
results/classifier/105/device/2716 | 20 +
results/classifier/105/device/272 | 14 +
results/classifier/105/device/2721 | 14 +
results/classifier/105/device/2724 | 21 +
results/classifier/105/device/2725 | 14 +
results/classifier/105/device/2726 | 14 +
results/classifier/105/device/2733 | 25 +
results/classifier/105/device/2734 | 37 +
results/classifier/105/device/2735 | 23 +
results/classifier/105/device/2737 | 14 +
results/classifier/105/device/2741 | 74 +
results/classifier/105/device/2743 | 16 +
results/classifier/105/device/2751 | 14 +
results/classifier/105/device/2752 | 290 +
results/classifier/105/device/2759 | 14 +
results/classifier/105/device/2762 | 18 +
results/classifier/105/device/2763 | 37 +
results/classifier/105/device/2765 | 14 +
results/classifier/105/device/2769 | 16 +
results/classifier/105/device/2777 | 72 +
results/classifier/105/device/278 | 14 +
results/classifier/105/device/2781 | 14 +
results/classifier/105/device/279 | 14 +
results/classifier/105/device/2794 | 62 +
results/classifier/105/device/2796 | 20 +
results/classifier/105/device/2797 | 16 +
results/classifier/105/device/280 | 14 +
results/classifier/105/device/2801 | 14 +
results/classifier/105/device/2803 | 121 +
results/classifier/105/device/2804 | 14 +
results/classifier/105/device/2805 | 35 +
results/classifier/105/device/2808 | 14 +
results/classifier/105/device/281 | 14 +
results/classifier/105/device/2812 | 14 +
results/classifier/105/device/2813 | 22 +
results/classifier/105/device/2815 | 14 +
results/classifier/105/device/2824 | 14 +
results/classifier/105/device/2825 | 50 +
results/classifier/105/device/283 | 14 +
results/classifier/105/device/2830 | 14 +
results/classifier/105/device/2831 | 33 +
results/classifier/105/device/2838 | 21 +
results/classifier/105/device/2841 | 24 +
results/classifier/105/device/2842 | 14 +
results/classifier/105/device/2845 | 45 +
results/classifier/105/device/2846 | 14 +
results/classifier/105/device/2847 | 14 +
results/classifier/105/device/285 | 14 +
results/classifier/105/device/2850 | 16 +
results/classifier/105/device/2858 | 14 +
results/classifier/105/device/2859 | 14 +
results/classifier/105/device/28596630 | 121 +
results/classifier/105/device/286 | 14 +
results/classifier/105/device/2869 | 16 +
results/classifier/105/device/287 | 14 +
results/classifier/105/device/2870 | 14 +
results/classifier/105/device/2873 | 22 +
results/classifier/105/device/2877 | 14 +
results/classifier/105/device/2878 | 14 +
results/classifier/105/device/2880 | 16 +
results/classifier/105/device/2881 | 23 +
results/classifier/105/device/2885 | 14 +
results/classifier/105/device/2886 | 28 +
results/classifier/105/device/2887 | 14 +
results/classifier/105/device/2888 | 27 +
results/classifier/105/device/289 | 14 +
results/classifier/105/device/2890 | 14 +
results/classifier/105/device/2892 | 14 +
results/classifier/105/device/2893 | 25 +
results/classifier/105/device/2894 | 38 +
results/classifier/105/device/2896 | 14 +
results/classifier/105/device/290 | 14 +
results/classifier/105/device/2902 | 24 +
results/classifier/105/device/2904 | 24 +
results/classifier/105/device/2905 | 37 +
results/classifier/105/device/291 | 14 +
results/classifier/105/device/2910 | 18 +
results/classifier/105/device/2912 | 26 +
results/classifier/105/device/2913 | 14 +
results/classifier/105/device/2918 | 14 +
results/classifier/105/device/292 | 14 +
results/classifier/105/device/2922 | 20 +
results/classifier/105/device/2923 | 26 +
results/classifier/105/device/2924 | 28 +
results/classifier/105/device/2929 | 20 +
results/classifier/105/device/293 | 14 +
results/classifier/105/device/2930 | 14 +
results/classifier/105/device/2937 | 14 +
results/classifier/105/device/2939 | 14 +
results/classifier/105/device/294 | 14 +
results/classifier/105/device/2940 | 14 +
results/classifier/105/device/2941 | 14 +
results/classifier/105/device/2955 | 14 +
results/classifier/105/device/296 | 14 +
results/classifier/105/device/2963 | 37 +
results/classifier/105/device/2964 | 22 +
results/classifier/105/device/2968 | 35 +
results/classifier/105/device/2976 | 38 +
results/classifier/105/device/298 | 14 +
results/classifier/105/device/2983 | 128 +
results/classifier/105/device/2985 | 21 +
results/classifier/105/device/2986 | 14 +
results/classifier/105/device/302 | 14 +
results/classifier/105/device/303 | 14 +
results/classifier/105/device/304 | 14 +
results/classifier/105/device/305 | 14 +
results/classifier/105/device/307 | 14 +
results/classifier/105/device/310 | 14 +
results/classifier/105/device/311 | 14 +
results/classifier/105/device/313 | 14 +
results/classifier/105/device/316 | 14 +
results/classifier/105/device/317 | 14 +
results/classifier/105/device/318 | 14 +
results/classifier/105/device/319 | 14 +
results/classifier/105/device/320 | 14 +
results/classifier/105/device/321 | 14 +
results/classifier/105/device/322 | 14 +
results/classifier/105/device/324 | 14 +
results/classifier/105/device/325 | 14 +
results/classifier/105/device/326 | 14 +
results/classifier/105/device/328 | 14 +
results/classifier/105/device/329 | 14 +
results/classifier/105/device/330 | 14 +
results/classifier/105/device/331 | 14 +
results/classifier/105/device/332 | 14 +
results/classifier/105/device/334 | 14 +
results/classifier/105/device/337 | 14 +
results/classifier/105/device/338 | 14 +
results/classifier/105/device/340 | 14 +
results/classifier/105/device/341 | 14 + results/classifier/105/device/344 | 14 + results/classifier/105/device/346 | 14 + results/classifier/105/device/349 | 14 + results/classifier/105/device/350 | 14 + results/classifier/105/device/353 | 14 + results/classifier/105/device/354 | 14 + results/classifier/105/device/355410 | 93 + results/classifier/105/device/357 | 14 + results/classifier/105/device/358 | 14 + results/classifier/105/device/362 | 14 + results/classifier/105/device/363 | 14 + results/classifier/105/device/365 | 14 + results/classifier/105/device/36568044 | 4589 ++++++++++++ results/classifier/105/device/367 | 14 + results/classifier/105/device/368 | 14 + results/classifier/105/device/375 | 14 + results/classifier/105/device/380 | 14 + results/classifier/105/device/383 | 14 + results/classifier/105/device/384 | 14 + results/classifier/105/device/385 | 14 + results/classifier/105/device/386 | 14 + results/classifier/105/device/387 | 14 + results/classifier/105/device/389 | 14 + results/classifier/105/device/391 | 14 + results/classifier/105/device/393 | 14 + results/classifier/105/device/394 | 14 + results/classifier/105/device/395 | 14 + results/classifier/105/device/397 | 14 + results/classifier/105/device/398 | 14 + results/classifier/105/device/399 | 14 + results/classifier/105/device/402 | 14 + results/classifier/105/device/403 | 14 + results/classifier/105/device/404 | 14 + results/classifier/105/device/405 | 14 + results/classifier/105/device/406 | 14 + results/classifier/105/device/407 | 14 + results/classifier/105/device/408 | 14 + results/classifier/105/device/409 | 14 + results/classifier/105/device/410 | 14 + results/classifier/105/device/411 | 14 + results/classifier/105/device/413 | 14 + results/classifier/105/device/414 | 14 + results/classifier/105/device/415 | 14 + results/classifier/105/device/416 | 14 + results/classifier/105/device/418 | 14 + results/classifier/105/device/419 | 14 + results/classifier/105/device/420 | 14 + results/classifier/105/device/422 | 14 + results/classifier/105/device/42226390 | 195 + results/classifier/105/device/423 | 14 + results/classifier/105/device/424 | 14 + results/classifier/105/device/425 | 14 + results/classifier/105/device/429 | 14 + results/classifier/105/device/430 | 14 + results/classifier/105/device/431 | 14 + results/classifier/105/device/432 | 14 + results/classifier/105/device/433 | 14 + results/classifier/105/device/434 | 14 + results/classifier/105/device/437 | 14 + results/classifier/105/device/438 | 14 + results/classifier/105/device/44 | 14 + results/classifier/105/device/441 | 14 + results/classifier/105/device/443 | 14 + results/classifier/105/device/444 | 14 + results/classifier/105/device/445 | 14 + results/classifier/105/device/446 | 14 + results/classifier/105/device/448 | 14 + results/classifier/105/device/449 | 81 + results/classifier/105/device/45 | 14 + results/classifier/105/device/451 | 14 + results/classifier/105/device/452 | 69 + results/classifier/105/device/453 | 16 + results/classifier/105/device/454 | 16 + results/classifier/105/device/457 | 14 + results/classifier/105/device/458 | 14 + results/classifier/105/device/46 | 14 + results/classifier/105/device/461 | 14 + results/classifier/105/device/464 | 14 + results/classifier/105/device/467 | 14 + results/classifier/105/device/468 | 14 + results/classifier/105/device/469 | 14 + results/classifier/105/device/472 | 14 + results/classifier/105/device/473 | 14 + results/classifier/105/device/476 | 14 + results/classifier/105/device/477 | 25 + 
results/classifier/105/device/479 | 25 + results/classifier/105/device/48 | 14 + results/classifier/105/device/480 | 14 + results/classifier/105/device/482 | 14 + results/classifier/105/device/48245039 | 538 ++ results/classifier/105/device/484 | 14 + results/classifier/105/device/485258 | 48 + results/classifier/105/device/487 | 14 + results/classifier/105/device/49 | 14 + results/classifier/105/device/490 | 842 +++ results/classifier/105/device/498039 | 31 + results/classifier/105/device/498523 | 60 + results/classifier/105/device/50 | 14 + results/classifier/105/device/501 | 14 + results/classifier/105/device/502 | 14 + results/classifier/105/device/503 | 14 + results/classifier/105/device/506 | 14 + results/classifier/105/device/51 | 14 + results/classifier/105/device/510 | 14 + results/classifier/105/device/511 | 14 + results/classifier/105/device/512 | 14 + results/classifier/105/device/513 | 14 + results/classifier/105/device/52 | 14 + results/classifier/105/device/520 | 46 + results/classifier/105/device/521202 | 29 + results/classifier/105/device/524 | 14 + results/classifier/105/device/527 | 14 + results/classifier/105/device/529 | 17 + results/classifier/105/device/531 | 14 + results/classifier/105/device/532 | 14 + results/classifier/105/device/533613 | 79 + results/classifier/105/device/534 | 14 + results/classifier/105/device/535 | 14 + results/classifier/105/device/537 | 14 + results/classifier/105/device/538 | 14 + results/classifier/105/device/54 | 14 + results/classifier/105/device/540 | 14 + results/classifier/105/device/542 | 14 + results/classifier/105/device/547 | 14 + results/classifier/105/device/549 | 14 + results/classifier/105/device/55 | 14 + results/classifier/105/device/550 | 14 + results/classifier/105/device/552 | 14 + results/classifier/105/device/554 | 14 + results/classifier/105/device/555 | 14 + results/classifier/105/device/556 | 32 + results/classifier/105/device/558 | 70 + results/classifier/105/device/56 | 14 + results/classifier/105/device/560 | 14 + results/classifier/105/device/561 | 14 + results/classifier/105/device/562 | 14 + results/classifier/105/device/565 | 14 + results/classifier/105/device/566 | 14 + results/classifier/105/device/567 | 14 + results/classifier/105/device/568228 | 1406 ++++ results/classifier/105/device/569 | 14 + results/classifier/105/device/570 | 14 + results/classifier/105/device/571 | 14 + results/classifier/105/device/57195159 | 323 + results/classifier/105/device/57231878 | 250 + results/classifier/105/device/573 | 14 + results/classifier/105/device/574 | 14 + results/classifier/105/device/575 | 14 + results/classifier/105/device/576 | 14 + results/classifier/105/device/581 | 14 + results/classifier/105/device/582 | 14 + results/classifier/105/device/584146 | 25 + results/classifier/105/device/584155 | 26 + results/classifier/105/device/585 | 14 + results/classifier/105/device/586 | 14 + results/classifier/105/device/588691 | 35 + results/classifier/105/device/588693 | 24 + results/classifier/105/device/588748 | 74 + results/classifier/105/device/589 | 14 + results/classifier/105/device/590 | 14 + results/classifier/105/device/591 | 14 + results/classifier/105/device/597 | 32 + results/classifier/105/device/598 | 14 + results/classifier/105/device/602544 | 22 + results/classifier/105/device/603 | 14 + results/classifier/105/device/604 | 14 + results/classifier/105/device/606 | 14 + results/classifier/105/device/608 | 14 + results/classifier/105/device/609 | 22 + results/classifier/105/device/61 | 14 + 
results/classifier/105/device/613529 | 39 + results/classifier/105/device/614 | 14 + results/classifier/105/device/615 | 23 + results/classifier/105/device/617 | 39 + results/classifier/105/device/620 | 14 + results/classifier/105/device/621 | 14 + results/classifier/105/device/623 | 21 + results/classifier/105/device/623852 | 336 + results/classifier/105/device/628 | 21 + results/classifier/105/device/628082 | 29 + results/classifier/105/device/63 | 14 + results/classifier/105/device/630 | 14 + results/classifier/105/device/633 | 45 + results/classifier/105/device/635 | 41 + results/classifier/105/device/637 | 17 + results/classifier/105/device/638955 | 1117 +++ results/classifier/105/device/64 | 14 + results/classifier/105/device/641 | 14 + results/classifier/105/device/642 | 17 + results/classifier/105/device/644 | 22 + results/classifier/105/device/645 | 14 + results/classifier/105/device/648 | 14 + results/classifier/105/device/649 | 21 + results/classifier/105/device/65 | 14 + results/classifier/105/device/650 | 37 + results/classifier/105/device/651 | 14 + results/classifier/105/device/655 | 45 + results/classifier/105/device/658152 | 37 + results/classifier/105/device/66 | 14 + results/classifier/105/device/660366 | 135 + results/classifier/105/device/661696 | 213 + results/classifier/105/device/662 | 24 + results/classifier/105/device/663 | 24 + results/classifier/105/device/665 | 65 + results/classifier/105/device/666 | 20 + results/classifier/105/device/667 | 14 + results/classifier/105/device/667791 | 57 + results/classifier/105/device/67 | 14 + results/classifier/105/device/672 | 16 + results/classifier/105/device/675 | 23 + results/classifier/105/device/677 | 14 + results/classifier/105/device/678 | 60 + results/classifier/105/device/67821138 | 207 + results/classifier/105/device/679 | 14 + results/classifier/105/device/68 | 14 + results/classifier/105/device/681 | 38 + results/classifier/105/device/683 | 14 + results/classifier/105/device/684 | 14 + results/classifier/105/device/689 | 46 + results/classifier/105/device/69 | 14 + results/classifier/105/device/695 | 14 + results/classifier/105/device/699 | 14 + results/classifier/105/device/700 | 14 + results/classifier/105/device/702 | 14 + results/classifier/105/device/705 | 40 + results/classifier/105/device/709 | 14 + results/classifier/105/device/71 | 14 + results/classifier/105/device/710234 | 30 + results/classifier/105/device/717929 | 43 + results/classifier/105/device/718 | 19 + results/classifier/105/device/720 | 14 + results/classifier/105/device/720657 | 42 + results/classifier/105/device/722 | 25 + results/classifier/105/device/723460 | 68 + results/classifier/105/device/726 | 14 + results/classifier/105/device/728 | 29 + results/classifier/105/device/730 | 14 + results/classifier/105/device/732 | 14 + results/classifier/105/device/737 | 16 + results/classifier/105/device/738 | 16 + results/classifier/105/device/739785 | 931 +++ results/classifier/105/device/74 | 14 + results/classifier/105/device/743 | 24 + results/classifier/105/device/745 | 49 + results/classifier/105/device/747 | 43 + results/classifier/105/device/749 | 14 + results/classifier/105/device/75 | 14 + results/classifier/105/device/751 | 14 + results/classifier/105/device/752 | 26 + results/classifier/105/device/753 | 14 + results/classifier/105/device/756 | 14 + results/classifier/105/device/757 | 14 + results/classifier/105/device/757654 | 62 + results/classifier/105/device/76 | 14 + results/classifier/105/device/760 | 16 + 
results/classifier/105/device/767 | 16 + results/classifier/105/device/770 | 14 + results/classifier/105/device/772358 | 40 + results/classifier/105/device/775 | 17 + results/classifier/105/device/776 | 38 + results/classifier/105/device/777 | 22 + results/classifier/105/device/779151 | 48 + results/classifier/105/device/78 | 14 + results/classifier/105/device/782 | 18 + results/classifier/105/device/783 | 16 + results/classifier/105/device/784 | 26 + results/classifier/105/device/785 | 14 + results/classifier/105/device/786208 | 28 + results/classifier/105/device/786209 | 28 + results/classifier/105/device/786440 | 40 + results/classifier/105/device/786442 | 23 + results/classifier/105/device/787 | 25 + results/classifier/105/device/788 | 14 + results/classifier/105/device/79 | 14 + results/classifier/105/device/790 | 14 + results/classifier/105/device/791 | 14 + results/classifier/105/device/792 | 14 + results/classifier/105/device/795 | 14 + results/classifier/105/device/796 | 30 + results/classifier/105/device/80 | 14 + results/classifier/105/device/800 | 38 + results/classifier/105/device/801 | 25 + results/classifier/105/device/804 | 22 + results/classifier/105/device/805 | 27 + results/classifier/105/device/808 | 31 + results/classifier/105/device/81 | 14 + results/classifier/105/device/812398 | 38 + results/classifier/105/device/815 | 14 + results/classifier/105/device/818 | 18 + results/classifier/105/device/82 | 14 + results/classifier/105/device/821 | 14 + results/classifier/105/device/823733 | 161 + results/classifier/105/device/827 | 14 + results/classifier/105/device/830 | 14 + results/classifier/105/device/831 | 14 + results/classifier/105/device/832 | 26 + results/classifier/105/device/837 | 43 + results/classifier/105/device/839790 | 51 + results/classifier/105/device/84 | 14 + results/classifier/105/device/840 | 23 + results/classifier/105/device/843 | 16 + results/classifier/105/device/846 | 14 + results/classifier/105/device/847 | 43 + results/classifier/105/device/85 | 14 + results/classifier/105/device/852 | 44 + results/classifier/105/device/858 | 24 + results/classifier/105/device/859 | 14 + results/classifier/105/device/86 | 14 + results/classifier/105/device/861 | 14 + results/classifier/105/device/864 | 28 + results/classifier/105/device/873 | 14 + results/classifier/105/device/873460 | 45 + results/classifier/105/device/875 | 14 + results/classifier/105/device/877498 | 75 + results/classifier/105/device/879 | 16 + results/classifier/105/device/88 | 14 + results/classifier/105/device/881637 | 34 + results/classifier/105/device/884 | 21 + results/classifier/105/device/887 | 14 + results/classifier/105/device/888150 | 29 + results/classifier/105/device/889 | 14 + results/classifier/105/device/89 | 14 + results/classifier/105/device/893 | 14 + results/classifier/105/device/896 | 14 + results/classifier/105/device/897 | 14 + results/classifier/105/device/90 | 14 + results/classifier/105/device/900 | 14 + results/classifier/105/device/901 | 23 + results/classifier/105/device/902 | 14 + results/classifier/105/device/904617 | 179 + results/classifier/105/device/906864 | 33 + results/classifier/105/device/910 | 14 + results/classifier/105/device/912983 | 62 + results/classifier/105/device/913 | 14 + results/classifier/105/device/914 | 14 + results/classifier/105/device/916720 | 75 + results/classifier/105/device/918 | 14 + results/classifier/105/device/92 | 14 + results/classifier/105/device/923 | 14 + results/classifier/105/device/924 | 14 + 
results/classifier/105/device/926 | 14 + results/classifier/105/device/928676 | 55 + results/classifier/105/device/93 | 14 + results/classifier/105/device/930 | 14 + results/classifier/105/device/931 | 14 + results/classifier/105/device/932 | 27 + results/classifier/105/device/933 | 39 + results/classifier/105/device/938 | 14 + results/classifier/105/device/94 | 14 + results/classifier/105/device/941 | 54 + results/classifier/105/device/942 | 14 + results/classifier/105/device/944 | 41 + results/classifier/105/device/944628 | 31 + results/classifier/105/device/95 | 14 + results/classifier/105/device/950692 | 130 + results/classifier/105/device/955 | 14 + results/classifier/105/device/955379 | 436 ++ results/classifier/105/device/959852 | 46 + results/classifier/105/device/96 | 14 + results/classifier/105/device/965 | 14 + results/classifier/105/device/972 | 14 + results/classifier/105/device/975 | 51 + results/classifier/105/device/977 | 14 + results/classifier/105/device/978 | 14 + results/classifier/105/device/98 | 14 + results/classifier/105/device/985288 | 23 + results/classifier/105/device/986 | 52 + results/classifier/105/device/986318 | 25 + results/classifier/105/device/989504 | 41 + results/classifier/105/device/99 | 14 + results/classifier/105/device/990 | 14 + results/classifier/105/device/990364 | 443 ++ results/classifier/105/device/991 | 19 + results/classifier/105/device/994 | 18 + results/classifier/105/device/996 | 39 + results/classifier/105/device/99674399 | 156 + results/classifier/105/graphic/1000 | 17 + results/classifier/105/graphic/1003 | 34 + results/classifier/105/graphic/1004 | 22 + results/classifier/105/graphic/1004408 | 52 + results/classifier/105/graphic/1005 | 190 + results/classifier/105/graphic/1007490 | 31 + results/classifier/105/graphic/1008 | 33 + results/classifier/105/graphic/1013241 | 81 + results/classifier/105/graphic/1013691 | 38 + results/classifier/105/graphic/1017 | 26 + results/classifier/105/graphic/1019 | 26 + results/classifier/105/graphic/1020 | 29 + results/classifier/105/graphic/1022023 | 38 + results/classifier/105/graphic/1023 | 73 + results/classifier/105/graphic/1024275 | 32 + results/classifier/105/graphic/1025 | 16 + results/classifier/105/graphic/1028 | 47 + results/classifier/105/graphic/1030807 | 71 + results/classifier/105/graphic/1031955 | 61 + results/classifier/105/graphic/1033494 | 28 + results/classifier/105/graphic/1036 | 28 + results/classifier/105/graphic/104 | 14 + results/classifier/105/graphic/1040 | 19 + results/classifier/105/graphic/1041 | 44 + results/classifier/105/graphic/1042084 | 105 + results/classifier/105/graphic/1046 | 25 + results/classifier/105/graphic/1047 | 117 + results/classifier/105/graphic/1047470 | 81 + results/classifier/105/graphic/1050 | 85 + results/classifier/105/graphic/1051 | 14 + results/classifier/105/graphic/1054 | 43 + results/classifier/105/graphic/1058 | 21 + results/classifier/105/graphic/1059 | 23 + results/classifier/105/graphic/1062589 | 153 + results/classifier/105/graphic/1063 | 22 + results/classifier/105/graphic/1068 | 24 + results/classifier/105/graphic/1069 | 26 + results/classifier/105/graphic/1075 | 29 + results/classifier/105/graphic/1081416 | 111 + results/classifier/105/graphic/1082 | 105 + results/classifier/105/graphic/1086 | 82 + results/classifier/105/graphic/1086745 | 265 + results/classifier/105/graphic/1087411 | 36 + results/classifier/105/graphic/1088617 | 34 + results/classifier/105/graphic/1089 | 37 + results/classifier/105/graphic/1090615 | 43 + 
results/classifier/105/graphic/1091115 | 49 + results/classifier/105/graphic/1093 | 46 + results/classifier/105/graphic/1093691 | 86 + results/classifier/105/graphic/1094 | 21 + results/classifier/105/graphic/1096 | 14 + results/classifier/105/graphic/1096713 | 26 + results/classifier/105/graphic/1098 | 24 + results/classifier/105/graphic/1099403 | 27 + results/classifier/105/graphic/1101210 | 221 + results/classifier/105/graphic/1107 | 37 + results/classifier/105/graphic/1113 | 27 + results/classifier/105/graphic/1115 | 27 + results/classifier/105/graphic/1119861 | 39 + results/classifier/105/graphic/1120 | 25 + results/classifier/105/graphic/1126 | 21 + results/classifier/105/graphic/1126369 | 36 + results/classifier/105/graphic/1128 | 37 + results/classifier/105/graphic/1129 | 36 + results/classifier/105/graphic/1130533 | 77 + results/classifier/105/graphic/1141 | 23 + results/classifier/105/graphic/1144 | 26 + results/classifier/105/graphic/1145 | 40 + results/classifier/105/graphic/1146 | 114 + results/classifier/105/graphic/1147 | 22 + results/classifier/105/graphic/1151 | 62 + results/classifier/105/graphic/1151450 | 36 + results/classifier/105/graphic/1157368 | 51 + results/classifier/105/graphic/1158912 | 323 + results/classifier/105/graphic/1159 | 45 + results/classifier/105/graphic/1162 | 25 + results/classifier/105/graphic/1162227 | 43 + results/classifier/105/graphic/1163 | 24 + results/classifier/105/graphic/1166 | 38 + results/classifier/105/graphic/1168 | 26 + results/classifier/105/graphic/1168733 | 28 + results/classifier/105/graphic/1169049 | 40 + results/classifier/105/graphic/1172613 | 83 + results/classifier/105/graphic/1175 | 21 + results/classifier/105/graphic/1175513 | 123 + results/classifier/105/graphic/1177 | 29 + results/classifier/105/graphic/1179 | 78 + results/classifier/105/graphic/1184 | 82 + results/classifier/105/graphic/1184616 | 218 + results/classifier/105/graphic/1185 | 18 + results/classifier/105/graphic/1185888 | 43 + results/classifier/105/graphic/1186303 | 101 + results/classifier/105/graphic/1186935 | 46 + results/classifier/105/graphic/1186984 | 67 + results/classifier/105/graphic/1187319 | 26 + results/classifier/105/graphic/1187334 | 26 + results/classifier/105/graphic/1188991 | 81 + results/classifier/105/graphic/1191457 | 19 + results/classifier/105/graphic/1192344 | 39 + results/classifier/105/graphic/1193555 | 29 + results/classifier/105/graphic/1194 | 28 + results/classifier/105/graphic/1196773 | 27 + results/classifier/105/graphic/1200 | 38 + results/classifier/105/graphic/1201 | 23 + results/classifier/105/graphic/1204697 | 144 + results/classifier/105/graphic/1205 | 20 + results/classifier/105/graphic/1207 | 16 + results/classifier/105/graphic/1207896 | 22 + results/classifier/105/graphic/1211910 | 26 + results/classifier/105/graphic/1214 | 14 + results/classifier/105/graphic/1216 | 22 + results/classifier/105/graphic/1219 | 26 + results/classifier/105/graphic/1220 | 29 + results/classifier/105/graphic/1221966 | 71 + results/classifier/105/graphic/1223 | 24 + results/classifier/105/graphic/1225187 | 56 + results/classifier/105/graphic/1230232 | 50 + results/classifier/105/graphic/1231 | 26 + results/classifier/105/graphic/1235 | 193 + results/classifier/105/graphic/1239 | 49 + results/classifier/105/graphic/1240 | 28 + results/classifier/105/graphic/1241 | 26 + results/classifier/105/graphic/1242963 | 71 + results/classifier/105/graphic/1243 | 14 + results/classifier/105/graphic/1243968 | 52 + results/classifier/105/graphic/1247478 | 
47 + results/classifier/105/graphic/1252 | 30 + results/classifier/105/graphic/1252270 | 48 + results/classifier/105/graphic/1254 | 68 + results/classifier/105/graphic/1254940 | 109 + results/classifier/105/graphic/1255 | 24 + results/classifier/105/graphic/1255303 | 411 ++ results/classifier/105/graphic/1256 | 35 + results/classifier/105/graphic/1256122 | 70 + results/classifier/105/graphic/1256432 | 21 + results/classifier/105/graphic/1257352 | 184 + results/classifier/105/graphic/1258626 | 39 + results/classifier/105/graphic/1261 | 38 + results/classifier/105/graphic/1267 | 106 + results/classifier/105/graphic/1269628 | 32 + results/classifier/105/graphic/1270 | 27 + results/classifier/105/graphic/1272252 | 44 + results/classifier/105/graphic/1274 | 45 + results/classifier/105/graphic/1276 | 28 + results/classifier/105/graphic/1278 | 19 + results/classifier/105/graphic/1280961 | 29 + results/classifier/105/graphic/1285 | 33 + results/classifier/105/graphic/1288 | 22 + results/classifier/105/graphic/1289788 | 55 + results/classifier/105/graphic/1292037 | 32 + results/classifier/105/graphic/1295 | 40 + results/classifier/105/graphic/1296 | 21 + results/classifier/105/graphic/1297 | 14 + results/classifier/105/graphic/1301 | 30 + results/classifier/105/graphic/1304 | 22 + results/classifier/105/graphic/1305400 | 117 + results/classifier/105/graphic/1310 | 203 + results/classifier/105/graphic/1315257 | 68 + results/classifier/105/graphic/1317 | 62 + results/classifier/105/graphic/1318281 | 51 + results/classifier/105/graphic/1319 | 26 + results/classifier/105/graphic/1321 | 21 + results/classifier/105/graphic/1321684 | 94 + results/classifier/105/graphic/1323758 | 386 + results/classifier/105/graphic/1324 | 53 + results/classifier/105/graphic/1324727 | 46 + results/classifier/105/graphic/1325 | 94 + results/classifier/105/graphic/1328 | 22 + results/classifier/105/graphic/1329 | 25 + results/classifier/105/graphic/1329956 | 238 + results/classifier/105/graphic/1331859 | 34 + results/classifier/105/graphic/1332297 | 178 + results/classifier/105/graphic/1333 | 24 + results/classifier/105/graphic/1336801 | 95 + results/classifier/105/graphic/1338591 | 57 + results/classifier/105/graphic/1340 | 79 + results/classifier/105/graphic/1341 | 91 + results/classifier/105/graphic/1341032 | 25 + results/classifier/105/graphic/1342686 | 47 + results/classifier/105/graphic/1342704 | 39 + results/classifier/105/graphic/1343 | 49 + results/classifier/105/graphic/1343827 | 66 + results/classifier/105/graphic/1346769 | 55 + results/classifier/105/graphic/1346784 | 84 + results/classifier/105/graphic/1349 | 20 + results/classifier/105/graphic/1349277 | 75 + results/classifier/105/graphic/1349972 | 53 + results/classifier/105/graphic/1351 | 18 + results/classifier/105/graphic/1352130 | 46 + results/classifier/105/graphic/1353456 | 45 + results/classifier/105/graphic/1355644 | 35 + results/classifier/105/graphic/1355697 | 64 + results/classifier/105/graphic/1356 | 30 + results/classifier/105/graphic/1356969 | 37 + results/classifier/105/graphic/1357445 | 43 + results/classifier/105/graphic/1359930 | 152 + results/classifier/105/graphic/1360 | 32 + results/classifier/105/graphic/1361 | 33 + results/classifier/105/graphic/1361618 | 46 + results/classifier/105/graphic/1362 | 88 + results/classifier/105/graphic/1363467 | 38 + results/classifier/105/graphic/1363641 | 309 + results/classifier/105/graphic/1365 | 37 + results/classifier/105/graphic/1366363 | 47 + results/classifier/105/graphic/1368 | 51 + 
results/classifier/105/graphic/1368178 | 82 + results/classifier/105/graphic/1370585 | 35 + results/classifier/105/graphic/1374905 | 49 + results/classifier/105/graphic/1376938 | 38 + results/classifier/105/graphic/1378 | 33 + results/classifier/105/graphic/1378407 | 27 + results/classifier/105/graphic/1379688 | 38 + results/classifier/105/graphic/1380 | 17 + results/classifier/105/graphic/1381846 | 27 + results/classifier/105/graphic/1381879 | 51 + results/classifier/105/graphic/1386197 | 33 + results/classifier/105/graphic/1392468 | 39 + results/classifier/105/graphic/1395958 | 63 + results/classifier/105/graphic/1396 | 16 + results/classifier/105/graphic/1399939 | 28 + results/classifier/105/graphic/1399943 | 92 + results/classifier/105/graphic/1401 | 33 + results/classifier/105/graphic/1404 | 27 + results/classifier/105/graphic/1404690 | 148 + results/classifier/105/graphic/1406016 | 120 + results/classifier/105/graphic/1407 | 89 + results/classifier/105/graphic/1410 | 27 + results/classifier/105/graphic/1412098 | 114 + results/classifier/105/graphic/1415181 | 102 + results/classifier/105/graphic/1416988 | 48 + results/classifier/105/graphic/1419 | 105 + results/classifier/105/graphic/1420 | 52 + results/classifier/105/graphic/1421 | 32 + results/classifier/105/graphic/1422307 | 283 + results/classifier/105/graphic/1424237 | 69 + results/classifier/105/graphic/1426593 | 56 + results/classifier/105/graphic/1429034 | 38 + results/classifier/105/graphic/1433081 | 322 + results/classifier/105/graphic/1435 | 29 + results/classifier/105/graphic/1435973 | 77 + results/classifier/105/graphic/1437 | 19 + results/classifier/105/graphic/1439 | 24 + results/classifier/105/graphic/1439800 | 26 + results/classifier/105/graphic/1440843 | 51 + results/classifier/105/graphic/1445 | 140 + results/classifier/105/graphic/1451067 | 66 + results/classifier/105/graphic/1455 | 16 + results/classifier/105/graphic/1459626 | 33 + results/classifier/105/graphic/1462944 | 42 + results/classifier/105/graphic/1463 | 54 + results/classifier/105/graphic/1463172 | 44 + results/classifier/105/graphic/1463909 | 87 + results/classifier/105/graphic/1464 | 16 + results/classifier/105/graphic/1468 | 19 + results/classifier/105/graphic/1469946 | 265 + results/classifier/105/graphic/1470720 | 66 + results/classifier/105/graphic/1471 | 29 + results/classifier/105/graphic/1471583 | 172 + results/classifier/105/graphic/1471904 | 174 + results/classifier/105/graphic/1472 | 18 + results/classifier/105/graphic/1474 | 21 + results/classifier/105/graphic/1475 | 27 + results/classifier/105/graphic/1477 | 304 + results/classifier/105/graphic/1479632 | 84 + results/classifier/105/graphic/1482425 | 33 + results/classifier/105/graphic/1484925 | 65 + results/classifier/105/graphic/1485180 | 39 + results/classifier/105/graphic/1486768 | 37 + results/classifier/105/graphic/1488 | 48 + results/classifier/105/graphic/1492649 | 58 + results/classifier/105/graphic/1493033 | 71 + results/classifier/105/graphic/1495 | 19 + results/classifier/105/graphic/1496 | 40 + results/classifier/105/graphic/1496712 | 55 + results/classifier/105/graphic/1497479 | 54 + results/classifier/105/graphic/1499 | 103 + results/classifier/105/graphic/1508 | 104 + results/classifier/105/graphic/1508405 | 138 + results/classifier/105/graphic/1518 | 103 + results/classifier/105/graphic/1519037 | 35 + results/classifier/105/graphic/1520730 | 35 + results/classifier/105/graphic/1525 | 91 + results/classifier/105/graphic/1527300 | 28 + results/classifier/105/graphic/1529173 | 
53 + results/classifier/105/graphic/1529226 | 51 + results/classifier/105/graphic/1529859 | 210 + results/classifier/105/graphic/1530 | 24 + results/classifier/105/graphic/1530035 | 42 + results/classifier/105/graphic/1530278 | 31 + results/classifier/105/graphic/1530386 | 49 + results/classifier/105/graphic/1531 | 28 + results/classifier/105/graphic/1531632 | 128 + results/classifier/105/graphic/1534382 | 44 + results/classifier/105/graphic/1534683 | 31 + results/classifier/105/graphic/1535 | 104 + results/classifier/105/graphic/1536 | 29 + results/classifier/105/graphic/1537 | 24 + results/classifier/105/graphic/1538541 | 78 + results/classifier/105/graphic/1539940 | 182 + results/classifier/105/graphic/1546445 | 61 + results/classifier/105/graphic/1546680 | 35 + results/classifier/105/graphic/1548166 | 47 + results/classifier/105/graphic/1550 | 29 + results/classifier/105/graphic/1550743 | 43 + results/classifier/105/graphic/1552 | 28 + results/classifier/105/graphic/1553 | 25 + results/classifier/105/graphic/1554 | 19 + results/classifier/105/graphic/1554451 | 33 + results/classifier/105/graphic/1555452 | 30 + results/classifier/105/graphic/1556 | 48 + results/classifier/105/graphic/1556044 | 26 + results/classifier/105/graphic/1556306 | 184 + results/classifier/105/graphic/1556372 | 39 + results/classifier/105/graphic/1557 | 24 + results/classifier/105/graphic/1569491 | 25 + results/classifier/105/graphic/1571 | 25 + results/classifier/105/graphic/1576 | 41 + results/classifier/105/graphic/1577 | 97 + results/classifier/105/graphic/1580459 | 3956 +++++++++++ results/classifier/105/graphic/1581 | 27 + results/classifier/105/graphic/1581695 | 35 + results/classifier/105/graphic/1585 | 40 + results/classifier/105/graphic/1585971 | 42 + results/classifier/105/graphic/1592 | 29 + results/classifier/105/graphic/1592336 | 45 + results/classifier/105/graphic/1593 | 20 + results/classifier/105/graphic/1596 | 33 + results/classifier/105/graphic/1598612 | 45 + results/classifier/105/graphic/1599 | 22 + results/classifier/105/graphic/1599214 | 153 + results/classifier/105/graphic/1600 | 38 + results/classifier/105/graphic/1600563 | 54 + results/classifier/105/graphic/1600681 | 67 + results/classifier/105/graphic/1603734 | 31 + results/classifier/105/graphic/1603970 | 53 + results/classifier/105/graphic/1605443 | 32 + results/classifier/105/graphic/1606 | 42 + results/classifier/105/graphic/1606708 | 39 + results/classifier/105/graphic/1609 | 32 + results/classifier/105/graphic/1611979 | 27 + results/classifier/105/graphic/1613 | 50 + results/classifier/105/graphic/1614348 | 60 + results/classifier/105/graphic/1615 | 24 + results/classifier/105/graphic/1615212 | 35 + results/classifier/105/graphic/1617 | 75 + results/classifier/105/graphic/1617385 | 62 + results/classifier/105/graphic/1618301 | 64 + results/classifier/105/graphic/1622582 | 108 + results/classifier/105/graphic/1623 | 22 + results/classifier/105/graphic/1623020 | 87 + results/classifier/105/graphic/1624 | 36 + results/classifier/105/graphic/1624896 | 41 + results/classifier/105/graphic/1625 | 26 + results/classifier/105/graphic/1626 | 18 + results/classifier/105/graphic/1629 | 14 + results/classifier/105/graphic/1629282 | 77 + results/classifier/105/graphic/1632 | 503 ++ results/classifier/105/graphic/1634 | 31 + results/classifier/105/graphic/1635 | 50 + results/classifier/105/graphic/1636 | 115 + results/classifier/105/graphic/1636126 | 69 + results/classifier/105/graphic/1636770 | 39 + results/classifier/105/graphic/1639225 | 171 
+ results/classifier/105/graphic/1639322 | 44 + results/classifier/105/graphic/1639983 | 46 + results/classifier/105/graphic/1640 | 38 + results/classifier/105/graphic/1641637 | 743 ++ results/classifier/105/graphic/1644 | 27 + results/classifier/105/graphic/1645 | 21 + results/classifier/105/graphic/1646 | 74 + results/classifier/105/graphic/1646610 | 96 + results/classifier/105/graphic/1649042 | 63 + results/classifier/105/graphic/1649233 | 38 + results/classifier/105/graphic/1651167 | 563 ++ results/classifier/105/graphic/1652011 | 155 + results/classifier/105/graphic/1652373 | 28 + results/classifier/105/graphic/1653 | 33 + results/classifier/105/graphic/1654826 | 41 + results/classifier/105/graphic/1655702 | 30 + results/classifier/105/graphic/1656711 | 36 + results/classifier/105/graphic/1657 | 46 + results/classifier/105/graphic/1658 | 73 + results/classifier/105/graphic/1658120 | 223 + results/classifier/105/graphic/1659 | 40 + results/classifier/105/graphic/1660946 | 266 + results/classifier/105/graphic/1662600 | 51 + results/classifier/105/graphic/1665789 | 28 + results/classifier/105/graphic/1665791 | 19 + results/classifier/105/graphic/1667613 | 32 + results/classifier/105/graphic/1668273 | 96 + results/classifier/105/graphic/1668556 | 37 + results/classifier/105/graphic/1671173 | 54 + results/classifier/105/graphic/1671677 | 45 + results/classifier/105/graphic/1673130 | 309 + results/classifier/105/graphic/1674 | 36 + results/classifier/105/graphic/1674056 | 55 + results/classifier/105/graphic/1674114 | 55 + results/classifier/105/graphic/1674117 | 99 + results/classifier/105/graphic/1676 | 20 + results/classifier/105/graphic/1677 | 26 + results/classifier/105/graphic/1678 | 21 + results/classifier/105/graphic/1679 | 28 + results/classifier/105/graphic/1679126 | 104 + results/classifier/105/graphic/1681 | 62 + results/classifier/105/graphic/1687 | 66 + results/classifier/105/graphic/1687214 | 34 + results/classifier/105/graphic/1687599 | 42 + results/classifier/105/graphic/1687653 | 151 + results/classifier/105/graphic/1689003 | 34 + results/classifier/105/graphic/1689245 | 35 + results/classifier/105/graphic/169 | 14 + results/classifier/105/graphic/1690322 | 32 + results/classifier/105/graphic/1693 | 42 + results/classifier/105/graphic/1693050 | 47 + results/classifier/105/graphic/1695 | 24 + results/classifier/105/graphic/1695169 | 23 + results/classifier/105/graphic/1698574 | 75 + results/classifier/105/graphic/1699867 | 42 + results/classifier/105/graphic/1701 | 18 + results/classifier/105/graphic/1701449 | 92 + results/classifier/105/graphic/1701808 | 52 + results/classifier/105/graphic/1701835 | 280 + results/classifier/105/graphic/1702 | 21 + results/classifier/105/graphic/1703795 | 79 + results/classifier/105/graphic/1707 | 36 + results/classifier/105/graphic/1708551 | 155 + results/classifier/105/graphic/1712564 | 45 + results/classifier/105/graphic/1713066 | 78 + results/classifier/105/graphic/1713328 | 31 + results/classifier/105/graphic/1714 | 40 + results/classifier/105/graphic/1715 | 14 + results/classifier/105/graphic/1715573 | 27 + results/classifier/105/graphic/1716 | 22 + results/classifier/105/graphic/1716132 | 40 + results/classifier/105/graphic/1716767 | 123 + results/classifier/105/graphic/1717 | 42 + results/classifier/105/graphic/1717414 | 48 + results/classifier/105/graphic/1718964 | 130 + results/classifier/105/graphic/1719196 | 749 ++ results/classifier/105/graphic/1721187 | 50 + results/classifier/105/graphic/1721468 | 77 + 
results/classifier/105/graphic/1721744 | 96 + results/classifier/105/graphic/1722 | 100 + results/classifier/105/graphic/1724590 | 40 + results/classifier/105/graphic/1725 | 34 + results/classifier/105/graphic/1727259 | 240 + results/classifier/105/graphic/1728 | 31 + results/classifier/105/graphic/1728116 | 72 + results/classifier/105/graphic/1729623 | 324 + results/classifier/105/graphic/1730 | 29 + results/classifier/105/graphic/1731 | 25 + results/classifier/105/graphic/1731588 | 35 + results/classifier/105/graphic/1734810 | 60 + results/classifier/105/graphic/1738202 | 63 + results/classifier/105/graphic/1738767 | 41 + results/classifier/105/graphic/1739 | 49 + results/classifier/105/graphic/1739413 | 70 + results/classifier/105/graphic/1740887 | 44 + results/classifier/105/graphic/1743337 | 29 + results/classifier/105/graphic/1743441 | 25 + results/classifier/105/graphic/1745354 | 31 + results/classifier/105/graphic/1746394 | 49 + results/classifier/105/graphic/1746943 | 40 + results/classifier/105/graphic/1747056 | 82 + results/classifier/105/graphic/1748612 | 48 + results/classifier/105/graphic/1748756 | 25 + results/classifier/105/graphic/1749223 | 50 + results/classifier/105/graphic/1750 | 14 + results/classifier/105/graphic/1752 | 35 + results/classifier/105/graphic/1754295 | 30 + results/classifier/105/graphic/1755 | 33 + results/classifier/105/graphic/1755479 | 62 + results/classifier/105/graphic/1757363 | 67 + results/classifier/105/graphic/1758 | 25 + results/classifier/105/graphic/1760 | 66 + results/classifier/105/graphic/1761153 | 45 + results/classifier/105/graphic/1761401 | 32 + results/classifier/105/graphic/1762 | 96 + results/classifier/105/graphic/1762558 | 76 + results/classifier/105/graphic/1765970 | 93 + results/classifier/105/graphic/1766841 | 94 + results/classifier/105/graphic/1766896 | 193 + results/classifier/105/graphic/1767146 | 59 + results/classifier/105/graphic/1767200 | 34 + results/classifier/105/graphic/1768295 | 73 + results/classifier/105/graphic/1770 | 35 + results/classifier/105/graphic/1770417 | 67 + results/classifier/105/graphic/1770859 | 29 + results/classifier/105/graphic/1777236 | 30 + results/classifier/105/graphic/1777252 | 71 + results/classifier/105/graphic/1777315 | 153 + results/classifier/105/graphic/1778182 | 49 + results/classifier/105/graphic/1779 | 43 + results/classifier/105/graphic/1779162 | 109 + results/classifier/105/graphic/1779649 | 26 + results/classifier/105/graphic/1779650 | 23 + results/classifier/105/graphic/1779955 | 105 + results/classifier/105/graphic/1781 | 67 + results/classifier/105/graphic/1781211 | 39 + results/classifier/105/graphic/1781463 | 117 + results/classifier/105/graphic/1781515 | 52 + results/classifier/105/graphic/1782300 | 118 + results/classifier/105/graphic/1783422 | 91 + results/classifier/105/graphic/1783437 | 40 + results/classifier/105/graphic/1785485 | 39 + results/classifier/105/graphic/1785698 | 598 ++ results/classifier/105/graphic/1787 | 26 + results/classifier/105/graphic/1787070 | 70 + results/classifier/105/graphic/1787754 | 114 + results/classifier/105/graphic/1789751 | 39 + results/classifier/105/graphic/1790260 | 40 + results/classifier/105/graphic/1790460 | 49 + results/classifier/105/graphic/1792193 | 58 + results/classifier/105/graphic/1793119 | 75 + results/classifier/105/graphic/1793183 | 82 + results/classifier/105/graphic/1793635 | 26 + results/classifier/105/graphic/1794202 | 21 + results/classifier/105/graphic/1794950 | 102 + results/classifier/105/graphic/1795148 | 36 + 
results/classifier/105/graphic/1795527 | 278 + results/classifier/105/graphic/1795799 | 72 + results/classifier/105/graphic/1796 | 29 + results/classifier/105/graphic/1797332 | 611 ++ results/classifier/105/graphic/1798434 | 26 + results/classifier/105/graphic/1798451 | 602 ++ results/classifier/105/graphic/1799792 | 47 + results/classifier/105/graphic/1799919 | 38 + results/classifier/105/graphic/1800 | 45 + results/classifier/105/graphic/1801073 | 91 + results/classifier/105/graphic/1802915 | 54 + results/classifier/105/graphic/1803 | 27 + results/classifier/105/graphic/1804961 | 177 + results/classifier/105/graphic/1805 | 79 + results/classifier/105/graphic/1806 | 18 + results/classifier/105/graphic/1806196 | 40 + results/classifier/105/graphic/1807 | 37 + results/classifier/105/graphic/1808824 | 32 + results/classifier/105/graphic/1809291 | 144 + results/classifier/105/graphic/1810000 | 132 + results/classifier/105/graphic/1810405 | 45 + results/classifier/105/graphic/1811782 | 28 + results/classifier/105/graphic/1812694 | 29 + results/classifier/105/graphic/1813010 | 81 + results/classifier/105/graphic/1813034 | 27 + results/classifier/105/graphic/1813406 | 30 + results/classifier/105/graphic/1814 | 29 + results/classifier/105/graphic/1814128 | 169 + results/classifier/105/graphic/1815009 | 27 + results/classifier/105/graphic/1815252 | 103 + results/classifier/105/graphic/1815911 | 69 + results/classifier/105/graphic/1816 | 87 + results/classifier/105/graphic/1817345 | 175 + results/classifier/105/graphic/1817846 | 68 + results/classifier/105/graphic/1818 | 33 + results/classifier/105/graphic/1818483 | 111 + results/classifier/105/graphic/1818880 | 247 + results/classifier/105/graphic/1819908 | 40 + results/classifier/105/graphic/1821 | 66 + results/classifier/105/graphic/1821515 | 101 + results/classifier/105/graphic/1822 | 20 + results/classifier/105/graphic/1822012 | 83 + results/classifier/105/graphic/1822798 | 38 + results/classifier/105/graphic/1823 | 24 + results/classifier/105/graphic/1824528 | 74 + results/classifier/105/graphic/1826175 | 63 + results/classifier/105/graphic/1826393 | 725 ++ results/classifier/105/graphic/1826599 | 32 + results/classifier/105/graphic/1827 | 14 + results/classifier/105/graphic/1827005 | 46 + results/classifier/105/graphic/1827772 | 37 + results/classifier/105/graphic/1828272 | 40 + results/classifier/105/graphic/1828429 | 36 + results/classifier/105/graphic/1829079 | 48 + results/classifier/105/graphic/1830 | 39 + results/classifier/105/graphic/1832535 | 38 + results/classifier/105/graphic/1833 | 97 + results/classifier/105/graphic/1833048 | 37 + results/classifier/105/graphic/1833871 | 34 + results/classifier/105/graphic/1835 | 31 + results/classifier/105/graphic/1835477 | 27 + results/classifier/105/graphic/1835693 | 37 + results/classifier/105/graphic/1835729 | 40 + results/classifier/105/graphic/1835732 | 36 + results/classifier/105/graphic/1836430 | 30 + results/classifier/105/graphic/1836558 | 458 ++ results/classifier/105/graphic/1836762 | 164 + results/classifier/105/graphic/1836855 | 165 + results/classifier/105/graphic/1838066 | 81 + results/classifier/105/graphic/1838228 | 41 + results/classifier/105/graphic/1838763 | 98 + results/classifier/105/graphic/1839294 | 40 + results/classifier/105/graphic/1839367 | 71 + results/classifier/105/graphic/1840250 | 34 + results/classifier/105/graphic/1840719 | 125 + results/classifier/105/graphic/1841 | 25 + results/classifier/105/graphic/1844 | 35 + results/classifier/105/graphic/1844597 | 125 
+ results/classifier/105/graphic/1844946 | 43 + results/classifier/105/graphic/1845 | 22 + results/classifier/105/graphic/1846392 | 112 + results/classifier/105/graphic/1851664 | 50 + results/classifier/105/graphic/1852 | 123 + results/classifier/105/graphic/1854577 | 30 + results/classifier/105/graphic/1854910 | 32 + results/classifier/105/graphic/1855535 | 71 + results/classifier/105/graphic/1856 | 26 + results/classifier/105/graphic/1856027 | 36 + results/classifier/105/graphic/1857449 | 64 + results/classifier/105/graphic/1858623 | 43 + results/classifier/105/graphic/1858814 | 37 + results/classifier/105/graphic/1859021 | 282 + results/classifier/105/graphic/1859081 | 55 + results/classifier/105/graphic/1859418 | 54 + results/classifier/105/graphic/1859723 | 36 + results/classifier/105/graphic/1860 | 23 + results/classifier/105/graphic/1861 | 42 + results/classifier/105/graphic/1861161 | 159 + results/classifier/105/graphic/1861394 | 43 + results/classifier/105/graphic/1861653 | 65 + results/classifier/105/graphic/1861692 | 28 + results/classifier/105/graphic/1862 | 31 + results/classifier/105/graphic/1863678 | 29 + results/classifier/105/graphic/1864984 | 44 + results/classifier/105/graphic/1865 | 37 + results/classifier/105/graphic/1865248 | 27 + results/classifier/105/graphic/1869073 | 32 + results/classifier/105/graphic/1869782 | 64 + results/classifier/105/graphic/1869858 | 28 + results/classifier/105/graphic/1870477 | 67 + results/classifier/105/graphic/1871267 | 47 + results/classifier/105/graphic/1871270 | 62 + results/classifier/105/graphic/1871798 | 190 + results/classifier/105/graphic/1873032 | 45 + results/classifier/105/graphic/1873337 | 45 + results/classifier/105/graphic/1873339 | 58 + results/classifier/105/graphic/1873542 | 35 + results/classifier/105/graphic/1874 | 30 + results/classifier/105/graphic/1874073 | 322 + results/classifier/105/graphic/1874674 | 75 + results/classifier/105/graphic/1875 | 22 + results/classifier/105/graphic/1875762 | 61 + results/classifier/105/graphic/1877137 | 34 + results/classifier/105/graphic/1877526 | 86 + results/classifier/105/graphic/1878 | 42 + results/classifier/105/graphic/1878043 | 100 + results/classifier/105/graphic/1878136 | 62 + results/classifier/105/graphic/1878413 | 56 + results/classifier/105/graphic/1878915 | 60 + results/classifier/105/graphic/1879 | 22 + results/classifier/105/graphic/1879425 | 78 + results/classifier/105/graphic/1879531 | 176 + results/classifier/105/graphic/1880539 | 56 + results/classifier/105/graphic/1880722 | 58 + results/classifier/105/graphic/1881004 | 155 + results/classifier/105/graphic/1881506 | 49 + results/classifier/105/graphic/1881552 | 70 + results/classifier/105/graphic/1882 | 22 + results/classifier/105/graphic/1882123 | 184 + results/classifier/105/graphic/1882817 | 93 + results/classifier/105/graphic/1883 | 19 + results/classifier/105/graphic/1883083 | 65 + results/classifier/105/graphic/1883729 | 411 ++ results/classifier/105/graphic/1883739 | 52 + results/classifier/105/graphic/1884 | 23 + results/classifier/105/graphic/1884017 | 64 + results/classifier/105/graphic/1884728 | 86 + results/classifier/105/graphic/1884990 | 33 + results/classifier/105/graphic/1885 | 37 + results/classifier/105/graphic/1885247 | 298 + results/classifier/105/graphic/1885553 | 57 + results/classifier/105/graphic/1886208 | 33 + results/classifier/105/graphic/1886285 | 54 + results/classifier/105/graphic/1886343 | 34 + results/classifier/105/graphic/1886602 | 143 + results/classifier/105/graphic/1889 | 60 
+ results/classifier/105/graphic/1889411 | 81 + results/classifier/105/graphic/1890155 | 62 + results/classifier/105/graphic/1890159 | 57 + results/classifier/105/graphic/1890208 | 46 + results/classifier/105/graphic/1890312 | 84 + results/classifier/105/graphic/1891 | 61 + results/classifier/105/graphic/1891829 | 68 + results/classifier/105/graphic/1892 | 46 + results/classifier/105/graphic/1892581 | 76 + results/classifier/105/graphic/1892604 | 135 + results/classifier/105/graphic/1892684 | 70 + results/classifier/105/graphic/1892761 | 32 + results/classifier/105/graphic/1893 | 26 + results/classifier/105/graphic/1893667 | 52 + results/classifier/105/graphic/1894 | 24 + results/classifier/105/graphic/1894361 | 34 + results/classifier/105/graphic/1894617 | 69 + results/classifier/105/graphic/1894804 | 675 ++ results/classifier/105/graphic/1894836 | 68 + results/classifier/105/graphic/1895219 | 101 + results/classifier/105/graphic/1895602 | 57 + results/classifier/105/graphic/1895703 | 68 + results/classifier/105/graphic/1896317 | 84 + results/classifier/105/graphic/1896342 | 96 + results/classifier/105/graphic/1897 | 34 + results/classifier/105/graphic/1897568 | 86 + results/classifier/105/graphic/1899 | 54 + results/classifier/105/graphic/1899733 | 36 + results/classifier/105/graphic/1900352 | 29 + results/classifier/105/graphic/1901892 | 86 + results/classifier/105/graphic/1902777 | 60 + results/classifier/105/graphic/1904206 | 87 + results/classifier/105/graphic/1904210 | 84 + results/classifier/105/graphic/1904317 | 102 + results/classifier/105/graphic/1904464 | 69 + results/classifier/105/graphic/1904486 | 70 + results/classifier/105/graphic/1905226 | 61 + results/classifier/105/graphic/1906155 | 914 +++ results/classifier/105/graphic/1906180 | 49 + results/classifier/105/graphic/1906184 | 74 + results/classifier/105/graphic/1906185 | 77 + results/classifier/105/graphic/1906536 | 143 + results/classifier/105/graphic/1906905 | 398 ++ results/classifier/105/graphic/1907061 | 61 + results/classifier/105/graphic/1907137 | 161 + results/classifier/105/graphic/1907210 | 66 + results/classifier/105/graphic/1907776 | 71 + results/classifier/105/graphic/1908 | 62 + results/classifier/105/graphic/1908062 | 170 + results/classifier/105/graphic/1908266 | 53 + results/classifier/105/graphic/1909 | 63 + results/classifier/105/graphic/1910723 | 234 + results/classifier/105/graphic/1912846 | 34 + results/classifier/105/graphic/1913 | 32 + results/classifier/105/graphic/1913341 | 40 + results/classifier/105/graphic/1913917 | 56 + results/classifier/105/graphic/1914535 | 72 + results/classifier/105/graphic/1915 | 24 + results/classifier/105/graphic/1915327 | 80 + results/classifier/105/graphic/1916343 | 75 + results/classifier/105/graphic/1916655 | 68 + results/classifier/105/graphic/1917394 | 403 ++ results/classifier/105/graphic/1919 | 33 + results/classifier/105/graphic/1920 | 24 + results/classifier/105/graphic/1920013 | 150 + results/classifier/105/graphic/1920871 | 97 + results/classifier/105/graphic/1920934 | 188 + results/classifier/105/graphic/1921061 | 82 + results/classifier/105/graphic/1922 | 33 + results/classifier/105/graphic/1922625 | 55 + results/classifier/105/graphic/1923 | 31 + results/classifier/105/graphic/1924669 | 28 + results/classifier/105/graphic/1924738 | 87 + results/classifier/105/graphic/1924987 | 58 + results/classifier/105/graphic/1925 | 24 + results/classifier/105/graphic/1925109 | 63 + results/classifier/105/graphic/1925966 | 79 + results/classifier/105/graphic/1926 | 
37 + results/classifier/105/graphic/1926044 | 133 + results/classifier/105/graphic/1926052 | 32 + results/classifier/105/graphic/1926246 | 148 + results/classifier/105/graphic/1926996 | 83 + results/classifier/105/graphic/1930 | 59 + results/classifier/105/graphic/1934 | 37 + results/classifier/105/graphic/1937 | 24 + results/classifier/105/graphic/1938 | 49 + results/classifier/105/graphic/1940 | 33 + results/classifier/105/graphic/1942 | 22 + results/classifier/105/graphic/1943 | 37 + results/classifier/105/graphic/1946 | 41 + results/classifier/105/graphic/1950 | 22 + results/classifier/105/graphic/1952 | 109 + results/classifier/105/graphic/1954 | 41 + results/classifier/105/graphic/1962 | 42 + results/classifier/105/graphic/1963 | 41 + results/classifier/105/graphic/1966 | 21 + results/classifier/105/graphic/1971 | 161 + results/classifier/105/graphic/1983 | 43 + results/classifier/105/graphic/1986 | 27 + results/classifier/105/graphic/1989 | 45 + results/classifier/105/graphic/1990 | 32 + results/classifier/105/graphic/1994 | 14 + results/classifier/105/graphic/1994002 | 1839 +++++ results/classifier/105/graphic/1996 | 80 + results/classifier/105/graphic/1997 | 33 + results/classifier/105/graphic/1998 | 40 + results/classifier/105/graphic/2003 | 28 + results/classifier/105/graphic/2006 | 55 + results/classifier/105/graphic/2010 | 93 + results/classifier/105/graphic/2012 | 25 + results/classifier/105/graphic/2015 | 40 + results/classifier/105/graphic/2016 | 22 + results/classifier/105/graphic/2022 | 24 + results/classifier/105/graphic/2025 | 39 + results/classifier/105/graphic/2030 | 30 + results/classifier/105/graphic/2031 | 26 + results/classifier/105/graphic/2035 | 67 + results/classifier/105/graphic/2036 | 24 + results/classifier/105/graphic/2037 | 28 + results/classifier/105/graphic/2038 | 29 + results/classifier/105/graphic/2042 | 31 + results/classifier/105/graphic/2050 | 20 + results/classifier/105/graphic/2052 | 14 + results/classifier/105/graphic/2056 | 27 + results/classifier/105/graphic/2057 | 18 + results/classifier/105/graphic/2059 | 36 + results/classifier/105/graphic/2061 | 26 + results/classifier/105/graphic/2068 | 28 + results/classifier/105/graphic/2070 | 22 + results/classifier/105/graphic/2073 | 29 + results/classifier/105/graphic/2075 | 23 + results/classifier/105/graphic/2078 | 47 + results/classifier/105/graphic/2082 | 57 + results/classifier/105/graphic/2085 | 35 + results/classifier/105/graphic/2090 | 20 + results/classifier/105/graphic/2092 | 83 + results/classifier/105/graphic/2094 | 20 + results/classifier/105/graphic/2099 | 24 + results/classifier/105/graphic/2101 | 30 + results/classifier/105/graphic/2102 | 53 + results/classifier/105/graphic/2116 | 41 + results/classifier/105/graphic/2117 | 40 + results/classifier/105/graphic/2136 | 48 + results/classifier/105/graphic/2138 | 35 + results/classifier/105/graphic/2139 | 22 + results/classifier/105/graphic/214 | 14 + results/classifier/105/graphic/2141 | 37 + results/classifier/105/graphic/2144 | 35 + results/classifier/105/graphic/2145 | 50 + results/classifier/105/graphic/2146 | 127 + results/classifier/105/graphic/2147 | 22 + results/classifier/105/graphic/2150 | 26 + results/classifier/105/graphic/2151 | 208 + results/classifier/105/graphic/2154 | 20 + results/classifier/105/graphic/2155 | 36 + results/classifier/105/graphic/2159 | 190 + results/classifier/105/graphic/2167 | 53 + results/classifier/105/graphic/2168 | 45 + results/classifier/105/graphic/2187 | 14 + results/classifier/105/graphic/2190 | 
20 + results/classifier/105/graphic/2199 | 25 + results/classifier/105/graphic/2200 | 22 + results/classifier/105/graphic/2202 | 46 + results/classifier/105/graphic/2206 | 23 + results/classifier/105/graphic/2216 | 16 + results/classifier/105/graphic/2220 | 541 ++ results/classifier/105/graphic/22219210 | 51 + results/classifier/105/graphic/2223 | 48 + results/classifier/105/graphic/2225 | 24 + results/classifier/105/graphic/2231 | 27 + results/classifier/105/graphic/2233 | 62 + results/classifier/105/graphic/2234 | 36 + results/classifier/105/graphic/2235 | 70 + results/classifier/105/graphic/2237 | 52 + results/classifier/105/graphic/2238 | 60 + results/classifier/105/graphic/2240 | 17 + results/classifier/105/graphic/2242 | 27 + results/classifier/105/graphic/2244 | 59 + results/classifier/105/graphic/2251 | 27 + results/classifier/105/graphic/2252 | 24 + results/classifier/105/graphic/2256 | 14 + results/classifier/105/graphic/2257 | 14 + results/classifier/105/graphic/2258 | 36 + results/classifier/105/graphic/2259 | 27 + results/classifier/105/graphic/2260 | 38 + results/classifier/105/graphic/2263 | 41 + results/classifier/105/graphic/2264 | 70 + results/classifier/105/graphic/2271 | 29 + results/classifier/105/graphic/2274 | 56 + results/classifier/105/graphic/2276 | 55 + results/classifier/105/graphic/2279 | 38 + results/classifier/105/graphic/2281 | 20 + results/classifier/105/graphic/2283 | 46 + results/classifier/105/graphic/2288 | 42 + results/classifier/105/graphic/229 | 14 + results/classifier/105/graphic/2290 | 156 + results/classifier/105/graphic/2293 | 48 + results/classifier/105/graphic/2298 | 25 + results/classifier/105/graphic/2304 | 51 + results/classifier/105/graphic/2315 | 25 + results/classifier/105/graphic/2316 | 49 + results/classifier/105/graphic/2319 | 30 + results/classifier/105/graphic/2323 | 39 + results/classifier/105/graphic/2326 | 37 + results/classifier/105/graphic/2332 | 42 + results/classifier/105/graphic/2333 | 58 + results/classifier/105/graphic/2335 | 221 + results/classifier/105/graphic/234 | 14 + results/classifier/105/graphic/2340 | 46 + results/classifier/105/graphic/2344 | 58 + results/classifier/105/graphic/2345 | 61 + results/classifier/105/graphic/2349 | 25 + results/classifier/105/graphic/2361 | 26 + results/classifier/105/graphic/2365 | 21 + results/classifier/105/graphic/2373 | 108 + results/classifier/105/graphic/2375 | 98 + results/classifier/105/graphic/2376 | 127 + results/classifier/105/graphic/2382 | 27 + results/classifier/105/graphic/2384 | 39 + results/classifier/105/graphic/2385 | 120 + results/classifier/105/graphic/2387 | 24 + results/classifier/105/graphic/2389 | 47 + results/classifier/105/graphic/2399 | 40 + results/classifier/105/graphic/2403 | 27 + results/classifier/105/graphic/2405 | 29 + results/classifier/105/graphic/2407 | 66 + results/classifier/105/graphic/2410 | 105 + results/classifier/105/graphic/2411 | 24 + results/classifier/105/graphic/241119 | 106 + results/classifier/105/graphic/2413 | 46 + results/classifier/105/graphic/2418 | 25 + results/classifier/105/graphic/2420 | 57 + results/classifier/105/graphic/2421 | 31 + results/classifier/105/graphic/2424 | 331 + results/classifier/105/graphic/2428 | 42 + results/classifier/105/graphic/2429 | 42 + results/classifier/105/graphic/2433 | 237 + results/classifier/105/graphic/2437 | 50 + results/classifier/105/graphic/2446 | 73 + results/classifier/105/graphic/2450 | 30 + results/classifier/105/graphic/2453 | 22 + results/classifier/105/graphic/2455 | 21 + 
results/classifier/105/graphic/2470 | 44 + results/classifier/105/graphic/2476 | 65 + results/classifier/105/graphic/2478 | 31 + results/classifier/105/graphic/2482 | 149 + results/classifier/105/graphic/2483 | 33 + results/classifier/105/graphic/2486 | 25 + results/classifier/105/graphic/2487 | 81 + results/classifier/105/graphic/2491 | 28 + results/classifier/105/graphic/2493 | 14 + results/classifier/105/graphic/2495 | 85 + results/classifier/105/graphic/2496 | 46 + results/classifier/105/graphic/2502 | 26 + results/classifier/105/graphic/2504 | 20 + results/classifier/105/graphic/2509 | 39 + results/classifier/105/graphic/2510 | 58 + results/classifier/105/graphic/2511 | 45 + results/classifier/105/graphic/2513 | 26 + results/classifier/105/graphic/2518 | 27 + results/classifier/105/graphic/2520 | 24 + results/classifier/105/graphic/2523 | 33 + results/classifier/105/graphic/2524 | 16 + results/classifier/105/graphic/2529 | 45 + results/classifier/105/graphic/2532 | 53 + results/classifier/105/graphic/2534 | 23 + results/classifier/105/graphic/2538 | 39 + results/classifier/105/graphic/2540 | 30 + results/classifier/105/graphic/2543 | 24 + results/classifier/105/graphic/2550 | 38 + results/classifier/105/graphic/2551 | 26 + results/classifier/105/graphic/2555 | 33 + results/classifier/105/graphic/2556 | 25 + results/classifier/105/graphic/2559 | 24 + results/classifier/105/graphic/2561 | 52 + results/classifier/105/graphic/2565 | 26 + results/classifier/105/graphic/2569 | 18 + results/classifier/105/graphic/2571 | 79 + results/classifier/105/graphic/2576 | 14 + results/classifier/105/graphic/2578 | 27 + results/classifier/105/graphic/258 | 14 + results/classifier/105/graphic/2580 | 25 + results/classifier/105/graphic/2581 | 25 + results/classifier/105/graphic/2591 | 14 + results/classifier/105/graphic/2594 | 48 + results/classifier/105/graphic/2595 | 148 + results/classifier/105/graphic/2601 | 49 + results/classifier/105/graphic/2602 | 22 + results/classifier/105/graphic/2604 | 57 + results/classifier/105/graphic/2606 | 211 + results/classifier/105/graphic/2609 | 24 + results/classifier/105/graphic/2616 | 24 + results/classifier/105/graphic/2617 | 22 + results/classifier/105/graphic/2618 | 14 + results/classifier/105/graphic/2621 | 28 + results/classifier/105/graphic/2633 | 39 + results/classifier/105/graphic/2635 | 25 + results/classifier/105/graphic/2637 | 66 + results/classifier/105/graphic/2639 | 35 + results/classifier/105/graphic/264 | 14 + results/classifier/105/graphic/2642 | 18 + results/classifier/105/graphic/2643 | 65 + results/classifier/105/graphic/2645 | 36 + results/classifier/105/graphic/2648 | 24 + results/classifier/105/graphic/2657 | 24 + results/classifier/105/graphic/2671 | 30 + results/classifier/105/graphic/2672 | 33 + results/classifier/105/graphic/2673 | 18 + results/classifier/105/graphic/2674 | 37 + results/classifier/105/graphic/2675 | 24 + results/classifier/105/graphic/2676 | 20 + results/classifier/105/graphic/2680 | 27 + results/classifier/105/graphic/2686 | 61 + results/classifier/105/graphic/2687 | 62 + results/classifier/105/graphic/2690 | 33 + results/classifier/105/graphic/2691 | 40 + results/classifier/105/graphic/2706 | 14 + results/classifier/105/graphic/2712 | 24 + results/classifier/105/graphic/2717 | 25 + results/classifier/105/graphic/2720 | 83 + results/classifier/105/graphic/2722 | 61 + results/classifier/105/graphic/2723 | 36 + results/classifier/105/graphic/2728 | 28 + results/classifier/105/graphic/2729 | 87 + 
results/classifier/105/graphic/2730 | 23 + results/classifier/105/graphic/2731 | 357 + results/classifier/105/graphic/2736 | 23 + results/classifier/105/graphic/2738 | 23 + results/classifier/105/graphic/2748 | 263 + results/classifier/105/graphic/2749 | 92 + results/classifier/105/graphic/2757 | 14 + results/classifier/105/graphic/2768 | 29 + results/classifier/105/graphic/2774 | 16 + results/classifier/105/graphic/2778 | 112 + results/classifier/105/graphic/2783 | 30 + results/classifier/105/graphic/2784 | 230 + results/classifier/105/graphic/2785 | 29 + results/classifier/105/graphic/2787 | 26 + results/classifier/105/graphic/2790 | 23 + results/classifier/105/graphic/2793 | 255 + results/classifier/105/graphic/2798 | 51 + results/classifier/105/graphic/2799 | 54 + results/classifier/105/graphic/2800 | 20 + results/classifier/105/graphic/2806 | 22 + results/classifier/105/graphic/2807 | 44 + results/classifier/105/graphic/2816 | 27 + results/classifier/105/graphic/2818 | 20 + results/classifier/105/graphic/2821 | 36 + results/classifier/105/graphic/2822 | 24 + results/classifier/105/graphic/2826 | 22 + results/classifier/105/graphic/2828 | 16 + results/classifier/105/graphic/2834 | 32 + results/classifier/105/graphic/2835 | 135 + results/classifier/105/graphic/2839 | 48 + results/classifier/105/graphic/2840 | 34 + results/classifier/105/graphic/2843 | 46 + results/classifier/105/graphic/2848 | 26 + results/classifier/105/graphic/2851 | 64 + results/classifier/105/graphic/2860 | 46 + results/classifier/105/graphic/2862 | 37 + results/classifier/105/graphic/2864 | 22 + results/classifier/105/graphic/2874 | 22 + results/classifier/105/graphic/2882 | 103 + results/classifier/105/graphic/2889 | 35 + results/classifier/105/graphic/2895 | 43 + results/classifier/105/graphic/2897 | 27 + results/classifier/105/graphic/2899 | 49 + results/classifier/105/graphic/2906 | 26 + results/classifier/105/graphic/2908 | 23 + results/classifier/105/graphic/2909 | 31 + results/classifier/105/graphic/2914 | 28 + results/classifier/105/graphic/2916 | 39 + results/classifier/105/graphic/2917 | 35 + results/classifier/105/graphic/2920 | 25 + results/classifier/105/graphic/2926 | 49 + results/classifier/105/graphic/2928 | 69 + results/classifier/105/graphic/2931 | 39 + results/classifier/105/graphic/2933 | 35 + results/classifier/105/graphic/2935 | 37 + results/classifier/105/graphic/2938 | 24 + results/classifier/105/graphic/2944 | 34 + results/classifier/105/graphic/2945 | 42 + results/classifier/105/graphic/2947 | 23 + results/classifier/105/graphic/2948 | 26 + results/classifier/105/graphic/2952 | 27 + results/classifier/105/graphic/2954 | 32 + results/classifier/105/graphic/2960 | 23 + results/classifier/105/graphic/2962 | 35 + results/classifier/105/graphic/2965 | 27 + results/classifier/105/graphic/2966 | 40 + results/classifier/105/graphic/2967 | 227 + results/classifier/105/graphic/2973 | 76 + results/classifier/105/graphic/2978 | 36 + results/classifier/105/graphic/2981 | 38 + results/classifier/105/graphic/2987 | 20 + results/classifier/105/graphic/2988 | 20 + results/classifier/105/graphic/30680944 | 603 ++ results/classifier/105/graphic/322602 | 41 + results/classifier/105/graphic/352 | 14 + results/classifier/105/graphic/370 | 14 + results/classifier/105/graphic/391879 | 54 + results/classifier/105/graphic/439 | 14 + results/classifier/105/graphic/441672 | 32 + results/classifier/105/graphic/455 | 47 + results/classifier/105/graphic/456 | 42 + results/classifier/105/graphic/46572227 | 414 ++ 
results/classifier/105/graphic/466 | 14 + results/classifier/105/graphic/471 | 77 + results/classifier/105/graphic/474 | 43 + results/classifier/105/graphic/474968 | 70 + results/classifier/105/graphic/485239 | 51 + results/classifier/105/graphic/488 | 43 + results/classifier/105/graphic/489 | 50 + results/classifier/105/graphic/492 | 40 + results/classifier/105/graphic/496 | 26 + results/classifier/105/graphic/497 | 32 + results/classifier/105/graphic/498421 | 23 + results/classifier/105/graphic/504 | 31 + results/classifier/105/graphic/505 | 25 + results/classifier/105/graphic/515 | 44 + results/classifier/105/graphic/519 | 46 + results/classifier/105/graphic/525 | 27 + results/classifier/105/graphic/526 | 28 + results/classifier/105/graphic/530 | 56 + results/classifier/105/graphic/53568181 | 86 + results/classifier/105/graphic/538808 | 41 + results/classifier/105/graphic/546 | 14 + results/classifier/105/graphic/548 | 14 + results/classifier/105/graphic/553 | 38 + results/classifier/105/graphic/55961334 | 47 + results/classifier/105/graphic/564 | 26 + results/classifier/105/graphic/567376 | 24 + results/classifier/105/graphic/567380 | 35 + results/classifier/105/graphic/577 | 38 + results/classifier/105/graphic/578 | 43 + results/classifier/105/graphic/579 | 63 + results/classifier/105/graphic/58 | 14 + results/classifier/105/graphic/581353 | 29 + results/classifier/105/graphic/587993 | 146 + results/classifier/105/graphic/588688 | 25 + results/classifier/105/graphic/588803 | 143 + results/classifier/105/graphic/595 | 24 + results/classifier/105/graphic/597351 | 50 + results/classifier/105/graphic/599 | 24 + results/classifier/105/graphic/599958 | 275 + results/classifier/105/graphic/601 | 33 + results/classifier/105/graphic/603872 | 63 + results/classifier/105/graphic/603878 | 40 + results/classifier/105/graphic/610 | 44 + results/classifier/105/graphic/612 | 14 + results/classifier/105/graphic/612677 | 36 + results/classifier/105/graphic/614958 | 120 + results/classifier/105/graphic/616 | 120 + results/classifier/105/graphic/618 | 108 + results/classifier/105/graphic/618533 | 238 + results/classifier/105/graphic/622367 | 32 + results/classifier/105/graphic/627 | 20 + results/classifier/105/graphic/629 | 23 + results/classifier/105/graphic/631 | 38 + results/classifier/105/graphic/636 | 369 + results/classifier/105/graphic/639 | 14 + results/classifier/105/graphic/640 | 19 + results/classifier/105/graphic/640213 | 41 + results/classifier/105/graphic/643430 | 50 + results/classifier/105/graphic/648128 | 238 + results/classifier/105/graphic/654 | 36 + results/classifier/105/graphic/656 | 20 + results/classifier/105/graphic/660 | 22 + results/classifier/105/graphic/661 | 57 + results/classifier/105/graphic/665743 | 23 + results/classifier/105/graphic/668 | 34 + results/classifier/105/graphic/671 | 31 + results/classifier/105/graphic/673 | 23 + results/classifier/105/graphic/674 | 27 + results/classifier/105/graphic/676 | 67 + results/classifier/105/graphic/678363 | 33 + results/classifier/105/graphic/680 | 14 + results/classifier/105/graphic/680758 | 68 + results/classifier/105/graphic/681613 | 41 + results/classifier/105/graphic/685096 | 347 + results/classifier/105/graphic/686 | 52 + results/classifier/105/graphic/686613 | 73 + results/classifier/105/graphic/690776 | 26 + results/classifier/105/graphic/691 | 19 + results/classifier/105/graphic/693 | 23 + results/classifier/105/graphic/694 | 14 + results/classifier/105/graphic/696 | 22 + results/classifier/105/graphic/696530 | 33 + 
results/classifier/105/graphic/696834 | 388 + results/classifier/105/graphic/705931 | 59 + results/classifier/105/graphic/707 | 75 + results/classifier/105/graphic/709584 | 39 + results/classifier/105/graphic/711 | 14 + results/classifier/105/graphic/712 | 27 + results/classifier/105/graphic/716 | 16 + results/classifier/105/graphic/717 | 16 + results/classifier/105/graphic/719 | 32 + results/classifier/105/graphic/721793 | 44 + results/classifier/105/graphic/722311 | 92 + results/classifier/105/graphic/724 | 14 + results/classifier/105/graphic/725 | 27 + results/classifier/105/graphic/726619 | 31 + results/classifier/105/graphic/731 | 34 + results/classifier/105/graphic/733 | 46 + results/classifier/105/graphic/734 | 41 + results/classifier/105/graphic/73660729 | 39 + results/classifier/105/graphic/739 | 30 + results/classifier/105/graphic/740 | 40 + results/classifier/105/graphic/740895 | 85 + results/classifier/105/graphic/741115 | 27 + results/classifier/105/graphic/752476 | 70 + results/classifier/105/graphic/755 | 72 + results/classifier/105/graphic/760956 | 42 + results/classifier/105/graphic/761 | 25 + results/classifier/105/graphic/764 | 58 + results/classifier/105/graphic/765 | 78 + results/classifier/105/graphic/768 | 25 + results/classifier/105/graphic/769 | 29 + results/classifier/105/graphic/771 | 27 + results/classifier/105/graphic/775604 | 29 + results/classifier/105/graphic/778 | 14 + results/classifier/105/graphic/784977 | 119 + results/classifier/105/graphic/786211 | 21 + results/classifier/105/graphic/788701 | 29 + results/classifier/105/graphic/788881 | 32 + results/classifier/105/graphic/788886 | 26 + results/classifier/105/graphic/789 | 25 + results/classifier/105/graphic/794 | 22 + results/classifier/105/graphic/798 | 28 + results/classifier/105/graphic/803 | 33 + results/classifier/105/graphic/807893 | 497 ++ results/classifier/105/graphic/808588 | 109 + results/classifier/105/graphic/809912 | 33 + results/classifier/105/graphic/813546 | 29 + results/classifier/105/graphic/820 | 26 + results/classifier/105/graphic/822 | 29 + results/classifier/105/graphic/823 | 34 + results/classifier/105/graphic/829 | 27 + results/classifier/105/graphic/834 | 72 + results/classifier/105/graphic/835 | 22 + results/classifier/105/graphic/839 | 63 + results/classifier/105/graphic/841 | 91 + results/classifier/105/graphic/842 | 26 + results/classifier/105/graphic/848 | 61 + results/classifier/105/graphic/849 | 35 + results/classifier/105/graphic/855 | 30 + results/classifier/105/graphic/857 | 25 + results/classifier/105/graphic/860 | 323 + results/classifier/105/graphic/863 | 67 + results/classifier/105/graphic/865 | 58 + results/classifier/105/graphic/865518 | 100 + results/classifier/105/graphic/866 | 66 + results/classifier/105/graphic/867 | 26 + results/classifier/105/graphic/868 | 28 + results/classifier/105/graphic/869 | 34 + results/classifier/105/graphic/871 | 27 + results/classifier/105/graphic/877 | 119 + results/classifier/105/graphic/878019 | 32 + results/classifier/105/graphic/883 | 40 + results/classifier/105/graphic/888 | 20 + results/classifier/105/graphic/892 | 18 + results/classifier/105/graphic/893068 | 29 + results/classifier/105/graphic/893956 | 350 + results/classifier/105/graphic/894 | 44 + results/classifier/105/graphic/902413 | 179 + results/classifier/105/graphic/904308 | 201 + results/classifier/105/graphic/905 | 14 + results/classifier/105/graphic/906221 | 64 + results/classifier/105/graphic/909 | 24 + results/classifier/105/graphic/911 | 30 + 
results/classifier/105/graphic/919 | 18 + results/classifier/105/graphic/920 | 25 + results/classifier/105/graphic/921 | 636 ++ results/classifier/105/graphic/922 | 33 + results/classifier/105/graphic/922076 | 64 + results/classifier/105/graphic/924943 | 61 + results/classifier/105/graphic/935945 | 636 ++ results/classifier/105/graphic/936 | 29 + results/classifier/105/graphic/938552 | 41 + results/classifier/105/graphic/938945 | 41 + results/classifier/105/graphic/939 | 88 + results/classifier/105/graphic/945 | 23 + results/classifier/105/graphic/946 | 25 + results/classifier/105/graphic/946043 | 30 + results/classifier/105/graphic/952 | 110 + results/classifier/105/graphic/956 | 55 + results/classifier/105/graphic/958 | 26 + results/classifier/105/graphic/959 | 22 + results/classifier/105/graphic/962 | 32 + results/classifier/105/graphic/966 | 71 + results/classifier/105/graphic/967 | 237 + results/classifier/105/graphic/970 | 46 + results/classifier/105/graphic/979 | 20 + results/classifier/105/graphic/980 | 29 + results/classifier/105/graphic/983 | 21 + results/classifier/105/graphic/988 | 14 + results/classifier/105/graphic/992 | 33 + results/classifier/105/graphic/995 | 24 + results/classifier/105/graphic/997631 | 35 + results/classifier/105/graphic/998 | 73 + results/classifier/105/instruction/1006655 | 153 + results/classifier/105/instruction/1006702 | 26 + results/classifier/105/instruction/1007 | 14 + results/classifier/105/instruction/1011 | 34 + results/classifier/105/instruction/1027 | 28 + results/classifier/105/instruction/1030104 | 45 + results/classifier/105/instruction/1037 | 14 + results/classifier/105/instruction/105 | 14 + results/classifier/105/instruction/1052857 | 61 + results/classifier/105/instruction/1053 | 22 + results/classifier/105/instruction/1057 | 36 + results/classifier/105/instruction/1060 | 46 + results/classifier/105/instruction/1061778 | 27 + results/classifier/105/instruction/1062 | 29 + results/classifier/105/instruction/1065 | 18 + results/classifier/105/instruction/1076445 | 65 + results/classifier/105/instruction/1078892 | 24 + results/classifier/105/instruction/1079080 | 43 + results/classifier/105/instruction/1085 | 53 + results/classifier/105/instruction/109 | 14 + results/classifier/105/instruction/1090837 | 51 + results/classifier/105/instruction/1092 | 27 + results/classifier/105/instruction/1095857 | 29 + results/classifier/105/instruction/1103868 | 76 + results/classifier/105/instruction/1103903 | 56 + results/classifier/105/instruction/111 | 14 + results/classifier/105/instruction/1111 | 31 + results/classifier/105/instruction/1116 | 31 + results/classifier/105/instruction/1119686 | 59 + results/classifier/105/instruction/1142 | 59 + results/classifier/105/instruction/1156 | 14 + results/classifier/105/instruction/1157 | 26 + results/classifier/105/instruction/1163034 | 59 + results/classifier/105/instruction/1163065 | 38 + results/classifier/105/instruction/11933524 | 1133 +++ results/classifier/105/instruction/1195 | 31 + results/classifier/105/instruction/1195012 | 53 + results/classifier/105/instruction/1196498 | 42 + results/classifier/105/instruction/1199 | 23 + results/classifier/105/instruction/1204 | 42 + results/classifier/105/instruction/1211943 | 21 + results/classifier/105/instruction/1216845 | 103 + results/classifier/105/instruction/1223477 | 48 + results/classifier/105/instruction/1224 | 23 + results/classifier/105/instruction/1239008 | 90 + results/classifier/105/instruction/1240669 | 55 + 
results/classifier/105/instruction/1244 | 58 + results/classifier/105/instruction/1245543 | 43 + results/classifier/105/instruction/1248168 | 42 + results/classifier/105/instruction/1251 | 28 + results/classifier/105/instruction/1253465 | 74 + results/classifier/105/instruction/1256826 | 29 + results/classifier/105/instruction/1260555 | 237 + results/classifier/105/instruction/1266 | 14 + results/classifier/105/instruction/1268671 | 66 + results/classifier/105/instruction/1269 | 39 + results/classifier/105/instruction/1272796 | 39 + results/classifier/105/instruction/1277 | 14 + results/classifier/105/instruction/1278166 | 21 + results/classifier/105/instruction/1283519 | 28 + results/classifier/105/instruction/129 | 25 + results/classifier/105/instruction/1306818 | 61 + results/classifier/105/instruction/1308381 | 99 + results/classifier/105/instruction/1309034 | 48 + results/classifier/105/instruction/1314 | 53 + results/classifier/105/instruction/1328996 | 25 + results/classifier/105/instruction/1338563 | 55 + results/classifier/105/instruction/1354529 | 62 + results/classifier/105/instruction/1354727 | 28 + results/classifier/105/instruction/1355 | 14 + results/classifier/105/instruction/1355738 | 58 + results/classifier/105/instruction/1356916 | 23 + results/classifier/105/instruction/1357226 | 80 + results/classifier/105/instruction/1358 | 14 + results/classifier/105/instruction/1361912 | 80 + results/classifier/105/instruction/1376 | 28 + results/classifier/105/instruction/1377 | 27 + results/classifier/105/instruction/1381642 | 46 + results/classifier/105/instruction/1394 | 74 + results/classifier/105/instruction/1409 | 14 + results/classifier/105/instruction/141 | 14 + results/classifier/105/instruction/1412 | 18 + results/classifier/105/instruction/1414 | 33 + results/classifier/105/instruction/1416 | 18 + results/classifier/105/instruction/1416246 | 68 + results/classifier/105/instruction/1423 | 26 + results/classifier/105/instruction/1426092 | 53 + results/classifier/105/instruction/1432 | 37 + results/classifier/105/instruction/1432103 | 21 + results/classifier/105/instruction/1437367 | 50 + results/classifier/105/instruction/1438572 | 34 + results/classifier/105/instruction/1441 | 47 + results/classifier/105/instruction/1442 | 14 + results/classifier/105/instruction/1452 | 14 + results/classifier/105/instruction/1458 | 40 + results/classifier/105/instruction/1460 | 18 + results/classifier/105/instruction/1460523 | 26 + results/classifier/105/instruction/1463338 | 37 + results/classifier/105/instruction/1469342 | 49 + results/classifier/105/instruction/1470 | 22 + results/classifier/105/instruction/1473 | 14 + results/classifier/105/instruction/1479717 | 56 + results/classifier/105/instruction/1481272 | 257 + results/classifier/105/instruction/1490886 | 64 + results/classifier/105/instruction/1498 | 18 + results/classifier/105/instruction/1500 | 51 + results/classifier/105/instruction/1503031 | 34 + results/classifier/105/instruction/1504 | 14 + results/classifier/105/instruction/1511 | 14 + results/classifier/105/instruction/1511710 | 38 + results/classifier/105/instruction/1524637 | 31 + results/classifier/105/instruction/1529764 | 28 + results/classifier/105/instruction/1541 | 45 + results/classifier/105/instruction/1541643 | 26 + results/classifier/105/instruction/1542 | 26 + results/classifier/105/instruction/1544 | 14 + results/classifier/105/instruction/1547526 | 430 ++ results/classifier/105/instruction/1549298 | 32 + results/classifier/105/instruction/1551 | 53 + 
results/classifier/105/instruction/1552549 | 24 + results/classifier/105/instruction/1560 | 14 + results/classifier/105/instruction/1565395 | 42 + results/classifier/105/instruction/1567254 | 53 + results/classifier/105/instruction/1574346 | 41 + results/classifier/105/instruction/1582 | 14 + results/classifier/105/instruction/1586611 | 38 + results/classifier/105/instruction/1587 | 38 + results/classifier/105/instruction/1589923 | 178 + results/classifier/105/instruction/1590336 | 52 + results/classifier/105/instruction/1594069 | 87 + results/classifier/105/instruction/1598029 | 41 + results/classifier/105/instruction/1605123 | 50 + results/classifier/105/instruction/1611 | 14 + results/classifier/105/instruction/1611394 | 54 + results/classifier/105/instruction/1614 | 14 + results/classifier/105/instruction/1615823 | 81 + results/classifier/105/instruction/1619 | 14 + results/classifier/105/instruction/1637 | 14 + results/classifier/105/instruction/1641 | 37 + results/classifier/105/instruction/1642 | 35 + results/classifier/105/instruction/1643537 | 45 + results/classifier/105/instruction/1645355 | 72 + results/classifier/105/instruction/1648 | 71 + results/classifier/105/instruction/1650 | 27 + results/classifier/105/instruction/1656676 | 38 + results/classifier/105/instruction/1672383 | 36 + results/classifier/105/instruction/1680679 | 215 + results/classifier/105/instruction/1681398 | 32 + results/classifier/105/instruction/1683 | 14 + results/classifier/105/instruction/1689367 | 120 + results/classifier/105/instruction/1694998 | 62 + results/classifier/105/instruction/1699567 | 45 + results/classifier/105/instruction/1705 | 79 + results/classifier/105/instruction/1709 | 49 + results/classifier/105/instruction/1715007 | 33 + results/classifier/105/instruction/1716028 | 542 ++ results/classifier/105/instruction/1716510 | 33 + results/classifier/105/instruction/1718118 | 74 + results/classifier/105/instruction/1719984 | 27 + results/classifier/105/instruction/1721275 | 100 + results/classifier/105/instruction/1724485 | 64 + results/classifier/105/instruction/1728448 | 85 + results/classifier/105/instruction/1728639 | 124 + results/classifier/105/instruction/1729 | 60 + results/classifier/105/instruction/1734 | 29 + results/classifier/105/instruction/1735082 | 42 + results/classifier/105/instruction/1736042 | 53 + results/classifier/105/instruction/1736655 | 88 + results/classifier/105/instruction/1737 | 62 + results/classifier/105/instruction/1738283 | 172 + results/classifier/105/instruction/1738434 | 50 + results/classifier/105/instruction/1741 | 14 + results/classifier/105/instruction/1744 | 14 + results/classifier/105/instruction/1751422 | 71 + results/classifier/105/instruction/1751494 | 38 + results/classifier/105/instruction/1755912 | 346 + results/classifier/105/instruction/1756927 | 47 + results/classifier/105/instruction/1759333 | 34 + results/classifier/105/instruction/1762707 | 51 + results/classifier/105/instruction/1767176 | 64 + results/classifier/105/instruction/1768 | 45 + results/classifier/105/instruction/1771 | 46 + results/classifier/105/instruction/1771570 | 40 + results/classifier/105/instruction/1771948 | 51 + results/classifier/105/instruction/1775011 | 37 + results/classifier/105/instruction/1776486 | 27 + results/classifier/105/instruction/1777786 | 68 + results/classifier/105/instruction/1778473 | 151 + results/classifier/105/instruction/1780 | 30 + results/classifier/105/instruction/1781281 | 65 + results/classifier/105/instruction/1782107 | 48 + 
results/classifier/105/instruction/1786 | 37 + results/classifier/105/instruction/1788 | 42 + results/classifier/105/instruction/1790 | 42 + results/classifier/105/instruction/1792 | 94 + results/classifier/105/instruction/1792659 | 65 + results/classifier/105/instruction/1793275 | 46 + results/classifier/105/instruction/1793608 | 49 + results/classifier/105/instruction/1793904 | 154 + results/classifier/105/instruction/1794086 | 72 + results/classifier/105/instruction/1797033 | 29 + results/classifier/105/instruction/1799 | 190 + results/classifier/105/instruction/1801674 | 95 + results/classifier/105/instruction/1803872 | 1336 ++++ results/classifier/105/instruction/1807675 | 55 + results/classifier/105/instruction/1809144 | 54 + results/classifier/105/instruction/1809684 | 45 + results/classifier/105/instruction/1810545 | 44 + results/classifier/105/instruction/1812861 | 41 + results/classifier/105/instruction/1813460 | 34 + results/classifier/105/instruction/1814343 | 49 + results/classifier/105/instruction/1815024 | 40 + results/classifier/105/instruction/1815423 | 84 + results/classifier/105/instruction/1815721 | 86 + results/classifier/105/instruction/1816614 | 42 + results/classifier/105/instruction/1817268 | 208 + results/classifier/105/instruction/1818075 | 133 + results/classifier/105/instruction/1820686 | 25 + results/classifier/105/instruction/1824344 | 71 + results/classifier/105/instruction/1824778 | 30 + results/classifier/105/instruction/1825 | 27 + results/classifier/105/instruction/1825002 | 194 + results/classifier/105/instruction/1825311 | 70 + results/classifier/105/instruction/1825359 | 200 + results/classifier/105/instruction/1826568 | 50 + results/classifier/105/instruction/1828507 | 78 + results/classifier/105/instruction/1828867 | 48 + results/classifier/105/instruction/1830031 | 111 + results/classifier/105/instruction/1830864 | 101 + results/classifier/105/instruction/1831 | 14 + results/classifier/105/instruction/1831545 | 37 + results/classifier/105/instruction/1832353 | 70 + results/classifier/105/instruction/1832422 | 33 + results/classifier/105/instruction/1834496 | 66 + results/classifier/105/instruction/1838475 | 48 + results/classifier/105/instruction/1838913 | 64 + results/classifier/105/instruction/1839807 | 120 + results/classifier/105/instruction/1840 | 14 + results/classifier/105/instruction/1840249 | 35 + results/classifier/105/instruction/1840777 | 89 + results/classifier/105/instruction/1842 | 28 + results/classifier/105/instruction/1842916 | 66 + results/classifier/105/instruction/1843254 | 26 + results/classifier/105/instruction/1845185 | 97 + results/classifier/105/instruction/1847232 | 536 ++ results/classifier/105/instruction/1847467 | 56 + results/classifier/105/instruction/1849879 | 26 + results/classifier/105/instruction/1850 | 42 + results/classifier/105/instruction/1850378 | 53 + results/classifier/105/instruction/1851939 | 34 + results/classifier/105/instruction/1857143 | 46 + results/classifier/105/instruction/1859989 | 65 + results/classifier/105/instruction/1860056 | 54 + results/classifier/105/instruction/1860920 | 51 + results/classifier/105/instruction/1861562 | 187 + results/classifier/105/instruction/1862887 | 100 + results/classifier/105/instruction/1863247 | 33 + results/classifier/105/instruction/1863685 | 48 + results/classifier/105/instruction/1865348 | 61 + results/classifier/105/instruction/1865626 | 51 + results/classifier/105/instruction/1873898 | 57 + results/classifier/105/instruction/1877706 | 64 + 
results/classifier/105/instruction/1877794 | 31 + results/classifier/105/instruction/1880424 | 59 + results/classifier/105/instruction/1881450 | 73 + results/classifier/105/instruction/1882065 | 50 + results/classifier/105/instruction/1882497 | 49 + results/classifier/105/instruction/1885350 | 60 + results/classifier/105/instruction/1885719 | 41 + results/classifier/105/instruction/1885720 | 35 + results/classifier/105/instruction/1886811 | 97 + results/classifier/105/instruction/1887641 | 56 + results/classifier/105/instruction/1888165 | 34 + results/classifier/105/instruction/1889288 | 26 + results/classifier/105/instruction/1889421 | 60 + results/classifier/105/instruction/1890 | 38 + results/classifier/105/instruction/1890310 | 65 + results/classifier/105/instruction/1892081 | 48 + results/classifier/105/instruction/1892441 | 57 + results/classifier/105/instruction/1892533 | 37 + results/classifier/105/instruction/1893010 | 38 + results/classifier/105/instruction/1894029 | 57 + results/classifier/105/instruction/1895305 | 66 + results/classifier/105/instruction/1897194 | 60 + results/classifier/105/instruction/1898954 | 73 + results/classifier/105/instruction/1899728 | 53 + results/classifier/105/instruction/1901 | 32 + results/classifier/105/instruction/1901981 | 89 + results/classifier/105/instruction/1902267 | 81 + results/classifier/105/instruction/1903712 | 47 + results/classifier/105/instruction/1903833 | 39 + results/classifier/105/instruction/1905356 | 53 + results/classifier/105/instruction/1906295 | 37 + results/classifier/105/instruction/1909392 | 48 + results/classifier/105/instruction/1909823 | 53 + results/classifier/105/instruction/1910605 | 62 + results/classifier/105/instruction/1912107 | 60 + results/classifier/105/instruction/1912934 | 78 + results/classifier/105/instruction/1913667 | 58 + results/classifier/105/instruction/1913913 | 84 + results/classifier/105/instruction/1913926 | 77 + results/classifier/105/instruction/1915027 | 27 + results/classifier/105/instruction/1916269 | 70 + results/classifier/105/instruction/1917 | 64 + results/classifier/105/instruction/1917661 | 67 + results/classifier/105/instruction/1921138 | 31 + results/classifier/105/instruction/1922617 | 255 + results/classifier/105/instruction/1922887 | 60 + results/classifier/105/instruction/1923629 | 51 + results/classifier/105/instruction/1923663 | 157 + results/classifier/105/instruction/1926174 | 61 + results/classifier/105/instruction/1926249 | 47 + results/classifier/105/instruction/1926277 | 227 + results/classifier/105/instruction/1926759 | 73 + results/classifier/105/instruction/1955 | 39 + results/classifier/105/instruction/1958 | 34 + results/classifier/105/instruction/1981 | 23 + results/classifier/105/instruction/1991 | 77 + results/classifier/105/instruction/2004 | 46 + results/classifier/105/instruction/2008 | 25 + results/classifier/105/instruction/203 | 14 + results/classifier/105/instruction/2039 | 24 + results/classifier/105/instruction/2043 | 87 + results/classifier/105/instruction/2054889 | 66 + results/classifier/105/instruction/2074 | 33 + results/classifier/105/instruction/2088 | 34 + results/classifier/105/instruction/2089 | 40 + results/classifier/105/instruction/2091 | 16 + results/classifier/105/instruction/2104 | 14 + results/classifier/105/instruction/2111 | 72 + results/classifier/105/instruction/2114 | 14 + results/classifier/105/instruction/2118 | 14 + results/classifier/105/instruction/2123 | 44 + results/classifier/105/instruction/2127 | 14 + 
results/classifier/105/instruction/2131 | 14 + results/classifier/105/instruction/2140 | 14 + results/classifier/105/instruction/2142 | 14 + results/classifier/105/instruction/2149 | 24 + results/classifier/105/instruction/2156 | 28 + results/classifier/105/instruction/2157 | 56 + results/classifier/105/instruction/216 | 14 + results/classifier/105/instruction/2160 | 14 + results/classifier/105/instruction/2163 | 14 + results/classifier/105/instruction/2175 | 51 + results/classifier/105/instruction/2177 | 14 + results/classifier/105/instruction/2198 | 38 + results/classifier/105/instruction/2207 | 24 + results/classifier/105/instruction/2226 | 69 + results/classifier/105/instruction/2248 | 49 + results/classifier/105/instruction/2250 | 57 + results/classifier/105/instruction/2287 | 42 + results/classifier/105/instruction/2300 | 14 + results/classifier/105/instruction/2302 | 38 + results/classifier/105/instruction/2317 | 51 + results/classifier/105/instruction/2318 | 47 + results/classifier/105/instruction/232 | 14 + results/classifier/105/instruction/2328 | 14 + results/classifier/105/instruction/2342 | 14 + results/classifier/105/instruction/2377 | 38 + results/classifier/105/instruction/2386 | 56 + results/classifier/105/instruction/2388 | 30 + results/classifier/105/instruction/2390 | 76 + results/classifier/105/instruction/2397 | 14 + results/classifier/105/instruction/2402 | 37 + results/classifier/105/instruction/2419 | 31 + results/classifier/105/instruction/2423 | 47 + results/classifier/105/instruction/2452 | 14 + results/classifier/105/instruction/246 | 14 + results/classifier/105/instruction/2466 | 37 + results/classifier/105/instruction/2497 | 16 + results/classifier/105/instruction/2499 | 43 + results/classifier/105/instruction/2500 | 17 + results/classifier/105/instruction/2501 | 14 + results/classifier/105/instruction/2506 | 71 + results/classifier/105/instruction/2522 | 30 + results/classifier/105/instruction/2525 | 14 + results/classifier/105/instruction/2531 | 73 + results/classifier/105/instruction/2537 | 14 + results/classifier/105/instruction/2554 | 24 + results/classifier/105/instruction/2563 | 223 + results/classifier/105/instruction/2577 | 14 + results/classifier/105/instruction/2583 | 38 + results/classifier/105/instruction/2592 | 50 + results/classifier/105/instruction/2598 | 14 + results/classifier/105/instruction/2619 | 14 + results/classifier/105/instruction/2628 | 33 + results/classifier/105/instruction/263 | 14 + results/classifier/105/instruction/2641 | 14 + results/classifier/105/instruction/2647 | 60 + results/classifier/105/instruction/2655 | 52 + results/classifier/105/instruction/2662 | 24 + results/classifier/105/instruction/2669 | 31 + results/classifier/105/instruction/268 | 14 + results/classifier/105/instruction/2683 | 52 + results/classifier/105/instruction/2696 | 25 + results/classifier/105/instruction/273 | 14 + results/classifier/105/instruction/2747 | 22 + results/classifier/105/instruction/275 | 14 + results/classifier/105/instruction/2750 | 24 + results/classifier/105/instruction/2755 | 24 + results/classifier/105/instruction/276 | 14 + results/classifier/105/instruction/2761 | 21 + results/classifier/105/instruction/2764 | 60 + results/classifier/105/instruction/2771 | 14 + results/classifier/105/instruction/2802 | 39 + results/classifier/105/instruction/2809 | 24 + results/classifier/105/instruction/2817 | 64 + results/classifier/105/instruction/2820 | 40 + results/classifier/105/instruction/2833 | 32 + 
results/classifier/105/instruction/2854 | 37 + results/classifier/105/instruction/2855 | 42 + results/classifier/105/instruction/2861 | 20 + results/classifier/105/instruction/2865 | 65 + results/classifier/105/instruction/2883 | 14 + results/classifier/105/instruction/2891 | 14 + results/classifier/105/instruction/2900 | 24 + results/classifier/105/instruction/2903 | 24 + results/classifier/105/instruction/2907 | 14 + results/classifier/105/instruction/2946 | 23 + results/classifier/105/instruction/2958 | 31 + results/classifier/105/instruction/2971 | 57 + results/classifier/105/instruction/2980 | 277 + results/classifier/105/instruction/300 | 14 + results/classifier/105/instruction/301 | 14 + results/classifier/105/instruction/306 | 14 + results/classifier/105/instruction/312 | 14 + results/classifier/105/instruction/333 | 14 + results/classifier/105/instruction/355 | 14 + results/classifier/105/instruction/361 | 14 + results/classifier/105/instruction/366 | 14 + results/classifier/105/instruction/381 | 14 + results/classifier/105/instruction/390 | 14 + results/classifier/105/instruction/417 | 14 + results/classifier/105/instruction/424450 | 38 + results/classifier/105/instruction/440 | 14 + results/classifier/105/instruction/442 | 14 + results/classifier/105/instruction/447 | 14 + results/classifier/105/instruction/459 | 48 + results/classifier/105/instruction/462 | 56 + results/classifier/105/instruction/481 | 14 + results/classifier/105/instruction/483 | 38 + results/classifier/105/instruction/485 | 14 + results/classifier/105/instruction/493 | 20 + results/classifier/105/instruction/498417 | 53 + results/classifier/105/instruction/50773216 | 118 + results/classifier/105/instruction/509 | 14 + results/classifier/105/instruction/514 | 38 + results/classifier/105/instruction/516 | 55 + results/classifier/105/instruction/521 | 14 + results/classifier/105/instruction/533 | 14 + results/classifier/105/instruction/543 | 14 + results/classifier/105/instruction/544 | 14 + results/classifier/105/instruction/545 | 14 + results/classifier/105/instruction/551 | 14 + results/classifier/105/instruction/584 | 56 + results/classifier/105/instruction/597575 | 50 + results/classifier/105/instruction/60 | 14 + results/classifier/105/instruction/613 | 14 + results/classifier/105/instruction/616769 | 52 + results/classifier/105/instruction/619 | 14 + results/classifier/105/instruction/62 | 14 + results/classifier/105/instruction/625 | 36 + results/classifier/105/instruction/632 | 14 + results/classifier/105/instruction/63565653 | 57 + results/classifier/105/instruction/636095 | 62 + results/classifier/105/instruction/643 | 14 + results/classifier/105/instruction/646 | 31 + results/classifier/105/instruction/652 | 41 + results/classifier/105/instruction/657329 | 66 + results/classifier/105/instruction/664 | 27 + results/classifier/105/instruction/670 | 23 + results/classifier/105/instruction/674740 | 35 + results/classifier/105/instruction/682326 | 31 + results/classifier/105/instruction/682360 | 42 + results/classifier/105/instruction/690 | 32 + results/classifier/105/instruction/697 | 14 + results/classifier/105/instruction/701 | 14 + results/classifier/105/instruction/703 | 30 + results/classifier/105/instruction/70868267 | 48 + results/classifier/105/instruction/729 | 47 + results/classifier/105/instruction/744 | 16 + results/classifier/105/instruction/750 | 46 + results/classifier/105/instruction/754635 | 83 + results/classifier/105/instruction/760976 | 42 + results/classifier/105/instruction/781 | 14 
+ results/classifier/105/instruction/789652 | 29 + results/classifier/105/instruction/796480 | 63 + results/classifier/105/instruction/799 | 60 + results/classifier/105/instruction/802 | 39 + results/classifier/105/instruction/813 | 28 + results/classifier/105/instruction/817 | 14 + results/classifier/105/instruction/824 | 25 + results/classifier/105/instruction/825776 | 30 + results/classifier/105/instruction/826 | 29 + results/classifier/105/instruction/842290 | 30 + results/classifier/105/instruction/885 | 14 + results/classifier/105/instruction/891 | 14 + results/classifier/105/instruction/891002 | 57 + results/classifier/105/instruction/899664 | 120 + results/classifier/105/instruction/91 | 14 + results/classifier/105/instruction/925 | 31 + results/classifier/105/instruction/927 | 45 + results/classifier/105/instruction/929 | 46 + results/classifier/105/instruction/947 | 26 + results/classifier/105/instruction/953 | 14 + results/classifier/105/instruction/974958 | 26 + results/classifier/105/instruction/982 | 50 + results/classifier/105/instruction/984 | 36 + results/classifier/105/instruction/984516 | 82 + results/classifier/105/mistranslation/1015 | 14 + results/classifier/105/mistranslation/1027525 | 64 + results/classifier/105/mistranslation/1030 | 14 + results/classifier/105/mistranslation/1035042 | 30 + results/classifier/105/mistranslation/1036987 | 62 + results/classifier/105/mistranslation/1048 | 19 + results/classifier/105/mistranslation/1050694 | 97 + results/classifier/105/mistranslation/1054831 | 67 + results/classifier/105/mistranslation/1066909 | 31 + results/classifier/105/mistranslation/1068900 | 30 + results/classifier/105/mistranslation/1077806 | 21 + results/classifier/105/mistranslation/108 | 14 + results/classifier/105/mistranslation/1081 | 14 + results/classifier/105/mistranslation/1084 | 14 + results/classifier/105/mistranslation/1090604 | 37 + results/classifier/105/mistranslation/1095531 | 168 + results/classifier/105/mistranslation/1100 | 14 + results/classifier/105/mistranslation/1119 | 28 + results/classifier/105/mistranslation/1120383 | 160 + results/classifier/105/mistranslation/1121 | 83 + results/classifier/105/mistranslation/1127 | 142 + results/classifier/105/mistranslation/113 | 14 + results/classifier/105/mistranslation/1151986 | 314 + results/classifier/105/mistranslation/1153 | 14 + results/classifier/105/mistranslation/1161 | 14 + results/classifier/105/mistranslation/1169 | 16 + results/classifier/105/mistranslation/1172 | 72 + results/classifier/105/mistranslation/1178 | 14 + results/classifier/105/mistranslation/1178107 | 37 + results/classifier/105/mistranslation/1179664 | 64 + results/classifier/105/mistranslation/1179731 | 59 + results/classifier/105/mistranslation/1180777 | 1329 ++++ results/classifier/105/mistranslation/1182 | 82 + results/classifier/105/mistranslation/1192065 | 22 + results/classifier/105/mistranslation/1195882 | 154 + results/classifier/105/mistranslation/1196145 | 38 + results/classifier/105/mistranslation/1216368 | 52 + results/classifier/105/mistranslation/1217339 | 83 + results/classifier/105/mistranslation/1238 | 132 + results/classifier/105/mistranslation/1245724 | 84 + results/classifier/105/mistranslation/1248 | 24 + results/classifier/105/mistranslation/1249 | 14 + results/classifier/105/mistranslation/1257 | 16 + results/classifier/105/mistranslation/1276879 | 144 + results/classifier/105/mistranslation/128 | 14 + results/classifier/105/mistranslation/1284874 | 139 + 
results/classifier/105/mistranslation/1285363 | 135 + results/classifier/105/mistranslation/1287195 | 24 + results/classifier/105/mistranslation/1298442 | 32 + results/classifier/105/mistranslation/130 | 14 + results/classifier/105/mistranslation/1300863 | 23 + results/classifier/105/mistranslation/1305402 | 112 + results/classifier/105/mistranslation/1307 | 85 + results/classifier/105/mistranslation/1311614 | 135 + results/classifier/105/mistranslation/1318091 | 68 + results/classifier/105/mistranslation/1318474 | 33 + results/classifier/105/mistranslation/1320360 | 508 ++ results/classifier/105/mistranslation/1322 | 14 + results/classifier/105/mistranslation/1324112 | 1061 +++ results/classifier/105/mistranslation/1326533 | 36 + results/classifier/105/mistranslation/1331 | 14 + results/classifier/105/mistranslation/1332 | 14 + results/classifier/105/mistranslation/1334 | 14 + results/classifier/105/mistranslation/1336 | 14 + results/classifier/105/mistranslation/1337 | 29 + results/classifier/105/mistranslation/1338 | 14 + results/classifier/105/mistranslation/1345 | 14 + results/classifier/105/mistranslation/1353947 | 101 + results/classifier/105/mistranslation/1358619 | 118 + results/classifier/105/mistranslation/136 | 14 + results/classifier/105/mistranslation/1367365 | 40 + results/classifier/105/mistranslation/1376533 | 23 + results/classifier/105/mistranslation/1383 | 14 + results/classifier/105/mistranslation/1393440 | 34 + results/classifier/105/mistranslation/1397 | 14 + results/classifier/105/mistranslation/1402802 | 31 + results/classifier/105/mistranslation/1405176 | 23 + results/classifier/105/mistranslation/1407813 | 29 + results/classifier/105/mistranslation/1414293 | 24 + results/classifier/105/mistranslation/1417 | 18 + results/classifier/105/mistranslation/1429313 | 28 + results/classifier/105/mistranslation/143 | 14 + results/classifier/105/mistranslation/1431 | 63 + results/classifier/105/mistranslation/1431084 | 28 + results/classifier/105/mistranslation/1433 | 170 + results/classifier/105/mistranslation/1435101 | 23 + results/classifier/105/mistranslation/1437811 | 24 + results/classifier/105/mistranslation/1438144 | 30 + results/classifier/105/mistranslation/1449 | 18 + results/classifier/105/mistranslation/1449687 | 115 + results/classifier/105/mistranslation/1453 | 14 + results/classifier/105/mistranslation/146 | 14 + results/classifier/105/mistranslation/1461 | 14 + results/classifier/105/mistranslation/1469924 | 68 + results/classifier/105/mistranslation/1470536 | 32 + results/classifier/105/mistranslation/1473451 | 32 + results/classifier/105/mistranslation/1479 | 14 + results/classifier/105/mistranslation/1480 | 14 + results/classifier/105/mistranslation/1483070 | 177 + results/classifier/105/mistranslation/14887122 | 266 + results/classifier/105/mistranslation/1490 | 74 + results/classifier/105/mistranslation/1500175 | 137 + results/classifier/105/mistranslation/1512134 | 55 + results/classifier/105/mistranslation/1513 | 14 + results/classifier/105/mistranslation/152 | 14 + results/classifier/105/mistranslation/1526 | 14 + results/classifier/105/mistranslation/1527322 | 65 + results/classifier/105/mistranslation/1528214 | 58 + results/classifier/105/mistranslation/1529 | 14 + results/classifier/105/mistranslation/1533 | 14 + results/classifier/105/mistranslation/1536487 | 99 + results/classifier/105/mistranslation/154 | 14 + results/classifier/105/mistranslation/1543 | 14 + results/classifier/105/mistranslation/1545052 | 101 + 
results/classifier/105/mistranslation/156 | 14 + results/classifier/105/mistranslation/1562653 | 163 + results/classifier/105/mistranslation/1563152 | 72 + results/classifier/105/mistranslation/1563887 | 912 +++ results/classifier/105/mistranslation/1566 | 22 + results/classifier/105/mistranslation/1568621 | 47 + results/classifier/105/mistranslation/1573 | 14 + results/classifier/105/mistranslation/1577841 | 30 + results/classifier/105/mistranslation/1579306 | 85 + results/classifier/105/mistranslation/1579327 | 295 + results/classifier/105/mistranslation/1580586 | 37 + results/classifier/105/mistranslation/1584 | 14 + results/classifier/105/mistranslation/1586 | 120 + results/classifier/105/mistranslation/1586229 | 43 + results/classifier/105/mistranslation/159 | 14 + results/classifier/105/mistranslation/1590322 | 28 + results/classifier/105/mistranslation/1593605 | 260 + results/classifier/105/mistranslation/1596870 | 26 + results/classifier/105/mistranslation/1600112 | 37 + results/classifier/105/mistranslation/1606899 | 251 + results/classifier/105/mistranslation/1608 | 14 + results/classifier/105/mistranslation/1608802 | 89 + results/classifier/105/mistranslation/1610368 | 107 + results/classifier/105/mistranslation/1613817 | 130 + results/classifier/105/mistranslation/1614521 | 21 + results/classifier/105/mistranslation/1614609 | 65 + results/classifier/105/mistranslation/1615079 | 29 + results/classifier/105/mistranslation/1625987 | 81 + results/classifier/105/mistranslation/1631773 | 30 + results/classifier/105/mistranslation/1648726 | 42 + results/classifier/105/mistranslation/1650175 | 33 + results/classifier/105/mistranslation/1654 | 94 + results/classifier/105/mistranslation/1655708 | 80 + results/classifier/105/mistranslation/1657538 | 214 + results/classifier/105/mistranslation/1657841 | 29 + results/classifier/105/mistranslation/1658634 | 413 ++ results/classifier/105/mistranslation/166 | 14 + results/classifier/105/mistranslation/1660010 | 56 + results/classifier/105/mistranslation/1660599 | 26 + results/classifier/105/mistranslation/1662468 | 73 + results/classifier/105/mistranslation/1665344 | 26 + results/classifier/105/mistranslation/1666 | 14 + results/classifier/105/mistranslation/1667 | 14 + results/classifier/105/mistranslation/1668 | 58 + results/classifier/105/mistranslation/1668360 | 20 + results/classifier/105/mistranslation/1675108 | 368 + results/classifier/105/mistranslation/1683084 | 29 + results/classifier/105/mistranslation/1686980 | 101 + results/classifier/105/mistranslation/1692 | 115 + results/classifier/105/mistranslation/1694 | 14 + results/classifier/105/mistranslation/1696746 | 37 + results/classifier/105/mistranslation/1699 | 14 + results/classifier/105/mistranslation/1700 | 14 + results/classifier/105/mistranslation/1706 | 22 + results/classifier/105/mistranslation/1707274 | 24 + results/classifier/105/mistranslation/1708077 | 21 + results/classifier/105/mistranslation/1708215 | 44 + results/classifier/105/mistranslation/1709025 | 62 + results/classifier/105/mistranslation/1709170 | 132 + results/classifier/105/mistranslation/1711602 | 946 +++ results/classifier/105/mistranslation/1711828 | 74 + results/classifier/105/mistranslation/1715296 | 35 + results/classifier/105/mistranslation/1715715 | 76 + results/classifier/105/mistranslation/1719 | 20 + results/classifier/105/mistranslation/1719339 | 51 + results/classifier/105/mistranslation/1720 | 53 + results/classifier/105/mistranslation/1720971 | 27 + 
results/classifier/105/mistranslation/1722857 | 25 + results/classifier/105/mistranslation/1728325 | 90 + results/classifier/105/mistranslation/1729501 | 246 + results/classifier/105/mistranslation/1730101 | 23 + results/classifier/105/mistranslation/1731277 | 54 + results/classifier/105/mistranslation/1735 | 42 + results/classifier/105/mistranslation/1736376 | 29 + results/classifier/105/mistranslation/1743214 | 62 + results/classifier/105/mistranslation/1753 | 16 + results/classifier/105/mistranslation/1753437 | 61 + results/classifier/105/mistranslation/1754542 | 2018 ++++++ results/classifier/105/mistranslation/1756080 | 20 + results/classifier/105/mistranslation/1756519 | 71 + results/classifier/105/mistranslation/1767126 | 28 + results/classifier/105/mistranslation/177 | 14 + results/classifier/105/mistranslation/1772086 | 46 + results/classifier/105/mistranslation/1773743 | 43 + results/classifier/105/mistranslation/1774412 | 30 + results/classifier/105/mistranslation/1776096 | 96 + results/classifier/105/mistranslation/1777293 | 26 + results/classifier/105/mistranslation/1777969 | 118 + results/classifier/105/mistranslation/1778 | 14 + results/classifier/105/mistranslation/1779120 | 115 + results/classifier/105/mistranslation/1780812 | 29 + results/classifier/105/mistranslation/1780814 | 63 + results/classifier/105/mistranslation/1788098 | 1006 +++ results/classifier/105/mistranslation/1788275 | 83 + results/classifier/105/mistranslation/1788701 | 41 + results/classifier/105/mistranslation/1791947 | 69 + results/classifier/105/mistranslation/1793016 | 54 + results/classifier/105/mistranslation/1793791 | 296 + results/classifier/105/mistranslation/1795369 | 56 + results/classifier/105/mistranslation/1796816 | 41 + results/classifier/105/mistranslation/1799200 | 73 + results/classifier/105/mistranslation/1799768 | 27 + results/classifier/105/mistranslation/1800993 | 30 + results/classifier/105/mistranslation/1801 | 64 + results/classifier/105/mistranslation/1805445 | 96 + results/classifier/105/mistranslation/1811862 | 57 + results/classifier/105/mistranslation/1814381 | 78 + results/classifier/105/mistranslation/1815143 | 110 + results/classifier/105/mistranslation/1815413 | 43 + results/classifier/105/mistranslation/1816052 | 106 + results/classifier/105/mistranslation/1819289 | 247 + results/classifier/105/mistranslation/1820247 | 430 ++ results/classifier/105/mistranslation/1823152 | 89 + results/classifier/105/mistranslation/1824704 | 100 + results/classifier/105/mistranslation/1824768 | 117 + results/classifier/105/mistranslation/1825207 | 75 + results/classifier/105/mistranslation/1828723 | 45 + results/classifier/105/mistranslation/1829696 | 403 ++ results/classifier/105/mistranslation/1830415 | 32 + results/classifier/105/mistranslation/1830872 | 612 ++ results/classifier/105/mistranslation/1831750 | 92 + results/classifier/105/mistranslation/1832914 | 28 + results/classifier/105/mistranslation/1833053 | 66 + results/classifier/105/mistranslation/1834113 | 296 + results/classifier/105/mistranslation/1834613 | 81 + results/classifier/105/mistranslation/1835793 | 34 + results/classifier/105/mistranslation/1840646 | 31 + results/classifier/105/mistranslation/1840648 | 30 + results/classifier/105/mistranslation/1843852 | 89 + results/classifier/105/mistranslation/1847861 | 69 + results/classifier/105/mistranslation/1848556 | 238 + results/classifier/105/mistranslation/1849234 | 17 + results/classifier/105/mistranslation/1851552 | 332 + 
results/classifier/105/mistranslation/1853083 | 165 + results/classifier/105/mistranslation/1859713 | 70 + results/classifier/105/mistranslation/1861404 | 223 + results/classifier/105/mistranslation/1861946 | 233 + results/classifier/105/mistranslation/1862986 | 155 + results/classifier/105/mistranslation/1863023 | 71 + results/classifier/105/mistranslation/1863025 | 446 ++ results/classifier/105/mistranslation/1863200 | 105 + results/classifier/105/mistranslation/1863445 | 56 + results/classifier/105/mistranslation/1863601 | 39 + results/classifier/105/mistranslation/1863710 | 33 + results/classifier/105/mistranslation/1864704 | 65 + results/classifier/105/mistranslation/1864955 | 65 + results/classifier/105/mistranslation/1866870 | 831 +++ results/classifier/105/mistranslation/1867072 | 89 + results/classifier/105/mistranslation/1868617 | 51 + results/classifier/105/mistranslation/1873 | 97 + results/classifier/105/mistranslation/1873769 | 101 + results/classifier/105/mistranslation/1874504 | 45 + results/classifier/105/mistranslation/1875012 | 138 + results/classifier/105/mistranslation/1875819 | 21 + results/classifier/105/mistranslation/1877781 | 75 + results/classifier/105/mistranslation/1878641 | 74 + results/classifier/105/mistranslation/1879175 | 163 + results/classifier/105/mistranslation/1879227 | 82 + results/classifier/105/mistranslation/1880287 | 40 + results/classifier/105/mistranslation/1881648 | 44 + results/classifier/105/mistranslation/1884507 | 49 + results/classifier/105/mistranslation/1884719 | 593 ++ results/classifier/105/mistranslation/1885175 | 95 + results/classifier/105/mistranslation/1886097 | 84 + results/classifier/105/mistranslation/1886306 | 27 + results/classifier/105/mistranslation/1886318 | 308 + results/classifier/105/mistranslation/1887309 | 325 + results/classifier/105/mistranslation/1887318 | 97 + results/classifier/105/mistranslation/1888492 | 82 + results/classifier/105/mistranslation/1888818 | 100 + results/classifier/105/mistranslation/1888923 | 401 ++ results/classifier/105/mistranslation/1891748 | 118 + results/classifier/105/mistranslation/1891749 | 21 + results/classifier/105/mistranslation/1892960 | 342 + results/classifier/105/mistranslation/1896263 | 598 ++ results/classifier/105/mistranslation/1898883 | 61 + results/classifier/105/mistranslation/1900918 | 31 + results/classifier/105/mistranslation/1900919 | 60 + results/classifier/105/mistranslation/1901359 | 71 + results/classifier/105/mistranslation/1902394 | 121 + results/classifier/105/mistranslation/1902975 | 25 + results/classifier/105/mistranslation/1903493 | 21 + results/classifier/105/mistranslation/1905521 | 158 + results/classifier/105/mistranslation/1907 | 70 + results/classifier/105/mistranslation/1907953 | 19 + results/classifier/105/mistranslation/1908450 | 102 + results/classifier/105/mistranslation/1908626 | 175 + results/classifier/105/mistranslation/191 | 14 + results/classifier/105/mistranslation/1910540 | 23 + results/classifier/105/mistranslation/1911 | 53 + results/classifier/105/mistranslation/1911666 | 91 + results/classifier/105/mistranslation/1913315 | 94 + results/classifier/105/mistranslation/1913619 | 27 + results/classifier/105/mistranslation/1915063 | 718 ++ results/classifier/105/mistranslation/1915682 | 160 + results/classifier/105/mistranslation/1915925 | 904 +++ results/classifier/105/mistranslation/1916501 | 116 + results/classifier/105/mistranslation/1917565 | 173 + results/classifier/105/mistranslation/1918026 | 53 + 
results/classifier/105/mistranslation/1918149 | 56 + results/classifier/105/mistranslation/1919021 | 30 + results/classifier/105/mistranslation/1920752 | 122 + results/classifier/105/mistranslation/1921444 | 128 + results/classifier/105/mistranslation/1923689 | 101 + results/classifier/105/mistranslation/1923693 | 37 + results/classifier/105/mistranslation/1925496 | 138 + results/classifier/105/mistranslation/1931 | 16 + results/classifier/105/mistranslation/194 | 14 + results/classifier/105/mistranslation/1956 | 14 + results/classifier/105/mistranslation/1970 | 14 + results/classifier/105/mistranslation/1970563 | 132 + results/classifier/105/mistranslation/1975 | 50 + results/classifier/105/mistranslation/1978 | 18 + results/classifier/105/mistranslation/1982 | 22 + results/classifier/105/mistranslation/1995 | 14 + results/classifier/105/mistranslation/1999 | 64 + results/classifier/105/mistranslation/2005 | 42 + results/classifier/105/mistranslation/2053 | 14 + results/classifier/105/mistranslation/2054 | 55 + results/classifier/105/mistranslation/2062 | 14 + results/classifier/105/mistranslation/2076 | 14 + results/classifier/105/mistranslation/2077 | 14 + results/classifier/105/mistranslation/2084 | 14 + results/classifier/105/mistranslation/209 | 14 + results/classifier/105/mistranslation/2120 | 14 + results/classifier/105/mistranslation/2121 | 14 + results/classifier/105/mistranslation/2130 | 14 + results/classifier/105/mistranslation/2184 | 66 + results/classifier/105/mistranslation/2197 | 71 + results/classifier/105/mistranslation/2203 | 14 + results/classifier/105/mistranslation/2214 | 14 + results/classifier/105/mistranslation/222 | 14 + results/classifier/105/mistranslation/2227 | 49 + results/classifier/105/mistranslation/2232 | 14 + results/classifier/105/mistranslation/2236 | 14 + results/classifier/105/mistranslation/227 | 14 + results/classifier/105/mistranslation/228 | 14 + results/classifier/105/mistranslation/230 | 14 + results/classifier/105/mistranslation/23270873 | 700 ++ results/classifier/105/mistranslation/2352 | 14 + results/classifier/105/mistranslation/2366 | 14 + results/classifier/105/mistranslation/2367 | 14 + results/classifier/105/mistranslation/2369 | 14 + results/classifier/105/mistranslation/2395 | 73 + results/classifier/105/mistranslation/2401 | 14 + results/classifier/105/mistranslation/241 | 14 + results/classifier/105/mistranslation/2415 | 66 + results/classifier/105/mistranslation/2425 | 20 + results/classifier/105/mistranslation/2430 | 20 + results/classifier/105/mistranslation/2431 | 14 + results/classifier/105/mistranslation/244 | 14 + results/classifier/105/mistranslation/2448 | 59 + results/classifier/105/mistranslation/2451 | 14 + results/classifier/105/mistranslation/2467 | 44 + results/classifier/105/mistranslation/2481 | 14 + results/classifier/105/mistranslation/2484 | 14 + results/classifier/105/mistranslation/255 | 14 + results/classifier/105/mistranslation/25842545 | 210 + results/classifier/105/mistranslation/2600 | 14 + results/classifier/105/mistranslation/2607 | 80 + results/classifier/105/mistranslation/2610 | 14 + results/classifier/105/mistranslation/2614 | 14 + results/classifier/105/mistranslation/2630 | 14 + results/classifier/105/mistranslation/2638 | 30 + results/classifier/105/mistranslation/2644 | 79 + results/classifier/105/mistranslation/266 | 14 + results/classifier/105/mistranslation/267 | 14 + results/classifier/105/mistranslation/2684 | 14 + results/classifier/105/mistranslation/2694 | 37 + 
results/classifier/105/mistranslation/2709 | 14 + results/classifier/105/mistranslation/2740 | 80 + results/classifier/105/mistranslation/2766 | 36 + results/classifier/105/mistranslation/2770 | 27 + results/classifier/105/mistranslation/2776 | 14 + results/classifier/105/mistranslation/2786 | 24 + results/classifier/105/mistranslation/2795 | 173 + results/classifier/105/mistranslation/2811 | 107 + results/classifier/105/mistranslation/2823 | 56 + results/classifier/105/mistranslation/2837 | 14 + results/classifier/105/mistranslation/2868 | 16 + results/classifier/105/mistranslation/2879 | 14 + results/classifier/105/mistranslation/2901 | 14 + results/classifier/105/mistranslation/2932 | 14 + results/classifier/105/mistranslation/2942 | 78 + results/classifier/105/mistranslation/2949 | 30 + results/classifier/105/mistranslation/297 | 14 + results/classifier/105/mistranslation/2974 | 14 + results/classifier/105/mistranslation/2977 | 24 + results/classifier/105/mistranslation/314 | 14 + results/classifier/105/mistranslation/342 | 14 + results/classifier/105/mistranslation/343 | 14 + results/classifier/105/mistranslation/345 | 14 + results/classifier/105/mistranslation/356 | 14 + results/classifier/105/mistranslation/359 | 14 + results/classifier/105/mistranslation/364 | 14 + results/classifier/105/mistranslation/371 | 14 + results/classifier/105/mistranslation/372 | 14 + results/classifier/105/mistranslation/373 | 14 + results/classifier/105/mistranslation/374 | 14 + results/classifier/105/mistranslation/376 | 14 + results/classifier/105/mistranslation/378 | 14 + results/classifier/105/mistranslation/379 | 14 + results/classifier/105/mistranslation/382 | 14 + results/classifier/105/mistranslation/388 | 14 + results/classifier/105/mistranslation/392 | 14 + results/classifier/105/mistranslation/393569 | 90 + results/classifier/105/mistranslation/397212 | 228 + results/classifier/105/mistranslation/400 | 14 + results/classifier/105/mistranslation/421 | 14 + results/classifier/105/mistranslation/426 | 14 + results/classifier/105/mistranslation/427 | 14 + results/classifier/105/mistranslation/435 | 14 + results/classifier/105/mistranslation/450 | 14 + results/classifier/105/mistranslation/47 | 14 + results/classifier/105/mistranslation/470 | 14 + results/classifier/105/mistranslation/491 | 14 + results/classifier/105/mistranslation/500 | 14 + results/classifier/105/mistranslation/502107 | 134 + results/classifier/105/mistranslation/508 | 14 + results/classifier/105/mistranslation/521994 | 207 + results/classifier/105/mistranslation/53 | 14 + results/classifier/105/mistranslation/541 | 14 + results/classifier/105/mistranslation/562107 | 43 + results/classifier/105/mistranslation/568 | 39 + results/classifier/105/mistranslation/568053 | 24 + results/classifier/105/mistranslation/572 | 14 + results/classifier/105/mistranslation/589827 | 35 + results/classifier/105/mistranslation/59 | 14 + results/classifier/105/mistranslation/596 | 14 + results/classifier/105/mistranslation/602 | 26 + results/classifier/105/mistranslation/602336 | 120 + results/classifier/105/mistranslation/608107 | 51 + results/classifier/105/mistranslation/629791 | 21 + results/classifier/105/mistranslation/638 | 26 + results/classifier/105/mistranslation/64322995 | 62 + results/classifier/105/mistranslation/657 | 14 + results/classifier/105/mistranslation/658 | 14 + results/classifier/105/mistranslation/658904 | 22 + results/classifier/105/mistranslation/673009 | 153 + results/classifier/105/mistranslation/687 | 14 + 
results/classifier/105/mistranslation/688 | 60 + results/classifier/105/mistranslation/692 | 14 + results/classifier/105/mistranslation/70294255 | 1069 +++ results/classifier/105/mistranslation/704 | 14 + results/classifier/105/mistranslation/713 | 14 + results/classifier/105/mistranslation/72 | 14 + results/classifier/105/mistranslation/721 | 43 + results/classifier/105/mistranslation/727134 | 21 + results/classifier/105/mistranslation/74466963 | 1886 +++++ results/classifier/105/mistranslation/74545755 | 352 + results/classifier/105/mistranslation/746 | 14 + results/classifier/105/mistranslation/753916 | 47 + results/classifier/105/mistranslation/764252 | 31 + results/classifier/105/mistranslation/77 | 14 + results/classifier/105/mistranslation/773 | 40 + results/classifier/105/mistranslation/793 | 14 + results/classifier/105/mistranslation/795866 | 167 + results/classifier/105/mistranslation/80604314 | 1488 ++++ results/classifier/105/mistranslation/814 | 50 + results/classifier/105/mistranslation/814222 | 258 + results/classifier/105/mistranslation/828 | 24 + results/classifier/105/mistranslation/83 | 14 + results/classifier/105/mistranslation/838 | 14 + results/classifier/105/mistranslation/853 | 23 + results/classifier/105/mistranslation/862 | 62 + results/classifier/105/mistranslation/864490 | 32 + results/classifier/105/mistranslation/870 | 25 + results/classifier/105/mistranslation/886255 | 126 + results/classifier/105/mistranslation/889053 | 63 + results/classifier/105/mistranslation/890 | 14 + results/classifier/105/mistranslation/891625 | 52 + results/classifier/105/mistranslation/893367 | 37 + results/classifier/105/mistranslation/895 | 51 + results/classifier/105/mistranslation/906 | 14 + results/classifier/105/mistranslation/917 | 14 + results/classifier/105/mistranslation/939437 | 26 + results/classifier/105/mistranslation/939443 | 20 + results/classifier/105/mistranslation/940 | 14 + results/classifier/105/mistranslation/942659 | 61 + results/classifier/105/mistranslation/947273 | 21 + results/classifier/105/mistranslation/950 | 36 + results/classifier/105/mistranslation/961757 | 39 + results/classifier/105/mistranslation/963 | 14 + results/classifier/105/mistranslation/988128 | 52 + results/classifier/105/mistranslation/995758 | 30 + results/classifier/105/mistranslation/996798 | 31 + results/classifier/105/network/05479587 | 91 + results/classifier/105/network/1010484 | 38 + results/classifier/105/network/1014099 | 41 + results/classifier/105/network/1054180 | 36 + results/classifier/105/network/1067 | 97 + results/classifier/105/network/1071 | 25 + results/classifier/105/network/1139 | 91 + results/classifier/105/network/1158 | 14 + results/classifier/105/network/1174 | 26 + results/classifier/105/network/1176366 | 42 + results/classifier/105/network/1189 | 14 + results/classifier/105/network/1192464 | 56 + results/classifier/105/network/1196727 | 160 + results/classifier/105/network/1222034 | 242 + results/classifier/105/network/127 | 14 + results/classifier/105/network/1279 | 21 + results/classifier/105/network/1286 | 14 + results/classifier/105/network/1297781 | 26 + results/classifier/105/network/1299 | 37 + results/classifier/105/network/1309 | 14 + results/classifier/105/network/1364 | 28 + results/classifier/105/network/1369347 | 60 + results/classifier/105/network/1381 | 16 + results/classifier/105/network/1385 | 14 + results/classifier/105/network/1400 | 14 + results/classifier/105/network/1402289 | 52 + results/classifier/105/network/1422 | 28 + 
results/classifier/105/network/1440 | 14 + results/classifier/105/network/1451 | 14 + results/classifier/105/network/1462 | 27 + results/classifier/105/network/1482 | 28 + results/classifier/105/network/1502095 | 57 + results/classifier/105/network/1505 | 14 + results/classifier/105/network/151 | 14 + results/classifier/105/network/1543163 | 79 + results/classifier/105/network/1569988 | 71 + results/classifier/105/network/1574327 | 33 + results/classifier/105/network/1575561 | 29 + results/classifier/105/network/1585433 | 24 + results/classifier/105/network/1588591 | 26 + results/classifier/105/network/1604303 | 33 + results/classifier/105/network/1633508 | 63 + results/classifier/105/network/1634726 | 61 + results/classifier/105/network/1656 | 20 + results/classifier/105/network/1656927 | 35 + results/classifier/105/network/1662 | 48 + results/classifier/105/network/1702798 | 50 + results/classifier/105/network/1719689 | 34 + results/classifier/105/network/1721788 | 103 + results/classifier/105/network/1724477 | 57 + results/classifier/105/network/1732 | 16 + results/classifier/105/network/1751 | 14 + results/classifier/105/network/1754372 | 31 + results/classifier/105/network/1757 | 14 + results/classifier/105/network/1773753 | 291 + results/classifier/105/network/1779447 | 37 + results/classifier/105/network/1783 | 16 + results/classifier/105/network/1809453 | 30 + results/classifier/105/network/1814352 | 80 + results/classifier/105/network/1824622 | 32 + results/classifier/105/network/1832281 | 97 + results/classifier/105/network/1832877 | 67 + results/classifier/105/network/1849644 | 185 + results/classifier/105/network/1856834 | 213 + results/classifier/105/network/1857226 | 48 + results/classifier/105/network/1861875 | 48 + results/classifier/105/network/1862979 | 44 + results/classifier/105/network/1874539 | 42 + results/classifier/105/network/1874676 | 36 + results/classifier/105/network/1876 | 14 + results/classifier/105/network/1876187 | 27 + results/classifier/105/network/1881 | 28 + results/classifier/105/network/1883984 | 154 + results/classifier/105/network/1884169 | 31 + results/classifier/105/network/1884425 | 55 + results/classifier/105/network/1886793 | 396 ++ results/classifier/105/network/1894781 | 142 + results/classifier/105/network/190 | 14 + results/classifier/105/network/1903470 | 65 + results/classifier/105/network/1904954 | 53 + results/classifier/105/network/1912059 | 65 + results/classifier/105/network/1913012 | 175 + results/classifier/105/network/1957 | 33 + results/classifier/105/network/198 | 14 + results/classifier/105/network/199 | 14 + results/classifier/105/network/2009 | 14 + results/classifier/105/network/2017 | 30 + results/classifier/105/network/2019 | 39 + results/classifier/105/network/2023 | 14 + results/classifier/105/network/2024 | 43 + results/classifier/105/network/2109 | 14 + results/classifier/105/network/2113 | 14 + results/classifier/105/network/2143 | 51 + results/classifier/105/network/2178 | 30 + results/classifier/105/network/218 | 14 + results/classifier/105/network/2182 | 14 + results/classifier/105/network/2189 | 27 + results/classifier/105/network/2209 | 60 + results/classifier/105/network/2210 | 68 + results/classifier/105/network/2228 | 21 + results/classifier/105/network/235 | 14 + results/classifier/105/network/2364 | 14 + results/classifier/105/network/238 | 14 + results/classifier/105/network/2409 | 14 + results/classifier/105/network/2439 | 22 + results/classifier/105/network/2459 | 14 + results/classifier/105/network/2461 
| 69 + results/classifier/105/network/2494 | 14 + results/classifier/105/network/2514 | 14 + results/classifier/105/network/2528 | 22 + results/classifier/105/network/2552 | 85 + results/classifier/105/network/2553 | 95 + results/classifier/105/network/2623 | 14 + results/classifier/105/network/2668 | 16 + results/classifier/105/network/2670 | 57 + results/classifier/105/network/2685 | 14 + results/classifier/105/network/2688 | 14 + results/classifier/105/network/2727 | 14 + results/classifier/105/network/2745 | 39 + results/classifier/105/network/2746 | 14 + results/classifier/105/network/2756 | 55 + results/classifier/105/network/2758 | 36 + results/classifier/105/network/2767 | 50 + results/classifier/105/network/277 | 14 + results/classifier/105/network/2780 | 30 + results/classifier/105/network/2814 | 14 + results/classifier/105/network/282 | 14 + results/classifier/105/network/2827 | 14 + results/classifier/105/network/2829 | 34 + results/classifier/105/network/2849 | 31 + results/classifier/105/network/2872 | 14 + results/classifier/105/network/2884 | 48 + results/classifier/105/network/2951 | 34 + results/classifier/105/network/2970 | 14 + results/classifier/105/network/299 | 14 + results/classifier/105/network/308 | 14 + results/classifier/105/network/309 | 14 + results/classifier/105/network/335 | 14 + results/classifier/105/network/336 | 14 + results/classifier/105/network/348 | 14 + results/classifier/105/network/360 | 14 + results/classifier/105/network/377 | 14 + results/classifier/105/network/401 | 14 + results/classifier/105/network/428 | 14 + results/classifier/105/network/460 | 14 + results/classifier/105/network/465 | 20 + results/classifier/105/network/485250 | 46 + results/classifier/105/network/495566 | 45 + results/classifier/105/network/517 | 14 + results/classifier/105/network/524447 | 336 + results/classifier/105/network/539 | 14 + results/classifier/105/network/551545 | 500 ++ results/classifier/105/network/557 | 14 + results/classifier/105/network/559 | 14 + results/classifier/105/network/580 | 30 + results/classifier/105/network/590552 | 36 + results/classifier/105/network/593 | 20 + results/classifier/105/network/605 | 28 + results/classifier/105/network/62179944 | 39 + results/classifier/105/network/626 | 14 + results/classifier/105/network/641118 | 30 + results/classifier/105/network/676029 | 62 + results/classifier/105/network/676934 | 31 + results/classifier/105/network/741 | 14 + results/classifier/105/network/762 | 14 + results/classifier/105/network/774 | 28 + results/classifier/105/network/806656 | 35 + results/classifier/105/network/807 | 31 + results/classifier/105/network/811 | 14 + results/classifier/105/network/812 | 137 + results/classifier/105/network/829455 | 56 + results/classifier/105/network/838974 | 27 + results/classifier/105/network/874 | 14 + results/classifier/105/network/894037 | 125 + results/classifier/105/network/898 | 14 + results/classifier/105/network/899 | 27 + results/classifier/105/network/903365 | 23 + results/classifier/105/network/912 | 14 + results/classifier/105/network/960 | 14 + results/classifier/105/network/97 | 14 + results/classifier/105/network/974 | 16 + results/classifier/105/network/976 | 14 + results/classifier/105/network/984476 | 27 + results/classifier/105/network/999 | 19 + results/classifier/105/other/02364653 | 371 + results/classifier/105/other/02572177 | 429 ++ results/classifier/105/other/100 | 14 + results/classifier/105/other/1008136 | 211 + results/classifier/105/other/1010 | 91 + 
results/classifier/105/other/1012 | 54 + results/classifier/105/other/1012023 | 111 + results/classifier/105/other/1014681 | 120 + results/classifier/105/other/1016 | 16 + results/classifier/105/other/1018530 | 121 + results/classifier/105/other/1022 | 46 + results/classifier/105/other/1025244 | 351 + results/classifier/105/other/1029 | 66 + results/classifier/105/other/1031920 | 80 + results/classifier/105/other/1036363 | 116 + results/classifier/105/other/1037606 | 144 + results/classifier/105/other/1037675 | 200 + results/classifier/105/other/1038070 | 132 + results/classifier/105/other/1038136 | 93 + results/classifier/105/other/1042388 | 373 + results/classifier/105/other/1052 | 92 + results/classifier/105/other/1054558 | 51 + results/classifier/105/other/1060928 | 488 ++ results/classifier/105/other/1061 | 259 + results/classifier/105/other/1062201 | 250 + results/classifier/105/other/1062220 | 98 + results/classifier/105/other/1062411 | 112 + results/classifier/105/other/1066055 | 847 +++ results/classifier/105/other/1077514 | 138 + results/classifier/105/other/1077708 | 61 + results/classifier/105/other/1079713 | 288 + results/classifier/105/other/1084148 | 69 + results/classifier/105/other/1086782 | 113 + results/classifier/105/other/1087114 | 205 + results/classifier/105/other/1087974 | 380 + results/classifier/105/other/1089005 | 56 + results/classifier/105/other/1089281 | 97 + results/classifier/105/other/1089496 | 78 + results/classifier/105/other/1090726 | 68 + results/classifier/105/other/1095 | 14 + results/classifier/105/other/1096714 | 47 + results/classifier/105/other/1102 | 51 + results/classifier/105/other/1105670 | 447 ++ results/classifier/105/other/1109 | 55 + results/classifier/105/other/1117 | 108 + results/classifier/105/other/1118 | 88 + results/classifier/105/other/1122 | 141 + results/classifier/105/other/1122492 | 64 + results/classifier/105/other/1123 | 104 + results/classifier/105/other/1123975 | 97 + results/classifier/105/other/1128935 | 394 ++ results/classifier/105/other/1129571 | 210 + results/classifier/105/other/1131757 | 249 + results/classifier/105/other/1133769 | 45 + results/classifier/105/other/1143 | 91 + results/classifier/105/other/1148 | 284 + results/classifier/105/other/1170 | 69 + results/classifier/105/other/1173 | 14 + results/classifier/105/other/1174654 | 404 ++ results/classifier/105/other/1175089 | 78 + results/classifier/105/other/1176 | 20 + results/classifier/105/other/1177774 | 82 + results/classifier/105/other/1178101 | 63 + results/classifier/105/other/1180 | 179 + results/classifier/105/other/1180923 | 222 + results/classifier/105/other/1180970 | 169 + results/classifier/105/other/1181796 | 91 + results/classifier/105/other/1182490 | 132 + results/classifier/105/other/1184089 | 108 + results/classifier/105/other/1185311 | 143 + results/classifier/105/other/1188 | 17 + results/classifier/105/other/1192 | 148 + results/classifier/105/other/1192499 | 472 ++ results/classifier/105/other/1192780 | 179 + results/classifier/105/other/1193628 | 101 + results/classifier/105/other/1197 | 828 +++ results/classifier/105/other/1201446 | 154 + results/classifier/105/other/1205156 | 263 + results/classifier/105/other/1217 | 143 + results/classifier/105/other/1218 | 33 + results/classifier/105/other/1218098 | 135 + results/classifier/105/other/1219207 | 75 + results/classifier/105/other/1219234 | 36 + results/classifier/105/other/1222 | 34 + results/classifier/105/other/1226531 | 73 + results/classifier/105/other/1233225 | 121 + 
results/classifier/105/other/1234179 | 213 + results/classifier/105/other/1236 | 59 + results/classifier/105/other/1243287 | 144 + results/classifier/105/other/1245703 | 91 + results/classifier/105/other/1246890 | 69 + results/classifier/105/other/1248959 | 103 + results/classifier/105/other/1250360 | 113 + results/classifier/105/other/1251470 | 42 + results/classifier/105/other/1254672 | 98 + results/classifier/105/other/1254786 | 111 + results/classifier/105/other/1254828 | 160 + results/classifier/105/other/1257099 | 1192 ++++ results/classifier/105/other/1257334 | 96 + results/classifier/105/other/1258168 | 327 + results/classifier/105/other/1261320 | 60 + results/classifier/105/other/1261450 | 111 + results/classifier/105/other/1267955 | 187 + results/classifier/105/other/1268279 | 348 + results/classifier/105/other/1269606 | 143 + results/classifier/105/other/1270397 | 90 + results/classifier/105/other/1272 | 63 + results/classifier/105/other/1277433 | 276 + results/classifier/105/other/1279500 | 146 + results/classifier/105/other/1283 | 95 + results/classifier/105/other/1286253 | 118 + results/classifier/105/other/12869209 | 96 + results/classifier/105/other/1290370 | 54 + results/classifier/105/other/1291 | 14 + results/classifier/105/other/1294898 | 109 + results/classifier/105/other/1295587 | 80 + results/classifier/105/other/1297487 | 371 + results/classifier/105/other/1303926 | 312 + results/classifier/105/other/1306 | 172 + results/classifier/105/other/1308341 | 184 + results/classifier/105/other/1308542 | 189 + results/classifier/105/other/1310714 | 194 + results/classifier/105/other/1312561 | 91 + results/classifier/105/other/1314857 | 108 + results/classifier/105/other/1317603 | 198 + results/classifier/105/other/1319100 | 148 + results/classifier/105/other/1321464 | 66 + results/classifier/105/other/1323 | 30 + results/classifier/105/other/1324724 | 54 + results/classifier/105/other/1326 | 71 + results/classifier/105/other/1327 | 103 + results/classifier/105/other/1327608 | 73 + results/classifier/105/other/1330 | 195 + results/classifier/105/other/1333651 | 628 ++ results/classifier/105/other/1336194 | 43 + results/classifier/105/other/13442371 | 377 + results/classifier/105/other/1347 | 36 + results/classifier/105/other/1348106 | 223 + results/classifier/105/other/1350 | 102 + results/classifier/105/other/1350435 | 267 + results/classifier/105/other/1352465 | 23 + results/classifier/105/other/1353 | 188 + results/classifier/105/other/1359 | 14 + results/classifier/105/other/1359383 | 230 + results/classifier/105/other/1362635 | 133 + results/classifier/105/other/1364501 | 158 + results/classifier/105/other/1367 | 18 + results/classifier/105/other/1368204 | 38 + results/classifier/105/other/1371915 | 61 + results/classifier/105/other/1373228 | 281 + results/classifier/105/other/1373362 | 162 + results/classifier/105/other/1377095 | 143 + results/classifier/105/other/1378554 | 201 + results/classifier/105/other/138 | 14 + results/classifier/105/other/1382 | 53 + results/classifier/105/other/1386 | 641 ++ results/classifier/105/other/1389 | 74 + results/classifier/105/other/1390520 | 385 + results/classifier/105/other/1392504 | 312 + results/classifier/105/other/1393 | 78 + results/classifier/105/other/1395 | 169 + results/classifier/105/other/1396052 | 187 + results/classifier/105/other/1397157 | 48 + results/classifier/105/other/1399 | 85 + results/classifier/105/other/1399957 | 143 + results/classifier/105/other/1400768 | 147 + results/classifier/105/other/1401798 | 67 + 
results/classifier/105/other/1402 | 72 + results/classifier/105/other/1404278 | 427 ++ results/classifier/105/other/1404610 | 42 + results/classifier/105/other/1407454 | 101 + results/classifier/105/other/1408 | 100 + results/classifier/105/other/1411 | 468 ++ results/classifier/105/other/1414222 | 105 + results/classifier/105/other/1414466 | 199 + results/classifier/105/other/1415 | 102 + results/classifier/105/other/1418 | 100 + results/classifier/105/other/1422285 | 120 + results/classifier/105/other/1423124 | 178 + results/classifier/105/other/1424 | 116 + results/classifier/105/other/1425 | 97 + results/classifier/105/other/1427 | 387 + results/classifier/105/other/1428657 | 174 + results/classifier/105/other/1428958 | 105 + results/classifier/105/other/1429 | 68 + results/classifier/105/other/1429841 | 132 + results/classifier/105/other/1430 | 123 + results/classifier/105/other/1434779 | 130 + results/classifier/105/other/1436 | 74 + results/classifier/105/other/1444 | 55 + results/classifier/105/other/1446 | 188 + results/classifier/105/other/1448985 | 182 + results/classifier/105/other/1452230 | 36 + results/classifier/105/other/1452904 | 226 + results/classifier/105/other/1453436 | 108 + results/classifier/105/other/1455254 | 148 + results/classifier/105/other/1455475 | 94 + results/classifier/105/other/1456819 | 128 + results/classifier/105/other/1457 | 14 + results/classifier/105/other/1457275 | 122 + results/classifier/105/other/1462640 | 156 + results/classifier/105/other/1464611 | 192 + results/classifier/105/other/1467240 | 199 + results/classifier/105/other/1470170 | 140 + results/classifier/105/other/1472083 | 80 + results/classifier/105/other/1474263 | 177 + results/classifier/105/other/1478360 | 191 + results/classifier/105/other/1484990 | 178 + results/classifier/105/other/1486911 | 77 + results/classifier/105/other/1487264 | 36 + results/classifier/105/other/1488901 | 180 + results/classifier/105/other/1489 | 107 + results/classifier/105/other/1493 | 98 + results/classifier/105/other/1494 | 945 +++ results/classifier/105/other/1494350 | 1047 +++ results/classifier/105/other/1495380 | 28 + results/classifier/105/other/1496384 | 35 + results/classifier/105/other/1498144 | 163 + results/classifier/105/other/1499908 | 99 + results/classifier/105/other/1500265 | 41 + results/classifier/105/other/1504513 | 98 + results/classifier/105/other/1505041 | 113 + results/classifier/105/other/1505759 | 98 + results/classifier/105/other/1509 | 174 + results/classifier/105/other/1510 | 105 + results/classifier/105/other/1511887 | 87 + results/classifier/105/other/1514 | 14 + results/classifier/105/other/1516408 | 129 + results/classifier/105/other/1525123 | 143 + results/classifier/105/other/1525676 | 62 + results/classifier/105/other/1525682 | 141 + results/classifier/105/other/1527 | 16 + results/classifier/105/other/1527765 | 166 + results/classifier/105/other/1544524 | 85 + results/classifier/105/other/1545024 | 221 + results/classifier/105/other/1549 | 108 + results/classifier/105/other/1549654 | 303 + results/classifier/105/other/1558175 | 566 ++ results/classifier/105/other/1562 | 142 + results/classifier/105/other/1568 | 52 + results/classifier/105/other/1569053 | 58 + results/classifier/105/other/1570134 | 1186 ++++ results/classifier/105/other/1571084 | 155 + results/classifier/105/other/1574572 | 72 + results/classifier/105/other/1575 | 14 + results/classifier/105/other/1578192 | 107 + results/classifier/105/other/1579 | 17 + results/classifier/105/other/1579565 | 99 + 
results/classifier/105/other/1581334 | 250 + results/classifier/105/other/1581796 | 232 + results/classifier/105/other/1581936 | 245 + results/classifier/105/other/1585008 | 64 + results/classifier/105/other/1585533 | 99 + results/classifier/105/other/1587211 | 126 + results/classifier/105/other/1588 | 182 + results/classifier/105/other/1589272 | 106 + results/classifier/105/other/1590 | 134 + results/classifier/105/other/1591611 | 108 + results/classifier/105/other/1591724 | 67 + results/classifier/105/other/1592315 | 84 + results/classifier/105/other/1593756 | 37 + results/classifier/105/other/1594 | 34 + results/classifier/105/other/1594394 | 127 + results/classifier/105/other/1595240 | 75 + results/classifier/105/other/1596160 | 339 + results/classifier/105/other/1596832 | 90 + results/classifier/105/other/1598 | 71 + results/classifier/105/other/1599539 | 55 + results/classifier/105/other/160 | 14 + results/classifier/105/other/1602247 | 90 + results/classifier/105/other/1603 | 86 + results/classifier/105/other/1603580 | 57 + results/classifier/105/other/1603636 | 490 ++ results/classifier/105/other/1605506 | 172 + results/classifier/105/other/1605611 | 76 + results/classifier/105/other/16056596 | 106 + results/classifier/105/other/1607 | 14 + results/classifier/105/other/1609968 | 86 + results/classifier/105/other/1613133 | 119 + results/classifier/105/other/1617929 | 99 + results/classifier/105/other/1618122 | 77 + results/classifier/105/other/1618265 | 19 + results/classifier/105/other/1619991 | 150 + results/classifier/105/other/16201167 | 108 + results/classifier/105/other/1621 | 119 + results/classifier/105/other/1622547 | 409 ++ results/classifier/105/other/16228234 | 1852 +++++ results/classifier/105/other/1623276 | 312 + results/classifier/105/other/1625216 | 98 + results/classifier/105/other/1625295 | 299 + results/classifier/105/other/1626207 | 67 + results/classifier/105/other/1627 | 52 + results/classifier/105/other/1628 | 143 + results/classifier/105/other/1628971 | 77 + results/classifier/105/other/163 | 14 + results/classifier/105/other/1630 | 213 + results/classifier/105/other/1633 | 81 + results/classifier/105/other/1636217 | 243 + results/classifier/105/other/1639394 | 133 + results/classifier/105/other/1640073 | 96 + results/classifier/105/other/1640525 | 132 + results/classifier/105/other/1641861 | 87 + results/classifier/105/other/1642421 | 106 + results/classifier/105/other/1643619 | 67 + results/classifier/105/other/1644754 | 119 + results/classifier/105/other/1647683 | 452 ++ results/classifier/105/other/1652333 | 40 + results/classifier/105/other/1653063 | 82 + results/classifier/105/other/1653384 | 747 ++ results/classifier/105/other/1653419 | 93 + results/classifier/105/other/1654271 | 231 + results/classifier/105/other/1655700 | 44 + results/classifier/105/other/1655764 | 41 + results/classifier/105/other/1656234 | 88 + results/classifier/105/other/1658141 | 85 + results/classifier/105/other/1659267 | 33 + results/classifier/105/other/1659901 | 40 + results/classifier/105/other/1661386 | 1636 +++++ results/classifier/105/other/1665389 | 227 + results/classifier/105/other/1667401 | 85 + results/classifier/105/other/1668103 | 83 + results/classifier/105/other/1670170 | 291 + results/classifier/105/other/1670175 | 304 + results/classifier/105/other/1671 | 1370 ++++ results/classifier/105/other/1672365 | 66 + results/classifier/105/other/1673976 | 319 + results/classifier/105/other/1674925 | 310 + results/classifier/105/other/1678466 | 143 + 
results/classifier/105/other/1679358 | 68 + results/classifier/105/other/1680 | 115 + results/classifier/105/other/1680991 | 175 + results/classifier/105/other/1681439 | 609 ++ results/classifier/105/other/1682093 | 92 + results/classifier/105/other/1684 | 58 + results/classifier/105/other/1684239 | 859 +++ results/classifier/105/other/1685 | 72 + results/classifier/105/other/1685242 | 167 + results/classifier/105/other/1686170 | 424 ++ results/classifier/105/other/1686390 | 128 + results/classifier/105/other/1687569 | 76 + results/classifier/105/other/1691 | 23 + results/classifier/105/other/1691379 | 75 + results/classifier/105/other/1693667 | 151 + results/classifier/105/other/1699277 | 135 + results/classifier/105/other/1701798 | 436 ++ results/classifier/105/other/1701821 | 237 + results/classifier/105/other/1701971 | 98 + results/classifier/105/other/1701973 | 51 + results/classifier/105/other/1702621 | 68 + results/classifier/105/other/1703 | 58 + results/classifier/105/other/1703506 | 242 + results/classifier/105/other/1704 | 82 + results/classifier/105/other/1704638 | 96 + results/classifier/105/other/1705118 | 171 + results/classifier/105/other/1706866 | 62 + results/classifier/105/other/1707297 | 69 + results/classifier/105/other/1708 | 80 + results/classifier/105/other/1708442 | 350 + results/classifier/105/other/1708617 | 47 + results/classifier/105/other/1711316 | 97 + results/classifier/105/other/1713 | 53 + results/classifier/105/other/1713408 | 113 + results/classifier/105/other/1713516 | 102 + results/classifier/105/other/1713825 | 107 + results/classifier/105/other/1714331 | 598 ++ results/classifier/105/other/1715162 | 90 + results/classifier/105/other/1715700 | 552 ++ results/classifier/105/other/1716292 | 75 + results/classifier/105/other/1717708 | 181 + results/classifier/105/other/1718295 | 260 + results/classifier/105/other/1719282 | 144 + results/classifier/105/other/1719870 | 100 + results/classifier/105/other/1720747 | 21 + results/classifier/105/other/1721 | 75 + results/classifier/105/other/1721952 | 57 + results/classifier/105/other/1722074 | 149 + results/classifier/105/other/1722884 | 156 + results/classifier/105/other/1723161 | 242 + results/classifier/105/other/1723488 | 86 + results/classifier/105/other/1726394 | 77 + results/classifier/105/other/1726910 | 52 + results/classifier/105/other/1727 | 94 + results/classifier/105/other/1727250 | 494 ++ results/classifier/105/other/1727737 | 191 + results/classifier/105/other/1728256 | 113 + results/classifier/105/other/1728635 | 207 + results/classifier/105/other/1728643 | 129 + results/classifier/105/other/1728660 | 73 + results/classifier/105/other/1728661 | 131 + results/classifier/105/other/1732679 | 109 + results/classifier/105/other/1732959 | 108 + results/classifier/105/other/1736 | 80 + results/classifier/105/other/1737444 | 146 + results/classifier/105/other/1738 | 162 + results/classifier/105/other/1738507 | 33 + results/classifier/105/other/1738691 | 258 + results/classifier/105/other/1738840 | 205 + results/classifier/105/other/1739304 | 184 + results/classifier/105/other/1739378 | 95 + results/classifier/105/other/1740 | 86 + results/classifier/105/other/1740364 | 539 ++ results/classifier/105/other/1745312 | 2236 ++++++ results/classifier/105/other/1748434 | 85 + results/classifier/105/other/1749016 | 168 + results/classifier/105/other/1749393 | 671 ++ results/classifier/105/other/1750229 | 753 ++ results/classifier/105/other/1753186 | 66 + results/classifier/105/other/1756807 | 140 + 
results/classifier/105/other/1759522 | 126 + results/classifier/105/other/1761798 | 521 ++ results/classifier/105/other/1763 | 25 + results/classifier/105/other/1763536 | 165 + results/classifier/105/other/1765 | 108 + results/classifier/105/other/1768246 | 198 + results/classifier/105/other/1769053 | 1155 +++ results/classifier/105/other/1771238 | 204 + results/classifier/105/other/1772075 | 84 + results/classifier/105/other/1772165 | 378 + results/classifier/105/other/1772166 | 36 + results/classifier/105/other/1774 | 36 + results/classifier/105/other/1774149 | 118 + results/classifier/105/other/17743720 | 779 ++ results/classifier/105/other/1774605 | 110 + results/classifier/105/other/1774677 | 178 + results/classifier/105/other/1774853 | 76 + results/classifier/105/other/1775 | 74 + results/classifier/105/other/1775366 | 39 + results/classifier/105/other/1775555 | 696 ++ results/classifier/105/other/1776478 | 328 + results/classifier/105/other/1776760 | 76 + results/classifier/105/other/1777301 | 78 + results/classifier/105/other/1777777 | 346 + results/classifier/105/other/1778350 | 124 + results/classifier/105/other/1779017 | 108 + results/classifier/105/other/1780928 | 70 + results/classifier/105/other/1782 | 71 + results/classifier/105/other/1783362 | 935 +++ results/classifier/105/other/1784900 | 178 + results/classifier/105/other/1785197 | 179 + results/classifier/105/other/1785308 | 43 + results/classifier/105/other/1785670 | 533 ++ results/classifier/105/other/1787012 | 181 + results/classifier/105/other/1787505 | 108 + results/classifier/105/other/1788665 | 665 ++ results/classifier/105/other/1790018 | 70 + results/classifier/105/other/1790975 | 73 + results/classifier/105/other/1791 | 53 + results/classifier/105/other/1791796 | 1138 +++ results/classifier/105/other/1794285 | 119 + results/classifier/105/other/1796520 | 118 + results/classifier/105/other/1800401 | 147 + results/classifier/105/other/1800786 | 80 + results/classifier/105/other/1802684 | 184 + results/classifier/105/other/1803160 | 189 + results/classifier/105/other/1804323 | 155 + results/classifier/105/other/1805256 | 2821 ++++++++ results/classifier/105/other/1806824 | 142 + results/classifier/105/other/1808563 | 38 + results/classifier/105/other/1809075 | 94 + results/classifier/105/other/1809304 | 71 + results/classifier/105/other/1809665 | 140 + results/classifier/105/other/1810 | 204 + results/classifier/105/other/1810105 | 66 + results/classifier/105/other/1810400 | 53 + results/classifier/105/other/1810433 | 183 + results/classifier/105/other/1810603 | 287 + results/classifier/105/other/1810975 | 42 + results/classifier/105/other/1811244 | 120 + results/classifier/105/other/1811533 | 170 + results/classifier/105/other/1811543 | 94 + results/classifier/105/other/1811653 | 60 + results/classifier/105/other/1812 | 38 + results/classifier/105/other/1812091 | 76 + results/classifier/105/other/1813 | 125 + results/classifier/105/other/1813165 | 426 ++ results/classifier/105/other/1813201 | 108 + results/classifier/105/other/1813398 | 69 + results/classifier/105/other/1813940 | 81 + results/classifier/105/other/1815 | 95 + results/classifier/105/other/1815078 | 72 + results/classifier/105/other/1815371 | 55 + results/classifier/105/other/1815993 | 69 + results/classifier/105/other/1817239 | 103 + results/classifier/105/other/1817525 | 100 + results/classifier/105/other/1817865 | 86 + results/classifier/105/other/1818122 | 121 + results/classifier/105/other/1818207 | 155 + results/classifier/105/other/1818367 | 
377 + results/classifier/105/other/1818937 | 259 + results/classifier/105/other/1819182 | 72 + results/classifier/105/other/1821006 | 126 + results/classifier/105/other/1821054 | 66 + results/classifier/105/other/1821430 | 252 + results/classifier/105/other/1821444 | 166 + results/classifier/105/other/1821595 | 140 + results/classifier/105/other/1821839 | 195 + results/classifier/105/other/1821884 | 42 + results/classifier/105/other/1823169 | 134 + results/classifier/105/other/1823458 | 260 + results/classifier/105/other/1824053 | 100 + results/classifier/105/other/1824331 | 84 + results/classifier/105/other/1825452 | 59 + results/classifier/105/other/1826172 | 91 + results/classifier/105/other/1826422 | 95 + results/classifier/105/other/1827871 | 61 + results/classifier/105/other/1829 | 101 + results/classifier/105/other/1829242 | 73 + results/classifier/105/other/1829459 | 117 + results/classifier/105/other/1829682 | 832 +++ results/classifier/105/other/1831225 | 506 ++ results/classifier/105/other/1831486 | 59 + results/classifier/105/other/1832250 | 75 + results/classifier/105/other/1833101 | 92 + results/classifier/105/other/1833661 | 170 + results/classifier/105/other/1833668 | 64 + results/classifier/105/other/1834399 | 53 + results/classifier/105/other/1835865 | 83 + results/classifier/105/other/1836 | 14 + results/classifier/105/other/1836078 | 250 + results/classifier/105/other/1836192 | 48 + results/classifier/105/other/1836501 | 139 + results/classifier/105/other/1836537 | 27 + results/classifier/105/other/1837218 | 54 + results/classifier/105/other/1837347 | 63 + results/classifier/105/other/1837909 | 143 + results/classifier/105/other/1838277 | 220 + results/classifier/105/other/1838703 | 54 + results/classifier/105/other/1838946 | 634 ++ results/classifier/105/other/1839060 | 120 + results/classifier/105/other/1839325 | 113 + results/classifier/105/other/1839428 | 174 + results/classifier/105/other/1840252 | 80 + results/classifier/105/other/1840922 | 82 + results/classifier/105/other/1841442 | 152 + results/classifier/105/other/1841491 | 71 + results/classifier/105/other/1841592 | 592 ++ results/classifier/105/other/1841990 | 399 ++ results/classifier/105/other/1842038 | 696 ++ results/classifier/105/other/1842530 | 90 + results/classifier/105/other/1842774 | 1328 ++++ results/classifier/105/other/1842925 | 135 + results/classifier/105/other/1843133 | 147 + results/classifier/105/other/1843651 | 111 + results/classifier/105/other/1844053 | 93 + results/classifier/105/other/1844635 | 156 + results/classifier/105/other/1845580 | 85 + results/classifier/105/other/1846451 | 200 + results/classifier/105/other/1847525 | 124 + results/classifier/105/other/1847906 | 51 + results/classifier/105/other/1848 | 38 + results/classifier/105/other/1848901 | 149 + results/classifier/105/other/1849894 | 85 + results/classifier/105/other/1850570 | 372 + results/classifier/105/other/1850751 | 37 + results/classifier/105/other/1851547 | 67 + results/classifier/105/other/1851845 | 41 + results/classifier/105/other/1852115 | 66 + results/classifier/105/other/1853042 | 350 + results/classifier/105/other/1853826 | 444 ++ results/classifier/105/other/1854738 | 57 + results/classifier/105/other/1855 | 72 + results/classifier/105/other/1855072 | 37 + results/classifier/105/other/1855617 | 74 + results/classifier/105/other/1856399 | 59 + results/classifier/105/other/1856706 | 72 + results/classifier/105/other/1856724 | 68 + results/classifier/105/other/1856837 | 121 + 
results/classifier/105/other/1857640 | 148 + results/classifier/105/other/1858 | 23 + results/classifier/105/other/1858046 | 62 + results/classifier/105/other/1858415 | 108 + results/classifier/105/other/1859291 | 170 + results/classifier/105/other/1859378 | 83 + results/classifier/105/other/1859384 | 343 + results/classifier/105/other/1859656 | 812 +++ results/classifier/105/other/1859920 | 119 + results/classifier/105/other/1860553 | 145 + results/classifier/105/other/1860610 | 33 + results/classifier/105/other/1861677 | 87 + results/classifier/105/other/1862415 | 113 + results/classifier/105/other/1863 | 85 + results/classifier/105/other/1863096 | 95 + results/classifier/105/other/1863333 | 100 + results/classifier/105/other/1863486 | 107 + results/classifier/105/other/1863526 | 71 + results/classifier/105/other/1864536 | 64 + results/classifier/105/other/1865099 | 586 ++ results/classifier/105/other/1866892 | 187 + results/classifier/105/other/1866962 | 188 + results/classifier/105/other/1868116 | 521 ++ results/classifier/105/other/1869006 | 354 + results/classifier/105/other/1871250 | 99 + results/classifier/105/other/1871842 | 1343 ++++ results/classifier/105/other/1872237 | 291 + results/classifier/105/other/1874678 | 70 + results/classifier/105/other/1874904 | 39 + results/classifier/105/other/1875139 | 186 + results/classifier/105/other/1876568 | 45 + results/classifier/105/other/1877384 | 298 + results/classifier/105/other/1877418 | 44 + results/classifier/105/other/1878034 | 182 + results/classifier/105/other/1878054 | 237 + results/classifier/105/other/1878057 | 536 ++ results/classifier/105/other/1878067 | 201 + results/classifier/105/other/1878134 | 109 + results/classifier/105/other/1878253 | 167 + results/classifier/105/other/1878255 | 290 + results/classifier/105/other/1878259 | 75 + results/classifier/105/other/1878263 | 62 + results/classifier/105/other/1878323 | 82 + results/classifier/105/other/1878501 | 91 + results/classifier/105/other/1878651 | 124 + results/classifier/105/other/1879223 | 80 + results/classifier/105/other/1879587 | 113 + results/classifier/105/other/1879998 | 47 + results/classifier/105/other/1880066 | 180 + results/classifier/105/other/1880189 | 125 + results/classifier/105/other/1880225 | 1120 +++ results/classifier/105/other/1880326 | 718 ++ results/classifier/105/other/1880332 | 50 + results/classifier/105/other/1880518 | 96 + results/classifier/105/other/1880763 | 66 + results/classifier/105/other/1881231 | 86 + results/classifier/105/other/1881729 | 48 + results/classifier/105/other/1883414 | 76 + results/classifier/105/other/1883560 | 313 + results/classifier/105/other/1883728 | 176 + results/classifier/105/other/1884684 | 251 + results/classifier/105/other/1884693 | 76 + results/classifier/105/other/1884831 | 194 + results/classifier/105/other/1885332 | 369 + results/classifier/105/other/1885827 | 434 ++ results/classifier/105/other/1886155 | 116 + results/classifier/105/other/1886225 | 34 + results/classifier/105/other/1886362 | 801 +++ results/classifier/105/other/1887303 | 238 + results/classifier/105/other/1887306 | 110 + results/classifier/105/other/1887745 | 39 + results/classifier/105/other/1887854 | 84 + results/classifier/105/other/1888467 | 89 + results/classifier/105/other/1888606 | 858 +++ results/classifier/105/other/1888663 | 37 + results/classifier/105/other/1888714 | 79 + results/classifier/105/other/1888918 | 184 + results/classifier/105/other/1889033 | 144 + results/classifier/105/other/1889621 | 412 ++ 
results/classifier/105/other/1889943 | 381 + results/classifier/105/other/1890069 | 166 + results/classifier/105/other/1890152 | 71 + results/classifier/105/other/1890311 | 66 + results/classifier/105/other/1890333 | 168 + results/classifier/105/other/1890360 | 265 + results/classifier/105/other/1890370 | 88 + results/classifier/105/other/1890395 | 182 + results/classifier/105/other/1890545 | 405 ++ results/classifier/105/other/1891341 | 233 + results/classifier/105/other/1892540 | 1290 ++++ results/classifier/105/other/1892541 | 61 + results/classifier/105/other/1892544 | 503 ++ results/classifier/105/other/1892962 | 149 + results/classifier/105/other/1892978 | 821 +++ results/classifier/105/other/1893003 | 92 + results/classifier/105/other/1893691 | 144 + results/classifier/105/other/1893758 | 86 + results/classifier/105/other/1893807 | 59 + results/classifier/105/other/1894071 | 126 + results/classifier/105/other/1894869 | 160 + results/classifier/105/other/1895 | 159 + results/classifier/105/other/1895053 | 383 + results/classifier/105/other/1895080 | 1340 ++++ results/classifier/105/other/1895122 | 95 + results/classifier/105/other/1895310 | 120 + results/classifier/105/other/1895399 | 140 + results/classifier/105/other/1895471 | 92 + results/classifier/105/other/1896298 | 190 + results/classifier/105/other/1896561 | 72 + results/classifier/105/other/1897481 | 2089 ++++++ results/classifier/105/other/1898011 | 95 + results/classifier/105/other/1898490 | 155 + results/classifier/105/other/1899082 | 148 + results/classifier/105/other/1899539 | 46 + results/classifier/105/other/1900155 | 107 + results/classifier/105/other/1900241 | 250 + results/classifier/105/other/1900779 | 68 + results/classifier/105/other/1902 | 78 + results/classifier/105/other/1902112 | 61 + results/classifier/105/other/1902365 | 436 ++ results/classifier/105/other/1902451 | 67 + results/classifier/105/other/1902470 | 340 + results/classifier/105/other/1903 | 54 + results/classifier/105/other/1904259 | 68 + results/classifier/105/other/1904315 | 71 + results/classifier/105/other/1904331 | 113 + results/classifier/105/other/1904652 | 98 + results/classifier/105/other/1905 | 16 + results/classifier/105/other/1905037 | 113 + results/classifier/105/other/1905297 | 131 + results/classifier/105/other/1905444 | 67 + results/classifier/105/other/1905651 | 111 + results/classifier/105/other/1906193 | 171 + results/classifier/105/other/1906694 | 185 + results/classifier/105/other/1907427 | 58 + results/classifier/105/other/1907497 | 95 + results/classifier/105/other/1907817 | 65 + results/classifier/105/other/1907909 | 77 + results/classifier/105/other/1907938 | 97 + results/classifier/105/other/1908369 | 129 + results/classifier/105/other/1908416 | 61 + results/classifier/105/other/1908489 | 147 + results/classifier/105/other/1908515 | 93 + results/classifier/105/other/1908781 | 55 + results/classifier/105/other/1909247 | 1605 +++++ results/classifier/105/other/1909256 | 39 + results/classifier/105/other/1909770 | 255 + results/classifier/105/other/1909921 | 113 + results/classifier/105/other/1910505 | 89 + results/classifier/105/other/1910586 | 179 + results/classifier/105/other/1910603 | 234 + results/classifier/105/other/1910696 | 76 + results/classifier/105/other/1910826 | 124 + results/classifier/105/other/1911075 | 95 + results/classifier/105/other/1911216 | 101 + results/classifier/105/other/1911351 | 120 + results/classifier/105/other/1911839 | 86 + results/classifier/105/other/1912170 | 94 + 
results/classifier/105/other/1912777 | 126 + results/classifier/105/other/1912790 | 82 + results/classifier/105/other/1913505 | 192 + results/classifier/105/other/1913510 | 123 + results/classifier/105/other/1913668 | 53 + results/classifier/105/other/1913669 | 111 + results/classifier/105/other/1913873 | 83 + results/classifier/105/other/1913914 | 73 + results/classifier/105/other/1913915 | 64 + results/classifier/105/other/1913916 | 89 + results/classifier/105/other/1913919 | 171 + results/classifier/105/other/1913923 | 136 + results/classifier/105/other/1914021 | 118 + results/classifier/105/other/1914236 | 119 + results/classifier/105/other/1914282 | 128 + results/classifier/105/other/1914294 | 68 + results/classifier/105/other/1914638 | 599 ++ results/classifier/105/other/1914849 | 112 + results/classifier/105/other/1914870 | 134 + results/classifier/105/other/1915431 | 57 + results/classifier/105/other/1915531 | 114 + results/classifier/105/other/1915535 | 96 + results/classifier/105/other/1916112 | 172 + results/classifier/105/other/1916344 | 118 + results/classifier/105/other/1916394 | 98 + results/classifier/105/other/1916506 | 62 + results/classifier/105/other/1916775 | 105 + results/classifier/105/other/1917082 | 153 + results/classifier/105/other/1917085 | 80 + results/classifier/105/other/1917161 | 107 + results/classifier/105/other/1917442 | 130 + results/classifier/105/other/1917542 | 184 + results/classifier/105/other/1917591 | 74 + results/classifier/105/other/1918 | 62 + results/classifier/105/other/1918084 | 246 + results/classifier/105/other/1918302 | 126 + results/classifier/105/other/1918321 | 370 + results/classifier/105/other/1918917 | 156 + results/classifier/105/other/1918975 | 63 + results/classifier/105/other/1919035 | 150 + results/classifier/105/other/1919253 | 106 + results/classifier/105/other/1920211 | 59 + results/classifier/105/other/1920672 | 77 + results/classifier/105/other/1920913 | 445 ++ results/classifier/105/other/1921 | 43 + results/classifier/105/other/1921082 | 62 + results/classifier/105/other/1921664 | 1250 ++++ results/classifier/105/other/1922325 | 43 + results/classifier/105/other/1922611 | 105 + results/classifier/105/other/1922773 | 143 + results/classifier/105/other/1923497 | 137 + results/classifier/105/other/1923583 | 85 + results/classifier/105/other/1924 | 79 + results/classifier/105/other/1924912 | 193 + results/classifier/105/other/1925094 | 51 + results/classifier/105/other/1925417 | 105 + results/classifier/105/other/1925512 | 131 + results/classifier/105/other/1926202 | 116 + results/classifier/105/other/1926231 | 86 + results/classifier/105/other/1926497 | 141 + results/classifier/105/other/1926521 | 116 + results/classifier/105/other/1927 | 537 ++ results/classifier/105/other/1928 | 78 + results/classifier/105/other/1929710 | 262 + results/classifier/105/other/1935 | 18 + results/classifier/105/other/1944 | 84 + results/classifier/105/other/1947 | 33 + results/classifier/105/other/195 | 14 + results/classifier/105/other/1951 | 151 + results/classifier/105/other/1953 | 159 + results/classifier/105/other/1967 | 14 + results/classifier/105/other/1967248 | 57 + results/classifier/105/other/1972 | 52 + results/classifier/105/other/1976 | 62 + results/classifier/105/other/1987 | 61 + results/classifier/105/other/1992 | 922 +++ results/classifier/105/other/1993 | 63 + results/classifier/105/other/2029 | 14 + results/classifier/105/other/2055003 | 113 + results/classifier/105/other/2058 | 65 + results/classifier/105/other/206 | 14 + 
results/classifier/105/other/2063 | 68 + results/classifier/105/other/2065579 | 255 + results/classifier/105/other/2069 | 365 + results/classifier/105/other/2078790 | 673 ++ results/classifier/105/other/2083 | 124 + results/classifier/105/other/210 | 14 + results/classifier/105/other/2100 | 19 + results/classifier/105/other/2105 | 14 + results/classifier/105/other/2106 | 68 + results/classifier/105/other/2115 | 67 + results/classifier/105/other/21221931 | 336 + results/classifier/105/other/21247035 | 1329 ++++ results/classifier/105/other/2133 | 68 + results/classifier/105/other/2134 | 14 + results/classifier/105/other/2169 | 406 ++ results/classifier/105/other/2170 | 57 + results/classifier/105/other/2194 | 107 + results/classifier/105/other/2208 | 101 + results/classifier/105/other/2217 | 14 + results/classifier/105/other/2224 | 218 + results/classifier/105/other/2230 | 154 + results/classifier/105/other/2255 | 14 + results/classifier/105/other/2261 | 100 + results/classifier/105/other/2262 | 212 + results/classifier/105/other/2267 | 565 ++ results/classifier/105/other/2269 | 54 + results/classifier/105/other/2273 | 58 + results/classifier/105/other/2291 | 195 + results/classifier/105/other/2296 | 110 + results/classifier/105/other/2297 | 14 + results/classifier/105/other/2299 | 216 + results/classifier/105/other/2308 | 92 + results/classifier/105/other/2324 | 60 + results/classifier/105/other/2330 | 86 + results/classifier/105/other/23300761 | 321 + results/classifier/105/other/23448582 | 273 + results/classifier/105/other/2346 | 85 + results/classifier/105/other/2353 | 69 + results/classifier/105/other/2355 | 92 + results/classifier/105/other/2358 | 63 + results/classifier/105/other/2370 | 24 + results/classifier/105/other/2371 | 65 + results/classifier/105/other/2372 | 122 + results/classifier/105/other/2374 | 124 + results/classifier/105/other/2379 | 139 + results/classifier/105/other/2380 | 118 + results/classifier/105/other/2408 | 250 + results/classifier/105/other/2414 | 130 + results/classifier/105/other/2422 | 82 + results/classifier/105/other/2427 | 154 + results/classifier/105/other/2432 | 83 + results/classifier/105/other/2435 | 33 + results/classifier/105/other/2440 | 125 + results/classifier/105/other/2441 | 113 + results/classifier/105/other/2442 | 160 + results/classifier/105/other/2462 | 52 + results/classifier/105/other/2474 | 109 + results/classifier/105/other/2485 | 60 + results/classifier/105/other/2488 | 78 + results/classifier/105/other/2489 | 105 + results/classifier/105/other/249 | 14 + results/classifier/105/other/2498 | 64 + results/classifier/105/other/2512 | 58 + results/classifier/105/other/2515 | 59 + results/classifier/105/other/2519 | 14 + results/classifier/105/other/2526 | 52 + results/classifier/105/other/2558 | 78 + results/classifier/105/other/2560 | 118 + results/classifier/105/other/2570 | 68 + results/classifier/105/other/2574 | 62 + results/classifier/105/other/25892827 | 1085 +++ results/classifier/105/other/2593 | 70 + results/classifier/105/other/2599 | 14 + results/classifier/105/other/2603 | 115 + results/classifier/105/other/2611 | 16 + results/classifier/105/other/2612 | 95 + results/classifier/105/other/2622 | 280 + results/classifier/105/other/2625 | 96 + results/classifier/105/other/2632 | 96 + results/classifier/105/other/2634 | 190 + results/classifier/105/other/2656 | 14 + results/classifier/105/other/2667 | 225 + results/classifier/105/other/2702 | 66 + results/classifier/105/other/2710 | 139 + results/classifier/105/other/2718 | 
115 + results/classifier/105/other/2719 | 14 + results/classifier/105/other/2732 | 52 + results/classifier/105/other/274 | 14 + results/classifier/105/other/2742 | 79 + results/classifier/105/other/2744 | 18 + results/classifier/105/other/2753 | 137 + results/classifier/105/other/2760 | 14 + results/classifier/105/other/2773 | 73 + results/classifier/105/other/2775 | 147 + results/classifier/105/other/2789 | 14 + results/classifier/105/other/2791 | 76 + results/classifier/105/other/2792 | 83 + results/classifier/105/other/2819 | 130 + results/classifier/105/other/2832 | 112 + results/classifier/105/other/2836 | 56 + results/classifier/105/other/2852 | 93 + results/classifier/105/other/2853 | 67 + results/classifier/105/other/2856 | 103 + results/classifier/105/other/2857 | 113 + results/classifier/105/other/2866 | 198 + results/classifier/105/other/2875 | 42 + results/classifier/105/other/2898 | 128 + results/classifier/105/other/2915 | 42 + results/classifier/105/other/2919 | 26 + results/classifier/105/other/2921 | 381 + results/classifier/105/other/2934 | 77 + results/classifier/105/other/2943 | 20 + results/classifier/105/other/2956 | 227 + results/classifier/105/other/2969 | 225 + results/classifier/105/other/2972 | 650 ++ results/classifier/105/other/2975 | 81 + results/classifier/105/other/31349848 | 162 + results/classifier/105/other/315 | 14 + results/classifier/105/other/32484936 | 231 + results/classifier/105/other/35170175 | 529 ++ results/classifier/105/other/396 | 14 + results/classifier/105/other/42974450 | 437 ++ results/classifier/105/other/453617 | 112 + results/classifier/105/other/463 | 38 + results/classifier/105/other/478 | 412 ++ results/classifier/105/other/486 | 14 + results/classifier/105/other/495 | 14 + results/classifier/105/other/497273 | 88 + results/classifier/105/other/498035 | 99 + results/classifier/105/other/507 | 69 + results/classifier/105/other/518 | 14 + results/classifier/105/other/522 | 67 + results/classifier/105/other/523 | 139 + results/classifier/105/other/545089 | 87 + results/classifier/105/other/546458 | 345 + results/classifier/105/other/546638 | 79 + results/classifier/105/other/55247116 | 1318 ++++ results/classifier/105/other/55367348 | 540 ++ results/classifier/105/other/55753058 | 301 + results/classifier/105/other/56309929 | 188 + results/classifier/105/other/568445 | 143 + results/classifier/105/other/56937788 | 352 + results/classifier/105/other/57 | 14 + results/classifier/105/other/57756589 | 1429 ++++ results/classifier/105/other/583 | 14 + results/classifier/105/other/584516 | 69 + results/classifier/105/other/588 | 78 + results/classifier/105/other/588731 | 167 + results/classifier/105/other/589315 | 49 + results/classifier/105/other/591666 | 180 + results/classifier/105/other/592 | 121 + results/classifier/105/other/594 | 14 + results/classifier/105/other/59540920 | 384 + results/classifier/105/other/597362 | 91 + results/classifier/105/other/601946 | 90 + results/classifier/105/other/607 | 72 + results/classifier/105/other/607204 | 57 + results/classifier/105/other/611 | 140 + results/classifier/105/other/624 | 350 + results/classifier/105/other/634 | 94 + results/classifier/105/other/636315 | 445 ++ results/classifier/105/other/638806 | 115 + results/classifier/105/other/643465 | 65 + results/classifier/105/other/64571620 | 793 +++ results/classifier/105/other/647 | 314 + results/classifier/105/other/653 | 72 + results/classifier/105/other/657006 | 195 + results/classifier/105/other/65781993 | 2801 ++++++++ 
results/classifier/105/other/66743673 | 372 + results/classifier/105/other/670769 | 314 + results/classifier/105/other/672934 | 95 + results/classifier/105/other/68897003 | 724 ++ results/classifier/105/other/696094 | 157 + results/classifier/105/other/698 | 371 + results/classifier/105/other/70021271 | 7456 ++++++++++++++++++++ results/classifier/105/other/70416488 | 1187 ++++ results/classifier/105/other/706 | 51 + results/classifier/105/other/712337 | 62 + results/classifier/105/other/712416 | 89 + results/classifier/105/other/714629 | 23 + results/classifier/105/other/721825 | 109 + results/classifier/105/other/727 | 169 + results/classifier/105/other/732155 | 144 + results/classifier/105/other/736 | 60 + results/classifier/105/other/741887 | 156 + results/classifier/105/other/742 | 57 + results/classifier/105/other/74715356 | 134 + results/classifier/105/other/754 | 220 + results/classifier/105/other/758 | 59 + results/classifier/105/other/761469 | 176 + results/classifier/105/other/766 | 40 + results/classifier/105/other/780 | 67 + results/classifier/105/other/788697 | 247 + results/classifier/105/other/79834768 | 417 ++ results/classifier/105/other/804517 | 110 + results/classifier/105/other/806 | 76 + results/classifier/105/other/810 | 84 + results/classifier/105/other/816 | 60 + results/classifier/105/other/81775929 | 243 + results/classifier/105/other/818647 | 336 + results/classifier/105/other/818673 | 795 +++ results/classifier/105/other/821078 | 54 + results/classifier/105/other/825 | 51 + results/classifier/105/other/833658 | 36 + results/classifier/105/other/844 | 57 + results/classifier/105/other/845 | 72 + results/classifier/105/other/851 | 246 + results/classifier/105/other/85542195 | 128 + results/classifier/105/other/855800 | 95 + results/classifier/105/other/856 | 74 + results/classifier/105/other/878 | 55 + results/classifier/105/other/880 | 14 + results/classifier/105/other/881 | 33 + results/classifier/105/other/882 | 486 ++ results/classifier/105/other/88225572 | 2908 ++++++++ results/classifier/105/other/88281850 | 289 + results/classifier/105/other/886147 | 27 + results/classifier/105/other/886621 | 312 + results/classifier/105/other/887883 | 191 + results/classifier/105/other/889827 | 219 + results/classifier/105/other/891525 | 72 + results/classifier/105/other/893208 | 554 ++ results/classifier/105/other/897193 | 86 + results/classifier/105/other/897466 | 68 + results/classifier/105/other/897750 | 388 + results/classifier/105/other/899140 | 800 +++ results/classifier/105/other/906804 | 60 + results/classifier/105/other/907063 | 72 + results/classifier/105/other/915 | 392 + results/classifier/105/other/917645 | 28 + results/classifier/105/other/917824 | 86 + results/classifier/105/other/918791 | 290 + results/classifier/105/other/921208 | 450 ++ results/classifier/105/other/925405 | 109 + results/classifier/105/other/928 | 97 + results/classifier/105/other/92957605 | 426 ++ results/classifier/105/other/932487 | 630 ++ results/classifier/105/other/932490 | 38 + results/classifier/105/other/934 | 58 + results/classifier/105/other/937 | 81 + results/classifier/105/other/939027 | 1183 ++++ results/classifier/105/other/943 | 14 + results/classifier/105/other/948 | 45 + results/classifier/105/other/949 | 327 + results/classifier/105/other/951 | 208 + results/classifier/105/other/95154278 | 163 + results/classifier/105/other/954099 | 95 + results/classifier/105/other/959992 | 64 + results/classifier/105/other/960378 | 62 + results/classifier/105/other/965133 | 158 + 
results/classifier/105/other/965327 | 791 +++ results/classifier/105/other/96782458 | 1007 +++ results/classifier/105/other/977391 | 730 ++ results/classifier/105/other/986770 | 88 + results/classifier/105/other/987 | 62 + results/classifier/105/other/988909 | 36 + results/classifier/105/other/989 | 113 + results/classifier/105/other/993 | 94 + results/classifier/105/other/994662 | 177 + results/classifier/105/semantic/1013714 | 66 + results/classifier/105/semantic/1013888 | 117 + results/classifier/105/semantic/1034 | 30 + results/classifier/105/semantic/1034423 | 367 + results/classifier/105/semantic/1038 | 24 + results/classifier/105/semantic/1042654 | 103 + results/classifier/105/semantic/1077116 | 113 + results/classifier/105/semantic/1077838 | 199 + results/classifier/105/semantic/1094564 | 367 + results/classifier/105/semantic/1094786 | 88 + results/classifier/105/semantic/1102027 | 125 + results/classifier/105/semantic/1152 | 41 + results/classifier/105/semantic/1156313 | 128 + results/classifier/105/semantic/1180924 | 65 + results/classifier/105/semantic/1212 | 22 + results/classifier/105/semantic/1223467 | 60 + results/classifier/105/semantic/12360755 | 304 + results/classifier/105/semantic/124 | 14 + results/classifier/105/semantic/1242765 | 89 + results/classifier/105/semantic/1285505 | 115 + results/classifier/105/semantic/1288385 | 103 + results/classifier/105/semantic/1288620 | 179 + results/classifier/105/semantic/1299190 | 76 + results/classifier/105/semantic/1299858 | 43 + results/classifier/105/semantic/1307225 | 485 ++ results/classifier/105/semantic/1310324 | 93 + results/classifier/105/semantic/1338957 | 31 + results/classifier/105/semantic/1347555 | 126 + results/classifier/105/semantic/1349722 | 60 + results/classifier/105/semantic/1366836 | 88 + results/classifier/105/semantic/1368815 | 288 + results/classifier/105/semantic/1370 | 26 + results/classifier/105/semantic/1371 | 32 + results/classifier/105/semantic/1372 | 33 + results/classifier/105/semantic/1373 | 33 + results/classifier/105/semantic/1374 | 35 + results/classifier/105/semantic/1375 | 32 + results/classifier/105/semantic/1395217 | 330 + results/classifier/105/semantic/1405 | 134 + results/classifier/105/semantic/1407808 | 32 + results/classifier/105/semantic/1425597 | 44 + results/classifier/105/semantic/1428352 | 63 + results/classifier/105/semantic/1469 | 61 + results/classifier/105/semantic/1477683 | 33 + results/classifier/105/semantic/1478376 | 62 + results/classifier/105/semantic/1497204 | 73 + results/classifier/105/semantic/1497711 | 50 + results/classifier/105/semantic/1524546 | 63 + results/classifier/105/semantic/1528239 | 66 + results/classifier/105/semantic/1528718 | 54 + results/classifier/105/semantic/1529449 | 208 + results/classifier/105/semantic/1534 | 14 + results/classifier/105/semantic/1555076 | 62 + results/classifier/105/semantic/1561 | 40 + results/classifier/105/semantic/1569 | 40 + results/classifier/105/semantic/1588328 | 870 +++ results/classifier/105/semantic/1589564 | 52 + results/classifier/105/semantic/1596009 | 53 + results/classifier/105/semantic/1603693 | 141 + results/classifier/105/semantic/1617114 | 61 + results/classifier/105/semantic/1629483 | 53 + results/classifier/105/semantic/1670377 | 112 + results/classifier/105/semantic/1677492 | 62 + results/classifier/105/semantic/1682681 | 90 + results/classifier/105/semantic/1689499 | 100 + results/classifier/105/semantic/1695286 | 37 + results/classifier/105/semantic/1696180 | 68 + 
results/classifier/105/semantic/1701974 | 50 + results/classifier/105/semantic/1704658 | 69 + results/classifier/105/semantic/1725707 | 97 + results/classifier/105/semantic/1735049 | 66 + results/classifier/105/semantic/1738545 | 62 + results/classifier/105/semantic/1740219 | 192 + results/classifier/105/semantic/1743191 | 490 ++ results/classifier/105/semantic/1745316 | 208 + results/classifier/105/semantic/1750899 | 51 + results/classifier/105/semantic/1751264 | 83 + results/classifier/105/semantic/1751674 | 81 + results/classifier/105/semantic/1753309 | 118 + results/classifier/105/semantic/1760262 | 72 + results/classifier/105/semantic/1761027 | 49 + results/classifier/105/semantic/1777672 | 123 + results/classifier/105/semantic/1779634 | 91 + results/classifier/105/semantic/1790617 | 76 + results/classifier/105/semantic/1791680 | 88 + results/classifier/105/semantic/1798659 | 53 + results/classifier/105/semantic/1798780 | 104 + results/classifier/105/semantic/1805913 | 154 + results/classifier/105/semantic/1809252 | 60 + results/classifier/105/semantic/1809546 | 90 + results/classifier/105/semantic/1811711 | 101 + results/classifier/105/semantic/1813305 | 75 + results/classifier/105/semantic/1828508 | 108 + results/classifier/105/semantic/1829964 | 101 + results/classifier/105/semantic/1834 | 197 + results/classifier/105/semantic/1843151 | 237 + results/classifier/105/semantic/1843941 | 28 + results/classifier/105/semantic/1846 | 38 + results/classifier/105/semantic/1846427 | 721 ++ results/classifier/105/semantic/1848231 | 41 + results/classifier/105/semantic/185 | 14 + results/classifier/105/semantic/1851095 | 85 + results/classifier/105/semantic/1856335 | 1085 +++ results/classifier/105/semantic/1859359 | 107 + results/classifier/105/semantic/186 | 14 + results/classifier/105/semantic/1860575 | 73 + results/classifier/105/semantic/1865252 | 49 + results/classifier/105/semantic/1867519 | 263 + results/classifier/105/semantic/1868055 | 149 + results/classifier/105/semantic/1868527 | 44 + results/classifier/105/semantic/1870098 | 54 + results/classifier/105/semantic/1872847 | 62 + results/classifier/105/semantic/1875702 | 44 + results/classifier/105/semantic/1877688 | 52 + results/classifier/105/semantic/1878348 | 108 + results/classifier/105/semantic/1879672 | 664 ++ results/classifier/105/semantic/1879955 | 75 + results/classifier/105/semantic/1883268 | 112 + results/classifier/105/semantic/1883400 | 60 + results/classifier/105/semantic/1884095 | 70 + results/classifier/105/semantic/1888964 | 85 + results/classifier/105/semantic/1896 | 67 + results/classifier/105/semantic/1898215 | 98 + results/classifier/105/semantic/1900 | 14 + results/classifier/105/semantic/1905562 | 80 + results/classifier/105/semantic/1905979 | 68 + results/classifier/105/semantic/1906156 | 68 + results/classifier/105/semantic/1907952 | 195 + results/classifier/105/semantic/1907969 | 164 + results/classifier/105/semantic/1908551 | 93 + results/classifier/105/semantic/1912065 | 98 + results/classifier/105/semantic/1914986 | 101 + results/classifier/105/semantic/1915794 | 37 + results/classifier/105/semantic/1917940 | 51 + results/classifier/105/semantic/1920602 | 95 + results/classifier/105/semantic/1921468 | 322 + results/classifier/105/semantic/1921948 | 606 ++ results/classifier/105/semantic/1922391 | 140 + results/classifier/105/semantic/1923197 | 149 + results/classifier/105/semantic/1924603 | 112 + results/classifier/105/semantic/1939 | 77 + results/classifier/105/semantic/1948 | 16 + 
results/classifier/105/semantic/2007 | 42 + results/classifier/105/semantic/2064 | 25 + results/classifier/105/semantic/2103 | 14 + results/classifier/105/semantic/2185 | 14 + results/classifier/105/semantic/2253 | 14 + results/classifier/105/semantic/2280 | 14 + results/classifier/105/semantic/237164 | 108 + results/classifier/105/semantic/2378 | 41 + results/classifier/105/semantic/2393 | 33 + results/classifier/105/semantic/2434 | 42 + results/classifier/105/semantic/2449 | 14 + results/classifier/105/semantic/2457 | 14 + results/classifier/105/semantic/2460 | 21 + results/classifier/105/semantic/2562 | 65 + results/classifier/105/semantic/2582 | 36 + results/classifier/105/semantic/2649 | 53 + results/classifier/105/semantic/2704 | 315 + results/classifier/105/semantic/2911 | 78 + results/classifier/105/semantic/2953 | 79 + results/classifier/105/semantic/304636 | 103 + results/classifier/105/semantic/369 | 14 + results/classifier/105/semantic/490484 | 76 + results/classifier/105/semantic/526653 | 57 + results/classifier/105/semantic/568614 | 100 + results/classifier/105/semantic/600 | 14 + results/classifier/105/semantic/639651 | 256 + results/classifier/105/semantic/645662 | 620 ++ results/classifier/105/semantic/691424 | 101 + results/classifier/105/semantic/714 | 56 + results/classifier/105/semantic/757702 | 854 +++ results/classifier/105/semantic/855630 | 44 + results/classifier/105/semantic/876 | 47 + results/classifier/105/semantic/908 | 14 + results/classifier/105/semantic/935 | 72 + results/classifier/105/semantic/969 | 14 + results/classifier/105/socket/1020484 | 35 + results/classifier/105/socket/1030666 | 114 + results/classifier/105/socket/1031 | 55 + results/classifier/105/socket/1055 | 29 + results/classifier/105/socket/1064631 | 31 + results/classifier/105/socket/1067119 | 26 + results/classifier/105/socket/1067517 | 94 + results/classifier/105/socket/1075272 | 30 + results/classifier/105/socket/1075339 | 27 + results/classifier/105/socket/1080086 | 224 + results/classifier/105/socket/1090 | 28 + results/classifier/105/socket/1185395 | 113 + results/classifier/105/socket/1213196 | 37 + results/classifier/105/socket/1228285 | 79 + results/classifier/105/socket/1253563 | 55 + results/classifier/105/socket/1264 | 14 + results/classifier/105/socket/1299566 | 31 + results/classifier/105/socket/1381639 | 32 + results/classifier/105/socket/1410288 | 79 + results/classifier/105/socket/1450881 | 125 + results/classifier/105/socket/1463812 | 190 + results/classifier/105/socket/1542965 | 23 + results/classifier/105/socket/1567 | 47 + results/classifier/105/socket/1572329 | 104 + results/classifier/105/socket/1585432 | 45 + results/classifier/105/socket/1586756 | 141 + results/classifier/105/socket/1612908 | 25 + results/classifier/105/socket/1663079 | 40 + results/classifier/105/socket/1673373 | 123 + results/classifier/105/socket/1700380 | 34 + results/classifier/105/socket/1708462 | 39 + results/classifier/105/socket/1721220 | 49 + results/classifier/105/socket/1721222 | 32 + results/classifier/105/socket/1721224 | 39 + results/classifier/105/socket/1725267 | 122 + results/classifier/105/socket/1744009 | 41 + results/classifier/105/socket/1754605 | 35 + results/classifier/105/socket/1756 | 56 + results/classifier/105/socket/1759338 | 140 + results/classifier/105/socket/1777 | 14 + results/classifier/105/socket/1781280 | 27 + results/classifier/105/socket/1796754 | 51 + results/classifier/105/socket/1801933 | 146 + results/classifier/105/socket/1823790 | 215 + 
results/classifier/105/socket/1826401 | 57 + results/classifier/105/socket/1828207 | 52 + results/classifier/105/socket/1828608 | 56 + results/classifier/105/socket/1829779 | 93 + results/classifier/105/socket/1829945 | 62 + results/classifier/105/socket/1836453 | 50 + results/classifier/105/socket/1837 | 48 + results/classifier/105/socket/1837651 | 34 + results/classifier/105/socket/1843590 | 49 + results/classifier/105/socket/1857811 | 202 + results/classifier/105/socket/1861884 | 50 + results/classifier/105/socket/1867601 | 34 + results/classifier/105/socket/1868221 | 76 + results/classifier/105/socket/1870331 | 91 + results/classifier/105/socket/1877015 | 192 + results/classifier/105/socket/1882784 | 78 + results/classifier/105/socket/1885718 | 80 + results/classifier/105/socket/1886 | 30 + results/classifier/105/socket/1887604 | 79 + results/classifier/105/socket/1888303 | 67 + results/classifier/105/socket/1898084 | 49 + results/classifier/105/socket/1901440 | 137 + results/classifier/105/socket/1904 | 29 + results/classifier/105/socket/1906948 | 73 + results/classifier/105/socket/1907926 | 51 + results/classifier/105/socket/1923692 | 29 + results/classifier/105/socket/1925449 | 59 + results/classifier/105/socket/1949 | 25 + results/classifier/105/socket/2191 | 14 + results/classifier/105/socket/2254 | 14 + results/classifier/105/socket/2292 | 32 + results/classifier/105/socket/2341 | 45 + results/classifier/105/socket/2444 | 14 + results/classifier/105/socket/2584 | 29 + results/classifier/105/socket/2624 | 52 + results/classifier/105/socket/2678 | 22 + results/classifier/105/socket/284 | 14 + results/classifier/105/socket/2867 | 26 + results/classifier/105/socket/2876 | 27 + results/classifier/105/socket/2925 | 38 + results/classifier/105/socket/323 | 14 + results/classifier/105/socket/347 | 14 + results/classifier/105/socket/588735 | 51 + results/classifier/105/socket/761471 | 28 + results/classifier/105/socket/778032 | 108 + results/classifier/105/socket/796202 | 83 + results/classifier/105/socket/833 | 55 + results/classifier/105/socket/872 | 14 + results/classifier/105/socket/939995 | 114 + results/classifier/105/vnc/1004050 | 54 + results/classifier/105/vnc/1047576 | 82 + results/classifier/105/vnc/11357571 | 55 + results/classifier/105/vnc/1136477 | 30 + results/classifier/105/vnc/1150 | 97 + results/classifier/105/vnc/1162644 | 144 + results/classifier/105/vnc/1183 | 144 + results/classifier/105/vnc/1207686 | 256 + results/classifier/105/vnc/1246990 | 66 + results/classifier/105/vnc/1321028 | 98 + results/classifier/105/vnc/1339 | 29 + results/classifier/105/vnc/1354279 | 83 + results/classifier/105/vnc/1388735 | 60 + results/classifier/105/vnc/1391942 | 70 + results/classifier/105/vnc/1393486 | 32 + results/classifier/105/vnc/1453608 | 24 + results/classifier/105/vnc/1453612 | 34 + results/classifier/105/vnc/1453613 | 30 + results/classifier/105/vnc/1454 | 75 + results/classifier/105/vnc/1455912 | 53 + results/classifier/105/vnc/1467 | 14 + results/classifier/105/vnc/1486278 | 36 + results/classifier/105/vnc/1490853 | 234 + results/classifier/105/vnc/1516446 | 579 ++ results/classifier/105/vnc/1548 | 51 + results/classifier/105/vnc/1580 | 57 + results/classifier/105/vnc/1586194 | 64 + results/classifier/105/vnc/1604 | 76 + results/classifier/105/vnc/1618431 | 273 + results/classifier/105/vnc/1637447 | 30 + results/classifier/105/vnc/1649236 | 40 + results/classifier/105/vnc/1661176 | 22 + results/classifier/105/vnc/1661815 | 56 + results/classifier/105/vnc/1673 | 62 + 
 results/classifier/105/vnc/1686 | 56 +
 results/classifier/105/vnc/1693649 | 103 +
 results/classifier/105/vnc/1696353 | 114 +
 results/classifier/105/vnc/170 | 14 +
 results/classifier/105/vnc/1705717 | 76 +
 results/classifier/105/vnc/1715186 | 65 +
 results/classifier/105/vnc/1721221 | 104 +
 results/classifier/105/vnc/1732671 | 31 +
 results/classifier/105/vnc/1739371 | 208 +
 results/classifier/105/vnc/1752646 | 41 +
 results/classifier/105/vnc/1762179 | 103 +
 results/classifier/105/vnc/1766904 | 60 +
 results/classifier/105/vnc/1771042 | 81 +
 results/classifier/105/vnc/1785203 | 59 +
 results/classifier/105/vnc/1785734 | 129 +
 results/classifier/105/vnc/1786343 | 55 +
 results/classifier/105/vnc/1795100 | 62 +
 results/classifier/105/vnc/1802465 | 80 +
 results/classifier/105/vnc/1806040 | 42 +
 results/classifier/105/vnc/1816819 | 73 +
 results/classifier/105/vnc/1819108 | 52 +
 results/classifier/105/vnc/1829576 | 89 +
 results/classifier/105/vnc/1856549 | 128 +
 results/classifier/105/vnc/1867786 | 148 +
 results/classifier/105/vnc/1870911 | 104 +
 results/classifier/105/vnc/1872790 | 84 +
 results/classifier/105/vnc/1888431 | 98 +
 results/classifier/105/vnc/1893744 | 140 +
 results/classifier/105/vnc/1903752 | 35 +
 results/classifier/105/vnc/1906516 | 137 +
 results/classifier/105/vnc/1912780 | 197 +
 results/classifier/105/vnc/1923648 | 72 +
 results/classifier/105/vnc/1923861 | 165 +
 results/classifier/105/vnc/1988 | 39 +
 results/classifier/105/vnc/2001 | 56 +
 results/classifier/105/vnc/2171 | 38 +
 results/classifier/105/vnc/2311 | 28 +
 results/classifier/105/vnc/2490 | 64 +
 results/classifier/105/vnc/2492 | 33 +
 results/classifier/105/vnc/2608 | 22 +
 results/classifier/105/vnc/2646 | 38 +
 results/classifier/105/vnc/2772 | 87 +
 results/classifier/105/vnc/327 | 14 +
 results/classifier/105/vnc/33802194 | 4947 +++++++++++++
 results/classifier/105/vnc/351 | 14 +
 results/classifier/105/vnc/42613410 | 157 +
 results/classifier/105/vnc/659 | 54 +
 results/classifier/105/vnc/685 | 82 +
 results/classifier/105/vnc/697197 | 265 +
 results/classifier/105/vnc/697510 | 165 +
 results/classifier/105/vnc/70 | 14 +
 results/classifier/105/vnc/723 | 44 +
 results/classifier/105/vnc/759 | 25 +
 results/classifier/105/vnc/772 | 25 +
 results/classifier/105/vnc/779 | 26 +
 results/classifier/105/vnc/80570214 | 408 ++
 results/classifier/105/vnc/824074 | 24 +
 results/classifier/105/vnc/850 | 72 +
 results/classifier/105/vnc/854 | 75 +
 results/classifier/105/vnc/974229 | 140 +
 results/classifier/105/vnc/981 | 23 +
 results/classifier/105/vnc/994412 | 24 +
 6702 files changed, 852753 insertions(+), 299045 deletions(-)
 create mode 100644 results/classifier/001/instruction/11357571
 create mode 100644 results/classifier/001/instruction/11933524
 create mode 100644 results/classifier/001/instruction/24190340
 create mode 100644 results/classifier/001/instruction/26095107
 create mode 100644 results/classifier/001/instruction/33802194
 create mode 100644 results/classifier/001/instruction/42226390
 create mode 100644 results/classifier/001/instruction/50773216
 create mode 100644 results/classifier/001/instruction/51610399
 create mode 100644 results/classifier/001/instruction/55961334
 create mode 100644 results/classifier/001/instruction/62179944
 create mode 100644 results/classifier/001/instruction/63565653
 create mode 100644 results/classifier/001/instruction/70868267
 create mode 100644 results/classifier/001/instruction/73660729
 create mode 100644 results/classifier/001/mistranslation/14887122
 create mode 100644
results/classifier/001/mistranslation/22219210 create mode 100644 results/classifier/001/mistranslation/23270873 create mode 100644 results/classifier/001/mistranslation/24930826 create mode 100644 results/classifier/001/mistranslation/25842545 create mode 100644 results/classifier/001/mistranslation/26430026 create mode 100644 results/classifier/001/mistranslation/36568044 create mode 100644 results/classifier/001/mistranslation/64322995 create mode 100644 results/classifier/001/mistranslation/70294255 create mode 100644 results/classifier/001/mistranslation/71456293 create mode 100644 results/classifier/001/mistranslation/74466963 create mode 100644 results/classifier/001/mistranslation/74545755 create mode 100644 results/classifier/001/mistranslation/80604314 create mode 100644 results/classifier/001/mistranslation/80615920 create mode 100644 results/classifier/001/other/02364653 create mode 100644 results/classifier/001/other/02572177 create mode 100644 results/classifier/001/other/04472277 create mode 100644 results/classifier/001/other/12869209 create mode 100644 results/classifier/001/other/13442371 create mode 100644 results/classifier/001/other/14488057 create mode 100644 results/classifier/001/other/16056596 create mode 100644 results/classifier/001/other/16201167 create mode 100644 results/classifier/001/other/16228234 create mode 100644 results/classifier/001/other/17743720 create mode 100644 results/classifier/001/other/21221931 create mode 100644 results/classifier/001/other/21247035 create mode 100644 results/classifier/001/other/23300761 create mode 100644 results/classifier/001/other/23448582 create mode 100644 results/classifier/001/other/25892827 create mode 100644 results/classifier/001/other/31349848 create mode 100644 results/classifier/001/other/32484936 create mode 100644 results/classifier/001/other/35170175 create mode 100644 results/classifier/001/other/42613410 create mode 100644 results/classifier/001/other/42974450 create mode 100644 results/classifier/001/other/43643137 create mode 100644 results/classifier/001/other/48245039 create mode 100644 results/classifier/001/other/55247116 create mode 100644 results/classifier/001/other/55367348 create mode 100644 results/classifier/001/other/55753058 create mode 100644 results/classifier/001/other/56309929 create mode 100644 results/classifier/001/other/56937788 create mode 100644 results/classifier/001/other/57195159 create mode 100644 results/classifier/001/other/57231878 create mode 100644 results/classifier/001/other/57756589 create mode 100644 results/classifier/001/other/59540920 create mode 100644 results/classifier/001/other/60339453 create mode 100644 results/classifier/001/other/64571620 create mode 100644 results/classifier/001/other/65781993 create mode 100644 results/classifier/001/other/66743673 create mode 100644 results/classifier/001/other/67821138 create mode 100644 results/classifier/001/other/68897003 create mode 100644 results/classifier/001/other/70021271 create mode 100644 results/classifier/001/other/70416488 create mode 100644 results/classifier/001/other/74715356 create mode 100644 results/classifier/001/other/79834768 create mode 100644 results/classifier/001/other/81775929 create mode 100644 results/classifier/001/other/85542195 create mode 100644 results/classifier/001/other/88225572 create mode 100644 results/classifier/001/other/88281850 create mode 100644 results/classifier/001/other/92957605 create mode 100644 results/classifier/001/other/95154278 create mode 100644 
results/classifier/001/other/99674399 create mode 100644 results/classifier/001/semantic/05479587 create mode 100644 results/classifier/001/semantic/12360755 create mode 100644 results/classifier/001/semantic/28596630 create mode 100644 results/classifier/001/semantic/30680944 create mode 100644 results/classifier/001/semantic/46572227 create mode 100644 results/classifier/001/semantic/53568181 create mode 100644 results/classifier/001/semantic/80570214 create mode 100644 results/classifier/001/semantic/96782458 create mode 100644 results/classifier/001/semantic/gitlab_semantic_addsubps create mode 100644 results/classifier/001/semantic/gitlab_semantic_adox create mode 100644 results/classifier/001/semantic/gitlab_semantic_bextr create mode 100644 results/classifier/001/semantic/gitlab_semantic_blsi create mode 100644 results/classifier/001/semantic/gitlab_semantic_blsmsk create mode 100644 results/classifier/001/semantic/gitlab_semantic_bzhi create mode 100644 results/classifier/002/boot/42226390 create mode 100644 results/classifier/002/boot/51610399 create mode 100644 results/classifier/002/boot/60339453 create mode 100644 results/classifier/002/boot/67821138 create mode 100644 results/classifier/002/instruction/11357571 create mode 100644 results/classifier/002/instruction/11933524 create mode 100644 results/classifier/002/instruction/24190340 create mode 100644 results/classifier/002/instruction/26095107 create mode 100644 results/classifier/002/instruction/33802194 create mode 100644 results/classifier/002/instruction/50773216 create mode 100644 results/classifier/002/instruction/55961334 create mode 100644 results/classifier/002/instruction/62179944 create mode 100644 results/classifier/002/instruction/63565653 create mode 100644 results/classifier/002/instruction/70868267 create mode 100644 results/classifier/002/instruction/73660729 create mode 100644 results/classifier/002/mistranslation/14887122 create mode 100644 results/classifier/002/mistranslation/22219210 create mode 100644 results/classifier/002/mistranslation/23270873 create mode 100644 results/classifier/002/mistranslation/24930826 create mode 100644 results/classifier/002/mistranslation/25842545 create mode 100644 results/classifier/002/mistranslation/26430026 create mode 100644 results/classifier/002/mistranslation/36568044 create mode 100644 results/classifier/002/mistranslation/64322995 create mode 100644 results/classifier/002/mistranslation/70294255 create mode 100644 results/classifier/002/mistranslation/71456293 create mode 100644 results/classifier/002/mistranslation/74466963 create mode 100644 results/classifier/002/mistranslation/74545755 create mode 100644 results/classifier/002/mistranslation/80604314 create mode 100644 results/classifier/002/mistranslation/80615920 create mode 100644 results/classifier/002/other/02364653 create mode 100644 results/classifier/002/other/02572177 create mode 100644 results/classifier/002/other/04472277 create mode 100644 results/classifier/002/other/12869209 create mode 100644 results/classifier/002/other/13442371 create mode 100644 results/classifier/002/other/14488057 create mode 100644 results/classifier/002/other/16056596 create mode 100644 results/classifier/002/other/16201167 create mode 100644 results/classifier/002/other/16228234 create mode 100644 results/classifier/002/other/17743720 create mode 100644 results/classifier/002/other/21221931 create mode 100644 results/classifier/002/other/21247035 create mode 100644 results/classifier/002/other/23300761 create mode 
100644 results/classifier/002/other/23448582 create mode 100644 results/classifier/002/other/25892827 create mode 100644 results/classifier/002/other/31349848 create mode 100644 results/classifier/002/other/32484936 create mode 100644 results/classifier/002/other/35170175 create mode 100644 results/classifier/002/other/42613410 create mode 100644 results/classifier/002/other/42974450 create mode 100644 results/classifier/002/other/43643137 create mode 100644 results/classifier/002/other/48245039 create mode 100644 results/classifier/002/other/55247116 create mode 100644 results/classifier/002/other/55367348 create mode 100644 results/classifier/002/other/55753058 create mode 100644 results/classifier/002/other/56309929 create mode 100644 results/classifier/002/other/56937788 create mode 100644 results/classifier/002/other/57195159 create mode 100644 results/classifier/002/other/57231878 create mode 100644 results/classifier/002/other/57756589 create mode 100644 results/classifier/002/other/59540920 create mode 100644 results/classifier/002/other/64571620 create mode 100644 results/classifier/002/other/65781993 create mode 100644 results/classifier/002/other/66743673 create mode 100644 results/classifier/002/other/68897003 create mode 100644 results/classifier/002/other/70021271 create mode 100644 results/classifier/002/other/70416488 create mode 100644 results/classifier/002/other/74715356 create mode 100644 results/classifier/002/other/79834768 create mode 100644 results/classifier/002/other/81775929 create mode 100644 results/classifier/002/other/85542195 create mode 100644 results/classifier/002/other/88225572 create mode 100644 results/classifier/002/other/88281850 create mode 100644 results/classifier/002/other/92957605 create mode 100644 results/classifier/002/other/95154278 create mode 100644 results/classifier/002/other/99674399 create mode 100644 results/classifier/002/semantic/05479587 create mode 100644 results/classifier/002/semantic/12360755 create mode 100644 results/classifier/002/semantic/28596630 create mode 100644 results/classifier/002/semantic/30680944 create mode 100644 results/classifier/002/semantic/46572227 create mode 100644 results/classifier/002/semantic/53568181 create mode 100644 results/classifier/002/semantic/80570214 create mode 100644 results/classifier/002/semantic/96782458 create mode 100644 results/classifier/002/semantic/gitlab_semantic_addsubps create mode 100644 results/classifier/002/semantic/gitlab_semantic_adox create mode 100644 results/classifier/002/semantic/gitlab_semantic_bextr create mode 100644 results/classifier/002/semantic/gitlab_semantic_blsi create mode 100644 results/classifier/002/semantic/gitlab_semantic_blsmsk create mode 100644 results/classifier/002/semantic/gitlab_semantic_bzhi create mode 100644 results/classifier/003/KVM/04472277 create mode 100644 results/classifier/003/KVM/26430026 create mode 100644 results/classifier/003/KVM/33802194 create mode 100644 results/classifier/003/KVM/42613410 create mode 100644 results/classifier/003/KVM/43643137 create mode 100644 results/classifier/003/KVM/55961334 create mode 100644 results/classifier/003/KVM/71456293 create mode 100644 results/classifier/003/KVM/80615920 create mode 100644 results/classifier/003/boot/42226390 create mode 100644 results/classifier/003/boot/51610399 create mode 100644 results/classifier/003/boot/60339453 create mode 100644 results/classifier/003/boot/67821138 create mode 100644 results/classifier/003/instruction/11357571 create mode 100644 
results/classifier/003/instruction/11933524 create mode 100644 results/classifier/003/instruction/24190340 create mode 100644 results/classifier/003/instruction/26095107 create mode 100644 results/classifier/003/instruction/50773216 create mode 100644 results/classifier/003/instruction/63565653 create mode 100644 results/classifier/003/instruction/70868267 create mode 100644 results/classifier/003/instruction/73660729 create mode 100644 results/classifier/003/mistranslation/14887122 create mode 100644 results/classifier/003/mistranslation/22219210 create mode 100644 results/classifier/003/mistranslation/23270873 create mode 100644 results/classifier/003/mistranslation/24930826 create mode 100644 results/classifier/003/mistranslation/25842545 create mode 100644 results/classifier/003/mistranslation/36568044 create mode 100644 results/classifier/003/mistranslation/64322995 create mode 100644 results/classifier/003/mistranslation/70294255 create mode 100644 results/classifier/003/mistranslation/74466963 create mode 100644 results/classifier/003/mistranslation/74545755 create mode 100644 results/classifier/003/mistranslation/80604314 create mode 100644 results/classifier/003/network/05479587 create mode 100644 results/classifier/003/network/62179944 create mode 100644 results/classifier/003/other/02364653 create mode 100644 results/classifier/003/other/02572177 create mode 100644 results/classifier/003/other/12869209 create mode 100644 results/classifier/003/other/13442371 create mode 100644 results/classifier/003/other/14488057 create mode 100644 results/classifier/003/other/16056596 create mode 100644 results/classifier/003/other/16201167 create mode 100644 results/classifier/003/other/16228234 create mode 100644 results/classifier/003/other/17743720 create mode 100644 results/classifier/003/other/21221931 create mode 100644 results/classifier/003/other/21247035 create mode 100644 results/classifier/003/other/23300761 create mode 100644 results/classifier/003/other/23448582 create mode 100644 results/classifier/003/other/25892827 create mode 100644 results/classifier/003/other/31349848 create mode 100644 results/classifier/003/other/32484936 create mode 100644 results/classifier/003/other/35170175 create mode 100644 results/classifier/003/other/42974450 create mode 100644 results/classifier/003/other/48245039 create mode 100644 results/classifier/003/other/55247116 create mode 100644 results/classifier/003/other/55367348 create mode 100644 results/classifier/003/other/55753058 create mode 100644 results/classifier/003/other/56309929 create mode 100644 results/classifier/003/other/56937788 create mode 100644 results/classifier/003/other/57195159 create mode 100644 results/classifier/003/other/57231878 create mode 100644 results/classifier/003/other/57756589 create mode 100644 results/classifier/003/other/59540920 create mode 100644 results/classifier/003/other/64571620 create mode 100644 results/classifier/003/other/65781993 create mode 100644 results/classifier/003/other/66743673 create mode 100644 results/classifier/003/other/68897003 create mode 100644 results/classifier/003/other/70021271 create mode 100644 results/classifier/003/other/70416488 create mode 100644 results/classifier/003/other/74715356 create mode 100644 results/classifier/003/other/79834768 create mode 100644 results/classifier/003/other/81775929 create mode 100644 results/classifier/003/other/85542195 create mode 100644 results/classifier/003/other/88225572 create mode 100644 results/classifier/003/other/88281850 create 
mode 100644 results/classifier/003/other/92957605 create mode 100644 results/classifier/003/other/95154278 create mode 100644 results/classifier/003/other/99674399 create mode 100644 results/classifier/003/semantic/12360755 create mode 100644 results/classifier/003/semantic/28596630 create mode 100644 results/classifier/003/semantic/30680944 create mode 100644 results/classifier/003/semantic/46572227 create mode 100644 results/classifier/003/semantic/53568181 create mode 100644 results/classifier/003/semantic/80570214 create mode 100644 results/classifier/003/semantic/96782458 create mode 100644 results/classifier/003/semantic/gitlab_semantic_addsubps create mode 100644 results/classifier/003/semantic/gitlab_semantic_adox create mode 100644 results/classifier/003/semantic/gitlab_semantic_bextr create mode 100644 results/classifier/003/semantic/gitlab_semantic_blsi create mode 100644 results/classifier/003/semantic/gitlab_semantic_blsmsk create mode 100644 results/classifier/003/semantic/gitlab_semantic_bzhi create mode 100644 results/classifier/004/KVM/04472277 create mode 100644 results/classifier/004/KVM/26430026 create mode 100644 results/classifier/004/KVM/43643137 create mode 100644 results/classifier/004/KVM/71456293 create mode 100644 results/classifier/004/KVM/80615920 create mode 100644 results/classifier/004/boot/51610399 create mode 100644 results/classifier/004/boot/60339453 create mode 100644 results/classifier/004/device/14488057 create mode 100644 results/classifier/004/device/24190340 create mode 100644 results/classifier/004/device/24930826 create mode 100644 results/classifier/004/device/26095107 create mode 100644 results/classifier/004/device/28596630 create mode 100644 results/classifier/004/device/36568044 create mode 100644 results/classifier/004/device/42226390 create mode 100644 results/classifier/004/device/48245039 create mode 100644 results/classifier/004/device/57195159 create mode 100644 results/classifier/004/device/57231878 create mode 100644 results/classifier/004/device/67821138 create mode 100644 results/classifier/004/device/99674399 create mode 100644 results/classifier/004/graphic/22219210 create mode 100644 results/classifier/004/graphic/30680944 create mode 100644 results/classifier/004/graphic/46572227 create mode 100644 results/classifier/004/graphic/53568181 create mode 100644 results/classifier/004/graphic/55961334 create mode 100644 results/classifier/004/graphic/73660729 create mode 100644 results/classifier/004/instruction/11933524 create mode 100644 results/classifier/004/instruction/50773216 create mode 100644 results/classifier/004/instruction/63565653 create mode 100644 results/classifier/004/instruction/70868267 create mode 100644 results/classifier/004/mistranslation/14887122 create mode 100644 results/classifier/004/mistranslation/23270873 create mode 100644 results/classifier/004/mistranslation/25842545 create mode 100644 results/classifier/004/mistranslation/64322995 create mode 100644 results/classifier/004/mistranslation/70294255 create mode 100644 results/classifier/004/mistranslation/74466963 create mode 100644 results/classifier/004/mistranslation/74545755 create mode 100644 results/classifier/004/mistranslation/80604314 create mode 100644 results/classifier/004/network/05479587 create mode 100644 results/classifier/004/network/62179944 create mode 100644 results/classifier/004/other/02364653 create mode 100644 results/classifier/004/other/02572177 create mode 100644 results/classifier/004/other/12869209 create mode 100644 
results/classifier/004/other/13442371 create mode 100644 results/classifier/004/other/16056596 create mode 100644 results/classifier/004/other/16201167 create mode 100644 results/classifier/004/other/16228234 create mode 100644 results/classifier/004/other/17743720 create mode 100644 results/classifier/004/other/21221931 create mode 100644 results/classifier/004/other/21247035 create mode 100644 results/classifier/004/other/23300761 create mode 100644 results/classifier/004/other/23448582 create mode 100644 results/classifier/004/other/25892827 create mode 100644 results/classifier/004/other/31349848 create mode 100644 results/classifier/004/other/32484936 create mode 100644 results/classifier/004/other/35170175 create mode 100644 results/classifier/004/other/42974450 create mode 100644 results/classifier/004/other/55247116 create mode 100644 results/classifier/004/other/55367348 create mode 100644 results/classifier/004/other/55753058 create mode 100644 results/classifier/004/other/56309929 create mode 100644 results/classifier/004/other/56937788 create mode 100644 results/classifier/004/other/57756589 create mode 100644 results/classifier/004/other/59540920 create mode 100644 results/classifier/004/other/64571620 create mode 100644 results/classifier/004/other/65781993 create mode 100644 results/classifier/004/other/66743673 create mode 100644 results/classifier/004/other/68897003 create mode 100644 results/classifier/004/other/70021271 create mode 100644 results/classifier/004/other/70416488 create mode 100644 results/classifier/004/other/74715356 create mode 100644 results/classifier/004/other/79834768 create mode 100644 results/classifier/004/other/81775929 create mode 100644 results/classifier/004/other/85542195 create mode 100644 results/classifier/004/other/88225572 create mode 100644 results/classifier/004/other/88281850 create mode 100644 results/classifier/004/other/92957605 create mode 100644 results/classifier/004/other/95154278 create mode 100644 results/classifier/004/other/96782458 create mode 100644 results/classifier/004/semantic/12360755 create mode 100644 results/classifier/004/semantic/gitlab_semantic_addsubps create mode 100644 results/classifier/004/semantic/gitlab_semantic_adox create mode 100644 results/classifier/004/semantic/gitlab_semantic_bextr create mode 100644 results/classifier/004/semantic/gitlab_semantic_blsi create mode 100644 results/classifier/004/semantic/gitlab_semantic_blsmsk create mode 100644 results/classifier/004/semantic/gitlab_semantic_bzhi create mode 100644 results/classifier/004/vnc/11357571 create mode 100644 results/classifier/004/vnc/33802194 create mode 100644 results/classifier/004/vnc/42613410 create mode 100644 results/classifier/004/vnc/80570214 create mode 100644 results/classifier/005/KVM/04472277 create mode 100644 results/classifier/005/KVM/26430026 create mode 100644 results/classifier/005/KVM/43643137 create mode 100644 results/classifier/005/KVM/71456293 create mode 100644 results/classifier/005/KVM/80615920 create mode 100644 results/classifier/005/assembly/48245039 create mode 100644 results/classifier/005/boot/51610399 create mode 100644 results/classifier/005/boot/60339453 create mode 100644 results/classifier/005/device/14488057 create mode 100644 results/classifier/005/device/24190340 create mode 100644 results/classifier/005/device/24930826 create mode 100644 results/classifier/005/device/28596630 create mode 100644 results/classifier/005/device/42226390 create mode 100644 results/classifier/005/device/57195159 create 
mode 100644 results/classifier/005/device/57231878 create mode 100644 results/classifier/005/device/67821138 create mode 100644 results/classifier/005/device/99674399 create mode 100644 results/classifier/005/graphic/22219210 create mode 100644 results/classifier/005/graphic/30680944 create mode 100644 results/classifier/005/graphic/55961334 create mode 100644 results/classifier/005/graphic/73660729 create mode 100644 results/classifier/005/instruction/11933524 create mode 100644 results/classifier/005/instruction/26095107 create mode 100644 results/classifier/005/instruction/50773216 create mode 100644 results/classifier/005/instruction/63565653 create mode 100644 results/classifier/005/instruction/70868267 create mode 100644 results/classifier/005/mistranslation/14887122 create mode 100644 results/classifier/005/mistranslation/23270873 create mode 100644 results/classifier/005/mistranslation/25842545 create mode 100644 results/classifier/005/mistranslation/36568044 create mode 100644 results/classifier/005/mistranslation/64322995 create mode 100644 results/classifier/005/mistranslation/70294255 create mode 100644 results/classifier/005/mistranslation/74466963 create mode 100644 results/classifier/005/mistranslation/74545755 create mode 100644 results/classifier/005/mistranslation/80604314 create mode 100644 results/classifier/005/network/05479587 create mode 100644 results/classifier/005/network/62179944 create mode 100644 results/classifier/005/other/02364653 create mode 100644 results/classifier/005/other/02572177 create mode 100644 results/classifier/005/other/12869209 create mode 100644 results/classifier/005/other/13442371 create mode 100644 results/classifier/005/other/16056596 create mode 100644 results/classifier/005/other/16201167 create mode 100644 results/classifier/005/other/16228234 create mode 100644 results/classifier/005/other/17743720 create mode 100644 results/classifier/005/other/21221931 create mode 100644 results/classifier/005/other/21247035 create mode 100644 results/classifier/005/other/23300761 create mode 100644 results/classifier/005/other/23448582 create mode 100644 results/classifier/005/other/25892827 create mode 100644 results/classifier/005/other/31349848 create mode 100644 results/classifier/005/other/32484936 create mode 100644 results/classifier/005/other/35170175 create mode 100644 results/classifier/005/other/42974450 create mode 100644 results/classifier/005/other/55247116 create mode 100644 results/classifier/005/other/55367348 create mode 100644 results/classifier/005/other/55753058 create mode 100644 results/classifier/005/other/56309929 create mode 100644 results/classifier/005/other/56937788 create mode 100644 results/classifier/005/other/57756589 create mode 100644 results/classifier/005/other/59540920 create mode 100644 results/classifier/005/other/64571620 create mode 100644 results/classifier/005/other/65781993 create mode 100644 results/classifier/005/other/66743673 create mode 100644 results/classifier/005/other/68897003 create mode 100644 results/classifier/005/other/70021271 create mode 100644 results/classifier/005/other/70416488 create mode 100644 results/classifier/005/other/74715356 create mode 100644 results/classifier/005/other/79834768 create mode 100644 results/classifier/005/other/81775929 create mode 100644 results/classifier/005/other/85542195 create mode 100644 results/classifier/005/other/88225572 create mode 100644 results/classifier/005/other/88281850 create mode 100644 results/classifier/005/other/92957605 create mode 
100644 results/classifier/005/other/95154278 create mode 100644 results/classifier/005/semantic/12360755 create mode 100644 results/classifier/005/semantic/46572227 create mode 100644 results/classifier/005/semantic/53568181 create mode 100644 results/classifier/005/semantic/96782458 create mode 100644 results/classifier/005/semantic/gitlab_semantic_addsubps create mode 100644 results/classifier/005/semantic/gitlab_semantic_adox create mode 100644 results/classifier/005/semantic/gitlab_semantic_bextr create mode 100644 results/classifier/005/semantic/gitlab_semantic_blsi create mode 100644 results/classifier/005/semantic/gitlab_semantic_blsmsk create mode 100644 results/classifier/005/semantic/gitlab_semantic_bzhi create mode 100644 results/classifier/005/vnc/11357571 create mode 100644 results/classifier/005/vnc/33802194 create mode 100644 results/classifier/005/vnc/42613410 create mode 100644 results/classifier/005/vnc/80570214 delete mode 100644 results/classifier/01/instruction/11357571 delete mode 100644 results/classifier/01/instruction/11933524 delete mode 100644 results/classifier/01/instruction/24190340 delete mode 100644 results/classifier/01/instruction/26095107 delete mode 100644 results/classifier/01/instruction/33802194 delete mode 100644 results/classifier/01/instruction/42226390 delete mode 100644 results/classifier/01/instruction/50773216 delete mode 100644 results/classifier/01/instruction/51610399 delete mode 100644 results/classifier/01/instruction/55961334 delete mode 100644 results/classifier/01/instruction/62179944 delete mode 100644 results/classifier/01/instruction/63565653 delete mode 100644 results/classifier/01/instruction/70868267 delete mode 100644 results/classifier/01/instruction/73660729 delete mode 100644 results/classifier/01/mistranslation/14887122 delete mode 100644 results/classifier/01/mistranslation/22219210 delete mode 100644 results/classifier/01/mistranslation/23270873 delete mode 100644 results/classifier/01/mistranslation/24930826 delete mode 100644 results/classifier/01/mistranslation/25842545 delete mode 100644 results/classifier/01/mistranslation/26430026 delete mode 100644 results/classifier/01/mistranslation/36568044 delete mode 100644 results/classifier/01/mistranslation/64322995 delete mode 100644 results/classifier/01/mistranslation/70294255 delete mode 100644 results/classifier/01/mistranslation/71456293 delete mode 100644 results/classifier/01/mistranslation/74466963 delete mode 100644 results/classifier/01/mistranslation/74545755 delete mode 100644 results/classifier/01/mistranslation/80604314 delete mode 100644 results/classifier/01/mistranslation/80615920 delete mode 100644 results/classifier/01/other/02364653 delete mode 100644 results/classifier/01/other/02572177 delete mode 100644 results/classifier/01/other/04472277 delete mode 100644 results/classifier/01/other/12869209 delete mode 100644 results/classifier/01/other/13442371 delete mode 100644 results/classifier/01/other/14488057 delete mode 100644 results/classifier/01/other/16056596 delete mode 100644 results/classifier/01/other/16201167 delete mode 100644 results/classifier/01/other/16228234 delete mode 100644 results/classifier/01/other/17743720 delete mode 100644 results/classifier/01/other/21221931 delete mode 100644 results/classifier/01/other/21247035 delete mode 100644 results/classifier/01/other/23300761 delete mode 100644 results/classifier/01/other/23448582 delete mode 100644 results/classifier/01/other/25892827 delete mode 100644 
results/classifier/01/other/31349848 delete mode 100644 results/classifier/01/other/32484936 delete mode 100644 results/classifier/01/other/35170175 delete mode 100644 results/classifier/01/other/42613410 delete mode 100644 results/classifier/01/other/42974450 delete mode 100644 results/classifier/01/other/43643137 delete mode 100644 results/classifier/01/other/48245039 delete mode 100644 results/classifier/01/other/55247116 delete mode 100644 results/classifier/01/other/55367348 delete mode 100644 results/classifier/01/other/55753058 delete mode 100644 results/classifier/01/other/56309929 delete mode 100644 results/classifier/01/other/56937788 delete mode 100644 results/classifier/01/other/57195159 delete mode 100644 results/classifier/01/other/57231878 delete mode 100644 results/classifier/01/other/57756589 delete mode 100644 results/classifier/01/other/59540920 delete mode 100644 results/classifier/01/other/60339453 delete mode 100644 results/classifier/01/other/64571620 delete mode 100644 results/classifier/01/other/65781993 delete mode 100644 results/classifier/01/other/66743673 delete mode 100644 results/classifier/01/other/67821138 delete mode 100644 results/classifier/01/other/68897003 delete mode 100644 results/classifier/01/other/70021271 delete mode 100644 results/classifier/01/other/70416488 delete mode 100644 results/classifier/01/other/74715356 delete mode 100644 results/classifier/01/other/79834768 delete mode 100644 results/classifier/01/other/81775929 delete mode 100644 results/classifier/01/other/85542195 delete mode 100644 results/classifier/01/other/88225572 delete mode 100644 results/classifier/01/other/88281850 delete mode 100644 results/classifier/01/other/92957605 delete mode 100644 results/classifier/01/other/95154278 delete mode 100644 results/classifier/01/other/99674399 delete mode 100644 results/classifier/01/semantic/05479587 delete mode 100644 results/classifier/01/semantic/12360755 delete mode 100644 results/classifier/01/semantic/28596630 delete mode 100644 results/classifier/01/semantic/30680944 delete mode 100644 results/classifier/01/semantic/46572227 delete mode 100644 results/classifier/01/semantic/53568181 delete mode 100644 results/classifier/01/semantic/80570214 delete mode 100644 results/classifier/01/semantic/96782458 delete mode 100644 results/classifier/01/semantic/gitlab_semantic_addsubps delete mode 100644 results/classifier/01/semantic/gitlab_semantic_adox delete mode 100644 results/classifier/01/semantic/gitlab_semantic_bextr delete mode 100644 results/classifier/01/semantic/gitlab_semantic_blsi delete mode 100644 results/classifier/01/semantic/gitlab_semantic_blsmsk delete mode 100644 results/classifier/01/semantic/gitlab_semantic_bzhi delete mode 100644 results/classifier/02/boot/42226390 delete mode 100644 results/classifier/02/boot/51610399 delete mode 100644 results/classifier/02/boot/60339453 delete mode 100644 results/classifier/02/boot/67821138 delete mode 100644 results/classifier/02/instruction/11357571 delete mode 100644 results/classifier/02/instruction/11933524 delete mode 100644 results/classifier/02/instruction/24190340 delete mode 100644 results/classifier/02/instruction/26095107 delete mode 100644 results/classifier/02/instruction/33802194 delete mode 100644 results/classifier/02/instruction/50773216 delete mode 100644 results/classifier/02/instruction/55961334 delete mode 100644 results/classifier/02/instruction/62179944 delete mode 100644 results/classifier/02/instruction/63565653 delete mode 100644 
results/classifier/02/instruction/70868267 delete mode 100644 results/classifier/02/instruction/73660729 delete mode 100644 results/classifier/02/mistranslation/14887122 delete mode 100644 results/classifier/02/mistranslation/22219210 delete mode 100644 results/classifier/02/mistranslation/23270873 delete mode 100644 results/classifier/02/mistranslation/24930826 delete mode 100644 results/classifier/02/mistranslation/25842545 delete mode 100644 results/classifier/02/mistranslation/26430026 delete mode 100644 results/classifier/02/mistranslation/36568044 delete mode 100644 results/classifier/02/mistranslation/64322995 delete mode 100644 results/classifier/02/mistranslation/70294255 delete mode 100644 results/classifier/02/mistranslation/71456293 delete mode 100644 results/classifier/02/mistranslation/74466963 delete mode 100644 results/classifier/02/mistranslation/74545755 delete mode 100644 results/classifier/02/mistranslation/80604314 delete mode 100644 results/classifier/02/mistranslation/80615920 delete mode 100644 results/classifier/02/other/02364653 delete mode 100644 results/classifier/02/other/02572177 delete mode 100644 results/classifier/02/other/04472277 delete mode 100644 results/classifier/02/other/12869209 delete mode 100644 results/classifier/02/other/13442371 delete mode 100644 results/classifier/02/other/14488057 delete mode 100644 results/classifier/02/other/16056596 delete mode 100644 results/classifier/02/other/16201167 delete mode 100644 results/classifier/02/other/16228234 delete mode 100644 results/classifier/02/other/17743720 delete mode 100644 results/classifier/02/other/21221931 delete mode 100644 results/classifier/02/other/21247035 delete mode 100644 results/classifier/02/other/23300761 delete mode 100644 results/classifier/02/other/23448582 delete mode 100644 results/classifier/02/other/25892827 delete mode 100644 results/classifier/02/other/31349848 delete mode 100644 results/classifier/02/other/32484936 delete mode 100644 results/classifier/02/other/35170175 delete mode 100644 results/classifier/02/other/42613410 delete mode 100644 results/classifier/02/other/42974450 delete mode 100644 results/classifier/02/other/43643137 delete mode 100644 results/classifier/02/other/48245039 delete mode 100644 results/classifier/02/other/55247116 delete mode 100644 results/classifier/02/other/55367348 delete mode 100644 results/classifier/02/other/55753058 delete mode 100644 results/classifier/02/other/56309929 delete mode 100644 results/classifier/02/other/56937788 delete mode 100644 results/classifier/02/other/57195159 delete mode 100644 results/classifier/02/other/57231878 delete mode 100644 results/classifier/02/other/57756589 delete mode 100644 results/classifier/02/other/59540920 delete mode 100644 results/classifier/02/other/64571620 delete mode 100644 results/classifier/02/other/65781993 delete mode 100644 results/classifier/02/other/66743673 delete mode 100644 results/classifier/02/other/68897003 delete mode 100644 results/classifier/02/other/70021271 delete mode 100644 results/classifier/02/other/70416488 delete mode 100644 results/classifier/02/other/74715356 delete mode 100644 results/classifier/02/other/79834768 delete mode 100644 results/classifier/02/other/81775929 delete mode 100644 results/classifier/02/other/85542195 delete mode 100644 results/classifier/02/other/88225572 delete mode 100644 results/classifier/02/other/88281850 delete mode 100644 results/classifier/02/other/92957605 delete mode 100644 results/classifier/02/other/95154278 delete mode 100644 
results/classifier/02/other/99674399 delete mode 100644 results/classifier/02/semantic/05479587 delete mode 100644 results/classifier/02/semantic/12360755 delete mode 100644 results/classifier/02/semantic/28596630 delete mode 100644 results/classifier/02/semantic/30680944 delete mode 100644 results/classifier/02/semantic/46572227 delete mode 100644 results/classifier/02/semantic/53568181 delete mode 100644 results/classifier/02/semantic/80570214 delete mode 100644 results/classifier/02/semantic/96782458 delete mode 100644 results/classifier/02/semantic/gitlab_semantic_addsubps delete mode 100644 results/classifier/02/semantic/gitlab_semantic_adox delete mode 100644 results/classifier/02/semantic/gitlab_semantic_bextr delete mode 100644 results/classifier/02/semantic/gitlab_semantic_blsi delete mode 100644 results/classifier/02/semantic/gitlab_semantic_blsmsk delete mode 100644 results/classifier/02/semantic/gitlab_semantic_bzhi delete mode 100644 results/classifier/03/KVM/04472277 delete mode 100644 results/classifier/03/KVM/26430026 delete mode 100644 results/classifier/03/KVM/33802194 delete mode 100644 results/classifier/03/KVM/42613410 delete mode 100644 results/classifier/03/KVM/43643137 delete mode 100644 results/classifier/03/KVM/55961334 delete mode 100644 results/classifier/03/KVM/71456293 delete mode 100644 results/classifier/03/KVM/80615920 delete mode 100644 results/classifier/03/boot/42226390 delete mode 100644 results/classifier/03/boot/51610399 delete mode 100644 results/classifier/03/boot/60339453 delete mode 100644 results/classifier/03/boot/67821138 delete mode 100644 results/classifier/03/instruction/11357571 delete mode 100644 results/classifier/03/instruction/11933524 delete mode 100644 results/classifier/03/instruction/24190340 delete mode 100644 results/classifier/03/instruction/26095107 delete mode 100644 results/classifier/03/instruction/50773216 delete mode 100644 results/classifier/03/instruction/63565653 delete mode 100644 results/classifier/03/instruction/70868267 delete mode 100644 results/classifier/03/instruction/73660729 delete mode 100644 results/classifier/03/mistranslation/14887122 delete mode 100644 results/classifier/03/mistranslation/22219210 delete mode 100644 results/classifier/03/mistranslation/23270873 delete mode 100644 results/classifier/03/mistranslation/24930826 delete mode 100644 results/classifier/03/mistranslation/25842545 delete mode 100644 results/classifier/03/mistranslation/36568044 delete mode 100644 results/classifier/03/mistranslation/64322995 delete mode 100644 results/classifier/03/mistranslation/70294255 delete mode 100644 results/classifier/03/mistranslation/74466963 delete mode 100644 results/classifier/03/mistranslation/74545755 delete mode 100644 results/classifier/03/mistranslation/80604314 delete mode 100644 results/classifier/03/network/05479587 delete mode 100644 results/classifier/03/network/62179944 delete mode 100644 results/classifier/03/other/02364653 delete mode 100644 results/classifier/03/other/02572177 delete mode 100644 results/classifier/03/other/12869209 delete mode 100644 results/classifier/03/other/13442371 delete mode 100644 results/classifier/03/other/14488057 delete mode 100644 results/classifier/03/other/16056596 delete mode 100644 results/classifier/03/other/16201167 delete mode 100644 results/classifier/03/other/16228234 delete mode 100644 results/classifier/03/other/17743720 delete mode 100644 results/classifier/03/other/21221931 delete mode 100644 results/classifier/03/other/21247035 delete mode 
100644 results/classifier/03/other/23300761 delete mode 100644 results/classifier/03/other/23448582 delete mode 100644 results/classifier/03/other/25892827 delete mode 100644 results/classifier/03/other/31349848 delete mode 100644 results/classifier/03/other/32484936 delete mode 100644 results/classifier/03/other/35170175 delete mode 100644 results/classifier/03/other/42974450 delete mode 100644 results/classifier/03/other/48245039 delete mode 100644 results/classifier/03/other/55247116 delete mode 100644 results/classifier/03/other/55367348 delete mode 100644 results/classifier/03/other/55753058 delete mode 100644 results/classifier/03/other/56309929 delete mode 100644 results/classifier/03/other/56937788 delete mode 100644 results/classifier/03/other/57195159 delete mode 100644 results/classifier/03/other/57231878 delete mode 100644 results/classifier/03/other/57756589 delete mode 100644 results/classifier/03/other/59540920 delete mode 100644 results/classifier/03/other/64571620 delete mode 100644 results/classifier/03/other/65781993 delete mode 100644 results/classifier/03/other/66743673 delete mode 100644 results/classifier/03/other/68897003 delete mode 100644 results/classifier/03/other/70021271 delete mode 100644 results/classifier/03/other/70416488 delete mode 100644 results/classifier/03/other/74715356 delete mode 100644 results/classifier/03/other/79834768 delete mode 100644 results/classifier/03/other/81775929 delete mode 100644 results/classifier/03/other/85542195 delete mode 100644 results/classifier/03/other/88225572 delete mode 100644 results/classifier/03/other/88281850 delete mode 100644 results/classifier/03/other/92957605 delete mode 100644 results/classifier/03/other/95154278 delete mode 100644 results/classifier/03/other/99674399 delete mode 100644 results/classifier/03/semantic/12360755 delete mode 100644 results/classifier/03/semantic/28596630 delete mode 100644 results/classifier/03/semantic/30680944 delete mode 100644 results/classifier/03/semantic/46572227 delete mode 100644 results/classifier/03/semantic/53568181 delete mode 100644 results/classifier/03/semantic/80570214 delete mode 100644 results/classifier/03/semantic/96782458 delete mode 100644 results/classifier/03/semantic/gitlab_semantic_addsubps delete mode 100644 results/classifier/03/semantic/gitlab_semantic_adox delete mode 100644 results/classifier/03/semantic/gitlab_semantic_bextr delete mode 100644 results/classifier/03/semantic/gitlab_semantic_blsi delete mode 100644 results/classifier/03/semantic/gitlab_semantic_blsmsk delete mode 100644 results/classifier/03/semantic/gitlab_semantic_bzhi delete mode 100644 results/classifier/04/KVM/04472277 delete mode 100644 results/classifier/04/KVM/26430026 delete mode 100644 results/classifier/04/KVM/43643137 delete mode 100644 results/classifier/04/KVM/71456293 delete mode 100644 results/classifier/04/KVM/80615920 delete mode 100644 results/classifier/04/assembly/48245039 delete mode 100644 results/classifier/04/boot/51610399 delete mode 100644 results/classifier/04/boot/60339453 delete mode 100644 results/classifier/04/device/14488057 delete mode 100644 results/classifier/04/device/24190340 delete mode 100644 results/classifier/04/device/24930826 delete mode 100644 results/classifier/04/device/28596630 delete mode 100644 results/classifier/04/device/42226390 delete mode 100644 results/classifier/04/device/57195159 delete mode 100644 results/classifier/04/device/57231878 delete mode 100644 results/classifier/04/device/67821138 delete mode 100644 
results/classifier/04/device/99674399 delete mode 100644 results/classifier/04/graphic/22219210 delete mode 100644 results/classifier/04/graphic/30680944 delete mode 100644 results/classifier/04/graphic/55961334 delete mode 100644 results/classifier/04/graphic/73660729 delete mode 100644 results/classifier/04/instruction/11933524 delete mode 100644 results/classifier/04/instruction/26095107 delete mode 100644 results/classifier/04/instruction/50773216 delete mode 100644 results/classifier/04/instruction/63565653 delete mode 100644 results/classifier/04/instruction/70868267 delete mode 100644 results/classifier/04/mistranslation/14887122 delete mode 100644 results/classifier/04/mistranslation/23270873 delete mode 100644 results/classifier/04/mistranslation/25842545 delete mode 100644 results/classifier/04/mistranslation/36568044 delete mode 100644 results/classifier/04/mistranslation/64322995 delete mode 100644 results/classifier/04/mistranslation/70294255 delete mode 100644 results/classifier/04/mistranslation/74466963 delete mode 100644 results/classifier/04/mistranslation/74545755 delete mode 100644 results/classifier/04/mistranslation/80604314 delete mode 100644 results/classifier/04/network/05479587 delete mode 100644 results/classifier/04/network/62179944 delete mode 100644 results/classifier/04/other/02364653 delete mode 100644 results/classifier/04/other/02572177 delete mode 100644 results/classifier/04/other/12869209 delete mode 100644 results/classifier/04/other/13442371 delete mode 100644 results/classifier/04/other/16056596 delete mode 100644 results/classifier/04/other/16201167 delete mode 100644 results/classifier/04/other/16228234 delete mode 100644 results/classifier/04/other/17743720 delete mode 100644 results/classifier/04/other/21221931 delete mode 100644 results/classifier/04/other/21247035 delete mode 100644 results/classifier/04/other/23300761 delete mode 100644 results/classifier/04/other/23448582 delete mode 100644 results/classifier/04/other/25892827 delete mode 100644 results/classifier/04/other/31349848 delete mode 100644 results/classifier/04/other/32484936 delete mode 100644 results/classifier/04/other/35170175 delete mode 100644 results/classifier/04/other/42974450 delete mode 100644 results/classifier/04/other/55247116 delete mode 100644 results/classifier/04/other/55367348 delete mode 100644 results/classifier/04/other/55753058 delete mode 100644 results/classifier/04/other/56309929 delete mode 100644 results/classifier/04/other/56937788 delete mode 100644 results/classifier/04/other/57756589 delete mode 100644 results/classifier/04/other/59540920 delete mode 100644 results/classifier/04/other/64571620 delete mode 100644 results/classifier/04/other/65781993 delete mode 100644 results/classifier/04/other/66743673 delete mode 100644 results/classifier/04/other/68897003 delete mode 100644 results/classifier/04/other/70021271 delete mode 100644 results/classifier/04/other/70416488 delete mode 100644 results/classifier/04/other/74715356 delete mode 100644 results/classifier/04/other/79834768 delete mode 100644 results/classifier/04/other/81775929 delete mode 100644 results/classifier/04/other/85542195 delete mode 100644 results/classifier/04/other/88225572 delete mode 100644 results/classifier/04/other/88281850 delete mode 100644 results/classifier/04/other/92957605 delete mode 100644 results/classifier/04/other/95154278 delete mode 100644 results/classifier/04/semantic/12360755 delete mode 100644 results/classifier/04/semantic/46572227 delete mode 100644 
results/classifier/04/semantic/53568181 delete mode 100644 results/classifier/04/semantic/96782458 delete mode 100644 results/classifier/04/semantic/gitlab_semantic_addsubps delete mode 100644 results/classifier/04/semantic/gitlab_semantic_adox delete mode 100644 results/classifier/04/semantic/gitlab_semantic_bextr delete mode 100644 results/classifier/04/semantic/gitlab_semantic_blsi delete mode 100644 results/classifier/04/semantic/gitlab_semantic_blsmsk delete mode 100644 results/classifier/04/semantic/gitlab_semantic_bzhi delete mode 100644 results/classifier/04/vnc/11357571 delete mode 100644 results/classifier/04/vnc/33802194 delete mode 100644 results/classifier/04/vnc/42613410 delete mode 100644 results/classifier/04/vnc/80570214 delete mode 100644 results/classifier/05/KVM/04472277 delete mode 100644 results/classifier/05/KVM/26430026 delete mode 100644 results/classifier/05/KVM/43643137 delete mode 100644 results/classifier/05/KVM/71456293 delete mode 100644 results/classifier/05/KVM/80615920 delete mode 100644 results/classifier/05/boot/51610399 delete mode 100644 results/classifier/05/boot/60339453 delete mode 100644 results/classifier/05/device/14488057 delete mode 100644 results/classifier/05/device/24190340 delete mode 100644 results/classifier/05/device/24930826 delete mode 100644 results/classifier/05/device/26095107 delete mode 100644 results/classifier/05/device/28596630 delete mode 100644 results/classifier/05/device/36568044 delete mode 100644 results/classifier/05/device/42226390 delete mode 100644 results/classifier/05/device/48245039 delete mode 100644 results/classifier/05/device/57195159 delete mode 100644 results/classifier/05/device/57231878 delete mode 100644 results/classifier/05/device/67821138 delete mode 100644 results/classifier/05/device/99674399 delete mode 100644 results/classifier/05/graphic/22219210 delete mode 100644 results/classifier/05/graphic/30680944 delete mode 100644 results/classifier/05/graphic/46572227 delete mode 100644 results/classifier/05/graphic/53568181 delete mode 100644 results/classifier/05/graphic/55961334 delete mode 100644 results/classifier/05/graphic/73660729 delete mode 100644 results/classifier/05/instruction/11933524 delete mode 100644 results/classifier/05/instruction/50773216 delete mode 100644 results/classifier/05/instruction/63565653 delete mode 100644 results/classifier/05/instruction/70868267 delete mode 100644 results/classifier/05/mistranslation/14887122 delete mode 100644 results/classifier/05/mistranslation/23270873 delete mode 100644 results/classifier/05/mistranslation/25842545 delete mode 100644 results/classifier/05/mistranslation/64322995 delete mode 100644 results/classifier/05/mistranslation/70294255 delete mode 100644 results/classifier/05/mistranslation/74466963 delete mode 100644 results/classifier/05/mistranslation/74545755 delete mode 100644 results/classifier/05/mistranslation/80604314 delete mode 100644 results/classifier/05/network/05479587 delete mode 100644 results/classifier/05/network/62179944 delete mode 100644 results/classifier/05/other/02364653 delete mode 100644 results/classifier/05/other/02572177 delete mode 100644 results/classifier/05/other/12869209 delete mode 100644 results/classifier/05/other/13442371 delete mode 100644 results/classifier/05/other/16056596 delete mode 100644 results/classifier/05/other/16201167 delete mode 100644 results/classifier/05/other/16228234 delete mode 100644 results/classifier/05/other/17743720 delete mode 100644 results/classifier/05/other/21221931 delete 
mode 100644 results/classifier/05/other/21247035 delete mode 100644 results/classifier/05/other/23300761 delete mode 100644 results/classifier/05/other/23448582 delete mode 100644 results/classifier/05/other/25892827 delete mode 100644 results/classifier/05/other/31349848 delete mode 100644 results/classifier/05/other/32484936 delete mode 100644 results/classifier/05/other/35170175 delete mode 100644 results/classifier/05/other/42974450 delete mode 100644 results/classifier/05/other/55247116 delete mode 100644 results/classifier/05/other/55367348 delete mode 100644 results/classifier/05/other/55753058 delete mode 100644 results/classifier/05/other/56309929 delete mode 100644 results/classifier/05/other/56937788 delete mode 100644 results/classifier/05/other/57756589 delete mode 100644 results/classifier/05/other/59540920 delete mode 100644 results/classifier/05/other/64571620 delete mode 100644 results/classifier/05/other/65781993 delete mode 100644 results/classifier/05/other/66743673 delete mode 100644 results/classifier/05/other/68897003 delete mode 100644 results/classifier/05/other/70021271 delete mode 100644 results/classifier/05/other/70416488 delete mode 100644 results/classifier/05/other/74715356 delete mode 100644 results/classifier/05/other/79834768 delete mode 100644 results/classifier/05/other/81775929 delete mode 100644 results/classifier/05/other/85542195 delete mode 100644 results/classifier/05/other/88225572 delete mode 100644 results/classifier/05/other/88281850 delete mode 100644 results/classifier/05/other/92957605 delete mode 100644 results/classifier/05/other/95154278 delete mode 100644 results/classifier/05/other/96782458 delete mode 100644 results/classifier/05/semantic/12360755 delete mode 100644 results/classifier/05/semantic/gitlab_semantic_addsubps delete mode 100644 results/classifier/05/semantic/gitlab_semantic_adox delete mode 100644 results/classifier/05/semantic/gitlab_semantic_bextr delete mode 100644 results/classifier/05/semantic/gitlab_semantic_blsi delete mode 100644 results/classifier/05/semantic/gitlab_semantic_blsmsk delete mode 100644 results/classifier/05/semantic/gitlab_semantic_bzhi delete mode 100644 results/classifier/05/vnc/11357571 delete mode 100644 results/classifier/05/vnc/33802194 delete mode 100644 results/classifier/05/vnc/42613410 delete mode 100644 results/classifier/05/vnc/80570214 create mode 100644 results/classifier/105/KVM/04472277 create mode 100644 results/classifier/105/KVM/1002 create mode 100644 results/classifier/105/KVM/1009 create mode 100644 results/classifier/105/KVM/1035 create mode 100644 results/classifier/105/KVM/1042561 create mode 100644 results/classifier/105/KVM/1045 create mode 100644 results/classifier/105/KVM/1063807 create mode 100644 results/classifier/105/KVM/1064 create mode 100644 results/classifier/105/KVM/1073952 create mode 100644 results/classifier/105/KVM/1093360 create mode 100644 results/classifier/105/KVM/110 create mode 100644 results/classifier/105/KVM/1135567 create mode 100644 results/classifier/105/KVM/1136 create mode 100644 results/classifier/105/KVM/1138 create mode 100644 results/classifier/105/KVM/1155 create mode 100644 results/classifier/105/KVM/1156632 create mode 100644 results/classifier/105/KVM/1173490 create mode 100644 results/classifier/105/KVM/1191326 create mode 100644 results/classifier/105/KVM/1198 create mode 100644 results/classifier/105/KVM/1201447 create mode 100644 results/classifier/105/KVM/1202289 create mode 100644 results/classifier/105/KVM/1203 create mode 100644 
results/classifier/105/KVM/1215 create mode 100644 results/classifier/105/KVM/1224444 create mode 100644 results/classifier/105/KVM/1243639 create mode 100644 results/classifier/105/KVM/1253777 create mode 100644 results/classifier/105/KVM/1259499 create mode 100644 results/classifier/105/KVM/1268596 create mode 100644 results/classifier/105/KVM/1288259 create mode 100644 results/classifier/105/KVM/1294227 create mode 100644 results/classifier/105/KVM/1307281 create mode 100644 results/classifier/105/KVM/1307656 create mode 100644 results/classifier/105/KVM/1312668 create mode 100644 results/classifier/105/KVM/1344 create mode 100644 results/classifier/105/KVM/1352179 create mode 100644 results/classifier/105/KVM/1353149 create mode 100644 results/classifier/105/KVM/1383857 create mode 100644 results/classifier/105/KVM/139 create mode 100644 results/classifier/105/KVM/1398 create mode 100644 results/classifier/105/KVM/1399191 create mode 100644 results/classifier/105/KVM/1405385 create mode 100644 results/classifier/105/KVM/1408152 create mode 100644 results/classifier/105/KVM/1435359 create mode 100644 results/classifier/105/KVM/1441443 create mode 100644 results/classifier/105/KVM/1446726 create mode 100644 results/classifier/105/KVM/1456804 create mode 100644 results/classifier/105/KVM/1463143 create mode 100644 results/classifier/105/KVM/1465935 create mode 100644 results/classifier/105/KVM/1470481 create mode 100644 results/classifier/105/KVM/1481375 create mode 100644 results/classifier/105/KVM/1484 create mode 100644 results/classifier/105/KVM/1500935 create mode 100644 results/classifier/105/KVM/1502884 create mode 100644 results/classifier/105/KVM/1502934 create mode 100644 results/classifier/105/KVM/1518969 create mode 100644 results/classifier/105/KVM/1529187 create mode 100644 results/classifier/105/KVM/1532 create mode 100644 results/classifier/105/KVM/1545 create mode 100644 results/classifier/105/KVM/1547012 create mode 100644 results/classifier/105/KVM/1553999 create mode 100644 results/classifier/105/KVM/1557057 create mode 100644 results/classifier/105/KVM/1559 create mode 100644 results/classifier/105/KVM/1565 create mode 100644 results/classifier/105/KVM/1570 create mode 100644 results/classifier/105/KVM/1574246 create mode 100644 results/classifier/105/KVM/1576347 create mode 100644 results/classifier/105/KVM/1579645 create mode 100644 results/classifier/105/KVM/1583 create mode 100644 results/classifier/105/KVM/1583775 create mode 100644 results/classifier/105/KVM/1591628 create mode 100644 results/classifier/105/KVM/1597 create mode 100644 results/classifier/105/KVM/1626596 create mode 100644 results/classifier/105/KVM/1626972 create mode 100644 results/classifier/105/KVM/1635339 create mode 100644 results/classifier/105/KVM/1637511 create mode 100644 results/classifier/105/KVM/1642011 create mode 100644 results/classifier/105/KVM/1652 create mode 100644 results/classifier/105/KVM/1663287 create mode 100644 results/classifier/105/KVM/1671876 create mode 100644 results/classifier/105/KVM/1673722 create mode 100644 results/classifier/105/KVM/1675458 create mode 100644 results/classifier/105/KVM/1677247 create mode 100644 results/classifier/105/KVM/1681688 create mode 100644 results/classifier/105/KVM/1682128 create mode 100644 results/classifier/105/KVM/1686350 create mode 100644 results/classifier/105/KVM/1688 create mode 100644 results/classifier/105/KVM/1699824 create mode 100644 results/classifier/105/KVM/1706058 create mode 100644 
results/classifier/105/KVM/1706296 create mode 100644 results/classifier/105/KVM/1709784 create mode 100644 results/classifier/105/KVM/1715203 create mode 100644 results/classifier/105/KVM/1723731 create mode 100644 results/classifier/105/KVM/1723927 create mode 100644 results/classifier/105/KVM/1726 create mode 100644 results/classifier/105/KVM/1728615 create mode 100644 results/classifier/105/KVM/1728657 create mode 100644 results/classifier/105/KVM/1731957 create mode 100644 results/classifier/105/KVM/1752026 create mode 100644 results/classifier/105/KVM/1754038 create mode 100644 results/classifier/105/KVM/1756728 create mode 100644 results/classifier/105/KVM/1758819 create mode 100644 results/classifier/105/KVM/1769189 create mode 100644 results/classifier/105/KVM/1770724 create mode 100644 results/classifier/105/KVM/1775702 create mode 100644 results/classifier/105/KVM/1776920 create mode 100644 results/classifier/105/KVM/1778966 create mode 100644 results/classifier/105/KVM/1788582 create mode 100644 results/classifier/105/KVM/1792523 create mode 100644 results/classifier/105/KVM/1798057 create mode 100644 results/classifier/105/KVM/1807052 create mode 100644 results/classifier/105/KVM/1808 create mode 100644 results/classifier/105/KVM/1808928 create mode 100644 results/classifier/105/KVM/1814418 create mode 100644 results/classifier/105/KVM/1814420 create mode 100644 results/classifier/105/KVM/1821771 create mode 100644 results/classifier/105/KVM/1830821 create mode 100644 results/classifier/105/KVM/1833204 create mode 100644 results/classifier/105/KVM/1834051 create mode 100644 results/classifier/105/KVM/1835466 create mode 100644 results/classifier/105/KVM/1836763 create mode 100644 results/classifier/105/KVM/1838569 create mode 100644 results/classifier/105/KVM/1843073 create mode 100644 results/classifier/105/KVM/1847440 create mode 100644 results/classifier/105/KVM/1848244 create mode 100644 results/classifier/105/KVM/1851972 create mode 100644 results/classifier/105/KVM/1852781 create mode 100644 results/classifier/105/KVM/1853123 create mode 100644 results/classifier/105/KVM/1859310 create mode 100644 results/classifier/105/KVM/1860759 create mode 100644 results/classifier/105/KVM/1862874 create mode 100644 results/classifier/105/KVM/1863819 create mode 100644 results/classifier/105/KVM/1865160 create mode 100644 results/classifier/105/KVM/1873340 create mode 100644 results/classifier/105/KVM/1873344 create mode 100644 results/classifier/105/KVM/1874888 create mode 100644 results/classifier/105/KVM/1877052 create mode 100644 results/classifier/105/KVM/1878250 create mode 100644 results/classifier/105/KVM/1878642 create mode 100644 results/classifier/105/KVM/1878645 create mode 100644 results/classifier/105/KVM/1879646 create mode 100644 results/classifier/105/KVM/1880355 create mode 100644 results/classifier/105/KVM/1880507 create mode 100644 results/classifier/105/KVM/1883732 create mode 100644 results/classifier/105/KVM/1883733 create mode 100644 results/classifier/105/KVM/1888601 create mode 100644 results/classifier/105/KVM/1889945 create mode 100644 results/classifier/105/KVM/1891354 create mode 100644 results/classifier/105/KVM/1892963 create mode 100644 results/classifier/105/KVM/1892966 create mode 100644 results/classifier/105/KVM/1897783 create mode 100644 results/classifier/105/KVM/1902612 create mode 100644 results/classifier/105/KVM/1906 create mode 100644 results/classifier/105/KVM/1908513 create mode 100644 results/classifier/105/KVM/1908832 create mode 100644 
results/classifier/105/KVM/1910941 create mode 100644 results/classifier/105/KVM/1912224 create mode 100644 results/classifier/105/KVM/1914353 create mode 100644 results/classifier/105/KVM/1914696 create mode 100644 results/classifier/105/KVM/1914748 create mode 100644 results/classifier/105/KVM/1915539 create mode 100644 results/classifier/105/KVM/1919036 create mode 100644 results/classifier/105/KVM/1919169 create mode 100644 results/classifier/105/KVM/1921635 create mode 100644 results/classifier/105/KVM/1922430 create mode 100644 results/classifier/105/KVM/1924231 create mode 100644 results/classifier/105/KVM/1924914 create mode 100644 results/classifier/105/KVM/1926596 create mode 100644 results/classifier/105/KVM/1926782 create mode 100644 results/classifier/105/KVM/1926952 create mode 100644 results/classifier/105/KVM/1936 create mode 100644 results/classifier/105/KVM/1941 create mode 100644 results/classifier/105/KVM/1967814 create mode 100644 results/classifier/105/KVM/1977 create mode 100644 results/classifier/105/KVM/2041 create mode 100644 results/classifier/105/KVM/2046 create mode 100644 results/classifier/105/KVM/2060 create mode 100644 results/classifier/105/KVM/2071 create mode 100644 results/classifier/105/KVM/2110 create mode 100644 results/classifier/105/KVM/2165 create mode 100644 results/classifier/105/KVM/2265 create mode 100644 results/classifier/105/KVM/2313 create mode 100644 results/classifier/105/KVM/2321 create mode 100644 results/classifier/105/KVM/2325 create mode 100644 results/classifier/105/KVM/2334 create mode 100644 results/classifier/105/KVM/2363 create mode 100644 results/classifier/105/KVM/239 create mode 100644 results/classifier/105/KVM/2392 create mode 100644 results/classifier/105/KVM/2394 create mode 100644 results/classifier/105/KVM/2398 create mode 100644 results/classifier/105/KVM/2412 create mode 100644 results/classifier/105/KVM/2436 create mode 100644 results/classifier/105/KVM/2445 create mode 100644 results/classifier/105/KVM/2447 create mode 100644 results/classifier/105/KVM/252 create mode 100644 results/classifier/105/KVM/2548 create mode 100644 results/classifier/105/KVM/2566 create mode 100644 results/classifier/105/KVM/2567 create mode 100644 results/classifier/105/KVM/2573 create mode 100644 results/classifier/105/KVM/2589 create mode 100644 results/classifier/105/KVM/2631 create mode 100644 results/classifier/105/KVM/26430026 create mode 100644 results/classifier/105/KVM/2650 create mode 100644 results/classifier/105/KVM/2658 create mode 100644 results/classifier/105/KVM/2692 create mode 100644 results/classifier/105/KVM/2779 create mode 100644 results/classifier/105/KVM/2927 create mode 100644 results/classifier/105/KVM/2950 create mode 100644 results/classifier/105/KVM/391880 create mode 100644 results/classifier/105/KVM/412 create mode 100644 results/classifier/105/KVM/43643137 create mode 100644 results/classifier/105/KVM/498 create mode 100644 results/classifier/105/KVM/504368 create mode 100644 results/classifier/105/KVM/528 create mode 100644 results/classifier/105/KVM/530077 create mode 100644 results/classifier/105/KVM/563 create mode 100644 results/classifier/105/KVM/584514 create mode 100644 results/classifier/105/KVM/589231 create mode 100644 results/classifier/105/KVM/599574 create mode 100644 results/classifier/105/KVM/612297 create mode 100644 results/classifier/105/KVM/612452 create mode 100644 results/classifier/105/KVM/642304 create mode 100644 results/classifier/105/KVM/645524 create mode 100644 
results/classifier/105/KVM/71456293 create mode 100644 results/classifier/105/KVM/721659 create mode 100644 results/classifier/105/KVM/73 create mode 100644 results/classifier/105/KVM/735 create mode 100644 results/classifier/105/KVM/735752 create mode 100644 results/classifier/105/KVM/747583 create mode 100644 results/classifier/105/KVM/748 create mode 100644 results/classifier/105/KVM/772275 create mode 100644 results/classifier/105/KVM/785668 create mode 100644 results/classifier/105/KVM/797905 create mode 100644 results/classifier/105/KVM/80615920 create mode 100644 results/classifier/105/KVM/809 create mode 100644 results/classifier/105/KVM/810588 create mode 100644 results/classifier/105/KVM/816860 create mode 100644 results/classifier/105/KVM/819 create mode 100644 results/classifier/105/KVM/902720 create mode 100644 results/classifier/105/KVM/903 create mode 100644 results/classifier/105/KVM/905095 create mode 100644 results/classifier/105/KVM/916 create mode 100644 results/classifier/105/KVM/920772 create mode 100644 results/classifier/105/KVM/922355 create mode 100644 results/classifier/105/KVM/954 create mode 100644 results/classifier/105/KVM/957 create mode 100644 results/classifier/105/KVM/961 create mode 100644 results/classifier/105/KVM/964 create mode 100644 results/classifier/105/KVM/965867 create mode 100644 results/classifier/105/KVM/966316 create mode 100644 results/classifier/105/KVM/992067 create mode 100644 results/classifier/105/assembly/1098729 create mode 100644 results/classifier/105/assembly/1396497 create mode 100644 results/classifier/105/assembly/1402755 create mode 100644 results/classifier/105/assembly/1490611 create mode 100644 results/classifier/105/assembly/1520 create mode 100644 results/classifier/105/assembly/1548170 create mode 100644 results/classifier/105/assembly/1605 create mode 100644 results/classifier/105/assembly/1612 create mode 100644 results/classifier/105/assembly/1620 create mode 100644 results/classifier/105/assembly/1649 create mode 100644 results/classifier/105/assembly/1662050 create mode 100644 results/classifier/105/assembly/1724570 create mode 100644 results/classifier/105/assembly/1772262 create mode 100644 results/classifier/105/assembly/1787002 create mode 100644 results/classifier/105/assembly/1806114 create mode 100644 results/classifier/105/assembly/1847793 create mode 100644 results/classifier/105/assembly/1850000 create mode 100644 results/classifier/105/assembly/1852196 create mode 100644 results/classifier/105/assembly/1862167 create mode 100644 results/classifier/105/assembly/1877136 create mode 100644 results/classifier/105/assembly/1882671 create mode 100644 results/classifier/105/assembly/1883784 create mode 100644 results/classifier/105/assembly/2013 create mode 100644 results/classifier/105/assembly/2180 create mode 100644 results/classifier/105/assembly/2186 create mode 100644 results/classifier/105/assembly/2303 create mode 100644 results/classifier/105/assembly/2463 create mode 100644 results/classifier/105/assembly/2677 create mode 100644 results/classifier/105/assembly/2871 create mode 100644 results/classifier/105/assembly/494 create mode 100644 results/classifier/105/assembly/536 create mode 100644 results/classifier/105/assembly/710 create mode 100644 results/classifier/105/assembly/811683 create mode 100644 results/classifier/105/assembly/884401 create mode 100644 results/classifier/105/assembly/904 create mode 100644 results/classifier/105/assembly/968 create mode 100644 results/classifier/105/boot/1018 
create mode 100644 results/classifier/105/boot/1021649 create mode 100644 results/classifier/105/boot/1026176 create mode 100644 results/classifier/105/boot/1055090 create mode 100644 results/classifier/105/boot/1074 create mode 100644 results/classifier/105/boot/1089006 create mode 100644 results/classifier/105/boot/1101 create mode 100644 results/classifier/105/boot/1119281 create mode 100644 results/classifier/105/boot/1129957 create mode 100644 results/classifier/105/boot/1131 create mode 100644 results/classifier/105/boot/1212402 create mode 100644 results/classifier/105/boot/1221797 create mode 100644 results/classifier/105/boot/1256548 create mode 100644 results/classifier/105/boot/1273944 create mode 100644 results/classifier/105/boot/1280 create mode 100644 results/classifier/105/boot/1285508 create mode 100644 results/classifier/105/boot/1289898 create mode 100644 results/classifier/105/boot/1290558 create mode 100644 results/classifier/105/boot/1314667 create mode 100644 results/classifier/105/boot/1320 create mode 100644 results/classifier/105/boot/1348 create mode 100644 results/classifier/105/boot/1426472 create mode 100644 results/classifier/105/boot/1481750 create mode 100644 results/classifier/105/boot/1488212 create mode 100644 results/classifier/105/boot/1505062 create mode 100644 results/classifier/105/boot/1516 create mode 100644 results/classifier/105/boot/1522 create mode 100644 results/classifier/105/boot/1534978 create mode 100644 results/classifier/105/boot/1587535 create mode 100644 results/classifier/105/boot/1589 create mode 100644 results/classifier/105/boot/1589153 create mode 100644 results/classifier/105/boot/1589257 create mode 100644 results/classifier/105/boot/1595 create mode 100644 results/classifier/105/boot/1605045 create mode 100644 results/classifier/105/boot/1624726 create mode 100644 results/classifier/105/boot/1638 create mode 100644 results/classifier/105/boot/1652286 create mode 100644 results/classifier/105/boot/1688231 create mode 100644 results/classifier/105/boot/1689 create mode 100644 results/classifier/105/boot/1694808 create mode 100644 results/classifier/105/boot/1696 create mode 100644 results/classifier/105/boot/1718719 create mode 100644 results/classifier/105/boot/1732177 create mode 100644 results/classifier/105/boot/1734474 create mode 100644 results/classifier/105/boot/1745 create mode 100644 results/classifier/105/boot/1748296 create mode 100644 results/classifier/105/boot/1753314 create mode 100644 results/classifier/105/boot/1754656 create mode 100644 results/classifier/105/boot/1756538 create mode 100644 results/classifier/105/boot/1797262 create mode 100644 results/classifier/105/boot/1811 create mode 100644 results/classifier/105/boot/1823998 create mode 100644 results/classifier/105/boot/1826 create mode 100644 results/classifier/105/boot/1829498 create mode 100644 results/classifier/105/boot/1831115 create mode 100644 results/classifier/105/boot/1835694 create mode 100644 results/classifier/105/boot/1836136 create mode 100644 results/classifier/105/boot/1838390 create mode 100644 results/classifier/105/boot/1838658 create mode 100644 results/classifier/105/boot/1840920 create mode 100644 results/classifier/105/boot/1853429 create mode 100644 results/classifier/105/boot/1859106 create mode 100644 results/classifier/105/boot/1859254 create mode 100644 results/classifier/105/boot/1859916 create mode 100644 results/classifier/105/boot/1860742 create mode 100644 results/classifier/105/boot/1862110 create mode 100644 
results/classifier/105/boot/1862619 create mode 100644 results/classifier/105/boot/1863508 create mode 100644 results/classifier/105/boot/1872644 create mode 100644 results/classifier/105/boot/1873338 create mode 100644 results/classifier/105/boot/1874264 create mode 100644 results/classifier/105/boot/1879590 create mode 100644 results/classifier/105/boot/1883593 create mode 100644 results/classifier/105/boot/1888417 create mode 100644 results/classifier/105/boot/1890290 create mode 100644 results/classifier/105/boot/1896754 create mode 100644 results/classifier/105/boot/1906181 create mode 100644 results/classifier/105/boot/1910 create mode 100644 results/classifier/105/boot/1914117 create mode 100644 results/classifier/105/boot/1921280 create mode 100644 results/classifier/105/boot/2034 create mode 100644 results/classifier/105/boot/2183 create mode 100644 results/classifier/105/boot/2193 create mode 100644 results/classifier/105/boot/2212 create mode 100644 results/classifier/105/boot/2337 create mode 100644 results/classifier/105/boot/2343 create mode 100644 results/classifier/105/boot/2360 create mode 100644 results/classifier/105/boot/2400 create mode 100644 results/classifier/105/boot/2557 create mode 100644 results/classifier/105/boot/2585 create mode 100644 results/classifier/105/boot/2620 create mode 100644 results/classifier/105/boot/2699 create mode 100644 results/classifier/105/boot/2705 create mode 100644 results/classifier/105/boot/2739 create mode 100644 results/classifier/105/boot/2754 create mode 100644 results/classifier/105/boot/2782 create mode 100644 results/classifier/105/boot/2788 create mode 100644 results/classifier/105/boot/2810 create mode 100644 results/classifier/105/boot/2863 create mode 100644 results/classifier/105/boot/2957 create mode 100644 results/classifier/105/boot/2959 create mode 100644 results/classifier/105/boot/2961 create mode 100644 results/classifier/105/boot/2984 create mode 100644 results/classifier/105/boot/436 create mode 100644 results/classifier/105/boot/475 create mode 100644 results/classifier/105/boot/499 create mode 100644 results/classifier/105/boot/51610399 create mode 100644 results/classifier/105/boot/586175 create mode 100644 results/classifier/105/boot/587 create mode 100644 results/classifier/105/boot/60339453 create mode 100644 results/classifier/105/boot/622 create mode 100644 results/classifier/105/boot/627982 create mode 100644 results/classifier/105/boot/660060 create mode 100644 results/classifier/105/boot/669 create mode 100644 results/classifier/105/boot/688052 create mode 100644 results/classifier/105/boot/692570 create mode 100644 results/classifier/105/boot/700276 create mode 100644 results/classifier/105/boot/708 create mode 100644 results/classifier/105/boot/744856 create mode 100644 results/classifier/105/boot/786 create mode 100644 results/classifier/105/boot/797 create mode 100644 results/classifier/105/boot/808737 create mode 100644 results/classifier/105/boot/822408 create mode 100644 results/classifier/105/boot/830833 create mode 100644 results/classifier/105/boot/836 create mode 100644 results/classifier/105/boot/87 create mode 100644 results/classifier/105/boot/886 create mode 100644 results/classifier/105/boot/888016 create mode 100644 results/classifier/105/boot/899961 create mode 100644 results/classifier/105/boot/907 create mode 100644 results/classifier/105/boot/973 create mode 100644 results/classifier/105/boot/985 create mode 100644 results/classifier/105/boot/997 create mode 100644 
results/classifier/105/device/1001 create mode 100644 results/classifier/105/device/1006 create mode 100644 results/classifier/105/device/101 create mode 100644 results/classifier/105/device/1013 create mode 100644 results/classifier/105/device/1014 create mode 100644 results/classifier/105/device/1017793 create mode 100644 results/classifier/105/device/102 create mode 100644 results/classifier/105/device/1020309 create mode 100644 results/classifier/105/device/1021 create mode 100644 results/classifier/105/device/1024 create mode 100644 results/classifier/105/device/1026 create mode 100644 results/classifier/105/device/103 create mode 100644 results/classifier/105/device/1032 create mode 100644 results/classifier/105/device/1033 create mode 100644 results/classifier/105/device/1034980 create mode 100644 results/classifier/105/device/1039 create mode 100644 results/classifier/105/device/1042 create mode 100644 results/classifier/105/device/1043 create mode 100644 results/classifier/105/device/1044 create mode 100644 results/classifier/105/device/1047999 create mode 100644 results/classifier/105/device/1049 create mode 100644 results/classifier/105/device/1050823 create mode 100644 results/classifier/105/device/1054812 create mode 100644 results/classifier/105/device/1056 create mode 100644 results/classifier/105/device/106 create mode 100644 results/classifier/105/device/1066 create mode 100644 results/classifier/105/device/107 create mode 100644 results/classifier/105/device/1070 create mode 100644 results/classifier/105/device/1070762 create mode 100644 results/classifier/105/device/1072 create mode 100644 results/classifier/105/device/1073 create mode 100644 results/classifier/105/device/1076 create mode 100644 results/classifier/105/device/1077 create mode 100644 results/classifier/105/device/1078 create mode 100644 results/classifier/105/device/1079 create mode 100644 results/classifier/105/device/1080 create mode 100644 results/classifier/105/device/1083 create mode 100644 results/classifier/105/device/1087 create mode 100644 results/classifier/105/device/1088 create mode 100644 results/classifier/105/device/1090558 create mode 100644 results/classifier/105/device/1090602 create mode 100644 results/classifier/105/device/1091 create mode 100644 results/classifier/105/device/1094950 create mode 100644 results/classifier/105/device/1097 create mode 100644 results/classifier/105/device/1099 create mode 100644 results/classifier/105/device/1103 create mode 100644 results/classifier/105/device/1104 create mode 100644 results/classifier/105/device/1105 create mode 100644 results/classifier/105/device/1106 create mode 100644 results/classifier/105/device/1108 create mode 100644 results/classifier/105/device/1110 create mode 100644 results/classifier/105/device/1112 create mode 100644 results/classifier/105/device/1114 create mode 100644 results/classifier/105/device/112 create mode 100644 results/classifier/105/device/1124 create mode 100644 results/classifier/105/device/1125 create mode 100644 results/classifier/105/device/1127053 create mode 100644 results/classifier/105/device/1130 create mode 100644 results/classifier/105/device/1132 create mode 100644 results/classifier/105/device/1133 create mode 100644 results/classifier/105/device/1133668 create mode 100644 results/classifier/105/device/1134 create mode 100644 results/classifier/105/device/1135 create mode 100644 results/classifier/105/device/1137 create mode 100644 results/classifier/105/device/114 create mode 100644 
results/classifier/105/device/1140 create mode 100644 results/classifier/105/device/1149 create mode 100644 results/classifier/105/device/115 create mode 100644 results/classifier/105/device/1154 create mode 100644 results/classifier/105/device/1155677 create mode 100644 results/classifier/105/device/116 create mode 100644 results/classifier/105/device/1160 create mode 100644 results/classifier/105/device/1163474 create mode 100644 results/classifier/105/device/1164 create mode 100644 results/classifier/105/device/1165 create mode 100644 results/classifier/105/device/1165383 create mode 100644 results/classifier/105/device/1167 create mode 100644 results/classifier/105/device/117 create mode 100644 results/classifier/105/device/1171 create mode 100644 results/classifier/105/device/118 create mode 100644 results/classifier/105/device/1181 create mode 100644 results/classifier/105/device/1181354 create mode 100644 results/classifier/105/device/1186 create mode 100644 results/classifier/105/device/1187 create mode 100644 results/classifier/105/device/1187529 create mode 100644 results/classifier/105/device/1188018 create mode 100644 results/classifier/105/device/119 create mode 100644 results/classifier/105/device/1190 create mode 100644 results/classifier/105/device/1190525 create mode 100644 results/classifier/105/device/1191 create mode 100644 results/classifier/105/device/1193 create mode 100644 results/classifier/105/device/1193564 create mode 100644 results/classifier/105/device/1194954 create mode 100644 results/classifier/105/device/1196 create mode 100644 results/classifier/105/device/1196426 create mode 100644 results/classifier/105/device/1198350 create mode 100644 results/classifier/105/device/120 create mode 100644 results/classifier/105/device/1200212 create mode 100644 results/classifier/105/device/1206 create mode 100644 results/classifier/105/device/1207228 create mode 100644 results/classifier/105/device/1208 create mode 100644 results/classifier/105/device/1209 create mode 100644 results/classifier/105/device/121 create mode 100644 results/classifier/105/device/1210 create mode 100644 results/classifier/105/device/1210212 create mode 100644 results/classifier/105/device/1211 create mode 100644 results/classifier/105/device/1213 create mode 100644 results/classifier/105/device/1214884 create mode 100644 results/classifier/105/device/122 create mode 100644 results/classifier/105/device/1221 create mode 100644 results/classifier/105/device/1224414 create mode 100644 results/classifier/105/device/1225 create mode 100644 results/classifier/105/device/1226 create mode 100644 results/classifier/105/device/1227 create mode 100644 results/classifier/105/device/1228 create mode 100644 results/classifier/105/device/1229 create mode 100644 results/classifier/105/device/123 create mode 100644 results/classifier/105/device/1230 create mode 100644 results/classifier/105/device/1232 create mode 100644 results/classifier/105/device/1233 create mode 100644 results/classifier/105/device/1234 create mode 100644 results/classifier/105/device/1237 create mode 100644 results/classifier/105/device/1237625 create mode 100644 results/classifier/105/device/1241569 create mode 100644 results/classifier/105/device/1242 create mode 100644 results/classifier/105/device/1245 create mode 100644 results/classifier/105/device/1246 create mode 100644 results/classifier/105/device/1248469 create mode 100644 results/classifier/105/device/125 create mode 100644 results/classifier/105/device/1250 create mode 
100644 results/classifier/105/device/1252010 create mode 100644 results/classifier/105/device/1253 create mode 100644 results/classifier/105/device/1254443 create mode 100644 results/classifier/105/device/1258 create mode 100644 results/classifier/105/device/1259 create mode 100644 results/classifier/105/device/126 create mode 100644 results/classifier/105/device/1260 create mode 100644 results/classifier/105/device/1261743 create mode 100644 results/classifier/105/device/1262 create mode 100644 results/classifier/105/device/1263 create mode 100644 results/classifier/105/device/1263747 create mode 100644 results/classifier/105/device/1265 create mode 100644 results/classifier/105/device/1267520 create mode 100644 results/classifier/105/device/1268 create mode 100644 results/classifier/105/device/1273 create mode 100644 results/classifier/105/device/1274170 create mode 100644 results/classifier/105/device/1275 create mode 100644 results/classifier/105/device/1279257 create mode 100644 results/classifier/105/device/1280521 create mode 100644 results/classifier/105/device/1281 create mode 100644 results/classifier/105/device/1282 create mode 100644 results/classifier/105/device/1284 create mode 100644 results/classifier/105/device/1284090 create mode 100644 results/classifier/105/device/1287 create mode 100644 results/classifier/105/device/1289 create mode 100644 results/classifier/105/device/1290 create mode 100644 results/classifier/105/device/1292 create mode 100644 results/classifier/105/device/1293 create mode 100644 results/classifier/105/device/1294 create mode 100644 results/classifier/105/device/1296882 create mode 100644 results/classifier/105/device/1297218 create mode 100644 results/classifier/105/device/1298 create mode 100644 results/classifier/105/device/1300 create mode 100644 results/classifier/105/device/1300021 create mode 100644 results/classifier/105/device/1302 create mode 100644 results/classifier/105/device/1303 create mode 100644 results/classifier/105/device/1305 create mode 100644 results/classifier/105/device/1308 create mode 100644 results/classifier/105/device/131 create mode 100644 results/classifier/105/device/1311 create mode 100644 results/classifier/105/device/1312 create mode 100644 results/classifier/105/device/1313816 create mode 100644 results/classifier/105/device/1314293 create mode 100644 results/classifier/105/device/1315 create mode 100644 results/classifier/105/device/1315747 create mode 100644 results/classifier/105/device/1316 create mode 100644 results/classifier/105/device/1318 create mode 100644 results/classifier/105/device/1318746 create mode 100644 results/classifier/105/device/1319493 create mode 100644 results/classifier/105/device/132 create mode 100644 results/classifier/105/device/1320968 create mode 100644 results/classifier/105/device/1326986 create mode 100644 results/classifier/105/device/133 create mode 100644 results/classifier/105/device/1331334 create mode 100644 results/classifier/105/device/1332234 create mode 100644 results/classifier/105/device/1333216 create mode 100644 results/classifier/105/device/1333688 create mode 100644 results/classifier/105/device/1334397 create mode 100644 results/classifier/105/device/1335 create mode 100644 results/classifier/105/device/1336123 create mode 100644 results/classifier/105/device/1336192 create mode 100644 results/classifier/105/device/1336794 create mode 100644 results/classifier/105/device/134 create mode 100644 results/classifier/105/device/1342 create mode 100644 
results/classifier/105/device/1346 create mode 100644 results/classifier/105/device/135 create mode 100644 results/classifier/105/device/1352 create mode 100644 results/classifier/105/device/1353346 create mode 100644 results/classifier/105/device/1354 create mode 100644 results/classifier/105/device/1354167 create mode 100644 results/classifier/105/device/1357 create mode 100644 results/classifier/105/device/1357175 create mode 100644 results/classifier/105/device/1357206 create mode 100644 results/classifier/105/device/1357440 create mode 100644 results/classifier/105/device/1358287 create mode 100644 results/classifier/105/device/1358722 create mode 100644 results/classifier/105/device/1359394 create mode 100644 results/classifier/105/device/1363 create mode 100644 results/classifier/105/device/1366 create mode 100644 results/classifier/105/device/1368791 create mode 100644 results/classifier/105/device/1369 create mode 100644 results/classifier/105/device/137 create mode 100644 results/classifier/105/device/1377163 create mode 100644 results/classifier/105/device/1379 create mode 100644 results/classifier/105/device/1382477 create mode 100644 results/classifier/105/device/1384 create mode 100644 results/classifier/105/device/1384892 create mode 100644 results/classifier/105/device/1385934 create mode 100644 results/classifier/105/device/1386478 create mode 100644 results/classifier/105/device/1387 create mode 100644 results/classifier/105/device/1388 create mode 100644 results/classifier/105/device/1390 create mode 100644 results/classifier/105/device/1391 create mode 100644 results/classifier/105/device/1392 create mode 100644 results/classifier/105/device/140 create mode 100644 results/classifier/105/device/1403 create mode 100644 results/classifier/105/device/1406 create mode 100644 results/classifier/105/device/1413 create mode 100644 results/classifier/105/device/142 create mode 100644 results/classifier/105/device/1423528 create mode 100644 results/classifier/105/device/1423668 create mode 100644 results/classifier/105/device/1426 create mode 100644 results/classifier/105/device/1434 create mode 100644 results/classifier/105/device/1437970 create mode 100644 results/classifier/105/device/1438 create mode 100644 results/classifier/105/device/144 create mode 100644 results/classifier/105/device/1443 create mode 100644 results/classifier/105/device/1445633 create mode 100644 results/classifier/105/device/1447 create mode 100644 results/classifier/105/device/14488057 create mode 100644 results/classifier/105/device/145 create mode 100644 results/classifier/105/device/1450 create mode 100644 results/classifier/105/device/1450891 create mode 100644 results/classifier/105/device/1452062 create mode 100644 results/classifier/105/device/1452742 create mode 100644 results/classifier/105/device/1453025 create mode 100644 results/classifier/105/device/1456 create mode 100644 results/classifier/105/device/1459 create mode 100644 results/classifier/105/device/1459622 create mode 100644 results/classifier/105/device/1461918 create mode 100644 results/classifier/105/device/1462949 create mode 100644 results/classifier/105/device/1463463 create mode 100644 results/classifier/105/device/1465 create mode 100644 results/classifier/105/device/1466 create mode 100644 results/classifier/105/device/1469978 create mode 100644 results/classifier/105/device/147 create mode 100644 results/classifier/105/device/1476 create mode 100644 results/classifier/105/device/1476183 create mode 100644 
results/classifier/105/device/1476800 create mode 100644 results/classifier/105/device/1477538 create mode 100644 results/classifier/105/device/1478 create mode 100644 results/classifier/105/device/148 create mode 100644 results/classifier/105/device/1480562 create mode 100644 results/classifier/105/device/1481 create mode 100644 results/classifier/105/device/1481654 create mode 100644 results/classifier/105/device/1481990 create mode 100644 results/classifier/105/device/1483 create mode 100644 results/classifier/105/device/1485 create mode 100644 results/classifier/105/device/1485010 create mode 100644 results/classifier/105/device/1486 create mode 100644 results/classifier/105/device/1487 create mode 100644 results/classifier/105/device/1488363 create mode 100644 results/classifier/105/device/149 create mode 100644 results/classifier/105/device/1491 create mode 100644 results/classifier/105/device/1492 create mode 100644 results/classifier/105/device/1497 create mode 100644 results/classifier/105/device/150 create mode 100644 results/classifier/105/device/1501 create mode 100644 results/classifier/105/device/1502 create mode 100644 results/classifier/105/device/1502613 create mode 100644 results/classifier/105/device/1503 create mode 100644 results/classifier/105/device/1504528 create mode 100644 results/classifier/105/device/1505652 create mode 100644 results/classifier/105/device/1506 create mode 100644 results/classifier/105/device/1507 create mode 100644 results/classifier/105/device/1509336 create mode 100644 results/classifier/105/device/1512 create mode 100644 results/classifier/105/device/1513234 create mode 100644 results/classifier/105/device/1515 create mode 100644 results/classifier/105/device/1517 create mode 100644 results/classifier/105/device/1519 create mode 100644 results/classifier/105/device/1521 create mode 100644 results/classifier/105/device/1523 create mode 100644 results/classifier/105/device/1523246 create mode 100644 results/classifier/105/device/1523811 create mode 100644 results/classifier/105/device/1524 create mode 100644 results/classifier/105/device/1528 create mode 100644 results/classifier/105/device/153 create mode 100644 results/classifier/105/device/1530246 create mode 100644 results/classifier/105/device/1531352 create mode 100644 results/classifier/105/device/1533141 create mode 100644 results/classifier/105/device/1533848 create mode 100644 results/classifier/105/device/1535497 create mode 100644 results/classifier/105/device/1538 create mode 100644 results/classifier/105/device/1539 create mode 100644 results/classifier/105/device/1543057 create mode 100644 results/classifier/105/device/1546 create mode 100644 results/classifier/105/device/1547 create mode 100644 results/classifier/105/device/1548471 create mode 100644 results/classifier/105/device/155 create mode 100644 results/classifier/105/device/1550503 create mode 100644 results/classifier/105/device/1553760 create mode 100644 results/classifier/105/device/1555 create mode 100644 results/classifier/105/device/1557033 create mode 100644 results/classifier/105/device/1558 create mode 100644 results/classifier/105/device/1563 create mode 100644 results/classifier/105/device/1563612 create mode 100644 results/classifier/105/device/1563931 create mode 100644 results/classifier/105/device/1564 create mode 100644 results/classifier/105/device/1568107 create mode 100644 results/classifier/105/device/1568356 create mode 100644 results/classifier/105/device/1568589 create mode 100644 
results/classifier/105/device/157 create mode 100644 results/classifier/105/device/1572 create mode 100644 results/classifier/105/device/1572959 create mode 100644 results/classifier/105/device/1574 create mode 100644 results/classifier/105/device/1575607 create mode 100644 results/classifier/105/device/1577937 create mode 100644 results/classifier/105/device/1578 create mode 100644 results/classifier/105/device/158 create mode 100644 results/classifier/105/device/1581308 create mode 100644 results/classifier/105/device/1581976 create mode 100644 results/classifier/105/device/1583420 create mode 100644 results/classifier/105/device/1583421 create mode 100644 results/classifier/105/device/1583784 create mode 100644 results/classifier/105/device/1585840 create mode 100644 results/classifier/105/device/1586613 create mode 100644 results/classifier/105/device/1587065 create mode 100644 results/classifier/105/device/1587970 create mode 100644 results/classifier/105/device/1588473 create mode 100644 results/classifier/105/device/1590796 create mode 100644 results/classifier/105/device/1591 create mode 100644 results/classifier/105/device/1592351 create mode 100644 results/classifier/105/device/1592590 create mode 100644 results/classifier/105/device/1594239 create mode 100644 results/classifier/105/device/1594861 create mode 100644 results/classifier/105/device/1596204 create mode 100644 results/classifier/105/device/1596579 create mode 100644 results/classifier/105/device/1597138 create mode 100644 results/classifier/105/device/1601 create mode 100644 results/classifier/105/device/1602 create mode 100644 results/classifier/105/device/1603779 create mode 100644 results/classifier/105/device/1603785 create mode 100644 results/classifier/105/device/161 create mode 100644 results/classifier/105/device/1610 create mode 100644 results/classifier/105/device/1616 create mode 100644 results/classifier/105/device/1616706 create mode 100644 results/classifier/105/device/1618 create mode 100644 results/classifier/105/device/1619438 create mode 100644 results/classifier/105/device/1619896 create mode 100644 results/classifier/105/device/162 create mode 100644 results/classifier/105/device/1620660 create mode 100644 results/classifier/105/device/1622 create mode 100644 results/classifier/105/device/1623998 create mode 100644 results/classifier/105/device/1629618 create mode 100644 results/classifier/105/device/1630527 create mode 100644 results/classifier/105/device/1630723 create mode 100644 results/classifier/105/device/1631 create mode 100644 results/classifier/105/device/1631625 create mode 100644 results/classifier/105/device/1634069 create mode 100644 results/classifier/105/device/1634852 create mode 100644 results/classifier/105/device/1635695 create mode 100644 results/classifier/105/device/1637693 create mode 100644 results/classifier/105/device/1637974 create mode 100644 results/classifier/105/device/1639 create mode 100644 results/classifier/105/device/1639791 create mode 100644 results/classifier/105/device/164 create mode 100644 results/classifier/105/device/1643 create mode 100644 results/classifier/105/device/1643342 create mode 100644 results/classifier/105/device/1645287 create mode 100644 results/classifier/105/device/1647 create mode 100644 results/classifier/105/device/1649040 create mode 100644 results/classifier/105/device/165 create mode 100644 results/classifier/105/device/1651 create mode 100644 results/classifier/105/device/1652459 create mode 100644 
results/classifier/105/device/1653577 create mode 100644 results/classifier/105/device/1654137 create mode 100644 results/classifier/105/device/1655 create mode 100644 results/classifier/105/device/1656710 create mode 100644 results/classifier/105/device/1657010 create mode 100644 results/classifier/105/device/1658506 create mode 100644 results/classifier/105/device/1660 create mode 100644 results/classifier/105/device/1660035 create mode 100644 results/classifier/105/device/1661 create mode 100644 results/classifier/105/device/1661758 create mode 100644 results/classifier/105/device/1663 create mode 100644 results/classifier/105/device/1664 create mode 100644 results/classifier/105/device/1665 create mode 100644 results/classifier/105/device/1668041 create mode 100644 results/classifier/105/device/1669 create mode 100644 results/classifier/105/device/167 create mode 100644 results/classifier/105/device/1670 create mode 100644 results/classifier/105/device/1670509 create mode 100644 results/classifier/105/device/1672 create mode 100644 results/classifier/105/device/1673957 create mode 100644 results/classifier/105/device/1675 create mode 100644 results/classifier/105/device/1675332 create mode 100644 results/classifier/105/device/1675333 create mode 100644 results/classifier/105/device/1675549 create mode 100644 results/classifier/105/device/168 create mode 100644 results/classifier/105/device/1681404 create mode 100644 results/classifier/105/device/1682 create mode 100644 results/classifier/105/device/1685526 create mode 100644 results/classifier/105/device/1686364 create mode 100644 results/classifier/105/device/1687270 create mode 100644 results/classifier/105/device/1687309 create mode 100644 results/classifier/105/device/1687578 create mode 100644 results/classifier/105/device/1690 create mode 100644 results/classifier/105/device/1691109 create mode 100644 results/classifier/105/device/1696773 create mode 100644 results/classifier/105/device/1697 create mode 100644 results/classifier/105/device/1698 create mode 100644 results/classifier/105/device/1699628 create mode 100644 results/classifier/105/device/1703147 create mode 100644 results/classifier/105/device/1704186 create mode 100644 results/classifier/105/device/1706825 create mode 100644 results/classifier/105/device/1707587 create mode 100644 results/classifier/105/device/171 create mode 100644 results/classifier/105/device/1710 create mode 100644 results/classifier/105/device/1711 create mode 100644 results/classifier/105/device/1712 create mode 100644 results/classifier/105/device/1712027 create mode 100644 results/classifier/105/device/1712818 create mode 100644 results/classifier/105/device/1713434 create mode 100644 results/classifier/105/device/1714538 create mode 100644 results/classifier/105/device/1714750 create mode 100644 results/classifier/105/device/1718 create mode 100644 results/classifier/105/device/172 create mode 100644 results/classifier/105/device/1720969 create mode 100644 results/classifier/105/device/1723984 create mode 100644 results/classifier/105/device/1724 create mode 100644 results/classifier/105/device/1726733 create mode 100644 results/classifier/105/device/173 create mode 100644 results/classifier/105/device/1730099 create mode 100644 results/classifier/105/device/1731347 create mode 100644 results/classifier/105/device/1732981 create mode 100644 results/classifier/105/device/1733 create mode 100644 results/classifier/105/device/1733720 create mode 100644 results/classifier/105/device/1734792 
create mode 100644 results/classifier/105/device/1735384 create mode 100644 results/classifier/105/device/1735576 create mode 100644 results/classifier/105/device/1735653 create mode 100644 results/classifier/105/device/1737194 create mode 100644 results/classifier/105/device/1737882 create mode 100644 results/classifier/105/device/1737883 create mode 100644 results/classifier/105/device/1738771 create mode 100644 results/classifier/105/device/174 create mode 100644 results/classifier/105/device/1741718 create mode 100644 results/classifier/105/device/1742 create mode 100644 results/classifier/105/device/1743 create mode 100644 results/classifier/105/device/1744654 create mode 100644 results/classifier/105/device/1745895 create mode 100644 results/classifier/105/device/1746 create mode 100644 results/classifier/105/device/1747 create mode 100644 results/classifier/105/device/1747393 create mode 100644 results/classifier/105/device/1748 create mode 100644 results/classifier/105/device/1749 create mode 100644 results/classifier/105/device/175 create mode 100644 results/classifier/105/device/1754 create mode 100644 results/classifier/105/device/1754597 create mode 100644 results/classifier/105/device/1757323 create mode 100644 results/classifier/105/device/1758091 create mode 100644 results/classifier/105/device/1759 create mode 100644 results/classifier/105/device/1759264 create mode 100644 results/classifier/105/device/1759337 create mode 100644 results/classifier/105/device/1759492 create mode 100644 results/classifier/105/device/176 create mode 100644 results/classifier/105/device/1760176 create mode 100644 results/classifier/105/device/1761 create mode 100644 results/classifier/105/device/1761535 create mode 100644 results/classifier/105/device/1764 create mode 100644 results/classifier/105/device/1766 create mode 100644 results/classifier/105/device/1767 create mode 100644 results/classifier/105/device/1769 create mode 100644 results/classifier/105/device/1769067 create mode 100644 results/classifier/105/device/1772 create mode 100644 results/classifier/105/device/1773 create mode 100644 results/classifier/105/device/1774830 create mode 100644 results/classifier/105/device/1776 create mode 100644 results/classifier/105/device/1776224 create mode 100644 results/classifier/105/device/1777226 create mode 100644 results/classifier/105/device/1777232 create mode 100644 results/classifier/105/device/1777235 create mode 100644 results/classifier/105/device/178 create mode 100644 results/classifier/105/device/1780815 create mode 100644 results/classifier/105/device/1784 create mode 100644 results/classifier/105/device/1784919 create mode 100644 results/classifier/105/device/1785 create mode 100644 results/classifier/105/device/1785902 create mode 100644 results/classifier/105/device/1785972 create mode 100644 results/classifier/105/device/1789 create mode 100644 results/classifier/105/device/179 create mode 100644 results/classifier/105/device/1790268 create mode 100644 results/classifier/105/device/1791763 create mode 100644 results/classifier/105/device/1793 create mode 100644 results/classifier/105/device/1793297 create mode 100644 results/classifier/105/device/1793539 create mode 100644 results/classifier/105/device/1793859 create mode 100644 results/classifier/105/device/1794 create mode 100644 results/classifier/105/device/1794187 create mode 100644 results/classifier/105/device/1794939 create mode 100644 results/classifier/105/device/1795 create mode 100644 
results/classifier/105/device/1797 create mode 100644 results/classifier/105/device/1798 create mode 100644 results/classifier/105/device/1799766 create mode 100644 results/classifier/105/device/180 create mode 100644 results/classifier/105/device/1800088 create mode 100644 results/classifier/105/device/1800156 create mode 100644 results/classifier/105/device/1802 create mode 100644 results/classifier/105/device/1802150 create mode 100644 results/classifier/105/device/1804 create mode 100644 results/classifier/105/device/1804678 create mode 100644 results/classifier/105/device/1805697 create mode 100644 results/classifier/105/device/1806243 create mode 100644 results/classifier/105/device/1807073 create mode 100644 results/classifier/105/device/1808565 create mode 100644 results/classifier/105/device/1809 create mode 100644 results/classifier/105/device/181 create mode 100644 results/classifier/105/device/1810343 create mode 100644 results/classifier/105/device/1810590 create mode 100644 results/classifier/105/device/1810956 create mode 100644 results/classifier/105/device/1811499 create mode 100644 results/classifier/105/device/1811720 create mode 100644 results/classifier/105/device/1811758 create mode 100644 results/classifier/105/device/1811888 create mode 100644 results/classifier/105/device/1811916 create mode 100644 results/classifier/105/device/1812451 create mode 100644 results/classifier/105/device/1813045 create mode 100644 results/classifier/105/device/1813307 create mode 100644 results/classifier/105/device/1815263 create mode 100644 results/classifier/105/device/1815445 create mode 100644 results/classifier/105/device/1815889 create mode 100644 results/classifier/105/device/1816189 create mode 100644 results/classifier/105/device/1816805 create mode 100644 results/classifier/105/device/1817 create mode 100644 results/classifier/105/device/1818398 create mode 100644 results/classifier/105/device/1819 create mode 100644 results/classifier/105/device/1819343 create mode 100644 results/classifier/105/device/1819649 create mode 100644 results/classifier/105/device/182 create mode 100644 results/classifier/105/device/1820 create mode 100644 results/classifier/105/device/1821131 create mode 100644 results/classifier/105/device/1823831 create mode 100644 results/classifier/105/device/1824 create mode 100644 results/classifier/105/device/1824616 create mode 100644 results/classifier/105/device/1824744 create mode 100644 results/classifier/105/device/1824853 create mode 100644 results/classifier/105/device/1826200 create mode 100644 results/classifier/105/device/1826827 create mode 100644 results/classifier/105/device/1828 create mode 100644 results/classifier/105/device/183 create mode 100644 results/classifier/105/device/1831354 create mode 100644 results/classifier/105/device/1831362 create mode 100644 results/classifier/105/device/1831477 create mode 100644 results/classifier/105/device/1832 create mode 100644 results/classifier/105/device/1832916 create mode 100644 results/classifier/105/device/1835827 create mode 100644 results/classifier/105/device/1835839 create mode 100644 results/classifier/105/device/1836451 create mode 100644 results/classifier/105/device/1837049 create mode 100644 results/classifier/105/device/1837094 create mode 100644 results/classifier/105/device/1837851 create mode 100644 results/classifier/105/device/1838 create mode 100644 results/classifier/105/device/1838312 create mode 100644 results/classifier/105/device/1838465 create mode 100644 
results/classifier/105/device/1839 create mode 100644 results/classifier/105/device/184 create mode 100644 results/classifier/105/device/1840865 create mode 100644 results/classifier/105/device/1842787 create mode 100644 results/classifier/105/device/1843 create mode 100644 results/classifier/105/device/1843205 create mode 100644 results/classifier/105/device/1843711 create mode 100644 results/classifier/105/device/1843795 create mode 100644 results/classifier/105/device/1844644 create mode 100644 results/classifier/105/device/1844814 create mode 100644 results/classifier/105/device/1844817 create mode 100644 results/classifier/105/device/1846816 create mode 100644 results/classifier/105/device/1847 create mode 100644 results/classifier/105/device/1849 create mode 100644 results/classifier/105/device/1851 create mode 100644 results/classifier/105/device/1853 create mode 100644 results/classifier/105/device/1853781 create mode 100644 results/classifier/105/device/1853898 create mode 100644 results/classifier/105/device/1854 create mode 100644 results/classifier/105/device/1854204 create mode 100644 results/classifier/105/device/1854878 create mode 100644 results/classifier/105/device/1855002 create mode 100644 results/classifier/105/device/1857 create mode 100644 results/classifier/105/device/1857269 create mode 100644 results/classifier/105/device/1858461 create mode 100644 results/classifier/105/device/1858488 create mode 100644 results/classifier/105/device/1859 create mode 100644 results/classifier/105/device/1860053 create mode 100644 results/classifier/105/device/1860914 create mode 100644 results/classifier/105/device/1861341 create mode 100644 results/classifier/105/device/1861458 create mode 100644 results/classifier/105/device/1861468 create mode 100644 results/classifier/105/device/1861551 create mode 100644 results/classifier/105/device/1861605 create mode 100644 results/classifier/105/device/1863441 create mode 100644 results/classifier/105/device/1864 create mode 100644 results/classifier/105/device/1864814 create mode 100644 results/classifier/105/device/1865048 create mode 100644 results/classifier/105/device/1865188 create mode 100644 results/classifier/105/device/1865350 create mode 100644 results/classifier/105/device/1866 create mode 100644 results/classifier/105/device/1866577 create mode 100644 results/classifier/105/device/1866792 create mode 100644 results/classifier/105/device/1869241 create mode 100644 results/classifier/105/device/1869426 create mode 100644 results/classifier/105/device/1869497 create mode 100644 results/classifier/105/device/187 create mode 100644 results/classifier/105/device/1870039 create mode 100644 results/classifier/105/device/1871 create mode 100644 results/classifier/105/device/1871005 create mode 100644 results/classifier/105/device/1872 create mode 100644 results/classifier/105/device/1872113 create mode 100644 results/classifier/105/device/1873335 create mode 100644 results/classifier/105/device/1873341 create mode 100644 results/classifier/105/device/1874486 create mode 100644 results/classifier/105/device/1875080 create mode 100644 results/classifier/105/device/1876373 create mode 100644 results/classifier/105/device/1876678 create mode 100644 results/classifier/105/device/1877 create mode 100644 results/classifier/105/device/1877716 create mode 100644 results/classifier/105/device/1878627 create mode 100644 results/classifier/105/device/1878628 create mode 100644 results/classifier/105/device/188 create mode 100644 
results/classifier/105/device/1880 create mode 100644 results/classifier/105/device/1880822 create mode 100644 results/classifier/105/device/1881249 create mode 100644 results/classifier/105/device/1881645 create mode 100644 results/classifier/105/device/1882241 create mode 100644 results/classifier/105/device/1882350 create mode 100644 results/classifier/105/device/1882787 create mode 100644 results/classifier/105/device/1882851 create mode 100644 results/classifier/105/device/1884302 create mode 100644 results/classifier/105/device/1884982 create mode 100644 results/classifier/105/device/1885889 create mode 100644 results/classifier/105/device/1886076 create mode 100644 results/classifier/105/device/1886210 create mode 100644 results/classifier/105/device/1887 create mode 100644 results/classifier/105/device/1887820 create mode 100644 results/classifier/105/device/1888 create mode 100644 results/classifier/105/device/1888728 create mode 100644 results/classifier/105/device/1888971 create mode 100644 results/classifier/105/device/189 create mode 100644 results/classifier/105/device/1890157 create mode 100644 results/classifier/105/device/1890160 create mode 100644 results/classifier/105/device/1890775 create mode 100644 results/classifier/105/device/1891830 create mode 100644 results/classifier/105/device/1893040 create mode 100644 results/classifier/105/device/1893634 create mode 100644 results/classifier/105/device/1894818 create mode 100644 results/classifier/105/device/1895363 create mode 100644 results/classifier/105/device/1895895 create mode 100644 results/classifier/105/device/1896096 create mode 100644 results/classifier/105/device/1897680 create mode 100644 results/classifier/105/device/1898 create mode 100644 results/classifier/105/device/1900122 create mode 100644 results/classifier/105/device/1901068 create mode 100644 results/classifier/105/device/1901532 create mode 100644 results/classifier/105/device/1902262 create mode 100644 results/classifier/105/device/1902306 create mode 100644 results/classifier/105/device/1904490 create mode 100644 results/classifier/105/device/1906463 create mode 100644 results/classifier/105/device/1906608 create mode 100644 results/classifier/105/device/1906693 create mode 100644 results/classifier/105/device/1907042 create mode 100644 results/classifier/105/device/1909261 create mode 100644 results/classifier/105/device/1909418 create mode 100644 results/classifier/105/device/1911188 create mode 100644 results/classifier/105/device/1911797 create mode 100644 results/classifier/105/device/1912 create mode 100644 results/classifier/105/device/1912857 create mode 100644 results/classifier/105/device/1913344 create mode 100644 results/classifier/105/device/1913969 create mode 100644 results/classifier/105/device/1914 create mode 100644 results/classifier/105/device/1914667 create mode 100644 results/classifier/105/device/1916 create mode 100644 results/classifier/105/device/1917184 create mode 100644 results/classifier/105/device/192 create mode 100644 results/classifier/105/device/1920767 create mode 100644 results/classifier/105/device/1920784 create mode 100644 results/classifier/105/device/1921092 create mode 100644 results/classifier/105/device/1922102 create mode 100644 results/classifier/105/device/1922252 create mode 100644 results/classifier/105/device/1926111 create mode 100644 results/classifier/105/device/1926995 create mode 100644 results/classifier/105/device/1927408 create mode 100644 results/classifier/105/device/1927530 create mode 
100644 results/classifier/105/device/1929 create mode 100644 results/classifier/105/device/193 create mode 100644 results/classifier/105/device/1932 create mode 100644 results/classifier/105/device/1933 create mode 100644 results/classifier/105/device/1936977 create mode 100644 results/classifier/105/device/1939179 create mode 100644 results/classifier/105/device/1945 create mode 100644 results/classifier/105/device/1945540 create mode 100644 results/classifier/105/device/1947933 create mode 100644 results/classifier/105/device/1952448 create mode 100644 results/classifier/105/device/1959 create mode 100644 results/classifier/105/device/196 create mode 100644 results/classifier/105/device/1960 create mode 100644 results/classifier/105/device/1961 create mode 100644 results/classifier/105/device/1964 create mode 100644 results/classifier/105/device/1965 create mode 100644 results/classifier/105/device/1968 create mode 100644 results/classifier/105/device/1969 create mode 100644 results/classifier/105/device/197 create mode 100644 results/classifier/105/device/1973 create mode 100644 results/classifier/105/device/1974 create mode 100644 results/classifier/105/device/1979 create mode 100644 results/classifier/105/device/1980 create mode 100644 results/classifier/105/device/1984 create mode 100644 results/classifier/105/device/1985 create mode 100644 results/classifier/105/device/200 create mode 100644 results/classifier/105/device/2000 create mode 100644 results/classifier/105/device/2002 create mode 100644 results/classifier/105/device/201 create mode 100644 results/classifier/105/device/2011 create mode 100644 results/classifier/105/device/2014 create mode 100644 results/classifier/105/device/2018 create mode 100644 results/classifier/105/device/202 create mode 100644 results/classifier/105/device/2020 create mode 100644 results/classifier/105/device/2021 create mode 100644 results/classifier/105/device/2025586 create mode 100644 results/classifier/105/device/2026 create mode 100644 results/classifier/105/device/2027 create mode 100644 results/classifier/105/device/2028 create mode 100644 results/classifier/105/device/2032 create mode 100644 results/classifier/105/device/2033 create mode 100644 results/classifier/105/device/204 create mode 100644 results/classifier/105/device/2040 create mode 100644 results/classifier/105/device/2044 create mode 100644 results/classifier/105/device/2045 create mode 100644 results/classifier/105/device/2047 create mode 100644 results/classifier/105/device/2048 create mode 100644 results/classifier/105/device/2049 create mode 100644 results/classifier/105/device/205 create mode 100644 results/classifier/105/device/2051 create mode 100644 results/classifier/105/device/2055 create mode 100644 results/classifier/105/device/2065 create mode 100644 results/classifier/105/device/2066 create mode 100644 results/classifier/105/device/2067 create mode 100644 results/classifier/105/device/206818 create mode 100644 results/classifier/105/device/207 create mode 100644 results/classifier/105/device/2072 create mode 100644 results/classifier/105/device/2072564 create mode 100644 results/classifier/105/device/2079 create mode 100644 results/classifier/105/device/208 create mode 100644 results/classifier/105/device/2080 create mode 100644 results/classifier/105/device/2081 create mode 100644 results/classifier/105/device/2086 create mode 100644 results/classifier/105/device/2087 create mode 100644 results/classifier/105/device/2093 create mode 100644 
results/classifier/105/device/2095 create mode 100644 results/classifier/105/device/2096 create mode 100644 results/classifier/105/device/2097 create mode 100644 results/classifier/105/device/2098 create mode 100644 results/classifier/105/device/2107 create mode 100644 results/classifier/105/device/2108 create mode 100644 results/classifier/105/device/211 create mode 100644 results/classifier/105/device/2112 create mode 100644 results/classifier/105/device/2119 create mode 100644 results/classifier/105/device/212 create mode 100644 results/classifier/105/device/2122 create mode 100644 results/classifier/105/device/2124 create mode 100644 results/classifier/105/device/2125 create mode 100644 results/classifier/105/device/2126 create mode 100644 results/classifier/105/device/2128 create mode 100644 results/classifier/105/device/2129 create mode 100644 results/classifier/105/device/213 create mode 100644 results/classifier/105/device/2132 create mode 100644 results/classifier/105/device/2135 create mode 100644 results/classifier/105/device/2137 create mode 100644 results/classifier/105/device/2148 create mode 100644 results/classifier/105/device/215 create mode 100644 results/classifier/105/device/2152 create mode 100644 results/classifier/105/device/2153 create mode 100644 results/classifier/105/device/2158 create mode 100644 results/classifier/105/device/2161 create mode 100644 results/classifier/105/device/2162 create mode 100644 results/classifier/105/device/2164 create mode 100644 results/classifier/105/device/2166 create mode 100644 results/classifier/105/device/217 create mode 100644 results/classifier/105/device/2172 create mode 100644 results/classifier/105/device/2173 create mode 100644 results/classifier/105/device/2174 create mode 100644 results/classifier/105/device/2176 create mode 100644 results/classifier/105/device/2179 create mode 100644 results/classifier/105/device/2181 create mode 100644 results/classifier/105/device/2188 create mode 100644 results/classifier/105/device/219 create mode 100644 results/classifier/105/device/2192 create mode 100644 results/classifier/105/device/2195 create mode 100644 results/classifier/105/device/2196 create mode 100644 results/classifier/105/device/220 create mode 100644 results/classifier/105/device/2201 create mode 100644 results/classifier/105/device/2204 create mode 100644 results/classifier/105/device/2205 create mode 100644 results/classifier/105/device/221 create mode 100644 results/classifier/105/device/2211 create mode 100644 results/classifier/105/device/2213 create mode 100644 results/classifier/105/device/2215 create mode 100644 results/classifier/105/device/2218 create mode 100644 results/classifier/105/device/2219 create mode 100644 results/classifier/105/device/2221 create mode 100644 results/classifier/105/device/2222 create mode 100644 results/classifier/105/device/2229 create mode 100644 results/classifier/105/device/223 create mode 100644 results/classifier/105/device/2239 create mode 100644 results/classifier/105/device/224 create mode 100644 results/classifier/105/device/2241 create mode 100644 results/classifier/105/device/2243 create mode 100644 results/classifier/105/device/2245 create mode 100644 results/classifier/105/device/2246 create mode 100644 results/classifier/105/device/2247 create mode 100644 results/classifier/105/device/2249 create mode 100644 results/classifier/105/device/225 create mode 100644 results/classifier/105/device/226 create mode 100644 results/classifier/105/device/2266 create mode 100644 
results/classifier/105/device/2268 create mode 100644 results/classifier/105/device/2270 create mode 100644 results/classifier/105/device/2272 create mode 100644 results/classifier/105/device/2275 create mode 100644 results/classifier/105/device/2277 create mode 100644 results/classifier/105/device/2278 create mode 100644 results/classifier/105/device/2282 create mode 100644 results/classifier/105/device/2284 create mode 100644 results/classifier/105/device/2285 create mode 100644 results/classifier/105/device/2286 create mode 100644 results/classifier/105/device/2289 create mode 100644 results/classifier/105/device/2294 create mode 100644 results/classifier/105/device/2295 create mode 100644 results/classifier/105/device/2301 create mode 100644 results/classifier/105/device/2306 create mode 100644 results/classifier/105/device/2307 create mode 100644 results/classifier/105/device/2309 create mode 100644 results/classifier/105/device/231 create mode 100644 results/classifier/105/device/2310 create mode 100644 results/classifier/105/device/2312 create mode 100644 results/classifier/105/device/2314 create mode 100644 results/classifier/105/device/2320 create mode 100644 results/classifier/105/device/2322 create mode 100644 results/classifier/105/device/2327 create mode 100644 results/classifier/105/device/2329 create mode 100644 results/classifier/105/device/233 create mode 100644 results/classifier/105/device/2331 create mode 100644 results/classifier/105/device/2336 create mode 100644 results/classifier/105/device/2338 create mode 100644 results/classifier/105/device/2339 create mode 100644 results/classifier/105/device/2347 create mode 100644 results/classifier/105/device/2348 create mode 100644 results/classifier/105/device/2350 create mode 100644 results/classifier/105/device/2351 create mode 100644 results/classifier/105/device/2354 create mode 100644 results/classifier/105/device/2356 create mode 100644 results/classifier/105/device/2357 create mode 100644 results/classifier/105/device/2359 create mode 100644 results/classifier/105/device/236 create mode 100644 results/classifier/105/device/2362 create mode 100644 results/classifier/105/device/2368 create mode 100644 results/classifier/105/device/237 create mode 100644 results/classifier/105/device/2381 create mode 100644 results/classifier/105/device/2383 create mode 100644 results/classifier/105/device/2391 create mode 100644 results/classifier/105/device/2396 create mode 100644 results/classifier/105/device/240 create mode 100644 results/classifier/105/device/2406 create mode 100644 results/classifier/105/device/2416 create mode 100644 results/classifier/105/device/2417 create mode 100644 results/classifier/105/device/24190340 create mode 100644 results/classifier/105/device/242 create mode 100644 results/classifier/105/device/2426 create mode 100644 results/classifier/105/device/243 create mode 100644 results/classifier/105/device/2438 create mode 100644 results/classifier/105/device/2443 create mode 100644 results/classifier/105/device/245 create mode 100644 results/classifier/105/device/2454 create mode 100644 results/classifier/105/device/2456 create mode 100644 results/classifier/105/device/2458 create mode 100644 results/classifier/105/device/2464 create mode 100644 results/classifier/105/device/2465 create mode 100644 results/classifier/105/device/2468 create mode 100644 results/classifier/105/device/2469 create mode 100644 results/classifier/105/device/247 create mode 100644 results/classifier/105/device/2471 create mode 
100644 results/classifier/105/device/2472 create mode 100644 results/classifier/105/device/2473 create mode 100644 results/classifier/105/device/2475 create mode 100644 results/classifier/105/device/2477 create mode 100644 results/classifier/105/device/2479 create mode 100644 results/classifier/105/device/248 create mode 100644 results/classifier/105/device/2480 create mode 100644 results/classifier/105/device/24930826 create mode 100644 results/classifier/105/device/250 create mode 100644 results/classifier/105/device/2503 create mode 100644 results/classifier/105/device/2505 create mode 100644 results/classifier/105/device/2507 create mode 100644 results/classifier/105/device/2508 create mode 100644 results/classifier/105/device/251 create mode 100644 results/classifier/105/device/2516 create mode 100644 results/classifier/105/device/2517 create mode 100644 results/classifier/105/device/2521 create mode 100644 results/classifier/105/device/2527 create mode 100644 results/classifier/105/device/253 create mode 100644 results/classifier/105/device/2530 create mode 100644 results/classifier/105/device/2533 create mode 100644 results/classifier/105/device/2535 create mode 100644 results/classifier/105/device/2536 create mode 100644 results/classifier/105/device/2539 create mode 100644 results/classifier/105/device/254 create mode 100644 results/classifier/105/device/2541 create mode 100644 results/classifier/105/device/2542 create mode 100644 results/classifier/105/device/2544 create mode 100644 results/classifier/105/device/2545 create mode 100644 results/classifier/105/device/2546 create mode 100644 results/classifier/105/device/2547 create mode 100644 results/classifier/105/device/2549 create mode 100644 results/classifier/105/device/256 create mode 100644 results/classifier/105/device/2564 create mode 100644 results/classifier/105/device/2568 create mode 100644 results/classifier/105/device/257 create mode 100644 results/classifier/105/device/2572 create mode 100644 results/classifier/105/device/2575 create mode 100644 results/classifier/105/device/2579 create mode 100644 results/classifier/105/device/2586 create mode 100644 results/classifier/105/device/2587 create mode 100644 results/classifier/105/device/2588 create mode 100644 results/classifier/105/device/259 create mode 100644 results/classifier/105/device/2590 create mode 100644 results/classifier/105/device/2596 create mode 100644 results/classifier/105/device/2597 create mode 100644 results/classifier/105/device/260 create mode 100644 results/classifier/105/device/2605 create mode 100644 results/classifier/105/device/26095107 create mode 100644 results/classifier/105/device/261 create mode 100644 results/classifier/105/device/2613 create mode 100644 results/classifier/105/device/2615 create mode 100644 results/classifier/105/device/262 create mode 100644 results/classifier/105/device/2626 create mode 100644 results/classifier/105/device/2627 create mode 100644 results/classifier/105/device/2629 create mode 100644 results/classifier/105/device/2636 create mode 100644 results/classifier/105/device/2640 create mode 100644 results/classifier/105/device/265 create mode 100644 results/classifier/105/device/2651 create mode 100644 results/classifier/105/device/2652 create mode 100644 results/classifier/105/device/2653 create mode 100644 results/classifier/105/device/2654 create mode 100644 results/classifier/105/device/2659 create mode 100644 results/classifier/105/device/2660 create mode 100644 results/classifier/105/device/2661 create 
mode 100644 results/classifier/105/device/2663 create mode 100644 results/classifier/105/device/2664 create mode 100644 results/classifier/105/device/2665 create mode 100644 results/classifier/105/device/2666 create mode 100644 results/classifier/105/device/2679 create mode 100644 results/classifier/105/device/2681 create mode 100644 results/classifier/105/device/2682 create mode 100644 results/classifier/105/device/2689 create mode 100644 results/classifier/105/device/269 create mode 100644 results/classifier/105/device/2693 create mode 100644 results/classifier/105/device/2695 create mode 100644 results/classifier/105/device/2697 create mode 100644 results/classifier/105/device/2698 create mode 100644 results/classifier/105/device/270 create mode 100644 results/classifier/105/device/2700 create mode 100644 results/classifier/105/device/2701 create mode 100644 results/classifier/105/device/2703 create mode 100644 results/classifier/105/device/2707 create mode 100644 results/classifier/105/device/2708 create mode 100644 results/classifier/105/device/271 create mode 100644 results/classifier/105/device/2711 create mode 100644 results/classifier/105/device/2713 create mode 100644 results/classifier/105/device/2714 create mode 100644 results/classifier/105/device/2715 create mode 100644 results/classifier/105/device/2716 create mode 100644 results/classifier/105/device/272 create mode 100644 results/classifier/105/device/2721 create mode 100644 results/classifier/105/device/2724 create mode 100644 results/classifier/105/device/2725 create mode 100644 results/classifier/105/device/2726 create mode 100644 results/classifier/105/device/2733 create mode 100644 results/classifier/105/device/2734 create mode 100644 results/classifier/105/device/2735 create mode 100644 results/classifier/105/device/2737 create mode 100644 results/classifier/105/device/2741 create mode 100644 results/classifier/105/device/2743 create mode 100644 results/classifier/105/device/2751 create mode 100644 results/classifier/105/device/2752 create mode 100644 results/classifier/105/device/2759 create mode 100644 results/classifier/105/device/2762 create mode 100644 results/classifier/105/device/2763 create mode 100644 results/classifier/105/device/2765 create mode 100644 results/classifier/105/device/2769 create mode 100644 results/classifier/105/device/2777 create mode 100644 results/classifier/105/device/278 create mode 100644 results/classifier/105/device/2781 create mode 100644 results/classifier/105/device/279 create mode 100644 results/classifier/105/device/2794 create mode 100644 results/classifier/105/device/2796 create mode 100644 results/classifier/105/device/2797 create mode 100644 results/classifier/105/device/280 create mode 100644 results/classifier/105/device/2801 create mode 100644 results/classifier/105/device/2803 create mode 100644 results/classifier/105/device/2804 create mode 100644 results/classifier/105/device/2805 create mode 100644 results/classifier/105/device/2808 create mode 100644 results/classifier/105/device/281 create mode 100644 results/classifier/105/device/2812 create mode 100644 results/classifier/105/device/2813 create mode 100644 results/classifier/105/device/2815 create mode 100644 results/classifier/105/device/2824 create mode 100644 results/classifier/105/device/2825 create mode 100644 results/classifier/105/device/283 create mode 100644 results/classifier/105/device/2830 create mode 100644 results/classifier/105/device/2831 create mode 100644 results/classifier/105/device/2838 create 
mode 100644 results/classifier/105/device/2841 create mode 100644 results/classifier/105/device/2842 create mode 100644 results/classifier/105/device/2845 create mode 100644 results/classifier/105/device/2846 create mode 100644 results/classifier/105/device/2847 create mode 100644 results/classifier/105/device/285 create mode 100644 results/classifier/105/device/2850 create mode 100644 results/classifier/105/device/2858 create mode 100644 results/classifier/105/device/2859 create mode 100644 results/classifier/105/device/28596630 create mode 100644 results/classifier/105/device/286 create mode 100644 results/classifier/105/device/2869 create mode 100644 results/classifier/105/device/287 create mode 100644 results/classifier/105/device/2870 create mode 100644 results/classifier/105/device/2873 create mode 100644 results/classifier/105/device/2877 create mode 100644 results/classifier/105/device/2878 create mode 100644 results/classifier/105/device/2880 create mode 100644 results/classifier/105/device/2881 create mode 100644 results/classifier/105/device/2885 create mode 100644 results/classifier/105/device/2886 create mode 100644 results/classifier/105/device/2887 create mode 100644 results/classifier/105/device/2888 create mode 100644 results/classifier/105/device/289 create mode 100644 results/classifier/105/device/2890 create mode 100644 results/classifier/105/device/2892 create mode 100644 results/classifier/105/device/2893 create mode 100644 results/classifier/105/device/2894 create mode 100644 results/classifier/105/device/2896 create mode 100644 results/classifier/105/device/290 create mode 100644 results/classifier/105/device/2902 create mode 100644 results/classifier/105/device/2904 create mode 100644 results/classifier/105/device/2905 create mode 100644 results/classifier/105/device/291 create mode 100644 results/classifier/105/device/2910 create mode 100644 results/classifier/105/device/2912 create mode 100644 results/classifier/105/device/2913 create mode 100644 results/classifier/105/device/2918 create mode 100644 results/classifier/105/device/292 create mode 100644 results/classifier/105/device/2922 create mode 100644 results/classifier/105/device/2923 create mode 100644 results/classifier/105/device/2924 create mode 100644 results/classifier/105/device/2929 create mode 100644 results/classifier/105/device/293 create mode 100644 results/classifier/105/device/2930 create mode 100644 results/classifier/105/device/2937 create mode 100644 results/classifier/105/device/2939 create mode 100644 results/classifier/105/device/294 create mode 100644 results/classifier/105/device/2940 create mode 100644 results/classifier/105/device/2941 create mode 100644 results/classifier/105/device/2955 create mode 100644 results/classifier/105/device/296 create mode 100644 results/classifier/105/device/2963 create mode 100644 results/classifier/105/device/2964 create mode 100644 results/classifier/105/device/2968 create mode 100644 results/classifier/105/device/2976 create mode 100644 results/classifier/105/device/298 create mode 100644 results/classifier/105/device/2983 create mode 100644 results/classifier/105/device/2985 create mode 100644 results/classifier/105/device/2986 create mode 100644 results/classifier/105/device/302 create mode 100644 results/classifier/105/device/303 create mode 100644 results/classifier/105/device/304 create mode 100644 results/classifier/105/device/305 create mode 100644 results/classifier/105/device/307 create mode 100644 results/classifier/105/device/310 create 
mode 100644 results/classifier/105/device/311 create mode 100644 results/classifier/105/device/313 create mode 100644 results/classifier/105/device/316 create mode 100644 results/classifier/105/device/317 create mode 100644 results/classifier/105/device/318 create mode 100644 results/classifier/105/device/319 create mode 100644 results/classifier/105/device/320 create mode 100644 results/classifier/105/device/321 create mode 100644 results/classifier/105/device/322 create mode 100644 results/classifier/105/device/324 create mode 100644 results/classifier/105/device/325 create mode 100644 results/classifier/105/device/326 create mode 100644 results/classifier/105/device/328 create mode 100644 results/classifier/105/device/329 create mode 100644 results/classifier/105/device/330 create mode 100644 results/classifier/105/device/331 create mode 100644 results/classifier/105/device/332 create mode 100644 results/classifier/105/device/334 create mode 100644 results/classifier/105/device/337 create mode 100644 results/classifier/105/device/338 create mode 100644 results/classifier/105/device/340 create mode 100644 results/classifier/105/device/341 create mode 100644 results/classifier/105/device/344 create mode 100644 results/classifier/105/device/346 create mode 100644 results/classifier/105/device/349 create mode 100644 results/classifier/105/device/350 create mode 100644 results/classifier/105/device/353 create mode 100644 results/classifier/105/device/354 create mode 100644 results/classifier/105/device/355410 create mode 100644 results/classifier/105/device/357 create mode 100644 results/classifier/105/device/358 create mode 100644 results/classifier/105/device/362 create mode 100644 results/classifier/105/device/363 create mode 100644 results/classifier/105/device/365 create mode 100644 results/classifier/105/device/36568044 create mode 100644 results/classifier/105/device/367 create mode 100644 results/classifier/105/device/368 create mode 100644 results/classifier/105/device/375 create mode 100644 results/classifier/105/device/380 create mode 100644 results/classifier/105/device/383 create mode 100644 results/classifier/105/device/384 create mode 100644 results/classifier/105/device/385 create mode 100644 results/classifier/105/device/386 create mode 100644 results/classifier/105/device/387 create mode 100644 results/classifier/105/device/389 create mode 100644 results/classifier/105/device/391 create mode 100644 results/classifier/105/device/393 create mode 100644 results/classifier/105/device/394 create mode 100644 results/classifier/105/device/395 create mode 100644 results/classifier/105/device/397 create mode 100644 results/classifier/105/device/398 create mode 100644 results/classifier/105/device/399 create mode 100644 results/classifier/105/device/402 create mode 100644 results/classifier/105/device/403 create mode 100644 results/classifier/105/device/404 create mode 100644 results/classifier/105/device/405 create mode 100644 results/classifier/105/device/406 create mode 100644 results/classifier/105/device/407 create mode 100644 results/classifier/105/device/408 create mode 100644 results/classifier/105/device/409 create mode 100644 results/classifier/105/device/410 create mode 100644 results/classifier/105/device/411 create mode 100644 results/classifier/105/device/413 create mode 100644 results/classifier/105/device/414 create mode 100644 results/classifier/105/device/415 create mode 100644 results/classifier/105/device/416 create mode 100644 results/classifier/105/device/418 
create mode 100644 results/classifier/105/device/419 create mode 100644 results/classifier/105/device/420 create mode 100644 results/classifier/105/device/422 create mode 100644 results/classifier/105/device/42226390 create mode 100644 results/classifier/105/device/423 create mode 100644 results/classifier/105/device/424 create mode 100644 results/classifier/105/device/425 create mode 100644 results/classifier/105/device/429 create mode 100644 results/classifier/105/device/430 create mode 100644 results/classifier/105/device/431 create mode 100644 results/classifier/105/device/432 create mode 100644 results/classifier/105/device/433 create mode 100644 results/classifier/105/device/434 create mode 100644 results/classifier/105/device/437 create mode 100644 results/classifier/105/device/438 create mode 100644 results/classifier/105/device/44 create mode 100644 results/classifier/105/device/441 create mode 100644 results/classifier/105/device/443 create mode 100644 results/classifier/105/device/444 create mode 100644 results/classifier/105/device/445 create mode 100644 results/classifier/105/device/446 create mode 100644 results/classifier/105/device/448 create mode 100644 results/classifier/105/device/449 create mode 100644 results/classifier/105/device/45 create mode 100644 results/classifier/105/device/451 create mode 100644 results/classifier/105/device/452 create mode 100644 results/classifier/105/device/453 create mode 100644 results/classifier/105/device/454 create mode 100644 results/classifier/105/device/457 create mode 100644 results/classifier/105/device/458 create mode 100644 results/classifier/105/device/46 create mode 100644 results/classifier/105/device/461 create mode 100644 results/classifier/105/device/464 create mode 100644 results/classifier/105/device/467 create mode 100644 results/classifier/105/device/468 create mode 100644 results/classifier/105/device/469 create mode 100644 results/classifier/105/device/472 create mode 100644 results/classifier/105/device/473 create mode 100644 results/classifier/105/device/476 create mode 100644 results/classifier/105/device/477 create mode 100644 results/classifier/105/device/479 create mode 100644 results/classifier/105/device/48 create mode 100644 results/classifier/105/device/480 create mode 100644 results/classifier/105/device/482 create mode 100644 results/classifier/105/device/48245039 create mode 100644 results/classifier/105/device/484 create mode 100644 results/classifier/105/device/485258 create mode 100644 results/classifier/105/device/487 create mode 100644 results/classifier/105/device/49 create mode 100644 results/classifier/105/device/490 create mode 100644 results/classifier/105/device/498039 create mode 100644 results/classifier/105/device/498523 create mode 100644 results/classifier/105/device/50 create mode 100644 results/classifier/105/device/501 create mode 100644 results/classifier/105/device/502 create mode 100644 results/classifier/105/device/503 create mode 100644 results/classifier/105/device/506 create mode 100644 results/classifier/105/device/51 create mode 100644 results/classifier/105/device/510 create mode 100644 results/classifier/105/device/511 create mode 100644 results/classifier/105/device/512 create mode 100644 results/classifier/105/device/513 create mode 100644 results/classifier/105/device/52 create mode 100644 results/classifier/105/device/520 create mode 100644 results/classifier/105/device/521202 create mode 100644 results/classifier/105/device/524 create mode 100644 
results/classifier/105/device/527 create mode 100644 results/classifier/105/device/529 create mode 100644 results/classifier/105/device/531 create mode 100644 results/classifier/105/device/532 create mode 100644 results/classifier/105/device/533613 create mode 100644 results/classifier/105/device/534 create mode 100644 results/classifier/105/device/535 create mode 100644 results/classifier/105/device/537 create mode 100644 results/classifier/105/device/538 create mode 100644 results/classifier/105/device/54 create mode 100644 results/classifier/105/device/540 create mode 100644 results/classifier/105/device/542 create mode 100644 results/classifier/105/device/547 create mode 100644 results/classifier/105/device/549 create mode 100644 results/classifier/105/device/55 create mode 100644 results/classifier/105/device/550 create mode 100644 results/classifier/105/device/552 create mode 100644 results/classifier/105/device/554 create mode 100644 results/classifier/105/device/555 create mode 100644 results/classifier/105/device/556 create mode 100644 results/classifier/105/device/558 create mode 100644 results/classifier/105/device/56 create mode 100644 results/classifier/105/device/560 create mode 100644 results/classifier/105/device/561 create mode 100644 results/classifier/105/device/562 create mode 100644 results/classifier/105/device/565 create mode 100644 results/classifier/105/device/566 create mode 100644 results/classifier/105/device/567 create mode 100644 results/classifier/105/device/568228 create mode 100644 results/classifier/105/device/569 create mode 100644 results/classifier/105/device/570 create mode 100644 results/classifier/105/device/571 create mode 100644 results/classifier/105/device/57195159 create mode 100644 results/classifier/105/device/57231878 create mode 100644 results/classifier/105/device/573 create mode 100644 results/classifier/105/device/574 create mode 100644 results/classifier/105/device/575 create mode 100644 results/classifier/105/device/576 create mode 100644 results/classifier/105/device/581 create mode 100644 results/classifier/105/device/582 create mode 100644 results/classifier/105/device/584146 create mode 100644 results/classifier/105/device/584155 create mode 100644 results/classifier/105/device/585 create mode 100644 results/classifier/105/device/586 create mode 100644 results/classifier/105/device/588691 create mode 100644 results/classifier/105/device/588693 create mode 100644 results/classifier/105/device/588748 create mode 100644 results/classifier/105/device/589 create mode 100644 results/classifier/105/device/590 create mode 100644 results/classifier/105/device/591 create mode 100644 results/classifier/105/device/597 create mode 100644 results/classifier/105/device/598 create mode 100644 results/classifier/105/device/602544 create mode 100644 results/classifier/105/device/603 create mode 100644 results/classifier/105/device/604 create mode 100644 results/classifier/105/device/606 create mode 100644 results/classifier/105/device/608 create mode 100644 results/classifier/105/device/609 create mode 100644 results/classifier/105/device/61 create mode 100644 results/classifier/105/device/613529 create mode 100644 results/classifier/105/device/614 create mode 100644 results/classifier/105/device/615 create mode 100644 results/classifier/105/device/617 create mode 100644 results/classifier/105/device/620 create mode 100644 results/classifier/105/device/621 create mode 100644 results/classifier/105/device/623 create mode 100644 
results/classifier/105/device/623852 create mode 100644 results/classifier/105/device/628 create mode 100644 results/classifier/105/device/628082 create mode 100644 results/classifier/105/device/63 create mode 100644 results/classifier/105/device/630 create mode 100644 results/classifier/105/device/633 create mode 100644 results/classifier/105/device/635 create mode 100644 results/classifier/105/device/637 create mode 100644 results/classifier/105/device/638955 create mode 100644 results/classifier/105/device/64 create mode 100644 results/classifier/105/device/641 create mode 100644 results/classifier/105/device/642 create mode 100644 results/classifier/105/device/644 create mode 100644 results/classifier/105/device/645 create mode 100644 results/classifier/105/device/648 create mode 100644 results/classifier/105/device/649 create mode 100644 results/classifier/105/device/65 create mode 100644 results/classifier/105/device/650 create mode 100644 results/classifier/105/device/651 create mode 100644 results/classifier/105/device/655 create mode 100644 results/classifier/105/device/658152 create mode 100644 results/classifier/105/device/66 create mode 100644 results/classifier/105/device/660366 create mode 100644 results/classifier/105/device/661696 create mode 100644 results/classifier/105/device/662 create mode 100644 results/classifier/105/device/663 create mode 100644 results/classifier/105/device/665 create mode 100644 results/classifier/105/device/666 create mode 100644 results/classifier/105/device/667 create mode 100644 results/classifier/105/device/667791 create mode 100644 results/classifier/105/device/67 create mode 100644 results/classifier/105/device/672 create mode 100644 results/classifier/105/device/675 create mode 100644 results/classifier/105/device/677 create mode 100644 results/classifier/105/device/678 create mode 100644 results/classifier/105/device/67821138 create mode 100644 results/classifier/105/device/679 create mode 100644 results/classifier/105/device/68 create mode 100644 results/classifier/105/device/681 create mode 100644 results/classifier/105/device/683 create mode 100644 results/classifier/105/device/684 create mode 100644 results/classifier/105/device/689 create mode 100644 results/classifier/105/device/69 create mode 100644 results/classifier/105/device/695 create mode 100644 results/classifier/105/device/699 create mode 100644 results/classifier/105/device/700 create mode 100644 results/classifier/105/device/702 create mode 100644 results/classifier/105/device/705 create mode 100644 results/classifier/105/device/709 create mode 100644 results/classifier/105/device/71 create mode 100644 results/classifier/105/device/710234 create mode 100644 results/classifier/105/device/717929 create mode 100644 results/classifier/105/device/718 create mode 100644 results/classifier/105/device/720 create mode 100644 results/classifier/105/device/720657 create mode 100644 results/classifier/105/device/722 create mode 100644 results/classifier/105/device/723460 create mode 100644 results/classifier/105/device/726 create mode 100644 results/classifier/105/device/728 create mode 100644 results/classifier/105/device/730 create mode 100644 results/classifier/105/device/732 create mode 100644 results/classifier/105/device/737 create mode 100644 results/classifier/105/device/738 create mode 100644 results/classifier/105/device/739785 create mode 100644 results/classifier/105/device/74 create mode 100644 results/classifier/105/device/743 create mode 100644 
results/classifier/105/device/745 create mode 100644 results/classifier/105/device/747 create mode 100644 results/classifier/105/device/749 create mode 100644 results/classifier/105/device/75 create mode 100644 results/classifier/105/device/751 create mode 100644 results/classifier/105/device/752 create mode 100644 results/classifier/105/device/753 create mode 100644 results/classifier/105/device/756 create mode 100644 results/classifier/105/device/757 create mode 100644 results/classifier/105/device/757654 create mode 100644 results/classifier/105/device/76 create mode 100644 results/classifier/105/device/760 create mode 100644 results/classifier/105/device/767 create mode 100644 results/classifier/105/device/770 create mode 100644 results/classifier/105/device/772358 create mode 100644 results/classifier/105/device/775 create mode 100644 results/classifier/105/device/776 create mode 100644 results/classifier/105/device/777 create mode 100644 results/classifier/105/device/779151 create mode 100644 results/classifier/105/device/78 create mode 100644 results/classifier/105/device/782 create mode 100644 results/classifier/105/device/783 create mode 100644 results/classifier/105/device/784 create mode 100644 results/classifier/105/device/785 create mode 100644 results/classifier/105/device/786208 create mode 100644 results/classifier/105/device/786209 create mode 100644 results/classifier/105/device/786440 create mode 100644 results/classifier/105/device/786442 create mode 100644 results/classifier/105/device/787 create mode 100644 results/classifier/105/device/788 create mode 100644 results/classifier/105/device/79 create mode 100644 results/classifier/105/device/790 create mode 100644 results/classifier/105/device/791 create mode 100644 results/classifier/105/device/792 create mode 100644 results/classifier/105/device/795 create mode 100644 results/classifier/105/device/796 create mode 100644 results/classifier/105/device/80 create mode 100644 results/classifier/105/device/800 create mode 100644 results/classifier/105/device/801 create mode 100644 results/classifier/105/device/804 create mode 100644 results/classifier/105/device/805 create mode 100644 results/classifier/105/device/808 create mode 100644 results/classifier/105/device/81 create mode 100644 results/classifier/105/device/812398 create mode 100644 results/classifier/105/device/815 create mode 100644 results/classifier/105/device/818 create mode 100644 results/classifier/105/device/82 create mode 100644 results/classifier/105/device/821 create mode 100644 results/classifier/105/device/823733 create mode 100644 results/classifier/105/device/827 create mode 100644 results/classifier/105/device/830 create mode 100644 results/classifier/105/device/831 create mode 100644 results/classifier/105/device/832 create mode 100644 results/classifier/105/device/837 create mode 100644 results/classifier/105/device/839790 create mode 100644 results/classifier/105/device/84 create mode 100644 results/classifier/105/device/840 create mode 100644 results/classifier/105/device/843 create mode 100644 results/classifier/105/device/846 create mode 100644 results/classifier/105/device/847 create mode 100644 results/classifier/105/device/85 create mode 100644 results/classifier/105/device/852 create mode 100644 results/classifier/105/device/858 create mode 100644 results/classifier/105/device/859 create mode 100644 results/classifier/105/device/86 create mode 100644 results/classifier/105/device/861 create mode 100644 results/classifier/105/device/864 
create mode 100644 results/classifier/105/device/873 create mode 100644 results/classifier/105/device/873460 create mode 100644 results/classifier/105/device/875 create mode 100644 results/classifier/105/device/877498 create mode 100644 results/classifier/105/device/879 create mode 100644 results/classifier/105/device/88 create mode 100644 results/classifier/105/device/881637 create mode 100644 results/classifier/105/device/884 create mode 100644 results/classifier/105/device/887 create mode 100644 results/classifier/105/device/888150 create mode 100644 results/classifier/105/device/889 create mode 100644 results/classifier/105/device/89 create mode 100644 results/classifier/105/device/893 create mode 100644 results/classifier/105/device/896 create mode 100644 results/classifier/105/device/897 create mode 100644 results/classifier/105/device/90 create mode 100644 results/classifier/105/device/900 create mode 100644 results/classifier/105/device/901 create mode 100644 results/classifier/105/device/902 create mode 100644 results/classifier/105/device/904617 create mode 100644 results/classifier/105/device/906864 create mode 100644 results/classifier/105/device/910 create mode 100644 results/classifier/105/device/912983 create mode 100644 results/classifier/105/device/913 create mode 100644 results/classifier/105/device/914 create mode 100644 results/classifier/105/device/916720 create mode 100644 results/classifier/105/device/918 create mode 100644 results/classifier/105/device/92 create mode 100644 results/classifier/105/device/923 create mode 100644 results/classifier/105/device/924 create mode 100644 results/classifier/105/device/926 create mode 100644 results/classifier/105/device/928676 create mode 100644 results/classifier/105/device/93 create mode 100644 results/classifier/105/device/930 create mode 100644 results/classifier/105/device/931 create mode 100644 results/classifier/105/device/932 create mode 100644 results/classifier/105/device/933 create mode 100644 results/classifier/105/device/938 create mode 100644 results/classifier/105/device/94 create mode 100644 results/classifier/105/device/941 create mode 100644 results/classifier/105/device/942 create mode 100644 results/classifier/105/device/944 create mode 100644 results/classifier/105/device/944628 create mode 100644 results/classifier/105/device/95 create mode 100644 results/classifier/105/device/950692 create mode 100644 results/classifier/105/device/955 create mode 100644 results/classifier/105/device/955379 create mode 100644 results/classifier/105/device/959852 create mode 100644 results/classifier/105/device/96 create mode 100644 results/classifier/105/device/965 create mode 100644 results/classifier/105/device/972 create mode 100644 results/classifier/105/device/975 create mode 100644 results/classifier/105/device/977 create mode 100644 results/classifier/105/device/978 create mode 100644 results/classifier/105/device/98 create mode 100644 results/classifier/105/device/985288 create mode 100644 results/classifier/105/device/986 create mode 100644 results/classifier/105/device/986318 create mode 100644 results/classifier/105/device/989504 create mode 100644 results/classifier/105/device/99 create mode 100644 results/classifier/105/device/990 create mode 100644 results/classifier/105/device/990364 create mode 100644 results/classifier/105/device/991 create mode 100644 results/classifier/105/device/994 create mode 100644 results/classifier/105/device/996 create mode 100644 results/classifier/105/device/99674399 create 
mode 100644 results/classifier/105/graphic/1000 create mode 100644 results/classifier/105/graphic/1003 create mode 100644 results/classifier/105/graphic/1004 create mode 100644 results/classifier/105/graphic/1004408 create mode 100644 results/classifier/105/graphic/1005 create mode 100644 results/classifier/105/graphic/1007490 create mode 100644 results/classifier/105/graphic/1008 create mode 100644 results/classifier/105/graphic/1013241 create mode 100644 results/classifier/105/graphic/1013691 create mode 100644 results/classifier/105/graphic/1017 create mode 100644 results/classifier/105/graphic/1019 create mode 100644 results/classifier/105/graphic/1020 create mode 100644 results/classifier/105/graphic/1022023 create mode 100644 results/classifier/105/graphic/1023 create mode 100644 results/classifier/105/graphic/1024275 create mode 100644 results/classifier/105/graphic/1025 create mode 100644 results/classifier/105/graphic/1028 create mode 100644 results/classifier/105/graphic/1030807 create mode 100644 results/classifier/105/graphic/1031955 create mode 100644 results/classifier/105/graphic/1033494 create mode 100644 results/classifier/105/graphic/1036 create mode 100644 results/classifier/105/graphic/104 create mode 100644 results/classifier/105/graphic/1040 create mode 100644 results/classifier/105/graphic/1041 create mode 100644 results/classifier/105/graphic/1042084 create mode 100644 results/classifier/105/graphic/1046 create mode 100644 results/classifier/105/graphic/1047 create mode 100644 results/classifier/105/graphic/1047470 create mode 100644 results/classifier/105/graphic/1050 create mode 100644 results/classifier/105/graphic/1051 create mode 100644 results/classifier/105/graphic/1054 create mode 100644 results/classifier/105/graphic/1058 create mode 100644 results/classifier/105/graphic/1059 create mode 100644 results/classifier/105/graphic/1062589 create mode 100644 results/classifier/105/graphic/1063 create mode 100644 results/classifier/105/graphic/1068 create mode 100644 results/classifier/105/graphic/1069 create mode 100644 results/classifier/105/graphic/1075 create mode 100644 results/classifier/105/graphic/1081416 create mode 100644 results/classifier/105/graphic/1082 create mode 100644 results/classifier/105/graphic/1086 create mode 100644 results/classifier/105/graphic/1086745 create mode 100644 results/classifier/105/graphic/1087411 create mode 100644 results/classifier/105/graphic/1088617 create mode 100644 results/classifier/105/graphic/1089 create mode 100644 results/classifier/105/graphic/1090615 create mode 100644 results/classifier/105/graphic/1091115 create mode 100644 results/classifier/105/graphic/1093 create mode 100644 results/classifier/105/graphic/1093691 create mode 100644 results/classifier/105/graphic/1094 create mode 100644 results/classifier/105/graphic/1096 create mode 100644 results/classifier/105/graphic/1096713 create mode 100644 results/classifier/105/graphic/1098 create mode 100644 results/classifier/105/graphic/1099403 create mode 100644 results/classifier/105/graphic/1101210 create mode 100644 results/classifier/105/graphic/1107 create mode 100644 results/classifier/105/graphic/1113 create mode 100644 results/classifier/105/graphic/1115 create mode 100644 results/classifier/105/graphic/1119861 create mode 100644 results/classifier/105/graphic/1120 create mode 100644 results/classifier/105/graphic/1126 create mode 100644 results/classifier/105/graphic/1126369 create mode 100644 results/classifier/105/graphic/1128 create mode 100644 
results/classifier/105/graphic/1129 create mode 100644 results/classifier/105/graphic/1130533 create mode 100644 results/classifier/105/graphic/1141 create mode 100644 results/classifier/105/graphic/1144 create mode 100644 results/classifier/105/graphic/1145 create mode 100644 results/classifier/105/graphic/1146 create mode 100644 results/classifier/105/graphic/1147 create mode 100644 results/classifier/105/graphic/1151 create mode 100644 results/classifier/105/graphic/1151450 create mode 100644 results/classifier/105/graphic/1157368 create mode 100644 results/classifier/105/graphic/1158912 create mode 100644 results/classifier/105/graphic/1159 create mode 100644 results/classifier/105/graphic/1162 create mode 100644 results/classifier/105/graphic/1162227 create mode 100644 results/classifier/105/graphic/1163 create mode 100644 results/classifier/105/graphic/1166 create mode 100644 results/classifier/105/graphic/1168 create mode 100644 results/classifier/105/graphic/1168733 create mode 100644 results/classifier/105/graphic/1169049 create mode 100644 results/classifier/105/graphic/1172613 create mode 100644 results/classifier/105/graphic/1175 create mode 100644 results/classifier/105/graphic/1175513 create mode 100644 results/classifier/105/graphic/1177 create mode 100644 results/classifier/105/graphic/1179 create mode 100644 results/classifier/105/graphic/1184 create mode 100644 results/classifier/105/graphic/1184616 create mode 100644 results/classifier/105/graphic/1185 create mode 100644 results/classifier/105/graphic/1185888 create mode 100644 results/classifier/105/graphic/1186303 create mode 100644 results/classifier/105/graphic/1186935 create mode 100644 results/classifier/105/graphic/1186984 create mode 100644 results/classifier/105/graphic/1187319 create mode 100644 results/classifier/105/graphic/1187334 create mode 100644 results/classifier/105/graphic/1188991 create mode 100644 results/classifier/105/graphic/1191457 create mode 100644 results/classifier/105/graphic/1192344 create mode 100644 results/classifier/105/graphic/1193555 create mode 100644 results/classifier/105/graphic/1194 create mode 100644 results/classifier/105/graphic/1196773 create mode 100644 results/classifier/105/graphic/1200 create mode 100644 results/classifier/105/graphic/1201 create mode 100644 results/classifier/105/graphic/1204697 create mode 100644 results/classifier/105/graphic/1205 create mode 100644 results/classifier/105/graphic/1207 create mode 100644 results/classifier/105/graphic/1207896 create mode 100644 results/classifier/105/graphic/1211910 create mode 100644 results/classifier/105/graphic/1214 create mode 100644 results/classifier/105/graphic/1216 create mode 100644 results/classifier/105/graphic/1219 create mode 100644 results/classifier/105/graphic/1220 create mode 100644 results/classifier/105/graphic/1221966 create mode 100644 results/classifier/105/graphic/1223 create mode 100644 results/classifier/105/graphic/1225187 create mode 100644 results/classifier/105/graphic/1230232 create mode 100644 results/classifier/105/graphic/1231 create mode 100644 results/classifier/105/graphic/1235 create mode 100644 results/classifier/105/graphic/1239 create mode 100644 results/classifier/105/graphic/1240 create mode 100644 results/classifier/105/graphic/1241 create mode 100644 results/classifier/105/graphic/1242963 create mode 100644 results/classifier/105/graphic/1243 create mode 100644 results/classifier/105/graphic/1243968 create mode 100644 results/classifier/105/graphic/1247478 create mode 100644 
results/classifier/105/graphic/1252 create mode 100644 results/classifier/105/graphic/1252270 create mode 100644 results/classifier/105/graphic/1254 create mode 100644 results/classifier/105/graphic/1254940 create mode 100644 results/classifier/105/graphic/1255 create mode 100644 results/classifier/105/graphic/1255303 create mode 100644 results/classifier/105/graphic/1256 create mode 100644 results/classifier/105/graphic/1256122 create mode 100644 results/classifier/105/graphic/1256432 create mode 100644 results/classifier/105/graphic/1257352 create mode 100644 results/classifier/105/graphic/1258626 create mode 100644 results/classifier/105/graphic/1261 create mode 100644 results/classifier/105/graphic/1267 create mode 100644 results/classifier/105/graphic/1269628 create mode 100644 results/classifier/105/graphic/1270 create mode 100644 results/classifier/105/graphic/1272252 create mode 100644 results/classifier/105/graphic/1274 create mode 100644 results/classifier/105/graphic/1276 create mode 100644 results/classifier/105/graphic/1278 create mode 100644 results/classifier/105/graphic/1280961 create mode 100644 results/classifier/105/graphic/1285 create mode 100644 results/classifier/105/graphic/1288 create mode 100644 results/classifier/105/graphic/1289788 create mode 100644 results/classifier/105/graphic/1292037 create mode 100644 results/classifier/105/graphic/1295 create mode 100644 results/classifier/105/graphic/1296 create mode 100644 results/classifier/105/graphic/1297 create mode 100644 results/classifier/105/graphic/1301 create mode 100644 results/classifier/105/graphic/1304 create mode 100644 results/classifier/105/graphic/1305400 create mode 100644 results/classifier/105/graphic/1310 create mode 100644 results/classifier/105/graphic/1315257 create mode 100644 results/classifier/105/graphic/1317 create mode 100644 results/classifier/105/graphic/1318281 create mode 100644 results/classifier/105/graphic/1319 create mode 100644 results/classifier/105/graphic/1321 create mode 100644 results/classifier/105/graphic/1321684 create mode 100644 results/classifier/105/graphic/1323758 create mode 100644 results/classifier/105/graphic/1324 create mode 100644 results/classifier/105/graphic/1324727 create mode 100644 results/classifier/105/graphic/1325 create mode 100644 results/classifier/105/graphic/1328 create mode 100644 results/classifier/105/graphic/1329 create mode 100644 results/classifier/105/graphic/1329956 create mode 100644 results/classifier/105/graphic/1331859 create mode 100644 results/classifier/105/graphic/1332297 create mode 100644 results/classifier/105/graphic/1333 create mode 100644 results/classifier/105/graphic/1336801 create mode 100644 results/classifier/105/graphic/1338591 create mode 100644 results/classifier/105/graphic/1340 create mode 100644 results/classifier/105/graphic/1341 create mode 100644 results/classifier/105/graphic/1341032 create mode 100644 results/classifier/105/graphic/1342686 create mode 100644 results/classifier/105/graphic/1342704 create mode 100644 results/classifier/105/graphic/1343 create mode 100644 results/classifier/105/graphic/1343827 create mode 100644 results/classifier/105/graphic/1346769 create mode 100644 results/classifier/105/graphic/1346784 create mode 100644 results/classifier/105/graphic/1349 create mode 100644 results/classifier/105/graphic/1349277 create mode 100644 results/classifier/105/graphic/1349972 create mode 100644 results/classifier/105/graphic/1351 create mode 100644 results/classifier/105/graphic/1352130 create mode 
100644 results/classifier/105/graphic/1353456 create mode 100644 results/classifier/105/graphic/1355644 create mode 100644 results/classifier/105/graphic/1355697 create mode 100644 results/classifier/105/graphic/1356 create mode 100644 results/classifier/105/graphic/1356969 create mode 100644 results/classifier/105/graphic/1357445 create mode 100644 results/classifier/105/graphic/1359930 create mode 100644 results/classifier/105/graphic/1360 create mode 100644 results/classifier/105/graphic/1361 create mode 100644 results/classifier/105/graphic/1361618 create mode 100644 results/classifier/105/graphic/1362 create mode 100644 results/classifier/105/graphic/1363467 create mode 100644 results/classifier/105/graphic/1363641 create mode 100644 results/classifier/105/graphic/1365 create mode 100644 results/classifier/105/graphic/1366363 create mode 100644 results/classifier/105/graphic/1368 create mode 100644 results/classifier/105/graphic/1368178 create mode 100644 results/classifier/105/graphic/1370585 create mode 100644 results/classifier/105/graphic/1374905 create mode 100644 results/classifier/105/graphic/1376938 create mode 100644 results/classifier/105/graphic/1378 create mode 100644 results/classifier/105/graphic/1378407 create mode 100644 results/classifier/105/graphic/1379688 create mode 100644 results/classifier/105/graphic/1380 create mode 100644 results/classifier/105/graphic/1381846 create mode 100644 results/classifier/105/graphic/1381879 create mode 100644 results/classifier/105/graphic/1386197 create mode 100644 results/classifier/105/graphic/1392468 create mode 100644 results/classifier/105/graphic/1395958 create mode 100644 results/classifier/105/graphic/1396 create mode 100644 results/classifier/105/graphic/1399939 create mode 100644 results/classifier/105/graphic/1399943 create mode 100644 results/classifier/105/graphic/1401 create mode 100644 results/classifier/105/graphic/1404 create mode 100644 results/classifier/105/graphic/1404690 create mode 100644 results/classifier/105/graphic/1406016 create mode 100644 results/classifier/105/graphic/1407 create mode 100644 results/classifier/105/graphic/1410 create mode 100644 results/classifier/105/graphic/1412098 create mode 100644 results/classifier/105/graphic/1415181 create mode 100644 results/classifier/105/graphic/1416988 create mode 100644 results/classifier/105/graphic/1419 create mode 100644 results/classifier/105/graphic/1420 create mode 100644 results/classifier/105/graphic/1421 create mode 100644 results/classifier/105/graphic/1422307 create mode 100644 results/classifier/105/graphic/1424237 create mode 100644 results/classifier/105/graphic/1426593 create mode 100644 results/classifier/105/graphic/1429034 create mode 100644 results/classifier/105/graphic/1433081 create mode 100644 results/classifier/105/graphic/1435 create mode 100644 results/classifier/105/graphic/1435973 create mode 100644 results/classifier/105/graphic/1437 create mode 100644 results/classifier/105/graphic/1439 create mode 100644 results/classifier/105/graphic/1439800 create mode 100644 results/classifier/105/graphic/1440843 create mode 100644 results/classifier/105/graphic/1445 create mode 100644 results/classifier/105/graphic/1451067 create mode 100644 results/classifier/105/graphic/1455 create mode 100644 results/classifier/105/graphic/1459626 create mode 100644 results/classifier/105/graphic/1462944 create mode 100644 results/classifier/105/graphic/1463 create mode 100644 results/classifier/105/graphic/1463172 create mode 100644 
results/classifier/105/graphic/1463909 create mode 100644 results/classifier/105/graphic/1464 create mode 100644 results/classifier/105/graphic/1468 create mode 100644 results/classifier/105/graphic/1469946 create mode 100644 results/classifier/105/graphic/1470720 create mode 100644 results/classifier/105/graphic/1471 create mode 100644 results/classifier/105/graphic/1471583 create mode 100644 results/classifier/105/graphic/1471904 create mode 100644 results/classifier/105/graphic/1472 create mode 100644 results/classifier/105/graphic/1474 create mode 100644 results/classifier/105/graphic/1475 create mode 100644 results/classifier/105/graphic/1477 create mode 100644 results/classifier/105/graphic/1479632 create mode 100644 results/classifier/105/graphic/1482425 create mode 100644 results/classifier/105/graphic/1484925 create mode 100644 results/classifier/105/graphic/1485180 create mode 100644 results/classifier/105/graphic/1486768 create mode 100644 results/classifier/105/graphic/1488 create mode 100644 results/classifier/105/graphic/1492649 create mode 100644 results/classifier/105/graphic/1493033 create mode 100644 results/classifier/105/graphic/1495 create mode 100644 results/classifier/105/graphic/1496 create mode 100644 results/classifier/105/graphic/1496712 create mode 100644 results/classifier/105/graphic/1497479 create mode 100644 results/classifier/105/graphic/1499 create mode 100644 results/classifier/105/graphic/1508 create mode 100644 results/classifier/105/graphic/1508405 create mode 100644 results/classifier/105/graphic/1518 create mode 100644 results/classifier/105/graphic/1519037 create mode 100644 results/classifier/105/graphic/1520730 create mode 100644 results/classifier/105/graphic/1525 create mode 100644 results/classifier/105/graphic/1527300 create mode 100644 results/classifier/105/graphic/1529173 create mode 100644 results/classifier/105/graphic/1529226 create mode 100644 results/classifier/105/graphic/1529859 create mode 100644 results/classifier/105/graphic/1530 create mode 100644 results/classifier/105/graphic/1530035 create mode 100644 results/classifier/105/graphic/1530278 create mode 100644 results/classifier/105/graphic/1530386 create mode 100644 results/classifier/105/graphic/1531 create mode 100644 results/classifier/105/graphic/1531632 create mode 100644 results/classifier/105/graphic/1534382 create mode 100644 results/classifier/105/graphic/1534683 create mode 100644 results/classifier/105/graphic/1535 create mode 100644 results/classifier/105/graphic/1536 create mode 100644 results/classifier/105/graphic/1537 create mode 100644 results/classifier/105/graphic/1538541 create mode 100644 results/classifier/105/graphic/1539940 create mode 100644 results/classifier/105/graphic/1546445 create mode 100644 results/classifier/105/graphic/1546680 create mode 100644 results/classifier/105/graphic/1548166 create mode 100644 results/classifier/105/graphic/1550 create mode 100644 results/classifier/105/graphic/1550743 create mode 100644 results/classifier/105/graphic/1552 create mode 100644 results/classifier/105/graphic/1553 create mode 100644 results/classifier/105/graphic/1554 create mode 100644 results/classifier/105/graphic/1554451 create mode 100644 results/classifier/105/graphic/1555452 create mode 100644 results/classifier/105/graphic/1556 create mode 100644 results/classifier/105/graphic/1556044 create mode 100644 results/classifier/105/graphic/1556306 create mode 100644 results/classifier/105/graphic/1556372 create mode 100644 
results/classifier/105/graphic/1557
 [git new-file trailer, condensed: one `create mode 100644` entry per added result file under results/classifier/105/graphic/ and results/classifier/105/instruction/; the flattened per-file listing is not reproduced here]
 create mode 100644
results/classifier/105/instruction/1890310 create mode 100644 results/classifier/105/instruction/1892081 create mode 100644 results/classifier/105/instruction/1892441 create mode 100644 results/classifier/105/instruction/1892533 create mode 100644 results/classifier/105/instruction/1893010 create mode 100644 results/classifier/105/instruction/1894029 create mode 100644 results/classifier/105/instruction/1895305 create mode 100644 results/classifier/105/instruction/1897194 create mode 100644 results/classifier/105/instruction/1898954 create mode 100644 results/classifier/105/instruction/1899728 create mode 100644 results/classifier/105/instruction/1901 create mode 100644 results/classifier/105/instruction/1901981 create mode 100644 results/classifier/105/instruction/1902267 create mode 100644 results/classifier/105/instruction/1903712 create mode 100644 results/classifier/105/instruction/1903833 create mode 100644 results/classifier/105/instruction/1905356 create mode 100644 results/classifier/105/instruction/1906295 create mode 100644 results/classifier/105/instruction/1909392 create mode 100644 results/classifier/105/instruction/1909823 create mode 100644 results/classifier/105/instruction/1910605 create mode 100644 results/classifier/105/instruction/1912107 create mode 100644 results/classifier/105/instruction/1912934 create mode 100644 results/classifier/105/instruction/1913667 create mode 100644 results/classifier/105/instruction/1913913 create mode 100644 results/classifier/105/instruction/1913926 create mode 100644 results/classifier/105/instruction/1915027 create mode 100644 results/classifier/105/instruction/1916269 create mode 100644 results/classifier/105/instruction/1917 create mode 100644 results/classifier/105/instruction/1917661 create mode 100644 results/classifier/105/instruction/1921138 create mode 100644 results/classifier/105/instruction/1922617 create mode 100644 results/classifier/105/instruction/1922887 create mode 100644 results/classifier/105/instruction/1923629 create mode 100644 results/classifier/105/instruction/1923663 create mode 100644 results/classifier/105/instruction/1926174 create mode 100644 results/classifier/105/instruction/1926249 create mode 100644 results/classifier/105/instruction/1926277 create mode 100644 results/classifier/105/instruction/1926759 create mode 100644 results/classifier/105/instruction/1955 create mode 100644 results/classifier/105/instruction/1958 create mode 100644 results/classifier/105/instruction/1981 create mode 100644 results/classifier/105/instruction/1991 create mode 100644 results/classifier/105/instruction/2004 create mode 100644 results/classifier/105/instruction/2008 create mode 100644 results/classifier/105/instruction/203 create mode 100644 results/classifier/105/instruction/2039 create mode 100644 results/classifier/105/instruction/2043 create mode 100644 results/classifier/105/instruction/2054889 create mode 100644 results/classifier/105/instruction/2074 create mode 100644 results/classifier/105/instruction/2088 create mode 100644 results/classifier/105/instruction/2089 create mode 100644 results/classifier/105/instruction/2091 create mode 100644 results/classifier/105/instruction/2104 create mode 100644 results/classifier/105/instruction/2111 create mode 100644 results/classifier/105/instruction/2114 create mode 100644 results/classifier/105/instruction/2118 create mode 100644 results/classifier/105/instruction/2123 create mode 100644 results/classifier/105/instruction/2127 create mode 100644 
results/classifier/105/instruction/2131 create mode 100644 results/classifier/105/instruction/2140 create mode 100644 results/classifier/105/instruction/2142 create mode 100644 results/classifier/105/instruction/2149 create mode 100644 results/classifier/105/instruction/2156 create mode 100644 results/classifier/105/instruction/2157 create mode 100644 results/classifier/105/instruction/216 create mode 100644 results/classifier/105/instruction/2160 create mode 100644 results/classifier/105/instruction/2163 create mode 100644 results/classifier/105/instruction/2175 create mode 100644 results/classifier/105/instruction/2177 create mode 100644 results/classifier/105/instruction/2198 create mode 100644 results/classifier/105/instruction/2207 create mode 100644 results/classifier/105/instruction/2226 create mode 100644 results/classifier/105/instruction/2248 create mode 100644 results/classifier/105/instruction/2250 create mode 100644 results/classifier/105/instruction/2287 create mode 100644 results/classifier/105/instruction/2300 create mode 100644 results/classifier/105/instruction/2302 create mode 100644 results/classifier/105/instruction/2317 create mode 100644 results/classifier/105/instruction/2318 create mode 100644 results/classifier/105/instruction/232 create mode 100644 results/classifier/105/instruction/2328 create mode 100644 results/classifier/105/instruction/2342 create mode 100644 results/classifier/105/instruction/2377 create mode 100644 results/classifier/105/instruction/2386 create mode 100644 results/classifier/105/instruction/2388 create mode 100644 results/classifier/105/instruction/2390 create mode 100644 results/classifier/105/instruction/2397 create mode 100644 results/classifier/105/instruction/2402 create mode 100644 results/classifier/105/instruction/2419 create mode 100644 results/classifier/105/instruction/2423 create mode 100644 results/classifier/105/instruction/2452 create mode 100644 results/classifier/105/instruction/246 create mode 100644 results/classifier/105/instruction/2466 create mode 100644 results/classifier/105/instruction/2497 create mode 100644 results/classifier/105/instruction/2499 create mode 100644 results/classifier/105/instruction/2500 create mode 100644 results/classifier/105/instruction/2501 create mode 100644 results/classifier/105/instruction/2506 create mode 100644 results/classifier/105/instruction/2522 create mode 100644 results/classifier/105/instruction/2525 create mode 100644 results/classifier/105/instruction/2531 create mode 100644 results/classifier/105/instruction/2537 create mode 100644 results/classifier/105/instruction/2554 create mode 100644 results/classifier/105/instruction/2563 create mode 100644 results/classifier/105/instruction/2577 create mode 100644 results/classifier/105/instruction/2583 create mode 100644 results/classifier/105/instruction/2592 create mode 100644 results/classifier/105/instruction/2598 create mode 100644 results/classifier/105/instruction/2619 create mode 100644 results/classifier/105/instruction/2628 create mode 100644 results/classifier/105/instruction/263 create mode 100644 results/classifier/105/instruction/2641 create mode 100644 results/classifier/105/instruction/2647 create mode 100644 results/classifier/105/instruction/2655 create mode 100644 results/classifier/105/instruction/2662 create mode 100644 results/classifier/105/instruction/2669 create mode 100644 results/classifier/105/instruction/268 create mode 100644 results/classifier/105/instruction/2683 create mode 100644 
results/classifier/105/instruction/2696 create mode 100644 results/classifier/105/instruction/273 create mode 100644 results/classifier/105/instruction/2747 create mode 100644 results/classifier/105/instruction/275 create mode 100644 results/classifier/105/instruction/2750 create mode 100644 results/classifier/105/instruction/2755 create mode 100644 results/classifier/105/instruction/276 create mode 100644 results/classifier/105/instruction/2761 create mode 100644 results/classifier/105/instruction/2764 create mode 100644 results/classifier/105/instruction/2771 create mode 100644 results/classifier/105/instruction/2802 create mode 100644 results/classifier/105/instruction/2809 create mode 100644 results/classifier/105/instruction/2817 create mode 100644 results/classifier/105/instruction/2820 create mode 100644 results/classifier/105/instruction/2833 create mode 100644 results/classifier/105/instruction/2854 create mode 100644 results/classifier/105/instruction/2855 create mode 100644 results/classifier/105/instruction/2861 create mode 100644 results/classifier/105/instruction/2865 create mode 100644 results/classifier/105/instruction/2883 create mode 100644 results/classifier/105/instruction/2891 create mode 100644 results/classifier/105/instruction/2900 create mode 100644 results/classifier/105/instruction/2903 create mode 100644 results/classifier/105/instruction/2907 create mode 100644 results/classifier/105/instruction/2946 create mode 100644 results/classifier/105/instruction/2958 create mode 100644 results/classifier/105/instruction/2971 create mode 100644 results/classifier/105/instruction/2980 create mode 100644 results/classifier/105/instruction/300 create mode 100644 results/classifier/105/instruction/301 create mode 100644 results/classifier/105/instruction/306 create mode 100644 results/classifier/105/instruction/312 create mode 100644 results/classifier/105/instruction/333 create mode 100644 results/classifier/105/instruction/355 create mode 100644 results/classifier/105/instruction/361 create mode 100644 results/classifier/105/instruction/366 create mode 100644 results/classifier/105/instruction/381 create mode 100644 results/classifier/105/instruction/390 create mode 100644 results/classifier/105/instruction/417 create mode 100644 results/classifier/105/instruction/424450 create mode 100644 results/classifier/105/instruction/440 create mode 100644 results/classifier/105/instruction/442 create mode 100644 results/classifier/105/instruction/447 create mode 100644 results/classifier/105/instruction/459 create mode 100644 results/classifier/105/instruction/462 create mode 100644 results/classifier/105/instruction/481 create mode 100644 results/classifier/105/instruction/483 create mode 100644 results/classifier/105/instruction/485 create mode 100644 results/classifier/105/instruction/493 create mode 100644 results/classifier/105/instruction/498417 create mode 100644 results/classifier/105/instruction/50773216 create mode 100644 results/classifier/105/instruction/509 create mode 100644 results/classifier/105/instruction/514 create mode 100644 results/classifier/105/instruction/516 create mode 100644 results/classifier/105/instruction/521 create mode 100644 results/classifier/105/instruction/533 create mode 100644 results/classifier/105/instruction/543 create mode 100644 results/classifier/105/instruction/544 create mode 100644 results/classifier/105/instruction/545 create mode 100644 results/classifier/105/instruction/551 create mode 100644 results/classifier/105/instruction/584 
create mode 100644 results/classifier/105/instruction/597575 create mode 100644 results/classifier/105/instruction/60 create mode 100644 results/classifier/105/instruction/613 create mode 100644 results/classifier/105/instruction/616769 create mode 100644 results/classifier/105/instruction/619 create mode 100644 results/classifier/105/instruction/62 create mode 100644 results/classifier/105/instruction/625 create mode 100644 results/classifier/105/instruction/632 create mode 100644 results/classifier/105/instruction/63565653 create mode 100644 results/classifier/105/instruction/636095 create mode 100644 results/classifier/105/instruction/643 create mode 100644 results/classifier/105/instruction/646 create mode 100644 results/classifier/105/instruction/652 create mode 100644 results/classifier/105/instruction/657329 create mode 100644 results/classifier/105/instruction/664 create mode 100644 results/classifier/105/instruction/670 create mode 100644 results/classifier/105/instruction/674740 create mode 100644 results/classifier/105/instruction/682326 create mode 100644 results/classifier/105/instruction/682360 create mode 100644 results/classifier/105/instruction/690 create mode 100644 results/classifier/105/instruction/697 create mode 100644 results/classifier/105/instruction/701 create mode 100644 results/classifier/105/instruction/703 create mode 100644 results/classifier/105/instruction/70868267 create mode 100644 results/classifier/105/instruction/729 create mode 100644 results/classifier/105/instruction/744 create mode 100644 results/classifier/105/instruction/750 create mode 100644 results/classifier/105/instruction/754635 create mode 100644 results/classifier/105/instruction/760976 create mode 100644 results/classifier/105/instruction/781 create mode 100644 results/classifier/105/instruction/789652 create mode 100644 results/classifier/105/instruction/796480 create mode 100644 results/classifier/105/instruction/799 create mode 100644 results/classifier/105/instruction/802 create mode 100644 results/classifier/105/instruction/813 create mode 100644 results/classifier/105/instruction/817 create mode 100644 results/classifier/105/instruction/824 create mode 100644 results/classifier/105/instruction/825776 create mode 100644 results/classifier/105/instruction/826 create mode 100644 results/classifier/105/instruction/842290 create mode 100644 results/classifier/105/instruction/885 create mode 100644 results/classifier/105/instruction/891 create mode 100644 results/classifier/105/instruction/891002 create mode 100644 results/classifier/105/instruction/899664 create mode 100644 results/classifier/105/instruction/91 create mode 100644 results/classifier/105/instruction/925 create mode 100644 results/classifier/105/instruction/927 create mode 100644 results/classifier/105/instruction/929 create mode 100644 results/classifier/105/instruction/947 create mode 100644 results/classifier/105/instruction/953 create mode 100644 results/classifier/105/instruction/974958 create mode 100644 results/classifier/105/instruction/982 create mode 100644 results/classifier/105/instruction/984 create mode 100644 results/classifier/105/instruction/984516 create mode 100644 results/classifier/105/mistranslation/1015 create mode 100644 results/classifier/105/mistranslation/1027525 create mode 100644 results/classifier/105/mistranslation/1030 create mode 100644 results/classifier/105/mistranslation/1035042 create mode 100644 results/classifier/105/mistranslation/1036987 create mode 100644 
results/classifier/105/mistranslation/1048 create mode 100644 results/classifier/105/mistranslation/1050694 create mode 100644 results/classifier/105/mistranslation/1054831 create mode 100644 results/classifier/105/mistranslation/1066909 create mode 100644 results/classifier/105/mistranslation/1068900 create mode 100644 results/classifier/105/mistranslation/1077806 create mode 100644 results/classifier/105/mistranslation/108 create mode 100644 results/classifier/105/mistranslation/1081 create mode 100644 results/classifier/105/mistranslation/1084 create mode 100644 results/classifier/105/mistranslation/1090604 create mode 100644 results/classifier/105/mistranslation/1095531 create mode 100644 results/classifier/105/mistranslation/1100 create mode 100644 results/classifier/105/mistranslation/1119 create mode 100644 results/classifier/105/mistranslation/1120383 create mode 100644 results/classifier/105/mistranslation/1121 create mode 100644 results/classifier/105/mistranslation/1127 create mode 100644 results/classifier/105/mistranslation/113 create mode 100644 results/classifier/105/mistranslation/1151986 create mode 100644 results/classifier/105/mistranslation/1153 create mode 100644 results/classifier/105/mistranslation/1161 create mode 100644 results/classifier/105/mistranslation/1169 create mode 100644 results/classifier/105/mistranslation/1172 create mode 100644 results/classifier/105/mistranslation/1178 create mode 100644 results/classifier/105/mistranslation/1178107 create mode 100644 results/classifier/105/mistranslation/1179664 create mode 100644 results/classifier/105/mistranslation/1179731 create mode 100644 results/classifier/105/mistranslation/1180777 create mode 100644 results/classifier/105/mistranslation/1182 create mode 100644 results/classifier/105/mistranslation/1192065 create mode 100644 results/classifier/105/mistranslation/1195882 create mode 100644 results/classifier/105/mistranslation/1196145 create mode 100644 results/classifier/105/mistranslation/1216368 create mode 100644 results/classifier/105/mistranslation/1217339 create mode 100644 results/classifier/105/mistranslation/1238 create mode 100644 results/classifier/105/mistranslation/1245724 create mode 100644 results/classifier/105/mistranslation/1248 create mode 100644 results/classifier/105/mistranslation/1249 create mode 100644 results/classifier/105/mistranslation/1257 create mode 100644 results/classifier/105/mistranslation/1276879 create mode 100644 results/classifier/105/mistranslation/128 create mode 100644 results/classifier/105/mistranslation/1284874 create mode 100644 results/classifier/105/mistranslation/1285363 create mode 100644 results/classifier/105/mistranslation/1287195 create mode 100644 results/classifier/105/mistranslation/1298442 create mode 100644 results/classifier/105/mistranslation/130 create mode 100644 results/classifier/105/mistranslation/1300863 create mode 100644 results/classifier/105/mistranslation/1305402 create mode 100644 results/classifier/105/mistranslation/1307 create mode 100644 results/classifier/105/mistranslation/1311614 create mode 100644 results/classifier/105/mistranslation/1318091 create mode 100644 results/classifier/105/mistranslation/1318474 create mode 100644 results/classifier/105/mistranslation/1320360 create mode 100644 results/classifier/105/mistranslation/1322 create mode 100644 results/classifier/105/mistranslation/1324112 create mode 100644 results/classifier/105/mistranslation/1326533 create mode 100644 results/classifier/105/mistranslation/1331 create 
mode 100644 results/classifier/105/mistranslation/1332 create mode 100644 results/classifier/105/mistranslation/1334 create mode 100644 results/classifier/105/mistranslation/1336 create mode 100644 results/classifier/105/mistranslation/1337 create mode 100644 results/classifier/105/mistranslation/1338 create mode 100644 results/classifier/105/mistranslation/1345 create mode 100644 results/classifier/105/mistranslation/1353947 create mode 100644 results/classifier/105/mistranslation/1358619 create mode 100644 results/classifier/105/mistranslation/136 create mode 100644 results/classifier/105/mistranslation/1367365 create mode 100644 results/classifier/105/mistranslation/1376533 create mode 100644 results/classifier/105/mistranslation/1383 create mode 100644 results/classifier/105/mistranslation/1393440 create mode 100644 results/classifier/105/mistranslation/1397 create mode 100644 results/classifier/105/mistranslation/1402802 create mode 100644 results/classifier/105/mistranslation/1405176 create mode 100644 results/classifier/105/mistranslation/1407813 create mode 100644 results/classifier/105/mistranslation/1414293 create mode 100644 results/classifier/105/mistranslation/1417 create mode 100644 results/classifier/105/mistranslation/1429313 create mode 100644 results/classifier/105/mistranslation/143 create mode 100644 results/classifier/105/mistranslation/1431 create mode 100644 results/classifier/105/mistranslation/1431084 create mode 100644 results/classifier/105/mistranslation/1433 create mode 100644 results/classifier/105/mistranslation/1435101 create mode 100644 results/classifier/105/mistranslation/1437811 create mode 100644 results/classifier/105/mistranslation/1438144 create mode 100644 results/classifier/105/mistranslation/1449 create mode 100644 results/classifier/105/mistranslation/1449687 create mode 100644 results/classifier/105/mistranslation/1453 create mode 100644 results/classifier/105/mistranslation/146 create mode 100644 results/classifier/105/mistranslation/1461 create mode 100644 results/classifier/105/mistranslation/1469924 create mode 100644 results/classifier/105/mistranslation/1470536 create mode 100644 results/classifier/105/mistranslation/1473451 create mode 100644 results/classifier/105/mistranslation/1479 create mode 100644 results/classifier/105/mistranslation/1480 create mode 100644 results/classifier/105/mistranslation/1483070 create mode 100644 results/classifier/105/mistranslation/14887122 create mode 100644 results/classifier/105/mistranslation/1490 create mode 100644 results/classifier/105/mistranslation/1500175 create mode 100644 results/classifier/105/mistranslation/1512134 create mode 100644 results/classifier/105/mistranslation/1513 create mode 100644 results/classifier/105/mistranslation/152 create mode 100644 results/classifier/105/mistranslation/1526 create mode 100644 results/classifier/105/mistranslation/1527322 create mode 100644 results/classifier/105/mistranslation/1528214 create mode 100644 results/classifier/105/mistranslation/1529 create mode 100644 results/classifier/105/mistranslation/1533 create mode 100644 results/classifier/105/mistranslation/1536487 create mode 100644 results/classifier/105/mistranslation/154 create mode 100644 results/classifier/105/mistranslation/1543 create mode 100644 results/classifier/105/mistranslation/1545052 create mode 100644 results/classifier/105/mistranslation/156 create mode 100644 results/classifier/105/mistranslation/1562653 create mode 100644 results/classifier/105/mistranslation/1563152 create 
mode 100644 results/classifier/105/mistranslation/1563887 create mode 100644 results/classifier/105/mistranslation/1566 create mode 100644 results/classifier/105/mistranslation/1568621 create mode 100644 results/classifier/105/mistranslation/1573 create mode 100644 results/classifier/105/mistranslation/1577841 create mode 100644 results/classifier/105/mistranslation/1579306 create mode 100644 results/classifier/105/mistranslation/1579327 create mode 100644 results/classifier/105/mistranslation/1580586 create mode 100644 results/classifier/105/mistranslation/1584 create mode 100644 results/classifier/105/mistranslation/1586 create mode 100644 results/classifier/105/mistranslation/1586229 create mode 100644 results/classifier/105/mistranslation/159 create mode 100644 results/classifier/105/mistranslation/1590322 create mode 100644 results/classifier/105/mistranslation/1593605 create mode 100644 results/classifier/105/mistranslation/1596870 create mode 100644 results/classifier/105/mistranslation/1600112 create mode 100644 results/classifier/105/mistranslation/1606899 create mode 100644 results/classifier/105/mistranslation/1608 create mode 100644 results/classifier/105/mistranslation/1608802 create mode 100644 results/classifier/105/mistranslation/1610368 create mode 100644 results/classifier/105/mistranslation/1613817 create mode 100644 results/classifier/105/mistranslation/1614521 create mode 100644 results/classifier/105/mistranslation/1614609 create mode 100644 results/classifier/105/mistranslation/1615079 create mode 100644 results/classifier/105/mistranslation/1625987 create mode 100644 results/classifier/105/mistranslation/1631773 create mode 100644 results/classifier/105/mistranslation/1648726 create mode 100644 results/classifier/105/mistranslation/1650175 create mode 100644 results/classifier/105/mistranslation/1654 create mode 100644 results/classifier/105/mistranslation/1655708 create mode 100644 results/classifier/105/mistranslation/1657538 create mode 100644 results/classifier/105/mistranslation/1657841 create mode 100644 results/classifier/105/mistranslation/1658634 create mode 100644 results/classifier/105/mistranslation/166 create mode 100644 results/classifier/105/mistranslation/1660010 create mode 100644 results/classifier/105/mistranslation/1660599 create mode 100644 results/classifier/105/mistranslation/1662468 create mode 100644 results/classifier/105/mistranslation/1665344 create mode 100644 results/classifier/105/mistranslation/1666 create mode 100644 results/classifier/105/mistranslation/1667 create mode 100644 results/classifier/105/mistranslation/1668 create mode 100644 results/classifier/105/mistranslation/1668360 create mode 100644 results/classifier/105/mistranslation/1675108 create mode 100644 results/classifier/105/mistranslation/1683084 create mode 100644 results/classifier/105/mistranslation/1686980 create mode 100644 results/classifier/105/mistranslation/1692 create mode 100644 results/classifier/105/mistranslation/1694 create mode 100644 results/classifier/105/mistranslation/1696746 create mode 100644 results/classifier/105/mistranslation/1699 create mode 100644 results/classifier/105/mistranslation/1700 create mode 100644 results/classifier/105/mistranslation/1706 create mode 100644 results/classifier/105/mistranslation/1707274 create mode 100644 results/classifier/105/mistranslation/1708077 create mode 100644 results/classifier/105/mistranslation/1708215 create mode 100644 results/classifier/105/mistranslation/1709025 create mode 100644 
results/classifier/105/mistranslation/1709170 create mode 100644 results/classifier/105/mistranslation/1711602 create mode 100644 results/classifier/105/mistranslation/1711828 create mode 100644 results/classifier/105/mistranslation/1715296 create mode 100644 results/classifier/105/mistranslation/1715715 create mode 100644 results/classifier/105/mistranslation/1719 create mode 100644 results/classifier/105/mistranslation/1719339 create mode 100644 results/classifier/105/mistranslation/1720 create mode 100644 results/classifier/105/mistranslation/1720971 create mode 100644 results/classifier/105/mistranslation/1722857 create mode 100644 results/classifier/105/mistranslation/1728325 create mode 100644 results/classifier/105/mistranslation/1729501 create mode 100644 results/classifier/105/mistranslation/1730101 create mode 100644 results/classifier/105/mistranslation/1731277 create mode 100644 results/classifier/105/mistranslation/1735 create mode 100644 results/classifier/105/mistranslation/1736376 create mode 100644 results/classifier/105/mistranslation/1743214 create mode 100644 results/classifier/105/mistranslation/1753 create mode 100644 results/classifier/105/mistranslation/1753437 create mode 100644 results/classifier/105/mistranslation/1754542 create mode 100644 results/classifier/105/mistranslation/1756080 create mode 100644 results/classifier/105/mistranslation/1756519 create mode 100644 results/classifier/105/mistranslation/1767126 create mode 100644 results/classifier/105/mistranslation/177 create mode 100644 results/classifier/105/mistranslation/1772086 create mode 100644 results/classifier/105/mistranslation/1773743 create mode 100644 results/classifier/105/mistranslation/1774412 create mode 100644 results/classifier/105/mistranslation/1776096 create mode 100644 results/classifier/105/mistranslation/1777293 create mode 100644 results/classifier/105/mistranslation/1777969 create mode 100644 results/classifier/105/mistranslation/1778 create mode 100644 results/classifier/105/mistranslation/1779120 create mode 100644 results/classifier/105/mistranslation/1780812 create mode 100644 results/classifier/105/mistranslation/1780814 create mode 100644 results/classifier/105/mistranslation/1788098 create mode 100644 results/classifier/105/mistranslation/1788275 create mode 100644 results/classifier/105/mistranslation/1788701 create mode 100644 results/classifier/105/mistranslation/1791947 create mode 100644 results/classifier/105/mistranslation/1793016 create mode 100644 results/classifier/105/mistranslation/1793791 create mode 100644 results/classifier/105/mistranslation/1795369 create mode 100644 results/classifier/105/mistranslation/1796816 create mode 100644 results/classifier/105/mistranslation/1799200 create mode 100644 results/classifier/105/mistranslation/1799768 create mode 100644 results/classifier/105/mistranslation/1800993 create mode 100644 results/classifier/105/mistranslation/1801 create mode 100644 results/classifier/105/mistranslation/1805445 create mode 100644 results/classifier/105/mistranslation/1811862 create mode 100644 results/classifier/105/mistranslation/1814381 create mode 100644 results/classifier/105/mistranslation/1815143 create mode 100644 results/classifier/105/mistranslation/1815413 create mode 100644 results/classifier/105/mistranslation/1816052 create mode 100644 results/classifier/105/mistranslation/1819289 create mode 100644 results/classifier/105/mistranslation/1820247 create mode 100644 results/classifier/105/mistranslation/1823152 create mode 100644 
results/classifier/105/mistranslation/1824704 create mode 100644 results/classifier/105/mistranslation/1824768 create mode 100644 results/classifier/105/mistranslation/1825207 create mode 100644 results/classifier/105/mistranslation/1828723 create mode 100644 results/classifier/105/mistranslation/1829696 create mode 100644 results/classifier/105/mistranslation/1830415 create mode 100644 results/classifier/105/mistranslation/1830872 create mode 100644 results/classifier/105/mistranslation/1831750 create mode 100644 results/classifier/105/mistranslation/1832914 create mode 100644 results/classifier/105/mistranslation/1833053 create mode 100644 results/classifier/105/mistranslation/1834113 create mode 100644 results/classifier/105/mistranslation/1834613 create mode 100644 results/classifier/105/mistranslation/1835793 create mode 100644 results/classifier/105/mistranslation/1840646 create mode 100644 results/classifier/105/mistranslation/1840648 create mode 100644 results/classifier/105/mistranslation/1843852 create mode 100644 results/classifier/105/mistranslation/1847861 create mode 100644 results/classifier/105/mistranslation/1848556 create mode 100644 results/classifier/105/mistranslation/1849234 create mode 100644 results/classifier/105/mistranslation/1851552 create mode 100644 results/classifier/105/mistranslation/1853083 create mode 100644 results/classifier/105/mistranslation/1859713 create mode 100644 results/classifier/105/mistranslation/1861404 create mode 100644 results/classifier/105/mistranslation/1861946 create mode 100644 results/classifier/105/mistranslation/1862986 create mode 100644 results/classifier/105/mistranslation/1863023 create mode 100644 results/classifier/105/mistranslation/1863025 create mode 100644 results/classifier/105/mistranslation/1863200 create mode 100644 results/classifier/105/mistranslation/1863445 create mode 100644 results/classifier/105/mistranslation/1863601 create mode 100644 results/classifier/105/mistranslation/1863710 create mode 100644 results/classifier/105/mistranslation/1864704 create mode 100644 results/classifier/105/mistranslation/1864955 create mode 100644 results/classifier/105/mistranslation/1866870 create mode 100644 results/classifier/105/mistranslation/1867072 create mode 100644 results/classifier/105/mistranslation/1868617 create mode 100644 results/classifier/105/mistranslation/1873 create mode 100644 results/classifier/105/mistranslation/1873769 create mode 100644 results/classifier/105/mistranslation/1874504 create mode 100644 results/classifier/105/mistranslation/1875012 create mode 100644 results/classifier/105/mistranslation/1875819 create mode 100644 results/classifier/105/mistranslation/1877781 create mode 100644 results/classifier/105/mistranslation/1878641 create mode 100644 results/classifier/105/mistranslation/1879175 create mode 100644 results/classifier/105/mistranslation/1879227 create mode 100644 results/classifier/105/mistranslation/1880287 create mode 100644 results/classifier/105/mistranslation/1881648 create mode 100644 results/classifier/105/mistranslation/1884507 create mode 100644 results/classifier/105/mistranslation/1884719 create mode 100644 results/classifier/105/mistranslation/1885175 create mode 100644 results/classifier/105/mistranslation/1886097 create mode 100644 results/classifier/105/mistranslation/1886306 create mode 100644 results/classifier/105/mistranslation/1886318 create mode 100644 results/classifier/105/mistranslation/1887309 create mode 100644 results/classifier/105/mistranslation/1887318 
create mode 100644 results/classifier/105/mistranslation/1888492 create mode 100644 results/classifier/105/mistranslation/1888818 create mode 100644 results/classifier/105/mistranslation/1888923 create mode 100644 results/classifier/105/mistranslation/1891748 create mode 100644 results/classifier/105/mistranslation/1891749 create mode 100644 results/classifier/105/mistranslation/1892960 create mode 100644 results/classifier/105/mistranslation/1896263 create mode 100644 results/classifier/105/mistranslation/1898883 create mode 100644 results/classifier/105/mistranslation/1900918 create mode 100644 results/classifier/105/mistranslation/1900919 create mode 100644 results/classifier/105/mistranslation/1901359 create mode 100644 results/classifier/105/mistranslation/1902394 create mode 100644 results/classifier/105/mistranslation/1902975 create mode 100644 results/classifier/105/mistranslation/1903493 create mode 100644 results/classifier/105/mistranslation/1905521 create mode 100644 results/classifier/105/mistranslation/1907 create mode 100644 results/classifier/105/mistranslation/1907953 create mode 100644 results/classifier/105/mistranslation/1908450 create mode 100644 results/classifier/105/mistranslation/1908626 create mode 100644 results/classifier/105/mistranslation/191 create mode 100644 results/classifier/105/mistranslation/1910540 create mode 100644 results/classifier/105/mistranslation/1911 create mode 100644 results/classifier/105/mistranslation/1911666 create mode 100644 results/classifier/105/mistranslation/1913315 create mode 100644 results/classifier/105/mistranslation/1913619 create mode 100644 results/classifier/105/mistranslation/1915063 create mode 100644 results/classifier/105/mistranslation/1915682 create mode 100644 results/classifier/105/mistranslation/1915925 create mode 100644 results/classifier/105/mistranslation/1916501 create mode 100644 results/classifier/105/mistranslation/1917565 create mode 100644 results/classifier/105/mistranslation/1918026 create mode 100644 results/classifier/105/mistranslation/1918149 create mode 100644 results/classifier/105/mistranslation/1919021 create mode 100644 results/classifier/105/mistranslation/1920752 create mode 100644 results/classifier/105/mistranslation/1921444 create mode 100644 results/classifier/105/mistranslation/1923689 create mode 100644 results/classifier/105/mistranslation/1923693 create mode 100644 results/classifier/105/mistranslation/1925496 create mode 100644 results/classifier/105/mistranslation/1931 create mode 100644 results/classifier/105/mistranslation/194 create mode 100644 results/classifier/105/mistranslation/1956 create mode 100644 results/classifier/105/mistranslation/1970 create mode 100644 results/classifier/105/mistranslation/1970563 create mode 100644 results/classifier/105/mistranslation/1975 create mode 100644 results/classifier/105/mistranslation/1978 create mode 100644 results/classifier/105/mistranslation/1982 create mode 100644 results/classifier/105/mistranslation/1995 create mode 100644 results/classifier/105/mistranslation/1999 create mode 100644 results/classifier/105/mistranslation/2005 create mode 100644 results/classifier/105/mistranslation/2053 create mode 100644 results/classifier/105/mistranslation/2054 create mode 100644 results/classifier/105/mistranslation/2062 create mode 100644 results/classifier/105/mistranslation/2076 create mode 100644 results/classifier/105/mistranslation/2077 create mode 100644 results/classifier/105/mistranslation/2084 create mode 100644 
results/classifier/105/mistranslation/209 create mode 100644 results/classifier/105/mistranslation/2120 create mode 100644 results/classifier/105/mistranslation/2121 create mode 100644 results/classifier/105/mistranslation/2130 create mode 100644 results/classifier/105/mistranslation/2184 create mode 100644 results/classifier/105/mistranslation/2197 create mode 100644 results/classifier/105/mistranslation/2203 create mode 100644 results/classifier/105/mistranslation/2214 create mode 100644 results/classifier/105/mistranslation/222 create mode 100644 results/classifier/105/mistranslation/2227 create mode 100644 results/classifier/105/mistranslation/2232 create mode 100644 results/classifier/105/mistranslation/2236 create mode 100644 results/classifier/105/mistranslation/227 create mode 100644 results/classifier/105/mistranslation/228 create mode 100644 results/classifier/105/mistranslation/230 create mode 100644 results/classifier/105/mistranslation/23270873 create mode 100644 results/classifier/105/mistranslation/2352 create mode 100644 results/classifier/105/mistranslation/2366 create mode 100644 results/classifier/105/mistranslation/2367 create mode 100644 results/classifier/105/mistranslation/2369 create mode 100644 results/classifier/105/mistranslation/2395 create mode 100644 results/classifier/105/mistranslation/2401 create mode 100644 results/classifier/105/mistranslation/241 create mode 100644 results/classifier/105/mistranslation/2415 create mode 100644 results/classifier/105/mistranslation/2425 create mode 100644 results/classifier/105/mistranslation/2430 create mode 100644 results/classifier/105/mistranslation/2431 create mode 100644 results/classifier/105/mistranslation/244 create mode 100644 results/classifier/105/mistranslation/2448 create mode 100644 results/classifier/105/mistranslation/2451 create mode 100644 results/classifier/105/mistranslation/2467 create mode 100644 results/classifier/105/mistranslation/2481 create mode 100644 results/classifier/105/mistranslation/2484 create mode 100644 results/classifier/105/mistranslation/255 create mode 100644 results/classifier/105/mistranslation/25842545 create mode 100644 results/classifier/105/mistranslation/2600 create mode 100644 results/classifier/105/mistranslation/2607 create mode 100644 results/classifier/105/mistranslation/2610 create mode 100644 results/classifier/105/mistranslation/2614 create mode 100644 results/classifier/105/mistranslation/2630 create mode 100644 results/classifier/105/mistranslation/2638 create mode 100644 results/classifier/105/mistranslation/2644 create mode 100644 results/classifier/105/mistranslation/266 create mode 100644 results/classifier/105/mistranslation/267 create mode 100644 results/classifier/105/mistranslation/2684 create mode 100644 results/classifier/105/mistranslation/2694 create mode 100644 results/classifier/105/mistranslation/2709 create mode 100644 results/classifier/105/mistranslation/2740 create mode 100644 results/classifier/105/mistranslation/2766 create mode 100644 results/classifier/105/mistranslation/2770 create mode 100644 results/classifier/105/mistranslation/2776 create mode 100644 results/classifier/105/mistranslation/2786 create mode 100644 results/classifier/105/mistranslation/2795 create mode 100644 results/classifier/105/mistranslation/2811 create mode 100644 results/classifier/105/mistranslation/2823 create mode 100644 results/classifier/105/mistranslation/2837 create mode 100644 results/classifier/105/mistranslation/2868 create mode 100644 
results/classifier/105/mistranslation/2879 create mode 100644 results/classifier/105/mistranslation/2901 create mode 100644 results/classifier/105/mistranslation/2932 create mode 100644 results/classifier/105/mistranslation/2942 create mode 100644 results/classifier/105/mistranslation/2949 create mode 100644 results/classifier/105/mistranslation/297 create mode 100644 results/classifier/105/mistranslation/2974 create mode 100644 results/classifier/105/mistranslation/2977 create mode 100644 results/classifier/105/mistranslation/314 create mode 100644 results/classifier/105/mistranslation/342 create mode 100644 results/classifier/105/mistranslation/343 create mode 100644 results/classifier/105/mistranslation/345 create mode 100644 results/classifier/105/mistranslation/356 create mode 100644 results/classifier/105/mistranslation/359 create mode 100644 results/classifier/105/mistranslation/364 create mode 100644 results/classifier/105/mistranslation/371 create mode 100644 results/classifier/105/mistranslation/372 create mode 100644 results/classifier/105/mistranslation/373 create mode 100644 results/classifier/105/mistranslation/374 create mode 100644 results/classifier/105/mistranslation/376 create mode 100644 results/classifier/105/mistranslation/378 create mode 100644 results/classifier/105/mistranslation/379 create mode 100644 results/classifier/105/mistranslation/382 create mode 100644 results/classifier/105/mistranslation/388 create mode 100644 results/classifier/105/mistranslation/392 create mode 100644 results/classifier/105/mistranslation/393569 create mode 100644 results/classifier/105/mistranslation/397212 create mode 100644 results/classifier/105/mistranslation/400 create mode 100644 results/classifier/105/mistranslation/421 create mode 100644 results/classifier/105/mistranslation/426 create mode 100644 results/classifier/105/mistranslation/427 create mode 100644 results/classifier/105/mistranslation/435 create mode 100644 results/classifier/105/mistranslation/450 create mode 100644 results/classifier/105/mistranslation/47 create mode 100644 results/classifier/105/mistranslation/470 create mode 100644 results/classifier/105/mistranslation/491 create mode 100644 results/classifier/105/mistranslation/500 create mode 100644 results/classifier/105/mistranslation/502107 create mode 100644 results/classifier/105/mistranslation/508 create mode 100644 results/classifier/105/mistranslation/521994 create mode 100644 results/classifier/105/mistranslation/53 create mode 100644 results/classifier/105/mistranslation/541 create mode 100644 results/classifier/105/mistranslation/562107 create mode 100644 results/classifier/105/mistranslation/568 create mode 100644 results/classifier/105/mistranslation/568053 create mode 100644 results/classifier/105/mistranslation/572 create mode 100644 results/classifier/105/mistranslation/589827 create mode 100644 results/classifier/105/mistranslation/59 create mode 100644 results/classifier/105/mistranslation/596 create mode 100644 results/classifier/105/mistranslation/602 create mode 100644 results/classifier/105/mistranslation/602336 create mode 100644 results/classifier/105/mistranslation/608107 create mode 100644 results/classifier/105/mistranslation/629791 create mode 100644 results/classifier/105/mistranslation/638 create mode 100644 results/classifier/105/mistranslation/64322995 create mode 100644 results/classifier/105/mistranslation/657 create mode 100644 results/classifier/105/mistranslation/658 create mode 100644 
results/classifier/105/mistranslation/658904 create mode 100644 results/classifier/105/mistranslation/673009 create mode 100644 results/classifier/105/mistranslation/687 create mode 100644 results/classifier/105/mistranslation/688 create mode 100644 results/classifier/105/mistranslation/692 create mode 100644 results/classifier/105/mistranslation/70294255 create mode 100644 results/classifier/105/mistranslation/704 create mode 100644 results/classifier/105/mistranslation/713 create mode 100644 results/classifier/105/mistranslation/72 create mode 100644 results/classifier/105/mistranslation/721 create mode 100644 results/classifier/105/mistranslation/727134 create mode 100644 results/classifier/105/mistranslation/74466963 create mode 100644 results/classifier/105/mistranslation/74545755 create mode 100644 results/classifier/105/mistranslation/746 create mode 100644 results/classifier/105/mistranslation/753916 create mode 100644 results/classifier/105/mistranslation/764252 create mode 100644 results/classifier/105/mistranslation/77 create mode 100644 results/classifier/105/mistranslation/773 create mode 100644 results/classifier/105/mistranslation/793 create mode 100644 results/classifier/105/mistranslation/795866 create mode 100644 results/classifier/105/mistranslation/80604314 create mode 100644 results/classifier/105/mistranslation/814 create mode 100644 results/classifier/105/mistranslation/814222 create mode 100644 results/classifier/105/mistranslation/828 create mode 100644 results/classifier/105/mistranslation/83 create mode 100644 results/classifier/105/mistranslation/838 create mode 100644 results/classifier/105/mistranslation/853 create mode 100644 results/classifier/105/mistranslation/862 create mode 100644 results/classifier/105/mistranslation/864490 create mode 100644 results/classifier/105/mistranslation/870 create mode 100644 results/classifier/105/mistranslation/886255 create mode 100644 results/classifier/105/mistranslation/889053 create mode 100644 results/classifier/105/mistranslation/890 create mode 100644 results/classifier/105/mistranslation/891625 create mode 100644 results/classifier/105/mistranslation/893367 create mode 100644 results/classifier/105/mistranslation/895 create mode 100644 results/classifier/105/mistranslation/906 create mode 100644 results/classifier/105/mistranslation/917 create mode 100644 results/classifier/105/mistranslation/939437 create mode 100644 results/classifier/105/mistranslation/939443 create mode 100644 results/classifier/105/mistranslation/940 create mode 100644 results/classifier/105/mistranslation/942659 create mode 100644 results/classifier/105/mistranslation/947273 create mode 100644 results/classifier/105/mistranslation/950 create mode 100644 results/classifier/105/mistranslation/961757 create mode 100644 results/classifier/105/mistranslation/963 create mode 100644 results/classifier/105/mistranslation/988128 create mode 100644 results/classifier/105/mistranslation/995758 create mode 100644 results/classifier/105/mistranslation/996798 create mode 100644 results/classifier/105/network/05479587 create mode 100644 results/classifier/105/network/1010484 create mode 100644 results/classifier/105/network/1014099 create mode 100644 results/classifier/105/network/1054180 create mode 100644 results/classifier/105/network/1067 create mode 100644 results/classifier/105/network/1071 create mode 100644 results/classifier/105/network/1139 create mode 100644 results/classifier/105/network/1158 create mode 100644 results/classifier/105/network/1174 
create mode 100644 results/classifier/105/network/1176366 create mode 100644 results/classifier/105/network/1189 create mode 100644 results/classifier/105/network/1192464 create mode 100644 results/classifier/105/network/1196727 create mode 100644 results/classifier/105/network/1222034 create mode 100644 results/classifier/105/network/127 create mode 100644 results/classifier/105/network/1279 create mode 100644 results/classifier/105/network/1286 create mode 100644 results/classifier/105/network/1297781 create mode 100644 results/classifier/105/network/1299 create mode 100644 results/classifier/105/network/1309 create mode 100644 results/classifier/105/network/1364 create mode 100644 results/classifier/105/network/1369347 create mode 100644 results/classifier/105/network/1381 create mode 100644 results/classifier/105/network/1385 create mode 100644 results/classifier/105/network/1400 create mode 100644 results/classifier/105/network/1402289 create mode 100644 results/classifier/105/network/1422 create mode 100644 results/classifier/105/network/1440 create mode 100644 results/classifier/105/network/1451 create mode 100644 results/classifier/105/network/1462 create mode 100644 results/classifier/105/network/1482 create mode 100644 results/classifier/105/network/1502095 create mode 100644 results/classifier/105/network/1505 create mode 100644 results/classifier/105/network/151 create mode 100644 results/classifier/105/network/1543163 create mode 100644 results/classifier/105/network/1569988 create mode 100644 results/classifier/105/network/1574327 create mode 100644 results/classifier/105/network/1575561 create mode 100644 results/classifier/105/network/1585433 create mode 100644 results/classifier/105/network/1588591 create mode 100644 results/classifier/105/network/1604303 create mode 100644 results/classifier/105/network/1633508 create mode 100644 results/classifier/105/network/1634726 create mode 100644 results/classifier/105/network/1656 create mode 100644 results/classifier/105/network/1656927 create mode 100644 results/classifier/105/network/1662 create mode 100644 results/classifier/105/network/1702798 create mode 100644 results/classifier/105/network/1719689 create mode 100644 results/classifier/105/network/1721788 create mode 100644 results/classifier/105/network/1724477 create mode 100644 results/classifier/105/network/1732 create mode 100644 results/classifier/105/network/1751 create mode 100644 results/classifier/105/network/1754372 create mode 100644 results/classifier/105/network/1757 create mode 100644 results/classifier/105/network/1773753 create mode 100644 results/classifier/105/network/1779447 create mode 100644 results/classifier/105/network/1783 create mode 100644 results/classifier/105/network/1809453 create mode 100644 results/classifier/105/network/1814352 create mode 100644 results/classifier/105/network/1824622 create mode 100644 results/classifier/105/network/1832281 create mode 100644 results/classifier/105/network/1832877 create mode 100644 results/classifier/105/network/1849644 create mode 100644 results/classifier/105/network/1856834 create mode 100644 results/classifier/105/network/1857226 create mode 100644 results/classifier/105/network/1861875 create mode 100644 results/classifier/105/network/1862979 create mode 100644 results/classifier/105/network/1874539 create mode 100644 results/classifier/105/network/1874676 create mode 100644 results/classifier/105/network/1876 create mode 100644 results/classifier/105/network/1876187 create mode 100644 
results/classifier/105/network/1881 create mode 100644 results/classifier/105/network/1883984 create mode 100644 results/classifier/105/network/1884169 create mode 100644 results/classifier/105/network/1884425 create mode 100644 results/classifier/105/network/1886793 create mode 100644 results/classifier/105/network/1894781 create mode 100644 results/classifier/105/network/190 create mode 100644 results/classifier/105/network/1903470 create mode 100644 results/classifier/105/network/1904954 create mode 100644 results/classifier/105/network/1912059 create mode 100644 results/classifier/105/network/1913012 create mode 100644 results/classifier/105/network/1957 create mode 100644 results/classifier/105/network/198 create mode 100644 results/classifier/105/network/199 create mode 100644 results/classifier/105/network/2009 create mode 100644 results/classifier/105/network/2017 create mode 100644 results/classifier/105/network/2019 create mode 100644 results/classifier/105/network/2023 create mode 100644 results/classifier/105/network/2024 create mode 100644 results/classifier/105/network/2109 create mode 100644 results/classifier/105/network/2113 create mode 100644 results/classifier/105/network/2143 create mode 100644 results/classifier/105/network/2178 create mode 100644 results/classifier/105/network/218 create mode 100644 results/classifier/105/network/2182 create mode 100644 results/classifier/105/network/2189 create mode 100644 results/classifier/105/network/2209 create mode 100644 results/classifier/105/network/2210 create mode 100644 results/classifier/105/network/2228 create mode 100644 results/classifier/105/network/235 create mode 100644 results/classifier/105/network/2364 create mode 100644 results/classifier/105/network/238 create mode 100644 results/classifier/105/network/2409 create mode 100644 results/classifier/105/network/2439 create mode 100644 results/classifier/105/network/2459 create mode 100644 results/classifier/105/network/2461 create mode 100644 results/classifier/105/network/2494 create mode 100644 results/classifier/105/network/2514 create mode 100644 results/classifier/105/network/2528 create mode 100644 results/classifier/105/network/2552 create mode 100644 results/classifier/105/network/2553 create mode 100644 results/classifier/105/network/2623 create mode 100644 results/classifier/105/network/2668 create mode 100644 results/classifier/105/network/2670 create mode 100644 results/classifier/105/network/2685 create mode 100644 results/classifier/105/network/2688 create mode 100644 results/classifier/105/network/2727 create mode 100644 results/classifier/105/network/2745 create mode 100644 results/classifier/105/network/2746 create mode 100644 results/classifier/105/network/2756 create mode 100644 results/classifier/105/network/2758 create mode 100644 results/classifier/105/network/2767 create mode 100644 results/classifier/105/network/277 create mode 100644 results/classifier/105/network/2780 create mode 100644 results/classifier/105/network/2814 create mode 100644 results/classifier/105/network/282 create mode 100644 results/classifier/105/network/2827 create mode 100644 results/classifier/105/network/2829 create mode 100644 results/classifier/105/network/2849 create mode 100644 results/classifier/105/network/2872 create mode 100644 results/classifier/105/network/2884 create mode 100644 results/classifier/105/network/2951 create mode 100644 results/classifier/105/network/2970 create mode 100644 results/classifier/105/network/299 create mode 100644 
results/classifier/105/network/308 create mode 100644 results/classifier/105/network/309 create mode 100644 results/classifier/105/network/335 create mode 100644 results/classifier/105/network/336 create mode 100644 results/classifier/105/network/348 create mode 100644 results/classifier/105/network/360 create mode 100644 results/classifier/105/network/377 create mode 100644 results/classifier/105/network/401 create mode 100644 results/classifier/105/network/428 create mode 100644 results/classifier/105/network/460 create mode 100644 results/classifier/105/network/465 create mode 100644 results/classifier/105/network/485250 create mode 100644 results/classifier/105/network/495566 create mode 100644 results/classifier/105/network/517 create mode 100644 results/classifier/105/network/524447 create mode 100644 results/classifier/105/network/539 create mode 100644 results/classifier/105/network/551545 create mode 100644 results/classifier/105/network/557 create mode 100644 results/classifier/105/network/559 create mode 100644 results/classifier/105/network/580 create mode 100644 results/classifier/105/network/590552 create mode 100644 results/classifier/105/network/593 create mode 100644 results/classifier/105/network/605 create mode 100644 results/classifier/105/network/62179944 create mode 100644 results/classifier/105/network/626 create mode 100644 results/classifier/105/network/641118 create mode 100644 results/classifier/105/network/676029 create mode 100644 results/classifier/105/network/676934 create mode 100644 results/classifier/105/network/741 create mode 100644 results/classifier/105/network/762 create mode 100644 results/classifier/105/network/774 create mode 100644 results/classifier/105/network/806656 create mode 100644 results/classifier/105/network/807 create mode 100644 results/classifier/105/network/811 create mode 100644 results/classifier/105/network/812 create mode 100644 results/classifier/105/network/829455 create mode 100644 results/classifier/105/network/838974 create mode 100644 results/classifier/105/network/874 create mode 100644 results/classifier/105/network/894037 create mode 100644 results/classifier/105/network/898 create mode 100644 results/classifier/105/network/899 create mode 100644 results/classifier/105/network/903365 create mode 100644 results/classifier/105/network/912 create mode 100644 results/classifier/105/network/960 create mode 100644 results/classifier/105/network/97 create mode 100644 results/classifier/105/network/974 create mode 100644 results/classifier/105/network/976 create mode 100644 results/classifier/105/network/984476 create mode 100644 results/classifier/105/network/999 create mode 100644 results/classifier/105/other/02364653 create mode 100644 results/classifier/105/other/02572177 create mode 100644 results/classifier/105/other/100 create mode 100644 results/classifier/105/other/1008136 create mode 100644 results/classifier/105/other/1010 create mode 100644 results/classifier/105/other/1012 create mode 100644 results/classifier/105/other/1012023 create mode 100644 results/classifier/105/other/1014681 create mode 100644 results/classifier/105/other/1016 create mode 100644 results/classifier/105/other/1018530 create mode 100644 results/classifier/105/other/1022 create mode 100644 results/classifier/105/other/1025244 create mode 100644 results/classifier/105/other/1029 create mode 100644 results/classifier/105/other/1031920 create mode 100644 results/classifier/105/other/1036363 create mode 100644 results/classifier/105/other/1037606 
create mode 100644 results/classifier/105/other/1037675 create mode 100644 results/classifier/105/other/1038070 create mode 100644 results/classifier/105/other/1038136 create mode 100644 results/classifier/105/other/1042388 create mode 100644 results/classifier/105/other/1052 create mode 100644 results/classifier/105/other/1054558 create mode 100644 results/classifier/105/other/1060928 create mode 100644 results/classifier/105/other/1061 create mode 100644 results/classifier/105/other/1062201 create mode 100644 results/classifier/105/other/1062220 create mode 100644 results/classifier/105/other/1062411 create mode 100644 results/classifier/105/other/1066055 create mode 100644 results/classifier/105/other/1077514 create mode 100644 results/classifier/105/other/1077708 create mode 100644 results/classifier/105/other/1079713 create mode 100644 results/classifier/105/other/1084148 create mode 100644 results/classifier/105/other/1086782 create mode 100644 results/classifier/105/other/1087114 create mode 100644 results/classifier/105/other/1087974 create mode 100644 results/classifier/105/other/1089005 create mode 100644 results/classifier/105/other/1089281 create mode 100644 results/classifier/105/other/1089496 create mode 100644 results/classifier/105/other/1090726 create mode 100644 results/classifier/105/other/1095 create mode 100644 results/classifier/105/other/1096714 create mode 100644 results/classifier/105/other/1102 create mode 100644 results/classifier/105/other/1105670 create mode 100644 results/classifier/105/other/1109 create mode 100644 results/classifier/105/other/1117 create mode 100644 results/classifier/105/other/1118 create mode 100644 results/classifier/105/other/1122 create mode 100644 results/classifier/105/other/1122492 create mode 100644 results/classifier/105/other/1123 create mode 100644 results/classifier/105/other/1123975 create mode 100644 results/classifier/105/other/1128935 create mode 100644 results/classifier/105/other/1129571 create mode 100644 results/classifier/105/other/1131757 create mode 100644 results/classifier/105/other/1133769 create mode 100644 results/classifier/105/other/1143 create mode 100644 results/classifier/105/other/1148 create mode 100644 results/classifier/105/other/1170 create mode 100644 results/classifier/105/other/1173 create mode 100644 results/classifier/105/other/1174654 create mode 100644 results/classifier/105/other/1175089 create mode 100644 results/classifier/105/other/1176 create mode 100644 results/classifier/105/other/1177774 create mode 100644 results/classifier/105/other/1178101 create mode 100644 results/classifier/105/other/1180 create mode 100644 results/classifier/105/other/1180923 create mode 100644 results/classifier/105/other/1180970 create mode 100644 results/classifier/105/other/1181796 create mode 100644 results/classifier/105/other/1182490 create mode 100644 results/classifier/105/other/1184089 create mode 100644 results/classifier/105/other/1185311 create mode 100644 results/classifier/105/other/1188 create mode 100644 results/classifier/105/other/1192 create mode 100644 results/classifier/105/other/1192499 create mode 100644 results/classifier/105/other/1192780 create mode 100644 results/classifier/105/other/1193628 create mode 100644 results/classifier/105/other/1197 create mode 100644 results/classifier/105/other/1201446 create mode 100644 results/classifier/105/other/1205156 create mode 100644 results/classifier/105/other/1217 create mode 100644 results/classifier/105/other/1218 create mode 100644 
results/classifier/105/other/1218098 create mode 100644 results/classifier/105/other/1219207 create mode 100644 results/classifier/105/other/1219234 create mode 100644 results/classifier/105/other/1222 create mode 100644 results/classifier/105/other/1226531 create mode 100644 results/classifier/105/other/1233225 create mode 100644 results/classifier/105/other/1234179 create mode 100644 results/classifier/105/other/1236 create mode 100644 results/classifier/105/other/1243287 create mode 100644 results/classifier/105/other/1245703 create mode 100644 results/classifier/105/other/1246890 create mode 100644 results/classifier/105/other/1248959 create mode 100644 results/classifier/105/other/1250360 create mode 100644 results/classifier/105/other/1251470 create mode 100644 results/classifier/105/other/1254672 create mode 100644 results/classifier/105/other/1254786 create mode 100644 results/classifier/105/other/1254828 create mode 100644 results/classifier/105/other/1257099 create mode 100644 results/classifier/105/other/1257334 create mode 100644 results/classifier/105/other/1258168 create mode 100644 results/classifier/105/other/1261320 create mode 100644 results/classifier/105/other/1261450 create mode 100644 results/classifier/105/other/1267955 create mode 100644 results/classifier/105/other/1268279 create mode 100644 results/classifier/105/other/1269606 create mode 100644 results/classifier/105/other/1270397 create mode 100644 results/classifier/105/other/1272 create mode 100644 results/classifier/105/other/1277433 create mode 100644 results/classifier/105/other/1279500 create mode 100644 results/classifier/105/other/1283 create mode 100644 results/classifier/105/other/1286253 create mode 100644 results/classifier/105/other/12869209 create mode 100644 results/classifier/105/other/1290370 create mode 100644 results/classifier/105/other/1291 create mode 100644 results/classifier/105/other/1294898 create mode 100644 results/classifier/105/other/1295587 create mode 100644 results/classifier/105/other/1297487 create mode 100644 results/classifier/105/other/1303926 create mode 100644 results/classifier/105/other/1306 create mode 100644 results/classifier/105/other/1308341 create mode 100644 results/classifier/105/other/1308542 create mode 100644 results/classifier/105/other/1310714 create mode 100644 results/classifier/105/other/1312561 create mode 100644 results/classifier/105/other/1314857 create mode 100644 results/classifier/105/other/1317603 create mode 100644 results/classifier/105/other/1319100 create mode 100644 results/classifier/105/other/1321464 create mode 100644 results/classifier/105/other/1323 create mode 100644 results/classifier/105/other/1324724 create mode 100644 results/classifier/105/other/1326 create mode 100644 results/classifier/105/other/1327 create mode 100644 results/classifier/105/other/1327608 create mode 100644 results/classifier/105/other/1330 create mode 100644 results/classifier/105/other/1333651 create mode 100644 results/classifier/105/other/1336194 create mode 100644 results/classifier/105/other/13442371 create mode 100644 results/classifier/105/other/1347 create mode 100644 results/classifier/105/other/1348106 create mode 100644 results/classifier/105/other/1350 create mode 100644 results/classifier/105/other/1350435 create mode 100644 results/classifier/105/other/1352465 create mode 100644 results/classifier/105/other/1353 create mode 100644 results/classifier/105/other/1359 create mode 100644 results/classifier/105/other/1359383 create mode 100644 
results/classifier/105/other/1362635 create mode 100644 results/classifier/105/other/1364501 create mode 100644 results/classifier/105/other/1367 create mode 100644 results/classifier/105/other/1368204 create mode 100644 results/classifier/105/other/1371915 create mode 100644 results/classifier/105/other/1373228 create mode 100644 results/classifier/105/other/1373362 create mode 100644 results/classifier/105/other/1377095 create mode 100644 results/classifier/105/other/1378554 create mode 100644 results/classifier/105/other/138 create mode 100644 results/classifier/105/other/1382 create mode 100644 results/classifier/105/other/1386 create mode 100644 results/classifier/105/other/1389 create mode 100644 results/classifier/105/other/1390520 create mode 100644 results/classifier/105/other/1392504 create mode 100644 results/classifier/105/other/1393 create mode 100644 results/classifier/105/other/1395 create mode 100644 results/classifier/105/other/1396052 create mode 100644 results/classifier/105/other/1397157 create mode 100644 results/classifier/105/other/1399 create mode 100644 results/classifier/105/other/1399957 create mode 100644 results/classifier/105/other/1400768 create mode 100644 results/classifier/105/other/1401798 create mode 100644 results/classifier/105/other/1402 create mode 100644 results/classifier/105/other/1404278 create mode 100644 results/classifier/105/other/1404610 create mode 100644 results/classifier/105/other/1407454 create mode 100644 results/classifier/105/other/1408 create mode 100644 results/classifier/105/other/1411 create mode 100644 results/classifier/105/other/1414222 create mode 100644 results/classifier/105/other/1414466 create mode 100644 results/classifier/105/other/1415 create mode 100644 results/classifier/105/other/1418 create mode 100644 results/classifier/105/other/1422285 create mode 100644 results/classifier/105/other/1423124 create mode 100644 results/classifier/105/other/1424 create mode 100644 results/classifier/105/other/1425 create mode 100644 results/classifier/105/other/1427 create mode 100644 results/classifier/105/other/1428657 create mode 100644 results/classifier/105/other/1428958 create mode 100644 results/classifier/105/other/1429 create mode 100644 results/classifier/105/other/1429841 create mode 100644 results/classifier/105/other/1430 create mode 100644 results/classifier/105/other/1434779 create mode 100644 results/classifier/105/other/1436 create mode 100644 results/classifier/105/other/1444 create mode 100644 results/classifier/105/other/1446 create mode 100644 results/classifier/105/other/1448985 create mode 100644 results/classifier/105/other/1452230 create mode 100644 results/classifier/105/other/1452904 create mode 100644 results/classifier/105/other/1453436 create mode 100644 results/classifier/105/other/1455254 create mode 100644 results/classifier/105/other/1455475 create mode 100644 results/classifier/105/other/1456819 create mode 100644 results/classifier/105/other/1457 create mode 100644 results/classifier/105/other/1457275 create mode 100644 results/classifier/105/other/1462640 create mode 100644 results/classifier/105/other/1464611 create mode 100644 results/classifier/105/other/1467240 create mode 100644 results/classifier/105/other/1470170 create mode 100644 results/classifier/105/other/1472083 create mode 100644 results/classifier/105/other/1474263 create mode 100644 results/classifier/105/other/1478360 create mode 100644 results/classifier/105/other/1484990 create mode 100644 results/classifier/105/other/1486911 
create mode 100644 results/classifier/105/other/1487264 create mode 100644 results/classifier/105/other/1488901 create mode 100644 results/classifier/105/other/1489 create mode 100644 results/classifier/105/other/1493 create mode 100644 results/classifier/105/other/1494 create mode 100644 results/classifier/105/other/1494350 create mode 100644 results/classifier/105/other/1495380 create mode 100644 results/classifier/105/other/1496384 create mode 100644 results/classifier/105/other/1498144 create mode 100644 results/classifier/105/other/1499908 create mode 100644 results/classifier/105/other/1500265 create mode 100644 results/classifier/105/other/1504513 create mode 100644 results/classifier/105/other/1505041 create mode 100644 results/classifier/105/other/1505759 create mode 100644 results/classifier/105/other/1509 create mode 100644 results/classifier/105/other/1510 create mode 100644 results/classifier/105/other/1511887 create mode 100644 results/classifier/105/other/1514 create mode 100644 results/classifier/105/other/1516408 create mode 100644 results/classifier/105/other/1525123 create mode 100644 results/classifier/105/other/1525676 create mode 100644 results/classifier/105/other/1525682 create mode 100644 results/classifier/105/other/1527 create mode 100644 results/classifier/105/other/1527765 create mode 100644 results/classifier/105/other/1544524 create mode 100644 results/classifier/105/other/1545024 create mode 100644 results/classifier/105/other/1549 create mode 100644 results/classifier/105/other/1549654 create mode 100644 results/classifier/105/other/1558175 create mode 100644 results/classifier/105/other/1562 create mode 100644 results/classifier/105/other/1568 create mode 100644 results/classifier/105/other/1569053 create mode 100644 results/classifier/105/other/1570134 create mode 100644 results/classifier/105/other/1571084 create mode 100644 results/classifier/105/other/1574572 create mode 100644 results/classifier/105/other/1575 create mode 100644 results/classifier/105/other/1578192 create mode 100644 results/classifier/105/other/1579 create mode 100644 results/classifier/105/other/1579565 create mode 100644 results/classifier/105/other/1581334 create mode 100644 results/classifier/105/other/1581796 create mode 100644 results/classifier/105/other/1581936 create mode 100644 results/classifier/105/other/1585008 create mode 100644 results/classifier/105/other/1585533 create mode 100644 results/classifier/105/other/1587211 create mode 100644 results/classifier/105/other/1588 create mode 100644 results/classifier/105/other/1589272 create mode 100644 results/classifier/105/other/1590 create mode 100644 results/classifier/105/other/1591611 create mode 100644 results/classifier/105/other/1591724 create mode 100644 results/classifier/105/other/1592315 create mode 100644 results/classifier/105/other/1593756 create mode 100644 results/classifier/105/other/1594 create mode 100644 results/classifier/105/other/1594394 create mode 100644 results/classifier/105/other/1595240 create mode 100644 results/classifier/105/other/1596160 create mode 100644 results/classifier/105/other/1596832 create mode 100644 results/classifier/105/other/1598 create mode 100644 results/classifier/105/other/1599539 create mode 100644 results/classifier/105/other/160 create mode 100644 results/classifier/105/other/1602247 create mode 100644 results/classifier/105/other/1603 create mode 100644 results/classifier/105/other/1603580 create mode 100644 results/classifier/105/other/1603636 create mode 100644 
results/classifier/105/other/1605506 create mode 100644 results/classifier/105/other/1605611 create mode 100644 results/classifier/105/other/16056596 create mode 100644 results/classifier/105/other/1607 create mode 100644 results/classifier/105/other/1609968 create mode 100644 results/classifier/105/other/1613133 create mode 100644 results/classifier/105/other/1617929 create mode 100644 results/classifier/105/other/1618122 create mode 100644 results/classifier/105/other/1618265 create mode 100644 results/classifier/105/other/1619991 create mode 100644 results/classifier/105/other/16201167 create mode 100644 results/classifier/105/other/1621 create mode 100644 results/classifier/105/other/1622547 create mode 100644 results/classifier/105/other/16228234 create mode 100644 results/classifier/105/other/1623276 create mode 100644 results/classifier/105/other/1625216 create mode 100644 results/classifier/105/other/1625295 create mode 100644 results/classifier/105/other/1626207 create mode 100644 results/classifier/105/other/1627 create mode 100644 results/classifier/105/other/1628 create mode 100644 results/classifier/105/other/1628971 create mode 100644 results/classifier/105/other/163 create mode 100644 results/classifier/105/other/1630 create mode 100644 results/classifier/105/other/1633 create mode 100644 results/classifier/105/other/1636217 create mode 100644 results/classifier/105/other/1639394 create mode 100644 results/classifier/105/other/1640073 create mode 100644 results/classifier/105/other/1640525 create mode 100644 results/classifier/105/other/1641861 create mode 100644 results/classifier/105/other/1642421 create mode 100644 results/classifier/105/other/1643619 create mode 100644 results/classifier/105/other/1644754 create mode 100644 results/classifier/105/other/1647683 create mode 100644 results/classifier/105/other/1652333 create mode 100644 results/classifier/105/other/1653063 create mode 100644 results/classifier/105/other/1653384 create mode 100644 results/classifier/105/other/1653419 create mode 100644 results/classifier/105/other/1654271 create mode 100644 results/classifier/105/other/1655700 create mode 100644 results/classifier/105/other/1655764 create mode 100644 results/classifier/105/other/1656234 create mode 100644 results/classifier/105/other/1658141 create mode 100644 results/classifier/105/other/1659267 create mode 100644 results/classifier/105/other/1659901 create mode 100644 results/classifier/105/other/1661386 create mode 100644 results/classifier/105/other/1665389 create mode 100644 results/classifier/105/other/1667401 create mode 100644 results/classifier/105/other/1668103 create mode 100644 results/classifier/105/other/1670170 create mode 100644 results/classifier/105/other/1670175 create mode 100644 results/classifier/105/other/1671 create mode 100644 results/classifier/105/other/1672365 create mode 100644 results/classifier/105/other/1673976 create mode 100644 results/classifier/105/other/1674925 create mode 100644 results/classifier/105/other/1678466 create mode 100644 results/classifier/105/other/1679358 create mode 100644 results/classifier/105/other/1680 create mode 100644 results/classifier/105/other/1680991 create mode 100644 results/classifier/105/other/1681439 create mode 100644 results/classifier/105/other/1682093 create mode 100644 results/classifier/105/other/1684 create mode 100644 results/classifier/105/other/1684239 create mode 100644 results/classifier/105/other/1685 create mode 100644 results/classifier/105/other/1685242 create mode 100644 
results/classifier/105/other/1686170 create mode 100644 results/classifier/105/other/1686390 create mode 100644 results/classifier/105/other/1687569 create mode 100644 results/classifier/105/other/1691 create mode 100644 results/classifier/105/other/1691379 create mode 100644 results/classifier/105/other/1693667 create mode 100644 results/classifier/105/other/1699277 create mode 100644 results/classifier/105/other/1701798 create mode 100644 results/classifier/105/other/1701821 create mode 100644 results/classifier/105/other/1701971 create mode 100644 results/classifier/105/other/1701973 create mode 100644 results/classifier/105/other/1702621 create mode 100644 results/classifier/105/other/1703 create mode 100644 results/classifier/105/other/1703506 create mode 100644 results/classifier/105/other/1704 create mode 100644 results/classifier/105/other/1704638 create mode 100644 results/classifier/105/other/1705118 create mode 100644 results/classifier/105/other/1706866 create mode 100644 results/classifier/105/other/1707297 create mode 100644 results/classifier/105/other/1708 create mode 100644 results/classifier/105/other/1708442 create mode 100644 results/classifier/105/other/1708617 create mode 100644 results/classifier/105/other/1711316 create mode 100644 results/classifier/105/other/1713 create mode 100644 results/classifier/105/other/1713408 create mode 100644 results/classifier/105/other/1713516 create mode 100644 results/classifier/105/other/1713825 create mode 100644 results/classifier/105/other/1714331 create mode 100644 results/classifier/105/other/1715162 create mode 100644 results/classifier/105/other/1715700 create mode 100644 results/classifier/105/other/1716292 create mode 100644 results/classifier/105/other/1717708 create mode 100644 results/classifier/105/other/1718295 create mode 100644 results/classifier/105/other/1719282 create mode 100644 results/classifier/105/other/1719870 create mode 100644 results/classifier/105/other/1720747 create mode 100644 results/classifier/105/other/1721 create mode 100644 results/classifier/105/other/1721952 create mode 100644 results/classifier/105/other/1722074 create mode 100644 results/classifier/105/other/1722884 create mode 100644 results/classifier/105/other/1723161 create mode 100644 results/classifier/105/other/1723488 create mode 100644 results/classifier/105/other/1726394 create mode 100644 results/classifier/105/other/1726910 create mode 100644 results/classifier/105/other/1727 create mode 100644 results/classifier/105/other/1727250 create mode 100644 results/classifier/105/other/1727737 create mode 100644 results/classifier/105/other/1728256 create mode 100644 results/classifier/105/other/1728635 create mode 100644 results/classifier/105/other/1728643 create mode 100644 results/classifier/105/other/1728660 create mode 100644 results/classifier/105/other/1728661 create mode 100644 results/classifier/105/other/1732679 create mode 100644 results/classifier/105/other/1732959 create mode 100644 results/classifier/105/other/1736 create mode 100644 results/classifier/105/other/1737444 create mode 100644 results/classifier/105/other/1738 create mode 100644 results/classifier/105/other/1738507 create mode 100644 results/classifier/105/other/1738691 create mode 100644 results/classifier/105/other/1738840 create mode 100644 results/classifier/105/other/1739304 create mode 100644 results/classifier/105/other/1739378 create mode 100644 results/classifier/105/other/1740 create mode 100644 results/classifier/105/other/1740364 create mode 100644 
results/classifier/105/other/1745312 create mode 100644 results/classifier/105/other/1748434 create mode 100644 results/classifier/105/other/1749016 create mode 100644 results/classifier/105/other/1749393 create mode 100644 results/classifier/105/other/1750229 create mode 100644 results/classifier/105/other/1753186 create mode 100644 results/classifier/105/other/1756807 create mode 100644 results/classifier/105/other/1759522 create mode 100644 results/classifier/105/other/1761798 create mode 100644 results/classifier/105/other/1763 create mode 100644 results/classifier/105/other/1763536 create mode 100644 results/classifier/105/other/1765 create mode 100644 results/classifier/105/other/1768246 create mode 100644 results/classifier/105/other/1769053 create mode 100644 results/classifier/105/other/1771238 create mode 100644 results/classifier/105/other/1772075 create mode 100644 results/classifier/105/other/1772165 create mode 100644 results/classifier/105/other/1772166 create mode 100644 results/classifier/105/other/1774 create mode 100644 results/classifier/105/other/1774149 create mode 100644 results/classifier/105/other/17743720 create mode 100644 results/classifier/105/other/1774605 create mode 100644 results/classifier/105/other/1774677 create mode 100644 results/classifier/105/other/1774853 create mode 100644 results/classifier/105/other/1775 create mode 100644 results/classifier/105/other/1775366 create mode 100644 results/classifier/105/other/1775555 create mode 100644 results/classifier/105/other/1776478 create mode 100644 results/classifier/105/other/1776760 create mode 100644 results/classifier/105/other/1777301 create mode 100644 results/classifier/105/other/1777777 create mode 100644 results/classifier/105/other/1778350 create mode 100644 results/classifier/105/other/1779017 create mode 100644 results/classifier/105/other/1780928 create mode 100644 results/classifier/105/other/1782 create mode 100644 results/classifier/105/other/1783362 create mode 100644 results/classifier/105/other/1784900 create mode 100644 results/classifier/105/other/1785197 create mode 100644 results/classifier/105/other/1785308 create mode 100644 results/classifier/105/other/1785670 create mode 100644 results/classifier/105/other/1787012 create mode 100644 results/classifier/105/other/1787505 create mode 100644 results/classifier/105/other/1788665 create mode 100644 results/classifier/105/other/1790018 create mode 100644 results/classifier/105/other/1790975 create mode 100644 results/classifier/105/other/1791 create mode 100644 results/classifier/105/other/1791796 create mode 100644 results/classifier/105/other/1794285 create mode 100644 results/classifier/105/other/1796520 create mode 100644 results/classifier/105/other/1800401 create mode 100644 results/classifier/105/other/1800786 create mode 100644 results/classifier/105/other/1802684 create mode 100644 results/classifier/105/other/1803160 create mode 100644 results/classifier/105/other/1804323 create mode 100644 results/classifier/105/other/1805256 create mode 100644 results/classifier/105/other/1806824 create mode 100644 results/classifier/105/other/1808563 create mode 100644 results/classifier/105/other/1809075 create mode 100644 results/classifier/105/other/1809304 create mode 100644 results/classifier/105/other/1809665 create mode 100644 results/classifier/105/other/1810 create mode 100644 results/classifier/105/other/1810105 create mode 100644 results/classifier/105/other/1810400 create mode 100644 results/classifier/105/other/1810433 create 
mode 100644 results/classifier/105/other/1810603 create mode 100644 results/classifier/105/other/1810975 create mode 100644 results/classifier/105/other/1811244 create mode 100644 results/classifier/105/other/1811533 create mode 100644 results/classifier/105/other/1811543 create mode 100644 results/classifier/105/other/1811653 create mode 100644 results/classifier/105/other/1812 create mode 100644 results/classifier/105/other/1812091 create mode 100644 results/classifier/105/other/1813 create mode 100644 results/classifier/105/other/1813165 create mode 100644 results/classifier/105/other/1813201 create mode 100644 results/classifier/105/other/1813398 create mode 100644 results/classifier/105/other/1813940 create mode 100644 results/classifier/105/other/1815 create mode 100644 results/classifier/105/other/1815078 create mode 100644 results/classifier/105/other/1815371 create mode 100644 results/classifier/105/other/1815993 create mode 100644 results/classifier/105/other/1817239 create mode 100644 results/classifier/105/other/1817525 create mode 100644 results/classifier/105/other/1817865 create mode 100644 results/classifier/105/other/1818122 create mode 100644 results/classifier/105/other/1818207 create mode 100644 results/classifier/105/other/1818367 create mode 100644 results/classifier/105/other/1818937 create mode 100644 results/classifier/105/other/1819182 create mode 100644 results/classifier/105/other/1821006 create mode 100644 results/classifier/105/other/1821054 create mode 100644 results/classifier/105/other/1821430 create mode 100644 results/classifier/105/other/1821444 create mode 100644 results/classifier/105/other/1821595 create mode 100644 results/classifier/105/other/1821839 create mode 100644 results/classifier/105/other/1821884 create mode 100644 results/classifier/105/other/1823169 create mode 100644 results/classifier/105/other/1823458 create mode 100644 results/classifier/105/other/1824053 create mode 100644 results/classifier/105/other/1824331 create mode 100644 results/classifier/105/other/1825452 create mode 100644 results/classifier/105/other/1826172 create mode 100644 results/classifier/105/other/1826422 create mode 100644 results/classifier/105/other/1827871 create mode 100644 results/classifier/105/other/1829 create mode 100644 results/classifier/105/other/1829242 create mode 100644 results/classifier/105/other/1829459 create mode 100644 results/classifier/105/other/1829682 create mode 100644 results/classifier/105/other/1831225 create mode 100644 results/classifier/105/other/1831486 create mode 100644 results/classifier/105/other/1832250 create mode 100644 results/classifier/105/other/1833101 create mode 100644 results/classifier/105/other/1833661 create mode 100644 results/classifier/105/other/1833668 create mode 100644 results/classifier/105/other/1834399 create mode 100644 results/classifier/105/other/1835865 create mode 100644 results/classifier/105/other/1836 create mode 100644 results/classifier/105/other/1836078 create mode 100644 results/classifier/105/other/1836192 create mode 100644 results/classifier/105/other/1836501 create mode 100644 results/classifier/105/other/1836537 create mode 100644 results/classifier/105/other/1837218 create mode 100644 results/classifier/105/other/1837347 create mode 100644 results/classifier/105/other/1837909 create mode 100644 results/classifier/105/other/1838277 create mode 100644 results/classifier/105/other/1838703 create mode 100644 results/classifier/105/other/1838946 create mode 100644 
results/classifier/105/other/1839060 create mode 100644 results/classifier/105/other/1839325 create mode 100644 results/classifier/105/other/1839428 create mode 100644 results/classifier/105/other/1840252 create mode 100644 results/classifier/105/other/1840922 create mode 100644 results/classifier/105/other/1841442 create mode 100644 results/classifier/105/other/1841491 create mode 100644 results/classifier/105/other/1841592 create mode 100644 results/classifier/105/other/1841990 create mode 100644 results/classifier/105/other/1842038 create mode 100644 results/classifier/105/other/1842530 create mode 100644 results/classifier/105/other/1842774 create mode 100644 results/classifier/105/other/1842925 create mode 100644 results/classifier/105/other/1843133 create mode 100644 results/classifier/105/other/1843651 create mode 100644 results/classifier/105/other/1844053 create mode 100644 results/classifier/105/other/1844635 create mode 100644 results/classifier/105/other/1845580 create mode 100644 results/classifier/105/other/1846451 create mode 100644 results/classifier/105/other/1847525 create mode 100644 results/classifier/105/other/1847906 create mode 100644 results/classifier/105/other/1848 create mode 100644 results/classifier/105/other/1848901 create mode 100644 results/classifier/105/other/1849894 create mode 100644 results/classifier/105/other/1850570 create mode 100644 results/classifier/105/other/1850751 create mode 100644 results/classifier/105/other/1851547 create mode 100644 results/classifier/105/other/1851845 create mode 100644 results/classifier/105/other/1852115 create mode 100644 results/classifier/105/other/1853042 create mode 100644 results/classifier/105/other/1853826 create mode 100644 results/classifier/105/other/1854738 create mode 100644 results/classifier/105/other/1855 create mode 100644 results/classifier/105/other/1855072 create mode 100644 results/classifier/105/other/1855617 create mode 100644 results/classifier/105/other/1856399 create mode 100644 results/classifier/105/other/1856706 create mode 100644 results/classifier/105/other/1856724 create mode 100644 results/classifier/105/other/1856837 create mode 100644 results/classifier/105/other/1857640 create mode 100644 results/classifier/105/other/1858 create mode 100644 results/classifier/105/other/1858046 create mode 100644 results/classifier/105/other/1858415 create mode 100644 results/classifier/105/other/1859291 create mode 100644 results/classifier/105/other/1859378 create mode 100644 results/classifier/105/other/1859384 create mode 100644 results/classifier/105/other/1859656 create mode 100644 results/classifier/105/other/1859920 create mode 100644 results/classifier/105/other/1860553 create mode 100644 results/classifier/105/other/1860610 create mode 100644 results/classifier/105/other/1861677 create mode 100644 results/classifier/105/other/1862415 create mode 100644 results/classifier/105/other/1863 create mode 100644 results/classifier/105/other/1863096 create mode 100644 results/classifier/105/other/1863333 create mode 100644 results/classifier/105/other/1863486 create mode 100644 results/classifier/105/other/1863526 create mode 100644 results/classifier/105/other/1864536 create mode 100644 results/classifier/105/other/1865099 create mode 100644 results/classifier/105/other/1866892 create mode 100644 results/classifier/105/other/1866962 create mode 100644 results/classifier/105/other/1868116 create mode 100644 results/classifier/105/other/1869006 create mode 100644 results/classifier/105/other/1871250 
create mode 100644 results/classifier/105/other/1871842 create mode 100644 results/classifier/105/other/1872237 create mode 100644 results/classifier/105/other/1874678 create mode 100644 results/classifier/105/other/1874904 create mode 100644 results/classifier/105/other/1875139 create mode 100644 results/classifier/105/other/1876568 create mode 100644 results/classifier/105/other/1877384 create mode 100644 results/classifier/105/other/1877418 create mode 100644 results/classifier/105/other/1878034 create mode 100644 results/classifier/105/other/1878054 create mode 100644 results/classifier/105/other/1878057 create mode 100644 results/classifier/105/other/1878067 create mode 100644 results/classifier/105/other/1878134 create mode 100644 results/classifier/105/other/1878253 create mode 100644 results/classifier/105/other/1878255 create mode 100644 results/classifier/105/other/1878259 create mode 100644 results/classifier/105/other/1878263 create mode 100644 results/classifier/105/other/1878323 create mode 100644 results/classifier/105/other/1878501 create mode 100644 results/classifier/105/other/1878651 create mode 100644 results/classifier/105/other/1879223 create mode 100644 results/classifier/105/other/1879587 create mode 100644 results/classifier/105/other/1879998 create mode 100644 results/classifier/105/other/1880066 create mode 100644 results/classifier/105/other/1880189 create mode 100644 results/classifier/105/other/1880225 create mode 100644 results/classifier/105/other/1880326 create mode 100644 results/classifier/105/other/1880332 create mode 100644 results/classifier/105/other/1880518 create mode 100644 results/classifier/105/other/1880763 create mode 100644 results/classifier/105/other/1881231 create mode 100644 results/classifier/105/other/1881729 create mode 100644 results/classifier/105/other/1883414 create mode 100644 results/classifier/105/other/1883560 create mode 100644 results/classifier/105/other/1883728 create mode 100644 results/classifier/105/other/1884684 create mode 100644 results/classifier/105/other/1884693 create mode 100644 results/classifier/105/other/1884831 create mode 100644 results/classifier/105/other/1885332 create mode 100644 results/classifier/105/other/1885827 create mode 100644 results/classifier/105/other/1886155 create mode 100644 results/classifier/105/other/1886225 create mode 100644 results/classifier/105/other/1886362 create mode 100644 results/classifier/105/other/1887303 create mode 100644 results/classifier/105/other/1887306 create mode 100644 results/classifier/105/other/1887745 create mode 100644 results/classifier/105/other/1887854 create mode 100644 results/classifier/105/other/1888467 create mode 100644 results/classifier/105/other/1888606 create mode 100644 results/classifier/105/other/1888663 create mode 100644 results/classifier/105/other/1888714 create mode 100644 results/classifier/105/other/1888918 create mode 100644 results/classifier/105/other/1889033 create mode 100644 results/classifier/105/other/1889621 create mode 100644 results/classifier/105/other/1889943 create mode 100644 results/classifier/105/other/1890069 create mode 100644 results/classifier/105/other/1890152 create mode 100644 results/classifier/105/other/1890311 create mode 100644 results/classifier/105/other/1890333 create mode 100644 results/classifier/105/other/1890360 create mode 100644 results/classifier/105/other/1890370 create mode 100644 results/classifier/105/other/1890395 create mode 100644 results/classifier/105/other/1890545 create mode 100644 
results/classifier/105/other/1891341 create mode 100644 results/classifier/105/other/1892540 create mode 100644 results/classifier/105/other/1892541 create mode 100644 results/classifier/105/other/1892544 create mode 100644 results/classifier/105/other/1892962 create mode 100644 results/classifier/105/other/1892978 create mode 100644 results/classifier/105/other/1893003 create mode 100644 results/classifier/105/other/1893691 create mode 100644 results/classifier/105/other/1893758 create mode 100644 results/classifier/105/other/1893807 create mode 100644 results/classifier/105/other/1894071 create mode 100644 results/classifier/105/other/1894869 create mode 100644 results/classifier/105/other/1895 create mode 100644 results/classifier/105/other/1895053 create mode 100644 results/classifier/105/other/1895080 create mode 100644 results/classifier/105/other/1895122 create mode 100644 results/classifier/105/other/1895310 create mode 100644 results/classifier/105/other/1895399 create mode 100644 results/classifier/105/other/1895471 create mode 100644 results/classifier/105/other/1896298 create mode 100644 results/classifier/105/other/1896561 create mode 100644 results/classifier/105/other/1897481 create mode 100644 results/classifier/105/other/1898011 create mode 100644 results/classifier/105/other/1898490 create mode 100644 results/classifier/105/other/1899082 create mode 100644 results/classifier/105/other/1899539 create mode 100644 results/classifier/105/other/1900155 create mode 100644 results/classifier/105/other/1900241 create mode 100644 results/classifier/105/other/1900779 create mode 100644 results/classifier/105/other/1902 create mode 100644 results/classifier/105/other/1902112 create mode 100644 results/classifier/105/other/1902365 create mode 100644 results/classifier/105/other/1902451 create mode 100644 results/classifier/105/other/1902470 create mode 100644 results/classifier/105/other/1903 create mode 100644 results/classifier/105/other/1904259 create mode 100644 results/classifier/105/other/1904315 create mode 100644 results/classifier/105/other/1904331 create mode 100644 results/classifier/105/other/1904652 create mode 100644 results/classifier/105/other/1905 create mode 100644 results/classifier/105/other/1905037 create mode 100644 results/classifier/105/other/1905297 create mode 100644 results/classifier/105/other/1905444 create mode 100644 results/classifier/105/other/1905651 create mode 100644 results/classifier/105/other/1906193 create mode 100644 results/classifier/105/other/1906694 create mode 100644 results/classifier/105/other/1907427 create mode 100644 results/classifier/105/other/1907497 create mode 100644 results/classifier/105/other/1907817 create mode 100644 results/classifier/105/other/1907909 create mode 100644 results/classifier/105/other/1907938 create mode 100644 results/classifier/105/other/1908369 create mode 100644 results/classifier/105/other/1908416 create mode 100644 results/classifier/105/other/1908489 create mode 100644 results/classifier/105/other/1908515 create mode 100644 results/classifier/105/other/1908781 create mode 100644 results/classifier/105/other/1909247 create mode 100644 results/classifier/105/other/1909256 create mode 100644 results/classifier/105/other/1909770 create mode 100644 results/classifier/105/other/1909921 create mode 100644 results/classifier/105/other/1910505 create mode 100644 results/classifier/105/other/1910586 create mode 100644 results/classifier/105/other/1910603 create mode 100644 results/classifier/105/other/1910696 
create mode 100644 results/classifier/105/other/1910826 create mode 100644 results/classifier/105/other/1911075 create mode 100644 results/classifier/105/other/1911216 create mode 100644 results/classifier/105/other/1911351 create mode 100644 results/classifier/105/other/1911839 create mode 100644 results/classifier/105/other/1912170 create mode 100644 results/classifier/105/other/1912777 create mode 100644 results/classifier/105/other/1912790 create mode 100644 results/classifier/105/other/1913505 create mode 100644 results/classifier/105/other/1913510 create mode 100644 results/classifier/105/other/1913668 create mode 100644 results/classifier/105/other/1913669 create mode 100644 results/classifier/105/other/1913873 create mode 100644 results/classifier/105/other/1913914 create mode 100644 results/classifier/105/other/1913915 create mode 100644 results/classifier/105/other/1913916 create mode 100644 results/classifier/105/other/1913919 create mode 100644 results/classifier/105/other/1913923 create mode 100644 results/classifier/105/other/1914021 create mode 100644 results/classifier/105/other/1914236 create mode 100644 results/classifier/105/other/1914282 create mode 100644 results/classifier/105/other/1914294 create mode 100644 results/classifier/105/other/1914638 create mode 100644 results/classifier/105/other/1914849 create mode 100644 results/classifier/105/other/1914870 create mode 100644 results/classifier/105/other/1915431 create mode 100644 results/classifier/105/other/1915531 create mode 100644 results/classifier/105/other/1915535 create mode 100644 results/classifier/105/other/1916112 create mode 100644 results/classifier/105/other/1916344 create mode 100644 results/classifier/105/other/1916394 create mode 100644 results/classifier/105/other/1916506 create mode 100644 results/classifier/105/other/1916775 create mode 100644 results/classifier/105/other/1917082 create mode 100644 results/classifier/105/other/1917085 create mode 100644 results/classifier/105/other/1917161 create mode 100644 results/classifier/105/other/1917442 create mode 100644 results/classifier/105/other/1917542 create mode 100644 results/classifier/105/other/1917591 create mode 100644 results/classifier/105/other/1918 create mode 100644 results/classifier/105/other/1918084 create mode 100644 results/classifier/105/other/1918302 create mode 100644 results/classifier/105/other/1918321 create mode 100644 results/classifier/105/other/1918917 create mode 100644 results/classifier/105/other/1918975 create mode 100644 results/classifier/105/other/1919035 create mode 100644 results/classifier/105/other/1919253 create mode 100644 results/classifier/105/other/1920211 create mode 100644 results/classifier/105/other/1920672 create mode 100644 results/classifier/105/other/1920913 create mode 100644 results/classifier/105/other/1921 create mode 100644 results/classifier/105/other/1921082 create mode 100644 results/classifier/105/other/1921664 create mode 100644 results/classifier/105/other/1922325 create mode 100644 results/classifier/105/other/1922611 create mode 100644 results/classifier/105/other/1922773 create mode 100644 results/classifier/105/other/1923497 create mode 100644 results/classifier/105/other/1923583 create mode 100644 results/classifier/105/other/1924 create mode 100644 results/classifier/105/other/1924912 create mode 100644 results/classifier/105/other/1925094 create mode 100644 results/classifier/105/other/1925417 create mode 100644 results/classifier/105/other/1925512 create mode 100644 
results/classifier/105/other/1926202 create mode 100644 results/classifier/105/other/1926231 create mode 100644 results/classifier/105/other/1926497 create mode 100644 results/classifier/105/other/1926521 create mode 100644 results/classifier/105/other/1927 create mode 100644 results/classifier/105/other/1928 create mode 100644 results/classifier/105/other/1929710 create mode 100644 results/classifier/105/other/1935 create mode 100644 results/classifier/105/other/1944 create mode 100644 results/classifier/105/other/1947 create mode 100644 results/classifier/105/other/195 create mode 100644 results/classifier/105/other/1951 create mode 100644 results/classifier/105/other/1953 create mode 100644 results/classifier/105/other/1967 create mode 100644 results/classifier/105/other/1967248 create mode 100644 results/classifier/105/other/1972 create mode 100644 results/classifier/105/other/1976 create mode 100644 results/classifier/105/other/1987 create mode 100644 results/classifier/105/other/1992 create mode 100644 results/classifier/105/other/1993 create mode 100644 results/classifier/105/other/2029 create mode 100644 results/classifier/105/other/2055003 create mode 100644 results/classifier/105/other/2058 create mode 100644 results/classifier/105/other/206 create mode 100644 results/classifier/105/other/2063 create mode 100644 results/classifier/105/other/2065579 create mode 100644 results/classifier/105/other/2069 create mode 100644 results/classifier/105/other/2078790 create mode 100644 results/classifier/105/other/2083 create mode 100644 results/classifier/105/other/210 create mode 100644 results/classifier/105/other/2100 create mode 100644 results/classifier/105/other/2105 create mode 100644 results/classifier/105/other/2106 create mode 100644 results/classifier/105/other/2115 create mode 100644 results/classifier/105/other/21221931 create mode 100644 results/classifier/105/other/21247035 create mode 100644 results/classifier/105/other/2133 create mode 100644 results/classifier/105/other/2134 create mode 100644 results/classifier/105/other/2169 create mode 100644 results/classifier/105/other/2170 create mode 100644 results/classifier/105/other/2194 create mode 100644 results/classifier/105/other/2208 create mode 100644 results/classifier/105/other/2217 create mode 100644 results/classifier/105/other/2224 create mode 100644 results/classifier/105/other/2230 create mode 100644 results/classifier/105/other/2255 create mode 100644 results/classifier/105/other/2261 create mode 100644 results/classifier/105/other/2262 create mode 100644 results/classifier/105/other/2267 create mode 100644 results/classifier/105/other/2269 create mode 100644 results/classifier/105/other/2273 create mode 100644 results/classifier/105/other/2291 create mode 100644 results/classifier/105/other/2296 create mode 100644 results/classifier/105/other/2297 create mode 100644 results/classifier/105/other/2299 create mode 100644 results/classifier/105/other/2308 create mode 100644 results/classifier/105/other/2324 create mode 100644 results/classifier/105/other/2330 create mode 100644 results/classifier/105/other/23300761 create mode 100644 results/classifier/105/other/23448582 create mode 100644 results/classifier/105/other/2346 create mode 100644 results/classifier/105/other/2353 create mode 100644 results/classifier/105/other/2355 create mode 100644 results/classifier/105/other/2358 create mode 100644 results/classifier/105/other/2370 create mode 100644 results/classifier/105/other/2371 create mode 100644 
results/classifier/105/other/2372 create mode 100644 results/classifier/105/other/2374 create mode 100644 results/classifier/105/other/2379 create mode 100644 results/classifier/105/other/2380 create mode 100644 results/classifier/105/other/2408 create mode 100644 results/classifier/105/other/2414 create mode 100644 results/classifier/105/other/2422 create mode 100644 results/classifier/105/other/2427 create mode 100644 results/classifier/105/other/2432 create mode 100644 results/classifier/105/other/2435 create mode 100644 results/classifier/105/other/2440 create mode 100644 results/classifier/105/other/2441 create mode 100644 results/classifier/105/other/2442 create mode 100644 results/classifier/105/other/2462 create mode 100644 results/classifier/105/other/2474 create mode 100644 results/classifier/105/other/2485 create mode 100644 results/classifier/105/other/2488 create mode 100644 results/classifier/105/other/2489 create mode 100644 results/classifier/105/other/249 create mode 100644 results/classifier/105/other/2498 create mode 100644 results/classifier/105/other/2512 create mode 100644 results/classifier/105/other/2515 create mode 100644 results/classifier/105/other/2519 create mode 100644 results/classifier/105/other/2526 create mode 100644 results/classifier/105/other/2558 create mode 100644 results/classifier/105/other/2560 create mode 100644 results/classifier/105/other/2570 create mode 100644 results/classifier/105/other/2574 create mode 100644 results/classifier/105/other/25892827 create mode 100644 results/classifier/105/other/2593 create mode 100644 results/classifier/105/other/2599 create mode 100644 results/classifier/105/other/2603 create mode 100644 results/classifier/105/other/2611 create mode 100644 results/classifier/105/other/2612 create mode 100644 results/classifier/105/other/2622 create mode 100644 results/classifier/105/other/2625 create mode 100644 results/classifier/105/other/2632 create mode 100644 results/classifier/105/other/2634 create mode 100644 results/classifier/105/other/2656 create mode 100644 results/classifier/105/other/2667 create mode 100644 results/classifier/105/other/2702 create mode 100644 results/classifier/105/other/2710 create mode 100644 results/classifier/105/other/2718 create mode 100644 results/classifier/105/other/2719 create mode 100644 results/classifier/105/other/2732 create mode 100644 results/classifier/105/other/274 create mode 100644 results/classifier/105/other/2742 create mode 100644 results/classifier/105/other/2744 create mode 100644 results/classifier/105/other/2753 create mode 100644 results/classifier/105/other/2760 create mode 100644 results/classifier/105/other/2773 create mode 100644 results/classifier/105/other/2775 create mode 100644 results/classifier/105/other/2789 create mode 100644 results/classifier/105/other/2791 create mode 100644 results/classifier/105/other/2792 create mode 100644 results/classifier/105/other/2819 create mode 100644 results/classifier/105/other/2832 create mode 100644 results/classifier/105/other/2836 create mode 100644 results/classifier/105/other/2852 create mode 100644 results/classifier/105/other/2853 create mode 100644 results/classifier/105/other/2856 create mode 100644 results/classifier/105/other/2857 create mode 100644 results/classifier/105/other/2866 create mode 100644 results/classifier/105/other/2875 create mode 100644 results/classifier/105/other/2898 create mode 100644 results/classifier/105/other/2915 create mode 100644 results/classifier/105/other/2919 create mode 100644 
results/classifier/105/other/2921 create mode 100644 results/classifier/105/other/2934 create mode 100644 results/classifier/105/other/2943 create mode 100644 results/classifier/105/other/2956 create mode 100644 results/classifier/105/other/2969 create mode 100644 results/classifier/105/other/2972 create mode 100644 results/classifier/105/other/2975 create mode 100644 results/classifier/105/other/31349848 create mode 100644 results/classifier/105/other/315 create mode 100644 results/classifier/105/other/32484936 create mode 100644 results/classifier/105/other/35170175 create mode 100644 results/classifier/105/other/396 create mode 100644 results/classifier/105/other/42974450 create mode 100644 results/classifier/105/other/453617 create mode 100644 results/classifier/105/other/463 create mode 100644 results/classifier/105/other/478 create mode 100644 results/classifier/105/other/486 create mode 100644 results/classifier/105/other/495 create mode 100644 results/classifier/105/other/497273 create mode 100644 results/classifier/105/other/498035 create mode 100644 results/classifier/105/other/507 create mode 100644 results/classifier/105/other/518 create mode 100644 results/classifier/105/other/522 create mode 100644 results/classifier/105/other/523 create mode 100644 results/classifier/105/other/545089 create mode 100644 results/classifier/105/other/546458 create mode 100644 results/classifier/105/other/546638 create mode 100644 results/classifier/105/other/55247116 create mode 100644 results/classifier/105/other/55367348 create mode 100644 results/classifier/105/other/55753058 create mode 100644 results/classifier/105/other/56309929 create mode 100644 results/classifier/105/other/568445 create mode 100644 results/classifier/105/other/56937788 create mode 100644 results/classifier/105/other/57 create mode 100644 results/classifier/105/other/57756589 create mode 100644 results/classifier/105/other/583 create mode 100644 results/classifier/105/other/584516 create mode 100644 results/classifier/105/other/588 create mode 100644 results/classifier/105/other/588731 create mode 100644 results/classifier/105/other/589315 create mode 100644 results/classifier/105/other/591666 create mode 100644 results/classifier/105/other/592 create mode 100644 results/classifier/105/other/594 create mode 100644 results/classifier/105/other/59540920 create mode 100644 results/classifier/105/other/597362 create mode 100644 results/classifier/105/other/601946 create mode 100644 results/classifier/105/other/607 create mode 100644 results/classifier/105/other/607204 create mode 100644 results/classifier/105/other/611 create mode 100644 results/classifier/105/other/624 create mode 100644 results/classifier/105/other/634 create mode 100644 results/classifier/105/other/636315 create mode 100644 results/classifier/105/other/638806 create mode 100644 results/classifier/105/other/643465 create mode 100644 results/classifier/105/other/64571620 create mode 100644 results/classifier/105/other/647 create mode 100644 results/classifier/105/other/653 create mode 100644 results/classifier/105/other/657006 create mode 100644 results/classifier/105/other/65781993 create mode 100644 results/classifier/105/other/66743673 create mode 100644 results/classifier/105/other/670769 create mode 100644 results/classifier/105/other/672934 create mode 100644 results/classifier/105/other/68897003 create mode 100644 results/classifier/105/other/696094 create mode 100644 results/classifier/105/other/698 create mode 100644 
results/classifier/105/other/70021271 create mode 100644 results/classifier/105/other/70416488 create mode 100644 results/classifier/105/other/706 create mode 100644 results/classifier/105/other/712337 create mode 100644 results/classifier/105/other/712416 create mode 100644 results/classifier/105/other/714629 create mode 100644 results/classifier/105/other/721825 create mode 100644 results/classifier/105/other/727 create mode 100644 results/classifier/105/other/732155 create mode 100644 results/classifier/105/other/736 create mode 100644 results/classifier/105/other/741887 create mode 100644 results/classifier/105/other/742 create mode 100644 results/classifier/105/other/74715356 create mode 100644 results/classifier/105/other/754 create mode 100644 results/classifier/105/other/758 create mode 100644 results/classifier/105/other/761469 create mode 100644 results/classifier/105/other/766 create mode 100644 results/classifier/105/other/780 create mode 100644 results/classifier/105/other/788697 create mode 100644 results/classifier/105/other/79834768 create mode 100644 results/classifier/105/other/804517 create mode 100644 results/classifier/105/other/806 create mode 100644 results/classifier/105/other/810 create mode 100644 results/classifier/105/other/816 create mode 100644 results/classifier/105/other/81775929 create mode 100644 results/classifier/105/other/818647 create mode 100644 results/classifier/105/other/818673 create mode 100644 results/classifier/105/other/821078 create mode 100644 results/classifier/105/other/825 create mode 100644 results/classifier/105/other/833658 create mode 100644 results/classifier/105/other/844 create mode 100644 results/classifier/105/other/845 create mode 100644 results/classifier/105/other/851 create mode 100644 results/classifier/105/other/85542195 create mode 100644 results/classifier/105/other/855800 create mode 100644 results/classifier/105/other/856 create mode 100644 results/classifier/105/other/878 create mode 100644 results/classifier/105/other/880 create mode 100644 results/classifier/105/other/881 create mode 100644 results/classifier/105/other/882 create mode 100644 results/classifier/105/other/88225572 create mode 100644 results/classifier/105/other/88281850 create mode 100644 results/classifier/105/other/886147 create mode 100644 results/classifier/105/other/886621 create mode 100644 results/classifier/105/other/887883 create mode 100644 results/classifier/105/other/889827 create mode 100644 results/classifier/105/other/891525 create mode 100644 results/classifier/105/other/893208 create mode 100644 results/classifier/105/other/897193 create mode 100644 results/classifier/105/other/897466 create mode 100644 results/classifier/105/other/897750 create mode 100644 results/classifier/105/other/899140 create mode 100644 results/classifier/105/other/906804 create mode 100644 results/classifier/105/other/907063 create mode 100644 results/classifier/105/other/915 create mode 100644 results/classifier/105/other/917645 create mode 100644 results/classifier/105/other/917824 create mode 100644 results/classifier/105/other/918791 create mode 100644 results/classifier/105/other/921208 create mode 100644 results/classifier/105/other/925405 create mode 100644 results/classifier/105/other/928 create mode 100644 results/classifier/105/other/92957605 create mode 100644 results/classifier/105/other/932487 create mode 100644 results/classifier/105/other/932490 create mode 100644 results/classifier/105/other/934 create mode 100644 
results/classifier/105/other/937 create mode 100644 results/classifier/105/other/939027 create mode 100644 results/classifier/105/other/943 create mode 100644 results/classifier/105/other/948 create mode 100644 results/classifier/105/other/949 create mode 100644 results/classifier/105/other/951 create mode 100644 results/classifier/105/other/95154278 create mode 100644 results/classifier/105/other/954099 create mode 100644 results/classifier/105/other/959992 create mode 100644 results/classifier/105/other/960378 create mode 100644 results/classifier/105/other/965133 create mode 100644 results/classifier/105/other/965327 create mode 100644 results/classifier/105/other/96782458 create mode 100644 results/classifier/105/other/977391 create mode 100644 results/classifier/105/other/986770 create mode 100644 results/classifier/105/other/987 create mode 100644 results/classifier/105/other/988909 create mode 100644 results/classifier/105/other/989 create mode 100644 results/classifier/105/other/993 create mode 100644 results/classifier/105/other/994662 create mode 100644 results/classifier/105/semantic/1013714 create mode 100644 results/classifier/105/semantic/1013888 create mode 100644 results/classifier/105/semantic/1034 create mode 100644 results/classifier/105/semantic/1034423 create mode 100644 results/classifier/105/semantic/1038 create mode 100644 results/classifier/105/semantic/1042654 create mode 100644 results/classifier/105/semantic/1077116 create mode 100644 results/classifier/105/semantic/1077838 create mode 100644 results/classifier/105/semantic/1094564 create mode 100644 results/classifier/105/semantic/1094786 create mode 100644 results/classifier/105/semantic/1102027 create mode 100644 results/classifier/105/semantic/1152 create mode 100644 results/classifier/105/semantic/1156313 create mode 100644 results/classifier/105/semantic/1180924 create mode 100644 results/classifier/105/semantic/1212 create mode 100644 results/classifier/105/semantic/1223467 create mode 100644 results/classifier/105/semantic/12360755 create mode 100644 results/classifier/105/semantic/124 create mode 100644 results/classifier/105/semantic/1242765 create mode 100644 results/classifier/105/semantic/1285505 create mode 100644 results/classifier/105/semantic/1288385 create mode 100644 results/classifier/105/semantic/1288620 create mode 100644 results/classifier/105/semantic/1299190 create mode 100644 results/classifier/105/semantic/1299858 create mode 100644 results/classifier/105/semantic/1307225 create mode 100644 results/classifier/105/semantic/1310324 create mode 100644 results/classifier/105/semantic/1338957 create mode 100644 results/classifier/105/semantic/1347555 create mode 100644 results/classifier/105/semantic/1349722 create mode 100644 results/classifier/105/semantic/1366836 create mode 100644 results/classifier/105/semantic/1368815 create mode 100644 results/classifier/105/semantic/1370 create mode 100644 results/classifier/105/semantic/1371 create mode 100644 results/classifier/105/semantic/1372 create mode 100644 results/classifier/105/semantic/1373 create mode 100644 results/classifier/105/semantic/1374 create mode 100644 results/classifier/105/semantic/1375 create mode 100644 results/classifier/105/semantic/1395217 create mode 100644 results/classifier/105/semantic/1405 create mode 100644 results/classifier/105/semantic/1407808 create mode 100644 results/classifier/105/semantic/1425597 create mode 100644 results/classifier/105/semantic/1428352 create mode 100644 
results/classifier/105/semantic/1469 create mode 100644 results/classifier/105/semantic/1477683 create mode 100644 results/classifier/105/semantic/1478376 create mode 100644 results/classifier/105/semantic/1497204 create mode 100644 results/classifier/105/semantic/1497711 create mode 100644 results/classifier/105/semantic/1524546 create mode 100644 results/classifier/105/semantic/1528239 create mode 100644 results/classifier/105/semantic/1528718 create mode 100644 results/classifier/105/semantic/1529449 create mode 100644 results/classifier/105/semantic/1534 create mode 100644 results/classifier/105/semantic/1555076 create mode 100644 results/classifier/105/semantic/1561 create mode 100644 results/classifier/105/semantic/1569 create mode 100644 results/classifier/105/semantic/1588328 create mode 100644 results/classifier/105/semantic/1589564 create mode 100644 results/classifier/105/semantic/1596009 create mode 100644 results/classifier/105/semantic/1603693 create mode 100644 results/classifier/105/semantic/1617114 create mode 100644 results/classifier/105/semantic/1629483 create mode 100644 results/classifier/105/semantic/1670377 create mode 100644 results/classifier/105/semantic/1677492 create mode 100644 results/classifier/105/semantic/1682681 create mode 100644 results/classifier/105/semantic/1689499 create mode 100644 results/classifier/105/semantic/1695286 create mode 100644 results/classifier/105/semantic/1696180 create mode 100644 results/classifier/105/semantic/1701974 create mode 100644 results/classifier/105/semantic/1704658 create mode 100644 results/classifier/105/semantic/1725707 create mode 100644 results/classifier/105/semantic/1735049 create mode 100644 results/classifier/105/semantic/1738545 create mode 100644 results/classifier/105/semantic/1740219 create mode 100644 results/classifier/105/semantic/1743191 create mode 100644 results/classifier/105/semantic/1745316 create mode 100644 results/classifier/105/semantic/1750899 create mode 100644 results/classifier/105/semantic/1751264 create mode 100644 results/classifier/105/semantic/1751674 create mode 100644 results/classifier/105/semantic/1753309 create mode 100644 results/classifier/105/semantic/1760262 create mode 100644 results/classifier/105/semantic/1761027 create mode 100644 results/classifier/105/semantic/1777672 create mode 100644 results/classifier/105/semantic/1779634 create mode 100644 results/classifier/105/semantic/1790617 create mode 100644 results/classifier/105/semantic/1791680 create mode 100644 results/classifier/105/semantic/1798659 create mode 100644 results/classifier/105/semantic/1798780 create mode 100644 results/classifier/105/semantic/1805913 create mode 100644 results/classifier/105/semantic/1809252 create mode 100644 results/classifier/105/semantic/1809546 create mode 100644 results/classifier/105/semantic/1811711 create mode 100644 results/classifier/105/semantic/1813305 create mode 100644 results/classifier/105/semantic/1828508 create mode 100644 results/classifier/105/semantic/1829964 create mode 100644 results/classifier/105/semantic/1834 create mode 100644 results/classifier/105/semantic/1843151 create mode 100644 results/classifier/105/semantic/1843941 create mode 100644 results/classifier/105/semantic/1846 create mode 100644 results/classifier/105/semantic/1846427 create mode 100644 results/classifier/105/semantic/1848231 create mode 100644 results/classifier/105/semantic/185 create mode 100644 results/classifier/105/semantic/1851095 create mode 100644 
results/classifier/105/semantic/1856335 create mode 100644 results/classifier/105/semantic/1859359 create mode 100644 results/classifier/105/semantic/186 create mode 100644 results/classifier/105/semantic/1860575 create mode 100644 results/classifier/105/semantic/1865252 create mode 100644 results/classifier/105/semantic/1867519 create mode 100644 results/classifier/105/semantic/1868055 create mode 100644 results/classifier/105/semantic/1868527 create mode 100644 results/classifier/105/semantic/1870098 create mode 100644 results/classifier/105/semantic/1872847 create mode 100644 results/classifier/105/semantic/1875702 create mode 100644 results/classifier/105/semantic/1877688 create mode 100644 results/classifier/105/semantic/1878348 create mode 100644 results/classifier/105/semantic/1879672 create mode 100644 results/classifier/105/semantic/1879955 create mode 100644 results/classifier/105/semantic/1883268 create mode 100644 results/classifier/105/semantic/1883400 create mode 100644 results/classifier/105/semantic/1884095 create mode 100644 results/classifier/105/semantic/1888964 create mode 100644 results/classifier/105/semantic/1896 create mode 100644 results/classifier/105/semantic/1898215 create mode 100644 results/classifier/105/semantic/1900 create mode 100644 results/classifier/105/semantic/1905562 create mode 100644 results/classifier/105/semantic/1905979 create mode 100644 results/classifier/105/semantic/1906156 create mode 100644 results/classifier/105/semantic/1907952 create mode 100644 results/classifier/105/semantic/1907969 create mode 100644 results/classifier/105/semantic/1908551 create mode 100644 results/classifier/105/semantic/1912065 create mode 100644 results/classifier/105/semantic/1914986 create mode 100644 results/classifier/105/semantic/1915794 create mode 100644 results/classifier/105/semantic/1917940 create mode 100644 results/classifier/105/semantic/1920602 create mode 100644 results/classifier/105/semantic/1921468 create mode 100644 results/classifier/105/semantic/1921948 create mode 100644 results/classifier/105/semantic/1922391 create mode 100644 results/classifier/105/semantic/1923197 create mode 100644 results/classifier/105/semantic/1924603 create mode 100644 results/classifier/105/semantic/1939 create mode 100644 results/classifier/105/semantic/1948 create mode 100644 results/classifier/105/semantic/2007 create mode 100644 results/classifier/105/semantic/2064 create mode 100644 results/classifier/105/semantic/2103 create mode 100644 results/classifier/105/semantic/2185 create mode 100644 results/classifier/105/semantic/2253 create mode 100644 results/classifier/105/semantic/2280 create mode 100644 results/classifier/105/semantic/237164 create mode 100644 results/classifier/105/semantic/2378 create mode 100644 results/classifier/105/semantic/2393 create mode 100644 results/classifier/105/semantic/2434 create mode 100644 results/classifier/105/semantic/2449 create mode 100644 results/classifier/105/semantic/2457 create mode 100644 results/classifier/105/semantic/2460 create mode 100644 results/classifier/105/semantic/2562 create mode 100644 results/classifier/105/semantic/2582 create mode 100644 results/classifier/105/semantic/2649 create mode 100644 results/classifier/105/semantic/2704 create mode 100644 results/classifier/105/semantic/2911 create mode 100644 results/classifier/105/semantic/2953 create mode 100644 results/classifier/105/semantic/304636 create mode 100644 results/classifier/105/semantic/369 create mode 100644 
results/classifier/105/semantic/490484 create mode 100644 results/classifier/105/semantic/526653 create mode 100644 results/classifier/105/semantic/568614 create mode 100644 results/classifier/105/semantic/600 create mode 100644 results/classifier/105/semantic/639651 create mode 100644 results/classifier/105/semantic/645662 create mode 100644 results/classifier/105/semantic/691424 create mode 100644 results/classifier/105/semantic/714 create mode 100644 results/classifier/105/semantic/757702 create mode 100644 results/classifier/105/semantic/855630 create mode 100644 results/classifier/105/semantic/876 create mode 100644 results/classifier/105/semantic/908 create mode 100644 results/classifier/105/semantic/935 create mode 100644 results/classifier/105/semantic/969 create mode 100644 results/classifier/105/socket/1020484 create mode 100644 results/classifier/105/socket/1030666 create mode 100644 results/classifier/105/socket/1031 create mode 100644 results/classifier/105/socket/1055 create mode 100644 results/classifier/105/socket/1064631 create mode 100644 results/classifier/105/socket/1067119 create mode 100644 results/classifier/105/socket/1067517 create mode 100644 results/classifier/105/socket/1075272 create mode 100644 results/classifier/105/socket/1075339 create mode 100644 results/classifier/105/socket/1080086 create mode 100644 results/classifier/105/socket/1090 create mode 100644 results/classifier/105/socket/1185395 create mode 100644 results/classifier/105/socket/1213196 create mode 100644 results/classifier/105/socket/1228285 create mode 100644 results/classifier/105/socket/1253563 create mode 100644 results/classifier/105/socket/1264 create mode 100644 results/classifier/105/socket/1299566 create mode 100644 results/classifier/105/socket/1381639 create mode 100644 results/classifier/105/socket/1410288 create mode 100644 results/classifier/105/socket/1450881 create mode 100644 results/classifier/105/socket/1463812 create mode 100644 results/classifier/105/socket/1542965 create mode 100644 results/classifier/105/socket/1567 create mode 100644 results/classifier/105/socket/1572329 create mode 100644 results/classifier/105/socket/1585432 create mode 100644 results/classifier/105/socket/1586756 create mode 100644 results/classifier/105/socket/1612908 create mode 100644 results/classifier/105/socket/1663079 create mode 100644 results/classifier/105/socket/1673373 create mode 100644 results/classifier/105/socket/1700380 create mode 100644 results/classifier/105/socket/1708462 create mode 100644 results/classifier/105/socket/1721220 create mode 100644 results/classifier/105/socket/1721222 create mode 100644 results/classifier/105/socket/1721224 create mode 100644 results/classifier/105/socket/1725267 create mode 100644 results/classifier/105/socket/1744009 create mode 100644 results/classifier/105/socket/1754605 create mode 100644 results/classifier/105/socket/1756 create mode 100644 results/classifier/105/socket/1759338 create mode 100644 results/classifier/105/socket/1777 create mode 100644 results/classifier/105/socket/1781280 create mode 100644 results/classifier/105/socket/1796754 create mode 100644 results/classifier/105/socket/1801933 create mode 100644 results/classifier/105/socket/1823790 create mode 100644 results/classifier/105/socket/1826401 create mode 100644 results/classifier/105/socket/1828207 create mode 100644 results/classifier/105/socket/1828608 create mode 100644 results/classifier/105/socket/1829779 create mode 100644 results/classifier/105/socket/1829945 create 
mode 100644 results/classifier/105/socket/1836453 create mode 100644 results/classifier/105/socket/1837 create mode 100644 results/classifier/105/socket/1837651 create mode 100644 results/classifier/105/socket/1843590 create mode 100644 results/classifier/105/socket/1857811 create mode 100644 results/classifier/105/socket/1861884 create mode 100644 results/classifier/105/socket/1867601 create mode 100644 results/classifier/105/socket/1868221 create mode 100644 results/classifier/105/socket/1870331 create mode 100644 results/classifier/105/socket/1877015 create mode 100644 results/classifier/105/socket/1882784 create mode 100644 results/classifier/105/socket/1885718 create mode 100644 results/classifier/105/socket/1886 create mode 100644 results/classifier/105/socket/1887604 create mode 100644 results/classifier/105/socket/1888303 create mode 100644 results/classifier/105/socket/1898084 create mode 100644 results/classifier/105/socket/1901440 create mode 100644 results/classifier/105/socket/1904 create mode 100644 results/classifier/105/socket/1906948 create mode 100644 results/classifier/105/socket/1907926 create mode 100644 results/classifier/105/socket/1923692 create mode 100644 results/classifier/105/socket/1925449 create mode 100644 results/classifier/105/socket/1949 create mode 100644 results/classifier/105/socket/2191 create mode 100644 results/classifier/105/socket/2254 create mode 100644 results/classifier/105/socket/2292 create mode 100644 results/classifier/105/socket/2341 create mode 100644 results/classifier/105/socket/2444 create mode 100644 results/classifier/105/socket/2584 create mode 100644 results/classifier/105/socket/2624 create mode 100644 results/classifier/105/socket/2678 create mode 100644 results/classifier/105/socket/284 create mode 100644 results/classifier/105/socket/2867 create mode 100644 results/classifier/105/socket/2876 create mode 100644 results/classifier/105/socket/2925 create mode 100644 results/classifier/105/socket/323 create mode 100644 results/classifier/105/socket/347 create mode 100644 results/classifier/105/socket/588735 create mode 100644 results/classifier/105/socket/761471 create mode 100644 results/classifier/105/socket/778032 create mode 100644 results/classifier/105/socket/796202 create mode 100644 results/classifier/105/socket/833 create mode 100644 results/classifier/105/socket/872 create mode 100644 results/classifier/105/socket/939995 create mode 100644 results/classifier/105/vnc/1004050 create mode 100644 results/classifier/105/vnc/1047576 create mode 100644 results/classifier/105/vnc/11357571 create mode 100644 results/classifier/105/vnc/1136477 create mode 100644 results/classifier/105/vnc/1150 create mode 100644 results/classifier/105/vnc/1162644 create mode 100644 results/classifier/105/vnc/1183 create mode 100644 results/classifier/105/vnc/1207686 create mode 100644 results/classifier/105/vnc/1246990 create mode 100644 results/classifier/105/vnc/1321028 create mode 100644 results/classifier/105/vnc/1339 create mode 100644 results/classifier/105/vnc/1354279 create mode 100644 results/classifier/105/vnc/1388735 create mode 100644 results/classifier/105/vnc/1391942 create mode 100644 results/classifier/105/vnc/1393486 create mode 100644 results/classifier/105/vnc/1453608 create mode 100644 results/classifier/105/vnc/1453612 create mode 100644 results/classifier/105/vnc/1453613 create mode 100644 results/classifier/105/vnc/1454 create mode 100644 results/classifier/105/vnc/1455912 create mode 100644 results/classifier/105/vnc/1467 
create mode 100644 results/classifier/105/vnc/1486278 create mode 100644 results/classifier/105/vnc/1490853 create mode 100644 results/classifier/105/vnc/1516446 create mode 100644 results/classifier/105/vnc/1548 create mode 100644 results/classifier/105/vnc/1580 create mode 100644 results/classifier/105/vnc/1586194 create mode 100644 results/classifier/105/vnc/1604 create mode 100644 results/classifier/105/vnc/1618431 create mode 100644 results/classifier/105/vnc/1637447 create mode 100644 results/classifier/105/vnc/1649236 create mode 100644 results/classifier/105/vnc/1661176 create mode 100644 results/classifier/105/vnc/1661815 create mode 100644 results/classifier/105/vnc/1673 create mode 100644 results/classifier/105/vnc/1686 create mode 100644 results/classifier/105/vnc/1693649 create mode 100644 results/classifier/105/vnc/1696353 create mode 100644 results/classifier/105/vnc/170 create mode 100644 results/classifier/105/vnc/1705717 create mode 100644 results/classifier/105/vnc/1715186 create mode 100644 results/classifier/105/vnc/1721221 create mode 100644 results/classifier/105/vnc/1732671 create mode 100644 results/classifier/105/vnc/1739371 create mode 100644 results/classifier/105/vnc/1752646 create mode 100644 results/classifier/105/vnc/1762179 create mode 100644 results/classifier/105/vnc/1766904 create mode 100644 results/classifier/105/vnc/1771042 create mode 100644 results/classifier/105/vnc/1785203 create mode 100644 results/classifier/105/vnc/1785734 create mode 100644 results/classifier/105/vnc/1786343 create mode 100644 results/classifier/105/vnc/1795100 create mode 100644 results/classifier/105/vnc/1802465 create mode 100644 results/classifier/105/vnc/1806040 create mode 100644 results/classifier/105/vnc/1816819 create mode 100644 results/classifier/105/vnc/1819108 create mode 100644 results/classifier/105/vnc/1829576 create mode 100644 results/classifier/105/vnc/1856549 create mode 100644 results/classifier/105/vnc/1867786 create mode 100644 results/classifier/105/vnc/1870911 create mode 100644 results/classifier/105/vnc/1872790 create mode 100644 results/classifier/105/vnc/1888431 create mode 100644 results/classifier/105/vnc/1893744 create mode 100644 results/classifier/105/vnc/1903752 create mode 100644 results/classifier/105/vnc/1906516 create mode 100644 results/classifier/105/vnc/1912780 create mode 100644 results/classifier/105/vnc/1923648 create mode 100644 results/classifier/105/vnc/1923861 create mode 100644 results/classifier/105/vnc/1988 create mode 100644 results/classifier/105/vnc/2001 create mode 100644 results/classifier/105/vnc/2171 create mode 100644 results/classifier/105/vnc/2311 create mode 100644 results/classifier/105/vnc/2490 create mode 100644 results/classifier/105/vnc/2492 create mode 100644 results/classifier/105/vnc/2608 create mode 100644 results/classifier/105/vnc/2646 create mode 100644 results/classifier/105/vnc/2772 create mode 100644 results/classifier/105/vnc/327 create mode 100644 results/classifier/105/vnc/33802194 create mode 100644 results/classifier/105/vnc/351 create mode 100644 results/classifier/105/vnc/42613410 create mode 100644 results/classifier/105/vnc/659 create mode 100644 results/classifier/105/vnc/685 create mode 100644 results/classifier/105/vnc/697197 create mode 100644 results/classifier/105/vnc/697510 create mode 100644 results/classifier/105/vnc/70 create mode 100644 results/classifier/105/vnc/723 create mode 100644 results/classifier/105/vnc/759 create mode 100644 results/classifier/105/vnc/772 create mode 
100644 results/classifier/105/vnc/779 create mode 100644 results/classifier/105/vnc/80570214 create mode 100644 results/classifier/105/vnc/824074 create mode 100644 results/classifier/105/vnc/850 create mode 100644 results/classifier/105/vnc/854 create mode 100644 results/classifier/105/vnc/974229 create mode 100644 results/classifier/105/vnc/981 create mode 100644 results/classifier/105/vnc/994412 diff --git a/results/classifier/001/instruction/11357571 b/results/classifier/001/instruction/11357571 new file mode 100644 index 000000000..1c3bc483f --- /dev/null +++ b/results/classifier/001/instruction/11357571 @@ -0,0 +1,47 @@ +instruction: 0.758 +semantic: 0.694 +other: 0.687 +mistranslation: 0.516 + +[Qemu-devel] [BUG] VNC: client won't send FramebufferUpdateRequest if job in flight is aborted + +Hi Gerd, Daniel. + +We noticed that if VncSharePolicy was configured with +VNC_SHARE_POLICY_FORCE_SHARED mode and +multiple vnc clients opened vnc connections, some clients could go blank screen +at high probability. +This problem can be reproduced when we regularly reboot suse12sp3 in graphic +mode both +with RealVNC and noVNC client. + +Then we dig into it and find out that some clients go blank screen because they +don't +send FramebufferUpdateRequest any more. One step further, we notice that each +time +the job in flight is aborted one client go blank screen. + +The bug is triggered in the following procedure. +Guest reboot => graphic mode switch => graphic_hw_update => vga_update_display +=> vga_draw_graphic (full_update = 1) => dpy_gfx_replace_surface => +vnc_dpy_switch => +vnc_abort_display_jobs (client may have job in flight) => job removed from the +queue +If one client has vnc job in flight, *vnc_abort_display_jobs* will wait until +its job is abandoned. +This behavior is done in vnc_worker_thread_loop when 'if (job->vs->ioc == NULL +|| job->vs->abort == true)' +branch is taken. + +As we can see, *vnc_abort_display_jobs* is intended to do some optimization to +avoid unnecessary client update. +But if client sends FramebufferUpdateRequest for some graphic area and its +FramebufferUpdate response job +is abandoned, the client may wait for the response and never send new +FramebufferUpdateRequest, which may +case the client go blank screen forever. + +So I am wondering whether we should drop the *vnc_abort_display_jobs* +optimization or do some trick here +to push the client to send new FramebufferUpdateRequest. Do you have any idea ? + diff --git a/results/classifier/001/instruction/11933524 b/results/classifier/001/instruction/11933524 new file mode 100644 index 000000000..3ff255be0 --- /dev/null +++ b/results/classifier/001/instruction/11933524 @@ -0,0 +1,1125 @@ +instruction: 0.775 +other: 0.771 +mistranslation: 0.719 +semantic: 0.673 + +[BUG] hw/i386/pc.c: CXL Fixed Memory Window should not reserve e820 in bios + +Early-boot e820 records will be inserted by the bios/efi/early boot +software and be reported to the kernel via insert_resource. Later, when +CXL drivers iterate through the regions again, they will insert another +resource and make the RESERVED memory area a child. + +This RESERVED memory area causes the memory region to become unusable, +and as a result attempting to create memory regions with + + `cxl create-region ...` + +Will fail due to the RESERVED area intersecting with the CXL window. 
+ + +During boot the following traceback is observed: + +0xffffffff81101650 in insert_resource_expand_to_fit () +0xffffffff83d964c5 in e820__reserve_resources_late () +0xffffffff83e03210 in pcibios_resource_survey () +0xffffffff83e04f4a in pcibios_init () + +Which produces a call to reserve the CFMWS area: + +(gdb) p *new +$54 = {start = 0x290000000, end = 0x2cfffffff, name = "Reserved", + flags = 0x200, desc = 0x7, parent = 0x0, sibling = 0x0, + child = 0x0} + +Later the Kernel parses ACPI tables and reserves the exact same area as +the CXL Fixed Memory Window. The use of `insert_resource_conflict` +retains the RESERVED region and makes it a child of the new region. + +0xffffffff811016a4 in insert_resource_conflict () + insert_resource () +0xffffffff81a81389 in cxl_parse_cfmws () +0xffffffff818c4a81 in call_handler () + acpi_parse_entries_array () + +(gdb) p/x *new +$59 = {start = 0x290000000, end = 0x2cfffffff, name = "CXL Window 0", + flags = 0x200, desc = 0x0, parent = 0x0, sibling = 0x0, + child = 0x0} + +This produces the following output in /proc/iomem: + +590000000-68fffffff : CXL Window 0 + 590000000-68fffffff : Reserved + +This reserved area causes `get_free_mem_region()` to fail due to a check +against `__region_intersects()`. Due to this reserved area, the +intersect check will only ever return REGION_INTERSECTS, which causes +`cxl create-region` to always fail. + +Signed-off-by: Gregory Price +--- + hw/i386/pc.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/hw/i386/pc.c b/hw/i386/pc.c +index 566accf7e6..5bf5465a21 100644 +--- a/hw/i386/pc.c ++++ b/hw/i386/pc.c +@@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, + hwaddr cxl_size = MiB; + + cxl_base = pc_get_cxl_range_start(pcms); +- e820_add_entry(cxl_base, cxl_size, E820_RESERVED); + memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); + memory_region_add_subregion(system_memory, cxl_base, mr); + cxl_resv_end = cxl_base + cxl_size; +@@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms, + memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw, + "cxl-fixed-memory-region", fw->size); + memory_region_add_subregion(system_memory, fw->base, &fw->mr); +- e820_add_entry(fw->base, fw->size, E820_RESERVED); + cxl_fmw_base += fw->size; + cxl_resv_end = cxl_fmw_base; + } +-- +2.37.3 + +Early-boot e820 records will be inserted by the bios/efi/early boot +software and be reported to the kernel via insert_resource. Later, when +CXL drivers iterate through the regions again, they will insert another +resource and make the RESERVED memory area a child. + +This RESERVED memory area causes the memory region to become unusable, +and as a result attempting to create memory regions with + + `cxl create-region ...` + +Will fail due to the RESERVED area intersecting with the CXL window. + + +During boot the following traceback is observed: + +0xffffffff81101650 in insert_resource_expand_to_fit () +0xffffffff83d964c5 in e820__reserve_resources_late () +0xffffffff83e03210 in pcibios_resource_survey () +0xffffffff83e04f4a in pcibios_init () + +Which produces a call to reserve the CFMWS area: + +(gdb) p *new +$54 = {start = 0x290000000, end = 0x2cfffffff, name = "Reserved", + flags = 0x200, desc = 0x7, parent = 0x0, sibling = 0x0, + child = 0x0} + +Later the Kernel parses ACPI tables and reserves the exact same area as +the CXL Fixed Memory Window. The use of `insert_resource_conflict` +retains the RESERVED region and makes it a child of the new region. 
+ +0xffffffff811016a4 in insert_resource_conflict () + insert_resource () +0xffffffff81a81389 in cxl_parse_cfmws () +0xffffffff818c4a81 in call_handler () + acpi_parse_entries_array () + +(gdb) p/x *new +$59 = {start = 0x290000000, end = 0x2cfffffff, name = "CXL Window 0", + flags = 0x200, desc = 0x0, parent = 0x0, sibling = 0x0, + child = 0x0} + +This produces the following output in /proc/iomem: + +590000000-68fffffff : CXL Window 0 + 590000000-68fffffff : Reserved + +This reserved area causes `get_free_mem_region()` to fail due to a check +against `__region_intersects()`. Due to this reserved area, the +intersect check will only ever return REGION_INTERSECTS, which causes +`cxl create-region` to always fail. + +Signed-off-by: Gregory Price +--- + hw/i386/pc.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/hw/i386/pc.c b/hw/i386/pc.c +index 566accf7e6..5bf5465a21 100644 +--- a/hw/i386/pc.c ++++ b/hw/i386/pc.c +@@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, + hwaddr cxl_size = MiB; +cxl_base = pc_get_cxl_range_start(pcms); +- e820_add_entry(cxl_base, cxl_size, E820_RESERVED); + memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); + memory_region_add_subregion(system_memory, cxl_base, mr); + cxl_resv_end = cxl_base + cxl_size; +@@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms, + memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, +fw, + "cxl-fixed-memory-region", fw->size); + memory_region_add_subregion(system_memory, fw->base, &fw->mr); +Or will this be subregion of cxl_base? + +Thanks, +Pankaj +- e820_add_entry(fw->base, fw->size, E820_RESERVED); + cxl_fmw_base += fw->size; + cxl_resv_end = cxl_fmw_base; + } + +> +> - e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +> +> memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); +> +> memory_region_add_subregion(system_memory, cxl_base, mr); +> +> cxl_resv_end = cxl_base + cxl_size; +> +> @@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms, +> +> memory_region_init_io(&fw->mr, OBJECT(machine), +> +> &cfmws_ops, fw, +> +> "cxl-fixed-memory-region", +> +> fw->size); +> +> memory_region_add_subregion(system_memory, fw->base, +> +> &fw->mr); +> +> +Or will this be subregion of cxl_base? +> +> +Thanks, +> +Pankaj +The memory region backing this memory area still has to be initialized +and added in the QEMU system, but it will now be initialized for use by +linux after PCI/ACPI setup occurs and the CXL driver discovers it via +CDAT. + +It's also still possible to assign this area a static memory region at +bool by setting up the SRATs in the ACPI tables, but that patch is not +upstream yet. + +On Tue, Oct 18, 2022 at 5:14 AM Gregory Price wrote: +> +> +Early-boot e820 records will be inserted by the bios/efi/early boot +> +software and be reported to the kernel via insert_resource. Later, when +> +CXL drivers iterate through the regions again, they will insert another +> +resource and make the RESERVED memory area a child. +I have already sent a patch +https://www.mail-archive.com/qemu-devel@nongnu.org/msg882012.html +. +When the patch is applied, there would not be any reserved entries +even with passing E820_RESERVED . +So this patch needs to be evaluated in the light of the above patch I +sent. Once you apply my patch, does the issue still exist? 
+ +> +> +This RESERVED memory area causes the memory region to become unusable, +> +and as a result attempting to create memory regions with +> +> +`cxl create-region ...` +> +> +Will fail due to the RESERVED area intersecting with the CXL window. +> +> +> +During boot the following traceback is observed: +> +> +0xffffffff81101650 in insert_resource_expand_to_fit () +> +0xffffffff83d964c5 in e820__reserve_resources_late () +> +0xffffffff83e03210 in pcibios_resource_survey () +> +0xffffffff83e04f4a in pcibios_init () +> +> +Which produces a call to reserve the CFMWS area: +> +> +(gdb) p *new +> +$54 = {start = 0x290000000, end = 0x2cfffffff, name = "Reserved", +> +flags = 0x200, desc = 0x7, parent = 0x0, sibling = 0x0, +> +child = 0x0} +> +> +Later the Kernel parses ACPI tables and reserves the exact same area as +> +the CXL Fixed Memory Window. The use of `insert_resource_conflict` +> +retains the RESERVED region and makes it a child of the new region. +> +> +0xffffffff811016a4 in insert_resource_conflict () +> +insert_resource () +> +0xffffffff81a81389 in cxl_parse_cfmws () +> +0xffffffff818c4a81 in call_handler () +> +acpi_parse_entries_array () +> +> +(gdb) p/x *new +> +$59 = {start = 0x290000000, end = 0x2cfffffff, name = "CXL Window 0", +> +flags = 0x200, desc = 0x0, parent = 0x0, sibling = 0x0, +> +child = 0x0} +> +> +This produces the following output in /proc/iomem: +> +> +590000000-68fffffff : CXL Window 0 +> +590000000-68fffffff : Reserved +> +> +This reserved area causes `get_free_mem_region()` to fail due to a check +> +against `__region_intersects()`. Due to this reserved area, the +> +intersect check will only ever return REGION_INTERSECTS, which causes +> +`cxl create-region` to always fail. +> +> +Signed-off-by: Gregory Price +> +--- +> +hw/i386/pc.c | 2 -- +> +1 file changed, 2 deletions(-) +> +> +diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> +index 566accf7e6..5bf5465a21 100644 +> +--- a/hw/i386/pc.c +> ++++ b/hw/i386/pc.c +> +@@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +> +hwaddr cxl_size = MiB; +> +> +cxl_base = pc_get_cxl_range_start(pcms); +> +- e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +> +memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); +> +memory_region_add_subregion(system_memory, cxl_base, mr); +> +cxl_resv_end = cxl_base + cxl_size; +> +@@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms, +> +memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, +> +fw, +> +"cxl-fixed-memory-region", fw->size); +> +memory_region_add_subregion(system_memory, fw->base, +> +&fw->mr); +> +- e820_add_entry(fw->base, fw->size, E820_RESERVED); +> +cxl_fmw_base += fw->size; +> +cxl_resv_end = cxl_fmw_base; +> +} +> +-- +> +2.37.3 +> + +This patch does not resolve the issue, reserved entries are still created. 
+[    0.000000] BIOS-e820: [mem 0x0000000280000000-0x00000002800fffff] reserved +[    0.000000] BIOS-e820: [mem 0x0000000290000000-0x000000029fffffff] reserved +# cat /proc/iomem +290000000-29fffffff : CXL Window 0 +  290000000-29fffffff : Reserved +# cxl create-region -m -d decoder0.0 -w 1 -g 256 mem0 +cxl region: create_region: region0: set_size failed: Numerical result out of range +cxl region: cmd_create_region: created 0 regions +On Tue, Oct 18, 2022 at 2:05 AM Ani Sinha < +ani@anisinha.ca +> wrote: +On Tue, Oct 18, 2022 at 5:14 AM Gregory Price < +gourry.memverge@gmail.com +> wrote: +> +> Early-boot e820 records will be inserted by the bios/efi/early boot +> software and be reported to the kernel via insert_resource.  Later, when +> CXL drivers iterate through the regions again, they will insert another +> resource and make the RESERVED memory area a child. +I have already sent a patch +https://www.mail-archive.com/qemu-devel@nongnu.org/msg882012.html +. +When the patch is applied, there would not be any reserved entries +even with passing E820_RESERVED . +So this patch needs to be evaluated in the light of the above patch I +sent. Once you apply my patch, does the issue still exist? +> +> This RESERVED memory area causes the memory region to become unusable, +> and as a result attempting to create memory regions with +> +>     `cxl create-region ...` +> +> Will fail due to the RESERVED area intersecting with the CXL window. +> +> +> During boot the following traceback is observed: +> +> 0xffffffff81101650 in insert_resource_expand_to_fit () +> 0xffffffff83d964c5 in e820__reserve_resources_late () +> 0xffffffff83e03210 in pcibios_resource_survey () +> 0xffffffff83e04f4a in pcibios_init () +> +> Which produces a call to reserve the CFMWS area: +> +> (gdb) p *new +> $54 = {start = 0x290000000, end = 0x2cfffffff, name = "Reserved", +>        flags = 0x200, desc = 0x7, parent = 0x0, sibling = 0x0, +>        child = 0x0} +> +> Later the Kernel parses ACPI tables and reserves the exact same area as +> the CXL Fixed Memory Window.  The use of `insert_resource_conflict` +> retains the RESERVED region and makes it a child of the new region. +> +> 0xffffffff811016a4 in insert_resource_conflict () +>                       insert_resource () +> 0xffffffff81a81389 in cxl_parse_cfmws () +> 0xffffffff818c4a81 in call_handler () +>                       acpi_parse_entries_array () +> +> (gdb) p/x *new +> $59 = {start = 0x290000000, end = 0x2cfffffff, name = "CXL Window 0", +>        flags = 0x200, desc = 0x0, parent = 0x0, sibling = 0x0, +>        child = 0x0} +> +> This produces the following output in /proc/iomem: +> +> 590000000-68fffffff : CXL Window 0 +>   590000000-68fffffff : Reserved +> +> This reserved area causes `get_free_mem_region()` to fail due to a check +> against `__region_intersects()`.  Due to this reserved area, the +> intersect check will only ever return REGION_INTERSECTS, which causes +> `cxl create-region` to always fail. 
+> +> Signed-off-by: Gregory Price < +gregory.price@memverge.com +> +> --- +>  hw/i386/pc.c | 2 -- +>  1 file changed, 2 deletions(-) +> +> diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> index 566accf7e6..5bf5465a21 100644 +> --- a/hw/i386/pc.c +> +++ b/hw/i386/pc.c +> @@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +>          hwaddr cxl_size = MiB; +> +>          cxl_base = pc_get_cxl_range_start(pcms); +> -        e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +>          memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); +>          memory_region_add_subregion(system_memory, cxl_base, mr); +>          cxl_resv_end = cxl_base + cxl_size; +> @@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms, +>                  memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw, +>                                        "cxl-fixed-memory-region", fw->size); +>                  memory_region_add_subregion(system_memory, fw->base, &fw->mr); +> -                e820_add_entry(fw->base, fw->size, E820_RESERVED); +>                  cxl_fmw_base += fw->size; +>                  cxl_resv_end = cxl_fmw_base; +>              } +> -- +> 2.37.3 +> + ++Gerd Hoffmann + +On Tue, Oct 18, 2022 at 8:16 PM Gregory Price wrote: +> +> +This patch does not resolve the issue, reserved entries are still created. +> +> +[ 0.000000] BIOS-e820: [mem 0x0000000280000000-0x00000002800fffff] reserved +> +[ 0.000000] BIOS-e820: [mem 0x0000000290000000-0x000000029fffffff] reserved +> +> +# cat /proc/iomem +> +290000000-29fffffff : CXL Window 0 +> +290000000-29fffffff : Reserved +> +> +# cxl create-region -m -d decoder0.0 -w 1 -g 256 mem0 +> +cxl region: create_region: region0: set_size failed: Numerical result out of +> +range +> +cxl region: cmd_create_region: created 0 regions +> +> +On Tue, Oct 18, 2022 at 2:05 AM Ani Sinha wrote: +> +> +> +> On Tue, Oct 18, 2022 at 5:14 AM Gregory Price +> +> wrote: +> +> > +> +> > Early-boot e820 records will be inserted by the bios/efi/early boot +> +> > software and be reported to the kernel via insert_resource. Later, when +> +> > CXL drivers iterate through the regions again, they will insert another +> +> > resource and make the RESERVED memory area a child. +> +> +> +> I have already sent a patch +> +> +https://www.mail-archive.com/qemu-devel@nongnu.org/msg882012.html +. +> +> When the patch is applied, there would not be any reserved entries +> +> even with passing E820_RESERVED . +> +> So this patch needs to be evaluated in the light of the above patch I +> +> sent. Once you apply my patch, does the issue still exist? +> +> +> +> > +> +> > This RESERVED memory area causes the memory region to become unusable, +> +> > and as a result attempting to create memory regions with +> +> > +> +> > `cxl create-region ...` +> +> > +> +> > Will fail due to the RESERVED area intersecting with the CXL window. 
+> +> > +> +> > +> +> > During boot the following traceback is observed: +> +> > +> +> > 0xffffffff81101650 in insert_resource_expand_to_fit () +> +> > 0xffffffff83d964c5 in e820__reserve_resources_late () +> +> > 0xffffffff83e03210 in pcibios_resource_survey () +> +> > 0xffffffff83e04f4a in pcibios_init () +> +> > +> +> > Which produces a call to reserve the CFMWS area: +> +> > +> +> > (gdb) p *new +> +> > $54 = {start = 0x290000000, end = 0x2cfffffff, name = "Reserved", +> +> > flags = 0x200, desc = 0x7, parent = 0x0, sibling = 0x0, +> +> > child = 0x0} +> +> > +> +> > Later the Kernel parses ACPI tables and reserves the exact same area as +> +> > the CXL Fixed Memory Window. The use of `insert_resource_conflict` +> +> > retains the RESERVED region and makes it a child of the new region. +> +> > +> +> > 0xffffffff811016a4 in insert_resource_conflict () +> +> > insert_resource () +> +> > 0xffffffff81a81389 in cxl_parse_cfmws () +> +> > 0xffffffff818c4a81 in call_handler () +> +> > acpi_parse_entries_array () +> +> > +> +> > (gdb) p/x *new +> +> > $59 = {start = 0x290000000, end = 0x2cfffffff, name = "CXL Window 0", +> +> > flags = 0x200, desc = 0x0, parent = 0x0, sibling = 0x0, +> +> > child = 0x0} +> +> > +> +> > This produces the following output in /proc/iomem: +> +> > +> +> > 590000000-68fffffff : CXL Window 0 +> +> > 590000000-68fffffff : Reserved +> +> > +> +> > This reserved area causes `get_free_mem_region()` to fail due to a check +> +> > against `__region_intersects()`. Due to this reserved area, the +> +> > intersect check will only ever return REGION_INTERSECTS, which causes +> +> > `cxl create-region` to always fail. +> +> > +> +> > Signed-off-by: Gregory Price +> +> > --- +> +> > hw/i386/pc.c | 2 -- +> +> > 1 file changed, 2 deletions(-) +> +> > +> +> > diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> +> > index 566accf7e6..5bf5465a21 100644 +> +> > --- a/hw/i386/pc.c +> +> > +++ b/hw/i386/pc.c +> +> > @@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +> +> > hwaddr cxl_size = MiB; +> +> > +> +> > cxl_base = pc_get_cxl_range_start(pcms); +> +> > - e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +> +> > memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); +> +> > memory_region_add_subregion(system_memory, cxl_base, mr); +> +> > cxl_resv_end = cxl_base + cxl_size; +> +> > @@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms, +> +> > memory_region_init_io(&fw->mr, OBJECT(machine), +> +> > &cfmws_ops, fw, +> +> > "cxl-fixed-memory-region", +> +> > fw->size); +> +> > memory_region_add_subregion(system_memory, fw->base, +> +> > &fw->mr); +> +> > - e820_add_entry(fw->base, fw->size, E820_RESERVED); +> +> > cxl_fmw_base += fw->size; +> +> > cxl_resv_end = cxl_fmw_base; +> +> > } +> +> > -- +> +> > 2.37.3 +> +> > + +> +>> > diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> +>> > index 566accf7e6..5bf5465a21 100644 +> +>> > --- a/hw/i386/pc.c +> +>> > +++ b/hw/i386/pc.c +> +>> > @@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +> +>> > hwaddr cxl_size = MiB; +> +>> > +> +>> > cxl_base = pc_get_cxl_range_start(pcms); +> +>> > - e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +Just dropping it doesn't look like a good plan to me. + +You can try set etc/reserved-memory-end fw_cfg file instead. Firmware +(both seabios and ovmf) read it and will make sure the 64bit pci mmio +window is placed above that address, i.e. this effectively reserves +address space. 
Right now used by memory hotplug code, but should work +for cxl too I think (disclaimer: don't know much about cxl ...). + +take care & HTH, + Gerd + +On Tue, 8 Nov 2022 12:21:11 +0100 +Gerd Hoffmann wrote: + +> +> >> > diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> +> >> > index 566accf7e6..5bf5465a21 100644 +> +> >> > --- a/hw/i386/pc.c +> +> >> > +++ b/hw/i386/pc.c +> +> >> > @@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +> +> >> > hwaddr cxl_size = MiB; +> +> >> > +> +> >> > cxl_base = pc_get_cxl_range_start(pcms); +> +> >> > - e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +> +> +Just dropping it doesn't look like a good plan to me. +> +> +You can try set etc/reserved-memory-end fw_cfg file instead. Firmware +> +(both seabios and ovmf) read it and will make sure the 64bit pci mmio +> +window is placed above that address, i.e. this effectively reserves +> +address space. Right now used by memory hotplug code, but should work +> +for cxl too I think (disclaimer: don't know much about cxl ...). +As far as I know CXL impl. in QEMU isn't using etc/reserved-memory-end +at all, it' has its own mapping. + +Regardless of that, reserved E820 entries look wrong, and looking at +commit message OS is right to bailout on them (expected according +to ACPI spec). +Also spec says + +" +E820 Assumptions and Limitations + [...] + The platform boot firmware does not return a range description for the memory +mapping of + PCI devices, ISA Option ROMs, and ISA Plug and Play cards because the OS has +mechanisms + available to detect them. +" + +so dropping reserved entries looks reasonable from ACPI spec point of view. +(disclaimer: don't know much about cxl ... either) +> +> +take care & HTH, +> +Gerd +> + +On Fri, Nov 11, 2022 at 11:51:23AM +0100, Igor Mammedov wrote: +> +On Tue, 8 Nov 2022 12:21:11 +0100 +> +Gerd Hoffmann wrote: +> +> +> > >> > diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> +> > >> > index 566accf7e6..5bf5465a21 100644 +> +> > >> > --- a/hw/i386/pc.c +> +> > >> > +++ b/hw/i386/pc.c +> +> > >> > @@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +> +> > >> > hwaddr cxl_size = MiB; +> +> > >> > +> +> > >> > cxl_base = pc_get_cxl_range_start(pcms); +> +> > >> > - e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +> +> +> +> Just dropping it doesn't look like a good plan to me. +> +> +> +> You can try set etc/reserved-memory-end fw_cfg file instead. Firmware +> +> (both seabios and ovmf) read it and will make sure the 64bit pci mmio +> +> window is placed above that address, i.e. this effectively reserves +> +> address space. Right now used by memory hotplug code, but should work +> +> for cxl too I think (disclaimer: don't know much about cxl ...). +> +> +As far as I know CXL impl. in QEMU isn't using etc/reserved-memory-end +> +at all, it' has its own mapping. +This should be changed. cxl should make sure the highest address used +is stored in etc/reserved-memory-end to avoid the firmware mapping pci +resources there. + +> +so dropping reserved entries looks reasonable from ACPI spec point of view. +Yep, I don't want dispute that. + +I suspect the reason for these entries to exist in the first place is to +inform the firmware that it should not place stuff there, and if we +remove that to conform with the spec we need some alternative way for +that ... 
+ +take care, + Gerd + +On Fri, 11 Nov 2022 12:40:59 +0100 +Gerd Hoffmann wrote: + +> +On Fri, Nov 11, 2022 at 11:51:23AM +0100, Igor Mammedov wrote: +> +> On Tue, 8 Nov 2022 12:21:11 +0100 +> +> Gerd Hoffmann wrote: +> +> +> +> > > >> > diff --git a/hw/i386/pc.c b/hw/i386/pc.c +> +> > > >> > index 566accf7e6..5bf5465a21 100644 +> +> > > >> > --- a/hw/i386/pc.c +> +> > > >> > +++ b/hw/i386/pc.c +> +> > > >> > @@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms, +> +> > > >> > hwaddr cxl_size = MiB; +> +> > > >> > +> +> > > >> > cxl_base = pc_get_cxl_range_start(pcms); +> +> > > >> > - e820_add_entry(cxl_base, cxl_size, E820_RESERVED); +> +> > +> +> > Just dropping it doesn't look like a good plan to me. +> +> > +> +> > You can try set etc/reserved-memory-end fw_cfg file instead. Firmware +> +> > (both seabios and ovmf) read it and will make sure the 64bit pci mmio +> +> > window is placed above that address, i.e. this effectively reserves +> +> > address space. Right now used by memory hotplug code, but should work +> +> > for cxl too I think (disclaimer: don't know much about cxl ...). +> +> +> +> As far as I know CXL impl. in QEMU isn't using etc/reserved-memory-end +> +> at all, it' has its own mapping. +> +> +This should be changed. cxl should make sure the highest address used +> +is stored in etc/reserved-memory-end to avoid the firmware mapping pci +> +resources there. +if (pcmc->has_reserved_memory && machine->device_memory->base) { + +[...] + + if (pcms->cxl_devices_state.is_enabled) { + + res_mem_end = cxl_resv_end; + +that should be handled by this line + + } + + *val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); + + fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val)); + + } + +so SeaBIOS shouldn't intrude into CXL address space +(I assume EDK2 behave similarly here) + +> +> so dropping reserved entries looks reasonable from ACPI spec point of view. +> +> +> +> +Yep, I don't want dispute that. +> +> +I suspect the reason for these entries to exist in the first place is to +> +inform the firmware that it should not place stuff there, and if we +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +just to educate me, can you point out what SeaBIOS code does with reservations. + +> +remove that to conform with the spec we need some alternative way for +> +that ... +with etc/reserved-memory-end set as above, +is E820_RESERVED really needed here? + +(my understanding was that E820_RESERVED weren't accounted for when +initializing PCI devices) + +> +> +take care, +> +Gerd +> + +> +if (pcmc->has_reserved_memory && machine->device_memory->base) { +> +> +[...] +> +> +if (pcms->cxl_devices_state.is_enabled) { +> +> +res_mem_end = cxl_resv_end; +> +> +that should be handled by this line +> +> +} +> +> +*val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); +> +> +fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, +> +sizeof(*val)); +> +} +> +> +so SeaBIOS shouldn't intrude into CXL address space +Yes, looks good, so with this in place already everyting should be fine. + +> +(I assume EDK2 behave similarly here) +Correct, ovmf reads that fw_cfg file too. + +> +> I suspect the reason for these entries to exist in the first place is to +> +> inform the firmware that it should not place stuff there, and if we +> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +> +just to educate me, can you point out what SeaBIOS code does with +> +reservations. +They are added to the e820 map which gets passed on to the OS. 
seabios +uses (and updateas) the e820 map too, when allocating memory for +example. While thinking about it I'm not fully sure it actually looks +at reservations, maybe it only uses (and updates) ram entries when +allocating memory. + +> +> remove that to conform with the spec we need some alternative way for +> +> that ... +> +> +with etc/reserved-memory-end set as above, +> +is E820_RESERVED really needed here? +No. Setting etc/reserved-memory-end is enough. + +So for the original patch: +Acked-by: Gerd Hoffmann + +take care, + Gerd + +On Fri, Nov 11, 2022 at 02:36:02PM +0100, Gerd Hoffmann wrote: +> +> if (pcmc->has_reserved_memory && machine->device_memory->base) { +> +> +> +> [...] +> +> +> +> if (pcms->cxl_devices_state.is_enabled) { +> +> +> +> res_mem_end = cxl_resv_end; +> +> +> +> that should be handled by this line +> +> +> +> } +> +> +> +> *val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); +> +> +> +> fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, +> +> sizeof(*val)); +> +> } +> +> +> +> so SeaBIOS shouldn't intrude into CXL address space +> +> +Yes, looks good, so with this in place already everyting should be fine. +> +> +> (I assume EDK2 behave similarly here) +> +> +Correct, ovmf reads that fw_cfg file too. +> +> +> > I suspect the reason for these entries to exist in the first place is to +> +> > inform the firmware that it should not place stuff there, and if we +> +> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +> +> just to educate me, can you point out what SeaBIOS code does with +> +> reservations. +> +> +They are added to the e820 map which gets passed on to the OS. seabios +> +uses (and updateas) the e820 map too, when allocating memory for +> +example. While thinking about it I'm not fully sure it actually looks +> +at reservations, maybe it only uses (and updates) ram entries when +> +allocating memory. +> +> +> > remove that to conform with the spec we need some alternative way for +> +> > that ... +> +> +> +> with etc/reserved-memory-end set as above, +> +> is E820_RESERVED really needed here? +> +> +No. Setting etc/reserved-memory-end is enough. +> +> +So for the original patch: +> +Acked-by: Gerd Hoffmann +> +> +take care, +> +Gerd +It's upstream already, sorry I can't add your tag. + +-- +MST + diff --git a/results/classifier/001/instruction/24190340 b/results/classifier/001/instruction/24190340 new file mode 100644 index 000000000..784962c9c --- /dev/null +++ b/results/classifier/001/instruction/24190340 @@ -0,0 +1,2056 @@ +instruction: 0.818 +other: 0.811 +semantic: 0.793 +mistranslation: 0.758 + +[BUG, RFC] Block graph deadlock on job-dismiss + +Hi all, + +There's a bug in block layer which leads to block graph deadlock. +Notably, it takes place when blockdev IO is processed within a separate +iothread. + +This was initially caught by our tests, and I was able to reduce it to a +relatively simple reproducer. Such deadlocks are probably supposed to +be covered in iotests/graph-changes-while-io, but this deadlock isn't. + +Basically what the reproducer does is launches QEMU with a drive having +'iothread' option set, creates a chain of 2 snapshots, launches +block-commit job for a snapshot and then dismisses the job, starting +from the lower snapshot. If the guest is issuing IO at the same time, +there's a race in acquiring block graph lock and a potential deadlock. + +Here's how it can be reproduced: + +1. 
Run QEMU: +> +SRCDIR=/path/to/srcdir +> +> +> +> +> +$SRCDIR/build/qemu-system-x86_64 -enable-kvm \ +> +> +-machine q35 -cpu Nehalem \ +> +> +-name guest=alma8-vm,debug-threads=on \ +> +> +-m 2g -smp 2 \ +> +> +-nographic -nodefaults \ +> +> +-qmp unix:/var/run/alma8-qmp.sock,server=on,wait=off \ +> +> +-serial unix:/var/run/alma8-serial.sock,server=on,wait=off \ +> +> +-object iothread,id=iothread0 \ +> +> +-blockdev +> +node-name=disk,driver=qcow2,file.driver=file,file.filename=/path/to/img/alma8.qcow2 +> +\ +> +-device virtio-blk-pci,drive=disk,iothread=iothread0 +2. Launch IO (random reads) from within the guest: +> +nc -U /var/run/alma8-serial.sock +> +... +> +[root@alma8-vm ~]# fio --name=randread --ioengine=libaio --direct=1 --bs=4k +> +--size=1G --numjobs=1 --time_based=1 --runtime=300 --group_reporting +> +--rw=randread --iodepth=1 --filename=/testfile +3. Run snapshots creation & removal of lower snapshot operation in a +loop (script attached): +> +while /bin/true ; do ./remove_lower_snap.sh ; done +And then it occasionally hangs. + +Note: I've tried bisecting this, and looks like deadlock occurs starting +from the following commit: + +(BAD) 5bdbaebcce virtio: Re-enable notifications after drain +(GOOD) c42c3833e0 virtio-scsi: Attach event vq notifier with no_poll + +On the latest v10.0.0 it does hang as well. + + +Here's backtrace of the main thread: + +> +#0 0x00007fc547d427ce in __ppoll (fds=0x557eb79657b0, nfds=1, +> +timeout=, sigmask=0x0) at ../sysdeps/unix/sysv/linux/ppoll.c:43 +> +#1 0x0000557eb47d955c in qemu_poll_ns (fds=0x557eb79657b0, nfds=1, +> +timeout=-1) at ../util/qemu-timer.c:329 +> +#2 0x0000557eb47b2204 in fdmon_poll_wait (ctx=0x557eb76c5f20, +> +ready_list=0x7ffd94b4edd8, timeout=-1) at ../util/fdmon-poll.c:79 +> +#3 0x0000557eb47b1c45 in aio_poll (ctx=0x557eb76c5f20, blocking=true) at +> +../util/aio-posix.c:730 +> +#4 0x0000557eb4621edd in bdrv_do_drained_begin (bs=0x557eb795e950, +> +parent=0x0, poll=true) at ../block/io.c:378 +> +#5 0x0000557eb4621f7b in bdrv_drained_begin (bs=0x557eb795e950) at +> +../block/io.c:391 +> +#6 0x0000557eb45ec125 in bdrv_change_aio_context (bs=0x557eb795e950, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7682 +> +#7 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb7964250, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7608 +> +#8 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb79575e0, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7668 +> +#9 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb7e59110, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7608 +> +#10 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb7e51960, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7668 +> +#11 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb814ed80, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7608 +> +#12 0x0000557eb45ee8e4 in child_job_change_aio_ctx (c=0x557eb7c9d3f0, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../blockjob.c:157 +> +#13 0x0000557eb45ebe2d in bdrv_parent_change_aio_context (c=0x557eb7c9d3f0, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, 
tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7592 +> +#14 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb7d74310, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7661 +> +#15 0x0000557eb45dcd7e in bdrv_child_cb_change_aio_ctx +> +(child=0x557eb8565af0, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = +> +{...}, tran=0x557eb7a87160, errp=0x0) at ../block.c:1234 +> +#16 0x0000557eb45ebe2d in bdrv_parent_change_aio_context (c=0x557eb8565af0, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7592 +> +#17 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb79575e0, +> +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +errp=0x0) +> +at ../block.c:7661 +> +#18 0x0000557eb45ec1f3 in bdrv_try_change_aio_context (bs=0x557eb79575e0, +> +ctx=0x557eb76c5f20, ignore_child=0x0, errp=0x0) at ../block.c:7715 +> +#19 0x0000557eb45e1b15 in bdrv_root_unref_child (child=0x557eb7966f30) at +> +../block.c:3317 +> +#20 0x0000557eb45eeaa8 in block_job_remove_all_bdrv (job=0x557eb7952800) at +> +../blockjob.c:209 +> +#21 0x0000557eb45ee641 in block_job_free (job=0x557eb7952800) at +> +../blockjob.c:82 +> +#22 0x0000557eb45f17af in job_unref_locked (job=0x557eb7952800) at +> +../job.c:474 +> +#23 0x0000557eb45f257d in job_do_dismiss_locked (job=0x557eb7952800) at +> +../job.c:771 +> +#24 0x0000557eb45f25fe in job_dismiss_locked (jobptr=0x7ffd94b4f400, +> +errp=0x7ffd94b4f488) at ../job.c:783 +> +--Type for more, q to quit, c to continue without paging-- +> +#25 0x0000557eb45d8e84 in qmp_job_dismiss (id=0x557eb7aa42b0 "commit-snap1", +> +errp=0x7ffd94b4f488) at ../job-qmp.c:138 +> +#26 0x0000557eb472f6a3 in qmp_marshal_job_dismiss (args=0x7fc52c00a3b0, +> +ret=0x7fc53c880da8, errp=0x7fc53c880da0) at qapi/qapi-commands-job.c:221 +> +#27 0x0000557eb47a35f3 in do_qmp_dispatch_bh (opaque=0x7fc53c880e40) at +> +../qapi/qmp-dispatch.c:128 +> +#28 0x0000557eb47d1cd2 in aio_bh_call (bh=0x557eb79568f0) at +> +../util/async.c:172 +> +#29 0x0000557eb47d1df5 in aio_bh_poll (ctx=0x557eb76c0200) at +> +../util/async.c:219 +> +#30 0x0000557eb47b12f3 in aio_dispatch (ctx=0x557eb76c0200) at +> +../util/aio-posix.c:436 +> +#31 0x0000557eb47d2266 in aio_ctx_dispatch (source=0x557eb76c0200, +> +callback=0x0, user_data=0x0) at ../util/async.c:361 +> +#32 0x00007fc549232f4f in g_main_dispatch (context=0x557eb76c6430) at +> +../glib/gmain.c:3364 +> +#33 g_main_context_dispatch (context=0x557eb76c6430) at ../glib/gmain.c:4079 +> +#34 0x0000557eb47d3ab1 in glib_pollfds_poll () at ../util/main-loop.c:287 +> +#35 0x0000557eb47d3b38 in os_host_main_loop_wait (timeout=0) at +> +../util/main-loop.c:310 +> +#36 0x0000557eb47d3c58 in main_loop_wait (nonblocking=0) at +> +../util/main-loop.c:589 +> +#37 0x0000557eb4218b01 in qemu_main_loop () at ../system/runstate.c:835 +> +#38 0x0000557eb46df166 in qemu_default_main (opaque=0x0) at +> +../system/main.c:50 +> +#39 0x0000557eb46df215 in main (argc=24, argv=0x7ffd94b4f8d8) at +> +../system/main.c:80 +And here's coroutine trying to acquire read lock: + +> +(gdb) qemu coroutine reader_queue->entries.sqh_first +> +#0 0x0000557eb47d7068 in qemu_coroutine_switch (from_=0x557eb7aa48b0, +> +to_=0x7fc537fff508, action=COROUTINE_YIELD) at +> +../util/coroutine-ucontext.c:321 +> +#1 0x0000557eb47d4d4a in qemu_coroutine_yield () at +> +../util/qemu-coroutine.c:339 +> +#2 0x0000557eb47d56c8 in qemu_co_queue_wait_impl (queue=0x557eb59954c0 +> +, 
lock=0x7fc53c57de50, flags=0) at +> +../util/qemu-coroutine-lock.c:60 +> +#3 0x0000557eb461fea7 in bdrv_graph_co_rdlock () at ../block/graph-lock.c:231 +> +#4 0x0000557eb460c81a in graph_lockable_auto_lock (x=0x7fc53c57dee3) at +> +/home/root/src/qemu/master/include/block/graph-lock.h:213 +> +#5 0x0000557eb460fa41 in blk_co_do_preadv_part +> +(blk=0x557eb84c0810, offset=6890553344, bytes=4096, qiov=0x7fc530006988, +> +qiov_offset=0, flags=BDRV_REQ_REGISTERED_BUF) at ../block/block-backend.c:1339 +> +#6 0x0000557eb46104d7 in blk_aio_read_entry (opaque=0x7fc530003240) at +> +../block/block-backend.c:1619 +> +#7 0x0000557eb47d6c40 in coroutine_trampoline (i0=-1213577040, i1=21886) at +> +../util/coroutine-ucontext.c:175 +> +#8 0x00007fc547c2a360 in __start_context () at +> +../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91 +> +#9 0x00007ffd94b4ea40 in () +> +#10 0x0000000000000000 in () +So it looks like main thread is processing job-dismiss request and is +holding write lock taken in block_job_remove_all_bdrv() (frame #20 +above). At the same time iothread spawns a coroutine which performs IO +request. Before the coroutine is spawned, blk_aio_prwv() increases +'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +trying to acquire the read lock. But main thread isn't releasing the +lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +Here's the deadlock. + +Any comments and suggestions on the subject are welcomed. Thanks! + +Andrey +remove_lower_snap.sh +Description: +application/shellscript + +On 4/24/25 8:32 PM, Andrey Drobyshev wrote: +> +Hi all, +> +> +There's a bug in block layer which leads to block graph deadlock. +> +Notably, it takes place when blockdev IO is processed within a separate +> +iothread. +> +> +This was initially caught by our tests, and I was able to reduce it to a +> +relatively simple reproducer. Such deadlocks are probably supposed to +> +be covered in iotests/graph-changes-while-io, but this deadlock isn't. +> +> +Basically what the reproducer does is launches QEMU with a drive having +> +'iothread' option set, creates a chain of 2 snapshots, launches +> +block-commit job for a snapshot and then dismisses the job, starting +> +from the lower snapshot. If the guest is issuing IO at the same time, +> +there's a race in acquiring block graph lock and a potential deadlock. +> +> +Here's how it can be reproduced: +> +> +[...] +> +I took a closer look at iotests/graph-changes-while-io, and have managed +to reproduce the same deadlock in a much simpler setup, without a guest. + +1. Run QSD:> ./build/storage-daemon/qemu-storage-daemon --object +iothread,id=iothread0 \ +> +--blockdev null-co,node-name=node0,read-zeroes=true \ +> +> +--nbd-server addr.type=unix,addr.path=/var/run/qsd_nbd.sock \ +> +> +--export +> +nbd,id=exp0,node-name=node0,iothread=iothread0,fixed-iothread=true,writable=true +> +\ +> +--chardev +> +socket,id=qmp-sock,path=/var/run/qsd_qmp.sock,server=on,wait=off \ +> +--monitor chardev=qmp-sock +2. Launch IO: +> +qemu-img bench -f raw -c 2000000 +> +'nbd+unix:///node0?socket=/var/run/qsd_nbd.sock' +3. Add 2 snapshots and remove lower one (script attached):> while +/bin/true ; do ./rls_qsd.sh ; done + +And then it hangs. + +I'll also send a patch with corresponding test case added directly to +iotests. + +This reproduce seems to be hanging starting from Fiona's commit +67446e605dc ("blockjob: drop AioContext lock before calling +bdrv_graph_wrlock()"). 
AioContext locks were dropped entirely later on +in Stefan's commit b49f4755c7 ("block: remove AioContext locking"), but +the problem remains. + +Andrey +rls_qsd.sh +Description: +application/shellscript + +From: Andrey Drobyshev + +This case is catching potential deadlock which takes place when job-dismiss +is issued when I/O requests are processed in a separate iothread. + +See +https://mail.gnu.org/archive/html/qemu-devel/2025-04/msg04421.html +Signed-off-by: Andrey Drobyshev +--- + .../qemu-iotests/tests/graph-changes-while-io | 101 ++++++++++++++++-- + .../tests/graph-changes-while-io.out | 4 +- + 2 files changed, 96 insertions(+), 9 deletions(-) + +diff --git a/tests/qemu-iotests/tests/graph-changes-while-io +b/tests/qemu-iotests/tests/graph-changes-while-io +index 194fda500e..e30f823da4 100755 +--- a/tests/qemu-iotests/tests/graph-changes-while-io ++++ b/tests/qemu-iotests/tests/graph-changes-while-io +@@ -27,6 +27,8 @@ from iotests import imgfmt, qemu_img, qemu_img_create, +qemu_io, \ + + + top = os.path.join(iotests.test_dir, 'top.img') ++snap1 = os.path.join(iotests.test_dir, 'snap1.img') ++snap2 = os.path.join(iotests.test_dir, 'snap2.img') + nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') + + +@@ -58,6 +60,15 @@ class TestGraphChangesWhileIO(QMPTestCase): + def tearDown(self) -> None: + self.qsd.stop() + ++ def _wait_for_blockjob(self, status) -> None: ++ done = False ++ while not done: ++ for event in self.qsd.get_qmp().get_events(wait=10.0): ++ if event['event'] != 'JOB_STATUS_CHANGE': ++ continue ++ if event['data']['status'] == status: ++ done = True ++ + def test_blockdev_add_while_io(self) -> None: + # Run qemu-img bench in the background + bench_thr = Thread(target=do_qemu_img_bench) +@@ -116,13 +127,89 @@ class TestGraphChangesWhileIO(QMPTestCase): + 'device': 'job0', + }) + +- cancelled = False +- while not cancelled: +- for event in self.qsd.get_qmp().get_events(wait=10.0): +- if event['event'] != 'JOB_STATUS_CHANGE': +- continue +- if event['data']['status'] == 'null': +- cancelled = True ++ self._wait_for_blockjob('null') ++ ++ bench_thr.join() ++ ++ def test_remove_lower_snapshot_while_io(self) -> None: ++ # Run qemu-img bench in the background ++ bench_thr = Thread(target=do_qemu_img_bench, args=(100000, )) ++ bench_thr.start() ++ ++ # While I/O is performed on 'node0' node, consequently add 2 snapshots ++ # on top of it, then remove (commit) them starting from lower one. 
++ while bench_thr.is_alive(): ++ # Recreate snapshot images on every iteration ++ qemu_img_create('-f', imgfmt, snap1, '1G') ++ qemu_img_create('-f', imgfmt, snap2, '1G') ++ ++ self.qsd.cmd('blockdev-add', { ++ 'driver': imgfmt, ++ 'node-name': 'snap1', ++ 'file': { ++ 'driver': 'file', ++ 'filename': snap1 ++ } ++ }) ++ ++ self.qsd.cmd('blockdev-snapshot', { ++ 'node': 'node0', ++ 'overlay': 'snap1', ++ }) ++ ++ self.qsd.cmd('blockdev-add', { ++ 'driver': imgfmt, ++ 'node-name': 'snap2', ++ 'file': { ++ 'driver': 'file', ++ 'filename': snap2 ++ } ++ }) ++ ++ self.qsd.cmd('blockdev-snapshot', { ++ 'node': 'snap1', ++ 'overlay': 'snap2', ++ }) ++ ++ self.qsd.cmd('block-commit', { ++ 'job-id': 'commit-snap1', ++ 'device': 'snap2', ++ 'top-node': 'snap1', ++ 'base-node': 'node0', ++ 'auto-finalize': True, ++ 'auto-dismiss': False, ++ }) ++ ++ self._wait_for_blockjob('concluded') ++ self.qsd.cmd('job-dismiss', { ++ 'id': 'commit-snap1', ++ }) ++ ++ self.qsd.cmd('block-commit', { ++ 'job-id': 'commit-snap2', ++ 'device': 'snap2', ++ 'top-node': 'snap2', ++ 'base-node': 'node0', ++ 'auto-finalize': True, ++ 'auto-dismiss': False, ++ }) ++ ++ self._wait_for_blockjob('ready') ++ self.qsd.cmd('job-complete', { ++ 'id': 'commit-snap2', ++ }) ++ ++ self._wait_for_blockjob('concluded') ++ self.qsd.cmd('job-dismiss', { ++ 'id': 'commit-snap2', ++ }) ++ ++ self.qsd.cmd('blockdev-del', { ++ 'node-name': 'snap1' ++ }) ++ self.qsd.cmd('blockdev-del', { ++ 'node-name': 'snap2' ++ }) + + bench_thr.join() + +diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out +b/tests/qemu-iotests/tests/graph-changes-while-io.out +index fbc63e62f8..8d7e996700 100644 +--- a/tests/qemu-iotests/tests/graph-changes-while-io.out ++++ b/tests/qemu-iotests/tests/graph-changes-while-io.out +@@ -1,5 +1,5 @@ +-.. ++... + ---------------------------------------------------------------------- +-Ran 2 tests ++Ran 3 tests + + OK +-- +2.43.5 + +Am 24.04.25 um 19:32 schrieb Andrey Drobyshev: +> +So it looks like main thread is processing job-dismiss request and is +> +holding write lock taken in block_job_remove_all_bdrv() (frame #20 +> +above). At the same time iothread spawns a coroutine which performs IO +> +request. Before the coroutine is spawned, blk_aio_prwv() increases +> +'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +> +trying to acquire the read lock. But main thread isn't releasing the +> +lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +> +Here's the deadlock. +And for the IO test you provided, it's client->nb_requests that behaves +similarly to blk->in_flight here. + +The issue also reproduces easily when issuing the following QMP command +in a loop while doing IO on a device: + +> +void qmp_block_locked_drain(const char *node_name, Error **errp) +> +{ +> +BlockDriverState *bs; +> +> +bs = bdrv_find_node(node_name); +> +if (!bs) { +> +error_setg(errp, "node not found"); +> +return; +> +} +> +> +bdrv_graph_wrlock(); +> +bdrv_drained_begin(bs); +> +bdrv_drained_end(bs); +> +bdrv_graph_wrunlock(); +> +} +It seems like either it would be necessary to require: +1. not draining inside an exclusively locked section +or +2. making sure that variables used by drained_poll routines are only set +while holding the reader lock +? 
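For readers skimming the thread, the interleaving described above reduces to a lock-ordering pattern that is easy to see outside of QEMU. Below is a minimal, self-contained pthreads analogy (not QEMU code; the rwlock and counter are illustrative stand-ins for the block graph lock and blk->in_flight): an exclusive holder polls for the in-flight counter to drop, while the I/O side bumps the counter before it can take the shared lock. The program hangs by design.

/* deadlock_sketch.c -- build with: cc -pthread deadlock_sketch.c
 * Illustrative analogy only; none of this is QEMU code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ block graph lock */
static atomic_int in_flight;                                     /* ~ blk->in_flight   */

static void *io_thread(void *arg)
{
    (void)arg;
    atomic_fetch_add(&in_flight, 1);     /* request accounted for first...             */
    pthread_rwlock_rdlock(&graph_lock);  /* ...then blocks: writer already holds lock  */
    atomic_fetch_sub(&in_flight, 1);
    pthread_rwlock_unlock(&graph_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_rwlock_wrlock(&graph_lock);  /* "graph change" takes the exclusive lock */
    pthread_create(&t, NULL, io_thread, NULL);

    while (atomic_load(&in_flight) == 0) /* wait until a request is in flight */
        usleep(1000);

    while (atomic_load(&in_flight) > 0)  /* "drained_poll" under the write lock: */
        usleep(1000);                    /* never becomes false -> hang          */

    pthread_rwlock_unlock(&graph_lock);  /* never reached */
    pthread_join(t, NULL);
    printf("not reached\n");
    return 0;
}

The two requirements above are the two ways of breaking exactly this cycle.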
+ +Those seem to require rather involved changes, so a third option might +be to make draining inside an exclusively locked section possible, by +embedding such locked sections in a drained section: + +> +diff --git a/blockjob.c b/blockjob.c +> +index 32007f31a9..9b2f3b3ea9 100644 +> +--- a/blockjob.c +> ++++ b/blockjob.c +> +@@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) +> +* one to make sure that such a concurrent access does not attempt +> +* to process an already freed BdrvChild. +> +*/ +> ++ bdrv_drain_all_begin(); +> +bdrv_graph_wrlock(); +> +while (job->nodes) { +> +GSList *l = job->nodes; +> +@@ -211,6 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job) +> +g_slist_free_1(l); +> +} +> +bdrv_graph_wrunlock(); +> ++ bdrv_drain_all_end(); +> +} +> +> +bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) +This seems to fix the issue at hand. I can send a patch if this is +considered an acceptable approach. + +Best Regards, +Fiona + +On 4/30/25 11:47 AM, Fiona Ebner wrote: +> +Am 24.04.25 um 19:32 schrieb Andrey Drobyshev: +> +> So it looks like main thread is processing job-dismiss request and is +> +> holding write lock taken in block_job_remove_all_bdrv() (frame #20 +> +> above). At the same time iothread spawns a coroutine which performs IO +> +> request. Before the coroutine is spawned, blk_aio_prwv() increases +> +> 'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +> +> trying to acquire the read lock. But main thread isn't releasing the +> +> lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +> +> Here's the deadlock. +> +> +And for the IO test you provided, it's client->nb_requests that behaves +> +similarly to blk->in_flight here. +> +> +The issue also reproduces easily when issuing the following QMP command +> +in a loop while doing IO on a device: +> +> +> void qmp_block_locked_drain(const char *node_name, Error **errp) +> +> { +> +> BlockDriverState *bs; +> +> +> +> bs = bdrv_find_node(node_name); +> +> if (!bs) { +> +> error_setg(errp, "node not found"); +> +> return; +> +> } +> +> +> +> bdrv_graph_wrlock(); +> +> bdrv_drained_begin(bs); +> +> bdrv_drained_end(bs); +> +> bdrv_graph_wrunlock(); +> +> } +> +> +It seems like either it would be necessary to require: +> +1. not draining inside an exclusively locked section +> +or +> +2. making sure that variables used by drained_poll routines are only set +> +while holding the reader lock +> +? +> +> +Those seem to require rather involved changes, so a third option might +> +be to make draining inside an exclusively locked section possible, by +> +embedding such locked sections in a drained section: +> +> +> diff --git a/blockjob.c b/blockjob.c +> +> index 32007f31a9..9b2f3b3ea9 100644 +> +> --- a/blockjob.c +> +> +++ b/blockjob.c +> +> @@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) +> +> * one to make sure that such a concurrent access does not attempt +> +> * to process an already freed BdrvChild. +> +> */ +> +> + bdrv_drain_all_begin(); +> +> bdrv_graph_wrlock(); +> +> while (job->nodes) { +> +> GSList *l = job->nodes; +> +> @@ -211,6 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job) +> +> g_slist_free_1(l); +> +> } +> +> bdrv_graph_wrunlock(); +> +> + bdrv_drain_all_end(); +> +> } +> +> +> +> bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) +> +> +This seems to fix the issue at hand. I can send a patch if this is +> +considered an acceptable approach. 
+> +> +Best Regards, +> +Fiona +> +Hello Fiona, + +Thanks for looking into it. I've tried your 3rd option above and can +confirm it does fix the deadlock, at least I can't reproduce it. Other +iotests also don't seem to be breaking. So I personally am fine with +that patch. Would be nice to hear a word from the maintainers though on +whether there're any caveats with such approach. + +Andrey + +On Wed, Apr 30, 2025 at 10:11 AM Andrey Drobyshev + wrote: +> +> +On 4/30/25 11:47 AM, Fiona Ebner wrote: +> +> Am 24.04.25 um 19:32 schrieb Andrey Drobyshev: +> +>> So it looks like main thread is processing job-dismiss request and is +> +>> holding write lock taken in block_job_remove_all_bdrv() (frame #20 +> +>> above). At the same time iothread spawns a coroutine which performs IO +> +>> request. Before the coroutine is spawned, blk_aio_prwv() increases +> +>> 'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +> +>> trying to acquire the read lock. But main thread isn't releasing the +> +>> lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +> +>> Here's the deadlock. +> +> +> +> And for the IO test you provided, it's client->nb_requests that behaves +> +> similarly to blk->in_flight here. +> +> +> +> The issue also reproduces easily when issuing the following QMP command +> +> in a loop while doing IO on a device: +> +> +> +>> void qmp_block_locked_drain(const char *node_name, Error **errp) +> +>> { +> +>> BlockDriverState *bs; +> +>> +> +>> bs = bdrv_find_node(node_name); +> +>> if (!bs) { +> +>> error_setg(errp, "node not found"); +> +>> return; +> +>> } +> +>> +> +>> bdrv_graph_wrlock(); +> +>> bdrv_drained_begin(bs); +> +>> bdrv_drained_end(bs); +> +>> bdrv_graph_wrunlock(); +> +>> } +> +> +> +> It seems like either it would be necessary to require: +> +> 1. not draining inside an exclusively locked section +> +> or +> +> 2. making sure that variables used by drained_poll routines are only set +> +> while holding the reader lock +> +> ? +> +> +> +> Those seem to require rather involved changes, so a third option might +> +> be to make draining inside an exclusively locked section possible, by +> +> embedding such locked sections in a drained section: +> +> +> +>> diff --git a/blockjob.c b/blockjob.c +> +>> index 32007f31a9..9b2f3b3ea9 100644 +> +>> --- a/blockjob.c +> +>> +++ b/blockjob.c +> +>> @@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) +> +>> * one to make sure that such a concurrent access does not attempt +> +>> * to process an already freed BdrvChild. +> +>> */ +> +>> + bdrv_drain_all_begin(); +> +>> bdrv_graph_wrlock(); +> +>> while (job->nodes) { +> +>> GSList *l = job->nodes; +> +>> @@ -211,6 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job) +> +>> g_slist_free_1(l); +> +>> } +> +>> bdrv_graph_wrunlock(); +> +>> + bdrv_drain_all_end(); +> +>> } +> +>> +> +>> bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) +> +> +> +> This seems to fix the issue at hand. I can send a patch if this is +> +> considered an acceptable approach. +Kevin is aware of this thread but it's a public holiday tomorrow so it +may be a little longer. + +Stefan + +Am 24.04.2025 um 19:32 hat Andrey Drobyshev geschrieben: +> +Hi all, +> +> +There's a bug in block layer which leads to block graph deadlock. +> +Notably, it takes place when blockdev IO is processed within a separate +> +iothread. +> +> +This was initially caught by our tests, and I was able to reduce it to a +> +relatively simple reproducer. 
Such deadlocks are probably supposed to +> +be covered in iotests/graph-changes-while-io, but this deadlock isn't. +> +> +Basically what the reproducer does is launches QEMU with a drive having +> +'iothread' option set, creates a chain of 2 snapshots, launches +> +block-commit job for a snapshot and then dismisses the job, starting +> +from the lower snapshot. If the guest is issuing IO at the same time, +> +there's a race in acquiring block graph lock and a potential deadlock. +> +> +Here's how it can be reproduced: +> +> +1. Run QEMU: +> +> SRCDIR=/path/to/srcdir +> +> +> +> +> +> +> +> +> +> $SRCDIR/build/qemu-system-x86_64 -enable-kvm \ +> +> +> +> -machine q35 -cpu Nehalem \ +> +> +> +> -name guest=alma8-vm,debug-threads=on \ +> +> +> +> -m 2g -smp 2 \ +> +> +> +> -nographic -nodefaults \ +> +> +> +> -qmp unix:/var/run/alma8-qmp.sock,server=on,wait=off \ +> +> +> +> -serial unix:/var/run/alma8-serial.sock,server=on,wait=off \ +> +> +> +> -object iothread,id=iothread0 \ +> +> +> +> -blockdev +> +> node-name=disk,driver=qcow2,file.driver=file,file.filename=/path/to/img/alma8.qcow2 +> +> \ +> +> -device virtio-blk-pci,drive=disk,iothread=iothread0 +> +> +2. Launch IO (random reads) from within the guest: +> +> nc -U /var/run/alma8-serial.sock +> +> ... +> +> [root@alma8-vm ~]# fio --name=randread --ioengine=libaio --direct=1 --bs=4k +> +> --size=1G --numjobs=1 --time_based=1 --runtime=300 --group_reporting +> +> --rw=randread --iodepth=1 --filename=/testfile +> +> +3. Run snapshots creation & removal of lower snapshot operation in a +> +loop (script attached): +> +> while /bin/true ; do ./remove_lower_snap.sh ; done +> +> +And then it occasionally hangs. +> +> +Note: I've tried bisecting this, and looks like deadlock occurs starting +> +from the following commit: +> +> +(BAD) 5bdbaebcce virtio: Re-enable notifications after drain +> +(GOOD) c42c3833e0 virtio-scsi: Attach event vq notifier with no_poll +> +> +On the latest v10.0.0 it does hang as well. 
+> +> +> +Here's backtrace of the main thread: +> +> +> #0 0x00007fc547d427ce in __ppoll (fds=0x557eb79657b0, nfds=1, +> +> timeout=, sigmask=0x0) at +> +> ../sysdeps/unix/sysv/linux/ppoll.c:43 +> +> #1 0x0000557eb47d955c in qemu_poll_ns (fds=0x557eb79657b0, nfds=1, +> +> timeout=-1) at ../util/qemu-timer.c:329 +> +> #2 0x0000557eb47b2204 in fdmon_poll_wait (ctx=0x557eb76c5f20, +> +> ready_list=0x7ffd94b4edd8, timeout=-1) at ../util/fdmon-poll.c:79 +> +> #3 0x0000557eb47b1c45 in aio_poll (ctx=0x557eb76c5f20, blocking=true) at +> +> ../util/aio-posix.c:730 +> +> #4 0x0000557eb4621edd in bdrv_do_drained_begin (bs=0x557eb795e950, +> +> parent=0x0, poll=true) at ../block/io.c:378 +> +> #5 0x0000557eb4621f7b in bdrv_drained_begin (bs=0x557eb795e950) at +> +> ../block/io.c:391 +> +> #6 0x0000557eb45ec125 in bdrv_change_aio_context (bs=0x557eb795e950, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7682 +> +> #7 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb7964250, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7608 +> +> #8 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb79575e0, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7668 +> +> #9 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb7e59110, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7608 +> +> #10 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb7e51960, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7668 +> +> #11 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb814ed80, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7608 +> +> #12 0x0000557eb45ee8e4 in child_job_change_aio_ctx (c=0x557eb7c9d3f0, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../blockjob.c:157 +> +> #13 0x0000557eb45ebe2d in bdrv_parent_change_aio_context (c=0x557eb7c9d3f0, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7592 +> +> #14 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb7d74310, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7661 +> +> #15 0x0000557eb45dcd7e in bdrv_child_cb_change_aio_ctx +> +> (child=0x557eb8565af0, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = +> +> {...}, tran=0x557eb7a87160, errp=0x0) at ../block.c:1234 +> +> #16 0x0000557eb45ebe2d in bdrv_parent_change_aio_context (c=0x557eb8565af0, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7592 +> +> #17 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb79575e0, +> +> ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +> +> errp=0x0) +> +> at ../block.c:7661 +> +> #18 0x0000557eb45ec1f3 in bdrv_try_change_aio_context (bs=0x557eb79575e0, +> +> ctx=0x557eb76c5f20, ignore_child=0x0, errp=0x0) at ../block.c:7715 +> +> #19 0x0000557eb45e1b15 in bdrv_root_unref_child (child=0x557eb7966f30) at +> +> ../block.c:3317 +> +> #20 0x0000557eb45eeaa8 in block_job_remove_all_bdrv (job=0x557eb7952800) at +> +> ../blockjob.c:209 +> +> #21 0x0000557eb45ee641 in 
block_job_free (job=0x557eb7952800) at +> +> ../blockjob.c:82 +> +> #22 0x0000557eb45f17af in job_unref_locked (job=0x557eb7952800) at +> +> ../job.c:474 +> +> #23 0x0000557eb45f257d in job_do_dismiss_locked (job=0x557eb7952800) at +> +> ../job.c:771 +> +> #24 0x0000557eb45f25fe in job_dismiss_locked (jobptr=0x7ffd94b4f400, +> +> errp=0x7ffd94b4f488) at ../job.c:783 +> +> --Type for more, q to quit, c to continue without paging-- +> +> #25 0x0000557eb45d8e84 in qmp_job_dismiss (id=0x557eb7aa42b0 +> +> "commit-snap1", errp=0x7ffd94b4f488) at ../job-qmp.c:138 +> +> #26 0x0000557eb472f6a3 in qmp_marshal_job_dismiss (args=0x7fc52c00a3b0, +> +> ret=0x7fc53c880da8, errp=0x7fc53c880da0) at qapi/qapi-commands-job.c:221 +> +> #27 0x0000557eb47a35f3 in do_qmp_dispatch_bh (opaque=0x7fc53c880e40) at +> +> ../qapi/qmp-dispatch.c:128 +> +> #28 0x0000557eb47d1cd2 in aio_bh_call (bh=0x557eb79568f0) at +> +> ../util/async.c:172 +> +> #29 0x0000557eb47d1df5 in aio_bh_poll (ctx=0x557eb76c0200) at +> +> ../util/async.c:219 +> +> #30 0x0000557eb47b12f3 in aio_dispatch (ctx=0x557eb76c0200) at +> +> ../util/aio-posix.c:436 +> +> #31 0x0000557eb47d2266 in aio_ctx_dispatch (source=0x557eb76c0200, +> +> callback=0x0, user_data=0x0) at ../util/async.c:361 +> +> #32 0x00007fc549232f4f in g_main_dispatch (context=0x557eb76c6430) at +> +> ../glib/gmain.c:3364 +> +> #33 g_main_context_dispatch (context=0x557eb76c6430) at ../glib/gmain.c:4079 +> +> #34 0x0000557eb47d3ab1 in glib_pollfds_poll () at ../util/main-loop.c:287 +> +> #35 0x0000557eb47d3b38 in os_host_main_loop_wait (timeout=0) at +> +> ../util/main-loop.c:310 +> +> #36 0x0000557eb47d3c58 in main_loop_wait (nonblocking=0) at +> +> ../util/main-loop.c:589 +> +> #37 0x0000557eb4218b01 in qemu_main_loop () at ../system/runstate.c:835 +> +> #38 0x0000557eb46df166 in qemu_default_main (opaque=0x0) at +> +> ../system/main.c:50 +> +> #39 0x0000557eb46df215 in main (argc=24, argv=0x7ffd94b4f8d8) at +> +> ../system/main.c:80 +> +> +> +And here's coroutine trying to acquire read lock: +> +> +> (gdb) qemu coroutine reader_queue->entries.sqh_first +> +> #0 0x0000557eb47d7068 in qemu_coroutine_switch (from_=0x557eb7aa48b0, +> +> to_=0x7fc537fff508, action=COROUTINE_YIELD) at +> +> ../util/coroutine-ucontext.c:321 +> +> #1 0x0000557eb47d4d4a in qemu_coroutine_yield () at +> +> ../util/qemu-coroutine.c:339 +> +> #2 0x0000557eb47d56c8 in qemu_co_queue_wait_impl (queue=0x557eb59954c0 +> +> , lock=0x7fc53c57de50, flags=0) at +> +> ../util/qemu-coroutine-lock.c:60 +> +> #3 0x0000557eb461fea7 in bdrv_graph_co_rdlock () at +> +> ../block/graph-lock.c:231 +> +> #4 0x0000557eb460c81a in graph_lockable_auto_lock (x=0x7fc53c57dee3) at +> +> /home/root/src/qemu/master/include/block/graph-lock.h:213 +> +> #5 0x0000557eb460fa41 in blk_co_do_preadv_part +> +> (blk=0x557eb84c0810, offset=6890553344, bytes=4096, +> +> qiov=0x7fc530006988, qiov_offset=0, flags=BDRV_REQ_REGISTERED_BUF) at +> +> ../block/block-backend.c:1339 +> +> #6 0x0000557eb46104d7 in blk_aio_read_entry (opaque=0x7fc530003240) at +> +> ../block/block-backend.c:1619 +> +> #7 0x0000557eb47d6c40 in coroutine_trampoline (i0=-1213577040, i1=21886) +> +> at ../util/coroutine-ucontext.c:175 +> +> #8 0x00007fc547c2a360 in __start_context () at +> +> ../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91 +> +> #9 0x00007ffd94b4ea40 in () +> +> #10 0x0000000000000000 in () +> +> +> +So it looks like main thread is processing job-dismiss request and is +> +holding write lock taken in block_job_remove_all_bdrv() (frame #20 +> +above). 
At the same time iothread spawns a coroutine which performs IO +> +request. Before the coroutine is spawned, blk_aio_prwv() increases +> +'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +> +trying to acquire the read lock. But main thread isn't releasing the +> +lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +> +Here's the deadlock. +> +> +Any comments and suggestions on the subject are welcomed. Thanks! +I think this is what the blk_wait_while_drained() call was supposed to +address in blk_co_do_preadv_part(). However, with the use of multiple +I/O threads, this is racy. + +Do you think that in your case we hit the small race window between the +checks in blk_wait_while_drained() and GRAPH_RDLOCK_GUARD()? Or is there +another reason why blk_wait_while_drained() didn't do its job? + +Kevin + +On 5/2/25 19:34, Kevin Wolf wrote: +Am 24.04.2025 um 19:32 hat Andrey Drobyshev geschrieben: +Hi all, + +There's a bug in block layer which leads to block graph deadlock. +Notably, it takes place when blockdev IO is processed within a separate +iothread. + +This was initially caught by our tests, and I was able to reduce it to a +relatively simple reproducer. Such deadlocks are probably supposed to +be covered in iotests/graph-changes-while-io, but this deadlock isn't. + +Basically what the reproducer does is launches QEMU with a drive having +'iothread' option set, creates a chain of 2 snapshots, launches +block-commit job for a snapshot and then dismisses the job, starting +from the lower snapshot. If the guest is issuing IO at the same time, +there's a race in acquiring block graph lock and a potential deadlock. + +Here's how it can be reproduced: + +1. Run QEMU: +SRCDIR=/path/to/srcdir +$SRCDIR/build/qemu-system-x86_64 -enable-kvm \ +-machine q35 -cpu Nehalem \ + -name guest=alma8-vm,debug-threads=on \ + -m 2g -smp 2 \ + -nographic -nodefaults \ + -qmp unix:/var/run/alma8-qmp.sock,server=on,wait=off \ + -serial unix:/var/run/alma8-serial.sock,server=on,wait=off \ + -object iothread,id=iothread0 \ + -blockdev +node-name=disk,driver=qcow2,file.driver=file,file.filename=/path/to/img/alma8.qcow2 + \ + -device virtio-blk-pci,drive=disk,iothread=iothread0 +2. Launch IO (random reads) from within the guest: +nc -U /var/run/alma8-serial.sock +... +[root@alma8-vm ~]# fio --name=randread --ioengine=libaio --direct=1 --bs=4k +--size=1G --numjobs=1 --time_based=1 --runtime=300 --group_reporting +--rw=randread --iodepth=1 --filename=/testfile +3. Run snapshots creation & removal of lower snapshot operation in a +loop (script attached): +while /bin/true ; do ./remove_lower_snap.sh ; done +And then it occasionally hangs. + +Note: I've tried bisecting this, and looks like deadlock occurs starting +from the following commit: + +(BAD) 5bdbaebcce virtio: Re-enable notifications after drain +(GOOD) c42c3833e0 virtio-scsi: Attach event vq notifier with no_poll + +On the latest v10.0.0 it does hang as well. 
+ + +Here's backtrace of the main thread: +#0 0x00007fc547d427ce in __ppoll (fds=0x557eb79657b0, nfds=1, timeout=, sigmask=0x0) at ../sysdeps/unix/sysv/linux/ppoll.c:43 +#1 0x0000557eb47d955c in qemu_poll_ns (fds=0x557eb79657b0, nfds=1, timeout=-1) +at ../util/qemu-timer.c:329 +#2 0x0000557eb47b2204 in fdmon_poll_wait (ctx=0x557eb76c5f20, +ready_list=0x7ffd94b4edd8, timeout=-1) at ../util/fdmon-poll.c:79 +#3 0x0000557eb47b1c45 in aio_poll (ctx=0x557eb76c5f20, blocking=true) at +../util/aio-posix.c:730 +#4 0x0000557eb4621edd in bdrv_do_drained_begin (bs=0x557eb795e950, parent=0x0, +poll=true) at ../block/io.c:378 +#5 0x0000557eb4621f7b in bdrv_drained_begin (bs=0x557eb795e950) at +../block/io.c:391 +#6 0x0000557eb45ec125 in bdrv_change_aio_context (bs=0x557eb795e950, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7682 +#7 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb7964250, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7608 +#8 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb79575e0, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7668 +#9 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb7e59110, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7608 +#10 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb7e51960, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7668 +#11 0x0000557eb45ebf2b in bdrv_child_change_aio_context (c=0x557eb814ed80, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7608 +#12 0x0000557eb45ee8e4 in child_job_change_aio_ctx (c=0x557eb7c9d3f0, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../blockjob.c:157 +#13 0x0000557eb45ebe2d in bdrv_parent_change_aio_context (c=0x557eb7c9d3f0, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7592 +#14 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb7d74310, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7661 +#15 0x0000557eb45dcd7e in bdrv_child_cb_change_aio_ctx + (child=0x557eb8565af0, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +tran=0x557eb7a87160, errp=0x0) at ../block.c:1234 +#16 0x0000557eb45ebe2d in bdrv_parent_change_aio_context (c=0x557eb8565af0, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7592 +#17 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb79575e0, +ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, tran=0x557eb7a87160, +errp=0x0) + at ../block.c:7661 +#18 0x0000557eb45ec1f3 in bdrv_try_change_aio_context (bs=0x557eb79575e0, +ctx=0x557eb76c5f20, ignore_child=0x0, errp=0x0) at ../block.c:7715 +#19 0x0000557eb45e1b15 in bdrv_root_unref_child (child=0x557eb7966f30) at +../block.c:3317 +#20 0x0000557eb45eeaa8 in block_job_remove_all_bdrv (job=0x557eb7952800) at +../blockjob.c:209 +#21 0x0000557eb45ee641 in block_job_free (job=0x557eb7952800) at +../blockjob.c:82 +#22 0x0000557eb45f17af in job_unref_locked (job=0x557eb7952800) at ../job.c:474 +#23 0x0000557eb45f257d in job_do_dismiss_locked (job=0x557eb7952800) at +../job.c:771 +#24 0x0000557eb45f25fe in job_dismiss_locked (jobptr=0x7ffd94b4f400, +errp=0x7ffd94b4f488) at ../job.c:783 
+--Type for more, q to quit, c to continue without paging-- +#25 0x0000557eb45d8e84 in qmp_job_dismiss (id=0x557eb7aa42b0 "commit-snap1", +errp=0x7ffd94b4f488) at ../job-qmp.c:138 +#26 0x0000557eb472f6a3 in qmp_marshal_job_dismiss (args=0x7fc52c00a3b0, +ret=0x7fc53c880da8, errp=0x7fc53c880da0) at qapi/qapi-commands-job.c:221 +#27 0x0000557eb47a35f3 in do_qmp_dispatch_bh (opaque=0x7fc53c880e40) at +../qapi/qmp-dispatch.c:128 +#28 0x0000557eb47d1cd2 in aio_bh_call (bh=0x557eb79568f0) at ../util/async.c:172 +#29 0x0000557eb47d1df5 in aio_bh_poll (ctx=0x557eb76c0200) at +../util/async.c:219 +#30 0x0000557eb47b12f3 in aio_dispatch (ctx=0x557eb76c0200) at +../util/aio-posix.c:436 +#31 0x0000557eb47d2266 in aio_ctx_dispatch (source=0x557eb76c0200, +callback=0x0, user_data=0x0) at ../util/async.c:361 +#32 0x00007fc549232f4f in g_main_dispatch (context=0x557eb76c6430) at +../glib/gmain.c:3364 +#33 g_main_context_dispatch (context=0x557eb76c6430) at ../glib/gmain.c:4079 +#34 0x0000557eb47d3ab1 in glib_pollfds_poll () at ../util/main-loop.c:287 +#35 0x0000557eb47d3b38 in os_host_main_loop_wait (timeout=0) at +../util/main-loop.c:310 +#36 0x0000557eb47d3c58 in main_loop_wait (nonblocking=0) at +../util/main-loop.c:589 +#37 0x0000557eb4218b01 in qemu_main_loop () at ../system/runstate.c:835 +#38 0x0000557eb46df166 in qemu_default_main (opaque=0x0) at ../system/main.c:50 +#39 0x0000557eb46df215 in main (argc=24, argv=0x7ffd94b4f8d8) at +../system/main.c:80 +And here's coroutine trying to acquire read lock: +(gdb) qemu coroutine reader_queue->entries.sqh_first +#0 0x0000557eb47d7068 in qemu_coroutine_switch (from_=0x557eb7aa48b0, +to_=0x7fc537fff508, action=COROUTINE_YIELD) at ../util/coroutine-ucontext.c:321 +#1 0x0000557eb47d4d4a in qemu_coroutine_yield () at +../util/qemu-coroutine.c:339 +#2 0x0000557eb47d56c8 in qemu_co_queue_wait_impl (queue=0x557eb59954c0 +, lock=0x7fc53c57de50, flags=0) at +../util/qemu-coroutine-lock.c:60 +#3 0x0000557eb461fea7 in bdrv_graph_co_rdlock () at ../block/graph-lock.c:231 +#4 0x0000557eb460c81a in graph_lockable_auto_lock (x=0x7fc53c57dee3) at +/home/root/src/qemu/master/include/block/graph-lock.h:213 +#5 0x0000557eb460fa41 in blk_co_do_preadv_part + (blk=0x557eb84c0810, offset=6890553344, bytes=4096, qiov=0x7fc530006988, +qiov_offset=0, flags=BDRV_REQ_REGISTERED_BUF) at ../block/block-backend.c:1339 +#6 0x0000557eb46104d7 in blk_aio_read_entry (opaque=0x7fc530003240) at +../block/block-backend.c:1619 +#7 0x0000557eb47d6c40 in coroutine_trampoline (i0=-1213577040, i1=21886) at +../util/coroutine-ucontext.c:175 +#8 0x00007fc547c2a360 in __start_context () at +../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91 +#9 0x00007ffd94b4ea40 in () +#10 0x0000000000000000 in () +So it looks like main thread is processing job-dismiss request and is +holding write lock taken in block_job_remove_all_bdrv() (frame #20 +above). At the same time iothread spawns a coroutine which performs IO +request. Before the coroutine is spawned, blk_aio_prwv() increases +'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +trying to acquire the read lock. But main thread isn't releasing the +lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +Here's the deadlock. + +Any comments and suggestions on the subject are welcomed. Thanks! +I think this is what the blk_wait_while_drained() call was supposed to +address in blk_co_do_preadv_part(). However, with the use of multiple +I/O threads, this is racy. 
+ +Do you think that in your case we hit the small race window between the +checks in blk_wait_while_drained() and GRAPH_RDLOCK_GUARD()? Or is there +another reason why blk_wait_while_drained() didn't do its job? + +Kevin +At my opinion there is very big race window. Main thread has +eaten graph write lock. After that another coroutine is stalled +within GRAPH_RDLOCK_GUARD() as there is no drain at the moment and only +after that main thread has started drain. That is why Fiona's idea is +looking working. Though this would mean that normally we should always +do that at the moment when we acquire write lock. May be even inside +this function. Den + +Am 02.05.2025 um 19:52 hat Denis V. Lunev geschrieben: +> +On 5/2/25 19:34, Kevin Wolf wrote: +> +> Am 24.04.2025 um 19:32 hat Andrey Drobyshev geschrieben: +> +> > Hi all, +> +> > +> +> > There's a bug in block layer which leads to block graph deadlock. +> +> > Notably, it takes place when blockdev IO is processed within a separate +> +> > iothread. +> +> > +> +> > This was initially caught by our tests, and I was able to reduce it to a +> +> > relatively simple reproducer. Such deadlocks are probably supposed to +> +> > be covered in iotests/graph-changes-while-io, but this deadlock isn't. +> +> > +> +> > Basically what the reproducer does is launches QEMU with a drive having +> +> > 'iothread' option set, creates a chain of 2 snapshots, launches +> +> > block-commit job for a snapshot and then dismisses the job, starting +> +> > from the lower snapshot. If the guest is issuing IO at the same time, +> +> > there's a race in acquiring block graph lock and a potential deadlock. +> +> > +> +> > Here's how it can be reproduced: +> +> > +> +> > 1. Run QEMU: +> +> > > SRCDIR=/path/to/srcdir +> +> > > $SRCDIR/build/qemu-system-x86_64 -enable-kvm \ +> +> > > -machine q35 -cpu Nehalem \ +> +> > > -name guest=alma8-vm,debug-threads=on \ +> +> > > -m 2g -smp 2 \ +> +> > > -nographic -nodefaults \ +> +> > > -qmp unix:/var/run/alma8-qmp.sock,server=on,wait=off \ +> +> > > -serial unix:/var/run/alma8-serial.sock,server=on,wait=off \ +> +> > > -object iothread,id=iothread0 \ +> +> > > -blockdev +> +> > > node-name=disk,driver=qcow2,file.driver=file,file.filename=/path/to/img/alma8.qcow2 +> +> > > \ +> +> > > -device virtio-blk-pci,drive=disk,iothread=iothread0 +> +> > 2. Launch IO (random reads) from within the guest: +> +> > > nc -U /var/run/alma8-serial.sock +> +> > > ... +> +> > > [root@alma8-vm ~]# fio --name=randread --ioengine=libaio --direct=1 +> +> > > --bs=4k --size=1G --numjobs=1 --time_based=1 --runtime=300 +> +> > > --group_reporting --rw=randread --iodepth=1 --filename=/testfile +> +> > 3. Run snapshots creation & removal of lower snapshot operation in a +> +> > loop (script attached): +> +> > > while /bin/true ; do ./remove_lower_snap.sh ; done +> +> > And then it occasionally hangs. +> +> > +> +> > Note: I've tried bisecting this, and looks like deadlock occurs starting +> +> > from the following commit: +> +> > +> +> > (BAD) 5bdbaebcce virtio: Re-enable notifications after drain +> +> > (GOOD) c42c3833e0 virtio-scsi: Attach event vq notifier with no_poll +> +> > +> +> > On the latest v10.0.0 it does hang as well. 
+> +> > +> +> > +> +> > Here's backtrace of the main thread: +> +> > +> +> > > #0 0x00007fc547d427ce in __ppoll (fds=0x557eb79657b0, nfds=1, +> +> > > timeout=, sigmask=0x0) at +> +> > > ../sysdeps/unix/sysv/linux/ppoll.c:43 +> +> > > #1 0x0000557eb47d955c in qemu_poll_ns (fds=0x557eb79657b0, nfds=1, +> +> > > timeout=-1) at ../util/qemu-timer.c:329 +> +> > > #2 0x0000557eb47b2204 in fdmon_poll_wait (ctx=0x557eb76c5f20, +> +> > > ready_list=0x7ffd94b4edd8, timeout=-1) at ../util/fdmon-poll.c:79 +> +> > > #3 0x0000557eb47b1c45 in aio_poll (ctx=0x557eb76c5f20, blocking=true) +> +> > > at ../util/aio-posix.c:730 +> +> > > #4 0x0000557eb4621edd in bdrv_do_drained_begin (bs=0x557eb795e950, +> +> > > parent=0x0, poll=true) at ../block/io.c:378 +> +> > > #5 0x0000557eb4621f7b in bdrv_drained_begin (bs=0x557eb795e950) at +> +> > > ../block/io.c:391 +> +> > > #6 0x0000557eb45ec125 in bdrv_change_aio_context (bs=0x557eb795e950, +> +> > > ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7682 +> +> > > #7 0x0000557eb45ebf2b in bdrv_child_change_aio_context +> +> > > (c=0x557eb7964250, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7608 +> +> > > #8 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb79575e0, +> +> > > ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7668 +> +> > > #9 0x0000557eb45ebf2b in bdrv_child_change_aio_context +> +> > > (c=0x557eb7e59110, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7608 +> +> > > #10 0x0000557eb45ec0c4 in bdrv_change_aio_context (bs=0x557eb7e51960, +> +> > > ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7668 +> +> > > #11 0x0000557eb45ebf2b in bdrv_child_change_aio_context +> +> > > (c=0x557eb814ed80, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7608 +> +> > > #12 0x0000557eb45ee8e4 in child_job_change_aio_ctx (c=0x557eb7c9d3f0, +> +> > > ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../blockjob.c:157 +> +> > > #13 0x0000557eb45ebe2d in bdrv_parent_change_aio_context +> +> > > (c=0x557eb7c9d3f0, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7592 +> +> > > #14 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb7d74310, +> +> > > ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7661 +> +> > > #15 0x0000557eb45dcd7e in bdrv_child_cb_change_aio_ctx +> +> > > (child=0x557eb8565af0, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 +> +> > > = {...}, tran=0x557eb7a87160, errp=0x0) at ../block.c:1234 +> +> > > #16 0x0000557eb45ebe2d in bdrv_parent_change_aio_context +> +> > > (c=0x557eb8565af0, ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7592 +> +> > > #17 0x0000557eb45ec06b in bdrv_change_aio_context (bs=0x557eb79575e0, +> +> > > ctx=0x557eb76c5f20, visited=0x557eb7e06b60 = {...}, +> +> > > tran=0x557eb7a87160, errp=0x0) +> +> > > at ../block.c:7661 +> +> > > #18 0x0000557eb45ec1f3 in bdrv_try_change_aio_context +> +> > > (bs=0x557eb79575e0, ctx=0x557eb76c5f20, ignore_child=0x0, 
errp=0x0) at +> +> > > ../block.c:7715 +> +> > > #19 0x0000557eb45e1b15 in bdrv_root_unref_child (child=0x557eb7966f30) +> +> > > at ../block.c:3317 +> +> > > #20 0x0000557eb45eeaa8 in block_job_remove_all_bdrv +> +> > > (job=0x557eb7952800) at ../blockjob.c:209 +> +> > > #21 0x0000557eb45ee641 in block_job_free (job=0x557eb7952800) at +> +> > > ../blockjob.c:82 +> +> > > #22 0x0000557eb45f17af in job_unref_locked (job=0x557eb7952800) at +> +> > > ../job.c:474 +> +> > > #23 0x0000557eb45f257d in job_do_dismiss_locked (job=0x557eb7952800) at +> +> > > ../job.c:771 +> +> > > #24 0x0000557eb45f25fe in job_dismiss_locked (jobptr=0x7ffd94b4f400, +> +> > > errp=0x7ffd94b4f488) at ../job.c:783 +> +> > > --Type for more, q to quit, c to continue without paging-- +> +> > > #25 0x0000557eb45d8e84 in qmp_job_dismiss (id=0x557eb7aa42b0 +> +> > > "commit-snap1", errp=0x7ffd94b4f488) at ../job-qmp.c:138 +> +> > > #26 0x0000557eb472f6a3 in qmp_marshal_job_dismiss (args=0x7fc52c00a3b0, +> +> > > ret=0x7fc53c880da8, errp=0x7fc53c880da0) at qapi/qapi-commands-job.c:221 +> +> > > #27 0x0000557eb47a35f3 in do_qmp_dispatch_bh (opaque=0x7fc53c880e40) at +> +> > > ../qapi/qmp-dispatch.c:128 +> +> > > #28 0x0000557eb47d1cd2 in aio_bh_call (bh=0x557eb79568f0) at +> +> > > ../util/async.c:172 +> +> > > #29 0x0000557eb47d1df5 in aio_bh_poll (ctx=0x557eb76c0200) at +> +> > > ../util/async.c:219 +> +> > > #30 0x0000557eb47b12f3 in aio_dispatch (ctx=0x557eb76c0200) at +> +> > > ../util/aio-posix.c:436 +> +> > > #31 0x0000557eb47d2266 in aio_ctx_dispatch (source=0x557eb76c0200, +> +> > > callback=0x0, user_data=0x0) at ../util/async.c:361 +> +> > > #32 0x00007fc549232f4f in g_main_dispatch (context=0x557eb76c6430) at +> +> > > ../glib/gmain.c:3364 +> +> > > #33 g_main_context_dispatch (context=0x557eb76c6430) at +> +> > > ../glib/gmain.c:4079 +> +> > > #34 0x0000557eb47d3ab1 in glib_pollfds_poll () at +> +> > > ../util/main-loop.c:287 +> +> > > #35 0x0000557eb47d3b38 in os_host_main_loop_wait (timeout=0) at +> +> > > ../util/main-loop.c:310 +> +> > > #36 0x0000557eb47d3c58 in main_loop_wait (nonblocking=0) at +> +> > > ../util/main-loop.c:589 +> +> > > #37 0x0000557eb4218b01 in qemu_main_loop () at ../system/runstate.c:835 +> +> > > #38 0x0000557eb46df166 in qemu_default_main (opaque=0x0) at +> +> > > ../system/main.c:50 +> +> > > #39 0x0000557eb46df215 in main (argc=24, argv=0x7ffd94b4f8d8) at +> +> > > ../system/main.c:80 +> +> > +> +> > And here's coroutine trying to acquire read lock: +> +> > +> +> > > (gdb) qemu coroutine reader_queue->entries.sqh_first +> +> > > #0 0x0000557eb47d7068 in qemu_coroutine_switch (from_=0x557eb7aa48b0, +> +> > > to_=0x7fc537fff508, action=COROUTINE_YIELD) at +> +> > > ../util/coroutine-ucontext.c:321 +> +> > > #1 0x0000557eb47d4d4a in qemu_coroutine_yield () at +> +> > > ../util/qemu-coroutine.c:339 +> +> > > #2 0x0000557eb47d56c8 in qemu_co_queue_wait_impl (queue=0x557eb59954c0 +> +> > > , lock=0x7fc53c57de50, flags=0) at +> +> > > ../util/qemu-coroutine-lock.c:60 +> +> > > #3 0x0000557eb461fea7 in bdrv_graph_co_rdlock () at +> +> > > ../block/graph-lock.c:231 +> +> > > #4 0x0000557eb460c81a in graph_lockable_auto_lock (x=0x7fc53c57dee3) +> +> > > at /home/root/src/qemu/master/include/block/graph-lock.h:213 +> +> > > #5 0x0000557eb460fa41 in blk_co_do_preadv_part +> +> > > (blk=0x557eb84c0810, offset=6890553344, bytes=4096, +> +> > > qiov=0x7fc530006988, qiov_offset=0, flags=BDRV_REQ_REGISTERED_BUF) at +> +> > > ../block/block-backend.c:1339 +> +> > > #6 0x0000557eb46104d7 in 
blk_aio_read_entry (opaque=0x7fc530003240) at +> +> > > ../block/block-backend.c:1619 +> +> > > #7 0x0000557eb47d6c40 in coroutine_trampoline (i0=-1213577040, +> +> > > i1=21886) at ../util/coroutine-ucontext.c:175 +> +> > > #8 0x00007fc547c2a360 in __start_context () at +> +> > > ../sysdeps/unix/sysv/linux/x86_64/__start_context.S:91 +> +> > > #9 0x00007ffd94b4ea40 in () +> +> > > #10 0x0000000000000000 in () +> +> > +> +> > So it looks like main thread is processing job-dismiss request and is +> +> > holding write lock taken in block_job_remove_all_bdrv() (frame #20 +> +> > above). At the same time iothread spawns a coroutine which performs IO +> +> > request. Before the coroutine is spawned, blk_aio_prwv() increases +> +> > 'in_flight' counter for Blk. Then blk_co_do_preadv_part() (frame #5) is +> +> > trying to acquire the read lock. But main thread isn't releasing the +> +> > lock as blk_root_drained_poll() returns true since blk->in_flight > 0. +> +> > Here's the deadlock. +> +> > +> +> > Any comments and suggestions on the subject are welcomed. Thanks! +> +> I think this is what the blk_wait_while_drained() call was supposed to +> +> address in blk_co_do_preadv_part(). However, with the use of multiple +> +> I/O threads, this is racy. +> +> +> +> Do you think that in your case we hit the small race window between the +> +> checks in blk_wait_while_drained() and GRAPH_RDLOCK_GUARD()? Or is there +> +> another reason why blk_wait_while_drained() didn't do its job? +> +> +> +At my opinion there is very big race window. Main thread has +> +eaten graph write lock. After that another coroutine is stalled +> +within GRAPH_RDLOCK_GUARD() as there is no drain at the moment and only +> +after that main thread has started drain. +You're right, I confused taking the write lock with draining there. + +> +That is why Fiona's idea is looking working. Though this would mean +> +that normally we should always do that at the moment when we acquire +> +write lock. May be even inside this function. +I actually see now that not all of my graph locking patches were merged. +At least I did have the thought that bdrv_drained_begin() must be marked +GRAPH_UNLOCKED because it polls. That means that calling it from inside +bdrv_try_change_aio_context() is actually forbidden (and that's the part +I didn't see back then because it doesn't have TSA annotations). + +If you refactor the code to move the drain out to before the lock is +taken, I think you end up with Fiona's patch, except you'll remove the +forbidden inner drain and add more annotations for some functions and +clarify the rules around them. I don't know, but I wouldn't be surprised +if along the process we find other bugs, too. + +So Fiona's drain looks right to me, but we should probably approach it +more systematically. + +Kevin + diff --git a/results/classifier/001/instruction/26095107 b/results/classifier/001/instruction/26095107 new file mode 100644 index 000000000..c06d35dd8 --- /dev/null +++ b/results/classifier/001/instruction/26095107 @@ -0,0 +1,158 @@ +instruction: 0.991 +other: 0.979 +semantic: 0.974 +mistranslation: 0.930 + +[Qemu-devel] [Bug Report] vm paused after succeeding to migrate + +Hi, all +I encounterd a bug when I try to migrate a windows vm. + +Enviroment information: +host A: cpu E5620(model WestmereEP without flag xsave) +host B: cpu E5-2643(model SandyBridgeEP with xsave) + +The reproduce steps is : +1. Start a windows 2008 vm with -cpu host(which means host-passthrough). +2. 
Migrate the vm to host B when cr4.OSXSAVE=0 (successfully). +3. Vm runs on host B for a while so that cr4.OSXSAVE changes to 1. +4. Then migrate the vm to host A (successfully), but vm was paused, and qemu +printed log as followed: + +KVM: entry failed, hardware error 0x80000021 + +If you're running a guest on an Intel machine without unrestricted mode +support, the failure can be most likely due to the guest entering an invalid +state for Intel VT. For example, the guest maybe running in big real mode +which is not supported on less recent Intel processors. + +EAX=019b3bb0 EBX=01a3ae80 ECX=01a61ce8 EDX=00000000 +ESI=01a62000 EDI=00000000 EBP=00000000 ESP=01718b20 +EIP=0185d982 EFL=00000286 [--S--P-] CPL=0 II=0 A20=1 SMM=0 HLT=0 +ES =0000 00000000 0000ffff 00009300 +CS =f000 ffff0000 0000ffff 00009b00 +SS =0000 00000000 0000ffff 00009300 +DS =0000 00000000 0000ffff 00009300 +FS =0000 00000000 0000ffff 00009300 +GS =0000 00000000 0000ffff 00009300 +LDT=0000 00000000 0000ffff 00008200 +TR =0000 00000000 0000ffff 00008b00 +GDT= 00000000 0000ffff +IDT= 00000000 0000ffff +CR0=60000010 CR2=00000000 CR3=00000000 CR4=00000000 +DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 +DR3=0000000000000000 +DR6=00000000ffff0ff0 DR7=0000000000000400 +EFER=0000000000000000 +Code=00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 <00> 00 00 00 +00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + +I have found that problem happened when kvm_put_sregs returns err -22(called by +kvm_arch_put_registers(qemu)). +Because kvm_arch_vcpu_ioctl_set_sregs(kvm-mod) checked that guest_cpuid_has no +X86_FEATURE_XSAVE but cr4.OSXSAVE=1. +So should we cancel migration when kvm_arch_put_registers returns error? + +* linzhecheng (address@hidden) wrote: +> +Hi, all +> +I encounterd a bug when I try to migrate a windows vm. +> +> +Enviroment information: +> +host A: cpu E5620(model WestmereEP without flag xsave) +> +host B: cpu E5-2643(model SandyBridgeEP with xsave) +> +> +The reproduce steps is : +> +1. Start a windows 2008 vm with -cpu host(which means host-passthrough). +> +2. Migrate the vm to host B when cr4.OSXSAVE=0 (successfully). +> +3. Vm runs on host B for a while so that cr4.OSXSAVE changes to 1. +> +4. Then migrate the vm to host A (successfully), but vm was paused, and qemu +> +printed log as followed: +Remember that migrating using -cpu host across different CPU models is NOT +expected to work. + +> +KVM: entry failed, hardware error 0x80000021 +> +> +If you're running a guest on an Intel machine without unrestricted mode +> +support, the failure can be most likely due to the guest entering an invalid +> +state for Intel VT. For example, the guest maybe running in big real mode +> +which is not supported on less recent Intel processors. 
+> +> +EAX=019b3bb0 EBX=01a3ae80 ECX=01a61ce8 EDX=00000000 +> +ESI=01a62000 EDI=00000000 EBP=00000000 ESP=01718b20 +> +EIP=0185d982 EFL=00000286 [--S--P-] CPL=0 II=0 A20=1 SMM=0 HLT=0 +> +ES =0000 00000000 0000ffff 00009300 +> +CS =f000 ffff0000 0000ffff 00009b00 +> +SS =0000 00000000 0000ffff 00009300 +> +DS =0000 00000000 0000ffff 00009300 +> +FS =0000 00000000 0000ffff 00009300 +> +GS =0000 00000000 0000ffff 00009300 +> +LDT=0000 00000000 0000ffff 00008200 +> +TR =0000 00000000 0000ffff 00008b00 +> +GDT= 00000000 0000ffff +> +IDT= 00000000 0000ffff +> +CR0=60000010 CR2=00000000 CR3=00000000 CR4=00000000 +> +DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 +> +DR3=0000000000000000 +> +DR6=00000000ffff0ff0 DR7=0000000000000400 +> +EFER=0000000000000000 +> +Code=00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 <00> 00 00 +> +00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +> +00 +> +> +I have found that problem happened when kvm_put_sregs returns err -22(called +> +by kvm_arch_put_registers(qemu)). +> +Because kvm_arch_vcpu_ioctl_set_sregs(kvm-mod) checked that guest_cpuid_has +> +no X86_FEATURE_XSAVE but cr4.OSXSAVE=1. +> +So should we cancel migration when kvm_arch_put_registers returns error? +It would seem good if we can make the migration fail there rather than +hitting that KVM error. +It looks like we need to do a bit of plumbing to convert the places that +call it to return a bool rather than void. + +Dave + +-- +Dr. David Alan Gilbert / address@hidden / Manchester, UK + diff --git a/results/classifier/001/instruction/33802194 b/results/classifier/001/instruction/33802194 new file mode 100644 index 000000000..b8e563ad9 --- /dev/null +++ b/results/classifier/001/instruction/33802194 @@ -0,0 +1,4939 @@ +instruction: 0.693 +mistranslation: 0.687 +semantic: 0.656 +other: 0.637 + +[BUG] cxl can not create region + +Hi list + +I want to test cxl functions in arm64, and found some problems I can't +figure out. + +My test environment: + +1. build latest bios from +https://github.com/tianocore/edk2.git +master +branch(cc2db6ebfb6d9d85ba4c7b35fba1fa37fffc0bc2) +2. build latest qemu-system-aarch64 from git://git.qemu.org/qemu.git +master branch(846dcf0ba4eff824c295f06550b8673ff3f31314). With cxl arm +support patch: +https://patchwork.kernel.org/project/cxl/cover/20220616141950.23374-1-Jonathan.Cameron@huawei.com/ +3. build Linux kernel from +https://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl.git +preview +branch(65fc1c3d26b96002a5aa1f4012fae4dc98fd5683) +4. 
build latest ndctl tools from +https://github.com/pmem/ndctl +create_region branch(8558b394e449779e3a4f3ae90fae77ede0bca159) + +And my qemu test commands: +sudo $QEMU_BIN -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 \ + -cpu max -smp 8 -nographic -no-reboot \ + -kernel $KERNEL -bios $BIOS_BIN \ + -drive if=none,file=$ROOTFS,format=qcow2,id=hd \ + -device virtio-blk-pci,drive=hd -append 'root=/dev/vda1 +nokaslr dyndbg="module cxl* +p"' \ + -object memory-backend-ram,size=4G,id=mem0 \ + -numa node,nodeid=0,cpus=0-7,memdev=mem0 \ + -net nic -net user,hostfwd=tcp::2222-:22 -enable-kvm \ + -object +memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M +\ + -object +memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M +\ + -object +memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M +\ + -object +memory-backend-file,id=cxl-mem3,share=on,mem-path=/tmp/cxltest3.raw,size=256M +\ + -object +memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa0.raw,size=256M +\ + -object +memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa1.raw,size=256M +\ + -object +memory-backend-file,id=cxl-lsa2,share=on,mem-path=/tmp/lsa2.raw,size=256M +\ + -object +memory-backend-file,id=cxl-lsa3,share=on,mem-path=/tmp/lsa3.raw,size=256M +\ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \ + -device cxl-upstream,bus=root_port0,id=us0 \ + -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \ + -device +cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \ + -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \ + -device +cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \ + -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \ + -device +cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \ + -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \ + -device +cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \ + -M +cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k + +And I have got two problems. +1. When I want to create x1 region with command: "cxl create-region -d +decoder0.0 -w 1 -g 4096 mem0", kernel crashed with null pointer +reference. 
Crash log: + +[ 534.697324] cxl_region region0: config state: 0 +[ 534.697346] cxl_region region0: probe: -6 +[ 534.697368] cxl_acpi ACPI0017:00: decoder0.0: created region0 +[ 534.699115] cxl region0: mem0:endpoint3 decoder3.0 add: +mem0:decoder3.0 @ 0 next: none nr_eps: 1 nr_targets: 1 +[ 534.699149] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +mem0:decoder3.0 @ 0 next: mem0 nr_eps: 1 nr_targets: 1 +[ 534.699167] cxl region0: ACPI0016:00:port1 decoder1.0 add: +mem0:decoder3.0 @ 0 next: 0000:0d:00.0 nr_eps: 1 nr_targets: 1 +[ 534.699176] cxl region0: ACPI0016:00:port1 iw: 1 ig: 256 +[ 534.699182] cxl region0: ACPI0016:00:port1 target[0] = 0000:0c:00.0 +for mem0:decoder3.0 @ 0 +[ 534.699189] cxl region0: 0000:0d:00.0:port2 iw: 1 ig: 256 +[ 534.699193] cxl region0: 0000:0d:00.0:port2 target[0] = +0000:0e:00.0 for mem0:decoder3.0 @ 0 +[ 534.699405] Unable to handle kernel NULL pointer dereference at +virtual address 0000000000000000 +[ 534.701474] Mem abort info: +[ 534.701994] ESR = 0x0000000086000004 +[ 534.702653] EC = 0x21: IABT (current EL), IL = 32 bits +[ 534.703616] SET = 0, FnV = 0 +[ 534.704174] EA = 0, S1PTW = 0 +[ 534.704803] FSC = 0x04: level 0 translation fault +[ 534.705694] user pgtable: 4k pages, 48-bit VAs, pgdp=000000010144a000 +[ 534.706875] [0000000000000000] pgd=0000000000000000, p4d=0000000000000000 +[ 534.709855] Internal error: Oops: 86000004 [#1] PREEMPT SMP +[ 534.710301] Modules linked in: +[ 534.710546] CPU: 7 PID: 331 Comm: cxl Not tainted +5.19.0-rc3-00064-g65fc1c3d26b9-dirty #11 +[ 534.715393] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 +[ 534.717179] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +[ 534.719190] pc : 0x0 +[ 534.719928] lr : commit_store+0x118/0x2cc +[ 534.721007] sp : ffff80000aec3c30 +[ 534.721793] x29: ffff80000aec3c30 x28: ffff0000da62e740 x27: ffff0000c0c06b30 +[ 534.723875] x26: 0000000000000000 x25: ffff0000c0a2a400 x24: ffff0000c0a29400 +[ 534.725440] x23: 0000000000000003 x22: 0000000000000000 x21: ffff0000c0c06800 +[ 534.727312] x20: 0000000000000000 x19: ffff0000c1559800 x18: 0000000000000000 +[ 534.729138] x17: 0000000000000000 x16: 0000000000000000 x15: 0000ffffd41fe838 +[ 534.731046] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 +[ 534.732402] x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000 +[ 534.734432] x8 : 0000000000000000 x7 : 0000000000000000 x6 : ffff0000c0906e80 +[ 534.735921] x5 : 0000000000000000 x4 : 0000000000000000 x3 : ffff80000aec3bf0 +[ 534.737437] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0000c155a000 +[ 534.738878] Call trace: +[ 534.739368] 0x0 +[ 534.739713] dev_attr_store+0x1c/0x30 +[ 534.740186] sysfs_kf_write+0x48/0x58 +[ 534.740961] kernfs_fop_write_iter+0x128/0x184 +[ 534.741872] new_sync_write+0xdc/0x158 +[ 534.742706] vfs_write+0x1ac/0x2a8 +[ 534.743440] ksys_write+0x68/0xf0 +[ 534.744328] __arm64_sys_write+0x1c/0x28 +[ 534.745180] invoke_syscall+0x44/0xf0 +[ 534.745989] el0_svc_common+0x4c/0xfc +[ 534.746661] do_el0_svc+0x60/0xa8 +[ 534.747378] el0_svc+0x2c/0x78 +[ 534.748066] el0t_64_sync_handler+0xb8/0x12c +[ 534.748919] el0t_64_sync+0x18c/0x190 +[ 534.749629] Code: bad PC value +[ 534.750169] ---[ end trace 0000000000000000 ]--- + +2. When I want to create x4 region with command: "cxl create-region -d +decoder0.0 -w 4 -g 4096 -m mem0 mem1 mem2 mem3". 
I got below errors: + +cxl region: create_region: region0: failed to set target3 to mem3 +cxl region: cmd_create_region: created 0 regions + +And kernel log as below: +[ 60.536663] cxl_region region0: config state: 0 +[ 60.536675] cxl_region region0: probe: -6 +[ 60.536696] cxl_acpi ACPI0017:00: decoder0.0: created region0 +[ 60.538251] cxl region0: mem0:endpoint3 decoder3.0 add: +mem0:decoder3.0 @ 0 next: none nr_eps: 1 nr_targets: 1 +[ 60.538278] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +mem0:decoder3.0 @ 0 next: mem0 nr_eps: 1 nr_targets: 1 +[ 60.538295] cxl region0: ACPI0016:00:port1 decoder1.0 add: +mem0:decoder3.0 @ 0 next: 0000:0d:00.0 nr_eps: 1 nr_targets: 1 +[ 60.538647] cxl region0: mem1:endpoint4 decoder4.0 add: +mem1:decoder4.0 @ 1 next: none nr_eps: 1 nr_targets: 1 +[ 60.538663] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +mem1:decoder4.0 @ 1 next: mem1 nr_eps: 2 nr_targets: 2 +[ 60.538675] cxl region0: ACPI0016:00:port1 decoder1.0 add: +mem1:decoder4.0 @ 1 next: 0000:0d:00.0 nr_eps: 2 nr_targets: 1 +[ 60.539311] cxl region0: mem2:endpoint5 decoder5.0 add: +mem2:decoder5.0 @ 2 next: none nr_eps: 1 nr_targets: 1 +[ 60.539332] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +mem2:decoder5.0 @ 2 next: mem2 nr_eps: 3 nr_targets: 3 +[ 60.539343] cxl region0: ACPI0016:00:port1 decoder1.0 add: +mem2:decoder5.0 @ 2 next: 0000:0d:00.0 nr_eps: 3 nr_targets: 1 +[ 60.539711] cxl region0: mem3:endpoint6 decoder6.0 add: +mem3:decoder6.0 @ 3 next: none nr_eps: 1 nr_targets: 1 +[ 60.539723] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +mem3:decoder6.0 @ 3 next: mem3 nr_eps: 4 nr_targets: 4 +[ 60.539735] cxl region0: ACPI0016:00:port1 decoder1.0 add: +mem3:decoder6.0 @ 3 next: 0000:0d:00.0 nr_eps: 4 nr_targets: 1 +[ 60.539742] cxl region0: ACPI0016:00:port1 iw: 1 ig: 256 +[ 60.539747] cxl region0: ACPI0016:00:port1 target[0] = 0000:0c:00.0 +for mem0:decoder3.0 @ 0 +[ 60.539754] cxl region0: 0000:0d:00.0:port2 iw: 4 ig: 512 +[ 60.539758] cxl region0: 0000:0d:00.0:port2 target[0] = +0000:0e:00.0 for mem0:decoder3.0 @ 0 +[ 60.539764] cxl region0: ACPI0016:00:port1: cannot host mem1:decoder4.0 at 1 + +I have tried to write sysfs node manually, got same errors. + +Hope I can get some helps here. + +Bob + +On Fri, 5 Aug 2022 10:20:23 +0800 +Bobo WL wrote: + +> +Hi list +> +> +I want to test cxl functions in arm64, and found some problems I can't +> +figure out. +Hi Bob, + +Glad to see people testing this code. + +> +> +My test environment: +> +> +1. build latest bios from +https://github.com/tianocore/edk2.git +master +> +branch(cc2db6ebfb6d9d85ba4c7b35fba1fa37fffc0bc2) +> +2. build latest qemu-system-aarch64 from git://git.qemu.org/qemu.git +> +master branch(846dcf0ba4eff824c295f06550b8673ff3f31314). With cxl arm +> +support patch: +> +https://patchwork.kernel.org/project/cxl/cover/20220616141950.23374-1-Jonathan.Cameron@huawei.com/ +> +3. build Linux kernel from +> +https://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl.git +preview +> +branch(65fc1c3d26b96002a5aa1f4012fae4dc98fd5683) +> +4. 
build latest ndctl tools from +https://github.com/pmem/ndctl +> +create_region branch(8558b394e449779e3a4f3ae90fae77ede0bca159) +> +> +And my qemu test commands: +> +sudo $QEMU_BIN -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 \ +> +-cpu max -smp 8 -nographic -no-reboot \ +> +-kernel $KERNEL -bios $BIOS_BIN \ +> +-drive if=none,file=$ROOTFS,format=qcow2,id=hd \ +> +-device virtio-blk-pci,drive=hd -append 'root=/dev/vda1 +> +nokaslr dyndbg="module cxl* +p"' \ +> +-object memory-backend-ram,size=4G,id=mem0 \ +> +-numa node,nodeid=0,cpus=0-7,memdev=mem0 \ +> +-net nic -net user,hostfwd=tcp::2222-:22 -enable-kvm \ +> +-object +> +memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-mem3,share=on,mem-path=/tmp/cxltest3.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa0.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa1.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa2,share=on,mem-path=/tmp/lsa2.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa3,share=on,mem-path=/tmp/lsa3.raw,size=256M +> +\ +> +-device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ +> +-device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \ +Probably not related to your problem, but there is a disconnect in QEMU / +kernel assumptionsaround the presence of an HDM decoder when a HB only +has a single root port. Spec allows it to be provided or not as an +implementation choice. +Kernel assumes it isn't provide. Qemu assumes it is. + +The temporary solution is to throw in a second root port on the HB and not +connect anything to it. Longer term I may special case this so that the +particular +decoder defaults to pass through settings in QEMU if there is only one root +port. + +> +-device cxl-upstream,bus=root_port0,id=us0 \ +> +-device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \ +> +-device +> +cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \ +> +-device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \ +> +-device +> +cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \ +> +-device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \ +> +-device +> +cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \ +> +-device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \ +> +-device +> +cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \ +> +-M +> +cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k +> +> +And I have got two problems. +> +1. When I want to create x1 region with command: "cxl create-region -d +> +decoder0.0 -w 1 -g 4096 mem0", kernel crashed with null pointer +> +reference. Crash log: +> +> +[ 534.697324] cxl_region region0: config state: 0 +> +[ 534.697346] cxl_region region0: probe: -6 +Seems odd this is up here. But maybe fine. 
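+
+(For reference, the "second root port" workaround mentioned above just means
+adding one more, unused, cxl-rp to the command line, e.g. something like
+
+  -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \
+
+where the port/id/slot values are only examples, chosen not to clash with the
+devices already present.)
+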
+ +> +[ 534.697368] cxl_acpi ACPI0017:00: decoder0.0: created region0 +> +[ 534.699115] cxl region0: mem0:endpoint3 decoder3.0 add: +> +mem0:decoder3.0 @ 0 next: none nr_eps: 1 nr_targets: 1 +> +[ 534.699149] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem0:decoder3.0 @ 0 next: mem0 nr_eps: 1 nr_targets: 1 +> +[ 534.699167] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem0:decoder3.0 @ 0 next: 0000:0d:00.0 nr_eps: 1 nr_targets: 1 +> +[ 534.699176] cxl region0: ACPI0016:00:port1 iw: 1 ig: 256 +> +[ 534.699182] cxl region0: ACPI0016:00:port1 target[0] = 0000:0c:00.0 +> +for mem0:decoder3.0 @ 0 +> +[ 534.699189] cxl region0: 0000:0d:00.0:port2 iw: 1 ig: 256 +> +[ 534.699193] cxl region0: 0000:0d:00.0:port2 target[0] = +> +0000:0e:00.0 for mem0:decoder3.0 @ 0 +> +[ 534.699405] Unable to handle kernel NULL pointer dereference at +> +virtual address 0000000000000000 +> +[ 534.701474] Mem abort info: +> +[ 534.701994] ESR = 0x0000000086000004 +> +[ 534.702653] EC = 0x21: IABT (current EL), IL = 32 bits +> +[ 534.703616] SET = 0, FnV = 0 +> +[ 534.704174] EA = 0, S1PTW = 0 +> +[ 534.704803] FSC = 0x04: level 0 translation fault +> +[ 534.705694] user pgtable: 4k pages, 48-bit VAs, pgdp=000000010144a000 +> +[ 534.706875] [0000000000000000] pgd=0000000000000000, p4d=0000000000000000 +> +[ 534.709855] Internal error: Oops: 86000004 [#1] PREEMPT SMP +> +[ 534.710301] Modules linked in: +> +[ 534.710546] CPU: 7 PID: 331 Comm: cxl Not tainted +> +5.19.0-rc3-00064-g65fc1c3d26b9-dirty #11 +> +[ 534.715393] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 +> +[ 534.717179] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +> +[ 534.719190] pc : 0x0 +> +[ 534.719928] lr : commit_store+0x118/0x2cc +> +[ 534.721007] sp : ffff80000aec3c30 +> +[ 534.721793] x29: ffff80000aec3c30 x28: ffff0000da62e740 x27: +> +ffff0000c0c06b30 +> +[ 534.723875] x26: 0000000000000000 x25: ffff0000c0a2a400 x24: +> +ffff0000c0a29400 +> +[ 534.725440] x23: 0000000000000003 x22: 0000000000000000 x21: +> +ffff0000c0c06800 +> +[ 534.727312] x20: 0000000000000000 x19: ffff0000c1559800 x18: +> +0000000000000000 +> +[ 534.729138] x17: 0000000000000000 x16: 0000000000000000 x15: +> +0000ffffd41fe838 +> +[ 534.731046] x14: 0000000000000000 x13: 0000000000000000 x12: +> +0000000000000000 +> +[ 534.732402] x11: 0000000000000000 x10: 0000000000000000 x9 : +> +0000000000000000 +> +[ 534.734432] x8 : 0000000000000000 x7 : 0000000000000000 x6 : +> +ffff0000c0906e80 +> +[ 534.735921] x5 : 0000000000000000 x4 : 0000000000000000 x3 : +> +ffff80000aec3bf0 +> +[ 534.737437] x2 : 0000000000000000 x1 : 0000000000000000 x0 : +> +ffff0000c155a000 +> +[ 534.738878] Call trace: +> +[ 534.739368] 0x0 +> +[ 534.739713] dev_attr_store+0x1c/0x30 +> +[ 534.740186] sysfs_kf_write+0x48/0x58 +> +[ 534.740961] kernfs_fop_write_iter+0x128/0x184 +> +[ 534.741872] new_sync_write+0xdc/0x158 +> +[ 534.742706] vfs_write+0x1ac/0x2a8 +> +[ 534.743440] ksys_write+0x68/0xf0 +> +[ 534.744328] __arm64_sys_write+0x1c/0x28 +> +[ 534.745180] invoke_syscall+0x44/0xf0 +> +[ 534.745989] el0_svc_common+0x4c/0xfc +> +[ 534.746661] do_el0_svc+0x60/0xa8 +> +[ 534.747378] el0_svc+0x2c/0x78 +> +[ 534.748066] el0t_64_sync_handler+0xb8/0x12c +> +[ 534.748919] el0t_64_sync+0x18c/0x190 +> +[ 534.749629] Code: bad PC value +> +[ 534.750169] ---[ end trace 0000000000000000 ]--- +> +> +2. When I want to create x4 region with command: "cxl create-region -d +> +decoder0.0 -w 4 -g 4096 -m mem0 mem1 mem2 mem3". 
I got below errors: +> +> +cxl region: create_region: region0: failed to set target3 to mem3 +> +cxl region: cmd_create_region: created 0 regions +> +> +And kernel log as below: +> +[ 60.536663] cxl_region region0: config state: 0 +> +[ 60.536675] cxl_region region0: probe: -6 +> +[ 60.536696] cxl_acpi ACPI0017:00: decoder0.0: created region0 +> +[ 60.538251] cxl region0: mem0:endpoint3 decoder3.0 add: +> +mem0:decoder3.0 @ 0 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.538278] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem0:decoder3.0 @ 0 next: mem0 nr_eps: 1 nr_targets: 1 +> +[ 60.538295] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem0:decoder3.0 @ 0 next: 0000:0d:00.0 nr_eps: 1 nr_targets: 1 +> +[ 60.538647] cxl region0: mem1:endpoint4 decoder4.0 add: +> +mem1:decoder4.0 @ 1 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.538663] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem1:decoder4.0 @ 1 next: mem1 nr_eps: 2 nr_targets: 2 +> +[ 60.538675] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem1:decoder4.0 @ 1 next: 0000:0d:00.0 nr_eps: 2 nr_targets: 1 +> +[ 60.539311] cxl region0: mem2:endpoint5 decoder5.0 add: +> +mem2:decoder5.0 @ 2 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.539332] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem2:decoder5.0 @ 2 next: mem2 nr_eps: 3 nr_targets: 3 +> +[ 60.539343] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem2:decoder5.0 @ 2 next: 0000:0d:00.0 nr_eps: 3 nr_targets: 1 +> +[ 60.539711] cxl region0: mem3:endpoint6 decoder6.0 add: +> +mem3:decoder6.0 @ 3 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.539723] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem3:decoder6.0 @ 3 next: mem3 nr_eps: 4 nr_targets: 4 +> +[ 60.539735] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem3:decoder6.0 @ 3 next: 0000:0d:00.0 nr_eps: 4 nr_targets: 1 +> +[ 60.539742] cxl region0: ACPI0016:00:port1 iw: 1 ig: 256 +> +[ 60.539747] cxl region0: ACPI0016:00:port1 target[0] = 0000:0c:00.0 +> +for mem0:decoder3.0 @ 0 +> +[ 60.539754] cxl region0: 0000:0d:00.0:port2 iw: 4 ig: 512 +This looks like off by 1 that should be fixed in the below mentioned +cxl/pending branch. That ig should be 256. Note the fix was +for a test case with a fat HB and no switch, but certainly looks +like this is the same issue. + +> +[ 60.539758] cxl region0: 0000:0d:00.0:port2 target[0] = +> +0000:0e:00.0 for mem0:decoder3.0 @ 0 +> +[ 60.539764] cxl region0: ACPI0016:00:port1: cannot host mem1:decoder4.0 at +> +1 +> +> +I have tried to write sysfs node manually, got same errors. +When stepping through by hand, which sysfs write triggers the crash above? + +Not sure it's related, but I've just sent out a fix to the +target register handling in QEMU. +20220808122051.14822-1-Jonathan.Cameron@huawei.com +/T/#m47ff985412ce44559e6b04d677c302f8cd371330">https://lore.kernel.org/linux-cxl/ +20220808122051.14822-1-Jonathan.Cameron@huawei.com +/T/#m47ff985412ce44559e6b04d677c302f8cd371330 +I did have one instance last week of triggering what looked to be a race +condition but +the stack trace doesn't looks related to what you've hit. + +It will probably be a few days before I have time to take a look at replicating +what you have seen. + +If you have time, try using the kernel.org cxl/pending branch as there are +a few additional fixes on there since you sent this email. Optimistic to hope +this is covered by one of those, but at least it will mean we are trying to +replicate +on same branch. + +Jonathan + + +> +> +Hope I can get some helps here. 
+> +> +Bob + +Hi Jonathan + +Thanks for your reply! + +On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron + wrote: +> +> +Probably not related to your problem, but there is a disconnect in QEMU / +> +kernel assumptionsaround the presence of an HDM decoder when a HB only +> +has a single root port. Spec allows it to be provided or not as an +> +implementation choice. +> +Kernel assumes it isn't provide. Qemu assumes it is. +> +> +The temporary solution is to throw in a second root port on the HB and not +> +connect anything to it. Longer term I may special case this so that the +> +particular +> +decoder defaults to pass through settings in QEMU if there is only one root +> +port. +> +You are right! After adding an extra HB in qemu, I can create a x1 +region successfully. +But have some errors in Nvdimm: + +[ 74.925838] Unknown online node for memory at 0x10000000000, assuming node 0 +[ 74.925846] Unknown target node for memory at 0x10000000000, assuming node 0 +[ 74.927470] nd_region region0: nmem0: is disabled, failing probe + +And x4 region still failed with same errors, using latest cxl/preview +branch don't work. +I have picked "Two CXL emulation fixes" patches in qemu, still not working. + +Bob + +On Tue, 9 Aug 2022 21:07:06 +0800 +Bobo WL wrote: + +> +Hi Jonathan +> +> +Thanks for your reply! +> +> +On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> + wrote: +> +> +> +> Probably not related to your problem, but there is a disconnect in QEMU / +> +> kernel assumptionsaround the presence of an HDM decoder when a HB only +> +> has a single root port. Spec allows it to be provided or not as an +> +> implementation choice. +> +> Kernel assumes it isn't provide. Qemu assumes it is. +> +> +> +> The temporary solution is to throw in a second root port on the HB and not +> +> connect anything to it. Longer term I may special case this so that the +> +> particular +> +> decoder defaults to pass through settings in QEMU if there is only one root +> +> port. +> +> +> +> +You are right! After adding an extra HB in qemu, I can create a x1 +> +region successfully. +> +But have some errors in Nvdimm: +> +> +[ 74.925838] Unknown online node for memory at 0x10000000000, assuming node > 0 +> +[ 74.925846] Unknown target node for memory at 0x10000000000, assuming node > 0 +> +[ 74.927470] nd_region region0: nmem0: is disabled, failing probe +Ah. I've seen this one, but not chased it down yet. Was on my todo list to +chase +down. Once I reach this state I can verify the HDM Decode is correct which is +what +I've been using to test (Which wasn't true until earlier this week). +I'm currently testing via devmem, more for historical reasons than because it +makes +that much sense anymore. + +> +> +And x4 region still failed with same errors, using latest cxl/preview +> +branch don't work. +> +I have picked "Two CXL emulation fixes" patches in qemu, still not working. +> +> +Bob + +On Tue, 9 Aug 2022 17:08:25 +0100 +Jonathan Cameron wrote: + +> +On Tue, 9 Aug 2022 21:07:06 +0800 +> +Bobo WL wrote: +> +> +> Hi Jonathan +> +> +> +> Thanks for your reply! +> +> +> +> On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> wrote: +> +> > +> +> > Probably not related to your problem, but there is a disconnect in QEMU / +> +> > kernel assumptionsaround the presence of an HDM decoder when a HB only +> +> > has a single root port. Spec allows it to be provided or not as an +> +> > implementation choice. +> +> > Kernel assumes it isn't provide. Qemu assumes it is. 
+> +> > +> +> > The temporary solution is to throw in a second root port on the HB and not +> +> > connect anything to it. Longer term I may special case this so that the +> +> > particular +> +> > decoder defaults to pass through settings in QEMU if there is only one +> +> > root port. +> +> > +> +> +> +> You are right! After adding an extra HB in qemu, I can create a x1 +> +> region successfully. +> +> But have some errors in Nvdimm: +> +> +> +> [ 74.925838] Unknown online node for memory at 0x10000000000, assuming +> +> node 0 +> +> [ 74.925846] Unknown target node for memory at 0x10000000000, assuming +> +> node 0 +> +> [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> +Ah. I've seen this one, but not chased it down yet. Was on my todo list to +> +chase +> +down. Once I reach this state I can verify the HDM Decode is correct which is +> +what +> +I've been using to test (Which wasn't true until earlier this week). +> +I'm currently testing via devmem, more for historical reasons than because it +> +makes +> +that much sense anymore. +*embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +I'd forgotten that was still on the todo list. I don't think it will +be particularly hard to do and will take a look in next few days. + +Very very indirectly this error is causing a driver probe fail that means that +we hit a code path that has a rather odd looking check on NDD_LABELING. +Should not have gotten near that path though - hence the problem is actually +when we call cxl_pmem_get_config_data() and it returns an error because +we haven't fully connected up the command in QEMU. + +Jonathan + + +> +> +> +> +> And x4 region still failed with same errors, using latest cxl/preview +> +> branch don't work. +> +> I have picked "Two CXL emulation fixes" patches in qemu, still not working. +> +> +> +> Bob + +On Thu, 11 Aug 2022 18:08:57 +0100 +Jonathan Cameron via wrote: + +> +On Tue, 9 Aug 2022 17:08:25 +0100 +> +Jonathan Cameron wrote: +> +> +> On Tue, 9 Aug 2022 21:07:06 +0800 +> +> Bobo WL wrote: +> +> +> +> > Hi Jonathan +> +> > +> +> > Thanks for your reply! +> +> > +> +> > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > wrote: +> +> > > +> +> > > Probably not related to your problem, but there is a disconnect in QEMU +> +> > > / +> +> > > kernel assumptionsaround the presence of an HDM decoder when a HB only +> +> > > has a single root port. Spec allows it to be provided or not as an +> +> > > implementation choice. +> +> > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > +> +> > > The temporary solution is to throw in a second root port on the HB and +> +> > > not +> +> > > connect anything to it. Longer term I may special case this so that +> +> > > the particular +> +> > > decoder defaults to pass through settings in QEMU if there is only one +> +> > > root port. +> +> > > +> +> > +> +> > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > region successfully. +> +> > But have some errors in Nvdimm: +> +> > +> +> > [ 74.925838] Unknown online node for memory at 0x10000000000, assuming +> +> > node 0 +> +> > [ 74.925846] Unknown target node for memory at 0x10000000000, assuming +> +> > node 0 +> +> > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> +> +> Ah. I've seen this one, but not chased it down yet. Was on my todo list to +> +> chase +> +> down. 
Once I reach this state I can verify the HDM Decode is correct which +> +> is what +> +> I've been using to test (Which wasn't true until earlier this week). +> +> I'm currently testing via devmem, more for historical reasons than because +> +> it makes +> +> that much sense anymore. +> +> +*embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +I'd forgotten that was still on the todo list. I don't think it will +> +be particularly hard to do and will take a look in next few days. +> +> +Very very indirectly this error is causing a driver probe fail that means that +> +we hit a code path that has a rather odd looking check on NDD_LABELING. +> +Should not have gotten near that path though - hence the problem is actually +> +when we call cxl_pmem_get_config_data() and it returns an error because +> +we haven't fully connected up the command in QEMU. +So a least one bug in QEMU. We were not supporting variable length payloads on +mailbox +inputs (but were on outputs). That hasn't mattered until we get to LSA writes. +We just need to relax condition on the supplied length. + +diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +index c352a935c4..fdda9529fe 100644 +--- a/hw/cxl/cxl-mailbox-utils.c ++++ b/hw/cxl/cxl-mailbox-utils.c +@@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) + cxl_cmd = &cxl_cmd_set[set][cmd]; + h = cxl_cmd->handler; + if (h) { +- if (len == cxl_cmd->in) { ++ if (len == cxl_cmd->in || !cxl_cmd->in) { + cxl_cmd->payload = cxl_dstate->mbox_reg_state + + A_CXL_DEV_CMD_PAYLOAD; + ret = (*h)(cxl_cmd, cxl_dstate, &len); + + +This lets the nvdimm/region probe fine, but I'm getting some issues with +namespace capacity so I'll look at what is causing that next. +Unfortunately I'm not that familiar with the driver/nvdimm side of things +so it's take a while to figure out what kicks off what! + +Jonathan + +> +> +Jonathan +> +> +> +> +> +> > +> +> > And x4 region still failed with same errors, using latest cxl/preview +> +> > branch don't work. +> +> > I have picked "Two CXL emulation fixes" patches in qemu, still not +> +> > working. +> +> > +> +> > Bob +> +> + +Jonathan Cameron wrote: +> +On Thu, 11 Aug 2022 18:08:57 +0100 +> +Jonathan Cameron via wrote: +> +> +> On Tue, 9 Aug 2022 17:08:25 +0100 +> +> Jonathan Cameron wrote: +> +> +> +> > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > Bobo WL wrote: +> +> > +> +> > > Hi Jonathan +> +> > > +> +> > > Thanks for your reply! +> +> > > +> +> > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > wrote: +> +> > > > +> +> > > > Probably not related to your problem, but there is a disconnect in +> +> > > > QEMU / +> +> > > > kernel assumptionsaround the presence of an HDM decoder when a HB only +> +> > > > has a single root port. Spec allows it to be provided or not as an +> +> > > > implementation choice. +> +> > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > +> +> > > > The temporary solution is to throw in a second root port on the HB +> +> > > > and not +> +> > > > connect anything to it. Longer term I may special case this so that +> +> > > > the particular +> +> > > > decoder defaults to pass through settings in QEMU if there is only +> +> > > > one root port. +> +> > > > +> +> > > +> +> > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > region successfully. 
+> +> > > But have some errors in Nvdimm: +> +> > > +> +> > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > assuming node 0 +> +> > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > assuming node 0 +> +> > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > +> +> > Ah. I've seen this one, but not chased it down yet. Was on my todo list +> +> > to chase +> +> > down. Once I reach this state I can verify the HDM Decode is correct +> +> > which is what +> +> > I've been using to test (Which wasn't true until earlier this week). +> +> > I'm currently testing via devmem, more for historical reasons than +> +> > because it makes +> +> > that much sense anymore. +> +> +> +> *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> I'd forgotten that was still on the todo list. I don't think it will +> +> be particularly hard to do and will take a look in next few days. +> +> +> +> Very very indirectly this error is causing a driver probe fail that means +> +> that +> +> we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> Should not have gotten near that path though - hence the problem is actually +> +> when we call cxl_pmem_get_config_data() and it returns an error because +> +> we haven't fully connected up the command in QEMU. +> +> +So a least one bug in QEMU. We were not supporting variable length payloads +> +on mailbox +> +inputs (but were on outputs). That hasn't mattered until we get to LSA +> +writes. +> +We just need to relax condition on the supplied length. +> +> +diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +index c352a935c4..fdda9529fe 100644 +> +--- a/hw/cxl/cxl-mailbox-utils.c +> ++++ b/hw/cxl/cxl-mailbox-utils.c +> +@@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +cxl_cmd = &cxl_cmd_set[set][cmd]; +> +h = cxl_cmd->handler; +> +if (h) { +> +- if (len == cxl_cmd->in) { +> ++ if (len == cxl_cmd->in || !cxl_cmd->in) { +> +cxl_cmd->payload = cxl_dstate->mbox_reg_state + +> +A_CXL_DEV_CMD_PAYLOAD; +> +ret = (*h)(cxl_cmd, cxl_dstate, &len); +> +> +> +This lets the nvdimm/region probe fine, but I'm getting some issues with +> +namespace capacity so I'll look at what is causing that next. +> +Unfortunately I'm not that familiar with the driver/nvdimm side of things +> +so it's take a while to figure out what kicks off what! +The whirlwind tour is that 'struct nd_region' instances that represent a +persitent memory address range are composed of one more mappings of +'struct nvdimm' objects. The nvdimm object is driven by the dimm driver +in drivers/nvdimm/dimm.c. That driver is mainly charged with unlocking +the dimm (if locked) and interrogating the label area to look for +namespace labels. + +The label command calls are routed to the '->ndctl()' callback that was +registered when the CXL nvdimm_bus_descriptor was created. That callback +handles both 'bus' scope calls, currently none for CXL, and per nvdimm +calls. cxl_pmem_nvdimm_ctl() translates those generic LIBNVDIMM commands +to CXL commands. + +The 'struct nvdimm' objects that the CXL side registers have the +NDD_LABELING flag set which means that namespaces need to be explicitly +created / provisioned from region capacity. Otherwise, if +drivers/nvdimm/dimm.c does not find a namespace-label-index block then +the region reverts to label-less mode and a default namespace equal to +the size of the region is instantiated. 
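+
+Roughly, in labeling mode the userspace flow is something like the following
+(the region name is just an example):
+
+  ndctl create-namespace -r region0    # carve a namespace out of the region's capacity
+  ndctl list -N -r region0             # inspect what was created
+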
+ +If you are seeing small mismatches in namespace capacity then it may +just be the fact that by default 'ndctl create-namespace' results in an +'fsdax' mode namespace which just means that it is a block device where +1.5% of the capacity is reserved for 'struct page' metadata. You should +be able to see namespace capacity == region capacity by doing "ndctl +create-namespace -m raw", and disable DAX operation. + +Hope that helps. + +On Fri, 12 Aug 2022 09:03:02 -0700 +Dan Williams wrote: + +> +Jonathan Cameron wrote: +> +> On Thu, 11 Aug 2022 18:08:57 +0100 +> +> Jonathan Cameron via wrote: +> +> +> +> > On Tue, 9 Aug 2022 17:08:25 +0100 +> +> > Jonathan Cameron wrote: +> +> > +> +> > > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > > Bobo WL wrote: +> +> > > +> +> > > > Hi Jonathan +> +> > > > +> +> > > > Thanks for your reply! +> +> > > > +> +> > > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > > wrote: +> +> > > > > +> +> > > > > Probably not related to your problem, but there is a disconnect in +> +> > > > > QEMU / +> +> > > > > kernel assumptionsaround the presence of an HDM decoder when a HB +> +> > > > > only +> +> > > > > has a single root port. Spec allows it to be provided or not as an +> +> > > > > implementation choice. +> +> > > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > > +> +> > > > > The temporary solution is to throw in a second root port on the HB +> +> > > > > and not +> +> > > > > connect anything to it. Longer term I may special case this so +> +> > > > > that the particular +> +> > > > > decoder defaults to pass through settings in QEMU if there is only +> +> > > > > one root port. +> +> > > > > +> +> > > > +> +> > > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > > region successfully. +> +> > > > But have some errors in Nvdimm: +> +> > > > +> +> > > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > > assuming node 0 +> +> > > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > > assuming node 0 +> +> > > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > > > +> +> > > +> +> > > Ah. I've seen this one, but not chased it down yet. Was on my todo +> +> > > list to chase +> +> > > down. Once I reach this state I can verify the HDM Decode is correct +> +> > > which is what +> +> > > I've been using to test (Which wasn't true until earlier this week). +> +> > > I'm currently testing via devmem, more for historical reasons than +> +> > > because it makes +> +> > > that much sense anymore. +> +> > +> +> > *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> > I'd forgotten that was still on the todo list. I don't think it will +> +> > be particularly hard to do and will take a look in next few days. +> +> > +> +> > Very very indirectly this error is causing a driver probe fail that means +> +> > that +> +> > we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> > Should not have gotten near that path though - hence the problem is +> +> > actually +> +> > when we call cxl_pmem_get_config_data() and it returns an error because +> +> > we haven't fully connected up the command in QEMU. +> +> +> +> So a least one bug in QEMU. We were not supporting variable length payloads +> +> on mailbox +> +> inputs (but were on outputs). That hasn't mattered until we get to LSA +> +> writes. +> +> We just need to relax condition on the supplied length. 
+> +> +> +> diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +> index c352a935c4..fdda9529fe 100644 +> +> --- a/hw/cxl/cxl-mailbox-utils.c +> +> +++ b/hw/cxl/cxl-mailbox-utils.c +> +> @@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +> cxl_cmd = &cxl_cmd_set[set][cmd]; +> +> h = cxl_cmd->handler; +> +> if (h) { +> +> - if (len == cxl_cmd->in) { +> +> + if (len == cxl_cmd->in || !cxl_cmd->in) { +> +> cxl_cmd->payload = cxl_dstate->mbox_reg_state + +> +> A_CXL_DEV_CMD_PAYLOAD; +> +> ret = (*h)(cxl_cmd, cxl_dstate, &len); +> +> +> +> +> +> This lets the nvdimm/region probe fine, but I'm getting some issues with +> +> namespace capacity so I'll look at what is causing that next. +> +> Unfortunately I'm not that familiar with the driver/nvdimm side of things +> +> so it's take a while to figure out what kicks off what! +> +> +The whirlwind tour is that 'struct nd_region' instances that represent a +> +persitent memory address range are composed of one more mappings of +> +'struct nvdimm' objects. The nvdimm object is driven by the dimm driver +> +in drivers/nvdimm/dimm.c. That driver is mainly charged with unlocking +> +the dimm (if locked) and interrogating the label area to look for +> +namespace labels. +> +> +The label command calls are routed to the '->ndctl()' callback that was +> +registered when the CXL nvdimm_bus_descriptor was created. That callback +> +handles both 'bus' scope calls, currently none for CXL, and per nvdimm +> +calls. cxl_pmem_nvdimm_ctl() translates those generic LIBNVDIMM commands +> +to CXL commands. +> +> +The 'struct nvdimm' objects that the CXL side registers have the +> +NDD_LABELING flag set which means that namespaces need to be explicitly +> +created / provisioned from region capacity. Otherwise, if +> +drivers/nvdimm/dimm.c does not find a namespace-label-index block then +> +the region reverts to label-less mode and a default namespace equal to +> +the size of the region is instantiated. +> +> +If you are seeing small mismatches in namespace capacity then it may +> +just be the fact that by default 'ndctl create-namespace' results in an +> +'fsdax' mode namespace which just means that it is a block device where +> +1.5% of the capacity is reserved for 'struct page' metadata. You should +> +be able to see namespace capacity == region capacity by doing "ndctl +> +create-namespace -m raw", and disable DAX operation. +Currently ndctl create-namespace crashes qemu ;) +Which isn't ideal! + +> +> +Hope that helps. +Got me looking at the right code. Thanks! + +Jonathan + +On Fri, 12 Aug 2022 17:15:09 +0100 +Jonathan Cameron wrote: + +> +On Fri, 12 Aug 2022 09:03:02 -0700 +> +Dan Williams wrote: +> +> +> Jonathan Cameron wrote: +> +> > On Thu, 11 Aug 2022 18:08:57 +0100 +> +> > Jonathan Cameron via wrote: +> +> > +> +> > > On Tue, 9 Aug 2022 17:08:25 +0100 +> +> > > Jonathan Cameron wrote: +> +> > > +> +> > > > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > > > Bobo WL wrote: +> +> > > > +> +> > > > > Hi Jonathan +> +> > > > > +> +> > > > > Thanks for your reply! +> +> > > > > +> +> > > > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > > > wrote: +> +> > > > > > +> +> > > > > > Probably not related to your problem, but there is a disconnect +> +> > > > > > in QEMU / +> +> > > > > > kernel assumptionsaround the presence of an HDM decoder when a HB +> +> > > > > > only +> +> > > > > > has a single root port. Spec allows it to be provided or not as +> +> > > > > > an implementation choice. 
+> +> > > > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > > > +> +> > > > > > The temporary solution is to throw in a second root port on the +> +> > > > > > HB and not +> +> > > > > > connect anything to it. Longer term I may special case this so +> +> > > > > > that the particular +> +> > > > > > decoder defaults to pass through settings in QEMU if there is +> +> > > > > > only one root port. +> +> > > > > > +> +> > > > > +> +> > > > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > > > region successfully. +> +> > > > > But have some errors in Nvdimm: +> +> > > > > +> +> > > > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > > > assuming node 0 +> +> > > > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > > > assuming node 0 +> +> > > > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > > > > +> +> > > > +> +> > > > Ah. I've seen this one, but not chased it down yet. Was on my todo +> +> > > > list to chase +> +> > > > down. Once I reach this state I can verify the HDM Decode is correct +> +> > > > which is what +> +> > > > I've been using to test (Which wasn't true until earlier this week). +> +> > > > I'm currently testing via devmem, more for historical reasons than +> +> > > > because it makes +> +> > > > that much sense anymore. +> +> > > +> +> > > *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> > > I'd forgotten that was still on the todo list. I don't think it will +> +> > > be particularly hard to do and will take a look in next few days. +> +> > > +> +> > > Very very indirectly this error is causing a driver probe fail that +> +> > > means that +> +> > > we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> > > Should not have gotten near that path though - hence the problem is +> +> > > actually +> +> > > when we call cxl_pmem_get_config_data() and it returns an error because +> +> > > we haven't fully connected up the command in QEMU. +> +> > +> +> > So a least one bug in QEMU. We were not supporting variable length +> +> > payloads on mailbox +> +> > inputs (but were on outputs). That hasn't mattered until we get to LSA +> +> > writes. +> +> > We just need to relax condition on the supplied length. +> +> > +> +> > diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +> > index c352a935c4..fdda9529fe 100644 +> +> > --- a/hw/cxl/cxl-mailbox-utils.c +> +> > +++ b/hw/cxl/cxl-mailbox-utils.c +> +> > @@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +> > cxl_cmd = &cxl_cmd_set[set][cmd]; +> +> > h = cxl_cmd->handler; +> +> > if (h) { +> +> > - if (len == cxl_cmd->in) { +> +> > + if (len == cxl_cmd->in || !cxl_cmd->in) { +> +> > cxl_cmd->payload = cxl_dstate->mbox_reg_state + +> +> > A_CXL_DEV_CMD_PAYLOAD; +> +> > ret = (*h)(cxl_cmd, cxl_dstate, &len); +> +> > +> +> > +> +> > This lets the nvdimm/region probe fine, but I'm getting some issues with +> +> > namespace capacity so I'll look at what is causing that next. +> +> > Unfortunately I'm not that familiar with the driver/nvdimm side of things +> +> > so it's take a while to figure out what kicks off what! +> +> +> +> The whirlwind tour is that 'struct nd_region' instances that represent a +> +> persitent memory address range are composed of one more mappings of +> +> 'struct nvdimm' objects. The nvdimm object is driven by the dimm driver +> +> in drivers/nvdimm/dimm.c. 
That driver is mainly charged with unlocking +> +> the dimm (if locked) and interrogating the label area to look for +> +> namespace labels. +> +> +> +> The label command calls are routed to the '->ndctl()' callback that was +> +> registered when the CXL nvdimm_bus_descriptor was created. That callback +> +> handles both 'bus' scope calls, currently none for CXL, and per nvdimm +> +> calls. cxl_pmem_nvdimm_ctl() translates those generic LIBNVDIMM commands +> +> to CXL commands. +> +> +> +> The 'struct nvdimm' objects that the CXL side registers have the +> +> NDD_LABELING flag set which means that namespaces need to be explicitly +> +> created / provisioned from region capacity. Otherwise, if +> +> drivers/nvdimm/dimm.c does not find a namespace-label-index block then +> +> the region reverts to label-less mode and a default namespace equal to +> +> the size of the region is instantiated. +> +> +> +> If you are seeing small mismatches in namespace capacity then it may +> +> just be the fact that by default 'ndctl create-namespace' results in an +> +> 'fsdax' mode namespace which just means that it is a block device where +> +> 1.5% of the capacity is reserved for 'struct page' metadata. You should +> +> be able to see namespace capacity == region capacity by doing "ndctl +> +> create-namespace -m raw", and disable DAX operation. +> +> +Currently ndctl create-namespace crashes qemu ;) +> +Which isn't ideal! +> +Found a cause for this one. Mailbox payload may be as small as 256 bytes. +We have code in kernel sanity checking that output payload fits in the +mailbox, but nothing on the input payload. Symptom is that we write just +off the end whatever size the payload is. Note doing this shouldn't crash +qemu - so I need to fix a range check somewhere. + +I think this is because cxl_pmem_get_config_size() returns the mailbox +payload size as being the available LSA size, forgetting to remove the +size of the headers on the set_lsa side of things. +https://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl.git/tree/drivers/cxl/pmem.c?h=next#n110 +I've hacked the max_payload to be -8 + +Now we still don't succeed in creating the namespace, but bonus is it doesn't +crash any more. + + +Jonathan + + + +> +> +> +> Hope that helps. +> +Got me looking at the right code. Thanks! +> +> +Jonathan +> +> + +On Mon, 15 Aug 2022 15:18:09 +0100 +Jonathan Cameron via wrote: + +> +On Fri, 12 Aug 2022 17:15:09 +0100 +> +Jonathan Cameron wrote: +> +> +> On Fri, 12 Aug 2022 09:03:02 -0700 +> +> Dan Williams wrote: +> +> +> +> > Jonathan Cameron wrote: +> +> > > On Thu, 11 Aug 2022 18:08:57 +0100 +> +> > > Jonathan Cameron via wrote: +> +> > > +> +> > > > On Tue, 9 Aug 2022 17:08:25 +0100 +> +> > > > Jonathan Cameron wrote: +> +> > > > +> +> > > > > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > > > > Bobo WL wrote: +> +> > > > > +> +> > > > > > Hi Jonathan +> +> > > > > > +> +> > > > > > Thanks for your reply! +> +> > > > > > +> +> > > > > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > > > > wrote: +> +> > > > > > > +> +> > > > > > > Probably not related to your problem, but there is a disconnect +> +> > > > > > > in QEMU / +> +> > > > > > > kernel assumptionsaround the presence of an HDM decoder when a +> +> > > > > > > HB only +> +> > > > > > > has a single root port. Spec allows it to be provided or not as +> +> > > > > > > an implementation choice. +> +> > > > > > > Kernel assumes it isn't provide. Qemu assumes it is. 
+> +> > > > > > > +> +> > > > > > > The temporary solution is to throw in a second root port on the +> +> > > > > > > HB and not +> +> > > > > > > connect anything to it. Longer term I may special case this so +> +> > > > > > > that the particular +> +> > > > > > > decoder defaults to pass through settings in QEMU if there is +> +> > > > > > > only one root port. +> +> > > > > > > +> +> > > > > > +> +> > > > > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > > > > region successfully. +> +> > > > > > But have some errors in Nvdimm: +> +> > > > > > +> +> > > > > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > > > > assuming node 0 +> +> > > > > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > > > > assuming node 0 +> +> > > > > > [ 74.927470] nd_region region0: nmem0: is disabled, failing +> +> > > > > > probe +> +> > > > > +> +> > > > > Ah. I've seen this one, but not chased it down yet. Was on my todo +> +> > > > > list to chase +> +> > > > > down. Once I reach this state I can verify the HDM Decode is +> +> > > > > correct which is what +> +> > > > > I've been using to test (Which wasn't true until earlier this +> +> > > > > week). +> +> > > > > I'm currently testing via devmem, more for historical reasons than +> +> > > > > because it makes +> +> > > > > that much sense anymore. +> +> > > > +> +> > > > *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> > > > I'd forgotten that was still on the todo list. I don't think it will +> +> > > > be particularly hard to do and will take a look in next few days. +> +> > > > +> +> > > > Very very indirectly this error is causing a driver probe fail that +> +> > > > means that +> +> > > > we hit a code path that has a rather odd looking check on +> +> > > > NDD_LABELING. +> +> > > > Should not have gotten near that path though - hence the problem is +> +> > > > actually +> +> > > > when we call cxl_pmem_get_config_data() and it returns an error +> +> > > > because +> +> > > > we haven't fully connected up the command in QEMU. +> +> > > +> +> > > So a least one bug in QEMU. We were not supporting variable length +> +> > > payloads on mailbox +> +> > > inputs (but were on outputs). That hasn't mattered until we get to LSA +> +> > > writes. +> +> > > We just need to relax condition on the supplied length. +> +> > > +> +> > > diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +> > > index c352a935c4..fdda9529fe 100644 +> +> > > --- a/hw/cxl/cxl-mailbox-utils.c +> +> > > +++ b/hw/cxl/cxl-mailbox-utils.c +> +> > > @@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +> > > cxl_cmd = &cxl_cmd_set[set][cmd]; +> +> > > h = cxl_cmd->handler; +> +> > > if (h) { +> +> > > - if (len == cxl_cmd->in) { +> +> > > + if (len == cxl_cmd->in || !cxl_cmd->in) { +> +> > > cxl_cmd->payload = cxl_dstate->mbox_reg_state + +> +> > > A_CXL_DEV_CMD_PAYLOAD; +> +> > > ret = (*h)(cxl_cmd, cxl_dstate, &len); +> +> > > +> +> > > +> +> > > This lets the nvdimm/region probe fine, but I'm getting some issues with +> +> > > namespace capacity so I'll look at what is causing that next. +> +> > > Unfortunately I'm not that familiar with the driver/nvdimm side of +> +> > > things +> +> > > so it's take a while to figure out what kicks off what! +> +> > +> +> > The whirlwind tour is that 'struct nd_region' instances that represent a +> +> > persitent memory address range are composed of one more mappings of +> +> > 'struct nvdimm' objects. 
The nvdimm object is driven by the dimm driver +> +> > in drivers/nvdimm/dimm.c. That driver is mainly charged with unlocking +> +> > the dimm (if locked) and interrogating the label area to look for +> +> > namespace labels. +> +> > +> +> > The label command calls are routed to the '->ndctl()' callback that was +> +> > registered when the CXL nvdimm_bus_descriptor was created. That callback +> +> > handles both 'bus' scope calls, currently none for CXL, and per nvdimm +> +> > calls. cxl_pmem_nvdimm_ctl() translates those generic LIBNVDIMM commands +> +> > to CXL commands. +> +> > +> +> > The 'struct nvdimm' objects that the CXL side registers have the +> +> > NDD_LABELING flag set which means that namespaces need to be explicitly +> +> > created / provisioned from region capacity. Otherwise, if +> +> > drivers/nvdimm/dimm.c does not find a namespace-label-index block then +> +> > the region reverts to label-less mode and a default namespace equal to +> +> > the size of the region is instantiated. +> +> > +> +> > If you are seeing small mismatches in namespace capacity then it may +> +> > just be the fact that by default 'ndctl create-namespace' results in an +> +> > 'fsdax' mode namespace which just means that it is a block device where +> +> > 1.5% of the capacity is reserved for 'struct page' metadata. You should +> +> > be able to see namespace capacity == region capacity by doing "ndctl +> +> > create-namespace -m raw", and disable DAX operation. +> +> +> +> Currently ndctl create-namespace crashes qemu ;) +> +> Which isn't ideal! +> +> +> +> +Found a cause for this one. Mailbox payload may be as small as 256 bytes. +> +We have code in kernel sanity checking that output payload fits in the +> +mailbox, but nothing on the input payload. Symptom is that we write just +> +off the end whatever size the payload is. Note doing this shouldn't crash +> +qemu - so I need to fix a range check somewhere. +> +> +I think this is because cxl_pmem_get_config_size() returns the mailbox +> +payload size as being the available LSA size, forgetting to remove the +> +size of the headers on the set_lsa side of things. +> +https://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl.git/tree/drivers/cxl/pmem.c?h=next#n110 +> +> +I've hacked the max_payload to be -8 +> +> +Now we still don't succeed in creating the namespace, but bonus is it doesn't +> +crash any more. +In the interests of defensive / correct handling from QEMU I took a +look into why it was crashing. Turns out that providing a NULL write callback +for +the memory device region (that the above overlarge write was spilling into) +isn't +a safe thing to do. Needs a stub. Oops. + +On plus side we might never have noticed this was going wrong without the crash +*silver lining in every cloud* + +Fix to follow... + +Jonathan + + +> +> +> +Jonathan +> +> +> +> +> > +> +> > Hope that helps. +> +> Got me looking at the right code. Thanks! +> +> +> +> Jonathan +> +> +> +> +> +> + +On Mon, 15 Aug 2022 at 15:55, Jonathan Cameron via wrote: +> +In the interests of defensive / correct handling from QEMU I took a +> +look into why it was crashing. Turns out that providing a NULL write +> +callback for +> +the memory device region (that the above overlarge write was spilling into) +> +isn't +> +a safe thing to do. Needs a stub. Oops. +Yeah. We've talked before about adding an assert so that that kind of +"missing function" bug is caught at device creation rather than only +if the guest tries to access the device, but we never quite got around +to it... 
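+
+A minimal sketch of the sort of stub in question (function and field names
+here are illustrative, not the actual QEMU patch):
+
+static void cxl_md_reg_write_stub(void *opaque, hwaddr offset,
+                                  uint64_t value, unsigned size)
+{
+    /* Discard guest writes instead of jumping through a NULL pointer. */
+}
+
+static const MemoryRegionOps cxl_md_reg_ops = {
+    .read = cxl_md_reg_read,    /* existing read handler, name assumed */
+    .write = cxl_md_reg_write_stub,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+Even a do-nothing write handler like this keeps an overlarge or stray guest
+write from becoming a crash, which is the defensive behaviour being aimed for
+above.
+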
+ +-- PMM + +On Fri, 12 Aug 2022 16:44:03 +0100 +Jonathan Cameron wrote: + +> +On Thu, 11 Aug 2022 18:08:57 +0100 +> +Jonathan Cameron via wrote: +> +> +> On Tue, 9 Aug 2022 17:08:25 +0100 +> +> Jonathan Cameron wrote: +> +> +> +> > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > Bobo WL wrote: +> +> > +> +> > > Hi Jonathan +> +> > > +> +> > > Thanks for your reply! +> +> > > +> +> > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > wrote: +> +> > > > +> +> > > > Probably not related to your problem, but there is a disconnect in +> +> > > > QEMU / +> +> > > > kernel assumptionsaround the presence of an HDM decoder when a HB only +> +> > > > has a single root port. Spec allows it to be provided or not as an +> +> > > > implementation choice. +> +> > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > +> +> > > > The temporary solution is to throw in a second root port on the HB +> +> > > > and not +> +> > > > connect anything to it. Longer term I may special case this so that +> +> > > > the particular +> +> > > > decoder defaults to pass through settings in QEMU if there is only +> +> > > > one root port. +> +> > > > +> +> > > +> +> > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > region successfully. +> +> > > But have some errors in Nvdimm: +> +> > > +> +> > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > assuming node 0 +> +> > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > assuming node 0 +> +> > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > > +> +> > +> +> > Ah. I've seen this one, but not chased it down yet. Was on my todo list +> +> > to chase +> +> > down. Once I reach this state I can verify the HDM Decode is correct +> +> > which is what +> +> > I've been using to test (Which wasn't true until earlier this week). +> +> > I'm currently testing via devmem, more for historical reasons than +> +> > because it makes +> +> > that much sense anymore. +> +> +> +> *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> I'd forgotten that was still on the todo list. I don't think it will +> +> be particularly hard to do and will take a look in next few days. +> +> +> +> Very very indirectly this error is causing a driver probe fail that means +> +> that +> +> we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> Should not have gotten near that path though - hence the problem is actually +> +> when we call cxl_pmem_get_config_data() and it returns an error because +> +> we haven't fully connected up the command in QEMU. +> +> +So a least one bug in QEMU. We were not supporting variable length payloads +> +on mailbox +> +inputs (but were on outputs). That hasn't mattered until we get to LSA +> +writes. +> +We just need to relax condition on the supplied length. +> +> +diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +index c352a935c4..fdda9529fe 100644 +> +--- a/hw/cxl/cxl-mailbox-utils.c +> ++++ b/hw/cxl/cxl-mailbox-utils.c +> +@@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +cxl_cmd = &cxl_cmd_set[set][cmd]; +> +h = cxl_cmd->handler; +> +if (h) { +> +- if (len == cxl_cmd->in) { +> ++ if (len == cxl_cmd->in || !cxl_cmd->in) { +Fix is wrong as we use ~0 as the placeholder for variable payload, not 0. + +With that fixed we hit new fun paths - after some errors we get the +worrying - not totally sure but looks like a failure on an error cleanup. 
+I'll chase down the error source, but even then this is probably triggerable by +hardware problem or similar. Some bonus prints in here from me chasing +error paths, but it's otherwise just cxl/next + the fix I posted earlier today. + +[ 69.919877] nd_bus ndbus0: START: nd_region.probe(region0) +[ 69.920108] nd_region_probe +[ 69.920623] ------------[ cut here ]------------ +[ 69.920675] refcount_t: addition on 0; use-after-free. +[ 69.921314] WARNING: CPU: 3 PID: 710 at lib/refcount.c:25 +refcount_warn_saturate+0xa0/0x144 +[ 69.926949] Modules linked in: cxl_pmem cxl_mem cxl_pci cxl_port cxl_acpi +cxl_core +[ 69.928830] CPU: 3 PID: 710 Comm: kworker/u8:9 Not tainted 5.19.0-rc3+ #399 +[ 69.930596] Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 +[ 69.931482] Workqueue: events_unbound async_run_entry_fn +[ 69.932403] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +[ 69.934023] pc : refcount_warn_saturate+0xa0/0x144 +[ 69.935161] lr : refcount_warn_saturate+0xa0/0x144 +[ 69.936541] sp : ffff80000890b960 +[ 69.937921] x29: ffff80000890b960 x28: 0000000000000000 x27: 0000000000000000 +[ 69.940917] x26: ffffa54a90d5cb10 x25: ffffa54a90809e98 x24: 0000000000000000 +[ 69.942537] x23: ffffa54a91a3d8d8 x22: ffff0000c5254800 x21: ffff0000c5254800 +[ 69.944013] x20: ffff0000ce924180 x19: ffff0000c5254800 x18: ffffffffffffffff +[ 69.946100] x17: ffff5ab66e5ef000 x16: ffff80000801c000 x15: 0000000000000000 +[ 69.947585] x14: 0000000000000001 x13: 0a2e656572662d72 x12: 657466612d657375 +[ 69.948670] x11: 203b30206e6f206e x10: 6f69746964646120 x9 : ffffa54a8f63d288 +[ 69.950679] x8 : 206e6f206e6f6974 x7 : 69646461203a745f x6 : 00000000fffff31e +[ 69.952113] x5 : ffff0000ff61ba08 x4 : 00000000fffff31e x3 : ffff5ab66e5ef000 +root@debian:/sys/bus/cxl/devices/decoder0.0/region0# [ 69.954752] x2 : +0000000000000000 x1 : 0000000000000000 x0 : ffff0000c512e740 +[ 69.957098] Call trace: +[ 69.957959] refcount_warn_saturate+0xa0/0x144 +[ 69.958773] get_ndd+0x5c/0x80 +[ 69.959294] nd_region_register_namespaces+0xe4/0xe90 +[ 69.960253] nd_region_probe+0x100/0x290 +[ 69.960796] nvdimm_bus_probe+0xf4/0x1c0 +[ 69.962087] really_probe+0x19c/0x3f0 +[ 69.962620] __driver_probe_device+0x11c/0x190 +[ 69.963258] driver_probe_device+0x44/0xf4 +[ 69.963773] __device_attach_driver+0xa4/0x140 +[ 69.964471] bus_for_each_drv+0x84/0xe0 +[ 69.965068] __device_attach+0xb0/0x1f0 +[ 69.966101] device_initial_probe+0x20/0x30 +[ 69.967142] bus_probe_device+0xa4/0xb0 +[ 69.968104] device_add+0x3e8/0x910 +[ 69.969111] nd_async_device_register+0x24/0x74 +[ 69.969928] async_run_entry_fn+0x40/0x150 +[ 69.970725] process_one_work+0x1dc/0x450 +[ 69.971796] worker_thread+0x154/0x450 +[ 69.972700] kthread+0x118/0x120 +[ 69.974141] ret_from_fork+0x10/0x20 +[ 69.975141] ---[ end trace 0000000000000000 ]--- +[ 70.117887] Into nd_namespace_pmem_set_resource() + +> +cxl_cmd->payload = cxl_dstate->mbox_reg_state + +> +A_CXL_DEV_CMD_PAYLOAD; +> +ret = (*h)(cxl_cmd, cxl_dstate, &len); +> +> +> +This lets the nvdimm/region probe fine, but I'm getting some issues with +> +namespace capacity so I'll look at what is causing that next. +> +Unfortunately I'm not that familiar with the driver/nvdimm side of things +> +so it's take a while to figure out what kicks off what! +> +> +Jonathan +> +> +> +> +> Jonathan +> +> +> +> +> +> > +> +> > > +> +> > > And x4 region still failed with same errors, using latest cxl/preview +> +> > > branch don't work. 
+> +> > > I have picked "Two CXL emulation fixes" patches in qemu, still not +> +> > > working. +> +> > > +> +> > > Bob +> +> +> +> +> + +On Mon, 15 Aug 2022 18:04:44 +0100 +Jonathan Cameron wrote: + +> +On Fri, 12 Aug 2022 16:44:03 +0100 +> +Jonathan Cameron wrote: +> +> +> On Thu, 11 Aug 2022 18:08:57 +0100 +> +> Jonathan Cameron via wrote: +> +> +> +> > On Tue, 9 Aug 2022 17:08:25 +0100 +> +> > Jonathan Cameron wrote: +> +> > +> +> > > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > > Bobo WL wrote: +> +> > > +> +> > > > Hi Jonathan +> +> > > > +> +> > > > Thanks for your reply! +> +> > > > +> +> > > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > > wrote: +> +> > > > > +> +> > > > > Probably not related to your problem, but there is a disconnect in +> +> > > > > QEMU / +> +> > > > > kernel assumptionsaround the presence of an HDM decoder when a HB +> +> > > > > only +> +> > > > > has a single root port. Spec allows it to be provided or not as an +> +> > > > > implementation choice. +> +> > > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > > +> +> > > > > The temporary solution is to throw in a second root port on the HB +> +> > > > > and not +> +> > > > > connect anything to it. Longer term I may special case this so +> +> > > > > that the particular +> +> > > > > decoder defaults to pass through settings in QEMU if there is only +> +> > > > > one root port. +> +> > > > > +> +> > > > +> +> > > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > > region successfully. +> +> > > > But have some errors in Nvdimm: +> +> > > > +> +> > > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > > assuming node 0 +> +> > > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > > assuming node 0 +> +> > > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > > > +> +> > > +> +> > > Ah. I've seen this one, but not chased it down yet. Was on my todo +> +> > > list to chase +> +> > > down. Once I reach this state I can verify the HDM Decode is correct +> +> > > which is what +> +> > > I've been using to test (Which wasn't true until earlier this week). +> +> > > I'm currently testing via devmem, more for historical reasons than +> +> > > because it makes +> +> > > that much sense anymore. +> +> > +> +> > *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> > I'd forgotten that was still on the todo list. I don't think it will +> +> > be particularly hard to do and will take a look in next few days. +> +> > +> +> > Very very indirectly this error is causing a driver probe fail that means +> +> > that +> +> > we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> > Should not have gotten near that path though - hence the problem is +> +> > actually +> +> > when we call cxl_pmem_get_config_data() and it returns an error because +> +> > we haven't fully connected up the command in QEMU. +> +> +> +> So a least one bug in QEMU. We were not supporting variable length payloads +> +> on mailbox +> +> inputs (but were on outputs). That hasn't mattered until we get to LSA +> +> writes. +> +> We just need to relax condition on the supplied length. 
+> +> +> +> diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +> index c352a935c4..fdda9529fe 100644 +> +> --- a/hw/cxl/cxl-mailbox-utils.c +> +> +++ b/hw/cxl/cxl-mailbox-utils.c +> +> @@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +> cxl_cmd = &cxl_cmd_set[set][cmd]; +> +> h = cxl_cmd->handler; +> +> if (h) { +> +> - if (len == cxl_cmd->in) { +> +> + if (len == cxl_cmd->in || !cxl_cmd->in) { +> +Fix is wrong as we use ~0 as the placeholder for variable payload, not 0. +Cause of the error is a failure in GET_LSA. +Reason, payload length is wrong in QEMU but was hidden previously by my wrong +fix here. Probably still a good idea to inject an error in GET_LSA and chase +down the refcount issue. + + +diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +index fdda9529fe..e8565fbd6e 100644 +--- a/hw/cxl/cxl-mailbox-utils.c ++++ b/hw/cxl/cxl-mailbox-utils.c +@@ -489,7 +489,7 @@ static struct cxl_cmd cxl_cmd_set[256][256] = { + cmd_identify_memory_device, 0, 0 }, + [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO", + cmd_ccls_get_partition_info, 0, 0 }, +- [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 0, 0 }, ++ [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, + [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, + ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE }, + [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST", +@@ -510,12 +510,13 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) + cxl_cmd = &cxl_cmd_set[set][cmd]; + h = cxl_cmd->handler; + if (h) { +- if (len == cxl_cmd->in || !cxl_cmd->in) { ++ if (len == cxl_cmd->in || cxl_cmd->in == ~0) { + cxl_cmd->payload = cxl_dstate->mbox_reg_state + + A_CXL_DEV_CMD_PAYLOAD; + +And woot, we get a namespace in the LSA :) + +I'll post QEMU fixes in next day or two. Kernel side now seems more or less +fine be it with suspicious refcount underflow. + +> +> +With that fixed we hit new fun paths - after some errors we get the +> +worrying - not totally sure but looks like a failure on an error cleanup. +> +I'll chase down the error source, but even then this is probably triggerable +> +by +> +hardware problem or similar. Some bonus prints in here from me chasing +> +error paths, but it's otherwise just cxl/next + the fix I posted earlier +> +today. +> +> +[ 69.919877] nd_bus ndbus0: START: nd_region.probe(region0) +> +[ 69.920108] nd_region_probe +> +[ 69.920623] ------------[ cut here ]------------ +> +[ 69.920675] refcount_t: addition on 0; use-after-free. 
+> +[ 69.921314] WARNING: CPU: 3 PID: 710 at lib/refcount.c:25 +> +refcount_warn_saturate+0xa0/0x144 +> +[ 69.926949] Modules linked in: cxl_pmem cxl_mem cxl_pci cxl_port cxl_acpi +> +cxl_core +> +[ 69.928830] CPU: 3 PID: 710 Comm: kworker/u8:9 Not tainted 5.19.0-rc3+ #399 +> +[ 69.930596] Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 +> +[ 69.931482] Workqueue: events_unbound async_run_entry_fn +> +[ 69.932403] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +> +[ 69.934023] pc : refcount_warn_saturate+0xa0/0x144 +> +[ 69.935161] lr : refcount_warn_saturate+0xa0/0x144 +> +[ 69.936541] sp : ffff80000890b960 +> +[ 69.937921] x29: ffff80000890b960 x28: 0000000000000000 x27: +> +0000000000000000 +> +[ 69.940917] x26: ffffa54a90d5cb10 x25: ffffa54a90809e98 x24: +> +0000000000000000 +> +[ 69.942537] x23: ffffa54a91a3d8d8 x22: ffff0000c5254800 x21: +> +ffff0000c5254800 +> +[ 69.944013] x20: ffff0000ce924180 x19: ffff0000c5254800 x18: +> +ffffffffffffffff +> +[ 69.946100] x17: ffff5ab66e5ef000 x16: ffff80000801c000 x15: +> +0000000000000000 +> +[ 69.947585] x14: 0000000000000001 x13: 0a2e656572662d72 x12: +> +657466612d657375 +> +[ 69.948670] x11: 203b30206e6f206e x10: 6f69746964646120 x9 : +> +ffffa54a8f63d288 +> +[ 69.950679] x8 : 206e6f206e6f6974 x7 : 69646461203a745f x6 : +> +00000000fffff31e +> +[ 69.952113] x5 : ffff0000ff61ba08 x4 : 00000000fffff31e x3 : +> +ffff5ab66e5ef000 +> +root@debian:/sys/bus/cxl/devices/decoder0.0/region0# [ 69.954752] x2 : +> +0000000000000000 x1 : 0000000000000000 x0 : ffff0000c512e740 +> +[ 69.957098] Call trace: +> +[ 69.957959] refcount_warn_saturate+0xa0/0x144 +> +[ 69.958773] get_ndd+0x5c/0x80 +> +[ 69.959294] nd_region_register_namespaces+0xe4/0xe90 +> +[ 69.960253] nd_region_probe+0x100/0x290 +> +[ 69.960796] nvdimm_bus_probe+0xf4/0x1c0 +> +[ 69.962087] really_probe+0x19c/0x3f0 +> +[ 69.962620] __driver_probe_device+0x11c/0x190 +> +[ 69.963258] driver_probe_device+0x44/0xf4 +> +[ 69.963773] __device_attach_driver+0xa4/0x140 +> +[ 69.964471] bus_for_each_drv+0x84/0xe0 +> +[ 69.965068] __device_attach+0xb0/0x1f0 +> +[ 69.966101] device_initial_probe+0x20/0x30 +> +[ 69.967142] bus_probe_device+0xa4/0xb0 +> +[ 69.968104] device_add+0x3e8/0x910 +> +[ 69.969111] nd_async_device_register+0x24/0x74 +> +[ 69.969928] async_run_entry_fn+0x40/0x150 +> +[ 69.970725] process_one_work+0x1dc/0x450 +> +[ 69.971796] worker_thread+0x154/0x450 +> +[ 69.972700] kthread+0x118/0x120 +> +[ 69.974141] ret_from_fork+0x10/0x20 +> +[ 69.975141] ---[ end trace 0000000000000000 ]--- +> +[ 70.117887] Into nd_namespace_pmem_set_resource() +> +> +> cxl_cmd->payload = cxl_dstate->mbox_reg_state + +> +> A_CXL_DEV_CMD_PAYLOAD; +> +> ret = (*h)(cxl_cmd, cxl_dstate, &len); +> +> +> +> +> +> This lets the nvdimm/region probe fine, but I'm getting some issues with +> +> namespace capacity so I'll look at what is causing that next. +> +> Unfortunately I'm not that familiar with the driver/nvdimm side of things +> +> so it's take a while to figure out what kicks off what! +> +> +> +> Jonathan +> +> +> +> > +> +> > Jonathan +> +> > +> +> > +> +> > > +> +> > > > +> +> > > > And x4 region still failed with same errors, using latest cxl/preview +> +> > > > branch don't work. +> +> > > > I have picked "Two CXL emulation fixes" patches in qemu, still not +> +> > > > working. 
+> +> > > > +> +> > > > Bob +> +> > +> +> > +> +> +> + +Jonathan Cameron wrote: +> +On Fri, 12 Aug 2022 16:44:03 +0100 +> +Jonathan Cameron wrote: +> +> +> On Thu, 11 Aug 2022 18:08:57 +0100 +> +> Jonathan Cameron via wrote: +> +> +> +> > On Tue, 9 Aug 2022 17:08:25 +0100 +> +> > Jonathan Cameron wrote: +> +> > +> +> > > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > > Bobo WL wrote: +> +> > > +> +> > > > Hi Jonathan +> +> > > > +> +> > > > Thanks for your reply! +> +> > > > +> +> > > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > > wrote: +> +> > > > > +> +> > > > > Probably not related to your problem, but there is a disconnect in +> +> > > > > QEMU / +> +> > > > > kernel assumptionsaround the presence of an HDM decoder when a HB +> +> > > > > only +> +> > > > > has a single root port. Spec allows it to be provided or not as an +> +> > > > > implementation choice. +> +> > > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > > +> +> > > > > The temporary solution is to throw in a second root port on the HB +> +> > > > > and not +> +> > > > > connect anything to it. Longer term I may special case this so +> +> > > > > that the particular +> +> > > > > decoder defaults to pass through settings in QEMU if there is only +> +> > > > > one root port. +> +> > > > > +> +> > > > +> +> > > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > > region successfully. +> +> > > > But have some errors in Nvdimm: +> +> > > > +> +> > > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > > assuming node 0 +> +> > > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > > assuming node 0 +> +> > > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > > > +> +> > > +> +> > > Ah. I've seen this one, but not chased it down yet. Was on my todo +> +> > > list to chase +> +> > > down. Once I reach this state I can verify the HDM Decode is correct +> +> > > which is what +> +> > > I've been using to test (Which wasn't true until earlier this week). +> +> > > I'm currently testing via devmem, more for historical reasons than +> +> > > because it makes +> +> > > that much sense anymore. +> +> > +> +> > *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> > I'd forgotten that was still on the todo list. I don't think it will +> +> > be particularly hard to do and will take a look in next few days. +> +> > +> +> > Very very indirectly this error is causing a driver probe fail that means +> +> > that +> +> > we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> > Should not have gotten near that path though - hence the problem is +> +> > actually +> +> > when we call cxl_pmem_get_config_data() and it returns an error because +> +> > we haven't fully connected up the command in QEMU. +> +> +> +> So a least one bug in QEMU. We were not supporting variable length payloads +> +> on mailbox +> +> inputs (but were on outputs). That hasn't mattered until we get to LSA +> +> writes. +> +> We just need to relax condition on the supplied length. 
+> +> +> +> diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +> index c352a935c4..fdda9529fe 100644 +> +> --- a/hw/cxl/cxl-mailbox-utils.c +> +> +++ b/hw/cxl/cxl-mailbox-utils.c +> +> @@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +> cxl_cmd = &cxl_cmd_set[set][cmd]; +> +> h = cxl_cmd->handler; +> +> if (h) { +> +> - if (len == cxl_cmd->in) { +> +> + if (len == cxl_cmd->in || !cxl_cmd->in) { +> +Fix is wrong as we use ~0 as the placeholder for variable payload, not 0. +> +> +With that fixed we hit new fun paths - after some errors we get the +> +worrying - not totally sure but looks like a failure on an error cleanup. +> +I'll chase down the error source, but even then this is probably triggerable +> +by +> +hardware problem or similar. Some bonus prints in here from me chasing +> +error paths, but it's otherwise just cxl/next + the fix I posted earlier +> +today. +One of the scenarios that I cannot rule out is nvdimm_probe() racing +nd_region_probe(), but given all the work it takes to create a region I +suspect all the nvdimm_probe() work to have completed... + +It is at least one potentially wrong hypothesis that needs to be chased +down. + +> +> +[ 69.919877] nd_bus ndbus0: START: nd_region.probe(region0) +> +[ 69.920108] nd_region_probe +> +[ 69.920623] ------------[ cut here ]------------ +> +[ 69.920675] refcount_t: addition on 0; use-after-free. +> +[ 69.921314] WARNING: CPU: 3 PID: 710 at lib/refcount.c:25 +> +refcount_warn_saturate+0xa0/0x144 +> +[ 69.926949] Modules linked in: cxl_pmem cxl_mem cxl_pci cxl_port cxl_acpi +> +cxl_core +> +[ 69.928830] CPU: 3 PID: 710 Comm: kworker/u8:9 Not tainted 5.19.0-rc3+ #399 +> +[ 69.930596] Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 +> +[ 69.931482] Workqueue: events_unbound async_run_entry_fn +> +[ 69.932403] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +> +[ 69.934023] pc : refcount_warn_saturate+0xa0/0x144 +> +[ 69.935161] lr : refcount_warn_saturate+0xa0/0x144 +> +[ 69.936541] sp : ffff80000890b960 +> +[ 69.937921] x29: ffff80000890b960 x28: 0000000000000000 x27: +> +0000000000000000 +> +[ 69.940917] x26: ffffa54a90d5cb10 x25: ffffa54a90809e98 x24: +> +0000000000000000 +> +[ 69.942537] x23: ffffa54a91a3d8d8 x22: ffff0000c5254800 x21: +> +ffff0000c5254800 +> +[ 69.944013] x20: ffff0000ce924180 x19: ffff0000c5254800 x18: +> +ffffffffffffffff +> +[ 69.946100] x17: ffff5ab66e5ef000 x16: ffff80000801c000 x15: +> +0000000000000000 +> +[ 69.947585] x14: 0000000000000001 x13: 0a2e656572662d72 x12: +> +657466612d657375 +> +[ 69.948670] x11: 203b30206e6f206e x10: 6f69746964646120 x9 : +> +ffffa54a8f63d288 +> +[ 69.950679] x8 : 206e6f206e6f6974 x7 : 69646461203a745f x6 : +> +00000000fffff31e +> +[ 69.952113] x5 : ffff0000ff61ba08 x4 : 00000000fffff31e x3 : +> +ffff5ab66e5ef000 +> +root@debian:/sys/bus/cxl/devices/decoder0.0/region0# [ 69.954752] x2 : +> +0000000000000000 x1 : 0000000000000000 x0 : ffff0000c512e740 +> +[ 69.957098] Call trace: +> +[ 69.957959] refcount_warn_saturate+0xa0/0x144 +> +[ 69.958773] get_ndd+0x5c/0x80 +> +[ 69.959294] nd_region_register_namespaces+0xe4/0xe90 +> +[ 69.960253] nd_region_probe+0x100/0x290 +> +[ 69.960796] nvdimm_bus_probe+0xf4/0x1c0 +> +[ 69.962087] really_probe+0x19c/0x3f0 +> +[ 69.962620] __driver_probe_device+0x11c/0x190 +> +[ 69.963258] driver_probe_device+0x44/0xf4 +> +[ 69.963773] __device_attach_driver+0xa4/0x140 +> +[ 69.964471] bus_for_each_drv+0x84/0xe0 +> +[ 69.965068] __device_attach+0xb0/0x1f0 +> +[ 
69.966101] device_initial_probe+0x20/0x30 +> +[ 69.967142] bus_probe_device+0xa4/0xb0 +> +[ 69.968104] device_add+0x3e8/0x910 +> +[ 69.969111] nd_async_device_register+0x24/0x74 +> +[ 69.969928] async_run_entry_fn+0x40/0x150 +> +[ 69.970725] process_one_work+0x1dc/0x450 +> +[ 69.971796] worker_thread+0x154/0x450 +> +[ 69.972700] kthread+0x118/0x120 +> +[ 69.974141] ret_from_fork+0x10/0x20 +> +[ 69.975141] ---[ end trace 0000000000000000 ]--- +> +[ 70.117887] Into nd_namespace_pmem_set_resource() + +On Mon, 15 Aug 2022 15:55:15 -0700 +Dan Williams wrote: + +> +Jonathan Cameron wrote: +> +> On Fri, 12 Aug 2022 16:44:03 +0100 +> +> Jonathan Cameron wrote: +> +> +> +> > On Thu, 11 Aug 2022 18:08:57 +0100 +> +> > Jonathan Cameron via wrote: +> +> > +> +> > > On Tue, 9 Aug 2022 17:08:25 +0100 +> +> > > Jonathan Cameron wrote: +> +> > > +> +> > > > On Tue, 9 Aug 2022 21:07:06 +0800 +> +> > > > Bobo WL wrote: +> +> > > > +> +> > > > > Hi Jonathan +> +> > > > > +> +> > > > > Thanks for your reply! +> +> > > > > +> +> > > > > On Mon, Aug 8, 2022 at 8:37 PM Jonathan Cameron +> +> > > > > wrote: +> +> > > > > > +> +> > > > > > Probably not related to your problem, but there is a disconnect +> +> > > > > > in QEMU / +> +> > > > > > kernel assumptionsaround the presence of an HDM decoder when a HB +> +> > > > > > only +> +> > > > > > has a single root port. Spec allows it to be provided or not as +> +> > > > > > an implementation choice. +> +> > > > > > Kernel assumes it isn't provide. Qemu assumes it is. +> +> > > > > > +> +> > > > > > The temporary solution is to throw in a second root port on the +> +> > > > > > HB and not +> +> > > > > > connect anything to it. Longer term I may special case this so +> +> > > > > > that the particular +> +> > > > > > decoder defaults to pass through settings in QEMU if there is +> +> > > > > > only one root port. +> +> > > > > > +> +> > > > > +> +> > > > > You are right! After adding an extra HB in qemu, I can create a x1 +> +> > > > > region successfully. +> +> > > > > But have some errors in Nvdimm: +> +> > > > > +> +> > > > > [ 74.925838] Unknown online node for memory at 0x10000000000, +> +> > > > > assuming node 0 +> +> > > > > [ 74.925846] Unknown target node for memory at 0x10000000000, +> +> > > > > assuming node 0 +> +> > > > > [ 74.927470] nd_region region0: nmem0: is disabled, failing probe +> +> > > > > +> +> > > > +> +> > > > Ah. I've seen this one, but not chased it down yet. Was on my todo +> +> > > > list to chase +> +> > > > down. Once I reach this state I can verify the HDM Decode is correct +> +> > > > which is what +> +> > > > I've been using to test (Which wasn't true until earlier this week). +> +> > > > I'm currently testing via devmem, more for historical reasons than +> +> > > > because it makes +> +> > > > that much sense anymore. +> +> > > +> +> > > *embarassed cough*. We haven't fully hooked the LSA up in qemu yet. +> +> > > I'd forgotten that was still on the todo list. I don't think it will +> +> > > be particularly hard to do and will take a look in next few days. +> +> > > +> +> > > Very very indirectly this error is causing a driver probe fail that +> +> > > means that +> +> > > we hit a code path that has a rather odd looking check on NDD_LABELING. +> +> > > Should not have gotten near that path though - hence the problem is +> +> > > actually +> +> > > when we call cxl_pmem_get_config_data() and it returns an error because +> +> > > we haven't fully connected up the command in QEMU. +> +> > +> +> > So a least one bug in QEMU. 
We were not supporting variable length +> +> > payloads on mailbox +> +> > inputs (but were on outputs). That hasn't mattered until we get to LSA +> +> > writes. +> +> > We just need to relax condition on the supplied length. +> +> > +> +> > diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c +> +> > index c352a935c4..fdda9529fe 100644 +> +> > --- a/hw/cxl/cxl-mailbox-utils.c +> +> > +++ b/hw/cxl/cxl-mailbox-utils.c +> +> > @@ -510,7 +510,7 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) +> +> > cxl_cmd = &cxl_cmd_set[set][cmd]; +> +> > h = cxl_cmd->handler; +> +> > if (h) { +> +> > - if (len == cxl_cmd->in) { +> +> > + if (len == cxl_cmd->in || !cxl_cmd->in) { +> +> Fix is wrong as we use ~0 as the placeholder for variable payload, not 0. +> +> +> +> With that fixed we hit new fun paths - after some errors we get the +> +> worrying - not totally sure but looks like a failure on an error cleanup. +> +> I'll chase down the error source, but even then this is probably +> +> triggerable by +> +> hardware problem or similar. Some bonus prints in here from me chasing +> +> error paths, but it's otherwise just cxl/next + the fix I posted earlier +> +> today. +> +> +One of the scenarios that I cannot rule out is nvdimm_probe() racing +> +nd_region_probe(), but given all the work it takes to create a region I +> +suspect all the nvdimm_probe() work to have completed... +> +> +It is at least one potentially wrong hypothesis that needs to be chased +> +down. +Maybe there should be a special award for the non-intuitive +ndctl create-namespace command (modifies existing namespace and might create +a different empty one...) I'm sure there is some interesting history behind +that one :) + +Upshot is I just threw a filesystem on fsdax and wrote some text files on it +to allow easy grepping. The right data ends up in the memory and a plausible +namespace description is stored in the LSA. + +So to some degree at least it's 'working' on an 8 way direct connected +set of emulated devices. + +One snag is that serial number support isn't yet upstream in QEMU. +(I have had it in my tree for a while but not posted it yet because of + QEMU feature freeze) +https://gitlab.com/jic23/qemu/-/commit/144c783ea8a5fbe169f46ea1ba92940157f42733 +That's needed for meaningful cookie generation. Otherwise you can build the +namespace once, but it won't work on next probe as the cookie is 0 and you +hit some error paths. + +Maybe sensible to add a sanity check and fail namespace creation if +cookie is 0? (Silly side question, but is there a theoretical risk of +a serial number / other data combination leading to a fletcher64() +checksum that happens to be 0 - that would give a very odd bug report!) + +So to make it work the following is needed: + +1) The kernel fix for mailbox buffer overflow. +2) Qemu fix for size of arguements for get_lsa +3) Qemu fix to allow variable size input arguements (for set_lsa) +4) Serial number patch above + command lines to qemu to set appropriate + serial numbers. + +I'll send out the QEMU fixes shortly and post the Serial number patch, +though that almost certainly won't go in until next QEMU development +cycle starts in a few weeks. + +Next up, run through same tests on some other topologies. + +Jonathan + +> +> +> +> +> [ 69.919877] nd_bus ndbus0: START: nd_region.probe(region0) +> +> [ 69.920108] nd_region_probe +> +> [ 69.920623] ------------[ cut here ]------------ +> +> [ 69.920675] refcount_t: addition on 0; use-after-free. 
+> +> [ 69.921314] WARNING: CPU: 3 PID: 710 at lib/refcount.c:25 +> +> refcount_warn_saturate+0xa0/0x144 +> +> [ 69.926949] Modules linked in: cxl_pmem cxl_mem cxl_pci cxl_port +> +> cxl_acpi cxl_core +> +> [ 69.928830] CPU: 3 PID: 710 Comm: kworker/u8:9 Not tainted 5.19.0-rc3+ +> +> #399 +> +> [ 69.930596] Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 +> +> 02/06/2015 +> +> [ 69.931482] Workqueue: events_unbound async_run_entry_fn +> +> [ 69.932403] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS +> +> BTYPE=--) +> +> [ 69.934023] pc : refcount_warn_saturate+0xa0/0x144 +> +> [ 69.935161] lr : refcount_warn_saturate+0xa0/0x144 +> +> [ 69.936541] sp : ffff80000890b960 +> +> [ 69.937921] x29: ffff80000890b960 x28: 0000000000000000 x27: +> +> 0000000000000000 +> +> [ 69.940917] x26: ffffa54a90d5cb10 x25: ffffa54a90809e98 x24: +> +> 0000000000000000 +> +> [ 69.942537] x23: ffffa54a91a3d8d8 x22: ffff0000c5254800 x21: +> +> ffff0000c5254800 +> +> [ 69.944013] x20: ffff0000ce924180 x19: ffff0000c5254800 x18: +> +> ffffffffffffffff +> +> [ 69.946100] x17: ffff5ab66e5ef000 x16: ffff80000801c000 x15: +> +> 0000000000000000 +> +> [ 69.947585] x14: 0000000000000001 x13: 0a2e656572662d72 x12: +> +> 657466612d657375 +> +> [ 69.948670] x11: 203b30206e6f206e x10: 6f69746964646120 x9 : +> +> ffffa54a8f63d288 +> +> [ 69.950679] x8 : 206e6f206e6f6974 x7 : 69646461203a745f x6 : +> +> 00000000fffff31e +> +> [ 69.952113] x5 : ffff0000ff61ba08 x4 : 00000000fffff31e x3 : +> +> ffff5ab66e5ef000 +> +> root@debian:/sys/bus/cxl/devices/decoder0.0/region0# [ 69.954752] x2 : +> +> 0000000000000000 x1 : 0000000000000000 x0 : ffff0000c512e740 +> +> [ 69.957098] Call trace: +> +> [ 69.957959] refcount_warn_saturate+0xa0/0x144 +> +> [ 69.958773] get_ndd+0x5c/0x80 +> +> [ 69.959294] nd_region_register_namespaces+0xe4/0xe90 +> +> [ 69.960253] nd_region_probe+0x100/0x290 +> +> [ 69.960796] nvdimm_bus_probe+0xf4/0x1c0 +> +> [ 69.962087] really_probe+0x19c/0x3f0 +> +> [ 69.962620] __driver_probe_device+0x11c/0x190 +> +> [ 69.963258] driver_probe_device+0x44/0xf4 +> +> [ 69.963773] __device_attach_driver+0xa4/0x140 +> +> [ 69.964471] bus_for_each_drv+0x84/0xe0 +> +> [ 69.965068] __device_attach+0xb0/0x1f0 +> +> [ 69.966101] device_initial_probe+0x20/0x30 +> +> [ 69.967142] bus_probe_device+0xa4/0xb0 +> +> [ 69.968104] device_add+0x3e8/0x910 +> +> [ 69.969111] nd_async_device_register+0x24/0x74 +> +> [ 69.969928] async_run_entry_fn+0x40/0x150 +> +> [ 69.970725] process_one_work+0x1dc/0x450 +> +> [ 69.971796] worker_thread+0x154/0x450 +> +> [ 69.972700] kthread+0x118/0x120 +> +> [ 69.974141] ret_from_fork+0x10/0x20 +> +> [ 69.975141] ---[ end trace 0000000000000000 ]--- +> +> [ 70.117887] Into nd_namespace_pmem_set_resource() + +Bobo WL wrote: +> +Hi list +> +> +I want to test cxl functions in arm64, and found some problems I can't +> +figure out. +> +> +My test environment: +> +> +1. build latest bios from +https://github.com/tianocore/edk2.git +master +> +branch(cc2db6ebfb6d9d85ba4c7b35fba1fa37fffc0bc2) +> +2. build latest qemu-system-aarch64 from git://git.qemu.org/qemu.git +> +master branch(846dcf0ba4eff824c295f06550b8673ff3f31314). With cxl arm +> +support patch: +> +https://patchwork.kernel.org/project/cxl/cover/20220616141950.23374-1-Jonathan.Cameron@huawei.com/ +> +3. build Linux kernel from +> +https://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl.git +preview +> +branch(65fc1c3d26b96002a5aa1f4012fae4dc98fd5683) +> +4. 
build latest ndctl tools from +https://github.com/pmem/ndctl +> +create_region branch(8558b394e449779e3a4f3ae90fae77ede0bca159) +> +> +And my qemu test commands: +> +sudo $QEMU_BIN -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 \ +> +-cpu max -smp 8 -nographic -no-reboot \ +> +-kernel $KERNEL -bios $BIOS_BIN \ +> +-drive if=none,file=$ROOTFS,format=qcow2,id=hd \ +> +-device virtio-blk-pci,drive=hd -append 'root=/dev/vda1 +> +nokaslr dyndbg="module cxl* +p"' \ +> +-object memory-backend-ram,size=4G,id=mem0 \ +> +-numa node,nodeid=0,cpus=0-7,memdev=mem0 \ +> +-net nic -net user,hostfwd=tcp::2222-:22 -enable-kvm \ +> +-object +> +memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-mem3,share=on,mem-path=/tmp/cxltest3.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa0.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa1.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa2,share=on,mem-path=/tmp/lsa2.raw,size=256M +> +\ +> +-object +> +memory-backend-file,id=cxl-lsa3,share=on,mem-path=/tmp/lsa3.raw,size=256M +> +\ +> +-device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ +> +-device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \ +> +-device cxl-upstream,bus=root_port0,id=us0 \ +> +-device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \ +> +-device +> +cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \ +> +-device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \ +> +-device +> +cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \ +> +-device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \ +> +-device +> +cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \ +> +-device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \ +> +-device +> +cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \ +> +-M +> +cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k +> +> +And I have got two problems. +> +1. When I want to create x1 region with command: "cxl create-region -d +> +decoder0.0 -w 1 -g 4096 mem0", kernel crashed with null pointer +> +reference. 
Crash log: +> +> +[ 534.697324] cxl_region region0: config state: 0 +> +[ 534.697346] cxl_region region0: probe: -6 +> +[ 534.697368] cxl_acpi ACPI0017:00: decoder0.0: created region0 +> +[ 534.699115] cxl region0: mem0:endpoint3 decoder3.0 add: +> +mem0:decoder3.0 @ 0 next: none nr_eps: 1 nr_targets: 1 +> +[ 534.699149] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem0:decoder3.0 @ 0 next: mem0 nr_eps: 1 nr_targets: 1 +> +[ 534.699167] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem0:decoder3.0 @ 0 next: 0000:0d:00.0 nr_eps: 1 nr_targets: 1 +> +[ 534.699176] cxl region0: ACPI0016:00:port1 iw: 1 ig: 256 +> +[ 534.699182] cxl region0: ACPI0016:00:port1 target[0] = 0000:0c:00.0 +> +for mem0:decoder3.0 @ 0 +> +[ 534.699189] cxl region0: 0000:0d:00.0:port2 iw: 1 ig: 256 +> +[ 534.699193] cxl region0: 0000:0d:00.0:port2 target[0] = +> +0000:0e:00.0 for mem0:decoder3.0 @ 0 +> +[ 534.699405] Unable to handle kernel NULL pointer dereference at +> +virtual address 0000000000000000 +> +[ 534.701474] Mem abort info: +> +[ 534.701994] ESR = 0x0000000086000004 +> +[ 534.702653] EC = 0x21: IABT (current EL), IL = 32 bits +> +[ 534.703616] SET = 0, FnV = 0 +> +[ 534.704174] EA = 0, S1PTW = 0 +> +[ 534.704803] FSC = 0x04: level 0 translation fault +> +[ 534.705694] user pgtable: 4k pages, 48-bit VAs, pgdp=000000010144a000 +> +[ 534.706875] [0000000000000000] pgd=0000000000000000, p4d=0000000000000000 +> +[ 534.709855] Internal error: Oops: 86000004 [#1] PREEMPT SMP +> +[ 534.710301] Modules linked in: +> +[ 534.710546] CPU: 7 PID: 331 Comm: cxl Not tainted +> +5.19.0-rc3-00064-g65fc1c3d26b9-dirty #11 +> +[ 534.715393] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 +> +[ 534.717179] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +> +[ 534.719190] pc : 0x0 +> +[ 534.719928] lr : commit_store+0x118/0x2cc +> +[ 534.721007] sp : ffff80000aec3c30 +> +[ 534.721793] x29: ffff80000aec3c30 x28: ffff0000da62e740 x27: +> +ffff0000c0c06b30 +> +[ 534.723875] x26: 0000000000000000 x25: ffff0000c0a2a400 x24: +> +ffff0000c0a29400 +> +[ 534.725440] x23: 0000000000000003 x22: 0000000000000000 x21: +> +ffff0000c0c06800 +> +[ 534.727312] x20: 0000000000000000 x19: ffff0000c1559800 x18: +> +0000000000000000 +> +[ 534.729138] x17: 0000000000000000 x16: 0000000000000000 x15: +> +0000ffffd41fe838 +> +[ 534.731046] x14: 0000000000000000 x13: 0000000000000000 x12: +> +0000000000000000 +> +[ 534.732402] x11: 0000000000000000 x10: 0000000000000000 x9 : +> +0000000000000000 +> +[ 534.734432] x8 : 0000000000000000 x7 : 0000000000000000 x6 : +> +ffff0000c0906e80 +> +[ 534.735921] x5 : 0000000000000000 x4 : 0000000000000000 x3 : +> +ffff80000aec3bf0 +> +[ 534.737437] x2 : 0000000000000000 x1 : 0000000000000000 x0 : +> +ffff0000c155a000 +> +[ 534.738878] Call trace: +> +[ 534.739368] 0x0 +> +[ 534.739713] dev_attr_store+0x1c/0x30 +> +[ 534.740186] sysfs_kf_write+0x48/0x58 +> +[ 534.740961] kernfs_fop_write_iter+0x128/0x184 +> +[ 534.741872] new_sync_write+0xdc/0x158 +> +[ 534.742706] vfs_write+0x1ac/0x2a8 +> +[ 534.743440] ksys_write+0x68/0xf0 +> +[ 534.744328] __arm64_sys_write+0x1c/0x28 +> +[ 534.745180] invoke_syscall+0x44/0xf0 +> +[ 534.745989] el0_svc_common+0x4c/0xfc +> +[ 534.746661] do_el0_svc+0x60/0xa8 +> +[ 534.747378] el0_svc+0x2c/0x78 +> +[ 534.748066] el0t_64_sync_handler+0xb8/0x12c +> +[ 534.748919] el0t_64_sync+0x18c/0x190 +> +[ 534.749629] Code: bad PC value +> +[ 534.750169] ---[ end trace 0000000000000000 ]--- +What was the top kernel commit when you ran this test? 
What is the line +number of "commit_store+0x118"? + +> +2. When I want to create x4 region with command: "cxl create-region -d +> +decoder0.0 -w 4 -g 4096 -m mem0 mem1 mem2 mem3". I got below errors: +> +> +cxl region: create_region: region0: failed to set target3 to mem3 +> +cxl region: cmd_create_region: created 0 regions +> +> +And kernel log as below: +> +[ 60.536663] cxl_region region0: config state: 0 +> +[ 60.536675] cxl_region region0: probe: -6 +> +[ 60.536696] cxl_acpi ACPI0017:00: decoder0.0: created region0 +> +[ 60.538251] cxl region0: mem0:endpoint3 decoder3.0 add: +> +mem0:decoder3.0 @ 0 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.538278] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem0:decoder3.0 @ 0 next: mem0 nr_eps: 1 nr_targets: 1 +> +[ 60.538295] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem0:decoder3.0 @ 0 next: 0000:0d:00.0 nr_eps: 1 nr_targets: 1 +> +[ 60.538647] cxl region0: mem1:endpoint4 decoder4.0 add: +> +mem1:decoder4.0 @ 1 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.538663] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem1:decoder4.0 @ 1 next: mem1 nr_eps: 2 nr_targets: 2 +> +[ 60.538675] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem1:decoder4.0 @ 1 next: 0000:0d:00.0 nr_eps: 2 nr_targets: 1 +> +[ 60.539311] cxl region0: mem2:endpoint5 decoder5.0 add: +> +mem2:decoder5.0 @ 2 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.539332] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem2:decoder5.0 @ 2 next: mem2 nr_eps: 3 nr_targets: 3 +> +[ 60.539343] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem2:decoder5.0 @ 2 next: 0000:0d:00.0 nr_eps: 3 nr_targets: 1 +> +[ 60.539711] cxl region0: mem3:endpoint6 decoder6.0 add: +> +mem3:decoder6.0 @ 3 next: none nr_eps: 1 nr_targets: 1 +> +[ 60.539723] cxl region0: 0000:0d:00.0:port2 decoder2.0 add: +> +mem3:decoder6.0 @ 3 next: mem3 nr_eps: 4 nr_targets: 4 +> +[ 60.539735] cxl region0: ACPI0016:00:port1 decoder1.0 add: +> +mem3:decoder6.0 @ 3 next: 0000:0d:00.0 nr_eps: 4 nr_targets: 1 +> +[ 60.539742] cxl region0: ACPI0016:00:port1 iw: 1 ig: 256 +> +[ 60.539747] cxl region0: ACPI0016:00:port1 target[0] = 0000:0c:00.0 +> +for mem0:decoder3.0 @ 0 +> +[ 60.539754] cxl region0: 0000:0d:00.0:port2 iw: 4 ig: 512 +> +[ 60.539758] cxl region0: 0000:0d:00.0:port2 target[0] = +> +0000:0e:00.0 for mem0:decoder3.0 @ 0 +> +[ 60.539764] cxl region0: ACPI0016:00:port1: cannot host mem1:decoder4.0 at +> +1 +> +> +I have tried to write sysfs node manually, got same errors. +> +> +Hope I can get some helps here. +What is the output of: + + cxl list -MDTu -d decoder0.0 + +...? It might be the case that mem1 cannot be mapped by decoder0.0, or +at least not in the specified order, or that validation check is broken. + +Hi Dan, + +Thanks for your reply! + +On Mon, Aug 8, 2022 at 11:58 PM Dan Williams wrote: +> +> +What is the output of: +> +> +cxl list -MDTu -d decoder0.0 +> +> +...? It might be the case that mem1 cannot be mapped by decoder0.0, or +> +at least not in the specified order, or that validation check is broken. 
+Command "cxl list -MDTu -d decoder0.0" output: + +[ + { + "memdevs":[ + { + "memdev":"mem2", + "pmem_size":"256.00 MiB (268.44 MB)", + "ram_size":0, + "serial":"0", + "host":"0000:11:00.0" + }, + { + "memdev":"mem1", + "pmem_size":"256.00 MiB (268.44 MB)", + "ram_size":0, + "serial":"0", + "host":"0000:10:00.0" + }, + { + "memdev":"mem0", + "pmem_size":"256.00 MiB (268.44 MB)", + "ram_size":0, + "serial":"0", + "host":"0000:0f:00.0" + }, + { + "memdev":"mem3", + "pmem_size":"256.00 MiB (268.44 MB)", + "ram_size":0, + "serial":"0", + "host":"0000:12:00.0" + } + ] + }, + { + "root decoders":[ + { + "decoder":"decoder0.0", + "resource":"0x10000000000", + "size":"4.00 GiB (4.29 GB)", + "pmem_capable":true, + "volatile_capable":true, + "accelmem_capable":true, + "nr_targets":1, + "targets":[ + { + "target":"ACPI0016:01", + "alias":"pci0000:0c", + "position":0, + "id":"0xc" + } + ] + } + ] + } +] + +Bobo WL wrote: +> +Hi Dan, +> +> +Thanks for your reply! +> +> +On Mon, Aug 8, 2022 at 11:58 PM Dan Williams wrote: +> +> +> +> What is the output of: +> +> +> +> cxl list -MDTu -d decoder0.0 +> +> +> +> ...? It might be the case that mem1 cannot be mapped by decoder0.0, or +> +> at least not in the specified order, or that validation check is broken. +> +> +Command "cxl list -MDTu -d decoder0.0" output: +Thanks for this, I think I know the problem, but will try some +experiments with cxl_test first. + +Did the commit_store() crash stop reproducing with latest cxl/preview +branch? + +On Tue, Aug 9, 2022 at 11:17 PM Dan Williams wrote: +> +> +Bobo WL wrote: +> +> Hi Dan, +> +> +> +> Thanks for your reply! +> +> +> +> On Mon, Aug 8, 2022 at 11:58 PM Dan Williams +> +> wrote: +> +> > +> +> > What is the output of: +> +> > +> +> > cxl list -MDTu -d decoder0.0 +> +> > +> +> > ...? It might be the case that mem1 cannot be mapped by decoder0.0, or +> +> > at least not in the specified order, or that validation check is broken. +> +> +> +> Command "cxl list -MDTu -d decoder0.0" output: +> +> +Thanks for this, I think I know the problem, but will try some +> +experiments with cxl_test first. +> +> +Did the commit_store() crash stop reproducing with latest cxl/preview +> +branch? +No, still hitting this bug if don't add extra HB device in qemu + +Dan Williams wrote: +> +Bobo WL wrote: +> +> Hi Dan, +> +> +> +> Thanks for your reply! +> +> +> +> On Mon, Aug 8, 2022 at 11:58 PM Dan Williams +> +> wrote: +> +> > +> +> > What is the output of: +> +> > +> +> > cxl list -MDTu -d decoder0.0 +> +> > +> +> > ...? It might be the case that mem1 cannot be mapped by decoder0.0, or +> +> > at least not in the specified order, or that validation check is broken. +> +> +> +> Command "cxl list -MDTu -d decoder0.0" output: +> +> +Thanks for this, I think I know the problem, but will try some +> +experiments with cxl_test first. +Hmm, so my cxl_test experiment unfortunately passed so I'm not +reproducing the failure mode. 
This is the result of creating x4 region +with devices directly attached to a single host-bridge: + +# cxl create-region -d decoder3.5 -w 4 -m -g 256 mem{12,10,9,11} -s $((1<<30)) +{ + "region":"region8", + "resource":"0xf1f0000000", + "size":"1024.00 MiB (1073.74 MB)", + "interleave_ways":4, + "interleave_granularity":256, + "decode_state":"commit", + "mappings":[ + { + "position":3, + "memdev":"mem11", + "decoder":"decoder21.0" + }, + { + "position":2, + "memdev":"mem9", + "decoder":"decoder19.0" + }, + { + "position":1, + "memdev":"mem10", + "decoder":"decoder20.0" + }, + { + "position":0, + "memdev":"mem12", + "decoder":"decoder22.0" + } + ] +} +cxl region: cmd_create_region: created 1 region + +> +Did the commit_store() crash stop reproducing with latest cxl/preview +> +branch? +I missed the answer to this question. + +All of these changes are now in Linus' tree perhaps give that a try and +post the debug log again? + +On Thu, 11 Aug 2022 17:46:55 -0700 +Dan Williams wrote: + +> +Dan Williams wrote: +> +> Bobo WL wrote: +> +> > Hi Dan, +> +> > +> +> > Thanks for your reply! +> +> > +> +> > On Mon, Aug 8, 2022 at 11:58 PM Dan Williams +> +> > wrote: +> +> > > +> +> > > What is the output of: +> +> > > +> +> > > cxl list -MDTu -d decoder0.0 +> +> > > +> +> > > ...? It might be the case that mem1 cannot be mapped by decoder0.0, or +> +> > > at least not in the specified order, or that validation check is +> +> > > broken. +> +> > +> +> > Command "cxl list -MDTu -d decoder0.0" output: +> +> +> +> Thanks for this, I think I know the problem, but will try some +> +> experiments with cxl_test first. +> +> +Hmm, so my cxl_test experiment unfortunately passed so I'm not +> +reproducing the failure mode. This is the result of creating x4 region +> +with devices directly attached to a single host-bridge: +> +> +# cxl create-region -d decoder3.5 -w 4 -m -g 256 mem{12,10,9,11} -s $((1<<30)) +> +{ +> +"region":"region8", +> +"resource":"0xf1f0000000", +> +"size":"1024.00 MiB (1073.74 MB)", +> +"interleave_ways":4, +> +"interleave_granularity":256, +> +"decode_state":"commit", +> +"mappings":[ +> +{ +> +"position":3, +> +"memdev":"mem11", +> +"decoder":"decoder21.0" +> +}, +> +{ +> +"position":2, +> +"memdev":"mem9", +> +"decoder":"decoder19.0" +> +}, +> +{ +> +"position":1, +> +"memdev":"mem10", +> +"decoder":"decoder20.0" +> +}, +> +{ +> +"position":0, +> +"memdev":"mem12", +> +"decoder":"decoder22.0" +> +} +> +] +> +} +> +cxl region: cmd_create_region: created 1 region +> +> +> Did the commit_store() crash stop reproducing with latest cxl/preview +> +> branch? +> +> +I missed the answer to this question. +> +> +All of these changes are now in Linus' tree perhaps give that a try and +> +post the debug log again? +Hi Dan, + +I've moved onto looking at this one. +1 HB, 2RP (to make it configure the HDM decoder in the QEMU HB, I'll tidy that +up +at some stage), 1 switch, 4 downstream switch ports each with a type 3 + +I'm not getting a crash, but can't successfully setup a region. +Upon adding the final target +It's failing in check_last_peer() as pos < distance. +Seems distance is 4 which makes me think it's using the wrong level of the +heirarchy for +some reason or that distance check is wrong. +Wasn't a good idea to just skip that step though as it goes boom - though +stack trace is not useful. 
+ +Jonathan + +On Wed, 17 Aug 2022 17:16:19 +0100 +Jonathan Cameron wrote: + +> +On Thu, 11 Aug 2022 17:46:55 -0700 +> +Dan Williams wrote: +> +> +> Dan Williams wrote: +> +> > Bobo WL wrote: +> +> > > Hi Dan, +> +> > > +> +> > > Thanks for your reply! +> +> > > +> +> > > On Mon, Aug 8, 2022 at 11:58 PM Dan Williams +> +> > > wrote: +> +> > > > +> +> > > > What is the output of: +> +> > > > +> +> > > > cxl list -MDTu -d decoder0.0 +> +> > > > +> +> > > > ...? It might be the case that mem1 cannot be mapped by decoder0.0, or +> +> > > > at least not in the specified order, or that validation check is +> +> > > > broken. +> +> > > +> +> > > Command "cxl list -MDTu -d decoder0.0" output: +> +> > +> +> > Thanks for this, I think I know the problem, but will try some +> +> > experiments with cxl_test first. +> +> +> +> Hmm, so my cxl_test experiment unfortunately passed so I'm not +> +> reproducing the failure mode. This is the result of creating x4 region +> +> with devices directly attached to a single host-bridge: +> +> +> +> # cxl create-region -d decoder3.5 -w 4 -m -g 256 mem{12,10,9,11} -s +> +> $((1<<30)) +> +> { +> +> "region":"region8", +> +> "resource":"0xf1f0000000", +> +> "size":"1024.00 MiB (1073.74 MB)", +> +> "interleave_ways":4, +> +> "interleave_granularity":256, +> +> "decode_state":"commit", +> +> "mappings":[ +> +> { +> +> "position":3, +> +> "memdev":"mem11", +> +> "decoder":"decoder21.0" +> +> }, +> +> { +> +> "position":2, +> +> "memdev":"mem9", +> +> "decoder":"decoder19.0" +> +> }, +> +> { +> +> "position":1, +> +> "memdev":"mem10", +> +> "decoder":"decoder20.0" +> +> }, +> +> { +> +> "position":0, +> +> "memdev":"mem12", +> +> "decoder":"decoder22.0" +> +> } +> +> ] +> +> } +> +> cxl region: cmd_create_region: created 1 region +> +> +> +> > Did the commit_store() crash stop reproducing with latest cxl/preview +> +> > branch? +> +> +> +> I missed the answer to this question. +> +> +> +> All of these changes are now in Linus' tree perhaps give that a try and +> +> post the debug log again? +> +> +Hi Dan, +> +> +I've moved onto looking at this one. +> +1 HB, 2RP (to make it configure the HDM decoder in the QEMU HB, I'll tidy +> +that up +> +at some stage), 1 switch, 4 downstream switch ports each with a type 3 +> +> +I'm not getting a crash, but can't successfully setup a region. +> +Upon adding the final target +> +It's failing in check_last_peer() as pos < distance. +> +Seems distance is 4 which makes me think it's using the wrong level of the +> +heirarchy for +> +some reason or that distance check is wrong. +> +Wasn't a good idea to just skip that step though as it goes boom - though +> +stack trace is not useful. +Turns out really weird corruption happens if you accidentally back two type3 +devices +with the same memory device. Who would have thought it :) + +That aside ignoring the check_last_peer() failure seems to make everything work +for this +topology. I'm not seeing the crash, so my guess is we fixed it somewhere along +the way. + +Now for the fun one. I've replicated the crash if we have + +1HB 1*RP 1SW, 4SW-DSP, 4Type3 + +Now, I'd expect to see it not 'work' because the QEMU HDM decoder won't be +programmed +but the null pointer dereference isn't related to that. + +The bug is straight forward. Not all decoders have commit callbacks... Will +send out +a possible fix shortly. 
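A rough sketch of the guard that observation implies, assuming the ->commit() callback on struct cxl_decoder from drivers/cxl/cxl.h; this is a fragment for illustration only, not the fix that was actually sent:

/* Not every decoder registers ->commit() (the point made above), so the
 * region commit path has to skip a NULL callback rather than call through it. */
static int commit_one_decoder(struct cxl_decoder *cxld)
{
	if (!cxld->commit)
		return 0;
	return cxld->commit(cxld);
}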
+ +Jonathan + + + +> +> +Jonathan +> +> +> +> +> +> + +On Thu, 18 Aug 2022 17:37:40 +0100 +Jonathan Cameron via wrote: + +> +On Wed, 17 Aug 2022 17:16:19 +0100 +> +Jonathan Cameron wrote: +> +> +> On Thu, 11 Aug 2022 17:46:55 -0700 +> +> Dan Williams wrote: +> +> +> +> > Dan Williams wrote: +> +> > > Bobo WL wrote: +> +> > > > Hi Dan, +> +> > > > +> +> > > > Thanks for your reply! +> +> > > > +> +> > > > On Mon, Aug 8, 2022 at 11:58 PM Dan Williams +> +> > > > wrote: +> +> > > > > +> +> > > > > What is the output of: +> +> > > > > +> +> > > > > cxl list -MDTu -d decoder0.0 +> +> > > > > +> +> > > > > ...? It might be the case that mem1 cannot be mapped by decoder0.0, +> +> > > > > or +> +> > > > > at least not in the specified order, or that validation check is +> +> > > > > broken. +> +> > > > +> +> > > > Command "cxl list -MDTu -d decoder0.0" output: +> +> > > +> +> > > Thanks for this, I think I know the problem, but will try some +> +> > > experiments with cxl_test first. +> +> > +> +> > Hmm, so my cxl_test experiment unfortunately passed so I'm not +> +> > reproducing the failure mode. This is the result of creating x4 region +> +> > with devices directly attached to a single host-bridge: +> +> > +> +> > # cxl create-region -d decoder3.5 -w 4 -m -g 256 mem{12,10,9,11} -s +> +> > $((1<<30)) +> +> > { +> +> > "region":"region8", +> +> > "resource":"0xf1f0000000", +> +> > "size":"1024.00 MiB (1073.74 MB)", +> +> > "interleave_ways":4, +> +> > "interleave_granularity":256, +> +> > "decode_state":"commit", +> +> > "mappings":[ +> +> > { +> +> > "position":3, +> +> > "memdev":"mem11", +> +> > "decoder":"decoder21.0" +> +> > }, +> +> > { +> +> > "position":2, +> +> > "memdev":"mem9", +> +> > "decoder":"decoder19.0" +> +> > }, +> +> > { +> +> > "position":1, +> +> > "memdev":"mem10", +> +> > "decoder":"decoder20.0" +> +> > }, +> +> > { +> +> > "position":0, +> +> > "memdev":"mem12", +> +> > "decoder":"decoder22.0" +> +> > } +> +> > ] +> +> > } +> +> > cxl region: cmd_create_region: created 1 region +> +> > +> +> > > Did the commit_store() crash stop reproducing with latest cxl/preview +> +> > > branch? +> +> > +> +> > I missed the answer to this question. +> +> > +> +> > All of these changes are now in Linus' tree perhaps give that a try and +> +> > post the debug log again? +> +> +> +> Hi Dan, +> +> +> +> I've moved onto looking at this one. +> +> 1 HB, 2RP (to make it configure the HDM decoder in the QEMU HB, I'll tidy +> +> that up +> +> at some stage), 1 switch, 4 downstream switch ports each with a type 3 +> +> +> +> I'm not getting a crash, but can't successfully setup a region. +> +> Upon adding the final target +> +> It's failing in check_last_peer() as pos < distance. +> +> Seems distance is 4 which makes me think it's using the wrong level of the +> +> heirarchy for +> +> some reason or that distance check is wrong. +> +> Wasn't a good idea to just skip that step though as it goes boom - though +> +> stack trace is not useful. +> +> +Turns out really weird corruption happens if you accidentally back two type3 +> +devices +> +with the same memory device. Who would have thought it :) +> +> +That aside ignoring the check_last_peer() failure seems to make everything +> +work for this +> +topology. I'm not seeing the crash, so my guess is we fixed it somewhere +> +along the way. +> +> +Now for the fun one. 
I've replicated the crash if we have +> +> +1HB 1*RP 1SW, 4SW-DSP, 4Type3 +> +> +Now, I'd expect to see it not 'work' because the QEMU HDM decoder won't be +> +programmed +> +but the null pointer dereference isn't related to that. +> +> +The bug is straight forward. Not all decoders have commit callbacks... Will +> +send out +> +a possible fix shortly. +> +For completeness I'm carrying this hack because I haven't gotten my head +around the right fix for check_last_peer() failing on this test topology. + +diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c +index c49d9a5f1091..275e143bd748 100644 +--- a/drivers/cxl/core/region.c ++++ b/drivers/cxl/core/region.c +@@ -978,7 +978,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, + rc = check_last_peer(cxled, ep, cxl_rr, + distance); + if (rc) +- return rc; ++ // return rc; + goto out_target_set; + } + goto add_target; +-- + +I might find more bugs with more testing, but this is all the ones I've +seen so far + in Bobo's reports. Qemu fixes are now in upstream so +will be there in the release. + +As a reminder, testing on QEMU has a few corners... + +Need a patch to add serial number ECAP support. It is on list for revew, +but will have wait for after QEMU 7.1 release (which may be next week) + +QEMU still assumes HDM decoder on the host bridge will be programmed. +So if you want anything to work there should be at least +2 RP below the HB (no need to plug anything in to one of them). + +I don't want to add a commandline parameter to hide the decoder in QEMU +and detecting there is only one RP would require moving a bunch of static +stuff into runtime code (I think). + +I still think we should make the kernel check to see if there is a decoder, +but if not I might see how bad a hack it is to have QEMU ignore that decoder +if not committed in this one special case (HB HDM decoder with only one place +it can send stuff). Obviously that would be a break from specification +so less than idea! + +Thanks, + +Jonathan + +On Fri, 19 Aug 2022 09:46:55 +0100 +Jonathan Cameron wrote: + +> +On Thu, 18 Aug 2022 17:37:40 +0100 +> +Jonathan Cameron via wrote: +> +> +> On Wed, 17 Aug 2022 17:16:19 +0100 +> +> Jonathan Cameron wrote: +> +> +> +> > On Thu, 11 Aug 2022 17:46:55 -0700 +> +> > Dan Williams wrote: +> +> > +> +> > > Dan Williams wrote: +> +> > > > Bobo WL wrote: +> +> > > > > Hi Dan, +> +> > > > > +> +> > > > > Thanks for your reply! +> +> > > > > +> +> > > > > On Mon, Aug 8, 2022 at 11:58 PM Dan Williams +> +> > > > > wrote: +> +> > > > > > +> +> > > > > > What is the output of: +> +> > > > > > +> +> > > > > > cxl list -MDTu -d decoder0.0 +> +> > > > > > +> +> > > > > > ...? It might be the case that mem1 cannot be mapped by +> +> > > > > > decoder0.0, or +> +> > > > > > at least not in the specified order, or that validation check is +> +> > > > > > broken. +> +> > > > > +> +> > > > > Command "cxl list -MDTu -d decoder0.0" output: +> +> > > > +> +> > > > Thanks for this, I think I know the problem, but will try some +> +> > > > experiments with cxl_test first. +> +> > > +> +> > > Hmm, so my cxl_test experiment unfortunately passed so I'm not +> +> > > reproducing the failure mode. 
This is the result of creating x4 region +> +> > > with devices directly attached to a single host-bridge: +> +> > > +> +> > > # cxl create-region -d decoder3.5 -w 4 -m -g 256 mem{12,10,9,11} -s +> +> > > $((1<<30)) +> +> > > { +> +> > > "region":"region8", +> +> > > "resource":"0xf1f0000000", +> +> > > "size":"1024.00 MiB (1073.74 MB)", +> +> > > "interleave_ways":4, +> +> > > "interleave_granularity":256, +> +> > > "decode_state":"commit", +> +> > > "mappings":[ +> +> > > { +> +> > > "position":3, +> +> > > "memdev":"mem11", +> +> > > "decoder":"decoder21.0" +> +> > > }, +> +> > > { +> +> > > "position":2, +> +> > > "memdev":"mem9", +> +> > > "decoder":"decoder19.0" +> +> > > }, +> +> > > { +> +> > > "position":1, +> +> > > "memdev":"mem10", +> +> > > "decoder":"decoder20.0" +> +> > > }, +> +> > > { +> +> > > "position":0, +> +> > > "memdev":"mem12", +> +> > > "decoder":"decoder22.0" +> +> > > } +> +> > > ] +> +> > > } +> +> > > cxl region: cmd_create_region: created 1 region +> +> > > +> +> > > > Did the commit_store() crash stop reproducing with latest cxl/preview +> +> > > > branch? +> +> > > +> +> > > I missed the answer to this question. +> +> > > +> +> > > All of these changes are now in Linus' tree perhaps give that a try and +> +> > > post the debug log again? +> +> > +> +> > Hi Dan, +> +> > +> +> > I've moved onto looking at this one. +> +> > 1 HB, 2RP (to make it configure the HDM decoder in the QEMU HB, I'll tidy +> +> > that up +> +> > at some stage), 1 switch, 4 downstream switch ports each with a type 3 +> +> > +> +> > I'm not getting a crash, but can't successfully setup a region. +> +> > Upon adding the final target +> +> > It's failing in check_last_peer() as pos < distance. +> +> > Seems distance is 4 which makes me think it's using the wrong level of +> +> > the heirarchy for +> +> > some reason or that distance check is wrong. +> +> > Wasn't a good idea to just skip that step though as it goes boom - though +> +> > stack trace is not useful. +> +> +> +> Turns out really weird corruption happens if you accidentally back two +> +> type3 devices +> +> with the same memory device. Who would have thought it :) +> +> +> +> That aside ignoring the check_last_peer() failure seems to make everything +> +> work for this +> +> topology. I'm not seeing the crash, so my guess is we fixed it somewhere +> +> along the way. +> +> +> +> Now for the fun one. I've replicated the crash if we have +> +> +> +> 1HB 1*RP 1SW, 4SW-DSP, 4Type3 +> +> +> +> Now, I'd expect to see it not 'work' because the QEMU HDM decoder won't be +> +> programmed +> +> but the null pointer dereference isn't related to that. +> +> +> +> The bug is straight forward. Not all decoders have commit callbacks... +> +> Will send out +> +> a possible fix shortly. +> +> +> +For completeness I'm carrying this hack because I haven't gotten my head +> +around the right fix for check_last_peer() failing on this test topology. +> +> +diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c +> +index c49d9a5f1091..275e143bd748 100644 +> +--- a/drivers/cxl/core/region.c +> ++++ b/drivers/cxl/core/region.c +> +@@ -978,7 +978,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, +> +rc = check_last_peer(cxled, ep, cxl_rr, +> +distance); +> +if (rc) +> +- return rc; +> ++ // return rc; +> +goto out_target_set; +> +} +> +goto add_target; +I'm still carrying this hack and still haven't worked out the right fix. + +Suggestions welcome! If not I'll hopefully get some time on this +towards the end of the week. 
+ +Jonathan + diff --git a/results/classifier/001/instruction/42226390 b/results/classifier/001/instruction/42226390 new file mode 100644 index 000000000..1d455d6fa --- /dev/null +++ b/results/classifier/001/instruction/42226390 @@ -0,0 +1,187 @@ +instruction: 0.925 +semantic: 0.924 +other: 0.894 +mistranslation: 0.826 + +[BUG] AArch64 boot hang with -icount and -smp >1 (iothread locking issue?) + +Hello, + +I am encountering one or more bugs when using -icount and -smp >1 that I am +attempting to sort out. My current theory is that it is an iothread locking +issue. + +I am using a command-line like the following where $kernel is a recent upstream +AArch64 Linux kernel Image (I can provide a binary if that would be helpful - +let me know how is best to post): + + qemu-system-aarch64 \ + -M virt -cpu cortex-a57 -m 1G \ + -nographic \ + -smp 2 \ + -icount 0 \ + -kernel $kernel + +For any/all of the symptoms described below, they seem to disappear when I +either remove `-icount 0` or change smp to `-smp 1`. In other words, it is the +combination of `-smp >1` and `-icount` which triggers what I'm seeing. + +I am seeing two different (but seemingly related) behaviors. The first (and +what I originally started debugging) shows up as a boot hang. When booting +using the above command after Peter's "icount: Take iothread lock when running +QEMU timers" patch [1], The kernel boots for a while and then hangs after: + +> +...snip... +> +[ 0.010764] Serial: AMBA PL011 UART driver +> +[ 0.016334] 9000000.pl011: ttyAMA0 at MMIO 0x9000000 (irq = 13, base_baud +> += 0) is a PL011 rev1 +> +[ 0.016907] printk: console [ttyAMA0] enabled +> +[ 0.017624] KASLR enabled +> +[ 0.031986] HugeTLB: registered 16.0 GiB page size, pre-allocated 0 pages +> +[ 0.031986] HugeTLB: 16320 KiB vmemmap can be freed for a 16.0 GiB page +> +[ 0.031986] HugeTLB: registered 512 MiB page size, pre-allocated 0 pages +> +[ 0.031986] HugeTLB: 448 KiB vmemmap can be freed for a 512 MiB page +> +[ 0.031986] HugeTLB: registered 2.00 MiB page size, pre-allocated 0 pages +> +[ 0.031986] HugeTLB: 0 KiB vmemmap can be freed for a 2.00 MiB page +When it hangs here, I drop into QEMU's console, attach to the gdbserver, and it +always reports that it is at address 0xffff800008dc42e8 (as shown below from an +objdump of the vmlinux). 
I note this is in the middle of messing with timer +system registers - which makes me suspect we're attempting to take the iothread +lock when its already held: + +> +ffff800008dc42b8 : +> +ffff800008dc42b8: d503201f nop +> +ffff800008dc42bc: d503201f nop +> +ffff800008dc42c0: d503233f paciasp +> +ffff800008dc42c4: d53be321 mrs x1, cntv_ctl_el0 +> +ffff800008dc42c8: 32000021 orr w1, w1, #0x1 +> +ffff800008dc42cc: d5033fdf isb +> +ffff800008dc42d0: d53be042 mrs x2, cntvct_el0 +> +ffff800008dc42d4: ca020043 eor x3, x2, x2 +> +ffff800008dc42d8: 8b2363e3 add x3, sp, x3 +> +ffff800008dc42dc: f940007f ldr xzr, [x3] +> +ffff800008dc42e0: 8b020000 add x0, x0, x2 +> +ffff800008dc42e4: d51be340 msr cntv_cval_el0, x0 +> +* ffff800008dc42e8: 927ef820 and x0, x1, #0xfffffffffffffffd +> +ffff800008dc42ec: d51be320 msr cntv_ctl_el0, x0 +> +ffff800008dc42f0: d5033fdf isb +> +ffff800008dc42f4: 52800000 mov w0, #0x0 +> +// #0 +> +ffff800008dc42f8: d50323bf autiasp +> +ffff800008dc42fc: d65f03c0 ret +The second behavior is that prior to Peter's "icount: Take iothread lock when +running QEMU timers" patch [1], I observe the following message (same command +as above): + +> +ERROR:../accel/tcg/tcg-accel-ops.c:79:tcg_handle_interrupt: assertion failed: +> +(qemu_mutex_iothread_locked()) +> +Aborted (core dumped) +This is the same behavior described in Gitlab issue 1130 [0] and addressed by +[1]. I bisected the appearance of this assertion, and found it was introduced +by Pavel's "replay: rewrite async event handling" commit [2]. Commits prior to +that one boot successfully (neither assertions nor hangs) with `-icount 0 -smp +2`. + +I've looked over these two commits ([1], [2]), but it is not obvious to me +how/why they might be interacting to produce the boot hangs I'm seeing and +I welcome any help investigating further. + +Thanks! + +-Aaron Lindsay + +[0] - +https://gitlab.com/qemu-project/qemu/-/issues/1130 +[1] - +https://gitlab.com/qemu-project/qemu/-/commit/c7f26ded6d5065e4116f630f6a490b55f6c5f58e +[2] - +https://gitlab.com/qemu-project/qemu/-/commit/60618e2d77691e44bb78e23b2b0cf07b5c405e56 + +On Fri, 21 Oct 2022 at 16:48, Aaron Lindsay + wrote: +> +> +Hello, +> +> +I am encountering one or more bugs when using -icount and -smp >1 that I am +> +attempting to sort out. My current theory is that it is an iothread locking +> +issue. +Weird coincidence, that is a bug that's been in the tree for months +but was only reported to me earlier this week. Try reverting +commit a82fd5a4ec24d923ff1e -- that should fix it. +CAFEAcA_i8x00hD-4XX18ySLNbCB6ds1-DSazVb4yDnF8skjd9A@mail.gmail.com +/">https://lore.kernel.org/qemu-devel/ +CAFEAcA_i8x00hD-4XX18ySLNbCB6ds1-DSazVb4yDnF8skjd9A@mail.gmail.com +/ +has the explanation. + +thanks +-- PMM + +On Oct 21 17:00, Peter Maydell wrote: +> +On Fri, 21 Oct 2022 at 16:48, Aaron Lindsay +> + wrote: +> +> +> +> Hello, +> +> +> +> I am encountering one or more bugs when using -icount and -smp >1 that I am +> +> attempting to sort out. My current theory is that it is an iothread locking +> +> issue. +> +> +Weird coincidence, that is a bug that's been in the tree for months +> +but was only reported to me earlier this week. Try reverting +> +commit a82fd5a4ec24d923ff1e -- that should fix it. +I can confirm that reverting a82fd5a4ec24d923ff1e fixes it for me. +Thanks for the help and fast response! 
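+For reference, a minimal way to test the revert locally is something along
+these lines (commit id from Peter's mail; standard git and QEMU build steps,
+adjust to your tree and build directory):
+
+git revert --no-edit a82fd5a4ec24d923ff1e
+./configure --target-list=aarch64-softmmu
+make -j"$(nproc)"
+
+and then re-run the qemu-system-aarch64 command from the start of this thread
+with -icount 0 and -smp 2.
+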
+ +-Aaron + diff --git a/results/classifier/001/instruction/50773216 b/results/classifier/001/instruction/50773216 new file mode 100644 index 000000000..d887fe7b5 --- /dev/null +++ b/results/classifier/001/instruction/50773216 @@ -0,0 +1,110 @@ +instruction: 0.768 +other: 0.737 +semantic: 0.669 +mistranslation: 0.652 + +[Qemu-devel] Can I have someone's feedback on [bug 1809075] Concurrency bug on keyboard events: capslock LED messing up keycode streams causes character misses at guest kernel + +Hi everyone. +Can I please have someone's feedback on this bug? +https://bugs.launchpad.net/qemu/+bug/1809075 +Briefly, guest OS loses characters sent to it via vnc. And I spot the +bug in relation to ps2 driver. +I'm thinking of possible fixes and I might want to use a memory barrier. +But I would really like to have some suggestion from a qemu developer +first. For example, can we brutally drop capslock LED key events in ps2 +queue? +It is actually relevant to openQA, an automated QA tool for openSUSE. +And this bug blocks a few test cases for us. +Thank you in advance! + +Kind regards, +Gao Zhiyuan + +Cc'ing Marc-André & Gerd. + +On 12/19/18 10:31 AM, Gao Zhiyuan wrote: +> +Hi everyone. +> +> +Can I please have someone's feedback on this bug? +> +https://bugs.launchpad.net/qemu/+bug/1809075 +> +Briefly, guest OS loses characters sent to it via vnc. And I spot the +> +bug in relation to ps2 driver. +> +> +I'm thinking of possible fixes and I might want to use a memory barrier. +> +But I would really like to have some suggestion from a qemu developer +> +first. For example, can we brutally drop capslock LED key events in ps2 +> +queue? +> +> +It is actually relevant to openQA, an automated QA tool for openSUSE. +> +And this bug blocks a few test cases for us. +> +> +Thank you in advance! +> +> +Kind regards, +> +Gao Zhiyuan +> + +On Thu, Jan 03, 2019 at 12:05:54PM +0100, Philippe Mathieu-Daudé wrote: +> +Cc'ing Marc-André & Gerd. +> +> +On 12/19/18 10:31 AM, Gao Zhiyuan wrote: +> +> Hi everyone. +> +> +> +> Can I please have someone's feedback on this bug? +> +> +https://bugs.launchpad.net/qemu/+bug/1809075 +> +> Briefly, guest OS loses characters sent to it via vnc. And I spot the +> +> bug in relation to ps2 driver. +> +> +> +> I'm thinking of possible fixes and I might want to use a memory barrier. +> +> But I would really like to have some suggestion from a qemu developer +> +> first. For example, can we brutally drop capslock LED key events in ps2 +> +> queue? +There is no "capslock LED key event". 0xfa is KBD_REPLY_ACK, and the +device queues it in response to guest port writes. Yes, the ack can +race with actual key events. But IMO that isn't a bug in qemu. + +Probably the linux kernel just throws away everything until it got the +ack for the port write, and that way the key event gets lost. On +physical hardware you will not notice because it is next to impossible +to type fast enough to hit the race window. + +So, go fix the kernel. + +Alternatively fix vncdotool to send uppercase letters properly with +shift key pressed. Then qemu wouldn't generate capslock key events +(that happens because qemu thinks guest and host capslock state is out +of sync) and the guests's capslock led update request wouldn't get into +the way. 
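+On the vncdotool side, something like the following should send an uppercase
+letter with shift genuinely held, so qemu never needs to synthesize capslock
+events at all. The syntax is from vncdotool's documentation as I remember it
+(the 'key' command accepts hyphen-combined modifiers, as in ctrl-alt-del), so
+please double-check it against the version openQA actually uses:
+
+vncdotool -s 127.0.0.1::5900 key shift-a
+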
+ +cheers, + Gerd + diff --git a/results/classifier/001/instruction/51610399 b/results/classifier/001/instruction/51610399 new file mode 100644 index 000000000..a78585284 --- /dev/null +++ b/results/classifier/001/instruction/51610399 @@ -0,0 +1,308 @@ +instruction: 0.985 +other: 0.985 +semantic: 0.984 +mistranslation: 0.983 + +[BUG][powerpc] KVM Guest Boot Failure – Hangs at "Booting Linux via __start()” + +Bug Description: +Encountering a boot failure when launching a KVM guest with +qemu-system-ppc64. The guest hangs at boot, and the QEMU monitor +crashes. +Reproduction Steps: +# qemu-system-ppc64 --version +QEMU emulator version 9.2.50 (v9.2.0-2799-g0462a32b4f) +Copyright (c) 2003-2025 Fabrice Bellard and the QEMU Project developers +# /usr/bin/qemu-system-ppc64 -name avocado-vt-vm1 -machine +pseries,accel=kvm \ +-m 32768 -smp 32,sockets=1,cores=32,threads=1 -nographic \ + -device virtio-scsi-pci,id=scsi \ +-drive +file=/home/kvmci/tests/data/avocado-vt/images/rhel8.0devel-ppc64le.qcow2,if=none,id=drive0,format=qcow2 +\ +-device scsi-hd,drive=drive0,bus=scsi.0 \ + -netdev bridge,id=net0,br=virbr0 \ + -device virtio-net-pci,netdev=net0 \ + -serial pty \ + -device virtio-balloon-pci \ + -cpu host +QEMU 9.2.50 monitor - type 'help' for more information +char device redirected to /dev/pts/2 (label serial0) +(qemu) +(qemu) qemu-system-ppc64: warning: kernel_irqchip allowed but +unavailable: IRQ_XIVE capability must be present for KVM +Falling back to kernel-irqchip=off +** Qemu Hang + +(In another ssh session) +# screen /dev/pts/2 +Preparing to boot Linux version 6.10.4-200.fc40.ppc64le +(mockbuild@c23cc4e677614c34bb22d54eeea4dc1f) (gcc (GCC) 14.2.1 20240801 +(Red Hat 14.2.1-1), GNU ld version 2.41-37.fc40) #1 SMP Sun Aug 11 +15:20:17 UTC 2024 +Detected machine type: 0000000000000101 +command line: +BOOT_IMAGE=(ieee1275/disk,msdos2)/vmlinuz-6.10.4-200.fc40.ppc64le +root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root crashkernel=1024M +Max number of cores passed to firmware: 2048 (NR_CPUS = 2048) +Calling ibm,client-architecture-support... done +memory layout at init: + memory_limit : 0000000000000000 (16 MB aligned) + alloc_bottom : 0000000008200000 + alloc_top : 0000000030000000 + alloc_top_hi : 0000000800000000 + rmo_top : 0000000030000000 + ram_top : 0000000800000000 +instantiating rtas at 0x000000002fff0000... done +prom_hold_cpus: skipped +copying OF device tree... +Building dt strings... +Building dt structure... +Device tree strings 0x0000000008210000 -> 0x0000000008210bd0 +Device tree struct 0x0000000008220000 -> 0x0000000008230000 +Quiescing Open Firmware ... +Booting Linux via __start() @ 0x0000000000440000 ... +** Guest Console Hang + + +Git Bisect: +Performing git bisect points to the following patch: +# git bisect bad +e8291ec16da80566c121c68d9112be458954d90b is the first bad commit +commit e8291ec16da80566c121c68d9112be458954d90b (HEAD) +Author: Nicholas Piggin +Date: Thu Dec 19 13:40:31 2024 +1000 + + target/ppc: fix timebase register reset state +(H)DEC and PURR get reset before icount does, which causes them to +be +skewed and not match the init state. This can cause replay to not +match the recorded trace exactly. For DEC and HDEC this is usually +not +noticable since they tend to get programmed before affecting the + target machine. PURR has been observed to cause replay bugs when + running Linux. + + Fix this by resetting using a time of 0. 
+ + Message-ID: <20241219034035.1826173-2-npiggin@gmail.com> + Signed-off-by: Nicholas Piggin + + hw/ppc/ppc.c | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + + +Reverting the patch helps boot the guest. +Thanks, +Misbah Anjum N + +Thanks for the report. + +Tricky problem. A secondary CPU is hanging before it is started by the +primary via rtas call. + +That secondary keeps calling kvm_cpu_exec(), which keeps exiting out +early with EXCP_HLT because kvm_arch_process_async_events() returns +true because that cpu has ->halted=1. That just goes around he run +loop because there is an interrupt pending (DEC). + +So it never runs. It also never releases the BQL, and another CPU, +the primary which is actually supposed to be running, is stuck in +spapr_set_all_lpcrs() in run_on_cpu() waiting for the BQL. + +This patch just exposes the bug I think, by causing the interrupt. +although I'm not quite sure why it's okay previously (-ve decrementer +values should be causing a timer exception too). The timer exception +should not be taken as an interrupt by those secondary CPUs, and it +doesn't because it is masked, until set_all_lpcrs sets an LPCR value +that enables powersave wakeup on decrementer interrupt. + +The start_powered_off sate just sets ->halted, which makes it look +like a powersaving state. Logically I think it's not the same thing +as far as spapr goes. I don't know why start_powered_off only sets +->halted, and not ->stop/stopped as well. + +Not sure how best to solve it cleanly. I'll send a revert if I can't +get something working soon. + +Thanks, +Nick + +On Tue Mar 18, 2025 at 7:09 AM AEST, misanjum wrote: +> +Bug Description: +> +Encountering a boot failure when launching a KVM guest with +> +qemu-system-ppc64. The guest hangs at boot, and the QEMU monitor +> +crashes. +> +> +> +Reproduction Steps: +> +# qemu-system-ppc64 --version +> +QEMU emulator version 9.2.50 (v9.2.0-2799-g0462a32b4f) +> +Copyright (c) 2003-2025 Fabrice Bellard and the QEMU Project developers +> +> +# /usr/bin/qemu-system-ppc64 -name avocado-vt-vm1 -machine +> +pseries,accel=kvm \ +> +-m 32768 -smp 32,sockets=1,cores=32,threads=1 -nographic \ +> +-device virtio-scsi-pci,id=scsi \ +> +-drive +> +file=/home/kvmci/tests/data/avocado-vt/images/rhel8.0devel-ppc64le.qcow2,if=none,id=drive0,format=qcow2 +> +> +\ +> +-device scsi-hd,drive=drive0,bus=scsi.0 \ +> +-netdev bridge,id=net0,br=virbr0 \ +> +-device virtio-net-pci,netdev=net0 \ +> +-serial pty \ +> +-device virtio-balloon-pci \ +> +-cpu host +> +QEMU 9.2.50 monitor - type 'help' for more information +> +char device redirected to /dev/pts/2 (label serial0) +> +(qemu) +> +(qemu) qemu-system-ppc64: warning: kernel_irqchip allowed but +> +unavailable: IRQ_XIVE capability must be present for KVM +> +Falling back to kernel-irqchip=off +> +** Qemu Hang +> +> +(In another ssh session) +> +# screen /dev/pts/2 +> +Preparing to boot Linux version 6.10.4-200.fc40.ppc64le +> +(mockbuild@c23cc4e677614c34bb22d54eeea4dc1f) (gcc (GCC) 14.2.1 20240801 +> +(Red Hat 14.2.1-1), GNU ld version 2.41-37.fc40) #1 SMP Sun Aug 11 +> +15:20:17 UTC 2024 +> +Detected machine type: 0000000000000101 +> +command line: +> +BOOT_IMAGE=(ieee1275/disk,msdos2)/vmlinuz-6.10.4-200.fc40.ppc64le +> +root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root crashkernel=1024M +> +Max number of cores passed to firmware: 2048 (NR_CPUS = 2048) +> +Calling ibm,client-architecture-support... 
done +> +memory layout at init: +> +memory_limit : 0000000000000000 (16 MB aligned) +> +alloc_bottom : 0000000008200000 +> +alloc_top : 0000000030000000 +> +alloc_top_hi : 0000000800000000 +> +rmo_top : 0000000030000000 +> +ram_top : 0000000800000000 +> +instantiating rtas at 0x000000002fff0000... done +> +prom_hold_cpus: skipped +> +copying OF device tree... +> +Building dt strings... +> +Building dt structure... +> +Device tree strings 0x0000000008210000 -> 0x0000000008210bd0 +> +Device tree struct 0x0000000008220000 -> 0x0000000008230000 +> +Quiescing Open Firmware ... +> +Booting Linux via __start() @ 0x0000000000440000 ... +> +** Guest Console Hang +> +> +> +Git Bisect: +> +Performing git bisect points to the following patch: +> +# git bisect bad +> +e8291ec16da80566c121c68d9112be458954d90b is the first bad commit +> +commit e8291ec16da80566c121c68d9112be458954d90b (HEAD) +> +Author: Nicholas Piggin +> +Date: Thu Dec 19 13:40:31 2024 +1000 +> +> +target/ppc: fix timebase register reset state +> +> +(H)DEC and PURR get reset before icount does, which causes them to +> +be +> +skewed and not match the init state. This can cause replay to not +> +match the recorded trace exactly. For DEC and HDEC this is usually +> +not +> +noticable since they tend to get programmed before affecting the +> +target machine. PURR has been observed to cause replay bugs when +> +running Linux. +> +> +Fix this by resetting using a time of 0. +> +> +Message-ID: <20241219034035.1826173-2-npiggin@gmail.com> +> +Signed-off-by: Nicholas Piggin +> +> +hw/ppc/ppc.c | 11 ++++++++--- +> +1 file changed, 8 insertions(+), 3 deletions(-) +> +> +> +Reverting the patch helps boot the guest. +> +Thanks, +> +Misbah Anjum N + diff --git a/results/classifier/001/instruction/55961334 b/results/classifier/001/instruction/55961334 new file mode 100644 index 000000000..80cdabd29 --- /dev/null +++ b/results/classifier/001/instruction/55961334 @@ -0,0 +1,39 @@ +instruction: 0.803 +semantic: 0.775 +mistranslation: 0.718 +other: 0.715 + +[Bug] "-ht" flag ignored under KVM - guest still reports HT + +Hi Community, +We have observed that the 'ht' feature bit cannot be disabled when QEMU runs +with KVM acceleration. +qemu-system-x86_64 \ + --enable-kvm \ + -machine q35 \ + -cpu host,-ht \ + -smp 4 \ + -m 4G \ + -drive file=rootfs.img,format=raw \ + -nographic \ + -append 'console=ttyS0 root=/dev/sda rw' +Because '-ht' is specified, the guest should expose no HT capability +(cpuid.1.edx[28] = 0), and /proc/cpuinfo shouldn't show HT feature, but we still +saw ht in linux guest when run 'cat /proc/cpuinfo'. +XiaoYao mentioned that: + +It has been the behavior of QEMU since + + commit 400281af34e5ee6aa9f5496b53d8f82c6fef9319 + Author: Andre Przywara + Date: Wed Aug 19 15:42:42 2009 +0200 + + set CPUID bits to present cores and threads topology + +that we cannot remove HT CPUID bit from guest via "-cpu xxx,-ht" if the +VM has >= 2 vcpus. +I'd like to know whether there's a plan to address this issue, or if the current +behaviour is considered acceptable. +Best regards, +Ewan. 
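+For what it's worth, the raw bit can be checked from inside the guest
+independently of how Linux decodes it: HTT is CPUID leaf 1, EDX bit 28. A
+minimal sketch using GCC's cpuid.h (nothing QEMU-specific here):
+
+#include <cpuid.h>
+#include <stdio.h>
+
+int main(void)
+{
+    unsigned int eax, ebx, ecx, edx;
+
+    /* Leaf 1 is the feature-information leaf; HTT is EDX bit 28. */
+    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+        return 1;
+    printf("CPUID.1:EDX[28] (HTT) = %u\n", (edx >> 28) & 1);
+    return 0;
+}
+
+With the behaviour described above, this should print 1 for any guest with
+more than one vcpu, no matter whether -ht was stripped on the command line.
+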
+ diff --git a/results/classifier/001/instruction/62179944 b/results/classifier/001/instruction/62179944 new file mode 100644 index 000000000..5dad058d5 --- /dev/null +++ b/results/classifier/001/instruction/62179944 @@ -0,0 +1,31 @@ +instruction: 0.693 +mistranslation: 0.533 +other: 0.519 +semantic: 0.454 + +[Qemu-devel] [BUG] network : windows os lost ip address of the network card  in some cases + +we found this problem for a long time 。For example, if we has three network +card in virtual xml file ,such as "network connection 1" / "network connection +2"/"network connection 3" 。 + +Echo network card has own ip address ,such as 192.168.1.1 / 2.1 /3.1 , when +delete the first card ,reboot the windows virtual os, then this problem +happened ! + + + + +we found that the sencond network card will replace the first one , then the +ip address of "network connection 2 " become 192.168.1.1 。 + + +Our third party users began to complain about this bug 。All the business of the +second ip lost !!! + +I mean both of windows and linux has this bug , we solve this bug in linux +throught bonding netcrad pci and mac address 。 + +There is no good solution on windows os . thera are ? we implemented a plan to +resumption of IP by QGA. Is there a better way ? + diff --git a/results/classifier/001/instruction/63565653 b/results/classifier/001/instruction/63565653 new file mode 100644 index 000000000..dfac92bf4 --- /dev/null +++ b/results/classifier/001/instruction/63565653 @@ -0,0 +1,49 @@ +instruction: 0.905 +other: 0.898 +semantic: 0.825 +mistranslation: 0.462 + +[Qemu-devel] [BUG]pcibus_reset assertion failure on guest reboot + +Qemu-2.6.2 + +Start a vm with vhost-net , do reboot and hot-unplug viritio-net nic in short +time, we touch +pcibus_reset assertion failure. + +Here is qemu log: +22:29:46.359386+08:00 acpi_pm1_cnt_write -> guest do soft power off +22:29:46.785310+08:00 qemu_devices_reset +22:29:46.788093+08:00 virtio_pci_device_unplugged -> virtio net unpluged +22:29:46.803427+08:00 pcibus_reset: Assertion `bus->irq_count[i] == 0' failed. + +Here is stack info: +(gdb) bt +#0 0x00007f9a336795d7 in raise () from /usr/lib64/libc.so.6 +#1 0x00007f9a3367acc8 in abort () from /usr/lib64/libc.so.6 +#2 0x00007f9a33672546 in __assert_fail_base () from /usr/lib64/libc.so.6 +#3 0x00007f9a336725f2 in __assert_fail () from /usr/lib64/libc.so.6 +#4 0x0000000000641884 in pcibus_reset (qbus=0x29eee60) at hw/pci/pci.c:283 +#5 0x00000000005bfc30 in qbus_reset_one (bus=0x29eee60, opaque=) at hw/core/qdev.c:319 +#6 0x00000000005c1b19 in qdev_walk_children (dev=0x29ed2b0, pre_devfn=0x0, +pre_busfn=0x0, post_devfn=0x5c2440 ... +#7 0x00000000005c1c59 in qbus_walk_children (bus=0x2736f80, pre_devfn=0x0, +pre_busfn=0x0, post_devfn=0x5c2440 ... +#8 0x00000000005513f5 in qemu_devices_reset () at vl.c:1998 +#9 0x00000000004cab9d in pc_machine_reset () at +/home/abuild/rpmbuild/BUILD/qemu-kvm-2.6.0/hw/i386/pc.c:1976 +#10 0x000000000055148b in qemu_system_reset (address@hidden) at vl.c:2011 +#11 0x000000000055164f in main_loop_should_exit () at vl.c:2169 +#12 0x0000000000551719 in main_loop () at vl.c:2212 +#13 0x000000000041c9a8 in main (argc=, argv=, +envp=) at vl.c:5130 +(gdb) f 4 +... +(gdb) p bus->irq_count[0] +$6 = 1 + +Seems pci_update_irq_disabled doesn't work well + +can anyone help? 
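+One way to narrow down which device leaves the line asserted is a location
+watchpoint on that counter from the same gdb session, for example (a sketch,
+assuming a build with debug info: the cast works because BusState is the
+first member of PCIBus, and the bus pointer is the one from the backtrace
+above):
+
+(gdb) watch -l ((PCIBus *) 0x29eee60)->irq_count[0]
+(gdb) continue
+
+Each hit then shows who raises or lowers the INTx level, which should point
+at whichever device never dropped it before the reset.
+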
+ diff --git a/results/classifier/001/instruction/70868267 b/results/classifier/001/instruction/70868267 new file mode 100644 index 000000000..ffcf905b4 --- /dev/null +++ b/results/classifier/001/instruction/70868267 @@ -0,0 +1,40 @@ +instruction: 0.778 +semantic: 0.635 +mistranslation: 0.537 +other: 0.236 + +[Qemu-devel] [BUG] Failed to compile using gcc7.1 + +Hi all, + +After upgrading gcc from 6.3.1 to 7.1.1, qemu can't be compiled with gcc. + +The error is: + +------ + CC block/blkdebug.o +block/blkdebug.c: In function 'blkdebug_refresh_filename': +block/blkdebug.c:693:31: error: '%s' directive output may be truncated +writing up to 4095 bytes into a region of size 4086 +[-Werror=format-truncation=] +"blkdebug:%s:%s", s->config_file ?: "", + ^~ +In file included from /usr/include/stdio.h:939:0, + from /home/adam/qemu/include/qemu/osdep.h:68, + from block/blkdebug.c:25: +/usr/include/bits/stdio2.h:64:10: note: '__builtin___snprintf_chk' +output 11 or more bytes (assuming 4106) into a destination of size 4096 +return __builtin___snprintf_chk (__s, __n, __USE_FORTIFY_LEVEL - 1, + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + __bos (__s), __fmt, __va_arg_pack ()); + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +cc1: all warnings being treated as errors +make: *** [/home/adam/qemu/rules.mak:69: block/blkdebug.o] Error 1 +------ + +It seems that gcc 7 is introducing more restrict check for printf. +If using clang, although there are some extra warning, it can at least +pass the compile. +Thanks, +Qu + diff --git a/results/classifier/001/instruction/73660729 b/results/classifier/001/instruction/73660729 new file mode 100644 index 000000000..92d85cc82 --- /dev/null +++ b/results/classifier/001/instruction/73660729 @@ -0,0 +1,31 @@ +instruction: 0.753 +semantic: 0.698 +mistranslation: 0.633 +other: 0.620 + +[BUG]The latest qemu crashed when I tested cxl + +I test cxl with the patch:[v11,0/2] arm/virt: + CXL support via pxb_cxl. +https://patchwork.kernel.org/project/cxl/cover/20220616141950.23374-1-Jonathan.Cameron@huawei.com/ +But the qemu crashed,and showing an error: +qemu-system-aarch64: ../hw/arm/virt.c:1735: virt_get_high_memmap_enabled: + Assertion `ARRAY_SIZE(extended_memmap) - VIRT_LOWMEMMAP_LAST == ARRAY_SIZE(enabled_array)' failed. +Then I modify the patch to fix the bug: +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index ea2413a0ba..3d4cee3491 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -1710,6 +1730,7 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState + *vms, +&vms->highmem_redists, +&vms->highmem_ecam, +&vms->highmem_mmio, ++ &vms->cxl_devices_state.is_enabled, +}; +Now qemu works good. +Could you tell me when the patch( +arm/virt: + CXL support via pxb_cxl +) will be merged into upstream? + diff --git a/results/classifier/001/mistranslation/14887122 b/results/classifier/001/mistranslation/14887122 new file mode 100644 index 000000000..f13db3b86 --- /dev/null +++ b/results/classifier/001/mistranslation/14887122 @@ -0,0 +1,258 @@ +mistranslation: 0.930 +semantic: 0.928 +instruction: 0.905 +other: 0.890 + +[BUG][RFC] CPR transfer Issues: Socket permissions and PID files + +Hello, + +While testing CPR transfer I encountered two issues. The first is that the +transfer fails when running with pidfiles due to the destination qemu process +attempting to create the pidfile while it is still locked by the source +process. The second is that the transfer fails when running with the -run-with +user=$USERID parameter. 
This is because the destination qemu process creates +the UNIX sockets used for the CPR transfer before dropping to the lower +permissioned user, which causes them to be owned by the original user. The +source qemu process then does not have permission to connect to it because it +is already running as the lesser permissioned user. + +Reproducing the first issue: + +Create a source and destination qemu instance associated with the same VM where +both processes have the -pidfile parameter passed on the command line. You +should see the following error on the command line of the second process: + +qemu-system-x86_64: cannot create PID file: Cannot lock pid file: Resource +temporarily unavailable + +Reproducing the second issue: + +Create a source and destination qemu instance associated with the same VM where +both processes have -run-with user=$USERID passed on the command line, where +$USERID is a different user from the one launching the processes. Then attempt +a CPR transfer using UNIX sockets for the main and cpr sockets. You should +receive the following error via QMP: +{"error": {"class": "GenericError", "desc": "Failed to connect to 'cpr.sock': +Permission denied"}} + +I provided a minimal patch that works around the second issue. + +Thank you, +Ben Chaney + +--- +include/system/os-posix.h | 4 ++++ +os-posix.c | 8 -------- +util/qemu-sockets.c | 21 +++++++++++++++++++++ +3 files changed, 25 insertions(+), 8 deletions(-) + +diff --git a/include/system/os-posix.h b/include/system/os-posix.h +index ce5b3bccf8..2a414a914a 100644 +--- a/include/system/os-posix.h ++++ b/include/system/os-posix.h +@@ -55,6 +55,10 @@ void os_setup_limits(void); +void os_setup_post(void); +int os_mlock(bool on_fault); + ++extern struct passwd *user_pwd; ++extern uid_t user_uid; ++extern gid_t user_gid; ++ +/** +* qemu_alloc_stack: +* @sz: pointer to a size_t holding the requested usable stack size +diff --git a/os-posix.c b/os-posix.c +index 52925c23d3..9369b312a0 100644 +--- a/os-posix.c ++++ b/os-posix.c +@@ -86,14 +86,6 @@ void os_set_proc_name(const char *s) +} + + +-/* +- * Must set all three of these at once. +- * Legal combinations are unset by name by uid +- */ +-static struct passwd *user_pwd; /* NULL non-NULL NULL */ +-static uid_t user_uid = (uid_t)-1; /* -1 -1 >=0 */ +-static gid_t user_gid = (gid_t)-1; /* -1 -1 >=0 */ +- +/* +* Prepare to change user ID. user_id can be one of 3 forms: +* - a username, in which case user ID will be changed to its uid, +diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c +index 77477c1cd5..987977ead9 100644 +--- a/util/qemu-sockets.c ++++ b/util/qemu-sockets.c +@@ -871,6 +871,14 @@ static bool saddr_is_tight(UnixSocketAddress *saddr) +#endif +} + ++/* ++ * Must set all three of these at once. 
++ * Legal combinations are unset by name by uid ++ */ ++struct passwd *user_pwd; /* NULL non-NULL NULL */ ++uid_t user_uid = (uid_t)-1; /* -1 -1 >=0 */ ++gid_t user_gid = (gid_t)-1; /* -1 -1 >=0 */ ++ +static int unix_listen_saddr(UnixSocketAddress *saddr, +int num, +Error **errp) +@@ -947,6 +955,19 @@ static int unix_listen_saddr(UnixSocketAddress *saddr, +error_setg_errno(errp, errno, "Failed to bind socket to %s", path); +goto err; +} ++ if (user_pwd) { ++ if (chown(un.sun_path, user_pwd->pw_uid, user_pwd->pw_gid) < 0) { ++ error_setg_errno(errp, errno, "Failed to change permissions on socket %s", +path); ++ goto err; ++ } ++ } ++ else if (user_uid != -1 && user_gid != -1) { ++ if (chown(un.sun_path, user_uid, user_gid) < 0) { ++ error_setg_errno(errp, errno, "Failed to change permissions on socket %s", +path); ++ goto err; ++ } ++ } ++ +if (listen(sock, num) < 0) { +error_setg_errno(errp, errno, "Failed to listen on socket"); +goto err; +-- +2.40.1 + +Thank you Ben. I appreciate you testing CPR and shaking out the bugs. +I will study these and propose patches. + +My initial reaction to the pidfile issue is that the orchestration layer must +pass a different filename when starting the destination qemu instance. When +using live update without containers, these types of resource conflicts in the +global namespaces are a known issue. + +- Steve + +On 3/14/2025 2:33 PM, Chaney, Ben wrote: +Hello, + +While testing CPR transfer I encountered two issues. The first is that the +transfer fails when running with pidfiles due to the destination qemu process +attempting to create the pidfile while it is still locked by the source +process. The second is that the transfer fails when running with the -run-with +user=$USERID parameter. This is because the destination qemu process creates +the UNIX sockets used for the CPR transfer before dropping to the lower +permissioned user, which causes them to be owned by the original user. The +source qemu process then does not have permission to connect to it because it +is already running as the lesser permissioned user. + +Reproducing the first issue: + +Create a source and destination qemu instance associated with the same VM where +both processes have the -pidfile parameter passed on the command line. You +should see the following error on the command line of the second process: + +qemu-system-x86_64: cannot create PID file: Cannot lock pid file: Resource +temporarily unavailable + +Reproducing the second issue: + +Create a source and destination qemu instance associated with the same VM where +both processes have -run-with user=$USERID passed on the command line, where +$USERID is a different user from the one launching the processes. Then attempt +a CPR transfer using UNIX sockets for the main and cpr sockets. You should +receive the following error via QMP: +{"error": {"class": "GenericError", "desc": "Failed to connect to 'cpr.sock': +Permission denied"}} + +I provided a minimal patch that works around the second issue. 
+ +Thank you, +Ben Chaney + +--- +include/system/os-posix.h | 4 ++++ +os-posix.c | 8 -------- +util/qemu-sockets.c | 21 +++++++++++++++++++++ +3 files changed, 25 insertions(+), 8 deletions(-) + +diff --git a/include/system/os-posix.h b/include/system/os-posix.h +index ce5b3bccf8..2a414a914a 100644 +--- a/include/system/os-posix.h ++++ b/include/system/os-posix.h +@@ -55,6 +55,10 @@ void os_setup_limits(void); +void os_setup_post(void); +int os_mlock(bool on_fault); + ++extern struct passwd *user_pwd; ++extern uid_t user_uid; ++extern gid_t user_gid; ++ +/** +* qemu_alloc_stack: +* @sz: pointer to a size_t holding the requested usable stack size +diff --git a/os-posix.c b/os-posix.c +index 52925c23d3..9369b312a0 100644 +--- a/os-posix.c ++++ b/os-posix.c +@@ -86,14 +86,6 @@ void os_set_proc_name(const char *s) +} + + +-/* +- * Must set all three of these at once. +- * Legal combinations are unset by name by uid +- */ +-static struct passwd *user_pwd; /* NULL non-NULL NULL */ +-static uid_t user_uid = (uid_t)-1; /* -1 -1 >=0 */ +-static gid_t user_gid = (gid_t)-1; /* -1 -1 >=0 */ +- +/* +* Prepare to change user ID. user_id can be one of 3 forms: +* - a username, in which case user ID will be changed to its uid, +diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c +index 77477c1cd5..987977ead9 100644 +--- a/util/qemu-sockets.c ++++ b/util/qemu-sockets.c +@@ -871,6 +871,14 @@ static bool saddr_is_tight(UnixSocketAddress *saddr) +#endif +} + ++/* ++ * Must set all three of these at once. ++ * Legal combinations are unset by name by uid ++ */ ++struct passwd *user_pwd; /* NULL non-NULL NULL */ ++uid_t user_uid = (uid_t)-1; /* -1 -1 >=0 */ ++gid_t user_gid = (gid_t)-1; /* -1 -1 >=0 */ ++ +static int unix_listen_saddr(UnixSocketAddress *saddr, +int num, +Error **errp) +@@ -947,6 +955,19 @@ static int unix_listen_saddr(UnixSocketAddress *saddr, +error_setg_errno(errp, errno, "Failed to bind socket to %s", path); +goto err; +} ++ if (user_pwd) { ++ if (chown(un.sun_path, user_pwd->pw_uid, user_pwd->pw_gid) < 0) { ++ error_setg_errno(errp, errno, "Failed to change permissions on socket %s", +path); ++ goto err; ++ } ++ } ++ else if (user_uid != -1 && user_gid != -1) { ++ if (chown(un.sun_path, user_uid, user_gid) < 0) { ++ error_setg_errno(errp, errno, "Failed to change permissions on socket %s", +path); ++ goto err; ++ } ++ } ++ +if (listen(sock, num) < 0) { +error_setg_errno(errp, errno, "Failed to listen on socket"); +goto err; +-- +2.40.1 + diff --git a/results/classifier/001/mistranslation/22219210 b/results/classifier/001/mistranslation/22219210 new file mode 100644 index 000000000..95c3f61d1 --- /dev/null +++ b/results/classifier/001/mistranslation/22219210 @@ -0,0 +1,43 @@ +mistranslation: 0.472 +semantic: 0.387 +other: 0.345 +instruction: 0.261 + +[BUG][CPU hot-plug]CPU hot-plugs cause the qemu process to coredump + +Hello,Recently, when I was developing CPU hot-plugs under the loongarch +architecture, +I found that there was a problem with qemu cpu hot-plugs under x86 +architecture, +which caused the qemu process coredump when repeatedly inserting and +unplugging +the CPU when the TCG was accelerated. 
+ + +The specific operation process is as follows: + +1.Use the following command to start the virtual machine + +qemu-system-x86_64 \ +-machine q35  \ +-cpu Broadwell-IBRS \ +-smp 1,maxcpus=4,sockets=4,cores=1,threads=1 \ +-m 4G \ +-drive file=~/anolis-8.8.qcow2  \ +-serial stdio   \ +-monitor telnet:localhost:4498,server,nowait + + +2.Enter QEMU Monitor via telnet for repeated CPU insertion and unplugging + +telnet 127.0.0.1 4498 +(qemu) device_add +Broadwell-IBRS-x86_64-cpu,socket-id=1,core-id=0,thread-id=0,id=cpu1 +(qemu) device_del cpu1 +(qemu) device_add +Broadwell-IBRS-x86_64-cpu,socket-id=1,core-id=0,thread-id=0,id=cpu1 +3.You will notice that the QEMU process has a coredump + +# malloc(): unsorted double linked list corrupted +Aborted (core dumped) + diff --git a/results/classifier/001/mistranslation/23270873 b/results/classifier/001/mistranslation/23270873 new file mode 100644 index 000000000..e4d4789c4 --- /dev/null +++ b/results/classifier/001/mistranslation/23270873 @@ -0,0 +1,692 @@ +mistranslation: 0.881 +other: 0.839 +instruction: 0.755 +semantic: 0.752 + +[Qemu-devel] [BUG?] aio_get_linux_aio: Assertion `ctx->linux_aio' failed + +Hi, + +I am seeing some strange QEMU assertion failures for qemu on s390x, +which prevents a guest from starting. + +Git bisecting points to the following commit as the source of the error. + +commit ed6e2161715c527330f936d44af4c547f25f687e +Author: Nishanth Aravamudan +Date: Fri Jun 22 12:37:00 2018 -0700 + + linux-aio: properly bubble up errors from initialization + + laio_init() can fail for a couple of reasons, which will lead to a NULL + pointer dereference in laio_attach_aio_context(). + + To solve this, add a aio_setup_linux_aio() function which is called + early in raw_open_common. If this fails, propagate the error up. The + signature of aio_get_linux_aio() was not modified, because it seems + preferable to return the actual errno from the possible failing + initialization calls. + + Additionally, when the AioContext changes, we need to associate a + LinuxAioState with the new AioContext. Use the bdrv_attach_aio_context + callback and call the new aio_setup_linux_aio(), which will allocate a +new AioContext if needed, and return errors on failures. If it +fails for +any reason, fallback to threaded AIO with an error message, as the + device is already in-use by the guest. + + Add an assert that aio_get_linux_aio() cannot return NULL. + + Signed-off-by: Nishanth Aravamudan + Message-id: address@hidden + Signed-off-by: Stefan Hajnoczi +Not sure what is causing this assertion to fail. 
Here is the qemu +command line of the guest, from qemu log, which throws this error: +LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin +QEMU_AUDIO_DRV=none /usr/local/bin/qemu-system-s390x -name +guest=rt_vm1,debug-threads=on -S -object +secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-21-rt_vm1/master-key.aes +-machine s390-ccw-virtio-2.12,accel=kvm,usb=off,dump-guest-core=off -m +1024 -realtime mlock=off -smp 4,sockets=4,cores=1,threads=1 -object +iothread,id=iothread1 -uuid 0cde16cd-091d-41bd-9ac2-5243df5c9a0d +-display none -no-user-config -nodefaults -chardev +socket,id=charmonitor,fd=28,server,nowait -mon +chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown +-boot strict=on -drive +file=/dev/mapper/360050763998b0883980000002a000031,format=raw,if=none,id=drive-virtio-disk0,cache=none,aio=native +-device +virtio-blk-ccw,iothread=iothread1,scsi=off,devno=fe.0.0001,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1,write-cache=on +-netdev tap,fd=30,id=hostnet0,vhost=on,vhostfd=31 -device +virtio-net-ccw,netdev=hostnet0,id=net0,mac=02:3a:c8:67:95:84,devno=fe.0.0000 +-netdev tap,fd=32,id=hostnet1,vhost=on,vhostfd=33 -device +virtio-net-ccw,netdev=hostnet1,id=net1,mac=52:54:00:2a:e5:08,devno=fe.0.0002 +-chardev pty,id=charconsole0 -device +sclpconsole,chardev=charconsole0,id=console0 -device +virtio-balloon-ccw,id=balloon0,devno=fe.3.ffba -sandbox +on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny +-msg timestamp=on +2018-07-17 15:48:42.252+0000: Domain id=21 is tainted: high-privileges +2018-07-17T15:48:42.279380Z qemu-system-s390x: -chardev +pty,id=charconsole0: char device redirected to /dev/pts/3 (label +charconsole0) +qemu-system-s390x: util/async.c:339: aio_get_linux_aio: Assertion +`ctx->linux_aio' failed. +2018-07-17 15:48:43.309+0000: shutting down, reason=failed + + +Any help debugging this would be greatly appreciated. + +Thank you +Farhan + +On 17.07.2018 [13:25:53 -0400], Farhan Ali wrote: +> +Hi, +> +> +I am seeing some strange QEMU assertion failures for qemu on s390x, +> +which prevents a guest from starting. +> +> +Git bisecting points to the following commit as the source of the error. +> +> +commit ed6e2161715c527330f936d44af4c547f25f687e +> +Author: Nishanth Aravamudan +> +Date: Fri Jun 22 12:37:00 2018 -0700 +> +> +linux-aio: properly bubble up errors from initialization +> +> +laio_init() can fail for a couple of reasons, which will lead to a NULL +> +pointer dereference in laio_attach_aio_context(). +> +> +To solve this, add a aio_setup_linux_aio() function which is called +> +early in raw_open_common. If this fails, propagate the error up. The +> +signature of aio_get_linux_aio() was not modified, because it seems +> +preferable to return the actual errno from the possible failing +> +initialization calls. +> +> +Additionally, when the AioContext changes, we need to associate a +> +LinuxAioState with the new AioContext. Use the bdrv_attach_aio_context +> +callback and call the new aio_setup_linux_aio(), which will allocate a +> +new AioContext if needed, and return errors on failures. If it fails for +> +any reason, fallback to threaded AIO with an error message, as the +> +device is already in-use by the guest. +> +> +Add an assert that aio_get_linux_aio() cannot return NULL. +> +> +Signed-off-by: Nishanth Aravamudan +> +Message-id: address@hidden +> +Signed-off-by: Stefan Hajnoczi +> +> +> +Not sure what is causing this assertion to fail. 
Here is the qemu command +> +line of the guest, from qemu log, which throws this error: +> +> +> +LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin +> +QEMU_AUDIO_DRV=none /usr/local/bin/qemu-system-s390x -name +> +guest=rt_vm1,debug-threads=on -S -object +> +secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-21-rt_vm1/master-key.aes +> +-machine s390-ccw-virtio-2.12,accel=kvm,usb=off,dump-guest-core=off -m 1024 +> +-realtime mlock=off -smp 4,sockets=4,cores=1,threads=1 -object +> +iothread,id=iothread1 -uuid 0cde16cd-091d-41bd-9ac2-5243df5c9a0d -display +> +none -no-user-config -nodefaults -chardev +> +socket,id=charmonitor,fd=28,server,nowait -mon +> +chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown -boot +> +strict=on -drive +> +file=/dev/mapper/360050763998b0883980000002a000031,format=raw,if=none,id=drive-virtio-disk0,cache=none,aio=native +> +-device +> +virtio-blk-ccw,iothread=iothread1,scsi=off,devno=fe.0.0001,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1,write-cache=on +> +-netdev tap,fd=30,id=hostnet0,vhost=on,vhostfd=31 -device +> +virtio-net-ccw,netdev=hostnet0,id=net0,mac=02:3a:c8:67:95:84,devno=fe.0.0000 +> +-netdev tap,fd=32,id=hostnet1,vhost=on,vhostfd=33 -device +> +virtio-net-ccw,netdev=hostnet1,id=net1,mac=52:54:00:2a:e5:08,devno=fe.0.0002 +> +-chardev pty,id=charconsole0 -device +> +sclpconsole,chardev=charconsole0,id=console0 -device +> +virtio-balloon-ccw,id=balloon0,devno=fe.3.ffba -sandbox +> +on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny -msg +> +timestamp=on +> +> +> +> +2018-07-17 15:48:42.252+0000: Domain id=21 is tainted: high-privileges +> +2018-07-17T15:48:42.279380Z qemu-system-s390x: -chardev pty,id=charconsole0: +> +char device redirected to /dev/pts/3 (label charconsole0) +> +qemu-system-s390x: util/async.c:339: aio_get_linux_aio: Assertion +> +`ctx->linux_aio' failed. +> +2018-07-17 15:48:43.309+0000: shutting down, reason=failed +> +> +> +Any help debugging this would be greatly appreciated. +iiuc, this possibly implies AIO was not actually used previously on this +guest (it might have silently been falling back to threaded IO?). I +don't have access to s390x, but would it be possible to run qemu under +gdb and see if aio_setup_linux_aio is being called at all (I think it +might not be, but I'm not sure why), and if so, if it's for the context +in question? + +If it's not being called first, could you see what callpath is calling +aio_get_linux_aio when this assertion trips? + +Thanks! +-Nish + +On 07/17/2018 04:52 PM, Nishanth Aravamudan wrote: +iiuc, this possibly implies AIO was not actually used previously on this +guest (it might have silently been falling back to threaded IO?). I +don't have access to s390x, but would it be possible to run qemu under +gdb and see if aio_setup_linux_aio is being called at all (I think it +might not be, but I'm not sure why), and if so, if it's for the context +in question? + +If it's not being called first, could you see what callpath is calling +aio_get_linux_aio when this assertion trips? + +Thanks! 
+-Nish +Hi Nishant, +From the coredump of the guest this is the call trace that calls +aio_get_linux_aio: +Stack trace of thread 145158: +#0 0x000003ff94dbe274 raise (libc.so.6) +#1 0x000003ff94da39a8 abort (libc.so.6) +#2 0x000003ff94db62ce __assert_fail_base (libc.so.6) +#3 0x000003ff94db634c __assert_fail (libc.so.6) +#4 0x000002aa20db067a aio_get_linux_aio (qemu-system-s390x) +#5 0x000002aa20d229a8 raw_aio_plug (qemu-system-s390x) +#6 0x000002aa20d309ee bdrv_io_plug (qemu-system-s390x) +#7 0x000002aa20b5a8ea virtio_blk_handle_vq (qemu-system-s390x) +#8 0x000002aa20db2f6e aio_dispatch_handlers (qemu-system-s390x) +#9 0x000002aa20db3c34 aio_poll (qemu-system-s390x) +#10 0x000002aa20be32a2 iothread_run (qemu-system-s390x) +#11 0x000003ff94f879a8 start_thread (libpthread.so.0) +#12 0x000003ff94e797ee thread_start (libc.so.6) + + +Thanks for taking a look and responding. + +Thanks +Farhan + +On 07/18/2018 09:42 AM, Farhan Ali wrote: +On 07/17/2018 04:52 PM, Nishanth Aravamudan wrote: +iiuc, this possibly implies AIO was not actually used previously on this +guest (it might have silently been falling back to threaded IO?). I +don't have access to s390x, but would it be possible to run qemu under +gdb and see if aio_setup_linux_aio is being called at all (I think it +might not be, but I'm not sure why), and if so, if it's for the context +in question? + +If it's not being called first, could you see what callpath is calling +aio_get_linux_aio when this assertion trips? + +Thanks! +-Nish +Hi Nishant, +From the coredump of the guest this is the call trace that calls +aio_get_linux_aio: +Stack trace of thread 145158: +#0  0x000003ff94dbe274 raise (libc.so.6) +#1  0x000003ff94da39a8 abort (libc.so.6) +#2  0x000003ff94db62ce __assert_fail_base (libc.so.6) +#3  0x000003ff94db634c __assert_fail (libc.so.6) +#4  0x000002aa20db067a aio_get_linux_aio (qemu-system-s390x) +#5  0x000002aa20d229a8 raw_aio_plug (qemu-system-s390x) +#6  0x000002aa20d309ee bdrv_io_plug (qemu-system-s390x) +#7  0x000002aa20b5a8ea virtio_blk_handle_vq (qemu-system-s390x) +#8  0x000002aa20db2f6e aio_dispatch_handlers (qemu-system-s390x) +#9  0x000002aa20db3c34 aio_poll (qemu-system-s390x) +#10 0x000002aa20be32a2 iothread_run (qemu-system-s390x) +#11 0x000003ff94f879a8 start_thread (libpthread.so.0) +#12 0x000003ff94e797ee thread_start (libc.so.6) + + +Thanks for taking a look and responding. + +Thanks +Farhan +Trying to debug a little further, the block device in this case is a +"host device". And looking at your commit carefully you use the +bdrv_attach_aio_context callback to setup a Linux AioContext. +For some reason the "host device" struct (BlockDriver bdrv_host_device +in block/file-posix.c) does not have a bdrv_attach_aio_context defined. +So a simple change of adding the callback to the struct solves the issue +and the guest starts fine. +diff --git a/block/file-posix.c b/block/file-posix.c +index 28824aa..b8d59fb 100644 +--- a/block/file-posix.c ++++ b/block/file-posix.c +@@ -3135,6 +3135,7 @@ static BlockDriver bdrv_host_device = { + .bdrv_refresh_limits = raw_refresh_limits, + .bdrv_io_plug = raw_aio_plug, + .bdrv_io_unplug = raw_aio_unplug, ++ .bdrv_attach_aio_context = raw_aio_attach_aio_context, + + .bdrv_co_truncate = raw_co_truncate, + .bdrv_getlength = raw_getlength, +I am not too familiar with block device code in QEMU, so not sure if +this is the right fix or if there are some underlying problems. 
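+A quick way to check whether any other BlockDriver in that file wires up the
+AIO plug callbacks but not the attach hook is a simple grep over the struct
+initialisers, for example:
+
+git grep -n -e 'bdrv_io_plug' -e 'bdrv_attach_aio_context' block/file-posix.c
+
+(bdrv_host_cdrom looks like it may be in the same situation as
+bdrv_host_device, but I have not tested that path.)
+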
+Thanks +Farhan + +On 18.07.2018 [11:10:27 -0400], Farhan Ali wrote: +> +> +> +On 07/18/2018 09:42 AM, Farhan Ali wrote: +> +> +> +> +> +> On 07/17/2018 04:52 PM, Nishanth Aravamudan wrote: +> +> > iiuc, this possibly implies AIO was not actually used previously on this +> +> > guest (it might have silently been falling back to threaded IO?). I +> +> > don't have access to s390x, but would it be possible to run qemu under +> +> > gdb and see if aio_setup_linux_aio is being called at all (I think it +> +> > might not be, but I'm not sure why), and if so, if it's for the context +> +> > in question? +> +> > +> +> > If it's not being called first, could you see what callpath is calling +> +> > aio_get_linux_aio when this assertion trips? +> +> > +> +> > Thanks! +> +> > -Nish +> +> +> +> +> +> Hi Nishant, +> +> +> +> From the coredump of the guest this is the call trace that calls +> +> aio_get_linux_aio: +> +> +> +> +> +> Stack trace of thread 145158: +> +> #0  0x000003ff94dbe274 raise (libc.so.6) +> +> #1  0x000003ff94da39a8 abort (libc.so.6) +> +> #2  0x000003ff94db62ce __assert_fail_base (libc.so.6) +> +> #3  0x000003ff94db634c __assert_fail (libc.so.6) +> +> #4  0x000002aa20db067a aio_get_linux_aio (qemu-system-s390x) +> +> #5  0x000002aa20d229a8 raw_aio_plug (qemu-system-s390x) +> +> #6  0x000002aa20d309ee bdrv_io_plug (qemu-system-s390x) +> +> #7  0x000002aa20b5a8ea virtio_blk_handle_vq (qemu-system-s390x) +> +> #8  0x000002aa20db2f6e aio_dispatch_handlers (qemu-system-s390x) +> +> #9  0x000002aa20db3c34 aio_poll (qemu-system-s390x) +> +> #10 0x000002aa20be32a2 iothread_run (qemu-system-s390x) +> +> #11 0x000003ff94f879a8 start_thread (libpthread.so.0) +> +> #12 0x000003ff94e797ee thread_start (libc.so.6) +> +> +> +> +> +> Thanks for taking a look and responding. +> +> +> +> Thanks +> +> Farhan +> +> +> +> +> +> +> +> +Trying to debug a little further, the block device in this case is a "host +> +device". And looking at your commit carefully you use the +> +bdrv_attach_aio_context callback to setup a Linux AioContext. +> +> +For some reason the "host device" struct (BlockDriver bdrv_host_device in +> +block/file-posix.c) does not have a bdrv_attach_aio_context defined. +> +So a simple change of adding the callback to the struct solves the issue and +> +the guest starts fine. +> +> +> +diff --git a/block/file-posix.c b/block/file-posix.c +> +index 28824aa..b8d59fb 100644 +> +--- a/block/file-posix.c +> ++++ b/block/file-posix.c +> +@@ -3135,6 +3135,7 @@ static BlockDriver bdrv_host_device = { +> +.bdrv_refresh_limits = raw_refresh_limits, +> +.bdrv_io_plug = raw_aio_plug, +> +.bdrv_io_unplug = raw_aio_unplug, +> ++ .bdrv_attach_aio_context = raw_aio_attach_aio_context, +> +> +.bdrv_co_truncate = raw_co_truncate, +> +.bdrv_getlength = raw_getlength, +> +> +> +> +I am not too familiar with block device code in QEMU, so not sure if +> +this is the right fix or if there are some underlying problems. +Oh this is quite embarassing! I only added the bdrv_attach_aio_context +callback for the file-backed device. Your fix is definitely corect for +host device. Let me make sure there weren't any others missed and I will +send out a properly formatted patch. Thank you for the quick testing and +turnaround! 
+ +-Nish + +On 07/18/2018 08:52 PM, Nishanth Aravamudan wrote: +> +On 18.07.2018 [11:10:27 -0400], Farhan Ali wrote: +> +> +> +> +> +> On 07/18/2018 09:42 AM, Farhan Ali wrote: +> +>> +> +>> +> +>> On 07/17/2018 04:52 PM, Nishanth Aravamudan wrote: +> +>>> iiuc, this possibly implies AIO was not actually used previously on this +> +>>> guest (it might have silently been falling back to threaded IO?). I +> +>>> don't have access to s390x, but would it be possible to run qemu under +> +>>> gdb and see if aio_setup_linux_aio is being called at all (I think it +> +>>> might not be, but I'm not sure why), and if so, if it's for the context +> +>>> in question? +> +>>> +> +>>> If it's not being called first, could you see what callpath is calling +> +>>> aio_get_linux_aio when this assertion trips? +> +>>> +> +>>> Thanks! +> +>>> -Nish +> +>> +> +>> +> +>> Hi Nishant, +> +>> +> +>> From the coredump of the guest this is the call trace that calls +> +>> aio_get_linux_aio: +> +>> +> +>> +> +>> Stack trace of thread 145158: +> +>> #0  0x000003ff94dbe274 raise (libc.so.6) +> +>> #1  0x000003ff94da39a8 abort (libc.so.6) +> +>> #2  0x000003ff94db62ce __assert_fail_base (libc.so.6) +> +>> #3  0x000003ff94db634c __assert_fail (libc.so.6) +> +>> #4  0x000002aa20db067a aio_get_linux_aio (qemu-system-s390x) +> +>> #5  0x000002aa20d229a8 raw_aio_plug (qemu-system-s390x) +> +>> #6  0x000002aa20d309ee bdrv_io_plug (qemu-system-s390x) +> +>> #7  0x000002aa20b5a8ea virtio_blk_handle_vq (qemu-system-s390x) +> +>> #8  0x000002aa20db2f6e aio_dispatch_handlers (qemu-system-s390x) +> +>> #9  0x000002aa20db3c34 aio_poll (qemu-system-s390x) +> +>> #10 0x000002aa20be32a2 iothread_run (qemu-system-s390x) +> +>> #11 0x000003ff94f879a8 start_thread (libpthread.so.0) +> +>> #12 0x000003ff94e797ee thread_start (libc.so.6) +> +>> +> +>> +> +>> Thanks for taking a look and responding. +> +>> +> +>> Thanks +> +>> Farhan +> +>> +> +>> +> +>> +> +> +> +> Trying to debug a little further, the block device in this case is a "host +> +> device". And looking at your commit carefully you use the +> +> bdrv_attach_aio_context callback to setup a Linux AioContext. +> +> +> +> For some reason the "host device" struct (BlockDriver bdrv_host_device in +> +> block/file-posix.c) does not have a bdrv_attach_aio_context defined. +> +> So a simple change of adding the callback to the struct solves the issue and +> +> the guest starts fine. +> +> +> +> +> +> diff --git a/block/file-posix.c b/block/file-posix.c +> +> index 28824aa..b8d59fb 100644 +> +> --- a/block/file-posix.c +> +> +++ b/block/file-posix.c +> +> @@ -3135,6 +3135,7 @@ static BlockDriver bdrv_host_device = { +> +> .bdrv_refresh_limits = raw_refresh_limits, +> +> .bdrv_io_plug = raw_aio_plug, +> +> .bdrv_io_unplug = raw_aio_unplug, +> +> + .bdrv_attach_aio_context = raw_aio_attach_aio_context, +> +> +> +> .bdrv_co_truncate = raw_co_truncate, +> +> .bdrv_getlength = raw_getlength, +> +> +> +> +> +> +> +> I am not too familiar with block device code in QEMU, so not sure if +> +> this is the right fix or if there are some underlying problems. +> +> +Oh this is quite embarassing! I only added the bdrv_attach_aio_context +> +callback for the file-backed device. Your fix is definitely corect for +> +host device. Let me make sure there weren't any others missed and I will +> +send out a properly formatted patch. Thank you for the quick testing and +> +turnaround! +Farhan, can you respin your patch with proper sign-off and patch description? +Adding qemu-block. 
+ +Hi Christian, + +On 19.07.2018 [08:55:20 +0200], Christian Borntraeger wrote: +> +> +> +On 07/18/2018 08:52 PM, Nishanth Aravamudan wrote: +> +> On 18.07.2018 [11:10:27 -0400], Farhan Ali wrote: +> +>> +> +>> +> +>> On 07/18/2018 09:42 AM, Farhan Ali wrote: + + +> +>> I am not too familiar with block device code in QEMU, so not sure if +> +>> this is the right fix or if there are some underlying problems. +> +> +> +> Oh this is quite embarassing! I only added the bdrv_attach_aio_context +> +> callback for the file-backed device. Your fix is definitely corect for +> +> host device. Let me make sure there weren't any others missed and I will +> +> send out a properly formatted patch. Thank you for the quick testing and +> +> turnaround! +> +> +Farhan, can you respin your patch with proper sign-off and patch description? +> +Adding qemu-block. +I sent it yesterday, sorry I didn't cc everyone from this e-mail: +http://lists.nongnu.org/archive/html/qemu-block/2018-07/msg00516.html +Thanks, +Nish + diff --git a/results/classifier/001/mistranslation/24930826 b/results/classifier/001/mistranslation/24930826 new file mode 100644 index 000000000..5f79c452f --- /dev/null +++ b/results/classifier/001/mistranslation/24930826 @@ -0,0 +1,33 @@ +mistranslation: 0.637 +instruction: 0.555 +other: 0.535 +semantic: 0.487 + +[Qemu-devel] [BUG] vhost-user: hot-unplug vhost-user nic for windows guest OS will fail with 100% reproduce rate + +Hi, guys + +I met a problem when hot-unplug vhost-user nic for Windows 2008 rc2 sp1 64 +(Guest OS) + +The xml of nic is as followed: + + + + + + +
+ + +Firstly, I use virsh attach-device win2008 vif.xml to hot-plug a nic for Guest +OS. This operation returns success. +After guest OS discover nic successfully, I use virsh detach-device win2008 +vif.xml to hot-unplug it. This operation will fail with 100% reproduce rate. + +However, if I hot-plug and hot-unplug virtio-net nic , it will not fail. + +I have analysis the process of qmp_device_del , I found that qemu have inject +interrupt to acpi to let it notice guest OS to remove nic. +I guess there is something wrong in Windows when handle the interrupt. + diff --git a/results/classifier/001/mistranslation/25842545 b/results/classifier/001/mistranslation/25842545 new file mode 100644 index 000000000..1ebfe2889 --- /dev/null +++ b/results/classifier/001/mistranslation/25842545 @@ -0,0 +1,202 @@ +mistranslation: 0.928 +other: 0.912 +instruction: 0.835 +semantic: 0.829 + +[Qemu-devel] [Bug?] Guest pause because VMPTRLD failed in KVM + +Hello, + + We encountered a problem that a guest paused because the KMOD report VMPTRLD +failed. + +The related information is as follows: + +1) Qemu command: + /usr/bin/qemu-kvm -name omu1 -S -machine pc-i440fx-2.3,accel=kvm,usb=off -cpu +host -m 15625 -realtime mlock=off -smp 8,sockets=1,cores=8,threads=1 -uuid +a2aacfff-6583-48b4-b6a4-e6830e519931 -no-user-config -nodefaults -chardev +socket,id=charmonitor,path=/var/lib/libvirt/qemu/omu1.monitor,server,nowait +-mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown +-boot strict=on -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device +virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x5 -drive +file=/home/env/guest1.qcow2,if=none,id=drive-virtio-disk0,format=qcow2,cache=none,aio=native + -device +virtio-blk-pci,scsi=off,bus=pci.0,addr=0x6,drive=drive-virtio-disk0,id=virtio-disk0 + -drive +file=/home/env/guest_300G.img,if=none,id=drive-virtio-disk1,format=raw,cache=none,aio=native + -device +virtio-blk-pci,scsi=off,bus=pci.0,addr=0x7,drive=drive-virtio-disk1,id=virtio-disk1 + -netdev tap,fd=25,id=hostnet0,vhost=on,vhostfd=26 -device +virtio-net-pci,netdev=hostnet0,id=net0,mac=00:00:80:05:00:00,bus=pci.0,addr=0x3 +-netdev tap,fd=27,id=hostnet1,vhost=on,vhostfd=28 -device +virtio-net-pci,netdev=hostnet1,id=net1,mac=00:00:80:05:00:01,bus=pci.0,addr=0x4 +-chardev pty,id=charserial0 -device isa-serial,chardev=charserial0,id=serial0 +-device usb-tablet,id=input0 -vnc 0.0.0.0:0 -device +cirrus-vga,id=video0,vgamem_mb=16,bus=pci.0,addr=0x2 -device +virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x8 -msg timestamp=on + + 2) Qemu log: + KVM: entry failed, hardware error 0x4 + RAX=00000000ffffffed RBX=ffff8803fa2d7fd8 RCX=0100000000000000 +RDX=0000000000000000 + RSI=0000000000000000 RDI=0000000000000046 RBP=ffff8803fa2d7e90 +RSP=ffff8803fa2efe90 + R8 =0000000000000000 R9 =0000000000000000 R10=0000000000000000 +R11=000000000000b69a + R12=0000000000000001 R13=ffffffff81a25b40 R14=0000000000000000 +R15=ffff8803fa2d7fd8 + RIP=ffffffff81053e16 RFL=00000286 [--S--P-] CPL=0 II=0 A20=1 SMM=0 HLT=0 + ES =0000 0000000000000000 ffffffff 00c00000 + CS =0010 0000000000000000 ffffffff 00a09b00 DPL=0 CS64 [-RA] + SS =0018 0000000000000000 ffffffff 00c09300 DPL=0 DS [-WA] + DS =0000 0000000000000000 ffffffff 00c00000 + FS =0000 0000000000000000 ffffffff 00c00000 + GS =0000 ffff88040f540000 ffffffff 00c00000 + LDT=0000 0000000000000000 ffffffff 00c00000 + TR =0040 ffff88040f550a40 00002087 00008b00 DPL=0 TSS64-busy + GDT= ffff88040f549000 0000007f + IDT= ffffffffff529000 00000fff + CR0=80050033 
CR2=00007f81ca0c5000 CR3=00000003f5081000 CR4=000407e0 + DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 +DR3=0000000000000000 + DR6=00000000ffff0ff0 DR7=0000000000000400 + EFER=0000000000000d01 + Code=?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? +?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? + + 3) Demsg + [347315.028339] kvm: vmptrld ffff8817ec5f0000/17ec5f0000 failed + klogd 1.4.1, ---------- state change ---------- + [347315.039506] kvm: vmptrld ffff8817ec5f0000/17ec5f0000 failed + [347315.051728] kvm: vmptrld ffff8817ec5f0000/17ec5f0000 failed + [347315.057472] vmwrite error: reg 6c0a value ffff88307e66e480 (err +2120672384) + [347315.064567] Pid: 69523, comm: qemu-kvm Tainted: GF X +3.0.93-0.8-default #1 + [347315.064569] Call Trace: + [347315.064587] [] dump_trace+0x75/0x300 + [347315.064595] [] dump_stack+0x69/0x6f + [347315.064617] [] vmx_vcpu_load+0x11e/0x1d0 [kvm_intel] + [347315.064647] [] kvm_arch_vcpu_load+0x44/0x1d0 [kvm] + [347315.064669] [] finish_task_switch+0x81/0xe0 + [347315.064676] [] thread_return+0x3b/0x2a7 + [347315.064687] [] kvm_vcpu_block+0x65/0xa0 [kvm] + [347315.064703] [] __vcpu_run+0xd1/0x260 [kvm] + [347315.064732] [] kvm_arch_vcpu_ioctl_run+0x68/0x1a0 +[kvm] + [347315.064759] [] kvm_vcpu_ioctl+0x38e/0x580 [kvm] + [347315.064771] [] do_vfs_ioctl+0x8b/0x3b0 + [347315.064776] [] sys_ioctl+0xa1/0xb0 + [347315.064783] [] system_call_fastpath+0x16/0x1b + [347315.064797] [<00007fee51969ce7>] 0x7fee51969ce6 + [347315.064799] vmwrite error: reg 6c0c value ffff88307e664000 (err +2120630272) + [347315.064802] Pid: 69523, comm: qemu-kvm Tainted: GF X +3.0.93-0.8-default #1 + [347315.064803] Call Trace: + [347315.064807] [] dump_trace+0x75/0x300 + [347315.064811] [] dump_stack+0x69/0x6f + [347315.064817] [] vmx_vcpu_load+0x12c/0x1d0 [kvm_intel] + [347315.064832] [] kvm_arch_vcpu_load+0x44/0x1d0 [kvm] + [347315.064851] [] finish_task_switch+0x81/0xe0 + [347315.064855] [] thread_return+0x3b/0x2a7 + [347315.064865] [] kvm_vcpu_block+0x65/0xa0 [kvm] + [347315.064880] [] __vcpu_run+0xd1/0x260 [kvm] + [347315.064907] [] kvm_arch_vcpu_ioctl_run+0x68/0x1a0 +[kvm] + [347315.064933] [] kvm_vcpu_ioctl+0x38e/0x580 [kvm] + [347315.064943] [] do_vfs_ioctl+0x8b/0x3b0 + [347315.064947] [] sys_ioctl+0xa1/0xb0 + [347315.064951] [] system_call_fastpath+0x16/0x1b + [347315.064957] [<00007fee51969ce7>] 0x7fee51969ce6 + [347315.064959] vmwrite error: reg 6c10 value 0 (err 0) + + 4) The isssue can't be reporduced. I search the Intel VMX sepc about reaseons +of vmptrld failure: + The instruction fails if its operand is not properly aligned, sets +unsupported physical-address bits, or is equal to the VMXON + pointer. In addition, the instruction fails if the 32 bits in memory +referenced by the operand do not match the VMCS + revision identifier supported by this processor. + + But I can't find any cues from the KVM source code. It seems each + error conditions is impossible in theory. :( + +Any suggestions will be appreciated! Paolo? + +-- +Regards, +-Gonglei + +On 10/11/2016 15:10, gong lei wrote: +> +4) The isssue can't be reporduced. I search the Intel VMX sepc about +> +reaseons +> +of vmptrld failure: +> +The instruction fails if its operand is not properly aligned, sets +> +unsupported physical-address bits, or is equal to the VMXON +> +pointer. In addition, the instruction fails if the 32 bits in memory +> +referenced by the operand do not match the VMCS +> +revision identifier supported by this processor. 
+> +> +But I can't find any cues from the KVM source code. It seems each +> +error conditions is impossible in theory. :( +Yes, it should not happen. :( + +If it's not reproducible, it's really hard to say what it was, except a +random memory corruption elsewhere or even a bit flip (!). + +Paolo + +On 2016/11/17 20:39, Paolo Bonzini wrote: +> +> +On 10/11/2016 15:10, gong lei wrote: +> +> 4) The isssue can't be reporduced. I search the Intel VMX sepc about +> +> reaseons +> +> of vmptrld failure: +> +> The instruction fails if its operand is not properly aligned, sets +> +> unsupported physical-address bits, or is equal to the VMXON +> +> pointer. In addition, the instruction fails if the 32 bits in memory +> +> referenced by the operand do not match the VMCS +> +> revision identifier supported by this processor. +> +> +> +> But I can't find any cues from the KVM source code. It seems each +> +> error conditions is impossible in theory. :( +> +Yes, it should not happen. :( +> +> +If it's not reproducible, it's really hard to say what it was, except a +> +random memory corruption elsewhere or even a bit flip (!). +> +> +Paolo +Thanks for your reply, Paolo :) + +-- +Regards, +-Gonglei + diff --git a/results/classifier/001/mistranslation/26430026 b/results/classifier/001/mistranslation/26430026 new file mode 100644 index 000000000..ead1f32fd --- /dev/null +++ b/results/classifier/001/mistranslation/26430026 @@ -0,0 +1,165 @@ +mistranslation: 0.915 +semantic: 0.904 +instruction: 0.888 +other: 0.813 + +[BUG] cxl,i386: e820 mappings may not be correct for cxl + +Context included below from prior discussion + - `cxl create-region` would fail on inability to allocate memory + - traced this down to the memory region being marked RESERVED + - E820 map marks the CXL fixed memory window as RESERVED + + +Re: x86 errors, I found that region worked with this patch. (I also +added the SRAT patches the Davidlohr posted, but I do not think they are +relevant). + +I don't think this is correct, and setting this to E820_RAM causes the +system to fail to boot at all, but with this change `cxl create-region` +succeeds, which suggests our e820 mappings in the i386 machine are +incorrect. + +Anyone who can help or have an idea as to what e820 should actually be +doing with this region, or if this is correct and something else is +failing, please help! + + +diff --git a/hw/i386/pc.c b/hw/i386/pc.c +index 566accf7e6..a5e688a742 100644 +--- a/hw/i386/pc.c ++++ b/hw/i386/pc.c +@@ -1077,7 +1077,7 @@ void pc_memory_init(PCMachineState *pcms, + memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw, + "cxl-fixed-memory-region", fw->size); + memory_region_add_subregion(system_memory, fw->base, &fw->mr); +- e820_add_entry(fw->base, fw->size, E820_RESERVED); ++ e820_add_entry(fw->base, fw->size, E820_NVS); + cxl_fmw_base += fw->size; + cxl_resv_end = cxl_fmw_base; + } + + +On Mon, Oct 10, 2022 at 05:32:42PM +0100, Jonathan Cameron wrote: +> +> +> > but i'm not sure of what to do with this info. We have some proof +> +> > that real hardware works with this no problem, and the only difference +> +> > is that the EFI/bios/firmware is setting the memory regions as `usable` +> +> > or `soft reserved`, which would imply the EDK2 is the blocker here +> +> > regardless of the OS driver status. +> +> > +> +> > But I'd seen elsewhere you had gotten some of this working, and I'm +> +> > failing to get anything working at the moment. If you have any input i +> +> > would greatly appreciate the help. 
+> +> > +> +> > QEMU config: +> +> > +> +> > /opt/qemu-cxl2/bin/qemu-system-x86_64 \ +> +> > -drive +> +> > file=/var/lib/libvirt/images/cxl.qcow2,format=qcow2,index=0,media=d\ +> +> > -m 2G,slots=4,maxmem=4G \ +> +> > -smp 4 \ +> +> > -machine type=q35,accel=kvm,cxl=on \ +> +> > -enable-kvm \ +> +> > -nographic \ +> +> > -device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 \ +> +> > -device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 \ +> +> > -object memory-backend-file,id=cxl-mem0,mem-path=/tmp/cxl-mem0,size=256M \ +> +> > -object memory-backend-file,id=lsa0,mem-path=/tmp/cxl-lsa0,size=256M \ +> +> > -device cxl-type3,bus=rp0,pmem=true,memdev=cxl-mem0,lsa=lsa0,id=cxl-pmem0 +> +> > \ +> +> > -M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.size=256M +> +> > +> +> > I'd seen on the lists that you had seen issues with single-rp setups, +> +> > but no combination of configuration I've tried (including all the ones +> +> > in the docs and tests) lead to a successful region creation with +> +> > `cxl create-region` +> +> +> +> Hmm. Let me have a play. I've not run x86 tests for a while so +> +> perhaps something is missing there. +> +> +> +> I'm carrying a patch to override check_last_peer() in +> +> cxl_port_setup_targets() as that is wrong for some combinations, +> +> but that doesn't look like it's related to what you are seeing. +> +> +I'm not sure if it's relevant, but turned out I'd forgotten I'm carrying 3 +> +patches that aren't upstream (and one is a horrible hack). +> +> +Hack: +https://lore.kernel.org/linux-cxl/20220819094655.000005ed@huawei.com/ +> +Shouldn't affect a simple case like this... +> +> +https://lore.kernel.org/linux-cxl/20220819093133.00006c22@huawei.com/T/#t +> +(Dan's version) +> +> +https://lore.kernel.org/linux-cxl/20220815154044.24733-1-Jonathan.Cameron@huawei.com/T/#t +> +> +For writes to work you will currently need two rps (nothing on the second is +> +fine) +> +as we still haven't resolved if the kernel should support an HDM decoder on +> +a host bridge with one port. I think it should (Spec allows it), others +> +unconvinced. +> +> +Note I haven't shifted over to x86 yet so may still be something different +> +from +> +arm64. 
+> +> +Jonathan +> +> + diff --git a/results/classifier/001/mistranslation/36568044 b/results/classifier/001/mistranslation/36568044 new file mode 100644 index 000000000..719c03c74 --- /dev/null +++ b/results/classifier/001/mistranslation/36568044 @@ -0,0 +1,4581 @@ +mistranslation: 0.962 +instruction: 0.930 +other: 0.930 +semantic: 0.923 + +[BUG, RFC] cpr-transfer: qxl guest driver crashes after migration + +Hi all, + +We've been experimenting with cpr-transfer migration mode recently and +have discovered the following issue with the guest QXL driver: + +Run migration source: +> +EMULATOR=/path/to/emulator +> +ROOTFS=/path/to/image +> +QMPSOCK=/var/run/alma8qmp-src.sock +> +> +$EMULATOR -enable-kvm \ +> +-machine q35 \ +> +-cpu host -smp 2 -m 2G \ +> +-object +> +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ram0,share=on\ +> +-machine memory-backend=ram0 \ +> +-machine aux-ram-share=on \ +> +-drive file=$ROOTFS,media=disk,if=virtio \ +> +-qmp unix:$QMPSOCK,server=on,wait=off \ +> +-nographic \ +> +-device qxl-vga +Run migration target: +> +EMULATOR=/path/to/emulator +> +ROOTFS=/path/to/image +> +QMPSOCK=/var/run/alma8qmp-dst.sock +> +> +> +> +$EMULATOR -enable-kvm \ +> +-machine q35 \ +> +-cpu host -smp 2 -m 2G \ +> +-object +> +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ram0,share=on\ +> +-machine memory-backend=ram0 \ +> +-machine aux-ram-share=on \ +> +-drive file=$ROOTFS,media=disk,if=virtio \ +> +-qmp unix:$QMPSOCK,server=on,wait=off \ +> +-nographic \ +> +-device qxl-vga \ +> +-incoming tcp:0:44444 \ +> +-incoming '{"channel-type": "cpr", "addr": { "transport": "socket", +> +"type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +> +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +> +QMPSOCK=/var/run/alma8qmp-src.sock +> +> +$QMPSHELL -p $QMPSOCK < +migrate-set-parameters mode=cpr-transfer +> +migrate +> +channels=[{"channel-type":"main","addr":{"transport":"socket","type":"inet","host":"0","port":"44444"}},{"channel-type":"cpr","addr":{"transport":"socket","type":"unix","path":"/var/run/alma8cpr-dst.sock"}}] +> +EOF +Then, after a while, QXL guest driver on target crashes spewing the +following messages: +> +[ 73.962002] [TTM] Buffer eviction failed +> +[ 73.962072] qxl 0000:00:02.0: object_init failed for (3149824, 0x00000001) +> +[ 73.962081] [drm:qxl_alloc_bo_reserved [qxl]] *ERROR* failed to allocate +> +VRAM BO +That seems to be a known kernel QXL driver bug: +https://lore.kernel.org/all/20220907094423.93581-1-min_halo@163.com/T/ +https://lore.kernel.org/lkml/ZTgydqRlK6WX_b29@eldamar.lan/ +(the latter discussion contains that reproduce script which speeds up +the crash in the guest): +> +#!/bin/bash +> +> +chvt 3 +> +> +for j in $(seq 80); do +> +echo "$(date) starting round $j" +> +if [ "$(journalctl --boot | grep "failed to allocate VRAM BO")" != "" +> +]; then +> +echo "bug was reproduced after $j tries" +> +exit 1 +> +fi +> +for i in $(seq 100); do +> +dmesg > /dev/tty3 +> +done +> +done +> +> +echo "bug could not be reproduced" +> +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM. 
However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into this? Any +suggestions would be appreciated. Thanks! + +Andrey + +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ + -machine q35 \ + -cpu host -smp 2 -m 2G \ + -object +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ram0,share=on\ + -machine memory-backend=ram0 \ + -machine aux-ram-share=on \ + -drive file=$ROOTFS,media=disk,if=virtio \ + -qmp unix:$QMPSOCK,server=on,wait=off \ + -nographic \ + -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +-machine q35 \ + -cpu host -smp 2 -m 2G \ + -object +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ram0,share=on\ + -machine memory-backend=ram0 \ + -machine aux-ram-share=on \ + -drive file=$ROOTFS,media=disk,if=virtio \ + -qmp unix:$QMPSOCK,server=on,wait=off \ + -nographic \ + -device qxl-vga \ + -incoming tcp:0:44444 \ + -incoming '{"channel-type": "cpr", "addr": { "transport": "socket", "type": "unix", +"path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 + done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM. However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into this? Any +suggestions would be appreciated. Thanks! +Possibly some memory region created by qxl is not being preserved. 
+Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' + +- Steve + +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +     -machine q35 \ +     -cpu host -smp 2 -m 2G \ +     -object +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ram0,share=on\ +     -machine memory-backend=ram0 \ +     -machine aux-ram-share=on \ +     -drive file=$ROOTFS,media=disk,if=virtio \ +     -qmp unix:$QMPSOCK,server=on,wait=off \ +     -nographic \ +     -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +     -machine q35 \ +     -cpu host -smp 2 -m 2G \ +     -object +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ram0,share=on\ +     -machine memory-backend=ram0 \ +     -machine aux-ram-share=on \ +     -drive file=$ROOTFS,media=disk,if=virtio \ +     -qmp unix:$QMPSOCK,server=on,wait=off \ +     -nographic \ +     -device qxl-vga \ +     -incoming tcp:0:44444 \ +     -incoming '{"channel-type": "cpr", "addr": { "transport": "socket", "type": "unix", +"path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +         done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr. A message is printed at migration start time. 
+1740667681-257312-1-git-send-email-steven.sistare@oracle.com +/">https://lore.kernel.org/qemu-devel/ +1740667681-257312-1-git-send-email-steven.sistare@oracle.com +/ +- Steve + +On 2/28/25 8:20 PM, Steven Sistare wrote: +> +On 2/28/2025 1:13 PM, Steven Sistare wrote: +> +> On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +> +>> Hi all, +> +>> +> +>> We've been experimenting with cpr-transfer migration mode recently and +> +>> have discovered the following issue with the guest QXL driver: +> +>> +> +>> Run migration source: +> +>>> EMULATOR=/path/to/emulator +> +>>> ROOTFS=/path/to/image +> +>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>> +> +>>> $EMULATOR -enable-kvm \ +> +>>>      -machine q35 \ +> +>>>      -cpu host -smp 2 -m 2G \ +> +>>>      -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +> +>>> ram0,share=on\ +> +>>>      -machine memory-backend=ram0 \ +> +>>>      -machine aux-ram-share=on \ +> +>>>      -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>      -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>      -nographic \ +> +>>>      -device qxl-vga +> +>> +> +>> Run migration target: +> +>>> EMULATOR=/path/to/emulator +> +>>> ROOTFS=/path/to/image +> +>>> QMPSOCK=/var/run/alma8qmp-dst.sock +> +>>> $EMULATOR -enable-kvm \ +> +>>>      -machine q35 \ +> +>>>      -cpu host -smp 2 -m 2G \ +> +>>>      -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +> +>>> ram0,share=on\ +> +>>>      -machine memory-backend=ram0 \ +> +>>>      -machine aux-ram-share=on \ +> +>>>      -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>      -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>      -nographic \ +> +>>>      -device qxl-vga \ +> +>>>      -incoming tcp:0:44444 \ +> +>>>      -incoming '{"channel-type": "cpr", "addr": { "transport": +> +>>> "socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +> +>> +> +>> +> +>> Launch the migration: +> +>>> QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +> +>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>> +> +>>> $QMPSHELL -p $QMPSOCK < +>>>      migrate-set-parameters mode=cpr-transfer +> +>>>      migrate channels=[{"channel-type":"main","addr": +> +>>> {"transport":"socket","type":"inet","host":"0","port":"44444"}}, +> +>>> {"channel-type":"cpr","addr": +> +>>> {"transport":"socket","type":"unix","path":"/var/run/alma8cpr- +> +>>> dst.sock"}}] +> +>>> EOF +> +>> +> +>> Then, after a while, QXL guest driver on target crashes spewing the +> +>> following messages: +> +>>> [   73.962002] [TTM] Buffer eviction failed +> +>>> [   73.962072] qxl 0000:00:02.0: object_init failed for (3149824, +> +>>> 0x00000001) +> +>>> [   73.962081] [drm:qxl_alloc_bo_reserved [qxl]] *ERROR* failed to +> +>>> allocate VRAM BO +> +>> +> +>> That seems to be a known kernel QXL driver bug: +> +>> +> +>> +https://lore.kernel.org/all/20220907094423.93581-1-min_halo@163.com/T/ +> +>> +https://lore.kernel.org/lkml/ZTgydqRlK6WX_b29@eldamar.lan/ +> +>> +> +>> (the latter discussion contains that reproduce script which speeds up +> +>> the crash in the guest): +> +>>> #!/bin/bash +> +>>> +> +>>> chvt 3 +> +>>> +> +>>> for j in $(seq 80); do +> +>>>          echo "$(date) starting round $j" +> +>>>          if [ "$(journalctl --boot | grep "failed to allocate VRAM +> +>>> BO")" != "" ]; then +> +>>>                  echo "bug was reproduced after $j tries" +> +>>>                  exit 1 +> +>>>          fi +> +>>>          for i in $(seq 100); do +> +>>>                  dmesg > /dev/tty3 +> +>>>          done +> +>>> done +> +>>> +> +>>> 
echo "bug could not be reproduced" +> +>>> exit 0 +> +>> +> +>> The bug itself seems to remain unfixed, as I was able to reproduce that +> +>> with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +> +>> cpr-transfer code also seems to be buggy as it triggers the crash - +> +>> without the cpr-transfer migration the above reproduce doesn't lead to +> +>> crash on the source VM. +> +>> +> +>> I suspect that, as cpr-transfer doesn't migrate the guest memory, but +> +>> rather passes it through the memory backend object, our code might +> +>> somehow corrupt the VRAM.  However, I wasn't able to trace the +> +>> corruption so far. +> +>> +> +>> Could somebody help the investigation and take a look into this?  Any +> +>> suggestions would be appreciated.  Thanks! +> +> +> +> Possibly some memory region created by qxl is not being preserved. +> +> Try adding these traces to see what is preserved: +> +> +> +> -trace enable='*cpr*' +> +> -trace enable='*ram_alloc*' +> +> +Also try adding this patch to see if it flags any ram blocks as not +> +compatible with cpr.  A message is printed at migration start time. +> + +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send-email- +> +steven.sistare@oracle.com/ +> +> +- Steve +> +With the traces enabled + the "migration: ram block cpr blockers" patch +applied: + +Source: +> +cpr_find_fd pc.bios, id 0 returns -1 +> +cpr_save_fd pc.bios, id 0, fd 22 +> +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +> +0x7fec18e00000 +> +cpr_find_fd pc.rom, id 0 returns -1 +> +cpr_save_fd pc.rom, id 0, fd 23 +> +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +> +0x7fec18c00000 +> +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +> +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +> +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size 262144 fd +> +24 host 0x7fec18a00000 +> +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +> +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +> +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size 67108864 +> +fd 25 host 0x7feb77e00000 +> +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +> +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +> +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 fd 27 +> +host 0x7fec18800000 +> +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +> +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +> +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size 67108864 +> +fd 28 host 0x7feb73c00000 +> +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +> +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +> +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 fd 34 +> +host 0x7fec18600000 +> +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +> +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +> +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size 2097152 fd 35 +> +host 0x7fec18200000 +> +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +> +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +> +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 fd 36 +> +host 0x7feb8b600000 +> +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +> +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +> +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd 37 host +> +0x7feb8b400000 +> +> +cpr_state_save cpr-transfer mode +> +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +> +cpr_transfer_input /var/run/alma8cpr-dst.sock +> +cpr_state_load cpr-transfer 
mode +> +cpr_find_fd pc.bios, id 0 returns 20 +> +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +> +0x7fcdc9800000 +> +cpr_find_fd pc.rom, id 0 returns 19 +> +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +> +0x7fcdc9600000 +> +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +> +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size 262144 fd +> +18 host 0x7fcdc9400000 +> +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +> +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size 67108864 +> +fd 17 host 0x7fcd27e00000 +> +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +> +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 fd 16 +> +host 0x7fcdc9200000 +> +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +> +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size 67108864 +> +fd 15 host 0x7fcd23c00000 +> +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +> +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 fd 14 +> +host 0x7fcdc8800000 +> +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +> +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size 2097152 fd 13 +> +host 0x7fcdc8400000 +> +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +> +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 fd 11 +> +host 0x7fcdc8200000 +> +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +> +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd 10 host +> +0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with the same +addresses), and no incompatible ram blocks are found during migration. + +Andrey + +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +> +On 2/28/25 8:20 PM, Steven Sistare wrote: +> +> On 2/28/2025 1:13 PM, Steven Sistare wrote: +> +>> On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +> +>>> Hi all, +> +>>> +> +>>> We've been experimenting with cpr-transfer migration mode recently and +> +>>> have discovered the following issue with the guest QXL driver: +> +>>> +> +>>> Run migration source: +> +>>>> EMULATOR=/path/to/emulator +> +>>>> ROOTFS=/path/to/image +> +>>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>>> +> +>>>> $EMULATOR -enable-kvm \ +> +>>>>      -machine q35 \ +> +>>>>      -cpu host -smp 2 -m 2G \ +> +>>>>      -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +> +>>>> ram0,share=on\ +> +>>>>      -machine memory-backend=ram0 \ +> +>>>>      -machine aux-ram-share=on \ +> +>>>>      -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>>      -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>>      -nographic \ +> +>>>>      -device qxl-vga +> +>>> +> +>>> Run migration target: +> +>>>> EMULATOR=/path/to/emulator +> +>>>> ROOTFS=/path/to/image +> +>>>> QMPSOCK=/var/run/alma8qmp-dst.sock +> +>>>> $EMULATOR -enable-kvm \ +> +>>>>      -machine q35 \ +> +>>>>      -cpu host -smp 2 -m 2G \ +> +>>>>      -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +> +>>>> ram0,share=on\ +> +>>>>      -machine memory-backend=ram0 \ +> +>>>>      -machine aux-ram-share=on \ +> +>>>>      -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>>      -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>>      -nographic \ +> +>>>>      -device qxl-vga \ +> +>>>>      -incoming tcp:0:44444 \ +> +>>>>      -incoming '{"channel-type": "cpr", "addr": { "transport": +> +>>>> "socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +> +>>> +> +>>> +> +>>> Launch the migration: +> +>>>> 
QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +> +>>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>>> +> +>>>> $QMPSHELL -p $QMPSOCK < +>>>>      migrate-set-parameters mode=cpr-transfer +> +>>>>      migrate channels=[{"channel-type":"main","addr": +> +>>>> {"transport":"socket","type":"inet","host":"0","port":"44444"}}, +> +>>>> {"channel-type":"cpr","addr": +> +>>>> {"transport":"socket","type":"unix","path":"/var/run/alma8cpr- +> +>>>> dst.sock"}}] +> +>>>> EOF +> +>>> +> +>>> Then, after a while, QXL guest driver on target crashes spewing the +> +>>> following messages: +> +>>>> [   73.962002] [TTM] Buffer eviction failed +> +>>>> [   73.962072] qxl 0000:00:02.0: object_init failed for (3149824, +> +>>>> 0x00000001) +> +>>>> [   73.962081] [drm:qxl_alloc_bo_reserved [qxl]] *ERROR* failed to +> +>>>> allocate VRAM BO +> +>>> +> +>>> That seems to be a known kernel QXL driver bug: +> +>>> +> +>>> +https://lore.kernel.org/all/20220907094423.93581-1-min_halo@163.com/T/ +> +>>> +https://lore.kernel.org/lkml/ZTgydqRlK6WX_b29@eldamar.lan/ +> +>>> +> +>>> (the latter discussion contains that reproduce script which speeds up +> +>>> the crash in the guest): +> +>>>> #!/bin/bash +> +>>>> +> +>>>> chvt 3 +> +>>>> +> +>>>> for j in $(seq 80); do +> +>>>>          echo "$(date) starting round $j" +> +>>>>          if [ "$(journalctl --boot | grep "failed to allocate VRAM +> +>>>> BO")" != "" ]; then +> +>>>>                  echo "bug was reproduced after $j tries" +> +>>>>                  exit 1 +> +>>>>          fi +> +>>>>          for i in $(seq 100); do +> +>>>>                  dmesg > /dev/tty3 +> +>>>>          done +> +>>>> done +> +>>>> +> +>>>> echo "bug could not be reproduced" +> +>>>> exit 0 +> +>>> +> +>>> The bug itself seems to remain unfixed, as I was able to reproduce that +> +>>> with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +> +>>> cpr-transfer code also seems to be buggy as it triggers the crash - +> +>>> without the cpr-transfer migration the above reproduce doesn't lead to +> +>>> crash on the source VM. +> +>>> +> +>>> I suspect that, as cpr-transfer doesn't migrate the guest memory, but +> +>>> rather passes it through the memory backend object, our code might +> +>>> somehow corrupt the VRAM.  However, I wasn't able to trace the +> +>>> corruption so far. +> +>>> +> +>>> Could somebody help the investigation and take a look into this?  Any +> +>>> suggestions would be appreciated.  Thanks! +> +>> +> +>> Possibly some memory region created by qxl is not being preserved. +> +>> Try adding these traces to see what is preserved: +> +>> +> +>> -trace enable='*cpr*' +> +>> -trace enable='*ram_alloc*' +> +> +> +> Also try adding this patch to see if it flags any ram blocks as not +> +> compatible with cpr.  A message is printed at migration start time. 
+> +>  +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send-email- +> +> steven.sistare@oracle.com/ +> +> +> +> - Steve +> +> +> +> +With the traces enabled + the "migration: ram block cpr blockers" patch +> +applied: +> +> +Source: +> +> cpr_find_fd pc.bios, id 0 returns -1 +> +> cpr_save_fd pc.bios, id 0, fd 22 +> +> qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +> +> 0x7fec18e00000 +> +> cpr_find_fd pc.rom, id 0 returns -1 +> +> cpr_save_fd pc.rom, id 0, fd 23 +> +> qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +> +> 0x7fec18c00000 +> +> cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +> +> cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +> +> qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size 262144 fd +> +> 24 host 0x7fec18a00000 +> +> cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +> +> cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +> +> qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size 67108864 +> +> fd 25 host 0x7feb77e00000 +> +> cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +> +> cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +> +> qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 fd 27 +> +> host 0x7fec18800000 +> +> cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +> +> cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +> +> qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size 67108864 +> +> fd 28 host 0x7feb73c00000 +> +> cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +> +> cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +> +> qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 fd 34 +> +> host 0x7fec18600000 +> +> cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +> +> cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +> +> qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size 2097152 fd +> +> 35 host 0x7fec18200000 +> +> cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +> +> cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +> +> qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 fd 36 +> +> host 0x7feb8b600000 +> +> cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +> +> cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +> +> qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd 37 host +> +> 0x7feb8b400000 +> +> +> +> cpr_state_save cpr-transfer mode +> +> cpr_transfer_output /var/run/alma8cpr-dst.sock +> +> +Target: +> +> cpr_transfer_input /var/run/alma8cpr-dst.sock +> +> cpr_state_load cpr-transfer mode +> +> cpr_find_fd pc.bios, id 0 returns 20 +> +> qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +> +> 0x7fcdc9800000 +> +> cpr_find_fd pc.rom, id 0 returns 19 +> +> qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +> +> 0x7fcdc9600000 +> +> cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +> +> qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size 262144 fd +> +> 18 host 0x7fcdc9400000 +> +> cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +> +> qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size 67108864 +> +> fd 17 host 0x7fcd27e00000 +> +> cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +> +> qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 fd 16 +> +> host 0x7fcdc9200000 +> +> cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +> +> qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size 67108864 +> +> fd 15 host 0x7fcd23c00000 +> +> cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 
14 +> +> qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 fd 14 +> +> host 0x7fcdc8800000 +> +> cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +> +> qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size 2097152 fd +> +> 13 host 0x7fcdc8400000 +> +> cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +> +> qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 fd 11 +> +> host 0x7fcdc8200000 +> +> cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +> +> qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd 10 host +> +> 0x7fcd3be00000 +> +> +Looks like both vga.vram and qxl.vram are being preserved (with the same +> +addresses), and no incompatible ram blocks are found during migration. +> +Sorry, addressed are not the same, of course. However corresponding ram +blocks do seem to be preserved and initialized. + +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +On 2/28/25 8:20 PM, Steven Sistare wrote: +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +      -machine q35 \ +      -cpu host -smp 2 -m 2G \ +      -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +ram0,share=on\ +      -machine memory-backend=ram0 \ +      -machine aux-ram-share=on \ +      -drive file=$ROOTFS,media=disk,if=virtio \ +      -qmp unix:$QMPSOCK,server=on,wait=off \ +      -nographic \ +      -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +      -machine q35 \ +      -cpu host -smp 2 -m 2G \ +      -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +ram0,share=on\ +      -machine memory-backend=ram0 \ +      -machine aux-ram-share=on \ +      -drive file=$ROOTFS,media=disk,if=virtio \ +      -qmp unix:$QMPSOCK,server=on,wait=off \ +      -nographic \ +      -device qxl-vga \ +      -incoming tcp:0:44444 \ +      -incoming '{"channel-type": "cpr", "addr": { "transport": +"socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +          done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr.  
A message is printed at migration start time. +  +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send-email- +steven.sistare@oracle.com/ + +- Steve +With the traces enabled + the "migration: ram block cpr blockers" patch +applied: + +Source: +cpr_find_fd pc.bios, id 0 returns -1 +cpr_save_fd pc.bios, id 0, fd 22 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +0x7fec18e00000 +cpr_find_fd pc.rom, id 0 returns -1 +cpr_save_fd pc.rom, id 0, fd 23 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +0x7fec18c00000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size 262144 fd 24 +host 0x7fec18a00000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size 67108864 fd +25 host 0x7feb77e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 fd 27 host +0x7fec18800000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size 67108864 fd +28 host 0x7feb73c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 fd 34 host +0x7fec18600000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size 2097152 fd 35 +host 0x7fec18200000 +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 fd 36 host +0x7feb8b600000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd 37 host +0x7feb8b400000 + +cpr_state_save cpr-transfer mode +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +cpr_transfer_input /var/run/alma8cpr-dst.sock +cpr_state_load cpr-transfer mode +cpr_find_fd pc.bios, id 0 returns 20 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +0x7fcdc9800000 +cpr_find_fd pc.rom, id 0 returns 19 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +0x7fcdc9600000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size 262144 fd 18 +host 0x7fcdc9400000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size 67108864 fd +17 host 0x7fcd27e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 fd 16 host +0x7fcdc9200000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size 67108864 fd +15 host 0x7fcd23c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 fd 14 host +0x7fcdc8800000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size 2097152 fd 13 +host 0x7fcdc8400000 +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 
+qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 fd 11 host +0x7fcdc8200000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd 10 host +0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with the same +addresses), and no incompatible ram blocks are found during migration. +Sorry, addressed are not the same, of course. However corresponding ram +blocks do seem to be preserved and initialized. +So far, I have not reproduced the guest driver failure. + +However, I have isolated places where new QEMU improperly writes to +the qxl memory regions prior to starting the guest, by mmap'ing them +readonly after cpr: + + qemu_ram_alloc_internal() + if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) + ram_flags |= RAM_READONLY; + new_block = qemu_ram_alloc_from_fd(...) + +I have attached a draft fix; try it and let me know. +My console window looks fine before and after cpr, using +-vnc $hostip:0 -vga qxl + +- Steve +0001-hw-qxl-cpr-support-preliminary.patch +Description: +Text document + +On 3/4/25 9:05 PM, Steven Sistare wrote: +> +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +> +> On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +> +>> On 2/28/25 8:20 PM, Steven Sistare wrote: +> +>>> On 2/28/2025 1:13 PM, Steven Sistare wrote: +> +>>>> On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +> +>>>>> Hi all, +> +>>>>> +> +>>>>> We've been experimenting with cpr-transfer migration mode recently +> +>>>>> and +> +>>>>> have discovered the following issue with the guest QXL driver: +> +>>>>> +> +>>>>> Run migration source: +> +>>>>>> EMULATOR=/path/to/emulator +> +>>>>>> ROOTFS=/path/to/image +> +>>>>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>>>>> +> +>>>>>> $EMULATOR -enable-kvm \ +> +>>>>>>       -machine q35 \ +> +>>>>>>       -cpu host -smp 2 -m 2G \ +> +>>>>>>       -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +> +>>>>>> ram0,share=on\ +> +>>>>>>       -machine memory-backend=ram0 \ +> +>>>>>>       -machine aux-ram-share=on \ +> +>>>>>>       -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>>>>       -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>>>>       -nographic \ +> +>>>>>>       -device qxl-vga +> +>>>>> +> +>>>>> Run migration target: +> +>>>>>> EMULATOR=/path/to/emulator +> +>>>>>> ROOTFS=/path/to/image +> +>>>>>> QMPSOCK=/var/run/alma8qmp-dst.sock +> +>>>>>> $EMULATOR -enable-kvm \ +> +>>>>>>       -machine q35 \ +> +>>>>>>       -cpu host -smp 2 -m 2G \ +> +>>>>>>       -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +> +>>>>>> ram0,share=on\ +> +>>>>>>       -machine memory-backend=ram0 \ +> +>>>>>>       -machine aux-ram-share=on \ +> +>>>>>>       -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>>>>       -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>>>>       -nographic \ +> +>>>>>>       -device qxl-vga \ +> +>>>>>>       -incoming tcp:0:44444 \ +> +>>>>>>       -incoming '{"channel-type": "cpr", "addr": { "transport": +> +>>>>>> "socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +> +>>>>> +> +>>>>> +> +>>>>> Launch the migration: +> +>>>>>> QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +> +>>>>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>>>>> +> +>>>>>> $QMPSHELL -p $QMPSOCK < +>>>>>>       migrate-set-parameters mode=cpr-transfer +> +>>>>>>       migrate channels=[{"channel-type":"main","addr": +> +>>>>>> {"transport":"socket","type":"inet","host":"0","port":"44444"}}, +> +>>>>>> {"channel-type":"cpr","addr": +> 
+>>>>>> {"transport":"socket","type":"unix","path":"/var/run/alma8cpr- +> +>>>>>> dst.sock"}}] +> +>>>>>> EOF +> +>>>>> +> +>>>>> Then, after a while, QXL guest driver on target crashes spewing the +> +>>>>> following messages: +> +>>>>>> [   73.962002] [TTM] Buffer eviction failed +> +>>>>>> [   73.962072] qxl 0000:00:02.0: object_init failed for (3149824, +> +>>>>>> 0x00000001) +> +>>>>>> [   73.962081] [drm:qxl_alloc_bo_reserved [qxl]] *ERROR* failed to +> +>>>>>> allocate VRAM BO +> +>>>>> +> +>>>>> That seems to be a known kernel QXL driver bug: +> +>>>>> +> +>>>>> +https://lore.kernel.org/all/20220907094423.93581-1- +> +>>>>> min_halo@163.com/T/ +> +>>>>> +https://lore.kernel.org/lkml/ZTgydqRlK6WX_b29@eldamar.lan/ +> +>>>>> +> +>>>>> (the latter discussion contains that reproduce script which speeds up +> +>>>>> the crash in the guest): +> +>>>>>> #!/bin/bash +> +>>>>>> +> +>>>>>> chvt 3 +> +>>>>>> +> +>>>>>> for j in $(seq 80); do +> +>>>>>>           echo "$(date) starting round $j" +> +>>>>>>           if [ "$(journalctl --boot | grep "failed to allocate VRAM +> +>>>>>> BO")" != "" ]; then +> +>>>>>>                   echo "bug was reproduced after $j tries" +> +>>>>>>                   exit 1 +> +>>>>>>           fi +> +>>>>>>           for i in $(seq 100); do +> +>>>>>>                   dmesg > /dev/tty3 +> +>>>>>>           done +> +>>>>>> done +> +>>>>>> +> +>>>>>> echo "bug could not be reproduced" +> +>>>>>> exit 0 +> +>>>>> +> +>>>>> The bug itself seems to remain unfixed, as I was able to reproduce +> +>>>>> that +> +>>>>> with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +> +>>>>> cpr-transfer code also seems to be buggy as it triggers the crash - +> +>>>>> without the cpr-transfer migration the above reproduce doesn't +> +>>>>> lead to +> +>>>>> crash on the source VM. +> +>>>>> +> +>>>>> I suspect that, as cpr-transfer doesn't migrate the guest memory, but +> +>>>>> rather passes it through the memory backend object, our code might +> +>>>>> somehow corrupt the VRAM.  However, I wasn't able to trace the +> +>>>>> corruption so far. +> +>>>>> +> +>>>>> Could somebody help the investigation and take a look into this?  Any +> +>>>>> suggestions would be appreciated.  Thanks! +> +>>>> +> +>>>> Possibly some memory region created by qxl is not being preserved. +> +>>>> Try adding these traces to see what is preserved: +> +>>>> +> +>>>> -trace enable='*cpr*' +> +>>>> -trace enable='*ram_alloc*' +> +>>> +> +>>> Also try adding this patch to see if it flags any ram blocks as not +> +>>> compatible with cpr.  A message is printed at migration start time. 
+> +>>>   +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +> +>>> email- +> +>>> steven.sistare@oracle.com/ +> +>>> +> +>>> - Steve +> +>>> +> +>> +> +>> With the traces enabled + the "migration: ram block cpr blockers" patch +> +>> applied: +> +>> +> +>> Source: +> +>>> cpr_find_fd pc.bios, id 0 returns -1 +> +>>> cpr_save_fd pc.bios, id 0, fd 22 +> +>>> qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +> +>>> 0x7fec18e00000 +> +>>> cpr_find_fd pc.rom, id 0 returns -1 +> +>>> cpr_save_fd pc.rom, id 0, fd 23 +> +>>> qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +> +>>> 0x7fec18c00000 +> +>>> cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +> +>>> cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +> +>>> qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +> +>>> 262144 fd 24 host 0x7fec18a00000 +> +>>> cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +> +>>> cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +> +>>> 67108864 fd 25 host 0x7feb77e00000 +> +>>> cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +> +>>> cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +> +>>> fd 27 host 0x7fec18800000 +> +>>> cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +> +>>> cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +> +>>> 67108864 fd 28 host 0x7feb73c00000 +> +>>> cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +> +>>> cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +> +>>> fd 34 host 0x7fec18600000 +> +>>> cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +> +>>> cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +> +>>> qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +> +>>> 2097152 fd 35 host 0x7fec18200000 +> +>>> cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +> +>>> cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +> +>>> qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +> +>>> fd 36 host 0x7feb8b600000 +> +>>> cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +> +>>> cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +> +>>> qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +> +>>> 37 host 0x7feb8b400000 +> +>>> +> +>>> cpr_state_save cpr-transfer mode +> +>>> cpr_transfer_output /var/run/alma8cpr-dst.sock +> +>> +> +>> Target: +> +>>> cpr_transfer_input /var/run/alma8cpr-dst.sock +> +>>> cpr_state_load cpr-transfer mode +> +>>> cpr_find_fd pc.bios, id 0 returns 20 +> +>>> qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +> +>>> 0x7fcdc9800000 +> +>>> cpr_find_fd pc.rom, id 0 returns 19 +> +>>> qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +> +>>> 0x7fcdc9600000 +> +>>> cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +> +>>> qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +> +>>> 262144 fd 18 host 0x7fcdc9400000 +> +>>> cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +> +>>> 67108864 fd 17 host 0x7fcd27e00000 +> +>>> cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +> +>>> fd 16 host 0x7fcdc9200000 +> +>>> cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 
+> +>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +> +>>> 67108864 fd 15 host 0x7fcd23c00000 +> +>>> cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +> +>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +> +>>> fd 14 host 0x7fcdc8800000 +> +>>> cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +> +>>> qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +> +>>> 2097152 fd 13 host 0x7fcdc8400000 +> +>>> cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +> +>>> qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +> +>>> fd 11 host 0x7fcdc8200000 +> +>>> cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +> +>>> qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +> +>>> 10 host 0x7fcd3be00000 +> +>> +> +>> Looks like both vga.vram and qxl.vram are being preserved (with the same +> +>> addresses), and no incompatible ram blocks are found during migration. +> +> +> +> Sorry, addressed are not the same, of course.  However corresponding ram +> +> blocks do seem to be preserved and initialized. +> +> +So far, I have not reproduced the guest driver failure. +> +> +However, I have isolated places where new QEMU improperly writes to +> +the qxl memory regions prior to starting the guest, by mmap'ing them +> +readonly after cpr: +> +> +  qemu_ram_alloc_internal() +> +    if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +> +        ram_flags |= RAM_READONLY; +> +    new_block = qemu_ram_alloc_from_fd(...) +> +> +I have attached a draft fix; try it and let me know. +> +My console window looks fine before and after cpr, using +> +-vnc $hostip:0 -vga qxl +> +> +- Steve +Regarding the reproduce: when I launch the buggy version with the same +options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +my VNC client silently hangs on the target after a while. Could it +happen on your stand as well? Could you try launching VM with +"-nographic -device qxl-vga"? That way VM's serial console is given you +directly in the shell, so when qxl driver crashes you're still able to +inspect the kernel messages. + +As for your patch, I can report that it doesn't resolve the issue as it +is. But I was able to track down another possible memory corruption +using your approach with readonly mmap'ing: + +> +Program terminated with signal SIGSEGV, Segmentation fault. 
+> +#0 init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +> +412 d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC); +> +[Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +> +(gdb) bt +> +#0 init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +> +#1 0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +> +errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +> +#2 0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +> +errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +> +#3 0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +> +errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +> +#4 0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, +> +value=true, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +> +#5 0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, +> +v=0x5638996f3770, name=0x56389759b141 "realized", opaque=0x5638987893d0, +> +errp=0x7ffd3c2b84e0) +> +at ../qom/object.c:2374 +> +#6 0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, +> +name=0x56389759b141 "realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) +> +at ../qom/object.c:1449 +> +#7 0x00005638970f8586 in object_property_set_qobject (obj=0x5638996e0e70, +> +name=0x56389759b141 "realized", value=0x5638996df900, errp=0x7ffd3c2b84e0) +> +at ../qom/qom-qobject.c:28 +> +#8 0x00005638970f3d8d in object_property_set_bool (obj=0x5638996e0e70, +> +name=0x56389759b141 "realized", value=true, errp=0x7ffd3c2b84e0) +> +at ../qom/object.c:1519 +> +#9 0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, +> +bus=0x563898cf3c20, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +> +#10 0x0000563896dba675 in qdev_device_add_from_qdict (opts=0x5638996dfe50, +> +from_json=false, errp=0x7ffd3c2b84e0) at ../system/qdev-monitor.c:714 +> +#11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, +> +errp=0x56389855dc40 ) at ../system/qdev-monitor.c:733 +> +#12 0x0000563896dc48f1 in device_init_func (opaque=0x0, opts=0x563898786150, +> +errp=0x56389855dc40 ) at ../system/vl.c:1207 +> +#13 0x000056389737a6cc in qemu_opts_foreach +> +(list=0x563898427b60 , func=0x563896dc48ca +> +, opaque=0x0, errp=0x56389855dc40 ) +> +at ../util/qemu-option.c:1135 +> +#14 0x0000563896dc89b5 in qemu_create_cli_devices () at ../system/vl.c:2745 +> +#15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +> +) at ../system/vl.c:2806 +> +#16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) at +> +../system/vl.c:3838 +> +#17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at +> +../system/main.c:72 +So the attached adjusted version of your patch does seem to help. At +least I can't reproduce the crash on my stand. + +I'm wondering, could it be useful to explicitly mark all the reused +memory regions readonly upon cpr-transfer, and then make them writable +back again after the migration is done? That way we will be segfaulting +early on instead of debugging tricky memory corruptions. 
+ +Andrey +0001-hw-qxl-cpr-support-preliminary.patch +Description: +Text Data + +On 3/5/2025 11:50 AM, Andrey Drobyshev wrote: +On 3/4/25 9:05 PM, Steven Sistare wrote: +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +On 2/28/25 8:20 PM, Steven Sistare wrote: +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently +and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +       -machine q35 \ +       -cpu host -smp 2 -m 2G \ +       -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +ram0,share=on\ +       -machine memory-backend=ram0 \ +       -machine aux-ram-share=on \ +       -drive file=$ROOTFS,media=disk,if=virtio \ +       -qmp unix:$QMPSOCK,server=on,wait=off \ +       -nographic \ +       -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +       -machine q35 \ +       -cpu host -smp 2 -m 2G \ +       -object memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +ram0,share=on\ +       -machine memory-backend=ram0 \ +       -machine aux-ram-share=on \ +       -drive file=$ROOTFS,media=disk,if=virtio \ +       -qmp unix:$QMPSOCK,server=on,wait=off \ +       -nographic \ +       -device qxl-vga \ +       -incoming tcp:0:44444 \ +       -incoming '{"channel-type": "cpr", "addr": { "transport": +"socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +           done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce +that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't +lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr.  A message is printed at migration start time. 
+   +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +email- +steven.sistare@oracle.com/ + +- Steve +With the traces enabled + the "migration: ram block cpr blockers" patch +applied: + +Source: +cpr_find_fd pc.bios, id 0 returns -1 +cpr_save_fd pc.bios, id 0, fd 22 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +0x7fec18e00000 +cpr_find_fd pc.rom, id 0 returns -1 +cpr_save_fd pc.rom, id 0, fd 23 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +0x7fec18c00000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 24 host 0x7fec18a00000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 25 host 0x7feb77e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 27 host 0x7fec18800000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 28 host 0x7feb73c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 34 host 0x7fec18600000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 35 host 0x7fec18200000 +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +fd 36 host 0x7feb8b600000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +37 host 0x7feb8b400000 + +cpr_state_save cpr-transfer mode +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +cpr_transfer_input /var/run/alma8cpr-dst.sock +cpr_state_load cpr-transfer mode +cpr_find_fd pc.bios, id 0 returns 20 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +0x7fcdc9800000 +cpr_find_fd pc.rom, id 0 returns 19 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +0x7fcdc9600000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 18 host 0x7fcdc9400000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 17 host 0x7fcd27e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 16 host 0x7fcdc9200000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 15 host 0x7fcd23c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 14 host 0x7fcdc8800000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 13 host 0x7fcdc8400000 +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 
max_size 65536 +fd 11 host 0x7fcdc8200000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +10 host 0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with the same +addresses), and no incompatible ram blocks are found during migration. +Sorry, addressed are not the same, of course.  However corresponding ram +blocks do seem to be preserved and initialized. +So far, I have not reproduced the guest driver failure. + +However, I have isolated places where new QEMU improperly writes to +the qxl memory regions prior to starting the guest, by mmap'ing them +readonly after cpr: + +   qemu_ram_alloc_internal() +     if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +         ram_flags |= RAM_READONLY; +     new_block = qemu_ram_alloc_from_fd(...) + +I have attached a draft fix; try it and let me know. +My console window looks fine before and after cpr, using +-vnc $hostip:0 -vga qxl + +- Steve +Regarding the reproduce: when I launch the buggy version with the same +options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +my VNC client silently hangs on the target after a while. Could it +happen on your stand as well? +cpr does not preserve the vnc connection and session. To test, I specify +port 0 for the source VM and port 1 for the dest. When the src vnc goes +dormant the dest vnc becomes active. +Could you try launching VM with +"-nographic -device qxl-vga"? That way VM's serial console is given you +directly in the shell, so when qxl driver crashes you're still able to +inspect the kernel messages. +I have been running like that, but have not reproduced the qxl driver crash, +and I suspect my guest image+kernel is too old. However, once I realized the +issue was post-cpr modification of qxl memory, I switched my attention to the +fix. +As for your patch, I can report that it doesn't resolve the issue as it +is. But I was able to track down another possible memory corruption +using your approach with readonly mmap'ing: +Program terminated with signal SIGSEGV, Segmentation fault. 
+#0 init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +412 d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC); +[Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +(gdb) bt +#0 init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +#1 0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +#2 0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +#3 0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +#4 0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, value=true, +errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +#5 0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, v=0x5638996f3770, +name=0x56389759b141 "realized", opaque=0x5638987893d0, errp=0x7ffd3c2b84e0) + at ../qom/object.c:2374 +#6 0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, name=0x56389759b141 +"realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) + at ../qom/object.c:1449 +#7 0x00005638970f8586 in object_property_set_qobject (obj=0x5638996e0e70, +name=0x56389759b141 "realized", value=0x5638996df900, errp=0x7ffd3c2b84e0) + at ../qom/qom-qobject.c:28 +#8 0x00005638970f3d8d in object_property_set_bool (obj=0x5638996e0e70, +name=0x56389759b141 "realized", value=true, errp=0x7ffd3c2b84e0) + at ../qom/object.c:1519 +#9 0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, bus=0x563898cf3c20, +errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +#10 0x0000563896dba675 in qdev_device_add_from_qdict (opts=0x5638996dfe50, +from_json=false, errp=0x7ffd3c2b84e0) at ../system/qdev-monitor.c:714 +#11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, errp=0x56389855dc40 +) at ../system/qdev-monitor.c:733 +#12 0x0000563896dc48f1 in device_init_func (opaque=0x0, opts=0x563898786150, +errp=0x56389855dc40 ) at ../system/vl.c:1207 +#13 0x000056389737a6cc in qemu_opts_foreach + (list=0x563898427b60 , func=0x563896dc48ca , +opaque=0x0, errp=0x56389855dc40 ) + at ../util/qemu-option.c:1135 +#14 0x0000563896dc89b5 in qemu_create_cli_devices () at ../system/vl.c:2745 +#15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +) at ../system/vl.c:2806 +#16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) at +../system/vl.c:3838 +#17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at +../system/main.c:72 +So the attached adjusted version of your patch does seem to help. At +least I can't reproduce the crash on my stand. +Thanks for the stack trace; the calls to SPICE_RING_INIT in init_qxl_ram are +definitely harmful. Try V2 of the patch, attached, which skips the lines +of init_qxl_ram that modify guest memory. +I'm wondering, could it be useful to explicitly mark all the reused +memory regions readonly upon cpr-transfer, and then make them writable +back again after the migration is done? That way we will be segfaulting +early on instead of debugging tricky memory corruptions. +It's a useful debugging technique, but changing protection on a large memory +region +can be too expensive for production due to TLB shootdowns. + +Also, there are cases where writes are performed but the value is guaranteed to +be the same: + qxl_post_load() + qxl_set_mode() + d->rom->mode = cpu_to_le32(modenr); +The value is the same because mode and shadow_rom.mode were passed in vmstate +from old qemu. 
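+The V2 approach described here (skipping the guest-memory stores in
+init_qxl_ram when the RAM block was inherited through cpr) can be illustrated
+with a small standalone program. This is not the actual patch: struct dev_ram,
+RAM_MAGIC and the reused flag are made-up stand-ins for QXL's state. The point
+is that the process inheriting the fd validates the existing contents instead
+of rewriting them.
+
+  #define _GNU_SOURCE
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <unistd.h>
+  #include <sys/mman.h>
+  #include <sys/wait.h>
+
+  #define RAM_MAGIC 0x41525351u              /* stand-in for QXL_RAM_MAGIC */
+
+  struct dev_ram { uint32_t magic; uint32_t cmd_ring_head; };
+
+  static void init_dev_ram(struct dev_ram *r, int reused)
+  {
+      if (reused) {              /* cpr case: guest already owns this state */
+          if (r->magic != RAM_MAGIC)
+              fprintf(stderr, "unexpected magic %#x\n", r->magic);
+          return;                /* do not touch magic, rings, etc. */
+      }
+      r->magic = RAM_MAGIC;      /* cold boot: full initialization */
+      r->cmd_ring_head = 0;
+  }
+
+  int main(void)
+  {
+      int fd = memfd_create("qxl.ram", 0);   /* stands in for the preserved fd */
+      ftruncate(fd, sizeof(struct dev_ram));
+
+      struct dev_ram *r = mmap(NULL, sizeof(*r), PROT_READ | PROT_WRITE,
+                               MAP_SHARED, fd, 0);
+      init_dev_ram(r, 0);                    /* "old QEMU" boots the device */
+      r->cmd_ring_head = 7;                  /* guest-visible state accumulates */
+
+      if (fork() == 0) {                     /* "new QEMU" inherits the fd */
+          struct dev_ram *n = mmap(NULL, sizeof(*n), PROT_READ | PROT_WRITE,
+                                   MAP_SHARED, fd, 0);
+          init_dev_ram(n, 1);                /* reused: must not clobber state */
+          printf("after cpr: cmd_ring_head=%u (preserved)\n", n->cmd_ring_head);
+          _exit(0);
+      }
+      wait(NULL);
+      return 0;
+  }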
+ +- Steve +0001-hw-qxl-cpr-support-preliminary-V2.patch +Description: +Text document + +On 3/5/25 22:19, Steven Sistare wrote: +On 3/5/2025 11:50 AM, Andrey Drobyshev wrote: +On 3/4/25 9:05 PM, Steven Sistare wrote: +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +On 2/28/25 8:20 PM, Steven Sistare wrote: +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently +and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +       -machine q35 \ +       -cpu host -smp 2 -m 2G \ +       -object +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +ram0,share=on\ +       -machine memory-backend=ram0 \ +       -machine aux-ram-share=on \ +       -drive file=$ROOTFS,media=disk,if=virtio \ +       -qmp unix:$QMPSOCK,server=on,wait=off \ +       -nographic \ +       -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +       -machine q35 \ +       -cpu host -smp 2 -m 2G \ +       -object +memory-backend-file,id=ram0,size=2G,mem-path=/dev/shm/ +ram0,share=on\ +       -machine memory-backend=ram0 \ +       -machine aux-ram-share=on \ +       -drive file=$ROOTFS,media=disk,if=virtio \ +       -qmp unix:$QMPSOCK,server=on,wait=off \ +       -nographic \ +       -device qxl-vga \ +       -incoming tcp:0:44444 \ +       -incoming '{"channel-type": "cpr", "addr": { "transport": +"socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +           done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce +that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the +crash - +without the cpr-transfer migration the above reproduce doesn't +lead to +crash on the source VM. +I suspect that, as cpr-transfer doesn't migrate the guest +memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. +Could somebody help the investigation and take a look into +this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr.  A message is printed at migration start time. 
+https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +email- +steven.sistare@oracle.com/ + +- Steve +With the traces enabled + the "migration: ram block cpr blockers" +patch +applied: + +Source: +cpr_find_fd pc.bios, id 0 returns -1 +cpr_save_fd pc.bios, id 0, fd 22 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +0x7fec18e00000 +cpr_find_fd pc.rom, id 0 returns -1 +cpr_save_fd pc.rom, id 0, fd 23 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +0x7fec18c00000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 24 host 0x7fec18a00000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 25 host 0x7feb77e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 27 host 0x7fec18800000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 28 host 0x7feb73c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 34 host 0x7fec18600000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 35 host 0x7fec18200000 +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +fd 36 host 0x7feb8b600000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +37 host 0x7feb8b400000 + +cpr_state_save cpr-transfer mode +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +cpr_transfer_input /var/run/alma8cpr-dst.sock +cpr_state_load cpr-transfer mode +cpr_find_fd pc.bios, id 0 returns 20 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +0x7fcdc9800000 +cpr_find_fd pc.rom, id 0 returns 19 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +0x7fcdc9600000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 18 host 0x7fcdc9400000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 17 host 0x7fcd27e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 16 host 0x7fcdc9200000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 15 host 0x7fcd23c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 14 host 0x7fcdc8800000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 13 host 0x7fcdc8400000 +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 
max_size 65536 +fd 11 host 0x7fcdc8200000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +10 host 0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with +the same +addresses), and no incompatible ram blocks are found during +migration. +Sorry, addressed are not the same, of course.  However +corresponding ram +blocks do seem to be preserved and initialized. +So far, I have not reproduced the guest driver failure. + +However, I have isolated places where new QEMU improperly writes to +the qxl memory regions prior to starting the guest, by mmap'ing them +readonly after cpr: + +   qemu_ram_alloc_internal() +     if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +         ram_flags |= RAM_READONLY; +     new_block = qemu_ram_alloc_from_fd(...) + +I have attached a draft fix; try it and let me know. +My console window looks fine before and after cpr, using +-vnc $hostip:0 -vga qxl + +- Steve +Regarding the reproduce: when I launch the buggy version with the same +options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +my VNC client silently hangs on the target after a while.  Could it +happen on your stand as well? +cpr does not preserve the vnc connection and session.  To test, I specify +port 0 for the source VM and port 1 for the dest.  When the src vnc goes +dormant the dest vnc becomes active. +Could you try launching VM with +"-nographic -device qxl-vga"?  That way VM's serial console is given you +directly in the shell, so when qxl driver crashes you're still able to +inspect the kernel messages. +I have been running like that, but have not reproduced the qxl driver +crash, +and I suspect my guest image+kernel is too old.  However, once I +realized the +issue was post-cpr modification of qxl memory, I switched my attention +to the +fix. +As for your patch, I can report that it doesn't resolve the issue as it +is.  But I was able to track down another possible memory corruption +using your approach with readonly mmap'ing: +Program terminated with signal SIGSEGV, Segmentation fault. 
+#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +412         d->ram->magic       = cpu_to_le32(QXL_RAM_MAGIC); +[Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +(gdb) bt +#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +#1  0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +#2  0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +#3  0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +#4  0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, +value=true, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +#5  0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, +v=0x5638996f3770, name=0x56389759b141 "realized", +opaque=0x5638987893d0, errp=0x7ffd3c2b84e0) +     at ../qom/object.c:2374 +#6  0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, +name=0x56389759b141 "realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) +     at ../qom/object.c:1449 +#7  0x00005638970f8586 in object_property_set_qobject +(obj=0x5638996e0e70, name=0x56389759b141 "realized", +value=0x5638996df900, errp=0x7ffd3c2b84e0) +     at ../qom/qom-qobject.c:28 +#8  0x00005638970f3d8d in object_property_set_bool +(obj=0x5638996e0e70, name=0x56389759b141 "realized", value=true, +errp=0x7ffd3c2b84e0) +     at ../qom/object.c:1519 +#9  0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, +bus=0x563898cf3c20, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +#10 0x0000563896dba675 in qdev_device_add_from_qdict +(opts=0x5638996dfe50, from_json=false, errp=0x7ffd3c2b84e0) at +../system/qdev-monitor.c:714 +#11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, +errp=0x56389855dc40 ) at ../system/qdev-monitor.c:733 +#12 0x0000563896dc48f1 in device_init_func (opaque=0x0, +opts=0x563898786150, errp=0x56389855dc40 ) at +../system/vl.c:1207 +#13 0x000056389737a6cc in qemu_opts_foreach +     (list=0x563898427b60 , func=0x563896dc48ca +, opaque=0x0, errp=0x56389855dc40 ) +     at ../util/qemu-option.c:1135 +#14 0x0000563896dc89b5 in qemu_create_cli_devices () at +../system/vl.c:2745 +#15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +) at ../system/vl.c:2806 +#16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) +at ../system/vl.c:3838 +#17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at +../system/main.c:72 +So the attached adjusted version of your patch does seem to help.  At +least I can't reproduce the crash on my stand. +Thanks for the stack trace; the calls to SPICE_RING_INIT in +init_qxl_ram are +definitely harmful.  Try V2 of the patch, attached, which skips the lines +of init_qxl_ram that modify guest memory. +I'm wondering, could it be useful to explicitly mark all the reused +memory regions readonly upon cpr-transfer, and then make them writable +back again after the migration is done?  That way we will be segfaulting +early on instead of debugging tricky memory corruptions. +It's a useful debugging technique, but changing protection on a large +memory region +can be too expensive for production due to TLB shootdowns. +Good point. Though we could move this code under non-default option to +avoid re-writing. 
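+A minimal sketch of what such a non-default switch could look like, assuming an
+environment variable rather than a real QEMU option (CPR_DEBUG_RO is purely
+hypothetical): the production path keeps mapping reused blocks read-write, and
+the read-only debug mapping only applies when explicitly requested.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+  #include <sys/mman.h>
+
+  /* hypothetical opt-in switch; not an existing QEMU option */
+  static int cpr_debug_ro_enabled(void)
+  {
+      const char *v = getenv("CPR_DEBUG_RO");
+      return v && strcmp(v, "0") != 0;
+  }
+
+  int main(void)
+  {
+      int prot = cpr_debug_ro_enabled() ? PROT_READ : (PROT_READ | PROT_WRITE);
+      printf("reused blocks would be mapped %s\n",
+             prot == PROT_READ ? "read-only (debug)" : "read-write (default)");
+      return 0;
+  }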
+ +Den + +On 3/5/25 11:19 PM, Steven Sistare wrote: +> +On 3/5/2025 11:50 AM, Andrey Drobyshev wrote: +> +> On 3/4/25 9:05 PM, Steven Sistare wrote: +> +>> On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +> +>>> On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +> +>>>> On 2/28/25 8:20 PM, Steven Sistare wrote: +> +>>>>> On 2/28/2025 1:13 PM, Steven Sistare wrote: +> +>>>>>> On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +> +>>>>>>> Hi all, +> +>>>>>>> +> +>>>>>>> We've been experimenting with cpr-transfer migration mode recently +> +>>>>>>> and +> +>>>>>>> have discovered the following issue with the guest QXL driver: +> +>>>>>>> +> +>>>>>>> Run migration source: +> +>>>>>>>> EMULATOR=/path/to/emulator +> +>>>>>>>> ROOTFS=/path/to/image +> +>>>>>>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>>>>>>> +> +>>>>>>>> $EMULATOR -enable-kvm \ +> +>>>>>>>>        -machine q35 \ +> +>>>>>>>>        -cpu host -smp 2 -m 2G \ +> +>>>>>>>>        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +> +>>>>>>>> dev/shm/ +> +>>>>>>>> ram0,share=on\ +> +>>>>>>>>        -machine memory-backend=ram0 \ +> +>>>>>>>>        -machine aux-ram-share=on \ +> +>>>>>>>>        -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>>>>>>        -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>>>>>>        -nographic \ +> +>>>>>>>>        -device qxl-vga +> +>>>>>>> +> +>>>>>>> Run migration target: +> +>>>>>>>> EMULATOR=/path/to/emulator +> +>>>>>>>> ROOTFS=/path/to/image +> +>>>>>>>> QMPSOCK=/var/run/alma8qmp-dst.sock +> +>>>>>>>> $EMULATOR -enable-kvm \ +> +>>>>>>>>        -machine q35 \ +> +>>>>>>>>        -cpu host -smp 2 -m 2G \ +> +>>>>>>>>        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +> +>>>>>>>> dev/shm/ +> +>>>>>>>> ram0,share=on\ +> +>>>>>>>>        -machine memory-backend=ram0 \ +> +>>>>>>>>        -machine aux-ram-share=on \ +> +>>>>>>>>        -drive file=$ROOTFS,media=disk,if=virtio \ +> +>>>>>>>>        -qmp unix:$QMPSOCK,server=on,wait=off \ +> +>>>>>>>>        -nographic \ +> +>>>>>>>>        -device qxl-vga \ +> +>>>>>>>>        -incoming tcp:0:44444 \ +> +>>>>>>>>        -incoming '{"channel-type": "cpr", "addr": { "transport": +> +>>>>>>>> "socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +> +>>>>>>> +> +>>>>>>> +> +>>>>>>> Launch the migration: +> +>>>>>>>> QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +> +>>>>>>>> QMPSOCK=/var/run/alma8qmp-src.sock +> +>>>>>>>> +> +>>>>>>>> $QMPSHELL -p $QMPSOCK < +>>>>>>>>        migrate-set-parameters mode=cpr-transfer +> +>>>>>>>>        migrate channels=[{"channel-type":"main","addr": +> +>>>>>>>> {"transport":"socket","type":"inet","host":"0","port":"44444"}}, +> +>>>>>>>> {"channel-type":"cpr","addr": +> +>>>>>>>> {"transport":"socket","type":"unix","path":"/var/run/alma8cpr- +> +>>>>>>>> dst.sock"}}] +> +>>>>>>>> EOF +> +>>>>>>> +> +>>>>>>> Then, after a while, QXL guest driver on target crashes spewing the +> +>>>>>>> following messages: +> +>>>>>>>> [   73.962002] [TTM] Buffer eviction failed +> +>>>>>>>> [   73.962072] qxl 0000:00:02.0: object_init failed for (3149824, +> +>>>>>>>> 0x00000001) +> +>>>>>>>> [   73.962081] [drm:qxl_alloc_bo_reserved [qxl]] *ERROR* failed to +> +>>>>>>>> allocate VRAM BO +> +>>>>>>> +> +>>>>>>> That seems to be a known kernel QXL driver bug: +> +>>>>>>> +> +>>>>>>> +https://lore.kernel.org/all/20220907094423.93581-1- +> +>>>>>>> min_halo@163.com/T/ +> +>>>>>>> +https://lore.kernel.org/lkml/ZTgydqRlK6WX_b29@eldamar.lan/ +> +>>>>>>> +> +>>>>>>> (the latter discussion contains that reproduce script 
which +> +>>>>>>> speeds up +> +>>>>>>> the crash in the guest): +> +>>>>>>>> #!/bin/bash +> +>>>>>>>> +> +>>>>>>>> chvt 3 +> +>>>>>>>> +> +>>>>>>>> for j in $(seq 80); do +> +>>>>>>>>            echo "$(date) starting round $j" +> +>>>>>>>>            if [ "$(journalctl --boot | grep "failed to allocate +> +>>>>>>>> VRAM +> +>>>>>>>> BO")" != "" ]; then +> +>>>>>>>>                    echo "bug was reproduced after $j tries" +> +>>>>>>>>                    exit 1 +> +>>>>>>>>            fi +> +>>>>>>>>            for i in $(seq 100); do +> +>>>>>>>>                    dmesg > /dev/tty3 +> +>>>>>>>>            done +> +>>>>>>>> done +> +>>>>>>>> +> +>>>>>>>> echo "bug could not be reproduced" +> +>>>>>>>> exit 0 +> +>>>>>>> +> +>>>>>>> The bug itself seems to remain unfixed, as I was able to reproduce +> +>>>>>>> that +> +>>>>>>> with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +> +>>>>>>> cpr-transfer code also seems to be buggy as it triggers the crash - +> +>>>>>>> without the cpr-transfer migration the above reproduce doesn't +> +>>>>>>> lead to +> +>>>>>>> crash on the source VM. +> +>>>>>>> +> +>>>>>>> I suspect that, as cpr-transfer doesn't migrate the guest +> +>>>>>>> memory, but +> +>>>>>>> rather passes it through the memory backend object, our code might +> +>>>>>>> somehow corrupt the VRAM.  However, I wasn't able to trace the +> +>>>>>>> corruption so far. +> +>>>>>>> +> +>>>>>>> Could somebody help the investigation and take a look into +> +>>>>>>> this?  Any +> +>>>>>>> suggestions would be appreciated.  Thanks! +> +>>>>>> +> +>>>>>> Possibly some memory region created by qxl is not being preserved. +> +>>>>>> Try adding these traces to see what is preserved: +> +>>>>>> +> +>>>>>> -trace enable='*cpr*' +> +>>>>>> -trace enable='*ram_alloc*' +> +>>>>> +> +>>>>> Also try adding this patch to see if it flags any ram blocks as not +> +>>>>> compatible with cpr.  A message is printed at migration start time. 
+> +>>>>>    +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +> +>>>>> email- +> +>>>>> steven.sistare@oracle.com/ +> +>>>>> +> +>>>>> - Steve +> +>>>>> +> +>>>> +> +>>>> With the traces enabled + the "migration: ram block cpr blockers" +> +>>>> patch +> +>>>> applied: +> +>>>> +> +>>>> Source: +> +>>>>> cpr_find_fd pc.bios, id 0 returns -1 +> +>>>>> cpr_save_fd pc.bios, id 0, fd 22 +> +>>>>> qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +> +>>>>> 0x7fec18e00000 +> +>>>>> cpr_find_fd pc.rom, id 0 returns -1 +> +>>>>> cpr_save_fd pc.rom, id 0, fd 23 +> +>>>>> qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +> +>>>>> 0x7fec18c00000 +> +>>>>> cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +> +>>>>> cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +> +>>>>> qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +> +>>>>> 262144 fd 24 host 0x7fec18a00000 +> +>>>>> cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +> +>>>>> cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +> +>>>>> 67108864 fd 25 host 0x7feb77e00000 +> +>>>>> cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +> +>>>>> cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +> +>>>>> fd 27 host 0x7fec18800000 +> +>>>>> cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +> +>>>>> cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +> +>>>>> 67108864 fd 28 host 0x7feb73c00000 +> +>>>>> cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +> +>>>>> cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +> +>>>>> fd 34 host 0x7fec18600000 +> +>>>>> cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +> +>>>>> cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +> +>>>>> qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +> +>>>>> 2097152 fd 35 host 0x7fec18200000 +> +>>>>> cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +> +>>>>> cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +> +>>>>> qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +> +>>>>> fd 36 host 0x7feb8b600000 +> +>>>>> cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +> +>>>>> cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +> +>>>>> qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +> +>>>>> 37 host 0x7feb8b400000 +> +>>>>> +> +>>>>> cpr_state_save cpr-transfer mode +> +>>>>> cpr_transfer_output /var/run/alma8cpr-dst.sock +> +>>>> +> +>>>> Target: +> +>>>>> cpr_transfer_input /var/run/alma8cpr-dst.sock +> +>>>>> cpr_state_load cpr-transfer mode +> +>>>>> cpr_find_fd pc.bios, id 0 returns 20 +> +>>>>> qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +> +>>>>> 0x7fcdc9800000 +> +>>>>> cpr_find_fd pc.rom, id 0 returns 19 +> +>>>>> qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +> +>>>>> 0x7fcdc9600000 +> +>>>>> cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +> +>>>>> qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +> +>>>>> 262144 fd 18 host 0x7fcdc9400000 +> +>>>>> cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +> +>>>>> 67108864 fd 17 host 0x7fcd27e00000 +> +>>>>> cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +> +>>>>> 
qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +> +>>>>> fd 16 host 0x7fcdc9200000 +> +>>>>> cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +> +>>>>> 67108864 fd 15 host 0x7fcd23c00000 +> +>>>>> cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +> +>>>>> qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +> +>>>>> fd 14 host 0x7fcdc8800000 +> +>>>>> cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +> +>>>>> qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +> +>>>>> 2097152 fd 13 host 0x7fcdc8400000 +> +>>>>> cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +> +>>>>> qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +> +>>>>> fd 11 host 0x7fcdc8200000 +> +>>>>> cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +> +>>>>> qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +> +>>>>> 10 host 0x7fcd3be00000 +> +>>>> +> +>>>> Looks like both vga.vram and qxl.vram are being preserved (with the +> +>>>> same +> +>>>> addresses), and no incompatible ram blocks are found during migration. +> +>>> +> +>>> Sorry, addressed are not the same, of course.  However corresponding +> +>>> ram +> +>>> blocks do seem to be preserved and initialized. +> +>> +> +>> So far, I have not reproduced the guest driver failure. +> +>> +> +>> However, I have isolated places where new QEMU improperly writes to +> +>> the qxl memory regions prior to starting the guest, by mmap'ing them +> +>> readonly after cpr: +> +>> +> +>>    qemu_ram_alloc_internal() +> +>>      if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +> +>>          ram_flags |= RAM_READONLY; +> +>>      new_block = qemu_ram_alloc_from_fd(...) +> +>> +> +>> I have attached a draft fix; try it and let me know. +> +>> My console window looks fine before and after cpr, using +> +>> -vnc $hostip:0 -vga qxl +> +>> +> +>> - Steve +> +> +> +> Regarding the reproduce: when I launch the buggy version with the same +> +> options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +> +> my VNC client silently hangs on the target after a while.  Could it +> +> happen on your stand as well? +> +> +cpr does not preserve the vnc connection and session.  To test, I specify +> +port 0 for the source VM and port 1 for the dest.  When the src vnc goes +> +dormant the dest vnc becomes active. +> +Sure, I meant that VNC on the dest (on the port 1) works for a while +after the migration and then hangs, apparently after the guest QXL crash. + +> +> Could you try launching VM with +> +> "-nographic -device qxl-vga"?  That way VM's serial console is given you +> +> directly in the shell, so when qxl driver crashes you're still able to +> +> inspect the kernel messages. +> +> +I have been running like that, but have not reproduced the qxl driver +> +crash, +> +and I suspect my guest image+kernel is too old. +Yes, that's probably the case. But the crash occurs on my Fedora 41 +guest with the 6.11.5-300.fc41.x86_64 kernel, so newer kernels seem to +be buggy. + + +> +However, once I realized the +> +issue was post-cpr modification of qxl memory, I switched my attention +> +to the +> +fix. +> +> +> As for your patch, I can report that it doesn't resolve the issue as it +> +> is.  But I was able to track down another possible memory corruption +> +> using your approach with readonly mmap'ing: +> +> +> +>> Program terminated with signal SIGSEGV, Segmentation fault. 
+> +>> #0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +> +>> 412         d->ram->magic       = cpu_to_le32(QXL_RAM_MAGIC); +> +>> [Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +> +>> (gdb) bt +> +>> #0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +> +>> #1  0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +> +>> errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +> +>> #2  0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +> +>> errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +> +>> #3  0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +> +>> errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +> +>> #4  0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, +> +>> value=true, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +> +>> #5  0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, +> +>> v=0x5638996f3770, name=0x56389759b141 "realized", +> +>> opaque=0x5638987893d0, errp=0x7ffd3c2b84e0) +> +>>      at ../qom/object.c:2374 +> +>> #6  0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, +> +>> name=0x56389759b141 "realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) +> +>>      at ../qom/object.c:1449 +> +>> #7  0x00005638970f8586 in object_property_set_qobject +> +>> (obj=0x5638996e0e70, name=0x56389759b141 "realized", +> +>> value=0x5638996df900, errp=0x7ffd3c2b84e0) +> +>>      at ../qom/qom-qobject.c:28 +> +>> #8  0x00005638970f3d8d in object_property_set_bool +> +>> (obj=0x5638996e0e70, name=0x56389759b141 "realized", value=true, +> +>> errp=0x7ffd3c2b84e0) +> +>>      at ../qom/object.c:1519 +> +>> #9  0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, +> +>> bus=0x563898cf3c20, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +> +>> #10 0x0000563896dba675 in qdev_device_add_from_qdict +> +>> (opts=0x5638996dfe50, from_json=false, errp=0x7ffd3c2b84e0) at ../ +> +>> system/qdev-monitor.c:714 +> +>> #11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, +> +>> errp=0x56389855dc40 ) at ../system/qdev-monitor.c:733 +> +>> #12 0x0000563896dc48f1 in device_init_func (opaque=0x0, +> +>> opts=0x563898786150, errp=0x56389855dc40 ) at ../system/ +> +>> vl.c:1207 +> +>> #13 0x000056389737a6cc in qemu_opts_foreach +> +>>      (list=0x563898427b60 , func=0x563896dc48ca +> +>> , opaque=0x0, errp=0x56389855dc40 ) +> +>>      at ../util/qemu-option.c:1135 +> +>> #14 0x0000563896dc89b5 in qemu_create_cli_devices () at ../system/ +> +>> vl.c:2745 +> +>> #15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +> +>> ) at ../system/vl.c:2806 +> +>> #16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) +> +>> at ../system/vl.c:3838 +> +>> #17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at ../ +> +>> system/main.c:72 +> +> +> +> So the attached adjusted version of your patch does seem to help.  At +> +> least I can't reproduce the crash on my stand. +> +> +Thanks for the stack trace; the calls to SPICE_RING_INIT in init_qxl_ram +> +are +> +definitely harmful.  Try V2 of the patch, attached, which skips the lines +> +of init_qxl_ram that modify guest memory. +> +Thanks, your v2 patch does seem to prevent the crash. Would you re-send +it to the list as a proper fix? + +> +> I'm wondering, could it be useful to explicitly mark all the reused +> +> memory regions readonly upon cpr-transfer, and then make them writable +> +> back again after the migration is done?  That way we will be segfaulting +> +> early on instead of debugging tricky memory corruptions. 
+> +> +It's a useful debugging technique, but changing protection on a large +> +memory region +> +can be too expensive for production due to TLB shootdowns. +> +> +Also, there are cases where writes are performed but the value is +> +guaranteed to +> +be the same: +> +  qxl_post_load() +> +    qxl_set_mode() +> +      d->rom->mode = cpu_to_le32(modenr); +> +The value is the same because mode and shadow_rom.mode were passed in +> +vmstate +> +from old qemu. +> +There're also cases where devices' ROM might be re-initialized. E.g. +this segfault occures upon further exploration of RO mapped RAM blocks: + +> +Program terminated with signal SIGSEGV, Segmentation fault. +> +#0 __memmove_avx_unaligned_erms () at +> +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +> +664 rep movsb +> +[Current thread is 1 (Thread 0x7f6e7d08b480 (LWP 310379))] +> +(gdb) bt +> +#0 __memmove_avx_unaligned_erms () at +> +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +> +#1 0x000055aa1d030ecd in rom_set_mr (rom=0x55aa200ba380, +> +owner=0x55aa2019ac10, name=0x7fffb8272bc0 "/rom@etc/acpi/tables", ro=true) +> +at ../hw/core/loader.c:1032 +> +#2 0x000055aa1d031577 in rom_add_blob +> +(name=0x55aa1da51f13 "etc/acpi/tables", blob=0x55aa208a1070, len=131072, +> +max_len=2097152, addr=18446744073709551615, fw_file_name=0x55aa1da51f13 +> +"etc/acpi/tables", fw_callback=0x55aa1d441f59 , +> +callback_opaque=0x55aa20ff0010, as=0x0, read_only=true) at +> +../hw/core/loader.c:1147 +> +#3 0x000055aa1cfd788d in acpi_add_rom_blob +> +(update=0x55aa1d441f59 , opaque=0x55aa20ff0010, +> +blob=0x55aa1fc9aa00, name=0x55aa1da51f13 "etc/acpi/tables") at +> +../hw/acpi/utils.c:46 +> +#4 0x000055aa1d44213f in acpi_setup () at ../hw/i386/acpi-build.c:2720 +> +#5 0x000055aa1d434199 in pc_machine_done (notifier=0x55aa1ff15050, data=0x0) +> +at ../hw/i386/pc.c:638 +> +#6 0x000055aa1d876845 in notifier_list_notify (list=0x55aa1ea25c10 +> +, data=0x0) at ../util/notify.c:39 +> +#7 0x000055aa1d039ee5 in qdev_machine_creation_done () at +> +../hw/core/machine.c:1749 +> +#8 0x000055aa1d2c7b3e in qemu_machine_creation_done (errp=0x55aa1ea5cc40 +> +) at ../system/vl.c:2779 +> +#9 0x000055aa1d2c7c7d in qmp_x_exit_preconfig (errp=0x55aa1ea5cc40 +> +) at ../system/vl.c:2807 +> +#10 0x000055aa1d2ca64f in qemu_init (argc=35, argv=0x7fffb82730e8) at +> +../system/vl.c:3838 +> +#11 0x000055aa1d79638c in main (argc=35, argv=0x7fffb82730e8) at +> +../system/main.c:72 +I'm not sure whether ACPI tables ROM in particular is rewritten with the +same content, but there might be cases where ROM can be read from file +system upon initialization. That is undesirable as guest kernel +certainly won't be too happy about sudden change of the device's ROM +content. + +So the issue we're dealing with here is any unwanted memory related +device initialization upon cpr. + +For now the only thing that comes to my mind is to make a test where we +put as many devices as we can into a VM, make ram blocks RO upon cpr +(and remap them as RW later after migration is done, if needed), and +catch any unwanted memory violations. As Den suggested, we might +consider adding that behaviour as a separate non-default option (or +"migrate" command flag specific to cpr-transfer), which would only be +used in the testing. 
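+One cheap way such a test could flag unwanted rewrites of preserved blocks,
+without changing page protections at all, is to checksum each block before
+device realize and compare afterwards. The toy program below shows the idea
+with a plain FNV-1a hash over a dummy block; nothing here is QEMU code, and a
+real test would hash the actual preserved RAM blocks instead.
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <stddef.h>
+
+  static uint64_t fnv1a(const uint8_t *p, size_t n)
+  {
+      uint64_t h = 0xcbf29ce484222325ull;
+      for (size_t i = 0; i < n; i++) {
+          h ^= p[i];
+          h *= 0x100000001b3ull;
+      }
+      return h;
+  }
+
+  int main(void)
+  {
+      uint8_t block[4096] = { [0] = 0x51, [1] = 0x58, [2] = 0x4c };  /* "QXL" */
+      uint64_t before = fnv1a(block, sizeof(block));
+
+      block[0] = 0;                       /* simulated re-initialization */
+
+      uint64_t after = fnv1a(block, sizeof(block));
+      printf(before == after ? "block preserved\n"
+                             : "block was rewritten after cpr!\n");
+      return 0;
+  }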
+ +Andrey + +On 3/6/25 16:16, Andrey Drobyshev wrote: +On 3/5/25 11:19 PM, Steven Sistare wrote: +On 3/5/2025 11:50 AM, Andrey Drobyshev wrote: +On 3/4/25 9:05 PM, Steven Sistare wrote: +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +On 2/28/25 8:20 PM, Steven Sistare wrote: +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently +and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +        -machine q35 \ +        -cpu host -smp 2 -m 2G \ +        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +dev/shm/ +ram0,share=on\ +        -machine memory-backend=ram0 \ +        -machine aux-ram-share=on \ +        -drive file=$ROOTFS,media=disk,if=virtio \ +        -qmp unix:$QMPSOCK,server=on,wait=off \ +        -nographic \ +        -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +        -machine q35 \ +        -cpu host -smp 2 -m 2G \ +        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +dev/shm/ +ram0,share=on\ +        -machine memory-backend=ram0 \ +        -machine aux-ram-share=on \ +        -drive file=$ROOTFS,media=disk,if=virtio \ +        -qmp unix:$QMPSOCK,server=on,wait=off \ +        -nographic \ +        -device qxl-vga \ +        -incoming tcp:0:44444 \ +        -incoming '{"channel-type": "cpr", "addr": { "transport": +"socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +            done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce +that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't +lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest +memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into +this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr.  A message is printed at migration start time. 
+    +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +email- +steven.sistare@oracle.com/ + +- Steve +With the traces enabled + the "migration: ram block cpr blockers" +patch +applied: + +Source: +cpr_find_fd pc.bios, id 0 returns -1 +cpr_save_fd pc.bios, id 0, fd 22 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +0x7fec18e00000 +cpr_find_fd pc.rom, id 0 returns -1 +cpr_save_fd pc.rom, id 0, fd 23 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +0x7fec18c00000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 24 host 0x7fec18a00000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 25 host 0x7feb77e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 27 host 0x7fec18800000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 28 host 0x7feb73c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 34 host 0x7fec18600000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 35 host 0x7fec18200000 +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +fd 36 host 0x7feb8b600000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +37 host 0x7feb8b400000 + +cpr_state_save cpr-transfer mode +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +cpr_transfer_input /var/run/alma8cpr-dst.sock +cpr_state_load cpr-transfer mode +cpr_find_fd pc.bios, id 0 returns 20 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +0x7fcdc9800000 +cpr_find_fd pc.rom, id 0 returns 19 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +0x7fcdc9600000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 18 host 0x7fcdc9400000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 17 host 0x7fcd27e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 16 host 0x7fcdc9200000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 15 host 0x7fcd23c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 14 host 0x7fcdc8800000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 13 host 0x7fcdc8400000 +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 
max_size 65536 +fd 11 host 0x7fcdc8200000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +10 host 0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with the +same +addresses), and no incompatible ram blocks are found during migration. +Sorry, addressed are not the same, of course.  However corresponding +ram +blocks do seem to be preserved and initialized. +So far, I have not reproduced the guest driver failure. + +However, I have isolated places where new QEMU improperly writes to +the qxl memory regions prior to starting the guest, by mmap'ing them +readonly after cpr: + +    qemu_ram_alloc_internal() +      if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +          ram_flags |= RAM_READONLY; +      new_block = qemu_ram_alloc_from_fd(...) + +I have attached a draft fix; try it and let me know. +My console window looks fine before and after cpr, using +-vnc $hostip:0 -vga qxl + +- Steve +Regarding the reproduce: when I launch the buggy version with the same +options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +my VNC client silently hangs on the target after a while.  Could it +happen on your stand as well? +cpr does not preserve the vnc connection and session.  To test, I specify +port 0 for the source VM and port 1 for the dest.  When the src vnc goes +dormant the dest vnc becomes active. +Sure, I meant that VNC on the dest (on the port 1) works for a while +after the migration and then hangs, apparently after the guest QXL crash. +Could you try launching VM with +"-nographic -device qxl-vga"?  That way VM's serial console is given you +directly in the shell, so when qxl driver crashes you're still able to +inspect the kernel messages. +I have been running like that, but have not reproduced the qxl driver +crash, +and I suspect my guest image+kernel is too old. +Yes, that's probably the case. But the crash occurs on my Fedora 41 +guest with the 6.11.5-300.fc41.x86_64 kernel, so newer kernels seem to +be buggy. +However, once I realized the +issue was post-cpr modification of qxl memory, I switched my attention +to the +fix. +As for your patch, I can report that it doesn't resolve the issue as it +is.  But I was able to track down another possible memory corruption +using your approach with readonly mmap'ing: +Program terminated with signal SIGSEGV, Segmentation fault. 
+#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +412         d->ram->magic       = cpu_to_le32(QXL_RAM_MAGIC); +[Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +(gdb) bt +#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +#1  0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +#2  0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +#3  0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +#4  0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, +value=true, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +#5  0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, +v=0x5638996f3770, name=0x56389759b141 "realized", +opaque=0x5638987893d0, errp=0x7ffd3c2b84e0) +      at ../qom/object.c:2374 +#6  0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, +name=0x56389759b141 "realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) +      at ../qom/object.c:1449 +#7  0x00005638970f8586 in object_property_set_qobject +(obj=0x5638996e0e70, name=0x56389759b141 "realized", +value=0x5638996df900, errp=0x7ffd3c2b84e0) +      at ../qom/qom-qobject.c:28 +#8  0x00005638970f3d8d in object_property_set_bool +(obj=0x5638996e0e70, name=0x56389759b141 "realized", value=true, +errp=0x7ffd3c2b84e0) +      at ../qom/object.c:1519 +#9  0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, +bus=0x563898cf3c20, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +#10 0x0000563896dba675 in qdev_device_add_from_qdict +(opts=0x5638996dfe50, from_json=false, errp=0x7ffd3c2b84e0) at ../ +system/qdev-monitor.c:714 +#11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, +errp=0x56389855dc40 ) at ../system/qdev-monitor.c:733 +#12 0x0000563896dc48f1 in device_init_func (opaque=0x0, +opts=0x563898786150, errp=0x56389855dc40 ) at ../system/ +vl.c:1207 +#13 0x000056389737a6cc in qemu_opts_foreach +      (list=0x563898427b60 , func=0x563896dc48ca +, opaque=0x0, errp=0x56389855dc40 ) +      at ../util/qemu-option.c:1135 +#14 0x0000563896dc89b5 in qemu_create_cli_devices () at ../system/ +vl.c:2745 +#15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +) at ../system/vl.c:2806 +#16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) +at ../system/vl.c:3838 +#17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at ../ +system/main.c:72 +So the attached adjusted version of your patch does seem to help.  At +least I can't reproduce the crash on my stand. +Thanks for the stack trace; the calls to SPICE_RING_INIT in init_qxl_ram +are +definitely harmful.  Try V2 of the patch, attached, which skips the lines +of init_qxl_ram that modify guest memory. +Thanks, your v2 patch does seem to prevent the crash. Would you re-send +it to the list as a proper fix? +I'm wondering, could it be useful to explicitly mark all the reused +memory regions readonly upon cpr-transfer, and then make them writable +back again after the migration is done?  That way we will be segfaulting +early on instead of debugging tricky memory corruptions. +It's a useful debugging technique, but changing protection on a large +memory region +can be too expensive for production due to TLB shootdowns. 
+ +Also, there are cases where writes are performed but the value is +guaranteed to +be the same: +   qxl_post_load() +     qxl_set_mode() +       d->rom->mode = cpu_to_le32(modenr); +The value is the same because mode and shadow_rom.mode were passed in +vmstate +from old qemu. +There're also cases where devices' ROM might be re-initialized. E.g. +this segfault occures upon further exploration of RO mapped RAM blocks: +Program terminated with signal SIGSEGV, Segmentation fault. +#0 __memmove_avx_unaligned_erms () at +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +664 rep movsb +[Current thread is 1 (Thread 0x7f6e7d08b480 (LWP 310379))] +(gdb) bt +#0 __memmove_avx_unaligned_erms () at +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +#1 0x000055aa1d030ecd in rom_set_mr (rom=0x55aa200ba380, owner=0x55aa2019ac10, +name=0x7fffb8272bc0 "/rom@etc/acpi/tables", ro=true) + at ../hw/core/loader.c:1032 +#2 0x000055aa1d031577 in rom_add_blob + (name=0x55aa1da51f13 "etc/acpi/tables", blob=0x55aa208a1070, len=131072, max_len=2097152, +addr=18446744073709551615, fw_file_name=0x55aa1da51f13 "etc/acpi/tables", +fw_callback=0x55aa1d441f59 , callback_opaque=0x55aa20ff0010, as=0x0, +read_only=true) at ../hw/core/loader.c:1147 +#3 0x000055aa1cfd788d in acpi_add_rom_blob + (update=0x55aa1d441f59 , opaque=0x55aa20ff0010, +blob=0x55aa1fc9aa00, name=0x55aa1da51f13 "etc/acpi/tables") at ../hw/acpi/utils.c:46 +#4 0x000055aa1d44213f in acpi_setup () at ../hw/i386/acpi-build.c:2720 +#5 0x000055aa1d434199 in pc_machine_done (notifier=0x55aa1ff15050, data=0x0) +at ../hw/i386/pc.c:638 +#6 0x000055aa1d876845 in notifier_list_notify (list=0x55aa1ea25c10 +, data=0x0) at ../util/notify.c:39 +#7 0x000055aa1d039ee5 in qdev_machine_creation_done () at +../hw/core/machine.c:1749 +#8 0x000055aa1d2c7b3e in qemu_machine_creation_done (errp=0x55aa1ea5cc40 +) at ../system/vl.c:2779 +#9 0x000055aa1d2c7c7d in qmp_x_exit_preconfig (errp=0x55aa1ea5cc40 +) at ../system/vl.c:2807 +#10 0x000055aa1d2ca64f in qemu_init (argc=35, argv=0x7fffb82730e8) at +../system/vl.c:3838 +#11 0x000055aa1d79638c in main (argc=35, argv=0x7fffb82730e8) at +../system/main.c:72 +I'm not sure whether ACPI tables ROM in particular is rewritten with the +same content, but there might be cases where ROM can be read from file +system upon initialization. That is undesirable as guest kernel +certainly won't be too happy about sudden change of the device's ROM +content. + +So the issue we're dealing with here is any unwanted memory related +device initialization upon cpr. + +For now the only thing that comes to my mind is to make a test where we +put as many devices as we can into a VM, make ram blocks RO upon cpr +(and remap them as RW later after migration is done, if needed), and +catch any unwanted memory violations. As Den suggested, we might +consider adding that behaviour as a separate non-default option (or +"migrate" command flag specific to cpr-transfer), which would only be +used in the testing. + +Andrey +No way. ACPI with the source must be used in the same way as BIOSes +and optional ROMs. + +Den + +On 3/6/2025 10:52 AM, Denis V. 
Lunev wrote: +On 3/6/25 16:16, Andrey Drobyshev wrote: +On 3/5/25 11:19 PM, Steven Sistare wrote: +On 3/5/2025 11:50 AM, Andrey Drobyshev wrote: +On 3/4/25 9:05 PM, Steven Sistare wrote: +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +On 2/28/25 8:20 PM, Steven Sistare wrote: +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently +and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +        -machine q35 \ +        -cpu host -smp 2 -m 2G \ +        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +dev/shm/ +ram0,share=on\ +        -machine memory-backend=ram0 \ +        -machine aux-ram-share=on \ +        -drive file=$ROOTFS,media=disk,if=virtio \ +        -qmp unix:$QMPSOCK,server=on,wait=off \ +        -nographic \ +        -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +        -machine q35 \ +        -cpu host -smp 2 -m 2G \ +        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +dev/shm/ +ram0,share=on\ +        -machine memory-backend=ram0 \ +        -machine aux-ram-share=on \ +        -drive file=$ROOTFS,media=disk,if=virtio \ +        -qmp unix:$QMPSOCK,server=on,wait=off \ +        -nographic \ +        -device qxl-vga \ +        -incoming tcp:0:44444 \ +        -incoming '{"channel-type": "cpr", "addr": { "transport": +"socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +            done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce +that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't +lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest +memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into +this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr.  A message is printed at migration start time. 
+    +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +email- +steven.sistare@oracle.com/ + +- Steve +With the traces enabled + the "migration: ram block cpr blockers" +patch +applied: + +Source: +cpr_find_fd pc.bios, id 0 returns -1 +cpr_save_fd pc.bios, id 0, fd 22 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +0x7fec18e00000 +cpr_find_fd pc.rom, id 0 returns -1 +cpr_save_fd pc.rom, id 0, fd 23 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +0x7fec18c00000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 24 host 0x7fec18a00000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 25 host 0x7feb77e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 27 host 0x7fec18800000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 28 host 0x7feb73c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 34 host 0x7fec18600000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 35 host 0x7fec18200000 +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +fd 36 host 0x7feb8b600000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +37 host 0x7feb8b400000 + +cpr_state_save cpr-transfer mode +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +cpr_transfer_input /var/run/alma8cpr-dst.sock +cpr_state_load cpr-transfer mode +cpr_find_fd pc.bios, id 0 returns 20 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +0x7fcdc9800000 +cpr_find_fd pc.rom, id 0 returns 19 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +0x7fcdc9600000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 18 host 0x7fcdc9400000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 17 host 0x7fcd27e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 16 host 0x7fcdc9200000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 15 host 0x7fcd23c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 14 host 0x7fcdc8800000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 13 host 0x7fcdc8400000 +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 
max_size 65536 +fd 11 host 0x7fcdc8200000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +10 host 0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with the +same +addresses), and no incompatible ram blocks are found during migration. +Sorry, addressed are not the same, of course.  However corresponding +ram +blocks do seem to be preserved and initialized. +So far, I have not reproduced the guest driver failure. + +However, I have isolated places where new QEMU improperly writes to +the qxl memory regions prior to starting the guest, by mmap'ing them +readonly after cpr: + +    qemu_ram_alloc_internal() +      if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +          ram_flags |= RAM_READONLY; +      new_block = qemu_ram_alloc_from_fd(...) + +I have attached a draft fix; try it and let me know. +My console window looks fine before and after cpr, using +-vnc $hostip:0 -vga qxl + +- Steve +Regarding the reproduce: when I launch the buggy version with the same +options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +my VNC client silently hangs on the target after a while.  Could it +happen on your stand as well? +cpr does not preserve the vnc connection and session.  To test, I specify +port 0 for the source VM and port 1 for the dest.  When the src vnc goes +dormant the dest vnc becomes active. +Sure, I meant that VNC on the dest (on the port 1) works for a while +after the migration and then hangs, apparently after the guest QXL crash. +Could you try launching VM with +"-nographic -device qxl-vga"?  That way VM's serial console is given you +directly in the shell, so when qxl driver crashes you're still able to +inspect the kernel messages. +I have been running like that, but have not reproduced the qxl driver +crash, +and I suspect my guest image+kernel is too old. +Yes, that's probably the case.  But the crash occurs on my Fedora 41 +guest with the 6.11.5-300.fc41.x86_64 kernel, so newer kernels seem to +be buggy. +However, once I realized the +issue was post-cpr modification of qxl memory, I switched my attention +to the +fix. +As for your patch, I can report that it doesn't resolve the issue as it +is.  But I was able to track down another possible memory corruption +using your approach with readonly mmap'ing: +Program terminated with signal SIGSEGV, Segmentation fault. 
+#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +412         d->ram->magic       = cpu_to_le32(QXL_RAM_MAGIC); +[Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +(gdb) bt +#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +#1  0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +#2  0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +#3  0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +#4  0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, +value=true, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +#5  0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, +v=0x5638996f3770, name=0x56389759b141 "realized", +opaque=0x5638987893d0, errp=0x7ffd3c2b84e0) +      at ../qom/object.c:2374 +#6  0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, +name=0x56389759b141 "realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) +      at ../qom/object.c:1449 +#7  0x00005638970f8586 in object_property_set_qobject +(obj=0x5638996e0e70, name=0x56389759b141 "realized", +value=0x5638996df900, errp=0x7ffd3c2b84e0) +      at ../qom/qom-qobject.c:28 +#8  0x00005638970f3d8d in object_property_set_bool +(obj=0x5638996e0e70, name=0x56389759b141 "realized", value=true, +errp=0x7ffd3c2b84e0) +      at ../qom/object.c:1519 +#9  0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, +bus=0x563898cf3c20, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +#10 0x0000563896dba675 in qdev_device_add_from_qdict +(opts=0x5638996dfe50, from_json=false, errp=0x7ffd3c2b84e0) at ../ +system/qdev-monitor.c:714 +#11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, +errp=0x56389855dc40 ) at ../system/qdev-monitor.c:733 +#12 0x0000563896dc48f1 in device_init_func (opaque=0x0, +opts=0x563898786150, errp=0x56389855dc40 ) at ../system/ +vl.c:1207 +#13 0x000056389737a6cc in qemu_opts_foreach +      (list=0x563898427b60 , func=0x563896dc48ca +, opaque=0x0, errp=0x56389855dc40 ) +      at ../util/qemu-option.c:1135 +#14 0x0000563896dc89b5 in qemu_create_cli_devices () at ../system/ +vl.c:2745 +#15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +) at ../system/vl.c:2806 +#16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) +at ../system/vl.c:3838 +#17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at ../ +system/main.c:72 +So the attached adjusted version of your patch does seem to help.  At +least I can't reproduce the crash on my stand. +Thanks for the stack trace; the calls to SPICE_RING_INIT in init_qxl_ram +are +definitely harmful.  Try V2 of the patch, attached, which skips the lines +of init_qxl_ram that modify guest memory. +Thanks, your v2 patch does seem to prevent the crash.  Would you re-send +it to the list as a proper fix? +Yes. Was waiting for your confirmation. +I'm wondering, could it be useful to explicitly mark all the reused +memory regions readonly upon cpr-transfer, and then make them writable +back again after the migration is done?  That way we will be segfaulting +early on instead of debugging tricky memory corruptions. +It's a useful debugging technique, but changing protection on a large +memory region +can be too expensive for production due to TLB shootdowns. 
+ +Also, there are cases where writes are performed but the value is +guaranteed to +be the same: +   qxl_post_load() +     qxl_set_mode() +       d->rom->mode = cpu_to_le32(modenr); +The value is the same because mode and shadow_rom.mode were passed in +vmstate +from old qemu. +There're also cases where devices' ROM might be re-initialized.  E.g. +this segfault occures upon further exploration of RO mapped RAM blocks: +Program terminated with signal SIGSEGV, Segmentation fault. +#0  __memmove_avx_unaligned_erms () at +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +664             rep     movsb +[Current thread is 1 (Thread 0x7f6e7d08b480 (LWP 310379))] +(gdb) bt +#0  __memmove_avx_unaligned_erms () at +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +#1  0x000055aa1d030ecd in rom_set_mr (rom=0x55aa200ba380, owner=0x55aa2019ac10, +name=0x7fffb8272bc0 "/rom@etc/acpi/tables", ro=true) +     at ../hw/core/loader.c:1032 +#2  0x000055aa1d031577 in rom_add_blob +     (name=0x55aa1da51f13 "etc/acpi/tables", blob=0x55aa208a1070, len=131072, max_len=2097152, +addr=18446744073709551615, fw_file_name=0x55aa1da51f13 "etc/acpi/tables", +fw_callback=0x55aa1d441f59 , callback_opaque=0x55aa20ff0010, as=0x0, +read_only=true) at ../hw/core/loader.c:1147 +#3  0x000055aa1cfd788d in acpi_add_rom_blob +     (update=0x55aa1d441f59 , opaque=0x55aa20ff0010, +blob=0x55aa1fc9aa00, name=0x55aa1da51f13 "etc/acpi/tables") at ../hw/acpi/utils.c:46 +#4  0x000055aa1d44213f in acpi_setup () at ../hw/i386/acpi-build.c:2720 +#5  0x000055aa1d434199 in pc_machine_done (notifier=0x55aa1ff15050, data=0x0) +at ../hw/i386/pc.c:638 +#6  0x000055aa1d876845 in notifier_list_notify (list=0x55aa1ea25c10 +, data=0x0) at ../util/notify.c:39 +#7  0x000055aa1d039ee5 in qdev_machine_creation_done () at +../hw/core/machine.c:1749 +#8  0x000055aa1d2c7b3e in qemu_machine_creation_done (errp=0x55aa1ea5cc40 +) at ../system/vl.c:2779 +#9  0x000055aa1d2c7c7d in qmp_x_exit_preconfig (errp=0x55aa1ea5cc40 +) at ../system/vl.c:2807 +#10 0x000055aa1d2ca64f in qemu_init (argc=35, argv=0x7fffb82730e8) at +../system/vl.c:3838 +#11 0x000055aa1d79638c in main (argc=35, argv=0x7fffb82730e8) at +../system/main.c:72 +I'm not sure whether ACPI tables ROM in particular is rewritten with the +same content, but there might be cases where ROM can be read from file +system upon initialization.  That is undesirable as guest kernel +certainly won't be too happy about sudden change of the device's ROM +content. + +So the issue we're dealing with here is any unwanted memory related +device initialization upon cpr. + +For now the only thing that comes to my mind is to make a test where we +put as many devices as we can into a VM, make ram blocks RO upon cpr +(and remap them as RW later after migration is done, if needed), and +catch any unwanted memory violations.  As Den suggested, we might +consider adding that behaviour as a separate non-default option (or +"migrate" command flag specific to cpr-transfer), which would only be +used in the testing. +I'll look into adding an option, but there may be too many false positives, +such as the qxl_set_mode case above. And the maintainers may object to me +eliminating the false positives by adding more CPR_IN tests, due to gratuitous +(from their POV) ugliness. + +But I will use the technique to look for more write violations. +Andrey +No way. ACPI with the source must be used in the same way as BIOSes +and optional ROMs. +Yup, its a bug. Will fix. 
+ +- Steve + +see +1741380954-341079-1-git-send-email-steven.sistare@oracle.com +/">https://lore.kernel.org/qemu-devel/ +1741380954-341079-1-git-send-email-steven.sistare@oracle.com +/ +- Steve + +On 3/6/2025 11:13 AM, Steven Sistare wrote: +On 3/6/2025 10:52 AM, Denis V. Lunev wrote: +On 3/6/25 16:16, Andrey Drobyshev wrote: +On 3/5/25 11:19 PM, Steven Sistare wrote: +On 3/5/2025 11:50 AM, Andrey Drobyshev wrote: +On 3/4/25 9:05 PM, Steven Sistare wrote: +On 2/28/2025 1:37 PM, Andrey Drobyshev wrote: +On 2/28/25 8:35 PM, Andrey Drobyshev wrote: +On 2/28/25 8:20 PM, Steven Sistare wrote: +On 2/28/2025 1:13 PM, Steven Sistare wrote: +On 2/28/2025 12:39 PM, Andrey Drobyshev wrote: +Hi all, + +We've been experimenting with cpr-transfer migration mode recently +and +have discovered the following issue with the guest QXL driver: + +Run migration source: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-src.sock + +$EMULATOR -enable-kvm \ +        -machine q35 \ +        -cpu host -smp 2 -m 2G \ +        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +dev/shm/ +ram0,share=on\ +        -machine memory-backend=ram0 \ +        -machine aux-ram-share=on \ +        -drive file=$ROOTFS,media=disk,if=virtio \ +        -qmp unix:$QMPSOCK,server=on,wait=off \ +        -nographic \ +        -device qxl-vga +Run migration target: +EMULATOR=/path/to/emulator +ROOTFS=/path/to/image +QMPSOCK=/var/run/alma8qmp-dst.sock +$EMULATOR -enable-kvm \ +        -machine q35 \ +        -cpu host -smp 2 -m 2G \ +        -object memory-backend-file,id=ram0,size=2G,mem-path=/ +dev/shm/ +ram0,share=on\ +        -machine memory-backend=ram0 \ +        -machine aux-ram-share=on \ +        -drive file=$ROOTFS,media=disk,if=virtio \ +        -qmp unix:$QMPSOCK,server=on,wait=off \ +        -nographic \ +        -device qxl-vga \ +        -incoming tcp:0:44444 \ +        -incoming '{"channel-type": "cpr", "addr": { "transport": +"socket", "type": "unix", "path": "/var/run/alma8cpr-dst.sock"}}' +Launch the migration: +QMPSHELL=/root/src/qemu/master/scripts/qmp/qmp-shell +QMPSOCK=/var/run/alma8qmp-src.sock + +$QMPSHELL -p $QMPSOCK < /dev/tty3 +            done +done + +echo "bug could not be reproduced" +exit 0 +The bug itself seems to remain unfixed, as I was able to reproduce +that +with Fedora 41 guest, as well as AlmaLinux 8 guest. However our +cpr-transfer code also seems to be buggy as it triggers the crash - +without the cpr-transfer migration the above reproduce doesn't +lead to +crash on the source VM. + +I suspect that, as cpr-transfer doesn't migrate the guest +memory, but +rather passes it through the memory backend object, our code might +somehow corrupt the VRAM.  However, I wasn't able to trace the +corruption so far. + +Could somebody help the investigation and take a look into +this?  Any +suggestions would be appreciated.  Thanks! +Possibly some memory region created by qxl is not being preserved. +Try adding these traces to see what is preserved: + +-trace enable='*cpr*' +-trace enable='*ram_alloc*' +Also try adding this patch to see if it flags any ram blocks as not +compatible with cpr.  A message is printed at migration start time. 
+    +https://lore.kernel.org/qemu-devel/1740667681-257312-1-git-send- +email- +steven.sistare@oracle.com/ + +- Steve +With the traces enabled + the "migration: ram block cpr blockers" +patch +applied: + +Source: +cpr_find_fd pc.bios, id 0 returns -1 +cpr_save_fd pc.bios, id 0, fd 22 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 22 host +0x7fec18e00000 +cpr_find_fd pc.rom, id 0 returns -1 +cpr_save_fd pc.rom, id 0, fd 23 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 23 host +0x7fec18c00000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns -1 +cpr_save_fd 0000:00:01.0/e1000e.rom, id 0, fd 24 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 24 host 0x7fec18a00000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/vga.vram, id 0, fd 25 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 25 host 0x7feb77e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vrom, id 0, fd 27 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 27 host 0x7fec18800000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.vram, id 0, fd 28 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 28 host 0x7feb73c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns -1 +cpr_save_fd 0000:00:02.0/qxl.rom, id 0, fd 34 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 34 host 0x7fec18600000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/tables, id 0, fd 35 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 35 host 0x7fec18200000 +cpr_find_fd /rom@etc/table-loader, id 0 returns -1 +cpr_save_fd /rom@etc/table-loader, id 0, fd 36 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 max_size 65536 +fd 36 host 0x7feb8b600000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns -1 +cpr_save_fd /rom@etc/acpi/rsdp, id 0, fd 37 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +37 host 0x7feb8b400000 + +cpr_state_save cpr-transfer mode +cpr_transfer_output /var/run/alma8cpr-dst.sock +Target: +cpr_transfer_input /var/run/alma8cpr-dst.sock +cpr_state_load cpr-transfer mode +cpr_find_fd pc.bios, id 0 returns 20 +qemu_ram_alloc_shared pc.bios size 262144 max_size 262144 fd 20 host +0x7fcdc9800000 +cpr_find_fd pc.rom, id 0 returns 19 +qemu_ram_alloc_shared pc.rom size 131072 max_size 131072 fd 19 host +0x7fcdc9600000 +cpr_find_fd 0000:00:01.0/e1000e.rom, id 0 returns 18 +qemu_ram_alloc_shared 0000:00:01.0/e1000e.rom size 262144 max_size +262144 fd 18 host 0x7fcdc9400000 +cpr_find_fd 0000:00:02.0/vga.vram, id 0 returns 17 +qemu_ram_alloc_shared 0000:00:02.0/vga.vram size 67108864 max_size +67108864 fd 17 host 0x7fcd27e00000 +cpr_find_fd 0000:00:02.0/qxl.vrom, id 0 returns 16 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vrom size 8192 max_size 8192 +fd 16 host 0x7fcdc9200000 +cpr_find_fd 0000:00:02.0/qxl.vram, id 0 returns 15 +qemu_ram_alloc_shared 0000:00:02.0/qxl.vram size 67108864 max_size +67108864 fd 15 host 0x7fcd23c00000 +cpr_find_fd 0000:00:02.0/qxl.rom, id 0 returns 14 +qemu_ram_alloc_shared 0000:00:02.0/qxl.rom size 65536 max_size 65536 +fd 14 host 0x7fcdc8800000 +cpr_find_fd /rom@etc/acpi/tables, id 0 returns 13 +qemu_ram_alloc_shared /rom@etc/acpi/tables size 131072 max_size +2097152 fd 13 host 0x7fcdc8400000 +cpr_find_fd /rom@etc/table-loader, id 0 returns 11 +qemu_ram_alloc_shared /rom@etc/table-loader size 4096 
max_size 65536 +fd 11 host 0x7fcdc8200000 +cpr_find_fd /rom@etc/acpi/rsdp, id 0 returns 10 +qemu_ram_alloc_shared /rom@etc/acpi/rsdp size 4096 max_size 4096 fd +10 host 0x7fcd3be00000 +Looks like both vga.vram and qxl.vram are being preserved (with the +same +addresses), and no incompatible ram blocks are found during migration. +Sorry, addressed are not the same, of course.  However corresponding +ram +blocks do seem to be preserved and initialized. +So far, I have not reproduced the guest driver failure. + +However, I have isolated places where new QEMU improperly writes to +the qxl memory regions prior to starting the guest, by mmap'ing them +readonly after cpr: + +    qemu_ram_alloc_internal() +      if (reused && (strstr(name, "qxl") || strstr("name", "vga"))) +          ram_flags |= RAM_READONLY; +      new_block = qemu_ram_alloc_from_fd(...) + +I have attached a draft fix; try it and let me know. +My console window looks fine before and after cpr, using +-vnc $hostip:0 -vga qxl + +- Steve +Regarding the reproduce: when I launch the buggy version with the same +options as you, i.e. "-vnc 0.0.0.0:$port -vga qxl", and do cpr-transfer, +my VNC client silently hangs on the target after a while.  Could it +happen on your stand as well? +cpr does not preserve the vnc connection and session.  To test, I specify +port 0 for the source VM and port 1 for the dest.  When the src vnc goes +dormant the dest vnc becomes active. +Sure, I meant that VNC on the dest (on the port 1) works for a while +after the migration and then hangs, apparently after the guest QXL crash. +Could you try launching VM with +"-nographic -device qxl-vga"?  That way VM's serial console is given you +directly in the shell, so when qxl driver crashes you're still able to +inspect the kernel messages. +I have been running like that, but have not reproduced the qxl driver +crash, +and I suspect my guest image+kernel is too old. +Yes, that's probably the case.  But the crash occurs on my Fedora 41 +guest with the 6.11.5-300.fc41.x86_64 kernel, so newer kernels seem to +be buggy. +However, once I realized the +issue was post-cpr modification of qxl memory, I switched my attention +to the +fix. +As for your patch, I can report that it doesn't resolve the issue as it +is.  But I was able to track down another possible memory corruption +using your approach with readonly mmap'ing: +Program terminated with signal SIGSEGV, Segmentation fault. 
+#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +412         d->ram->magic       = cpu_to_le32(QXL_RAM_MAGIC); +[Current thread is 1 (Thread 0x7f1a4f83b480 (LWP 229798))] +(gdb) bt +#0  init_qxl_ram (d=0x5638996e0e70) at ../hw/display/qxl.c:412 +#1  0x0000563896e7f467 in qxl_realize_common (qxl=0x5638996e0e70, +errp=0x7ffd3c2b8170) at ../hw/display/qxl.c:2142 +#2  0x0000563896e7fda1 in qxl_realize_primary (dev=0x5638996e0e70, +errp=0x7ffd3c2b81d0) at ../hw/display/qxl.c:2257 +#3  0x0000563896c7e8f2 in pci_qdev_realize (qdev=0x5638996e0e70, +errp=0x7ffd3c2b8250) at ../hw/pci/pci.c:2174 +#4  0x00005638970eb54b in device_set_realized (obj=0x5638996e0e70, +value=true, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:494 +#5  0x00005638970f5e14 in property_set_bool (obj=0x5638996e0e70, +v=0x5638996f3770, name=0x56389759b141 "realized", +opaque=0x5638987893d0, errp=0x7ffd3c2b84e0) +      at ../qom/object.c:2374 +#6  0x00005638970f39f8 in object_property_set (obj=0x5638996e0e70, +name=0x56389759b141 "realized", v=0x5638996f3770, errp=0x7ffd3c2b84e0) +      at ../qom/object.c:1449 +#7  0x00005638970f8586 in object_property_set_qobject +(obj=0x5638996e0e70, name=0x56389759b141 "realized", +value=0x5638996df900, errp=0x7ffd3c2b84e0) +      at ../qom/qom-qobject.c:28 +#8  0x00005638970f3d8d in object_property_set_bool +(obj=0x5638996e0e70, name=0x56389759b141 "realized", value=true, +errp=0x7ffd3c2b84e0) +      at ../qom/object.c:1519 +#9  0x00005638970eacb0 in qdev_realize (dev=0x5638996e0e70, +bus=0x563898cf3c20, errp=0x7ffd3c2b84e0) at ../hw/core/qdev.c:276 +#10 0x0000563896dba675 in qdev_device_add_from_qdict +(opts=0x5638996dfe50, from_json=false, errp=0x7ffd3c2b84e0) at ../ +system/qdev-monitor.c:714 +#11 0x0000563896dba721 in qdev_device_add (opts=0x563898786150, +errp=0x56389855dc40 ) at ../system/qdev-monitor.c:733 +#12 0x0000563896dc48f1 in device_init_func (opaque=0x0, +opts=0x563898786150, errp=0x56389855dc40 ) at ../system/ +vl.c:1207 +#13 0x000056389737a6cc in qemu_opts_foreach +      (list=0x563898427b60 , func=0x563896dc48ca +, opaque=0x0, errp=0x56389855dc40 ) +      at ../util/qemu-option.c:1135 +#14 0x0000563896dc89b5 in qemu_create_cli_devices () at ../system/ +vl.c:2745 +#15 0x0000563896dc8c00 in qmp_x_exit_preconfig (errp=0x56389855dc40 +) at ../system/vl.c:2806 +#16 0x0000563896dcb5de in qemu_init (argc=33, argv=0x7ffd3c2b8948) +at ../system/vl.c:3838 +#17 0x0000563897297323 in main (argc=33, argv=0x7ffd3c2b8948) at ../ +system/main.c:72 +So the attached adjusted version of your patch does seem to help.  At +least I can't reproduce the crash on my stand. +Thanks for the stack trace; the calls to SPICE_RING_INIT in init_qxl_ram +are +definitely harmful.  Try V2 of the patch, attached, which skips the lines +of init_qxl_ram that modify guest memory. +Thanks, your v2 patch does seem to prevent the crash.  Would you re-send +it to the list as a proper fix? +Yes.  Was waiting for your confirmation. +I'm wondering, could it be useful to explicitly mark all the reused +memory regions readonly upon cpr-transfer, and then make them writable +back again after the migration is done?  That way we will be segfaulting +early on instead of debugging tricky memory corruptions. +It's a useful debugging technique, but changing protection on a large +memory region +can be too expensive for production due to TLB shootdowns. 
+ +Also, there are cases where writes are performed but the value is +guaranteed to +be the same: +   qxl_post_load() +     qxl_set_mode() +       d->rom->mode = cpu_to_le32(modenr); +The value is the same because mode and shadow_rom.mode were passed in +vmstate +from old qemu. +There're also cases where devices' ROM might be re-initialized.  E.g. +this segfault occures upon further exploration of RO mapped RAM blocks: +Program terminated with signal SIGSEGV, Segmentation fault. +#0  __memmove_avx_unaligned_erms () at +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +664             rep     movsb +[Current thread is 1 (Thread 0x7f6e7d08b480 (LWP 310379))] +(gdb) bt +#0  __memmove_avx_unaligned_erms () at +../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:664 +#1  0x000055aa1d030ecd in rom_set_mr (rom=0x55aa200ba380, owner=0x55aa2019ac10, +name=0x7fffb8272bc0 "/rom@etc/acpi/tables", ro=true) +     at ../hw/core/loader.c:1032 +#2  0x000055aa1d031577 in rom_add_blob +     (name=0x55aa1da51f13 "etc/acpi/tables", blob=0x55aa208a1070, len=131072, max_len=2097152, +addr=18446744073709551615, fw_file_name=0x55aa1da51f13 "etc/acpi/tables", +fw_callback=0x55aa1d441f59 , callback_opaque=0x55aa20ff0010, as=0x0, +read_only=true) at ../hw/core/loader.c:1147 +#3  0x000055aa1cfd788d in acpi_add_rom_blob +     (update=0x55aa1d441f59 , opaque=0x55aa20ff0010, +blob=0x55aa1fc9aa00, name=0x55aa1da51f13 "etc/acpi/tables") at ../hw/acpi/utils.c:46 +#4  0x000055aa1d44213f in acpi_setup () at ../hw/i386/acpi-build.c:2720 +#5  0x000055aa1d434199 in pc_machine_done (notifier=0x55aa1ff15050, data=0x0) +at ../hw/i386/pc.c:638 +#6  0x000055aa1d876845 in notifier_list_notify (list=0x55aa1ea25c10 +, data=0x0) at ../util/notify.c:39 +#7  0x000055aa1d039ee5 in qdev_machine_creation_done () at +../hw/core/machine.c:1749 +#8  0x000055aa1d2c7b3e in qemu_machine_creation_done (errp=0x55aa1ea5cc40 +) at ../system/vl.c:2779 +#9  0x000055aa1d2c7c7d in qmp_x_exit_preconfig (errp=0x55aa1ea5cc40 +) at ../system/vl.c:2807 +#10 0x000055aa1d2ca64f in qemu_init (argc=35, argv=0x7fffb82730e8) at +../system/vl.c:3838 +#11 0x000055aa1d79638c in main (argc=35, argv=0x7fffb82730e8) at +../system/main.c:72 +I'm not sure whether ACPI tables ROM in particular is rewritten with the +same content, but there might be cases where ROM can be read from file +system upon initialization.  That is undesirable as guest kernel +certainly won't be too happy about sudden change of the device's ROM +content. + +So the issue we're dealing with here is any unwanted memory related +device initialization upon cpr. + +For now the only thing that comes to my mind is to make a test where we +put as many devices as we can into a VM, make ram blocks RO upon cpr +(and remap them as RW later after migration is done, if needed), and +catch any unwanted memory violations.  As Den suggested, we might +consider adding that behaviour as a separate non-default option (or +"migrate" command flag specific to cpr-transfer), which would only be +used in the testing. +I'll look into adding an option, but there may be too many false positives, +such as the qxl_set_mode case above.  And the maintainers may object to me +eliminating the false positives by adding more CPR_IN tests, due to gratuitous +(from their POV) ugliness. + +But I will use the technique to look for more write violations. +Andrey +No way. ACPI with the source must be used in the same way as BIOSes +and optional ROMs. +Yup, its a bug.  Will fix. 
+ +- Steve + diff --git a/results/classifier/001/mistranslation/64322995 b/results/classifier/001/mistranslation/64322995 new file mode 100644 index 000000000..2f16ce872 --- /dev/null +++ b/results/classifier/001/mistranslation/64322995 @@ -0,0 +1,54 @@ +mistranslation: 0.936 +semantic: 0.906 +other: 0.881 +instruction: 0.864 + +[Qemu-devel] [BUG] trace: QEMU hangs on initialization with the "simple" backend + +While starting the softmmu version of QEMU, the simple backend waits for the +writeout thread to signal a condition variable when initializing the output file +path. But since the writeout thread has not been created, it just waits forever. + +Thanks, + Lluis + +On Tue, Feb 09, 2016 at 09:24:04PM +0100, Lluís Vilanova wrote: +> +While starting the softmmu version of QEMU, the simple backend waits for the +> +writeout thread to signal a condition variable when initializing the output +> +file +> +path. But since the writeout thread has not been created, it just waits +> +forever. +Denis Lunev posted a fix: +https://patchwork.ozlabs.org/patch/580968/ +Stefan +signature.asc +Description: +PGP signature + +Stefan Hajnoczi writes: + +> +On Tue, Feb 09, 2016 at 09:24:04PM +0100, Lluís Vilanova wrote: +> +> While starting the softmmu version of QEMU, the simple backend waits for the +> +> writeout thread to signal a condition variable when initializing the output +> +> file +> +> path. But since the writeout thread has not been created, it just waits +> +> forever. +> +Denis Lunev posted a fix: +> +https://patchwork.ozlabs.org/patch/580968/ +Great, thanks. + +Lluis + diff --git a/results/classifier/001/mistranslation/70294255 b/results/classifier/001/mistranslation/70294255 new file mode 100644 index 000000000..67353acda --- /dev/null +++ b/results/classifier/001/mistranslation/70294255 @@ -0,0 +1,1061 @@ +mistranslation: 0.862 +semantic: 0.858 +instruction: 0.856 +other: 0.852 + +[Qemu-devel] 答复: Re: 答复: Re: 答复: Re: 答复: Re: [BUG]COLO failover hang + +hi: + +yes.it is better. + +And should we delete + + + + +#ifdef WIN32 + + QIO_CHANNEL(cioc)->event = CreateEvent(NULL, FALSE, FALSE, NULL) + +#endif + + + + +in qio_channel_socket_accept? + +qio_channel_socket_new already have it. + + + + + + + + + + + + +原始邮件 + + + +发件人: address@hidden +收件人:王广10165992 +抄送人: address@hidden address@hidden address@hidden address@hidden +日 期 :2017å¹´03月22日 15:03 +主 题 :Re: [Qemu-devel] 答复: Re: 答复: Re: 答复: Re: [BUG]COLO failover hang + + + + + +Hi, + +On 2017/3/22 9:42, address@hidden wrote: +> diff --git a/migration/socket.c b/migration/socket.c +> +> +> index 13966f1..d65a0ea 100644 +> +> +> --- a/migration/socket.c +> +> +> +++ b/migration/socket.c +> +> +> @@ -147,8 +147,9 @@ static gboolean +socket_accept_incoming_migration(QIOChannel *ioc, +> +> +> } +> +> +> +> +> +> trace_migration_socket_incoming_accepted() +> +> +> +> +> +> qio_channel_set_name(QIO_CHANNEL(sioc), "migration-socket-incoming") +> +> +> + qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN) +> +> +> migration_channel_process_incoming(migrate_get_current(), +> +> +> QIO_CHANNEL(sioc)) +> +> +> object_unref(OBJECT(sioc)) +> +> +> +> +> Is this patch ok? 
+> + +Yes, i think this works, but a better way maybe to call +qio_channel_set_feature() +in qio_channel_socket_accept(), we didn't set the SHUTDOWN feature for the +socket accept fd, +Or fix it by this: + +diff --git a/io/channel-socket.c b/io/channel-socket.c +index f546c68..ce6894c 100644 +--- a/io/channel-socket.c ++++ b/io/channel-socket.c +@@ -330,9 +330,8 @@ qio_channel_socket_accept(QIOChannelSocket *ioc, + Error **errp) + { + QIOChannelSocket *cioc +- +- cioc = QIO_CHANNEL_SOCKET(object_new(TYPE_QIO_CHANNEL_SOCKET)) +- cioc->fd = -1 ++ ++ cioc = qio_channel_socket_new() + cioc->remoteAddrLen = sizeof(ioc->remoteAddr) + cioc->localAddrLen = sizeof(ioc->localAddr) + + +Thanks, +Hailiang + +> I have test it . The test could not hang any more. +> +> +> +> +> +> +> +> +> +> +> +> +> 原始邮件 +> +> +> +> 发件人: address@hidden +> 收件人: address@hidden address@hidden +> 抄送人: address@hidden address@hidden address@hidden +> 日 期 :2017å¹´03月22日 09:11 +> 主 题 :Re: [Qemu-devel] 答复: Re: 答复: Re: [BUG]COLO failover hang +> +> +> +> +> +> On 2017/3/21 19:56, Dr. David Alan Gilbert wrote: +> > * Hailiang Zhang (address@hidden) wrote: +> >> Hi, +> >> +> >> Thanks for reporting this, and i confirmed it in my test, and it is a bug. +> >> +> >> Though we tried to call qemu_file_shutdown() to shutdown the related fd, in +> >> case COLO thread/incoming thread is stuck in read/write() while do +failover, +> >> but it didn't take effect, because all the fd used by COLO (also migration) +> >> has been wrapped by qio channel, and it will not call the shutdown API if +> >> we didn't qio_channel_set_feature(QIO_CHANNEL(sioc), +QIO_CHANNEL_FEATURE_SHUTDOWN). +> >> +> >> Cc: Dr. David Alan Gilbert address@hidden +> >> +> >> I doubted migration cancel has the same problem, it may be stuck in write() +> >> if we tried to cancel migration. +> >> +> >> void fd_start_outgoing_migration(MigrationState *s, const char *fdname, +Error **errp) +> >> { +> >> qio_channel_set_name(QIO_CHANNEL(ioc), "migration-fd-outgoing") +> >> migration_channel_connect(s, ioc, NULL) +> >> ... ... +> >> We didn't call qio_channel_set_feature(QIO_CHANNEL(sioc), +QIO_CHANNEL_FEATURE_SHUTDOWN) above, +> >> and the +> >> migrate_fd_cancel() +> >> { +> >> ... ... +> >> if (s->state == MIGRATION_STATUS_CANCELLING && f) { +> >> qemu_file_shutdown(f) --> This will not take effect. No ? +> >> } +> >> } +> > +> > (cc'd in Daniel Berrange). +> > I see that we call qio_channel_set_feature(ioc, +QIO_CHANNEL_FEATURE_SHUTDOWN) at the +> > top of qio_channel_socket_new so I think that's safe isn't it? +> > +> +> Hmm, you are right, this problem is only exist for the migration incoming fd, +thanks. +> +> > Dave +> > +> >> Thanks, +> >> Hailiang +> >> +> >> On 2017/3/21 16:10, address@hidden wrote: +> >>> Thank you。 +> >>> +> >>> I have test aready。 +> >>> +> >>> When the Primary Node panic,the Secondary Node qemu hang at the same +place。 +> >>> +> >>> Incorrding +http://wiki.qemu-project.org/Features/COLO +,kill Primary Node +qemu will not produce the problem,but Primary Node panic can。 +> >>> +> >>> I think due to the feature of channel does not support +QIO_CHANNEL_FEATURE_SHUTDOWN. +> >>> +> >>> +> >>> when failover,channel_shutdown could not shut down the channel. +> >>> +> >>> +> >>> so the colo_process_incoming_thread will hang at recvmsg. 
+> >>> +> >>> +> >>> I test a patch: +> >>> +> >>> +> >>> diff --git a/migration/socket.c b/migration/socket.c +> >>> +> >>> +> >>> index 13966f1..d65a0ea 100644 +> >>> +> >>> +> >>> --- a/migration/socket.c +> >>> +> >>> +> >>> +++ b/migration/socket.c +> >>> +> >>> +> >>> @@ -147,8 +147,9 @@ static gboolean +socket_accept_incoming_migration(QIOChannel *ioc, +> >>> +> >>> +> >>> } +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> trace_migration_socket_incoming_accepted() +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> qio_channel_set_name(QIO_CHANNEL(sioc), +"migration-socket-incoming") +> >>> +> >>> +> >>> + qio_channel_set_feature(QIO_CHANNEL(sioc), +QIO_CHANNEL_FEATURE_SHUTDOWN) +> >>> +> >>> +> >>> migration_channel_process_incoming(migrate_get_current(), +> >>> +> >>> +> >>> QIO_CHANNEL(sioc)) +> >>> +> >>> +> >>> object_unref(OBJECT(sioc)) +> >>> +> >>> +> >>> +> >>> +> >>> My test will not hang any more. +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> 原始邮件 +> >>> +> >>> +> >>> +> >>> 发件人: address@hidden +> >>> 收件人:王广10165992 address@hidden +> >>> 抄送人: address@hidden address@hidden +> >>> 日 期 :2017å¹´03月21日 15:58 +> >>> 主 题 :Re: [Qemu-devel] 答复: Re: [BUG]COLO failover hang +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> Hi,Wang. +> >>> +> >>> You can test this branch: +> >>> +> >>> +https://github.com/coloft/qemu/tree/colo-v5.1-developing-COLO-frame-v21-with-shared-disk +> >>> +> >>> and please follow wiki ensure your own configuration correctly. +> >>> +> >>> +http://wiki.qemu-project.org/Features/COLO +> >>> +> >>> +> >>> Thanks +> >>> +> >>> Zhang Chen +> >>> +> >>> +> >>> On 03/21/2017 03:27 PM, address@hidden wrote: +> >>> > +> >>> > hi. +> >>> > +> >>> > I test the git qemu master have the same problem. 
+> >>> > +> >>> > (gdb) bt +> >>> > +> >>> > #0 qio_channel_socket_readv (ioc=0x7f65911b4e50, iov=0x7f64ef3fd880, +> >>> > niov=1, fds=0x0, nfds=0x0, errp=0x0) at io/channel-socket.c:461 +> >>> > +> >>> > #1 0x00007f658e4aa0c2 in qio_channel_read +> >>> > (address@hidden, address@hidden "", +> >>> > address@hidden, address@hidden) at io/channel.c:114 +> >>> > +> >>> > #2 0x00007f658e3ea990 in channel_get_buffer (opaque=<optimized out>, +> >>> > buf=0x7f65907cb838 "", pos=<optimized out>, size=32768) at +> >>> > migration/qemu-file-channel.c:78 +> >>> > +> >>> > #3 0x00007f658e3e97fc in qemu_fill_buffer (f=0x7f65907cb800) at +> >>> > migration/qemu-file.c:295 +> >>> > +> >>> > #4 0x00007f658e3ea2e1 in qemu_peek_byte (address@hidden, +> >>> > address@hidden) at migration/qemu-file.c:555 +> >>> > +> >>> > #5 0x00007f658e3ea34b in qemu_get_byte (address@hidden) at +> >>> > migration/qemu-file.c:568 +> >>> > +> >>> > #6 0x00007f658e3ea552 in qemu_get_be32 (address@hidden) at +> >>> > migration/qemu-file.c:648 +> >>> > +> >>> > #7 0x00007f658e3e66e5 in colo_receive_message (f=0x7f65907cb800, +> >>> > address@hidden) at migration/colo.c:244 +> >>> > +> >>> > #8 0x00007f658e3e681e in colo_receive_check_message (f=<optimized +> >>> > out>, address@hidden, +> >>> > address@hidden) +> >>> > +> >>> > at migration/colo.c:264 +> >>> > +> >>> > #9 0x00007f658e3e740e in colo_process_incoming_thread +> >>> > (opaque=0x7f658eb30360 <mis_current.31286>) at migration/colo.c:577 +> >>> > +> >>> > #10 0x00007f658be09df3 in start_thread () from /lib64/libpthread.so.0 +> >>> > +> >>> > #11 0x00007f65881983ed in clone () from /lib64/libc.so.6 +> >>> > +> >>> > (gdb) p ioc->name +> >>> > +> >>> > $2 = 0x7f658ff7d5c0 "migration-socket-incoming" +> >>> > +> >>> > (gdb) p ioc->features Do not support QIO_CHANNEL_FEATURE_SHUTDOWN +> >>> > +> >>> > $3 = 0 +> >>> > +> >>> > +> >>> > (gdb) bt +> >>> > +> >>> > #0 socket_accept_incoming_migration (ioc=0x7fdcceeafa90, +> >>> > condition=G_IO_IN, opaque=0x7fdcceeafa90) at migration/socket.c:137 +> >>> > +> >>> > #1 0x00007fdcc6966350 in g_main_dispatch (context=<optimized out>) at +> >>> > gmain.c:3054 +> >>> > +> >>> > #2 g_main_context_dispatch (context=<optimized out>, +> >>> > address@hidden) at gmain.c:3630 +> >>> > +> >>> > #3 0x00007fdccb8a6dcc in glib_pollfds_poll () at util/main-loop.c:213 +> >>> > +> >>> > #4 os_host_main_loop_wait (timeout=<optimized out>) at +> >>> > util/main-loop.c:258 +> >>> > +> >>> > #5 main_loop_wait (address@hidden) at +> >>> > util/main-loop.c:506 +> >>> > +> >>> > #6 0x00007fdccb526187 in main_loop () at vl.c:1898 +> >>> > +> >>> > #7 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized +> >>> > out>) at vl.c:4709 +> >>> > +> >>> > (gdb) p ioc->features +> >>> > +> >>> > $1 = 6 +> >>> > +> >>> > (gdb) p ioc->name +> >>> > +> >>> > $2 = 0x7fdcce1b1ab0 "migration-socket-listener" +> >>> > +> >>> > +> >>> > May be socket_accept_incoming_migration should +> >>> > call qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)?? +> >>> > +> >>> > +> >>> > thank you. +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > 原始邮件 +> >>> > address@hidden +> >>> > address@hidden +> >>> > address@hidden@huawei.com> +> >>> > *日 期 :*2017å¹´03月16日 14:46 +> >>> > *主 题 :**Re: [Qemu-devel] COLO failover hang* +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > On 03/15/2017 05:06 PM, wangguang wrote: +> >>> > > am testing QEMU COLO feature described here [QEMU +> >>> > > Wiki]( +http://wiki.qemu-project.org/Features/COLO +). 
+> >>> > > +> >>> > > When the Primary Node panic,the Secondary Node qemu hang. +> >>> > > hang at recvmsg in qio_channel_socket_readv. +> >>> > > And I run { 'execute': 'nbd-server-stop' } and { "execute": +> >>> > > "x-colo-lost-heartbeat" } in Secondary VM's +> >>> > > monitor,the Secondary Node qemu still hang at recvmsg . +> >>> > > +> >>> > > I found that the colo in qemu is not complete yet. +> >>> > > Do the colo have any plan for development? +> >>> > +> >>> > Yes, We are developing. You can see some of patch we pushing. +> >>> > +> >>> > > Has anyone ever run it successfully? Any help is appreciated! +> >>> > +> >>> > In our internal version can run it successfully, +> >>> > The failover detail you can ask Zhanghailiang for help. +> >>> > Next time if you have some question about COLO, +> >>> > please cc me and zhanghailiang address@hidden +> >>> > +> >>> > +> >>> > Thanks +> >>> > Zhang Chen +> >>> > +> >>> > +> >>> > > +> >>> > > +> >>> > > +> >>> > > centos7.2+qemu2.7.50 +> >>> > > (gdb) bt +> >>> > > #0 0x00007f3e00cc86ad in recvmsg () from /lib64/libpthread.so.0 +> >>> > > #1 0x00007f3e0332b738 in qio_channel_socket_readv (ioc=<optimized +out>, +> >>> > > iov=<optimized out>, niov=<optimized out>, fds=0x0, nfds=0x0, +errp=0x0) at +> >>> > > io/channel-socket.c:497 +> >>> > > #2 0x00007f3e03329472 in qio_channel_read (address@hidden, +> >>> > > address@hidden "", address@hidden, +> >>> > > address@hidden) at io/channel.c:97 +> >>> > > #3 0x00007f3e032750e0 in channel_get_buffer (opaque=<optimized out>, +> >>> > > buf=0x7f3e05910f38 "", pos=<optimized out>, size=32768) at +> >>> > > migration/qemu-file-channel.c:78 +> >>> > > #4 0x00007f3e0327412c in qemu_fill_buffer (f=0x7f3e05910f00) at +> >>> > > migration/qemu-file.c:257 +> >>> > > #5 0x00007f3e03274a41 in qemu_peek_byte (address@hidden, +> >>> > > address@hidden) at migration/qemu-file.c:510 +> >>> > > #6 0x00007f3e03274aab in qemu_get_byte (address@hidden) at +> >>> > > migration/qemu-file.c:523 +> >>> > > #7 0x00007f3e03274cb2 in qemu_get_be32 (address@hidden) at +> >>> > > migration/qemu-file.c:603 +> >>> > > #8 0x00007f3e03271735 in colo_receive_message (f=0x7f3e05910f00, +> >>> > > address@hidden) at migration/colo.c:215 +> >>> > > #9 0x00007f3e0327250d in colo_wait_handle_message +(errp=0x7f3d62bfaa48, +> >>> > > checkpoint_request=<synthetic pointer>, f=<optimized out>) at +> >>> > > migration/colo.c:546 +> >>> > > #10 colo_process_incoming_thread (opaque=0x7f3e067245e0) at +> >>> > > migration/colo.c:649 +> >>> > > #11 0x00007f3e00cc1df3 in start_thread () from /lib64/libpthread.so.0 +> >>> > > #12 0x00007f3dfc9c03ed in clone () from /lib64/libc..so.6 +> >>> > > +> >>> > > +> >>> > > +> >>> > > +> >>> > > +> >>> > > -- +> >>> > > View this message in context: +http://qemu.11.n7.nabble.com/COLO-failover-hang-tp473250.html +> >>> > > Sent from the Developer mailing list archive at Nabble.com. +> >>> > > +> >>> > > +> >>> > > +> >>> > > +> >>> > +> >>> > -- +> >>> > Thanks +> >>> > Zhang Chen +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > +> >>> +> >> +> > -- +> > Dr. David Alan Gilbert / address@hidden / Manchester, UK +> > +> > . +> > +> + +On 2017/3/22 16:09, address@hidden wrote: +hi: + +yes.it is better. + +And should we delete +Yes, you are right. +#ifdef WIN32 + + QIO_CHANNEL(cioc)->event = CreateEvent(NULL, FALSE, FALSE, NULL) + +#endif + + + + +in qio_channel_socket_accept? + +qio_channel_socket_new already have it. 
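+For reference, the mechanism that QIO_CHANNEL_FEATURE_SHUTDOWN unlocks can be shown
+with a minimal standalone program. This is not QEMU code, just an illustration of why
+shutdown(2) is what wakes a thread stuck in a blocking recvmsg()/recv() during failover:
+
+    #include <sys/socket.h>
+    #include <pthread.h>
+    #include <unistd.h>
+    #include <stdio.h>
+
+    /* Blocks in recv() until the socket is shut down from the other thread. */
+    static void *reader(void *arg)
+    {
+        int fd = *(int *)arg;
+        char buf[64];
+        ssize_t n = recv(fd, buf, sizeof(buf), 0);
+        printf("recv returned %zd\n", n);   /* 0 (EOF) after SHUT_RDWR */
+        return NULL;
+    }
+
+    int main(void)
+    {
+        int sv[2];
+        pthread_t tid;
+
+        socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
+        pthread_create(&tid, NULL, reader, &sv[0]);
+        sleep(1);                    /* let the reader block in recv() */
+        shutdown(sv[0], SHUT_RDWR);  /* unblocks the blocked recv() immediately */
+        pthread_join(tid, NULL);
+        close(sv[0]);
+        close(sv[1]);
+        return 0;
+    }
+
+As noted earlier in the thread, without the feature flag qemu_file_shutdown() never
+issues this shutdown() on the accepted fd, so the incoming thread stays blocked in
+recvmsg().
+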
+ + + + + + + + + + + + +原始邮件 + + + +发件人: address@hidden +收件人:王广10165992 +抄送人: address@hidden address@hidden address@hidden address@hidden +日 期 :2017å¹´03月22日 15:03 +主 题 :Re: [Qemu-devel] 答复: Re: 答复: Re: 答复: Re: [BUG]COLO failover hang + + + + + +Hi, + +On 2017/3/22 9:42, address@hidden wrote: +> diff --git a/migration/socket.c b/migration/socket.c +> +> +> index 13966f1..d65a0ea 100644 +> +> +> --- a/migration/socket.c +> +> +> +++ b/migration/socket.c +> +> +> @@ -147,8 +147,9 @@ static gboolean +socket_accept_incoming_migration(QIOChannel *ioc, +> +> +> } +> +> +> +> +> +> trace_migration_socket_incoming_accepted() +> +> +> +> +> +> qio_channel_set_name(QIO_CHANNEL(sioc), "migration-socket-incoming") +> +> +> + qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN) +> +> +> migration_channel_process_incoming(migrate_get_current(), +> +> +> QIO_CHANNEL(sioc)) +> +> +> object_unref(OBJECT(sioc)) +> +> +> +> +> Is this patch ok? +> + +Yes, i think this works, but a better way maybe to call +qio_channel_set_feature() +in qio_channel_socket_accept(), we didn't set the SHUTDOWN feature for the +socket accept fd, +Or fix it by this: + +diff --git a/io/channel-socket.c b/io/channel-socket.c +index f546c68..ce6894c 100644 +--- a/io/channel-socket.c ++++ b/io/channel-socket.c +@@ -330,9 +330,8 @@ qio_channel_socket_accept(QIOChannelSocket *ioc, + Error **errp) + { + QIOChannelSocket *cioc +- +- cioc = QIO_CHANNEL_SOCKET(object_new(TYPE_QIO_CHANNEL_SOCKET)) +- cioc->fd = -1 ++ ++ cioc = qio_channel_socket_new() + cioc->remoteAddrLen = sizeof(ioc->remoteAddr) + cioc->localAddrLen = sizeof(ioc->localAddr) + + +Thanks, +Hailiang + +> I have test it . The test could not hang any more. +> +> +> +> +> +> +> +> +> +> +> +> +> 原始邮件 +> +> +> +> 发件人: address@hidden +> 收件人: address@hidden address@hidden +> 抄送人: address@hidden address@hidden address@hidden +> 日 期 :2017å¹´03月22日 09:11 +> 主 题 :Re: [Qemu-devel] 答复: Re: 答复: Re: [BUG]COLO failover hang +> +> +> +> +> +> On 2017/3/21 19:56, Dr. David Alan Gilbert wrote: +> > * Hailiang Zhang (address@hidden) wrote: +> >> Hi, +> >> +> >> Thanks for reporting this, and i confirmed it in my test, and it is a bug. +> >> +> >> Though we tried to call qemu_file_shutdown() to shutdown the related fd, in +> >> case COLO thread/incoming thread is stuck in read/write() while do +failover, +> >> but it didn't take effect, because all the fd used by COLO (also migration) +> >> has been wrapped by qio channel, and it will not call the shutdown API if +> >> we didn't qio_channel_set_feature(QIO_CHANNEL(sioc), +QIO_CHANNEL_FEATURE_SHUTDOWN). +> >> +> >> Cc: Dr. David Alan Gilbert address@hidden +> >> +> >> I doubted migration cancel has the same problem, it may be stuck in write() +> >> if we tried to cancel migration. +> >> +> >> void fd_start_outgoing_migration(MigrationState *s, const char *fdname, +Error **errp) +> >> { +> >> qio_channel_set_name(QIO_CHANNEL(ioc), "migration-fd-outgoing") +> >> migration_channel_connect(s, ioc, NULL) +> >> ... ... +> >> We didn't call qio_channel_set_feature(QIO_CHANNEL(sioc), +QIO_CHANNEL_FEATURE_SHUTDOWN) above, +> >> and the +> >> migrate_fd_cancel() +> >> { +> >> ... ... +> >> if (s->state == MIGRATION_STATUS_CANCELLING && f) { +> >> qemu_file_shutdown(f) --> This will not take effect. No ? +> >> } +> >> } +> > +> > (cc'd in Daniel Berrange). +> > I see that we call qio_channel_set_feature(ioc, +QIO_CHANNEL_FEATURE_SHUTDOWN) at the +> > top of qio_channel_socket_new so I think that's safe isn't it? 
+> > +> +> Hmm, you are right, this problem is only exist for the migration incoming fd, +thanks. +> +> > Dave +> > +> >> Thanks, +> >> Hailiang +> >> +> >> On 2017/3/21 16:10, address@hidden wrote: +> >>> Thank you。 +> >>> +> >>> I have test aready。 +> >>> +> >>> When the Primary Node panic,the Secondary Node qemu hang at the same +place。 +> >>> +> >>> Incorrding +http://wiki.qemu-project.org/Features/COLO +,kill Primary Node +qemu will not produce the problem,but Primary Node panic can。 +> >>> +> >>> I think due to the feature of channel does not support +QIO_CHANNEL_FEATURE_SHUTDOWN. +> >>> +> >>> +> >>> when failover,channel_shutdown could not shut down the channel. +> >>> +> >>> +> >>> so the colo_process_incoming_thread will hang at recvmsg. +> >>> +> >>> +> >>> I test a patch: +> >>> +> >>> +> >>> diff --git a/migration/socket.c b/migration/socket.c +> >>> +> >>> +> >>> index 13966f1..d65a0ea 100644 +> >>> +> >>> +> >>> --- a/migration/socket.c +> >>> +> >>> +> >>> +++ b/migration/socket.c +> >>> +> >>> +> >>> @@ -147,8 +147,9 @@ static gboolean +socket_accept_incoming_migration(QIOChannel *ioc, +> >>> +> >>> +> >>> } +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> trace_migration_socket_incoming_accepted() +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> qio_channel_set_name(QIO_CHANNEL(sioc), +"migration-socket-incoming") +> >>> +> >>> +> >>> + qio_channel_set_feature(QIO_CHANNEL(sioc), +QIO_CHANNEL_FEATURE_SHUTDOWN) +> >>> +> >>> +> >>> migration_channel_process_incoming(migrate_get_current(), +> >>> +> >>> +> >>> QIO_CHANNEL(sioc)) +> >>> +> >>> +> >>> object_unref(OBJECT(sioc)) +> >>> +> >>> +> >>> +> >>> +> >>> My test will not hang any more. +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> 原始邮件 +> >>> +> >>> +> >>> +> >>> 发件人: address@hidden +> >>> 收件人:王广10165992 address@hidden +> >>> 抄送人: address@hidden address@hidden +> >>> 日 期 :2017å¹´03月21日 15:58 +> >>> 主 题 :Re: [Qemu-devel] 答复: Re: [BUG]COLO failover hang +> >>> +> >>> +> >>> +> >>> +> >>> +> >>> Hi,Wang. +> >>> +> >>> You can test this branch: +> >>> +> >>> +https://github.com/coloft/qemu/tree/colo-v5.1-developing-COLO-frame-v21-with-shared-disk +> >>> +> >>> and please follow wiki ensure your own configuration correctly. +> >>> +> >>> +http://wiki.qemu-project.org/Features/COLO +> >>> +> >>> +> >>> Thanks +> >>> +> >>> Zhang Chen +> >>> +> >>> +> >>> On 03/21/2017 03:27 PM, address@hidden wrote: +> >>> > +> >>> > hi. +> >>> > +> >>> > I test the git qemu master have the same problem. 
+> >>> > +> >>> > (gdb) bt +> >>> > +> >>> > #0 qio_channel_socket_readv (ioc=0x7f65911b4e50, iov=0x7f64ef3fd880, +> >>> > niov=1, fds=0x0, nfds=0x0, errp=0x0) at io/channel-socket.c:461 +> >>> > +> >>> > #1 0x00007f658e4aa0c2 in qio_channel_read +> >>> > (address@hidden, address@hidden "", +> >>> > address@hidden, address@hidden) at io/channel.c:114 +> >>> > +> >>> > #2 0x00007f658e3ea990 in channel_get_buffer (opaque=<optimized out>, +> >>> > buf=0x7f65907cb838 "", pos=<optimized out>, size=32768) at +> >>> > migration/qemu-file-channel.c:78 +> >>> > +> >>> > #3 0x00007f658e3e97fc in qemu_fill_buffer (f=0x7f65907cb800) at +> >>> > migration/qemu-file.c:295 +> >>> > +> >>> > #4 0x00007f658e3ea2e1 in qemu_peek_byte (address@hidden, +> >>> > address@hidden) at migration/qemu-file.c:555 +> >>> > +> >>> > #5 0x00007f658e3ea34b in qemu_get_byte (address@hidden) at +> >>> > migration/qemu-file.c:568 +> >>> > +> >>> > #6 0x00007f658e3ea552 in qemu_get_be32 (address@hidden) at +> >>> > migration/qemu-file.c:648 +> >>> > +> >>> > #7 0x00007f658e3e66e5 in colo_receive_message (f=0x7f65907cb800, +> >>> > address@hidden) at migration/colo.c:244 +> >>> > +> >>> > #8 0x00007f658e3e681e in colo_receive_check_message (f=<optimized +> >>> > out>, address@hidden, +> >>> > address@hidden) +> >>> > +> >>> > at migration/colo.c:264 +> >>> > +> >>> > #9 0x00007f658e3e740e in colo_process_incoming_thread +> >>> > (opaque=0x7f658eb30360 <mis_current.31286>) at migration/colo.c:577 +> >>> > +> >>> > #10 0x00007f658be09df3 in start_thread () from /lib64/libpthread.so.0 +> >>> > +> >>> > #11 0x00007f65881983ed in clone () from /lib64/libc.so.6 +> >>> > +> >>> > (gdb) p ioc->name +> >>> > +> >>> > $2 = 0x7f658ff7d5c0 "migration-socket-incoming" +> >>> > +> >>> > (gdb) p ioc->features Do not support QIO_CHANNEL_FEATURE_SHUTDOWN +> >>> > +> >>> > $3 = 0 +> >>> > +> >>> > +> >>> > (gdb) bt +> >>> > +> >>> > #0 socket_accept_incoming_migration (ioc=0x7fdcceeafa90, +> >>> > condition=G_IO_IN, opaque=0x7fdcceeafa90) at migration/socket.c:137 +> >>> > +> >>> > #1 0x00007fdcc6966350 in g_main_dispatch (context=<optimized out>) at +> >>> > gmain.c:3054 +> >>> > +> >>> > #2 g_main_context_dispatch (context=<optimized out>, +> >>> > address@hidden) at gmain.c:3630 +> >>> > +> >>> > #3 0x00007fdccb8a6dcc in glib_pollfds_poll () at util/main-loop.c:213 +> >>> > +> >>> > #4 os_host_main_loop_wait (timeout=<optimized out>) at +> >>> > util/main-loop.c:258 +> >>> > +> >>> > #5 main_loop_wait (address@hidden) at +> >>> > util/main-loop.c:506 +> >>> > +> >>> > #6 0x00007fdccb526187 in main_loop () at vl.c:1898 +> >>> > +> >>> > #7 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized +> >>> > out>) at vl.c:4709 +> >>> > +> >>> > (gdb) p ioc->features +> >>> > +> >>> > $1 = 6 +> >>> > +> >>> > (gdb) p ioc->name +> >>> > +> >>> > $2 = 0x7fdcce1b1ab0 "migration-socket-listener" +> >>> > +> >>> > +> >>> > May be socket_accept_incoming_migration should +> >>> > call qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)?? +> >>> > +> >>> > +> >>> > thank you. +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > 原始邮件 +> >>> > address@hidden +> >>> > address@hidden +> >>> > address@hidden@huawei.com> +> >>> > *日 期 :*2017å¹´03月16日 14:46 +> >>> > *主 题 :**Re: [Qemu-devel] COLO failover hang* +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > On 03/15/2017 05:06 PM, wangguang wrote: +> >>> > > am testing QEMU COLO feature described here [QEMU +> >>> > > Wiki]( +http://wiki.qemu-project.org/Features/COLO +). 
+> >>> > > +> >>> > > When the Primary Node panic,the Secondary Node qemu hang. +> >>> > > hang at recvmsg in qio_channel_socket_readv. +> >>> > > And I run { 'execute': 'nbd-server-stop' } and { "execute": +> >>> > > "x-colo-lost-heartbeat" } in Secondary VM's +> >>> > > monitor,the Secondary Node qemu still hang at recvmsg . +> >>> > > +> >>> > > I found that the colo in qemu is not complete yet. +> >>> > > Do the colo have any plan for development? +> >>> > +> >>> > Yes, We are developing. You can see some of patch we pushing. +> >>> > +> >>> > > Has anyone ever run it successfully? Any help is appreciated! +> >>> > +> >>> > In our internal version can run it successfully, +> >>> > The failover detail you can ask Zhanghailiang for help. +> >>> > Next time if you have some question about COLO, +> >>> > please cc me and zhanghailiang address@hidden +> >>> > +> >>> > +> >>> > Thanks +> >>> > Zhang Chen +> >>> > +> >>> > +> >>> > > +> >>> > > +> >>> > > +> >>> > > centos7.2+qemu2.7.50 +> >>> > > (gdb) bt +> >>> > > #0 0x00007f3e00cc86ad in recvmsg () from /lib64/libpthread.so.0 +> >>> > > #1 0x00007f3e0332b738 in qio_channel_socket_readv (ioc=<optimized +out>, +> >>> > > iov=<optimized out>, niov=<optimized out>, fds=0x0, nfds=0x0, +errp=0x0) at +> >>> > > io/channel-socket.c:497 +> >>> > > #2 0x00007f3e03329472 in qio_channel_read (address@hidden, +> >>> > > address@hidden "", address@hidden, +> >>> > > address@hidden) at io/channel.c:97 +> >>> > > #3 0x00007f3e032750e0 in channel_get_buffer (opaque=<optimized out>, +> >>> > > buf=0x7f3e05910f38 "", pos=<optimized out>, size=32768) at +> >>> > > migration/qemu-file-channel.c:78 +> >>> > > #4 0x00007f3e0327412c in qemu_fill_buffer (f=0x7f3e05910f00) at +> >>> > > migration/qemu-file.c:257 +> >>> > > #5 0x00007f3e03274a41 in qemu_peek_byte (address@hidden, +> >>> > > address@hidden) at migration/qemu-file.c:510 +> >>> > > #6 0x00007f3e03274aab in qemu_get_byte (address@hidden) at +> >>> > > migration/qemu-file.c:523 +> >>> > > #7 0x00007f3e03274cb2 in qemu_get_be32 (address@hidden) at +> >>> > > migration/qemu-file.c:603 +> >>> > > #8 0x00007f3e03271735 in colo_receive_message (f=0x7f3e05910f00, +> >>> > > address@hidden) at migration/colo.c:215 +> >>> > > #9 0x00007f3e0327250d in colo_wait_handle_message +(errp=0x7f3d62bfaa48, +> >>> > > checkpoint_request=<synthetic pointer>, f=<optimized out>) at +> >>> > > migration/colo.c:546 +> >>> > > #10 colo_process_incoming_thread (opaque=0x7f3e067245e0) at +> >>> > > migration/colo.c:649 +> >>> > > #11 0x00007f3e00cc1df3 in start_thread () from /lib64/libpthread.so.0 +> >>> > > #12 0x00007f3dfc9c03ed in clone () from /lib64/libc..so.6 +> >>> > > +> >>> > > +> >>> > > +> >>> > > +> >>> > > +> >>> > > -- +> >>> > > View this message in context: +http://qemu.11.n7.nabble.com/COLO-failover-hang-tp473250.html +> >>> > > Sent from the Developer mailing list archive at Nabble.com. +> >>> > > +> >>> > > +> >>> > > +> >>> > > +> >>> > +> >>> > -- +> >>> > Thanks +> >>> > Zhang Chen +> >>> > +> >>> > +> >>> > +> >>> > +> >>> > +> >>> +> >> +> > -- +> > Dr. David Alan Gilbert / address@hidden / Manchester, UK +> > +> > . 
+> > +> + diff --git a/results/classifier/001/mistranslation/71456293 b/results/classifier/001/mistranslation/71456293 new file mode 100644 index 000000000..746a624cc --- /dev/null +++ b/results/classifier/001/mistranslation/71456293 @@ -0,0 +1,1486 @@ +mistranslation: 0.659 +instruction: 0.624 +semantic: 0.600 +other: 0.598 + +[Qemu-devel][bug] qemu crash when migrate vm and vm's disks + +When migrate vm and vm’s disks target host qemu crash due to an invalid free. +#0  object_unref (obj=0x1000) at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/qom/object.c:920 +#1  0x0000560434d79e79 in memory_region_unref (mr=) +at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:1730 +#2  flatview_destroy (view=0x560439653880) at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:292 +#3  0x000056043514dfbe in call_rcu_thread (opaque=) +at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/util/rcu.c:284 +#4  0x00007fbc2b36fe25 in start_thread () from /lib64/libpthread.so.0 +#5  0x00007fbc2b099bad in clone () from /lib64/libc.so.6 +test base qemu-2.12.0 +, +but use lastest qemu(v6.0.0-rc2) also reproduce. +As follow patch can resolve this problem: +https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg02272.html +Steps to reproduce: +(1) Create VM (virsh define) +(2) Add 64 virtio scsi disks +(3) migrate vm and vm’disks +------------------------------------------------------------------------------------------------------------------------------------- +本邮件及其附件含有新华三集团的保密信息,仅限于发送给上面地址中列出 +的个人或群组。禁止任何其他人以任何形式使用(包括但不限于全部或部分地泄露、复制、 +或散发)本邮件中的信息。如果您错收了本邮件,请您立即电话或邮件通知发件人并删除本 +邮件! +This e-mail and its attachments contain confidential information from New H3C, which is +intended only for the person or entity whose address is listed above. Any use of the +information contained herein in any way (including, but not limited to, total or partial +disclosure, reproduction, or dissemination) by persons other than the intended +recipient(s) is prohibited. If you receive this e-mail in error, please notify the sender +by phone or email immediately and delete it! + +* Yuchen (yu.chen@h3c.com) wrote: +> +When migrate vm and vm’s disks target host qemu crash due to an invalid free. +> +> +#0 object_unref (obj=0x1000) at +> +/qemu-2.12/rpmbuild/BUILD/qemu-2.12/qom/object.c:920 +> +#1 0x0000560434d79e79 in memory_region_unref (mr=) +> +at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:1730 +> +#2 flatview_destroy (view=0x560439653880) at +> +/qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:292 +> +#3 0x000056043514dfbe in call_rcu_thread (opaque=) +> +at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/util/rcu.c:284 +> +#4 0x00007fbc2b36fe25 in start_thread () from /lib64/libpthread.so.0 +> +#5 0x00007fbc2b099bad in clone () from /lib64/libc.so.6 +> +> +test base qemu-2.12.0,but use lastest qemu(v6.0.0-rc2) also reproduce. +Interesting. + +> +As follow patch can resolve this problem: +> +https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg02272.html +That's a pci/rcu change; ccing Paolo and Micahel. + +> +Steps to reproduce: +> +(1) Create VM (virsh define) +> +(2) Add 64 virtio scsi disks +Is that hot adding the disks later, or are they included in the VM at +creation? +Can you provide a libvirt XML example? + +> +(3) migrate vm and vm’disks +What do you mean by 'and vm disks' - are you doing a block migration? 
+ +Dave + +> +------------------------------------------------------------------------------------------------------------------------------------- +> +本邮件及其附件含有新华三集团的保密信息,仅限于发送给上面地址中列出 +> +的个人或群组。禁止任何其他人以任何形式使用(包括但不限于全部或部分地泄露、复制、 +> +或散发)本邮件中的信息。如果您错收了本邮件,请您立即电话或邮件通知发件人并删除本 +> +邮件! +> +This e-mail and its attachments contain confidential information from New +> +H3C, which is +> +intended only for the person or entity whose address is listed above. Any use +> +of the +> +information contained herein in any way (including, but not limited to, total +> +or partial +> +disclosure, reproduction, or dissemination) by persons other than the intended +> +recipient(s) is prohibited. If you receive this e-mail in error, please +> +notify the sender +> +by phone or email immediately and delete it! +-- +Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK + +> +-----邮件原件----- +> +发件人: Dr. David Alan Gilbert [ +mailto:dgilbert@redhat.com +] +> +发送时间: 2021å¹´4月8日 19:27 +> +收件人: yuchen (Cloud) ; pbonzini@redhat.com; +> +mst@redhat.com +> +抄送: qemu-devel@nongnu.org +> +主题: Re: [Qemu-devel][bug] qemu crash when migrate vm and vm's disks +> +> +* Yuchen (yu.chen@h3c.com) wrote: +> +> When migrate vm and vm’s disks target host qemu crash due to an invalid +> +free. +> +> +> +> #0 object_unref (obj=0x1000) at +> +> /qemu-2.12/rpmbuild/BUILD/qemu-2.12/qom/object.c:920 +> +> #1 0x0000560434d79e79 in memory_region_unref (mr=) +> +> at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:1730 +> +> #2 flatview_destroy (view=0x560439653880) at +> +> /qemu-2.12/rpmbuild/BUILD/qemu-2.12/memory.c:292 +> +> #3 0x000056043514dfbe in call_rcu_thread (opaque=) +> +> at /qemu-2.12/rpmbuild/BUILD/qemu-2.12/util/rcu.c:284 +> +> #4 0x00007fbc2b36fe25 in start_thread () from /lib64/libpthread.so.0 +> +> #5 0x00007fbc2b099bad in clone () from /lib64/libc.so.6 +> +> +> +> test base qemu-2.12.0,but use lastest qemu(v6.0.0-rc2) also reproduce. +> +> +Interesting. +> +> +> As follow patch can resolve this problem: +> +> +https://lists.gnu.org/archive/html/qemu-devel/2018-07/msg02272.html +> +> +That's a pci/rcu change; ccing Paolo and Micahel. +> +> +> Steps to reproduce: +> +> (1) Create VM (virsh define) +> +> (2) Add 64 virtio scsi disks +> +> +Is that hot adding the disks later, or are they included in the VM at +> +creation? +> +Can you provide a libvirt XML example? +> +Include disks in the VM at creation + +vm disks xml (only virtio scsi disks): + + + + + +
+  [disk XML omitted: the element markup was stripped when this message was archived, leaving only empty lines]
+vm disks xml (only virtio disks):
+  [disk XML omitted: the element markup was stripped when this message was archived, leaving only empty lines]
+ + + +> +> (3) migrate vm and vm’disks +> +> +What do you mean by 'and vm disks' - are you doing a block migration? +> +Yes, block migration. +In fact, only migration domain also reproduced. + +> +Dave +> +> +> ---------------------------------------------------------------------- +> +> --------------------------------------------------------------- +> +Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK +------------------------------------------------------------------------------------------------------------------------------------- +本邮件及其附件含有新华三集团的保密信息,仅限于发送给上面地址中列出 +的个人或群组。禁止任何其他人以任何形式使用(包括但不限于全部或部分地泄露、复制、 +或散发)本邮件中的信息。如果您错收了本邮件,请您立即电话或邮件通知发件人并删除本 +邮件! +This e-mail and its attachments contain confidential information from New H3C, +which is +intended only for the person or entity whose address is listed above. Any use +of the +information contained herein in any way (including, but not limited to, total +or partial +disclosure, reproduction, or dissemination) by persons other than the intended +recipient(s) is prohibited. If you receive this e-mail in error, please notify +the sender +by phone or email immediately and delete it! + diff --git a/results/classifier/001/mistranslation/74466963 b/results/classifier/001/mistranslation/74466963 new file mode 100644 index 000000000..fffafcf77 --- /dev/null +++ b/results/classifier/001/mistranslation/74466963 @@ -0,0 +1,1878 @@ +mistranslation: 0.927 +instruction: 0.903 +semantic: 0.891 +other: 0.877 + +[Qemu-devel] [TCG only][Migration Bug? ] Occasionally, the content of VM's memory is inconsistent between Source and Destination of migration + +Hi all, + +Does anyboday remember the similar issue post by hailiang months ago +http://patchwork.ozlabs.org/patch/454322/ +At least tow bugs about migration had been fixed since that. +And now we found the same issue at the tcg vm(kvm is fine), after +migration, the content VM's memory is inconsistent. +we add a patch to check memory content, you can find it from affix + +steps to reporduce: +1) apply the patch and re-build qemu +2) prepare the ubuntu guest and run memtest in grub. +soruce side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off +destination side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 +3) start migration +with 1000M NIC, migration will finish within 3 min. + +at source: +(qemu) migrate tcp:192.168.2.66:8881 +after saving ram complete +e9e725df678d392b1a83b3a917f332bb +qemu-system-x86_64: end ram md5 +(qemu) + +at destination: +...skip... 
+Completed load of VM with exit code 0 seq iteration 1264 +Completed load of VM with exit code 0 seq iteration 1265 +Completed load of VM with exit code 0 seq iteration 1266 +qemu-system-x86_64: after loading state section id 2(ram) +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 +qemu-system-x86_64: qemu_loadvm_state: after cpu_synchronize_all_post_init + +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 + +This occurs occasionally and only at tcg machine. It seems that +some pages dirtied in source side don't transferred to destination. +This problem can be reproduced even if we disable virtio. +Is it OK for some pages that not transferred to destination when do +migration ? Or is it a bug? +Any idea... + +=================md5 check patch============================= + +diff --git a/Makefile.target b/Makefile.target +index 962d004..e2cb8e9 100644 +--- a/Makefile.target ++++ b/Makefile.target +@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o + obj-y += memory_mapping.o + obj-y += dump.o + obj-y += migration/ram.o migration/savevm.o +-LIBS := $(libs_softmmu) $(LIBS) ++LIBS := $(libs_softmmu) $(LIBS) -lplumb + + # xen support + obj-$(CONFIG_XEN) += xen-common.o +diff --git a/migration/ram.c b/migration/ram.c +index 1eb155a..3b7a09d 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, int +version_id) +} + + rcu_read_unlock(); +- DPRINTF("Completed load of VM with exit code %d seq iteration " ++ fprintf(stderr, "Completed load of VM with exit code %d seq iteration " + "%" PRIu64 "\n", ret, seq_iter); + return ret; + } +diff --git a/migration/savevm.c b/migration/savevm.c +index 0ad1b93..3feaa61 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) + + } + ++#include "exec/ram_addr.h" ++#include "qemu/rcu_queue.h" ++#include ++#ifndef MD5_DIGEST_LENGTH ++#define MD5_DIGEST_LENGTH 16 ++#endif ++ ++static void check_host_md5(void) ++{ ++ int i; ++ unsigned char md[MD5_DIGEST_LENGTH]; ++ rcu_read_lock(); ++ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +'pc.ram' block */ ++ rcu_read_unlock(); ++ ++ MD5(block->host, block->used_length, md); ++ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { ++ fprintf(stderr, "%02x", md[i]); ++ } ++ fprintf(stderr, "\n"); ++ error_report("end ram md5"); ++} ++ + void qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params) + { +@@ -1056,6 +1079,10 @@ void qemu_savevm_state_complete_precopy(QEMUFile +*f, bool iterable_only) +save_section_header(f, se, QEMU_VM_SECTION_END); + + ret = se->ops->save_live_complete_precopy(f, se->opaque); ++ ++ fprintf(stderr, "after saving %s complete\n", se->idstr); ++ check_host_md5(); ++ + trace_savevm_section_end(se->idstr, se->section_id, ret); + save_section_footer(f, se); + if (ret < 0) { +@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +MigrationIncomingState *mis) +section_id, le->se->idstr); + return ret; + } ++ if (section_type == QEMU_VM_SECTION_END) { ++ error_report("after loading state section id %d(%s)", ++ section_id, le->se->idstr); ++ check_host_md5(); ++ } + if (!check_section_footer(f, le)) { + return -EINVAL; + } +@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) + } + + cpu_synchronize_all_post_init(); ++ error_report("%s: after cpu_synchronize_all_post_init\n", __func__); ++ check_host_md5(); + + return ret; + } + +* Li Zhijian (address@hidden) wrote: +> +Hi all, +> +> +Does anyboday 
remember the similar issue post by hailiang months ago +> +http://patchwork.ozlabs.org/patch/454322/ +> +At least tow bugs about migration had been fixed since that. +Yes, I wondered what happened to that. + +> +And now we found the same issue at the tcg vm(kvm is fine), after migration, +> +the content VM's memory is inconsistent. +Hmm, TCG only - I don't know much about that; but I guess something must +be accessing memory without using the proper macros/functions so +it doesn't mark it as dirty. + +> +we add a patch to check memory content, you can find it from affix +> +> +steps to reporduce: +> +1) apply the patch and re-build qemu +> +2) prepare the ubuntu guest and run memtest in grub. +> +soruce side: +> +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +> +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +> +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +> +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +> +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +> +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +> +pc-i440fx-2.3,accel=tcg,usb=off +> +> +destination side: +> +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +> +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +> +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +> +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +> +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +> +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +> +pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 +> +> +3) start migration +> +with 1000M NIC, migration will finish within 3 min. +> +> +at source: +> +(qemu) migrate tcp:192.168.2.66:8881 +> +after saving ram complete +> +e9e725df678d392b1a83b3a917f332bb +> +qemu-system-x86_64: end ram md5 +> +(qemu) +> +> +at destination: +> +...skip... +> +Completed load of VM with exit code 0 seq iteration 1264 +> +Completed load of VM with exit code 0 seq iteration 1265 +> +Completed load of VM with exit code 0 seq iteration 1266 +> +qemu-system-x86_64: after loading state section id 2(ram) +> +49c2dac7bde0e5e22db7280dcb3824f9 +> +qemu-system-x86_64: end ram md5 +> +qemu-system-x86_64: qemu_loadvm_state: after cpu_synchronize_all_post_init +> +> +49c2dac7bde0e5e22db7280dcb3824f9 +> +qemu-system-x86_64: end ram md5 +> +> +This occurs occasionally and only at tcg machine. It seems that +> +some pages dirtied in source side don't transferred to destination. +> +This problem can be reproduced even if we disable virtio. +> +> +Is it OK for some pages that not transferred to destination when do +> +migration ? Or is it a bug? +I'm pretty sure that means it's a bug. Hard to find though, I guess +at least memtest is smaller than a big OS. I think I'd dump the whole +of memory on both sides, hexdump and diff them - I'd guess it would +just be one byte/word different, maybe that would offer some idea what +wrote it. + +Dave + +> +Any idea... 
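+Following the "dump the whole of memory on both sides" suggestion, a debug
+helper in the same spirit as the check_host_md5() hack quoted in this thread
+could write the raw contents of that first RAMBlock to a file, so the source
+and destination images can be compared offline with cmp -l or xxd plus diff.
+This is a debug-only sketch reusing the same assumption as the md5 patch
+(the first block in ram_list is the 'pc.ram' block of interest); it is not
+code taken from the thread:
+
+    #include "exec/ram_addr.h"
+    #include "qemu/rcu_queue.h"
+    #include <stdio.h>
+
+    /* Debug only: dump the first RAMBlock to 'path' for offline comparison. */
+    static void dump_host_ram(const char *path)
+    {
+        RAMBlock *block;
+        FILE *fp;
+
+        rcu_read_lock();
+        block = QLIST_FIRST_RCU(&ram_list.blocks);
+        rcu_read_unlock();
+
+        fp = fopen(path, "wb");
+        if (!fp) {
+            fprintf(stderr, "cannot open %s for RAM dump\n", path);
+            return;
+        }
+        if (fwrite(block->host, 1, block->used_length, fp) != block->used_length) {
+            fprintf(stderr, "short write while dumping RAM to %s\n", path);
+        }
+        fclose(fp);
+        fprintf(stderr, "dumped %zu bytes of '%s' to %s\n",
+                (size_t)block->used_length, block->idstr, path);
+    }
+
+Called next to check_host_md5() on both sides, the resulting files show
+exactly which offsets, and therefore which guest pages, went out of sync.
+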
+> +> +=================md5 check patch============================= +> +> +diff --git a/Makefile.target b/Makefile.target +> +index 962d004..e2cb8e9 100644 +> +--- a/Makefile.target +> ++++ b/Makefile.target +> +@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o +> +obj-y += memory_mapping.o +> +obj-y += dump.o +> +obj-y += migration/ram.o migration/savevm.o +> +-LIBS := $(libs_softmmu) $(LIBS) +> ++LIBS := $(libs_softmmu) $(LIBS) -lplumb +> +> +# xen support +> +obj-$(CONFIG_XEN) += xen-common.o +> +diff --git a/migration/ram.c b/migration/ram.c +> +index 1eb155a..3b7a09d 100644 +> +--- a/migration/ram.c +> ++++ b/migration/ram.c +> +@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, int +> +version_id) +> +} +> +> +rcu_read_unlock(); +> +- DPRINTF("Completed load of VM with exit code %d seq iteration " +> ++ fprintf(stderr, "Completed load of VM with exit code %d seq iteration " +> +"%" PRIu64 "\n", ret, seq_iter); +> +return ret; +> +} +> +diff --git a/migration/savevm.c b/migration/savevm.c +> +index 0ad1b93..3feaa61 100644 +> +--- a/migration/savevm.c +> ++++ b/migration/savevm.c +> +@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) +> +> +} +> +> ++#include "exec/ram_addr.h" +> ++#include "qemu/rcu_queue.h" +> ++#include +> ++#ifndef MD5_DIGEST_LENGTH +> ++#define MD5_DIGEST_LENGTH 16 +> ++#endif +> ++ +> ++static void check_host_md5(void) +> ++{ +> ++ int i; +> ++ unsigned char md[MD5_DIGEST_LENGTH]; +> ++ rcu_read_lock(); +> ++ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +> +'pc.ram' block */ +> ++ rcu_read_unlock(); +> ++ +> ++ MD5(block->host, block->used_length, md); +> ++ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { +> ++ fprintf(stderr, "%02x", md[i]); +> ++ } +> ++ fprintf(stderr, "\n"); +> ++ error_report("end ram md5"); +> ++} +> ++ +> +void qemu_savevm_state_begin(QEMUFile *f, +> +const MigrationParams *params) +> +{ +> +@@ -1056,6 +1079,10 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, +> +bool iterable_only) +> +save_section_header(f, se, QEMU_VM_SECTION_END); +> +> +ret = se->ops->save_live_complete_precopy(f, se->opaque); +> ++ +> ++ fprintf(stderr, "after saving %s complete\n", se->idstr); +> ++ check_host_md5(); +> ++ +> +trace_savevm_section_end(se->idstr, se->section_id, ret); +> +save_section_footer(f, se); +> +if (ret < 0) { +> +@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +> +MigrationIncomingState *mis) +> +section_id, le->se->idstr); +> +return ret; +> +} +> ++ if (section_type == QEMU_VM_SECTION_END) { +> ++ error_report("after loading state section id %d(%s)", +> ++ section_id, le->se->idstr); +> ++ check_host_md5(); +> ++ } +> +if (!check_section_footer(f, le)) { +> +return -EINVAL; +> +} +> +@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) +> +} +> +> +cpu_synchronize_all_post_init(); +> ++ error_report("%s: after cpu_synchronize_all_post_init\n", __func__); +> ++ check_host_md5(); +> +> +return ret; +> +} +> +> +> +-- +Dr. David Alan Gilbert / address@hidden / Manchester, UK + +On 2015/12/3 17:24, Dr. David Alan Gilbert wrote: +* Li Zhijian (address@hidden) wrote: +Hi all, + +Does anyboday remember the similar issue post by hailiang months ago +http://patchwork.ozlabs.org/patch/454322/ +At least tow bugs about migration had been fixed since that. +Yes, I wondered what happened to that. +And now we found the same issue at the tcg vm(kvm is fine), after migration, +the content VM's memory is inconsistent. 
+Hmm, TCG only - I don't know much about that; but I guess something must +be accessing memory without using the proper macros/functions so +it doesn't mark it as dirty. +we add a patch to check memory content, you can find it from affix + +steps to reporduce: +1) apply the patch and re-build qemu +2) prepare the ubuntu guest and run memtest in grub. +soruce side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off + +destination side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 + +3) start migration +with 1000M NIC, migration will finish within 3 min. + +at source: +(qemu) migrate tcp:192.168.2.66:8881 +after saving ram complete +e9e725df678d392b1a83b3a917f332bb +qemu-system-x86_64: end ram md5 +(qemu) + +at destination: +...skip... +Completed load of VM with exit code 0 seq iteration 1264 +Completed load of VM with exit code 0 seq iteration 1265 +Completed load of VM with exit code 0 seq iteration 1266 +qemu-system-x86_64: after loading state section id 2(ram) +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 +qemu-system-x86_64: qemu_loadvm_state: after cpu_synchronize_all_post_init + +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 + +This occurs occasionally and only at tcg machine. It seems that +some pages dirtied in source side don't transferred to destination. +This problem can be reproduced even if we disable virtio. + +Is it OK for some pages that not transferred to destination when do +migration ? Or is it a bug? +I'm pretty sure that means it's a bug. Hard to find though, I guess +at least memtest is smaller than a big OS. I think I'd dump the whole +of memory on both sides, hexdump and diff them - I'd guess it would +just be one byte/word different, maybe that would offer some idea what +wrote it. +Maybe one better way to do that is with the help of userfaultfd's write-protect +capability. It is still in the development by Andrea Arcangeli, but there +is a RFC version available, please refer to +http://www.spinics.net/lists/linux-mm/msg97422.html +(I'm developing live memory snapshot which based on it, maybe this is another +scene where we +can use userfaultfd's WP ;) ). +Dave +Any idea... 
+ +=================md5 check patch============================= + +diff --git a/Makefile.target b/Makefile.target +index 962d004..e2cb8e9 100644 +--- a/Makefile.target ++++ b/Makefile.target +@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o + obj-y += memory_mapping.o + obj-y += dump.o + obj-y += migration/ram.o migration/savevm.o +-LIBS := $(libs_softmmu) $(LIBS) ++LIBS := $(libs_softmmu) $(LIBS) -lplumb + + # xen support + obj-$(CONFIG_XEN) += xen-common.o +diff --git a/migration/ram.c b/migration/ram.c +index 1eb155a..3b7a09d 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, int +version_id) + } + + rcu_read_unlock(); +- DPRINTF("Completed load of VM with exit code %d seq iteration " ++ fprintf(stderr, "Completed load of VM with exit code %d seq iteration " + "%" PRIu64 "\n", ret, seq_iter); + return ret; + } +diff --git a/migration/savevm.c b/migration/savevm.c +index 0ad1b93..3feaa61 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) + + } + ++#include "exec/ram_addr.h" ++#include "qemu/rcu_queue.h" ++#include ++#ifndef MD5_DIGEST_LENGTH ++#define MD5_DIGEST_LENGTH 16 ++#endif ++ ++static void check_host_md5(void) ++{ ++ int i; ++ unsigned char md[MD5_DIGEST_LENGTH]; ++ rcu_read_lock(); ++ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +'pc.ram' block */ ++ rcu_read_unlock(); ++ ++ MD5(block->host, block->used_length, md); ++ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { ++ fprintf(stderr, "%02x", md[i]); ++ } ++ fprintf(stderr, "\n"); ++ error_report("end ram md5"); ++} ++ + void qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params) + { +@@ -1056,6 +1079,10 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, +bool iterable_only) + save_section_header(f, se, QEMU_VM_SECTION_END); + + ret = se->ops->save_live_complete_precopy(f, se->opaque); ++ ++ fprintf(stderr, "after saving %s complete\n", se->idstr); ++ check_host_md5(); ++ + trace_savevm_section_end(se->idstr, se->section_id, ret); + save_section_footer(f, se); + if (ret < 0) { +@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +MigrationIncomingState *mis) + section_id, le->se->idstr); + return ret; + } ++ if (section_type == QEMU_VM_SECTION_END) { ++ error_report("after loading state section id %d(%s)", ++ section_id, le->se->idstr); ++ check_host_md5(); ++ } + if (!check_section_footer(f, le)) { + return -EINVAL; + } +@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) + } + + cpu_synchronize_all_post_init(); ++ error_report("%s: after cpu_synchronize_all_post_init\n", __func__); ++ check_host_md5(); + + return ret; + } +-- +Dr. David Alan Gilbert / address@hidden / Manchester, UK + +. + +On 12/03/2015 05:37 PM, Hailiang Zhang wrote: +On 2015/12/3 17:24, Dr. David Alan Gilbert wrote: +* Li Zhijian (address@hidden) wrote: +Hi all, + +Does anyboday remember the similar issue post by hailiang months ago +http://patchwork.ozlabs.org/patch/454322/ +At least tow bugs about migration had been fixed since that. +Yes, I wondered what happened to that. +And now we found the same issue at the tcg vm(kvm is fine), after +migration, +the content VM's memory is inconsistent. +Hmm, TCG only - I don't know much about that; but I guess something must +be accessing memory without using the proper macros/functions so +it doesn't mark it as dirty. 
+we add a patch to check memory content, you can find it from affix + +steps to reporduce: +1) apply the patch and re-build qemu +2) prepare the ubuntu guest and run memtest in grub. +soruce side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 + +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off + +destination side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 + +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 + +3) start migration +with 1000M NIC, migration will finish within 3 min. + +at source: +(qemu) migrate tcp:192.168.2.66:8881 +after saving ram complete +e9e725df678d392b1a83b3a917f332bb +qemu-system-x86_64: end ram md5 +(qemu) + +at destination: +...skip... +Completed load of VM with exit code 0 seq iteration 1264 +Completed load of VM with exit code 0 seq iteration 1265 +Completed load of VM with exit code 0 seq iteration 1266 +qemu-system-x86_64: after loading state section id 2(ram) +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 +qemu-system-x86_64: qemu_loadvm_state: after +cpu_synchronize_all_post_init + +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 + +This occurs occasionally and only at tcg machine. It seems that +some pages dirtied in source side don't transferred to destination. +This problem can be reproduced even if we disable virtio. + +Is it OK for some pages that not transferred to destination when do +migration ? Or is it a bug? +I'm pretty sure that means it's a bug. Hard to find though, I guess +at least memtest is smaller than a big OS. I think I'd dump the whole +of memory on both sides, hexdump and diff them - I'd guess it would +just be one byte/word different, maybe that would offer some idea what +wrote it. +Maybe one better way to do that is with the help of userfaultfd's +write-protect +capability. It is still in the development by Andrea Arcangeli, but there +is a RFC version available, please refer to +http://www.spinics.net/lists/linux-mm/msg97422.html +(I'm developing live memory snapshot which based on it, maybe this is +another scene where we +can use userfaultfd's WP ;) ). +sounds good. + +thanks +Li +Dave +Any idea... 
+ +=================md5 check patch============================= + +diff --git a/Makefile.target b/Makefile.target +index 962d004..e2cb8e9 100644 +--- a/Makefile.target ++++ b/Makefile.target +@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o + obj-y += memory_mapping.o + obj-y += dump.o + obj-y += migration/ram.o migration/savevm.o +-LIBS := $(libs_softmmu) $(LIBS) ++LIBS := $(libs_softmmu) $(LIBS) -lplumb + + # xen support + obj-$(CONFIG_XEN) += xen-common.o +diff --git a/migration/ram.c b/migration/ram.c +index 1eb155a..3b7a09d 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, int +version_id) + } + + rcu_read_unlock(); +- DPRINTF("Completed load of VM with exit code %d seq iteration " ++ fprintf(stderr, "Completed load of VM with exit code %d seq +iteration " + "%" PRIu64 "\n", ret, seq_iter); + return ret; + } +diff --git a/migration/savevm.c b/migration/savevm.c +index 0ad1b93..3feaa61 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) + + } + ++#include "exec/ram_addr.h" ++#include "qemu/rcu_queue.h" ++#include ++#ifndef MD5_DIGEST_LENGTH ++#define MD5_DIGEST_LENGTH 16 ++#endif ++ ++static void check_host_md5(void) ++{ ++ int i; ++ unsigned char md[MD5_DIGEST_LENGTH]; ++ rcu_read_lock(); ++ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +'pc.ram' block */ ++ rcu_read_unlock(); ++ ++ MD5(block->host, block->used_length, md); ++ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { ++ fprintf(stderr, "%02x", md[i]); ++ } ++ fprintf(stderr, "\n"); ++ error_report("end ram md5"); ++} ++ + void qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params) + { +@@ -1056,6 +1079,10 @@ void +qemu_savevm_state_complete_precopy(QEMUFile *f, +bool iterable_only) + save_section_header(f, se, QEMU_VM_SECTION_END); + + ret = se->ops->save_live_complete_precopy(f, se->opaque); ++ ++ fprintf(stderr, "after saving %s complete\n", se->idstr); ++ check_host_md5(); ++ + trace_savevm_section_end(se->idstr, se->section_id, ret); + save_section_footer(f, se); + if (ret < 0) { +@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +MigrationIncomingState *mis) + section_id, le->se->idstr); + return ret; + } ++ if (section_type == QEMU_VM_SECTION_END) { ++ error_report("after loading state section id %d(%s)", ++ section_id, le->se->idstr); ++ check_host_md5(); ++ } + if (!check_section_footer(f, le)) { + return -EINVAL; + } +@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) + } + + cpu_synchronize_all_post_init(); ++ error_report("%s: after cpu_synchronize_all_post_init\n", +__func__); ++ check_host_md5(); + + return ret; + } +-- +Dr. David Alan Gilbert / address@hidden / Manchester, UK + +. +. +-- +Best regards. +Li Zhijian (8555) + +On 12/03/2015 05:24 PM, Dr. David Alan Gilbert wrote: +* Li Zhijian (address@hidden) wrote: +Hi all, + +Does anyboday remember the similar issue post by hailiang months ago +http://patchwork.ozlabs.org/patch/454322/ +At least tow bugs about migration had been fixed since that. +Yes, I wondered what happened to that. +And now we found the same issue at the tcg vm(kvm is fine), after migration, +the content VM's memory is inconsistent. +Hmm, TCG only - I don't know much about that; but I guess something must +be accessing memory without using the proper macros/functions so +it doesn't mark it as dirty. 
+we add a patch to check memory content, you can find it from affix + +steps to reporduce: +1) apply the patch and re-build qemu +2) prepare the ubuntu guest and run memtest in grub. +soruce side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off + +destination side: +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 + +3) start migration +with 1000M NIC, migration will finish within 3 min. + +at source: +(qemu) migrate tcp:192.168.2.66:8881 +after saving ram complete +e9e725df678d392b1a83b3a917f332bb +qemu-system-x86_64: end ram md5 +(qemu) + +at destination: +...skip... +Completed load of VM with exit code 0 seq iteration 1264 +Completed load of VM with exit code 0 seq iteration 1265 +Completed load of VM with exit code 0 seq iteration 1266 +qemu-system-x86_64: after loading state section id 2(ram) +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 +qemu-system-x86_64: qemu_loadvm_state: after cpu_synchronize_all_post_init + +49c2dac7bde0e5e22db7280dcb3824f9 +qemu-system-x86_64: end ram md5 + +This occurs occasionally and only at tcg machine. It seems that +some pages dirtied in source side don't transferred to destination. +This problem can be reproduced even if we disable virtio. + +Is it OK for some pages that not transferred to destination when do +migration ? Or is it a bug? +I'm pretty sure that means it's a bug. Hard to find though, I guess +at least memtest is smaller than a big OS. I think I'd dump the whole +of memory on both sides, hexdump and diff them - I'd guess it would +just be one byte/word different, maybe that would offer some idea what +wrote it. +I try to dump and compare them, more than 10 pages are different. +in source side, they are random value rather than always 'FF' 'FB' 'EF' +'BF'... in destination. +and not all of the different pages are continuous. + +thanks +Li +Dave +Any idea... 
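+Since the observation above is that more than ten non-contiguous pages
+differ, a small standalone program (an illustration, not something posted in
+this thread) can turn two such RAM dumps into a per-page report, which makes
+it easy to see whether the differing pages cluster anywhere; with the 128 MB
+guest used here that is at most 32768 pages to scan:
+
+    /* compare two RAM dumps page by page and list the pages that differ */
+    #include <stdio.h>
+    #include <string.h>
+
+    #define PAGE_SIZE 4096
+
+    int main(int argc, char **argv)
+    {
+        unsigned char pa[PAGE_SIZE], pb[PAGE_SIZE];
+        unsigned long page = 0, diffs = 0;
+        FILE *a, *b;
+
+        if (argc != 3) {
+            fprintf(stderr, "usage: %s src.ram dst.ram\n", argv[0]);
+            return 1;
+        }
+        a = fopen(argv[1], "rb");
+        b = fopen(argv[2], "rb");
+        if (!a || !b) {
+            perror("fopen");
+            return 1;
+        }
+        /* a trailing partial page, if any, is ignored */
+        while (fread(pa, 1, PAGE_SIZE, a) == PAGE_SIZE &&
+               fread(pb, 1, PAGE_SIZE, b) == PAGE_SIZE) {
+            if (memcmp(pa, pb, PAGE_SIZE) != 0) {
+                printf("page %lu (offset 0x%lx) differs\n",
+                       page, page * PAGE_SIZE);
+                diffs++;
+            }
+            page++;
+        }
+        printf("%lu differing page(s) out of %lu\n", diffs, page);
+        fclose(a);
+        fclose(b);
+        return diffs ? 2 : 0;
+    }
+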
+ +=================md5 check patch============================= + +diff --git a/Makefile.target b/Makefile.target +index 962d004..e2cb8e9 100644 +--- a/Makefile.target ++++ b/Makefile.target +@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o + obj-y += memory_mapping.o + obj-y += dump.o + obj-y += migration/ram.o migration/savevm.o +-LIBS := $(libs_softmmu) $(LIBS) ++LIBS := $(libs_softmmu) $(LIBS) -lplumb + + # xen support + obj-$(CONFIG_XEN) += xen-common.o +diff --git a/migration/ram.c b/migration/ram.c +index 1eb155a..3b7a09d 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, int +version_id) + } + + rcu_read_unlock(); +- DPRINTF("Completed load of VM with exit code %d seq iteration " ++ fprintf(stderr, "Completed load of VM with exit code %d seq iteration " + "%" PRIu64 "\n", ret, seq_iter); + return ret; + } +diff --git a/migration/savevm.c b/migration/savevm.c +index 0ad1b93..3feaa61 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) + + } + ++#include "exec/ram_addr.h" ++#include "qemu/rcu_queue.h" ++#include ++#ifndef MD5_DIGEST_LENGTH ++#define MD5_DIGEST_LENGTH 16 ++#endif ++ ++static void check_host_md5(void) ++{ ++ int i; ++ unsigned char md[MD5_DIGEST_LENGTH]; ++ rcu_read_lock(); ++ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +'pc.ram' block */ ++ rcu_read_unlock(); ++ ++ MD5(block->host, block->used_length, md); ++ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { ++ fprintf(stderr, "%02x", md[i]); ++ } ++ fprintf(stderr, "\n"); ++ error_report("end ram md5"); ++} ++ + void qemu_savevm_state_begin(QEMUFile *f, + const MigrationParams *params) + { +@@ -1056,6 +1079,10 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, +bool iterable_only) + save_section_header(f, se, QEMU_VM_SECTION_END); + + ret = se->ops->save_live_complete_precopy(f, se->opaque); ++ ++ fprintf(stderr, "after saving %s complete\n", se->idstr); ++ check_host_md5(); ++ + trace_savevm_section_end(se->idstr, se->section_id, ret); + save_section_footer(f, se); + if (ret < 0) { +@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +MigrationIncomingState *mis) + section_id, le->se->idstr); + return ret; + } ++ if (section_type == QEMU_VM_SECTION_END) { ++ error_report("after loading state section id %d(%s)", ++ section_id, le->se->idstr); ++ check_host_md5(); ++ } + if (!check_section_footer(f, le)) { + return -EINVAL; + } +@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) + } + + cpu_synchronize_all_post_init(); ++ error_report("%s: after cpu_synchronize_all_post_init\n", __func__); ++ check_host_md5(); + + return ret; + } +-- +Dr. David Alan Gilbert / address@hidden / Manchester, UK + + +. +-- +Best regards. +Li Zhijian (8555) + +* Li Zhijian (address@hidden) wrote: +> +> +> +On 12/03/2015 05:24 PM, Dr. David Alan Gilbert wrote: +> +>* Li Zhijian (address@hidden) wrote: +> +>>Hi all, +> +>> +> +>>Does anyboday remember the similar issue post by hailiang months ago +> +>> +http://patchwork.ozlabs.org/patch/454322/ +> +>>At least tow bugs about migration had been fixed since that. +> +> +> +>Yes, I wondered what happened to that. +> +> +> +>>And now we found the same issue at the tcg vm(kvm is fine), after migration, +> +>>the content VM's memory is inconsistent. 
+> +> +> +>Hmm, TCG only - I don't know much about that; but I guess something must +> +>be accessing memory without using the proper macros/functions so +> +>it doesn't mark it as dirty. +> +> +> +>>we add a patch to check memory content, you can find it from affix +> +>> +> +>>steps to reporduce: +> +>>1) apply the patch and re-build qemu +> +>>2) prepare the ubuntu guest and run memtest in grub. +> +>>soruce side: +> +>>x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +> +>>e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +> +>>if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +> +>>virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +> +>>-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +> +>>tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +> +>>pc-i440fx-2.3,accel=tcg,usb=off +> +>> +> +>>destination side: +> +>>x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +> +>>e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +> +>>if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +> +>>virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +> +>>-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +> +>>tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +> +>>pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 +> +>> +> +>>3) start migration +> +>>with 1000M NIC, migration will finish within 3 min. +> +>> +> +>>at source: +> +>>(qemu) migrate tcp:192.168.2.66:8881 +> +>>after saving ram complete +> +>>e9e725df678d392b1a83b3a917f332bb +> +>>qemu-system-x86_64: end ram md5 +> +>>(qemu) +> +>> +> +>>at destination: +> +>>...skip... +> +>>Completed load of VM with exit code 0 seq iteration 1264 +> +>>Completed load of VM with exit code 0 seq iteration 1265 +> +>>Completed load of VM with exit code 0 seq iteration 1266 +> +>>qemu-system-x86_64: after loading state section id 2(ram) +> +>>49c2dac7bde0e5e22db7280dcb3824f9 +> +>>qemu-system-x86_64: end ram md5 +> +>>qemu-system-x86_64: qemu_loadvm_state: after cpu_synchronize_all_post_init +> +>> +> +>>49c2dac7bde0e5e22db7280dcb3824f9 +> +>>qemu-system-x86_64: end ram md5 +> +>> +> +>>This occurs occasionally and only at tcg machine. It seems that +> +>>some pages dirtied in source side don't transferred to destination. +> +>>This problem can be reproduced even if we disable virtio. +> +>> +> +>>Is it OK for some pages that not transferred to destination when do +> +>>migration ? Or is it a bug? +> +> +> +>I'm pretty sure that means it's a bug. Hard to find though, I guess +> +>at least memtest is smaller than a big OS. I think I'd dump the whole +> +>of memory on both sides, hexdump and diff them - I'd guess it would +> +>just be one byte/word different, maybe that would offer some idea what +> +>wrote it. +> +> +I try to dump and compare them, more than 10 pages are different. +> +in source side, they are random value rather than always 'FF' 'FB' 'EF' +> +'BF'... in destination. +> +> +and not all of the different pages are continuous. +I wonder if it happens on all of memtest's different test patterns, +perhaps it might be possible to narrow it down if you tell memtest +to only run one test at a time. + +Dave + +> +> +thanks +> +Li +> +> +> +> +> +>Dave +> +> +> +>>Any idea... 
+> +>> +> +>>=================md5 check patch============================= +> +>> +> +>>diff --git a/Makefile.target b/Makefile.target +> +>>index 962d004..e2cb8e9 100644 +> +>>--- a/Makefile.target +> +>>+++ b/Makefile.target +> +>>@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o +> +>> obj-y += memory_mapping.o +> +>> obj-y += dump.o +> +>> obj-y += migration/ram.o migration/savevm.o +> +>>-LIBS := $(libs_softmmu) $(LIBS) +> +>>+LIBS := $(libs_softmmu) $(LIBS) -lplumb +> +>> +> +>> # xen support +> +>> obj-$(CONFIG_XEN) += xen-common.o +> +>>diff --git a/migration/ram.c b/migration/ram.c +> +>>index 1eb155a..3b7a09d 100644 +> +>>--- a/migration/ram.c +> +>>+++ b/migration/ram.c +> +>>@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, int +> +>>version_id) +> +>> } +> +>> +> +>> rcu_read_unlock(); +> +>>- DPRINTF("Completed load of VM with exit code %d seq iteration " +> +>>+ fprintf(stderr, "Completed load of VM with exit code %d seq iteration " +> +>> "%" PRIu64 "\n", ret, seq_iter); +> +>> return ret; +> +>> } +> +>>diff --git a/migration/savevm.c b/migration/savevm.c +> +>>index 0ad1b93..3feaa61 100644 +> +>>--- a/migration/savevm.c +> +>>+++ b/migration/savevm.c +> +>>@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) +> +>> +> +>> } +> +>> +> +>>+#include "exec/ram_addr.h" +> +>>+#include "qemu/rcu_queue.h" +> +>>+#include +> +>>+#ifndef MD5_DIGEST_LENGTH +> +>>+#define MD5_DIGEST_LENGTH 16 +> +>>+#endif +> +>>+ +> +>>+static void check_host_md5(void) +> +>>+{ +> +>>+ int i; +> +>>+ unsigned char md[MD5_DIGEST_LENGTH]; +> +>>+ rcu_read_lock(); +> +>>+ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +> +>>'pc.ram' block */ +> +>>+ rcu_read_unlock(); +> +>>+ +> +>>+ MD5(block->host, block->used_length, md); +> +>>+ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { +> +>>+ fprintf(stderr, "%02x", md[i]); +> +>>+ } +> +>>+ fprintf(stderr, "\n"); +> +>>+ error_report("end ram md5"); +> +>>+} +> +>>+ +> +>> void qemu_savevm_state_begin(QEMUFile *f, +> +>> const MigrationParams *params) +> +>> { +> +>>@@ -1056,6 +1079,10 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, +> +>>bool iterable_only) +> +>> save_section_header(f, se, QEMU_VM_SECTION_END); +> +>> +> +>> ret = se->ops->save_live_complete_precopy(f, se->opaque); +> +>>+ +> +>>+ fprintf(stderr, "after saving %s complete\n", se->idstr); +> +>>+ check_host_md5(); +> +>>+ +> +>> trace_savevm_section_end(se->idstr, se->section_id, ret); +> +>> save_section_footer(f, se); +> +>> if (ret < 0) { +> +>>@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +> +>>MigrationIncomingState *mis) +> +>> section_id, le->se->idstr); +> +>> return ret; +> +>> } +> +>>+ if (section_type == QEMU_VM_SECTION_END) { +> +>>+ error_report("after loading state section id %d(%s)", +> +>>+ section_id, le->se->idstr); +> +>>+ check_host_md5(); +> +>>+ } +> +>> if (!check_section_footer(f, le)) { +> +>> return -EINVAL; +> +>> } +> +>>@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) +> +>> } +> +>> +> +>> cpu_synchronize_all_post_init(); +> +>>+ error_report("%s: after cpu_synchronize_all_post_init\n", __func__); +> +>>+ check_host_md5(); +> +>> +> +>> return ret; +> +>> } +> +>> +> +>> +> +>> +> +>-- +> +>Dr. David Alan Gilbert / address@hidden / Manchester, UK +> +> +> +> +> +>. +> +> +> +> +-- +> +Best regards. +> +Li Zhijian (8555) +> +> +-- +Dr. 
David Alan Gilbert / address@hidden / Manchester, UK + +Li Zhijian wrote: +> +Hi all, +> +> +Does anyboday remember the similar issue post by hailiang months ago +> +http://patchwork.ozlabs.org/patch/454322/ +> +At least tow bugs about migration had been fixed since that. +> +> +And now we found the same issue at the tcg vm(kvm is fine), after +> +migration, the content VM's memory is inconsistent. +> +> +we add a patch to check memory content, you can find it from affix +> +> +steps to reporduce: +> +1) apply the patch and re-build qemu +> +2) prepare the ubuntu guest and run memtest in grub. +> +soruce side: +> +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +> +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +> +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +> +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +> +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +> +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +> +pc-i440fx-2.3,accel=tcg,usb=off +> +> +destination side: +> +x86_64-softmmu/qemu-system-x86_64 -netdev tap,id=hn0 -device +> +e1000,id=net-pci0,netdev=hn0,mac=52:54:00:12:34:65 -boot c -drive +> +if=none,file=/home/lizj/ubuntu.raw,id=drive-virtio-disk0 -device +> +virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0 +> +-vnc :7 -m 128 -smp 1 -device piix3-usb-uhci -device usb-tablet -qmp +> +tcp::4444,server,nowait -monitor stdio -cpu qemu64 -machine +> +pc-i440fx-2.3,accel=tcg,usb=off -incoming tcp:0:8881 +> +> +3) start migration +> +with 1000M NIC, migration will finish within 3 min. +> +> +at source: +> +(qemu) migrate tcp:192.168.2.66:8881 +> +after saving ram complete +> +e9e725df678d392b1a83b3a917f332bb +> +qemu-system-x86_64: end ram md5 +> +(qemu) +> +> +at destination: +> +...skip... +> +Completed load of VM with exit code 0 seq iteration 1264 +> +Completed load of VM with exit code 0 seq iteration 1265 +> +Completed load of VM with exit code 0 seq iteration 1266 +> +qemu-system-x86_64: after loading state section id 2(ram) +> +49c2dac7bde0e5e22db7280dcb3824f9 +> +qemu-system-x86_64: end ram md5 +> +qemu-system-x86_64: qemu_loadvm_state: after cpu_synchronize_all_post_init +> +> +49c2dac7bde0e5e22db7280dcb3824f9 +> +qemu-system-x86_64: end ram md5 +> +> +This occurs occasionally and only at tcg machine. It seems that +> +some pages dirtied in source side don't transferred to destination. +> +This problem can be reproduced even if we disable virtio. +> +> +Is it OK for some pages that not transferred to destination when do +> +migration ? Or is it a bug? +> +> +Any idea... +Thanks for describing how to reproduce the bug. +If some pages are not transferred to destination then it is a bug, so we +need to know what the problem is, notice that the problem can be that +TCG is not marking dirty some page, that Migration code "forgets" about +that page, or anything eles altogether, that is what we need to find. + +There are more posibilities, I am not sure that memtest is on 32bit +mode, and it is inside posibility that we are missing some state when we +are on real mode. + +Will try to take a look at this. + +THanks, again. 
+ + +> +> +=================md5 check patch============================= +> +> +diff --git a/Makefile.target b/Makefile.target +> +index 962d004..e2cb8e9 100644 +> +--- a/Makefile.target +> ++++ b/Makefile.target +> +@@ -139,7 +139,7 @@ obj-y += memory.o cputlb.o +> +obj-y += memory_mapping.o +> +obj-y += dump.o +> +obj-y += migration/ram.o migration/savevm.o +> +-LIBS := $(libs_softmmu) $(LIBS) +> ++LIBS := $(libs_softmmu) $(LIBS) -lplumb +> +> +# xen support +> +obj-$(CONFIG_XEN) += xen-common.o +> +diff --git a/migration/ram.c b/migration/ram.c +> +index 1eb155a..3b7a09d 100644 +> +--- a/migration/ram.c +> ++++ b/migration/ram.c +> +@@ -2513,7 +2513,7 @@ static int ram_load(QEMUFile *f, void *opaque, +> +int version_id) +> +} +> +> +rcu_read_unlock(); +> +- DPRINTF("Completed load of VM with exit code %d seq iteration " +> ++ fprintf(stderr, "Completed load of VM with exit code %d seq iteration " +> +"%" PRIu64 "\n", ret, seq_iter); +> +return ret; +> +} +> +diff --git a/migration/savevm.c b/migration/savevm.c +> +index 0ad1b93..3feaa61 100644 +> +--- a/migration/savevm.c +> ++++ b/migration/savevm.c +> +@@ -891,6 +891,29 @@ void qemu_savevm_state_header(QEMUFile *f) +> +> +} +> +> ++#include "exec/ram_addr.h" +> ++#include "qemu/rcu_queue.h" +> ++#include +> ++#ifndef MD5_DIGEST_LENGTH +> ++#define MD5_DIGEST_LENGTH 16 +> ++#endif +> ++ +> ++static void check_host_md5(void) +> ++{ +> ++ int i; +> ++ unsigned char md[MD5_DIGEST_LENGTH]; +> ++ rcu_read_lock(); +> ++ RAMBlock *block = QLIST_FIRST_RCU(&ram_list.blocks);/* Only check +> +'pc.ram' block */ +> ++ rcu_read_unlock(); +> ++ +> ++ MD5(block->host, block->used_length, md); +> ++ for(i = 0; i < MD5_DIGEST_LENGTH; i++) { +> ++ fprintf(stderr, "%02x", md[i]); +> ++ } +> ++ fprintf(stderr, "\n"); +> ++ error_report("end ram md5"); +> ++} +> ++ +> +void qemu_savevm_state_begin(QEMUFile *f, +> +const MigrationParams *params) +> +{ +> +@@ -1056,6 +1079,10 @@ void +> +qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only) +> +save_section_header(f, se, QEMU_VM_SECTION_END); +> +> +ret = se->ops->save_live_complete_precopy(f, se->opaque); +> ++ +> ++ fprintf(stderr, "after saving %s complete\n", se->idstr); +> ++ check_host_md5(); +> ++ +> +trace_savevm_section_end(se->idstr, se->section_id, ret); +> +save_section_footer(f, se); +> +if (ret < 0) { +> +@@ -1791,6 +1818,11 @@ static int qemu_loadvm_state_main(QEMUFile *f, +> +MigrationIncomingState *mis) +> +section_id, le->se->idstr); +> +return ret; +> +} +> ++ if (section_type == QEMU_VM_SECTION_END) { +> ++ error_report("after loading state section id %d(%s)", +> ++ section_id, le->se->idstr); +> ++ check_host_md5(); +> ++ } +> +if (!check_section_footer(f, le)) { +> +return -EINVAL; +> +} +> +@@ -1901,6 +1933,8 @@ int qemu_loadvm_state(QEMUFile *f) +> +} +> +> +cpu_synchronize_all_post_init(); +> ++ error_report("%s: after cpu_synchronize_all_post_init\n", __func__); +> ++ check_host_md5(); +> +> +return ret; +> +} + +> +> +Thanks for describing how to reproduce the bug. +> +If some pages are not transferred to destination then it is a bug, so we need +> +to know what the problem is, notice that the problem can be that TCG is not +> +marking dirty some page, that Migration code "forgets" about that page, or +> +anything eles altogether, that is what we need to find. +> +> +There are more posibilities, I am not sure that memtest is on 32bit mode, and +> +it is inside posibility that we are missing some state when we are on real +> +mode. 
+> +> +Will try to take a look at this. +> +> +THanks, again. +> +Hi Juan & Amit + + Do you think we should add a mechanism to check the data integrity during LM +like Zhijian's patch did? it may be very helpful for developers. + Actually, I did the similar thing before in order to make sure that I did the +right thing we I change the code related to LM. + +Liang + +On (Fri) 04 Dec 2015 [01:43:07], Li, Liang Z wrote: +> +> +> +> Thanks for describing how to reproduce the bug. +> +> If some pages are not transferred to destination then it is a bug, so we +> +> need +> +> to know what the problem is, notice that the problem can be that TCG is not +> +> marking dirty some page, that Migration code "forgets" about that page, or +> +> anything eles altogether, that is what we need to find. +> +> +> +> There are more posibilities, I am not sure that memtest is on 32bit mode, +> +> and +> +> it is inside posibility that we are missing some state when we are on real +> +> mode. +> +> +> +> Will try to take a look at this. +> +> +> +> THanks, again. +> +> +> +> +Hi Juan & Amit +> +> +Do you think we should add a mechanism to check the data integrity during LM +> +like Zhijian's patch did? it may be very helpful for developers. +> +Actually, I did the similar thing before in order to make sure that I did +> +the right thing we I change the code related to LM. +If you mean for debugging, something that's not always on, then I'm +fine with it. + +A script that goes along that shows the result of comparison of the +diff will be helpful too, something that shows how many pages are +differnt, how many bytes in a page on average, and so on. + + Amit + diff --git a/results/classifier/001/mistranslation/74545755 b/results/classifier/001/mistranslation/74545755 new file mode 100644 index 000000000..32d247ac7 --- /dev/null +++ b/results/classifier/001/mistranslation/74545755 @@ -0,0 +1,344 @@ +mistranslation: 0.752 +instruction: 0.700 +other: 0.683 +semantic: 0.669 + +[Bug Report][RFC PATCH 0/1] block: fix failing assert on paused VM migration + +There's a bug (failing assert) which is reproduced during migration of +a paused VM. I am able to reproduce it on a stand with 2 nodes and a common +NFS share, with VM's disk on that share. + +root@fedora40-1-vm:~# virsh domblklist alma8-vm + Target Source +------------------------------------------ + sda /mnt/shared/images/alma8.qcow2 + +root@fedora40-1-vm:~# df -Th /mnt/shared +Filesystem Type Size Used Avail Use% Mounted on +127.0.0.1:/srv/nfsd nfs4 63G 16G 48G 25% /mnt/shared + +On the 1st node: + +root@fedora40-1-vm:~# virsh start alma8-vm ; virsh suspend alma8-vm +root@fedora40-1-vm:~# virsh migrate --compressed --p2p --persistent +--undefinesource --live alma8-vm qemu+ssh://fedora40-2-vm/system + +Then on the 2nd node: + +root@fedora40-2-vm:~# virsh migrate --compressed --p2p --persistent +--undefinesource --live alma8-vm qemu+ssh://fedora40-1-vm/system +error: operation failed: domain is not running + +root@fedora40-2-vm:~# tail -3 /var/log/libvirt/qemu/alma8-vm.log +2024-09-19 13:53:33.336+0000: initiating migration +qemu-system-x86_64: ../block.c:6976: int +bdrv_inactivate_recurse(BlockDriverState *): Assertion `!(bs->open_flags & +BDRV_O_INACTIVE)' failed. 
+2024-09-19 13:53:42.991+0000: shutting down, reason=crashed + +Backtrace: + +(gdb) bt +#0 0x00007f7eaa2f1664 in __pthread_kill_implementation () at /lib64/libc.so.6 +#1 0x00007f7eaa298c4e in raise () at /lib64/libc.so.6 +#2 0x00007f7eaa280902 in abort () at /lib64/libc.so.6 +#3 0x00007f7eaa28081e in __assert_fail_base.cold () at /lib64/libc.so.6 +#4 0x00007f7eaa290d87 in __assert_fail () at /lib64/libc.so.6 +#5 0x0000563c38b95eb8 in bdrv_inactivate_recurse (bs=0x563c3b6c60c0) at +../block.c:6976 +#6 0x0000563c38b95aeb in bdrv_inactivate_all () at ../block.c:7038 +#7 0x0000563c3884d354 in qemu_savevm_state_complete_precopy_non_iterable +(f=0x563c3b700c20, in_postcopy=false, inactivate_disks=true) + at ../migration/savevm.c:1571 +#8 0x0000563c3884dc1a in qemu_savevm_state_complete_precopy (f=0x563c3b700c20, +iterable_only=false, inactivate_disks=true) at ../migration/savevm.c:1631 +#9 0x0000563c3883a340 in migration_completion_precopy (s=0x563c3b4d51f0, +current_active_state=) at ../migration/migration.c:2780 +#10 migration_completion (s=0x563c3b4d51f0) at ../migration/migration.c:2844 +#11 migration_iteration_run (s=0x563c3b4d51f0) at ../migration/migration.c:3270 +#12 migration_thread (opaque=0x563c3b4d51f0) at ../migration/migration.c:3536 +#13 0x0000563c38dbcf14 in qemu_thread_start (args=0x563c3c2d5bf0) at +../util/qemu-thread-posix.c:541 +#14 0x00007f7eaa2ef6d7 in start_thread () at /lib64/libc.so.6 +#15 0x00007f7eaa373414 in clone () at /lib64/libc.so.6 + +What happens here is that after 1st migration BDS related to HDD remains +inactive as VM is still paused. Then when we initiate 2nd migration, +bdrv_inactivate_all() leads to the attempt to set BDRV_O_INACTIVE flag +on that node which is already set, thus assert fails. + +Attached patch which simply skips setting flag if it's already set is more +of a kludge than a clean solution. Should we use more sophisticated logic +which allows some of the nodes be in inactive state prior to the migration, +and takes them into account during bdrv_inactivate_all()? Comments would +be appreciated. + +Andrey + +Andrey Drobyshev (1): + block: do not fail when inactivating node which is inactive + + block.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +-- +2.39.3 + +Instead of throwing an assert let's just ignore that flag is already set +and return. We assume that it's going to be safe to ignore. Otherwise +this assert fails when migrating a paused VM back and forth. + +Ideally we'd like to have a more sophisticated solution, e.g. not even +scan the nodes which should be inactive at this point. + +Signed-off-by: Andrey Drobyshev +--- + block.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/block.c b/block.c +index 7d90007cae..c1dcf906d1 100644 +--- a/block.c ++++ b/block.c +@@ -6973,7 +6973,15 @@ static int GRAPH_RDLOCK +bdrv_inactivate_recurse(BlockDriverState *bs) + return 0; + } + +- assert(!(bs->open_flags & BDRV_O_INACTIVE)); ++ if (bs->open_flags & BDRV_O_INACTIVE) { ++ /* ++ * Return here instead of throwing assert as a workaround to ++ * prevent failure on migrating paused VM. ++ * Here we assume that if we're trying to inactivate BDS that's ++ * already inactive, it's safe to just ignore it. ++ */ ++ return 0; ++ } + + /* Inactivate this node */ + if (bs->drv->bdrv_inactivate) { +-- +2.39.3 + +[add migration maintainers] + +On 24.09.24 15:56, Andrey Drobyshev wrote: +Instead of throwing an assert let's just ignore that flag is already set +and return. 
We assume that it's going to be safe to ignore. Otherwise +this assert fails when migrating a paused VM back and forth. + +Ideally we'd like to have a more sophisticated solution, e.g. not even +scan the nodes which should be inactive at this point. + +Signed-off-by: Andrey Drobyshev +--- + block.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/block.c b/block.c +index 7d90007cae..c1dcf906d1 100644 +--- a/block.c ++++ b/block.c +@@ -6973,7 +6973,15 @@ static int GRAPH_RDLOCK +bdrv_inactivate_recurse(BlockDriverState *bs) + return 0; + } +- assert(!(bs->open_flags & BDRV_O_INACTIVE)); ++ if (bs->open_flags & BDRV_O_INACTIVE) { ++ /* ++ * Return here instead of throwing assert as a workaround to ++ * prevent failure on migrating paused VM. ++ * Here we assume that if we're trying to inactivate BDS that's ++ * already inactive, it's safe to just ignore it. ++ */ ++ return 0; ++ } +/* Inactivate this node */ +if (bs->drv->bdrv_inactivate) { +I doubt that this a correct way to go. + +As far as I understand, "inactive" actually means that "storage is not belong to +qemu, but to someone else (another qemu process for example), and may be changed +transparently". In turn this means that Qemu should do nothing with inactive disks. So the +problem is that nobody called bdrv_activate_all on target, and we shouldn't ignore that. + +Hmm, I see in process_incoming_migration_bh() we do call bdrv_activate_all(), +but only in some scenarios. May be, the condition should be less strict here. + +Why we need any condition here at all? Don't we want to activate block-layer on +target after migration anyway? + +-- +Best regards, +Vladimir + +On 9/30/24 12:25 PM, Vladimir Sementsov-Ogievskiy wrote: +> +[add migration maintainers] +> +> +On 24.09.24 15:56, Andrey Drobyshev wrote: +> +> [...] +> +> +I doubt that this a correct way to go. +> +> +As far as I understand, "inactive" actually means that "storage is not +> +belong to qemu, but to someone else (another qemu process for example), +> +and may be changed transparently". In turn this means that Qemu should +> +do nothing with inactive disks. So the problem is that nobody called +> +bdrv_activate_all on target, and we shouldn't ignore that. +> +> +Hmm, I see in process_incoming_migration_bh() we do call +> +bdrv_activate_all(), but only in some scenarios. May be, the condition +> +should be less strict here. +> +> +Why we need any condition here at all? Don't we want to activate +> +block-layer on target after migration anyway? +> +Hmm I'm not sure about the unconditional activation, since we at least +have to honor LATE_BLOCK_ACTIVATE cap if it's set (and probably delay it +in such a case). In current libvirt upstream I see such code: + +> +/* Migration capabilities which should always be enabled as long as they +> +> +* are supported by QEMU. If the capability is supposed to be enabled on both +> +> +* sides of migration, it won't be enabled unless both sides support it. +> +> +*/ +> +> +static const qemuMigrationParamsAlwaysOnItem qemuMigrationParamsAlwaysOn[] = +> +{ +> +> +{QEMU_MIGRATION_CAP_PAUSE_BEFORE_SWITCHOVER, +> +> +QEMU_MIGRATION_SOURCE}, +> +> +> +> +{QEMU_MIGRATION_CAP_LATE_BLOCK_ACTIVATE, +> +> +QEMU_MIGRATION_DESTINATION}, +> +> +}; +which means that libvirt always wants LATE_BLOCK_ACTIVATE to be set. 
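(For reference, the capability libvirt toggles here is exposed via QMP as "late-block-activate". A minimal, hand-typed example of enabling it on the destination, assuming a QMP monitor is available, is a sketch only and not part of the patch being discussed:

{ "execute": "migrate-set-capabilities",
  "arguments": { "capabilities": [
      { "capability": "late-block-activate", "state": true } ] } }

and "query-migrate-capabilities" reports whether it is currently enabled. Since libvirt always turns it on for the incoming side, any fix has to behave correctly when block activation is deferred this way.)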
+ +The code from process_incoming_migration_bh() you're referring to: + +> +/* If capability late_block_activate is set: +> +> +* Only fire up the block code now if we're going to restart the +> +> +* VM, else 'cont' will do it. +> +> +* This causes file locking to happen; so we don't want it to happen +> +> +* unless we really are starting the VM. +> +> +*/ +> +> +if (!migrate_late_block_activate() || +> +> +(autostart && (!global_state_received() || +> +> +runstate_is_live(global_state_get_runstate())))) { +> +> +/* Make sure all file formats throw away their mutable metadata. +> +> +> +* If we get an error here, just don't restart the VM yet. */ +> +> +bdrv_activate_all(&local_err); +> +> +if (local_err) { +> +> +error_report_err(local_err); +> +> +local_err = NULL; +> +> +autostart = false; +> +> +} +> +> +} +It states explicitly that we're either going to start VM right at this +point if (autostart == true), or we wait till "cont" command happens. +None of this is going to happen if we start another migration while +still being in PAUSED state. So I think it seems reasonable to take +such case into account. For instance, this patch does prevent the crash: + +> +diff --git a/migration/migration.c b/migration/migration.c +> +index ae2be31557..3222f6745b 100644 +> +--- a/migration/migration.c +> ++++ b/migration/migration.c +> +@@ -733,7 +733,8 @@ static void process_incoming_migration_bh(void *opaque) +> +*/ +> +if (!migrate_late_block_activate() || +> +(autostart && (!global_state_received() || +> +- runstate_is_live(global_state_get_runstate())))) { +> ++ runstate_is_live(global_state_get_runstate()))) || +> ++ (!autostart && global_state_get_runstate() == RUN_STATE_PAUSED)) { +> +/* Make sure all file formats throw away their mutable metadata. +> +* If we get an error here, just don't restart the VM yet. */ +> +bdrv_activate_all(&local_err); +What are your thoughts on it? + +Andrey + diff --git a/results/classifier/001/mistranslation/80604314 b/results/classifier/001/mistranslation/80604314 new file mode 100644 index 000000000..798c2e866 --- /dev/null +++ b/results/classifier/001/mistranslation/80604314 @@ -0,0 +1,1480 @@ +mistranslation: 0.922 +other: 0.898 +semantic: 0.890 +instruction: 0.877 + +[BUG] vhost-vdpa: qemu-system-s390x crashes with second virtio-net-ccw device + +When I start qemu with a second virtio-net-ccw device (i.e. adding +-device virtio-net-ccw in addition to the autogenerated device), I get +a segfault. gdb points to + +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, + config=0x55d6ad9e3f80 "RT") at /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + +(backtrace doesn't go further) + +Starting qemu with no additional "-device virtio-net-ccw" (i.e., only +the autogenerated virtio-net-ccw device is present) works. Specifying +several "-device virtio-net-pci" works as well. + +Things break with 1e0a84ea49b6 ("vhost-vdpa: introduce vhost-vdpa net +client"), 38140cc4d971 ("vhost_net: introduce set_config & get_config") +works (in-between state does not compile). + +This is reproducible with tcg as well. Same problem both with +--enable-vhost-vdpa and --disable-vhost-vdpa. + +Have not yet tried to figure out what might be special with +virtio-ccw... anyone have an idea? + +[This should probably be considered a blocker?] + +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +When I start qemu with a second virtio-net-ccw device (i.e. 
adding +> +-device virtio-net-ccw in addition to the autogenerated device), I get +> +a segfault. gdb points to +> +> +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, +> +config=0x55d6ad9e3f80 "RT") at +> +/home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { +> +> +(backtrace doesn't go further) +> +> +Starting qemu with no additional "-device virtio-net-ccw" (i.e., only +> +the autogenerated virtio-net-ccw device is present) works. Specifying +> +several "-device virtio-net-pci" works as well. +> +> +Things break with 1e0a84ea49b6 ("vhost-vdpa: introduce vhost-vdpa net +> +client"), 38140cc4d971 ("vhost_net: introduce set_config & get_config") +> +works (in-between state does not compile). +Ouch. I didn't test all in-between states :( +But I wish we had a 0-day instrastructure like kernel has, +that catches things like that. + +> +This is reproducible with tcg as well. Same problem both with +> +--enable-vhost-vdpa and --disable-vhost-vdpa. +> +> +Have not yet tried to figure out what might be special with +> +virtio-ccw... anyone have an idea? +> +> +[This should probably be considered a blocker?] + +On Fri, 24 Jul 2020 09:30:58 -0400 +"Michael S. Tsirkin" wrote: + +> +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +> When I start qemu with a second virtio-net-ccw device (i.e. adding +> +> -device virtio-net-ccw in addition to the autogenerated device), I get +> +> a segfault. gdb points to +> +> +> +> #0 0x000055d6ab52681d in virtio_net_get_config (vdev=, +> +> config=0x55d6ad9e3f80 "RT") at +> +> /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +> 146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { +> +> +> +> (backtrace doesn't go further) +The core was incomplete, but running under gdb directly shows that it +is just a bog-standard config space access (first for that device). + +The cause of the crash is that nc->peer is not set... no idea how that +can happen, not that familiar with that part of QEMU. (Should the code +check, or is that really something that should not happen?) + +What I don't understand is why it is set correctly for the first, +autogenerated virtio-net-ccw device, but not for the second one, and +why virtio-net-pci doesn't show these problems. The only difference +between -ccw and -pci that comes to my mind here is that config space +accesses for ccw are done via an asynchronous operation, so timing +might be different. + +> +> +> +> Starting qemu with no additional "-device virtio-net-ccw" (i.e., only +> +> the autogenerated virtio-net-ccw device is present) works. Specifying +> +> several "-device virtio-net-pci" works as well. +> +> +> +> Things break with 1e0a84ea49b6 ("vhost-vdpa: introduce vhost-vdpa net +> +> client"), 38140cc4d971 ("vhost_net: introduce set_config & get_config") +> +> works (in-between state does not compile). +> +> +Ouch. I didn't test all in-between states :( +> +But I wish we had a 0-day instrastructure like kernel has, +> +that catches things like that. +Yep, that would be useful... so patchew only builds the complete series? + +> +> +> This is reproducible with tcg as well. Same problem both with +> +> --enable-vhost-vdpa and --disable-vhost-vdpa. +> +> +> +> Have not yet tried to figure out what might be special with +> +> virtio-ccw... anyone have an idea? +> +> +> +> [This should probably be considered a blocker?] +I think so, as it makes s390x unusable with more that one +virtio-net-ccw device, and I don't even see a workaround. 
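To make the failure mode concrete: the crash is an unguarded dereference of nc->peer in virtio_net_get_config(), and the patch Jason attaches further down in this thread adds an existence check before the peer is touched. A standalone sketch of that pattern, using stand-in types rather than the real QEMU ones:

/* sketch only: stand-in types, not actual QEMU code */
#include <stdio.h>

typedef enum {
    NET_CLIENT_DRIVER_NONE,
    NET_CLIENT_DRIVER_VHOST_VDPA
} NetClientDriver;

typedef struct {
    NetClientDriver type;
} NetClientInfo;

typedef struct NetClientState {
    struct NetClientState *peer;   /* NULL when no netdev backend is attached */
    NetClientInfo *info;
} NetClientState;

static int config_comes_from_vdpa(const NetClientState *nc)
{
    /* Unguarded form, which is what crashes at virtio-net.c:146 when
     * peer is NULL:
     *     return nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA;
     * Guarded form, the kind of check the attached patch adds: */
    return nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA;
}

int main(void)
{
    NetClientState peerless = { .peer = NULL, .info = NULL };
    printf("vdpa config path taken: %d\n", config_comes_from_vdpa(&peerless));
    return 0;
}

With a guard like that in place, a virtio-net device that ends up without a peer simply skips the vhost-vdpa config path instead of dereferencing a NULL pointer.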
+ +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +> +On Fri, 24 Jul 2020 09:30:58 -0400 +> +"Michael S. Tsirkin" wrote: +> +> +> On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +> > When I start qemu with a second virtio-net-ccw device (i.e. adding +> +> > -device virtio-net-ccw in addition to the autogenerated device), I get +> +> > a segfault. gdb points to +> +> > +> +> > #0 0x000055d6ab52681d in virtio_net_get_config (vdev=, +> +> > config=0x55d6ad9e3f80 "RT") at +> +> > /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +> > 146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { +> +> > +> +> > (backtrace doesn't go further) +> +> +The core was incomplete, but running under gdb directly shows that it +> +is just a bog-standard config space access (first for that device). +> +> +The cause of the crash is that nc->peer is not set... no idea how that +> +can happen, not that familiar with that part of QEMU. (Should the code +> +check, or is that really something that should not happen?) +> +> +What I don't understand is why it is set correctly for the first, +> +autogenerated virtio-net-ccw device, but not for the second one, and +> +why virtio-net-pci doesn't show these problems. The only difference +> +between -ccw and -pci that comes to my mind here is that config space +> +accesses for ccw are done via an asynchronous operation, so timing +> +might be different. +Hopefully Jason has an idea. Could you post a full command line +please? Do you need a working guest to trigger this? Does this trigger +on an x86 host? + +> +> > +> +> > Starting qemu with no additional "-device virtio-net-ccw" (i.e., only +> +> > the autogenerated virtio-net-ccw device is present) works. Specifying +> +> > several "-device virtio-net-pci" works as well. +> +> > +> +> > Things break with 1e0a84ea49b6 ("vhost-vdpa: introduce vhost-vdpa net +> +> > client"), 38140cc4d971 ("vhost_net: introduce set_config & get_config") +> +> > works (in-between state does not compile). +> +> +> +> Ouch. I didn't test all in-between states :( +> +> But I wish we had a 0-day instrastructure like kernel has, +> +> that catches things like that. +> +> +Yep, that would be useful... so patchew only builds the complete series? +> +> +> +> +> > This is reproducible with tcg as well. Same problem both with +> +> > --enable-vhost-vdpa and --disable-vhost-vdpa. +> +> > +> +> > Have not yet tried to figure out what might be special with +> +> > virtio-ccw... anyone have an idea? +> +> > +> +> > [This should probably be considered a blocker?] +> +> +I think so, as it makes s390x unusable with more that one +> +virtio-net-ccw device, and I don't even see a workaround. + +On Fri, 24 Jul 2020 11:17:57 -0400 +"Michael S. Tsirkin" wrote: + +> +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +> +> On Fri, 24 Jul 2020 09:30:58 -0400 +> +> "Michael S. Tsirkin" wrote: +> +> +> +> > On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +> > > When I start qemu with a second virtio-net-ccw device (i.e. adding +> +> > > -device virtio-net-ccw in addition to the autogenerated device), I get +> +> > > a segfault. 
gdb points to +> +> > > +> +> > > #0 0x000055d6ab52681d in virtio_net_get_config (vdev=, +> +> > > config=0x55d6ad9e3f80 "RT") at +> +> > > /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +> > > 146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { +> +> > > +> +> > > (backtrace doesn't go further) +> +> +> +> The core was incomplete, but running under gdb directly shows that it +> +> is just a bog-standard config space access (first for that device). +> +> +> +> The cause of the crash is that nc->peer is not set... no idea how that +> +> can happen, not that familiar with that part of QEMU. (Should the code +> +> check, or is that really something that should not happen?) +> +> +> +> What I don't understand is why it is set correctly for the first, +> +> autogenerated virtio-net-ccw device, but not for the second one, and +> +> why virtio-net-pci doesn't show these problems. The only difference +> +> between -ccw and -pci that comes to my mind here is that config space +> +> accesses for ccw are done via an asynchronous operation, so timing +> +> might be different. +> +> +Hopefully Jason has an idea. Could you post a full command line +> +please? Do you need a working guest to trigger this? Does this trigger +> +on an x86 host? +Yes, it does trigger with tcg-on-x86 as well. I've been using + +s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu qemu,zpci=on +-m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +-drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +-device +scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 + +-device virtio-net-ccw + +It seems it needs the guest actually doing something with the nics; I +cannot reproduce the crash if I use the old advent calendar moon buggy +image and just add a virtio-net-ccw device. + +(I don't think it's a problem with my local build, as I see the problem +both on my laptop and on an LPAR.) + +> +> +> > > +> +> > > Starting qemu with no additional "-device virtio-net-ccw" (i.e., only +> +> > > the autogenerated virtio-net-ccw device is present) works. Specifying +> +> > > several "-device virtio-net-pci" works as well. +> +> > > +> +> > > Things break with 1e0a84ea49b6 ("vhost-vdpa: introduce vhost-vdpa net +> +> > > client"), 38140cc4d971 ("vhost_net: introduce set_config & get_config") +> +> > > works (in-between state does not compile). +> +> > +> +> > Ouch. I didn't test all in-between states :( +> +> > But I wish we had a 0-day instrastructure like kernel has, +> +> > that catches things like that. +> +> +> +> Yep, that would be useful... so patchew only builds the complete series? +> +> +> +> > +> +> > > This is reproducible with tcg as well. Same problem both with +> +> > > --enable-vhost-vdpa and --disable-vhost-vdpa. +> +> > > +> +> > > Have not yet tried to figure out what might be special with +> +> > > virtio-ccw... anyone have an idea? +> +> > > +> +> > > [This should probably be considered a blocker?] +> +> +> +> I think so, as it makes s390x unusable with more that one +> +> virtio-net-ccw device, and I don't even see a workaround. +> + +On 2020/7/24 下午11:34, Cornelia Huck wrote: +On Fri, 24 Jul 2020 11:17:57 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +On Fri, 24 Jul 2020 09:30:58 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +When I start qemu with a second virtio-net-ccw device (i.e. 
adding +-device virtio-net-ccw in addition to the autogenerated device), I get +a segfault. gdb points to + +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, + config=0x55d6ad9e3f80 "RT") at +/home/cohuck/git/qemu/hw/net/virtio-net.c:146 +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + +(backtrace doesn't go further) +The core was incomplete, but running under gdb directly shows that it +is just a bog-standard config space access (first for that device). + +The cause of the crash is that nc->peer is not set... no idea how that +can happen, not that familiar with that part of QEMU. (Should the code +check, or is that really something that should not happen?) + +What I don't understand is why it is set correctly for the first, +autogenerated virtio-net-ccw device, but not for the second one, and +why virtio-net-pci doesn't show these problems. The only difference +between -ccw and -pci that comes to my mind here is that config space +accesses for ccw are done via an asynchronous operation, so timing +might be different. +Hopefully Jason has an idea. Could you post a full command line +please? Do you need a working guest to trigger this? Does this trigger +on an x86 host? +Yes, it does trigger with tcg-on-x86 as well. I've been using + +s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu qemu,zpci=on +-m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +-drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +-device +scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +-device virtio-net-ccw + +It seems it needs the guest actually doing something with the nics; I +cannot reproduce the crash if I use the old advent calendar moon buggy +image and just add a virtio-net-ccw device. + +(I don't think it's a problem with my local build, as I see the problem +both on my laptop and on an LPAR.) +It looks to me we forget the check the existence of peer. + +Please try the attached patch to see if it works. + +Thanks +0001-virtio-net-check-the-existence-of-peer-before-accesi.patch +Description: +Text Data + +On Sat, 25 Jul 2020 08:40:07 +0800 +Jason Wang wrote: + +> +On 2020/7/24 下午11:34, Cornelia Huck wrote: +> +> On Fri, 24 Jul 2020 11:17:57 -0400 +> +> "Michael S. Tsirkin" wrote: +> +> +> +>> On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +> +>>> On Fri, 24 Jul 2020 09:30:58 -0400 +> +>>> "Michael S. Tsirkin" wrote: +> +>>> +> +>>>> On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +>>>>> When I start qemu with a second virtio-net-ccw device (i.e. adding +> +>>>>> -device virtio-net-ccw in addition to the autogenerated device), I get +> +>>>>> a segfault. gdb points to +> +>>>>> +> +>>>>> #0 0x000055d6ab52681d in virtio_net_get_config (vdev=, +> +>>>>> config=0x55d6ad9e3f80 "RT") at +> +>>>>> /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +>>>>> 146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { +> +>>>>> +> +>>>>> (backtrace doesn't go further) +> +>>> The core was incomplete, but running under gdb directly shows that it +> +>>> is just a bog-standard config space access (first for that device). +> +>>> +> +>>> The cause of the crash is that nc->peer is not set... no idea how that +> +>>> can happen, not that familiar with that part of QEMU. (Should the code +> +>>> check, or is that really something that should not happen?) 
+> +>>> +> +>>> What I don't understand is why it is set correctly for the first, +> +>>> autogenerated virtio-net-ccw device, but not for the second one, and +> +>>> why virtio-net-pci doesn't show these problems. The only difference +> +>>> between -ccw and -pci that comes to my mind here is that config space +> +>>> accesses for ccw are done via an asynchronous operation, so timing +> +>>> might be different. +> +>> Hopefully Jason has an idea. Could you post a full command line +> +>> please? Do you need a working guest to trigger this? Does this trigger +> +>> on an x86 host? +> +> Yes, it does trigger with tcg-on-x86 as well. I've been using +> +> +> +> s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu +> +> qemu,zpci=on +> +> -m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +> +> -drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +> +> -device +> +> scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +> +> -device virtio-net-ccw +> +> +> +> It seems it needs the guest actually doing something with the nics; I +> +> cannot reproduce the crash if I use the old advent calendar moon buggy +> +> image and just add a virtio-net-ccw device. +> +> +> +> (I don't think it's a problem with my local build, as I see the problem +> +> both on my laptop and on an LPAR.) +> +> +> +It looks to me we forget the check the existence of peer. +> +> +Please try the attached patch to see if it works. +Thanks, that patch gets my guest up and running again. So, FWIW, + +Tested-by: Cornelia Huck + +Any idea why this did not hit with virtio-net-pci (or the autogenerated +virtio-net-ccw device)? + +On 2020/7/27 下午2:43, Cornelia Huck wrote: +On Sat, 25 Jul 2020 08:40:07 +0800 +Jason Wang wrote: +On 2020/7/24 下午11:34, Cornelia Huck wrote: +On Fri, 24 Jul 2020 11:17:57 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +On Fri, 24 Jul 2020 09:30:58 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +When I start qemu with a second virtio-net-ccw device (i.e. adding +-device virtio-net-ccw in addition to the autogenerated device), I get +a segfault. gdb points to + +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, + config=0x55d6ad9e3f80 "RT") at +/home/cohuck/git/qemu/hw/net/virtio-net.c:146 +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + +(backtrace doesn't go further) +The core was incomplete, but running under gdb directly shows that it +is just a bog-standard config space access (first for that device). + +The cause of the crash is that nc->peer is not set... no idea how that +can happen, not that familiar with that part of QEMU. (Should the code +check, or is that really something that should not happen?) + +What I don't understand is why it is set correctly for the first, +autogenerated virtio-net-ccw device, but not for the second one, and +why virtio-net-pci doesn't show these problems. The only difference +between -ccw and -pci that comes to my mind here is that config space +accesses for ccw are done via an asynchronous operation, so timing +might be different. +Hopefully Jason has an idea. Could you post a full command line +please? Do you need a working guest to trigger this? Does this trigger +on an x86 host? +Yes, it does trigger with tcg-on-x86 as well. 
I've been using + +s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu qemu,zpci=on +-m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +-drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +-device +scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +-device virtio-net-ccw + +It seems it needs the guest actually doing something with the nics; I +cannot reproduce the crash if I use the old advent calendar moon buggy +image and just add a virtio-net-ccw device. + +(I don't think it's a problem with my local build, as I see the problem +both on my laptop and on an LPAR.) +It looks to me we forget the check the existence of peer. + +Please try the attached patch to see if it works. +Thanks, that patch gets my guest up and running again. So, FWIW, + +Tested-by: Cornelia Huck + +Any idea why this did not hit with virtio-net-pci (or the autogenerated +virtio-net-ccw device)? +It can be hit with virtio-net-pci as well (just start without peer). +For autogenerated virtio-net-cww, I think the reason is that it has +already had a peer set. +Thanks + +On Mon, 27 Jul 2020 15:38:12 +0800 +Jason Wang wrote: + +> +On 2020/7/27 下午2:43, Cornelia Huck wrote: +> +> On Sat, 25 Jul 2020 08:40:07 +0800 +> +> Jason Wang wrote: +> +> +> +>> On 2020/7/24 下午11:34, Cornelia Huck wrote: +> +>>> On Fri, 24 Jul 2020 11:17:57 -0400 +> +>>> "Michael S. Tsirkin" wrote: +> +>>> +> +>>>> On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +> +>>>>> On Fri, 24 Jul 2020 09:30:58 -0400 +> +>>>>> "Michael S. Tsirkin" wrote: +> +>>>>> +> +>>>>>> On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +>>>>>>> When I start qemu with a second virtio-net-ccw device (i.e. adding +> +>>>>>>> -device virtio-net-ccw in addition to the autogenerated device), I get +> +>>>>>>> a segfault. gdb points to +> +>>>>>>> +> +>>>>>>> #0 0x000055d6ab52681d in virtio_net_get_config (vdev=, +> +>>>>>>> config=0x55d6ad9e3f80 "RT") at +> +>>>>>>> /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +>>>>>>> 146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { +> +>>>>>>> +> +>>>>>>> (backtrace doesn't go further) +> +>>>>> The core was incomplete, but running under gdb directly shows that it +> +>>>>> is just a bog-standard config space access (first for that device). +> +>>>>> +> +>>>>> The cause of the crash is that nc->peer is not set... no idea how that +> +>>>>> can happen, not that familiar with that part of QEMU. (Should the code +> +>>>>> check, or is that really something that should not happen?) +> +>>>>> +> +>>>>> What I don't understand is why it is set correctly for the first, +> +>>>>> autogenerated virtio-net-ccw device, but not for the second one, and +> +>>>>> why virtio-net-pci doesn't show these problems. The only difference +> +>>>>> between -ccw and -pci that comes to my mind here is that config space +> +>>>>> accesses for ccw are done via an asynchronous operation, so timing +> +>>>>> might be different. +> +>>>> Hopefully Jason has an idea. Could you post a full command line +> +>>>> please? Do you need a working guest to trigger this? Does this trigger +> +>>>> on an x86 host? +> +>>> Yes, it does trigger with tcg-on-x86 as well. 
I've been using +> +>>> +> +>>> s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu +> +>>> qemu,zpci=on +> +>>> -m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +> +>>> -drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +> +>>> -device +> +>>> scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +> +>>> -device virtio-net-ccw +> +>>> +> +>>> It seems it needs the guest actually doing something with the nics; I +> +>>> cannot reproduce the crash if I use the old advent calendar moon buggy +> +>>> image and just add a virtio-net-ccw device. +> +>>> +> +>>> (I don't think it's a problem with my local build, as I see the problem +> +>>> both on my laptop and on an LPAR.) +> +>> +> +>> It looks to me we forget the check the existence of peer. +> +>> +> +>> Please try the attached patch to see if it works. +> +> Thanks, that patch gets my guest up and running again. So, FWIW, +> +> +> +> Tested-by: Cornelia Huck +> +> +> +> Any idea why this did not hit with virtio-net-pci (or the autogenerated +> +> virtio-net-ccw device)? +> +> +> +It can be hit with virtio-net-pci as well (just start without peer). +Hm, I had not been able to reproduce the crash with a 'naked' -device +virtio-net-pci. But checking seems to be the right idea anyway. + +> +> +For autogenerated virtio-net-cww, I think the reason is that it has +> +already had a peer set. +Ok, that might well be. + +On 2020/7/27 下午4:41, Cornelia Huck wrote: +On Mon, 27 Jul 2020 15:38:12 +0800 +Jason Wang wrote: +On 2020/7/27 下午2:43, Cornelia Huck wrote: +On Sat, 25 Jul 2020 08:40:07 +0800 +Jason Wang wrote: +On 2020/7/24 下午11:34, Cornelia Huck wrote: +On Fri, 24 Jul 2020 11:17:57 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +On Fri, 24 Jul 2020 09:30:58 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +When I start qemu with a second virtio-net-ccw device (i.e. adding +-device virtio-net-ccw in addition to the autogenerated device), I get +a segfault. gdb points to + +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, + config=0x55d6ad9e3f80 "RT") at +/home/cohuck/git/qemu/hw/net/virtio-net.c:146 +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + +(backtrace doesn't go further) +The core was incomplete, but running under gdb directly shows that it +is just a bog-standard config space access (first for that device). + +The cause of the crash is that nc->peer is not set... no idea how that +can happen, not that familiar with that part of QEMU. (Should the code +check, or is that really something that should not happen?) + +What I don't understand is why it is set correctly for the first, +autogenerated virtio-net-ccw device, but not for the second one, and +why virtio-net-pci doesn't show these problems. The only difference +between -ccw and -pci that comes to my mind here is that config space +accesses for ccw are done via an asynchronous operation, so timing +might be different. +Hopefully Jason has an idea. Could you post a full command line +please? Do you need a working guest to trigger this? Does this trigger +on an x86 host? +Yes, it does trigger with tcg-on-x86 as well. 
I've been using + +s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu qemu,zpci=on +-m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +-drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +-device +scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +-device virtio-net-ccw + +It seems it needs the guest actually doing something with the nics; I +cannot reproduce the crash if I use the old advent calendar moon buggy +image and just add a virtio-net-ccw device. + +(I don't think it's a problem with my local build, as I see the problem +both on my laptop and on an LPAR.) +It looks to me we forget the check the existence of peer. + +Please try the attached patch to see if it works. +Thanks, that patch gets my guest up and running again. So, FWIW, + +Tested-by: Cornelia Huck + +Any idea why this did not hit with virtio-net-pci (or the autogenerated +virtio-net-ccw device)? +It can be hit with virtio-net-pci as well (just start without peer). +Hm, I had not been able to reproduce the crash with a 'naked' -device +virtio-net-pci. But checking seems to be the right idea anyway. +Sorry for being unclear, I meant for networking part, you just need +start without peer, and you need a real guest (any Linux) that is trying +to access the config space of virtio-net. +Thanks +For autogenerated virtio-net-cww, I think the reason is that it has +already had a peer set. +Ok, that might well be. + +On Mon, Jul 27, 2020 at 04:51:23PM +0800, Jason Wang wrote: +> +> +On 2020/7/27 下午4:41, Cornelia Huck wrote: +> +> On Mon, 27 Jul 2020 15:38:12 +0800 +> +> Jason Wang wrote: +> +> +> +> > On 2020/7/27 下午2:43, Cornelia Huck wrote: +> +> > > On Sat, 25 Jul 2020 08:40:07 +0800 +> +> > > Jason Wang wrote: +> +> > > > On 2020/7/24 下午11:34, Cornelia Huck wrote: +> +> > > > > On Fri, 24 Jul 2020 11:17:57 -0400 +> +> > > > > "Michael S. Tsirkin" wrote: +> +> > > > > > On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +> +> > > > > > > On Fri, 24 Jul 2020 09:30:58 -0400 +> +> > > > > > > "Michael S. Tsirkin" wrote: +> +> > > > > > > > On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +> +> > > > > > > > > When I start qemu with a second virtio-net-ccw device (i.e. +> +> > > > > > > > > adding +> +> > > > > > > > > -device virtio-net-ccw in addition to the autogenerated +> +> > > > > > > > > device), I get +> +> > > > > > > > > a segfault. gdb points to +> +> > > > > > > > > +> +> > > > > > > > > #0 0x000055d6ab52681d in virtio_net_get_config +> +> > > > > > > > > (vdev=, +> +> > > > > > > > > config=0x55d6ad9e3f80 "RT") at +> +> > > > > > > > > /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +> > > > > > > > > 146 if (nc->peer->info->type == +> +> > > > > > > > > NET_CLIENT_DRIVER_VHOST_VDPA) { +> +> > > > > > > > > +> +> > > > > > > > > (backtrace doesn't go further) +> +> > > > > > > The core was incomplete, but running under gdb directly shows +> +> > > > > > > that it +> +> > > > > > > is just a bog-standard config space access (first for that +> +> > > > > > > device). +> +> > > > > > > +> +> > > > > > > The cause of the crash is that nc->peer is not set... no idea +> +> > > > > > > how that +> +> > > > > > > can happen, not that familiar with that part of QEMU. (Should +> +> > > > > > > the code +> +> > > > > > > check, or is that really something that should not happen?) 
+> +> > > > > > > +> +> > > > > > > What I don't understand is why it is set correctly for the +> +> > > > > > > first, +> +> > > > > > > autogenerated virtio-net-ccw device, but not for the second +> +> > > > > > > one, and +> +> > > > > > > why virtio-net-pci doesn't show these problems. The only +> +> > > > > > > difference +> +> > > > > > > between -ccw and -pci that comes to my mind here is that config +> +> > > > > > > space +> +> > > > > > > accesses for ccw are done via an asynchronous operation, so +> +> > > > > > > timing +> +> > > > > > > might be different. +> +> > > > > > Hopefully Jason has an idea. Could you post a full command line +> +> > > > > > please? Do you need a working guest to trigger this? Does this +> +> > > > > > trigger +> +> > > > > > on an x86 host? +> +> > > > > Yes, it does trigger with tcg-on-x86 as well. I've been using +> +> > > > > +> +> > > > > s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu +> +> > > > > qemu,zpci=on +> +> > > > > -m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +> +> > > > > -drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +> +> > > > > -device +> +> > > > > scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +> +> > > > > -device virtio-net-ccw +> +> > > > > +> +> > > > > It seems it needs the guest actually doing something with the nics; +> +> > > > > I +> +> > > > > cannot reproduce the crash if I use the old advent calendar moon +> +> > > > > buggy +> +> > > > > image and just add a virtio-net-ccw device. +> +> > > > > +> +> > > > > (I don't think it's a problem with my local build, as I see the +> +> > > > > problem +> +> > > > > both on my laptop and on an LPAR.) +> +> > > > It looks to me we forget the check the existence of peer. +> +> > > > +> +> > > > Please try the attached patch to see if it works. +> +> > > Thanks, that patch gets my guest up and running again. So, FWIW, +> +> > > +> +> > > Tested-by: Cornelia Huck +> +> > > +> +> > > Any idea why this did not hit with virtio-net-pci (or the autogenerated +> +> > > virtio-net-ccw device)? +> +> > +> +> > It can be hit with virtio-net-pci as well (just start without peer). +> +> Hm, I had not been able to reproduce the crash with a 'naked' -device +> +> virtio-net-pci. But checking seems to be the right idea anyway. +> +> +> +Sorry for being unclear, I meant for networking part, you just need start +> +without peer, and you need a real guest (any Linux) that is trying to access +> +the config space of virtio-net. +> +> +Thanks +A pxe guest will do it, but that doesn't support ccw, right? + +I'm still unclear why this triggers with ccw but not pci - +any idea? + +> +> +> +> +> > For autogenerated virtio-net-cww, I think the reason is that it has +> +> > already had a peer set. +> +> Ok, that might well be. +> +> +> +> + +On 2020/7/27 下午7:43, Michael S. Tsirkin wrote: +On Mon, Jul 27, 2020 at 04:51:23PM +0800, Jason Wang wrote: +On 2020/7/27 下午4:41, Cornelia Huck wrote: +On Mon, 27 Jul 2020 15:38:12 +0800 +Jason Wang wrote: +On 2020/7/27 下午2:43, Cornelia Huck wrote: +On Sat, 25 Jul 2020 08:40:07 +0800 +Jason Wang wrote: +On 2020/7/24 下午11:34, Cornelia Huck wrote: +On Fri, 24 Jul 2020 11:17:57 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +On Fri, 24 Jul 2020 09:30:58 -0400 +"Michael S. 
Tsirkin" wrote: +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +When I start qemu with a second virtio-net-ccw device (i.e. adding +-device virtio-net-ccw in addition to the autogenerated device), I get +a segfault. gdb points to + +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, + config=0x55d6ad9e3f80 "RT") at +/home/cohuck/git/qemu/hw/net/virtio-net.c:146 +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + +(backtrace doesn't go further) +The core was incomplete, but running under gdb directly shows that it +is just a bog-standard config space access (first for that device). + +The cause of the crash is that nc->peer is not set... no idea how that +can happen, not that familiar with that part of QEMU. (Should the code +check, or is that really something that should not happen?) + +What I don't understand is why it is set correctly for the first, +autogenerated virtio-net-ccw device, but not for the second one, and +why virtio-net-pci doesn't show these problems. The only difference +between -ccw and -pci that comes to my mind here is that config space +accesses for ccw are done via an asynchronous operation, so timing +might be different. +Hopefully Jason has an idea. Could you post a full command line +please? Do you need a working guest to trigger this? Does this trigger +on an x86 host? +Yes, it does trigger with tcg-on-x86 as well. I've been using + +s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu qemu,zpci=on +-m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +-drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +-device +scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +-device virtio-net-ccw + +It seems it needs the guest actually doing something with the nics; I +cannot reproduce the crash if I use the old advent calendar moon buggy +image and just add a virtio-net-ccw device. + +(I don't think it's a problem with my local build, as I see the problem +both on my laptop and on an LPAR.) +It looks to me we forget the check the existence of peer. + +Please try the attached patch to see if it works. +Thanks, that patch gets my guest up and running again. So, FWIW, + +Tested-by: Cornelia Huck + +Any idea why this did not hit with virtio-net-pci (or the autogenerated +virtio-net-ccw device)? +It can be hit with virtio-net-pci as well (just start without peer). +Hm, I had not been able to reproduce the crash with a 'naked' -device +virtio-net-pci. But checking seems to be the right idea anyway. +Sorry for being unclear, I meant for networking part, you just need start +without peer, and you need a real guest (any Linux) that is trying to access +the config space of virtio-net. + +Thanks +A pxe guest will do it, but that doesn't support ccw, right? +Yes, it depends on the cli actually. +I'm still unclear why this triggers with ccw but not pci - +any idea? +I don't test pxe but I can reproduce this with pci (just start a linux +guest without a peer). +Thanks + +On Mon, Jul 27, 2020 at 08:44:09PM +0800, Jason Wang wrote: +> +> +On 2020/7/27 下午7:43, Michael S. 
Tsirkin wrote: +> +> On Mon, Jul 27, 2020 at 04:51:23PM +0800, Jason Wang wrote: +> +> > On 2020/7/27 下午4:41, Cornelia Huck wrote: +> +> > > On Mon, 27 Jul 2020 15:38:12 +0800 +> +> > > Jason Wang wrote: +> +> > > +> +> > > > On 2020/7/27 下午2:43, Cornelia Huck wrote: +> +> > > > > On Sat, 25 Jul 2020 08:40:07 +0800 +> +> > > > > Jason Wang wrote: +> +> > > > > > On 2020/7/24 下午11:34, Cornelia Huck wrote: +> +> > > > > > > On Fri, 24 Jul 2020 11:17:57 -0400 +> +> > > > > > > "Michael S. Tsirkin" wrote: +> +> > > > > > > > On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +> +> > > > > > > > > On Fri, 24 Jul 2020 09:30:58 -0400 +> +> > > > > > > > > "Michael S. Tsirkin" wrote: +> +> > > > > > > > > > On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck +> +> > > > > > > > > > wrote: +> +> > > > > > > > > > > When I start qemu with a second virtio-net-ccw device +> +> > > > > > > > > > > (i.e. adding +> +> > > > > > > > > > > -device virtio-net-ccw in addition to the autogenerated +> +> > > > > > > > > > > device), I get +> +> > > > > > > > > > > a segfault. gdb points to +> +> > > > > > > > > > > +> +> > > > > > > > > > > #0 0x000055d6ab52681d in virtio_net_get_config +> +> > > > > > > > > > > (vdev=, +> +> > > > > > > > > > > config=0x55d6ad9e3f80 "RT") at +> +> > > > > > > > > > > /home/cohuck/git/qemu/hw/net/virtio-net.c:146 +> +> > > > > > > > > > > 146 if (nc->peer->info->type == +> +> > > > > > > > > > > NET_CLIENT_DRIVER_VHOST_VDPA) { +> +> > > > > > > > > > > +> +> > > > > > > > > > > (backtrace doesn't go further) +> +> > > > > > > > > The core was incomplete, but running under gdb directly +> +> > > > > > > > > shows that it +> +> > > > > > > > > is just a bog-standard config space access (first for that +> +> > > > > > > > > device). +> +> > > > > > > > > +> +> > > > > > > > > The cause of the crash is that nc->peer is not set... no +> +> > > > > > > > > idea how that +> +> > > > > > > > > can happen, not that familiar with that part of QEMU. +> +> > > > > > > > > (Should the code +> +> > > > > > > > > check, or is that really something that should not happen?) +> +> > > > > > > > > +> +> > > > > > > > > What I don't understand is why it is set correctly for the +> +> > > > > > > > > first, +> +> > > > > > > > > autogenerated virtio-net-ccw device, but not for the second +> +> > > > > > > > > one, and +> +> > > > > > > > > why virtio-net-pci doesn't show these problems. The only +> +> > > > > > > > > difference +> +> > > > > > > > > between -ccw and -pci that comes to my mind here is that +> +> > > > > > > > > config space +> +> > > > > > > > > accesses for ccw are done via an asynchronous operation, so +> +> > > > > > > > > timing +> +> > > > > > > > > might be different. +> +> > > > > > > > Hopefully Jason has an idea. Could you post a full command +> +> > > > > > > > line +> +> > > > > > > > please? Do you need a working guest to trigger this? Does +> +> > > > > > > > this trigger +> +> > > > > > > > on an x86 host? +> +> > > > > > > Yes, it does trigger with tcg-on-x86 as well. 
I've been using +> +> > > > > > > +> +> > > > > > > s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg +> +> > > > > > > -cpu qemu,zpci=on +> +> > > > > > > -m 1024 -nographic -device +> +> > > > > > > virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +> +> > > > > > > -drive +> +> > > > > > > file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +> +> > > > > > > -device +> +> > > > > > > scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +> +> > > > > > > -device virtio-net-ccw +> +> > > > > > > +> +> > > > > > > It seems it needs the guest actually doing something with the +> +> > > > > > > nics; I +> +> > > > > > > cannot reproduce the crash if I use the old advent calendar +> +> > > > > > > moon buggy +> +> > > > > > > image and just add a virtio-net-ccw device. +> +> > > > > > > +> +> > > > > > > (I don't think it's a problem with my local build, as I see the +> +> > > > > > > problem +> +> > > > > > > both on my laptop and on an LPAR.) +> +> > > > > > It looks to me we forget the check the existence of peer. +> +> > > > > > +> +> > > > > > Please try the attached patch to see if it works. +> +> > > > > Thanks, that patch gets my guest up and running again. So, FWIW, +> +> > > > > +> +> > > > > Tested-by: Cornelia Huck +> +> > > > > +> +> > > > > Any idea why this did not hit with virtio-net-pci (or the +> +> > > > > autogenerated +> +> > > > > virtio-net-ccw device)? +> +> > > > It can be hit with virtio-net-pci as well (just start without peer). +> +> > > Hm, I had not been able to reproduce the crash with a 'naked' -device +> +> > > virtio-net-pci. But checking seems to be the right idea anyway. +> +> > Sorry for being unclear, I meant for networking part, you just need start +> +> > without peer, and you need a real guest (any Linux) that is trying to +> +> > access +> +> > the config space of virtio-net. +> +> > +> +> > Thanks +> +> A pxe guest will do it, but that doesn't support ccw, right? +> +> +> +Yes, it depends on the cli actually. +> +> +> +> +> +> I'm still unclear why this triggers with ccw but not pci - +> +> any idea? +> +> +> +I don't test pxe but I can reproduce this with pci (just start a linux guest +> +without a peer). +> +> +Thanks +> +Might be a good addition to a unit test. Not sure what would the +test do exactly: just make sure guest runs? Looks like a lot of work +for an empty test ... maybe we can poke at the guest config with +qtest commands at least. + +-- +MST + +On 2020/7/27 下午9:16, Michael S. Tsirkin wrote: +On Mon, Jul 27, 2020 at 08:44:09PM +0800, Jason Wang wrote: +On 2020/7/27 下午7:43, Michael S. Tsirkin wrote: +On Mon, Jul 27, 2020 at 04:51:23PM +0800, Jason Wang wrote: +On 2020/7/27 下午4:41, Cornelia Huck wrote: +On Mon, 27 Jul 2020 15:38:12 +0800 +Jason Wang wrote: +On 2020/7/27 下午2:43, Cornelia Huck wrote: +On Sat, 25 Jul 2020 08:40:07 +0800 +Jason Wang wrote: +On 2020/7/24 下午11:34, Cornelia Huck wrote: +On Fri, 24 Jul 2020 11:17:57 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 04:56:27PM +0200, Cornelia Huck wrote: +On Fri, 24 Jul 2020 09:30:58 -0400 +"Michael S. Tsirkin" wrote: +On Fri, Jul 24, 2020 at 03:27:18PM +0200, Cornelia Huck wrote: +When I start qemu with a second virtio-net-ccw device (i.e. adding +-device virtio-net-ccw in addition to the autogenerated device), I get +a segfault. 
gdb points to + +#0 0x000055d6ab52681d in virtio_net_get_config (vdev=, + config=0x55d6ad9e3f80 "RT") at +/home/cohuck/git/qemu/hw/net/virtio-net.c:146 +146 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + +(backtrace doesn't go further) +The core was incomplete, but running under gdb directly shows that it +is just a bog-standard config space access (first for that device). + +The cause of the crash is that nc->peer is not set... no idea how that +can happen, not that familiar with that part of QEMU. (Should the code +check, or is that really something that should not happen?) + +What I don't understand is why it is set correctly for the first, +autogenerated virtio-net-ccw device, but not for the second one, and +why virtio-net-pci doesn't show these problems. The only difference +between -ccw and -pci that comes to my mind here is that config space +accesses for ccw are done via an asynchronous operation, so timing +might be different. +Hopefully Jason has an idea. Could you post a full command line +please? Do you need a working guest to trigger this? Does this trigger +on an x86 host? +Yes, it does trigger with tcg-on-x86 as well. I've been using + +s390x-softmmu/qemu-system-s390x -M s390-ccw-virtio,accel=tcg -cpu qemu,zpci=on +-m 1024 -nographic -device virtio-scsi-ccw,id=scsi0,devno=fe.0.0001 +-drive file=/path/to/image,format=qcow2,if=none,id=drive-scsi0-0-0-0 +-device +scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 +-device virtio-net-ccw + +It seems it needs the guest actually doing something with the nics; I +cannot reproduce the crash if I use the old advent calendar moon buggy +image and just add a virtio-net-ccw device. + +(I don't think it's a problem with my local build, as I see the problem +both on my laptop and on an LPAR.) +It looks to me we forget the check the existence of peer. + +Please try the attached patch to see if it works. +Thanks, that patch gets my guest up and running again. So, FWIW, + +Tested-by: Cornelia Huck + +Any idea why this did not hit with virtio-net-pci (or the autogenerated +virtio-net-ccw device)? +It can be hit with virtio-net-pci as well (just start without peer). +Hm, I had not been able to reproduce the crash with a 'naked' -device +virtio-net-pci. But checking seems to be the right idea anyway. +Sorry for being unclear, I meant for networking part, you just need start +without peer, and you need a real guest (any Linux) that is trying to access +the config space of virtio-net. + +Thanks +A pxe guest will do it, but that doesn't support ccw, right? +Yes, it depends on the cli actually. +I'm still unclear why this triggers with ccw but not pci - +any idea? +I don't test pxe but I can reproduce this with pci (just start a linux guest +without a peer). + +Thanks +Might be a good addition to a unit test. Not sure what would the +test do exactly: just make sure guest runs? Looks like a lot of work +for an empty test ... maybe we can poke at the guest config with +qtest commands at least. +That should work or we can simply extend the exist virtio-net qtest to +do that. 
+Thanks + diff --git a/results/classifier/001/mistranslation/80615920 b/results/classifier/001/mistranslation/80615920 new file mode 100644 index 000000000..97712c2f5 --- /dev/null +++ b/results/classifier/001/mistranslation/80615920 @@ -0,0 +1,348 @@ +mistranslation: 0.800 +other: 0.786 +instruction: 0.751 +semantic: 0.737 + +[BUG] accel/tcg: cpu_exec_longjmp_cleanup: assertion failed: (cpu == current_cpu) + +It seems there is a bug in SIGALRM handling when 486 system emulates x86_64 +code. + +This code: + +#include +#include +#include +#include +#include + +pthread_t thread1, thread2; + +// Signal handler for SIGALRM +void alarm_handler(int sig) { + // Do nothing, just wake up the other thread +} + +// Thread 1 function +void* thread1_func(void* arg) { + // Set up the signal handler for SIGALRM + signal(SIGALRM, alarm_handler); + + // Wait for 5 seconds + sleep(1); + + // Send SIGALRM signal to thread 2 + pthread_kill(thread2, SIGALRM); + + return NULL; +} + +// Thread 2 function +void* thread2_func(void* arg) { + // Wait for the SIGALRM signal + pause(); + + printf("Thread 2 woke up!\n"); + + return NULL; +} + +int main() { + // Create thread 1 + if (pthread_create(&thread1, NULL, thread1_func, NULL) != 0) { + fprintf(stderr, "Failed to create thread 1\n"); + return 1; + } + + // Create thread 2 + if (pthread_create(&thread2, NULL, thread2_func, NULL) != 0) { + fprintf(stderr, "Failed to create thread 2\n"); + return 1; + } + + // Wait for both threads to finish + pthread_join(thread1, NULL); + pthread_join(thread2, NULL); + + return 0; +} + + +Fails with this -strace log (there are also unsupported syscalls 334 and 435, +but it seems it doesn't affect the code much): + +... +736 rt_sigaction(SIGALRM,0x000000001123ec20,0x000000001123ecc0) = 0 +736 clock_nanosleep(CLOCK_REALTIME,0,{tv_sec = 1,tv_nsec = 0},{tv_sec = +1,tv_nsec = 0}) +736 rt_sigprocmask(SIG_BLOCK,0x00000000109fad20,0x0000000010800b38,8) = 0 +736 Unknown syscall 435 +736 +clone(CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS|CLONE_PARENT_SETTID| + ... +736 rt_sigprocmask(SIG_SETMASK,0x0000000010800b38,NULL,8) +736 set_robust_list(0x11a419a0,0) = -1 errno=38 (Function not implemented) +736 rt_sigprocmask(SIG_SETMASK,0x0000000011a41fb0,NULL,8) = 0 + = 0 +736 pause(0,0,2,277186368,0,295966400) +736 +futex(0x000000001123f990,FUTEX_CLOCK_REALTIME|FUTEX_WAIT_BITSET,738,NULL,NULL,0) + = 0 +736 rt_sigprocmask(SIG_BLOCK,0x00000000109fad20,0x000000001123ee88,8) = 0 +736 getpid() = 736 +736 tgkill(736,739,SIGALRM) = 0 + = -1 errno=4 (Interrupted system call) +--- SIGALRM {si_signo=SIGALRM, si_code=SI_TKILL, si_pid=736, si_uid=0} --- +0x48874a != 0x3c69e10 +736 rt_sigprocmask(SIG_SETMASK,0x000000001123ee88,NULL,8) = 0 +** +ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: +(cpu == current_cpu) +Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion +failed: (cpu == current_cpu) +0x48874a != 0x3c69e10 +** +ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: +(cpu == current_cpu) +Bail out! 
ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion +failed: (cpu == current_cpu) +# + +The code fails either with or without -singlestep, the command line: + +/usr/bin/qemu-x86_64 -L /opt/x86_64 -strace -singlestep /opt/x86_64/alarm.bin + +Source code of QEMU 8.1.1 was modified with patch "[PATCH] qemu/timer: Don't +use RDTSC on i486" [1], +with added few ioctls (not relevant) and cpu_exec_longjmp_cleanup() now prints +current pointers of +cpu and current_cpu (line "0x48874a != 0x3c69e10"). + +config.log (built as a part of buildroot, basically the minimal possible +configuration for running x86_64 on 486): + +# Configured with: +'/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/build/qemu-8.1.1/configure' + '--prefix=/usr' +'--cross-prefix=/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/host/bin/i486-buildroot-linux-gnu-' + '--audio-drv-list=' +'--python=/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/host/bin/python3' + +'--ninja=/mnt/hd_8tb_p1/p1/home/crossgen/buildroot_486_2/output/host/bin/ninja' +'--disable-alsa' '--disable-bpf' '--disable-brlapi' '--disable-bsd-user' +'--disable-cap-ng' '--disable-capstone' '--disable-containers' +'--disable-coreaudio' '--disable-curl' '--disable-curses' +'--disable-dbus-display' '--disable-docs' '--disable-dsound' '--disable-hvf' +'--disable-jack' '--disable-libiscsi' '--disable-linux-aio' +'--disable-linux-io-uring' '--disable-malloc-trim' '--disable-membarrier' +'--disable-mpath' '--disable-netmap' '--disable-opengl' '--disable-oss' +'--disable-pa' '--disable-rbd' '--disable-sanitizers' '--disable-selinux' +'--disable-sparse' '--disable-strip' '--disable-vde' '--disable-vhost-crypto' +'--disable-vhost-user-blk-server' '--disable-virtfs' '--disable-whpx' +'--disable-xen' '--disable-attr' '--disable-kvm' '--disable-vhost-net' +'--disable-download' '--disable-hexagon-idef-parser' '--disable-system' +'--enable-linux-user' '--target-list=x86_64-linux-user' '--disable-vhost-user' +'--disable-slirp' '--disable-sdl' '--disable-fdt' '--enable-trace-backends=nop' +'--disable-tools' '--disable-guest-agent' '--disable-fuse' +'--disable-fuse-lseek' '--disable-seccomp' '--disable-libssh' +'--disable-libusb' '--disable-vnc' '--disable-nettle' '--disable-numa' +'--disable-pipewire' '--disable-spice' '--disable-usb-redir' +'--disable-install-blobs' + +Emulation of the same x86_64 code with qemu 6.2.0 installed on another x86_64 +native machine works fine. + +[1] +https://lists.nongnu.org/archive/html/qemu-devel/2023-11/msg05387.html +Best regards, +Petr + +On Sat, 25 Nov 2023 at 13:09, Petr Cvek wrote: +> +> +It seems there is a bug in SIGALRM handling when 486 system emulates x86_64 +> +code. +486 host is pretty well out of support currently. Can you reproduce +this on a less ancient host CPU type ? + +> +ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: +> +(cpu == current_cpu) +> +Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: +> +assertion failed: (cpu == current_cpu) +> +0x48874a != 0x3c69e10 +> +** +> +ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: +> +(cpu == current_cpu) +> +Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: +> +assertion failed: (cpu == current_cpu) +What compiler version do you build QEMU with? That +assert is there because we have seen some buggy compilers +in the past which don't correctly preserve the variable +value as the setjmp/longjmp spec requires them to. + +thanks +-- PMM + +Dne 27. 11. 
23 v 10:37 Peter Maydell napsal(a): +> +On Sat, 25 Nov 2023 at 13:09, Petr Cvek wrote: +> +> +> +> It seems there is a bug in SIGALRM handling when 486 system emulates x86_64 +> +> code. +> +> +486 host is pretty well out of support currently. Can you reproduce +> +this on a less ancient host CPU type ? +> +It seems it only fails when the code is compiled for i486. QEMU built with the +same compiler with -march=i586 and above runs on the same physical hardware +without a problem. All -march= variants were executed on ryzen 3600. + +> +> ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion +> +> failed: (cpu == current_cpu) +> +> Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: +> +> assertion failed: (cpu == current_cpu) +> +> 0x48874a != 0x3c69e10 +> +> ** +> +> ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion +> +> failed: (cpu == current_cpu) +> +> Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: +> +> assertion failed: (cpu == current_cpu) +> +> +What compiler version do you build QEMU with? That +> +assert is there because we have seen some buggy compilers +> +in the past which don't correctly preserve the variable +> +value as the setjmp/longjmp spec requires them to. +> +i486 and i586+ code variants were compiled with GCC 13.2.0 (more exactly, +slackware64 current multilib distribution). + +i486 binary which runs on the real 486 is also GCC 13.2.0 and installed as a +part of the buildroot crosscompiler (about two week old git snapshot). + +> +thanks +> +-- PMM +best regards, +Petr + +On 11/25/23 07:08, Petr Cvek wrote: +ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion failed: +(cpu == current_cpu) +Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion +failed: (cpu == current_cpu) +# + +The code fails either with or without -singlestep, the command line: + +/usr/bin/qemu-x86_64 -L /opt/x86_64 -strace -singlestep /opt/x86_64/alarm.bin + +Source code of QEMU 8.1.1 was modified with patch "[PATCH] qemu/timer: Don't use +RDTSC on i486" [1], +with added few ioctls (not relevant) and cpu_exec_longjmp_cleanup() now prints +current pointers of +cpu and current_cpu (line "0x48874a != 0x3c69e10"). +If you try this again with 8.2-rc2, you should not see an assertion failure. +You should see instead + +QEMU internal SIGILL {code=ILLOPC, addr=0x12345678} +which I think more accurately summarizes the situation of attempting RDTSC on hardware +that does not support it. +r~ + +Dne 29. 11. 23 v 15:25 Richard Henderson napsal(a): +> +On 11/25/23 07:08, Petr Cvek wrote: +> +> ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: assertion +> +> failed: (cpu == current_cpu) +> +> Bail out! ERROR:../accel/tcg/cpu-exec.c:546:cpu_exec_longjmp_cleanup: +> +> assertion failed: (cpu == current_cpu) +> +> # +> +> +> +> The code fails either with or without -singlestep, the command line: +> +> +> +> /usr/bin/qemu-x86_64 -L /opt/x86_64 -strace -singlestep +> +> /opt/x86_64/alarm.bin +> +> +> +> Source code of QEMU 8.1.1 was modified with patch "[PATCH] qemu/timer: Don't +> +> use RDTSC on i486" [1], +> +> with added few ioctls (not relevant) and cpu_exec_longjmp_cleanup() now +> +> prints current pointers of +> +> cpu and current_cpu (line "0x48874a != 0x3c69e10"). +> +> +> +If you try this again with 8.2-rc2, you should not see an assertion failure. 
+> +You should see instead +> +> +QEMU internal SIGILL {code=ILLOPC, addr=0x12345678} +> +> +which I think more accurately summarizes the situation of attempting RDTSC on +> +hardware that does not support it. +> +> +Compilation of vanilla qemu v8.2.0-rc2 with -march=i486 by GCC 13.2.0 and +running the resulting binary on ryzen still leads to: + +** +ERROR:../accel/tcg/cpu-exec.c:533:cpu_exec_longjmp_cleanup: assertion failed: +(cpu == current_cpu) +Bail out! ERROR:../accel/tcg/cpu-exec.c:533:cpu_exec_longjmp_cleanup: assertion +failed: (cpu == current_cpu) +Aborted + +> +> +r~ +Petr + diff --git a/results/classifier/001/other/02364653 b/results/classifier/001/other/02364653 new file mode 100644 index 000000000..0142a9653 --- /dev/null +++ b/results/classifier/001/other/02364653 @@ -0,0 +1,363 @@ +other: 0.956 +semantic: 0.942 +instruction: 0.927 +mistranslation: 0.912 + +[Qemu-devel] [BUG] Inappropriate size of target_sigset_t + +Hello, Peter, Laurent, + +While working on another problem yesterday, I think I discovered a +long-standing bug in QEMU Linux user mode: our target_sigset_t structure is +eight times smaller as it should be! + +In this code segment from syscalls_def.h: + +#ifdef TARGET_MIPS +#define TARGET_NSIG 128 +#else +#define TARGET_NSIG 64 +#endif +#define TARGET_NSIG_BPW TARGET_ABI_BITS +#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW) + +typedef struct { + abi_ulong sig[TARGET_NSIG_WORDS]; +} target_sigset_t; + +... TARGET_ABI_BITS should be replaced by eight times smaller constant (in +fact, semantically, we need TARGET_ABI_BYTES, but it is not defined) (what is +needed is actually "a byte per signal" in target_sigset_t, and we allow "a bit +per signal"). + +All this probably sounds to you like something impossible, since this code is +in QEMU "since forever", but I checked everything, and the bug seems real. I +wish you can prove me wrong. + +I just wanted to let you know about this, given the sensitive timing of current +softfreeze, and the fact that I won't be able to do more investigation on this +in coming weeks, since I am busy with other tasks, but perhaps you can analyze +and do something which you consider appropriate. + +Yours, +Aleksandar + +Le 03/07/2019 à 21:46, Aleksandar Markovic a écrit : +> +Hello, Peter, Laurent, +> +> +While working on another problem yesterday, I think I discovered a +> +long-standing bug in QEMU Linux user mode: our target_sigset_t structure is +> +eight times smaller as it should be! +> +> +In this code segment from syscalls_def.h: +> +> +#ifdef TARGET_MIPS +> +#define TARGET_NSIG 128 +> +#else +> +#define TARGET_NSIG 64 +> +#endif +> +#define TARGET_NSIG_BPW TARGET_ABI_BITS +> +#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW) +> +> +typedef struct { +> +abi_ulong sig[TARGET_NSIG_WORDS]; +> +} target_sigset_t; +> +> +... TARGET_ABI_BITS should be replaced by eight times smaller constant (in +> +fact, semantically, we need TARGET_ABI_BYTES, but it is not defined) (what is +> +needed is actually "a byte per signal" in target_sigset_t, and we allow "a +> +bit per signal"). +TARGET_NSIG is divided by TARGET_ABI_BITS which gives you the number of +abi_ulong words we need in target_sigset_t. + +> +All this probably sounds to you like something impossible, since this code is +> +in QEMU "since forever", but I checked everything, and the bug seems real. I +> +wish you can prove me wrong. 
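To make the word-count arithmetic Laurent describes concrete: TARGET_NSIG signals are stored one bit each, packed into abi_ulong words of TARGET_ABI_BITS bits, so TARGET_NSIG / TARGET_NSIG_BPW is a count of words, not of bytes. The following standalone sketch redefines the constants locally purely for illustration (it is not QEMU source; abi_ulong is stood in by uint64_t to match the 64-bit values chosen here):

```c
/* Standalone illustration of the target_sigset_t sizing; not QEMU source. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_NSIG       64               /* 128 on MIPS targets */
#define TARGET_ABI_BITS   64               /* 32 for 32-bit ABIs */
#define TARGET_NSIG_BPW   TARGET_ABI_BITS
#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW)

typedef uint64_t abi_ulong;                /* matches TARGET_ABI_BITS above */

typedef struct {
    abi_ulong sig[TARGET_NSIG_WORDS];
} target_sigset_t;

int main(void)
{
    /* 64 signals / 64 bits per word = 1 word = 8 bytes ... */
    printf("words=%d, size=%zu bytes, capacity=%zu signal bits\n",
           TARGET_NSIG_WORDS, sizeof(target_sigset_t),
           sizeof(target_sigset_t) * 8);

    /* ... which is exactly one bit per possible signal, as intended. */
    assert(sizeof(target_sigset_t) * 8 == TARGET_NSIG);
    return 0;
}
```

With the MIPS values (TARGET_NSIG 128, TARGET_ABI_BITS 32 or 64) the same formula gives 4 or 2 words respectively, i.e. 128 bits of capacity either way.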
+> +> +I just wanted to let you know about this, given the sensitive timing of +> +current softfreeze, and the fact that I won't be able to do more +> +investigation on this in coming weeks, since I am busy with other tasks, but +> +perhaps you can analyze and do something which you consider appropriate. +If I compare with kernel, it looks good: + +In Linux: + + arch/mips/include/uapi/asm/signal.h + + #define _NSIG 128 + #define _NSIG_BPW (sizeof(unsigned long) * 8) + #define _NSIG_WORDS (_NSIG / _NSIG_BPW) + + typedef struct { + unsigned long sig[_NSIG_WORDS]; + } sigset_t; + +_NSIG_BPW is 8 * 8 = 64 on MIPS64 or 4 * 8 = 32 on MIPS + +In QEMU: + +TARGET_NSIG_BPW is TARGET_ABI_BITS which is TARGET_LONG_BITS which is +64 on MIPS64 and 32 on MIPS. + +I think there is no problem. + +Thanks, +Laurent + +From: Laurent Vivier +> +If I compare with kernel, it looks good: +> +... +> +I think there is no problem. +Sure, thanks for such fast response - again, I am glad if you are right. +However, for some reason, glibc (and musl too) define sigset_t differently than +kernel. Please take a look. I am not sure if this is covered fine in our code. + +Yours, +Aleksandar + +> +Thanks, +> +Laurent + +On Wed, 3 Jul 2019 at 21:20, Aleksandar Markovic wrote: +> +> +From: Laurent Vivier +> +> If I compare with kernel, it looks good: +> +> ... +> +> I think there is no problem. +> +> +Sure, thanks for such fast response - again, I am glad if you are right. +> +However, for some reason, glibc (and musl too) define sigset_t differently +> +than kernel. Please take a look. I am not sure if this is covered fine in our +> +code. +Yeah, the libc definitions of sigset_t don't match the +kernel ones (this is for obscure historical reasons IIRC). +We're providing implementations of the target +syscall interface, so our target_sigset_t should be the +target kernel's version (and the target libc's version doesn't +matter to us). On the other hand we will be using the +host libc version, I think, so a little caution is required +and it's possible we have some bugs in our code. + +thanks +-- PMM + +> +From: Peter Maydell +> +> +On Wed, 3 Jul 2019 at 21:20, Aleksandar Markovic wrote: +> +> +> +> From: Laurent Vivier +> +> > If I compare with kernel, it looks good: +> +> > ... +> +> > I think there is no problem. +> +> +> +> Sure, thanks for such fast response - again, I am glad if you are right. +> +> However, for some reason, glibc (and musl too) define sigset_t differently +> +> than kernel. Please take a look. I am not sure if this is covered fine in +> +> our code. +> +> +Yeah, the libc definitions of sigset_t don't match the +> +kernel ones (this is for obscure historical reasons IIRC). +> +We're providing implementations of the target +> +syscall interface, so our target_sigset_t should be the +> +target kernel's version (and the target libc's version doesn't +> +matter to us). On the other hand we will be using the +> +host libc version, I think, so a little caution is required +> +and it's possible we have some bugs in our code. +OK, I gather than this is not something that requires our immediate attention +(for 4.1), but we can analyze it later on. + +Thanks for response!! + +Sincerely, +Aleksandar + +> +thanks +> +-- PMM + +Le 03/07/2019 à 22:28, Peter Maydell a écrit : +> +On Wed, 3 Jul 2019 at 21:20, Aleksandar Markovic wrote: +> +> +> +> From: Laurent Vivier +> +>> If I compare with kernel, it looks good: +> +>> ... +> +>> I think there is no problem. 
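The libc/kernel mismatch mentioned here is easy to observe directly. The sketch below is standalone and not QEMU code; it assumes a Linux x86_64 host (where the kernel-side sigset_t is 8 bytes) and uses the raw syscall interface only to show the size the kernel really expects. glibc's userspace sigset_t reserves room for 1024 signals (128 bytes), so dumping structure sizes on a "real system" and comparing them with QEMU's target_sigset_t shows a large apparent discrepancy even though both sides are consistent with their respective interfaces:

```c
/* Standalone sketch: glibc sigset_t size vs. the size the kernel expects. */
#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    sigset_t set;
    sigemptyset(&set);

    /* glibc reserves 1024 bits: sizeof(sigset_t) is 128 bytes on Linux. */
    printf("glibc sizeof(sigset_t)            = %zu bytes\n", sizeof(set));

    /*
     * The kernel reads only _NSIG bits (64 on x86_64, 128 on MIPS) and takes
     * the size it expects as the explicit sigsetsize argument: 8 bytes is
     * accepted here, while passing glibc's 128 is rejected with EINVAL.
     */
    long ok  = syscall(SYS_rt_sigprocmask, SIG_BLOCK, &set, NULL, 8UL);
    long bad = syscall(SYS_rt_sigprocmask, SIG_BLOCK, &set, NULL, sizeof(set));

    printf("rt_sigprocmask(sigsetsize=8)      -> %ld\n", ok);
    printf("rt_sigprocmask(sigsetsize=%zu)    -> %ld (%s)\n",
           sizeof(set), bad, bad ? strerror(errno) : "ok");
    return 0;
}
```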
+> +> +> +> Sure, thanks for such fast response - again, I am glad if you are right. +> +> However, for some reason, glibc (and musl too) define sigset_t differently +> +> than kernel. Please take a look. I am not sure if this is covered fine in +> +> our code. +> +> +Yeah, the libc definitions of sigset_t don't match the +> +kernel ones (this is for obscure historical reasons IIRC). +> +We're providing implementations of the target +> +syscall interface, so our target_sigset_t should be the +> +target kernel's version (and the target libc's version doesn't +> +matter to us). On the other hand we will be using the +> +host libc version, I think, so a little caution is required +> +and it's possible we have some bugs in our code. +It's why we need host_to_target_sigset_internal() and +target_to_host_sigset_internal() that translates bits and bytes between +guest kernel interface and host libc interface. + +void host_to_target_sigset_internal(target_sigset_t *d, + const sigset_t *s) +{ + int i; + target_sigemptyset(d); + for (i = 1; i <= TARGET_NSIG; i++) { + if (sigismember(s, i)) { + target_sigaddset(d, host_to_target_signal(i)); + } + } +} + +void target_to_host_sigset_internal(sigset_t *d, + const target_sigset_t *s) +{ + int i; + sigemptyset(d); + for (i = 1; i <= TARGET_NSIG; i++) { + if (target_sigismember(s, i)) { + sigaddset(d, target_to_host_signal(i)); + } + } +} + +Thanks, +Laurent + +Hi Aleksandar, + +On Wed, Jul 3, 2019 at 12:48 PM Aleksandar Markovic + wrote: +> +#define TARGET_NSIG_BPW TARGET_ABI_BITS +> +#define TARGET_NSIG_WORDS (TARGET_NSIG / TARGET_NSIG_BPW) +> +> +typedef struct { +> +abi_ulong sig[TARGET_NSIG_WORDS]; +> +} target_sigset_t; +> +> +... TARGET_ABI_BITS should be replaced by eight times smaller constant (in +> +fact, +> +semantically, we need TARGET_ABI_BYTES, but it is not defined) (what is needed +> +is actually "a byte per signal" in target_sigset_t, and we allow "a bit per +> +signal"). +Why do we need a byte per target signal, if the functions in linux-user/signal.c +operate with bits? + +-- +Thanks. +-- Max + +> +Why do we need a byte per target signal, if the functions in +> +linux-user/signal.c +> +operate with bits? +Max, + +I did not base my findings on code analysis, but on dumping size/offsets of +elements of some structures, as they are emulated in QEMU, and in real systems. +So, I can't really answer your question. + +Yours, +Aleksandar + +> +-- +> +Thanks. +> +-- Max + diff --git a/results/classifier/001/other/02572177 b/results/classifier/001/other/02572177 new file mode 100644 index 000000000..55a82678b --- /dev/null +++ b/results/classifier/001/other/02572177 @@ -0,0 +1,421 @@ +other: 0.869 +instruction: 0.794 +semantic: 0.770 +mistranslation: 0.693 + +[Qemu-devel] 答复: Re: [BUG]COLO failover hang + +hi. + + +I test the git qemu master have the same problem. 
+ + +(gdb) bt + + +#0 qio_channel_socket_readv (ioc=0x7f65911b4e50, iov=0x7f64ef3fd880, niov=1, +fds=0x0, nfds=0x0, errp=0x0) at io/channel-socket.c:461 + + +#1 0x00007f658e4aa0c2 in qio_channel_read (address@hidden, address@hidden "", +address@hidden, address@hidden) at io/channel.c:114 + + +#2 0x00007f658e3ea990 in channel_get_buffer (opaque=<optimized out>, +buf=0x7f65907cb838 "", pos=<optimized out>, size=32768) at +migration/qemu-file-channel.c:78 + + +#3 0x00007f658e3e97fc in qemu_fill_buffer (f=0x7f65907cb800) at +migration/qemu-file.c:295 + + +#4 0x00007f658e3ea2e1 in qemu_peek_byte (address@hidden, address@hidden) at +migration/qemu-file.c:555 + + +#5 0x00007f658e3ea34b in qemu_get_byte (address@hidden) at +migration/qemu-file.c:568 + + +#6 0x00007f658e3ea552 in qemu_get_be32 (address@hidden) at +migration/qemu-file.c:648 + + +#7 0x00007f658e3e66e5 in colo_receive_message (f=0x7f65907cb800, +address@hidden) at migration/colo.c:244 + + +#8 0x00007f658e3e681e in colo_receive_check_message (f=<optimized out>, +address@hidden, address@hidden) + + + at migration/colo.c:264 + + +#9 0x00007f658e3e740e in colo_process_incoming_thread (opaque=0x7f658eb30360 +<mis_current.31286>) at migration/colo.c:577 + + +#10 0x00007f658be09df3 in start_thread () from /lib64/libpthread.so.0 + + +#11 0x00007f65881983ed in clone () from /lib64/libc.so.6 + + +(gdb) p ioc->name + + +$2 = 0x7f658ff7d5c0 "migration-socket-incoming" + + +(gdb) p ioc->features Do not support QIO_CHANNEL_FEATURE_SHUTDOWN + + +$3 = 0 + + + + + +(gdb) bt + + +#0 socket_accept_incoming_migration (ioc=0x7fdcceeafa90, condition=G_IO_IN, +opaque=0x7fdcceeafa90) at migration/socket.c:137 + + +#1 0x00007fdcc6966350 in g_main_dispatch (context=<optimized out>) at +gmain.c:3054 + + +#2 g_main_context_dispatch (context=<optimized out>, address@hidden) at +gmain.c:3630 + + +#3 0x00007fdccb8a6dcc in glib_pollfds_poll () at util/main-loop.c:213 + + +#4 os_host_main_loop_wait (timeout=<optimized out>) at util/main-loop.c:258 + + +#5 main_loop_wait (address@hidden) at util/main-loop.c:506 + + +#6 0x00007fdccb526187 in main_loop () at vl.c:1898 + + +#7 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at +vl.c:4709 + + +(gdb) p ioc->features + + +$1 = 6 + + +(gdb) p ioc->name + + +$2 = 0x7fdcce1b1ab0 "migration-socket-listener" + + + + + +May be socket_accept_incoming_migration should call +qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)?? + + + + + +thank you. + + + + + + + + + + + + + + + +原始邮件 + + + +发件人: address@hidden +收件人:王广10165992 address@hidden +抄送人: address@hidden address@hidden +日 期 :2017å¹´03月16日 14:46 +主 题 :Re: [Qemu-devel] COLO failover hang + + + + + + + +On 03/15/2017 05:06 PM, wangguang wrote: +> am testing QEMU COLO feature described here [QEMU +> Wiki]( +http://wiki.qemu-project.org/Features/COLO +). +> +> When the Primary Node panic,the Secondary Node qemu hang. +> hang at recvmsg in qio_channel_socket_readv. +> And I run { 'execute': 'nbd-server-stop' } and { "execute": +> "x-colo-lost-heartbeat" } in Secondary VM's +> monitor,the Secondary Node qemu still hang at recvmsg . +> +> I found that the colo in qemu is not complete yet. +> Do the colo have any plan for development? + +Yes, We are developing. You can see some of patch we pushing. + +> Has anyone ever run it successfully? Any help is appreciated! + +In our internal version can run it successfully, +The failover detail you can ask Zhanghailiang for help. 
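Concretely, the suggestion above would amount to something like the change sketched below in migration/socket.c. This is only a hedged sketch of the idea, not an actual patch: the include list and function body are abbreviated, the error handling, return value and the hand-off of the accepted channel are placeholders rather than the real code, and whether this is the right layer to set the flag (as opposed to doing it inside io/channel-socket.c for every accepted socket) is exactly the question being raised.

```c
/* Sketch only -- surrounding code abbreviated, not the real function body. */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "io/channel-socket.h"

static gboolean socket_accept_incoming_migration(QIOChannel *ioc,
                                                 GIOCondition condition,
                                                 gpointer opaque)
{
    QIOChannelSocket *sioc;
    Error *err = NULL;

    sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc), &err);
    if (!sioc) {
        error_report_err(err);
        return FALSE;   /* placeholder: real return handling elided */
    }

    /*
     * Proposed addition: the accepted channel was observed above with
     * ioc->features == 0, so qio_channel_shutdown() cannot be used on it
     * and a failover cannot kick colo_process_incoming_thread() out of its
     * blocking read.  Advertising the feature on the accepted socket would
     * make that shutdown path available.
     */
    qio_channel_set_feature(QIO_CHANNEL(sioc), QIO_CHANNEL_FEATURE_SHUTDOWN);

    /*
     * ... hand QIO_CHANNEL(sioc) to the incoming-migration code and drop
     * our reference, exactly as the unmodified function already does ...
     */
    (void)condition;
    (void)opaque;
    return FALSE;       /* placeholder */
}
```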
+Next time if you have some question about COLO, +please cc me and zhanghailiang address@hidden + + +Thanks +Zhang Chen + + +> +> +> +> centos7.2+qemu2.7.50 +> (gdb) bt +> #0 0x00007f3e00cc86ad in recvmsg () from /lib64/libpthread.so.0 +> #1 0x00007f3e0332b738 in qio_channel_socket_readv (ioc=<optimized out>, +> iov=<optimized out>, niov=<optimized out>, fds=0x0, nfds=0x0, errp=0x0) at +> io/channel-socket.c:497 +> #2 0x00007f3e03329472 in qio_channel_read (address@hidden, +> address@hidden "", address@hidden, +> address@hidden) at io/channel.c:97 +> #3 0x00007f3e032750e0 in channel_get_buffer (opaque=<optimized out>, +> buf=0x7f3e05910f38 "", pos=<optimized out>, size=32768) at +> migration/qemu-file-channel.c:78 +> #4 0x00007f3e0327412c in qemu_fill_buffer (f=0x7f3e05910f00) at +> migration/qemu-file.c:257 +> #5 0x00007f3e03274a41 in qemu_peek_byte (address@hidden, +> address@hidden) at migration/qemu-file.c:510 +> #6 0x00007f3e03274aab in qemu_get_byte (address@hidden) at +> migration/qemu-file.c:523 +> #7 0x00007f3e03274cb2 in qemu_get_be32 (address@hidden) at +> migration/qemu-file.c:603 +> #8 0x00007f3e03271735 in colo_receive_message (f=0x7f3e05910f00, +> address@hidden) at migration/colo.c:215 +> #9 0x00007f3e0327250d in colo_wait_handle_message (errp=0x7f3d62bfaa48, +> checkpoint_request=<synthetic pointer>, f=<optimized out>) at +> migration/colo.c:546 +> #10 colo_process_incoming_thread (opaque=0x7f3e067245e0) at +> migration/colo.c:649 +> #11 0x00007f3e00cc1df3 in start_thread () from /lib64/libpthread.so.0 +> #12 0x00007f3dfc9c03ed in clone () from /lib64/libc.so.6 +> +> +> +> +> +> -- +> View this message in context: +http://qemu.11.n7.nabble.com/COLO-failover-hang-tp473250.html +> Sent from the Developer mailing list archive at Nabble.com. +> +> +> +> + +-- +Thanks +Zhang Chen + +Hi,Wang. + +You can test this branch: +https://github.com/coloft/qemu/tree/colo-v5.1-developing-COLO-frame-v21-with-shared-disk +and please follow wiki ensure your own configuration correctly. +http://wiki.qemu-project.org/Features/COLO +Thanks + +Zhang Chen + + +On 03/21/2017 03:27 PM, address@hidden wrote: +hi. + +I test the git qemu master have the same problem. 
+ +(gdb) bt +#0 qio_channel_socket_readv (ioc=0x7f65911b4e50, iov=0x7f64ef3fd880, +niov=1, fds=0x0, nfds=0x0, errp=0x0) at io/channel-socket.c:461 +#1 0x00007f658e4aa0c2 in qio_channel_read +(address@hidden, address@hidden "", +address@hidden, address@hidden) at io/channel.c:114 +#2 0x00007f658e3ea990 in channel_get_buffer (opaque=<optimized out>, +buf=0x7f65907cb838 "", pos=<optimized out>, size=32768) at +migration/qemu-file-channel.c:78 +#3 0x00007f658e3e97fc in qemu_fill_buffer (f=0x7f65907cb800) at +migration/qemu-file.c:295 +#4 0x00007f658e3ea2e1 in qemu_peek_byte (address@hidden, +address@hidden) at migration/qemu-file.c:555 +#5 0x00007f658e3ea34b in qemu_get_byte (address@hidden) at +migration/qemu-file.c:568 +#6 0x00007f658e3ea552 in qemu_get_be32 (address@hidden) at +migration/qemu-file.c:648 +#7 0x00007f658e3e66e5 in colo_receive_message (f=0x7f65907cb800, +address@hidden) at migration/colo.c:244 +#8 0x00007f658e3e681e in colo_receive_check_message (f=<optimized +out>, address@hidden, +address@hidden) +at migration/colo.c:264 +#9 0x00007f658e3e740e in colo_process_incoming_thread +(opaque=0x7f658eb30360 <mis_current.31286>) at migration/colo.c:577 +#10 0x00007f658be09df3 in start_thread () from /lib64/libpthread.so.0 + +#11 0x00007f65881983ed in clone () from /lib64/libc.so.6 + +(gdb) p ioc->name + +$2 = 0x7f658ff7d5c0 "migration-socket-incoming" + +(gdb) p ioc->features Do not support QIO_CHANNEL_FEATURE_SHUTDOWN + +$3 = 0 + + +(gdb) bt +#0 socket_accept_incoming_migration (ioc=0x7fdcceeafa90, +condition=G_IO_IN, opaque=0x7fdcceeafa90) at migration/socket.c:137 +#1 0x00007fdcc6966350 in g_main_dispatch (context=<optimized out>) at +gmain.c:3054 +#2 g_main_context_dispatch (context=<optimized out>, +address@hidden) at gmain.c:3630 +#3 0x00007fdccb8a6dcc in glib_pollfds_poll () at util/main-loop.c:213 +#4 os_host_main_loop_wait (timeout=<optimized out>) at +util/main-loop.c:258 +#5 main_loop_wait (address@hidden) at +util/main-loop.c:506 +#6 0x00007fdccb526187 in main_loop () at vl.c:1898 +#7 main (argc=<optimized out>, argv=<optimized out>, envp=<optimized +out>) at vl.c:4709 +(gdb) p ioc->features + +$1 = 6 + +(gdb) p ioc->name + +$2 = 0x7fdcce1b1ab0 "migration-socket-listener" +May be socket_accept_incoming_migration should +call qio_channel_set_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)?? +thank you. + + + + + +原始邮件 +address@hidden; +*收件人:*王广10165992;address@hidden; +address@hidden;address@hidden; +*日 期 :*2017å¹´03月16日 14:46 +*主 题 :**Re: [Qemu-devel] COLO failover hang* + + + + +On 03/15/2017 05:06 PM, wangguang wrote: +> am testing QEMU COLO feature described here [QEMU +> Wiki]( +http://wiki.qemu-project.org/Features/COLO +). +> +> When the Primary Node panic,the Secondary Node qemu hang. +> hang at recvmsg in qio_channel_socket_readv. +> And I run { 'execute': 'nbd-server-stop' } and { "execute": +> "x-colo-lost-heartbeat" } in Secondary VM's +> monitor,the Secondary Node qemu still hang at recvmsg . +> +> I found that the colo in qemu is not complete yet. +> Do the colo have any plan for development? + +Yes, We are developing. You can see some of patch we pushing. + +> Has anyone ever run it successfully? Any help is appreciated! + +In our internal version can run it successfully, +The failover detail you can ask Zhanghailiang for help. 
+Next time if you have some question about COLO, +please cc me and zhanghailiang address@hidden + + +Thanks +Zhang Chen + + +> +> +> +> centos7.2+qemu2.7.50 +> (gdb) bt +> #0 0x00007f3e00cc86ad in recvmsg () from /lib64/libpthread.so.0 +> #1 0x00007f3e0332b738 in qio_channel_socket_readv (ioc=<optimized out>, +> iov=<optimized out>, niov=<optimized out>, fds=0x0, nfds=0x0, errp=0x0) at +> io/channel-socket.c:497 +> #2 0x00007f3e03329472 in qio_channel_read (address@hidden, +> address@hidden "", address@hidden, +> address@hidden) at io/channel.c:97 +> #3 0x00007f3e032750e0 in channel_get_buffer (opaque=<optimized out>, +> buf=0x7f3e05910f38 "", pos=<optimized out>, size=32768) at +> migration/qemu-file-channel.c:78 +> #4 0x00007f3e0327412c in qemu_fill_buffer (f=0x7f3e05910f00) at +> migration/qemu-file.c:257 +> #5 0x00007f3e03274a41 in qemu_peek_byte (address@hidden, +> address@hidden) at migration/qemu-file.c:510 +> #6 0x00007f3e03274aab in qemu_get_byte (address@hidden) at +> migration/qemu-file.c:523 +> #7 0x00007f3e03274cb2 in qemu_get_be32 (address@hidden) at +> migration/qemu-file.c:603 +> #8 0x00007f3e03271735 in colo_receive_message (f=0x7f3e05910f00, +> address@hidden) at migration/colo.c:215 +> #9 0x00007f3e0327250d in colo_wait_handle_message (errp=0x7f3d62bfaa48, +> checkpoint_request=<synthetic pointer>, f=<optimized out>) at +> migration/colo.c:546 +> #10 colo_process_incoming_thread (opaque=0x7f3e067245e0) at +> migration/colo.c:649 +> #11 0x00007f3e00cc1df3 in start_thread () from /lib64/libpthread.so.0 +> #12 0x00007f3dfc9c03ed in clone () from /lib64/libc.so.6 +> +> +> +> +> +> -- +> View this message in context: +http://qemu.11.n7.nabble.com/COLO-failover-hang-tp473250.html +> Sent from the Developer mailing list archive at Nabble.com. +> +> +> +> + +-- +Thanks +Zhang Chen +-- +Thanks +Zhang Chen + diff --git a/results/classifier/001/other/04472277 b/results/classifier/001/other/04472277 new file mode 100644 index 000000000..95b28c963 --- /dev/null +++ b/results/classifier/001/other/04472277 @@ -0,0 +1,576 @@ +other: 0.846 +instruction: 0.845 +mistranslation: 0.817 +semantic: 0.815 + +[BUG][KVM_SET_USER_MEMORY_REGION] KVM_SET_USER_MEMORY_REGION failed + +Hi all, +I start a VM in openstack, and openstack use libvirt to start qemu VM, but now log show this ERROR. +Is there any one know this? +The ERROR log from /var/log/libvirt/qemu/instance-0000000e.log +``` +2023-03-14T10:09:17.674114Z qemu-system-x86_64: kvm_set_user_memory_region: KVM_SET_USER_MEMORY_REGION failed, slot=4, start=0xfffffffffe000000, size=0x2000: Invalid argument +kvm_set_phys_mem: error registering slot: Invalid argument +2023-03-14 10:09:18.198+0000: shutting down, reason=crashed +``` +The xml file +``` +root@c1c2:~# cat /etc/libvirt/qemu/instance-0000000e.xml + + +  instance-0000000e +  ff91d2dc-69a1-43ef-abde-c9e4e9a0305b +  +    +      +      provider-instance +      2023-03-14 10:09:13 +      +        64 +        1 +        0 +        0 +        1 +      +      +        admin +        admin +      +      +      +        +          +        +      +    +  +  65536 +  65536 +  1 +  +    +      OpenStack Foundation +      OpenStack Nova +      25.1.0 +      ff91d2dc-69a1-43ef-abde-c9e4e9a0305b +      ff91d2dc-69a1-43ef-abde-c9e4e9a0305b +      Virtual Machine +    +  +  +    hvm +    +    +  +  +    +    +    +  +  +    +  +  +    +    +    +  +  destroy +  restart +  destroy +  +    /usr/bin/qemu-system-x86_64 +    +      +      +      +     
+  [remainder of the libvirt domain XML omitted: the mailing-list archive stripped the element tags, leaving only blank indentation, so the device definitions (disks, interfaces, consoles, ...) are not recoverable here]