##// END OF EJS Templates
Changeset r36778:7bf80d9d by Gregory Szorc — "merge with stable" (merge into default); parent/child navigation and file-browsing links omitted from this capture.
Note: in the original diff view this file was collapsed because it changes many lines (1502 lines changed); the "Show More" / "Show them" / "Hide them" controls expanded it. The full 1502-line content follows.
@@ -0,0 +1,1502 b''
1 #require killdaemons
2
3 $ cat > fakeremoteuser.py << EOF
4 > import os
5 > from mercurial.hgweb import hgweb_mod
6 > from mercurial import wireproto
7 > class testenvhgweb(hgweb_mod.hgweb):
8 > def __call__(self, env, respond):
9 > # Allow REMOTE_USER to define authenticated user.
10 > if r'REMOTE_USER' in os.environ:
11 > env[r'REMOTE_USER'] = os.environ[r'REMOTE_USER']
12 > # Allow REQUEST_METHOD to override HTTP method
13 > if r'REQUEST_METHOD' in os.environ:
14 > env[r'REQUEST_METHOD'] = os.environ[r'REQUEST_METHOD']
15 > return super(testenvhgweb, self).__call__(env, respond)
16 > hgweb_mod.hgweb = testenvhgweb
17 >
18 > @wireproto.wireprotocommand('customreadnoperm')
19 > def customread(repo, proto):
20 > return b'read-only command no defined permissions\n'
21 > @wireproto.wireprotocommand('customwritenoperm')
22 > def customwritenoperm(repo, proto):
23 > return b'write command no defined permissions\n'
24 > wireproto.permissions['customreadwithperm'] = 'pull'
25 > @wireproto.wireprotocommand('customreadwithperm')
26 > def customreadwithperm(repo, proto):
27 > return b'read-only command w/ defined permissions\n'
28 > wireproto.permissions['customwritewithperm'] = 'push'
29 > @wireproto.wireprotocommand('customwritewithperm')
30 > def customwritewithperm(repo, proto):
31 > return b'write command w/ defined permissions\n'
32 > EOF
33
34 $ cat >> $HGRCPATH << EOF
35 > [extensions]
36 > fakeremoteuser = $TESTTMP/fakeremoteuser.py
37 > strip =
38 > EOF
39
40 $ hg init test
41 $ cd test
42 $ echo a > a
43 $ hg ci -Ama
44 adding a
45 $ cd ..
46 $ hg clone test test2
47 updating to branch default
48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 $ cd test2
50 $ echo a >> a
51 $ hg ci -mb
52 $ hg book bm -r 0
53 $ cd ../test
54
55 web.deny_read=* prevents access to wire protocol for all users
56
57 $ cat > .hg/hgrc <<EOF
58 > [web]
59 > deny_read = *
60 > EOF
61
62 $ hg serve -p $HGPORT -d --pid-file hg.pid
63 $ cat hg.pid > $DAEMON_PIDS
64
65 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'
66 401 read not authorized
67
68 0
69 read not authorized
70 [1]
71
72 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
73 401 read not authorized
74
75 0
76 read not authorized
77 [1]
78
79 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
80 401 read not authorized
81
82 0
83 read not authorized
84 [1]
85
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
87 401 read not authorized
88
89 0
90 read not authorized
91 [1]
92
93 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
94 401 read not authorized
95
96 0
97 read not authorized
98 [1]
99
100 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
101 401 read not authorized
102
103 0
104 read not authorized
105 [1]
106
107 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
108 401 read not authorized
109
110 0
111 read not authorized
112 [1]
113
114 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
115 401 read not authorized
116
117 0
118 read not authorized
119 [1]
120
121 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
122 pulling from http://localhost:$HGPORT/
123 abort: authorization failed
124 [255]
125
126 $ killdaemons.py
127
128 web.deny_read=* with REMOTE_USER set still locks out clients
129
130 $ REMOTE_USER=authed_user hg serve -p $HGPORT -d --pid-file hg.pid
131 $ cat hg.pid > $DAEMON_PIDS
132
133 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'
134 401 read not authorized
135
136 0
137 read not authorized
138 [1]
139
140 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
141 401 read not authorized
142
143 0
144 read not authorized
145 [1]
146
147 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
148 401 read not authorized
149
150 0
151 read not authorized
152 [1]
153
154 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
155 401 read not authorized
156
157 0
158 read not authorized
159 [1]
160
161 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
162 401 read not authorized
163
164 0
165 read not authorized
166 [1]
167
168 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
169 401 read not authorized
170
171 0
172 read not authorized
173 [1]
174
175 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
176 401 read not authorized
177
178 0
179 read not authorized
180 [1]
181
182 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
183 pulling from http://localhost:$HGPORT/
184 abort: authorization failed
185 [255]
186
187 $ killdaemons.py
188
189 web.deny_read=<user> denies access to unauthenticated user
190
191 $ cat > .hg/hgrc <<EOF
192 > [web]
193 > deny_read = baduser1,baduser2
194 > EOF
195
196 $ hg serve -p $HGPORT -d --pid-file hg.pid
197 $ cat hg.pid > $DAEMON_PIDS
198
199 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
200 401 read not authorized
201
202 0
203 read not authorized
204 [1]
205
206 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
207 401 read not authorized
208
209 0
210 read not authorized
211 [1]
212
213 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
214 401 read not authorized
215
216 0
217 read not authorized
218 [1]
219
220 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
221 401 read not authorized
222
223 0
224 read not authorized
225 [1]
226
227 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
228 401 read not authorized
229
230 0
231 read not authorized
232 [1]
233
234 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
235 401 read not authorized
236
237 0
238 read not authorized
239 [1]
240
241 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
242 pulling from http://localhost:$HGPORT/
243 abort: authorization failed
244 [255]
245
246 $ killdaemons.py
247
248 web.deny_read=<user> denies access to users in deny list
249
250 $ REMOTE_USER=baduser2 hg serve -p $HGPORT -d --pid-file hg.pid
251 $ cat hg.pid > $DAEMON_PIDS
252
253 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
254 401 read not authorized
255
256 0
257 read not authorized
258 [1]
259
260 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
261 401 read not authorized
262
263 0
264 read not authorized
265 [1]
266
267 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
268 401 read not authorized
269
270 0
271 read not authorized
272 [1]
273
274 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
275 401 read not authorized
276
277 0
278 read not authorized
279 [1]
280
281 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
282 401 read not authorized
283
284 0
285 read not authorized
286 [1]
287
288 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
289 401 read not authorized
290
291 0
292 read not authorized
293 [1]
294
295 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
296 pulling from http://localhost:$HGPORT/
297 abort: authorization failed
298 [255]
299
300 $ killdaemons.py
301
302 web.deny_read=<user> allows access to authenticated users not in list
303
304 $ REMOTE_USER=gooduser hg serve -p $HGPORT -d --pid-file hg.pid
305 $ cat hg.pid > $DAEMON_PIDS
306
307 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
308 200 Script output follows
309
310 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
311 publishing True (no-eol)
312
313 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
314 200 Script output follows
315
316 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
317 publishing True (no-eol)
318
319 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
320 405 push requires POST request
321
322 0
323 push requires POST request
324 [1]
325
326 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
327 200 Script output follows
328
329 read-only command w/ defined permissions
330
331 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
332 405 push requires POST request
333
334 0
335 push requires POST request
336 [1]
337
338 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
339 405 push requires POST request
340
341 0
342 push requires POST request
343 [1]
344
345 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
346 pulling from http://localhost:$HGPORT/
347 searching for changes
348 no changes found
349
350 $ killdaemons.py
351
352 web.allow_read=* allows reads for unauthenticated users
353
354 $ cat > .hg/hgrc <<EOF
355 > [web]
356 > allow_read = *
357 > EOF
358
359 $ hg serve -p $HGPORT -d --pid-file hg.pid
360 $ cat hg.pid > $DAEMON_PIDS
361
362 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
363 200 Script output follows
364
365 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
366 publishing True (no-eol)
367
368 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
369 200 Script output follows
370
371 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
372 publishing True (no-eol)
373
374 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
375 405 push requires POST request
376
377 0
378 push requires POST request
379 [1]
380
381 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
382 200 Script output follows
383
384 read-only command w/ defined permissions
385
386 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
387 405 push requires POST request
388
389 0
390 push requires POST request
391 [1]
392
393 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
394 405 push requires POST request
395
396 0
397 push requires POST request
398 [1]
399
400 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
401 pulling from http://localhost:$HGPORT/
402 searching for changes
403 no changes found
404
405 $ killdaemons.py
406
407 web.allow_read=* allows read for authenticated user
408
409 $ REMOTE_USER=authed_user hg serve -p $HGPORT -d --pid-file hg.pid
410 $ cat hg.pid > $DAEMON_PIDS
411
412 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
413 200 Script output follows
414
415 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
416 publishing True (no-eol)
417
418 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
419 200 Script output follows
420
421 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
422 publishing True (no-eol)
423
424 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
425 405 push requires POST request
426
427 0
428 push requires POST request
429 [1]
430
431 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
432 200 Script output follows
433
434 read-only command w/ defined permissions
435
436 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
437 405 push requires POST request
438
439 0
440 push requires POST request
441 [1]
442
443 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
444 405 push requires POST request
445
446 0
447 push requires POST request
448 [1]
449
450 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
451 pulling from http://localhost:$HGPORT/
452 searching for changes
453 no changes found
454
455 $ killdaemons.py
456
457 web.allow_read=<user> does not allow unauthenticated users to read
458
459 $ cat > .hg/hgrc <<EOF
460 > [web]
461 > allow_read = gooduser
462 > EOF
463
464 $ hg serve -p $HGPORT -d --pid-file hg.pid
465 $ cat hg.pid > $DAEMON_PIDS
466
467 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
468 401 read not authorized
469
470 0
471 read not authorized
472 [1]
473
474 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
475 401 read not authorized
476
477 0
478 read not authorized
479 [1]
480
481 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
482 401 read not authorized
483
484 0
485 read not authorized
486 [1]
487
488 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
489 401 read not authorized
490
491 0
492 read not authorized
493 [1]
494
495 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
496 401 read not authorized
497
498 0
499 read not authorized
500 [1]
501
502 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
503 401 read not authorized
504
505 0
506 read not authorized
507 [1]
508
509 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
510 pulling from http://localhost:$HGPORT/
511 abort: authorization failed
512 [255]
513
514 $ killdaemons.py
515
516 web.allow_read=<user> does not allow user not in list to read
517
518 $ REMOTE_USER=baduser hg serve -p $HGPORT -d --pid-file hg.pid
519 $ cat hg.pid > $DAEMON_PIDS
520
521 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
522 401 read not authorized
523
524 0
525 read not authorized
526 [1]
527
528 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
529 401 read not authorized
530
531 0
532 read not authorized
533 [1]
534
535 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
536 401 read not authorized
537
538 0
539 read not authorized
540 [1]
541
542 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
543 401 read not authorized
544
545 0
546 read not authorized
547 [1]
548
549 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
550 401 read not authorized
551
552 0
553 read not authorized
554 [1]
555
556 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
557 401 read not authorized
558
559 0
560 read not authorized
561 [1]
562
563 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
564 pulling from http://localhost:$HGPORT/
565 abort: authorization failed
566 [255]
567
568 $ killdaemons.py
569
570 web.allow_read=<user> allows read from user in list
571
572 $ REMOTE_USER=gooduser hg serve -p $HGPORT -d --pid-file hg.pid
573 $ cat hg.pid > $DAEMON_PIDS
574
575 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
576 200 Script output follows
577
578 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
579 publishing True (no-eol)
580
581 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
582 200 Script output follows
583
584 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b 1
585 publishing True (no-eol)
586
587 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
588 405 push requires POST request
589
590 0
591 push requires POST request
592 [1]
593
594 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
595 200 Script output follows
596
597 read-only command w/ defined permissions
598
599 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
600 405 push requires POST request
601
602 0
603 push requires POST request
604 [1]
605
606 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
607 405 push requires POST request
608
609 0
610 push requires POST request
611 [1]
612
613 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
614 pulling from http://localhost:$HGPORT/
615 searching for changes
616 no changes found
617
618 $ killdaemons.py
619
620 web.deny_read takes precedence over web.allow_read
621
622 $ cat > .hg/hgrc <<EOF
623 > [web]
624 > allow_read = baduser
625 > deny_read = baduser
626 > EOF
627
628 $ REMOTE_USER=baduser hg serve -p $HGPORT -d --pid-file hg.pid
629 $ cat hg.pid > $DAEMON_PIDS
630
631 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
632 401 read not authorized
633
634 0
635 read not authorized
636 [1]
637
638 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
639 401 read not authorized
640
641 0
642 read not authorized
643 [1]
644
645 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
646 401 read not authorized
647
648 0
649 read not authorized
650 [1]
651
652 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
653 401 read not authorized
654
655 0
656 read not authorized
657 [1]
658
659 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
660 401 read not authorized
661
662 0
663 read not authorized
664 [1]
665
666 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
667 401 read not authorized
668
669 0
670 read not authorized
671 [1]
672
673 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
674 pulling from http://localhost:$HGPORT/
675 abort: authorization failed
676 [255]
677
678 $ killdaemons.py
679
680 web.allow-pull=false denies read access to repo
681
682 $ cat > .hg/hgrc <<EOF
683 > [web]
684 > allow-pull = false
685 > EOF
686
687 $ hg serve -p $HGPORT -d --pid-file hg.pid
688 $ cat hg.pid > $DAEMON_PIDS
689
690 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'
691 401 pull not authorized
692
693 0
694 pull not authorized
695 [1]
696
697 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=listkeys' --requestheader 'x-hgarg-1=namespace=phases'
698 401 pull not authorized
699
700 0
701 pull not authorized
702 [1]
703
704 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=listkeys+namespace%3Dphases'
705 401 pull not authorized
706
707 0
708 pull not authorized
709 [1]
710
711 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
712 405 push requires POST request
713
714 0
715 push requires POST request
716 [1]
717
718 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
719 401 pull not authorized
720
721 0
722 pull not authorized
723 [1]
724
725 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
726 405 push requires POST request
727
728 0
729 push requires POST request
730 [1]
731
732 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
733 405 push requires POST request
734
735 0
736 push requires POST request
737 [1]
738
739 $ hg --cwd ../test2 pull http://localhost:$HGPORT/
740 pulling from http://localhost:$HGPORT/
741 abort: authorization failed
742 [255]
743
744 $ killdaemons.py
745
746 Attempting a write command with HTTP GET fails
747
748 $ cat > .hg/hgrc <<EOF
749 > EOF
750
751 $ REQUEST_METHOD=GET hg serve -p $HGPORT -d --pid-file hg.pid
752 $ cat hg.pid > $DAEMON_PIDS
753
754 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
755 405 push requires POST request
756
757 0
758 push requires POST request
759 [1]
760
761 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
762 405 push requires POST request
763
764 0
765 push requires POST request
766 [1]
767
768 $ hg bookmarks
769 no bookmarks set
770 $ hg bookmark -d bm
771 abort: bookmark 'bm' does not exist
772 [255]
773
774 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
775 405 push requires POST request
776
777 0
778 push requires POST request
779 [1]
780
781 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
782 405 push requires POST request
783
784 0
785 push requires POST request
786 [1]
787
788 $ killdaemons.py
789
790 Attempting a write command with an unknown HTTP verb fails
791
792 $ REQUEST_METHOD=someverb hg serve -p $HGPORT -d --pid-file hg.pid
793 $ cat hg.pid > $DAEMON_PIDS
794
795 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
796 405 push requires POST request
797
798 0
799 push requires POST request
800 [1]
801
802 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
803 405 push requires POST request
804
805 0
806 push requires POST request
807 [1]
808
809 $ hg bookmarks
810 no bookmarks set
811 $ hg bookmark -d bm
812 abort: bookmark 'bm' does not exist
813 [255]
814
815 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
816 405 push requires POST request
817
818 0
819 push requires POST request
820 [1]
821
822 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
823 405 push requires POST request
824
825 0
826 push requires POST request
827 [1]
828
829 $ killdaemons.py
830
831 Pushing on a plaintext channel is disabled by default
832
833 $ cat > .hg/hgrc <<EOF
834 > EOF
835
836 $ REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
837 $ cat hg.pid > $DAEMON_PIDS
838
839 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
840 403 ssl required
841
842 0
843 ssl required
844 [1]
845
846 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
847 403 ssl required
848
849 0
850 ssl required
851 [1]
852
853 $ hg bookmarks
854 no bookmarks set
855
856 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
857 403 ssl required
858
859 0
860 ssl required
861 [1]
862
863 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
864 403 ssl required
865
866 0
867 ssl required
868 [1]
869
870 Reset server to remove REQUEST_METHOD hack to test hg client
871
872 $ killdaemons.py
873 $ hg serve -p $HGPORT -d --pid-file hg.pid
874 $ cat hg.pid > $DAEMON_PIDS
875
876 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
877 pushing to http://localhost:$HGPORT/
878 searching for changes
879 no changes found
880 abort: HTTP Error 403: ssl required
881 [255]
882
883 $ hg --cwd ../test2 push http://localhost:$HGPORT/
884 pushing to http://localhost:$HGPORT/
885 searching for changes
886 abort: HTTP Error 403: ssl required
887 [255]
888
889 $ killdaemons.py
890
891 web.deny_push=* denies pushing to unauthenticated users
892
893 $ cat > .hg/hgrc <<EOF
894 > [web]
895 > push_ssl = false
896 > deny_push = *
897 > EOF
898
899 $ REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
900 $ cat hg.pid > $DAEMON_PIDS
901
902 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
903 401 push not authorized
904
905 0
906 push not authorized
907 [1]
908
909 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
910 401 push not authorized
911
912 0
913 push not authorized
914 [1]
915
916 $ hg bookmarks
917 no bookmarks set
918
919 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
920 401 push not authorized
921
922 0
923 push not authorized
924 [1]
925
926 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
927 401 push not authorized
928
929 0
930 push not authorized
931 [1]
932
933 Reset server to remove REQUEST_METHOD hack to test hg client
934
935 $ killdaemons.py
936 $ hg serve -p $HGPORT -d --pid-file hg.pid
937 $ cat hg.pid > $DAEMON_PIDS
938
939 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
940 pushing to http://localhost:$HGPORT/
941 searching for changes
942 no changes found
943 abort: authorization failed
944 [255]
945
946 $ hg --cwd ../test2 push http://localhost:$HGPORT/
947 pushing to http://localhost:$HGPORT/
948 searching for changes
949 abort: authorization failed
950 [255]
951
952 $ killdaemons.py
953
954 web.deny_push=* denies pushing to authenticated users
955
956 $ REMOTE_USER=someuser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
957 $ cat hg.pid > $DAEMON_PIDS
958
959 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
960 401 push not authorized
961
962 0
963 push not authorized
964 [1]
965
966 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
967 401 push not authorized
968
969 0
970 push not authorized
971 [1]
972
973 $ hg bookmarks
974 no bookmarks set
975
976 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
977 401 push not authorized
978
979 0
980 push not authorized
981 [1]
982
983 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
984 401 push not authorized
985
986 0
987 push not authorized
988 [1]
989
990 Reset server to remove REQUEST_METHOD hack to test hg client
991
992 $ killdaemons.py
993 $ REMOTE_USER=someuser hg serve -p $HGPORT -d --pid-file hg.pid
994 $ cat hg.pid > $DAEMON_PIDS
995
996 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
997 pushing to http://localhost:$HGPORT/
998 searching for changes
999 no changes found
1000 abort: authorization failed
1001 [255]
1002
1003 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1004 pushing to http://localhost:$HGPORT/
1005 searching for changes
1006 abort: authorization failed
1007 [255]
1008
1009 $ killdaemons.py
1010
1011 web.deny_push=<user> denies pushing to user in list
1012
1013 $ cat > .hg/hgrc <<EOF
1014 > [web]
1015 > push_ssl = false
1016 > deny_push = baduser
1017 > EOF
1018
1019 $ REMOTE_USER=baduser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1020 $ cat hg.pid > $DAEMON_PIDS
1021
1022 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1023 401 push not authorized
1024
1025 0
1026 push not authorized
1027 [1]
1028
1029 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1030 401 push not authorized
1031
1032 0
1033 push not authorized
1034 [1]
1035
1036 $ hg bookmarks
1037 no bookmarks set
1038
1039 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1040 401 push not authorized
1041
1042 0
1043 push not authorized
1044 [1]
1045
1046 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1047 401 push not authorized
1048
1049 0
1050 push not authorized
1051 [1]
1052
1053 Reset server to remove REQUEST_METHOD hack to test hg client
1054
1055 $ killdaemons.py
1056 $ REMOTE_USER=baduser hg serve -p $HGPORT -d --pid-file hg.pid
1057 $ cat hg.pid > $DAEMON_PIDS
1058
1059 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1060 pushing to http://localhost:$HGPORT/
1061 searching for changes
1062 no changes found
1063 abort: authorization failed
1064 [255]
1065
1066 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1067 pushing to http://localhost:$HGPORT/
1068 searching for changes
1069 abort: authorization failed
1070 [255]
1071
1072 $ killdaemons.py
1073
1074 web.deny_push=<user> denies pushing to user not in list because allow-push isn't set
1075
1076 $ REMOTE_USER=gooduser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1077 $ cat hg.pid > $DAEMON_PIDS
1078
1079 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1080 401 push not authorized
1081
1082 0
1083 push not authorized
1084 [1]
1085
1086 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1087 401 push not authorized
1088
1089 0
1090 push not authorized
1091 [1]
1092
1093 $ hg bookmarks
1094 no bookmarks set
1095
1096 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1097 401 push not authorized
1098
1099 0
1100 push not authorized
1101 [1]
1102
1103 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1104 401 push not authorized
1105
1106 0
1107 push not authorized
1108 [1]
1109
1110 Reset server to remove REQUEST_METHOD hack to test hg client
1111
1112 $ killdaemons.py
1113 $ REMOTE_USER=gooduser hg serve -p $HGPORT -d --pid-file hg.pid
1114 $ cat hg.pid > $DAEMON_PIDS
1115
1116 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1117 pushing to http://localhost:$HGPORT/
1118 searching for changes
1119 no changes found
1120 abort: authorization failed
1121 [255]
1122
1123 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1124 pushing to http://localhost:$HGPORT/
1125 searching for changes
1126 abort: authorization failed
1127 [255]
1128
1129 $ killdaemons.py
1130
1131 web.allow-push=* allows pushes from unauthenticated users
1132
1133 $ cat > .hg/hgrc <<EOF
1134 > [web]
1135 > push_ssl = false
1136 > allow-push = *
1137 > EOF
1138
1139 $ REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1140 $ cat hg.pid > $DAEMON_PIDS
1141
1142 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1143 200 Script output follows
1144
1145 1
1146
1147 $ hg bookmarks
1148 bm 0:cb9a9f314b8b
1149 $ hg book -d bm
1150
1151 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1152 200 Script output follows
1153
1154 write command no defined permissions
1155
1156 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1157 200 Script output follows
1158
1159 write command w/ defined permissions
1160
1161 Reset server to remove REQUEST_METHOD hack to test hg client
1162
1163 $ killdaemons.py
1164 $ hg serve -p $HGPORT -d --pid-file hg.pid
1165 $ cat hg.pid > $DAEMON_PIDS
1166
1167 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1168 pushing to http://localhost:$HGPORT/
1169 searching for changes
1170 no changes found
1171 exporting bookmark bm
1172 [1]
1173
1174 $ hg book -d bm
1175
1176 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1177 pushing to http://localhost:$HGPORT/
1178 searching for changes
1179 remote: adding changesets
1180 remote: adding manifests
1181 remote: adding file changes
1182 remote: added 1 changesets with 1 changes to 1 files
1183
1184 $ hg strip -r 1:
1185 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
1186
1187 $ killdaemons.py
1188
1189 web.allow-push=* allows pushes from authenticated users
1190
1191 $ REMOTE_USER=someuser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1192 $ cat hg.pid > $DAEMON_PIDS
1193
1194 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1195 200 Script output follows
1196
1197 1
1198
1199 $ hg bookmarks
1200 bm 0:cb9a9f314b8b
1201 $ hg book -d bm
1202
1203 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1204 200 Script output follows
1205
1206 write command no defined permissions
1207
1208 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1209 200 Script output follows
1210
1211 write command w/ defined permissions
1212
1213 Reset server to remove REQUEST_METHOD hack to test hg client
1214
1215 $ killdaemons.py
1216 $ REMOTE_USER=someuser hg serve -p $HGPORT -d --pid-file hg.pid
1217 $ cat hg.pid > $DAEMON_PIDS
1218
1219 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1220 pushing to http://localhost:$HGPORT/
1221 searching for changes
1222 no changes found
1223 exporting bookmark bm
1224 [1]
1225
1226 $ hg book -d bm
1227
1228 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1229 pushing to http://localhost:$HGPORT/
1230 searching for changes
1231 remote: adding changesets
1232 remote: adding manifests
1233 remote: adding file changes
1234 remote: added 1 changesets with 1 changes to 1 files
1235
1236 $ hg strip -r 1:
1237 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
1238
1239 $ killdaemons.py
1240
1241 web.allow-push=<user> denies push to user not in list
1242
1243 $ cat > .hg/hgrc <<EOF
1244 > [web]
1245 > push_ssl = false
1246 > allow-push = gooduser
1247 > EOF
1248
1249 $ REMOTE_USER=baduser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1250 $ cat hg.pid > $DAEMON_PIDS
1251
1252 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1253 401 push not authorized
1254
1255 0
1256 push not authorized
1257 [1]
1258
1259 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1260 401 push not authorized
1261
1262 0
1263 push not authorized
1264 [1]
1265
1266 $ hg bookmarks
1267 no bookmarks set
1268
1269 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1270 401 push not authorized
1271
1272 0
1273 push not authorized
1274 [1]
1275
1276 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1277 401 push not authorized
1278
1279 0
1280 push not authorized
1281 [1]
1282
1283 Reset server to remove REQUEST_METHOD hack to test hg client
1284
1285 $ killdaemons.py
1286 $ REMOTE_USER=baduser hg serve -p $HGPORT -d --pid-file hg.pid
1287 $ cat hg.pid > $DAEMON_PIDS
1288
1289 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1290 pushing to http://localhost:$HGPORT/
1291 searching for changes
1292 no changes found
1293 abort: authorization failed
1294 [255]
1295
1296 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1297 pushing to http://localhost:$HGPORT/
1298 searching for changes
1299 abort: authorization failed
1300 [255]
1301
1302 $ killdaemons.py
1303
1304 web.allow-push=<user> allows push from user in list
1305
1306 $ REMOTE_USER=gooduser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1307 $ cat hg.pid > $DAEMON_PIDS
1308
1309 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1310 200 Script output follows
1311
1312 1
1313
1314 $ hg bookmarks
1315 bm 0:cb9a9f314b8b
1316 $ hg book -d bm
1317
1318 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1319 200 Script output follows
1320
1321 1
1322
1323 $ hg bookmarks
1324 bm 0:cb9a9f314b8b
1325 $ hg book -d bm
1326
1327 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1328 200 Script output follows
1329
1330 write command no defined permissions
1331
1332 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1333 200 Script output follows
1334
1335 write command w/ defined permissions
1336
1337 Reset server to remove REQUEST_METHOD hack to test hg client
1338
1339 $ killdaemons.py
1340 $ REMOTE_USER=gooduser hg serve -p $HGPORT -d --pid-file hg.pid
1341 $ cat hg.pid > $DAEMON_PIDS
1342
1343 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1344 pushing to http://localhost:$HGPORT/
1345 searching for changes
1346 no changes found
1347 exporting bookmark bm
1348 [1]
1349
1350 $ hg book -d bm
1351
1352 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1353 pushing to http://localhost:$HGPORT/
1354 searching for changes
1355 remote: adding changesets
1356 remote: adding manifests
1357 remote: adding file changes
1358 remote: added 1 changesets with 1 changes to 1 files
1359
1360 $ hg strip -r 1:
1361 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
1362
1363 $ killdaemons.py
1364
1365 web.deny_push takes precedence over web.allow_push
1366
1367 $ cat > .hg/hgrc <<EOF
1368 > [web]
1369 > push_ssl = false
1370 > allow-push = someuser
1371 > deny_push = someuser
1372 > EOF
1373
1374 $ REMOTE_USER=someuser REQUEST_METHOD=POST hg serve -p $HGPORT -d --pid-file hg.pid
1375 $ cat hg.pid > $DAEMON_PIDS
1376
1377 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1378 401 push not authorized
1379
1380 0
1381 push not authorized
1382 [1]
1383
1384 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1385 401 push not authorized
1386
1387 0
1388 push not authorized
1389 [1]
1390
1391 $ hg bookmarks
1392 no bookmarks set
1393
1394 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1395 401 push not authorized
1396
1397 0
1398 push not authorized
1399 [1]
1400
1401 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1402 401 push not authorized
1403
1404 0
1405 push not authorized
1406 [1]
1407
1408 Reset server to remove REQUEST_METHOD hack to test hg client
1409
1410 $ killdaemons.py
1411 $ REMOTE_USER=someuser hg serve -p $HGPORT -d --pid-file hg.pid
1412 $ cat hg.pid > $DAEMON_PIDS
1413
1414 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1415 pushing to http://localhost:$HGPORT/
1416 searching for changes
1417 no changes found
1418 abort: authorization failed
1419 [255]
1420
1421 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1422 pushing to http://localhost:$HGPORT/
1423 searching for changes
1424 abort: authorization failed
1425 [255]
1426
1427 $ killdaemons.py
1428
1429 web.allow-push has no effect if web.deny_read is set
1430
1431 $ cat > .hg/hgrc <<EOF
1432 > [web]
1433 > push_ssl = false
1434 > allow-push = *
1435 > deny_read = *
1436 > EOF
1437
1438 $ REQUEST_METHOD=POST REMOTE_USER=someuser hg serve -p $HGPORT -d --pid-file hg.pid
1439 $ cat hg.pid > $DAEMON_PIDS
1440
1441 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=pushkey' --requestheader 'x-hgarg-1=namespace=bookmarks&key=bm&old=&new=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1442 401 read not authorized
1443
1444 0
1445 read not authorized
1446 [1]
1447
1448 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=batch' --requestheader 'x-hgarg-1=cmds=pushkey+namespace%3Dbookmarks%2Ckey%3Dbm%2Cold%3D%2Cnew%3Dcb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b'
1449 401 read not authorized
1450
1451 0
1452 read not authorized
1453 [1]
1454
1455 $ hg bookmarks
1456 no bookmarks set
1457
1458 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadnoperm'
1459 401 read not authorized
1460
1461 0
1462 read not authorized
1463 [1]
1464
1465 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customreadwithperm'
1466 401 read not authorized
1467
1468 0
1469 read not authorized
1470 [1]
1471
1472 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritenoperm'
1473 401 read not authorized
1474
1475 0
1476 read not authorized
1477 [1]
1478
1479 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=customwritewithperm'
1480 401 read not authorized
1481
1482 0
1483 read not authorized
1484 [1]
1485
1486 Reset server to remove REQUEST_METHOD hack to test hg client
1487
1488 $ killdaemons.py
1489 $ REMOTE_USER=someuser hg serve -p $HGPORT -d --pid-file hg.pid
1490 $ cat hg.pid > $DAEMON_PIDS
1491
1492 $ hg --cwd ../test2 push -B bm http://localhost:$HGPORT/
1493 pushing to http://localhost:$HGPORT/
1494 abort: authorization failed
1495 [255]
1496
1497 $ hg --cwd ../test2 push http://localhost:$HGPORT/
1498 pushing to http://localhost:$HGPORT/
1499 abort: authorization failed
1500 [255]
1501
1502 $ killdaemons.py
@@ -0,0 +1,97 b''
1 In this test, we want to test LFS bundle application on both LFS and non-LFS
2 repos.
3
4 To make it more interesting, the file revisions will contain hg filelog
5 metadata ('\1\n'). The bundle will have 1 file revision overlapping with the
6 destination repo.
7
8 # rev 1 2 3
9 # repo: yes yes no
10 # bundle: no (base) yes yes (deltabase: 2 if possible)
11
12 It is interesting because rev 2 could have been stored as LFS in the repo, and
13 non-LFS in the bundle; or vice-versa.
14
15 Init
16
17 $ cat >> $HGRCPATH << EOF
18 > [extensions]
19 > lfs=
20 > drawdag=$TESTDIR/drawdag.py
21 > [lfs]
22 > url=file:$TESTTMP/lfs-remote
23 > EOF
24
25 Helper functions
26
27 $ commitxy() {
28 > hg debugdrawdag "$@" <<'EOS'
29 > Y # Y/X=\1\nAAAA\nE\nF
30 > | # Y/Y=\1\nAAAA\nG\nH
31 > X # X/X=\1\nAAAA\nC\n
32 > # X/Y=\1\nAAAA\nD\n
33 > EOS
34 > }
35
36 $ commitz() {
37 > hg debugdrawdag "$@" <<'EOS'
38 > Z # Z/X=\1\nAAAA\nI\n
39 > | # Z/Y=\1\nAAAA\nJ\n
40 > | # Z/Z=\1\nZ
41 > Y
42 > EOS
43 > }
44
45 $ enablelfs() {
46 > cat >> .hg/hgrc <<EOF
47 > [lfs]
48 > track=all()
49 > EOF
50 > }
51
52 Generate bundles
53
54 $ for i in normal lfs; do
55 > NAME=src-$i
56 > hg init $TESTTMP/$NAME
57 > cd $TESTTMP/$NAME
58 > [ $i = lfs ] && enablelfs
59 > commitxy
60 > commitz
61 > hg bundle -q --base X -r Y+Z $TESTTMP/$NAME.bundle
62 > SRCNAMES="$SRCNAMES $NAME"
63 > done
64
65 Prepare destination repos
66
67 $ for i in normal lfs; do
68 > NAME=dst-$i
69 > hg init $TESTTMP/$NAME
70 > cd $TESTTMP/$NAME
71 > [ $i = lfs ] && enablelfs
72 > commitxy
73 > DSTNAMES="$DSTNAMES $NAME"
74 > done
75
76 Apply bundles
77
78 $ for i in $SRCNAMES; do
79 > for j in $DSTNAMES; do
80 > echo ---- Applying $i.bundle to $j ----
81 > cp -R $TESTTMP/$j $TESTTMP/tmp-$i-$j
82 > cd $TESTTMP/tmp-$i-$j
83 > if hg unbundle $TESTTMP/$i.bundle -q 2>/dev/null; then
84 > hg verify -q && echo OK
85 > else
86 > echo CRASHED
87 > fi
88 > done
89 > done
90 ---- Applying src-normal.bundle to dst-normal ----
91 OK
92 ---- Applying src-normal.bundle to dst-lfs ----
93 OK
94 ---- Applying src-lfs.bundle to dst-normal ----
95 OK
96 ---- Applying src-lfs.bundle to dst-lfs ----
97 OK
@@ -1,160 +1,162 b''
1 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
1 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
2 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
2 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
3 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
3 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
4 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
4 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
5 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
5 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
6 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
6 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
7 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
7 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
8 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
8 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
9 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
9 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
10 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
10 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
11 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
11 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
12 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
12 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
13 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
13 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
14 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
14 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
15 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
15 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
16 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
16 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
17 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
17 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
18 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
18 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
19 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
19 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
20 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
20 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
21 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
21 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
22 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
22 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
23 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
23 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
24 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
24 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
25 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
25 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
26 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
26 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
27 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
27 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
28 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
28 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
29 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
29 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
30 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
30 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
31 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
31 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
32 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
32 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
33 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
33 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
34 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
34 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
35 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
35 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
36 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
36 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
37 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
37 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
38 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
38 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
39 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
39 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
40 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
40 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
41 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
41 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
42 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
42 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
43 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
43 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
44 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
44 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
45 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
45 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
46 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
46 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
47 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
47 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
48 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
48 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
49 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
49 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
50 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
50 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
51 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
51 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
52 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
52 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
53 b9bd95e61b49c221c4cca24e6da7c946fc02f992 0 iD8DBQBPeLsIywK+sNU5EO8RAvpNAKCtKe2gitz8dYn52IRF0hFOPCR7AQCfRJL/RWCFweu2T1vH/mUOCf8SXXc=
53 b9bd95e61b49c221c4cca24e6da7c946fc02f992 0 iD8DBQBPeLsIywK+sNU5EO8RAvpNAKCtKe2gitz8dYn52IRF0hFOPCR7AQCfRJL/RWCFweu2T1vH/mUOCf8SXXc=
54 d9e2f09d5488c395ae9ddbb320ceacd24757e055 0 iD8DBQBPju/dywK+sNU5EO8RArBYAJ9xtifdbk+hCOJO8OZa4JfHX8OYZQCeKPMBaBWiT8N/WHoOm1XU0q+iono=
54 d9e2f09d5488c395ae9ddbb320ceacd24757e055 0 iD8DBQBPju/dywK+sNU5EO8RArBYAJ9xtifdbk+hCOJO8OZa4JfHX8OYZQCeKPMBaBWiT8N/WHoOm1XU0q+iono=
55 00182b3d087909e3c3ae44761efecdde8f319ef3 0 iD8DBQBPoFhIywK+sNU5EO8RAhzhAKCBj1n2jxPTkZNJJ5pSp3soa+XHIgCgsZZpAQxOpXwCp0eCdNGe0+pmxmg=
55 00182b3d087909e3c3ae44761efecdde8f319ef3 0 iD8DBQBPoFhIywK+sNU5EO8RAhzhAKCBj1n2jxPTkZNJJ5pSp3soa+XHIgCgsZZpAQxOpXwCp0eCdNGe0+pmxmg=
56 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 0 iD8DBQBPovNWywK+sNU5EO8RAhgiAJ980T91FdPTRMmVONDhpkMsZwVIMACgg3bKvoWSeuCW28llUhAJtUjrMv0=
56 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 0 iD8DBQBPovNWywK+sNU5EO8RAhgiAJ980T91FdPTRMmVONDhpkMsZwVIMACgg3bKvoWSeuCW28llUhAJtUjrMv0=
57 85a358df5bbbe404ca25730c9c459b34263441dc 0 iD8DBQBPyZsWywK+sNU5EO8RAnpLAJ48qrGDJRT+pteS0mSQ11haqHstPwCdG4ccGbk+0JHb7aNy8/NRGAOqn9w=
57 85a358df5bbbe404ca25730c9c459b34263441dc 0 iD8DBQBPyZsWywK+sNU5EO8RAnpLAJ48qrGDJRT+pteS0mSQ11haqHstPwCdG4ccGbk+0JHb7aNy8/NRGAOqn9w=
58 b013baa3898e117959984fc64c29d8c784d2f28b 0 iD8DBQBP8QOPywK+sNU5EO8RAqimAKCFRSx0lvG6y8vne2IhNG062Hn0dACeMLI5/zhpWpHBIVeAAquYfx2XFeA=
58 b013baa3898e117959984fc64c29d8c784d2f28b 0 iD8DBQBP8QOPywK+sNU5EO8RAqimAKCFRSx0lvG6y8vne2IhNG062Hn0dACeMLI5/zhpWpHBIVeAAquYfx2XFeA=
59 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 0 iD8DBQBQGiL8ywK+sNU5EO8RAq5oAJ4rMMCPx6O+OuzNXVOexogedWz/QgCeIiIxLd76I4pXO48tdXhr0hQcBuM=
59 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 0 iD8DBQBQGiL8ywK+sNU5EO8RAq5oAJ4rMMCPx6O+OuzNXVOexogedWz/QgCeIiIxLd76I4pXO48tdXhr0hQcBuM=
60 072209ae4ddb654eb2d5fd35bff358c738414432 0 iD8DBQBQQkq0ywK+sNU5EO8RArDTAJ9nk5CySnNAjAXYvqvx4uWCw9ThZwCgqmFRehH/l+oTwj3f8nw8u8qTCdc=
60 072209ae4ddb654eb2d5fd35bff358c738414432 0 iD8DBQBQQkq0ywK+sNU5EO8RArDTAJ9nk5CySnNAjAXYvqvx4uWCw9ThZwCgqmFRehH/l+oTwj3f8nw8u8qTCdc=
61 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 0 iD8DBQBQamltywK+sNU5EO8RAlsqAJ4qF/m6aFu4mJCOKTiAP5RvZFK02ACfawYShUZO6OXEFfveU0aAxDR0M1k=
61 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 0 iD8DBQBQamltywK+sNU5EO8RAlsqAJ4qF/m6aFu4mJCOKTiAP5RvZFK02ACfawYShUZO6OXEFfveU0aAxDR0M1k=
62 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 0 iD8DBQBQgPV5ywK+sNU5EO8RArylAJ0abcx5NlDjyv3ZDWpAfRIHyRsJtQCgn4TMuEayqgxzrvadQZHdTEU2g38=
62 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 0 iD8DBQBQgPV5ywK+sNU5EO8RArylAJ0abcx5NlDjyv3ZDWpAfRIHyRsJtQCgn4TMuEayqgxzrvadQZHdTEU2g38=
63 195ad823b5d58c68903a6153a25e3fb4ed25239d 0 iD8DBQBQkuT9ywK+sNU5EO8RAhB4AKCeerItoK2Jipm2cVf4euGofAa/WACeJj3TVd4pFILpb+ogj7ebweFLJi0=
63 195ad823b5d58c68903a6153a25e3fb4ed25239d 0 iD8DBQBQkuT9ywK+sNU5EO8RAhB4AKCeerItoK2Jipm2cVf4euGofAa/WACeJj3TVd4pFILpb+ogj7ebweFLJi0=
64 0c10cf8191469e7c3c8844922e17e71a176cb7cb 0 iD8DBQBQvQWoywK+sNU5EO8RAnq3AJoCn98u4geFx5YaQaeh99gFhCd7bQCgjoBwBSUyOvGd0yBy60E3Vv3VZhM=
64 0c10cf8191469e7c3c8844922e17e71a176cb7cb 0 iD8DBQBQvQWoywK+sNU5EO8RAnq3AJoCn98u4geFx5YaQaeh99gFhCd7bQCgjoBwBSUyOvGd0yBy60E3Vv3VZhM=
65 a4765077b65e6ae29ba42bab7834717b5072d5ba 0 iD8DBQBQ486sywK+sNU5EO8RAhmJAJ90aLfLKZhmcZN7kqphigQJxiFOQACeJ5IUZxjGKH4xzi3MrgIcx9n+dB0=
65 a4765077b65e6ae29ba42bab7834717b5072d5ba 0 iD8DBQBQ486sywK+sNU5EO8RAhmJAJ90aLfLKZhmcZN7kqphigQJxiFOQACeJ5IUZxjGKH4xzi3MrgIcx9n+dB0=
66 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 0 iD8DBQBQ+yuYywK+sNU5EO8RAm9JAJoD/UciWvpGeKBcpGtZJBFJVcL/HACghDXSgQ+xQDjB+6uGrdgAQsRR1Lg=
66 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 0 iD8DBQBQ+yuYywK+sNU5EO8RAm9JAJoD/UciWvpGeKBcpGtZJBFJVcL/HACghDXSgQ+xQDjB+6uGrdgAQsRR1Lg=
67 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 0 iD8DBQBRDDROywK+sNU5EO8RAh75AJ9uJCGoCWnP0Lv/+XuYs4hvUl+sAgCcD36QgAnuw8IQXrvv684BAXAnHcA=
67 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 0 iD8DBQBRDDROywK+sNU5EO8RAh75AJ9uJCGoCWnP0Lv/+XuYs4hvUl+sAgCcD36QgAnuw8IQXrvv684BAXAnHcA=
68 7511d4df752e61fe7ae4f3682e0a0008573b0402 0 iD8DBQBRFYaoywK+sNU5EO8RAuErAJoDyhXn+lptU3+AevVdwAIeNFyR2gCdHzPHyWd+JDeWCUR+pSOBi8O2ppM=
68 7511d4df752e61fe7ae4f3682e0a0008573b0402 0 iD8DBQBRFYaoywK+sNU5EO8RAuErAJoDyhXn+lptU3+AevVdwAIeNFyR2gCdHzPHyWd+JDeWCUR+pSOBi8O2ppM=
69 5b7175377babacce80a6c1e12366d8032a6d4340 0 iD8DBQBRMCYgywK+sNU5EO8RAq1/AKCWKlt9ysibyQgYwoxxIOZv5J8rpwCcDSHQaaf1fFZUTnQsOePwcM2Y/Sg=
69 5b7175377babacce80a6c1e12366d8032a6d4340 0 iD8DBQBRMCYgywK+sNU5EO8RAq1/AKCWKlt9ysibyQgYwoxxIOZv5J8rpwCcDSHQaaf1fFZUTnQsOePwcM2Y/Sg=
70 50c922c1b5145dab8baefefb0437d363b6a6c21c 0 iD8DBQBRWnUnywK+sNU5EO8RAuQRAJwM42cJqJPeqJ0jVNdMqKMDqr4dSACeP0cRVGz1gitMuV0x8f3mrZrqc7I=
70 50c922c1b5145dab8baefefb0437d363b6a6c21c 0 iD8DBQBRWnUnywK+sNU5EO8RAuQRAJwM42cJqJPeqJ0jVNdMqKMDqr4dSACeP0cRVGz1gitMuV0x8f3mrZrqc7I=
71 8a7bd2dccd44ed571afe7424cd7f95594f27c092 0 iD8DBQBRXfBvywK+sNU5EO8RAn+LAKCsMmflbuXjYRxlzFwId5ptm8TZcwCdGkyLbZcASBOkzQUm/WW1qfknJHU=
71 8a7bd2dccd44ed571afe7424cd7f95594f27c092 0 iD8DBQBRXfBvywK+sNU5EO8RAn+LAKCsMmflbuXjYRxlzFwId5ptm8TZcwCdGkyLbZcASBOkzQUm/WW1qfknJHU=
72 292cd385856d98bacb2c3086f8897bc660c2beea 0 iD8DBQBRcM0BywK+sNU5EO8RAjp4AKCJBykQbvXhKuvLSMxKx3a2TBiXcACfbr/kLg5GlZTF/XDPmY+PyHgI/GM=
72 292cd385856d98bacb2c3086f8897bc660c2beea 0 iD8DBQBRcM0BywK+sNU5EO8RAjp4AKCJBykQbvXhKuvLSMxKx3a2TBiXcACfbr/kLg5GlZTF/XDPmY+PyHgI/GM=
73 23f785b38af38d2fca6b8f3db56b8007a84cd73a 0 iD8DBQBRgZwNywK+sNU5EO8RAmO4AJ4u2ILGuimRP6MJgE2t65LZ5dAdkACgiENEstIdrlFC80p+sWKD81kKIYI=
73 23f785b38af38d2fca6b8f3db56b8007a84cd73a 0 iD8DBQBRgZwNywK+sNU5EO8RAmO4AJ4u2ILGuimRP6MJgE2t65LZ5dAdkACgiENEstIdrlFC80p+sWKD81kKIYI=
74 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 0 iD8DBQBRkswvywK+sNU5EO8RAiYYAJsHTHyHbJeAgmGvBTmDrfcKu4doUgCeLm7eGBjx7yAPUvEtxef8rAkQmXI=
74 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 0 iD8DBQBRkswvywK+sNU5EO8RAiYYAJsHTHyHbJeAgmGvBTmDrfcKu4doUgCeLm7eGBjx7yAPUvEtxef8rAkQmXI=
75 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 0 iD8DBQBRqnFLywK+sNU5EO8RAsWNAJ9RR6t+y1DLFc2HeH0eN9VfZAKF9gCeJ8ezvhtKq/LMs0/nvcgKQc/d5jk=
75 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 0 iD8DBQBRqnFLywK+sNU5EO8RAsWNAJ9RR6t+y1DLFc2HeH0eN9VfZAKF9gCeJ8ezvhtKq/LMs0/nvcgKQc/d5jk=
76 009794acc6e37a650f0fae37872e733382ac1c0c 0 iD8DBQBR0guxywK+sNU5EO8RArNkAKCq9pMihVzP8Os5kCmgbWpe5C37wgCgqzuPZTHvAsXF5wTyaSTMVa9Ccq4=
76 009794acc6e37a650f0fae37872e733382ac1c0c 0 iD8DBQBR0guxywK+sNU5EO8RArNkAKCq9pMihVzP8Os5kCmgbWpe5C37wgCgqzuPZTHvAsXF5wTyaSTMVa9Ccq4=
77 f0d7721d7322dcfb5af33599c2543f27335334bb 0 iD8DBQBR8taaywK+sNU5EO8RAqeEAJ4idDhhDuEsgsUjeQgWNj498matHACfT67gSF5w0ylsrBx1Hb52HkGXDm0=
77 f0d7721d7322dcfb5af33599c2543f27335334bb 0 iD8DBQBR8taaywK+sNU5EO8RAqeEAJ4idDhhDuEsgsUjeQgWNj498matHACfT67gSF5w0ylsrBx1Hb52HkGXDm0=
78 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 0 iD8DBQBR+ymFywK+sNU5EO8RAuSdAJkBMcd9DAZ3rWE9WGKPm2YZ8LBoXACfXn/wbEsVy7ZgJoUwiWmHSnQaWCI=
78 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 0 iD8DBQBR+ymFywK+sNU5EO8RAuSdAJkBMcd9DAZ3rWE9WGKPm2YZ8LBoXACfXn/wbEsVy7ZgJoUwiWmHSnQaWCI=
79 335a558f81dc73afeab4d7be63617392b130117f 0 iQIVAwUAUiZrIyBXgaxoKi1yAQK2iw//cquNqqSkc8Re5/TZT9I6NH+lh6DbOKjJP0Xl1Wqq0K+KSIUgZG4G32ovaEb2l5X0uY+3unRPiZ0ebl0YSw4Fb2ZiPIADXLBTOYRrY2Wwd3tpJeGI6wEgZt3SfcITV/g7NJrCjT3FlYoSOIayrExM80InSdcEM0Q3Rx6HKzY2acyxzgZeAtAW5ohFvHilSvY6p5Gcm4+QptMxvw45GPdreUmjeXZxNXNXZ8P+MjMz/QJbai/N7PjmK8lqnhkBsT48Ng/KhhmOkGntNJ2/ImBWLFGcWngSvJ7sfWwnyhndvGhe0Hq1NcCf7I8TjNDxU5TR+m+uW7xjXdLoDbUjBdX4sKXnh8ZjbYiODKBOrrDq25cf8nA/tnpKyE/qsVy60kOk6loY4XKiYmn1V49Ta0emmDx0hqo3HgxHHsHX0NDnGdWGol7cPRET0RzVobKq1A0jnrhPooWidvLh9bPzLonrWDo+ib+DuySoRkuYUK4pgZJ2mbg6daFOBEZygkSyRB8bo1UQUP7EgQDrWe4khb/5GHEfDkrQz3qu/sXvc0Ir1mOUWBFPHC2DjjCn/oMJuUkG1SwM8l2Bfv7h67ssES6YQ2+RjOix4yid7EXS/Ogl45PzCIPSI5+BbNs10JhE0w5uErBHlF53EDTe/TSLc+GU6DB6PP6dH912Njdr3jpNSUQ=
79 335a558f81dc73afeab4d7be63617392b130117f 0 iQIVAwUAUiZrIyBXgaxoKi1yAQK2iw//cquNqqSkc8Re5/TZT9I6NH+lh6DbOKjJP0Xl1Wqq0K+KSIUgZG4G32ovaEb2l5X0uY+3unRPiZ0ebl0YSw4Fb2ZiPIADXLBTOYRrY2Wwd3tpJeGI6wEgZt3SfcITV/g7NJrCjT3FlYoSOIayrExM80InSdcEM0Q3Rx6HKzY2acyxzgZeAtAW5ohFvHilSvY6p5Gcm4+QptMxvw45GPdreUmjeXZxNXNXZ8P+MjMz/QJbai/N7PjmK8lqnhkBsT48Ng/KhhmOkGntNJ2/ImBWLFGcWngSvJ7sfWwnyhndvGhe0Hq1NcCf7I8TjNDxU5TR+m+uW7xjXdLoDbUjBdX4sKXnh8ZjbYiODKBOrrDq25cf8nA/tnpKyE/qsVy60kOk6loY4XKiYmn1V49Ta0emmDx0hqo3HgxHHsHX0NDnGdWGol7cPRET0RzVobKq1A0jnrhPooWidvLh9bPzLonrWDo+ib+DuySoRkuYUK4pgZJ2mbg6daFOBEZygkSyRB8bo1UQUP7EgQDrWe4khb/5GHEfDkrQz3qu/sXvc0Ir1mOUWBFPHC2DjjCn/oMJuUkG1SwM8l2Bfv7h67ssES6YQ2+RjOix4yid7EXS/Ogl45PzCIPSI5+BbNs10JhE0w5uErBHlF53EDTe/TSLc+GU6DB6PP6dH912Njdr3jpNSUQ=
80 e7fa36d2ad3a7944a52dca126458d6f482db3524 0 iQIVAwUAUktg4yBXgaxoKi1yAQLO0g//du/2ypYYUfmM/yZ4zztNKIvgMSGTDVbCCGB2y2/wk2EcolpjpGTkcgnJT413ksYtw78ZU+mvv0RjgrFCm8DQ8kroJaQZ2qHmtSUb42hPBPvtg6kL9YaA4yvp87uUBpFRavGS5uX4hhEIyvZKzhXUBvqtL3TfwR7ld21bj8j00wudqELyyU9IrojIY9jkJ3XL/4shBGgP7u6OK5g8yJ6zTnWgysUetxHBPrYjG25lziiiZQFvZqK1B3PUqAOaFPltQs0PB8ipOCAHQgJsjaREj8VmC3+rskmSSy66NHm6gAB9+E8oAgOcU7FzWbdYgnz4kR3M7TQvHX9U61NinPXC6Q9d1VPhO3E6sIGvqJ4YeQOn65V9ezYuIpFSlgQzCHMmLVnOV96Uv1R/Z39I4w7D3S5qoZcQT/siQwGbsZoPMGFYmqOK1da5TZWrrJWkYzc9xvzT9m3q3Wds5pmCmo4b/dIqDifWwYEcNAZ0/YLHwCN5SEZWuunkEwtU5o7TZAv3bvDDA6WxUrrHI/y9/qvvhXxsJnY8IueNhshdmWZfXKz+lJi2Dvk7DUlEQ1zZWSsozi1E+3biMPJO47jsxjoT/jmE5+GHLCgcnXXDVBeaVal99IOaTRFukiz2EMsry1s8fnwEE5XKDKRlU/dOPfsje0gc7bgE0QD/u3E4NJ99g9A=
80 e7fa36d2ad3a7944a52dca126458d6f482db3524 0 iQIVAwUAUktg4yBXgaxoKi1yAQLO0g//du/2ypYYUfmM/yZ4zztNKIvgMSGTDVbCCGB2y2/wk2EcolpjpGTkcgnJT413ksYtw78ZU+mvv0RjgrFCm8DQ8kroJaQZ2qHmtSUb42hPBPvtg6kL9YaA4yvp87uUBpFRavGS5uX4hhEIyvZKzhXUBvqtL3TfwR7ld21bj8j00wudqELyyU9IrojIY9jkJ3XL/4shBGgP7u6OK5g8yJ6zTnWgysUetxHBPrYjG25lziiiZQFvZqK1B3PUqAOaFPltQs0PB8ipOCAHQgJsjaREj8VmC3+rskmSSy66NHm6gAB9+E8oAgOcU7FzWbdYgnz4kR3M7TQvHX9U61NinPXC6Q9d1VPhO3E6sIGvqJ4YeQOn65V9ezYuIpFSlgQzCHMmLVnOV96Uv1R/Z39I4w7D3S5qoZcQT/siQwGbsZoPMGFYmqOK1da5TZWrrJWkYzc9xvzT9m3q3Wds5pmCmo4b/dIqDifWwYEcNAZ0/YLHwCN5SEZWuunkEwtU5o7TZAv3bvDDA6WxUrrHI/y9/qvvhXxsJnY8IueNhshdmWZfXKz+lJi2Dvk7DUlEQ1zZWSsozi1E+3biMPJO47jsxjoT/jmE5+GHLCgcnXXDVBeaVal99IOaTRFukiz2EMsry1s8fnwEE5XKDKRlU/dOPfsje0gc7bgE0QD/u3E4NJ99g9A=
81 1596f2d8f2421314b1ddead8f7d0c91009358994 0 iQIVAwUAUmRq+yBXgaxoKi1yAQLolhAAi+l4ZFdQTu9yJDv22YmkmHH4fI3d5VBYgvfJPufpyaj7pX626QNW18UNcGSw2BBpYHIJzWPkk/4XznLVKr4Ciw2N3/yqloEFV0V2SSrTbMWiR9qXI4KJH+Df3KZnKs3FgiYpXkErL4GWkc1jLVR50xQ5RnkMljjtCd0NTeV2PHZ6gP2qbu6CS+5sm3AFhTDGnx8GicbMw76ZNw5M2G+T48yH9jn5KQi2SBThfi4H9Bpr8FDuR7PzQLgw9SbtYxtdQxNkK55k0nG4oLDxduNakU6SH9t8n8tdCfMt58kTzlQVrPFiTFjKu2n2JioDTz2HEivbZ5H757cu7SvpX8gW3paeBc57e+GOLMisMZABXLICq59c3QnrMwFY4FG+5cpiHVXoaZz/0bYCJx+IhU4QLWqZuzb18KSyHUCqQRzXlzS6QV5O7dY5YNQXFC44j/dS5zdgWMYo2mc6mVP2OaPUn7F6aQh5MCDYorPIOkcNjOg7ytajo7DXbzWt5Al8qt6386BJksyR3GAonc09+l8IFeNxk8HZNP4ETQ8aWj0dC9jgBDPK43T2Bju/i84s+U/bRe4tGSQalZUEv06mkIH/VRJp5w2izYTsdIjA4FT9d36OhaxlfoO1X6tHR9AyA3bF/g/ozvBwuo3kTRUUqo+Ggvx/DmcPQdDiZZQIqDBXch0=
81 1596f2d8f2421314b1ddead8f7d0c91009358994 0 iQIVAwUAUmRq+yBXgaxoKi1yAQLolhAAi+l4ZFdQTu9yJDv22YmkmHH4fI3d5VBYgvfJPufpyaj7pX626QNW18UNcGSw2BBpYHIJzWPkk/4XznLVKr4Ciw2N3/yqloEFV0V2SSrTbMWiR9qXI4KJH+Df3KZnKs3FgiYpXkErL4GWkc1jLVR50xQ5RnkMljjtCd0NTeV2PHZ6gP2qbu6CS+5sm3AFhTDGnx8GicbMw76ZNw5M2G+T48yH9jn5KQi2SBThfi4H9Bpr8FDuR7PzQLgw9SbtYxtdQxNkK55k0nG4oLDxduNakU6SH9t8n8tdCfMt58kTzlQVrPFiTFjKu2n2JioDTz2HEivbZ5H757cu7SvpX8gW3paeBc57e+GOLMisMZABXLICq59c3QnrMwFY4FG+5cpiHVXoaZz/0bYCJx+IhU4QLWqZuzb18KSyHUCqQRzXlzS6QV5O7dY5YNQXFC44j/dS5zdgWMYo2mc6mVP2OaPUn7F6aQh5MCDYorPIOkcNjOg7ytajo7DXbzWt5Al8qt6386BJksyR3GAonc09+l8IFeNxk8HZNP4ETQ8aWj0dC9jgBDPK43T2Bju/i84s+U/bRe4tGSQalZUEv06mkIH/VRJp5w2izYTsdIjA4FT9d36OhaxlfoO1X6tHR9AyA3bF/g/ozvBwuo3kTRUUqo+Ggvx/DmcPQdDiZZQIqDBXch0=
82 d825e4025e39d1c39db943cdc89818abd0a87c27 0 iQIVAwUAUnQlXiBXgaxoKi1yAQJd3BAAi7LjMSpXmdR7B8K98C3/By4YHsCOAocMl3JXiLd7SXwKmlta1zxtkgWwWJnNYE3lVJvGCl+l4YsGKmFu755MGXlyORh1x4ohckoC1a8cqnbNAgD6CSvjSaZfnINLGZQP1wIP4yWj0FftKVANQBjj/xkkxO530mjBYnUvyA4PeDd5A1AOUUu6qHzX6S5LcprEt7iktLI+Ae1dYTkiCpckDtyYUKIk3RK/4AGWwGCPddVWeV5bDxLs8GHyMbqdBwx+2EAMtyZfXT+z6MDRsL/gEBVOXHb/UR0qpYED+qFnbtTlxqQkRE/wBhwDoRzUgcSuukQ9iPn79WNDSdT5b6Jd393uEO5BNF/DB6rrOiWmlpoooWgTY9kcwGB02v0hhLrH5r1wkv8baaPl+qjCjBxf4CNKm/83KN5/umGbZlORqPSN5JVxK6vDNwFFmHLaZbMT1g27GsGOWm84VH+dgolgk4nmRNSO37eTNM5Y1C3Zf2amiqDSRcAxCgseg0Jh10G7i52SSTcZPI2MqrwT9eIyg8PTIxT1D5bPcCzkg5nTTL6S7bet7OSwynRnHslhvVUBly8aIj4eY/5cQqAucUUa5sq6xLD8N27Tl+sQi+kE6KtWu2c0ZhpouflYp55XNMHgU4KeFcVcDtHfJRF6THT6tFcHFNauCHbhfN2F33ANMP4=
82 d825e4025e39d1c39db943cdc89818abd0a87c27 0 iQIVAwUAUnQlXiBXgaxoKi1yAQJd3BAAi7LjMSpXmdR7B8K98C3/By4YHsCOAocMl3JXiLd7SXwKmlta1zxtkgWwWJnNYE3lVJvGCl+l4YsGKmFu755MGXlyORh1x4ohckoC1a8cqnbNAgD6CSvjSaZfnINLGZQP1wIP4yWj0FftKVANQBjj/xkkxO530mjBYnUvyA4PeDd5A1AOUUu6qHzX6S5LcprEt7iktLI+Ae1dYTkiCpckDtyYUKIk3RK/4AGWwGCPddVWeV5bDxLs8GHyMbqdBwx+2EAMtyZfXT+z6MDRsL/gEBVOXHb/UR0qpYED+qFnbtTlxqQkRE/wBhwDoRzUgcSuukQ9iPn79WNDSdT5b6Jd393uEO5BNF/DB6rrOiWmlpoooWgTY9kcwGB02v0hhLrH5r1wkv8baaPl+qjCjBxf4CNKm/83KN5/umGbZlORqPSN5JVxK6vDNwFFmHLaZbMT1g27GsGOWm84VH+dgolgk4nmRNSO37eTNM5Y1C3Zf2amiqDSRcAxCgseg0Jh10G7i52SSTcZPI2MqrwT9eIyg8PTIxT1D5bPcCzkg5nTTL6S7bet7OSwynRnHslhvVUBly8aIj4eY/5cQqAucUUa5sq6xLD8N27Tl+sQi+kE6KtWu2c0ZhpouflYp55XNMHgU4KeFcVcDtHfJRF6THT6tFcHFNauCHbhfN2F33ANMP4=
83 209e04a06467e2969c0cc6501335be0406d46ef0 0 iQIVAwUAUpv1oCBXgaxoKi1yAQKOFBAAma2wlsr3w/5NvDwq2rmOrgtNDq1DnNqcXloaOdwegX1z3/N++5uVjLjI0VyguexnwK+7E8rypMZ+4glaiZvIiGPnGMYbG9iOoz5XBhtUHzI5ECYfm5QU81by9VmCIvArDFe5Hlnz4XaXpEGnAwPywD+yzV3/+tyoV7MgsVinCMtbX9OF84/ubWKNzq2810FpQRfYoCOrF8sUed/1TcQrSm1eMB/PnuxjFCFySiR6J7Urd9bJoJIDtdZOQeeHaL5Z8Pcsyzjoe/9oTwJ3L3tl/NMZtRxiQUWtfRA0zvEnQ4QEkZSDMd/JnGiWHPVeP4P92+YN15za9yhneEAtustrTNAmVF2Uh92RIlmkG475HFhvwPJ4DfCx0vU1OOKX/U4c1rifW7H7HaipoaMlsDU2VFsAHcc3YF8ulVt27bH2yUaLGJz7eqpt+3DzZTKp4d/brZA2EkbVgsoYP+XYLbzxfwWlaMwiN3iCnlTFbNogH8MxhfHFWBj6ouikqOz8HlNl6BmSQiUCBnz5fquVpXmW2Md+TDekk+uOW9mvk1QMU62br+Z6PEZupkdTrqKaz+8ZMWvTRct8SiOcu7R11LpfERyrwYGGPei0P2YrEGIWGgXvEobXoPTSl7J+mpOA/rp2Q1zA3ihjgzwtGZZF+ThQXZGIMGaA2YPgzuYRqY8l5oc=
83 209e04a06467e2969c0cc6501335be0406d46ef0 0 iQIVAwUAUpv1oCBXgaxoKi1yAQKOFBAAma2wlsr3w/5NvDwq2rmOrgtNDq1DnNqcXloaOdwegX1z3/N++5uVjLjI0VyguexnwK+7E8rypMZ+4glaiZvIiGPnGMYbG9iOoz5XBhtUHzI5ECYfm5QU81by9VmCIvArDFe5Hlnz4XaXpEGnAwPywD+yzV3/+tyoV7MgsVinCMtbX9OF84/ubWKNzq2810FpQRfYoCOrF8sUed/1TcQrSm1eMB/PnuxjFCFySiR6J7Urd9bJoJIDtdZOQeeHaL5Z8Pcsyzjoe/9oTwJ3L3tl/NMZtRxiQUWtfRA0zvEnQ4QEkZSDMd/JnGiWHPVeP4P92+YN15za9yhneEAtustrTNAmVF2Uh92RIlmkG475HFhvwPJ4DfCx0vU1OOKX/U4c1rifW7H7HaipoaMlsDU2VFsAHcc3YF8ulVt27bH2yUaLGJz7eqpt+3DzZTKp4d/brZA2EkbVgsoYP+XYLbzxfwWlaMwiN3iCnlTFbNogH8MxhfHFWBj6ouikqOz8HlNl6BmSQiUCBnz5fquVpXmW2Md+TDekk+uOW9mvk1QMU62br+Z6PEZupkdTrqKaz+8ZMWvTRct8SiOcu7R11LpfERyrwYGGPei0P2YrEGIWGgXvEobXoPTSl7J+mpOA/rp2Q1zA3ihjgzwtGZZF+ThQXZGIMGaA2YPgzuYRqY8l5oc=
84 ca387377df7a3a67dbb90b6336b781cdadc3ef41 0 iQIVAwUAUsThISBXgaxoKi1yAQJpvRAAkRkCWLjHBZnWxX9Oe6t2HQgkSsmn9wMHvXXGFkcAmrqJ86yfyrxLq2Ns0X7Qwky37kOwKsywM53FQlsx9j//Y+ncnGZoObFTz9YTuSbOHGVsTbAruXWxBrGOf1nFTlg8afcbH0jPfQXwxf3ptfBhgsFCzORcqc8HNopAW+2sgXGhHnbVtq6LF90PWkbKjCCQLiX3da1uETGAElrl4jA5Y2i64S1Q/2X+UFrNslkIIRCGmAJ6BnE6KLJaUftpfbN7Br7a3z9xxWqxRYDOinxDgfAPAucOJPLgMVQ0bJIallaRu7KTmIWKIuSBgg1/hgfoX8I1w49WrTGp0gGY140kl8RWwczAz/SB03Xtbl2+h6PV7rUV2K/5g61DkwdVbWqXM9wmJZmvjEKK0qQbBT0By4QSEDNcKKqtaFFwhFzx4dkXph0igHOtXhSNzMd8PsFx/NRn9NLFIpirxfqVDwakpDNBZw4Q9hUAlTPxSFL3vD9/Zs7lV4/dAvvl+tixJEi2k/iv248b/AI1PrPIQEqDvjrozzzYvrS4HtbkUn+IiHiepQaYnpqKoXvBu6btK/nv0GTxB5OwVJzMA1RPDcxIFfZA2AazHjrXiPAl5uWYEddEvRjaCiF8xkQkfiXzLOoqhKQHdwPGcfMFEs9lNR8BrB2ZOajBJc8RPsFDswhT5h4=
84 ca387377df7a3a67dbb90b6336b781cdadc3ef41 0 iQIVAwUAUsThISBXgaxoKi1yAQJpvRAAkRkCWLjHBZnWxX9Oe6t2HQgkSsmn9wMHvXXGFkcAmrqJ86yfyrxLq2Ns0X7Qwky37kOwKsywM53FQlsx9j//Y+ncnGZoObFTz9YTuSbOHGVsTbAruXWxBrGOf1nFTlg8afcbH0jPfQXwxf3ptfBhgsFCzORcqc8HNopAW+2sgXGhHnbVtq6LF90PWkbKjCCQLiX3da1uETGAElrl4jA5Y2i64S1Q/2X+UFrNslkIIRCGmAJ6BnE6KLJaUftpfbN7Br7a3z9xxWqxRYDOinxDgfAPAucOJPLgMVQ0bJIallaRu7KTmIWKIuSBgg1/hgfoX8I1w49WrTGp0gGY140kl8RWwczAz/SB03Xtbl2+h6PV7rUV2K/5g61DkwdVbWqXM9wmJZmvjEKK0qQbBT0By4QSEDNcKKqtaFFwhFzx4dkXph0igHOtXhSNzMd8PsFx/NRn9NLFIpirxfqVDwakpDNBZw4Q9hUAlTPxSFL3vD9/Zs7lV4/dAvvl+tixJEi2k/iv248b/AI1PrPIQEqDvjrozzzYvrS4HtbkUn+IiHiepQaYnpqKoXvBu6btK/nv0GTxB5OwVJzMA1RPDcxIFfZA2AazHjrXiPAl5uWYEddEvRjaCiF8xkQkfiXzLOoqhKQHdwPGcfMFEs9lNR8BrB2ZOajBJc8RPsFDswhT5h4=
85 8862469e16f9236208581b20de5f96bd13cc039d 0 iQIVAwUAUt7cLSBXgaxoKi1yAQLOkRAAidp501zafqe+JnDwlf7ORcJc+FgCE6mK1gxDfReCbkMsY7AzspogU7orqfSmr6XXdrDwmk3Y5x3mf44OGzNQjvuNWhqnTgJ7sOcU/lICGQUc8WiGNzHEMFGX9S+K4dpUaBf8Tcl8pU3iArhlthDghW6SZeDFB/FDBaUx9dkdFp6eXrmu4OuGRZEvwUvPtCGxIL7nKNnufI1du/MsWQxvC2ORHbMNtRq6tjA0fLZi4SvbySuYifQRS32BfHkFS5Qu4/40+1k7kd0YFyyQUvIsVa17lrix3zDqMavG8x7oOlqM/axDMBT6DhpdBMAdc5qqf8myz8lwjlFjyDUL6u3Z4/yE0nUrmEudXiXwG0xbVoEN8SCNrDmmvFMt6qdCpdDMkHr2TuSh0Hh4FT5CDkzPI8ZRssv/01j/QvIO3c/xlbpGRPWpsPXEVOz3pmjYN4qyQesnBKWCENsQLy/8s2rey8iQgx2GtsrNw8+wGX6XE4v3QtwUrRe12hWoNrEHWl0xnLv2mvAFqdMAMpFY6EpOKLlE4hoCs2CmTJ2dv6e2tiGTXGU6/frI5iuNRK61OXnH5OjEc8DCGH/GC7NXyDOXOB+7BdBvvf50l2C/vxR2TKgTncLtHeLCrR0GHNHsxqRo1UDwOWur0r7fdfCRvb2tIr5LORCqKYVKd60/BAXjHWc=
85 8862469e16f9236208581b20de5f96bd13cc039d 0 iQIVAwUAUt7cLSBXgaxoKi1yAQLOkRAAidp501zafqe+JnDwlf7ORcJc+FgCE6mK1gxDfReCbkMsY7AzspogU7orqfSmr6XXdrDwmk3Y5x3mf44OGzNQjvuNWhqnTgJ7sOcU/lICGQUc8WiGNzHEMFGX9S+K4dpUaBf8Tcl8pU3iArhlthDghW6SZeDFB/FDBaUx9dkdFp6eXrmu4OuGRZEvwUvPtCGxIL7nKNnufI1du/MsWQxvC2ORHbMNtRq6tjA0fLZi4SvbySuYifQRS32BfHkFS5Qu4/40+1k7kd0YFyyQUvIsVa17lrix3zDqMavG8x7oOlqM/axDMBT6DhpdBMAdc5qqf8myz8lwjlFjyDUL6u3Z4/yE0nUrmEudXiXwG0xbVoEN8SCNrDmmvFMt6qdCpdDMkHr2TuSh0Hh4FT5CDkzPI8ZRssv/01j/QvIO3c/xlbpGRPWpsPXEVOz3pmjYN4qyQesnBKWCENsQLy/8s2rey8iQgx2GtsrNw8+wGX6XE4v3QtwUrRe12hWoNrEHWl0xnLv2mvAFqdMAMpFY6EpOKLlE4hoCs2CmTJ2dv6e2tiGTXGU6/frI5iuNRK61OXnH5OjEc8DCGH/GC7NXyDOXOB+7BdBvvf50l2C/vxR2TKgTncLtHeLCrR0GHNHsxqRo1UDwOWur0r7fdfCRvb2tIr5LORCqKYVKd60/BAXjHWc=
86 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 0 iQIVAwUAUu1lIyBXgaxoKi1yAQIzCBAAizSWvTkWt8+tReM9jUetoSToF+XahLhn381AYdErFCBErX4bNL+vyEj+Jt2DHsAfabkvNBe3k7rtFlXHwpq6POa/ciFGPDhFlplNv6yN1jOKBlMsgdjpn7plZKcLHODOigU7IMlgg70Um8qVrRgQ8FhvbVgR2I5+CD6bucFzqo78wNl9mCIHIQCpGKIUoz56GbwT+rUpEB182Z3u6rf4NWj35RZLGAicVV2A2eAAFh4ZvuC+Z0tXMkp6Gq9cINawZgqfLbzVYJeXBtJC39lHPyp5P3LaEVRhntc9YTwbfkVGjyJZR60iYrieeKpOYRnzgHauPVdgVhkTkBxshmEPY7svKYSQqlj8hLuFa+a3ajbIPrpQAAi1MgtamA991atNqGiSTjdZa9kLQvfdn0k80+gkCxpuO56PhvtdjKsYVRgQMTYmQVQdh3x4WbQOSqTADXXIZUaWxx4RmNSlxY7KD+3lPP09teOD+A3B2cP60bC5NsCfULtQFXQzdC7NvfIyYfYBTZa+Pv6HFkVe10cbnqTt83hBy0D77vdaegPRe56qDNU+GrIG2/rosnlKGFjFoK/pTYkR9uzfkrhEjLwyfkoXlBqY+376W0PC5fP10pJeQBS9DuXpCPlgtyW0Jy1ayCT1YR4QJC4n75vZwTFBFRBhSi0HqFquOgy83+O0Q/k=
86 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 0 iQIVAwUAUu1lIyBXgaxoKi1yAQIzCBAAizSWvTkWt8+tReM9jUetoSToF+XahLhn381AYdErFCBErX4bNL+vyEj+Jt2DHsAfabkvNBe3k7rtFlXHwpq6POa/ciFGPDhFlplNv6yN1jOKBlMsgdjpn7plZKcLHODOigU7IMlgg70Um8qVrRgQ8FhvbVgR2I5+CD6bucFzqo78wNl9mCIHIQCpGKIUoz56GbwT+rUpEB182Z3u6rf4NWj35RZLGAicVV2A2eAAFh4ZvuC+Z0tXMkp6Gq9cINawZgqfLbzVYJeXBtJC39lHPyp5P3LaEVRhntc9YTwbfkVGjyJZR60iYrieeKpOYRnzgHauPVdgVhkTkBxshmEPY7svKYSQqlj8hLuFa+a3ajbIPrpQAAi1MgtamA991atNqGiSTjdZa9kLQvfdn0k80+gkCxpuO56PhvtdjKsYVRgQMTYmQVQdh3x4WbQOSqTADXXIZUaWxx4RmNSlxY7KD+3lPP09teOD+A3B2cP60bC5NsCfULtQFXQzdC7NvfIyYfYBTZa+Pv6HFkVe10cbnqTt83hBy0D77vdaegPRe56qDNU+GrIG2/rosnlKGFjFoK/pTYkR9uzfkrhEjLwyfkoXlBqY+376W0PC5fP10pJeQBS9DuXpCPlgtyW0Jy1ayCT1YR4QJC4n75vZwTFBFRBhSi0HqFquOgy83+O0Q/k=
87 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 0 iQIVAwUAUxJPlyBXgaxoKi1yAQLIRA//Qh9qzoYthPAWAUNbzybWXC/oMBI2X89NQC7l1ivKhv7cn9L79D8SWXM18q7LTwLdlwOkV/a0NTE3tkQTLvxJpfnRLCBbMOcGiIn/PxsAae8IhMAUbR7qz+XOynHOs60ZhK9X8seQHJRf1YtOI9gYTL/WYk8Cnpmc6xZQ90TNhoPPkpdfe8Y236V11SbYtN14fmrPaWQ3GXwyrvQaqM1F7BxSnC/sbm9+/wprsTa8gRQo7YQL/T5jJQgFiatG3yayrDdJtoRq3TZKtsxw8gtQdfVCrrBibbysjM8++dnwA92apHNUY8LzyptPy7rSDXRrIpPUWGGTQTD+6HQwkcLFtIuUpw4I75SV3z2r6LyOLKzDJUIunKOOYFS/rEIQGxZHxZOBAvbI+73mHAn3pJqm+UAA7R1n7tk3JyQncg50qJlm9zIUPGpNFcdEqak5iXzGYx292VlcE+fbJYeIPWggpilaVUgdmXtMCG0O0uX6C8MDmzVDCjd6FzDJ4GTZwgmWJaamvls85CkZgyN/UqlisfFXub0A1h7qAzBSVpP1+Ti+UbBjlrGX8BMRYHRGYIeIq16elcWwSpLgshjDwNn2r2EdwX8xKU5mucgTzSLprbOYGdQaqnvf6e8IX5WMBgwVW9YdY9yJKSLF7kE1AlM9nfVcXwOK4mHoMvnNgiX3zsw=
87 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 0 iQIVAwUAUxJPlyBXgaxoKi1yAQLIRA//Qh9qzoYthPAWAUNbzybWXC/oMBI2X89NQC7l1ivKhv7cn9L79D8SWXM18q7LTwLdlwOkV/a0NTE3tkQTLvxJpfnRLCBbMOcGiIn/PxsAae8IhMAUbR7qz+XOynHOs60ZhK9X8seQHJRf1YtOI9gYTL/WYk8Cnpmc6xZQ90TNhoPPkpdfe8Y236V11SbYtN14fmrPaWQ3GXwyrvQaqM1F7BxSnC/sbm9+/wprsTa8gRQo7YQL/T5jJQgFiatG3yayrDdJtoRq3TZKtsxw8gtQdfVCrrBibbysjM8++dnwA92apHNUY8LzyptPy7rSDXRrIpPUWGGTQTD+6HQwkcLFtIuUpw4I75SV3z2r6LyOLKzDJUIunKOOYFS/rEIQGxZHxZOBAvbI+73mHAn3pJqm+UAA7R1n7tk3JyQncg50qJlm9zIUPGpNFcdEqak5iXzGYx292VlcE+fbJYeIPWggpilaVUgdmXtMCG0O0uX6C8MDmzVDCjd6FzDJ4GTZwgmWJaamvls85CkZgyN/UqlisfFXub0A1h7qAzBSVpP1+Ti+UbBjlrGX8BMRYHRGYIeIq16elcWwSpLgshjDwNn2r2EdwX8xKU5mucgTzSLprbOYGdQaqnvf6e8IX5WMBgwVW9YdY9yJKSLF7kE1AlM9nfVcXwOK4mHoMvnNgiX3zsw=
88 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 0 iQIVAwUAUztENyBXgaxoKi1yAQIpkhAAmJj5JRTSn0Dn/OTAHggalw8KYFbAck1X35Wg9O7ku7sd+cOnNnkYfqAdz2m5ikqWHP7aWMiNkNy7Ree2110NqkQVYG/2AJStXBdIOmewqnjDlNt+rbJQN/JsjeKSCy+ToNvhqX5cTM9DF2pwRjMsTXVff307S6/3pga244i+RFAeG3WCUrzfDu641MGFLjG4atCj8ZFLg9DcW5bsRiOs5ZK5Il+UAb2yyoS2KNQ70VLhYULhGtqq9tuO4nLRGN3DX/eDcYfncPCav1GckW4OZKakcbLtAdW0goSgGWloxcM+j2E6Z1JZ9tOTTkFN77EvX0ZWZLmYM7sUN1meFnKbVxrtGKlMelwKwlT252c65PAKa9zsTaRUKvN7XclyxZAYVCsiCQ/V08NXhNgXJXcoKUAeGNf6wruOyvRU9teia8fAiuHJoY58WC8jC4nYG3iZTnl+zNj2A5xuEUpYHhjUfe3rNJeK7CwUpJKlbxopu5mnW9AE9ITfI490eaapRLTojOBDJNqCORAtbggMD46fLeCOzzB8Gl70U2p5P34F92Sn6mgERFKh/10XwJcj4ZIeexbQK8lqQ2cIanDN9dAmbvavPTY8grbANuq+vXDGxjIjfxapqzsSPqUJ5KnfTQyLq5NWwquR9t38XvHZfktkd140BFKwIUAIlKKaFfYXXtM=
88 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 0 iQIVAwUAUztENyBXgaxoKi1yAQIpkhAAmJj5JRTSn0Dn/OTAHggalw8KYFbAck1X35Wg9O7ku7sd+cOnNnkYfqAdz2m5ikqWHP7aWMiNkNy7Ree2110NqkQVYG/2AJStXBdIOmewqnjDlNt+rbJQN/JsjeKSCy+ToNvhqX5cTM9DF2pwRjMsTXVff307S6/3pga244i+RFAeG3WCUrzfDu641MGFLjG4atCj8ZFLg9DcW5bsRiOs5ZK5Il+UAb2yyoS2KNQ70VLhYULhGtqq9tuO4nLRGN3DX/eDcYfncPCav1GckW4OZKakcbLtAdW0goSgGWloxcM+j2E6Z1JZ9tOTTkFN77EvX0ZWZLmYM7sUN1meFnKbVxrtGKlMelwKwlT252c65PAKa9zsTaRUKvN7XclyxZAYVCsiCQ/V08NXhNgXJXcoKUAeGNf6wruOyvRU9teia8fAiuHJoY58WC8jC4nYG3iZTnl+zNj2A5xuEUpYHhjUfe3rNJeK7CwUpJKlbxopu5mnW9AE9ITfI490eaapRLTojOBDJNqCORAtbggMD46fLeCOzzB8Gl70U2p5P34F92Sn6mgERFKh/10XwJcj4ZIeexbQK8lqQ2cIanDN9dAmbvavPTY8grbANuq+vXDGxjIjfxapqzsSPqUJ5KnfTQyLq5NWwquR9t38XvHZfktkd140BFKwIUAIlKKaFfYXXtM=
89 564f55b251224f16508dd1311452db7780dafe2b 0 iQIVAwUAU1BmFSBXgaxoKi1yAQJ2Aw//bjK++xJuZCIdktg/i5FxBwoxdbipfTkKsN/YjUwrEmroYM8IkqIsO+U54OGCYWr3NPJ3VS8wUQeJ+NF3ffcjmjC297R9J+X0c5G90DdQUYX44jG/tP8Tqpev4Q7DLCXT26aRwEMdJQpq0eGaqv55E5Cxnyt3RrLCqe7RjPresZFg7iYrro5nq8TGYwBhessHXnCix9QI0HtXiLpms+0UGz8Sbi9nEYW+M0OZCyO1TvykCpFzEsLNwqqtFvhOMD/AMiWcTKNUpjmOn3V83xjWl+jnDUt7BxJ7n1efUnlwl4IeWlSUb73q/durtaymb97cSdKFmXHv4pdAShQEuEpVVGO1WELsKoXmbj30ItTW2V3KvNbjFsvIdDo7zLCpXyTq1HC56W7QCIMINX2qT+hrAMWC12tPQ05f89Cv1+jpk6eOPFqIHFdi663AjyrnGll8nwN7HJWwtA5wTXisu3bec51FAq4yJTzPMtOE9spz36E+Go2hZ1cAv9oCSceZcM0wB8KiMfaZJKNZNZk1jvsdiio4CcdASOFQPOspz07GqQxVP7W+F1Oz32LgwcNAEAS/f3juwDj45GYfAWJrTh3dnJy5DTD2LVC7KtkxxUVkWkqxivnDB9anj++FN9eyekxzut5eFED+WrCfZMcSPW0ai7wbslhKUhCwSf/v3DgGwsM=
89 564f55b251224f16508dd1311452db7780dafe2b 0 iQIVAwUAU1BmFSBXgaxoKi1yAQJ2Aw//bjK++xJuZCIdktg/i5FxBwoxdbipfTkKsN/YjUwrEmroYM8IkqIsO+U54OGCYWr3NPJ3VS8wUQeJ+NF3ffcjmjC297R9J+X0c5G90DdQUYX44jG/tP8Tqpev4Q7DLCXT26aRwEMdJQpq0eGaqv55E5Cxnyt3RrLCqe7RjPresZFg7iYrro5nq8TGYwBhessHXnCix9QI0HtXiLpms+0UGz8Sbi9nEYW+M0OZCyO1TvykCpFzEsLNwqqtFvhOMD/AMiWcTKNUpjmOn3V83xjWl+jnDUt7BxJ7n1efUnlwl4IeWlSUb73q/durtaymb97cSdKFmXHv4pdAShQEuEpVVGO1WELsKoXmbj30ItTW2V3KvNbjFsvIdDo7zLCpXyTq1HC56W7QCIMINX2qT+hrAMWC12tPQ05f89Cv1+jpk6eOPFqIHFdi663AjyrnGll8nwN7HJWwtA5wTXisu3bec51FAq4yJTzPMtOE9spz36E+Go2hZ1cAv9oCSceZcM0wB8KiMfaZJKNZNZk1jvsdiio4CcdASOFQPOspz07GqQxVP7W+F1Oz32LgwcNAEAS/f3juwDj45GYfAWJrTh3dnJy5DTD2LVC7KtkxxUVkWkqxivnDB9anj++FN9eyekxzut5eFED+WrCfZMcSPW0ai7wbslhKUhCwSf/v3DgGwsM=
90 2195ac506c6ababe86985b932f4948837c0891b5 0 iQIVAwUAU2LO/CBXgaxoKi1yAQI/3w/7BT/VRPyxey6tYp7i5cONIlEB3gznebGYwm0SGYNE6lsvS2VLh6ztb+j4eqOadr8Ssna6bslBx+dVsm+VuJ+vrNLMucD5Uc+fhn6dAfVqg+YBzUEaedI5yNsJizcJUDI7hUVsxiPiiYd9hchCWJ+z2tVt2jCyG2lMV2rbW36AM89sgz/wn5/AaAFsgoS6up/uzA3Tmw+qZSO6dZChb4Q8midIUWEbNzVhokgYcw7/HmjmvkvV9RJYiG8aBnMdQmxTE69q2dTjnnDL6wu61WU2FpTN09HRFbemUqzAfoJp8MmXq6jWgfLcm0cI3kRo7ZNpnEkmVKsfKQCXXiaR4alt9IQpQ6Jl7LSYsYI+D4ejpYysIsZyAE8qzltYhBKJWqO27A5V4WdJsoTgA/RwKfPRlci4PY8I4N466S7PBXVz/Cc5EpFkecvrgceTmBafb8JEi+gPiD2Po4vtW3bCeV4xldiEXHeJ77byUz7fZU7jL78SjJVOCCQTJfKZVr36kTz3KlaOz3E700RxzEFDYbK7I41mdANeQBmNNbcvRTy5ma6W6I3McEcAH4wqM5fFQ8YS+QWJxk85Si8KtaDPqoEdC/0dQPavuU/jAVjhV8IbmmkOtO7WvOHQDBtrR15yMxGMnUwMrPHaRNKdHNYRG0LL7lpCtdMi1mzLQgHYY9SRYvI=
90 2195ac506c6ababe86985b932f4948837c0891b5 0 iQIVAwUAU2LO/CBXgaxoKi1yAQI/3w/7BT/VRPyxey6tYp7i5cONIlEB3gznebGYwm0SGYNE6lsvS2VLh6ztb+j4eqOadr8Ssna6bslBx+dVsm+VuJ+vrNLMucD5Uc+fhn6dAfVqg+YBzUEaedI5yNsJizcJUDI7hUVsxiPiiYd9hchCWJ+z2tVt2jCyG2lMV2rbW36AM89sgz/wn5/AaAFsgoS6up/uzA3Tmw+qZSO6dZChb4Q8midIUWEbNzVhokgYcw7/HmjmvkvV9RJYiG8aBnMdQmxTE69q2dTjnnDL6wu61WU2FpTN09HRFbemUqzAfoJp8MmXq6jWgfLcm0cI3kRo7ZNpnEkmVKsfKQCXXiaR4alt9IQpQ6Jl7LSYsYI+D4ejpYysIsZyAE8qzltYhBKJWqO27A5V4WdJsoTgA/RwKfPRlci4PY8I4N466S7PBXVz/Cc5EpFkecvrgceTmBafb8JEi+gPiD2Po4vtW3bCeV4xldiEXHeJ77byUz7fZU7jL78SjJVOCCQTJfKZVr36kTz3KlaOz3E700RxzEFDYbK7I41mdANeQBmNNbcvRTy5ma6W6I3McEcAH4wqM5fFQ8YS+QWJxk85Si8KtaDPqoEdC/0dQPavuU/jAVjhV8IbmmkOtO7WvOHQDBtrR15yMxGMnUwMrPHaRNKdHNYRG0LL7lpCtdMi1mzLQgHYY9SRYvI=
91 269c80ee5b3cb3684fa8edc61501b3506d02eb10 0 iQIVAwUAU4uX5CBXgaxoKi1yAQLpdg/+OxulOKwZN+Nr7xsRhUijYjyAElRf2mGDvMrbAOA2xNf85DOXjOrX5TKETumf1qANA5cHa1twA8wYgxUzhx30H+w5EsLjyeSsOncRnD5WZNqSoIq2XevT0T4c8xdyNftyBqK4h/SC/t2h3vEiSCUaGcfNK8yk4XO45MIk4kk9nlA9jNWdA5ZMLgEFBye2ggz0JjEAPUkVDqlr9sNORDEbnwZxGPV8CK9HaL/I8VWClaFgjKQmjqV3SQsNFe2XPffzXmIipFJ+ODuXVxYpAsvLiGmcfuUfSDHQ4L9QvjBsWe1PgYMr/6CY/lPYmR+xW5mJUE9eIdN4MYcXgicLrmMpdF5pToNccNCMtfa6CDvEasPRqe2bDzL/Q9dQbdOVE/boaYBlgmYLL+/u+dpqip9KkyGgbSo9uJzst1mLTCzJmr5bw+surul28i9HM+4+Lewg4UUdHLz46no1lfTlB5o5EAhiOZBTEVdoBaKfewVpDa/aBRvtWX7UMVRG5qrtA0sXwydN00Jaqkr9m20W0jWjtc1ZC72QCrynVHOyfIb2rN98rnuy2QN4bTvjNpNjHOhhhPTOoVo0YYPdiUupm46vymUTQCmWsglU4Rlaa3vXneP7JenL5TV8WLPs9J28lF0IkOnyBXY7OFcpvYO1euu7iR1VdjfrQukMyaX18usymiA=
91 269c80ee5b3cb3684fa8edc61501b3506d02eb10 0 iQIVAwUAU4uX5CBXgaxoKi1yAQLpdg/+OxulOKwZN+Nr7xsRhUijYjyAElRf2mGDvMrbAOA2xNf85DOXjOrX5TKETumf1qANA5cHa1twA8wYgxUzhx30H+w5EsLjyeSsOncRnD5WZNqSoIq2XevT0T4c8xdyNftyBqK4h/SC/t2h3vEiSCUaGcfNK8yk4XO45MIk4kk9nlA9jNWdA5ZMLgEFBye2ggz0JjEAPUkVDqlr9sNORDEbnwZxGPV8CK9HaL/I8VWClaFgjKQmjqV3SQsNFe2XPffzXmIipFJ+ODuXVxYpAsvLiGmcfuUfSDHQ4L9QvjBsWe1PgYMr/6CY/lPYmR+xW5mJUE9eIdN4MYcXgicLrmMpdF5pToNccNCMtfa6CDvEasPRqe2bDzL/Q9dQbdOVE/boaYBlgmYLL+/u+dpqip9KkyGgbSo9uJzst1mLTCzJmr5bw+surul28i9HM+4+Lewg4UUdHLz46no1lfTlB5o5EAhiOZBTEVdoBaKfewVpDa/aBRvtWX7UMVRG5qrtA0sXwydN00Jaqkr9m20W0jWjtc1ZC72QCrynVHOyfIb2rN98rnuy2QN4bTvjNpNjHOhhhPTOoVo0YYPdiUupm46vymUTQCmWsglU4Rlaa3vXneP7JenL5TV8WLPs9J28lF0IkOnyBXY7OFcpvYO1euu7iR1VdjfrQukMyaX18usymiA=
92 2d8cd3d0e83c7336c0cb45a9f88638363f993848 0 iQIVAwUAU7OLTCBXgaxoKi1yAQJ+pw/+M3yOesgf55eo3PUTZw02QZxDyEg9ElrRc6664/QFXaJuYdz8H3LGG/NYs8uEdYihiGpS1Qc70jwd1IoUlrCELsaSSZpzWQ+VpQFX29aooBoetfL+8WgqV8zJHCtY0E1EBg/Z3ZL3n2OS++fVeWlKtp5mwEq8uLTUmhIS7GseP3bIG/CwF2Zz4bzhmPGK8V2s74aUvELZLCfkBE1ULNs7Nou1iPDGnhYOD53eq1KGIPlIg1rnLbyYw5bhS20wy5IxkWf2eCaXfmQBTG61kO5m3nkzfVgtxmZHLqYggISTJXUovfGsWZcp5a71clCSMVal+Mfviw8L/UPHG0Ie1c36djJiFLxM0f2HlwVMjegQOZSAeMGg1YL1xnIys2zMMsKgEeR+JISTal1pJyLcT9x5mr1HCnUczSGXE5zsixN+PORRnZOqcEZTa2mHJ1h5jJeEm36B/eR57BMJG+i0QgZqTpLzYTFrp2eWokGMjFB1MvgAkL2YoRsw9h6TeIwqzK8mFwLi28bf1c90gX9uMbwY/NOqGzfQKBR9bvCjs2k/gmJ+qd5AbC3DvOxHnN6hRZUqNq76Bo4F+CUVcjQ/NXnfnOIVNbILpl5Un5kl+8wLFM+mNxDxduajaUwLhSHZofKmmCSLbuuaGmQTC7a/4wzhQM9e5dX0X/8sOo8CptW7uw4=
92 2d8cd3d0e83c7336c0cb45a9f88638363f993848 0 iQIVAwUAU7OLTCBXgaxoKi1yAQJ+pw/+M3yOesgf55eo3PUTZw02QZxDyEg9ElrRc6664/QFXaJuYdz8H3LGG/NYs8uEdYihiGpS1Qc70jwd1IoUlrCELsaSSZpzWQ+VpQFX29aooBoetfL+8WgqV8zJHCtY0E1EBg/Z3ZL3n2OS++fVeWlKtp5mwEq8uLTUmhIS7GseP3bIG/CwF2Zz4bzhmPGK8V2s74aUvELZLCfkBE1ULNs7Nou1iPDGnhYOD53eq1KGIPlIg1rnLbyYw5bhS20wy5IxkWf2eCaXfmQBTG61kO5m3nkzfVgtxmZHLqYggISTJXUovfGsWZcp5a71clCSMVal+Mfviw8L/UPHG0Ie1c36djJiFLxM0f2HlwVMjegQOZSAeMGg1YL1xnIys2zMMsKgEeR+JISTal1pJyLcT9x5mr1HCnUczSGXE5zsixN+PORRnZOqcEZTa2mHJ1h5jJeEm36B/eR57BMJG+i0QgZqTpLzYTFrp2eWokGMjFB1MvgAkL2YoRsw9h6TeIwqzK8mFwLi28bf1c90gX9uMbwY/NOqGzfQKBR9bvCjs2k/gmJ+qd5AbC3DvOxHnN6hRZUqNq76Bo4F+CUVcjQ/NXnfnOIVNbILpl5Un5kl+8wLFM+mNxDxduajaUwLhSHZofKmmCSLbuuaGmQTC7a/4wzhQM9e5dX0X/8sOo8CptW7uw4=
93 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 0 iQIVAwUAU8n97yBXgaxoKi1yAQKqcA/+MT0VFoP6N8fHnlxj85maoM2HfZbAzX7oEW1B8F1WH6rHESHDexDWIYWJ2XnEeTD4GCXN0/1p+O/I0IMPNzqoSz8BU0SR4+ejhRkGrKG7mcFiF5G8enxaiISn9nmax6DyRfqtOQBzuXYGObXg9PGvMS6zbR0SorJK61xX7fSsUNN6BAvHJfpwcVkOrrFAIpEhs/Gh9wg0oUKCffO/Abs6oS+P6nGLylpIyXqC7rKZ4uPVc6Ljh9DOcpV4NCU6kQbNE7Ty79E0/JWWLsHOEY4F4WBzI7rVh7dOkRMmfNGaqvKkuNkJOEqTR1o1o73Hhbxn4NU7IPbVP/zFKC+/4QVtcPk2IPlpK1MqA1H2hBNYZhJlNhvAa7LwkIxM0916/zQ8dbFAzp6Ay/t/L0tSEcIrudTz2KTrY0WKw+pkzB/nTwaS3XZre6H2B+gszskmf1Y41clkIy/nH9K7zBuzANWyK3+bm40vmMoBbbnsweUAKkyCwqm4KTyQoYQWzu/ZiZcI+Uuk/ajJ9s7EhJbIlSnYG9ttWL/IZ1h+qPU9mqVO9fcaqkeL/NIRh+IsnzaWo0zmHU1bK+/E29PPGGf3v6+IEJmXg7lvNl5pHiMd2tb7RNO/UaNSv1Y2E9naD4FQwSWo38GRBcnRGuKCLdZNHGUR+6dYo6BJCGG8wtZvNXb3TOo=
93 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 0 iQIVAwUAU8n97yBXgaxoKi1yAQKqcA/+MT0VFoP6N8fHnlxj85maoM2HfZbAzX7oEW1B8F1WH6rHESHDexDWIYWJ2XnEeTD4GCXN0/1p+O/I0IMPNzqoSz8BU0SR4+ejhRkGrKG7mcFiF5G8enxaiISn9nmax6DyRfqtOQBzuXYGObXg9PGvMS6zbR0SorJK61xX7fSsUNN6BAvHJfpwcVkOrrFAIpEhs/Gh9wg0oUKCffO/Abs6oS+P6nGLylpIyXqC7rKZ4uPVc6Ljh9DOcpV4NCU6kQbNE7Ty79E0/JWWLsHOEY4F4WBzI7rVh7dOkRMmfNGaqvKkuNkJOEqTR1o1o73Hhbxn4NU7IPbVP/zFKC+/4QVtcPk2IPlpK1MqA1H2hBNYZhJlNhvAa7LwkIxM0916/zQ8dbFAzp6Ay/t/L0tSEcIrudTz2KTrY0WKw+pkzB/nTwaS3XZre6H2B+gszskmf1Y41clkIy/nH9K7zBuzANWyK3+bm40vmMoBbbnsweUAKkyCwqm4KTyQoYQWzu/ZiZcI+Uuk/ajJ9s7EhJbIlSnYG9ttWL/IZ1h+qPU9mqVO9fcaqkeL/NIRh+IsnzaWo0zmHU1bK+/E29PPGGf3v6+IEJmXg7lvNl5pHiMd2tb7RNO/UaNSv1Y2E9naD4FQwSWo38GRBcnRGuKCLdZNHGUR+6dYo6BJCGG8wtZvNXb3TOo=
94 3178e49892020336491cdc6945885c4de26ffa8b 0 iQIVAwUAU9whUCBXgaxoKi1yAQJDKxAAoGzdHXV/BvZ598VExEQ8IqkmBVIP1QZDVBr/orMc1eFM4tbGKxumMGbqgJsg+NetI0irkh/YWeJQ13lT4Og72iJ+4UC9eF9pcpUKr/0eBYdU2N/p2MIbVNWh3aF5QkbuQpSri0VbHOWkxqwoqrrwXEjgHaKYP4PKh+Dzukax4yzBUIyzAG38pt4a8hbjnozCl2uAikxk4Ojg+ZufhPoZWgFEuYzSfK5SrwVKOwuxKYFGbbVGTQMIXLvBhOipAmHp4JMEYHfG85kwuyx/DCDbGmXKPQYQfClwjJ4ob/IwG8asyMsPWs+09vrvpVO08HBuph3GjuiWJ1fhEef/ImWmZdQySI9Y4SjwP4dMVfzLCnY+PYPDM9Sq/5Iee13gI2lVM2NtAfQZPXh9l8u6SbCir1UhMNMx0qVMkqMAATmiZ+ETHCO75q4Wdcmnv5fk2PbvaGBVtrHGeiyuz5mK/j4cMbd0R9R0hR1PyC4dOhNqOnbqELNIe0rKNByG1RkpiQYsqZTU6insmnZrv4fVsxfA4JOObPfKNT4oa24MHS73ldLFCfQAuIxVE7RDJJ3bHeh/yO6Smo28FuVRldBl5e+wj2MykS8iVcuSa1smw6gJ14iLBH369nlR3fAAQxI0omVYPDHLr7SsH3vJasTaCD7V3SL4lW6vo/yaAh4ImlTAE+Y=
94 3178e49892020336491cdc6945885c4de26ffa8b 0 iQIVAwUAU9whUCBXgaxoKi1yAQJDKxAAoGzdHXV/BvZ598VExEQ8IqkmBVIP1QZDVBr/orMc1eFM4tbGKxumMGbqgJsg+NetI0irkh/YWeJQ13lT4Og72iJ+4UC9eF9pcpUKr/0eBYdU2N/p2MIbVNWh3aF5QkbuQpSri0VbHOWkxqwoqrrwXEjgHaKYP4PKh+Dzukax4yzBUIyzAG38pt4a8hbjnozCl2uAikxk4Ojg+ZufhPoZWgFEuYzSfK5SrwVKOwuxKYFGbbVGTQMIXLvBhOipAmHp4JMEYHfG85kwuyx/DCDbGmXKPQYQfClwjJ4ob/IwG8asyMsPWs+09vrvpVO08HBuph3GjuiWJ1fhEef/ImWmZdQySI9Y4SjwP4dMVfzLCnY+PYPDM9Sq/5Iee13gI2lVM2NtAfQZPXh9l8u6SbCir1UhMNMx0qVMkqMAATmiZ+ETHCO75q4Wdcmnv5fk2PbvaGBVtrHGeiyuz5mK/j4cMbd0R9R0hR1PyC4dOhNqOnbqELNIe0rKNByG1RkpiQYsqZTU6insmnZrv4fVsxfA4JOObPfKNT4oa24MHS73ldLFCfQAuIxVE7RDJJ3bHeh/yO6Smo28FuVRldBl5e+wj2MykS8iVcuSa1smw6gJ14iLBH369nlR3fAAQxI0omVYPDHLr7SsH3vJasTaCD7V3SL4lW6vo/yaAh4ImlTAE+Y=
95 5dc91146f35369949ea56b40172308158b59063a 0 iQIVAwUAVAUgJyBXgaxoKi1yAQJkEg/9EXFZvPpuvU7AjII1dlIT8F534AXrO30+H6hweg+h2mUCSb/mZnbo3Jr1tATgBWbIKkYmmsiIKNlJMFNPZTWhImGcVA93t6v85tSFiNJRI2QP9ypl5wTt2KhiS/s7GbUYCtPDm6xyNYoSvDo6vXJ5mfGlgFZY5gYLwEHq/lIRWLWD4EWYWbk5yN+B7rHu6A1n3yro73UR8DudEhYYqC23KbWEqFOiNd1IGj3UJlxIHUE4AcDukxbfiMWrKvv1kuT/vXak3X7cLXlO56aUbMopvaUflA3PSr3XAqynDd69cxACo/T36fuwzCQN4ICpdzGTos0rQALSr7CKF5YP9LMhVhCsOn0pCsAkSiw4HxxbcHQLl+t+0rchNysc4dWGwDt6GAfYcdm3fPtGFtA3qsN8lOpCquFH3TAZ3TrIjLFoTOk6s1xX1x5rjP/DAHc/y3KZU0Ffx3TwdQEEEIFaAXaxQG848rdfzV42+dnFnXh1G/MIrKAmv3ZSUkQ3XJfGc7iu82FsYE1NLHriUQDmMRBzCoQ1Rn1Kji119Cxf5rsMcQ6ZISR1f0jDCUS/qxlHvSqETLp8H63NSUfvuKSC7uC6pGvq9XQm1JRNO5UuJfK6tHzy0jv9bt2IRo2xbmvpDu9L5oHHd3JePsAmFmbrFf/7Qem3JyzEvRcpdcdHtefxcxc=
95 5dc91146f35369949ea56b40172308158b59063a 0 iQIVAwUAVAUgJyBXgaxoKi1yAQJkEg/9EXFZvPpuvU7AjII1dlIT8F534AXrO30+H6hweg+h2mUCSb/mZnbo3Jr1tATgBWbIKkYmmsiIKNlJMFNPZTWhImGcVA93t6v85tSFiNJRI2QP9ypl5wTt2KhiS/s7GbUYCtPDm6xyNYoSvDo6vXJ5mfGlgFZY5gYLwEHq/lIRWLWD4EWYWbk5yN+B7rHu6A1n3yro73UR8DudEhYYqC23KbWEqFOiNd1IGj3UJlxIHUE4AcDukxbfiMWrKvv1kuT/vXak3X7cLXlO56aUbMopvaUflA3PSr3XAqynDd69cxACo/T36fuwzCQN4ICpdzGTos0rQALSr7CKF5YP9LMhVhCsOn0pCsAkSiw4HxxbcHQLl+t+0rchNysc4dWGwDt6GAfYcdm3fPtGFtA3qsN8lOpCquFH3TAZ3TrIjLFoTOk6s1xX1x5rjP/DAHc/y3KZU0Ffx3TwdQEEEIFaAXaxQG848rdfzV42+dnFnXh1G/MIrKAmv3ZSUkQ3XJfGc7iu82FsYE1NLHriUQDmMRBzCoQ1Rn1Kji119Cxf5rsMcQ6ZISR1f0jDCUS/qxlHvSqETLp8H63NSUfvuKSC7uC6pGvq9XQm1JRNO5UuJfK6tHzy0jv9bt2IRo2xbmvpDu9L5oHHd3JePsAmFmbrFf/7Qem3JyzEvRcpdcdHtefxcxc=
96 f768c888aaa68d12dd7f509dcc7f01c9584357d0 0 iQIVAwUAVCxczSBXgaxoKi1yAQJYiA/9HnqKuU7IsGACgsUGt+YaqZQumg077Anj158kihSytmSts6xDxqVY1UQB38dqAKLJrQc7RbN0YK0NVCKZZrx/4OqgWvjiL5qWUJKqQzsDx4LGTUlbPlZNZawW2urmmYW6c9ZZDs1EVnVeZMDrOdntddtnBgtILDwrZ8o3U7FwSlfnm03vTkqUMj9okA3AsI8+lQIlo4qbqjQJYwvUC1ZezRdQwaT1LyoWUgjmhoZ1XWcWKOs9baikaJr6fMv8vZpwmaOY1+pztxYlROeSPVWt9P6yOf0Hi/2eg8AwSZLaX96xfk9IvXUSItg/wjTWP9BhnNs/ulwTnN8QOgSXpYxH4RXwsYOyU7BvwAekA9xi17wuzPrGEliScplxICIZ7jiiwv/VngMvM9AYw2mNBvZt2ZIGrrLaK6pq/zBm5tbviwqt5/8U5aqO8k1O0e4XYm5WmQ1c2AkXRO+xwvFpondlSF2y0flzf2FRXP82QMfsy7vxIP0KmaQ4ex+J8krZgMjNTwXh2M4tdYNtu5AehJQEP3l6giy2srkMDuFLqoe1yECjVlGdgA86ve3J/84I8KGgsufYMhfQnwHHGXCbONcNsDvO0QOee6CIQVcdKCG7dac3M89SC6Ns2CjuC8BIYDRnxbGQb7Fvn4ZcadyJKKbXQJzMgRV25K6BAwTIdvYAtgU=
96 f768c888aaa68d12dd7f509dcc7f01c9584357d0 0 iQIVAwUAVCxczSBXgaxoKi1yAQJYiA/9HnqKuU7IsGACgsUGt+YaqZQumg077Anj158kihSytmSts6xDxqVY1UQB38dqAKLJrQc7RbN0YK0NVCKZZrx/4OqgWvjiL5qWUJKqQzsDx4LGTUlbPlZNZawW2urmmYW6c9ZZDs1EVnVeZMDrOdntddtnBgtILDwrZ8o3U7FwSlfnm03vTkqUMj9okA3AsI8+lQIlo4qbqjQJYwvUC1ZezRdQwaT1LyoWUgjmhoZ1XWcWKOs9baikaJr6fMv8vZpwmaOY1+pztxYlROeSPVWt9P6yOf0Hi/2eg8AwSZLaX96xfk9IvXUSItg/wjTWP9BhnNs/ulwTnN8QOgSXpYxH4RXwsYOyU7BvwAekA9xi17wuzPrGEliScplxICIZ7jiiwv/VngMvM9AYw2mNBvZt2ZIGrrLaK6pq/zBm5tbviwqt5/8U5aqO8k1O0e4XYm5WmQ1c2AkXRO+xwvFpondlSF2y0flzf2FRXP82QMfsy7vxIP0KmaQ4ex+J8krZgMjNTwXh2M4tdYNtu5AehJQEP3l6giy2srkMDuFLqoe1yECjVlGdgA86ve3J/84I8KGgsufYMhfQnwHHGXCbONcNsDvO0QOee6CIQVcdKCG7dac3M89SC6Ns2CjuC8BIYDRnxbGQb7Fvn4ZcadyJKKbXQJzMgRV25K6BAwTIdvYAtgU=
97 7f8d16af8cae246fa5a48e723d48d58b015aed94 0 iQIVAwUAVEL0XyBXgaxoKi1yAQJLkRAAjZhpUju5nnSYtN9S0/vXS/tjuAtBTUdGwc0mz97VrM6Yhc6BjSCZL59tjeqQaoH7Lqf94pRAtZyIB2Vj/VVMDbM+/eaoSr1JixxppU+a4eqScaj82944u4C5YMSMC22PMvEwqKmy87RinZKJlFwSQ699zZ5g6mnNq8xeAiDlYhoF2QKzUXwnKxzpvjGsYhYGDMmVS1QPmky4WGvuTl6KeGkv8LidKf7r6/2RZeMcq+yjJ7R0RTtyjo1cM5dMcn/jRdwZxuV4cmFweCAeoy5guV+X6du022TpVndjOSDoKiRgdk7pTuaToXIy+9bleHpEo9bwKx58wvOMg7sirAYjrA4Xcx762RHiUuidTTPktm8sNsBQmgwJZ8Pzm+8TyHjFGLnBfeiDbQQEdLCXloz0jVOVRflDfMays1WpAYUV8XNOsgxnD2jDU8L0NLkJiX5Y0OerGq9AZ+XbgJFVBFhaOfsm2PEc3jq00GOLzrGzA+4b3CGpFzM3EyK9OnnwbP7SqCGb7PJgjmQ7IO8IWEmVYGaKtWONSm8zRLcKdH8xuk8iN1qCkBXMty/wfTEVTkIlMVEDbslYkVfj0rAPJ8B37bfe0Yz4CEMkCmARIB1rIOpMhnavXGuD50OP2PBBY/8DyC5aY97z9f04na/ffk+l7rWaHihjHufKIApt5OnfJ1w=
97 7f8d16af8cae246fa5a48e723d48d58b015aed94 0 iQIVAwUAVEL0XyBXgaxoKi1yAQJLkRAAjZhpUju5nnSYtN9S0/vXS/tjuAtBTUdGwc0mz97VrM6Yhc6BjSCZL59tjeqQaoH7Lqf94pRAtZyIB2Vj/VVMDbM+/eaoSr1JixxppU+a4eqScaj82944u4C5YMSMC22PMvEwqKmy87RinZKJlFwSQ699zZ5g6mnNq8xeAiDlYhoF2QKzUXwnKxzpvjGsYhYGDMmVS1QPmky4WGvuTl6KeGkv8LidKf7r6/2RZeMcq+yjJ7R0RTtyjo1cM5dMcn/jRdwZxuV4cmFweCAeoy5guV+X6du022TpVndjOSDoKiRgdk7pTuaToXIy+9bleHpEo9bwKx58wvOMg7sirAYjrA4Xcx762RHiUuidTTPktm8sNsBQmgwJZ8Pzm+8TyHjFGLnBfeiDbQQEdLCXloz0jVOVRflDfMays1WpAYUV8XNOsgxnD2jDU8L0NLkJiX5Y0OerGq9AZ+XbgJFVBFhaOfsm2PEc3jq00GOLzrGzA+4b3CGpFzM3EyK9OnnwbP7SqCGb7PJgjmQ7IO8IWEmVYGaKtWONSm8zRLcKdH8xuk8iN1qCkBXMty/wfTEVTkIlMVEDbslYkVfj0rAPJ8B37bfe0Yz4CEMkCmARIB1rIOpMhnavXGuD50OP2PBBY/8DyC5aY97z9f04na/ffk+l7rWaHihjHufKIApt5OnfJ1w=
98 ced632394371a36953ce4d394f86278ae51a2aae 0 iQIVAwUAVFWpfSBXgaxoKi1yAQLCQw//cvCi/Di3z/2ZEDQt4Ayyxv18gzewqrYyoElgnEzr5uTynD9Mf25hprstKla/Y5C6q+y0K6qCHPimGOkz3H+wZ2GVUgLKAwMABkfSb5IZiLTGaB2DjAJKZRwB6h43wG/DSFggE3dYszWuyHW88c72ZzVF5CSNc4J1ARLjDSgnNYJQ6XdPw3C9KgiLFDXzynPpZbPg0AK5bdPUKJruMeIKPn36Hx/Tv5GXUrbc2/lcnyRDFWisaDl0X/5eLdA+r3ID0cSmyPLYOeCgszRiW++KGw+PPDsWVeM3ZaZ9SgaBWU7MIn9A7yQMnnSzgDbN+9v/VMT3zbk1WJXlQQK8oA+CCdHH9EY33RfZ6ST/lr3pSQbUG1hdK6Sw+H6WMkOnnEk6HtLwa4xZ3HjDpoPkhVV+S0C7D5WWOovbubxuBiW5v8tK4sIOS6bAaKevTBKRbo4Rs6qmS/Ish5Q+z5bKst80cyEdi4QSoPZ/W+6kh1KfOprMxynwPQhtEcDYW2gfLpgPIM7RdXPKukLlkV2qX3eF/tqApGU4KNdP4I3N80Ri0h+6tVU/K4TMYzlRV3ziLBumJ4TnBrTHU3X6AfZUfTgslQzokX8/7a3tbctX6kZuJPggLGisdFSdirHbrUc+y5VKuJtPr+LxxgZKRFbs2VpJRem6FvwGNyndWLv32v0GMtQ=
98 ced632394371a36953ce4d394f86278ae51a2aae 0 iQIVAwUAVFWpfSBXgaxoKi1yAQLCQw//cvCi/Di3z/2ZEDQt4Ayyxv18gzewqrYyoElgnEzr5uTynD9Mf25hprstKla/Y5C6q+y0K6qCHPimGOkz3H+wZ2GVUgLKAwMABkfSb5IZiLTGaB2DjAJKZRwB6h43wG/DSFggE3dYszWuyHW88c72ZzVF5CSNc4J1ARLjDSgnNYJQ6XdPw3C9KgiLFDXzynPpZbPg0AK5bdPUKJruMeIKPn36Hx/Tv5GXUrbc2/lcnyRDFWisaDl0X/5eLdA+r3ID0cSmyPLYOeCgszRiW++KGw+PPDsWVeM3ZaZ9SgaBWU7MIn9A7yQMnnSzgDbN+9v/VMT3zbk1WJXlQQK8oA+CCdHH9EY33RfZ6ST/lr3pSQbUG1hdK6Sw+H6WMkOnnEk6HtLwa4xZ3HjDpoPkhVV+S0C7D5WWOovbubxuBiW5v8tK4sIOS6bAaKevTBKRbo4Rs6qmS/Ish5Q+z5bKst80cyEdi4QSoPZ/W+6kh1KfOprMxynwPQhtEcDYW2gfLpgPIM7RdXPKukLlkV2qX3eF/tqApGU4KNdP4I3N80Ri0h+6tVU/K4TMYzlRV3ziLBumJ4TnBrTHU3X6AfZUfTgslQzokX8/7a3tbctX6kZuJPggLGisdFSdirHbrUc+y5VKuJtPr+LxxgZKRFbs2VpJRem6FvwGNyndWLv32v0GMtQ=
99 643c58303fb0ec020907af28b9e486be299ba043 0 iQIVAwUAVGKawCBXgaxoKi1yAQL7zxAAjpXKNvzm/PKVlTfDjuVOYZ9H8w9QKUZ0vfrNJrN6Eo6hULIostbdRc25FcMWocegTqvKbz3IG+L2TKOIdZJS9M9QS4URybUd37URq4Jai8kMiJY31KixNNnjO2G1B39aIXUhY+EPx12aY31/OVy4laXIVtN6qpSncjo9baXSOMZmx6RyA1dbyfwXRjT/aODCGHZXgLJHS/kHlkCsThVlqYQ4rUCDkXIeMqIGF1CR0KjfmKpp1fS14OMgpLgdnt9+pnBZ+qcf1YdpOeQob1zwunjMYOyYC74FyOTdwaynU2iDsuBrmkE8kgEedIn7+WWe9fp/6TQJMVOeTQPZBNSRRSUYCw5Tg/0L/+jLtzjc2mY4444sDPbR7scrtU+/GtvlR5z0Y5pofwEdFME7PZNOp9a4kMiSa7ZERyGdN7U1pDu9JU6BZRz+nPzW217PVnTF7YFV/GGUzMTk9i7EZb5M4T9r9gfxFSMPeT5ct712CdBfyRlsSbSWk8XclTXwW385kLVYNDtOukWrvEiwxpA14Xb/ZUXbIDZVf5rP2HrZHMkghzeUYPjRn/IlgYUt7sDNmqFZNIc9mRFrZC9uFQ/Nul5InZodNODQDM+nHpxaztt4xl4qKep8SDEPAQjNr8biC6T9MtLKbWbSKDlqYYNv0pb2PuGub3y9rvkF1Y05mgM=
99 643c58303fb0ec020907af28b9e486be299ba043 0 iQIVAwUAVGKawCBXgaxoKi1yAQL7zxAAjpXKNvzm/PKVlTfDjuVOYZ9H8w9QKUZ0vfrNJrN6Eo6hULIostbdRc25FcMWocegTqvKbz3IG+L2TKOIdZJS9M9QS4URybUd37URq4Jai8kMiJY31KixNNnjO2G1B39aIXUhY+EPx12aY31/OVy4laXIVtN6qpSncjo9baXSOMZmx6RyA1dbyfwXRjT/aODCGHZXgLJHS/kHlkCsThVlqYQ4rUCDkXIeMqIGF1CR0KjfmKpp1fS14OMgpLgdnt9+pnBZ+qcf1YdpOeQob1zwunjMYOyYC74FyOTdwaynU2iDsuBrmkE8kgEedIn7+WWe9fp/6TQJMVOeTQPZBNSRRSUYCw5Tg/0L/+jLtzjc2mY4444sDPbR7scrtU+/GtvlR5z0Y5pofwEdFME7PZNOp9a4kMiSa7ZERyGdN7U1pDu9JU6BZRz+nPzW217PVnTF7YFV/GGUzMTk9i7EZb5M4T9r9gfxFSMPeT5ct712CdBfyRlsSbSWk8XclTXwW385kLVYNDtOukWrvEiwxpA14Xb/ZUXbIDZVf5rP2HrZHMkghzeUYPjRn/IlgYUt7sDNmqFZNIc9mRFrZC9uFQ/Nul5InZodNODQDM+nHpxaztt4xl4qKep8SDEPAQjNr8biC6T9MtLKbWbSKDlqYYNv0pb2PuGub3y9rvkF1Y05mgM=
100 902554884335e5ca3661d63be9978eb4aec3f68a 0 iQIVAwUAVH0KMyBXgaxoKi1yAQLUKxAAjgyYpmqD0Ji5OQ3995yX0dmwHOaaSuYpq71VUsOMYBskjH4xE2UgcTrX8RWUf0E+Ya91Nw3veTf+IZlYLaWuOYuJPRzw+zD1sVY8xprwqBOXNaA7n8SsTqZPSh6qgw4S0pUm0xJUOZzUP1l9S7BtIdJP7KwZ7hs9YZev4r9M3G15xOIPn5qJqBAtIeE6f5+ezoyOpSPZFtLFc4qKQ/YWzOT5uuSaYogXgVByXRFaO84+1TD93LR0PyVWxhwU9JrDU5d7P/bUTW1BXdjsxTbBnigWswKHC71EHpgz/HCYxivVL30qNdOm4Fow1Ec2GdUzGunSqTPrq18ScZDYW1x87f3JuqPM+ce/lxRWBBqP1yE30/8l/Us67m6enWXdGER8aL1lYTGOIWAhvJpfzv9KebaUq1gMFLo6j+OfwR3rYPiCHgi20nTNBa+LOceWFjCGzFa3T9UQWHW/MBElfAxK65uecbGRRYY9V1/+wxtTUiS6ixpmzL8S7uUd5n6oMaeeMiD82NLgPIbMyUHQv6eFEcCj0U9NT2uKbFRmclMs5V+8D+RTCsLJ55R9PD5OoRw/6K/coqqPShYmJvgYsFQPzXVpQdCRae31xdfGFmd5KUetqyrT+4GUdJWzSm0giSgovpEJNxXglrvNdvSO7fX3R1oahhwOwtGqMwNilcK+iDw=
100 902554884335e5ca3661d63be9978eb4aec3f68a 0 iQIVAwUAVH0KMyBXgaxoKi1yAQLUKxAAjgyYpmqD0Ji5OQ3995yX0dmwHOaaSuYpq71VUsOMYBskjH4xE2UgcTrX8RWUf0E+Ya91Nw3veTf+IZlYLaWuOYuJPRzw+zD1sVY8xprwqBOXNaA7n8SsTqZPSh6qgw4S0pUm0xJUOZzUP1l9S7BtIdJP7KwZ7hs9YZev4r9M3G15xOIPn5qJqBAtIeE6f5+ezoyOpSPZFtLFc4qKQ/YWzOT5uuSaYogXgVByXRFaO84+1TD93LR0PyVWxhwU9JrDU5d7P/bUTW1BXdjsxTbBnigWswKHC71EHpgz/HCYxivVL30qNdOm4Fow1Ec2GdUzGunSqTPrq18ScZDYW1x87f3JuqPM+ce/lxRWBBqP1yE30/8l/Us67m6enWXdGER8aL1lYTGOIWAhvJpfzv9KebaUq1gMFLo6j+OfwR3rYPiCHgi20nTNBa+LOceWFjCGzFa3T9UQWHW/MBElfAxK65uecbGRRYY9V1/+wxtTUiS6ixpmzL8S7uUd5n6oMaeeMiD82NLgPIbMyUHQv6eFEcCj0U9NT2uKbFRmclMs5V+8D+RTCsLJ55R9PD5OoRw/6K/coqqPShYmJvgYsFQPzXVpQdCRae31xdfGFmd5KUetqyrT+4GUdJWzSm0giSgovpEJNxXglrvNdvSO7fX3R1oahhwOwtGqMwNilcK+iDw=
101 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 0 iQIVAwUAVJNALCBXgaxoKi1yAQKgmw/+OFbHHOMmN2zs2lI2Y0SoMALPNQBInMBq2E6RMCMbfcS9Cn75iD29DnvBwAYNWaWsYEGyheJ7JjGBiuNKPOrLaHkdjG+5ypbhAfNDyHDiteMsXfH7D1L+cTOAB8yvhimZHOTTVF0zb/uRyVIPNowAyervUVRjDptzdfcvjUS+X+/Ufgwms6Y4CcuzFLFCxpmryJhLtOpwUPLlzIqeNkFOYWkHanCgtZX03PNIWhorH3AWOc9yztwWPQ+kcKl3FMlyuNMPhS/ElxSF6GHGtreRbtP+ZLoSIOMb2QBKpGDpZLgJ3JQEHDcZ0h5CLZWL9dDUJR3M8pg1qglqMFSWMgRPTzxPS4QntPgT/Ewd3+U5oCZUh052fG41OeCZ0CnVCpqi5PjUIDhzQkONxRCN2zbjQ2GZY7glbXoqytissihEIVP9m7RmBVq1rbjOKr+yUetJ9gOZcsMtZiCEq4Uj2cbA1x32MQv7rxwAgQP1kgQ62b0sN08HTjQpI7/IkNALLIDHoQWWr45H97i34qK1dd5uCOnYk7juvhGNX5XispxNnC01/CUVNnqChfDHpgnDjgT+1H618LiTgUAD3zo4IVAhCqF5XWsS4pQEENOB3Msffi62fYowvJx7f/htWeRLZ2OA+B85hhDiD4QBdHCRoz3spVp0asNqDxX4f4ndj8RlzfM=
101 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 0 iQIVAwUAVJNALCBXgaxoKi1yAQKgmw/+OFbHHOMmN2zs2lI2Y0SoMALPNQBInMBq2E6RMCMbfcS9Cn75iD29DnvBwAYNWaWsYEGyheJ7JjGBiuNKPOrLaHkdjG+5ypbhAfNDyHDiteMsXfH7D1L+cTOAB8yvhimZHOTTVF0zb/uRyVIPNowAyervUVRjDptzdfcvjUS+X+/Ufgwms6Y4CcuzFLFCxpmryJhLtOpwUPLlzIqeNkFOYWkHanCgtZX03PNIWhorH3AWOc9yztwWPQ+kcKl3FMlyuNMPhS/ElxSF6GHGtreRbtP+ZLoSIOMb2QBKpGDpZLgJ3JQEHDcZ0h5CLZWL9dDUJR3M8pg1qglqMFSWMgRPTzxPS4QntPgT/Ewd3+U5oCZUh052fG41OeCZ0CnVCpqi5PjUIDhzQkONxRCN2zbjQ2GZY7glbXoqytissihEIVP9m7RmBVq1rbjOKr+yUetJ9gOZcsMtZiCEq4Uj2cbA1x32MQv7rxwAgQP1kgQ62b0sN08HTjQpI7/IkNALLIDHoQWWr45H97i34qK1dd5uCOnYk7juvhGNX5XispxNnC01/CUVNnqChfDHpgnDjgT+1H618LiTgUAD3zo4IVAhCqF5XWsS4pQEENOB3Msffi62fYowvJx7f/htWeRLZ2OA+B85hhDiD4QBdHCRoz3spVp0asNqDxX4f4ndj8RlzfM=
102 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 0 iQIVAwUAVKXKYCBXgaxoKi1yAQIfsA/+PFfaWuZ6Jna12Y3MpKMnBCXYLWEJgMNlWHWzwU8lD26SKSlvMyHQsVZlkld2JmFugUCn1OV3OA4YWT6BA7VALq6Zsdcu5Dc8LRbyajBUkzGRpOUyWuFzjkCpGVbrQzbCR/bel/BBXzSqL4ipdtWgJ4y+WpZIhWkNXclBkR52b5hUTjN9vzhyhVVI7eURGwIEf7vVs1fDOcEGtaGY/ynzMTzyxIDsEEygCZau86wpKlYlqhCgxKDyzyGfpH3B1UlNGFt1afW8AWe1eHjdqC7TJZpMqmQ/Ju8vco8Xht6OXw4ZLHj7y39lpccfKTBLiK/cAKSg+xgyaH/BLhzoEkNAwYSFAB4i4IoV0KUC8nFxHfsoswBxJnMqU751ziMrpZ/XHZ1xQoEOdXgz2I04vlRn8xtynOVhcgjoAXwtbia7oNh/qCH/hl5/CdAtaawuCxJBf237F+cwur4PMAAvsGefRfZco/DInpr3qegr8rwInTxlO48ZG+o5xA4TPwT0QQTUjMdNfC146ZSbp65wG7VxJDocMZ8KJN/lqPaOvX+FVYWq4YnJhlldiV9DGgmym1AAaP0D3te2GcfHXpt/f6NYUPpgiBHy0GnOlNcQyGnnONg1A6oKVWB3k7WP28+PQbQEiCIFk2nkf5VZmye7OdHRGKOFfuprYFP1WwTWnVoNX9c=
102 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 0 iQIVAwUAVKXKYCBXgaxoKi1yAQIfsA/+PFfaWuZ6Jna12Y3MpKMnBCXYLWEJgMNlWHWzwU8lD26SKSlvMyHQsVZlkld2JmFugUCn1OV3OA4YWT6BA7VALq6Zsdcu5Dc8LRbyajBUkzGRpOUyWuFzjkCpGVbrQzbCR/bel/BBXzSqL4ipdtWgJ4y+WpZIhWkNXclBkR52b5hUTjN9vzhyhVVI7eURGwIEf7vVs1fDOcEGtaGY/ynzMTzyxIDsEEygCZau86wpKlYlqhCgxKDyzyGfpH3B1UlNGFt1afW8AWe1eHjdqC7TJZpMqmQ/Ju8vco8Xht6OXw4ZLHj7y39lpccfKTBLiK/cAKSg+xgyaH/BLhzoEkNAwYSFAB4i4IoV0KUC8nFxHfsoswBxJnMqU751ziMrpZ/XHZ1xQoEOdXgz2I04vlRn8xtynOVhcgjoAXwtbia7oNh/qCH/hl5/CdAtaawuCxJBf237F+cwur4PMAAvsGefRfZco/DInpr3qegr8rwInTxlO48ZG+o5xA4TPwT0QQTUjMdNfC146ZSbp65wG7VxJDocMZ8KJN/lqPaOvX+FVYWq4YnJhlldiV9DGgmym1AAaP0D3te2GcfHXpt/f6NYUPpgiBHy0GnOlNcQyGnnONg1A6oKVWB3k7WP28+PQbQEiCIFk2nkf5VZmye7OdHRGKOFfuprYFP1WwTWnVoNX9c=
103 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 0 iQIVAwUAVLsaciBXgaxoKi1yAQKMIA//a90/GvySL9UID+iYvzV2oDaAPDD0T+4Xs43I7DT5NIoDz+3yq2VV54XevQe5lYiURmsb/Q9nX2VR/Qq1J9c/R6Gy+CIfmJ3HzMZ0aAX8ZlZgQPYZKh/2kY5Ojl++k6MTqbqcrICNs4+UE/4IAxPyOfu5gy7TpdJmRZo2J3lWVC2Jbhd02Mzb+tjtfbOM+QcQxPwt9PpqmQszJceyVYOSm3jvD1uJdSOC04tBQrQwrxktQ09Om0LUMMaB5zFXpJtqUzfw7l4U4AaddEmkd3vUfLtHxc21RB01c3cpe2dJnjifDfwseLsI8rS4jmi/91c74TeBatSOhvbqzEkm/p8xZFXE4Uh+EpWjTsVqmfQaRq6NfNCR7I/kvGv8Ps6w8mg8uX8fd8lx+GJbodj+Uy0X3oqHyqPMky/df5i79zADBDuz+yuxFfDD9i22DJPIYcilfGgwpIUuO2lER5nSMVmReuWTVBnT6SEN66Q4KR8zLtIRr+t1qUUCy6wYbgwrdHVCbgMF8RPOVZPjbs17RIqcHjch0Xc7bShKGhQg4WHDjXHK61w4tOa1Yp7jT6COkl01XC9BLcGxJYKFvNCbeDZQGvVgJNoEvHxBxD9rGMVRjfuxeJawc2fGzZJn0ySyLDW0pfd4EJNgTh9bLdPjWz2VlXqn4A6bgaLgTPqjmN0VBXw=
103 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 0 iQIVAwUAVLsaciBXgaxoKi1yAQKMIA//a90/GvySL9UID+iYvzV2oDaAPDD0T+4Xs43I7DT5NIoDz+3yq2VV54XevQe5lYiURmsb/Q9nX2VR/Qq1J9c/R6Gy+CIfmJ3HzMZ0aAX8ZlZgQPYZKh/2kY5Ojl++k6MTqbqcrICNs4+UE/4IAxPyOfu5gy7TpdJmRZo2J3lWVC2Jbhd02Mzb+tjtfbOM+QcQxPwt9PpqmQszJceyVYOSm3jvD1uJdSOC04tBQrQwrxktQ09Om0LUMMaB5zFXpJtqUzfw7l4U4AaddEmkd3vUfLtHxc21RB01c3cpe2dJnjifDfwseLsI8rS4jmi/91c74TeBatSOhvbqzEkm/p8xZFXE4Uh+EpWjTsVqmfQaRq6NfNCR7I/kvGv8Ps6w8mg8uX8fd8lx+GJbodj+Uy0X3oqHyqPMky/df5i79zADBDuz+yuxFfDD9i22DJPIYcilfGgwpIUuO2lER5nSMVmReuWTVBnT6SEN66Q4KR8zLtIRr+t1qUUCy6wYbgwrdHVCbgMF8RPOVZPjbs17RIqcHjch0Xc7bShKGhQg4WHDjXHK61w4tOa1Yp7jT6COkl01XC9BLcGxJYKFvNCbeDZQGvVgJNoEvHxBxD9rGMVRjfuxeJawc2fGzZJn0ySyLDW0pfd4EJNgTh9bLdPjWz2VlXqn4A6bgaLgTPqjmN0VBXw=
104 fbdd5195528fae4f41feebc1838215c110b25d6a 0 iQIVAwUAVM7fBCBXgaxoKi1yAQKoYw/+LeIGcjQmHIVFQULsiBtPDf+eGAADQoP3mKBy+eX/3Fa0qqUNfES2Q3Y6RRApyZ1maPRMt8BvvhZMgQsu9QIrmf3zsFxZGFwoyrIj4hM3xvAbEZXqmWiR85/Ywd4ImeLaZ0c7mkO1/HGF1n2Mv47bfM4hhNe7VGJSSrTY4srFHDfk4IG9f18DukJVzRD9/dZeBw6eUN1ukuLEgQAD5Sl47bUdKSetglOSR1PjXfZ1hjtz5ywUyBc5P9p3LC4wSvlcJKl22zEvB3L0hkoDcPsdIPEnJAeXxKlR1rQpoA3fEgrstGiSNUW/9Tj0VekAHLO95SExmQyoG/AhbjRRzIj4uQ0aevCJyiAhkv+ffOSf99PMW9L1k3tVjLhpMWEz9BOAWyX7cDFWj5t/iktI046O9HGN9SGVx18e9xM6pEgRcLA2TyjEmtkA4jX0JeN7WeCweMLiSxyGP7pSPSJdpJeXaFtRpSF62p/G0Z5wN9s05LHqDyqNVtCvg4WjkuV5LZSdLbMcYBWGBxQzCG6qowXFXIawmbaFiBZwTfOgNls9ndz5RGupAaxY317prxPFv/pXoesc1P8bdK09ZvjhbmmD66Q/BmS2dOMQ8rXRjuVdlR8j2QBtFZxekMcRD02nBAVnwHg1VWQMIRaGjdgmW4wOkirWVn7me177FnBxrxW1tG4=
104 fbdd5195528fae4f41feebc1838215c110b25d6a 0 iQIVAwUAVM7fBCBXgaxoKi1yAQKoYw/+LeIGcjQmHIVFQULsiBtPDf+eGAADQoP3mKBy+eX/3Fa0qqUNfES2Q3Y6RRApyZ1maPRMt8BvvhZMgQsu9QIrmf3zsFxZGFwoyrIj4hM3xvAbEZXqmWiR85/Ywd4ImeLaZ0c7mkO1/HGF1n2Mv47bfM4hhNe7VGJSSrTY4srFHDfk4IG9f18DukJVzRD9/dZeBw6eUN1ukuLEgQAD5Sl47bUdKSetglOSR1PjXfZ1hjtz5ywUyBc5P9p3LC4wSvlcJKl22zEvB3L0hkoDcPsdIPEnJAeXxKlR1rQpoA3fEgrstGiSNUW/9Tj0VekAHLO95SExmQyoG/AhbjRRzIj4uQ0aevCJyiAhkv+ffOSf99PMW9L1k3tVjLhpMWEz9BOAWyX7cDFWj5t/iktI046O9HGN9SGVx18e9xM6pEgRcLA2TyjEmtkA4jX0JeN7WeCweMLiSxyGP7pSPSJdpJeXaFtRpSF62p/G0Z5wN9s05LHqDyqNVtCvg4WjkuV5LZSdLbMcYBWGBxQzCG6qowXFXIawmbaFiBZwTfOgNls9ndz5RGupAaxY317prxPFv/pXoesc1P8bdK09ZvjhbmmD66Q/BmS2dOMQ8rXRjuVdlR8j2QBtFZxekMcRD02nBAVnwHg1VWQMIRaGjdgmW4wOkirWVn7me177FnBxrxW1tG4=
105 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 0 iQIVAwUAVPQL9CBXgaxoKi1yAQJIXxAAtD2hWhaKa+lABmCOYG92FE/WdqY/91Xv5atTL8Xeko/MkirIKZiOuxNWX+J34TVevINZSWmMfDSc5TkGxktL9jW/pDB/CXn+CVZpxRabPYFH9HM2K3g8VaTV1MFtV2+feOMDIPCmq5ogMF9/kXjmifiEBrJcFsE82fdexJ3OHoOY4iHFxEhh3GzvNqEQygk4VeU6VYziNvSQj9G//PsK3Bmk7zm5ScsZcMVML3SIYFuej1b1PI1v0N8mmCRooVNBGhD/eA0iLtdh/hSb9s/8UgJ4f9HOcx9zqs8V4i14lpd/fo0+yvFuVrVbWGzrDrk5EKLENhVPwvc1KA32PTQ4Z9u7VQIBIxq3K5lL2VlCMIYc1BSaSQBjuiLm8VdN6iDuf5poNZhk1rvtpQgpxJzh362dlGtR/iTJuLCeW7gCqWUAorLTeHy0bLQ/jSOeTAGys8bUHtlRL4QbnhLbUmJmRYVvCJ+Yt1aTgTSNcoFjoLJarR1169BXgdCA38BgReUL6kB224UJSTzB1hJUyB2LvCWrXZMipZmR99Iwdq7MePD3+AoSIXQNUMY9blxuuF5x7W2ikNXmVWuab4Z8rQRtmGqEuIMBSunxAnZSn+i8057dFKlq+/yGy+WW3RQg+RnLnwZs1zCDTfu98/GT5k5hFpjXZeUWWiOVwQJ5HrqncCw=
105 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 0 iQIVAwUAVPQL9CBXgaxoKi1yAQJIXxAAtD2hWhaKa+lABmCOYG92FE/WdqY/91Xv5atTL8Xeko/MkirIKZiOuxNWX+J34TVevINZSWmMfDSc5TkGxktL9jW/pDB/CXn+CVZpxRabPYFH9HM2K3g8VaTV1MFtV2+feOMDIPCmq5ogMF9/kXjmifiEBrJcFsE82fdexJ3OHoOY4iHFxEhh3GzvNqEQygk4VeU6VYziNvSQj9G//PsK3Bmk7zm5ScsZcMVML3SIYFuej1b1PI1v0N8mmCRooVNBGhD/eA0iLtdh/hSb9s/8UgJ4f9HOcx9zqs8V4i14lpd/fo0+yvFuVrVbWGzrDrk5EKLENhVPwvc1KA32PTQ4Z9u7VQIBIxq3K5lL2VlCMIYc1BSaSQBjuiLm8VdN6iDuf5poNZhk1rvtpQgpxJzh362dlGtR/iTJuLCeW7gCqWUAorLTeHy0bLQ/jSOeTAGys8bUHtlRL4QbnhLbUmJmRYVvCJ+Yt1aTgTSNcoFjoLJarR1169BXgdCA38BgReUL6kB224UJSTzB1hJUyB2LvCWrXZMipZmR99Iwdq7MePD3+AoSIXQNUMY9blxuuF5x7W2ikNXmVWuab4Z8rQRtmGqEuIMBSunxAnZSn+i8057dFKlq+/yGy+WW3RQg+RnLnwZs1zCDTfu98/GT5k5hFpjXZeUWWiOVwQJ5HrqncCw=
106 07a92bbd02e5e3a625e0820389b47786b02b2cea 0 iQIVAwUAVPSP9SBXgaxoKi1yAQLkBQ//dRQExJHFepJfZ0gvGnUoYI4APsLmne5XtfeXJ8OtUyC4a6RylxA5BavDWgXwUh9BGhOX2cBSz1fyvzohrPrvNnlBrYKAvOIJGEAiBTXHYTxHINEKPtDF92Uz23T0Rn/wnSvvlbWF7Pvd+0DMJpFDEyr9n6jvVLR7mgxMaCqZbVaB1W/wTwDjni780WgVx8OPUXkLx3/DyarMcIiPeI5UN+FeHDovTsBWFC95msFLm80PMRPuHOejWp65yyEemGujZEPO2D5VVah7fshM2HTz63+bkEBYoqrftuv3vXKBRG78MIrUrKpqxmnCKNKDUUWJ4yk3+NwuOiHlKdly5kZ7MNFaL73XKo8HH287lDWz0lIazs91dQA9a9JOyTsp8YqGtIJGGCbhrUDtiQJ199oBU84mw3VH/EEzm4mPv4sW5fm7BnnoH/a+9vXySc+498rkdLlzFwxrQkWyJ/pFOx4UA3mCtGQK+OSwLPc+X4SRqA4fiyqKxVAL1kpLTSDL3QA82I7GzBaXsxUXzS4nmteMhUyzTdwAhKVydL0gC3d7NmkAFSyRjdGzutUUXshYxg0ywRgYebe8uzJcTj4nNRgaalYLdg3guuDulD+dJmILsrcLmA6KD/pvfDn8PYt+4ZjNIvN2E9GF6uXDu4Ux+AlOTLk9BChxUF8uBX9ev5cvWtQ=
106 07a92bbd02e5e3a625e0820389b47786b02b2cea 0 iQIVAwUAVPSP9SBXgaxoKi1yAQLkBQ//dRQExJHFepJfZ0gvGnUoYI4APsLmne5XtfeXJ8OtUyC4a6RylxA5BavDWgXwUh9BGhOX2cBSz1fyvzohrPrvNnlBrYKAvOIJGEAiBTXHYTxHINEKPtDF92Uz23T0Rn/wnSvvlbWF7Pvd+0DMJpFDEyr9n6jvVLR7mgxMaCqZbVaB1W/wTwDjni780WgVx8OPUXkLx3/DyarMcIiPeI5UN+FeHDovTsBWFC95msFLm80PMRPuHOejWp65yyEemGujZEPO2D5VVah7fshM2HTz63+bkEBYoqrftuv3vXKBRG78MIrUrKpqxmnCKNKDUUWJ4yk3+NwuOiHlKdly5kZ7MNFaL73XKo8HH287lDWz0lIazs91dQA9a9JOyTsp8YqGtIJGGCbhrUDtiQJ199oBU84mw3VH/EEzm4mPv4sW5fm7BnnoH/a+9vXySc+498rkdLlzFwxrQkWyJ/pFOx4UA3mCtGQK+OSwLPc+X4SRqA4fiyqKxVAL1kpLTSDL3QA82I7GzBaXsxUXzS4nmteMhUyzTdwAhKVydL0gC3d7NmkAFSyRjdGzutUUXshYxg0ywRgYebe8uzJcTj4nNRgaalYLdg3guuDulD+dJmILsrcLmA6KD/pvfDn8PYt+4ZjNIvN2E9GF6uXDu4Ux+AlOTLk9BChxUF8uBX9ev5cvWtQ=
107 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 0 iQIVAwUAVRw4nyBXgaxoKi1yAQIFExAAkbCPtLjQlJvPaYCL1KhNR+ZVAmn7JrFH3XhvR26RayYbs4NxR3W1BhwhDy9+W+28szEx1kQvmr6t1bXAFywY0tNJOeuLU7uFfmbgAfYgkQ9kpsQNqFYkjbCyftw0S9vX9VOJ9DqUoDWuKfX7VzjkwE9dCfKI5F+dvzxnd6ZFjB85nyHBQuTZlzXl0+csY212RJ2G2j/mzEBVyeZj9l7Rm+1X8AC1xQMWRJGiyd0b7nhYqoOcceeJFAV1t9QO4+gjmkM5kL0orjxTnuVsxPTxcC5ca1BfidPWrZEto3duHWNiATGnCDylxxr52BxCAS+BWePW9J0PROtw1pYaZ9pF4N5X5LSXJzqX7ZiNGckxqIjry09+Tbsa8FS0VkkYBEiGotpuo4Jd05V6qpXfW2JqAfEVo6X6aGvPM2B7ZUtKi30I4J+WprrOP3WgZ/ZWHe1ERYKgjDqisn3t/D40q30WQUeQGltGsOX0Udqma2RjBugO5BHGzJ2yer4GdJXg7q1OMzrjAEuz1IoKvIB/o1pg86quVA4H2gQnL1B8t1M38/DIafyw7mrEY4Z3GL44Reev63XVvDE099Vbhqp7ufwq81Fpq7Xxa5vsr9SJ+8IqqQr8AcYSuK3G3L6BmIuSUAYMRqgl35FWoWkGyZIG5c6K6zI8w5Pb0aGi6Lb2Wfb9zbc=
107 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 0 iQIVAwUAVRw4nyBXgaxoKi1yAQIFExAAkbCPtLjQlJvPaYCL1KhNR+ZVAmn7JrFH3XhvR26RayYbs4NxR3W1BhwhDy9+W+28szEx1kQvmr6t1bXAFywY0tNJOeuLU7uFfmbgAfYgkQ9kpsQNqFYkjbCyftw0S9vX9VOJ9DqUoDWuKfX7VzjkwE9dCfKI5F+dvzxnd6ZFjB85nyHBQuTZlzXl0+csY212RJ2G2j/mzEBVyeZj9l7Rm+1X8AC1xQMWRJGiyd0b7nhYqoOcceeJFAV1t9QO4+gjmkM5kL0orjxTnuVsxPTxcC5ca1BfidPWrZEto3duHWNiATGnCDylxxr52BxCAS+BWePW9J0PROtw1pYaZ9pF4N5X5LSXJzqX7ZiNGckxqIjry09+Tbsa8FS0VkkYBEiGotpuo4Jd05V6qpXfW2JqAfEVo6X6aGvPM2B7ZUtKi30I4J+WprrOP3WgZ/ZWHe1ERYKgjDqisn3t/D40q30WQUeQGltGsOX0Udqma2RjBugO5BHGzJ2yer4GdJXg7q1OMzrjAEuz1IoKvIB/o1pg86quVA4H2gQnL1B8t1M38/DIafyw7mrEY4Z3GL44Reev63XVvDE099Vbhqp7ufwq81Fpq7Xxa5vsr9SJ+8IqqQr8AcYSuK3G3L6BmIuSUAYMRqgl35FWoWkGyZIG5c6K6zI8w5Pb0aGi6Lb2Wfb9zbc=
108 e89f909edffad558b56f4affa8239e4832f88de0 0 iQIVAwUAVTBozCBXgaxoKi1yAQLHeg/+IvfpPmG7OSqCoHvMVETYdrqT7lKCwfCQWMFOC/2faWs1n4R/qQNm6ckE5OY888RK8tVQ7ue03Pg/iyWgQlYfS7Njd3WPjS4JsnEBxIvuGkIu6TPIXAUAH0PFTBh0cZEICDpPEVT2X3bPRwDHA+hUE9RrxM5zJ39Fpk/pTYCjQ9UKfEhXlEfka75YB39g2Y/ssaSbn5w/tAAx8sL72Y4G96D4IV2seLHZhB3VQ7UZKThEWn6UdVOoKj+urIwGaBYMeekGVtHSh6fnHOw3EtDO9mQ5HtAz2Bl4CwRYN8eSN+Dwgr+mdk8MWpQQJ+i1A8jUhUp8gn1Pe5GkIH4CWZ9+AvLLnshe2MkVaTT1g7EQk37tFkkdZDRBsOHIvpF71B9pEA1gMUlX4gKgh5YwukgpQlDmFCfY7XmX6eXw9Ub+EckEwYuGMz7Fbwe9J/Ce4DxvgJgq3/cu/jb3bmbewH6tZmcrlqziqqA8GySIwcURnF1c37e7+e7x1jhFJfCWpHzvCusjKhUp9tZsl9Rt1Bo/y41QY+avY7//ymhbwTMKgqjzCYoA+ipF4JfZlFiZF+JhvOSIFb0ltkfdqKD+qOjlkFaglvQU1bpGKLJ6cz4Xk2Jqt5zhcrpyDMGVv9aiWywCK2ZP34RNaJ6ZFwzwdpXihqgkm5dBGoZ4ztFUfmjXzIg=
108 e89f909edffad558b56f4affa8239e4832f88de0 0 iQIVAwUAVTBozCBXgaxoKi1yAQLHeg/+IvfpPmG7OSqCoHvMVETYdrqT7lKCwfCQWMFOC/2faWs1n4R/qQNm6ckE5OY888RK8tVQ7ue03Pg/iyWgQlYfS7Njd3WPjS4JsnEBxIvuGkIu6TPIXAUAH0PFTBh0cZEICDpPEVT2X3bPRwDHA+hUE9RrxM5zJ39Fpk/pTYCjQ9UKfEhXlEfka75YB39g2Y/ssaSbn5w/tAAx8sL72Y4G96D4IV2seLHZhB3VQ7UZKThEWn6UdVOoKj+urIwGaBYMeekGVtHSh6fnHOw3EtDO9mQ5HtAz2Bl4CwRYN8eSN+Dwgr+mdk8MWpQQJ+i1A8jUhUp8gn1Pe5GkIH4CWZ9+AvLLnshe2MkVaTT1g7EQk37tFkkdZDRBsOHIvpF71B9pEA1gMUlX4gKgh5YwukgpQlDmFCfY7XmX6eXw9Ub+EckEwYuGMz7Fbwe9J/Ce4DxvgJgq3/cu/jb3bmbewH6tZmcrlqziqqA8GySIwcURnF1c37e7+e7x1jhFJfCWpHzvCusjKhUp9tZsl9Rt1Bo/y41QY+avY7//ymhbwTMKgqjzCYoA+ipF4JfZlFiZF+JhvOSIFb0ltkfdqKD+qOjlkFaglvQU1bpGKLJ6cz4Xk2Jqt5zhcrpyDMGVv9aiWywCK2ZP34RNaJ6ZFwzwdpXihqgkm5dBGoZ4ztFUfmjXzIg=
109 8cc6036bca532e06681c5a8fa37efaa812de67b5 0 iQIVAwUAVUP0xCBXgaxoKi1yAQLIChAAme3kg1Z0V8t5PnWKDoIvscIeAsD2s6EhMy1SofmdZ4wvYD1VmGC6TgXMCY7ssvRBhxqwG3GxwYpwELASuw2GYfVot2scN7+b8Hs5jHtkQevKbxarYni+ZI9mw/KldnJixD1yW3j+LoJFh/Fu6GD2yrfGIhimFLozcwUu3EbLk7JzyHSn7/8NFjLJz0foAYfcbowU9/BFwNVLrQPnsUbWcEifsq5bYso9MBO9k+25yLgqHoqMbGpJcgjubNy1cWoKnlKS+lOJl0/waAk+aIjHXMzFpRRuJDjxEZn7V4VdV5d23nrBTcit1BfMzga5df7VrLPVRbom1Bi0kQ0BDeDex3hHNqHS5X+HSrd/njzP1xp8twG8hTE+njv85PWoGBTo1eUGW/esChIJKA5f3/F4B9ErgBNNOKnYmRgxixd562OWAwAQZK0r0roe2H/Mfg2VvgxT0kHd22NQLoAv0YI4jcXcCFrnV/80vHUQ8AsAYAbkLcz1jkfk3YwYDP8jbJCqcwJRt9ialYKJwvXlEe0TMeGdq7EjCO0z/pIpu82k2R/C0FtCFih3bUvJEmWoVVx8UGkDDQEORLbzxQCt0IOiQGFcoCCxgQmL0x9ZoljCWg5vZuuhU4uSOuRTuM+aa4xoLkeOcvgGRSOXrqfkV8JpWKoJB4dmY2qSuxw8LsAAzK0=
109 8cc6036bca532e06681c5a8fa37efaa812de67b5 0 iQIVAwUAVUP0xCBXgaxoKi1yAQLIChAAme3kg1Z0V8t5PnWKDoIvscIeAsD2s6EhMy1SofmdZ4wvYD1VmGC6TgXMCY7ssvRBhxqwG3GxwYpwELASuw2GYfVot2scN7+b8Hs5jHtkQevKbxarYni+ZI9mw/KldnJixD1yW3j+LoJFh/Fu6GD2yrfGIhimFLozcwUu3EbLk7JzyHSn7/8NFjLJz0foAYfcbowU9/BFwNVLrQPnsUbWcEifsq5bYso9MBO9k+25yLgqHoqMbGpJcgjubNy1cWoKnlKS+lOJl0/waAk+aIjHXMzFpRRuJDjxEZn7V4VdV5d23nrBTcit1BfMzga5df7VrLPVRbom1Bi0kQ0BDeDex3hHNqHS5X+HSrd/njzP1xp8twG8hTE+njv85PWoGBTo1eUGW/esChIJKA5f3/F4B9ErgBNNOKnYmRgxixd562OWAwAQZK0r0roe2H/Mfg2VvgxT0kHd22NQLoAv0YI4jcXcCFrnV/80vHUQ8AsAYAbkLcz1jkfk3YwYDP8jbJCqcwJRt9ialYKJwvXlEe0TMeGdq7EjCO0z/pIpu82k2R/C0FtCFih3bUvJEmWoVVx8UGkDDQEORLbzxQCt0IOiQGFcoCCxgQmL0x9ZoljCWg5vZuuhU4uSOuRTuM+aa4xoLkeOcvgGRSOXrqfkV8JpWKoJB4dmY2qSuxw8LsAAzK0=
110 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 0 iQIVAwUAVWy9mCBXgaxoKi1yAQIm+Q/+I/tV8DC51d4f/6T5OR+motlIx9U5za5p9XUUzfp3tzSY2PutVko/FclajVdFekZsK5pUzlh/GZhfe1jjyEEIr3UC3yWk8hMcvvS+2UDmfy81QxN7Uf0kz4mZOlME6d/fYDzf4cDKkkCXoec3kyZBw7L84mteUcrJoyb5K3fkQBrK5CG/CV7+uZN6b9+quKjtDhDEkAyc6phNanzWNgiHGucEbNgXsKM01HmV1TnN4GXTKx8y2UDalIJOPyes2OWHggibMHbaNnGnwSBAK+k29yaQ5FD0rsA+q0j3TijA1NfqvtluNEPbFOx/wJV4CxonYad93gWyEdgU34LRqqw1bx7PFUvew2/T3TJsxQLoCt67OElE7ScG8evuNEe8/4r3LDnzYFx7QMP5r5+B7PxVpj/DT+buS16BhYS8pXMMqLynFOQkX5uhEM7mNC0JTXQsBMHSDAcizVDrdFCF2OSfQjLpUfFP1VEWX7EInqj7hZrd+GE7TfBD8/rwSBSkkCX2aa9uKyt6Ius1GgQUuEETskAUvvpsNBzZxtvGpMMhqQLGlJYnBbhOmsbOyTSnXU66KJ5e/H3O0KRrF09i74v30DaY4uIH8xG6KpSkfw5s/oiLCtagfc0goUvvojk9pACDR3CKM/jVC63EVp2oUcjT72jUgSLxBgi7siLD8IW86wc=
110 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 0 iQIVAwUAVWy9mCBXgaxoKi1yAQIm+Q/+I/tV8DC51d4f/6T5OR+motlIx9U5za5p9XUUzfp3tzSY2PutVko/FclajVdFekZsK5pUzlh/GZhfe1jjyEEIr3UC3yWk8hMcvvS+2UDmfy81QxN7Uf0kz4mZOlME6d/fYDzf4cDKkkCXoec3kyZBw7L84mteUcrJoyb5K3fkQBrK5CG/CV7+uZN6b9+quKjtDhDEkAyc6phNanzWNgiHGucEbNgXsKM01HmV1TnN4GXTKx8y2UDalIJOPyes2OWHggibMHbaNnGnwSBAK+k29yaQ5FD0rsA+q0j3TijA1NfqvtluNEPbFOx/wJV4CxonYad93gWyEdgU34LRqqw1bx7PFUvew2/T3TJsxQLoCt67OElE7ScG8evuNEe8/4r3LDnzYFx7QMP5r5+B7PxVpj/DT+buS16BhYS8pXMMqLynFOQkX5uhEM7mNC0JTXQsBMHSDAcizVDrdFCF2OSfQjLpUfFP1VEWX7EInqj7hZrd+GE7TfBD8/rwSBSkkCX2aa9uKyt6Ius1GgQUuEETskAUvvpsNBzZxtvGpMMhqQLGlJYnBbhOmsbOyTSnXU66KJ5e/H3O0KRrF09i74v30DaY4uIH8xG6KpSkfw5s/oiLCtagfc0goUvvojk9pACDR3CKM/jVC63EVp2oUcjT72jUgSLxBgi7siLD8IW86wc=
111 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 0 iQIVAwUAVZRtzSBXgaxoKi1yAQJVLhAAtfn+8OzHIp6wRC4NUbkImAJRLsNTRPKeRSWPCF5O5XXQ84hp+86qjhndIE6mcJSAt4cVP8uky6sEa8ULd6b3ACRBvtgZtsecA9S/KtRjyE9CKr8nP+ogBNqJPaYlTz9RuwGedOd+8I9lYgsnRjfaHSByNMX08WEHtWqAWhSkAz/HO32ardS38cN97fckCgQtA8v7c77nBT7vcw4epgxyUQvMUxUhqmCVVhVfz8JXa5hyJxFrOtqgaVuQ1B5Y/EKxcyZT+JNHPtu3V1uc1awS/w16CEPstNBSFHax5MuT9UbY0mV2ZITP99EkM+vdomh82VHdnMo0i7Pz7XF45ychD4cteroO9gGqDDt9j7hd1rubBX1bfkPsd/APJlyeshusyTj+FqsUD/HDlvM9LRjY1HpU7i7yAlLQQ3851XKMLUPNFYu2r3bo8Wt/CCHtJvB4wYuH+7Wo3muudpU01ziJBxQrUWwPbUrG+7LvO1iEEVxB8l+8Vq0mU3Te7lJi1kGetm6xHNbtvQip5P2YUqvv+lLo/K8KoJDxsh63Y01JGwdmUDb8mnFlRx4J7hQJaoNEvz3cgnc4X8gDJD8sUOjGOPnbtz2QwTY+zj/5+FdLxWDCxNrHX5vvkVdJHcCqEfVvQTKfDMOUeKuhjI7GD7t3xRPfUxq19jjoLPe7aqn1Z1s=
111 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 0 iQIVAwUAVZRtzSBXgaxoKi1yAQJVLhAAtfn+8OzHIp6wRC4NUbkImAJRLsNTRPKeRSWPCF5O5XXQ84hp+86qjhndIE6mcJSAt4cVP8uky6sEa8ULd6b3ACRBvtgZtsecA9S/KtRjyE9CKr8nP+ogBNqJPaYlTz9RuwGedOd+8I9lYgsnRjfaHSByNMX08WEHtWqAWhSkAz/HO32ardS38cN97fckCgQtA8v7c77nBT7vcw4epgxyUQvMUxUhqmCVVhVfz8JXa5hyJxFrOtqgaVuQ1B5Y/EKxcyZT+JNHPtu3V1uc1awS/w16CEPstNBSFHax5MuT9UbY0mV2ZITP99EkM+vdomh82VHdnMo0i7Pz7XF45ychD4cteroO9gGqDDt9j7hd1rubBX1bfkPsd/APJlyeshusyTj+FqsUD/HDlvM9LRjY1HpU7i7yAlLQQ3851XKMLUPNFYu2r3bo8Wt/CCHtJvB4wYuH+7Wo3muudpU01ziJBxQrUWwPbUrG+7LvO1iEEVxB8l+8Vq0mU3Te7lJi1kGetm6xHNbtvQip5P2YUqvv+lLo/K8KoJDxsh63Y01JGwdmUDb8mnFlRx4J7hQJaoNEvz3cgnc4X8gDJD8sUOjGOPnbtz2QwTY+zj/5+FdLxWDCxNrHX5vvkVdJHcCqEfVvQTKfDMOUeKuhjI7GD7t3xRPfUxq19jjoLPe7aqn1Z1s=
112 96a38d44ba093bd1d1ecfd34119e94056030278b 0 iQIVAwUAVarUUyBXgaxoKi1yAQIfJw/+MG/0736F/9IvzgCTF6omIC+9kS8JH0n/JBGPhpbPAHK4xxjhOOz6m3Ia3c3HNoy+I6calwU6YV7k5dUzlyLhM0Z5oYpdrH+OBNxDEsD5SfhclfR63MK1kmgtD33izijsZ++6a+ZaVfyxpMTksKOktWSIDD63a5b/avb6nKY64KwJcbbeXPdelxvXV7TXYm0GvWc46BgvrHOJpYHCDaXorAn6BMq7EQF8sxdNK4GVMNMVk1njve0HOg3Kz8llPB/7QmddZXYLFGmWqICyUn1IsJDfePxzh8sOYVCbxAgitTJHJJmmH5gzVzw7t7ljtmxSJpcUGQJB2MphejmNFGfgvJPB9c6xOCfUqDjxN5m24V+UYesZntpfgs3lpfvE7785IpVnf6WfKG4PKty01ome/joHlDlrRTekKMlpiBapGMfv8EHvPBrOA+5yAHNfKsmcyCcjD1nvXYZ2/X9qY35AhdcBuNkyp55oPDOdtYIHfnOIxlYMKG1dusDx3Z4eveF0lQTzfRVoE5w+k9A2Ov3Zx0aiSkFFevJjrq5QBfs9dAiT8JYgBmWhaJzCtJm12lQirRMKR/br88Vwt/ry/UVY9cereMNvRYUGOGfC8CGGDCw4WDD+qWvyB3mmrXVuMlXxQRIZRJy5KazaQXsBWuIsx4kgGqC5Uo+yzpiQ1VMuCyI=
112 96a38d44ba093bd1d1ecfd34119e94056030278b 0 iQIVAwUAVarUUyBXgaxoKi1yAQIfJw/+MG/0736F/9IvzgCTF6omIC+9kS8JH0n/JBGPhpbPAHK4xxjhOOz6m3Ia3c3HNoy+I6calwU6YV7k5dUzlyLhM0Z5oYpdrH+OBNxDEsD5SfhclfR63MK1kmgtD33izijsZ++6a+ZaVfyxpMTksKOktWSIDD63a5b/avb6nKY64KwJcbbeXPdelxvXV7TXYm0GvWc46BgvrHOJpYHCDaXorAn6BMq7EQF8sxdNK4GVMNMVk1njve0HOg3Kz8llPB/7QmddZXYLFGmWqICyUn1IsJDfePxzh8sOYVCbxAgitTJHJJmmH5gzVzw7t7ljtmxSJpcUGQJB2MphejmNFGfgvJPB9c6xOCfUqDjxN5m24V+UYesZntpfgs3lpfvE7785IpVnf6WfKG4PKty01ome/joHlDlrRTekKMlpiBapGMfv8EHvPBrOA+5yAHNfKsmcyCcjD1nvXYZ2/X9qY35AhdcBuNkyp55oPDOdtYIHfnOIxlYMKG1dusDx3Z4eveF0lQTzfRVoE5w+k9A2Ov3Zx0aiSkFFevJjrq5QBfs9dAiT8JYgBmWhaJzCtJm12lQirRMKR/br88Vwt/ry/UVY9cereMNvRYUGOGfC8CGGDCw4WDD+qWvyB3mmrXVuMlXxQRIZRJy5KazaQXsBWuIsx4kgGqC5Uo+yzpiQ1VMuCyI=
113 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 0 iQIVAwUAVbuouCBXgaxoKi1yAQL2ng//eI1w51F4YkDiUAhrZuc8RE/chEd2o4F6Jyu9laA03vbim598ntqGjX3+UkOyTQ/zGVeZfW2cNG8zkJjSLk138DHCYl2YPPD/yxqMOJp/a7U34+HrA0aE5Y2pcfx+FofZHRvRtt40UCngicjKivko8au7Ezayidpa/vQbc6dNvGrwwk4KMgOP2HYIfHgCirR5UmaWtNpzlLhf9E7JSNL5ZXij3nt6AgEPyn0OvmmOLyUARO/JTJ6vVyLEtwiXg7B3sF5RpmyFDhrkZ+MbFHgL4k/3y9Lb97WaZl8nXJIaNPOTPJqkApFY/56S12PKYK4js2OgU+QsX1XWvouAhEx6CC6Jk9EHhr6+9qxYFhBJw7RjbswUG6LvJy/kBe+Ei5UbYg9dATf3VxQ6Gqs19lebtzltERH2yNwaHyVeqqakPSonOaUyxGMRRosvNHyrTTor38j8d27KksgpocXzBPZcc1MlS3vJg2nIwZlc9EKM9z5R0J1KAi1Z/+xzBjiGRYg5EZY6ElAw30eCjGta7tXlBssJiKeHut7QTLxCZHQuX1tKxDDs1qlXlGCMbrFqo0EiF9hTssptRG3ZyLwMdzEjnh4ki6gzONZKDI8uayAS3N+CEtWcGUtiA9OwuiFXTwodmles/Mh14LEhiVZoDK3L9TPcY22o2qRuku/6wq6QKsg=
113 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 0 iQIVAwUAVbuouCBXgaxoKi1yAQL2ng//eI1w51F4YkDiUAhrZuc8RE/chEd2o4F6Jyu9laA03vbim598ntqGjX3+UkOyTQ/zGVeZfW2cNG8zkJjSLk138DHCYl2YPPD/yxqMOJp/a7U34+HrA0aE5Y2pcfx+FofZHRvRtt40UCngicjKivko8au7Ezayidpa/vQbc6dNvGrwwk4KMgOP2HYIfHgCirR5UmaWtNpzlLhf9E7JSNL5ZXij3nt6AgEPyn0OvmmOLyUARO/JTJ6vVyLEtwiXg7B3sF5RpmyFDhrkZ+MbFHgL4k/3y9Lb97WaZl8nXJIaNPOTPJqkApFY/56S12PKYK4js2OgU+QsX1XWvouAhEx6CC6Jk9EHhr6+9qxYFhBJw7RjbswUG6LvJy/kBe+Ei5UbYg9dATf3VxQ6Gqs19lebtzltERH2yNwaHyVeqqakPSonOaUyxGMRRosvNHyrTTor38j8d27KksgpocXzBPZcc1MlS3vJg2nIwZlc9EKM9z5R0J1KAi1Z/+xzBjiGRYg5EZY6ElAw30eCjGta7tXlBssJiKeHut7QTLxCZHQuX1tKxDDs1qlXlGCMbrFqo0EiF9hTssptRG3ZyLwMdzEjnh4ki6gzONZKDI8uayAS3N+CEtWcGUtiA9OwuiFXTwodmles/Mh14LEhiVZoDK3L9TPcY22o2qRuku/6wq6QKsg=
114 1a45e49a6bed023deb229102a8903234d18054d3 0 iQIVAwUAVeYa2SBXgaxoKi1yAQLWVA//Q7vU0YzngbxIbrTPvfFiNTJcT4bx9u1xMHRZf6QBIE3KtRHKTooJwH9lGR0HHM+8DWWZup3Vzo6JuWHMGoW0v5fzDyk2czwM9BgQQPfEmoJ/ZuBMevTkTZngjgHVwhP3tHFym8Rk9vVxyiZd35EcxP+4F817GCzD+K7XliIBqVggmv9YeQDXfEtvo7UZrMPPec79t8tzt2UadI3KC1jWUriTS1Fg1KxgXW6srD80D10bYyCkkdo/KfF6BGZ9SkF+U3b95cuqSmOfoyyQwUA3JbMXXOnIefnC7lqRC2QTC6mYDx5hIkBiwymXJBe8rpq/S94VVvPGfW6A5upyeCZISLEEnAz0GlykdpIy/NogzhmWpbAMOus05Xnen6xPdNig6c/M5ZleRxVobNrZSd7c5qI3aUUyfMKXlY1j9oiUTjSKH1IizwaI3aL/MM70eErBxXiLs2tpQvZeaVLn3kwCB5YhywO3LK0x+FNx4Gl90deAXMYibGNiLTq9grpB8fuLg9M90JBjFkeYkrSJ2yGYumYyP/WBA3mYEYGDLNstOby4riTU3WCqVl+eah6ss3l+gNDjLxiMtJZ/g0gQACaAvxQ9tYp5eeRMuLRTp79QQPxv97s8IyVwE/TlPlcSFlEXAzsBvqvsolQXRVi9AxA6M2davYabBYAgRf6rRfgujoU=
114 1a45e49a6bed023deb229102a8903234d18054d3 0 iQIVAwUAVeYa2SBXgaxoKi1yAQLWVA//Q7vU0YzngbxIbrTPvfFiNTJcT4bx9u1xMHRZf6QBIE3KtRHKTooJwH9lGR0HHM+8DWWZup3Vzo6JuWHMGoW0v5fzDyk2czwM9BgQQPfEmoJ/ZuBMevTkTZngjgHVwhP3tHFym8Rk9vVxyiZd35EcxP+4F817GCzD+K7XliIBqVggmv9YeQDXfEtvo7UZrMPPec79t8tzt2UadI3KC1jWUriTS1Fg1KxgXW6srD80D10bYyCkkdo/KfF6BGZ9SkF+U3b95cuqSmOfoyyQwUA3JbMXXOnIefnC7lqRC2QTC6mYDx5hIkBiwymXJBe8rpq/S94VVvPGfW6A5upyeCZISLEEnAz0GlykdpIy/NogzhmWpbAMOus05Xnen6xPdNig6c/M5ZleRxVobNrZSd7c5qI3aUUyfMKXlY1j9oiUTjSKH1IizwaI3aL/MM70eErBxXiLs2tpQvZeaVLn3kwCB5YhywO3LK0x+FNx4Gl90deAXMYibGNiLTq9grpB8fuLg9M90JBjFkeYkrSJ2yGYumYyP/WBA3mYEYGDLNstOby4riTU3WCqVl+eah6ss3l+gNDjLxiMtJZ/g0gQACaAvxQ9tYp5eeRMuLRTp79QQPxv97s8IyVwE/TlPlcSFlEXAzsBvqvsolQXRVi9AxA6M2davYabBYAgRf6rRfgujoU=
115 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 0 iQIVAwUAVg1oMSBXgaxoKi1yAQLPag/+Pv0+pR9b9Y5RflEcERUzVu92q+l/JEiP7PHP9pAZuXoQ0ikYBFo1Ygw8tkIG00dgEaLk/2b7E3OxaU9pjU3thoX//XpTcbkJtVhe7Bkjh9/S3dRpm2FWNL9n0qnywebziB45Xs8XzUwBZTYOkVRInYr/NzSo8KNbQH1B4u2g56veb8u/7GtEvBSGnMGVYKhVUZ3jxyDf371QkdafMOJPpogkZcVhXusvMZPDBYtTIzswyxBJ2jxHzjt8+EKs+FI3FxzvQ9Ze3M5Daa7xfiHI3sOgECO8GMVaJi0F49lttKx08KONw8xLlEof+cJ+qxLxQ42X5XOQglJ2/bv5ES5JiZYAti2XSXbZK96p4wexqL4hnaLVU/2iEUfqB9Sj6itEuhGOknPD9fQo1rZXYIS8CT5nGTNG4rEpLFN6VwWn1btIMNkEHw998zU7N3HAOk6adD6zGcntUfMBvQC3V4VK3o7hp8PGeySrWrOLcC/xLKM+XRonz46woJK5D8w8lCVYAxBWEGKAFtj9hv9R8Ye9gCW0Q8BvJ7MwGpn+7fLQ1BVZdV1LZQTSBUr5u8mNeDsRo4H2hITQRhUeElIwlMsUbbN078a4JPOUgPz1+Fi8oHRccBchN6I40QohL934zhcKXQ+NXYN8BgpCicPztSg8O8Y/qvhFP12Zu4tOH8P/dFY=
115 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 0 iQIVAwUAVg1oMSBXgaxoKi1yAQLPag/+Pv0+pR9b9Y5RflEcERUzVu92q+l/JEiP7PHP9pAZuXoQ0ikYBFo1Ygw8tkIG00dgEaLk/2b7E3OxaU9pjU3thoX//XpTcbkJtVhe7Bkjh9/S3dRpm2FWNL9n0qnywebziB45Xs8XzUwBZTYOkVRInYr/NzSo8KNbQH1B4u2g56veb8u/7GtEvBSGnMGVYKhVUZ3jxyDf371QkdafMOJPpogkZcVhXusvMZPDBYtTIzswyxBJ2jxHzjt8+EKs+FI3FxzvQ9Ze3M5Daa7xfiHI3sOgECO8GMVaJi0F49lttKx08KONw8xLlEof+cJ+qxLxQ42X5XOQglJ2/bv5ES5JiZYAti2XSXbZK96p4wexqL4hnaLVU/2iEUfqB9Sj6itEuhGOknPD9fQo1rZXYIS8CT5nGTNG4rEpLFN6VwWn1btIMNkEHw998zU7N3HAOk6adD6zGcntUfMBvQC3V4VK3o7hp8PGeySrWrOLcC/xLKM+XRonz46woJK5D8w8lCVYAxBWEGKAFtj9hv9R8Ye9gCW0Q8BvJ7MwGpn+7fLQ1BVZdV1LZQTSBUr5u8mNeDsRo4H2hITQRhUeElIwlMsUbbN078a4JPOUgPz1+Fi8oHRccBchN6I40QohL934zhcKXQ+NXYN8BgpCicPztSg8O8Y/qvhFP12Zu4tOH8P/dFY=
116 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 0 iQIVAwUAViarTyBXgaxoKi1yAQLZgRAAh7c7ebn7kUWI5M/b/T6qHGjFrU5azkjamzy9IG+KIa2hZgSMxyEM7JJUFqKP4TiWa3sW03bjKGSM/SjjDSSyheX+JIVSPNyKrBwneYhPq45Ius8eiHziClkt0CSsl2d9xDRpI0JmHbN0Pf8nh7rnbL+231GDAOT6dP+2S8K1HGa/0BgEcL9gpYs4/2GyjL+hBSUjyrabzvwe48DCN5W0tEJbGFw5YEADxdfbVbNEuXL81tR4PFGiJxPW0QKRLDB74MWmiWC0gi2ZC/IhbNBZ2sLb6694d4Bx4PVwtiARh63HNXVMEaBrFu1S9NcMQyHvAOc6Zw4izF/PCeTcdEnPk8J1t5PTz09Lp0EAKxe7CWIViy350ke5eiaxO3ySrNMX6d83BOHLDqEFMSWm+ad+KEMT4CJrK4X/n/XMgEFAaU5nWlIRqrLRIeU2Ifc625T0Xh4BgTqXPpytQxhgV5b+Fi6duNk4cy+QnHT4ymxI6BPD9HvSQwc+O7h37qjvJVZmpQX6AP8O75Yza8ZbcYKRIIxZzOkwNpzE5A/vpvP5bCRn7AGcT3ORWmAYr/etr3vxUvt2fQz6U/R4S915V+AeWBdcp+uExu6VZ42M0vhhh0lyzx1VRJGVdV+LoxFKkaC42d0yT+O1QEhSB7WL1D3/a/iWubv6ieB/cvNMhFaK9DA=
116 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 0 iQIVAwUAViarTyBXgaxoKi1yAQLZgRAAh7c7ebn7kUWI5M/b/T6qHGjFrU5azkjamzy9IG+KIa2hZgSMxyEM7JJUFqKP4TiWa3sW03bjKGSM/SjjDSSyheX+JIVSPNyKrBwneYhPq45Ius8eiHziClkt0CSsl2d9xDRpI0JmHbN0Pf8nh7rnbL+231GDAOT6dP+2S8K1HGa/0BgEcL9gpYs4/2GyjL+hBSUjyrabzvwe48DCN5W0tEJbGFw5YEADxdfbVbNEuXL81tR4PFGiJxPW0QKRLDB74MWmiWC0gi2ZC/IhbNBZ2sLb6694d4Bx4PVwtiARh63HNXVMEaBrFu1S9NcMQyHvAOc6Zw4izF/PCeTcdEnPk8J1t5PTz09Lp0EAKxe7CWIViy350ke5eiaxO3ySrNMX6d83BOHLDqEFMSWm+ad+KEMT4CJrK4X/n/XMgEFAaU5nWlIRqrLRIeU2Ifc625T0Xh4BgTqXPpytQxhgV5b+Fi6duNk4cy+QnHT4ymxI6BPD9HvSQwc+O7h37qjvJVZmpQX6AP8O75Yza8ZbcYKRIIxZzOkwNpzE5A/vpvP5bCRn7AGcT3ORWmAYr/etr3vxUvt2fQz6U/R4S915V+AeWBdcp+uExu6VZ42M0vhhh0lyzx1VRJGVdV+LoxFKkaC42d0yT+O1QEhSB7WL1D3/a/iWubv6ieB/cvNMhFaK9DA=
117 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 0 iQIVAwUAVjZiKiBXgaxoKi1yAQKBWQ/+JcE37vprSOA5e0ezs/avC7leR6hTlXy9O5bpFnvMpbVMTUp+KfBE4HxTT0KKXKh9lGtNaQ+lAmHuy1OQE1hBKPIaCUd8/1gunGsXgRM3TJ9LwjFd4qFpOMxvOouc6kW5kmea7V9W2fg6aFNjjc/4/0J3HMOIjmf2fFz87xqR1xX8iezJ57A4pUPNViJlOWXRzfa56cI6VUe5qOMD0NRXcY+JyI5qW25Y/aL5D9loeKflpzd53Ue+Pu3qlhddJd3PVkaAiVDH+DYyRb8sKgwuiEsyaBO18IBgC8eDmTohEJt6707A+WNhwBJwp9aOUhHC7caaKRYhEKuDRQ3op++VqwuxbFRXx22XYR9bEzQIlpsv9GY2k8SShU5MZqUKIhk8vppFI6RaID5bmALnLLmjmXfSPYSJDzDuCP5UTQgI3PKPOATorVrqMdKzfb7FiwtcTvtHAXpOgLaY9P9XIePbnei6Rx9TfoHYDvzFWRqzSjl21xR+ZUrJtG2fx7XLbMjEAZJcnjP++GRvNbHBOi57aX0l2LO1peQqZVMULoIivaoLFP3i16RuXXQ/bvKyHmKjJzGrLc0QCa0yfrvV2m30RRMaYlOv7ToJfdfZLXvSAP0zbAuDaXdjGnq7gpfIlNE3xM+kQ75Akcf4V4fK1p061EGBQvQz6Ov3PkPiWL/bxrQ=
117 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 0 iQIVAwUAVjZiKiBXgaxoKi1yAQKBWQ/+JcE37vprSOA5e0ezs/avC7leR6hTlXy9O5bpFnvMpbVMTUp+KfBE4HxTT0KKXKh9lGtNaQ+lAmHuy1OQE1hBKPIaCUd8/1gunGsXgRM3TJ9LwjFd4qFpOMxvOouc6kW5kmea7V9W2fg6aFNjjc/4/0J3HMOIjmf2fFz87xqR1xX8iezJ57A4pUPNViJlOWXRzfa56cI6VUe5qOMD0NRXcY+JyI5qW25Y/aL5D9loeKflpzd53Ue+Pu3qlhddJd3PVkaAiVDH+DYyRb8sKgwuiEsyaBO18IBgC8eDmTohEJt6707A+WNhwBJwp9aOUhHC7caaKRYhEKuDRQ3op++VqwuxbFRXx22XYR9bEzQIlpsv9GY2k8SShU5MZqUKIhk8vppFI6RaID5bmALnLLmjmXfSPYSJDzDuCP5UTQgI3PKPOATorVrqMdKzfb7FiwtcTvtHAXpOgLaY9P9XIePbnei6Rx9TfoHYDvzFWRqzSjl21xR+ZUrJtG2fx7XLbMjEAZJcnjP++GRvNbHBOi57aX0l2LO1peQqZVMULoIivaoLFP3i16RuXXQ/bvKyHmKjJzGrLc0QCa0yfrvV2m30RRMaYlOv7ToJfdfZLXvSAP0zbAuDaXdjGnq7gpfIlNE3xM+kQ75Akcf4V4fK1p061EGBQvQz6Ov3PkPiWL/bxrQ=
118 1aa5083cbebbe7575c88f3402ab377539b484897 0 iQIVAwUAVkEdCCBXgaxoKi1yAQKdWg//crTr5gsnHQppuD1p+PPn3/7SMsWJ7bgbuaXgERDLC0zWMfhM2oMmu/4jqXnpangdBVvb0SojejgzxoBo9FfRQiIoKt0vxmmn+S8CrEwb99rpP4M7lgyMAInKPMXQdYxkoDNwL70Afmog6eBtlxjYnu8nmUE/swu6JoVns+tF8UOvIKFYbuCcGujo2pUOQC0xBGiHeHSGRDJOlWmY2d7D/PkQtQE/u/d4QZt7enTHMiV44XVJ8+0U0f1ZQE7V+hNWf+IjwcZtL95dnQzUKs6tXMIln/OwO+eJ3d61BfLvmABvCwUC9IepPssNSFBUfGqBAP5wXOzFIPSYn00IWpmZtCnpUNL99X1IV3RP+p99gnEDTScQFPYt5B0q5I1nFdRh1p48BSF/kjPA7V++UfBwMXrrYLKhUR9BjmrRzYnyXJKwbH6iCNj5hsXUkVrBdBi/FnMczgsVILfFcIXUfnJD3E/dG+1lmuObg6dEynxiGChTuaR4KkLa5ZRkUcUl6fWlSRsqSNbGEEbdwcI+nTCZqJUlLSghumhs0Z89Hs1nltBd1ALX2VLJEHrKMrFQ8NfEBeCB6ENqMJi5qPlq354MCdGOZ9RvisX/HlxE4Q61BW0+EwnyXSch6LFSOS3axOocUazMoK1XiOTJSv/5bAsnwb0ztDWeUj9fZEJL+SWtgB8=
118 1aa5083cbebbe7575c88f3402ab377539b484897 0 iQIVAwUAVkEdCCBXgaxoKi1yAQKdWg//crTr5gsnHQppuD1p+PPn3/7SMsWJ7bgbuaXgERDLC0zWMfhM2oMmu/4jqXnpangdBVvb0SojejgzxoBo9FfRQiIoKt0vxmmn+S8CrEwb99rpP4M7lgyMAInKPMXQdYxkoDNwL70Afmog6eBtlxjYnu8nmUE/swu6JoVns+tF8UOvIKFYbuCcGujo2pUOQC0xBGiHeHSGRDJOlWmY2d7D/PkQtQE/u/d4QZt7enTHMiV44XVJ8+0U0f1ZQE7V+hNWf+IjwcZtL95dnQzUKs6tXMIln/OwO+eJ3d61BfLvmABvCwUC9IepPssNSFBUfGqBAP5wXOzFIPSYn00IWpmZtCnpUNL99X1IV3RP+p99gnEDTScQFPYt5B0q5I1nFdRh1p48BSF/kjPA7V++UfBwMXrrYLKhUR9BjmrRzYnyXJKwbH6iCNj5hsXUkVrBdBi/FnMczgsVILfFcIXUfnJD3E/dG+1lmuObg6dEynxiGChTuaR4KkLa5ZRkUcUl6fWlSRsqSNbGEEbdwcI+nTCZqJUlLSghumhs0Z89Hs1nltBd1ALX2VLJEHrKMrFQ8NfEBeCB6ENqMJi5qPlq354MCdGOZ9RvisX/HlxE4Q61BW0+EwnyXSch6LFSOS3axOocUazMoK1XiOTJSv/5bAsnwb0ztDWeUj9fZEJL+SWtgB8=
119 2d437a0f3355834a9485bbbeb30a52a052c98f19 0 iQIVAwUAVl5U9CBXgaxoKi1yAQLocg//a4YFz9UVSIEzVEJMUPJnN2dBvEXRpwpb5CdKPd428+18K6VWZd5Mc6xNNRV5AV/hCYylgqDplIvyOvwCj7uN8nEOrLUQQ0Pp37M5ZIX8ZVCK/wgchJ2ltabUG1NrZ7/JA84U79VGLAECMnD0Z9WvZDESpVXmdXfxrk1eCc3omRB0ofNghEx+xpYworfZsu8aap1GHQuBsjPv4VyUWGpMq/KA01PdxRTELmrJnfSyr0nPKwxlI5KsbA1GOe+Mk3tp5HJ42DZqLtKSGPirf6E+6lRJeB0H7EpotN4wD3yZDsw6AgRb2C/ay/3T3Oz7CN+45mwuujV9Cxx5zs1EeOgZcqgA/hXMcwlQyvQDMrWpO8ytSBm6MhOuFOTB3HnUxfsnfSocLJsbNwGWKceAzACcXSqapveVAz/7h+InFgl/8Qce28UJdnX5wro5gP6UWt+xrvc7vfmVGgI3oxbiOUrfglhkjmrxBjEiDQy4BWH7HWMZUVxnqPQRcxIE10+dv0KtM/PBkbUtnbGJ88opFBGkFweje5vQcZy/duuPEIufRkPr8EV47QjOxlvldEjlLq3+QUdJZEgCIFw1X0y7Pix4dsPFjwOmAyo4El1ePrdFzG3dXSVA3eHvMDRnYnNlue9wHvKhYbBle5xTOZBgGuMzhDVe+54JLql5JYr4WrI1pvA=
119 2d437a0f3355834a9485bbbeb30a52a052c98f19 0 iQIVAwUAVl5U9CBXgaxoKi1yAQLocg//a4YFz9UVSIEzVEJMUPJnN2dBvEXRpwpb5CdKPd428+18K6VWZd5Mc6xNNRV5AV/hCYylgqDplIvyOvwCj7uN8nEOrLUQQ0Pp37M5ZIX8ZVCK/wgchJ2ltabUG1NrZ7/JA84U79VGLAECMnD0Z9WvZDESpVXmdXfxrk1eCc3omRB0ofNghEx+xpYworfZsu8aap1GHQuBsjPv4VyUWGpMq/KA01PdxRTELmrJnfSyr0nPKwxlI5KsbA1GOe+Mk3tp5HJ42DZqLtKSGPirf6E+6lRJeB0H7EpotN4wD3yZDsw6AgRb2C/ay/3T3Oz7CN+45mwuujV9Cxx5zs1EeOgZcqgA/hXMcwlQyvQDMrWpO8ytSBm6MhOuFOTB3HnUxfsnfSocLJsbNwGWKceAzACcXSqapveVAz/7h+InFgl/8Qce28UJdnX5wro5gP6UWt+xrvc7vfmVGgI3oxbiOUrfglhkjmrxBjEiDQy4BWH7HWMZUVxnqPQRcxIE10+dv0KtM/PBkbUtnbGJ88opFBGkFweje5vQcZy/duuPEIufRkPr8EV47QjOxlvldEjlLq3+QUdJZEgCIFw1X0y7Pix4dsPFjwOmAyo4El1ePrdFzG3dXSVA3eHvMDRnYnNlue9wHvKhYbBle5xTOZBgGuMzhDVe+54JLql5JYr4WrI1pvA=
120 ea389970c08449440587712117f178d33bab3f1e 0 iQIVAwUAVociGyBXgaxoKi1yAQJx9Q//TzMypcls5CQW3DM9xY1Q+RFeIw1LcDIev6NDBjUYxULb2WIK2qPw4Th5czF622SMd+XO/kiQeWYp9IW90MZOUVT1YGgUPKlKWMjkf0lZEPzprHjHq0+z/no1kBCBQg2uUOLsb6Y7zom4hFCyPsxXOk5nnxcFEK0VDbODa9zoKb/flyQ7rtzs+Z6BljIQ0TJAJsXs+6XgrW1XJ/f6nbeqsQyPklIBJuGKiaU1Pg8wQe6QqFaO1NYgM3hBETku6r3OTpUhu/2FTUZ7yDWGGzBqmifxzdHoj7/B+2qzRpII77PlZqoe6XF+UOObSFnhKvXKLjlGY5cy3SXBMbHkPcYtHua8wYR8LqO2bYYnsDd9qD0DJ+LlqH0ZMUkB2Cdk9q/cp1PGJWGlYYecHP87DLuWKwS+a6LhVI9TGkIUosVtLaIMsUUEz83RJFb4sSGOXtjk5DDznn9QW8ltXXMTdGQwFq1vmuiXATYenhszbvagrnbAnDyNFths4IhS1jG8237SB36nGmO3zQm5V7AMHfSrISB/8VPyY4Si7uvAV2kMWxuMhYuQbBwVx/KxbKrYjowuvJvCKaV101rWxvSeU2wDih20v+dnQKPveRNnO8AAK/ICflVVsISkd7hXcfk+SnhfxcPQTr+HQIJEW9wt5Q8WbgHk9wuR8kgXQEX6tCGpT/w=
120 ea389970c08449440587712117f178d33bab3f1e 0 iQIVAwUAVociGyBXgaxoKi1yAQJx9Q//TzMypcls5CQW3DM9xY1Q+RFeIw1LcDIev6NDBjUYxULb2WIK2qPw4Th5czF622SMd+XO/kiQeWYp9IW90MZOUVT1YGgUPKlKWMjkf0lZEPzprHjHq0+z/no1kBCBQg2uUOLsb6Y7zom4hFCyPsxXOk5nnxcFEK0VDbODa9zoKb/flyQ7rtzs+Z6BljIQ0TJAJsXs+6XgrW1XJ/f6nbeqsQyPklIBJuGKiaU1Pg8wQe6QqFaO1NYgM3hBETku6r3OTpUhu/2FTUZ7yDWGGzBqmifxzdHoj7/B+2qzRpII77PlZqoe6XF+UOObSFnhKvXKLjlGY5cy3SXBMbHkPcYtHua8wYR8LqO2bYYnsDd9qD0DJ+LlqH0ZMUkB2Cdk9q/cp1PGJWGlYYecHP87DLuWKwS+a6LhVI9TGkIUosVtLaIMsUUEz83RJFb4sSGOXtjk5DDznn9QW8ltXXMTdGQwFq1vmuiXATYenhszbvagrnbAnDyNFths4IhS1jG8237SB36nGmO3zQm5V7AMHfSrISB/8VPyY4Si7uvAV2kMWxuMhYuQbBwVx/KxbKrYjowuvJvCKaV101rWxvSeU2wDih20v+dnQKPveRNnO8AAK/ICflVVsISkd7hXcfk+SnhfxcPQTr+HQIJEW9wt5Q8WbgHk9wuR8kgXQEX6tCGpT/w=
121 158bdc8965720ca4061f8f8d806563cfc7cdb62e 0 iQIVAwUAVqBhFyBXgaxoKi1yAQLJpQ//S8kdgmVlS+CI0d2hQVGYWB/eK+tcntG+bZKLto4bvVy5d0ymlDL0x7VrJMOkwzkU1u/GaYo3L6CVEiM/JGCgB32bllrpx+KwQ0AyHswMZruo/6xrjDIYymLMEJ9yonXBZsG7pf2saYTHm3C5/ZIPkrDZSlssJHJDdeWqd75hUnx3nX8dZ4jIIxYDhtdB5/EmuEGOVlbeBHVpwfDXidSJUHJRwJvDqezUlN003sQdUvOHHtRqBrhsYEhHqPMOxDidAgCvjSfWZQKOTKaPE/gQo/BP3GU++Fg55jBz+SBXpdfQJI2Gd8FZfjLkhFa9vTTTcd10YCd4CZbYLpj/4R2xWj1U4oTVEFa6d+AA5Yyu8xG53XSCCPyzfagyuyfLqsaq5r1qDZO/Mh5KZCTvc9xSF5KXj57mKvzMDpiNeQcamGmsV4yXxymKJKGMQvbnzqp+ItIdbnfk38Nuac8rqNnGmFYwMIPa50680vSZT/NhrlPJ8FVTJlfHtSUZbdjPpsqw7BgjFWaVUdwgCKIGERiK7zfR0innj9rF5oVwT8EbKiaR1uVxOKnTwZzPCbdO1euNg/HutZLVQmugiLAv5Z38L3YZf5bH7zJdUydhiTI4mGn/mgncsKXoSarnnduhoYu9OsQZc9pndhxjAEuAslEIyBsLy81fR2HOhUzw5FGNgdY=
121 158bdc8965720ca4061f8f8d806563cfc7cdb62e 0 iQIVAwUAVqBhFyBXgaxoKi1yAQLJpQ//S8kdgmVlS+CI0d2hQVGYWB/eK+tcntG+bZKLto4bvVy5d0ymlDL0x7VrJMOkwzkU1u/GaYo3L6CVEiM/JGCgB32bllrpx+KwQ0AyHswMZruo/6xrjDIYymLMEJ9yonXBZsG7pf2saYTHm3C5/ZIPkrDZSlssJHJDdeWqd75hUnx3nX8dZ4jIIxYDhtdB5/EmuEGOVlbeBHVpwfDXidSJUHJRwJvDqezUlN003sQdUvOHHtRqBrhsYEhHqPMOxDidAgCvjSfWZQKOTKaPE/gQo/BP3GU++Fg55jBz+SBXpdfQJI2Gd8FZfjLkhFa9vTTTcd10YCd4CZbYLpj/4R2xWj1U4oTVEFa6d+AA5Yyu8xG53XSCCPyzfagyuyfLqsaq5r1qDZO/Mh5KZCTvc9xSF5KXj57mKvzMDpiNeQcamGmsV4yXxymKJKGMQvbnzqp+ItIdbnfk38Nuac8rqNnGmFYwMIPa50680vSZT/NhrlPJ8FVTJlfHtSUZbdjPpsqw7BgjFWaVUdwgCKIGERiK7zfR0innj9rF5oVwT8EbKiaR1uVxOKnTwZzPCbdO1euNg/HutZLVQmugiLAv5Z38L3YZf5bH7zJdUydhiTI4mGn/mgncsKXoSarnnduhoYu9OsQZc9pndhxjAEuAslEIyBsLy81fR2HOhUzw5FGNgdY=
122 2408645de650d8a29a6ce9e7dce601d8dd0d1474 0 iQIVAwUAVq/xFSBXgaxoKi1yAQLsxhAAg+E6uJCtZZOugrrFi9S6C20SRPBwHwmw22PC5z3Ufp9Vf3vqSL/+zmWI9d/yezIVcTXgM9rKCvq58sZvo4FuO2ngPx7bL9LMJ3qx0IyHUKjwa3AwrzjSzvVhNIrRoimD+lVBI/GLmoszpMICM+Nyg3D41fNJKs6YpnwwsHNJkjMwz0n2SHAShWAgIilyANNVnwnzHE68AIkB/gBkUGtrjf6xB9mXQxAv4GPco/234FAkX9xSWsM0Rx+JLLrSBXoHmIlmu9LPjC0AKn8/DDke+fj7bFaF7hdJBUYOtlYH6f7NIvyZSpw0FHl7jPxoRCtXzIV+1dZEbbIMIXzNtzPFVDYDfMhLqpTgthkZ9x0UaMaHecCUWYYBp8G/IyVS40GJodl8xnRiXUkFejbK/NDdR1f9iZS0dtiFu66cATMdb6d+MG+zW0nDKiQmBt6bwynysqn4g3SIGQFEPyEoRy0bXiefHrlkeHbdfc4zgoejx3ywcRDMGvUbpWs5C43EPu44irKXcqC695vAny3A7nZpt/XP5meDdOF67DNQPvhFdjPPbJBpSsUi2hUlZ+599wUfr3lNVzeEzHT7XApTOf6ysuGtHH3qcVHpFqQSRL1MI0f2xL13UadgTVWYrnHEis7f+ncwlWiR0ucpJB3+dQQh3NVGVo89MfbIZPkA8iil03U=
122 2408645de650d8a29a6ce9e7dce601d8dd0d1474 0 iQIVAwUAVq/xFSBXgaxoKi1yAQLsxhAAg+E6uJCtZZOugrrFi9S6C20SRPBwHwmw22PC5z3Ufp9Vf3vqSL/+zmWI9d/yezIVcTXgM9rKCvq58sZvo4FuO2ngPx7bL9LMJ3qx0IyHUKjwa3AwrzjSzvVhNIrRoimD+lVBI/GLmoszpMICM+Nyg3D41fNJKs6YpnwwsHNJkjMwz0n2SHAShWAgIilyANNVnwnzHE68AIkB/gBkUGtrjf6xB9mXQxAv4GPco/234FAkX9xSWsM0Rx+JLLrSBXoHmIlmu9LPjC0AKn8/DDke+fj7bFaF7hdJBUYOtlYH6f7NIvyZSpw0FHl7jPxoRCtXzIV+1dZEbbIMIXzNtzPFVDYDfMhLqpTgthkZ9x0UaMaHecCUWYYBp8G/IyVS40GJodl8xnRiXUkFejbK/NDdR1f9iZS0dtiFu66cATMdb6d+MG+zW0nDKiQmBt6bwynysqn4g3SIGQFEPyEoRy0bXiefHrlkeHbdfc4zgoejx3ywcRDMGvUbpWs5C43EPu44irKXcqC695vAny3A7nZpt/XP5meDdOF67DNQPvhFdjPPbJBpSsUi2hUlZ+599wUfr3lNVzeEzHT7XApTOf6ysuGtHH3qcVHpFqQSRL1MI0f2xL13UadgTVWYrnHEis7f+ncwlWiR0ucpJB3+dQQh3NVGVo89MfbIZPkA8iil03U=
123 b698abf971e7377d9b7ec7fc8c52df45255b0329 0 iQIVAwUAVrJ4YCBXgaxoKi1yAQJsKw/+JHSR0bIyarO4/VilFwsYxCprOnPxmUdS4qc4yjvpbf7Dqqr/OnOHJA29LrMoqWqsHgREepemjqiNindwNtlZec+KgmbF08ihSBBpls96UTTYTcytKRkkbrB+FhwB0iDl/o8RgGPniyG6M7gOp6p8pXQVRCOToIY1B/G0rtpkcU1N3GbiZntO5Fm/LPAVIE74VaDsamMopQ/wEB8qiERngX/M8SjO1ZSaVNW6KjRUsarLXQB9ziVJBolK/WnQsDwEeuWU2udpjBiOHnFC6h84uBpc8rLGhr419bKMJcjgl+0sl2zHGPY2edQYuJqVjVENzf4zzZA+xPgKw3GrSTpd37PEnGU/fufdJ0X+pp3kvmO1cV3TsvVMTCn7NvS6+w8SGdHdwKQQwelYI6vmJnjuOCATbafJiHMaOQ0GVYYk6PPoGrYcQ081x6dStCMaHIPOV1Wirwd2wq+SN9Ql8H6njftBf5Sa5tVWdW/zrhsltMsdZYZagZ/oFT3t83exL0rgZ96bZFs0j3HO3APELygIVuQ6ybPsFyToMDbURNDvr7ZqPKhQkkdHIUMqEez5ReuVgpbO9CWV/yWpB1/ZCpjNBZyDvw05kG2mOoC7AbHc8aLUS/8DetAmhwyb48LW4qjfUkO7RyxVSxqdnaBOMlsg1wsP2S+SlkZKsDHjcquZJ5U=
123 b698abf971e7377d9b7ec7fc8c52df45255b0329 0 iQIVAwUAVrJ4YCBXgaxoKi1yAQJsKw/+JHSR0bIyarO4/VilFwsYxCprOnPxmUdS4qc4yjvpbf7Dqqr/OnOHJA29LrMoqWqsHgREepemjqiNindwNtlZec+KgmbF08ihSBBpls96UTTYTcytKRkkbrB+FhwB0iDl/o8RgGPniyG6M7gOp6p8pXQVRCOToIY1B/G0rtpkcU1N3GbiZntO5Fm/LPAVIE74VaDsamMopQ/wEB8qiERngX/M8SjO1ZSaVNW6KjRUsarLXQB9ziVJBolK/WnQsDwEeuWU2udpjBiOHnFC6h84uBpc8rLGhr419bKMJcjgl+0sl2zHGPY2edQYuJqVjVENzf4zzZA+xPgKw3GrSTpd37PEnGU/fufdJ0X+pp3kvmO1cV3TsvVMTCn7NvS6+w8SGdHdwKQQwelYI6vmJnjuOCATbafJiHMaOQ0GVYYk6PPoGrYcQ081x6dStCMaHIPOV1Wirwd2wq+SN9Ql8H6njftBf5Sa5tVWdW/zrhsltMsdZYZagZ/oFT3t83exL0rgZ96bZFs0j3HO3APELygIVuQ6ybPsFyToMDbURNDvr7ZqPKhQkkdHIUMqEez5ReuVgpbO9CWV/yWpB1/ZCpjNBZyDvw05kG2mOoC7AbHc8aLUS/8DetAmhwyb48LW4qjfUkO7RyxVSxqdnaBOMlsg1wsP2S+SlkZKsDHjcquZJ5U=
124 d493d64757eb45ada99fcb3693e479a51b7782da 0 iQIVAwUAVtYt4SBXgaxoKi1yAQL6TQ/9FzYE/xOSC2LYqPdPjCXNjGuZdN1WMf/8fUMYT83NNOoLEBGx37C0bAxgD4/P03FwYMuP37IjIcX8vN6fWvtG9Oo0o2n/oR3SKjpsheh2zxhAFX3vXhFD4U18wCz/DnM0O1qGJwJ49kk/99WNgDWeW4n9dMzTFpcaeZBCu1REbZQS40Z+ArXTDCr60g5TLN1XR1WKEzQJvF71rvaE6P8d3GLoGobTIJMLi5UnMwGsnsv2/EIPrWHQiAY9ZEnYq6deU/4RMh9c7afZie9I+ycIA/qVH6vXNt3/a2BP3Frmv8IvKPzqwnoWmIUamew9lLf1joD5joBy8Yu+qMW0/s6DYUGQ4Slk9qIfn6wh4ySgT/7FJUMcayx9ONDq7920RjRc+XFpD8B3Zhj2mM+0g9At1FgX2w2Gkf957oz2nlgTVh9sdPvP6UvWzhqszPMpdG5Vt0oc5vuyobW333qSkufCxi5gmH7do1DIzErMcy8b6IpZUDeQ/dakKwLQpZVVPF15IrNa/zsOW55SrGrL8/ErM/mXNQBBAqvRsOLq2njFqK2JaoG6biH21DMjHVZFw2wBRoLQxbOppfz2/e3mNkNy9HjgJTW3+0iHWvRzMSjwRbk9BlbkmH6kG5163ElHq3Ft3uuQyZBL9I5SQxlHi9s/CV0YSTYthpWR3ChKIMoqBQ0=
124 d493d64757eb45ada99fcb3693e479a51b7782da 0 iQIVAwUAVtYt4SBXgaxoKi1yAQL6TQ/9FzYE/xOSC2LYqPdPjCXNjGuZdN1WMf/8fUMYT83NNOoLEBGx37C0bAxgD4/P03FwYMuP37IjIcX8vN6fWvtG9Oo0o2n/oR3SKjpsheh2zxhAFX3vXhFD4U18wCz/DnM0O1qGJwJ49kk/99WNgDWeW4n9dMzTFpcaeZBCu1REbZQS40Z+ArXTDCr60g5TLN1XR1WKEzQJvF71rvaE6P8d3GLoGobTIJMLi5UnMwGsnsv2/EIPrWHQiAY9ZEnYq6deU/4RMh9c7afZie9I+ycIA/qVH6vXNt3/a2BP3Frmv8IvKPzqwnoWmIUamew9lLf1joD5joBy8Yu+qMW0/s6DYUGQ4Slk9qIfn6wh4ySgT/7FJUMcayx9ONDq7920RjRc+XFpD8B3Zhj2mM+0g9At1FgX2w2Gkf957oz2nlgTVh9sdPvP6UvWzhqszPMpdG5Vt0oc5vuyobW333qSkufCxi5gmH7do1DIzErMcy8b6IpZUDeQ/dakKwLQpZVVPF15IrNa/zsOW55SrGrL8/ErM/mXNQBBAqvRsOLq2njFqK2JaoG6biH21DMjHVZFw2wBRoLQxbOppfz2/e3mNkNy9HjgJTW3+0iHWvRzMSjwRbk9BlbkmH6kG5163ElHq3Ft3uuQyZBL9I5SQxlHi9s/CV0YSTYthpWR3ChKIMoqBQ0=
125 ae279d4a19e9683214cbd1fe8298cf0b50571432 0 iQIVAwUAVvqzViBXgaxoKi1yAQKUCxAAtctMD3ydbe+li3iYjhY5qT0wyHwPr9fcLqsQUJ4ZtD4sK3oxCRZFWFxNBk5bIIyiwusSEJPiPddoQ7NljSZlYDI0HR3R4vns55fmDwPG07Ykf7aSyqr+c2ppCGzn2/2ID476FNtzKqjF+LkVyadgI9vgZk5S4BgdSlfSRBL+1KtB1BlF5etIZnc5U9qs1uqzZJc06xyyF8HlrmMZkAvRUbsx/JzA5LgzZ2WzueaxZgYzYjDk0nPLgyPPBj0DVyWXnW/kdRNmKHNbaZ9aZlWmdPCEoq5iBm71d7Xoa61shmeuVZWvxHNqXdjVMHVeT61cRxjdfxTIkJwvlRGwpy7V17vTgzWFxw6QJpmr7kupRo3idsDydLDPHGUsxP3uMZFsp6+4rEe6qbafjNajkRyiw7kVGCxboOFN0rLVJPZwZGksEIkw58IHcPhZNT1bHHocWOA/uHJTAynfKsAdv/LDdGKcZWUCFOzlokw54xbPvdrBtEOnYNp15OY01IAJd2FCUki5WHvhELUggTjfank1Tc3/Rt1KrGOFhg80CWq6eMiuiWkHGvYq3fjNLbgjl3JJatUFoB+cX1ulDOGsLJEXQ4v5DNHgel0o2H395owNlStksSeW1UBVk0hUK/ADtVUYKAPEIFiboh1iDpEOl40JVnYdsGz3w5FLj2w+16/1vWs=
125 ae279d4a19e9683214cbd1fe8298cf0b50571432 0 iQIVAwUAVvqzViBXgaxoKi1yAQKUCxAAtctMD3ydbe+li3iYjhY5qT0wyHwPr9fcLqsQUJ4ZtD4sK3oxCRZFWFxNBk5bIIyiwusSEJPiPddoQ7NljSZlYDI0HR3R4vns55fmDwPG07Ykf7aSyqr+c2ppCGzn2/2ID476FNtzKqjF+LkVyadgI9vgZk5S4BgdSlfSRBL+1KtB1BlF5etIZnc5U9qs1uqzZJc06xyyF8HlrmMZkAvRUbsx/JzA5LgzZ2WzueaxZgYzYjDk0nPLgyPPBj0DVyWXnW/kdRNmKHNbaZ9aZlWmdPCEoq5iBm71d7Xoa61shmeuVZWvxHNqXdjVMHVeT61cRxjdfxTIkJwvlRGwpy7V17vTgzWFxw6QJpmr7kupRo3idsDydLDPHGUsxP3uMZFsp6+4rEe6qbafjNajkRyiw7kVGCxboOFN0rLVJPZwZGksEIkw58IHcPhZNT1bHHocWOA/uHJTAynfKsAdv/LDdGKcZWUCFOzlokw54xbPvdrBtEOnYNp15OY01IAJd2FCUki5WHvhELUggTjfank1Tc3/Rt1KrGOFhg80CWq6eMiuiWkHGvYq3fjNLbgjl3JJatUFoB+cX1ulDOGsLJEXQ4v5DNHgel0o2H395owNlStksSeW1UBVk0hUK/ADtVUYKAPEIFiboh1iDpEOl40JVnYdsGz3w5FLj2w+16/1vWs=
126 740156eedf2c450aee58b1a90b0e826f47c5da64 0 iQIVAwUAVxLGMCBXgaxoKi1yAQLhIg/8DDX+sCz7LmqO47/FfTo+OqGR+bTTqpfK3WebitL0Z6hbXPj7s45jijqIFGqKgMPqS5oom1xeuGTPHdYA0NNoc/mxSCuNLfuXYolpNWPN71HeSDRV9SnhMThG5HSxI+P0Ye4rbsCHrVV+ib1rV81QE2kZ9aZsJd0HnGd512xJ+2ML7AXweM/4lcLmMthN+oi/dv1OGLzfckrcr/fEATCLZt55eO7idx11J1Fk4ptQ6dQ/bKznlD4hneyy1HMPsGxw+bCXrMF2C/nUiRLHdKgGqZ+cDq6loQRfFlQoIhfoEnWC424qbjH4rvHgkZHqC59Oi/ti9Hi75oq9Tb79yzlCY/fGsdrlJpEzrTQdHFMHUoO9CC+JYObXHRo3ALnC5350ZBKxlkdpmucrHTgcDabfhRlx9vDxP4RDopm2hAjk2LJH7bdxnGEyZYkTOZ3hXKnVpt2hUQb4jyzzC9Kl47TFpPKNVKI+NLqRRZAIdXXiy24KD7WzzE6L0NNK0/IeqKBENLL8I1PmDQ6XmYTQVhTuad1jjm2PZDyGiXmJFZO1O/NGecVTvVynKsDT6XhEvzyEtjXqD98rrhbeMHTcmNSwwJMDvm9ws0075sLQyq2EYFG6ECWFypdA/jfumTmxOTkMtuy/V1Gyq7YJ8YaksZ7fXNY9VuJFP72grmlXc6Dvpr4=
126 740156eedf2c450aee58b1a90b0e826f47c5da64 0 iQIVAwUAVxLGMCBXgaxoKi1yAQLhIg/8DDX+sCz7LmqO47/FfTo+OqGR+bTTqpfK3WebitL0Z6hbXPj7s45jijqIFGqKgMPqS5oom1xeuGTPHdYA0NNoc/mxSCuNLfuXYolpNWPN71HeSDRV9SnhMThG5HSxI+P0Ye4rbsCHrVV+ib1rV81QE2kZ9aZsJd0HnGd512xJ+2ML7AXweM/4lcLmMthN+oi/dv1OGLzfckrcr/fEATCLZt55eO7idx11J1Fk4ptQ6dQ/bKznlD4hneyy1HMPsGxw+bCXrMF2C/nUiRLHdKgGqZ+cDq6loQRfFlQoIhfoEnWC424qbjH4rvHgkZHqC59Oi/ti9Hi75oq9Tb79yzlCY/fGsdrlJpEzrTQdHFMHUoO9CC+JYObXHRo3ALnC5350ZBKxlkdpmucrHTgcDabfhRlx9vDxP4RDopm2hAjk2LJH7bdxnGEyZYkTOZ3hXKnVpt2hUQb4jyzzC9Kl47TFpPKNVKI+NLqRRZAIdXXiy24KD7WzzE6L0NNK0/IeqKBENLL8I1PmDQ6XmYTQVhTuad1jjm2PZDyGiXmJFZO1O/NGecVTvVynKsDT6XhEvzyEtjXqD98rrhbeMHTcmNSwwJMDvm9ws0075sLQyq2EYFG6ECWFypdA/jfumTmxOTkMtuy/V1Gyq7YJ8YaksZ7fXNY9VuJFP72grmlXc6Dvpr4=
127 f85de28eae32e7d3064b1a1321309071bbaaa069 0 iQIVAwUAVyZQaiBXgaxoKi1yAQJhCQ//WrRZ55k3VI/OgY+I/HvgFHOC0sbhe207Kedxvy00a3AtXM6wa5E95GNX04QxUfTWUf5ZHDfEgj0/mQywNrH1oJG47iPZSs+qXNLqtgAaXtrih6r4/ruUwFCRFxqK9mkhjG61SKicw3Q7uGva950g6ZUE5BsZ7XJWgoDcJzWKR+AH992G6H//Fhi4zFQAmB34++sm80wV6wMxVKA/qhQzetooTR2x9qrHpvCKMzKllleJe48yzPLJjQoaaVgXCDav0eIePFNw0WvVSldOEp/ADDdTGa65qsC1rO2BB1Cu5+frJ/vUoo0PwIgqgD6p2i41hfIKvkp6130TxmRVxUx+ma8gBYEpPIabV0flLU72gq8lMlGBBSnQ+fcZsfs/Ug0xRN0tzkEScmZFiDxRGk0y7IalXzv6irwOyC2fZCajXGJDzkROQXWMgy9eKkwuFhZBmPVYtrATSq3jHLVmJg5vfdeiVzA6NKxAgGm2z8AsRrijKK8WRqFYiH6xcWKG5u+FroPQdKa0nGCkPSTH3tvC6fAHTVm7JeXch5QE/LiS9Y575pM2PeIP+k+Fr1ugK0AEvYJAXa5UIIcdszPyI+TwPTtWaQ83X99qGAdmRWLvSYjqevOVr7F/fhO3XKFXRCcHA3EzVYnG7nWiVACYF3H2UgN4PWjStbx/Qhhdi9xAuks=
127 f85de28eae32e7d3064b1a1321309071bbaaa069 0 iQIVAwUAVyZQaiBXgaxoKi1yAQJhCQ//WrRZ55k3VI/OgY+I/HvgFHOC0sbhe207Kedxvy00a3AtXM6wa5E95GNX04QxUfTWUf5ZHDfEgj0/mQywNrH1oJG47iPZSs+qXNLqtgAaXtrih6r4/ruUwFCRFxqK9mkhjG61SKicw3Q7uGva950g6ZUE5BsZ7XJWgoDcJzWKR+AH992G6H//Fhi4zFQAmB34++sm80wV6wMxVKA/qhQzetooTR2x9qrHpvCKMzKllleJe48yzPLJjQoaaVgXCDav0eIePFNw0WvVSldOEp/ADDdTGa65qsC1rO2BB1Cu5+frJ/vUoo0PwIgqgD6p2i41hfIKvkp6130TxmRVxUx+ma8gBYEpPIabV0flLU72gq8lMlGBBSnQ+fcZsfs/Ug0xRN0tzkEScmZFiDxRGk0y7IalXzv6irwOyC2fZCajXGJDzkROQXWMgy9eKkwuFhZBmPVYtrATSq3jHLVmJg5vfdeiVzA6NKxAgGm2z8AsRrijKK8WRqFYiH6xcWKG5u+FroPQdKa0nGCkPSTH3tvC6fAHTVm7JeXch5QE/LiS9Y575pM2PeIP+k+Fr1ugK0AEvYJAXa5UIIcdszPyI+TwPTtWaQ83X99qGAdmRWLvSYjqevOVr7F/fhO3XKFXRCcHA3EzVYnG7nWiVACYF3H2UgN4PWjStbx/Qhhdi9xAuks=
128 a56296f55a5e1038ea5016dace2076b693c28a56 0 iQIVAwUAVyZarCBXgaxoKi1yAQL87g/8D7whM3e08HVGDHHEkVUgqLIfueVy1mx0AkRvelmZmwaocFNGpZTd3AjSwy6qXbRNZFXrWU85JJvQCi3PSo/8bK43kwqLJ4lv+Hv2zVTvz30vbLWTSndH3oVRu38lIA7b5K9J4y50pMCwjKLG9iyp+aQG4RBz76fJMlhXy0gu38A8JZVKEeAnQCbtzxKXBzsC8k0/ku/bEQEoo9D4AAGlVTbl5AsHMp3Z6NWu7kEHAX/52/VKU2I0LxYqRxoL1tjTVGkAQfkOHz1gOhLXUgGSYmA9Fb265AYj9cnGWCfyNonlE0Rrk2kAsrjBTGiLyb8WvK/TZmRo4ZpNukzenS9UuAOKxA22Kf9+oN9kKBu1HnwqusYDH9pto1WInCZKV1al7DMBXbGFcnyTXk2xuiTGhVRG5LzCO2QMByBLXiYl77WqqJnzxK3v5lAc/immJl5qa3ATUlTnVBjAs+6cbsbCoY6sjXCT0ClndA9+iZZ1TjPnmLrSeFh5AoE8WHmnFV6oqGN4caX6wiIW5vO+x5Q2ruSsDrwXosXIYzm+0KYKRq9O+MaTwR44Dvq3/RyeIu/cif/Nc7B8bR5Kf7OiRf2T5u97MYAomwGcQfXqgUfm6y7D3Yg+IdAdAJKitxhRPsqqdxIuteXMvOvwukXNDiWP1zsKoYLI37EcwzvbGLUlZvg=
128 a56296f55a5e1038ea5016dace2076b693c28a56 0 iQIVAwUAVyZarCBXgaxoKi1yAQL87g/8D7whM3e08HVGDHHEkVUgqLIfueVy1mx0AkRvelmZmwaocFNGpZTd3AjSwy6qXbRNZFXrWU85JJvQCi3PSo/8bK43kwqLJ4lv+Hv2zVTvz30vbLWTSndH3oVRu38lIA7b5K9J4y50pMCwjKLG9iyp+aQG4RBz76fJMlhXy0gu38A8JZVKEeAnQCbtzxKXBzsC8k0/ku/bEQEoo9D4AAGlVTbl5AsHMp3Z6NWu7kEHAX/52/VKU2I0LxYqRxoL1tjTVGkAQfkOHz1gOhLXUgGSYmA9Fb265AYj9cnGWCfyNonlE0Rrk2kAsrjBTGiLyb8WvK/TZmRo4ZpNukzenS9UuAOKxA22Kf9+oN9kKBu1HnwqusYDH9pto1WInCZKV1al7DMBXbGFcnyTXk2xuiTGhVRG5LzCO2QMByBLXiYl77WqqJnzxK3v5lAc/immJl5qa3ATUlTnVBjAs+6cbsbCoY6sjXCT0ClndA9+iZZ1TjPnmLrSeFh5AoE8WHmnFV6oqGN4caX6wiIW5vO+x5Q2ruSsDrwXosXIYzm+0KYKRq9O+MaTwR44Dvq3/RyeIu/cif/Nc7B8bR5Kf7OiRf2T5u97MYAomwGcQfXqgUfm6y7D3Yg+IdAdAJKitxhRPsqqdxIuteXMvOvwukXNDiWP1zsKoYLI37EcwzvbGLUlZvg=
129 aaabed77791a75968a12b8c43ad263631a23ee81 0 iQIVAwUAVzpH4CBXgaxoKi1yAQLm5A/9GUYv9CeIepjcdWSBAtNhCBJcqgk2cBcV0XaeQomfxqYWfbW2fze6eE+TrXPKTX1ajycgqquMyo3asQolhHXwasv8+5CQxowjGfyVg7N/kyyjgmJljI+rCi74VfnsEhvG/J4GNr8JLVQmSICfALqQjw7XN8doKthYhwOfIY2vY419613v4oeBQXSsItKC/tfKw9lYvlk4qJKDffJQFyAekgv43ovWqHNkl4LaR6ubtjOsxCnxHfr7OtpX3muM9MLT/obBax5I3EsmiDTQBOjbvI6TcLczs5tVCnTa1opQsPUcEmdA4WpUEiTnLl9lk9le/BIImfYfEP33oVYmubRlKhJYnUiu89ao9L+48FBoqCY88HqbjQI1GO6icfRJN/+NLVeE9wubltbWFETH6e2Q+Ex4+lkul1tQMLPcPt10suMHnEo3/FcOTPt6/DKeMpsYgckHSJq5KzTg632xifyySmb9qkpdGGpY9lRal6FHw3rAhRBqucMgxso4BwC51h04RImtCUQPoA3wpb4BvCHba/thpsUFnHefOvsu3ei4JyHXZK84LPwOj31PcucNFdGDTW6jvKrF1vVUIVS9uMJkJXPu0V4i/oEQSUKifJZivROlpvj1eHy3KeMtjq2kjGyXY2KdzxpT8wX/oYJhCtm1XWMui5f24XBjE6xOcjjm8k4=
129 aaabed77791a75968a12b8c43ad263631a23ee81 0 iQIVAwUAVzpH4CBXgaxoKi1yAQLm5A/9GUYv9CeIepjcdWSBAtNhCBJcqgk2cBcV0XaeQomfxqYWfbW2fze6eE+TrXPKTX1ajycgqquMyo3asQolhHXwasv8+5CQxowjGfyVg7N/kyyjgmJljI+rCi74VfnsEhvG/J4GNr8JLVQmSICfALqQjw7XN8doKthYhwOfIY2vY419613v4oeBQXSsItKC/tfKw9lYvlk4qJKDffJQFyAekgv43ovWqHNkl4LaR6ubtjOsxCnxHfr7OtpX3muM9MLT/obBax5I3EsmiDTQBOjbvI6TcLczs5tVCnTa1opQsPUcEmdA4WpUEiTnLl9lk9le/BIImfYfEP33oVYmubRlKhJYnUiu89ao9L+48FBoqCY88HqbjQI1GO6icfRJN/+NLVeE9wubltbWFETH6e2Q+Ex4+lkul1tQMLPcPt10suMHnEo3/FcOTPt6/DKeMpsYgckHSJq5KzTg632xifyySmb9qkpdGGpY9lRal6FHw3rAhRBqucMgxso4BwC51h04RImtCUQPoA3wpb4BvCHba/thpsUFnHefOvsu3ei4JyHXZK84LPwOj31PcucNFdGDTW6jvKrF1vVUIVS9uMJkJXPu0V4i/oEQSUKifJZivROlpvj1eHy3KeMtjq2kjGyXY2KdzxpT8wX/oYJhCtm1XWMui5f24XBjE6xOcjjm8k4=
130 a9764ab80e11bcf6a37255db7dd079011f767c6c 0 iQIVAwUAV09KHyBXgaxoKi1yAQJBWg/+OywRrqU+zvnL1tHJ95PgatsF7S4ZAHZFR098+oCjUDtKpvnm71o2TKiY4D5cckyD2KNwLWg/qW6V+5+2EYU0Y/ViwPVcngib/ZeJP+Nr44TK3YZMRmfFuUEEzA7sZ2r2Gm8eswv//W79I0hXJeFd/o6FgLnn7AbOjcOn3IhWdGAP6jUHv9zyJigQv6K9wgyvAnK1RQE+2CgMcoyeqao/zs23IPXI6XUHOwfrQ7XrQ83+ciMqN7XNRx+TKsUQoYeUew4AanoDSMPAQ4kIudsP5tOgKeLRPmHX9zg6Y5S1nTpLRNdyAxuNuyZtkQxDYcG5Hft/SIx27tZUo3gywHL2U+9RYD2nvXqaWzT3sYB2sPBOiq7kjHRgvothkXemAFsbq2nKFrN0PRua9WG4l3ny0xYmDFPlJ/s0E9XhmQaqy+uXtVbA2XdLEvE6pQ0YWbHEKMniW26w6LJkx4IV6RX/7Kpq7byw/bW65tu/BzgISKau5FYLY4CqZJH7f8QBg3XWpzB91AR494tdsD+ugM45wrY/6awGQx9CY5SAzGqTyFuSFQxgB2rBurb01seZPf8nqG8V13UYXfX/O3/WMOBMr7U/RVqmAA0ZMYOyEwfVUmHqrFjkxpXX+JdNKRiA1GJp5sdRpCxSeXdQ/Ni6AAGZV2IyRb4G4Y++1vP4yPBalas=
130 a9764ab80e11bcf6a37255db7dd079011f767c6c 0 iQIVAwUAV09KHyBXgaxoKi1yAQJBWg/+OywRrqU+zvnL1tHJ95PgatsF7S4ZAHZFR098+oCjUDtKpvnm71o2TKiY4D5cckyD2KNwLWg/qW6V+5+2EYU0Y/ViwPVcngib/ZeJP+Nr44TK3YZMRmfFuUEEzA7sZ2r2Gm8eswv//W79I0hXJeFd/o6FgLnn7AbOjcOn3IhWdGAP6jUHv9zyJigQv6K9wgyvAnK1RQE+2CgMcoyeqao/zs23IPXI6XUHOwfrQ7XrQ83+ciMqN7XNRx+TKsUQoYeUew4AanoDSMPAQ4kIudsP5tOgKeLRPmHX9zg6Y5S1nTpLRNdyAxuNuyZtkQxDYcG5Hft/SIx27tZUo3gywHL2U+9RYD2nvXqaWzT3sYB2sPBOiq7kjHRgvothkXemAFsbq2nKFrN0PRua9WG4l3ny0xYmDFPlJ/s0E9XhmQaqy+uXtVbA2XdLEvE6pQ0YWbHEKMniW26w6LJkx4IV6RX/7Kpq7byw/bW65tu/BzgISKau5FYLY4CqZJH7f8QBg3XWpzB91AR494tdsD+ugM45wrY/6awGQx9CY5SAzGqTyFuSFQxgB2rBurb01seZPf8nqG8V13UYXfX/O3/WMOBMr7U/RVqmAA0ZMYOyEwfVUmHqrFjkxpXX+JdNKRiA1GJp5sdRpCxSeXdQ/Ni6AAGZV2IyRb4G4Y++1vP4yPBalas=
131 26a5d605b8683a292bb89aea11f37a81b06ac016 0 iQIVAwUAV3bOsSBXgaxoKi1yAQLiDg//fxmcNpTUedsXqEwNdGFJsJ2E25OANgyv1saZHNfbYFWXIR8g4nyjNaj2SjtXF0wzOq5aHlMWXjMZPOT6pQBdTnOYDdgv+O8DGpgHs5x/f+uuxtpVkdxR6uRP0/ImlTEtDix8VQiN3nTu5A0N3C7E2y+D1JIIyTp6vyjzxvGQTY0MD/qgB55Dn6khx8c3phDtMkzmVEwL4ItJxVRVNw1m+2FOXHu++hJEruJdeMV0CKOV6LVbXHho+yt3jQDKhlIgJ65EPLKrf+yRalQtSWpu7y/vUMcEUde9XeQ5x05ebCiI4MkJ0ULQro/Bdx9vBHkAstUC7D+L5y45ZnhHjOwxz9c3GQMZQt1HuyORqbBhf9hvOkUQ2GhlDHc5U04nBe0VhEoCw9ra54n+AgUyqWr4CWimSW6pMTdquCzAAbcJWgdNMwDHrMalCYHhJksKFARKq3uSTR1Noz7sOCSIEQvOozawKSQfOwGxn/5bNepKh4uIRelC1uEDoqculqCLgAruzcMNIMndNVYaJ09IohJzA9jVApa+SZVPAeREg71lnS3d8jaWh1Lu5JFlAAKQeKGVJmNm40Y3HBjtHQDrI67TT59oDAhjo420Wf9VFCaj2k0weYBLWSeJhfUZ5x3PVpAHUvP/rnHPwNYyY0wVoQEvM/bnQdcpICmKhqcK+vKjDrM=
131 26a5d605b8683a292bb89aea11f37a81b06ac016 0 iQIVAwUAV3bOsSBXgaxoKi1yAQLiDg//fxmcNpTUedsXqEwNdGFJsJ2E25OANgyv1saZHNfbYFWXIR8g4nyjNaj2SjtXF0wzOq5aHlMWXjMZPOT6pQBdTnOYDdgv+O8DGpgHs5x/f+uuxtpVkdxR6uRP0/ImlTEtDix8VQiN3nTu5A0N3C7E2y+D1JIIyTp6vyjzxvGQTY0MD/qgB55Dn6khx8c3phDtMkzmVEwL4ItJxVRVNw1m+2FOXHu++hJEruJdeMV0CKOV6LVbXHho+yt3jQDKhlIgJ65EPLKrf+yRalQtSWpu7y/vUMcEUde9XeQ5x05ebCiI4MkJ0ULQro/Bdx9vBHkAstUC7D+L5y45ZnhHjOwxz9c3GQMZQt1HuyORqbBhf9hvOkUQ2GhlDHc5U04nBe0VhEoCw9ra54n+AgUyqWr4CWimSW6pMTdquCzAAbcJWgdNMwDHrMalCYHhJksKFARKq3uSTR1Noz7sOCSIEQvOozawKSQfOwGxn/5bNepKh4uIRelC1uEDoqculqCLgAruzcMNIMndNVYaJ09IohJzA9jVApa+SZVPAeREg71lnS3d8jaWh1Lu5JFlAAKQeKGVJmNm40Y3HBjtHQDrI67TT59oDAhjo420Wf9VFCaj2k0weYBLWSeJhfUZ5x3PVpAHUvP/rnHPwNYyY0wVoQEvM/bnQdcpICmKhqcK+vKjDrM=
132 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 0 iQIVAwUAV42tNyBXgaxoKi1yAQI/Iw//V0NtxpVD4sClotAwffBVW42Uv+SG+07CJoOuFYnmHZv/plOzXuuJlmm95L00/qyRCCTUyAGxK/eP5cAKP2V99ln6rNhh8gpgvmZlnYjU3gqFv8tCQ+fkwgRiWmgKjRL6/bK9FY5cO7ATLVu3kCkFd8CEgzlAaUqBfkNFxZxLDLvKqRlhXxVXhKjvkKg5DZ6eJqRQY7w3UqqR+sF1rMLtVyt490Wqv7YQKwcvY7MEKTyH4twGLx/RhBpBi+GccVKvWC011ffjSjxqAfQqrrSVt0Ld1Khj2/p1bDDYpTgtdDgCzclSXWEQpmSdFRBF5wYs/pDMUreI/E6mlWkB4hfZZk1NBRPRWYikXwnhU3ziubCGesZDyBYLrK1vT+tf6giseo22YQmDnOftbS999Pcn04cyCafeFuOjkubYaINB25T20GS5Wb4a0nHPRAOOVxzk/m/arwYgF0ZZZDDvJ48TRMDf3XOc1jc5qZ7AN/OQKbvh2B08vObnnPm3lmBY1qOnhwzJxpNiq+Z/ypokGXQkGBfKUo7rWHJy5iXLb3Biv9AhxY9d5pSTjBmTAYJEic3q03ztzlnfMyi+C13+YxFAbSSNGBP8Hejkkz0NvmB1TBuCKpnZA8spxY5rhZ/zMx+cCw8hQvWHHDUURps7SQvZEfrJSCGJFPDHL3vbfK+LNwI=
132 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 0 iQIVAwUAV42tNyBXgaxoKi1yAQI/Iw//V0NtxpVD4sClotAwffBVW42Uv+SG+07CJoOuFYnmHZv/plOzXuuJlmm95L00/qyRCCTUyAGxK/eP5cAKP2V99ln6rNhh8gpgvmZlnYjU3gqFv8tCQ+fkwgRiWmgKjRL6/bK9FY5cO7ATLVu3kCkFd8CEgzlAaUqBfkNFxZxLDLvKqRlhXxVXhKjvkKg5DZ6eJqRQY7w3UqqR+sF1rMLtVyt490Wqv7YQKwcvY7MEKTyH4twGLx/RhBpBi+GccVKvWC011ffjSjxqAfQqrrSVt0Ld1Khj2/p1bDDYpTgtdDgCzclSXWEQpmSdFRBF5wYs/pDMUreI/E6mlWkB4hfZZk1NBRPRWYikXwnhU3ziubCGesZDyBYLrK1vT+tf6giseo22YQmDnOftbS999Pcn04cyCafeFuOjkubYaINB25T20GS5Wb4a0nHPRAOOVxzk/m/arwYgF0ZZZDDvJ48TRMDf3XOc1jc5qZ7AN/OQKbvh2B08vObnnPm3lmBY1qOnhwzJxpNiq+Z/ypokGXQkGBfKUo7rWHJy5iXLb3Biv9AhxY9d5pSTjBmTAYJEic3q03ztzlnfMyi+C13+YxFAbSSNGBP8Hejkkz0NvmB1TBuCKpnZA8spxY5rhZ/zMx+cCw8hQvWHHDUURps7SQvZEfrJSCGJFPDHL3vbfK+LNwI=
133 299546f84e68dbb9bd026f0f3a974ce4bdb93686 0 iQIcBAABCAAGBQJXn3rFAAoJELnJ3IJKpb3VmZoQAK0cdOfi/OURglnN0vYYGwdvSXTPpZauPEYEpwML3dW1j6HRnl5L+H8D8vlYzahK95X4+NNBhqtyyB6wmIVI0NkYfXfd6ACntJE/EnTdLIHIP2NAAoVsggIjiNr26ubRegaD5ya63Ofxz+Yq5iRsUUfHet7o+CyFhExyzdu+Vcz1/E9GztxNfTDVpC/mf+RMLwQTfHOhoTVbaamLCmGAIjw39w72X+vRMJoYNF44te6PvsfI67+6uuC0+9DjMnp5eL/hquSQ1qfks71rnWwxuiPcUDZloIueowVmt0z0sO4loSP1nZ5IP/6ZOoAzSjspqsxeay9sKP0kzSYLGsmCi29otyVSnXiKtyMCW5z5iM6k8XQcMi5mWy9RcpqlNYD7RUTn3g0+a8u7F6UEtske3/qoweJLPhtTmBNOfDNw4JXwOBSZea0QnIIjCeCc4ZGqfojPpbvcA4rkRpxI23YoMrT2v/kp4wgwrqK9fi8ctt8WbXpmGoAQDXWj2bWcuzj94HsAhLduFKv6sxoDz871hqjmjjnjQSU7TSNNnVzdzwqYkMB+BvhcNYxk6lcx3Aif3AayGdrWDubtU/ZRNoLzBwe6gm0udRMXBj4D/60GD6TIkYeL7HjJwfBb6Bf7qvQ6y7g0zbYG9uwBmMeduU7XchErGqQGSEyyJH3DG9OLaFOj
133 299546f84e68dbb9bd026f0f3a974ce4bdb93686 0 iQIcBAABCAAGBQJXn3rFAAoJELnJ3IJKpb3VmZoQAK0cdOfi/OURglnN0vYYGwdvSXTPpZauPEYEpwML3dW1j6HRnl5L+H8D8vlYzahK95X4+NNBhqtyyB6wmIVI0NkYfXfd6ACntJE/EnTdLIHIP2NAAoVsggIjiNr26ubRegaD5ya63Ofxz+Yq5iRsUUfHet7o+CyFhExyzdu+Vcz1/E9GztxNfTDVpC/mf+RMLwQTfHOhoTVbaamLCmGAIjw39w72X+vRMJoYNF44te6PvsfI67+6uuC0+9DjMnp5eL/hquSQ1qfks71rnWwxuiPcUDZloIueowVmt0z0sO4loSP1nZ5IP/6ZOoAzSjspqsxeay9sKP0kzSYLGsmCi29otyVSnXiKtyMCW5z5iM6k8XQcMi5mWy9RcpqlNYD7RUTn3g0+a8u7F6UEtske3/qoweJLPhtTmBNOfDNw4JXwOBSZea0QnIIjCeCc4ZGqfojPpbvcA4rkRpxI23YoMrT2v/kp4wgwrqK9fi8ctt8WbXpmGoAQDXWj2bWcuzj94HsAhLduFKv6sxoDz871hqjmjjnjQSU7TSNNnVzdzwqYkMB+BvhcNYxk6lcx3Aif3AayGdrWDubtU/ZRNoLzBwe6gm0udRMXBj4D/60GD6TIkYeL7HjJwfBb6Bf7qvQ6y7g0zbYG9uwBmMeduU7XchErGqQGSEyyJH3DG9OLaFOj
134 ccd436f7db6d5d7b9af89715179b911d031d44f1 0 iQIVAwUAV8h7F0emf/qjRqrOAQjmdhAAgYhom8fzL/YHeVLddm71ZB+pKDviKASKGSrBHY4D5Szrh/pYTedmG9IptYue5vzXpspHAaGvZN5xkwrz1/5nmnCsLA8DFaYT9qCkize6EYzxSBtA/W1S9Mv5tObinr1EX9rCSyI4HEJYE8i1IQM5h07SqUsMKDoasd4e29t6gRWg5pfOYq1kc2MTck35W9ff1Fii8S28dqbO3cLU6g5K0pT0JLCZIq7hyTNQdxHAYfebxkVl7PZrZR383IrnyotXVKFFc44qinv94T50uR4yUNYPQ8Gu0TgoGQQjBjk1Lrxot2xpgPQAy8vx+EOJgpg/yNZnYkmJZMxjDkTGVrwvXtOXZzmy2jti7PniET9hUBCU7aNHnoJJLzIf+Vb1CIRP0ypJl8GYCZx6HIYwOQH6EtcaeUqq3r+WXWv74ijIE7OApotmutM9buTvdOLdZddBzFPIjykc6cXO+W4E0kl6u9/OHtaZ3Nynh0ejBRafRWAVw2yU3T9SgQyICsmYWJCThkj14WqCJr2b7jfGlg9MkQOUG6/3f4xz2R3SgyUD8KiGsq/vdBE53zh0YA9gppLoum6AY+z61G1NhVGlrtps90txZBehuARUUz2dJC0pBMRy8XFwXMewDSIe6ATg25pHZsxHfhcalBpJncBl8pORs7oQl+GKBVxlnV4jm1pCzLU=
134 ccd436f7db6d5d7b9af89715179b911d031d44f1 0 iQIVAwUAV8h7F0emf/qjRqrOAQjmdhAAgYhom8fzL/YHeVLddm71ZB+pKDviKASKGSrBHY4D5Szrh/pYTedmG9IptYue5vzXpspHAaGvZN5xkwrz1/5nmnCsLA8DFaYT9qCkize6EYzxSBtA/W1S9Mv5tObinr1EX9rCSyI4HEJYE8i1IQM5h07SqUsMKDoasd4e29t6gRWg5pfOYq1kc2MTck35W9ff1Fii8S28dqbO3cLU6g5K0pT0JLCZIq7hyTNQdxHAYfebxkVl7PZrZR383IrnyotXVKFFc44qinv94T50uR4yUNYPQ8Gu0TgoGQQjBjk1Lrxot2xpgPQAy8vx+EOJgpg/yNZnYkmJZMxjDkTGVrwvXtOXZzmy2jti7PniET9hUBCU7aNHnoJJLzIf+Vb1CIRP0ypJl8GYCZx6HIYwOQH6EtcaeUqq3r+WXWv74ijIE7OApotmutM9buTvdOLdZddBzFPIjykc6cXO+W4E0kl6u9/OHtaZ3Nynh0ejBRafRWAVw2yU3T9SgQyICsmYWJCThkj14WqCJr2b7jfGlg9MkQOUG6/3f4xz2R3SgyUD8KiGsq/vdBE53zh0YA9gppLoum6AY+z61G1NhVGlrtps90txZBehuARUUz2dJC0pBMRy8XFwXMewDSIe6ATg25pHZsxHfhcalBpJncBl8pORs7oQl+GKBVxlnV4jm1pCzLU=
135 149433e68974eb5c63ccb03f794d8b57339a80c4 0 iQIcBAABAgAGBQJX8AfCAAoJELnJ3IJKpb3VnNAP/3umS8tohcZTr4m6DJm9u4XGr2m3FWQmjTEfimGpsOuBC8oCgsq0eAlORYcV68zDax+vQHQu3pqfPXaX+y4ZFDuz0ForNRiPJn+Q+tj1+NrOT1e8h4gH0nSK4rDxEGaa6x01fyC/xQMqN6iNfzbLLB7+WadZlyBRbHaUeZFDlPxPDf1rjDpu1vqwtOrVzSxMasRGEceiUegwsFdFMAefCq0ya/pKe9oV+GgGfR4qNrP7BfpOBcN/Po/ctkFCbLOhHbu6M7HpBSiD57BUy5lfhQQtSjzCKEVTyrWEH0ApjjXKuJzLSyq7xsHKQSOPMgGQprGehyzdCETlZOdauGrC0t9vBCr7kXEhXtycqxBC03vknA2eNeV610VX+HgO9VpCVZWHtENiArhALCcpoEsJvT29xCBYpSii/wnTpYJFT9yW8tjQCxH0zrmEZJvO1/nMINEBQFScB/nzUELn9asnghNf6vMpSGy0fSM27j87VAXCzJ5lqa6WCL/RrKgvYflow/m5AzUfMQhpqpH1vmh4ba1zZ4123lgnW4pNZDV9kmwXrEagGbWe1rnmsMzHugsECiYQyIngjWzHfpHgyEr49Uc5bMM1MlTypeHYYL4kV1jJ8Ou0SC4aV+49p8Onmb2NlVY7JKV7hqDCuZPI164YXMxhPNst4XK0/ENhoOE+8iB6
135 149433e68974eb5c63ccb03f794d8b57339a80c4 0 iQIcBAABAgAGBQJX8AfCAAoJELnJ3IJKpb3VnNAP/3umS8tohcZTr4m6DJm9u4XGr2m3FWQmjTEfimGpsOuBC8oCgsq0eAlORYcV68zDax+vQHQu3pqfPXaX+y4ZFDuz0ForNRiPJn+Q+tj1+NrOT1e8h4gH0nSK4rDxEGaa6x01fyC/xQMqN6iNfzbLLB7+WadZlyBRbHaUeZFDlPxPDf1rjDpu1vqwtOrVzSxMasRGEceiUegwsFdFMAefCq0ya/pKe9oV+GgGfR4qNrP7BfpOBcN/Po/ctkFCbLOhHbu6M7HpBSiD57BUy5lfhQQtSjzCKEVTyrWEH0ApjjXKuJzLSyq7xsHKQSOPMgGQprGehyzdCETlZOdauGrC0t9vBCr7kXEhXtycqxBC03vknA2eNeV610VX+HgO9VpCVZWHtENiArhALCcpoEsJvT29xCBYpSii/wnTpYJFT9yW8tjQCxH0zrmEZJvO1/nMINEBQFScB/nzUELn9asnghNf6vMpSGy0fSM27j87VAXCzJ5lqa6WCL/RrKgvYflow/m5AzUfMQhpqpH1vmh4ba1zZ4123lgnW4pNZDV9kmwXrEagGbWe1rnmsMzHugsECiYQyIngjWzHfpHgyEr49Uc5bMM1MlTypeHYYL4kV1jJ8Ou0SC4aV+49p8Onmb2NlVY7JKV7hqDCuZPI164YXMxhPNst4XK0/ENhoOE+8iB6
136 438173c415874f6ac653efc1099dec9c9150e90f 0 iQIVAwUAWAZ3okemf/qjRqrOAQj89xAAw/6QZ07yqvH+aZHeGQfgJ/X1Nze/hSMzkqbwGkuUOWD5ztN8+c39EXCn8JlqyLUPD7uGzhTV0299k5fGRihLIseXr0hy/cvVW16uqfeKJ/4/qL9zLS3rwSAgWbaHd1s6UQZVfGCb8V6oC1dkJxfrE9h6kugBqV97wStIRxmCpMDjsFv/zdNwsv6eEdxbiMilLn2/IbWXFOVKJzzv9iEY5Pu5McFR+nnrMyUZQhyGtVPLSkoEPsOysorfCZaVLJ6MnVaJunp9XEv94Pqx9+k+shsQvJHWkc0Nnb6uDHZYkLR5v2AbFsbJ9jDHsdr9A7qeQTiZay7PGI0uPoIrkmLya3cYbU1ADhwloAeQ/3gZLaJaKEjrXcFSsz7AZ9yq74rTwiPulF8uqZxJUodk2m/zy83HBrxxp/vgxWJ5JP2WXPtB8qKY+05umAt4rQS+fd2H/xOu2V2d5Mq1WmgknLBLC0ItaNaf91sSHtgEy22GtcvWQE7S6VWU1PoSYmOLITdJKAsmb7Eq+yKDW9nt0lOpUu2wUhBGctlgXgcWOmJP6gL6edIg66czAkVBp/fpKNl8Z/A0hhpuH7nW7GW/mzLVQnc+JW4wqUVkwlur3NRfvSt5ZyTY/SaR++nRf62h7PHIjU+f0kWQRdCcEQ0X38b8iAjeXcsOW8NCOPpm0zcz3i8=
136 438173c415874f6ac653efc1099dec9c9150e90f 0 iQIVAwUAWAZ3okemf/qjRqrOAQj89xAAw/6QZ07yqvH+aZHeGQfgJ/X1Nze/hSMzkqbwGkuUOWD5ztN8+c39EXCn8JlqyLUPD7uGzhTV0299k5fGRihLIseXr0hy/cvVW16uqfeKJ/4/qL9zLS3rwSAgWbaHd1s6UQZVfGCb8V6oC1dkJxfrE9h6kugBqV97wStIRxmCpMDjsFv/zdNwsv6eEdxbiMilLn2/IbWXFOVKJzzv9iEY5Pu5McFR+nnrMyUZQhyGtVPLSkoEPsOysorfCZaVLJ6MnVaJunp9XEv94Pqx9+k+shsQvJHWkc0Nnb6uDHZYkLR5v2AbFsbJ9jDHsdr9A7qeQTiZay7PGI0uPoIrkmLya3cYbU1ADhwloAeQ/3gZLaJaKEjrXcFSsz7AZ9yq74rTwiPulF8uqZxJUodk2m/zy83HBrxxp/vgxWJ5JP2WXPtB8qKY+05umAt4rQS+fd2H/xOu2V2d5Mq1WmgknLBLC0ItaNaf91sSHtgEy22GtcvWQE7S6VWU1PoSYmOLITdJKAsmb7Eq+yKDW9nt0lOpUu2wUhBGctlgXgcWOmJP6gL6edIg66czAkVBp/fpKNl8Z/A0hhpuH7nW7GW/mzLVQnc+JW4wqUVkwlur3NRfvSt5ZyTY/SaR++nRf62h7PHIjU+f0kWQRdCcEQ0X38b8iAjeXcsOW8NCOPpm0zcz3i8=
137 eab27446995210c334c3d06f1a659e3b9b5da769 0 iQIcBAABCAAGBQJYGNsXAAoJELnJ3IJKpb3Vf30QAK/dq5vEHEkufLGiYxxkvIyiRaswS+8jamXeHMQrdK8CuokcQYhEv9xiUI6FMIoX4Zc0xfoFCBc+X4qE+Ed9SFYWgQkDs/roJq1C1mTYA+KANMqJkDt00QZq536snFQvjCXAA5fwR/DpgGOOuGMRfvbjh7x8mPyVoPr4HDQCGFXnTYdn193HpTOqUsipzIV5OJqQ9p0sfJjwKP4ZfD0tqqdjTkNwMyJuwuRaReXFvGGCjH2PqkZE/FwQG0NJJjt0xaMUmv5U5tXHC9tEVobVV/qEslqfbH2v1YPF5d8Jmdn7F76FU5J0nTd+3rIVjYGYSt01cR6wtGnzvr/7kw9kbChw4wYhXxnmIALSd48FpA1qWjlPcAdHfUUwObxOxfqmlnBGtAQFK+p5VXCsxDZEIT9MSxscfCjyDQZpkY5S5B3PFIRg6V9bdl5a4rEt27aucuKTHj1Ok2vip4WfaIKk28YMjjzuOQRbr6Pp7mJcCC1/ERHUJdLsaQP+dy18z6XbDjX3O2JDRNYbCBexQyV/Kfrt5EOS5fXiByQUHv+PyR+9Ju6QWkkcFBfgsxq25kFl+eos4V9lxPOY5jDpw2BWu9TyHtTWkjL/YxDUGwUO9WA/WzrcT4skr9FYrFV/oEgi8MkwydC0cFICDfd6tr9upqkkr1W025Im1UBXXJ89bTVj
137 eab27446995210c334c3d06f1a659e3b9b5da769 0 iQIcBAABCAAGBQJYGNsXAAoJELnJ3IJKpb3Vf30QAK/dq5vEHEkufLGiYxxkvIyiRaswS+8jamXeHMQrdK8CuokcQYhEv9xiUI6FMIoX4Zc0xfoFCBc+X4qE+Ed9SFYWgQkDs/roJq1C1mTYA+KANMqJkDt00QZq536snFQvjCXAA5fwR/DpgGOOuGMRfvbjh7x8mPyVoPr4HDQCGFXnTYdn193HpTOqUsipzIV5OJqQ9p0sfJjwKP4ZfD0tqqdjTkNwMyJuwuRaReXFvGGCjH2PqkZE/FwQG0NJJjt0xaMUmv5U5tXHC9tEVobVV/qEslqfbH2v1YPF5d8Jmdn7F76FU5J0nTd+3rIVjYGYSt01cR6wtGnzvr/7kw9kbChw4wYhXxnmIALSd48FpA1qWjlPcAdHfUUwObxOxfqmlnBGtAQFK+p5VXCsxDZEIT9MSxscfCjyDQZpkY5S5B3PFIRg6V9bdl5a4rEt27aucuKTHj1Ok2vip4WfaIKk28YMjjzuOQRbr6Pp7mJcCC1/ERHUJdLsaQP+dy18z6XbDjX3O2JDRNYbCBexQyV/Kfrt5EOS5fXiByQUHv+PyR+9Ju6QWkkcFBfgsxq25kFl+eos4V9lxPOY5jDpw2BWu9TyHtTWkjL/YxDUGwUO9WA/WzrcT4skr9FYrFV/oEgi8MkwydC0cFICDfd6tr9upqkkr1W025Im1UBXXJ89bTVj
138 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 0 iQIVAwUAWECEaEemf/qjRqrOAQjuZw/+IWJKnKOsaUMcB9ly3Fo/eskqDL6A0j69IXTJDeBDGMoyGbQU/gZyX2yc6Sw3EhwTSCXu5vKpzg3a6e8MNrC1iHqli4wJ/jPY7XtmiqTYDixdsBLNk46VfOi73ooFe08wVDSNB65xpZsrtPDSioNmQ2kSJwSHb71UlauS4xGkM74vuDpWvX5OZRSfBqMh6NjG5RwBBnS8mzA0SW2dCI2jSc5SCGIzIZpzM0xUN21xzq0YQbrk9qEsmi7ks0eowdhUjeET2wSWwhOK4jS4IfMyRO7KueUB05yHs4mChj9kNFNWtSzXKwKBQbZzwO/1Y7IJjU+AsbWkiUu+6ipqBPQWzS28gCwGOrv5BcIJS+tzsvLUKWgcixyfy5UAqJ32gCdzKC54FUpT2zL6Ad0vXGM6WkpZA7yworN4RCFPexXbi0x2GSTLG8PyIoZ4Iwgtj5NtsEDHrz0380FxgnKUIC3ny2SVuPlyD+9wepD3QYcxdRk1BIzcFT9ZxNlgil3IXRVPwVejvQ/zr6/ILdhBnZ8ojjvVCy3b86B1OhZj/ZByYo5QaykVqWl0V9vJOZlZfvOpm2HiDhm/2uNrVWxG4O6EwhnekAdaJYmeLq1YbhIfGA6KVOaB9Yi5A5BxK9QGXBZ6sLj+dIUD3QR47r9yAqVQE8Gr/Oh6oQXBQqOQv7WzBBs=
138 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 0 iQIVAwUAWECEaEemf/qjRqrOAQjuZw/+IWJKnKOsaUMcB9ly3Fo/eskqDL6A0j69IXTJDeBDGMoyGbQU/gZyX2yc6Sw3EhwTSCXu5vKpzg3a6e8MNrC1iHqli4wJ/jPY7XtmiqTYDixdsBLNk46VfOi73ooFe08wVDSNB65xpZsrtPDSioNmQ2kSJwSHb71UlauS4xGkM74vuDpWvX5OZRSfBqMh6NjG5RwBBnS8mzA0SW2dCI2jSc5SCGIzIZpzM0xUN21xzq0YQbrk9qEsmi7ks0eowdhUjeET2wSWwhOK4jS4IfMyRO7KueUB05yHs4mChj9kNFNWtSzXKwKBQbZzwO/1Y7IJjU+AsbWkiUu+6ipqBPQWzS28gCwGOrv5BcIJS+tzsvLUKWgcixyfy5UAqJ32gCdzKC54FUpT2zL6Ad0vXGM6WkpZA7yworN4RCFPexXbi0x2GSTLG8PyIoZ4Iwgtj5NtsEDHrz0380FxgnKUIC3ny2SVuPlyD+9wepD3QYcxdRk1BIzcFT9ZxNlgil3IXRVPwVejvQ/zr6/ILdhBnZ8ojjvVCy3b86B1OhZj/ZByYo5QaykVqWl0V9vJOZlZfvOpm2HiDhm/2uNrVWxG4O6EwhnekAdaJYmeLq1YbhIfGA6KVOaB9Yi5A5BxK9QGXBZ6sLj+dIUD3QR47r9yAqVQE8Gr/Oh6oQXBQqOQv7WzBBs=
139 e69874dc1f4e142746ff3df91e678a09c6fc208c 0 iQIVAwUAWG0oGUemf/qjRqrOAQh3uhAAu4TN7jkkgH7Hxn8S1cB6Ru0x8MQutzzzpjShhsE/G7nzCxsZ5eWdJ5ItwXmKhunb7T0og54CGcTxfmdPtCI7AhhHh9/TM2Hv1EBcsXCiwjG8E+P6X1UJkijgTGjNWuCvEDOsQAvgywslECBNnXp2QA5I5UdCMeqDdTAb8ujvbD8I4pxUx1xXKY18DgQGJh13mRlfkEVnPxUi2n8emnwPLjbVVkVISkMFUkaOl8a4fOeZC1xzDpoQocoH2Q8DYa9RCPPSHHSYPNMWGCdNGN2CoAurcHWWvc7jNU28/tBhTazfFv8LYh63lLQ8SIIPZHJAOxo45ufMspzUfNgoD6y3vlF5aW7DpdxwYHnueh7S1Fxgtd9cOnxmxQsgiF4LK0a+VXOi/Tli/fivZHDRCGHJvJgsMQm7pzkay9sGohes6jAnsOv2E8DwFC71FO/btrAp07IRFxH9WhUeMsXLMS9oBlubMxMM58M+xzSKApK6bz2MkLsx9cewmfmfbJnRIK1xDv+J+77pWWNGlxCCjl1WU+aA3M7G8HzwAqjL75ASOWtBrJlFXvlLgzobwwetg6cm44Rv1P39i3rDySZvi4BDlOQHWFupgMKiXnZ1PeL7eBDs/aawrE0V2ysNkf9An+XJZkos2JSLPWcoNigfXNUu5c1AqsERvHA246XJzqvCEK8=
139 e69874dc1f4e142746ff3df91e678a09c6fc208c 0 iQIVAwUAWG0oGUemf/qjRqrOAQh3uhAAu4TN7jkkgH7Hxn8S1cB6Ru0x8MQutzzzpjShhsE/G7nzCxsZ5eWdJ5ItwXmKhunb7T0og54CGcTxfmdPtCI7AhhHh9/TM2Hv1EBcsXCiwjG8E+P6X1UJkijgTGjNWuCvEDOsQAvgywslECBNnXp2QA5I5UdCMeqDdTAb8ujvbD8I4pxUx1xXKY18DgQGJh13mRlfkEVnPxUi2n8emnwPLjbVVkVISkMFUkaOl8a4fOeZC1xzDpoQocoH2Q8DYa9RCPPSHHSYPNMWGCdNGN2CoAurcHWWvc7jNU28/tBhTazfFv8LYh63lLQ8SIIPZHJAOxo45ufMspzUfNgoD6y3vlF5aW7DpdxwYHnueh7S1Fxgtd9cOnxmxQsgiF4LK0a+VXOi/Tli/fivZHDRCGHJvJgsMQm7pzkay9sGohes6jAnsOv2E8DwFC71FO/btrAp07IRFxH9WhUeMsXLMS9oBlubMxMM58M+xzSKApK6bz2MkLsx9cewmfmfbJnRIK1xDv+J+77pWWNGlxCCjl1WU+aA3M7G8HzwAqjL75ASOWtBrJlFXvlLgzobwwetg6cm44Rv1P39i3rDySZvi4BDlOQHWFupgMKiXnZ1PeL7eBDs/aawrE0V2ysNkf9An+XJZkos2JSLPWcoNigfXNUu5c1AqsERvHA246XJzqvCEK8=
140 a1dd2c0c479e0550040542e392e87bc91262517e 0 iQIcBAABCAAGBQJYgBBEAAoJELnJ3IJKpb3VJosP/10rr3onsVbL8E+ri1Q0TJc8uhqIsBVyD/vS1MJtbxRaAdIV92o13YOent0o5ASFF/0yzVKlOWPQRjsYYbYY967k1TruDaWxJAnpeFgMni2Afl/qyWrW4AY2xegZNZCfMmwJA+uSJDdAn+jPV40XbuCZ+OgyZo5S05dfclHFxdc8rPKeUsJtvs5PMmCL3iQl1sulp1ASjuhRtFWZgSFsC6rb2Y7evD66ikL93+0/BPEB4SVX17vB/XEzdmh4ntyt4+d1XAznLHS33IU8UHbTkUmLy+82WnNH7HBB2V7gO47m/HhvaYjEfeW0bqMzN3aOUf30Vy/wB4HHsvkBGDgL5PYVHRRovGcAuCmnYbOkawqbRewW5oDs7UT3HbShNpxCxfsYpo7deHr11zWA3ooWCSlIRRREU4BfwVmn+Ds1hT5HM28Q6zr6GQZegDUbiT9i1zU0EpyfTpH7gc6NTVQrO1z1p70NBnQMqXcHjWJwjSwLER2Qify9MjrGXTL6ofD5zVZKobeRmq94mf3lDq26H7coraM9X5h9xa49VgAcRHzn/WQ6wcFCKDQr6FT67hTUOlF7Jriv8/5h/ziSZr10fCObKeKWN8Skur29VIAHHY4NuUqbM55WohD+jZ2O3d4tze1eWm5MDgWD8RlrfYhQ+cLOwH65AOtts0LNZwlvJuC7
140 a1dd2c0c479e0550040542e392e87bc91262517e 0 iQIcBAABCAAGBQJYgBBEAAoJELnJ3IJKpb3VJosP/10rr3onsVbL8E+ri1Q0TJc8uhqIsBVyD/vS1MJtbxRaAdIV92o13YOent0o5ASFF/0yzVKlOWPQRjsYYbYY967k1TruDaWxJAnpeFgMni2Afl/qyWrW4AY2xegZNZCfMmwJA+uSJDdAn+jPV40XbuCZ+OgyZo5S05dfclHFxdc8rPKeUsJtvs5PMmCL3iQl1sulp1ASjuhRtFWZgSFsC6rb2Y7evD66ikL93+0/BPEB4SVX17vB/XEzdmh4ntyt4+d1XAznLHS33IU8UHbTkUmLy+82WnNH7HBB2V7gO47m/HhvaYjEfeW0bqMzN3aOUf30Vy/wB4HHsvkBGDgL5PYVHRRovGcAuCmnYbOkawqbRewW5oDs7UT3HbShNpxCxfsYpo7deHr11zWA3ooWCSlIRRREU4BfwVmn+Ds1hT5HM28Q6zr6GQZegDUbiT9i1zU0EpyfTpH7gc6NTVQrO1z1p70NBnQMqXcHjWJwjSwLER2Qify9MjrGXTL6ofD5zVZKobeRmq94mf3lDq26H7coraM9X5h9xa49VgAcRHzn/WQ6wcFCKDQr6FT67hTUOlF7Jriv8/5h/ziSZr10fCObKeKWN8Skur29VIAHHY4NuUqbM55WohD+jZ2O3d4tze1eWm5MDgWD8RlrfYhQ+cLOwH65AOtts0LNZwlvJuC7
141 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 0 iQIVAwUAWJIKpUemf/qjRqrOAQjjThAAvl1K/GZBrkanwEPXomewHkWKTEy1s5d5oWmPPGrSb9G4LM/3/abSbQ7fnzkS6IWi4Ao0za68w/MohaVGKoMAslRbelaTqlus0wE3zxb2yQ/j2NeZzFnFEuR/vbUug7uzH+onko2jXrt7VcPNXLOa1/g5CWwaf/YPfJO4zv+atlzBHvuFcQCkdbcOJkccCnBUoR7y0PJoBJX6K7wJQ+hWLdcY4nVaxkGPRmsZJo9qogXZMw1CwJVjofxRI0S/5vMtEqh8srYsg7qlTNv8eYnwdpfuunn2mI7Khx10Tz85PZDnr3SGRiFvdfmT30pI7jL3bhOHALkaoy2VevteJjIyMxANTvjIUBNQUi+7Kj3VIKmkL9NAMAQBbshiQL1wTrXdqOeC8Nm1BfCQEox2yiC6pDFbXVbguwJZ5VKFizTTK6f6BdNYKTVx8lNEdjAsWH8ojgGWwGXBbTkClULHezJ/sODaZzK/+M/IzbGmlF27jJYpdJX8fUoybZNw9lXwIfQQWHmQHEOJYCljD9G1tvYY70+xAFexgBX5Ib48UK4DRITVNecyQZL7bLTzGcM0TAE0EtD4M42wawsYP3Cva9UxShFLICQdPoa4Wmfs6uLbXG1DDLol/j7b6bL+6W8E3AlW+aAPc8GZm51/w3VlYqqciWTc12OJpu8FiD0pZ/iBw+E=
141 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 0 iQIVAwUAWJIKpUemf/qjRqrOAQjjThAAvl1K/GZBrkanwEPXomewHkWKTEy1s5d5oWmPPGrSb9G4LM/3/abSbQ7fnzkS6IWi4Ao0za68w/MohaVGKoMAslRbelaTqlus0wE3zxb2yQ/j2NeZzFnFEuR/vbUug7uzH+onko2jXrt7VcPNXLOa1/g5CWwaf/YPfJO4zv+atlzBHvuFcQCkdbcOJkccCnBUoR7y0PJoBJX6K7wJQ+hWLdcY4nVaxkGPRmsZJo9qogXZMw1CwJVjofxRI0S/5vMtEqh8srYsg7qlTNv8eYnwdpfuunn2mI7Khx10Tz85PZDnr3SGRiFvdfmT30pI7jL3bhOHALkaoy2VevteJjIyMxANTvjIUBNQUi+7Kj3VIKmkL9NAMAQBbshiQL1wTrXdqOeC8Nm1BfCQEox2yiC6pDFbXVbguwJZ5VKFizTTK6f6BdNYKTVx8lNEdjAsWH8ojgGWwGXBbTkClULHezJ/sODaZzK/+M/IzbGmlF27jJYpdJX8fUoybZNw9lXwIfQQWHmQHEOJYCljD9G1tvYY70+xAFexgBX5Ib48UK4DRITVNecyQZL7bLTzGcM0TAE0EtD4M42wawsYP3Cva9UxShFLICQdPoa4Wmfs6uLbXG1DDLol/j7b6bL+6W8E3AlW+aAPc8GZm51/w3VlYqqciWTc12OJpu8FiD0pZ/iBw+E=
142 25703b624d27e3917d978af56d6ad59331e0464a 0 iQIcBAABCAAGBQJYuMSwAAoJELnJ3IJKpb3VL3YP/iKWY3+K3cLUBD3Ne5MhfS7N3t6rlk9YD4kmU8JnVeV1oAfg36VCylpbJLBnmQdvC8AfBJOkXi6DHp9RKXXmlsOeoppdWYGX5RMOzuwuGPBii6cA6KFd+WBpBJlRtklz61qGCAtv4q8V1mga0yucihghzt4lD/PPz7mk6yUBL8s3rK+bIHGdEhnK2dfnn/U2G0K/vGgsYZESORISuBclCrrc7M3/v1D+FBMCEYX9FXYU4PhYkKXK1mSqzCB7oENu/WP4ijl1nRnEIyzBV9pKO4ylnXTpbZAr/e4PofzjzPXb0zume1191C3wvgJ4eDautGide/Pxls5s6fJRaIowf5XVYQ5srX/NC9N3K77Hy01t5u8nwcyAhjmajZYuB9j37nmiwFawqS/y2eHovrUjkGdelV8OM7/iAexPRC8i2NcGk0m6XuzWy1Dxr8453VD8Hh3tTeafd6v5uHXSLjwogpu/th5rk/i9/5GBzc1MyJgRTwBhVHi/yFxfyakrSU7HT2cwX/Lb5KgWccogqfvrFYQABIBanxLIeZxTv8OIjC75EYknbxYtvvgb35ZdJytwrTHSZN0S7Ua2dHx2KUnHB6thbLu/v9fYrCgFF76DK4Ogd22Cbvv6NqRoglG26d0bqdwz/l1n3o416YjupteW8LMxHzuwiJy69WP1yi10eNDq
142 25703b624d27e3917d978af56d6ad59331e0464a 0 iQIcBAABCAAGBQJYuMSwAAoJELnJ3IJKpb3VL3YP/iKWY3+K3cLUBD3Ne5MhfS7N3t6rlk9YD4kmU8JnVeV1oAfg36VCylpbJLBnmQdvC8AfBJOkXi6DHp9RKXXmlsOeoppdWYGX5RMOzuwuGPBii6cA6KFd+WBpBJlRtklz61qGCAtv4q8V1mga0yucihghzt4lD/PPz7mk6yUBL8s3rK+bIHGdEhnK2dfnn/U2G0K/vGgsYZESORISuBclCrrc7M3/v1D+FBMCEYX9FXYU4PhYkKXK1mSqzCB7oENu/WP4ijl1nRnEIyzBV9pKO4ylnXTpbZAr/e4PofzjzPXb0zume1191C3wvgJ4eDautGide/Pxls5s6fJRaIowf5XVYQ5srX/NC9N3K77Hy01t5u8nwcyAhjmajZYuB9j37nmiwFawqS/y2eHovrUjkGdelV8OM7/iAexPRC8i2NcGk0m6XuzWy1Dxr8453VD8Hh3tTeafd6v5uHXSLjwogpu/th5rk/i9/5GBzc1MyJgRTwBhVHi/yFxfyakrSU7HT2cwX/Lb5KgWccogqfvrFYQABIBanxLIeZxTv8OIjC75EYknbxYtvvgb35ZdJytwrTHSZN0S7Ua2dHx2KUnHB6thbLu/v9fYrCgFF76DK4Ogd22Cbvv6NqRoglG26d0bqdwz/l1n3o416YjupteW8LMxHzuwiJy69WP1yi10eNDq
143 ed5b25874d998ababb181a939dd37a16ea644435 0 iQIcBAABCAAGBQJY4r/gAAoJELnJ3IJKpb3VtwYP/RuTmo252ExXQk/n5zGJZvZQnI86vO1+yGuyOlGFFBwf1v3sOLW1HD7fxF6/GdT8CSQrRqtC17Ya3qtayfY/0AEiSuH2bklBXSB1H5wPyguS5iLqyilCJY0SkHYBIDhJ0xftuIjsa805wdMm3OdclnTOkYT+K1WL8Ylbx/Ni2Lsx1rPpYdcQ/HlTkr5ca1ZbNOOSxSNI4+ilGlKbdSYeEsmqB2sDEiSaDEoxGGoSgzAE9+5Q2FfCGXV0bq4vfmEPoT9lhB4kANE+gcFUvsJTu8Z7EdF8y3CJLiy8+KHO/VLKTGJ1pMperbig9nAXl1AOt+izBFGJGTolbR/ShkkDWB/QVcqIF5CysAWMgnHAx7HjnMDBOANcKzhMMfOi3GUvOCNNIqIIoJHKRHaRk0YbMdt7z2mKpTrRQ9Zadz764jXOqqrPgQFM3jkBHzAvZz9yShrHGh42Y+iReAF9pAN0xPjyZ5Y2qp+DSl0bIQqrAet6Zd3QuoJtXczAeRrAvgn7O9MyLnMyE5s7xxI7o8M7zfWtChLF8ytJUzmRo3iVJNOJH+Zls9N30PGw6vubQAnB5ieaVTv8lnNpcAnEQD/i0tmRSxzyyqoOQbnItIPKFOsaYW+eX9sgJmObU3yDc5k3cs+yAFD2CM/uiUsLcTKyxPNcP1JHBYpwhOjIGczSHVS1
143 ed5b25874d998ababb181a939dd37a16ea644435 0 iQIcBAABCAAGBQJY4r/gAAoJELnJ3IJKpb3VtwYP/RuTmo252ExXQk/n5zGJZvZQnI86vO1+yGuyOlGFFBwf1v3sOLW1HD7fxF6/GdT8CSQrRqtC17Ya3qtayfY/0AEiSuH2bklBXSB1H5wPyguS5iLqyilCJY0SkHYBIDhJ0xftuIjsa805wdMm3OdclnTOkYT+K1WL8Ylbx/Ni2Lsx1rPpYdcQ/HlTkr5ca1ZbNOOSxSNI4+ilGlKbdSYeEsmqB2sDEiSaDEoxGGoSgzAE9+5Q2FfCGXV0bq4vfmEPoT9lhB4kANE+gcFUvsJTu8Z7EdF8y3CJLiy8+KHO/VLKTGJ1pMperbig9nAXl1AOt+izBFGJGTolbR/ShkkDWB/QVcqIF5CysAWMgnHAx7HjnMDBOANcKzhMMfOi3GUvOCNNIqIIoJHKRHaRk0YbMdt7z2mKpTrRQ9Zadz764jXOqqrPgQFM3jkBHzAvZz9yShrHGh42Y+iReAF9pAN0xPjyZ5Y2qp+DSl0bIQqrAet6Zd3QuoJtXczAeRrAvgn7O9MyLnMyE5s7xxI7o8M7zfWtChLF8ytJUzmRo3iVJNOJH+Zls9N30PGw6vubQAnB5ieaVTv8lnNpcAnEQD/i0tmRSxzyyqoOQbnItIPKFOsaYW+eX9sgJmObU3yDc5k3cs+yAFD2CM/uiUsLcTKyxPNcP1JHBYpwhOjIGczSHVS1
144 77eaf9539499a1b8be259ffe7ada787d07857f80 0 iQIcBAABCAAGBQJY9iz9AAoJELnJ3IJKpb3VYqEQAJNkB09sXgYRLA4kGQv3p4v02q9WZ1lHkAhOlNwIh7Zp+pGvT33nHZffByA0v+xtJNV9TNMIFFjkCg3jl5Z42CCe33ZlezGBAzXU+70QPvOR0ojlYk+FdMfeSyCBzWYokIpImwNmwNGKVrUAfywdikCsUC2aRjKg4Mn7GnqWl9WrBG6JEOOUamdx8qV2f6g/utRiqj4YQ86P0y4K3yakwc1LMM+vRfrwvsf1+DZ9t7QRENNKQ6gRnUdfryqSFIWn1VkBVMwIN5W3yIrTMfgH1wAZxbnYHrN5qDK7mcbP7bOA3XWJuEC+3QRnheRFd/21O1dMFuYjaKApXPHRlTGRMOaz2eydbfBopUS1BtfYEh4/B/1yJb9/HDw6LiAjea7ACHiaNec83z643005AvtUuWhjX3QTPkYlQzWaosanGy1IOGtXCPp1L0A+9gUpqyqycfPjQCbST5KRzYSZn3Ngmed5Bb6jsgvg5e5y0En/SQgK/pTKnxemAmFFVvIIrrWGRKj0AD0IFEHEepmwprPRs97EZPoBPFAGmVRuASBeIhFQxSDIXV0ebHJoUmz5w1rTy7U3Eq0ff6nW14kjWOUplatXz5LpWJ3VkZKrI+4gelto5xpTI6gJl2nmezhXQIlInk17cPuxmiHjeMdlOHZRh/zICLhQNL5fGne0ZL+qlrXY
144 77eaf9539499a1b8be259ffe7ada787d07857f80 0 iQIcBAABCAAGBQJY9iz9AAoJELnJ3IJKpb3VYqEQAJNkB09sXgYRLA4kGQv3p4v02q9WZ1lHkAhOlNwIh7Zp+pGvT33nHZffByA0v+xtJNV9TNMIFFjkCg3jl5Z42CCe33ZlezGBAzXU+70QPvOR0ojlYk+FdMfeSyCBzWYokIpImwNmwNGKVrUAfywdikCsUC2aRjKg4Mn7GnqWl9WrBG6JEOOUamdx8qV2f6g/utRiqj4YQ86P0y4K3yakwc1LMM+vRfrwvsf1+DZ9t7QRENNKQ6gRnUdfryqSFIWn1VkBVMwIN5W3yIrTMfgH1wAZxbnYHrN5qDK7mcbP7bOA3XWJuEC+3QRnheRFd/21O1dMFuYjaKApXPHRlTGRMOaz2eydbfBopUS1BtfYEh4/B/1yJb9/HDw6LiAjea7ACHiaNec83z643005AvtUuWhjX3QTPkYlQzWaosanGy1IOGtXCPp1L0A+9gUpqyqycfPjQCbST5KRzYSZn3Ngmed5Bb6jsgvg5e5y0En/SQgK/pTKnxemAmFFVvIIrrWGRKj0AD0IFEHEepmwprPRs97EZPoBPFAGmVRuASBeIhFQxSDIXV0ebHJoUmz5w1rTy7U3Eq0ff6nW14kjWOUplatXz5LpWJ3VkZKrI+4gelto5xpTI6gJl2nmezhXQIlInk17cPuxmiHjeMdlOHZRh/zICLhQNL5fGne0ZL+qlrXY
145 616e788321cc4ae9975b7f0c54c849f36d82182b 0 iQIVAwUAWPZuQkemf/qjRqrOAQjFlg/9HXEegJMv8FP+uILPoaiA2UCiqWUL2MVJ0K1cvafkwUq+Iwir8sTe4VJ1v6V+ZRiOuzs4HMnoGJrIks4vHRbAxJ3J6xCfvrsbHdl59grv54vuoL5FlZvkdIe8L7/ovKrUmNwPWZX2v+ffFPrsEBeVlVrXpp4wOPhDxCKTmjYVOp87YqXfJsud7EQFPqpV4jX8DEDtJWT95OE9x0srBg0HpSE95d/BM4TuXTVNI8fV41YEqearKeFIhLxu37HxUmGmkAALCi8RJmm4hVpUHgk3tAVzImI8DglUqnC6VEfaYb+PKzIqHelhb66JO/48qN2S/JXihpNHAVUBysBT0b1xEnc6eNsF2fQEB+bEcf8IGj7/ILee1cmwPtoK2OXR2+xWWWjlu2keVcKeI0yAajJw/dP21yvVzVq0ypst7iD+EGHLJWJSmZscbyH5ICr+TJ5yQvIGZJtfsAdAUUTM2xpqSDW4mT5kYyg75URbQ3AKI7lOhJBmkkGQErE4zIQMkaAqcWziVF20xiRWfJoFxT2fK5weaRGIjELH49NLlyvZxYc4LlRo9lIdC7l/6lYDdTx15VuEj1zx/91y/d7OtPm+KCA2Bbdqth8m/fMD8trfQ6jSG/wgsvjZ+S0eoXa92qIR/igsCI+6EwP7duuzL2iyKOPXupQVNN10PKI7EuKv4Lk=
145 616e788321cc4ae9975b7f0c54c849f36d82182b 0 iQIVAwUAWPZuQkemf/qjRqrOAQjFlg/9HXEegJMv8FP+uILPoaiA2UCiqWUL2MVJ0K1cvafkwUq+Iwir8sTe4VJ1v6V+ZRiOuzs4HMnoGJrIks4vHRbAxJ3J6xCfvrsbHdl59grv54vuoL5FlZvkdIe8L7/ovKrUmNwPWZX2v+ffFPrsEBeVlVrXpp4wOPhDxCKTmjYVOp87YqXfJsud7EQFPqpV4jX8DEDtJWT95OE9x0srBg0HpSE95d/BM4TuXTVNI8fV41YEqearKeFIhLxu37HxUmGmkAALCi8RJmm4hVpUHgk3tAVzImI8DglUqnC6VEfaYb+PKzIqHelhb66JO/48qN2S/JXihpNHAVUBysBT0b1xEnc6eNsF2fQEB+bEcf8IGj7/ILee1cmwPtoK2OXR2+xWWWjlu2keVcKeI0yAajJw/dP21yvVzVq0ypst7iD+EGHLJWJSmZscbyH5ICr+TJ5yQvIGZJtfsAdAUUTM2xpqSDW4mT5kYyg75URbQ3AKI7lOhJBmkkGQErE4zIQMkaAqcWziVF20xiRWfJoFxT2fK5weaRGIjELH49NLlyvZxYc4LlRo9lIdC7l/6lYDdTx15VuEj1zx/91y/d7OtPm+KCA2Bbdqth8m/fMD8trfQ6jSG/wgsvjZ+S0eoXa92qIR/igsCI+6EwP7duuzL2iyKOPXupQVNN10PKI7EuKv4Lk=
146 bb96d4a497432722623ae60d9bc734a1e360179e 0 iQIVAwUAWQkDfEemf/qjRqrOAQierQ/7BuQ0IW0T0cglgqIgkLuYLx2VXJCTEtRNCWmrH2UMK7fAdpAhN0xf+xedv56zYHrlyHpbskDbWvsKIHJdw/4bQitXaIFTyuMMtSR5vXy4Nly34O/Xs2uGb3Y5qwdubeK2nZr4lSPgiRHb/zI/B1Oy8GX830ljmIOY7B0nUWy4DrXcy/M41SnAMLFyD1K6T/8tkv7M4Fai7dQoF9EmIIkShVPktI3lqp3m7infZ4XnJqcqUB0NSfQZwZaUaoalOdCvEIe3ab5ewgl/CuvlDI4oqMQGjXCtNLbtiZSwo6hvudO6ewT+Zn/VdabkZyRtXUxu56ajjd6h22nU1+vknqDzo5tzw6oh1Ubzf8tzyv3Gmmr+tlOjzfK7tXXnT3vR9aEGli0qri0DzOpsDSY0pDC7EsS4LINPoNdsGQrGQdoX++AISROlNjvyuo4Vrp26tPHCSupkKOXuZaiozycAa2Q+aI1EvkPZSXe8SAXKDVtFn05ZB58YVkFzZKAYAxkE/ven59zb4aIbOgR12tZbJoZZsVHrlf/TcDtiXVfIMEMsCtJ1tPgD1rAsEURWRxK3mJ0Ev6KTHgNz4PeBhq1gIP/Y665aX2+cCjc4+vApPUienh5aOr1bQFpIDyYZsafHGMUFNCwRh8bX98oTGa0hjqz4ypwXE4Wztjdc+48UiHARp/Y=
146 bb96d4a497432722623ae60d9bc734a1e360179e 0 iQIVAwUAWQkDfEemf/qjRqrOAQierQ/7BuQ0IW0T0cglgqIgkLuYLx2VXJCTEtRNCWmrH2UMK7fAdpAhN0xf+xedv56zYHrlyHpbskDbWvsKIHJdw/4bQitXaIFTyuMMtSR5vXy4Nly34O/Xs2uGb3Y5qwdubeK2nZr4lSPgiRHb/zI/B1Oy8GX830ljmIOY7B0nUWy4DrXcy/M41SnAMLFyD1K6T/8tkv7M4Fai7dQoF9EmIIkShVPktI3lqp3m7infZ4XnJqcqUB0NSfQZwZaUaoalOdCvEIe3ab5ewgl/CuvlDI4oqMQGjXCtNLbtiZSwo6hvudO6ewT+Zn/VdabkZyRtXUxu56ajjd6h22nU1+vknqDzo5tzw6oh1Ubzf8tzyv3Gmmr+tlOjzfK7tXXnT3vR9aEGli0qri0DzOpsDSY0pDC7EsS4LINPoNdsGQrGQdoX++AISROlNjvyuo4Vrp26tPHCSupkKOXuZaiozycAa2Q+aI1EvkPZSXe8SAXKDVtFn05ZB58YVkFzZKAYAxkE/ven59zb4aIbOgR12tZbJoZZsVHrlf/TcDtiXVfIMEMsCtJ1tPgD1rAsEURWRxK3mJ0Ev6KTHgNz4PeBhq1gIP/Y665aX2+cCjc4+vApPUienh5aOr1bQFpIDyYZsafHGMUFNCwRh8bX98oTGa0hjqz4ypwXE4Wztjdc+48UiHARp/Y=
147 c850f0ed54c1d42f9aa079ad528f8127e5775217 0 iQIVAwUAWTQINUemf/qjRqrOAQjZDw//b4pEgHYfWRVDEmLZtevysfhlJzbSyLAnWgNnRUVdSwl4WRF1r6ds/q7N4Ege5wQHjOpRtx4jC3y/riMbrLUlaeUXzCdqKgm4JcINS1nXy3IfkeDdUKyOR9upjaVhIEzCMRpyzabdYuflh5CoxayO7GFk2iZ8c1oAl4QzuLSspn9w+znqDg0HrMDbRNijStSulNjkqutih9UqT/PYizhE1UjL0NSnpYyD1vDljsHModJc2dhSzuZ1c4VFZHkienk+CNyeLtVKg8aC+Ej/Ppwq6FlE461T/RxOEzf+WFAc9F4iJibSN2kAFB4ySJ43y+OKkvzAwc5XbUx0y6OlWn2Ph+5T54sIwqasG3DjXyVrwVtAvCrcWUmOyS0RfkKoDVepMPIhFXyrhGqUYSq25Gt6tHVtIrlcWARIGGWlsE+PSHi87qcnSjs4xUzZwVvJWz4fuM1AUG/GTpyt4w3kB85XQikIINkmSTmsM/2/ar75T6jBL3kqOCGOL3n7bVZsGXllhkkQ7e/jqPPWnNXm8scDYdT3WENNu34zZp5ZmqdTXPAIIaqGswnU04KfUSEoYtOMri3E2VvrgMkiINm9BOKpgeTsMb3dkYRw2ZY3UAH9QfdX9BZywk6v3kkE5ghLWMUoQ4sqRlTo7mJKA8+EodjmIGRV/kAv1f7pigg6pIWWEyo=
147 c850f0ed54c1d42f9aa079ad528f8127e5775217 0 iQIVAwUAWTQINUemf/qjRqrOAQjZDw//b4pEgHYfWRVDEmLZtevysfhlJzbSyLAnWgNnRUVdSwl4WRF1r6ds/q7N4Ege5wQHjOpRtx4jC3y/riMbrLUlaeUXzCdqKgm4JcINS1nXy3IfkeDdUKyOR9upjaVhIEzCMRpyzabdYuflh5CoxayO7GFk2iZ8c1oAl4QzuLSspn9w+znqDg0HrMDbRNijStSulNjkqutih9UqT/PYizhE1UjL0NSnpYyD1vDljsHModJc2dhSzuZ1c4VFZHkienk+CNyeLtVKg8aC+Ej/Ppwq6FlE461T/RxOEzf+WFAc9F4iJibSN2kAFB4ySJ43y+OKkvzAwc5XbUx0y6OlWn2Ph+5T54sIwqasG3DjXyVrwVtAvCrcWUmOyS0RfkKoDVepMPIhFXyrhGqUYSq25Gt6tHVtIrlcWARIGGWlsE+PSHi87qcnSjs4xUzZwVvJWz4fuM1AUG/GTpyt4w3kB85XQikIINkmSTmsM/2/ar75T6jBL3kqOCGOL3n7bVZsGXllhkkQ7e/jqPPWnNXm8scDYdT3WENNu34zZp5ZmqdTXPAIIaqGswnU04KfUSEoYtOMri3E2VvrgMkiINm9BOKpgeTsMb3dkYRw2ZY3UAH9QfdX9BZywk6v3kkE5ghLWMUoQ4sqRlTo7mJKA8+EodjmIGRV/kAv1f7pigg6pIWWEyo=
148 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 0 iQIcBAABCAAGBQJZXQSmAAoJELnJ3IJKpb3VmTwP/jsxFTlKzWU8EnEhEViiP2YREOD3AXU7685DIMnoyVAsZgxrt0CG6Y92b5sINCeh5B0ORPQ7+xi2Xmz6tX8EeAR+/Dpdx6K623yExf8kq91zgfMvYkatNMu6ZVfywibYZAASq02oKoX7WqSPcQG/OwgtdFiGacCrG5iMH7wRv0N9hPc6D5vAV8/H/Inq8twpSG5SGDpCdKj7KPZiY8DFu/3OXatJtl+byg8zWT4FCYKkBPvmZp8/sRhDKBgwr3RvF1p84uuw/QxXjt+DmGxgtjvObjHr+shCMcKBAuZ4RtZmyEo/0L81uaTElHu1ejsEzsEKxs+8YifnH070PTFoV4VXQyXfTc8AyaqHE6rzX96a/HjQiJnL4dFeTZIrUhGK3AkObFLWJxVTo4J8+oliBQQldIh1H2yb1ZMfwapLnUGIqSieHDGZ6K2ccNJK8Q7IRhTCvYc0cjsnbwTpV4cebGqf3WXZhX0cZN+TNfhh/HGRzR1EeAAavjJqpDam1OBA5TmtJd/lHLIRVR5jyG+r4SK0XDlJ8uSfah7MpVH6aQ6UrycPyFusGXQlIqJ1DYQaBrI/SRJfIvRUmvVz9WgKLe83oC3Ui3aWR9rNjMb2InuQuXjeZaeaYfBAUYACcGfCZpZZvoEkMHCqtTng1rbbFnKMFk5kVy9YWuVgK9Iuh0O5
148 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 0 iQIcBAABCAAGBQJZXQSmAAoJELnJ3IJKpb3VmTwP/jsxFTlKzWU8EnEhEViiP2YREOD3AXU7685DIMnoyVAsZgxrt0CG6Y92b5sINCeh5B0ORPQ7+xi2Xmz6tX8EeAR+/Dpdx6K623yExf8kq91zgfMvYkatNMu6ZVfywibYZAASq02oKoX7WqSPcQG/OwgtdFiGacCrG5iMH7wRv0N9hPc6D5vAV8/H/Inq8twpSG5SGDpCdKj7KPZiY8DFu/3OXatJtl+byg8zWT4FCYKkBPvmZp8/sRhDKBgwr3RvF1p84uuw/QxXjt+DmGxgtjvObjHr+shCMcKBAuZ4RtZmyEo/0L81uaTElHu1ejsEzsEKxs+8YifnH070PTFoV4VXQyXfTc8AyaqHE6rzX96a/HjQiJnL4dFeTZIrUhGK3AkObFLWJxVTo4J8+oliBQQldIh1H2yb1ZMfwapLnUGIqSieHDGZ6K2ccNJK8Q7IRhTCvYc0cjsnbwTpV4cebGqf3WXZhX0cZN+TNfhh/HGRzR1EeAAavjJqpDam1OBA5TmtJd/lHLIRVR5jyG+r4SK0XDlJ8uSfah7MpVH6aQ6UrycPyFusGXQlIqJ1DYQaBrI/SRJfIvRUmvVz9WgKLe83oC3Ui3aWR9rNjMb2InuQuXjeZaeaYfBAUYACcGfCZpZZvoEkMHCqtTng1rbbFnKMFk5kVy9YWuVgK9Iuh0O5
149 857876ebaed4e315f63157bd157d6ce553c7ab73 0 iQIVAwUAWW9XW0emf/qjRqrOAQhI7A//cKXIM4l8vrWWsc1Os4knXm/2UaexmAwV70TpviKL9RxCy5zBP/EapCaGRCH8uNPOQTkWGR9Aucm3CtxhggCMzULQxxeH86mEpWf1xILWLySPXW/t2f+2zxrwLSAxxqFJtuYv83Pe8CnS3y4BlgHnBKYXH8XXuW8uvfc0lHKblhrspGBIAinx7vPLoGQcpYrn9USWUKq5d9FaCLQCDT9501FHKf5dlYQajevCUDnewtn5ohelOXjTJQClW3aygv/z+98Kq7ZhayeIiZu+SeP+Ay7lZPklXcy6eyRiQtGCa1yesb9v53jKtgxWewV4o6zyuUesdknZ/IBeNUgw8LepqTIJo6/ckyvBOsSQcda81DuYNUChZLYTSXYPHEUmYiz6CvNoLEgHF/oO5p6CZXOPWbmLWrAFd+0+1Tuq8BSh+PSdEREM3ZLOikkXoVzTKBgu4zpMvmBnjliBg7WhixkcG0v5WunlV9/oHAIpsKdL7AatU+oCPulp+xDpTKzRazEemYiWG9zYKzwSMk9Nc17e2tk+EtFSPsPo4iVCXMgdIZSTNBvynKEFXZQVPWVa+bYRdAmbSY8awiX7exxYL10UcpnN2q/AH/F7rQzAmo8eZ3OtD0+3Nk3JRx0/CMyzKLPYDpdUgwmaPb+s2Bsy7f7TfmA7jTa69YqB1/zVwlWULr0=
149 857876ebaed4e315f63157bd157d6ce553c7ab73 0 iQIVAwUAWW9XW0emf/qjRqrOAQhI7A//cKXIM4l8vrWWsc1Os4knXm/2UaexmAwV70TpviKL9RxCy5zBP/EapCaGRCH8uNPOQTkWGR9Aucm3CtxhggCMzULQxxeH86mEpWf1xILWLySPXW/t2f+2zxrwLSAxxqFJtuYv83Pe8CnS3y4BlgHnBKYXH8XXuW8uvfc0lHKblhrspGBIAinx7vPLoGQcpYrn9USWUKq5d9FaCLQCDT9501FHKf5dlYQajevCUDnewtn5ohelOXjTJQClW3aygv/z+98Kq7ZhayeIiZu+SeP+Ay7lZPklXcy6eyRiQtGCa1yesb9v53jKtgxWewV4o6zyuUesdknZ/IBeNUgw8LepqTIJo6/ckyvBOsSQcda81DuYNUChZLYTSXYPHEUmYiz6CvNoLEgHF/oO5p6CZXOPWbmLWrAFd+0+1Tuq8BSh+PSdEREM3ZLOikkXoVzTKBgu4zpMvmBnjliBg7WhixkcG0v5WunlV9/oHAIpsKdL7AatU+oCPulp+xDpTKzRazEemYiWG9zYKzwSMk9Nc17e2tk+EtFSPsPo4iVCXMgdIZSTNBvynKEFXZQVPWVa+bYRdAmbSY8awiX7exxYL10UcpnN2q/AH/F7rQzAmo8eZ3OtD0+3Nk3JRx0/CMyzKLPYDpdUgwmaPb+s2Bsy7f7TfmA7jTa69YqB1/zVwlWULr0=
150 5544af8622863796a0027566f6b646e10d522c4c 0 iQIcBAABCAAGBQJZjJflAAoJELnJ3IJKpb3V19kQALCvTdPrpce5+rBNbFtLGNFxTMDol1dUy87EUAWiArnfOzW3rKBdYxvxDL23BpgUfjRm1fAXdayVvlj6VC6Dyb195OLmc/I9z7SjFxsfmxWilF6U0GIa3W0x37i05EjfcccrBIuSLrvR6AWyJhjLOBCcyAqD/HcEom00/L+o2ry9CDQNLEeVuNewJiupcUqsTIG2yS26lWbtLZuoqS2T4Nlg8wjJhiSXlsZSuAF55iUJKlTQP6KyWReiaYuEVfm/Bybp0A2bFcZCYpWPwnwKBdSCHhIalH8PO57gh9J7xJVnyyBg5PU6n4l6PrGOmKhNiU/xyNe36tEAdMW6svcVvt8hiY0dnwWqR6wgnFFDu0lnTMUcjsy5M5FBY6wSw9Fph8zcNRzYyaeUbasNonPvrIrk21nT3ET3RzVR3ri2nJDVF+0GlpogGfk9k7wY3808091BMsyV3448ZPKQeWiK4Yy4UOUwbKV7YAsS5MdDnC1uKjl4GwLn9UCY/+Q2/2R0CBZ13Tox+Nbo6hBRuRGtFIbLK9j7IIUhhZrIZFSh8cDNkC+UMaS52L5z7ECvoYIUpw+MJ7NkMLHIVGZ2Nxn0C7IbGO6uHyR7D6bdNpxilU+WZStHk0ppZItRTm/htar4jifnaCI8F8OQNYmZ3cQhxx6qV2Tyow8arvWb1NYXrocG
150 5544af8622863796a0027566f6b646e10d522c4c 0 iQIcBAABCAAGBQJZjJflAAoJELnJ3IJKpb3V19kQALCvTdPrpce5+rBNbFtLGNFxTMDol1dUy87EUAWiArnfOzW3rKBdYxvxDL23BpgUfjRm1fAXdayVvlj6VC6Dyb195OLmc/I9z7SjFxsfmxWilF6U0GIa3W0x37i05EjfcccrBIuSLrvR6AWyJhjLOBCcyAqD/HcEom00/L+o2ry9CDQNLEeVuNewJiupcUqsTIG2yS26lWbtLZuoqS2T4Nlg8wjJhiSXlsZSuAF55iUJKlTQP6KyWReiaYuEVfm/Bybp0A2bFcZCYpWPwnwKBdSCHhIalH8PO57gh9J7xJVnyyBg5PU6n4l6PrGOmKhNiU/xyNe36tEAdMW6svcVvt8hiY0dnwWqR6wgnFFDu0lnTMUcjsy5M5FBY6wSw9Fph8zcNRzYyaeUbasNonPvrIrk21nT3ET3RzVR3ri2nJDVF+0GlpogGfk9k7wY3808091BMsyV3448ZPKQeWiK4Yy4UOUwbKV7YAsS5MdDnC1uKjl4GwLn9UCY/+Q2/2R0CBZ13Tox+Nbo6hBRuRGtFIbLK9j7IIUhhZrIZFSh8cDNkC+UMaS52L5z7ECvoYIUpw+MJ7NkMLHIVGZ2Nxn0C7IbGO6uHyR7D6bdNpxilU+WZStHk0ppZItRTm/htar4jifnaCI8F8OQNYmZ3cQhxx6qV2Tyow8arvWb1NYXrocG
151 943c91326b23954e6e1c6960d0239511f9530258 0 iQIcBAABCAAGBQJZjKKZAAoJELnJ3IJKpb3VGQkP/0iF6Khef0lBaRhbSAPwa7RUBb3iaBeuwmeic/hUjMoU1E5NR36bDDaF3u2di5mIYPBONFIeCPf9/DKyFkidueX1UnlAQa3mjh/QfKTb4/yO2Nrk7eH+QtrYxVUUYYjwgp4rS0Nd/++I1IUOor54vqJzJ7ZnM5O1RsE7VI1esAC/BTlUuO354bbm08B0owsZBwVvcVvpV4zeTvq5qyPxBJ3M0kw83Pgwh3JZB9IYhOabhSUBcA2fIPHgYGYnJVC+bLOeMWI1HJkJeoYfClNUiQUjAmi0cdTC733eQnHkDw7xyyFi+zkKu6JmU1opxkHSuj4Hrjul7Gtw3vVWWUPufz3AK7oymNp2Xr5y1HQLDtNJP3jicTTG1ae2TdX5Az3ze0I8VGbpR81/6ShAvY2cSKttV3I+2k4epxTTTf0xaZS1eUdnFOox6acElG2reNzx7EYYxpHj17K8N2qNzyY78iPgbJ+L39PBFoiGXMZJqWCxxIHoK1MxlXa8WwSnsXAU768dJvEn2N1x3fl+aeaWzeM4/5Qd83YjFuCeycuRnIo3rejSX3rWFAwZE0qQHKI5YWdKDLxIfdHTjdfMP7np+zLcHt0DV/dHmj2hKQgU0OK04fx7BrmdS1tw67Y9bL3H3TDohn7khU1FrqrKVuqSLbLsxnNyWRbZQF+DCoYrHlIW
151 943c91326b23954e6e1c6960d0239511f9530258 0 iQIcBAABCAAGBQJZjKKZAAoJELnJ3IJKpb3VGQkP/0iF6Khef0lBaRhbSAPwa7RUBb3iaBeuwmeic/hUjMoU1E5NR36bDDaF3u2di5mIYPBONFIeCPf9/DKyFkidueX1UnlAQa3mjh/QfKTb4/yO2Nrk7eH+QtrYxVUUYYjwgp4rS0Nd/++I1IUOor54vqJzJ7ZnM5O1RsE7VI1esAC/BTlUuO354bbm08B0owsZBwVvcVvpV4zeTvq5qyPxBJ3M0kw83Pgwh3JZB9IYhOabhSUBcA2fIPHgYGYnJVC+bLOeMWI1HJkJeoYfClNUiQUjAmi0cdTC733eQnHkDw7xyyFi+zkKu6JmU1opxkHSuj4Hrjul7Gtw3vVWWUPufz3AK7oymNp2Xr5y1HQLDtNJP3jicTTG1ae2TdX5Az3ze0I8VGbpR81/6ShAvY2cSKttV3I+2k4epxTTTf0xaZS1eUdnFOox6acElG2reNzx7EYYxpHj17K8N2qNzyY78iPgbJ+L39PBFoiGXMZJqWCxxIHoK1MxlXa8WwSnsXAU768dJvEn2N1x3fl+aeaWzeM4/5Qd83YjFuCeycuRnIo3rejSX3rWFAwZE0qQHKI5YWdKDLxIfdHTjdfMP7np+zLcHt0DV/dHmj2hKQgU0OK04fx7BrmdS1tw67Y9bL3H3TDohn7khU1FrqrKVuqSLbLsxnNyWRbZQF+DCoYrHlIW
152 3fee7f7d2da04226914c2258cc2884dc27384fd7 0 iQIcBAABCAAGBQJZjOJfAAoJELnJ3IJKpb3VvikP/iGjfahwkl2BDZYGq6Ia64a0bhEh0iltoWTCCDKMbHuuO+7h07fHpBl/XX5XPnS7imBUVWLOARhVL7aDPb0tu5NZzMKN57XUC/0FWFyf7lXXAVaOapR4kP8RtQvnoxfNSLRgiZQL88KIRBgFc8pbl8hLA6UbcHPsOk4dXKvmfPfHBHnzdUEDcSXDdyOBhuyOSzRs8egXVi3WeX6OaXG3twkw/uCF3pgOMOSyWVDwD+KvK+IBmSxCTKXzsb+pqpc7pPOFWhSXjpbuYUcI5Qy7mpd0bFL3qNqgvUNq2gX5mT6zH/TsVD10oSUjYYqKMO+gi34OgTVWRRoQfWBwrQwxsC/MxH6ZeOetl2YkS13OxdmYpNAFNQ8ye0vZigJRA+wHoC9dn0h8c5X4VJt/dufHeXc887EGJpLg6GDXi5Emr2ydAUhBJKlpi2yss22AmiQ4G9NE1hAjxqhPvkgBK/hpbr3FurV4hjTG6XKsF8I0WdbYz2CW/FEbp1+4T49ChhrwW0orZdEQX7IEjXr45Hs5sTInT90Hy2XG3Kovi0uVMt15cKsSEYDoFHkR4NgCZX2Y+qS5ryH8yqor3xtel3KsBIy6Ywn8pAo2f8flW3nro/O6x+0NKGV+ZZ0uo/FctuQLBrQVs025T1ai/6MbscQXvFVZVPKrUzlQaNPf/IwNOaRa
152 3fee7f7d2da04226914c2258cc2884dc27384fd7 0 iQIcBAABCAAGBQJZjOJfAAoJELnJ3IJKpb3VvikP/iGjfahwkl2BDZYGq6Ia64a0bhEh0iltoWTCCDKMbHuuO+7h07fHpBl/XX5XPnS7imBUVWLOARhVL7aDPb0tu5NZzMKN57XUC/0FWFyf7lXXAVaOapR4kP8RtQvnoxfNSLRgiZQL88KIRBgFc8pbl8hLA6UbcHPsOk4dXKvmfPfHBHnzdUEDcSXDdyOBhuyOSzRs8egXVi3WeX6OaXG3twkw/uCF3pgOMOSyWVDwD+KvK+IBmSxCTKXzsb+pqpc7pPOFWhSXjpbuYUcI5Qy7mpd0bFL3qNqgvUNq2gX5mT6zH/TsVD10oSUjYYqKMO+gi34OgTVWRRoQfWBwrQwxsC/MxH6ZeOetl2YkS13OxdmYpNAFNQ8ye0vZigJRA+wHoC9dn0h8c5X4VJt/dufHeXc887EGJpLg6GDXi5Emr2ydAUhBJKlpi2yss22AmiQ4G9NE1hAjxqhPvkgBK/hpbr3FurV4hjTG6XKsF8I0WdbYz2CW/FEbp1+4T49ChhrwW0orZdEQX7IEjXr45Hs5sTInT90Hy2XG3Kovi0uVMt15cKsSEYDoFHkR4NgCZX2Y+qS5ryH8yqor3xtel3KsBIy6Ywn8pAo2f8flW3nro/O6x+0NKGV+ZZ0uo/FctuQLBrQVs025T1ai/6MbscQXvFVZVPKrUzlQaNPf/IwNOaRa
153 920977f72c7b70acfdaf56ab35360584d7845827 0 iQIcBAABCAAGBQJZv+wSAAoJELnJ3IJKpb3VH3kQAJp3OkV6qOPXBnlOSSodbVZveEQ5dGJfG9hk+VokcK6MFnieAFouROoGNlQXQtzj6cMqK+LGCP/NeJEG323gAxpxMzc32g7TqbVEhKNqNK8HvQSt04aCVZXtBmP0cPzc348UPP1X1iPTkyZxaJ0kHulaHVptwGbFZZyhwGefauU4eMafJsYqwgiGmvDpjUFu6P8YJXliYeTo1HX2lNChS1xmvJbop1YHfBYACsi8Eron0vMuhaQ+TKYq8Zd762u2roRYnaQ23ubEaVsjGDUYxXXVmit2gdaEKk+6Rq2I+EgcI5XvFzK8gvoP7siz6FL1jVf715k9/UYoWj9KDNUm8cweiyiUpjHQt0S+Ro9ryKvQy6tQVunRZqBN/kZWVth/FlMbUENbxVyXZcXv+m7OLvk+vyK7UZ7yT+OBzgRr0PyUuafzSVW3e+RZJtGxYGM5ew2bWQ8L6wuBucRYZOSnXXtCw7cKEMlK3BTjfAfpHUdIZIG492R9d6aOECUK/MpNvCiXXaZoh5Kj4a0dARiuWFCZxWwt3bmOg13oQ841zLdzOi/YZe15vCm8OB4Ffg6CkmPKhZhnMwVbFmlaBcoaeMzzpMuog91J1M2zgEUBTYwe/HKiNr/0iilJMPFRpZ+zEb2GvVoc8FMttXi8aomlXf/6LHCC9ndexGC29jIzl41+
153 920977f72c7b70acfdaf56ab35360584d7845827 0 iQIcBAABCAAGBQJZv+wSAAoJELnJ3IJKpb3VH3kQAJp3OkV6qOPXBnlOSSodbVZveEQ5dGJfG9hk+VokcK6MFnieAFouROoGNlQXQtzj6cMqK+LGCP/NeJEG323gAxpxMzc32g7TqbVEhKNqNK8HvQSt04aCVZXtBmP0cPzc348UPP1X1iPTkyZxaJ0kHulaHVptwGbFZZyhwGefauU4eMafJsYqwgiGmvDpjUFu6P8YJXliYeTo1HX2lNChS1xmvJbop1YHfBYACsi8Eron0vMuhaQ+TKYq8Zd762u2roRYnaQ23ubEaVsjGDUYxXXVmit2gdaEKk+6Rq2I+EgcI5XvFzK8gvoP7siz6FL1jVf715k9/UYoWj9KDNUm8cweiyiUpjHQt0S+Ro9ryKvQy6tQVunRZqBN/kZWVth/FlMbUENbxVyXZcXv+m7OLvk+vyK7UZ7yT+OBzgRr0PyUuafzSVW3e+RZJtGxYGM5ew2bWQ8L6wuBucRYZOSnXXtCw7cKEMlK3BTjfAfpHUdIZIG492R9d6aOECUK/MpNvCiXXaZoh5Kj4a0dARiuWFCZxWwt3bmOg13oQ841zLdzOi/YZe15vCm8OB4Ffg6CkmPKhZhnMwVbFmlaBcoaeMzzpMuog91J1M2zgEUBTYwe/HKiNr/0iilJMPFRpZ+zEb2GvVoc8FMttXi8aomlXf/6LHCC9ndexGC29jIzl41+
154 2f427b57bf9019c6dc3750baa539dc22c1be50f6 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlnQtVIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TTkD/409sWTM9vUH2qkqNTb1IXyGpqzb9UGOSVDioz6rvgZEBgh9D1oBTWnfBXW8sOWR0A7iCL6qZh2Yi7g7p0mKGXh9LZViLtSwwMSXpNiGBO7RVPW+NQ6DOY5Rhr0i08UBiVEkZXHeIVCd2Bd6mhAiUsm5iUh9Jne10wO8cIxeAUnsx4DBdHBMWLg6AZKWllSgN+r9H+7wnOhDbkvj1Cu6+ugKpEs+xvbTh47OTyM+w9tC1aoZD4HhfR5w5O16FC+TIoE6wmWut6e2pxIMHDB3H08Dky6gNjucY/ntJXvOZW5kYrQA3LHKks8ebpjsIXesOAvReOAsDz0drwzbWZan9Cbj8yWoYz/HCgHCnX3WqKKORSP5pvdrsqYua9DXtJwBeSWY4vbIM2kECAiyw1SrOGudxlyWBlW1f1jhGR2DsBlwoieeAvUVoaNwO7pYirwxR4nFPdLDRCQ4hLK/GFiuyr+lGoc1WUzVRNBYD3udcOZAbqq4JhWLf0Gvd5xP0rn1cJNhHMvrPH4Ki4a5KeeK6gQI7GT9/+PPQzTdpxXj6KwofktJtVNqm5sJmJ+wMIddnobFlNNLZ/F7OMONWajuVhh+vSOV34YLdhqzAR5XItkeJL6qyAJjNH5PjsnhT7nMqjgwriPz6xxYOLJWgtK5ZqcSCx4gWy9KJVVja8wJ7rRUg==
154 2f427b57bf9019c6dc3750baa539dc22c1be50f6 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlnQtVIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TTkD/409sWTM9vUH2qkqNTb1IXyGpqzb9UGOSVDioz6rvgZEBgh9D1oBTWnfBXW8sOWR0A7iCL6qZh2Yi7g7p0mKGXh9LZViLtSwwMSXpNiGBO7RVPW+NQ6DOY5Rhr0i08UBiVEkZXHeIVCd2Bd6mhAiUsm5iUh9Jne10wO8cIxeAUnsx4DBdHBMWLg6AZKWllSgN+r9H+7wnOhDbkvj1Cu6+ugKpEs+xvbTh47OTyM+w9tC1aoZD4HhfR5w5O16FC+TIoE6wmWut6e2pxIMHDB3H08Dky6gNjucY/ntJXvOZW5kYrQA3LHKks8ebpjsIXesOAvReOAsDz0drwzbWZan9Cbj8yWoYz/HCgHCnX3WqKKORSP5pvdrsqYua9DXtJwBeSWY4vbIM2kECAiyw1SrOGudxlyWBlW1f1jhGR2DsBlwoieeAvUVoaNwO7pYirwxR4nFPdLDRCQ4hLK/GFiuyr+lGoc1WUzVRNBYD3udcOZAbqq4JhWLf0Gvd5xP0rn1cJNhHMvrPH4Ki4a5KeeK6gQI7GT9/+PPQzTdpxXj6KwofktJtVNqm5sJmJ+wMIddnobFlNNLZ/F7OMONWajuVhh+vSOV34YLdhqzAR5XItkeJL6qyAJjNH5PjsnhT7nMqjgwriPz6xxYOLJWgtK5ZqcSCx4gWy9KJVVja8wJ7rRUg==
155 1e2454b60e5936f5e77498cab2648db469504487 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlnqRBUhHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOAQQP/28EzmTKFL/RxmNYePdzqrmcdJ2tn+s7OYmGdtneN2sESZ4MK0xb5Q8Mkm+41aXS52zzJdz9ynwdun8DG4wZ3sE5MOG+GgK6K0ecOv1XTKS3a2DkUM0fl5hlcXN7Zz7m7m5M6sy6vSxHP7kTyzQWt//z175ZLSQEu1a0nm/BLH+HP9e8DfnJ2Nfcnwp32kV0Nj1xTqjRV1Yo/oCnXfVvsxEJU+CDUGBiLc29ZcoWVbTw9c1VcxihJ6k0pK711KZ+bedSk7yc1OudiJF7idjB0bLQY6ESHNNNjK8uLppok0RsyuhvvDTAoTsl1rMKGmXMM0Ela3/5oxZ/5lUZB73vEJhzEi48ULvstpq82EO39KylkEfQxwMBPhnBIHQaGRkl7QPLXGOYUDMY6gT08Sm3e8/NqEJc/AgckXehpH3gSS2Ji2xg7/E8H5plGsswFidw//oYTTwm0j0halWpB521TD2wmjkjRHXzk1mj0EoFQUMfwHTIZU3E8flUBasD3mZ9XqZJPr66RV7QCrXayH75B/i0CyNqd/Hv5Tkf2TlC3EkEBZwZyAjqw7EyL1LuS936sc7fWuMFsH5k/fwjVwzIc1LmP+nmk2Dd9hIC66vec4w1QZeeAXuDKgOJjvQzj2n+uYRuObl4kKcxvoXqgQN0glGuB1IW7lPllGHR1kplhoub
155 1e2454b60e5936f5e77498cab2648db469504487 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlnqRBUhHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOAQQP/28EzmTKFL/RxmNYePdzqrmcdJ2tn+s7OYmGdtneN2sESZ4MK0xb5Q8Mkm+41aXS52zzJdz9ynwdun8DG4wZ3sE5MOG+GgK6K0ecOv1XTKS3a2DkUM0fl5hlcXN7Zz7m7m5M6sy6vSxHP7kTyzQWt//z175ZLSQEu1a0nm/BLH+HP9e8DfnJ2Nfcnwp32kV0Nj1xTqjRV1Yo/oCnXfVvsxEJU+CDUGBiLc29ZcoWVbTw9c1VcxihJ6k0pK711KZ+bedSk7yc1OudiJF7idjB0bLQY6ESHNNNjK8uLppok0RsyuhvvDTAoTsl1rMKGmXMM0Ela3/5oxZ/5lUZB73vEJhzEi48ULvstpq82EO39KylkEfQxwMBPhnBIHQaGRkl7QPLXGOYUDMY6gT08Sm3e8/NqEJc/AgckXehpH3gSS2Ji2xg7/E8H5plGsswFidw//oYTTwm0j0halWpB521TD2wmjkjRHXzk1mj0EoFQUMfwHTIZU3E8flUBasD3mZ9XqZJPr66RV7QCrXayH75B/i0CyNqd/Hv5Tkf2TlC3EkEBZwZyAjqw7EyL1LuS936sc7fWuMFsH5k/fwjVwzIc1LmP+nmk2Dd9hIC66vec4w1QZeeAXuDKgOJjvQzj2n+uYRuObl4kKcxvoXqgQN0glGuB1IW7lPllGHR1kplhoub
156 0ccb43d4cf01d013ae05917ec4f305509f851b2d 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAln6Qp8hHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOJ8MP/2ufm/dbrFoE0F8hewhztG1vS4stus13lZ9lmM9kza8OKeOgY/MDH8GaV3O8GnRiCNUFsVD8JEIexE31c84H2Ie7VQO0GQSUHSyMCRrbED6IvfrWp6EZ6RDNPk4LHBfxCuPmuVHGRoGZtsLKJBPIxIHJKWMlEJlj9BZuUxZp/8kurQ6CXwblVbFzXdOaZQlioOBH27Bk3S0+gXfJ+wA2ed5XOQvT9jwjqC8y/1t8obaoPTpzyAvb9NArG+9RT9vfNN42aWISZNwg6RW5oLJISqoGrAes6EoG7dZfOC0UoKMVYXoNvZzJvVlMHyjugIoid+WI+V8y9bPrRTfbPCmocCzEzCOLEHQta8roNijB0bKcq8hmQPHcMyXlj1Srnqlco49jbhftgJoPTwzb10wQyU0VFvaZDPW/EQUT3M/k4j3sVESjANdyG1iu6EDV080LK1LgAdhjpKMBbf6mcgAe06/07XFMbKNrZMEislOcVFp98BSKjdioUNpy91rCeSmkEsASJ3yMArRnSkuVgpyrtJaGWl79VUcmOwKhUOA/8MXMz/Oqu7hvve/sgv71xlnim460nnLw6YHPyeeCsz6KSoUK3knFXAbTk/0jvU1ixUZbI122aMzX04UgPGeTukCOUw49XfaOdN+x0YXlkl4PsrnRQhIoixY2gosPpK4YO73G
156 0ccb43d4cf01d013ae05917ec4f305509f851b2d 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAln6Qp8hHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOJ8MP/2ufm/dbrFoE0F8hewhztG1vS4stus13lZ9lmM9kza8OKeOgY/MDH8GaV3O8GnRiCNUFsVD8JEIexE31c84H2Ie7VQO0GQSUHSyMCRrbED6IvfrWp6EZ6RDNPk4LHBfxCuPmuVHGRoGZtsLKJBPIxIHJKWMlEJlj9BZuUxZp/8kurQ6CXwblVbFzXdOaZQlioOBH27Bk3S0+gXfJ+wA2ed5XOQvT9jwjqC8y/1t8obaoPTpzyAvb9NArG+9RT9vfNN42aWISZNwg6RW5oLJISqoGrAes6EoG7dZfOC0UoKMVYXoNvZzJvVlMHyjugIoid+WI+V8y9bPrRTfbPCmocCzEzCOLEHQta8roNijB0bKcq8hmQPHcMyXlj1Srnqlco49jbhftgJoPTwzb10wQyU0VFvaZDPW/EQUT3M/k4j3sVESjANdyG1iu6EDV080LK1LgAdhjpKMBbf6mcgAe06/07XFMbKNrZMEislOcVFp98BSKjdioUNpy91rCeSmkEsASJ3yMArRnSkuVgpyrtJaGWl79VUcmOwKhUOA/8MXMz/Oqu7hvve/sgv71xlnim460nnLw6YHPyeeCsz6KSoUK3knFXAbTk/0jvU1ixUZbI122aMzX04UgPGeTukCOUw49XfaOdN+x0YXlkl4PsrnRQhIoixY2gosPpK4YO73G
157 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAloB+EYQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TfwEAC/pYW7TC8mQnqSJzde4yiv2+zgflfJzRlg5rbvlUQl1gSBla3sFADZcic0ebAc+8XUu8eIzyPX+oa4wjsHvL13silUCkUzTEEQLqfKPX1bhA4mwfSDb5A7v2VZ5q8qhRGnlhTsB79ML8uBOhR/Bigdm2ixURPEZ37pWljiMp9XWBMtxPxXn/m0n5CDViibX6QqQCR4k3orcsIGd72YXU6B8NGbBN8qlqMSd0pGvSF4vM2cgVhz7D71+zU4XL/HVP97aU9GsOwN9QWW029DOJu6KG6x51WWtfD/tzyNDu7+lZ5/IKyqHX4tyqCIXEGAsQ3XypeHgCq5hV3E6LJLRqPcLpUNDiQlCg6tNPRaOuMC878MRIlffKqMH+sWo8Z7zHrut+LfRh5/k1aCh4J+FIlE6Hgbvbvv2Z8JxDpUKl0Tr+i0oHNTapbGXIecq1ZFR4kcdchodUHXBC2E6HWR50/ek5YKPddzw8WPGsBtzXMfkhFr3WkvyP2Gbe2XJnkuYptTJA+u2CfhrvgmWsYlvt/myTaMZQEzZ+uir4Xoo5NvzqTL30SFqPrP4Nh0n9G6vpVJl/eZxoYK9jL3VC0vDhnZXitkvDpjXZuJqw/HgExXWKZFfiQ3X2HY48v1gvJiSegZ5rX+uGGJtW2/Mp5FidePEgnFIqZW/yhBfs2Hzj1D2A==
157 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAloB+EYQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TfwEAC/pYW7TC8mQnqSJzde4yiv2+zgflfJzRlg5rbvlUQl1gSBla3sFADZcic0ebAc+8XUu8eIzyPX+oa4wjsHvL13silUCkUzTEEQLqfKPX1bhA4mwfSDb5A7v2VZ5q8qhRGnlhTsB79ML8uBOhR/Bigdm2ixURPEZ37pWljiMp9XWBMtxPxXn/m0n5CDViibX6QqQCR4k3orcsIGd72YXU6B8NGbBN8qlqMSd0pGvSF4vM2cgVhz7D71+zU4XL/HVP97aU9GsOwN9QWW029DOJu6KG6x51WWtfD/tzyNDu7+lZ5/IKyqHX4tyqCIXEGAsQ3XypeHgCq5hV3E6LJLRqPcLpUNDiQlCg6tNPRaOuMC878MRIlffKqMH+sWo8Z7zHrut+LfRh5/k1aCh4J+FIlE6Hgbvbvv2Z8JxDpUKl0Tr+i0oHNTapbGXIecq1ZFR4kcdchodUHXBC2E6HWR50/ek5YKPddzw8WPGsBtzXMfkhFr3WkvyP2Gbe2XJnkuYptTJA+u2CfhrvgmWsYlvt/myTaMZQEzZ+uir4Xoo5NvzqTL30SFqPrP4Nh0n9G6vpVJl/eZxoYK9jL3VC0vDhnZXitkvDpjXZuJqw/HgExXWKZFfiQ3X2HY48v1gvJiSegZ5rX+uGGJtW2/Mp5FidePEgnFIqZW/yhBfs2Hzj1D2A==
158 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlohslshHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrO7P8P/1qGts96acEdB9BZbK/Eesalb1wUByLXZoP8j+1wWwqh/Kq/q7V4Qe0z1jw/92oZbmnLy2C8sDhWv/XKxACKv69oPrcqQix1E8M+07u88ZXqHJMSxkOmvA2Vimp9EG1qgje+qchgOVgvhEhysA96bRpEnc6V0RnBqI5UdfbKtlfBmX5mUE/qsoBZhly1FTmzV1bhYlGgNLyqtJQpcbA34wyPoywsp8DRBiHWrIzz5XNR+DJFTOe4Kqio1i5r8R4QSIM5vtTbj5pbsmtGcP2CsFC9S3xTSAU6AEJKxGpubPk3ckNj3P9zolvR7krU5Jt8LIgXSVaKLt9rPhmxCbPrLtORgXkUupJcrwzQl+oYz5bkl9kowFa959waIPYoCuuW402mOTDq/L3xwDH9AKK5rELPl3fNo+5OIDKAKRIu6zRSAzBtyGT6kkfb1NSghumP4scR7cgUmLaNibZBa8eJj92gwf+ucSGoB/dF/YHWNe0jY09LFK3nyCoftmyLzxcRk1JLGNngw8MCIuisHTskhxSm/qlX7qjunoZnA3yy9behhy/YaFt4YzYZbMTivt2gszX5ktToaDqfxWDYdIa79kp8G68rYPeybelTS74LwbK3blXPI3I1nddkW52znHYLvW6BYyi+QQ5jPZLkiOC+AF0q+c4gYmPaLVN/mpMZjjmB
158 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlohslshHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrO7P8P/1qGts96acEdB9BZbK/Eesalb1wUByLXZoP8j+1wWwqh/Kq/q7V4Qe0z1jw/92oZbmnLy2C8sDhWv/XKxACKv69oPrcqQix1E8M+07u88ZXqHJMSxkOmvA2Vimp9EG1qgje+qchgOVgvhEhysA96bRpEnc6V0RnBqI5UdfbKtlfBmX5mUE/qsoBZhly1FTmzV1bhYlGgNLyqtJQpcbA34wyPoywsp8DRBiHWrIzz5XNR+DJFTOe4Kqio1i5r8R4QSIM5vtTbj5pbsmtGcP2CsFC9S3xTSAU6AEJKxGpubPk3ckNj3P9zolvR7krU5Jt8LIgXSVaKLt9rPhmxCbPrLtORgXkUupJcrwzQl+oYz5bkl9kowFa959waIPYoCuuW402mOTDq/L3xwDH9AKK5rELPl3fNo+5OIDKAKRIu6zRSAzBtyGT6kkfb1NSghumP4scR7cgUmLaNibZBa8eJj92gwf+ucSGoB/dF/YHWNe0jY09LFK3nyCoftmyLzxcRk1JLGNngw8MCIuisHTskhxSm/qlX7qjunoZnA3yy9behhy/YaFt4YzYZbMTivt2gszX5ktToaDqfxWDYdIa79kp8G68rYPeybelTS74LwbK3blXPI3I1nddkW52znHYLvW6BYyi+QQ5jPZLkiOC+AF0q+c4gYmPaLVN/mpMZjjmB
159 27b6df1b5adbdf647cf5c6675b40575e1b197c60 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlpmbwIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91W4BD/4h+y7QH7FkNcueOBrmdci7w1apkPX7KuknKxf8+FmA1QDGWYATnqD6IcAk3+f4reO4n9qc0y2BGrIz/pyTSIHvJW+ORrbPCKVrXlfUgkUK3TumtRObt8B75BVBBNaJ93r1yOALpo/K8wSwRrBF+Yl6aCoFiibUEbfcfaOAHVqZXKC1ZPtLRwq5NHIw0wWB0qNoAXj+FJV1EHO7SEjj2lXqw/r0HriQMdObWLgAb6QVUq7oVMpAumUeuQtZ169qHdqYfF1OLdCnsVBcwYEz/cBLC43bvYiwFxSkbAFyl656caWiwA3PISFSzP9Co0zWU/Qf8f7dTdAdT/orzCfUq8YoXqryfRSxi+8L8/EMxankzdW73Rx5X+0539pSq+gDDtTOyNuW6+CZwa5D84b31rsd+jTx8zVm3SRHRKsoGF2EEMQkWmDbhIFjX5W1fE84Ul3umypv+lPSvCPlQpIqv2hZmcTR12sgjdBjU8z+Zcq22SHFybqiYNmWpkVUtiMvTlHMoJfi5PI6xF8D2dxV4ErG+NflqdjaXydgnbO6D3/A1FCASig0wL4jMxSeRqnRRqLihN3VaGG2QH6MLJ+Ty6YuoonKtopw9JNOZydr/XN7K5LcjX1T3+31qmnHZyBXRSejWl9XN93IDbQcnMBWHkz/cJLN0kKu4pvnV8UGUcyXfA==
159 27b6df1b5adbdf647cf5c6675b40575e1b197c60 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlpmbwIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91W4BD/4h+y7QH7FkNcueOBrmdci7w1apkPX7KuknKxf8+FmA1QDGWYATnqD6IcAk3+f4reO4n9qc0y2BGrIz/pyTSIHvJW+ORrbPCKVrXlfUgkUK3TumtRObt8B75BVBBNaJ93r1yOALpo/K8wSwRrBF+Yl6aCoFiibUEbfcfaOAHVqZXKC1ZPtLRwq5NHIw0wWB0qNoAXj+FJV1EHO7SEjj2lXqw/r0HriQMdObWLgAb6QVUq7oVMpAumUeuQtZ169qHdqYfF1OLdCnsVBcwYEz/cBLC43bvYiwFxSkbAFyl656caWiwA3PISFSzP9Co0zWU/Qf8f7dTdAdT/orzCfUq8YoXqryfRSxi+8L8/EMxankzdW73Rx5X+0539pSq+gDDtTOyNuW6+CZwa5D84b31rsd+jTx8zVm3SRHRKsoGF2EEMQkWmDbhIFjX5W1fE84Ul3umypv+lPSvCPlQpIqv2hZmcTR12sgjdBjU8z+Zcq22SHFybqiYNmWpkVUtiMvTlHMoJfi5PI6xF8D2dxV4ErG+NflqdjaXydgnbO6D3/A1FCASig0wL4jMxSeRqnRRqLihN3VaGG2QH6MLJ+Ty6YuoonKtopw9JNOZydr/XN7K5LcjX1T3+31qmnHZyBXRSejWl9XN93IDbQcnMBWHkz/cJLN0kKu4pvnV8UGUcyXfA==
160 d334afc585e29577f271c5eda03378736a16ca6b 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlpzZuUQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TiDEADDD6Tn04UjgrZ36nAqOcHaG1ZT2Cm1/sbTw+6duAhf3+uKWFqi2bgcdCBkdfRH7KfEU0GNsPpiC6mzWw3PDWmGhnLJAkR+9FTBU0edK01hkNW8RelDTL5J9IzIGwrP4KFfcUue6yrxU8GnSxnf5Vy/N5ZZzLV/P3hdBte5We9PD5KHPAwTzzcZ9Wiog700rFDDChyFq7hNQ3H0GpknF6+Ck5XmJ3DOqt1MFHk9V4Z/ASU59cQXKOeaMChlBpTb1gIIWjOE99v5aY06dc1WlwttuHtCZvZgtAduRAB6XYWyniS/7nXBv0MXD3EWbpH1pkOaWUxw217HpNP4g9Yo3u/i8UW+NkSJOeXtC1CFjWmUNj138IhS1pogaiPPnIs+H6eOJsmnGhN2KbOMjA5Dn9vSTi6s/98TarfUSiwxA4L7fJy5qowFETftuBO0fJpbB8+ZtpnjNp0MMKed27OUSv69i6BmLrP+eqk+MVO6PovvIySlWAP9/REM/I5/mFkqoI+ruT4a9osNGDZ4Jqb382b7EmpEMDdgb7+ezsybgDfizuaTs/LBae7h79o1m30DxZ/EZ5C+2LY8twbGSORvZN4ViMVhIhWBTlOE/iVBOj807Y2OaUURcuLfHRmaCcfF1uIzg0uNB/aM/WSE0+AXh2IX+mipoTS3eh/V2EKldBHcOQ==
160 d334afc585e29577f271c5eda03378736a16ca6b 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlpzZuUQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TiDEADDD6Tn04UjgrZ36nAqOcHaG1ZT2Cm1/sbTw+6duAhf3+uKWFqi2bgcdCBkdfRH7KfEU0GNsPpiC6mzWw3PDWmGhnLJAkR+9FTBU0edK01hkNW8RelDTL5J9IzIGwrP4KFfcUue6yrxU8GnSxnf5Vy/N5ZZzLV/P3hdBte5We9PD5KHPAwTzzcZ9Wiog700rFDDChyFq7hNQ3H0GpknF6+Ck5XmJ3DOqt1MFHk9V4Z/ASU59cQXKOeaMChlBpTb1gIIWjOE99v5aY06dc1WlwttuHtCZvZgtAduRAB6XYWyniS/7nXBv0MXD3EWbpH1pkOaWUxw217HpNP4g9Yo3u/i8UW+NkSJOeXtC1CFjWmUNj138IhS1pogaiPPnIs+H6eOJsmnGhN2KbOMjA5Dn9vSTi6s/98TarfUSiwxA4L7fJy5qowFETftuBO0fJpbB8+ZtpnjNp0MMKed27OUSv69i6BmLrP+eqk+MVO6PovvIySlWAP9/REM/I5/mFkqoI+ruT4a9osNGDZ4Jqb382b7EmpEMDdgb7+ezsybgDfizuaTs/LBae7h79o1m30DxZ/EZ5C+2LY8twbGSORvZN4ViMVhIhWBTlOE/iVBOj807Y2OaUURcuLfHRmaCcfF1uIzg0uNB/aM/WSE0+AXh2IX+mipoTS3eh/V2EKldBHcOQ==
161 369aadf7a3264b03c8b09efce715bc41e6ab4a9b 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlqe5w8hHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrO1lUQAK6+S26rE3AMt6667ClT+ubPl+nNMRkWJXa8EyPplBUGTPdMheViOe+28dCsveJxqUF7A4TMLMA/eIj4cRIwmVbBaivfQKnG5GMZ+9N6j6oqE/OAJujdHzzZ3+o9KJGtRgJP2tzdY/6qkXwL3WN6KULz7pSkrKZLOiNfj4k2bf3bXeB7d3N5erxJYlhddlPBlHXImRkWiPR/bdaAaYJq+EEWCbia6MWXlSAqEjIgQi+ytuh/9Z+QSsJCsECDRqEExZClqHGkCLYhST99NqqdYCGJzAFMgh+xWxZxI0LO08pJxYctHGoHm+vvRVMfmdbxEydEy01H6jX+1e7Yq44bovIiIOkaXCTSuEBol+R5aPKJhgvqgZ5IlcTLoIYQBE3MZMKZ89NWy3TvgcNkQiOPCCkKs1+DukXKqTt62zOTxfa6mIZDCXdGai6vZBJ5b0yeEd3HV96yHb9dFlS5w1cG7prIBRv5BkqEaFbRMGZGV31Ri7BuVu0O68Pfdq+R+4A1YLdJ0H5DySe2dGlwE2DMKhdtVu1bie4UWHK10TphmqhBk6B9Ew2+tASCU7iczAqRzyzMLBTHIfCYO2R+5Yuh0CApt47KV23OcLje9nORyE2yaDTbVUPiXzdOnbRaCQf7eW5/1y/LLjG6OwtuETTcHKh7ruko+u7rFL96a4DNlNdk
162 8bba684efde7f45add05f737952093bb2aa07155 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlqe6dkhHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOJmIQALUVCoWUFYYaRxGH4OpmIQ2o1JrMefvarFhaPY1r3+G87sjXgw15uobEQDtoybTUYbcdSxJQT1KE1FOm3wU0VyN6PY9c1PMEAVgJlve0eDiXNNlBsoYMXnpq1HidZknkjpXgUPdE/LElxpJJRlJQZlS29bkGmEDZQBoOvlcZoBRDSYcbM07wn7d+1gmJkcHViDBMAbSrudfO0OYzDC1BjtGyKm7Mes2WB1yFYw+ySa8hF/xPKEDvoZINOE5n3PBJiCvPuTw3PqsHvWgKOA1Obx9fATlxj7EHBLfKBTNfpUwPMRSH1cmA+qUS9mRDrdLvrThwalr6D3r2RJ2ntOipcZpKMmxARRV+VUAI1K6H0/Ws3XAxENqhF7RgRruJFVq8G8EcHJLZEoVHsR+VOnd/pzgkFKS+tIsYYRcMpL0DdMF8pV3xrEFahgRhaEZOh4jsG3Z+sGLVFFl7DdMqeGs6m/TwDrvfuYtGczfGRB0wqu8KOwhR1BjNJKcr4lk35GKwSXmI1vk6Z1gAm0e13995lqbCJwkuOKynQlHWVOR6hu3ypvAgV/zXLF5t8HHtL48sOJ8a33THuJT4whbXSIb9BQXu/NQnNhK8G3Kly5UN88vL4a3sZi/Y86h4R2fKOSib/txJ3ydLbMeS8LlJMqeF/hrBanVF0r15NZ2CdmL1Qxim
@@ -1,173 +1,175 b''
1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
11 3a56574f329a368d645853e0f9e09472aee62349 0.8
11 3a56574f329a368d645853e0f9e09472aee62349 0.8
12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
61 6344043924497cd06d781d9014c66802285072e4 2.0.2
61 6344043924497cd06d781d9014c66802285072e4 2.0.2
62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
65 b9bd95e61b49c221c4cca24e6da7c946fc02f992 2.1.2
65 b9bd95e61b49c221c4cca24e6da7c946fc02f992 2.1.2
66 d9e2f09d5488c395ae9ddbb320ceacd24757e055 2.2-rc
66 d9e2f09d5488c395ae9ddbb320ceacd24757e055 2.2-rc
67 00182b3d087909e3c3ae44761efecdde8f319ef3 2.2
67 00182b3d087909e3c3ae44761efecdde8f319ef3 2.2
68 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 2.2.1
68 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 2.2.1
69 85a358df5bbbe404ca25730c9c459b34263441dc 2.2.2
69 85a358df5bbbe404ca25730c9c459b34263441dc 2.2.2
70 b013baa3898e117959984fc64c29d8c784d2f28b 2.2.3
70 b013baa3898e117959984fc64c29d8c784d2f28b 2.2.3
71 a06e2681dd1786e2354d84a5fa9c1c88dd4fa3e0 2.3-rc
71 a06e2681dd1786e2354d84a5fa9c1c88dd4fa3e0 2.3-rc
72 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 2.3
72 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 2.3
73 072209ae4ddb654eb2d5fd35bff358c738414432 2.3.1
73 072209ae4ddb654eb2d5fd35bff358c738414432 2.3.1
74 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 2.3.2
74 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 2.3.2
75 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 2.4-rc
75 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 2.4-rc
76 195ad823b5d58c68903a6153a25e3fb4ed25239d 2.4
76 195ad823b5d58c68903a6153a25e3fb4ed25239d 2.4
77 0c10cf8191469e7c3c8844922e17e71a176cb7cb 2.4.1
77 0c10cf8191469e7c3c8844922e17e71a176cb7cb 2.4.1
78 a4765077b65e6ae29ba42bab7834717b5072d5ba 2.4.2
78 a4765077b65e6ae29ba42bab7834717b5072d5ba 2.4.2
79 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 2.5-rc
79 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 2.5-rc
80 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 2.5
80 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 2.5
81 7511d4df752e61fe7ae4f3682e0a0008573b0402 2.5.1
81 7511d4df752e61fe7ae4f3682e0a0008573b0402 2.5.1
82 5b7175377babacce80a6c1e12366d8032a6d4340 2.5.2
82 5b7175377babacce80a6c1e12366d8032a6d4340 2.5.2
83 50c922c1b5145dab8baefefb0437d363b6a6c21c 2.5.3
83 50c922c1b5145dab8baefefb0437d363b6a6c21c 2.5.3
84 8a7bd2dccd44ed571afe7424cd7f95594f27c092 2.5.4
84 8a7bd2dccd44ed571afe7424cd7f95594f27c092 2.5.4
85 292cd385856d98bacb2c3086f8897bc660c2beea 2.6-rc
85 292cd385856d98bacb2c3086f8897bc660c2beea 2.6-rc
86 23f785b38af38d2fca6b8f3db56b8007a84cd73a 2.6
86 23f785b38af38d2fca6b8f3db56b8007a84cd73a 2.6
87 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 2.6.1
87 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 2.6.1
88 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 2.6.2
88 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 2.6.2
89 009794acc6e37a650f0fae37872e733382ac1c0c 2.6.3
89 009794acc6e37a650f0fae37872e733382ac1c0c 2.6.3
90 f0d7721d7322dcfb5af33599c2543f27335334bb 2.7-rc
90 f0d7721d7322dcfb5af33599c2543f27335334bb 2.7-rc
91 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 2.7
91 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 2.7
92 335a558f81dc73afeab4d7be63617392b130117f 2.7.1
92 335a558f81dc73afeab4d7be63617392b130117f 2.7.1
93 e7fa36d2ad3a7944a52dca126458d6f482db3524 2.7.2
93 e7fa36d2ad3a7944a52dca126458d6f482db3524 2.7.2
94 1596f2d8f2421314b1ddead8f7d0c91009358994 2.8-rc
94 1596f2d8f2421314b1ddead8f7d0c91009358994 2.8-rc
95 d825e4025e39d1c39db943cdc89818abd0a87c27 2.8
95 d825e4025e39d1c39db943cdc89818abd0a87c27 2.8
96 209e04a06467e2969c0cc6501335be0406d46ef0 2.8.1
96 209e04a06467e2969c0cc6501335be0406d46ef0 2.8.1
97 ca387377df7a3a67dbb90b6336b781cdadc3ef41 2.8.2
97 ca387377df7a3a67dbb90b6336b781cdadc3ef41 2.8.2
98 8862469e16f9236208581b20de5f96bd13cc039d 2.9-rc
98 8862469e16f9236208581b20de5f96bd13cc039d 2.9-rc
99 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 2.9
99 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 2.9
100 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 2.9.1
100 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 2.9.1
101 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 2.9.2
101 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 2.9.2
102 564f55b251224f16508dd1311452db7780dafe2b 3.0-rc
102 564f55b251224f16508dd1311452db7780dafe2b 3.0-rc
103 2195ac506c6ababe86985b932f4948837c0891b5 3.0
103 2195ac506c6ababe86985b932f4948837c0891b5 3.0
104 269c80ee5b3cb3684fa8edc61501b3506d02eb10 3.0.1
104 269c80ee5b3cb3684fa8edc61501b3506d02eb10 3.0.1
105 2d8cd3d0e83c7336c0cb45a9f88638363f993848 3.0.2
105 2d8cd3d0e83c7336c0cb45a9f88638363f993848 3.0.2
106 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 3.1-rc
106 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 3.1-rc
107 3178e49892020336491cdc6945885c4de26ffa8b 3.1
107 3178e49892020336491cdc6945885c4de26ffa8b 3.1
108 5dc91146f35369949ea56b40172308158b59063a 3.1.1
108 5dc91146f35369949ea56b40172308158b59063a 3.1.1
109 f768c888aaa68d12dd7f509dcc7f01c9584357d0 3.1.2
109 f768c888aaa68d12dd7f509dcc7f01c9584357d0 3.1.2
110 7f8d16af8cae246fa5a48e723d48d58b015aed94 3.2-rc
110 7f8d16af8cae246fa5a48e723d48d58b015aed94 3.2-rc
111 ced632394371a36953ce4d394f86278ae51a2aae 3.2
111 ced632394371a36953ce4d394f86278ae51a2aae 3.2
112 643c58303fb0ec020907af28b9e486be299ba043 3.2.1
112 643c58303fb0ec020907af28b9e486be299ba043 3.2.1
113 902554884335e5ca3661d63be9978eb4aec3f68a 3.2.2
113 902554884335e5ca3661d63be9978eb4aec3f68a 3.2.2
114 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 3.2.3
114 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 3.2.3
115 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 3.2.4
115 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 3.2.4
116 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 3.3-rc
116 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 3.3-rc
117 fbdd5195528fae4f41feebc1838215c110b25d6a 3.3
117 fbdd5195528fae4f41feebc1838215c110b25d6a 3.3
118 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 3.3.1
118 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 3.3.1
119 07a92bbd02e5e3a625e0820389b47786b02b2cea 3.3.2
119 07a92bbd02e5e3a625e0820389b47786b02b2cea 3.3.2
120 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 3.3.3
120 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 3.3.3
121 e89f909edffad558b56f4affa8239e4832f88de0 3.4-rc
121 e89f909edffad558b56f4affa8239e4832f88de0 3.4-rc
122 8cc6036bca532e06681c5a8fa37efaa812de67b5 3.4
122 8cc6036bca532e06681c5a8fa37efaa812de67b5 3.4
123 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 3.4.1
123 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 3.4.1
124 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 3.4.2
124 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 3.4.2
125 96a38d44ba093bd1d1ecfd34119e94056030278b 3.5-rc
125 96a38d44ba093bd1d1ecfd34119e94056030278b 3.5-rc
126 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 3.5
126 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 3.5
127 1a45e49a6bed023deb229102a8903234d18054d3 3.5.1
127 1a45e49a6bed023deb229102a8903234d18054d3 3.5.1
128 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 3.5.2
128 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 3.5.2
129 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 3.6-rc
129 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 3.6-rc
130 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 3.6
130 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 3.6
131 1aa5083cbebbe7575c88f3402ab377539b484897 3.6.1
131 1aa5083cbebbe7575c88f3402ab377539b484897 3.6.1
132 2d437a0f3355834a9485bbbeb30a52a052c98f19 3.6.2
132 2d437a0f3355834a9485bbbeb30a52a052c98f19 3.6.2
133 ea389970c08449440587712117f178d33bab3f1e 3.6.3
133 ea389970c08449440587712117f178d33bab3f1e 3.6.3
134 158bdc8965720ca4061f8f8d806563cfc7cdb62e 3.7-rc
134 158bdc8965720ca4061f8f8d806563cfc7cdb62e 3.7-rc
135 2408645de650d8a29a6ce9e7dce601d8dd0d1474 3.7
135 2408645de650d8a29a6ce9e7dce601d8dd0d1474 3.7
136 b698abf971e7377d9b7ec7fc8c52df45255b0329 3.7.1
136 b698abf971e7377d9b7ec7fc8c52df45255b0329 3.7.1
137 d493d64757eb45ada99fcb3693e479a51b7782da 3.7.2
137 d493d64757eb45ada99fcb3693e479a51b7782da 3.7.2
138 ae279d4a19e9683214cbd1fe8298cf0b50571432 3.7.3
138 ae279d4a19e9683214cbd1fe8298cf0b50571432 3.7.3
139 740156eedf2c450aee58b1a90b0e826f47c5da64 3.8-rc
139 740156eedf2c450aee58b1a90b0e826f47c5da64 3.8-rc
140 f85de28eae32e7d3064b1a1321309071bbaaa069 3.8
140 f85de28eae32e7d3064b1a1321309071bbaaa069 3.8
141 a56296f55a5e1038ea5016dace2076b693c28a56 3.8.1
141 a56296f55a5e1038ea5016dace2076b693c28a56 3.8.1
142 aaabed77791a75968a12b8c43ad263631a23ee81 3.8.2
142 aaabed77791a75968a12b8c43ad263631a23ee81 3.8.2
143 a9764ab80e11bcf6a37255db7dd079011f767c6c 3.8.3
143 a9764ab80e11bcf6a37255db7dd079011f767c6c 3.8.3
144 26a5d605b8683a292bb89aea11f37a81b06ac016 3.8.4
144 26a5d605b8683a292bb89aea11f37a81b06ac016 3.8.4
145 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 3.9-rc
145 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 3.9-rc
146 299546f84e68dbb9bd026f0f3a974ce4bdb93686 3.9
146 299546f84e68dbb9bd026f0f3a974ce4bdb93686 3.9
147 ccd436f7db6d5d7b9af89715179b911d031d44f1 3.9.1
147 ccd436f7db6d5d7b9af89715179b911d031d44f1 3.9.1
148 149433e68974eb5c63ccb03f794d8b57339a80c4 3.9.2
148 149433e68974eb5c63ccb03f794d8b57339a80c4 3.9.2
149 438173c415874f6ac653efc1099dec9c9150e90f 4.0-rc
149 438173c415874f6ac653efc1099dec9c9150e90f 4.0-rc
150 eab27446995210c334c3d06f1a659e3b9b5da769 4.0
150 eab27446995210c334c3d06f1a659e3b9b5da769 4.0
151 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 4.0.1
151 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 4.0.1
152 e69874dc1f4e142746ff3df91e678a09c6fc208c 4.0.2
152 e69874dc1f4e142746ff3df91e678a09c6fc208c 4.0.2
153 a1dd2c0c479e0550040542e392e87bc91262517e 4.1-rc
153 a1dd2c0c479e0550040542e392e87bc91262517e 4.1-rc
154 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 4.1
154 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 4.1
155 25703b624d27e3917d978af56d6ad59331e0464a 4.1.1
155 25703b624d27e3917d978af56d6ad59331e0464a 4.1.1
156 ed5b25874d998ababb181a939dd37a16ea644435 4.1.2
156 ed5b25874d998ababb181a939dd37a16ea644435 4.1.2
157 77eaf9539499a1b8be259ffe7ada787d07857f80 4.1.3
157 77eaf9539499a1b8be259ffe7ada787d07857f80 4.1.3
158 616e788321cc4ae9975b7f0c54c849f36d82182b 4.2-rc
158 616e788321cc4ae9975b7f0c54c849f36d82182b 4.2-rc
159 bb96d4a497432722623ae60d9bc734a1e360179e 4.2
159 bb96d4a497432722623ae60d9bc734a1e360179e 4.2
160 c850f0ed54c1d42f9aa079ad528f8127e5775217 4.2.1
160 c850f0ed54c1d42f9aa079ad528f8127e5775217 4.2.1
161 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 4.2.2
161 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 4.2.2
162 857876ebaed4e315f63157bd157d6ce553c7ab73 4.3-rc
162 857876ebaed4e315f63157bd157d6ce553c7ab73 4.3-rc
163 5544af8622863796a0027566f6b646e10d522c4c 4.3
163 5544af8622863796a0027566f6b646e10d522c4c 4.3
164 943c91326b23954e6e1c6960d0239511f9530258 4.2.3
164 943c91326b23954e6e1c6960d0239511f9530258 4.2.3
165 3fee7f7d2da04226914c2258cc2884dc27384fd7 4.3.1
165 3fee7f7d2da04226914c2258cc2884dc27384fd7 4.3.1
166 920977f72c7b70acfdaf56ab35360584d7845827 4.3.2
166 920977f72c7b70acfdaf56ab35360584d7845827 4.3.2
167 2f427b57bf9019c6dc3750baa539dc22c1be50f6 4.3.3
167 2f427b57bf9019c6dc3750baa539dc22c1be50f6 4.3.3
168 1e2454b60e5936f5e77498cab2648db469504487 4.4-rc
168 1e2454b60e5936f5e77498cab2648db469504487 4.4-rc
169 0ccb43d4cf01d013ae05917ec4f305509f851b2d 4.4
169 0ccb43d4cf01d013ae05917ec4f305509f851b2d 4.4
170 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 4.4.1
170 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 4.4.1
171 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 4.4.2
171 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 4.4.2
172 27b6df1b5adbdf647cf5c6675b40575e1b197c60 4.5-rc
172 27b6df1b5adbdf647cf5c6675b40575e1b197c60 4.5-rc
173 d334afc585e29577f271c5eda03378736a16ca6b 4.5
173 d334afc585e29577f271c5eda03378736a16ca6b 4.5
174 369aadf7a3264b03c8b09efce715bc41e6ab4a9b 4.5.1
175 8bba684efde7f45add05f737952093bb2aa07155 4.5.2
@@ -1,205 +1,205 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles extension: uisetup'''
9 '''setup for largefiles extension: uisetup'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13
13
14 from mercurial.hgweb import (
14 from mercurial.hgweb import (
15 hgweb_mod,
16 webcommands,
15 webcommands,
17 )
16 )
18
17
19 from mercurial import (
18 from mercurial import (
20 archival,
19 archival,
21 cmdutil,
20 cmdutil,
22 commands,
21 commands,
23 copies,
22 copies,
24 exchange,
23 exchange,
25 extensions,
24 extensions,
26 filemerge,
25 filemerge,
27 hg,
26 hg,
28 httppeer,
27 httppeer,
29 merge,
28 merge,
30 scmutil,
29 scmutil,
31 sshpeer,
30 sshpeer,
32 subrepo,
31 subrepo,
33 upgrade,
32 upgrade,
34 url,
33 url,
35 wireproto,
34 wireproto,
36 )
35 )
37
36
38 from . import (
37 from . import (
39 overrides,
38 overrides,
40 proto,
39 proto,
41 )
40 )
42
41
43 def uisetup(ui):
42 def uisetup(ui):
44 # Disable auto-status for some commands which assume that all
43 # Disable auto-status for some commands which assume that all
45 # files in the result are under Mercurial's control
44 # files in the result are under Mercurial's control
46
45
47 entry = extensions.wrapcommand(commands.table, 'add',
46 entry = extensions.wrapcommand(commands.table, 'add',
48 overrides.overrideadd)
47 overrides.overrideadd)
49 addopt = [('', 'large', None, _('add as largefile')),
48 addopt = [('', 'large', None, _('add as largefile')),
50 ('', 'normal', None, _('add as normal file')),
49 ('', 'normal', None, _('add as normal file')),
51 ('', 'lfsize', '', _('add all files above this size '
50 ('', 'lfsize', '', _('add all files above this size '
52 '(in megabytes) as largefiles '
51 '(in megabytes) as largefiles '
53 '(default: 10)'))]
52 '(default: 10)'))]
54 entry[1].extend(addopt)
53 entry[1].extend(addopt)
55
54
56 # The scmutil function is called both by the (trivial) addremove command,
55 # The scmutil function is called both by the (trivial) addremove command,
57 # and in the process of handling commit -A (issue3542)
56 # and in the process of handling commit -A (issue3542)
58 extensions.wrapfunction(scmutil, 'addremove', overrides.scmutiladdremove)
57 extensions.wrapfunction(scmutil, 'addremove', overrides.scmutiladdremove)
59 extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
58 extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
60 extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
59 extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
61 extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
60 extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
62
61
63 extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
62 extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
64
63
65 extensions.wrapfunction(upgrade, 'preservedrequirements',
64 extensions.wrapfunction(upgrade, 'preservedrequirements',
66 overrides.upgraderequirements)
65 overrides.upgraderequirements)
67
66
68 extensions.wrapfunction(upgrade, 'supporteddestrequirements',
67 extensions.wrapfunction(upgrade, 'supporteddestrequirements',
69 overrides.upgraderequirements)
68 overrides.upgraderequirements)
70
69
71 # Subrepos call status function
70 # Subrepos call status function
72 entry = extensions.wrapcommand(commands.table, 'status',
71 entry = extensions.wrapcommand(commands.table, 'status',
73 overrides.overridestatus)
72 overrides.overridestatus)
74 extensions.wrapfunction(subrepo.hgsubrepo, 'status',
73 extensions.wrapfunction(subrepo.hgsubrepo, 'status',
75 overrides.overridestatusfn)
74 overrides.overridestatusfn)
76
75
77 entry = extensions.wrapcommand(commands.table, 'log',
76 entry = extensions.wrapcommand(commands.table, 'log',
78 overrides.overridelog)
77 overrides.overridelog)
79 entry = extensions.wrapcommand(commands.table, 'rollback',
78 entry = extensions.wrapcommand(commands.table, 'rollback',
80 overrides.overriderollback)
79 overrides.overriderollback)
81 entry = extensions.wrapcommand(commands.table, 'verify',
80 entry = extensions.wrapcommand(commands.table, 'verify',
82 overrides.overrideverify)
81 overrides.overrideverify)
83
82
84 verifyopt = [('', 'large', None,
83 verifyopt = [('', 'large', None,
85 _('verify that all largefiles in current revision exists')),
84 _('verify that all largefiles in current revision exists')),
86 ('', 'lfa', None,
85 ('', 'lfa', None,
87 _('verify largefiles in all revisions, not just current')),
86 _('verify largefiles in all revisions, not just current')),
88 ('', 'lfc', None,
87 ('', 'lfc', None,
89 _('verify local largefile contents, not just existence'))]
88 _('verify local largefile contents, not just existence'))]
90 entry[1].extend(verifyopt)
89 entry[1].extend(verifyopt)
91
90
92 entry = extensions.wrapcommand(commands.table, 'debugstate',
91 entry = extensions.wrapcommand(commands.table, 'debugstate',
93 overrides.overridedebugstate)
92 overrides.overridedebugstate)
94 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
93 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
95 entry[1].extend(debugstateopt)
94 entry[1].extend(debugstateopt)
96
95
97 outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
96 outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
98 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
97 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
99 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
98 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
100 entry[1].extend(outgoingopt)
99 entry[1].extend(outgoingopt)
101 cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
100 cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
102 entry = extensions.wrapcommand(commands.table, 'summary',
101 entry = extensions.wrapcommand(commands.table, 'summary',
103 overrides.overridesummary)
102 overrides.overridesummary)
104 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
103 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
105 entry[1].extend(summaryopt)
104 entry[1].extend(summaryopt)
106 cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
105 cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
107
106
108 entry = extensions.wrapcommand(commands.table, 'pull',
107 entry = extensions.wrapcommand(commands.table, 'pull',
109 overrides.overridepull)
108 overrides.overridepull)
110 pullopt = [('', 'all-largefiles', None,
109 pullopt = [('', 'all-largefiles', None,
111 _('download all pulled versions of largefiles (DEPRECATED)')),
110 _('download all pulled versions of largefiles (DEPRECATED)')),
112 ('', 'lfrev', [],
111 ('', 'lfrev', [],
113 _('download largefiles for these revisions'), _('REV'))]
112 _('download largefiles for these revisions'), _('REV'))]
114 entry[1].extend(pullopt)
113 entry[1].extend(pullopt)
115
114
116 entry = extensions.wrapcommand(commands.table, 'push',
115 entry = extensions.wrapcommand(commands.table, 'push',
117 overrides.overridepush)
116 overrides.overridepush)
118 pushopt = [('', 'lfrev', [],
117 pushopt = [('', 'lfrev', [],
119 _('upload largefiles for these revisions'), _('REV'))]
118 _('upload largefiles for these revisions'), _('REV'))]
120 entry[1].extend(pushopt)
119 entry[1].extend(pushopt)
121 extensions.wrapfunction(exchange, 'pushoperation',
120 extensions.wrapfunction(exchange, 'pushoperation',
122 overrides.exchangepushoperation)
121 overrides.exchangepushoperation)
123
122
124 entry = extensions.wrapcommand(commands.table, 'clone',
123 entry = extensions.wrapcommand(commands.table, 'clone',
125 overrides.overrideclone)
124 overrides.overrideclone)
126 cloneopt = [('', 'all-largefiles', None,
125 cloneopt = [('', 'all-largefiles', None,
127 _('download all versions of all largefiles'))]
126 _('download all versions of all largefiles'))]
128 entry[1].extend(cloneopt)
127 entry[1].extend(cloneopt)
129 extensions.wrapfunction(hg, 'clone', overrides.hgclone)
128 extensions.wrapfunction(hg, 'clone', overrides.hgclone)
130 extensions.wrapfunction(hg, 'postshare', overrides.hgpostshare)
129 extensions.wrapfunction(hg, 'postshare', overrides.hgpostshare)
131
130
132 entry = extensions.wrapcommand(commands.table, 'cat',
131 entry = extensions.wrapcommand(commands.table, 'cat',
133 overrides.overridecat)
132 overrides.overridecat)
134 extensions.wrapfunction(merge, '_checkunknownfile',
133 extensions.wrapfunction(merge, '_checkunknownfile',
135 overrides.overridecheckunknownfile)
134 overrides.overridecheckunknownfile)
136 extensions.wrapfunction(merge, 'calculateupdates',
135 extensions.wrapfunction(merge, 'calculateupdates',
137 overrides.overridecalculateupdates)
136 overrides.overridecalculateupdates)
138 extensions.wrapfunction(merge, 'recordupdates',
137 extensions.wrapfunction(merge, 'recordupdates',
139 overrides.mergerecordupdates)
138 overrides.mergerecordupdates)
140 extensions.wrapfunction(merge, 'update', overrides.mergeupdate)
139 extensions.wrapfunction(merge, 'update', overrides.mergeupdate)
141 extensions.wrapfunction(filemerge, '_filemerge',
140 extensions.wrapfunction(filemerge, '_filemerge',
142 overrides.overridefilemerge)
141 overrides.overridefilemerge)
143 extensions.wrapfunction(cmdutil, 'copy', overrides.overridecopy)
142 extensions.wrapfunction(cmdutil, 'copy', overrides.overridecopy)
144
143
145 # Summary calls dirty on the subrepos
144 # Summary calls dirty on the subrepos
146 extensions.wrapfunction(subrepo.hgsubrepo, 'dirty', overrides.overridedirty)
145 extensions.wrapfunction(subrepo.hgsubrepo, 'dirty', overrides.overridedirty)
147
146
148 extensions.wrapfunction(cmdutil, 'revert', overrides.overriderevert)
147 extensions.wrapfunction(cmdutil, 'revert', overrides.overriderevert)
149
148
150 extensions.wrapcommand(commands.table, 'archive',
149 extensions.wrapcommand(commands.table, 'archive',
151 overrides.overridearchivecmd)
150 overrides.overridearchivecmd)
152 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
151 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
153 extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
152 extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
154 overrides.hgsubrepoarchive)
153 overrides.hgsubrepoarchive)
155 extensions.wrapfunction(webcommands, 'archive', overrides.hgwebarchive)
154 extensions.wrapfunction(webcommands, 'archive', overrides.hgwebarchive)
156 extensions.wrapfunction(cmdutil, 'bailifchanged',
155 extensions.wrapfunction(cmdutil, 'bailifchanged',
157 overrides.overridebailifchanged)
156 overrides.overridebailifchanged)
158
157
159 extensions.wrapfunction(cmdutil, 'postcommitstatus',
158 extensions.wrapfunction(cmdutil, 'postcommitstatus',
160 overrides.postcommitstatus)
159 overrides.postcommitstatus)
161 extensions.wrapfunction(scmutil, 'marktouched',
160 extensions.wrapfunction(scmutil, 'marktouched',
162 overrides.scmutilmarktouched)
161 overrides.scmutilmarktouched)
163
162
164 extensions.wrapfunction(url, 'open',
163 extensions.wrapfunction(url, 'open',
165 overrides.openlargefile)
164 overrides.openlargefile)
166
165
167 # create the new wireproto commands ...
166 # create the new wireproto commands ...
168 wireproto.wireprotocommand('putlfile', 'sha')(proto.putlfile)
167 wireproto.wireprotocommand('putlfile', 'sha')(proto.putlfile)
169 wireproto.wireprotocommand('getlfile', 'sha')(proto.getlfile)
168 wireproto.wireprotocommand('getlfile', 'sha')(proto.getlfile)
170 wireproto.wireprotocommand('statlfile', 'sha')(proto.statlfile)
169 wireproto.wireprotocommand('statlfile', 'sha')(proto.statlfile)
171 wireproto.wireprotocommand('lheads', '')(wireproto.heads)
170 wireproto.wireprotocommand('lheads', '')(wireproto.heads)
172
171
173 # ... and wrap some existing ones
172 # ... and wrap some existing ones
174 wireproto.commands['heads'].func = proto.heads
173 wireproto.commands['heads'].func = proto.heads
175
174
176 # make putlfile behave the same as push and {get,stat}lfile behave
175 # make putlfile behave the same as push and {get,stat}lfile behave
177 # the same as pull w.r.t. permissions checks
176 # the same as pull w.r.t. permissions checks
178 hgweb_mod.perms['putlfile'] = 'push'
177 wireproto.permissions['putlfile'] = 'push'
179 hgweb_mod.perms['getlfile'] = 'pull'
178 wireproto.permissions['getlfile'] = 'pull'
180 hgweb_mod.perms['statlfile'] = 'pull'
179 wireproto.permissions['statlfile'] = 'pull'
180 wireproto.permissions['lheads'] = 'pull'
181
181
182 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
182 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
183
183
184 extensions.wrapfunction(wireproto, '_capabilities', proto._capabilities)
184 extensions.wrapfunction(wireproto, '_capabilities', proto._capabilities)
185
185
186 # can't do this in reposetup because it needs to have happened before
186 # can't do this in reposetup because it needs to have happened before
187 # wirerepo.__init__ is called
187 # wirerepo.__init__ is called
188 proto.ssholdcallstream = sshpeer.sshv1peer._callstream
188 proto.ssholdcallstream = sshpeer.sshv1peer._callstream
189 proto.httpoldcallstream = httppeer.httppeer._callstream
189 proto.httpoldcallstream = httppeer.httppeer._callstream
190 sshpeer.sshv1peer._callstream = proto.sshrepocallstream
190 sshpeer.sshv1peer._callstream = proto.sshrepocallstream
191 httppeer.httppeer._callstream = proto.httprepocallstream
191 httppeer.httppeer._callstream = proto.httprepocallstream
192
192
193 # override some extensions' stuff as well
193 # override some extensions' stuff as well
194 for name, module in extensions.extensions():
194 for name, module in extensions.extensions():
195 if name == 'purge':
195 if name == 'purge':
196 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
196 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
197 overrides.overridepurge)
197 overrides.overridepurge)
198 if name == 'rebase':
198 if name == 'rebase':
199 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
199 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
200 overrides.overriderebase)
200 overrides.overriderebase)
201 extensions.wrapfunction(module, 'rebase',
201 extensions.wrapfunction(module, 'rebase',
202 overrides.overriderebase)
202 overrides.overriderebase)
203 if name == 'transplant':
203 if name == 'transplant':
204 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
204 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
205 overrides.overridetransplant)
205 overrides.overridetransplant)
@@ -1,1005 +1,1010 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 error,
24 error,
25 mdiff,
25 mdiff,
26 phases,
26 phases,
27 pycompat,
27 pycompat,
28 util,
28 util,
29 )
29 )
30
30
31 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
31 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
32 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
33 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34
34
35 # When narrowing is finalized and no longer subject to format changes,
35 # When narrowing is finalized and no longer subject to format changes,
36 # we should move this to just "narrow" or similar.
36 # we should move this to just "narrow" or similar.
37 NARROW_REQUIREMENT = 'narrowhg-experimental'
37 NARROW_REQUIREMENT = 'narrowhg-experimental'
38
38
39 readexactly = util.readexactly
39 readexactly = util.readexactly
40
40
41 def getchunk(stream):
41 def getchunk(stream):
42 """return the next chunk from stream as a string"""
42 """return the next chunk from stream as a string"""
43 d = readexactly(stream, 4)
43 d = readexactly(stream, 4)
44 l = struct.unpack(">l", d)[0]
44 l = struct.unpack(">l", d)[0]
45 if l <= 4:
45 if l <= 4:
46 if l:
46 if l:
47 raise error.Abort(_("invalid chunk length %d") % l)
47 raise error.Abort(_("invalid chunk length %d") % l)
48 return ""
48 return ""
49 return readexactly(stream, l - 4)
49 return readexactly(stream, l - 4)
50
50
51 def chunkheader(length):
51 def chunkheader(length):
52 """return a changegroup chunk header (string)"""
52 """return a changegroup chunk header (string)"""
53 return struct.pack(">l", length + 4)
53 return struct.pack(">l", length + 4)
54
54
55 def closechunk():
55 def closechunk():
56 """return a changegroup chunk header (string) for a zero-length chunk"""
56 """return a changegroup chunk header (string) for a zero-length chunk"""
57 return struct.pack(">l", 0)
57 return struct.pack(">l", 0)
58
58
59 def writechunks(ui, chunks, filename, vfs=None):
59 def writechunks(ui, chunks, filename, vfs=None):
60 """Write chunks to a file and return its filename.
60 """Write chunks to a file and return its filename.
61
61
62 The stream is assumed to be a bundle file.
62 The stream is assumed to be a bundle file.
63 Existing files will not be overwritten.
63 Existing files will not be overwritten.
64 If no filename is specified, a temporary file is created.
64 If no filename is specified, a temporary file is created.
65 """
65 """
66 fh = None
66 fh = None
67 cleanup = None
67 cleanup = None
68 try:
68 try:
69 if filename:
69 if filename:
70 if vfs:
70 if vfs:
71 fh = vfs.open(filename, "wb")
71 fh = vfs.open(filename, "wb")
72 else:
72 else:
73 # Increase default buffer size because default is usually
73 # Increase default buffer size because default is usually
74 # small (4k is common on Linux).
74 # small (4k is common on Linux).
75 fh = open(filename, "wb", 131072)
75 fh = open(filename, "wb", 131072)
76 else:
76 else:
77 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
77 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
78 fh = os.fdopen(fd, pycompat.sysstr("wb"))
78 fh = os.fdopen(fd, pycompat.sysstr("wb"))
79 cleanup = filename
79 cleanup = filename
80 for c in chunks:
80 for c in chunks:
81 fh.write(c)
81 fh.write(c)
82 cleanup = None
82 cleanup = None
83 return filename
83 return filename
84 finally:
84 finally:
85 if fh is not None:
85 if fh is not None:
86 fh.close()
86 fh.close()
87 if cleanup is not None:
87 if cleanup is not None:
88 if filename and vfs:
88 if filename and vfs:
89 vfs.unlink(cleanup)
89 vfs.unlink(cleanup)
90 else:
90 else:
91 os.unlink(cleanup)
91 os.unlink(cleanup)
92
92
93 class cg1unpacker(object):
93 class cg1unpacker(object):
94 """Unpacker for cg1 changegroup streams.
94 """Unpacker for cg1 changegroup streams.
95
95
96 A changegroup unpacker handles the framing of the revision data in
96 A changegroup unpacker handles the framing of the revision data in
97 the wire format. Most consumers will want to use the apply()
97 the wire format. Most consumers will want to use the apply()
98 method to add the changes from the changegroup to a repository.
98 method to add the changes from the changegroup to a repository.
99
99
100 If you're forwarding a changegroup unmodified to another consumer,
100 If you're forwarding a changegroup unmodified to another consumer,
101 use getchunks(), which returns an iterator of changegroup
101 use getchunks(), which returns an iterator of changegroup
102 chunks. This is mostly useful for cases where you need to know the
102 chunks. This is mostly useful for cases where you need to know the
103 data stream has ended by observing the end of the changegroup.
103 data stream has ended by observing the end of the changegroup.
104
104
105 deltachunk() is useful only if you're applying delta data. Most
105 deltachunk() is useful only if you're applying delta data. Most
106 consumers should prefer apply() instead.
106 consumers should prefer apply() instead.
107
107
108 A few other public methods exist. Those are used only for
108 A few other public methods exist. Those are used only for
109 bundlerepo and some debug commands - their use is discouraged.
109 bundlerepo and some debug commands - their use is discouraged.
110 """
110 """
111 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
111 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
112 deltaheadersize = struct.calcsize(deltaheader)
112 deltaheadersize = struct.calcsize(deltaheader)
113 version = '01'
113 version = '01'
114 _grouplistcount = 1 # One list of files after the manifests
114 _grouplistcount = 1 # One list of files after the manifests
115
115
116 def __init__(self, fh, alg, extras=None):
116 def __init__(self, fh, alg, extras=None):
117 if alg is None:
117 if alg is None:
118 alg = 'UN'
118 alg = 'UN'
119 if alg not in util.compengines.supportedbundletypes:
119 if alg not in util.compengines.supportedbundletypes:
120 raise error.Abort(_('unknown stream compression type: %s')
120 raise error.Abort(_('unknown stream compression type: %s')
121 % alg)
121 % alg)
122 if alg == 'BZ':
122 if alg == 'BZ':
123 alg = '_truncatedBZ'
123 alg = '_truncatedBZ'
124
124
125 compengine = util.compengines.forbundletype(alg)
125 compengine = util.compengines.forbundletype(alg)
126 self._stream = compengine.decompressorreader(fh)
126 self._stream = compengine.decompressorreader(fh)
127 self._type = alg
127 self._type = alg
128 self.extras = extras or {}
128 self.extras = extras or {}
129 self.callback = None
129 self.callback = None
130
130
131 # These methods (compressed, read, seek, tell) all appear to only
131 # These methods (compressed, read, seek, tell) all appear to only
132 # be used by bundlerepo, but it's a little hard to tell.
132 # be used by bundlerepo, but it's a little hard to tell.
133 def compressed(self):
133 def compressed(self):
134 return self._type is not None and self._type != 'UN'
134 return self._type is not None and self._type != 'UN'
135 def read(self, l):
135 def read(self, l):
136 return self._stream.read(l)
136 return self._stream.read(l)
137 def seek(self, pos):
137 def seek(self, pos):
138 return self._stream.seek(pos)
138 return self._stream.seek(pos)
139 def tell(self):
139 def tell(self):
140 return self._stream.tell()
140 return self._stream.tell()
141 def close(self):
141 def close(self):
142 return self._stream.close()
142 return self._stream.close()
143
143
144 def _chunklength(self):
144 def _chunklength(self):
145 d = readexactly(self._stream, 4)
145 d = readexactly(self._stream, 4)
146 l = struct.unpack(">l", d)[0]
146 l = struct.unpack(">l", d)[0]
147 if l <= 4:
147 if l <= 4:
148 if l:
148 if l:
149 raise error.Abort(_("invalid chunk length %d") % l)
149 raise error.Abort(_("invalid chunk length %d") % l)
150 return 0
150 return 0
151 if self.callback:
151 if self.callback:
152 self.callback()
152 self.callback()
153 return l - 4
153 return l - 4
154
154
155 def changelogheader(self):
155 def changelogheader(self):
156 """v10 does not have a changelog header chunk"""
156 """v10 does not have a changelog header chunk"""
157 return {}
157 return {}
158
158
159 def manifestheader(self):
159 def manifestheader(self):
160 """v10 does not have a manifest header chunk"""
160 """v10 does not have a manifest header chunk"""
161 return {}
161 return {}
162
162
163 def filelogheader(self):
163 def filelogheader(self):
164 """return the header of the filelogs chunk, v10 only has the filename"""
164 """return the header of the filelogs chunk, v10 only has the filename"""
165 l = self._chunklength()
165 l = self._chunklength()
166 if not l:
166 if not l:
167 return {}
167 return {}
168 fname = readexactly(self._stream, l)
168 fname = readexactly(self._stream, l)
169 return {'filename': fname}
169 return {'filename': fname}
170
170
171 def _deltaheader(self, headertuple, prevnode):
171 def _deltaheader(self, headertuple, prevnode):
172 node, p1, p2, cs = headertuple
172 node, p1, p2, cs = headertuple
173 if prevnode is None:
173 if prevnode is None:
174 deltabase = p1
174 deltabase = p1
175 else:
175 else:
176 deltabase = prevnode
176 deltabase = prevnode
177 flags = 0
177 flags = 0
178 return node, p1, p2, deltabase, cs, flags
178 return node, p1, p2, deltabase, cs, flags
179
179
180 def deltachunk(self, prevnode):
180 def deltachunk(self, prevnode):
181 l = self._chunklength()
181 l = self._chunklength()
182 if not l:
182 if not l:
183 return {}
183 return {}
184 headerdata = readexactly(self._stream, self.deltaheadersize)
184 headerdata = readexactly(self._stream, self.deltaheadersize)
185 header = struct.unpack(self.deltaheader, headerdata)
185 header = struct.unpack(self.deltaheader, headerdata)
186 delta = readexactly(self._stream, l - self.deltaheadersize)
186 delta = readexactly(self._stream, l - self.deltaheadersize)
187 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
187 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
188 return (node, p1, p2, cs, deltabase, delta, flags)
188 return (node, p1, p2, cs, deltabase, delta, flags)
189
189
190 def getchunks(self):
190 def getchunks(self):
191 """returns all the chunks contains in the bundle
191 """returns all the chunks contains in the bundle
192
192
193 Used when you need to forward the binary stream to a file or another
193 Used when you need to forward the binary stream to a file or another
194 network API. To do so, it parse the changegroup data, otherwise it will
194 network API. To do so, it parse the changegroup data, otherwise it will
195 block in case of sshrepo because it don't know the end of the stream.
195 block in case of sshrepo because it don't know the end of the stream.
196 """
196 """
197 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
197 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
198 # and a list of filelogs. For changegroup 3, we expect 4 parts:
198 # and a list of filelogs. For changegroup 3, we expect 4 parts:
199 # changelog, manifestlog, a list of tree manifestlogs, and a list of
199 # changelog, manifestlog, a list of tree manifestlogs, and a list of
200 # filelogs.
200 # filelogs.
201 #
201 #
202 # Changelog and manifestlog parts are terminated with empty chunks. The
202 # Changelog and manifestlog parts are terminated with empty chunks. The
203 # tree and file parts are a list of entry sections. Each entry section
203 # tree and file parts are a list of entry sections. Each entry section
204 # is a series of chunks terminating in an empty chunk. The list of these
204 # is a series of chunks terminating in an empty chunk. The list of these
205 # entry sections is terminated in yet another empty chunk, so we know
205 # entry sections is terminated in yet another empty chunk, so we know
206 # we've reached the end of the tree/file list when we reach an empty
206 # we've reached the end of the tree/file list when we reach an empty
207 # chunk that was proceeded by no non-empty chunks.
207 # chunk that was proceeded by no non-empty chunks.
208
208
209 parts = 0
209 parts = 0
210 while parts < 2 + self._grouplistcount:
210 while parts < 2 + self._grouplistcount:
211 noentries = True
211 noentries = True
212 while True:
212 while True:
213 chunk = getchunk(self)
213 chunk = getchunk(self)
214 if not chunk:
214 if not chunk:
215 # The first two empty chunks represent the end of the
215 # The first two empty chunks represent the end of the
216 # changelog and the manifestlog portions. The remaining
216 # changelog and the manifestlog portions. The remaining
217 # empty chunks represent either A) the end of individual
217 # empty chunks represent either A) the end of individual
218 # tree or file entries in the file list, or B) the end of
218 # tree or file entries in the file list, or B) the end of
219 # the entire list. It's the end of the entire list if there
219 # the entire list. It's the end of the entire list if there
220 # were no entries (i.e. noentries is True).
220 # were no entries (i.e. noentries is True).
221 if parts < 2:
221 if parts < 2:
222 parts += 1
222 parts += 1
223 elif noentries:
223 elif noentries:
224 parts += 1
224 parts += 1
225 break
225 break
226 noentries = False
226 noentries = False
227 yield chunkheader(len(chunk))
227 yield chunkheader(len(chunk))
228 pos = 0
228 pos = 0
229 while pos < len(chunk):
229 while pos < len(chunk):
230 next = pos + 2**20
230 next = pos + 2**20
231 yield chunk[pos:next]
231 yield chunk[pos:next]
232 pos = next
232 pos = next
233 yield closechunk()
233 yield closechunk()
234
234
235 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
235 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
236 # We know that we'll never have more manifests than we had
236 # We know that we'll never have more manifests than we had
237 # changesets.
237 # changesets.
238 self.callback = prog(_('manifests'), numchanges)
238 self.callback = prog(_('manifests'), numchanges)
239 # no need to check for empty manifest group here:
239 # no need to check for empty manifest group here:
240 # if the result of the merge of 1 and 2 is the same in 3 and 4,
240 # if the result of the merge of 1 and 2 is the same in 3 and 4,
241 # no new manifest will be created and the manifest group will
241 # no new manifest will be created and the manifest group will
242 # be empty during the pull
242 # be empty during the pull
243 self.manifestheader()
243 self.manifestheader()
244 deltas = self.deltaiter()
244 deltas = self.deltaiter()
245 repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
245 repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
246 repo.ui.progress(_('manifests'), None)
246 repo.ui.progress(_('manifests'), None)
247 self.callback = None
247 self.callback = None
248
248
249 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
249 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
250 expectedtotal=None):
250 expectedtotal=None):
251 """Add the changegroup returned by source.read() to this repo.
251 """Add the changegroup returned by source.read() to this repo.
252 srctype is a string like 'push', 'pull', or 'unbundle'. url is
252 srctype is a string like 'push', 'pull', or 'unbundle'. url is
253 the URL of the repo where this changegroup is coming from.
253 the URL of the repo where this changegroup is coming from.
254
254
255 Return an integer summarizing the change to this repo:
255 Return an integer summarizing the change to this repo:
256 - nothing changed or no source: 0
256 - nothing changed or no source: 0
257 - more heads than before: 1+added heads (2..n)
257 - more heads than before: 1+added heads (2..n)
258 - fewer heads than before: -1-removed heads (-2..-n)
258 - fewer heads than before: -1-removed heads (-2..-n)
259 - number of heads stays the same: 1
259 - number of heads stays the same: 1
260 """
260 """
261 repo = repo.unfiltered()
261 repo = repo.unfiltered()
262 def csmap(x):
262 def csmap(x):
263 repo.ui.debug("add changeset %s\n" % short(x))
263 repo.ui.debug("add changeset %s\n" % short(x))
264 return len(cl)
264 return len(cl)
265
265
266 def revmap(x):
266 def revmap(x):
267 return cl.rev(x)
267 return cl.rev(x)
268
268
269 changesets = files = revisions = 0
269 changesets = files = revisions = 0
270
270
271 try:
271 try:
272 # The transaction may already carry source information. In this
272 # The transaction may already carry source information. In this
273 # case we use the top level data. We overwrite the argument
273 # case we use the top level data. We overwrite the argument
274 # because we need to use the top level value (if they exist)
274 # because we need to use the top level value (if they exist)
275 # in this function.
275 # in this function.
276 srctype = tr.hookargs.setdefault('source', srctype)
276 srctype = tr.hookargs.setdefault('source', srctype)
277 url = tr.hookargs.setdefault('url', url)
277 url = tr.hookargs.setdefault('url', url)
278 repo.hook('prechangegroup',
278 repo.hook('prechangegroup',
279 throw=True, **pycompat.strkwargs(tr.hookargs))
279 throw=True, **pycompat.strkwargs(tr.hookargs))
280
280
281 # write changelog data to temp files so concurrent readers
281 # write changelog data to temp files so concurrent readers
282 # will not see an inconsistent view
282 # will not see an inconsistent view
283 cl = repo.changelog
283 cl = repo.changelog
284 cl.delayupdate(tr)
284 cl.delayupdate(tr)
285 oldheads = set(cl.heads())
285 oldheads = set(cl.heads())
286
286
287 trp = weakref.proxy(tr)
287 trp = weakref.proxy(tr)
288 # pull off the changeset group
288 # pull off the changeset group
289 repo.ui.status(_("adding changesets\n"))
289 repo.ui.status(_("adding changesets\n"))
290 clstart = len(cl)
290 clstart = len(cl)
291 class prog(object):
291 class prog(object):
292 def __init__(self, step, total):
292 def __init__(self, step, total):
293 self._step = step
293 self._step = step
294 self._total = total
294 self._total = total
295 self._count = 1
295 self._count = 1
296 def __call__(self):
296 def __call__(self):
297 repo.ui.progress(self._step, self._count, unit=_('chunks'),
297 repo.ui.progress(self._step, self._count, unit=_('chunks'),
298 total=self._total)
298 total=self._total)
299 self._count += 1
299 self._count += 1
300 self.callback = prog(_('changesets'), expectedtotal)
300 self.callback = prog(_('changesets'), expectedtotal)
301
301
302 efiles = set()
302 efiles = set()
303 def onchangelog(cl, node):
303 def onchangelog(cl, node):
304 efiles.update(cl.readfiles(node))
304 efiles.update(cl.readfiles(node))
305
305
306 self.changelogheader()
306 self.changelogheader()
307 deltas = self.deltaiter()
307 deltas = self.deltaiter()
308 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
308 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
309 efiles = len(efiles)
309 efiles = len(efiles)
310
310
311 if not cgnodes:
311 if not cgnodes:
312 repo.ui.develwarn('applied empty changegroup',
312 repo.ui.develwarn('applied empty changegroup',
313 config='warn-empty-changegroup')
313 config='warn-empty-changegroup')
314 clend = len(cl)
314 clend = len(cl)
315 changesets = clend - clstart
315 changesets = clend - clstart
316 repo.ui.progress(_('changesets'), None)
316 repo.ui.progress(_('changesets'), None)
317 self.callback = None
317 self.callback = None
318
318
319 # pull off the manifest group
319 # pull off the manifest group
320 repo.ui.status(_("adding manifests\n"))
320 repo.ui.status(_("adding manifests\n"))
321 self._unpackmanifests(repo, revmap, trp, prog, changesets)
321 self._unpackmanifests(repo, revmap, trp, prog, changesets)
322
322
323 needfiles = {}
323 needfiles = {}
324 if repo.ui.configbool('server', 'validate'):
324 if repo.ui.configbool('server', 'validate'):
325 cl = repo.changelog
325 cl = repo.changelog
326 ml = repo.manifestlog
326 ml = repo.manifestlog
327 # validate incoming csets have their manifests
327 # validate incoming csets have their manifests
328 for cset in xrange(clstart, clend):
328 for cset in xrange(clstart, clend):
329 mfnode = cl.changelogrevision(cset).manifest
329 mfnode = cl.changelogrevision(cset).manifest
330 mfest = ml[mfnode].readdelta()
330 mfest = ml[mfnode].readdelta()
331 # store file cgnodes we must see
331 # store file cgnodes we must see
332 for f, n in mfest.iteritems():
332 for f, n in mfest.iteritems():
333 needfiles.setdefault(f, set()).add(n)
333 needfiles.setdefault(f, set()).add(n)
334
334
335 # process the files
335 # process the files
336 repo.ui.status(_("adding file changes\n"))
336 repo.ui.status(_("adding file changes\n"))
337 newrevs, newfiles = _addchangegroupfiles(
337 newrevs, newfiles = _addchangegroupfiles(
338 repo, self, revmap, trp, efiles, needfiles)
338 repo, self, revmap, trp, efiles, needfiles)
339 revisions += newrevs
339 revisions += newrevs
340 files += newfiles
340 files += newfiles
341
341
342 deltaheads = 0
342 deltaheads = 0
343 if oldheads:
343 if oldheads:
344 heads = cl.heads()
344 heads = cl.heads()
345 deltaheads = len(heads) - len(oldheads)
345 deltaheads = len(heads) - len(oldheads)
346 for h in heads:
346 for h in heads:
347 if h not in oldheads and repo[h].closesbranch():
347 if h not in oldheads and repo[h].closesbranch():
348 deltaheads -= 1
348 deltaheads -= 1
349 htext = ""
349 htext = ""
350 if deltaheads:
350 if deltaheads:
351 htext = _(" (%+d heads)") % deltaheads
351 htext = _(" (%+d heads)") % deltaheads
352
352
353 repo.ui.status(_("added %d changesets"
353 repo.ui.status(_("added %d changesets"
354 " with %d changes to %d files%s\n")
354 " with %d changes to %d files%s\n")
355 % (changesets, revisions, files, htext))
355 % (changesets, revisions, files, htext))
356 repo.invalidatevolatilesets()
356 repo.invalidatevolatilesets()
357
357
358 if changesets > 0:
358 if changesets > 0:
359 if 'node' not in tr.hookargs:
359 if 'node' not in tr.hookargs:
360 tr.hookargs['node'] = hex(cl.node(clstart))
360 tr.hookargs['node'] = hex(cl.node(clstart))
361 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
361 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
362 hookargs = dict(tr.hookargs)
362 hookargs = dict(tr.hookargs)
363 else:
363 else:
364 hookargs = dict(tr.hookargs)
364 hookargs = dict(tr.hookargs)
365 hookargs['node'] = hex(cl.node(clstart))
365 hookargs['node'] = hex(cl.node(clstart))
366 hookargs['node_last'] = hex(cl.node(clend - 1))
366 hookargs['node_last'] = hex(cl.node(clend - 1))
367 repo.hook('pretxnchangegroup',
367 repo.hook('pretxnchangegroup',
368 throw=True, **pycompat.strkwargs(hookargs))
368 throw=True, **pycompat.strkwargs(hookargs))
369
369
370 added = [cl.node(r) for r in xrange(clstart, clend)]
370 added = [cl.node(r) for r in xrange(clstart, clend)]
371 phaseall = None
371 phaseall = None
372 if srctype in ('push', 'serve'):
372 if srctype in ('push', 'serve'):
373 # Old servers can not push the boundary themselves.
373 # Old servers can not push the boundary themselves.
374 # New servers won't push the boundary if changeset already
374 # New servers won't push the boundary if changeset already
375 # exists locally as secret
375 # exists locally as secret
376 #
376 #
377 # We should not use added here but the list of all change in
377 # We should not use added here but the list of all change in
378 # the bundle
378 # the bundle
379 if repo.publishing():
379 if repo.publishing():
380 targetphase = phaseall = phases.public
380 targetphase = phaseall = phases.public
381 else:
381 else:
382 # closer target phase computation
382 # closer target phase computation
383
383
384 # Those changesets have been pushed from the
384 # Those changesets have been pushed from the
385 # outside, their phases are going to be pushed
385 # outside, their phases are going to be pushed
386 # alongside. Therefor `targetphase` is
386 # alongside. Therefor `targetphase` is
387 # ignored.
387 # ignored.
388 targetphase = phaseall = phases.draft
388 targetphase = phaseall = phases.draft
389 if added:
389 if added:
390 phases.registernew(repo, tr, targetphase, added)
390 phases.registernew(repo, tr, targetphase, added)
391 if phaseall is not None:
391 if phaseall is not None:
392 phases.advanceboundary(repo, tr, phaseall, cgnodes)
392 phases.advanceboundary(repo, tr, phaseall, cgnodes)
393
393
394 if changesets > 0:
394 if changesets > 0:
395
395
396 def runhooks():
396 def runhooks():
397 # These hooks run when the lock releases, not when the
397 # These hooks run when the lock releases, not when the
398 # transaction closes. So it's possible for the changelog
398 # transaction closes. So it's possible for the changelog
399 # to have changed since we last saw it.
399 # to have changed since we last saw it.
400 if clstart >= len(repo):
400 if clstart >= len(repo):
401 return
401 return
402
402
403 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
403 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
404
404
405 for n in added:
405 for n in added:
406 args = hookargs.copy()
406 args = hookargs.copy()
407 args['node'] = hex(n)
407 args['node'] = hex(n)
408 del args['node_last']
408 del args['node_last']
409 repo.hook("incoming", **pycompat.strkwargs(args))
409 repo.hook("incoming", **pycompat.strkwargs(args))
410
410
411 newheads = [h for h in repo.heads()
411 newheads = [h for h in repo.heads()
412 if h not in oldheads]
412 if h not in oldheads]
413 repo.ui.log("incoming",
413 repo.ui.log("incoming",
414 "%d incoming changes - new heads: %s\n",
414 "%d incoming changes - new heads: %s\n",
415 len(added),
415 len(added),
416 ', '.join([hex(c[:6]) for c in newheads]))
416 ', '.join([hex(c[:6]) for c in newheads]))
417
417
418 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
418 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
419 lambda tr: repo._afterlock(runhooks))
419 lambda tr: repo._afterlock(runhooks))
420 finally:
420 finally:
421 repo.ui.flush()
421 repo.ui.flush()
422 # never return 0 here:
422 # never return 0 here:
423 if deltaheads < 0:
423 if deltaheads < 0:
424 ret = deltaheads - 1
424 ret = deltaheads - 1
425 else:
425 else:
426 ret = deltaheads + 1
426 ret = deltaheads + 1
427 return ret
427 return ret
428
428
429 def deltaiter(self):
429 def deltaiter(self):
430 """
430 """
431 returns an iterator of the deltas in this changegroup
431 returns an iterator of the deltas in this changegroup
432
432
433 Useful for passing to the underlying storage system to be stored.
433 Useful for passing to the underlying storage system to be stored.
434 """
434 """
435 chain = None
435 chain = None
436 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
436 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
437 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
437 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
438 yield chunkdata
438 yield chunkdata
439 chain = chunkdata[0]
439 chain = chunkdata[0]
440
440
441 class cg2unpacker(cg1unpacker):
441 class cg2unpacker(cg1unpacker):
442 """Unpacker for cg2 streams.
442 """Unpacker for cg2 streams.
443
443
444 cg2 streams add support for generaldelta, so the delta header
444 cg2 streams add support for generaldelta, so the delta header
445 format is slightly different. All other features about the data
445 format is slightly different. All other features about the data
446 remain the same.
446 remain the same.
447 """
447 """
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
449 deltaheadersize = struct.calcsize(deltaheader)
449 deltaheadersize = struct.calcsize(deltaheader)
450 version = '02'
450 version = '02'
451
451
452 def _deltaheader(self, headertuple, prevnode):
452 def _deltaheader(self, headertuple, prevnode):
453 node, p1, p2, deltabase, cs = headertuple
453 node, p1, p2, deltabase, cs = headertuple
454 flags = 0
454 flags = 0
455 return node, p1, p2, deltabase, cs, flags
455 return node, p1, p2, deltabase, cs, flags
456
456
457 class cg3unpacker(cg2unpacker):
457 class cg3unpacker(cg2unpacker):
458 """Unpacker for cg3 streams.
458 """Unpacker for cg3 streams.
459
459
460 cg3 streams add support for exchanging treemanifests and revlog
460 cg3 streams add support for exchanging treemanifests and revlog
461 flags. It adds the revlog flags to the delta header and an empty chunk
461 flags. It adds the revlog flags to the delta header and an empty chunk
462 separating manifests and files.
462 separating manifests and files.
463 """
463 """
464 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
464 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
465 deltaheadersize = struct.calcsize(deltaheader)
465 deltaheadersize = struct.calcsize(deltaheader)
466 version = '03'
466 version = '03'
467 _grouplistcount = 2 # One list of manifests and one list of files
467 _grouplistcount = 2 # One list of manifests and one list of files
468
468
469 def _deltaheader(self, headertuple, prevnode):
469 def _deltaheader(self, headertuple, prevnode):
470 node, p1, p2, deltabase, cs, flags = headertuple
470 node, p1, p2, deltabase, cs, flags = headertuple
471 return node, p1, p2, deltabase, cs, flags
471 return node, p1, p2, deltabase, cs, flags
472
472
473 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
473 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
474 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
474 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
475 numchanges)
475 numchanges)
476 for chunkdata in iter(self.filelogheader, {}):
476 for chunkdata in iter(self.filelogheader, {}):
477 # If we get here, there are directory manifests in the changegroup
477 # If we get here, there are directory manifests in the changegroup
478 d = chunkdata["filename"]
478 d = chunkdata["filename"]
479 repo.ui.debug("adding %s revisions\n" % d)
479 repo.ui.debug("adding %s revisions\n" % d)
480 dirlog = repo.manifestlog._revlog.dirlog(d)
480 dirlog = repo.manifestlog._revlog.dirlog(d)
481 deltas = self.deltaiter()
481 deltas = self.deltaiter()
482 if not dirlog.addgroup(deltas, revmap, trp):
482 if not dirlog.addgroup(deltas, revmap, trp):
483 raise error.Abort(_("received dir revlog group is empty"))
483 raise error.Abort(_("received dir revlog group is empty"))
484
484
485 class headerlessfixup(object):
485 class headerlessfixup(object):
486 def __init__(self, fh, h):
486 def __init__(self, fh, h):
487 self._h = h
487 self._h = h
488 self._fh = fh
488 self._fh = fh
489 def read(self, n):
489 def read(self, n):
490 if self._h:
490 if self._h:
491 d, self._h = self._h[:n], self._h[n:]
491 d, self._h = self._h[:n], self._h[n:]
492 if len(d) < n:
492 if len(d) < n:
493 d += readexactly(self._fh, n - len(d))
493 d += readexactly(self._fh, n - len(d))
494 return d
494 return d
495 return readexactly(self._fh, n)
495 return readexactly(self._fh, n)
496
496
497 class cg1packer(object):
497 class cg1packer(object):
498 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
498 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
499 version = '01'
499 version = '01'
500 def __init__(self, repo, bundlecaps=None):
500 def __init__(self, repo, bundlecaps=None):
501 """Given a source repo, construct a bundler.
501 """Given a source repo, construct a bundler.
502
502
503 bundlecaps is optional and can be used to specify the set of
503 bundlecaps is optional and can be used to specify the set of
504 capabilities which can be used to build the bundle. While bundlecaps is
504 capabilities which can be used to build the bundle. While bundlecaps is
505 unused in core Mercurial, extensions rely on this feature to communicate
505 unused in core Mercurial, extensions rely on this feature to communicate
506 capabilities to customize the changegroup packer.
506 capabilities to customize the changegroup packer.
507 """
507 """
508 # Set of capabilities we can use to build the bundle.
508 # Set of capabilities we can use to build the bundle.
509 if bundlecaps is None:
509 if bundlecaps is None:
510 bundlecaps = set()
510 bundlecaps = set()
511 self._bundlecaps = bundlecaps
511 self._bundlecaps = bundlecaps
512 # experimental config: bundle.reorder
512 # experimental config: bundle.reorder
513 reorder = repo.ui.config('bundle', 'reorder')
513 reorder = repo.ui.config('bundle', 'reorder')
514 if reorder == 'auto':
514 if reorder == 'auto':
515 reorder = None
515 reorder = None
516 else:
516 else:
517 reorder = util.parsebool(reorder)
517 reorder = util.parsebool(reorder)
518 self._repo = repo
518 self._repo = repo
519 self._reorder = reorder
519 self._reorder = reorder
520 self._progress = repo.ui.progress
520 self._progress = repo.ui.progress
521 if self._repo.ui.verbose and not self._repo.ui.debugflag:
521 if self._repo.ui.verbose and not self._repo.ui.debugflag:
522 self._verbosenote = self._repo.ui.note
522 self._verbosenote = self._repo.ui.note
523 else:
523 else:
524 self._verbosenote = lambda s: None
524 self._verbosenote = lambda s: None
525
525
526 def close(self):
526 def close(self):
527 return closechunk()
527 return closechunk()
528
528
529 def fileheader(self, fname):
529 def fileheader(self, fname):
530 return chunkheader(len(fname)) + fname
530 return chunkheader(len(fname)) + fname
531
531
532 # Extracted both for clarity and for overriding in extensions.
532 # Extracted both for clarity and for overriding in extensions.
533 def _sortgroup(self, revlog, nodelist, lookup):
533 def _sortgroup(self, revlog, nodelist, lookup):
534 """Sort nodes for change group and turn them into revnums."""
534 """Sort nodes for change group and turn them into revnums."""
535 # for generaldelta revlogs, we linearize the revs; this will both be
535 # for generaldelta revlogs, we linearize the revs; this will both be
536 # much quicker and generate a much smaller bundle
536 # much quicker and generate a much smaller bundle
537 if (revlog._generaldelta and self._reorder is None) or self._reorder:
537 if (revlog._generaldelta and self._reorder is None) or self._reorder:
538 dag = dagutil.revlogdag(revlog)
538 dag = dagutil.revlogdag(revlog)
539 return dag.linearize(set(revlog.rev(n) for n in nodelist))
539 return dag.linearize(set(revlog.rev(n) for n in nodelist))
540 else:
540 else:
541 return sorted([revlog.rev(n) for n in nodelist])
541 return sorted([revlog.rev(n) for n in nodelist])
542
542
543 def group(self, nodelist, revlog, lookup, units=None):
543 def group(self, nodelist, revlog, lookup, units=None):
544 """Calculate a delta group, yielding a sequence of changegroup chunks
544 """Calculate a delta group, yielding a sequence of changegroup chunks
545 (strings).
545 (strings).
546
546
547 Given a list of changeset revs, return a set of deltas and
547 Given a list of changeset revs, return a set of deltas and
548 metadata corresponding to nodes. The first delta is
548 metadata corresponding to nodes. The first delta is
549 first parent(nodelist[0]) -> nodelist[0], the receiver is
549 first parent(nodelist[0]) -> nodelist[0], the receiver is
550 guaranteed to have this parent as it has all history before
550 guaranteed to have this parent as it has all history before
551 these changesets. In the case firstparent is nullrev the
551 these changesets. In the case firstparent is nullrev the
552 changegroup starts with a full revision.
552 changegroup starts with a full revision.
553
553
554 If units is not None, progress detail will be generated, units specifies
554 If units is not None, progress detail will be generated, units specifies
555 the type of revlog that is touched (changelog, manifest, etc.).
555 the type of revlog that is touched (changelog, manifest, etc.).
556 """
556 """
557 # if we don't have any revisions touched by these changesets, bail
557 # if we don't have any revisions touched by these changesets, bail
558 if len(nodelist) == 0:
558 if len(nodelist) == 0:
559 yield self.close()
559 yield self.close()
560 return
560 return
561
561
562 revs = self._sortgroup(revlog, nodelist, lookup)
562 revs = self._sortgroup(revlog, nodelist, lookup)
563
563
564 # add the parent of the first rev
564 # add the parent of the first rev
565 p = revlog.parentrevs(revs[0])[0]
565 p = revlog.parentrevs(revs[0])[0]
566 revs.insert(0, p)
566 revs.insert(0, p)
567
567
568 # build deltas
568 # build deltas
569 total = len(revs) - 1
569 total = len(revs) - 1
570 msgbundling = _('bundling')
570 msgbundling = _('bundling')
571 for r in xrange(len(revs) - 1):
571 for r in xrange(len(revs) - 1):
572 if units is not None:
572 if units is not None:
573 self._progress(msgbundling, r + 1, unit=units, total=total)
573 self._progress(msgbundling, r + 1, unit=units, total=total)
574 prev, curr = revs[r], revs[r + 1]
574 prev, curr = revs[r], revs[r + 1]
575 linknode = lookup(revlog.node(curr))
575 linknode = lookup(revlog.node(curr))
576 for c in self.revchunk(revlog, curr, prev, linknode):
576 for c in self.revchunk(revlog, curr, prev, linknode):
577 yield c
577 yield c
578
578
579 if units is not None:
579 if units is not None:
580 self._progress(msgbundling, None)
580 self._progress(msgbundling, None)
581 yield self.close()
581 yield self.close()
582
582
583 # filter any nodes that claim to be part of the known set
583 # filter any nodes that claim to be part of the known set
584 def prune(self, revlog, missing, commonrevs):
584 def prune(self, revlog, missing, commonrevs):
585 rr, rl = revlog.rev, revlog.linkrev
585 rr, rl = revlog.rev, revlog.linkrev
586 return [n for n in missing if rl(rr(n)) not in commonrevs]
586 return [n for n in missing if rl(rr(n)) not in commonrevs]
587
587
588 def _packmanifests(self, dir, mfnodes, lookuplinknode):
588 def _packmanifests(self, dir, mfnodes, lookuplinknode):
589 """Pack flat manifests into a changegroup stream."""
589 """Pack flat manifests into a changegroup stream."""
590 assert not dir
590 assert not dir
591 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
591 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
592 lookuplinknode, units=_('manifests')):
592 lookuplinknode, units=_('manifests')):
593 yield chunk
593 yield chunk
594
594
595 def _manifestsdone(self):
595 def _manifestsdone(self):
596 return ''
596 return ''
597
597
598 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
598 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
599 '''yield a sequence of changegroup chunks (strings)'''
599 '''yield a sequence of changegroup chunks (strings)'''
600 repo = self._repo
600 repo = self._repo
601 cl = repo.changelog
601 cl = repo.changelog
602
602
603 clrevorder = {}
603 clrevorder = {}
604 mfs = {} # needed manifests
604 mfs = {} # needed manifests
605 fnodes = {} # needed file nodes
605 fnodes = {} # needed file nodes
606 changedfiles = set()
606 changedfiles = set()
607
607
608 # Callback for the changelog, used to collect changed files and manifest
608 # Callback for the changelog, used to collect changed files and manifest
609 # nodes.
609 # nodes.
610 # Returns the linkrev node (identity in the changelog case).
610 # Returns the linkrev node (identity in the changelog case).
611 def lookupcl(x):
611 def lookupcl(x):
612 c = cl.read(x)
612 c = cl.read(x)
613 clrevorder[x] = len(clrevorder)
613 clrevorder[x] = len(clrevorder)
614 n = c[0]
614 n = c[0]
615 # record the first changeset introducing this manifest version
615 # record the first changeset introducing this manifest version
616 mfs.setdefault(n, x)
616 mfs.setdefault(n, x)
617 # Record a complete list of potentially-changed files in
617 # Record a complete list of potentially-changed files in
618 # this manifest.
618 # this manifest.
619 changedfiles.update(c[3])
619 changedfiles.update(c[3])
620 return x
620 return x
621
621
622 self._verbosenote(_('uncompressed size of bundle content:\n'))
622 self._verbosenote(_('uncompressed size of bundle content:\n'))
623 size = 0
623 size = 0
624 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
624 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
625 size += len(chunk)
625 size += len(chunk)
626 yield chunk
626 yield chunk
627 self._verbosenote(_('%8.i (changelog)\n') % size)
627 self._verbosenote(_('%8.i (changelog)\n') % size)
628
628
629 # We need to make sure that the linkrev in the changegroup refers to
629 # We need to make sure that the linkrev in the changegroup refers to
630 # the first changeset that introduced the manifest or file revision.
630 # the first changeset that introduced the manifest or file revision.
631 # The fastpath is usually safer than the slowpath, because the filelogs
631 # The fastpath is usually safer than the slowpath, because the filelogs
632 # are walked in revlog order.
632 # are walked in revlog order.
633 #
633 #
634 # When taking the slowpath with reorder=None and the manifest revlog
634 # When taking the slowpath with reorder=None and the manifest revlog
635 # uses generaldelta, the manifest may be walked in the "wrong" order.
635 # uses generaldelta, the manifest may be walked in the "wrong" order.
636 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
636 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
637 # cc0ff93d0c0c).
637 # cc0ff93d0c0c).
638 #
638 #
639 # When taking the fastpath, we are only vulnerable to reordering
639 # When taking the fastpath, we are only vulnerable to reordering
640 # of the changelog itself. The changelog never uses generaldelta, so
640 # of the changelog itself. The changelog never uses generaldelta, so
641 # it is only reordered when reorder=True. To handle this case, we
641 # it is only reordered when reorder=True. To handle this case, we
642 # simply take the slowpath, which already has the 'clrevorder' logic.
642 # simply take the slowpath, which already has the 'clrevorder' logic.
643 # This was also fixed in cc0ff93d0c0c.
643 # This was also fixed in cc0ff93d0c0c.
644 fastpathlinkrev = fastpathlinkrev and not self._reorder
644 fastpathlinkrev = fastpathlinkrev and not self._reorder
645 # Treemanifests don't work correctly with fastpathlinkrev
645 # Treemanifests don't work correctly with fastpathlinkrev
646 # either, because we don't discover which directory nodes to
646 # either, because we don't discover which directory nodes to
647 # send along with files. This could probably be fixed.
647 # send along with files. This could probably be fixed.
648 fastpathlinkrev = fastpathlinkrev and (
648 fastpathlinkrev = fastpathlinkrev and (
649 'treemanifest' not in repo.requirements)
649 'treemanifest' not in repo.requirements)
650
650
651 for chunk in self.generatemanifests(commonrevs, clrevorder,
651 for chunk in self.generatemanifests(commonrevs, clrevorder,
652 fastpathlinkrev, mfs, fnodes, source):
652 fastpathlinkrev, mfs, fnodes, source):
653 yield chunk
653 yield chunk
654 mfs.clear()
654 mfs.clear()
655 clrevs = set(cl.rev(x) for x in clnodes)
655 clrevs = set(cl.rev(x) for x in clnodes)
656
656
657 if not fastpathlinkrev:
657 if not fastpathlinkrev:
658 def linknodes(unused, fname):
658 def linknodes(unused, fname):
659 return fnodes.get(fname, {})
659 return fnodes.get(fname, {})
660 else:
660 else:
661 cln = cl.node
661 cln = cl.node
662 def linknodes(filerevlog, fname):
662 def linknodes(filerevlog, fname):
663 llr = filerevlog.linkrev
663 llr = filerevlog.linkrev
664 fln = filerevlog.node
664 fln = filerevlog.node
665 revs = ((r, llr(r)) for r in filerevlog)
665 revs = ((r, llr(r)) for r in filerevlog)
666 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
666 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
667
667
668 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
668 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
669 source):
669 source):
670 yield chunk
670 yield chunk
671
671
672 yield self.close()
672 yield self.close()
673
673
674 if clnodes:
674 if clnodes:
675 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
675 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
676
676
677 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
677 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
678 fnodes, source):
678 fnodes, source):
679 """Returns an iterator of changegroup chunks containing manifests.
679 """Returns an iterator of changegroup chunks containing manifests.
680
680
681 `source` is unused here, but is used by extensions like remotefilelog to
681 `source` is unused here, but is used by extensions like remotefilelog to
682 change what is sent based in pulls vs pushes, etc.
682 change what is sent based in pulls vs pushes, etc.
683 """
683 """
684 repo = self._repo
684 repo = self._repo
685 mfl = repo.manifestlog
685 mfl = repo.manifestlog
686 dirlog = mfl._revlog.dirlog
686 dirlog = mfl._revlog.dirlog
687 tmfnodes = {'': mfs}
687 tmfnodes = {'': mfs}
688
688
689 # Callback for the manifest, used to collect linkrevs for filelog
689 # Callback for the manifest, used to collect linkrevs for filelog
690 # revisions.
690 # revisions.
691 # Returns the linkrev node (collected in lookupcl).
691 # Returns the linkrev node (collected in lookupcl).
692 def makelookupmflinknode(dir, nodes):
692 def makelookupmflinknode(dir, nodes):
693 if fastpathlinkrev:
693 if fastpathlinkrev:
694 assert not dir
694 assert not dir
695 return mfs.__getitem__
695 return mfs.__getitem__
696
696
697 def lookupmflinknode(x):
697 def lookupmflinknode(x):
698 """Callback for looking up the linknode for manifests.
698 """Callback for looking up the linknode for manifests.
699
699
700 Returns the linkrev node for the specified manifest.
700 Returns the linkrev node for the specified manifest.
701
701
702 SIDE EFFECT:
702 SIDE EFFECT:
703
703
704 1) fclnodes gets populated with the list of relevant
704 1) fclnodes gets populated with the list of relevant
705 file nodes if we're not using fastpathlinkrev
705 file nodes if we're not using fastpathlinkrev
706 2) When treemanifests are in use, collects treemanifest nodes
706 2) When treemanifests are in use, collects treemanifest nodes
707 to send
707 to send
708
708
709 Note that this means manifests must be completely sent to
709 Note that this means manifests must be completely sent to
710 the client before you can trust the list of files and
710 the client before you can trust the list of files and
711 treemanifests to send.
711 treemanifests to send.
712 """
712 """
713 clnode = nodes[x]
713 clnode = nodes[x]
714 mdata = mfl.get(dir, x).readfast(shallow=True)
714 mdata = mfl.get(dir, x).readfast(shallow=True)
715 for p, n, fl in mdata.iterentries():
715 for p, n, fl in mdata.iterentries():
716 if fl == 't': # subdirectory manifest
716 if fl == 't': # subdirectory manifest
717 subdir = dir + p + '/'
717 subdir = dir + p + '/'
718 tmfclnodes = tmfnodes.setdefault(subdir, {})
718 tmfclnodes = tmfnodes.setdefault(subdir, {})
719 tmfclnode = tmfclnodes.setdefault(n, clnode)
719 tmfclnode = tmfclnodes.setdefault(n, clnode)
720 if clrevorder[clnode] < clrevorder[tmfclnode]:
720 if clrevorder[clnode] < clrevorder[tmfclnode]:
721 tmfclnodes[n] = clnode
721 tmfclnodes[n] = clnode
722 else:
722 else:
723 f = dir + p
723 f = dir + p
724 fclnodes = fnodes.setdefault(f, {})
724 fclnodes = fnodes.setdefault(f, {})
725 fclnode = fclnodes.setdefault(n, clnode)
725 fclnode = fclnodes.setdefault(n, clnode)
726 if clrevorder[clnode] < clrevorder[fclnode]:
726 if clrevorder[clnode] < clrevorder[fclnode]:
727 fclnodes[n] = clnode
727 fclnodes[n] = clnode
728 return clnode
728 return clnode
729 return lookupmflinknode
729 return lookupmflinknode
730
730
731 size = 0
731 size = 0
732 while tmfnodes:
732 while tmfnodes:
733 dir, nodes = tmfnodes.popitem()
733 dir, nodes = tmfnodes.popitem()
734 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
734 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
735 if not dir or prunednodes:
735 if not dir or prunednodes:
736 for x in self._packmanifests(dir, prunednodes,
736 for x in self._packmanifests(dir, prunednodes,
737 makelookupmflinknode(dir, nodes)):
737 makelookupmflinknode(dir, nodes)):
738 size += len(x)
738 size += len(x)
739 yield x
739 yield x
740 self._verbosenote(_('%8.i (manifests)\n') % size)
740 self._verbosenote(_('%8.i (manifests)\n') % size)
741 yield self._manifestsdone()
741 yield self._manifestsdone()
742
742
743 # The 'source' parameter is useful for extensions
743 # The 'source' parameter is useful for extensions
744 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
744 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
745 repo = self._repo
745 repo = self._repo
746 progress = self._progress
746 progress = self._progress
747 msgbundling = _('bundling')
747 msgbundling = _('bundling')
748
748
749 total = len(changedfiles)
749 total = len(changedfiles)
750 # for progress output
750 # for progress output
751 msgfiles = _('files')
751 msgfiles = _('files')
752 for i, fname in enumerate(sorted(changedfiles)):
752 for i, fname in enumerate(sorted(changedfiles)):
753 filerevlog = repo.file(fname)
753 filerevlog = repo.file(fname)
754 if not filerevlog:
754 if not filerevlog:
755 raise error.Abort(_("empty or missing revlog for %s") % fname)
755 raise error.Abort(_("empty or missing revlog for %s") % fname)
756
756
757 linkrevnodes = linknodes(filerevlog, fname)
757 linkrevnodes = linknodes(filerevlog, fname)
758 # Lookup for filenodes, we collected the linkrev nodes above in the
758 # Lookup for filenodes, we collected the linkrev nodes above in the
759 # fastpath case and with lookupmf in the slowpath case.
759 # fastpath case and with lookupmf in the slowpath case.
760 def lookupfilelog(x):
760 def lookupfilelog(x):
761 return linkrevnodes[x]
761 return linkrevnodes[x]
762
762
763 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
763 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
764 if filenodes:
764 if filenodes:
765 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
765 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
766 total=total)
766 total=total)
767 h = self.fileheader(fname)
767 h = self.fileheader(fname)
768 size = len(h)
768 size = len(h)
769 yield h
769 yield h
770 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
770 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
771 size += len(chunk)
771 size += len(chunk)
772 yield chunk
772 yield chunk
773 self._verbosenote(_('%8.i %s\n') % (size, fname))
773 self._verbosenote(_('%8.i %s\n') % (size, fname))
774 progress(msgbundling, None)
774 progress(msgbundling, None)
775
775
776 def deltaparent(self, revlog, rev, p1, p2, prev):
776 def deltaparent(self, revlog, rev, p1, p2, prev):
777 if not revlog.candelta(prev, rev):
778 raise error.ProgrammingError('cg1 should not be used in this case')
777 return prev
779 return prev
778
780
779 def revchunk(self, revlog, rev, prev, linknode):
781 def revchunk(self, revlog, rev, prev, linknode):
780 node = revlog.node(rev)
782 node = revlog.node(rev)
781 p1, p2 = revlog.parentrevs(rev)
783 p1, p2 = revlog.parentrevs(rev)
782 base = self.deltaparent(revlog, rev, p1, p2, prev)
784 base = self.deltaparent(revlog, rev, p1, p2, prev)
783
785
784 prefix = ''
786 prefix = ''
785 if revlog.iscensored(base) or revlog.iscensored(rev):
787 if revlog.iscensored(base) or revlog.iscensored(rev):
786 try:
788 try:
787 delta = revlog.revision(node, raw=True)
789 delta = revlog.revision(node, raw=True)
788 except error.CensoredNodeError as e:
790 except error.CensoredNodeError as e:
789 delta = e.tombstone
791 delta = e.tombstone
790 if base == nullrev:
792 if base == nullrev:
791 prefix = mdiff.trivialdiffheader(len(delta))
793 prefix = mdiff.trivialdiffheader(len(delta))
792 else:
794 else:
793 baselen = revlog.rawsize(base)
795 baselen = revlog.rawsize(base)
794 prefix = mdiff.replacediffheader(baselen, len(delta))
796 prefix = mdiff.replacediffheader(baselen, len(delta))
795 elif base == nullrev:
797 elif base == nullrev:
796 delta = revlog.revision(node, raw=True)
798 delta = revlog.revision(node, raw=True)
797 prefix = mdiff.trivialdiffheader(len(delta))
799 prefix = mdiff.trivialdiffheader(len(delta))
798 else:
800 else:
799 delta = revlog.revdiff(base, rev)
801 delta = revlog.revdiff(base, rev)
800 p1n, p2n = revlog.parents(node)
802 p1n, p2n = revlog.parents(node)
801 basenode = revlog.node(base)
803 basenode = revlog.node(base)
802 flags = revlog.flags(rev)
804 flags = revlog.flags(rev)
803 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
805 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
804 meta += prefix
806 meta += prefix
805 l = len(meta) + len(delta)
807 l = len(meta) + len(delta)
806 yield chunkheader(l)
808 yield chunkheader(l)
807 yield meta
809 yield meta
808 yield delta
810 yield delta
809 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
811 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
810 # do nothing with basenode, it is implicitly the previous one in HG10
812 # do nothing with basenode, it is implicitly the previous one in HG10
811 # do nothing with flags, it is implicitly 0 for cg1 and cg2
813 # do nothing with flags, it is implicitly 0 for cg1 and cg2
812 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
814 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
813
815
814 class cg2packer(cg1packer):
816 class cg2packer(cg1packer):
815 version = '02'
817 version = '02'
816 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
818 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
817
819
818 def __init__(self, repo, bundlecaps=None):
820 def __init__(self, repo, bundlecaps=None):
819 super(cg2packer, self).__init__(repo, bundlecaps)
821 super(cg2packer, self).__init__(repo, bundlecaps)
820 if self._reorder is None:
822 if self._reorder is None:
821 # Since generaldelta is directly supported by cg2, reordering
823 # Since generaldelta is directly supported by cg2, reordering
822 # generally doesn't help, so we disable it by default (treating
824 # generally doesn't help, so we disable it by default (treating
823 # bundle.reorder=auto just like bundle.reorder=False).
825 # bundle.reorder=auto just like bundle.reorder=False).
824 self._reorder = False
826 self._reorder = False
825
827
826 def deltaparent(self, revlog, rev, p1, p2, prev):
828 def deltaparent(self, revlog, rev, p1, p2, prev):
827 dp = revlog.deltaparent(rev)
829 dp = revlog.deltaparent(rev)
828 if dp == nullrev and revlog.storedeltachains:
830 if dp == nullrev and revlog.storedeltachains:
829 # Avoid sending full revisions when delta parent is null. Pick prev
831 # Avoid sending full revisions when delta parent is null. Pick prev
830 # in that case. It's tempting to pick p1 in this case, as p1 will
832 # in that case. It's tempting to pick p1 in this case, as p1 will
831 # be smaller in the common case. However, computing a delta against
833 # be smaller in the common case. However, computing a delta against
832 # p1 may require resolving the raw text of p1, which could be
834 # p1 may require resolving the raw text of p1, which could be
833 # expensive. The revlog caches should have prev cached, meaning
835 # expensive. The revlog caches should have prev cached, meaning
834 # less CPU for changegroup generation. There is likely room to add
836 # less CPU for changegroup generation. There is likely room to add
835 # a flag and/or config option to control this behavior.
837 # a flag and/or config option to control this behavior.
836 return prev
838 base = prev
837 elif dp == nullrev:
839 elif dp == nullrev:
838 # revlog is configured to use full snapshot for a reason,
840 # revlog is configured to use full snapshot for a reason,
839 # stick to full snapshot.
841 # stick to full snapshot.
840 return nullrev
842 base = nullrev
841 elif dp not in (p1, p2, prev):
843 elif dp not in (p1, p2, prev):
842 # Pick prev when we can't be sure remote has the base revision.
844 # Pick prev when we can't be sure remote has the base revision.
843 return prev
845 return prev
844 else:
846 else:
845 return dp
847 base = dp
848 if base != nullrev and not revlog.candelta(base, rev):
849 base = nullrev
850 return base
846
851
847 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
852 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
848 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
853 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
849 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
854 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
850
855
851 class cg3packer(cg2packer):
856 class cg3packer(cg2packer):
852 version = '03'
857 version = '03'
853 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
858 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
854
859
855 def _packmanifests(self, dir, mfnodes, lookuplinknode):
860 def _packmanifests(self, dir, mfnodes, lookuplinknode):
856 if dir:
861 if dir:
857 yield self.fileheader(dir)
862 yield self.fileheader(dir)
858
863
859 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
864 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
860 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
865 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
861 units=_('manifests')):
866 units=_('manifests')):
862 yield chunk
867 yield chunk
863
868
864 def _manifestsdone(self):
869 def _manifestsdone(self):
865 return self.close()
870 return self.close()
866
871
867 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
872 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
868 return struct.pack(
873 return struct.pack(
869 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
874 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
870
875
871 _packermap = {'01': (cg1packer, cg1unpacker),
876 _packermap = {'01': (cg1packer, cg1unpacker),
872 # cg2 adds support for exchanging generaldelta
877 # cg2 adds support for exchanging generaldelta
873 '02': (cg2packer, cg2unpacker),
878 '02': (cg2packer, cg2unpacker),
874 # cg3 adds support for exchanging revlog flags and treemanifests
879 # cg3 adds support for exchanging revlog flags and treemanifests
875 '03': (cg3packer, cg3unpacker),
880 '03': (cg3packer, cg3unpacker),
876 }
881 }
877
882
878 def allsupportedversions(repo):
883 def allsupportedversions(repo):
879 versions = set(_packermap.keys())
884 versions = set(_packermap.keys())
880 if not (repo.ui.configbool('experimental', 'changegroup3') or
885 if not (repo.ui.configbool('experimental', 'changegroup3') or
881 repo.ui.configbool('experimental', 'treemanifest') or
886 repo.ui.configbool('experimental', 'treemanifest') or
882 'treemanifest' in repo.requirements):
887 'treemanifest' in repo.requirements):
883 versions.discard('03')
888 versions.discard('03')
884 return versions
889 return versions
885
890
886 # Changegroup versions that can be applied to the repo
891 # Changegroup versions that can be applied to the repo
887 def supportedincomingversions(repo):
892 def supportedincomingversions(repo):
888 return allsupportedversions(repo)
893 return allsupportedversions(repo)
889
894
890 # Changegroup versions that can be created from the repo
895 # Changegroup versions that can be created from the repo
891 def supportedoutgoingversions(repo):
896 def supportedoutgoingversions(repo):
892 versions = allsupportedversions(repo)
897 versions = allsupportedversions(repo)
893 if 'treemanifest' in repo.requirements:
898 if 'treemanifest' in repo.requirements:
894 # Versions 01 and 02 support only flat manifests and it's just too
899 # Versions 01 and 02 support only flat manifests and it's just too
895 # expensive to convert between the flat manifest and tree manifest on
900 # expensive to convert between the flat manifest and tree manifest on
896 # the fly. Since tree manifests are hashed differently, all of history
901 # the fly. Since tree manifests are hashed differently, all of history
897 # would have to be converted. Instead, we simply don't even pretend to
902 # would have to be converted. Instead, we simply don't even pretend to
898 # support versions 01 and 02.
903 # support versions 01 and 02.
899 versions.discard('01')
904 versions.discard('01')
900 versions.discard('02')
905 versions.discard('02')
901 if NARROW_REQUIREMENT in repo.requirements:
906 if NARROW_REQUIREMENT in repo.requirements:
902 # Versions 01 and 02 don't support revlog flags, and we need to
907 # Versions 01 and 02 don't support revlog flags, and we need to
903 # support that for stripping and unbundling to work.
908 # support that for stripping and unbundling to work.
904 versions.discard('01')
909 versions.discard('01')
905 versions.discard('02')
910 versions.discard('02')
906 return versions
911 return versions
907
912
908 def localversion(repo):
913 def localversion(repo):
909 # Finds the best version to use for bundles that are meant to be used
914 # Finds the best version to use for bundles that are meant to be used
910 # locally, such as those from strip and shelve, and temporary bundles.
915 # locally, such as those from strip and shelve, and temporary bundles.
911 return max(supportedoutgoingversions(repo))
916 return max(supportedoutgoingversions(repo))
912
917
913 def safeversion(repo):
918 def safeversion(repo):
914 # Finds the smallest version that it's safe to assume clients of the repo
919 # Finds the smallest version that it's safe to assume clients of the repo
915 # will support. For example, all hg versions that support generaldelta also
920 # will support. For example, all hg versions that support generaldelta also
916 # support changegroup 02.
921 # support changegroup 02.
917 versions = supportedoutgoingversions(repo)
922 versions = supportedoutgoingversions(repo)
918 if 'generaldelta' in repo.requirements:
923 if 'generaldelta' in repo.requirements:
919 versions.discard('01')
924 versions.discard('01')
920 assert versions
925 assert versions
921 return min(versions)
926 return min(versions)
922
927
923 def getbundler(version, repo, bundlecaps=None):
928 def getbundler(version, repo, bundlecaps=None):
924 assert version in supportedoutgoingversions(repo)
929 assert version in supportedoutgoingversions(repo)
925 return _packermap[version][0](repo, bundlecaps)
930 return _packermap[version][0](repo, bundlecaps)
926
931
927 def getunbundler(version, fh, alg, extras=None):
932 def getunbundler(version, fh, alg, extras=None):
928 return _packermap[version][1](fh, alg, extras=extras)
933 return _packermap[version][1](fh, alg, extras=extras)
929
934
930 def _changegroupinfo(repo, nodes, source):
935 def _changegroupinfo(repo, nodes, source):
931 if repo.ui.verbose or source == 'bundle':
936 if repo.ui.verbose or source == 'bundle':
932 repo.ui.status(_("%d changesets found\n") % len(nodes))
937 repo.ui.status(_("%d changesets found\n") % len(nodes))
933 if repo.ui.debugflag:
938 if repo.ui.debugflag:
934 repo.ui.debug("list of changesets:\n")
939 repo.ui.debug("list of changesets:\n")
935 for node in nodes:
940 for node in nodes:
936 repo.ui.debug("%s\n" % hex(node))
941 repo.ui.debug("%s\n" % hex(node))
937
942
938 def makechangegroup(repo, outgoing, version, source, fastpath=False,
943 def makechangegroup(repo, outgoing, version, source, fastpath=False,
939 bundlecaps=None):
944 bundlecaps=None):
940 cgstream = makestream(repo, outgoing, version, source,
945 cgstream = makestream(repo, outgoing, version, source,
941 fastpath=fastpath, bundlecaps=bundlecaps)
946 fastpath=fastpath, bundlecaps=bundlecaps)
942 return getunbundler(version, util.chunkbuffer(cgstream), None,
947 return getunbundler(version, util.chunkbuffer(cgstream), None,
943 {'clcount': len(outgoing.missing) })
948 {'clcount': len(outgoing.missing) })
944
949
945 def makestream(repo, outgoing, version, source, fastpath=False,
950 def makestream(repo, outgoing, version, source, fastpath=False,
946 bundlecaps=None):
951 bundlecaps=None):
947 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
952 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
948
953
949 repo = repo.unfiltered()
954 repo = repo.unfiltered()
950 commonrevs = outgoing.common
955 commonrevs = outgoing.common
951 csets = outgoing.missing
956 csets = outgoing.missing
952 heads = outgoing.missingheads
957 heads = outgoing.missingheads
953 # We go through the fast path if we get told to, or if all (unfiltered
958 # We go through the fast path if we get told to, or if all (unfiltered
954 # heads have been requested (since we then know there all linkrevs will
959 # heads have been requested (since we then know there all linkrevs will
955 # be pulled by the client).
960 # be pulled by the client).
956 heads.sort()
961 heads.sort()
957 fastpathlinkrev = fastpath or (
962 fastpathlinkrev = fastpath or (
958 repo.filtername is None and heads == sorted(repo.heads()))
963 repo.filtername is None and heads == sorted(repo.heads()))
959
964
960 repo.hook('preoutgoing', throw=True, source=source)
965 repo.hook('preoutgoing', throw=True, source=source)
961 _changegroupinfo(repo, csets, source)
966 _changegroupinfo(repo, csets, source)
962 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
967 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
963
968
964 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
969 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
965 revisions = 0
970 revisions = 0
966 files = 0
971 files = 0
967 for chunkdata in iter(source.filelogheader, {}):
972 for chunkdata in iter(source.filelogheader, {}):
968 files += 1
973 files += 1
969 f = chunkdata["filename"]
974 f = chunkdata["filename"]
970 repo.ui.debug("adding %s revisions\n" % f)
975 repo.ui.debug("adding %s revisions\n" % f)
971 repo.ui.progress(_('files'), files, unit=_('files'),
976 repo.ui.progress(_('files'), files, unit=_('files'),
972 total=expectedfiles)
977 total=expectedfiles)
973 fl = repo.file(f)
978 fl = repo.file(f)
974 o = len(fl)
979 o = len(fl)
975 try:
980 try:
976 deltas = source.deltaiter()
981 deltas = source.deltaiter()
977 if not fl.addgroup(deltas, revmap, trp):
982 if not fl.addgroup(deltas, revmap, trp):
978 raise error.Abort(_("received file revlog group is empty"))
983 raise error.Abort(_("received file revlog group is empty"))
979 except error.CensoredBaseError as e:
984 except error.CensoredBaseError as e:
980 raise error.Abort(_("received delta base is censored: %s") % e)
985 raise error.Abort(_("received delta base is censored: %s") % e)
981 revisions += len(fl) - o
986 revisions += len(fl) - o
982 if f in needfiles:
987 if f in needfiles:
983 needs = needfiles[f]
988 needs = needfiles[f]
984 for new in xrange(o, len(fl)):
989 for new in xrange(o, len(fl)):
985 n = fl.node(new)
990 n = fl.node(new)
986 if n in needs:
991 if n in needs:
987 needs.remove(n)
992 needs.remove(n)
988 else:
993 else:
989 raise error.Abort(
994 raise error.Abort(
990 _("received spurious file revlog entry"))
995 _("received spurious file revlog entry"))
991 if not needs:
996 if not needs:
992 del needfiles[f]
997 del needfiles[f]
993 repo.ui.progress(_('files'), None)
998 repo.ui.progress(_('files'), None)
994
999
995 for f, needs in needfiles.iteritems():
1000 for f, needs in needfiles.iteritems():
996 fl = repo.file(f)
1001 fl = repo.file(f)
997 for n in needs:
1002 for n in needs:
998 try:
1003 try:
999 fl.rev(n)
1004 fl.rev(n)
1000 except error.LookupError:
1005 except error.LookupError:
1001 raise error.Abort(
1006 raise error.Abort(
1002 _('missing file data for %s:%s - run hg verify') %
1007 _('missing file data for %s:%s - run hg verify') %
1003 (f, hex(n)))
1008 (f, hex(n)))
1004
1009
1005 return revisions, files
1010 return revisions, files
@@ -1,484 +1,486 b''
1 # hgweb/hgweb_mod.py - Web interface for a repository.
1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import contextlib
11 import contextlib
12 import os
12 import os
13
13
14 from .common import (
14 from .common import (
15 ErrorResponse,
15 ErrorResponse,
16 HTTP_BAD_REQUEST,
16 HTTP_BAD_REQUEST,
17 HTTP_NOT_FOUND,
17 HTTP_NOT_FOUND,
18 HTTP_NOT_MODIFIED,
18 HTTP_NOT_MODIFIED,
19 HTTP_OK,
19 HTTP_OK,
20 HTTP_SERVER_ERROR,
20 HTTP_SERVER_ERROR,
21 caching,
21 caching,
22 cspvalues,
22 cspvalues,
23 permhooks,
23 permhooks,
24 )
24 )
25 from .request import wsgirequest
25 from .request import wsgirequest
26
26
27 from .. import (
27 from .. import (
28 encoding,
28 encoding,
29 error,
29 error,
30 formatter,
30 formatter,
31 hg,
31 hg,
32 hook,
32 hook,
33 profiling,
33 profiling,
34 pycompat,
34 pycompat,
35 repoview,
35 repoview,
36 templatefilters,
36 templatefilters,
37 templater,
37 templater,
38 ui as uimod,
38 ui as uimod,
39 util,
39 util,
40 wireproto,
40 wireprotoserver,
41 wireprotoserver,
41 )
42 )
42
43
43 from . import (
44 from . import (
44 webcommands,
45 webcommands,
45 webutil,
46 webutil,
46 wsgicgi,
47 wsgicgi,
47 )
48 )
48
49
49 perms = {
50 # Aliased for API compatibility.
50 'changegroup': 'pull',
51 perms = wireproto.permissions
51 'changegroupsubset': 'pull',
52 'getbundle': 'pull',
53 'stream_out': 'pull',
54 'listkeys': 'pull',
55 'unbundle': 'push',
56 'pushkey': 'push',
57 }
58
52
59 archivespecs = util.sortdict((
53 archivespecs = util.sortdict((
60 ('zip', ('application/zip', 'zip', '.zip', None)),
54 ('zip', ('application/zip', 'zip', '.zip', None)),
61 ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
55 ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
62 ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
56 ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
63 ))
57 ))
64
58
65 def getstyle(req, configfn, templatepath):
59 def getstyle(req, configfn, templatepath):
66 fromreq = req.form.get('style', [None])[0]
60 fromreq = req.form.get('style', [None])[0]
67 styles = (
61 styles = (
68 fromreq,
62 fromreq,
69 configfn('web', 'style'),
63 configfn('web', 'style'),
70 'paper',
64 'paper',
71 )
65 )
72 return styles, templater.stylemap(styles, templatepath)
66 return styles, templater.stylemap(styles, templatepath)
73
67
74 def makebreadcrumb(url, prefix=''):
68 def makebreadcrumb(url, prefix=''):
75 '''Return a 'URL breadcrumb' list
69 '''Return a 'URL breadcrumb' list
76
70
77 A 'URL breadcrumb' is a list of URL-name pairs,
71 A 'URL breadcrumb' is a list of URL-name pairs,
78 corresponding to each of the path items on a URL.
72 corresponding to each of the path items on a URL.
79 This can be used to create path navigation entries.
73 This can be used to create path navigation entries.
80 '''
74 '''
81 if url.endswith('/'):
75 if url.endswith('/'):
82 url = url[:-1]
76 url = url[:-1]
83 if prefix:
77 if prefix:
84 url = '/' + prefix + url
78 url = '/' + prefix + url
85 relpath = url
79 relpath = url
86 if relpath.startswith('/'):
80 if relpath.startswith('/'):
87 relpath = relpath[1:]
81 relpath = relpath[1:]
88
82
89 breadcrumb = []
83 breadcrumb = []
90 urlel = url
84 urlel = url
91 pathitems = [''] + relpath.split('/')
85 pathitems = [''] + relpath.split('/')
92 for pathel in reversed(pathitems):
86 for pathel in reversed(pathitems):
93 if not pathel or not urlel:
87 if not pathel or not urlel:
94 break
88 break
95 breadcrumb.append({'url': urlel, 'name': pathel})
89 breadcrumb.append({'url': urlel, 'name': pathel})
96 urlel = os.path.dirname(urlel)
90 urlel = os.path.dirname(urlel)
97 return reversed(breadcrumb)
91 return reversed(breadcrumb)
98
92
99 class requestcontext(object):
93 class requestcontext(object):
100 """Holds state/context for an individual request.
94 """Holds state/context for an individual request.
101
95
102 Servers can be multi-threaded. Holding state on the WSGI application
96 Servers can be multi-threaded. Holding state on the WSGI application
103 is prone to race conditions. Instances of this class exist to hold
97 is prone to race conditions. Instances of this class exist to hold
104 mutable and race-free state for requests.
98 mutable and race-free state for requests.
105 """
99 """
106 def __init__(self, app, repo):
100 def __init__(self, app, repo):
107 self.repo = repo
101 self.repo = repo
108 self.reponame = app.reponame
102 self.reponame = app.reponame
109
103
110 self.archivespecs = archivespecs
104 self.archivespecs = archivespecs
111
105
112 self.maxchanges = self.configint('web', 'maxchanges')
106 self.maxchanges = self.configint('web', 'maxchanges')
113 self.stripecount = self.configint('web', 'stripes')
107 self.stripecount = self.configint('web', 'stripes')
114 self.maxshortchanges = self.configint('web', 'maxshortchanges')
108 self.maxshortchanges = self.configint('web', 'maxshortchanges')
115 self.maxfiles = self.configint('web', 'maxfiles')
109 self.maxfiles = self.configint('web', 'maxfiles')
116 self.allowpull = self.configbool('web', 'allow-pull')
110 self.allowpull = self.configbool('web', 'allow-pull')
117
111
118 # we use untrusted=False to prevent a repo owner from using
112 # we use untrusted=False to prevent a repo owner from using
119 # web.templates in .hg/hgrc to get access to any file readable
113 # web.templates in .hg/hgrc to get access to any file readable
120 # by the user running the CGI script
114 # by the user running the CGI script
121 self.templatepath = self.config('web', 'templates', untrusted=False)
115 self.templatepath = self.config('web', 'templates', untrusted=False)
122
116
123 # This object is more expensive to build than simple config values.
117 # This object is more expensive to build than simple config values.
124 # It is shared across requests. The app will replace the object
118 # It is shared across requests. The app will replace the object
125 # if it is updated. Since this is a reference and nothing should
119 # if it is updated. Since this is a reference and nothing should
126 # modify the underlying object, it should be constant for the lifetime
120 # modify the underlying object, it should be constant for the lifetime
127 # of the request.
121 # of the request.
128 self.websubtable = app.websubtable
122 self.websubtable = app.websubtable
129
123
130 self.csp, self.nonce = cspvalues(self.repo.ui)
124 self.csp, self.nonce = cspvalues(self.repo.ui)
131
125
132 # Trust the settings from the .hg/hgrc files by default.
126 # Trust the settings from the .hg/hgrc files by default.
133 def config(self, section, name, default=uimod._unset, untrusted=True):
127 def config(self, section, name, default=uimod._unset, untrusted=True):
134 return self.repo.ui.config(section, name, default,
128 return self.repo.ui.config(section, name, default,
135 untrusted=untrusted)
129 untrusted=untrusted)
136
130
137 def configbool(self, section, name, default=uimod._unset, untrusted=True):
131 def configbool(self, section, name, default=uimod._unset, untrusted=True):
138 return self.repo.ui.configbool(section, name, default,
132 return self.repo.ui.configbool(section, name, default,
139 untrusted=untrusted)
133 untrusted=untrusted)
140
134
141 def configint(self, section, name, default=uimod._unset, untrusted=True):
135 def configint(self, section, name, default=uimod._unset, untrusted=True):
142 return self.repo.ui.configint(section, name, default,
136 return self.repo.ui.configint(section, name, default,
143 untrusted=untrusted)
137 untrusted=untrusted)
144
138
145 def configlist(self, section, name, default=uimod._unset, untrusted=True):
139 def configlist(self, section, name, default=uimod._unset, untrusted=True):
146 return self.repo.ui.configlist(section, name, default,
140 return self.repo.ui.configlist(section, name, default,
147 untrusted=untrusted)
141 untrusted=untrusted)
148
142
149 def archivelist(self, nodeid):
143 def archivelist(self, nodeid):
150 allowed = self.configlist('web', 'allow_archive')
144 allowed = self.configlist('web', 'allow_archive')
151 for typ, spec in self.archivespecs.iteritems():
145 for typ, spec in self.archivespecs.iteritems():
152 if typ in allowed or self.configbool('web', 'allow%s' % typ):
146 if typ in allowed or self.configbool('web', 'allow%s' % typ):
153 yield {'type': typ, 'extension': spec[2], 'node': nodeid}
147 yield {'type': typ, 'extension': spec[2], 'node': nodeid}
154
148
155 def templater(self, req):
149 def templater(self, req):
156 # determine scheme, port and server name
150 # determine scheme, port and server name
157 # this is needed to create absolute urls
151 # this is needed to create absolute urls
158
152
159 proto = req.env.get('wsgi.url_scheme')
153 proto = req.env.get('wsgi.url_scheme')
160 if proto == 'https':
154 if proto == 'https':
161 proto = 'https'
155 proto = 'https'
162 default_port = '443'
156 default_port = '443'
163 else:
157 else:
164 proto = 'http'
158 proto = 'http'
165 default_port = '80'
159 default_port = '80'
166
160
167 port = req.env[r'SERVER_PORT']
161 port = req.env[r'SERVER_PORT']
168 port = port != default_port and (r':' + port) or r''
162 port = port != default_port and (r':' + port) or r''
169 urlbase = r'%s://%s%s' % (proto, req.env[r'SERVER_NAME'], port)
163 urlbase = r'%s://%s%s' % (proto, req.env[r'SERVER_NAME'], port)
170 logourl = self.config('web', 'logourl')
164 logourl = self.config('web', 'logourl')
171 logoimg = self.config('web', 'logoimg')
165 logoimg = self.config('web', 'logoimg')
172 staticurl = self.config('web', 'staticurl') or req.url + 'static/'
166 staticurl = self.config('web', 'staticurl') or req.url + 'static/'
173 if not staticurl.endswith('/'):
167 if not staticurl.endswith('/'):
174 staticurl += '/'
168 staticurl += '/'
175
169
176 # some functions for the templater
170 # some functions for the templater
177
171
178 def motd(**map):
172 def motd(**map):
179 yield self.config('web', 'motd')
173 yield self.config('web', 'motd')
180
174
181 # figure out which style to use
175 # figure out which style to use
182
176
183 vars = {}
177 vars = {}
184 styles, (style, mapfile) = getstyle(req, self.config,
178 styles, (style, mapfile) = getstyle(req, self.config,
185 self.templatepath)
179 self.templatepath)
186 if style == styles[0]:
180 if style == styles[0]:
187 vars['style'] = style
181 vars['style'] = style
188
182
189 start = '&' if req.url[-1] == r'?' else '?'
183 start = '&' if req.url[-1] == r'?' else '?'
190 sessionvars = webutil.sessionvars(vars, start)
184 sessionvars = webutil.sessionvars(vars, start)
191
185
192 if not self.reponame:
186 if not self.reponame:
193 self.reponame = (self.config('web', 'name', '')
187 self.reponame = (self.config('web', 'name', '')
194 or req.env.get('REPO_NAME')
188 or req.env.get('REPO_NAME')
195 or req.url.strip('/') or self.repo.root)
189 or req.url.strip('/') or self.repo.root)
196
190
197 def websubfilter(text):
191 def websubfilter(text):
198 return templatefilters.websub(text, self.websubtable)
192 return templatefilters.websub(text, self.websubtable)
199
193
200 # create the templater
194 # create the templater
201 # TODO: export all keywords: defaults = templatekw.keywords.copy()
195 # TODO: export all keywords: defaults = templatekw.keywords.copy()
202 defaults = {
196 defaults = {
203 'url': req.url,
197 'url': req.url,
204 'logourl': logourl,
198 'logourl': logourl,
205 'logoimg': logoimg,
199 'logoimg': logoimg,
206 'staticurl': staticurl,
200 'staticurl': staticurl,
207 'urlbase': urlbase,
201 'urlbase': urlbase,
208 'repo': self.reponame,
202 'repo': self.reponame,
209 'encoding': encoding.encoding,
203 'encoding': encoding.encoding,
210 'motd': motd,
204 'motd': motd,
211 'sessionvars': sessionvars,
205 'sessionvars': sessionvars,
212 'pathdef': makebreadcrumb(req.url),
206 'pathdef': makebreadcrumb(req.url),
213 'style': style,
207 'style': style,
214 'nonce': self.nonce,
208 'nonce': self.nonce,
215 }
209 }
216 tres = formatter.templateresources(self.repo.ui, self.repo)
210 tres = formatter.templateresources(self.repo.ui, self.repo)
217 tmpl = templater.templater.frommapfile(mapfile,
211 tmpl = templater.templater.frommapfile(mapfile,
218 filters={'websub': websubfilter},
212 filters={'websub': websubfilter},
219 defaults=defaults,
213 defaults=defaults,
220 resources=tres)
214 resources=tres)
221 return tmpl
215 return tmpl
222
216
223
217
224 class hgweb(object):
218 class hgweb(object):
225 """HTTP server for individual repositories.
219 """HTTP server for individual repositories.
226
220
227 Instances of this class serve HTTP responses for a particular
221 Instances of this class serve HTTP responses for a particular
228 repository.
222 repository.
229
223
230 Instances are typically used as WSGI applications.
224 Instances are typically used as WSGI applications.
231
225
232 Some servers are multi-threaded. On these servers, there may
226 Some servers are multi-threaded. On these servers, there may
233 be multiple active threads inside __call__.
227 be multiple active threads inside __call__.
234 """
228 """
235 def __init__(self, repo, name=None, baseui=None):
229 def __init__(self, repo, name=None, baseui=None):
236 if isinstance(repo, str):
230 if isinstance(repo, str):
237 if baseui:
231 if baseui:
238 u = baseui.copy()
232 u = baseui.copy()
239 else:
233 else:
240 u = uimod.ui.load()
234 u = uimod.ui.load()
241 r = hg.repository(u, repo)
235 r = hg.repository(u, repo)
242 else:
236 else:
243 # we trust caller to give us a private copy
237 # we trust caller to give us a private copy
244 r = repo
238 r = repo
245
239
246 r.ui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
240 r.ui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
247 r.baseui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
241 r.baseui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
248 r.ui.setconfig('ui', 'nontty', 'true', 'hgweb')
242 r.ui.setconfig('ui', 'nontty', 'true', 'hgweb')
249 r.baseui.setconfig('ui', 'nontty', 'true', 'hgweb')
243 r.baseui.setconfig('ui', 'nontty', 'true', 'hgweb')
250 # resolve file patterns relative to repo root
244 # resolve file patterns relative to repo root
251 r.ui.setconfig('ui', 'forcecwd', r.root, 'hgweb')
245 r.ui.setconfig('ui', 'forcecwd', r.root, 'hgweb')
252 r.baseui.setconfig('ui', 'forcecwd', r.root, 'hgweb')
246 r.baseui.setconfig('ui', 'forcecwd', r.root, 'hgweb')
253 # displaying bundling progress bar while serving feel wrong and may
247 # displaying bundling progress bar while serving feel wrong and may
254 # break some wsgi implementation.
248 # break some wsgi implementation.
255 r.ui.setconfig('progress', 'disable', 'true', 'hgweb')
249 r.ui.setconfig('progress', 'disable', 'true', 'hgweb')
256 r.baseui.setconfig('progress', 'disable', 'true', 'hgweb')
250 r.baseui.setconfig('progress', 'disable', 'true', 'hgweb')
257 self._repos = [hg.cachedlocalrepo(self._webifyrepo(r))]
251 self._repos = [hg.cachedlocalrepo(self._webifyrepo(r))]
258 self._lastrepo = self._repos[0]
252 self._lastrepo = self._repos[0]
259 hook.redirect(True)
253 hook.redirect(True)
260 self.reponame = name
254 self.reponame = name
261
255
262 def _webifyrepo(self, repo):
256 def _webifyrepo(self, repo):
263 repo = getwebview(repo)
257 repo = getwebview(repo)
264 self.websubtable = webutil.getwebsubs(repo)
258 self.websubtable = webutil.getwebsubs(repo)
265 return repo
259 return repo
266
260
267 @contextlib.contextmanager
261 @contextlib.contextmanager
268 def _obtainrepo(self):
262 def _obtainrepo(self):
269 """Obtain a repo unique to the caller.
263 """Obtain a repo unique to the caller.
270
264
271 Internally we maintain a stack of cachedlocalrepo instances
265 Internally we maintain a stack of cachedlocalrepo instances
272 to be handed out. If one is available, we pop it and return it,
266 to be handed out. If one is available, we pop it and return it,
273 ensuring it is up to date in the process. If one is not available,
267 ensuring it is up to date in the process. If one is not available,
274 we clone the most recently used repo instance and return it.
268 we clone the most recently used repo instance and return it.
275
269
276 It is currently possible for the stack to grow without bounds
270 It is currently possible for the stack to grow without bounds
277 if the server allows infinite threads. However, servers should
271 if the server allows infinite threads. However, servers should
278 have a thread limit, thus establishing our limit.
272 have a thread limit, thus establishing our limit.
279 """
273 """
280 if self._repos:
274 if self._repos:
281 cached = self._repos.pop()
275 cached = self._repos.pop()
282 r, created = cached.fetch()
276 r, created = cached.fetch()
283 else:
277 else:
284 cached = self._lastrepo.copy()
278 cached = self._lastrepo.copy()
285 r, created = cached.fetch()
279 r, created = cached.fetch()
286 if created:
280 if created:
287 r = self._webifyrepo(r)
281 r = self._webifyrepo(r)
288
282
289 self._lastrepo = cached
283 self._lastrepo = cached
290 self.mtime = cached.mtime
284 self.mtime = cached.mtime
291 try:
285 try:
292 yield r
286 yield r
293 finally:
287 finally:
294 self._repos.append(cached)
288 self._repos.append(cached)
295
289
296 def run(self):
290 def run(self):
297 """Start a server from CGI environment.
291 """Start a server from CGI environment.
298
292
299 Modern servers should be using WSGI and should avoid this
293 Modern servers should be using WSGI and should avoid this
300 method, if possible.
294 method, if possible.
301 """
295 """
302 if not encoding.environ.get('GATEWAY_INTERFACE',
296 if not encoding.environ.get('GATEWAY_INTERFACE',
303 '').startswith("CGI/1."):
297 '').startswith("CGI/1."):
304 raise RuntimeError("This function is only intended to be "
298 raise RuntimeError("This function is only intended to be "
305 "called while running as a CGI script.")
299 "called while running as a CGI script.")
306 wsgicgi.launch(self)
300 wsgicgi.launch(self)
307
301
308 def __call__(self, env, respond):
302 def __call__(self, env, respond):
309 """Run the WSGI application.
303 """Run the WSGI application.
310
304
311 This may be called by multiple threads.
305 This may be called by multiple threads.
312 """
306 """
313 req = wsgirequest(env, respond)
307 req = wsgirequest(env, respond)
314 return self.run_wsgi(req)
308 return self.run_wsgi(req)
315
309
316 def run_wsgi(self, req):
310 def run_wsgi(self, req):
317 """Internal method to run the WSGI application.
311 """Internal method to run the WSGI application.
318
312
319 This is typically only called by Mercurial. External consumers
313 This is typically only called by Mercurial. External consumers
320 should be using instances of this class as the WSGI application.
314 should be using instances of this class as the WSGI application.
321 """
315 """
322 with self._obtainrepo() as repo:
316 with self._obtainrepo() as repo:
323 profile = repo.ui.configbool('profiling', 'enabled')
317 profile = repo.ui.configbool('profiling', 'enabled')
324 with profiling.profile(repo.ui, enabled=profile):
318 with profiling.profile(repo.ui, enabled=profile):
325 for r in self._runwsgi(req, repo):
319 for r in self._runwsgi(req, repo):
326 yield r
320 yield r
327
321
328 def _runwsgi(self, req, repo):
322 def _runwsgi(self, req, repo):
329 rctx = requestcontext(self, repo)
323 rctx = requestcontext(self, repo)
330
324
331 # This state is global across all threads.
325 # This state is global across all threads.
332 encoding.encoding = rctx.config('web', 'encoding')
326 encoding.encoding = rctx.config('web', 'encoding')
333 rctx.repo.ui.environ = req.env
327 rctx.repo.ui.environ = req.env
334
328
335 if rctx.csp:
329 if rctx.csp:
336 # hgwebdir may have added CSP header. Since we generate our own,
330 # hgwebdir may have added CSP header. Since we generate our own,
337 # replace it.
331 # replace it.
338 req.headers = [h for h in req.headers
332 req.headers = [h for h in req.headers
339 if h[0] != 'Content-Security-Policy']
333 if h[0] != 'Content-Security-Policy']
340 req.headers.append(('Content-Security-Policy', rctx.csp))
334 req.headers.append(('Content-Security-Policy', rctx.csp))
341
335
342 # work with CGI variables to create coherent structure
336 # work with CGI variables to create coherent structure
343 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
337 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
344
338
345 req.url = req.env[r'SCRIPT_NAME']
339 req.url = req.env[r'SCRIPT_NAME']
346 if not req.url.endswith('/'):
340 if not req.url.endswith('/'):
347 req.url += '/'
341 req.url += '/'
348 if req.env.get('REPO_NAME'):
342 if req.env.get('REPO_NAME'):
349 req.url += req.env[r'REPO_NAME'] + r'/'
343 req.url += req.env[r'REPO_NAME'] + r'/'
350
344
351 if r'PATH_INFO' in req.env:
345 if r'PATH_INFO' in req.env:
352 parts = req.env[r'PATH_INFO'].strip('/').split('/')
346 parts = req.env[r'PATH_INFO'].strip('/').split('/')
353 repo_parts = req.env.get(r'REPO_NAME', r'').split(r'/')
347 repo_parts = req.env.get(r'REPO_NAME', r'').split(r'/')
354 if parts[:len(repo_parts)] == repo_parts:
348 if parts[:len(repo_parts)] == repo_parts:
355 parts = parts[len(repo_parts):]
349 parts = parts[len(repo_parts):]
356 query = '/'.join(parts)
350 query = '/'.join(parts)
357 else:
351 else:
358 query = req.env[r'QUERY_STRING'].partition(r'&')[0]
352 query = req.env[r'QUERY_STRING'].partition(r'&')[0]
359 query = query.partition(r';')[0]
353 query = query.partition(r';')[0]
360
354
361 # Route it to a wire protocol handler if it looks like a wire protocol
355 # Route it to a wire protocol handler if it looks like a wire protocol
362 # request.
356 # request.
363 protohandler = wireprotoserver.parsehttprequest(rctx.repo, req, query)
357 protohandler = wireprotoserver.parsehttprequest(rctx.repo, req, query)
364
358
365 if protohandler:
359 if protohandler:
366 cmd = protohandler['cmd']
360 cmd = protohandler['cmd']
367 try:
361 try:
368 if query:
362 if query:
369 raise ErrorResponse(HTTP_NOT_FOUND)
363 raise ErrorResponse(HTTP_NOT_FOUND)
370 if cmd in perms:
364
371 self.check_perm(rctx, req, perms[cmd])
365 # TODO fold this into parsehttprequest
366 req.checkperm = lambda op: self.check_perm(rctx, req, op)
367 protohandler['proto'].checkperm = req.checkperm
368
369 # Assume commands with no defined permissions are writes /
370 # for pushes. This is the safest from a security perspective
371 # because it doesn't allow commands with undefined semantics
372 # from bypassing permissions checks.
373 req.checkperm(perms.get(cmd, 'push'))
374
375 return protohandler['dispatch']()
372 except ErrorResponse as inst:
376 except ErrorResponse as inst:
373 return protohandler['handleerror'](inst)
377 return protohandler['handleerror'](inst)
374
378
375 return protohandler['dispatch']()
376
377 # translate user-visible url structure to internal structure
379 # translate user-visible url structure to internal structure
378
380
379 args = query.split('/', 2)
381 args = query.split('/', 2)
380 if 'cmd' not in req.form and args and args[0]:
382 if 'cmd' not in req.form and args and args[0]:
381 cmd = args.pop(0)
383 cmd = args.pop(0)
382 style = cmd.rfind('-')
384 style = cmd.rfind('-')
383 if style != -1:
385 if style != -1:
384 req.form['style'] = [cmd[:style]]
386 req.form['style'] = [cmd[:style]]
385 cmd = cmd[style + 1:]
387 cmd = cmd[style + 1:]
386
388
387 # avoid accepting e.g. style parameter as command
389 # avoid accepting e.g. style parameter as command
388 if util.safehasattr(webcommands, cmd):
390 if util.safehasattr(webcommands, cmd):
389 req.form['cmd'] = [cmd]
391 req.form['cmd'] = [cmd]
390
392
391 if cmd == 'static':
393 if cmd == 'static':
392 req.form['file'] = ['/'.join(args)]
394 req.form['file'] = ['/'.join(args)]
393 else:
395 else:
394 if args and args[0]:
396 if args and args[0]:
395 node = args.pop(0).replace('%2F', '/')
397 node = args.pop(0).replace('%2F', '/')
396 req.form['node'] = [node]
398 req.form['node'] = [node]
397 if args:
399 if args:
398 req.form['file'] = args
400 req.form['file'] = args
399
401
400 ua = req.env.get('HTTP_USER_AGENT', '')
402 ua = req.env.get('HTTP_USER_AGENT', '')
401 if cmd == 'rev' and 'mercurial' in ua:
403 if cmd == 'rev' and 'mercurial' in ua:
402 req.form['style'] = ['raw']
404 req.form['style'] = ['raw']
403
405
404 if cmd == 'archive':
406 if cmd == 'archive':
405 fn = req.form['node'][0]
407 fn = req.form['node'][0]
406 for type_, spec in rctx.archivespecs.iteritems():
408 for type_, spec in rctx.archivespecs.iteritems():
407 ext = spec[2]
409 ext = spec[2]
408 if fn.endswith(ext):
410 if fn.endswith(ext):
409 req.form['node'] = [fn[:-len(ext)]]
411 req.form['node'] = [fn[:-len(ext)]]
410 req.form['type'] = [type_]
412 req.form['type'] = [type_]
411 else:
413 else:
412 cmd = req.form.get('cmd', [''])[0]
414 cmd = req.form.get('cmd', [''])[0]
413
415
414 # process the web interface request
416 # process the web interface request
415
417
416 try:
418 try:
417 tmpl = rctx.templater(req)
419 tmpl = rctx.templater(req)
418 ctype = tmpl('mimetype', encoding=encoding.encoding)
420 ctype = tmpl('mimetype', encoding=encoding.encoding)
419 ctype = templater.stringify(ctype)
421 ctype = templater.stringify(ctype)
420
422
421 # check read permissions non-static content
423 # check read permissions non-static content
422 if cmd != 'static':
424 if cmd != 'static':
423 self.check_perm(rctx, req, None)
425 self.check_perm(rctx, req, None)
424
426
425 if cmd == '':
427 if cmd == '':
426 req.form['cmd'] = [tmpl.cache['default']]
428 req.form['cmd'] = [tmpl.cache['default']]
427 cmd = req.form['cmd'][0]
429 cmd = req.form['cmd'][0]
428
430
429 # Don't enable caching if using a CSP nonce because then it wouldn't
431 # Don't enable caching if using a CSP nonce because then it wouldn't
430 # be a nonce.
432 # be a nonce.
431 if rctx.configbool('web', 'cache') and not rctx.nonce:
433 if rctx.configbool('web', 'cache') and not rctx.nonce:
432 caching(self, req) # sets ETag header or raises NOT_MODIFIED
434 caching(self, req) # sets ETag header or raises NOT_MODIFIED
433 if cmd not in webcommands.__all__:
435 if cmd not in webcommands.__all__:
434 msg = 'no such method: %s' % cmd
436 msg = 'no such method: %s' % cmd
435 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
437 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
436 elif cmd == 'file' and 'raw' in req.form.get('style', []):
438 elif cmd == 'file' and 'raw' in req.form.get('style', []):
437 rctx.ctype = ctype
439 rctx.ctype = ctype
438 content = webcommands.rawfile(rctx, req, tmpl)
440 content = webcommands.rawfile(rctx, req, tmpl)
439 else:
441 else:
440 content = getattr(webcommands, cmd)(rctx, req, tmpl)
442 content = getattr(webcommands, cmd)(rctx, req, tmpl)
441 req.respond(HTTP_OK, ctype)
443 req.respond(HTTP_OK, ctype)
442
444
443 return content
445 return content
444
446
445 except (error.LookupError, error.RepoLookupError) as err:
447 except (error.LookupError, error.RepoLookupError) as err:
446 req.respond(HTTP_NOT_FOUND, ctype)
448 req.respond(HTTP_NOT_FOUND, ctype)
447 msg = pycompat.bytestr(err)
449 msg = pycompat.bytestr(err)
448 if (util.safehasattr(err, 'name') and
450 if (util.safehasattr(err, 'name') and
449 not isinstance(err, error.ManifestLookupError)):
451 not isinstance(err, error.ManifestLookupError)):
450 msg = 'revision not found: %s' % err.name
452 msg = 'revision not found: %s' % err.name
451 return tmpl('error', error=msg)
453 return tmpl('error', error=msg)
452 except (error.RepoError, error.RevlogError) as inst:
454 except (error.RepoError, error.RevlogError) as inst:
453 req.respond(HTTP_SERVER_ERROR, ctype)
455 req.respond(HTTP_SERVER_ERROR, ctype)
454 return tmpl('error', error=pycompat.bytestr(inst))
456 return tmpl('error', error=pycompat.bytestr(inst))
455 except ErrorResponse as inst:
457 except ErrorResponse as inst:
456 req.respond(inst, ctype)
458 req.respond(inst, ctype)
457 if inst.code == HTTP_NOT_MODIFIED:
459 if inst.code == HTTP_NOT_MODIFIED:
458 # Not allowed to return a body on a 304
460 # Not allowed to return a body on a 304
459 return ['']
461 return ['']
460 return tmpl('error', error=pycompat.bytestr(inst))
462 return tmpl('error', error=pycompat.bytestr(inst))
461
463
462 def check_perm(self, rctx, req, op):
464 def check_perm(self, rctx, req, op):
463 for permhook in permhooks:
465 for permhook in permhooks:
464 permhook(rctx, req, op)
466 permhook(rctx, req, op)
465
467
466 def getwebview(repo):
468 def getwebview(repo):
467 """The 'web.view' config controls changeset filter to hgweb. Possible
469 """The 'web.view' config controls changeset filter to hgweb. Possible
468 values are ``served``, ``visible`` and ``all``. Default is ``served``.
470 values are ``served``, ``visible`` and ``all``. Default is ``served``.
469 The ``served`` filter only shows changesets that can be pulled from the
471 The ``served`` filter only shows changesets that can be pulled from the
470 hgweb instance. The``visible`` filter includes secret changesets but
472 hgweb instance. The``visible`` filter includes secret changesets but
471 still excludes "hidden" one.
473 still excludes "hidden" one.
472
474
473 See the repoview module for details.
475 See the repoview module for details.
474
476
475 The option has been around undocumented since Mercurial 2.5, but no
477 The option has been around undocumented since Mercurial 2.5, but no
476 user ever asked about it. So we better keep it undocumented for now."""
478 user ever asked about it. So we better keep it undocumented for now."""
477 # experimental config: web.view
479 # experimental config: web.view
478 viewconfig = repo.ui.config('web', 'view', untrusted=True)
480 viewconfig = repo.ui.config('web', 'view', untrusted=True)
479 if viewconfig == 'all':
481 if viewconfig == 'all':
480 return repo.unfiltered()
482 return repo.unfiltered()
481 elif viewconfig in repoview.filtertable:
483 elif viewconfig in repoview.filtertable:
482 return repo.filtered(viewconfig)
484 return repo.filtered(viewconfig)
483 else:
485 else:
484 return repo.filtered('served')
486 return repo.filtered('served')
@@ -1,2501 +1,2531 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import hashlib
19 import hashlib
20 import heapq
20 import heapq
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 wdirhex,
31 wdirhex,
32 wdirid,
32 wdirid,
33 wdirrev,
33 wdirrev,
34 )
34 )
35 from .i18n import _
35 from .i18n import _
36 from .thirdparty import (
36 from .thirdparty import (
37 attr,
37 attr,
38 )
38 )
39 from . import (
39 from . import (
40 ancestor,
40 ancestor,
41 error,
41 error,
42 mdiff,
42 mdiff,
43 policy,
43 policy,
44 pycompat,
44 pycompat,
45 templatefilters,
45 templatefilters,
46 util,
46 util,
47 )
47 )
48
48
49 parsers = policy.importmod(r'parsers')
49 parsers = policy.importmod(r'parsers')
50
50
51 # Aliased for performance.
51 # Aliased for performance.
52 _zlibdecompress = zlib.decompress
52 _zlibdecompress = zlib.decompress
53
53
54 # revlog header flags
54 # revlog header flags
55 REVLOGV0 = 0
55 REVLOGV0 = 0
56 REVLOGV1 = 1
56 REVLOGV1 = 1
57 # Dummy value until file format is finalized.
57 # Dummy value until file format is finalized.
58 # Reminder: change the bounds check in revlog.__init__ when this is changed.
58 # Reminder: change the bounds check in revlog.__init__ when this is changed.
59 REVLOGV2 = 0xDEAD
59 REVLOGV2 = 0xDEAD
60 FLAG_INLINE_DATA = (1 << 16)
60 FLAG_INLINE_DATA = (1 << 16)
61 FLAG_GENERALDELTA = (1 << 17)
61 FLAG_GENERALDELTA = (1 << 17)
62 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
62 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
63 REVLOG_DEFAULT_FORMAT = REVLOGV1
63 REVLOG_DEFAULT_FORMAT = REVLOGV1
64 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
64 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
65 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
65 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
66 REVLOGV2_FLAGS = REVLOGV1_FLAGS
66 REVLOGV2_FLAGS = REVLOGV1_FLAGS
67
67
68 # revlog index flags
68 # revlog index flags
69 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
69 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
70 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
70 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
71 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
71 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
72 REVIDX_DEFAULT_FLAGS = 0
72 REVIDX_DEFAULT_FLAGS = 0
73 # stable order in which flags need to be processed and their processors applied
73 # stable order in which flags need to be processed and their processors applied
74 REVIDX_FLAGS_ORDER = [
74 REVIDX_FLAGS_ORDER = [
75 REVIDX_ISCENSORED,
75 REVIDX_ISCENSORED,
76 REVIDX_ELLIPSIS,
76 REVIDX_ELLIPSIS,
77 REVIDX_EXTSTORED,
77 REVIDX_EXTSTORED,
78 ]
78 ]
79 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
79 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
80 # bitmark for flags that could cause rawdata content change
81 REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
80
82
81 # max size of revlog with inline data
83 # max size of revlog with inline data
82 _maxinline = 131072
84 _maxinline = 131072
83 _chunksize = 1048576
85 _chunksize = 1048576
84
86
85 RevlogError = error.RevlogError
87 RevlogError = error.RevlogError
86 LookupError = error.LookupError
88 LookupError = error.LookupError
87 CensoredNodeError = error.CensoredNodeError
89 CensoredNodeError = error.CensoredNodeError
88 ProgrammingError = error.ProgrammingError
90 ProgrammingError = error.ProgrammingError
89
91
90 # Store flag processors (cf. 'addflagprocessor()' to register)
92 # Store flag processors (cf. 'addflagprocessor()' to register)
91 _flagprocessors = {
93 _flagprocessors = {
92 REVIDX_ISCENSORED: None,
94 REVIDX_ISCENSORED: None,
93 }
95 }
94
96
95 def addflagprocessor(flag, processor):
97 def addflagprocessor(flag, processor):
96 """Register a flag processor on a revision data flag.
98 """Register a flag processor on a revision data flag.
97
99
98 Invariant:
100 Invariant:
99 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
101 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
102 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
100 - Only one flag processor can be registered on a specific flag.
103 - Only one flag processor can be registered on a specific flag.
101 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
104 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
102 following signatures:
105 following signatures:
103 - (read) f(self, rawtext) -> text, bool
106 - (read) f(self, rawtext) -> text, bool
104 - (write) f(self, text) -> rawtext, bool
107 - (write) f(self, text) -> rawtext, bool
105 - (raw) f(self, rawtext) -> bool
108 - (raw) f(self, rawtext) -> bool
106 "text" is presented to the user. "rawtext" is stored in revlog data, not
109 "text" is presented to the user. "rawtext" is stored in revlog data, not
107 directly visible to the user.
110 directly visible to the user.
108 The boolean returned by these transforms is used to determine whether
111 The boolean returned by these transforms is used to determine whether
109 the returned text can be used for hash integrity checking. For example,
112 the returned text can be used for hash integrity checking. For example,
110 if "write" returns False, then "text" is used to generate hash. If
113 if "write" returns False, then "text" is used to generate hash. If
111 "write" returns True, that basically means "rawtext" returned by "write"
114 "write" returns True, that basically means "rawtext" returned by "write"
112 should be used to generate hash. Usually, "write" and "read" return
115 should be used to generate hash. Usually, "write" and "read" return
113 different booleans. And "raw" returns a same boolean as "write".
116 different booleans. And "raw" returns a same boolean as "write".
114
117
115 Note: The 'raw' transform is used for changegroup generation and in some
118 Note: The 'raw' transform is used for changegroup generation and in some
116 debug commands. In this case the transform only indicates whether the
119 debug commands. In this case the transform only indicates whether the
117 contents can be used for hash integrity checks.
120 contents can be used for hash integrity checks.
118 """
121 """
119 if not flag & REVIDX_KNOWN_FLAGS:
122 if not flag & REVIDX_KNOWN_FLAGS:
120 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
123 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
121 raise ProgrammingError(msg)
124 raise ProgrammingError(msg)
122 if flag not in REVIDX_FLAGS_ORDER:
125 if flag not in REVIDX_FLAGS_ORDER:
123 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
126 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
124 raise ProgrammingError(msg)
127 raise ProgrammingError(msg)
125 if flag in _flagprocessors:
128 if flag in _flagprocessors:
126 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
129 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
127 raise error.Abort(msg)
130 raise error.Abort(msg)
128 _flagprocessors[flag] = processor
131 _flagprocessors[flag] = processor
129
132
130 def getoffset(q):
133 def getoffset(q):
131 return int(q >> 16)
134 return int(q >> 16)
132
135
133 def gettype(q):
136 def gettype(q):
134 return int(q & 0xFFFF)
137 return int(q & 0xFFFF)
135
138
136 def offset_type(offset, type):
139 def offset_type(offset, type):
137 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
140 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
138 raise ValueError('unknown revlog index flags')
141 raise ValueError('unknown revlog index flags')
139 return int(int(offset) << 16 | type)
142 return int(int(offset) << 16 | type)
140
143
141 _nullhash = hashlib.sha1(nullid)
144 _nullhash = hashlib.sha1(nullid)
142
145
143 def hash(text, p1, p2):
146 def hash(text, p1, p2):
144 """generate a hash from the given text and its parent hashes
147 """generate a hash from the given text and its parent hashes
145
148
146 This hash combines both the current file contents and its history
149 This hash combines both the current file contents and its history
147 in a manner that makes it easy to distinguish nodes with the same
150 in a manner that makes it easy to distinguish nodes with the same
148 content in the revision graph.
151 content in the revision graph.
149 """
152 """
150 # As of now, if one of the parent node is null, p2 is null
153 # As of now, if one of the parent node is null, p2 is null
151 if p2 == nullid:
154 if p2 == nullid:
152 # deep copy of a hash is faster than creating one
155 # deep copy of a hash is faster than creating one
153 s = _nullhash.copy()
156 s = _nullhash.copy()
154 s.update(p1)
157 s.update(p1)
155 else:
158 else:
156 # none of the parent nodes are nullid
159 # none of the parent nodes are nullid
157 if p1 < p2:
160 if p1 < p2:
158 a = p1
161 a = p1
159 b = p2
162 b = p2
160 else:
163 else:
161 a = p2
164 a = p2
162 b = p1
165 b = p1
163 s = hashlib.sha1(a)
166 s = hashlib.sha1(a)
164 s.update(b)
167 s.update(b)
165 s.update(text)
168 s.update(text)
166 return s.digest()
169 return s.digest()
167
170
168 def _trimchunk(revlog, revs, startidx, endidx=None):
171 def _trimchunk(revlog, revs, startidx, endidx=None):
169 """returns revs[startidx:endidx] without empty trailing revs
172 """returns revs[startidx:endidx] without empty trailing revs
170 """
173 """
171 length = revlog.length
174 length = revlog.length
172
175
173 if endidx is None:
176 if endidx is None:
174 endidx = len(revs)
177 endidx = len(revs)
175
178
176 # Trim empty revs at the end, but never the very first revision of a chain
179 # Trim empty revs at the end, but never the very first revision of a chain
177 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
180 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
178 endidx -= 1
181 endidx -= 1
179
182
180 return revs[startidx:endidx]
183 return revs[startidx:endidx]
181
184
182 def _slicechunk(revlog, revs):
185 def _slicechunk(revlog, revs):
183 """slice revs to reduce the amount of unrelated data to be read from disk.
186 """slice revs to reduce the amount of unrelated data to be read from disk.
184
187
185 ``revs`` is sliced into groups that should be read in one time.
188 ``revs`` is sliced into groups that should be read in one time.
186 Assume that revs are sorted.
189 Assume that revs are sorted.
187 """
190 """
188 start = revlog.start
191 start = revlog.start
189 length = revlog.length
192 length = revlog.length
190
193
191 if len(revs) <= 1:
194 if len(revs) <= 1:
192 yield revs
195 yield revs
193 return
196 return
194
197
195 startbyte = start(revs[0])
198 startbyte = start(revs[0])
196 endbyte = start(revs[-1]) + length(revs[-1])
199 endbyte = start(revs[-1]) + length(revs[-1])
197 readdata = deltachainspan = endbyte - startbyte
200 readdata = deltachainspan = endbyte - startbyte
198
201
199 chainpayload = sum(length(r) for r in revs)
202 chainpayload = sum(length(r) for r in revs)
200
203
201 if deltachainspan:
204 if deltachainspan:
202 density = chainpayload / float(deltachainspan)
205 density = chainpayload / float(deltachainspan)
203 else:
206 else:
204 density = 1.0
207 density = 1.0
205
208
206 # Store the gaps in a heap to have them sorted by decreasing size
209 # Store the gaps in a heap to have them sorted by decreasing size
207 gapsheap = []
210 gapsheap = []
208 heapq.heapify(gapsheap)
211 heapq.heapify(gapsheap)
209 prevend = None
212 prevend = None
210 for i, rev in enumerate(revs):
213 for i, rev in enumerate(revs):
211 revstart = start(rev)
214 revstart = start(rev)
212 revlen = length(rev)
215 revlen = length(rev)
213
216
214 # Skip empty revisions to form larger holes
217 # Skip empty revisions to form larger holes
215 if revlen == 0:
218 if revlen == 0:
216 continue
219 continue
217
220
218 if prevend is not None:
221 if prevend is not None:
219 gapsize = revstart - prevend
222 gapsize = revstart - prevend
220 # only consider holes that are large enough
223 # only consider holes that are large enough
221 if gapsize > revlog._srmingapsize:
224 if gapsize > revlog._srmingapsize:
222 heapq.heappush(gapsheap, (-gapsize, i))
225 heapq.heappush(gapsheap, (-gapsize, i))
223
226
224 prevend = revstart + revlen
227 prevend = revstart + revlen
225
228
226 # Collect the indices of the largest holes until the density is acceptable
229 # Collect the indices of the largest holes until the density is acceptable
227 indicesheap = []
230 indicesheap = []
228 heapq.heapify(indicesheap)
231 heapq.heapify(indicesheap)
229 while gapsheap and density < revlog._srdensitythreshold:
232 while gapsheap and density < revlog._srdensitythreshold:
230 oppgapsize, gapidx = heapq.heappop(gapsheap)
233 oppgapsize, gapidx = heapq.heappop(gapsheap)
231
234
232 heapq.heappush(indicesheap, gapidx)
235 heapq.heappush(indicesheap, gapidx)
233
236
234 # the gap sizes are stored as negatives to be sorted decreasingly
237 # the gap sizes are stored as negatives to be sorted decreasingly
235 # by the heap
238 # by the heap
236 readdata -= (-oppgapsize)
239 readdata -= (-oppgapsize)
237 if readdata > 0:
240 if readdata > 0:
238 density = chainpayload / float(readdata)
241 density = chainpayload / float(readdata)
239 else:
242 else:
240 density = 1.0
243 density = 1.0
241
244
242 # Cut the revs at collected indices
245 # Cut the revs at collected indices
243 previdx = 0
246 previdx = 0
244 while indicesheap:
247 while indicesheap:
245 idx = heapq.heappop(indicesheap)
248 idx = heapq.heappop(indicesheap)
246
249
247 chunk = _trimchunk(revlog, revs, previdx, idx)
250 chunk = _trimchunk(revlog, revs, previdx, idx)
248 if chunk:
251 if chunk:
249 yield chunk
252 yield chunk
250
253
251 previdx = idx
254 previdx = idx
252
255
253 chunk = _trimchunk(revlog, revs, previdx)
256 chunk = _trimchunk(revlog, revs, previdx)
254 if chunk:
257 if chunk:
255 yield chunk
258 yield chunk
256
259
257 @attr.s(slots=True, frozen=True)
260 @attr.s(slots=True, frozen=True)
258 class _deltainfo(object):
261 class _deltainfo(object):
259 distance = attr.ib()
262 distance = attr.ib()
260 deltalen = attr.ib()
263 deltalen = attr.ib()
261 data = attr.ib()
264 data = attr.ib()
262 base = attr.ib()
265 base = attr.ib()
263 chainbase = attr.ib()
266 chainbase = attr.ib()
264 chainlen = attr.ib()
267 chainlen = attr.ib()
265 compresseddeltalen = attr.ib()
268 compresseddeltalen = attr.ib()
266
269
267 class _deltacomputer(object):
270 class _deltacomputer(object):
268 def __init__(self, revlog):
271 def __init__(self, revlog):
269 self.revlog = revlog
272 self.revlog = revlog
270
273
271 def _getcandidaterevs(self, p1, p2, cachedelta):
274 def _getcandidaterevs(self, p1, p2, cachedelta):
272 """
275 """
273 Provides revisions that present an interest to be diffed against,
276 Provides revisions that present an interest to be diffed against,
274 grouped by level of easiness.
277 grouped by level of easiness.
275 """
278 """
276 revlog = self.revlog
279 revlog = self.revlog
277 curr = len(revlog)
280 curr = len(revlog)
278 prev = curr - 1
281 prev = curr - 1
279 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
282 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
280
283
281 # should we try to build a delta?
284 # should we try to build a delta?
282 if prev != nullrev and revlog.storedeltachains:
285 if prev != nullrev and revlog.storedeltachains:
283 tested = set()
286 tested = set()
284 # This condition is true most of the time when processing
287 # This condition is true most of the time when processing
285 # changegroup data into a generaldelta repo. The only time it
288 # changegroup data into a generaldelta repo. The only time it
286 # isn't true is if this is the first revision in a delta chain
289 # isn't true is if this is the first revision in a delta chain
287 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
290 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
288 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
291 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
289 # Assume what we received from the server is a good choice
292 # Assume what we received from the server is a good choice
290 # build delta will reuse the cache
293 # build delta will reuse the cache
291 yield (cachedelta[0],)
294 yield (cachedelta[0],)
292 tested.add(cachedelta[0])
295 tested.add(cachedelta[0])
293
296
294 if revlog._generaldelta:
297 if revlog._generaldelta:
295 # exclude already lazy tested base if any
298 # exclude already lazy tested base if any
296 parents = [p for p in (p1r, p2r)
299 parents = [p for p in (p1r, p2r)
297 if p != nullrev and p not in tested]
300 if p != nullrev and p not in tested]
298 if parents and not revlog._aggressivemergedeltas:
301 if parents and not revlog._aggressivemergedeltas:
299 # Pick whichever parent is closer to us (to minimize the
302 # Pick whichever parent is closer to us (to minimize the
300 # chance of having to build a fulltext).
303 # chance of having to build a fulltext).
301 parents = [max(parents)]
304 parents = [max(parents)]
302 tested.update(parents)
305 tested.update(parents)
303 yield parents
306 yield parents
304
307
305 if prev not in tested:
308 if prev not in tested:
306 # other approach failed try against prev to hopefully save us a
309 # other approach failed try against prev to hopefully save us a
307 # fulltext.
310 # fulltext.
308 yield (prev,)
311 yield (prev,)
309
312
310 def buildtext(self, revinfo, fh):
313 def buildtext(self, revinfo, fh):
311 """Builds a fulltext version of a revision
314 """Builds a fulltext version of a revision
312
315
313 revinfo: _revisioninfo instance that contains all needed info
316 revinfo: _revisioninfo instance that contains all needed info
314 fh: file handle to either the .i or the .d revlog file,
317 fh: file handle to either the .i or the .d revlog file,
315 depending on whether it is inlined or not
318 depending on whether it is inlined or not
316 """
319 """
317 btext = revinfo.btext
320 btext = revinfo.btext
318 if btext[0] is not None:
321 if btext[0] is not None:
319 return btext[0]
322 return btext[0]
320
323
321 revlog = self.revlog
324 revlog = self.revlog
322 cachedelta = revinfo.cachedelta
325 cachedelta = revinfo.cachedelta
323 flags = revinfo.flags
326 flags = revinfo.flags
324 node = revinfo.node
327 node = revinfo.node
325
328
326 baserev = cachedelta[0]
329 baserev = cachedelta[0]
327 delta = cachedelta[1]
330 delta = cachedelta[1]
328 # special case deltas which replace entire base; no need to decode
331 # special case deltas which replace entire base; no need to decode
329 # base revision. this neatly avoids censored bases, which throw when
332 # base revision. this neatly avoids censored bases, which throw when
330 # they're decoded.
333 # they're decoded.
331 hlen = struct.calcsize(">lll")
334 hlen = struct.calcsize(">lll")
332 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
335 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
333 len(delta) - hlen):
336 len(delta) - hlen):
334 btext[0] = delta[hlen:]
337 btext[0] = delta[hlen:]
335 else:
338 else:
336 basetext = revlog.revision(baserev, _df=fh, raw=True)
339 # deltabase is rawtext before changed by flag processors, which is
340 # equivalent to non-raw text
341 basetext = revlog.revision(baserev, _df=fh, raw=False)
337 btext[0] = mdiff.patch(basetext, delta)
342 btext[0] = mdiff.patch(basetext, delta)
338
343
339 try:
344 try:
340 res = revlog._processflags(btext[0], flags, 'read', raw=True)
345 res = revlog._processflags(btext[0], flags, 'read', raw=True)
341 btext[0], validatehash = res
346 btext[0], validatehash = res
342 if validatehash:
347 if validatehash:
343 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
348 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
344 if flags & REVIDX_ISCENSORED:
349 if flags & REVIDX_ISCENSORED:
345 raise RevlogError(_('node %s is not censored') % node)
350 raise RevlogError(_('node %s is not censored') % node)
346 except CensoredNodeError:
351 except CensoredNodeError:
347 # must pass the censored index flag to add censored revisions
352 # must pass the censored index flag to add censored revisions
348 if not flags & REVIDX_ISCENSORED:
353 if not flags & REVIDX_ISCENSORED:
349 raise
354 raise
350 return btext[0]
355 return btext[0]
351
356
352 def _builddeltadiff(self, base, revinfo, fh):
357 def _builddeltadiff(self, base, revinfo, fh):
353 revlog = self.revlog
358 revlog = self.revlog
354 t = self.buildtext(revinfo, fh)
359 t = self.buildtext(revinfo, fh)
355 if revlog.iscensored(base):
360 if revlog.iscensored(base):
356 # deltas based on a censored revision must replace the
361 # deltas based on a censored revision must replace the
357 # full content in one patch, so delta works everywhere
362 # full content in one patch, so delta works everywhere
358 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
363 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
359 delta = header + t
364 delta = header + t
360 else:
365 else:
361 ptext = revlog.revision(base, _df=fh, raw=True)
366 ptext = revlog.revision(base, _df=fh, raw=True)
362 delta = mdiff.textdiff(ptext, t)
367 delta = mdiff.textdiff(ptext, t)
363
368
364 return delta
369 return delta
365
370
366 def _builddeltainfo(self, revinfo, base, fh):
371 def _builddeltainfo(self, revinfo, base, fh):
367 # can we use the cached delta?
372 # can we use the cached delta?
368 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
373 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
369 delta = revinfo.cachedelta[1]
374 delta = revinfo.cachedelta[1]
370 else:
375 else:
371 delta = self._builddeltadiff(base, revinfo, fh)
376 delta = self._builddeltadiff(base, revinfo, fh)
372 revlog = self.revlog
377 revlog = self.revlog
373 header, data = revlog.compress(delta)
378 header, data = revlog.compress(delta)
374 deltalen = len(header) + len(data)
379 deltalen = len(header) + len(data)
375 chainbase = revlog.chainbase(base)
380 chainbase = revlog.chainbase(base)
376 offset = revlog.end(len(revlog) - 1)
381 offset = revlog.end(len(revlog) - 1)
377 dist = deltalen + offset - revlog.start(chainbase)
382 dist = deltalen + offset - revlog.start(chainbase)
378 if revlog._generaldelta:
383 if revlog._generaldelta:
379 deltabase = base
384 deltabase = base
380 else:
385 else:
381 deltabase = chainbase
386 deltabase = chainbase
382 chainlen, compresseddeltalen = revlog._chaininfo(base)
387 chainlen, compresseddeltalen = revlog._chaininfo(base)
383 chainlen += 1
388 chainlen += 1
384 compresseddeltalen += deltalen
389 compresseddeltalen += deltalen
385 return _deltainfo(dist, deltalen, (header, data), deltabase,
390 return _deltainfo(dist, deltalen, (header, data), deltabase,
386 chainbase, chainlen, compresseddeltalen)
391 chainbase, chainlen, compresseddeltalen)
387
392
388 def finddeltainfo(self, revinfo, fh):
393 def finddeltainfo(self, revinfo, fh):
389 """Find an acceptable delta against a candidate revision
394 """Find an acceptable delta against a candidate revision
390
395
391 revinfo: information about the revision (instance of _revisioninfo)
396 revinfo: information about the revision (instance of _revisioninfo)
392 fh: file handle to either the .i or the .d revlog file,
397 fh: file handle to either the .i or the .d revlog file,
393 depending on whether it is inlined or not
398 depending on whether it is inlined or not
394
399
395 Returns the first acceptable candidate revision, as ordered by
400 Returns the first acceptable candidate revision, as ordered by
396 _getcandidaterevs
401 _getcandidaterevs
397 """
402 """
398 cachedelta = revinfo.cachedelta
403 cachedelta = revinfo.cachedelta
399 p1 = revinfo.p1
404 p1 = revinfo.p1
400 p2 = revinfo.p2
405 p2 = revinfo.p2
401 revlog = self.revlog
406 revlog = self.revlog
402
407
403 deltainfo = None
408 deltainfo = None
404 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
409 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
405 nominateddeltas = []
410 nominateddeltas = []
406 for candidaterev in candidaterevs:
411 for candidaterev in candidaterevs:
412 # no delta for rawtext-changing revs (see "candelta" for why)
413 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
414 continue
407 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
415 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
408 if revlog._isgooddeltainfo(candidatedelta, revinfo.textlen):
416 if revlog._isgooddeltainfo(candidatedelta, revinfo.textlen):
409 nominateddeltas.append(candidatedelta)
417 nominateddeltas.append(candidatedelta)
410 if nominateddeltas:
418 if nominateddeltas:
411 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
419 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
412 break
420 break
413
421
414 return deltainfo
422 return deltainfo
415
423
416 @attr.s(slots=True, frozen=True)
424 @attr.s(slots=True, frozen=True)
417 class _revisioninfo(object):
425 class _revisioninfo(object):
418 """Information about a revision that allows building its fulltext
426 """Information about a revision that allows building its fulltext
419 node: expected hash of the revision
427 node: expected hash of the revision
420 p1, p2: parent revs of the revision
428 p1, p2: parent revs of the revision
421 btext: built text cache consisting of a one-element list
429 btext: built text cache consisting of a one-element list
422 cachedelta: (baserev, uncompressed_delta) or None
430 cachedelta: (baserev, uncompressed_delta) or None
423 flags: flags associated to the revision storage
431 flags: flags associated to the revision storage
424
432
425 One of btext[0] or cachedelta must be set.
433 One of btext[0] or cachedelta must be set.
426 """
434 """
427 node = attr.ib()
435 node = attr.ib()
428 p1 = attr.ib()
436 p1 = attr.ib()
429 p2 = attr.ib()
437 p2 = attr.ib()
430 btext = attr.ib()
438 btext = attr.ib()
431 textlen = attr.ib()
439 textlen = attr.ib()
432 cachedelta = attr.ib()
440 cachedelta = attr.ib()
433 flags = attr.ib()
441 flags = attr.ib()
434
442
435 # index v0:
443 # index v0:
436 # 4 bytes: offset
444 # 4 bytes: offset
437 # 4 bytes: compressed length
445 # 4 bytes: compressed length
438 # 4 bytes: base rev
446 # 4 bytes: base rev
439 # 4 bytes: link rev
447 # 4 bytes: link rev
440 # 20 bytes: parent 1 nodeid
448 # 20 bytes: parent 1 nodeid
441 # 20 bytes: parent 2 nodeid
449 # 20 bytes: parent 2 nodeid
442 # 20 bytes: nodeid
450 # 20 bytes: nodeid
443 indexformatv0 = struct.Struct(">4l20s20s20s")
451 indexformatv0 = struct.Struct(">4l20s20s20s")
444 indexformatv0_pack = indexformatv0.pack
452 indexformatv0_pack = indexformatv0.pack
445 indexformatv0_unpack = indexformatv0.unpack
453 indexformatv0_unpack = indexformatv0.unpack
446
454
447 class revlogoldio(object):
455 class revlogoldio(object):
448 def __init__(self):
456 def __init__(self):
449 self.size = indexformatv0.size
457 self.size = indexformatv0.size
450
458
451 def parseindex(self, data, inline):
459 def parseindex(self, data, inline):
452 s = self.size
460 s = self.size
453 index = []
461 index = []
454 nodemap = {nullid: nullrev}
462 nodemap = {nullid: nullrev}
455 n = off = 0
463 n = off = 0
456 l = len(data)
464 l = len(data)
457 while off + s <= l:
465 while off + s <= l:
458 cur = data[off:off + s]
466 cur = data[off:off + s]
459 off += s
467 off += s
460 e = indexformatv0_unpack(cur)
468 e = indexformatv0_unpack(cur)
461 # transform to revlogv1 format
469 # transform to revlogv1 format
462 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
470 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
463 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
471 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
464 index.append(e2)
472 index.append(e2)
465 nodemap[e[6]] = n
473 nodemap[e[6]] = n
466 n += 1
474 n += 1
467
475
468 # add the magic null revision at -1
476 # add the magic null revision at -1
469 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
477 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
470
478
471 return index, nodemap, None
479 return index, nodemap, None
472
480
473 def packentry(self, entry, node, version, rev):
481 def packentry(self, entry, node, version, rev):
474 if gettype(entry[0]):
482 if gettype(entry[0]):
475 raise RevlogError(_('index entry flags need revlog version 1'))
483 raise RevlogError(_('index entry flags need revlog version 1'))
476 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
484 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
477 node(entry[5]), node(entry[6]), entry[7])
485 node(entry[5]), node(entry[6]), entry[7])
478 return indexformatv0_pack(*e2)
486 return indexformatv0_pack(*e2)
479
487
480 # index ng:
488 # index ng:
481 # 6 bytes: offset
489 # 6 bytes: offset
482 # 2 bytes: flags
490 # 2 bytes: flags
483 # 4 bytes: compressed length
491 # 4 bytes: compressed length
484 # 4 bytes: uncompressed length
492 # 4 bytes: uncompressed length
485 # 4 bytes: base rev
493 # 4 bytes: base rev
486 # 4 bytes: link rev
494 # 4 bytes: link rev
487 # 4 bytes: parent 1 rev
495 # 4 bytes: parent 1 rev
488 # 4 bytes: parent 2 rev
496 # 4 bytes: parent 2 rev
489 # 32 bytes: nodeid
497 # 32 bytes: nodeid
490 indexformatng = struct.Struct(">Qiiiiii20s12x")
498 indexformatng = struct.Struct(">Qiiiiii20s12x")
491 indexformatng_pack = indexformatng.pack
499 indexformatng_pack = indexformatng.pack
492 versionformat = struct.Struct(">I")
500 versionformat = struct.Struct(">I")
493 versionformat_pack = versionformat.pack
501 versionformat_pack = versionformat.pack
494 versionformat_unpack = versionformat.unpack
502 versionformat_unpack = versionformat.unpack
495
503
496 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
504 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
497 # signed integer)
505 # signed integer)
498 _maxentrysize = 0x7fffffff
506 _maxentrysize = 0x7fffffff
499
507
500 class revlogio(object):
508 class revlogio(object):
501 def __init__(self):
509 def __init__(self):
502 self.size = indexformatng.size
510 self.size = indexformatng.size
503
511
504 def parseindex(self, data, inline):
512 def parseindex(self, data, inline):
505 # call the C implementation to parse the index data
513 # call the C implementation to parse the index data
506 index, cache = parsers.parse_index2(data, inline)
514 index, cache = parsers.parse_index2(data, inline)
507 return index, getattr(index, 'nodemap', None), cache
515 return index, getattr(index, 'nodemap', None), cache
508
516
509 def packentry(self, entry, node, version, rev):
517 def packentry(self, entry, node, version, rev):
510 p = indexformatng_pack(*entry)
518 p = indexformatng_pack(*entry)
511 if rev == 0:
519 if rev == 0:
512 p = versionformat_pack(version) + p[4:]
520 p = versionformat_pack(version) + p[4:]
513 return p
521 return p
514
522
515 class revlog(object):
523 class revlog(object):
516 """
524 """
517 the underlying revision storage object
525 the underlying revision storage object
518
526
519 A revlog consists of two parts, an index and the revision data.
527 A revlog consists of two parts, an index and the revision data.
520
528
521 The index is a file with a fixed record size containing
529 The index is a file with a fixed record size containing
522 information on each revision, including its nodeid (hash), the
530 information on each revision, including its nodeid (hash), the
523 nodeids of its parents, the position and offset of its data within
531 nodeids of its parents, the position and offset of its data within
524 the data file, and the revision it's based on. Finally, each entry
532 the data file, and the revision it's based on. Finally, each entry
525 contains a linkrev entry that can serve as a pointer to external
533 contains a linkrev entry that can serve as a pointer to external
526 data.
534 data.
527
535
528 The revision data itself is a linear collection of data chunks.
536 The revision data itself is a linear collection of data chunks.
529 Each chunk represents a revision and is usually represented as a
537 Each chunk represents a revision and is usually represented as a
530 delta against the previous chunk. To bound lookup time, runs of
538 delta against the previous chunk. To bound lookup time, runs of
531 deltas are limited to about 2 times the length of the original
539 deltas are limited to about 2 times the length of the original
532 version data. This makes retrieval of a version proportional to
540 version data. This makes retrieval of a version proportional to
533 its size, or O(1) relative to the number of revisions.
541 its size, or O(1) relative to the number of revisions.
534
542
535 Both pieces of the revlog are written to in an append-only
543 Both pieces of the revlog are written to in an append-only
536 fashion, which means we never need to rewrite a file to insert or
544 fashion, which means we never need to rewrite a file to insert or
537 remove data, and can use some simple techniques to avoid the need
545 remove data, and can use some simple techniques to avoid the need
538 for locking while reading.
546 for locking while reading.
539
547
540 If checkambig, indexfile is opened with checkambig=True at
548 If checkambig, indexfile is opened with checkambig=True at
541 writing, to avoid file stat ambiguity.
549 writing, to avoid file stat ambiguity.
542
550
543 If mmaplargeindex is True, and an mmapindexthreshold is set, the
551 If mmaplargeindex is True, and an mmapindexthreshold is set, the
544 index will be mmapped rather than read if it is larger than the
552 index will be mmapped rather than read if it is larger than the
545 configured threshold.
553 configured threshold.
546 """
554 """
547 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
555 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
548 mmaplargeindex=False):
556 mmaplargeindex=False):
549 """
557 """
550 create a revlog object
558 create a revlog object
551
559
552 opener is a function that abstracts the file opening operation
560 opener is a function that abstracts the file opening operation
553 and can be used to implement COW semantics or the like.
561 and can be used to implement COW semantics or the like.
554 """
562 """
555 self.indexfile = indexfile
563 self.indexfile = indexfile
556 self.datafile = datafile or (indexfile[:-2] + ".d")
564 self.datafile = datafile or (indexfile[:-2] + ".d")
557 self.opener = opener
565 self.opener = opener
558 # When True, indexfile is opened with checkambig=True at writing, to
566 # When True, indexfile is opened with checkambig=True at writing, to
559 # avoid file stat ambiguity.
567 # avoid file stat ambiguity.
560 self._checkambig = checkambig
568 self._checkambig = checkambig
561 # 3-tuple of (node, rev, text) for a raw revision.
569 # 3-tuple of (node, rev, text) for a raw revision.
562 self._cache = None
570 self._cache = None
563 # Maps rev to chain base rev.
571 # Maps rev to chain base rev.
564 self._chainbasecache = util.lrucachedict(100)
572 self._chainbasecache = util.lrucachedict(100)
565 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
573 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
566 self._chunkcache = (0, '')
574 self._chunkcache = (0, '')
567 # How much data to read and cache into the raw revlog data cache.
575 # How much data to read and cache into the raw revlog data cache.
568 self._chunkcachesize = 65536
576 self._chunkcachesize = 65536
569 self._maxchainlen = None
577 self._maxchainlen = None
570 self._aggressivemergedeltas = False
578 self._aggressivemergedeltas = False
571 self.index = []
579 self.index = []
572 # Mapping of partial identifiers to full nodes.
580 # Mapping of partial identifiers to full nodes.
573 self._pcache = {}
581 self._pcache = {}
574 # Mapping of revision integer to full node.
582 # Mapping of revision integer to full node.
575 self._nodecache = {nullid: nullrev}
583 self._nodecache = {nullid: nullrev}
576 self._nodepos = None
584 self._nodepos = None
577 self._compengine = 'zlib'
585 self._compengine = 'zlib'
578 self._maxdeltachainspan = -1
586 self._maxdeltachainspan = -1
579 self._withsparseread = False
587 self._withsparseread = False
580 self._srdensitythreshold = 0.25
588 self._srdensitythreshold = 0.25
581 self._srmingapsize = 262144
589 self._srmingapsize = 262144
582
590
583 mmapindexthreshold = None
591 mmapindexthreshold = None
584 v = REVLOG_DEFAULT_VERSION
592 v = REVLOG_DEFAULT_VERSION
585 opts = getattr(opener, 'options', None)
593 opts = getattr(opener, 'options', None)
586 if opts is not None:
594 if opts is not None:
587 if 'revlogv2' in opts:
595 if 'revlogv2' in opts:
588 # version 2 revlogs always use generaldelta.
596 # version 2 revlogs always use generaldelta.
589 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
597 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
590 elif 'revlogv1' in opts:
598 elif 'revlogv1' in opts:
591 if 'generaldelta' in opts:
599 if 'generaldelta' in opts:
592 v |= FLAG_GENERALDELTA
600 v |= FLAG_GENERALDELTA
593 else:
601 else:
594 v = 0
602 v = 0
595 if 'chunkcachesize' in opts:
603 if 'chunkcachesize' in opts:
596 self._chunkcachesize = opts['chunkcachesize']
604 self._chunkcachesize = opts['chunkcachesize']
597 if 'maxchainlen' in opts:
605 if 'maxchainlen' in opts:
598 self._maxchainlen = opts['maxchainlen']
606 self._maxchainlen = opts['maxchainlen']
599 if 'aggressivemergedeltas' in opts:
607 if 'aggressivemergedeltas' in opts:
600 self._aggressivemergedeltas = opts['aggressivemergedeltas']
608 self._aggressivemergedeltas = opts['aggressivemergedeltas']
601 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
609 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
602 if 'compengine' in opts:
610 if 'compengine' in opts:
603 self._compengine = opts['compengine']
611 self._compengine = opts['compengine']
604 if 'maxdeltachainspan' in opts:
612 if 'maxdeltachainspan' in opts:
605 self._maxdeltachainspan = opts['maxdeltachainspan']
613 self._maxdeltachainspan = opts['maxdeltachainspan']
606 if mmaplargeindex and 'mmapindexthreshold' in opts:
614 if mmaplargeindex and 'mmapindexthreshold' in opts:
607 mmapindexthreshold = opts['mmapindexthreshold']
615 mmapindexthreshold = opts['mmapindexthreshold']
608 self._withsparseread = bool(opts.get('with-sparse-read', False))
616 self._withsparseread = bool(opts.get('with-sparse-read', False))
609 if 'sparse-read-density-threshold' in opts:
617 if 'sparse-read-density-threshold' in opts:
610 self._srdensitythreshold = opts['sparse-read-density-threshold']
618 self._srdensitythreshold = opts['sparse-read-density-threshold']
611 if 'sparse-read-min-gap-size' in opts:
619 if 'sparse-read-min-gap-size' in opts:
612 self._srmingapsize = opts['sparse-read-min-gap-size']
620 self._srmingapsize = opts['sparse-read-min-gap-size']
613
621
614 if self._chunkcachesize <= 0:
622 if self._chunkcachesize <= 0:
615 raise RevlogError(_('revlog chunk cache size %r is not greater '
623 raise RevlogError(_('revlog chunk cache size %r is not greater '
616 'than 0') % self._chunkcachesize)
624 'than 0') % self._chunkcachesize)
617 elif self._chunkcachesize & (self._chunkcachesize - 1):
625 elif self._chunkcachesize & (self._chunkcachesize - 1):
618 raise RevlogError(_('revlog chunk cache size %r is not a power '
626 raise RevlogError(_('revlog chunk cache size %r is not a power '
619 'of 2') % self._chunkcachesize)
627 'of 2') % self._chunkcachesize)
620
628
621 indexdata = ''
629 indexdata = ''
622 self._initempty = True
630 self._initempty = True
623 try:
631 try:
624 with self._indexfp() as f:
632 with self._indexfp() as f:
625 if (mmapindexthreshold is not None and
633 if (mmapindexthreshold is not None and
626 self.opener.fstat(f).st_size >= mmapindexthreshold):
634 self.opener.fstat(f).st_size >= mmapindexthreshold):
627 indexdata = util.buffer(util.mmapread(f))
635 indexdata = util.buffer(util.mmapread(f))
628 else:
636 else:
629 indexdata = f.read()
637 indexdata = f.read()
630 if len(indexdata) > 0:
638 if len(indexdata) > 0:
631 v = versionformat_unpack(indexdata[:4])[0]
639 v = versionformat_unpack(indexdata[:4])[0]
632 self._initempty = False
640 self._initempty = False
633 except IOError as inst:
641 except IOError as inst:
634 if inst.errno != errno.ENOENT:
642 if inst.errno != errno.ENOENT:
635 raise
643 raise
636
644
637 self.version = v
645 self.version = v
638 self._inline = v & FLAG_INLINE_DATA
646 self._inline = v & FLAG_INLINE_DATA
639 self._generaldelta = v & FLAG_GENERALDELTA
647 self._generaldelta = v & FLAG_GENERALDELTA
640 flags = v & ~0xFFFF
648 flags = v & ~0xFFFF
641 fmt = v & 0xFFFF
649 fmt = v & 0xFFFF
642 if fmt == REVLOGV0:
650 if fmt == REVLOGV0:
643 if flags:
651 if flags:
644 raise RevlogError(_('unknown flags (%#04x) in version %d '
652 raise RevlogError(_('unknown flags (%#04x) in version %d '
645 'revlog %s') %
653 'revlog %s') %
646 (flags >> 16, fmt, self.indexfile))
654 (flags >> 16, fmt, self.indexfile))
647 elif fmt == REVLOGV1:
655 elif fmt == REVLOGV1:
648 if flags & ~REVLOGV1_FLAGS:
656 if flags & ~REVLOGV1_FLAGS:
649 raise RevlogError(_('unknown flags (%#04x) in version %d '
657 raise RevlogError(_('unknown flags (%#04x) in version %d '
650 'revlog %s') %
658 'revlog %s') %
651 (flags >> 16, fmt, self.indexfile))
659 (flags >> 16, fmt, self.indexfile))
652 elif fmt == REVLOGV2:
660 elif fmt == REVLOGV2:
653 if flags & ~REVLOGV2_FLAGS:
661 if flags & ~REVLOGV2_FLAGS:
654 raise RevlogError(_('unknown flags (%#04x) in version %d '
662 raise RevlogError(_('unknown flags (%#04x) in version %d '
655 'revlog %s') %
663 'revlog %s') %
656 (flags >> 16, fmt, self.indexfile))
664 (flags >> 16, fmt, self.indexfile))
657 else:
665 else:
658 raise RevlogError(_('unknown version (%d) in revlog %s') %
666 raise RevlogError(_('unknown version (%d) in revlog %s') %
659 (fmt, self.indexfile))
667 (fmt, self.indexfile))
660
668
661 self.storedeltachains = True
669 self.storedeltachains = True
662
670
663 self._io = revlogio()
671 self._io = revlogio()
664 if self.version == REVLOGV0:
672 if self.version == REVLOGV0:
665 self._io = revlogoldio()
673 self._io = revlogoldio()
666 try:
674 try:
667 d = self._io.parseindex(indexdata, self._inline)
675 d = self._io.parseindex(indexdata, self._inline)
668 except (ValueError, IndexError):
676 except (ValueError, IndexError):
669 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
677 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
670 self.index, nodemap, self._chunkcache = d
678 self.index, nodemap, self._chunkcache = d
671 if nodemap is not None:
679 if nodemap is not None:
672 self.nodemap = self._nodecache = nodemap
680 self.nodemap = self._nodecache = nodemap
673 if not self._chunkcache:
681 if not self._chunkcache:
674 self._chunkclear()
682 self._chunkclear()
675 # revnum -> (chain-length, sum-delta-length)
683 # revnum -> (chain-length, sum-delta-length)
676 self._chaininfocache = {}
684 self._chaininfocache = {}
677 # revlog header -> revlog compressor
685 # revlog header -> revlog compressor
678 self._decompressors = {}
686 self._decompressors = {}
679
687
680 @util.propertycache
688 @util.propertycache
681 def _compressor(self):
689 def _compressor(self):
682 return util.compengines[self._compengine].revlogcompressor()
690 return util.compengines[self._compengine].revlogcompressor()
683
691
684 def _indexfp(self, mode='r'):
692 def _indexfp(self, mode='r'):
685 """file object for the revlog's index file"""
693 """file object for the revlog's index file"""
686 args = {r'mode': mode}
694 args = {r'mode': mode}
687 if mode != 'r':
695 if mode != 'r':
688 args[r'checkambig'] = self._checkambig
696 args[r'checkambig'] = self._checkambig
689 if mode == 'w':
697 if mode == 'w':
690 args[r'atomictemp'] = True
698 args[r'atomictemp'] = True
691 return self.opener(self.indexfile, **args)
699 return self.opener(self.indexfile, **args)
692
700
693 def _datafp(self, mode='r'):
701 def _datafp(self, mode='r'):
694 """file object for the revlog's data file"""
702 """file object for the revlog's data file"""
695 return self.opener(self.datafile, mode=mode)
703 return self.opener(self.datafile, mode=mode)
696
704
697 @contextlib.contextmanager
705 @contextlib.contextmanager
698 def _datareadfp(self, existingfp=None):
706 def _datareadfp(self, existingfp=None):
699 """file object suitable to read data"""
707 """file object suitable to read data"""
700 if existingfp is not None:
708 if existingfp is not None:
701 yield existingfp
709 yield existingfp
702 else:
710 else:
703 if self._inline:
711 if self._inline:
704 func = self._indexfp
712 func = self._indexfp
705 else:
713 else:
706 func = self._datafp
714 func = self._datafp
707 with func() as fp:
715 with func() as fp:
708 yield fp
716 yield fp
709
717
710 def tip(self):
718 def tip(self):
711 return self.node(len(self.index) - 2)
719 return self.node(len(self.index) - 2)
712 def __contains__(self, rev):
720 def __contains__(self, rev):
713 return 0 <= rev < len(self)
721 return 0 <= rev < len(self)
714 def __len__(self):
722 def __len__(self):
715 return len(self.index) - 1
723 return len(self.index) - 1
716 def __iter__(self):
724 def __iter__(self):
717 return iter(xrange(len(self)))
725 return iter(xrange(len(self)))
718 def revs(self, start=0, stop=None):
726 def revs(self, start=0, stop=None):
719 """iterate over all rev in this revlog (from start to stop)"""
727 """iterate over all rev in this revlog (from start to stop)"""
720 step = 1
728 step = 1
721 if stop is not None:
729 if stop is not None:
722 if start > stop:
730 if start > stop:
723 step = -1
731 step = -1
724 stop += step
732 stop += step
725 else:
733 else:
726 stop = len(self)
734 stop = len(self)
727 return xrange(start, stop, step)
735 return xrange(start, stop, step)
728
736
729 @util.propertycache
737 @util.propertycache
730 def nodemap(self):
738 def nodemap(self):
731 self.rev(self.node(0))
739 self.rev(self.node(0))
732 return self._nodecache
740 return self._nodecache
733
741
734 def hasnode(self, node):
742 def hasnode(self, node):
735 try:
743 try:
736 self.rev(node)
744 self.rev(node)
737 return True
745 return True
738 except KeyError:
746 except KeyError:
739 return False
747 return False
740
748
749 def candelta(self, baserev, rev):
750 """whether two revisions (baserev, rev) can be delta-ed or not"""
751 # Disable delta if either rev requires a content-changing flag
752 # processor (ex. LFS). This is because such flag processor can alter
753 # the rawtext content that the delta will be based on, and two clients
754 # could have a same revlog node with different flags (i.e. different
755 # rawtext contents) and the delta could be incompatible.
756 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
757 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
758 return False
759 return True
760
741 def clearcaches(self):
761 def clearcaches(self):
742 self._cache = None
762 self._cache = None
743 self._chainbasecache.clear()
763 self._chainbasecache.clear()
744 self._chunkcache = (0, '')
764 self._chunkcache = (0, '')
745 self._pcache = {}
765 self._pcache = {}
746
766
747 try:
767 try:
748 self._nodecache.clearcaches()
768 self._nodecache.clearcaches()
749 except AttributeError:
769 except AttributeError:
750 self._nodecache = {nullid: nullrev}
770 self._nodecache = {nullid: nullrev}
751 self._nodepos = None
771 self._nodepos = None
752
772
753 def rev(self, node):
773 def rev(self, node):
754 try:
774 try:
755 return self._nodecache[node]
775 return self._nodecache[node]
756 except TypeError:
776 except TypeError:
757 raise
777 raise
758 except RevlogError:
778 except RevlogError:
759 # parsers.c radix tree lookup failed
779 # parsers.c radix tree lookup failed
760 if node == wdirid:
780 if node == wdirid:
761 raise error.WdirUnsupported
781 raise error.WdirUnsupported
762 raise LookupError(node, self.indexfile, _('no node'))
782 raise LookupError(node, self.indexfile, _('no node'))
763 except KeyError:
783 except KeyError:
764 # pure python cache lookup failed
784 # pure python cache lookup failed
765 n = self._nodecache
785 n = self._nodecache
766 i = self.index
786 i = self.index
767 p = self._nodepos
787 p = self._nodepos
768 if p is None:
788 if p is None:
769 p = len(i) - 2
789 p = len(i) - 2
770 for r in xrange(p, -1, -1):
790 for r in xrange(p, -1, -1):
771 v = i[r][7]
791 v = i[r][7]
772 n[v] = r
792 n[v] = r
773 if v == node:
793 if v == node:
774 self._nodepos = r - 1
794 self._nodepos = r - 1
775 return r
795 return r
776 if node == wdirid:
796 if node == wdirid:
777 raise error.WdirUnsupported
797 raise error.WdirUnsupported
778 raise LookupError(node, self.indexfile, _('no node'))
798 raise LookupError(node, self.indexfile, _('no node'))
779
799
780 # Accessors for index entries.
800 # Accessors for index entries.
781
801
782 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
802 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
783 # are flags.
803 # are flags.
784 def start(self, rev):
804 def start(self, rev):
785 return int(self.index[rev][0] >> 16)
805 return int(self.index[rev][0] >> 16)
786
806
787 def flags(self, rev):
807 def flags(self, rev):
788 return self.index[rev][0] & 0xFFFF
808 return self.index[rev][0] & 0xFFFF
789
809
790 def length(self, rev):
810 def length(self, rev):
791 return self.index[rev][1]
811 return self.index[rev][1]
792
812
793 def rawsize(self, rev):
813 def rawsize(self, rev):
794 """return the length of the uncompressed text for a given revision"""
814 """return the length of the uncompressed text for a given revision"""
795 l = self.index[rev][2]
815 l = self.index[rev][2]
796 if l >= 0:
816 if l >= 0:
797 return l
817 return l
798
818
799 t = self.revision(rev, raw=True)
819 t = self.revision(rev, raw=True)
800 return len(t)
820 return len(t)
801
821
802 def size(self, rev):
822 def size(self, rev):
803 """length of non-raw text (processed by a "read" flag processor)"""
823 """length of non-raw text (processed by a "read" flag processor)"""
804 # fast path: if no "read" flag processor could change the content,
824 # fast path: if no "read" flag processor could change the content,
805 # size is rawsize. note: ELLIPSIS is known to not change the content.
825 # size is rawsize. note: ELLIPSIS is known to not change the content.
806 flags = self.flags(rev)
826 flags = self.flags(rev)
807 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
827 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
808 return self.rawsize(rev)
828 return self.rawsize(rev)
809
829
810 return len(self.revision(rev, raw=False))
830 return len(self.revision(rev, raw=False))
811
831
812 def chainbase(self, rev):
832 def chainbase(self, rev):
813 base = self._chainbasecache.get(rev)
833 base = self._chainbasecache.get(rev)
814 if base is not None:
834 if base is not None:
815 return base
835 return base
816
836
817 index = self.index
837 index = self.index
818 base = index[rev][3]
838 base = index[rev][3]
819 while base != rev:
839 while base != rev:
820 rev = base
840 rev = base
821 base = index[rev][3]
841 base = index[rev][3]
822
842
823 self._chainbasecache[rev] = base
843 self._chainbasecache[rev] = base
824 return base
844 return base
825
845
826 def linkrev(self, rev):
846 def linkrev(self, rev):
827 return self.index[rev][4]
847 return self.index[rev][4]
828
848
829 def parentrevs(self, rev):
849 def parentrevs(self, rev):
830 try:
850 try:
831 entry = self.index[rev]
851 entry = self.index[rev]
832 except IndexError:
852 except IndexError:
833 if rev == wdirrev:
853 if rev == wdirrev:
834 raise error.WdirUnsupported
854 raise error.WdirUnsupported
835 raise
855 raise
836
856
837 return entry[5], entry[6]
857 return entry[5], entry[6]
838
858
839 def node(self, rev):
859 def node(self, rev):
840 try:
860 try:
841 return self.index[rev][7]
861 return self.index[rev][7]
842 except IndexError:
862 except IndexError:
843 if rev == wdirrev:
863 if rev == wdirrev:
844 raise error.WdirUnsupported
864 raise error.WdirUnsupported
845 raise
865 raise
846
866
847 # Derived from index values.
867 # Derived from index values.
848
868
849 def end(self, rev):
869 def end(self, rev):
850 return self.start(rev) + self.length(rev)
870 return self.start(rev) + self.length(rev)
851
871
852 def parents(self, node):
872 def parents(self, node):
853 i = self.index
873 i = self.index
854 d = i[self.rev(node)]
874 d = i[self.rev(node)]
855 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
875 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
856
876
857 def chainlen(self, rev):
877 def chainlen(self, rev):
858 return self._chaininfo(rev)[0]
878 return self._chaininfo(rev)[0]
859
879
860 def _chaininfo(self, rev):
880 def _chaininfo(self, rev):
861 chaininfocache = self._chaininfocache
881 chaininfocache = self._chaininfocache
862 if rev in chaininfocache:
882 if rev in chaininfocache:
863 return chaininfocache[rev]
883 return chaininfocache[rev]
864 index = self.index
884 index = self.index
865 generaldelta = self._generaldelta
885 generaldelta = self._generaldelta
866 iterrev = rev
886 iterrev = rev
867 e = index[iterrev]
887 e = index[iterrev]
868 clen = 0
888 clen = 0
869 compresseddeltalen = 0
889 compresseddeltalen = 0
870 while iterrev != e[3]:
890 while iterrev != e[3]:
871 clen += 1
891 clen += 1
872 compresseddeltalen += e[1]
892 compresseddeltalen += e[1]
873 if generaldelta:
893 if generaldelta:
874 iterrev = e[3]
894 iterrev = e[3]
875 else:
895 else:
876 iterrev -= 1
896 iterrev -= 1
877 if iterrev in chaininfocache:
897 if iterrev in chaininfocache:
878 t = chaininfocache[iterrev]
898 t = chaininfocache[iterrev]
879 clen += t[0]
899 clen += t[0]
880 compresseddeltalen += t[1]
900 compresseddeltalen += t[1]
881 break
901 break
882 e = index[iterrev]
902 e = index[iterrev]
883 else:
903 else:
884 # Add text length of base since decompressing that also takes
904 # Add text length of base since decompressing that also takes
885 # work. For cache hits the length is already included.
905 # work. For cache hits the length is already included.
886 compresseddeltalen += e[1]
906 compresseddeltalen += e[1]
887 r = (clen, compresseddeltalen)
907 r = (clen, compresseddeltalen)
888 chaininfocache[rev] = r
908 chaininfocache[rev] = r
889 return r
909 return r
890
910
891 def _deltachain(self, rev, stoprev=None):
911 def _deltachain(self, rev, stoprev=None):
892 """Obtain the delta chain for a revision.
912 """Obtain the delta chain for a revision.
893
913
894 ``stoprev`` specifies a revision to stop at. If not specified, we
914 ``stoprev`` specifies a revision to stop at. If not specified, we
895 stop at the base of the chain.
915 stop at the base of the chain.
896
916
897 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
917 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
898 revs in ascending order and ``stopped`` is a bool indicating whether
918 revs in ascending order and ``stopped`` is a bool indicating whether
899 ``stoprev`` was hit.
919 ``stoprev`` was hit.
900 """
920 """
901 # Try C implementation.
921 # Try C implementation.
902 try:
922 try:
903 return self.index.deltachain(rev, stoprev, self._generaldelta)
923 return self.index.deltachain(rev, stoprev, self._generaldelta)
904 except AttributeError:
924 except AttributeError:
905 pass
925 pass
906
926
907 chain = []
927 chain = []
908
928
909 # Alias to prevent attribute lookup in tight loop.
929 # Alias to prevent attribute lookup in tight loop.
910 index = self.index
930 index = self.index
911 generaldelta = self._generaldelta
931 generaldelta = self._generaldelta
912
932
913 iterrev = rev
933 iterrev = rev
914 e = index[iterrev]
934 e = index[iterrev]
915 while iterrev != e[3] and iterrev != stoprev:
935 while iterrev != e[3] and iterrev != stoprev:
916 chain.append(iterrev)
936 chain.append(iterrev)
917 if generaldelta:
937 if generaldelta:
918 iterrev = e[3]
938 iterrev = e[3]
919 else:
939 else:
920 iterrev -= 1
940 iterrev -= 1
921 e = index[iterrev]
941 e = index[iterrev]
922
942
923 if iterrev == stoprev:
943 if iterrev == stoprev:
924 stopped = True
944 stopped = True
925 else:
945 else:
926 chain.append(iterrev)
946 chain.append(iterrev)
927 stopped = False
947 stopped = False
928
948
929 chain.reverse()
949 chain.reverse()
930 return chain, stopped
950 return chain, stopped
931
951
932 def ancestors(self, revs, stoprev=0, inclusive=False):
952 def ancestors(self, revs, stoprev=0, inclusive=False):
933 """Generate the ancestors of 'revs' in reverse topological order.
953 """Generate the ancestors of 'revs' in reverse topological order.
934 Does not generate revs lower than stoprev.
954 Does not generate revs lower than stoprev.
935
955
936 See the documentation for ancestor.lazyancestors for more details."""
956 See the documentation for ancestor.lazyancestors for more details."""
937
957
938 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
958 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
939 inclusive=inclusive)
959 inclusive=inclusive)
940
960
941 def descendants(self, revs):
961 def descendants(self, revs):
942 """Generate the descendants of 'revs' in revision order.
962 """Generate the descendants of 'revs' in revision order.
943
963
944 Yield a sequence of revision numbers starting with a child of
964 Yield a sequence of revision numbers starting with a child of
945 some rev in revs, i.e., each revision is *not* considered a
965 some rev in revs, i.e., each revision is *not* considered a
946 descendant of itself. Results are ordered by revision number (a
966 descendant of itself. Results are ordered by revision number (a
947 topological sort)."""
967 topological sort)."""
948 first = min(revs)
968 first = min(revs)
949 if first == nullrev:
969 if first == nullrev:
950 for i in self:
970 for i in self:
951 yield i
971 yield i
952 return
972 return
953
973
954 seen = set(revs)
974 seen = set(revs)
955 for i in self.revs(start=first + 1):
975 for i in self.revs(start=first + 1):
956 for x in self.parentrevs(i):
976 for x in self.parentrevs(i):
957 if x != nullrev and x in seen:
977 if x != nullrev and x in seen:
958 seen.add(i)
978 seen.add(i)
959 yield i
979 yield i
960 break
980 break
961
981
962 def findcommonmissing(self, common=None, heads=None):
982 def findcommonmissing(self, common=None, heads=None):
963 """Return a tuple of the ancestors of common and the ancestors of heads
983 """Return a tuple of the ancestors of common and the ancestors of heads
964 that are not ancestors of common. In revset terminology, we return the
984 that are not ancestors of common. In revset terminology, we return the
965 tuple:
985 tuple:
966
986
967 ::common, (::heads) - (::common)
987 ::common, (::heads) - (::common)
968
988
969 The list is sorted by revision number, meaning it is
989 The list is sorted by revision number, meaning it is
970 topologically sorted.
990 topologically sorted.
971
991
972 'heads' and 'common' are both lists of node IDs. If heads is
992 'heads' and 'common' are both lists of node IDs. If heads is
973 not supplied, uses all of the revlog's heads. If common is not
993 not supplied, uses all of the revlog's heads. If common is not
974 supplied, uses nullid."""
994 supplied, uses nullid."""
975 if common is None:
995 if common is None:
976 common = [nullid]
996 common = [nullid]
977 if heads is None:
997 if heads is None:
978 heads = self.heads()
998 heads = self.heads()
979
999
980 common = [self.rev(n) for n in common]
1000 common = [self.rev(n) for n in common]
981 heads = [self.rev(n) for n in heads]
1001 heads = [self.rev(n) for n in heads]
982
1002
983 # we want the ancestors, but inclusive
1003 # we want the ancestors, but inclusive
984 class lazyset(object):
1004 class lazyset(object):
985 def __init__(self, lazyvalues):
1005 def __init__(self, lazyvalues):
986 self.addedvalues = set()
1006 self.addedvalues = set()
987 self.lazyvalues = lazyvalues
1007 self.lazyvalues = lazyvalues
988
1008
989 def __contains__(self, value):
1009 def __contains__(self, value):
990 return value in self.addedvalues or value in self.lazyvalues
1010 return value in self.addedvalues or value in self.lazyvalues
991
1011
992 def __iter__(self):
1012 def __iter__(self):
993 added = self.addedvalues
1013 added = self.addedvalues
994 for r in added:
1014 for r in added:
995 yield r
1015 yield r
996 for r in self.lazyvalues:
1016 for r in self.lazyvalues:
997 if not r in added:
1017 if not r in added:
998 yield r
1018 yield r
999
1019
1000 def add(self, value):
1020 def add(self, value):
1001 self.addedvalues.add(value)
1021 self.addedvalues.add(value)
1002
1022
1003 def update(self, values):
1023 def update(self, values):
1004 self.addedvalues.update(values)
1024 self.addedvalues.update(values)
1005
1025
1006 has = lazyset(self.ancestors(common))
1026 has = lazyset(self.ancestors(common))
1007 has.add(nullrev)
1027 has.add(nullrev)
1008 has.update(common)
1028 has.update(common)
1009
1029
1010 # take all ancestors from heads that aren't in has
1030 # take all ancestors from heads that aren't in has
1011 missing = set()
1031 missing = set()
1012 visit = collections.deque(r for r in heads if r not in has)
1032 visit = collections.deque(r for r in heads if r not in has)
1013 while visit:
1033 while visit:
1014 r = visit.popleft()
1034 r = visit.popleft()
1015 if r in missing:
1035 if r in missing:
1016 continue
1036 continue
1017 else:
1037 else:
1018 missing.add(r)
1038 missing.add(r)
1019 for p in self.parentrevs(r):
1039 for p in self.parentrevs(r):
1020 if p not in has:
1040 if p not in has:
1021 visit.append(p)
1041 visit.append(p)
1022 missing = list(missing)
1042 missing = list(missing)
1023 missing.sort()
1043 missing.sort()
1024 return has, [self.node(miss) for miss in missing]
1044 return has, [self.node(miss) for miss in missing]
1025
1045
1026 def incrementalmissingrevs(self, common=None):
1046 def incrementalmissingrevs(self, common=None):
1027 """Return an object that can be used to incrementally compute the
1047 """Return an object that can be used to incrementally compute the
1028 revision numbers of the ancestors of arbitrary sets that are not
1048 revision numbers of the ancestors of arbitrary sets that are not
1029 ancestors of common. This is an ancestor.incrementalmissingancestors
1049 ancestors of common. This is an ancestor.incrementalmissingancestors
1030 object.
1050 object.
1031
1051
1032 'common' is a list of revision numbers. If common is not supplied, uses
1052 'common' is a list of revision numbers. If common is not supplied, uses
1033 nullrev.
1053 nullrev.
1034 """
1054 """
1035 if common is None:
1055 if common is None:
1036 common = [nullrev]
1056 common = [nullrev]
1037
1057
1038 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1058 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1039
1059
1040 def findmissingrevs(self, common=None, heads=None):
1060 def findmissingrevs(self, common=None, heads=None):
1041 """Return the revision numbers of the ancestors of heads that
1061 """Return the revision numbers of the ancestors of heads that
1042 are not ancestors of common.
1062 are not ancestors of common.
1043
1063
1044 More specifically, return a list of revision numbers corresponding to
1064 More specifically, return a list of revision numbers corresponding to
1045 nodes N such that every N satisfies the following constraints:
1065 nodes N such that every N satisfies the following constraints:
1046
1066
1047 1. N is an ancestor of some node in 'heads'
1067 1. N is an ancestor of some node in 'heads'
1048 2. N is not an ancestor of any node in 'common'
1068 2. N is not an ancestor of any node in 'common'
1049
1069
1050 The list is sorted by revision number, meaning it is
1070 The list is sorted by revision number, meaning it is
1051 topologically sorted.
1071 topologically sorted.
1052
1072
1053 'heads' and 'common' are both lists of revision numbers. If heads is
1073 'heads' and 'common' are both lists of revision numbers. If heads is
1054 not supplied, uses all of the revlog's heads. If common is not
1074 not supplied, uses all of the revlog's heads. If common is not
1055 supplied, uses nullid."""
1075 supplied, uses nullid."""
1056 if common is None:
1076 if common is None:
1057 common = [nullrev]
1077 common = [nullrev]
1058 if heads is None:
1078 if heads is None:
1059 heads = self.headrevs()
1079 heads = self.headrevs()
1060
1080
1061 inc = self.incrementalmissingrevs(common=common)
1081 inc = self.incrementalmissingrevs(common=common)
1062 return inc.missingancestors(heads)
1082 return inc.missingancestors(heads)
1063
1083
1064 def findmissing(self, common=None, heads=None):
1084 def findmissing(self, common=None, heads=None):
1065 """Return the ancestors of heads that are not ancestors of common.
1085 """Return the ancestors of heads that are not ancestors of common.
1066
1086
1067 More specifically, return a list of nodes N such that every N
1087 More specifically, return a list of nodes N such that every N
1068 satisfies the following constraints:
1088 satisfies the following constraints:
1069
1089
1070 1. N is an ancestor of some node in 'heads'
1090 1. N is an ancestor of some node in 'heads'
1071 2. N is not an ancestor of any node in 'common'
1091 2. N is not an ancestor of any node in 'common'
1072
1092
1073 The list is sorted by revision number, meaning it is
1093 The list is sorted by revision number, meaning it is
1074 topologically sorted.
1094 topologically sorted.
1075
1095
1076 'heads' and 'common' are both lists of node IDs. If heads is
1096 'heads' and 'common' are both lists of node IDs. If heads is
1077 not supplied, uses all of the revlog's heads. If common is not
1097 not supplied, uses all of the revlog's heads. If common is not
1078 supplied, uses nullid."""
1098 supplied, uses nullid."""
1079 if common is None:
1099 if common is None:
1080 common = [nullid]
1100 common = [nullid]
1081 if heads is None:
1101 if heads is None:
1082 heads = self.heads()
1102 heads = self.heads()
1083
1103
1084 common = [self.rev(n) for n in common]
1104 common = [self.rev(n) for n in common]
1085 heads = [self.rev(n) for n in heads]
1105 heads = [self.rev(n) for n in heads]
1086
1106
1087 inc = self.incrementalmissingrevs(common=common)
1107 inc = self.incrementalmissingrevs(common=common)
1088 return [self.node(r) for r in inc.missingancestors(heads)]
1108 return [self.node(r) for r in inc.missingancestors(heads)]
1089
1109
1090 def nodesbetween(self, roots=None, heads=None):
1110 def nodesbetween(self, roots=None, heads=None):
1091 """Return a topological path from 'roots' to 'heads'.
1111 """Return a topological path from 'roots' to 'heads'.
1092
1112
1093 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1113 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1094 topologically sorted list of all nodes N that satisfy both of
1114 topologically sorted list of all nodes N that satisfy both of
1095 these constraints:
1115 these constraints:
1096
1116
1097 1. N is a descendant of some node in 'roots'
1117 1. N is a descendant of some node in 'roots'
1098 2. N is an ancestor of some node in 'heads'
1118 2. N is an ancestor of some node in 'heads'
1099
1119
1100 Every node is considered to be both a descendant and an ancestor
1120 Every node is considered to be both a descendant and an ancestor
1101 of itself, so every reachable node in 'roots' and 'heads' will be
1121 of itself, so every reachable node in 'roots' and 'heads' will be
1102 included in 'nodes'.
1122 included in 'nodes'.
1103
1123
1104 'outroots' is the list of reachable nodes in 'roots', i.e., the
1124 'outroots' is the list of reachable nodes in 'roots', i.e., the
1105 subset of 'roots' that is returned in 'nodes'. Likewise,
1125 subset of 'roots' that is returned in 'nodes'. Likewise,
1106 'outheads' is the subset of 'heads' that is also in 'nodes'.
1126 'outheads' is the subset of 'heads' that is also in 'nodes'.
1107
1127
1108 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1128 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1109 unspecified, uses nullid as the only root. If 'heads' is
1129 unspecified, uses nullid as the only root. If 'heads' is
1110 unspecified, uses list of all of the revlog's heads."""
1130 unspecified, uses list of all of the revlog's heads."""
1111 nonodes = ([], [], [])
1131 nonodes = ([], [], [])
1112 if roots is not None:
1132 if roots is not None:
1113 roots = list(roots)
1133 roots = list(roots)
1114 if not roots:
1134 if not roots:
1115 return nonodes
1135 return nonodes
1116 lowestrev = min([self.rev(n) for n in roots])
1136 lowestrev = min([self.rev(n) for n in roots])
1117 else:
1137 else:
1118 roots = [nullid] # Everybody's a descendant of nullid
1138 roots = [nullid] # Everybody's a descendant of nullid
1119 lowestrev = nullrev
1139 lowestrev = nullrev
1120 if (lowestrev == nullrev) and (heads is None):
1140 if (lowestrev == nullrev) and (heads is None):
1121 # We want _all_ the nodes!
1141 # We want _all_ the nodes!
1122 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1142 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1123 if heads is None:
1143 if heads is None:
1124 # All nodes are ancestors, so the latest ancestor is the last
1144 # All nodes are ancestors, so the latest ancestor is the last
1125 # node.
1145 # node.
1126 highestrev = len(self) - 1
1146 highestrev = len(self) - 1
1127 # Set ancestors to None to signal that every node is an ancestor.
1147 # Set ancestors to None to signal that every node is an ancestor.
1128 ancestors = None
1148 ancestors = None
1129 # Set heads to an empty dictionary for later discovery of heads
1149 # Set heads to an empty dictionary for later discovery of heads
1130 heads = {}
1150 heads = {}
1131 else:
1151 else:
1132 heads = list(heads)
1152 heads = list(heads)
1133 if not heads:
1153 if not heads:
1134 return nonodes
1154 return nonodes
1135 ancestors = set()
1155 ancestors = set()
1136 # Turn heads into a dictionary so we can remove 'fake' heads.
1156 # Turn heads into a dictionary so we can remove 'fake' heads.
1137 # Also, later we will be using it to filter out the heads we can't
1157 # Also, later we will be using it to filter out the heads we can't
1138 # find from roots.
1158 # find from roots.
1139 heads = dict.fromkeys(heads, False)
1159 heads = dict.fromkeys(heads, False)
1140 # Start at the top and keep marking parents until we're done.
1160 # Start at the top and keep marking parents until we're done.
1141 nodestotag = set(heads)
1161 nodestotag = set(heads)
1142 # Remember where the top was so we can use it as a limit later.
1162 # Remember where the top was so we can use it as a limit later.
1143 highestrev = max([self.rev(n) for n in nodestotag])
1163 highestrev = max([self.rev(n) for n in nodestotag])
1144 while nodestotag:
1164 while nodestotag:
1145 # grab a node to tag
1165 # grab a node to tag
1146 n = nodestotag.pop()
1166 n = nodestotag.pop()
1147 # Never tag nullid
1167 # Never tag nullid
1148 if n == nullid:
1168 if n == nullid:
1149 continue
1169 continue
1150 # A node's revision number represents its place in a
1170 # A node's revision number represents its place in a
1151 # topologically sorted list of nodes.
1171 # topologically sorted list of nodes.
1152 r = self.rev(n)
1172 r = self.rev(n)
1153 if r >= lowestrev:
1173 if r >= lowestrev:
1154 if n not in ancestors:
1174 if n not in ancestors:
1155 # If we are possibly a descendant of one of the roots
1175 # If we are possibly a descendant of one of the roots
1156 # and we haven't already been marked as an ancestor
1176 # and we haven't already been marked as an ancestor
1157 ancestors.add(n) # Mark as ancestor
1177 ancestors.add(n) # Mark as ancestor
1158 # Add non-nullid parents to list of nodes to tag.
1178 # Add non-nullid parents to list of nodes to tag.
1159 nodestotag.update([p for p in self.parents(n) if
1179 nodestotag.update([p for p in self.parents(n) if
1160 p != nullid])
1180 p != nullid])
1161 elif n in heads: # We've seen it before, is it a fake head?
1181 elif n in heads: # We've seen it before, is it a fake head?
1162 # So it is, real heads should not be the ancestors of
1182 # So it is, real heads should not be the ancestors of
1163 # any other heads.
1183 # any other heads.
1164 heads.pop(n)
1184 heads.pop(n)
1165 if not ancestors:
1185 if not ancestors:
1166 return nonodes
1186 return nonodes
1167 # Now that we have our set of ancestors, we want to remove any
1187 # Now that we have our set of ancestors, we want to remove any
1168 # roots that are not ancestors.
1188 # roots that are not ancestors.
1169
1189
1170 # If one of the roots was nullid, everything is included anyway.
1190 # If one of the roots was nullid, everything is included anyway.
1171 if lowestrev > nullrev:
1191 if lowestrev > nullrev:
1172 # But, since we weren't, let's recompute the lowest rev to not
1192 # But, since we weren't, let's recompute the lowest rev to not
1173 # include roots that aren't ancestors.
1193 # include roots that aren't ancestors.
1174
1194
1175 # Filter out roots that aren't ancestors of heads
1195 # Filter out roots that aren't ancestors of heads
1176 roots = [root for root in roots if root in ancestors]
1196 roots = [root for root in roots if root in ancestors]
1177 # Recompute the lowest revision
1197 # Recompute the lowest revision
1178 if roots:
1198 if roots:
1179 lowestrev = min([self.rev(root) for root in roots])
1199 lowestrev = min([self.rev(root) for root in roots])
1180 else:
1200 else:
1181 # No more roots? Return empty list
1201 # No more roots? Return empty list
1182 return nonodes
1202 return nonodes
1183 else:
1203 else:
1184 # We are descending from nullid, and don't need to care about
1204 # We are descending from nullid, and don't need to care about
1185 # any other roots.
1205 # any other roots.
1186 lowestrev = nullrev
1206 lowestrev = nullrev
1187 roots = [nullid]
1207 roots = [nullid]
1188 # Transform our roots list into a set.
1208 # Transform our roots list into a set.
1189 descendants = set(roots)
1209 descendants = set(roots)
1190 # Also, keep the original roots so we can filter out roots that aren't
1210 # Also, keep the original roots so we can filter out roots that aren't
1191 # 'real' roots (i.e. are descended from other roots).
1211 # 'real' roots (i.e. are descended from other roots).
1192 roots = descendants.copy()
1212 roots = descendants.copy()
1193 # Our topologically sorted list of output nodes.
1213 # Our topologically sorted list of output nodes.
1194 orderedout = []
1214 orderedout = []
1195 # Don't start at nullid since we don't want nullid in our output list,
1215 # Don't start at nullid since we don't want nullid in our output list,
1196 # and if nullid shows up in descendants, empty parents will look like
1216 # and if nullid shows up in descendants, empty parents will look like
1197 # they're descendants.
1217 # they're descendants.
1198 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1218 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1199 n = self.node(r)
1219 n = self.node(r)
1200 isdescendant = False
1220 isdescendant = False
1201 if lowestrev == nullrev: # Everybody is a descendant of nullid
1221 if lowestrev == nullrev: # Everybody is a descendant of nullid
1202 isdescendant = True
1222 isdescendant = True
1203 elif n in descendants:
1223 elif n in descendants:
1204 # n is already a descendant
1224 # n is already a descendant
1205 isdescendant = True
1225 isdescendant = True
1206 # This check only needs to be done here because all the roots
1226 # This check only needs to be done here because all the roots
1207 # will start being marked is descendants before the loop.
1227 # will start being marked is descendants before the loop.
1208 if n in roots:
1228 if n in roots:
1209 # If n was a root, check if it's a 'real' root.
1229 # If n was a root, check if it's a 'real' root.
1210 p = tuple(self.parents(n))
1230 p = tuple(self.parents(n))
1211 # If any of its parents are descendants, it's not a root.
1231 # If any of its parents are descendants, it's not a root.
1212 if (p[0] in descendants) or (p[1] in descendants):
1232 if (p[0] in descendants) or (p[1] in descendants):
1213 roots.remove(n)
1233 roots.remove(n)
1214 else:
1234 else:
1215 p = tuple(self.parents(n))
1235 p = tuple(self.parents(n))
1216 # A node is a descendant if either of its parents are
1236 # A node is a descendant if either of its parents are
1217 # descendants. (We seeded the dependents list with the roots
1237 # descendants. (We seeded the dependents list with the roots
1218 # up there, remember?)
1238 # up there, remember?)
1219 if (p[0] in descendants) or (p[1] in descendants):
1239 if (p[0] in descendants) or (p[1] in descendants):
1220 descendants.add(n)
1240 descendants.add(n)
1221 isdescendant = True
1241 isdescendant = True
1222 if isdescendant and ((ancestors is None) or (n in ancestors)):
1242 if isdescendant and ((ancestors is None) or (n in ancestors)):
1223 # Only include nodes that are both descendants and ancestors.
1243 # Only include nodes that are both descendants and ancestors.
1224 orderedout.append(n)
1244 orderedout.append(n)
1225 if (ancestors is not None) and (n in heads):
1245 if (ancestors is not None) and (n in heads):
1226 # We're trying to figure out which heads are reachable
1246 # We're trying to figure out which heads are reachable
1227 # from roots.
1247 # from roots.
1228 # Mark this head as having been reached
1248 # Mark this head as having been reached
1229 heads[n] = True
1249 heads[n] = True
1230 elif ancestors is None:
1250 elif ancestors is None:
1231 # Otherwise, we're trying to discover the heads.
1251 # Otherwise, we're trying to discover the heads.
1232 # Assume this is a head because if it isn't, the next step
1252 # Assume this is a head because if it isn't, the next step
1233 # will eventually remove it.
1253 # will eventually remove it.
1234 heads[n] = True
1254 heads[n] = True
1235 # But, obviously its parents aren't.
1255 # But, obviously its parents aren't.
1236 for p in self.parents(n):
1256 for p in self.parents(n):
1237 heads.pop(p, None)
1257 heads.pop(p, None)
1238 heads = [head for head, flag in heads.iteritems() if flag]
1258 heads = [head for head, flag in heads.iteritems() if flag]
1239 roots = list(roots)
1259 roots = list(roots)
1240 assert orderedout
1260 assert orderedout
1241 assert roots
1261 assert roots
1242 assert heads
1262 assert heads
1243 return (orderedout, roots, heads)
1263 return (orderedout, roots, heads)
1244
1264
1245 def headrevs(self):
1265 def headrevs(self):
1246 try:
1266 try:
1247 return self.index.headrevs()
1267 return self.index.headrevs()
1248 except AttributeError:
1268 except AttributeError:
1249 return self._headrevs()
1269 return self._headrevs()
1250
1270
1251 def computephases(self, roots):
1271 def computephases(self, roots):
1252 return self.index.computephasesmapsets(roots)
1272 return self.index.computephasesmapsets(roots)
1253
1273
1254 def _headrevs(self):
1274 def _headrevs(self):
1255 count = len(self)
1275 count = len(self)
1256 if not count:
1276 if not count:
1257 return [nullrev]
1277 return [nullrev]
1258 # we won't iter over filtered rev so nobody is a head at start
1278 # we won't iter over filtered rev so nobody is a head at start
1259 ishead = [0] * (count + 1)
1279 ishead = [0] * (count + 1)
1260 index = self.index
1280 index = self.index
1261 for r in self:
1281 for r in self:
1262 ishead[r] = 1 # I may be an head
1282 ishead[r] = 1 # I may be an head
1263 e = index[r]
1283 e = index[r]
1264 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1284 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1265 return [r for r, val in enumerate(ishead) if val]
1285 return [r for r, val in enumerate(ishead) if val]
1266
1286
1267 def heads(self, start=None, stop=None):
1287 def heads(self, start=None, stop=None):
1268 """return the list of all nodes that have no children
1288 """return the list of all nodes that have no children
1269
1289
1270 if start is specified, only heads that are descendants of
1290 if start is specified, only heads that are descendants of
1271 start will be returned
1291 start will be returned
1272 if stop is specified, it will consider all the revs from stop
1292 if stop is specified, it will consider all the revs from stop
1273 as if they had no children
1293 as if they had no children
1274 """
1294 """
1275 if start is None and stop is None:
1295 if start is None and stop is None:
1276 if not len(self):
1296 if not len(self):
1277 return [nullid]
1297 return [nullid]
1278 return [self.node(r) for r in self.headrevs()]
1298 return [self.node(r) for r in self.headrevs()]
1279
1299
1280 if start is None:
1300 if start is None:
1281 start = nullid
1301 start = nullid
1282 if stop is None:
1302 if stop is None:
1283 stop = []
1303 stop = []
1284 stoprevs = set([self.rev(n) for n in stop])
1304 stoprevs = set([self.rev(n) for n in stop])
1285 startrev = self.rev(start)
1305 startrev = self.rev(start)
1286 reachable = {startrev}
1306 reachable = {startrev}
1287 heads = {startrev}
1307 heads = {startrev}
1288
1308
1289 parentrevs = self.parentrevs
1309 parentrevs = self.parentrevs
1290 for r in self.revs(start=startrev + 1):
1310 for r in self.revs(start=startrev + 1):
1291 for p in parentrevs(r):
1311 for p in parentrevs(r):
1292 if p in reachable:
1312 if p in reachable:
1293 if r not in stoprevs:
1313 if r not in stoprevs:
1294 reachable.add(r)
1314 reachable.add(r)
1295 heads.add(r)
1315 heads.add(r)
1296 if p in heads and p not in stoprevs:
1316 if p in heads and p not in stoprevs:
1297 heads.remove(p)
1317 heads.remove(p)
1298
1318
1299 return [self.node(r) for r in heads]
1319 return [self.node(r) for r in heads]
1300
1320
1301 def children(self, node):
1321 def children(self, node):
1302 """find the children of a given node"""
1322 """find the children of a given node"""
1303 c = []
1323 c = []
1304 p = self.rev(node)
1324 p = self.rev(node)
1305 for r in self.revs(start=p + 1):
1325 for r in self.revs(start=p + 1):
1306 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1326 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1307 if prevs:
1327 if prevs:
1308 for pr in prevs:
1328 for pr in prevs:
1309 if pr == p:
1329 if pr == p:
1310 c.append(self.node(r))
1330 c.append(self.node(r))
1311 elif p == nullrev:
1331 elif p == nullrev:
1312 c.append(self.node(r))
1332 c.append(self.node(r))
1313 return c
1333 return c
1314
1334
1315 def descendant(self, start, end):
1335 def descendant(self, start, end):
1316 if start == nullrev:
1336 if start == nullrev:
1317 return True
1337 return True
1318 for i in self.descendants([start]):
1338 for i in self.descendants([start]):
1319 if i == end:
1339 if i == end:
1320 return True
1340 return True
1321 elif i > end:
1341 elif i > end:
1322 break
1342 break
1323 return False
1343 return False
1324
1344
1325 def commonancestorsheads(self, a, b):
1345 def commonancestorsheads(self, a, b):
1326 """calculate all the heads of the common ancestors of nodes a and b"""
1346 """calculate all the heads of the common ancestors of nodes a and b"""
1327 a, b = self.rev(a), self.rev(b)
1347 a, b = self.rev(a), self.rev(b)
1328 try:
1348 try:
1329 ancs = self.index.commonancestorsheads(a, b)
1349 ancs = self.index.commonancestorsheads(a, b)
1330 except (AttributeError, OverflowError): # C implementation failed
1350 except (AttributeError, OverflowError): # C implementation failed
1331 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
1351 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
1332 return pycompat.maplist(self.node, ancs)
1352 return pycompat.maplist(self.node, ancs)
1333
1353
1334 def isancestor(self, a, b):
1354 def isancestor(self, a, b):
1335 """return True if node a is an ancestor of node b
1355 """return True if node a is an ancestor of node b
1336
1356
1337 The implementation of this is trivial but the use of
1357 The implementation of this is trivial but the use of
1338 commonancestorsheads is not."""
1358 commonancestorsheads is not."""
1339 return a in self.commonancestorsheads(a, b)
1359 return a in self.commonancestorsheads(a, b)
1340
1360
1341 def ancestor(self, a, b):
1361 def ancestor(self, a, b):
1342 """calculate the "best" common ancestor of nodes a and b"""
1362 """calculate the "best" common ancestor of nodes a and b"""
1343
1363
1344 a, b = self.rev(a), self.rev(b)
1364 a, b = self.rev(a), self.rev(b)
1345 try:
1365 try:
1346 ancs = self.index.ancestors(a, b)
1366 ancs = self.index.ancestors(a, b)
1347 except (AttributeError, OverflowError):
1367 except (AttributeError, OverflowError):
1348 ancs = ancestor.ancestors(self.parentrevs, a, b)
1368 ancs = ancestor.ancestors(self.parentrevs, a, b)
1349 if ancs:
1369 if ancs:
1350 # choose a consistent winner when there's a tie
1370 # choose a consistent winner when there's a tie
1351 return min(map(self.node, ancs))
1371 return min(map(self.node, ancs))
1352 return nullid
1372 return nullid
1353
1373
1354 def _match(self, id):
1374 def _match(self, id):
1355 if isinstance(id, int):
1375 if isinstance(id, int):
1356 # rev
1376 # rev
1357 return self.node(id)
1377 return self.node(id)
1358 if len(id) == 20:
1378 if len(id) == 20:
1359 # possibly a binary node
1379 # possibly a binary node
1360 # odds of a binary node being all hex in ASCII are 1 in 10**25
1380 # odds of a binary node being all hex in ASCII are 1 in 10**25
1361 try:
1381 try:
1362 node = id
1382 node = id
1363 self.rev(node) # quick search the index
1383 self.rev(node) # quick search the index
1364 return node
1384 return node
1365 except LookupError:
1385 except LookupError:
1366 pass # may be partial hex id
1386 pass # may be partial hex id
1367 try:
1387 try:
1368 # str(rev)
1388 # str(rev)
1369 rev = int(id)
1389 rev = int(id)
1370 if "%d" % rev != id:
1390 if "%d" % rev != id:
1371 raise ValueError
1391 raise ValueError
1372 if rev < 0:
1392 if rev < 0:
1373 rev = len(self) + rev
1393 rev = len(self) + rev
1374 if rev < 0 or rev >= len(self):
1394 if rev < 0 or rev >= len(self):
1375 raise ValueError
1395 raise ValueError
1376 return self.node(rev)
1396 return self.node(rev)
1377 except (ValueError, OverflowError):
1397 except (ValueError, OverflowError):
1378 pass
1398 pass
1379 if len(id) == 40:
1399 if len(id) == 40:
1380 try:
1400 try:
1381 # a full hex nodeid?
1401 # a full hex nodeid?
1382 node = bin(id)
1402 node = bin(id)
1383 self.rev(node)
1403 self.rev(node)
1384 return node
1404 return node
1385 except (TypeError, LookupError):
1405 except (TypeError, LookupError):
1386 pass
1406 pass
1387
1407
1388 def _partialmatch(self, id):
1408 def _partialmatch(self, id):
1389 maybewdir = wdirhex.startswith(id)
1409 maybewdir = wdirhex.startswith(id)
1390 try:
1410 try:
1391 partial = self.index.partialmatch(id)
1411 partial = self.index.partialmatch(id)
1392 if partial and self.hasnode(partial):
1412 if partial and self.hasnode(partial):
1393 if maybewdir:
1413 if maybewdir:
1394 # single 'ff...' match in radix tree, ambiguous with wdir
1414 # single 'ff...' match in radix tree, ambiguous with wdir
1395 raise RevlogError
1415 raise RevlogError
1396 return partial
1416 return partial
1397 if maybewdir:
1417 if maybewdir:
1398 # no 'ff...' match in radix tree, wdir identified
1418 # no 'ff...' match in radix tree, wdir identified
1399 raise error.WdirUnsupported
1419 raise error.WdirUnsupported
1400 return None
1420 return None
1401 except RevlogError:
1421 except RevlogError:
1402 # parsers.c radix tree lookup gave multiple matches
1422 # parsers.c radix tree lookup gave multiple matches
1403 # fast path: for unfiltered changelog, radix tree is accurate
1423 # fast path: for unfiltered changelog, radix tree is accurate
1404 if not getattr(self, 'filteredrevs', None):
1424 if not getattr(self, 'filteredrevs', None):
1405 raise LookupError(id, self.indexfile,
1425 raise LookupError(id, self.indexfile,
1406 _('ambiguous identifier'))
1426 _('ambiguous identifier'))
1407 # fall through to slow path that filters hidden revisions
1427 # fall through to slow path that filters hidden revisions
1408 except (AttributeError, ValueError):
1428 except (AttributeError, ValueError):
1409 # we are pure python, or key was too short to search radix tree
1429 # we are pure python, or key was too short to search radix tree
1410 pass
1430 pass
1411
1431
1412 if id in self._pcache:
1432 if id in self._pcache:
1413 return self._pcache[id]
1433 return self._pcache[id]
1414
1434
1415 if len(id) < 40:
1435 if len(id) < 40:
1416 try:
1436 try:
1417 # hex(node)[:...]
1437 # hex(node)[:...]
1418 l = len(id) // 2 # grab an even number of digits
1438 l = len(id) // 2 # grab an even number of digits
1419 prefix = bin(id[:l * 2])
1439 prefix = bin(id[:l * 2])
1420 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1440 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1421 nl = [n for n in nl if hex(n).startswith(id) and
1441 nl = [n for n in nl if hex(n).startswith(id) and
1422 self.hasnode(n)]
1442 self.hasnode(n)]
1423 if len(nl) > 0:
1443 if len(nl) > 0:
1424 if len(nl) == 1 and not maybewdir:
1444 if len(nl) == 1 and not maybewdir:
1425 self._pcache[id] = nl[0]
1445 self._pcache[id] = nl[0]
1426 return nl[0]
1446 return nl[0]
1427 raise LookupError(id, self.indexfile,
1447 raise LookupError(id, self.indexfile,
1428 _('ambiguous identifier'))
1448 _('ambiguous identifier'))
1429 if maybewdir:
1449 if maybewdir:
1430 raise error.WdirUnsupported
1450 raise error.WdirUnsupported
1431 return None
1451 return None
1432 except TypeError:
1452 except TypeError:
1433 pass
1453 pass
1434
1454
1435 def lookup(self, id):
1455 def lookup(self, id):
1436 """locate a node based on:
1456 """locate a node based on:
1437 - revision number or str(revision number)
1457 - revision number or str(revision number)
1438 - nodeid or subset of hex nodeid
1458 - nodeid or subset of hex nodeid
1439 """
1459 """
1440 n = self._match(id)
1460 n = self._match(id)
1441 if n is not None:
1461 if n is not None:
1442 return n
1462 return n
1443 n = self._partialmatch(id)
1463 n = self._partialmatch(id)
1444 if n:
1464 if n:
1445 return n
1465 return n
1446
1466
1447 raise LookupError(id, self.indexfile, _('no match found'))
1467 raise LookupError(id, self.indexfile, _('no match found'))
1448
1468
1449 def shortest(self, hexnode, minlength=1):
1469 def shortest(self, hexnode, minlength=1):
1450 """Find the shortest unambiguous prefix that matches hexnode."""
1470 """Find the shortest unambiguous prefix that matches hexnode."""
1451 def isvalid(test):
1471 def isvalid(test):
1452 try:
1472 try:
1453 if self._partialmatch(test) is None:
1473 if self._partialmatch(test) is None:
1454 return False
1474 return False
1455
1475
1456 try:
1476 try:
1457 i = int(test)
1477 i = int(test)
1458 # if we are a pure int, then starting with zero will not be
1478 # if we are a pure int, then starting with zero will not be
1459 # confused as a rev; or, obviously, if the int is larger
1479 # confused as a rev; or, obviously, if the int is larger
1460 # than the value of the tip rev
1480 # than the value of the tip rev
1461 if test[0] == '0' or i > len(self):
1481 if test[0] == '0' or i > len(self):
1462 return True
1482 return True
1463 return False
1483 return False
1464 except ValueError:
1484 except ValueError:
1465 return True
1485 return True
1466 except error.RevlogError:
1486 except error.RevlogError:
1467 return False
1487 return False
1468 except error.WdirUnsupported:
1488 except error.WdirUnsupported:
1469 # single 'ff...' match
1489 # single 'ff...' match
1470 return True
1490 return True
1471
1491
1472 shortest = hexnode
1492 shortest = hexnode
1473 startlength = max(6, minlength)
1493 startlength = max(6, minlength)
1474 length = startlength
1494 length = startlength
1475 while True:
1495 while True:
1476 test = hexnode[:length]
1496 test = hexnode[:length]
1477 if isvalid(test):
1497 if isvalid(test):
1478 shortest = test
1498 shortest = test
1479 if length == minlength or length > startlength:
1499 if length == minlength or length > startlength:
1480 return shortest
1500 return shortest
1481 length -= 1
1501 length -= 1
1482 else:
1502 else:
1483 length += 1
1503 length += 1
1484 if len(shortest) <= length:
1504 if len(shortest) <= length:
1485 return shortest
1505 return shortest
1486
1506
1487 def cmp(self, node, text):
1507 def cmp(self, node, text):
1488 """compare text with a given file revision
1508 """compare text with a given file revision
1489
1509
1490 returns True if text is different than what is stored.
1510 returns True if text is different than what is stored.
1491 """
1511 """
1492 p1, p2 = self.parents(node)
1512 p1, p2 = self.parents(node)
1493 return hash(text, p1, p2) != node
1513 return hash(text, p1, p2) != node
1494
1514
1495 def _cachesegment(self, offset, data):
1515 def _cachesegment(self, offset, data):
1496 """Add a segment to the revlog cache.
1516 """Add a segment to the revlog cache.
1497
1517
1498 Accepts an absolute offset and the data that is at that location.
1518 Accepts an absolute offset and the data that is at that location.
1499 """
1519 """
1500 o, d = self._chunkcache
1520 o, d = self._chunkcache
1501 # try to add to existing cache
1521 # try to add to existing cache
1502 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1522 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1503 self._chunkcache = o, d + data
1523 self._chunkcache = o, d + data
1504 else:
1524 else:
1505 self._chunkcache = offset, data
1525 self._chunkcache = offset, data
1506
1526
1507 def _readsegment(self, offset, length, df=None):
1527 def _readsegment(self, offset, length, df=None):
1508 """Load a segment of raw data from the revlog.
1528 """Load a segment of raw data from the revlog.
1509
1529
1510 Accepts an absolute offset, length to read, and an optional existing
1530 Accepts an absolute offset, length to read, and an optional existing
1511 file handle to read from.
1531 file handle to read from.
1512
1532
1513 If an existing file handle is passed, it will be seeked and the
1533 If an existing file handle is passed, it will be seeked and the
1514 original seek position will NOT be restored.
1534 original seek position will NOT be restored.
1515
1535
1516 Returns a str or buffer of raw byte data.
1536 Returns a str or buffer of raw byte data.
1517 """
1537 """
1518 # Cache data both forward and backward around the requested
1538 # Cache data both forward and backward around the requested
1519 # data, in a fixed size window. This helps speed up operations
1539 # data, in a fixed size window. This helps speed up operations
1520 # involving reading the revlog backwards.
1540 # involving reading the revlog backwards.
1521 cachesize = self._chunkcachesize
1541 cachesize = self._chunkcachesize
1522 realoffset = offset & ~(cachesize - 1)
1542 realoffset = offset & ~(cachesize - 1)
1523 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1543 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1524 - realoffset)
1544 - realoffset)
1525 with self._datareadfp(df) as df:
1545 with self._datareadfp(df) as df:
1526 df.seek(realoffset)
1546 df.seek(realoffset)
1527 d = df.read(reallength)
1547 d = df.read(reallength)
1528 self._cachesegment(realoffset, d)
1548 self._cachesegment(realoffset, d)
1529 if offset != realoffset or reallength != length:
1549 if offset != realoffset or reallength != length:
1530 return util.buffer(d, offset - realoffset, length)
1550 return util.buffer(d, offset - realoffset, length)
1531 return d
1551 return d
1532
1552
1533 def _getsegment(self, offset, length, df=None):
1553 def _getsegment(self, offset, length, df=None):
1534 """Obtain a segment of raw data from the revlog.
1554 """Obtain a segment of raw data from the revlog.
1535
1555
1536 Accepts an absolute offset, length of bytes to obtain, and an
1556 Accepts an absolute offset, length of bytes to obtain, and an
1537 optional file handle to the already-opened revlog. If the file
1557 optional file handle to the already-opened revlog. If the file
1538 handle is used, it's original seek position will not be preserved.
1558 handle is used, it's original seek position will not be preserved.
1539
1559
1540 Requests for data may be returned from a cache.
1560 Requests for data may be returned from a cache.
1541
1561
1542 Returns a str or a buffer instance of raw byte data.
1562 Returns a str or a buffer instance of raw byte data.
1543 """
1563 """
1544 o, d = self._chunkcache
1564 o, d = self._chunkcache
1545 l = len(d)
1565 l = len(d)
1546
1566
1547 # is it in the cache?
1567 # is it in the cache?
1548 cachestart = offset - o
1568 cachestart = offset - o
1549 cacheend = cachestart + length
1569 cacheend = cachestart + length
1550 if cachestart >= 0 and cacheend <= l:
1570 if cachestart >= 0 and cacheend <= l:
1551 if cachestart == 0 and cacheend == l:
1571 if cachestart == 0 and cacheend == l:
1552 return d # avoid a copy
1572 return d # avoid a copy
1553 return util.buffer(d, cachestart, cacheend - cachestart)
1573 return util.buffer(d, cachestart, cacheend - cachestart)
1554
1574
1555 return self._readsegment(offset, length, df=df)
1575 return self._readsegment(offset, length, df=df)
1556
1576
1557 def _getsegmentforrevs(self, startrev, endrev, df=None):
1577 def _getsegmentforrevs(self, startrev, endrev, df=None):
1558 """Obtain a segment of raw data corresponding to a range of revisions.
1578 """Obtain a segment of raw data corresponding to a range of revisions.
1559
1579
1560 Accepts the start and end revisions and an optional already-open
1580 Accepts the start and end revisions and an optional already-open
1561 file handle to be used for reading. If the file handle is read, its
1581 file handle to be used for reading. If the file handle is read, its
1562 seek position will not be preserved.
1582 seek position will not be preserved.
1563
1583
1564 Requests for data may be satisfied by a cache.
1584 Requests for data may be satisfied by a cache.
1565
1585
1566 Returns a 2-tuple of (offset, data) for the requested range of
1586 Returns a 2-tuple of (offset, data) for the requested range of
1567 revisions. Offset is the integer offset from the beginning of the
1587 revisions. Offset is the integer offset from the beginning of the
1568 revlog and data is a str or buffer of the raw byte data.
1588 revlog and data is a str or buffer of the raw byte data.
1569
1589
1570 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1590 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1571 to determine where each revision's data begins and ends.
1591 to determine where each revision's data begins and ends.
1572 """
1592 """
1573 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1593 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1574 # (functions are expensive).
1594 # (functions are expensive).
1575 index = self.index
1595 index = self.index
1576 istart = index[startrev]
1596 istart = index[startrev]
1577 start = int(istart[0] >> 16)
1597 start = int(istart[0] >> 16)
1578 if startrev == endrev:
1598 if startrev == endrev:
1579 end = start + istart[1]
1599 end = start + istart[1]
1580 else:
1600 else:
1581 iend = index[endrev]
1601 iend = index[endrev]
1582 end = int(iend[0] >> 16) + iend[1]
1602 end = int(iend[0] >> 16) + iend[1]
1583
1603
1584 if self._inline:
1604 if self._inline:
1585 start += (startrev + 1) * self._io.size
1605 start += (startrev + 1) * self._io.size
1586 end += (endrev + 1) * self._io.size
1606 end += (endrev + 1) * self._io.size
1587 length = end - start
1607 length = end - start
1588
1608
1589 return start, self._getsegment(start, length, df=df)
1609 return start, self._getsegment(start, length, df=df)
1590
1610
1591 def _chunk(self, rev, df=None):
1611 def _chunk(self, rev, df=None):
1592 """Obtain a single decompressed chunk for a revision.
1612 """Obtain a single decompressed chunk for a revision.
1593
1613
1594 Accepts an integer revision and an optional already-open file handle
1614 Accepts an integer revision and an optional already-open file handle
1595 to be used for reading. If used, the seek position of the file will not
1615 to be used for reading. If used, the seek position of the file will not
1596 be preserved.
1616 be preserved.
1597
1617
1598 Returns a str holding uncompressed data for the requested revision.
1618 Returns a str holding uncompressed data for the requested revision.
1599 """
1619 """
1600 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1620 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1601
1621
1602 def _chunks(self, revs, df=None):
1622 def _chunks(self, revs, df=None):
1603 """Obtain decompressed chunks for the specified revisions.
1623 """Obtain decompressed chunks for the specified revisions.
1604
1624
1605 Accepts an iterable of numeric revisions that are assumed to be in
1625 Accepts an iterable of numeric revisions that are assumed to be in
1606 ascending order. Also accepts an optional already-open file handle
1626 ascending order. Also accepts an optional already-open file handle
1607 to be used for reading. If used, the seek position of the file will
1627 to be used for reading. If used, the seek position of the file will
1608 not be preserved.
1628 not be preserved.
1609
1629
1610 This function is similar to calling ``self._chunk()`` multiple times,
1630 This function is similar to calling ``self._chunk()`` multiple times,
1611 but is faster.
1631 but is faster.
1612
1632
1613 Returns a list with decompressed data for each requested revision.
1633 Returns a list with decompressed data for each requested revision.
1614 """
1634 """
1615 if not revs:
1635 if not revs:
1616 return []
1636 return []
1617 start = self.start
1637 start = self.start
1618 length = self.length
1638 length = self.length
1619 inline = self._inline
1639 inline = self._inline
1620 iosize = self._io.size
1640 iosize = self._io.size
1621 buffer = util.buffer
1641 buffer = util.buffer
1622
1642
1623 l = []
1643 l = []
1624 ladd = l.append
1644 ladd = l.append
1625
1645
1626 if not self._withsparseread:
1646 if not self._withsparseread:
1627 slicedchunks = (revs,)
1647 slicedchunks = (revs,)
1628 else:
1648 else:
1629 slicedchunks = _slicechunk(self, revs)
1649 slicedchunks = _slicechunk(self, revs)
1630
1650
1631 for revschunk in slicedchunks:
1651 for revschunk in slicedchunks:
1632 firstrev = revschunk[0]
1652 firstrev = revschunk[0]
1633 # Skip trailing revisions with empty diff
1653 # Skip trailing revisions with empty diff
1634 for lastrev in revschunk[::-1]:
1654 for lastrev in revschunk[::-1]:
1635 if length(lastrev) != 0:
1655 if length(lastrev) != 0:
1636 break
1656 break
1637
1657
1638 try:
1658 try:
1639 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1659 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1640 except OverflowError:
1660 except OverflowError:
1641 # issue4215 - we can't cache a run of chunks greater than
1661 # issue4215 - we can't cache a run of chunks greater than
1642 # 2G on Windows
1662 # 2G on Windows
1643 return [self._chunk(rev, df=df) for rev in revschunk]
1663 return [self._chunk(rev, df=df) for rev in revschunk]
1644
1664
1645 decomp = self.decompress
1665 decomp = self.decompress
1646 for rev in revschunk:
1666 for rev in revschunk:
1647 chunkstart = start(rev)
1667 chunkstart = start(rev)
1648 if inline:
1668 if inline:
1649 chunkstart += (rev + 1) * iosize
1669 chunkstart += (rev + 1) * iosize
1650 chunklength = length(rev)
1670 chunklength = length(rev)
1651 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1671 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1652
1672
1653 return l
1673 return l
1654
1674
1655 def _chunkclear(self):
1675 def _chunkclear(self):
1656 """Clear the raw chunk cache."""
1676 """Clear the raw chunk cache."""
1657 self._chunkcache = (0, '')
1677 self._chunkcache = (0, '')
1658
1678
1659 def deltaparent(self, rev):
1679 def deltaparent(self, rev):
1660 """return deltaparent of the given revision"""
1680 """return deltaparent of the given revision"""
1661 base = self.index[rev][3]
1681 base = self.index[rev][3]
1662 if base == rev:
1682 if base == rev:
1663 return nullrev
1683 return nullrev
1664 elif self._generaldelta:
1684 elif self._generaldelta:
1665 return base
1685 return base
1666 else:
1686 else:
1667 return rev - 1
1687 return rev - 1
1668
1688
1669 def revdiff(self, rev1, rev2):
1689 def revdiff(self, rev1, rev2):
1670 """return or calculate a delta between two revisions
1690 """return or calculate a delta between two revisions
1671
1691
1672 The delta calculated is in binary form and is intended to be written to
1692 The delta calculated is in binary form and is intended to be written to
1673 revlog data directly. So this function needs raw revision data.
1693 revlog data directly. So this function needs raw revision data.
1674 """
1694 """
1675 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1695 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1676 return bytes(self._chunk(rev2))
1696 return bytes(self._chunk(rev2))
1677
1697
1678 return mdiff.textdiff(self.revision(rev1, raw=True),
1698 return mdiff.textdiff(self.revision(rev1, raw=True),
1679 self.revision(rev2, raw=True))
1699 self.revision(rev2, raw=True))
1680
1700
1681 def revision(self, nodeorrev, _df=None, raw=False):
1701 def revision(self, nodeorrev, _df=None, raw=False):
1682 """return an uncompressed revision of a given node or revision
1702 """return an uncompressed revision of a given node or revision
1683 number.
1703 number.
1684
1704
1685 _df - an existing file handle to read from. (internal-only)
1705 _df - an existing file handle to read from. (internal-only)
1686 raw - an optional argument specifying if the revision data is to be
1706 raw - an optional argument specifying if the revision data is to be
1687 treated as raw data when applying flag transforms. 'raw' should be set
1707 treated as raw data when applying flag transforms. 'raw' should be set
1688 to True when generating changegroups or in debug commands.
1708 to True when generating changegroups or in debug commands.
1689 """
1709 """
1690 if isinstance(nodeorrev, int):
1710 if isinstance(nodeorrev, int):
1691 rev = nodeorrev
1711 rev = nodeorrev
1692 node = self.node(rev)
1712 node = self.node(rev)
1693 else:
1713 else:
1694 node = nodeorrev
1714 node = nodeorrev
1695 rev = None
1715 rev = None
1696
1716
1697 cachedrev = None
1717 cachedrev = None
1698 flags = None
1718 flags = None
1699 rawtext = None
1719 rawtext = None
1700 if node == nullid:
1720 if node == nullid:
1701 return ""
1721 return ""
1702 if self._cache:
1722 if self._cache:
1703 if self._cache[0] == node:
1723 if self._cache[0] == node:
1704 # _cache only stores rawtext
1724 # _cache only stores rawtext
1705 if raw:
1725 if raw:
1706 return self._cache[2]
1726 return self._cache[2]
1707 # duplicated, but good for perf
1727 # duplicated, but good for perf
1708 if rev is None:
1728 if rev is None:
1709 rev = self.rev(node)
1729 rev = self.rev(node)
1710 if flags is None:
1730 if flags is None:
1711 flags = self.flags(rev)
1731 flags = self.flags(rev)
1712 # no extra flags set, no flag processor runs, text = rawtext
1732 # no extra flags set, no flag processor runs, text = rawtext
1713 if flags == REVIDX_DEFAULT_FLAGS:
1733 if flags == REVIDX_DEFAULT_FLAGS:
1714 return self._cache[2]
1734 return self._cache[2]
1715 # rawtext is reusable. need to run flag processor
1735 # rawtext is reusable. need to run flag processor
1716 rawtext = self._cache[2]
1736 rawtext = self._cache[2]
1717
1737
1718 cachedrev = self._cache[1]
1738 cachedrev = self._cache[1]
1719
1739
1720 # look up what we need to read
1740 # look up what we need to read
1721 if rawtext is None:
1741 if rawtext is None:
1722 if rev is None:
1742 if rev is None:
1723 rev = self.rev(node)
1743 rev = self.rev(node)
1724
1744
1725 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1745 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1726 if stopped:
1746 if stopped:
1727 rawtext = self._cache[2]
1747 rawtext = self._cache[2]
1728
1748
1729 # drop cache to save memory
1749 # drop cache to save memory
1730 self._cache = None
1750 self._cache = None
1731
1751
1732 bins = self._chunks(chain, df=_df)
1752 bins = self._chunks(chain, df=_df)
1733 if rawtext is None:
1753 if rawtext is None:
1734 rawtext = bytes(bins[0])
1754 rawtext = bytes(bins[0])
1735 bins = bins[1:]
1755 bins = bins[1:]
1736
1756
1737 rawtext = mdiff.patches(rawtext, bins)
1757 rawtext = mdiff.patches(rawtext, bins)
1738 self._cache = (node, rev, rawtext)
1758 self._cache = (node, rev, rawtext)
1739
1759
1740 if flags is None:
1760 if flags is None:
1741 if rev is None:
1761 if rev is None:
1742 rev = self.rev(node)
1762 rev = self.rev(node)
1743 flags = self.flags(rev)
1763 flags = self.flags(rev)
1744
1764
1745 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1765 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1746 if validatehash:
1766 if validatehash:
1747 self.checkhash(text, node, rev=rev)
1767 self.checkhash(text, node, rev=rev)
1748
1768
1749 return text
1769 return text
1750
1770
1751 def hash(self, text, p1, p2):
1771 def hash(self, text, p1, p2):
1752 """Compute a node hash.
1772 """Compute a node hash.
1753
1773
1754 Available as a function so that subclasses can replace the hash
1774 Available as a function so that subclasses can replace the hash
1755 as needed.
1775 as needed.
1756 """
1776 """
1757 return hash(text, p1, p2)
1777 return hash(text, p1, p2)
1758
1778
1759 def _processflags(self, text, flags, operation, raw=False):
1779 def _processflags(self, text, flags, operation, raw=False):
1760 """Inspect revision data flags and applies transforms defined by
1780 """Inspect revision data flags and applies transforms defined by
1761 registered flag processors.
1781 registered flag processors.
1762
1782
1763 ``text`` - the revision data to process
1783 ``text`` - the revision data to process
1764 ``flags`` - the revision flags
1784 ``flags`` - the revision flags
1765 ``operation`` - the operation being performed (read or write)
1785 ``operation`` - the operation being performed (read or write)
1766 ``raw`` - an optional argument describing if the raw transform should be
1786 ``raw`` - an optional argument describing if the raw transform should be
1767 applied.
1787 applied.
1768
1788
1769 This method processes the flags in the order (or reverse order if
1789 This method processes the flags in the order (or reverse order if
1770 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1790 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1771 flag processors registered for present flags. The order of flags defined
1791 flag processors registered for present flags. The order of flags defined
1772 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1792 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1773
1793
1774 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1794 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1775 processed text and ``validatehash`` is a bool indicating whether the
1795 processed text and ``validatehash`` is a bool indicating whether the
1776 returned text should be checked for hash integrity.
1796 returned text should be checked for hash integrity.
1777
1797
1778 Note: If the ``raw`` argument is set, it has precedence over the
1798 Note: If the ``raw`` argument is set, it has precedence over the
1779 operation and will only update the value of ``validatehash``.
1799 operation and will only update the value of ``validatehash``.
1780 """
1800 """
1781 # fast path: no flag processors will run
1801 # fast path: no flag processors will run
1782 if flags == 0:
1802 if flags == 0:
1783 return text, True
1803 return text, True
1784 if not operation in ('read', 'write'):
1804 if not operation in ('read', 'write'):
1785 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
1805 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
1786 # Check all flags are known.
1806 # Check all flags are known.
1787 if flags & ~REVIDX_KNOWN_FLAGS:
1807 if flags & ~REVIDX_KNOWN_FLAGS:
1788 raise RevlogError(_("incompatible revision flag '%#x'") %
1808 raise RevlogError(_("incompatible revision flag '%#x'") %
1789 (flags & ~REVIDX_KNOWN_FLAGS))
1809 (flags & ~REVIDX_KNOWN_FLAGS))
1790 validatehash = True
1810 validatehash = True
1791 # Depending on the operation (read or write), the order might be
1811 # Depending on the operation (read or write), the order might be
1792 # reversed due to non-commutative transforms.
1812 # reversed due to non-commutative transforms.
1793 orderedflags = REVIDX_FLAGS_ORDER
1813 orderedflags = REVIDX_FLAGS_ORDER
1794 if operation == 'write':
1814 if operation == 'write':
1795 orderedflags = reversed(orderedflags)
1815 orderedflags = reversed(orderedflags)
1796
1816
1797 for flag in orderedflags:
1817 for flag in orderedflags:
1798 # If a flagprocessor has been registered for a known flag, apply the
1818 # If a flagprocessor has been registered for a known flag, apply the
1799 # related operation transform and update result tuple.
1819 # related operation transform and update result tuple.
1800 if flag & flags:
1820 if flag & flags:
1801 vhash = True
1821 vhash = True
1802
1822
1803 if flag not in _flagprocessors:
1823 if flag not in _flagprocessors:
1804 message = _("missing processor for flag '%#x'") % (flag)
1824 message = _("missing processor for flag '%#x'") % (flag)
1805 raise RevlogError(message)
1825 raise RevlogError(message)
1806
1826
1807 processor = _flagprocessors[flag]
1827 processor = _flagprocessors[flag]
1808 if processor is not None:
1828 if processor is not None:
1809 readtransform, writetransform, rawtransform = processor
1829 readtransform, writetransform, rawtransform = processor
1810
1830
1811 if raw:
1831 if raw:
1812 vhash = rawtransform(self, text)
1832 vhash = rawtransform(self, text)
1813 elif operation == 'read':
1833 elif operation == 'read':
1814 text, vhash = readtransform(self, text)
1834 text, vhash = readtransform(self, text)
1815 else: # write operation
1835 else: # write operation
1816 text, vhash = writetransform(self, text)
1836 text, vhash = writetransform(self, text)
1817 validatehash = validatehash and vhash
1837 validatehash = validatehash and vhash
1818
1838
1819 return text, validatehash
1839 return text, validatehash
1820
1840
1821 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1841 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1822 """Check node hash integrity.
1842 """Check node hash integrity.
1823
1843
1824 Available as a function so that subclasses can extend hash mismatch
1844 Available as a function so that subclasses can extend hash mismatch
1825 behaviors as needed.
1845 behaviors as needed.
1826 """
1846 """
1827 if p1 is None and p2 is None:
1847 if p1 is None and p2 is None:
1828 p1, p2 = self.parents(node)
1848 p1, p2 = self.parents(node)
1829 if node != self.hash(text, p1, p2):
1849 if node != self.hash(text, p1, p2):
1830 revornode = rev
1850 revornode = rev
1831 if revornode is None:
1851 if revornode is None:
1832 revornode = templatefilters.short(hex(node))
1852 revornode = templatefilters.short(hex(node))
1833 raise RevlogError(_("integrity check failed on %s:%s")
1853 raise RevlogError(_("integrity check failed on %s:%s")
1834 % (self.indexfile, pycompat.bytestr(revornode)))
1854 % (self.indexfile, pycompat.bytestr(revornode)))
1835
1855
1836 def _enforceinlinesize(self, tr, fp=None):
1856 def _enforceinlinesize(self, tr, fp=None):
1837 """Check if the revlog is too big for inline and convert if so.
1857 """Check if the revlog is too big for inline and convert if so.
1838
1858
1839 This should be called after revisions are added to the revlog. If the
1859 This should be called after revisions are added to the revlog. If the
1840 revlog has grown too large to be an inline revlog, it will convert it
1860 revlog has grown too large to be an inline revlog, it will convert it
1841 to use multiple index and data files.
1861 to use multiple index and data files.
1842 """
1862 """
1843 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1863 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1844 return
1864 return
1845
1865
1846 trinfo = tr.find(self.indexfile)
1866 trinfo = tr.find(self.indexfile)
1847 if trinfo is None:
1867 if trinfo is None:
1848 raise RevlogError(_("%s not found in the transaction")
1868 raise RevlogError(_("%s not found in the transaction")
1849 % self.indexfile)
1869 % self.indexfile)
1850
1870
1851 trindex = trinfo[2]
1871 trindex = trinfo[2]
1852 if trindex is not None:
1872 if trindex is not None:
1853 dataoff = self.start(trindex)
1873 dataoff = self.start(trindex)
1854 else:
1874 else:
1855 # revlog was stripped at start of transaction, use all leftover data
1875 # revlog was stripped at start of transaction, use all leftover data
1856 trindex = len(self) - 1
1876 trindex = len(self) - 1
1857 dataoff = self.end(-2)
1877 dataoff = self.end(-2)
1858
1878
1859 tr.add(self.datafile, dataoff)
1879 tr.add(self.datafile, dataoff)
1860
1880
1861 if fp:
1881 if fp:
1862 fp.flush()
1882 fp.flush()
1863 fp.close()
1883 fp.close()
1864
1884
1865 with self._datafp('w') as df:
1885 with self._datafp('w') as df:
1866 for r in self:
1886 for r in self:
1867 df.write(self._getsegmentforrevs(r, r)[1])
1887 df.write(self._getsegmentforrevs(r, r)[1])
1868
1888
1869 with self._indexfp('w') as fp:
1889 with self._indexfp('w') as fp:
1870 self.version &= ~FLAG_INLINE_DATA
1890 self.version &= ~FLAG_INLINE_DATA
1871 self._inline = False
1891 self._inline = False
1872 io = self._io
1892 io = self._io
1873 for i in self:
1893 for i in self:
1874 e = io.packentry(self.index[i], self.node, self.version, i)
1894 e = io.packentry(self.index[i], self.node, self.version, i)
1875 fp.write(e)
1895 fp.write(e)
1876
1896
1877 # the temp file replace the real index when we exit the context
1897 # the temp file replace the real index when we exit the context
1878 # manager
1898 # manager
1879
1899
1880 tr.replace(self.indexfile, trindex * self._io.size)
1900 tr.replace(self.indexfile, trindex * self._io.size)
1881 self._chunkclear()
1901 self._chunkclear()
1882
1902
1883 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1903 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1884 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1904 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1885 """add a revision to the log
1905 """add a revision to the log
1886
1906
1887 text - the revision data to add
1907 text - the revision data to add
1888 transaction - the transaction object used for rollback
1908 transaction - the transaction object used for rollback
1889 link - the linkrev data to add
1909 link - the linkrev data to add
1890 p1, p2 - the parent nodeids of the revision
1910 p1, p2 - the parent nodeids of the revision
1891 cachedelta - an optional precomputed delta
1911 cachedelta - an optional precomputed delta
1892 node - nodeid of revision; typically node is not specified, and it is
1912 node - nodeid of revision; typically node is not specified, and it is
1893 computed by default as hash(text, p1, p2), however subclasses might
1913 computed by default as hash(text, p1, p2), however subclasses might
1894 use different hashing method (and override checkhash() in such case)
1914 use different hashing method (and override checkhash() in such case)
1895 flags - the known flags to set on the revision
1915 flags - the known flags to set on the revision
1896 deltacomputer - an optional _deltacomputer instance shared between
1916 deltacomputer - an optional _deltacomputer instance shared between
1897 multiple calls
1917 multiple calls
1898 """
1918 """
1899 if link == nullrev:
1919 if link == nullrev:
1900 raise RevlogError(_("attempted to add linkrev -1 to %s")
1920 raise RevlogError(_("attempted to add linkrev -1 to %s")
1901 % self.indexfile)
1921 % self.indexfile)
1902
1922
1903 if flags:
1923 if flags:
1904 node = node or self.hash(text, p1, p2)
1924 node = node or self.hash(text, p1, p2)
1905
1925
1906 rawtext, validatehash = self._processflags(text, flags, 'write')
1926 rawtext, validatehash = self._processflags(text, flags, 'write')
1907
1927
1908 # If the flag processor modifies the revision data, ignore any provided
1928 # If the flag processor modifies the revision data, ignore any provided
1909 # cachedelta.
1929 # cachedelta.
1910 if rawtext != text:
1930 if rawtext != text:
1911 cachedelta = None
1931 cachedelta = None
1912
1932
1913 if len(rawtext) > _maxentrysize:
1933 if len(rawtext) > _maxentrysize:
1914 raise RevlogError(
1934 raise RevlogError(
1915 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1935 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1916 % (self.indexfile, len(rawtext)))
1936 % (self.indexfile, len(rawtext)))
1917
1937
1918 node = node or self.hash(rawtext, p1, p2)
1938 node = node or self.hash(rawtext, p1, p2)
1919 if node in self.nodemap:
1939 if node in self.nodemap:
1920 return node
1940 return node
1921
1941
1922 if validatehash:
1942 if validatehash:
1923 self.checkhash(rawtext, node, p1=p1, p2=p2)
1943 self.checkhash(rawtext, node, p1=p1, p2=p2)
1924
1944
1925 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1945 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1926 flags, cachedelta=cachedelta,
1946 flags, cachedelta=cachedelta,
1927 deltacomputer=deltacomputer)
1947 deltacomputer=deltacomputer)
1928
1948
1929 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1949 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1930 cachedelta=None, deltacomputer=None):
1950 cachedelta=None, deltacomputer=None):
1931 """add a raw revision with known flags, node and parents
1951 """add a raw revision with known flags, node and parents
1932 useful when reusing a revision not stored in this revlog (ex: received
1952 useful when reusing a revision not stored in this revlog (ex: received
1933 over wire, or read from an external bundle).
1953 over wire, or read from an external bundle).
1934 """
1954 """
1935 dfh = None
1955 dfh = None
1936 if not self._inline:
1956 if not self._inline:
1937 dfh = self._datafp("a+")
1957 dfh = self._datafp("a+")
1938 ifh = self._indexfp("a+")
1958 ifh = self._indexfp("a+")
1939 try:
1959 try:
1940 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1960 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1941 flags, cachedelta, ifh, dfh,
1961 flags, cachedelta, ifh, dfh,
1942 deltacomputer=deltacomputer)
1962 deltacomputer=deltacomputer)
1943 finally:
1963 finally:
1944 if dfh:
1964 if dfh:
1945 dfh.close()
1965 dfh.close()
1946 ifh.close()
1966 ifh.close()
1947
1967
1948 def compress(self, data):
1968 def compress(self, data):
1949 """Generate a possibly-compressed representation of data."""
1969 """Generate a possibly-compressed representation of data."""
1950 if not data:
1970 if not data:
1951 return '', data
1971 return '', data
1952
1972
1953 compressed = self._compressor.compress(data)
1973 compressed = self._compressor.compress(data)
1954
1974
1955 if compressed:
1975 if compressed:
1956 # The revlog compressor added the header in the returned data.
1976 # The revlog compressor added the header in the returned data.
1957 return '', compressed
1977 return '', compressed
1958
1978
1959 if data[0:1] == '\0':
1979 if data[0:1] == '\0':
1960 return '', data
1980 return '', data
1961 return 'u', data
1981 return 'u', data
1962
1982
1963 def decompress(self, data):
1983 def decompress(self, data):
1964 """Decompress a revlog chunk.
1984 """Decompress a revlog chunk.
1965
1985
1966 The chunk is expected to begin with a header identifying the
1986 The chunk is expected to begin with a header identifying the
1967 format type so it can be routed to an appropriate decompressor.
1987 format type so it can be routed to an appropriate decompressor.
1968 """
1988 """
1969 if not data:
1989 if not data:
1970 return data
1990 return data
1971
1991
1972 # Revlogs are read much more frequently than they are written and many
1992 # Revlogs are read much more frequently than they are written and many
1973 # chunks only take microseconds to decompress, so performance is
1993 # chunks only take microseconds to decompress, so performance is
1974 # important here.
1994 # important here.
1975 #
1995 #
1976 # We can make a few assumptions about revlogs:
1996 # We can make a few assumptions about revlogs:
1977 #
1997 #
1978 # 1) the majority of chunks will be compressed (as opposed to inline
1998 # 1) the majority of chunks will be compressed (as opposed to inline
1979 # raw data).
1999 # raw data).
1980 # 2) decompressing *any* data will likely by at least 10x slower than
2000 # 2) decompressing *any* data will likely by at least 10x slower than
1981 # returning raw inline data.
2001 # returning raw inline data.
1982 # 3) we want to prioritize common and officially supported compression
2002 # 3) we want to prioritize common and officially supported compression
1983 # engines
2003 # engines
1984 #
2004 #
1985 # It follows that we want to optimize for "decompress compressed data
2005 # It follows that we want to optimize for "decompress compressed data
1986 # when encoded with common and officially supported compression engines"
2006 # when encoded with common and officially supported compression engines"
1987 # case over "raw data" and "data encoded by less common or non-official
2007 # case over "raw data" and "data encoded by less common or non-official
1988 # compression engines." That is why we have the inline lookup first
2008 # compression engines." That is why we have the inline lookup first
1989 # followed by the compengines lookup.
2009 # followed by the compengines lookup.
1990 #
2010 #
1991 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2011 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1992 # compressed chunks. And this matters for changelog and manifest reads.
2012 # compressed chunks. And this matters for changelog and manifest reads.
1993 t = data[0:1]
2013 t = data[0:1]
1994
2014
1995 if t == 'x':
2015 if t == 'x':
1996 try:
2016 try:
1997 return _zlibdecompress(data)
2017 return _zlibdecompress(data)
1998 except zlib.error as e:
2018 except zlib.error as e:
1999 raise RevlogError(_('revlog decompress error: %s') %
2019 raise RevlogError(_('revlog decompress error: %s') %
2000 util.forcebytestr(e))
2020 util.forcebytestr(e))
2001 # '\0' is more common than 'u' so it goes first.
2021 # '\0' is more common than 'u' so it goes first.
2002 elif t == '\0':
2022 elif t == '\0':
2003 return data
2023 return data
2004 elif t == 'u':
2024 elif t == 'u':
2005 return util.buffer(data, 1)
2025 return util.buffer(data, 1)
2006
2026
2007 try:
2027 try:
2008 compressor = self._decompressors[t]
2028 compressor = self._decompressors[t]
2009 except KeyError:
2029 except KeyError:
2010 try:
2030 try:
2011 engine = util.compengines.forrevlogheader(t)
2031 engine = util.compengines.forrevlogheader(t)
2012 compressor = engine.revlogcompressor()
2032 compressor = engine.revlogcompressor()
2013 self._decompressors[t] = compressor
2033 self._decompressors[t] = compressor
2014 except KeyError:
2034 except KeyError:
2015 raise RevlogError(_('unknown compression type %r') % t)
2035 raise RevlogError(_('unknown compression type %r') % t)
2016
2036
2017 return compressor.decompress(data)
2037 return compressor.decompress(data)
2018
2038
2019 def _isgooddeltainfo(self, d, textlen):
2039 def _isgooddeltainfo(self, d, textlen):
2020 """Returns True if the given delta is good. Good means that it is within
2040 """Returns True if the given delta is good. Good means that it is within
2021 the disk span, disk size, and chain length bounds that we know to be
2041 the disk span, disk size, and chain length bounds that we know to be
2022 performant."""
2042 performant."""
2023 if d is None:
2043 if d is None:
2024 return False
2044 return False
2025
2045
2026 # - 'd.distance' is the distance from the base revision -- bounding it
2046 # - 'd.distance' is the distance from the base revision -- bounding it
2027 # limits the amount of I/O we need to do.
2047 # limits the amount of I/O we need to do.
2028 # - 'd.compresseddeltalen' is the sum of the total size of deltas we
2048 # - 'd.compresseddeltalen' is the sum of the total size of deltas we
2029 # need to apply -- bounding it limits the amount of CPU we consume.
2049 # need to apply -- bounding it limits the amount of CPU we consume.
2030
2050
2031 defaultmax = textlen * 4
2051 defaultmax = textlen * 4
2032 maxdist = self._maxdeltachainspan
2052 maxdist = self._maxdeltachainspan
2033 if not maxdist:
2053 if not maxdist:
2034 maxdist = d.distance # ensure the conditional pass
2054 maxdist = d.distance # ensure the conditional pass
2035 maxdist = max(maxdist, defaultmax)
2055 maxdist = max(maxdist, defaultmax)
2036 if (d.distance > maxdist or d.deltalen > textlen or
2056 if (d.distance > maxdist or d.deltalen > textlen or
2037 d.compresseddeltalen > textlen * 2 or
2057 d.compresseddeltalen > textlen * 2 or
2038 (self._maxchainlen and d.chainlen > self._maxchainlen)):
2058 (self._maxchainlen and d.chainlen > self._maxchainlen)):
2039 return False
2059 return False
2040
2060
2041 return True
2061 return True
2042
2062
2043 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2063 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2044 cachedelta, ifh, dfh, alwayscache=False,
2064 cachedelta, ifh, dfh, alwayscache=False,
2045 deltacomputer=None):
2065 deltacomputer=None):
2046 """internal function to add revisions to the log
2066 """internal function to add revisions to the log
2047
2067
2048 see addrevision for argument descriptions.
2068 see addrevision for argument descriptions.
2049
2069
2050 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2070 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2051
2071
2052 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2072 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2053 be used.
2073 be used.
2054
2074
2055 invariants:
2075 invariants:
2056 - rawtext is optional (can be None); if not set, cachedelta must be set.
2076 - rawtext is optional (can be None); if not set, cachedelta must be set.
2057 if both are set, they must correspond to each other.
2077 if both are set, they must correspond to each other.
2058 """
2078 """
2059 if node == nullid:
2079 if node == nullid:
2060 raise RevlogError(_("%s: attempt to add null revision") %
2080 raise RevlogError(_("%s: attempt to add null revision") %
2061 (self.indexfile))
2081 (self.indexfile))
2062 if node == wdirid:
2082 if node == wdirid:
2063 raise RevlogError(_("%s: attempt to add wdir revision") %
2083 raise RevlogError(_("%s: attempt to add wdir revision") %
2064 (self.indexfile))
2084 (self.indexfile))
2065
2085
2066 if self._inline:
2086 if self._inline:
2067 fh = ifh
2087 fh = ifh
2068 else:
2088 else:
2069 fh = dfh
2089 fh = dfh
2070
2090
2071 btext = [rawtext]
2091 btext = [rawtext]
2072
2092
2073 curr = len(self)
2093 curr = len(self)
2074 prev = curr - 1
2094 prev = curr - 1
2075 offset = self.end(prev)
2095 offset = self.end(prev)
2076 p1r, p2r = self.rev(p1), self.rev(p2)
2096 p1r, p2r = self.rev(p1), self.rev(p2)
2077
2097
2078 # full versions are inserted when the needed deltas
2098 # full versions are inserted when the needed deltas
2079 # become comparable to the uncompressed text
2099 # become comparable to the uncompressed text
2080 if rawtext is None:
2100 if rawtext is None:
2081 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
2101 # need rawtext size, before changed by flag processors, which is
2102 # the non-raw size. use revlog explicitly to avoid filelog's extra
2103 # logic that might remove metadata size.
2104 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2082 cachedelta[1])
2105 cachedelta[1])
2083 else:
2106 else:
2084 textlen = len(rawtext)
2107 textlen = len(rawtext)
2085
2108
2086 if deltacomputer is None:
2109 if deltacomputer is None:
2087 deltacomputer = _deltacomputer(self)
2110 deltacomputer = _deltacomputer(self)
2088
2111
2089 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2112 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2090 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2113
2114 # no delta for flag processor revision (see "candelta" for why)
2115 # not calling candelta since only one revision needs test, also to
2116 # avoid overhead fetching flags again.
2117 if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
2118 deltainfo = None
2119 else:
2120 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2091
2121
2092 if deltainfo is not None:
2122 if deltainfo is not None:
2093 base = deltainfo.base
2123 base = deltainfo.base
2094 chainbase = deltainfo.chainbase
2124 chainbase = deltainfo.chainbase
2095 data = deltainfo.data
2125 data = deltainfo.data
2096 l = deltainfo.deltalen
2126 l = deltainfo.deltalen
2097 else:
2127 else:
2098 rawtext = deltacomputer.buildtext(revinfo, fh)
2128 rawtext = deltacomputer.buildtext(revinfo, fh)
2099 data = self.compress(rawtext)
2129 data = self.compress(rawtext)
2100 l = len(data[1]) + len(data[0])
2130 l = len(data[1]) + len(data[0])
2101 base = chainbase = curr
2131 base = chainbase = curr
2102
2132
2103 e = (offset_type(offset, flags), l, textlen,
2133 e = (offset_type(offset, flags), l, textlen,
2104 base, link, p1r, p2r, node)
2134 base, link, p1r, p2r, node)
2105 self.index.insert(-1, e)
2135 self.index.insert(-1, e)
2106 self.nodemap[node] = curr
2136 self.nodemap[node] = curr
2107
2137
2108 entry = self._io.packentry(e, self.node, self.version, curr)
2138 entry = self._io.packentry(e, self.node, self.version, curr)
2109 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
2139 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
2110
2140
2111 if alwayscache and rawtext is None:
2141 if alwayscache and rawtext is None:
2112 rawtext = deltacomputer._buildtext(revinfo, fh)
2142 rawtext = deltacomputer._buildtext(revinfo, fh)
2113
2143
2114 if type(rawtext) == bytes: # only accept immutable objects
2144 if type(rawtext) == bytes: # only accept immutable objects
2115 self._cache = (node, curr, rawtext)
2145 self._cache = (node, curr, rawtext)
2116 self._chainbasecache[curr] = chainbase
2146 self._chainbasecache[curr] = chainbase
2117 return node
2147 return node
2118
2148
2119 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2149 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2120 # Files opened in a+ mode have inconsistent behavior on various
2150 # Files opened in a+ mode have inconsistent behavior on various
2121 # platforms. Windows requires that a file positioning call be made
2151 # platforms. Windows requires that a file positioning call be made
2122 # when the file handle transitions between reads and writes. See
2152 # when the file handle transitions between reads and writes. See
2123 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2153 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2124 # platforms, Python or the platform itself can be buggy. Some versions
2154 # platforms, Python or the platform itself can be buggy. Some versions
2125 # of Solaris have been observed to not append at the end of the file
2155 # of Solaris have been observed to not append at the end of the file
2126 # if the file was seeked to before the end. See issue4943 for more.
2156 # if the file was seeked to before the end. See issue4943 for more.
2127 #
2157 #
2128 # We work around this issue by inserting a seek() before writing.
2158 # We work around this issue by inserting a seek() before writing.
2129 # Note: This is likely not necessary on Python 3.
2159 # Note: This is likely not necessary on Python 3.
2130 ifh.seek(0, os.SEEK_END)
2160 ifh.seek(0, os.SEEK_END)
2131 if dfh:
2161 if dfh:
2132 dfh.seek(0, os.SEEK_END)
2162 dfh.seek(0, os.SEEK_END)
2133
2163
2134 curr = len(self) - 1
2164 curr = len(self) - 1
2135 if not self._inline:
2165 if not self._inline:
2136 transaction.add(self.datafile, offset)
2166 transaction.add(self.datafile, offset)
2137 transaction.add(self.indexfile, curr * len(entry))
2167 transaction.add(self.indexfile, curr * len(entry))
2138 if data[0]:
2168 if data[0]:
2139 dfh.write(data[0])
2169 dfh.write(data[0])
2140 dfh.write(data[1])
2170 dfh.write(data[1])
2141 ifh.write(entry)
2171 ifh.write(entry)
2142 else:
2172 else:
2143 offset += curr * self._io.size
2173 offset += curr * self._io.size
2144 transaction.add(self.indexfile, offset, curr)
2174 transaction.add(self.indexfile, offset, curr)
2145 ifh.write(entry)
2175 ifh.write(entry)
2146 ifh.write(data[0])
2176 ifh.write(data[0])
2147 ifh.write(data[1])
2177 ifh.write(data[1])
2148 self._enforceinlinesize(transaction, ifh)
2178 self._enforceinlinesize(transaction, ifh)
2149
2179
2150 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2180 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2151 """
2181 """
2152 add a delta group
2182 add a delta group
2153
2183
2154 given a set of deltas, add them to the revision log. the
2184 given a set of deltas, add them to the revision log. the
2155 first delta is against its parent, which should be in our
2185 first delta is against its parent, which should be in our
2156 log, the rest are against the previous delta.
2186 log, the rest are against the previous delta.
2157
2187
2158 If ``addrevisioncb`` is defined, it will be called with arguments of
2188 If ``addrevisioncb`` is defined, it will be called with arguments of
2159 this revlog and the node that was added.
2189 this revlog and the node that was added.
2160 """
2190 """
2161
2191
2162 nodes = []
2192 nodes = []
2163
2193
2164 r = len(self)
2194 r = len(self)
2165 end = 0
2195 end = 0
2166 if r:
2196 if r:
2167 end = self.end(r - 1)
2197 end = self.end(r - 1)
2168 ifh = self._indexfp("a+")
2198 ifh = self._indexfp("a+")
2169 isize = r * self._io.size
2199 isize = r * self._io.size
2170 if self._inline:
2200 if self._inline:
2171 transaction.add(self.indexfile, end + isize, r)
2201 transaction.add(self.indexfile, end + isize, r)
2172 dfh = None
2202 dfh = None
2173 else:
2203 else:
2174 transaction.add(self.indexfile, isize, r)
2204 transaction.add(self.indexfile, isize, r)
2175 transaction.add(self.datafile, end)
2205 transaction.add(self.datafile, end)
2176 dfh = self._datafp("a+")
2206 dfh = self._datafp("a+")
2177 def flush():
2207 def flush():
2178 if dfh:
2208 if dfh:
2179 dfh.flush()
2209 dfh.flush()
2180 ifh.flush()
2210 ifh.flush()
2181 try:
2211 try:
2182 deltacomputer = _deltacomputer(self)
2212 deltacomputer = _deltacomputer(self)
2183 # loop through our set of deltas
2213 # loop through our set of deltas
2184 for data in deltas:
2214 for data in deltas:
2185 node, p1, p2, linknode, deltabase, delta, flags = data
2215 node, p1, p2, linknode, deltabase, delta, flags = data
2186 link = linkmapper(linknode)
2216 link = linkmapper(linknode)
2187 flags = flags or REVIDX_DEFAULT_FLAGS
2217 flags = flags or REVIDX_DEFAULT_FLAGS
2188
2218
2189 nodes.append(node)
2219 nodes.append(node)
2190
2220
2191 if node in self.nodemap:
2221 if node in self.nodemap:
2192 # this can happen if two branches make the same change
2222 # this can happen if two branches make the same change
2193 continue
2223 continue
2194
2224
2195 for p in (p1, p2):
2225 for p in (p1, p2):
2196 if p not in self.nodemap:
2226 if p not in self.nodemap:
2197 raise LookupError(p, self.indexfile,
2227 raise LookupError(p, self.indexfile,
2198 _('unknown parent'))
2228 _('unknown parent'))
2199
2229
2200 if deltabase not in self.nodemap:
2230 if deltabase not in self.nodemap:
2201 raise LookupError(deltabase, self.indexfile,
2231 raise LookupError(deltabase, self.indexfile,
2202 _('unknown delta base'))
2232 _('unknown delta base'))
2203
2233
2204 baserev = self.rev(deltabase)
2234 baserev = self.rev(deltabase)
2205
2235
2206 if baserev != nullrev and self.iscensored(baserev):
2236 if baserev != nullrev and self.iscensored(baserev):
2207 # if base is censored, delta must be full replacement in a
2237 # if base is censored, delta must be full replacement in a
2208 # single patch operation
2238 # single patch operation
2209 hlen = struct.calcsize(">lll")
2239 hlen = struct.calcsize(">lll")
2210 oldlen = self.rawsize(baserev)
2240 oldlen = self.rawsize(baserev)
2211 newlen = len(delta) - hlen
2241 newlen = len(delta) - hlen
2212 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2242 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2213 raise error.CensoredBaseError(self.indexfile,
2243 raise error.CensoredBaseError(self.indexfile,
2214 self.node(baserev))
2244 self.node(baserev))
2215
2245
2216 if not flags and self._peek_iscensored(baserev, delta, flush):
2246 if not flags and self._peek_iscensored(baserev, delta, flush):
2217 flags |= REVIDX_ISCENSORED
2247 flags |= REVIDX_ISCENSORED
2218
2248
2219 # We assume consumers of addrevisioncb will want to retrieve
2249 # We assume consumers of addrevisioncb will want to retrieve
2220 # the added revision, which will require a call to
2250 # the added revision, which will require a call to
2221 # revision(). revision() will fast path if there is a cache
2251 # revision(). revision() will fast path if there is a cache
2222 # hit. So, we tell _addrevision() to always cache in this case.
2252 # hit. So, we tell _addrevision() to always cache in this case.
2223 # We're only using addgroup() in the context of changegroup
2253 # We're only using addgroup() in the context of changegroup
2224 # generation so the revision data can always be handled as raw
2254 # generation so the revision data can always be handled as raw
2225 # by the flagprocessor.
2255 # by the flagprocessor.
2226 self._addrevision(node, None, transaction, link,
2256 self._addrevision(node, None, transaction, link,
2227 p1, p2, flags, (baserev, delta),
2257 p1, p2, flags, (baserev, delta),
2228 ifh, dfh,
2258 ifh, dfh,
2229 alwayscache=bool(addrevisioncb),
2259 alwayscache=bool(addrevisioncb),
2230 deltacomputer=deltacomputer)
2260 deltacomputer=deltacomputer)
2231
2261
2232 if addrevisioncb:
2262 if addrevisioncb:
2233 addrevisioncb(self, node)
2263 addrevisioncb(self, node)
2234
2264
2235 if not dfh and not self._inline:
2265 if not dfh and not self._inline:
2236 # addrevision switched from inline to conventional
2266 # addrevision switched from inline to conventional
2237 # reopen the index
2267 # reopen the index
2238 ifh.close()
2268 ifh.close()
2239 dfh = self._datafp("a+")
2269 dfh = self._datafp("a+")
2240 ifh = self._indexfp("a+")
2270 ifh = self._indexfp("a+")
2241 finally:
2271 finally:
2242 if dfh:
2272 if dfh:
2243 dfh.close()
2273 dfh.close()
2244 ifh.close()
2274 ifh.close()
2245
2275
2246 return nodes
2276 return nodes
2247
2277
2248 def iscensored(self, rev):
2278 def iscensored(self, rev):
2249 """Check if a file revision is censored."""
2279 """Check if a file revision is censored."""
2250 return False
2280 return False
2251
2281
2252 def _peek_iscensored(self, baserev, delta, flush):
2282 def _peek_iscensored(self, baserev, delta, flush):
2253 """Quickly check if a delta produces a censored revision."""
2283 """Quickly check if a delta produces a censored revision."""
2254 return False
2284 return False
2255
2285
2256 def getstrippoint(self, minlink):
2286 def getstrippoint(self, minlink):
2257 """find the minimum rev that must be stripped to strip the linkrev
2287 """find the minimum rev that must be stripped to strip the linkrev
2258
2288
2259 Returns a tuple containing the minimum rev and a set of all revs that
2289 Returns a tuple containing the minimum rev and a set of all revs that
2260 have linkrevs that will be broken by this strip.
2290 have linkrevs that will be broken by this strip.
2261 """
2291 """
2262 brokenrevs = set()
2292 brokenrevs = set()
2263 strippoint = len(self)
2293 strippoint = len(self)
2264
2294
2265 heads = {}
2295 heads = {}
2266 futurelargelinkrevs = set()
2296 futurelargelinkrevs = set()
2267 for head in self.headrevs():
2297 for head in self.headrevs():
2268 headlinkrev = self.linkrev(head)
2298 headlinkrev = self.linkrev(head)
2269 heads[head] = headlinkrev
2299 heads[head] = headlinkrev
2270 if headlinkrev >= minlink:
2300 if headlinkrev >= minlink:
2271 futurelargelinkrevs.add(headlinkrev)
2301 futurelargelinkrevs.add(headlinkrev)
2272
2302
2273 # This algorithm involves walking down the rev graph, starting at the
2303 # This algorithm involves walking down the rev graph, starting at the
2274 # heads. Since the revs are topologically sorted according to linkrev,
2304 # heads. Since the revs are topologically sorted according to linkrev,
2275 # once all head linkrevs are below the minlink, we know there are
2305 # once all head linkrevs are below the minlink, we know there are
2276 # no more revs that could have a linkrev greater than minlink.
2306 # no more revs that could have a linkrev greater than minlink.
2277 # So we can stop walking.
2307 # So we can stop walking.
2278 while futurelargelinkrevs:
2308 while futurelargelinkrevs:
2279 strippoint -= 1
2309 strippoint -= 1
2280 linkrev = heads.pop(strippoint)
2310 linkrev = heads.pop(strippoint)
2281
2311
2282 if linkrev < minlink:
2312 if linkrev < minlink:
2283 brokenrevs.add(strippoint)
2313 brokenrevs.add(strippoint)
2284 else:
2314 else:
2285 futurelargelinkrevs.remove(linkrev)
2315 futurelargelinkrevs.remove(linkrev)
2286
2316
2287 for p in self.parentrevs(strippoint):
2317 for p in self.parentrevs(strippoint):
2288 if p != nullrev:
2318 if p != nullrev:
2289 plinkrev = self.linkrev(p)
2319 plinkrev = self.linkrev(p)
2290 heads[p] = plinkrev
2320 heads[p] = plinkrev
2291 if plinkrev >= minlink:
2321 if plinkrev >= minlink:
2292 futurelargelinkrevs.add(plinkrev)
2322 futurelargelinkrevs.add(plinkrev)
2293
2323
2294 return strippoint, brokenrevs
2324 return strippoint, brokenrevs
2295
2325
2296 def strip(self, minlink, transaction):
2326 def strip(self, minlink, transaction):
2297 """truncate the revlog on the first revision with a linkrev >= minlink
2327 """truncate the revlog on the first revision with a linkrev >= minlink
2298
2328
2299 This function is called when we're stripping revision minlink and
2329 This function is called when we're stripping revision minlink and
2300 its descendants from the repository.
2330 its descendants from the repository.
2301
2331
2302 We have to remove all revisions with linkrev >= minlink, because
2332 We have to remove all revisions with linkrev >= minlink, because
2303 the equivalent changelog revisions will be renumbered after the
2333 the equivalent changelog revisions will be renumbered after the
2304 strip.
2334 strip.
2305
2335
2306 So we truncate the revlog on the first of these revisions, and
2336 So we truncate the revlog on the first of these revisions, and
2307 trust that the caller has saved the revisions that shouldn't be
2337 trust that the caller has saved the revisions that shouldn't be
2308 removed and that it'll re-add them after this truncation.
2338 removed and that it'll re-add them after this truncation.
2309 """
2339 """
2310 if len(self) == 0:
2340 if len(self) == 0:
2311 return
2341 return
2312
2342
2313 rev, _ = self.getstrippoint(minlink)
2343 rev, _ = self.getstrippoint(minlink)
2314 if rev == len(self):
2344 if rev == len(self):
2315 return
2345 return
2316
2346
2317 # first truncate the files on disk
2347 # first truncate the files on disk
2318 end = self.start(rev)
2348 end = self.start(rev)
2319 if not self._inline:
2349 if not self._inline:
2320 transaction.add(self.datafile, end)
2350 transaction.add(self.datafile, end)
2321 end = rev * self._io.size
2351 end = rev * self._io.size
2322 else:
2352 else:
2323 end += rev * self._io.size
2353 end += rev * self._io.size
2324
2354
2325 transaction.add(self.indexfile, end)
2355 transaction.add(self.indexfile, end)
2326
2356
2327 # then reset internal state in memory to forget those revisions
2357 # then reset internal state in memory to forget those revisions
2328 self._cache = None
2358 self._cache = None
2329 self._chaininfocache = {}
2359 self._chaininfocache = {}
2330 self._chunkclear()
2360 self._chunkclear()
2331 for x in xrange(rev, len(self)):
2361 for x in xrange(rev, len(self)):
2332 del self.nodemap[self.node(x)]
2362 del self.nodemap[self.node(x)]
2333
2363
2334 del self.index[rev:-1]
2364 del self.index[rev:-1]
2335
2365
2336 def checksize(self):
2366 def checksize(self):
2337 expected = 0
2367 expected = 0
2338 if len(self):
2368 if len(self):
2339 expected = max(0, self.end(len(self) - 1))
2369 expected = max(0, self.end(len(self) - 1))
2340
2370
2341 try:
2371 try:
2342 with self._datafp() as f:
2372 with self._datafp() as f:
2343 f.seek(0, 2)
2373 f.seek(0, 2)
2344 actual = f.tell()
2374 actual = f.tell()
2345 dd = actual - expected
2375 dd = actual - expected
2346 except IOError as inst:
2376 except IOError as inst:
2347 if inst.errno != errno.ENOENT:
2377 if inst.errno != errno.ENOENT:
2348 raise
2378 raise
2349 dd = 0
2379 dd = 0
2350
2380
2351 try:
2381 try:
2352 f = self.opener(self.indexfile)
2382 f = self.opener(self.indexfile)
2353 f.seek(0, 2)
2383 f.seek(0, 2)
2354 actual = f.tell()
2384 actual = f.tell()
2355 f.close()
2385 f.close()
2356 s = self._io.size
2386 s = self._io.size
2357 i = max(0, actual // s)
2387 i = max(0, actual // s)
2358 di = actual - (i * s)
2388 di = actual - (i * s)
2359 if self._inline:
2389 if self._inline:
2360 databytes = 0
2390 databytes = 0
2361 for r in self:
2391 for r in self:
2362 databytes += max(0, self.length(r))
2392 databytes += max(0, self.length(r))
2363 dd = 0
2393 dd = 0
2364 di = actual - len(self) * s - databytes
2394 di = actual - len(self) * s - databytes
2365 except IOError as inst:
2395 except IOError as inst:
2366 if inst.errno != errno.ENOENT:
2396 if inst.errno != errno.ENOENT:
2367 raise
2397 raise
2368 di = 0
2398 di = 0
2369
2399
2370 return (dd, di)
2400 return (dd, di)
2371
2401
2372 def files(self):
2402 def files(self):
2373 res = [self.indexfile]
2403 res = [self.indexfile]
2374 if not self._inline:
2404 if not self._inline:
2375 res.append(self.datafile)
2405 res.append(self.datafile)
2376 return res
2406 return res
2377
2407
2378 DELTAREUSEALWAYS = 'always'
2408 DELTAREUSEALWAYS = 'always'
2379 DELTAREUSESAMEREVS = 'samerevs'
2409 DELTAREUSESAMEREVS = 'samerevs'
2380 DELTAREUSENEVER = 'never'
2410 DELTAREUSENEVER = 'never'
2381
2411
2382 DELTAREUSEFULLADD = 'fulladd'
2412 DELTAREUSEFULLADD = 'fulladd'
2383
2413
2384 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2414 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2385
2415
2386 def clone(self, tr, destrevlog, addrevisioncb=None,
2416 def clone(self, tr, destrevlog, addrevisioncb=None,
2387 deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
2417 deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
2388 """Copy this revlog to another, possibly with format changes.
2418 """Copy this revlog to another, possibly with format changes.
2389
2419
2390 The destination revlog will contain the same revisions and nodes.
2420 The destination revlog will contain the same revisions and nodes.
2391 However, it may not be bit-for-bit identical due to e.g. delta encoding
2421 However, it may not be bit-for-bit identical due to e.g. delta encoding
2392 differences.
2422 differences.
2393
2423
2394 The ``deltareuse`` argument control how deltas from the existing revlog
2424 The ``deltareuse`` argument control how deltas from the existing revlog
2395 are preserved in the destination revlog. The argument can have the
2425 are preserved in the destination revlog. The argument can have the
2396 following values:
2426 following values:
2397
2427
2398 DELTAREUSEALWAYS
2428 DELTAREUSEALWAYS
2399 Deltas will always be reused (if possible), even if the destination
2429 Deltas will always be reused (if possible), even if the destination
2400 revlog would not select the same revisions for the delta. This is the
2430 revlog would not select the same revisions for the delta. This is the
2401 fastest mode of operation.
2431 fastest mode of operation.
2402 DELTAREUSESAMEREVS
2432 DELTAREUSESAMEREVS
2403 Deltas will be reused if the destination revlog would pick the same
2433 Deltas will be reused if the destination revlog would pick the same
2404 revisions for the delta. This mode strikes a balance between speed
2434 revisions for the delta. This mode strikes a balance between speed
2405 and optimization.
2435 and optimization.
2406 DELTAREUSENEVER
2436 DELTAREUSENEVER
2407 Deltas will never be reused. This is the slowest mode of execution.
2437 Deltas will never be reused. This is the slowest mode of execution.
2408 This mode can be used to recompute deltas (e.g. if the diff/delta
2438 This mode can be used to recompute deltas (e.g. if the diff/delta
2409 algorithm changes).
2439 algorithm changes).
2410
2440
2411 Delta computation can be slow, so the choice of delta reuse policy can
2441 Delta computation can be slow, so the choice of delta reuse policy can
2412 significantly affect run time.
2442 significantly affect run time.
2413
2443
2414 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2444 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2415 two extremes. Deltas will be reused if they are appropriate. But if the
2445 two extremes. Deltas will be reused if they are appropriate. But if the
2416 delta could choose a better revision, it will do so. This means if you
2446 delta could choose a better revision, it will do so. This means if you
2417 are converting a non-generaldelta revlog to a generaldelta revlog,
2447 are converting a non-generaldelta revlog to a generaldelta revlog,
2418 deltas will be recomputed if the delta's parent isn't a parent of the
2448 deltas will be recomputed if the delta's parent isn't a parent of the
2419 revision.
2449 revision.
2420
2450
2421 In addition to the delta policy, the ``aggressivemergedeltas`` argument
2451 In addition to the delta policy, the ``aggressivemergedeltas`` argument
2422 controls whether to compute deltas against both parents for merges.
2452 controls whether to compute deltas against both parents for merges.
2423 By default, the current default is used.
2453 By default, the current default is used.
2424 """
2454 """
2425 if deltareuse not in self.DELTAREUSEALL:
2455 if deltareuse not in self.DELTAREUSEALL:
2426 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2456 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2427
2457
2428 if len(destrevlog):
2458 if len(destrevlog):
2429 raise ValueError(_('destination revlog is not empty'))
2459 raise ValueError(_('destination revlog is not empty'))
2430
2460
2431 if getattr(self, 'filteredrevs', None):
2461 if getattr(self, 'filteredrevs', None):
2432 raise ValueError(_('source revlog has filtered revisions'))
2462 raise ValueError(_('source revlog has filtered revisions'))
2433 if getattr(destrevlog, 'filteredrevs', None):
2463 if getattr(destrevlog, 'filteredrevs', None):
2434 raise ValueError(_('destination revlog has filtered revisions'))
2464 raise ValueError(_('destination revlog has filtered revisions'))
2435
2465
2436 # lazydeltabase controls whether to reuse a cached delta, if possible.
2466 # lazydeltabase controls whether to reuse a cached delta, if possible.
2437 oldlazydeltabase = destrevlog._lazydeltabase
2467 oldlazydeltabase = destrevlog._lazydeltabase
2438 oldamd = destrevlog._aggressivemergedeltas
2468 oldamd = destrevlog._aggressivemergedeltas
2439
2469
2440 try:
2470 try:
2441 if deltareuse == self.DELTAREUSEALWAYS:
2471 if deltareuse == self.DELTAREUSEALWAYS:
2442 destrevlog._lazydeltabase = True
2472 destrevlog._lazydeltabase = True
2443 elif deltareuse == self.DELTAREUSESAMEREVS:
2473 elif deltareuse == self.DELTAREUSESAMEREVS:
2444 destrevlog._lazydeltabase = False
2474 destrevlog._lazydeltabase = False
2445
2475
2446 destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd
2476 destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd
2447
2477
2448 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2478 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2449 self.DELTAREUSESAMEREVS)
2479 self.DELTAREUSESAMEREVS)
2450
2480
2451 deltacomputer = _deltacomputer(destrevlog)
2481 deltacomputer = _deltacomputer(destrevlog)
2452 index = self.index
2482 index = self.index
2453 for rev in self:
2483 for rev in self:
2454 entry = index[rev]
2484 entry = index[rev]
2455
2485
2456 # Some classes override linkrev to take filtered revs into
2486 # Some classes override linkrev to take filtered revs into
2457 # account. Use raw entry from index.
2487 # account. Use raw entry from index.
2458 flags = entry[0] & 0xffff
2488 flags = entry[0] & 0xffff
2459 linkrev = entry[4]
2489 linkrev = entry[4]
2460 p1 = index[entry[5]][7]
2490 p1 = index[entry[5]][7]
2461 p2 = index[entry[6]][7]
2491 p2 = index[entry[6]][7]
2462 node = entry[7]
2492 node = entry[7]
2463
2493
2464 # (Possibly) reuse the delta from the revlog if allowed and
2494 # (Possibly) reuse the delta from the revlog if allowed and
2465 # the revlog chunk is a delta.
2495 # the revlog chunk is a delta.
2466 cachedelta = None
2496 cachedelta = None
2467 rawtext = None
2497 rawtext = None
2468 if populatecachedelta:
2498 if populatecachedelta:
2469 dp = self.deltaparent(rev)
2499 dp = self.deltaparent(rev)
2470 if dp != nullrev:
2500 if dp != nullrev:
2471 cachedelta = (dp, bytes(self._chunk(rev)))
2501 cachedelta = (dp, bytes(self._chunk(rev)))
2472
2502
2473 if not cachedelta:
2503 if not cachedelta:
2474 rawtext = self.revision(rev, raw=True)
2504 rawtext = self.revision(rev, raw=True)
2475
2505
2476
2506
2477 if deltareuse == self.DELTAREUSEFULLADD:
2507 if deltareuse == self.DELTAREUSEFULLADD:
2478 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2508 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2479 cachedelta=cachedelta,
2509 cachedelta=cachedelta,
2480 node=node, flags=flags,
2510 node=node, flags=flags,
2481 deltacomputer=deltacomputer)
2511 deltacomputer=deltacomputer)
2482 else:
2512 else:
2483 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2513 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2484 checkambig=False)
2514 checkambig=False)
2485 dfh = None
2515 dfh = None
2486 if not destrevlog._inline:
2516 if not destrevlog._inline:
2487 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2517 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2488 try:
2518 try:
2489 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2519 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2490 p2, flags, cachedelta, ifh, dfh,
2520 p2, flags, cachedelta, ifh, dfh,
2491 deltacomputer=deltacomputer)
2521 deltacomputer=deltacomputer)
2492 finally:
2522 finally:
2493 if dfh:
2523 if dfh:
2494 dfh.close()
2524 dfh.close()
2495 ifh.close()
2525 ifh.close()
2496
2526
2497 if addrevisioncb:
2527 if addrevisioncb:
2498 addrevisioncb(self, rev, node)
2528 addrevisioncb(self, rev, node)
2499 finally:
2529 finally:
2500 destrevlog._lazydeltabase = oldlazydeltabase
2530 destrevlog._lazydeltabase = oldlazydeltabase
2501 destrevlog._aggressivemergedeltas = oldamd
2531 destrevlog._aggressivemergedeltas = oldamd
@@ -1,1091 +1,1126 b''
1 # wireproto.py - generic wire protocol support functions
1 # wireproto.py - generic wire protocol support functions
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import os
11 import os
12 import tempfile
12 import tempfile
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 bin,
16 bin,
17 hex,
17 hex,
18 nullid,
18 nullid,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 bundle2,
22 bundle2,
23 changegroup as changegroupmod,
23 changegroup as changegroupmod,
24 discovery,
24 discovery,
25 encoding,
25 encoding,
26 error,
26 error,
27 exchange,
27 exchange,
28 peer,
28 peer,
29 pushkey as pushkeymod,
29 pushkey as pushkeymod,
30 pycompat,
30 pycompat,
31 repository,
31 repository,
32 streamclone,
32 streamclone,
33 util,
33 util,
34 wireprototypes,
34 wireprototypes,
35 )
35 )
36
36
37 urlerr = util.urlerr
37 urlerr = util.urlerr
38 urlreq = util.urlreq
38 urlreq = util.urlreq
39
39
40 bytesresponse = wireprototypes.bytesresponse
40 bytesresponse = wireprototypes.bytesresponse
41 ooberror = wireprototypes.ooberror
41 ooberror = wireprototypes.ooberror
42 pushres = wireprototypes.pushres
42 pushres = wireprototypes.pushres
43 pusherr = wireprototypes.pusherr
43 pusherr = wireprototypes.pusherr
44 streamres = wireprototypes.streamres
44 streamres = wireprototypes.streamres
45 streamres_legacy = wireprototypes.streamreslegacy
45 streamres_legacy = wireprototypes.streamreslegacy
46
46
47 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
47 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
48 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
48 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
49 'IncompatibleClient')
49 'IncompatibleClient')
50 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
50 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
51
51
52 class remoteiterbatcher(peer.iterbatcher):
52 class remoteiterbatcher(peer.iterbatcher):
53 def __init__(self, remote):
53 def __init__(self, remote):
54 super(remoteiterbatcher, self).__init__()
54 super(remoteiterbatcher, self).__init__()
55 self._remote = remote
55 self._remote = remote
56
56
57 def __getattr__(self, name):
57 def __getattr__(self, name):
58 # Validate this method is batchable, since submit() only supports
58 # Validate this method is batchable, since submit() only supports
59 # batchable methods.
59 # batchable methods.
60 fn = getattr(self._remote, name)
60 fn = getattr(self._remote, name)
61 if not getattr(fn, 'batchable', None):
61 if not getattr(fn, 'batchable', None):
62 raise error.ProgrammingError('Attempted to batch a non-batchable '
62 raise error.ProgrammingError('Attempted to batch a non-batchable '
63 'call to %r' % name)
63 'call to %r' % name)
64
64
65 return super(remoteiterbatcher, self).__getattr__(name)
65 return super(remoteiterbatcher, self).__getattr__(name)
66
66
67 def submit(self):
67 def submit(self):
68 """Break the batch request into many patch calls and pipeline them.
68 """Break the batch request into many patch calls and pipeline them.
69
69
70 This is mostly valuable over http where request sizes can be
70 This is mostly valuable over http where request sizes can be
71 limited, but can be used in other places as well.
71 limited, but can be used in other places as well.
72 """
72 """
73 # 2-tuple of (command, arguments) that represents what will be
73 # 2-tuple of (command, arguments) that represents what will be
74 # sent over the wire.
74 # sent over the wire.
75 requests = []
75 requests = []
76
76
77 # 4-tuple of (command, final future, @batchable generator, remote
77 # 4-tuple of (command, final future, @batchable generator, remote
78 # future).
78 # future).
79 results = []
79 results = []
80
80
81 for command, args, opts, finalfuture in self.calls:
81 for command, args, opts, finalfuture in self.calls:
82 mtd = getattr(self._remote, command)
82 mtd = getattr(self._remote, command)
83 batchable = mtd.batchable(mtd.__self__, *args, **opts)
83 batchable = mtd.batchable(mtd.__self__, *args, **opts)
84
84
85 commandargs, fremote = next(batchable)
85 commandargs, fremote = next(batchable)
86 assert fremote
86 assert fremote
87 requests.append((command, commandargs))
87 requests.append((command, commandargs))
88 results.append((command, finalfuture, batchable, fremote))
88 results.append((command, finalfuture, batchable, fremote))
89
89
90 if requests:
90 if requests:
91 self._resultiter = self._remote._submitbatch(requests)
91 self._resultiter = self._remote._submitbatch(requests)
92
92
93 self._results = results
93 self._results = results
94
94
95 def results(self):
95 def results(self):
96 for command, finalfuture, batchable, remotefuture in self._results:
96 for command, finalfuture, batchable, remotefuture in self._results:
97 # Get the raw result, set it in the remote future, feed it
97 # Get the raw result, set it in the remote future, feed it
98 # back into the @batchable generator so it can be decoded, and
98 # back into the @batchable generator so it can be decoded, and
99 # set the result on the final future to this value.
99 # set the result on the final future to this value.
100 remoteresult = next(self._resultiter)
100 remoteresult = next(self._resultiter)
101 remotefuture.set(remoteresult)
101 remotefuture.set(remoteresult)
102 finalfuture.set(next(batchable))
102 finalfuture.set(next(batchable))
103
103
104 # Verify our @batchable generators only emit 2 values.
104 # Verify our @batchable generators only emit 2 values.
105 try:
105 try:
106 next(batchable)
106 next(batchable)
107 except StopIteration:
107 except StopIteration:
108 pass
108 pass
109 else:
109 else:
110 raise error.ProgrammingError('%s @batchable generator emitted '
110 raise error.ProgrammingError('%s @batchable generator emitted '
111 'unexpected value count' % command)
111 'unexpected value count' % command)
112
112
113 yield finalfuture.value
113 yield finalfuture.value
114
114
115 # Forward a couple of names from peer to make wireproto interactions
115 # Forward a couple of names from peer to make wireproto interactions
116 # slightly more sensible.
116 # slightly more sensible.
117 batchable = peer.batchable
117 batchable = peer.batchable
118 future = peer.future
118 future = peer.future
119
119
120 # list of nodes encoding / decoding
120 # list of nodes encoding / decoding
121
121
122 def decodelist(l, sep=' '):
122 def decodelist(l, sep=' '):
123 if l:
123 if l:
124 return [bin(v) for v in l.split(sep)]
124 return [bin(v) for v in l.split(sep)]
125 return []
125 return []
126
126
127 def encodelist(l, sep=' '):
127 def encodelist(l, sep=' '):
128 try:
128 try:
129 return sep.join(map(hex, l))
129 return sep.join(map(hex, l))
130 except TypeError:
130 except TypeError:
131 raise
131 raise
132
132
133 # batched call argument encoding
133 # batched call argument encoding
134
134
135 def escapearg(plain):
135 def escapearg(plain):
136 return (plain
136 return (plain
137 .replace(':', ':c')
137 .replace(':', ':c')
138 .replace(',', ':o')
138 .replace(',', ':o')
139 .replace(';', ':s')
139 .replace(';', ':s')
140 .replace('=', ':e'))
140 .replace('=', ':e'))
141
141
142 def unescapearg(escaped):
142 def unescapearg(escaped):
143 return (escaped
143 return (escaped
144 .replace(':e', '=')
144 .replace(':e', '=')
145 .replace(':s', ';')
145 .replace(':s', ';')
146 .replace(':o', ',')
146 .replace(':o', ',')
147 .replace(':c', ':'))
147 .replace(':c', ':'))
148
148
149 def encodebatchcmds(req):
149 def encodebatchcmds(req):
150 """Return a ``cmds`` argument value for the ``batch`` command."""
150 """Return a ``cmds`` argument value for the ``batch`` command."""
151 cmds = []
151 cmds = []
152 for op, argsdict in req:
152 for op, argsdict in req:
153 # Old servers didn't properly unescape argument names. So prevent
153 # Old servers didn't properly unescape argument names. So prevent
154 # the sending of argument names that may not be decoded properly by
154 # the sending of argument names that may not be decoded properly by
155 # servers.
155 # servers.
156 assert all(escapearg(k) == k for k in argsdict)
156 assert all(escapearg(k) == k for k in argsdict)
157
157
158 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
158 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
159 for k, v in argsdict.iteritems())
159 for k, v in argsdict.iteritems())
160 cmds.append('%s %s' % (op, args))
160 cmds.append('%s %s' % (op, args))
161
161
162 return ';'.join(cmds)
162 return ';'.join(cmds)
163
163
164 # mapping of options accepted by getbundle and their types
164 # mapping of options accepted by getbundle and their types
165 #
165 #
166 # Meant to be extended by extensions. It is extensions responsibility to ensure
166 # Meant to be extended by extensions. It is extensions responsibility to ensure
167 # such options are properly processed in exchange.getbundle.
167 # such options are properly processed in exchange.getbundle.
168 #
168 #
169 # supported types are:
169 # supported types are:
170 #
170 #
171 # :nodes: list of binary nodes
171 # :nodes: list of binary nodes
172 # :csv: list of comma-separated values
172 # :csv: list of comma-separated values
173 # :scsv: list of comma-separated values return as set
173 # :scsv: list of comma-separated values return as set
174 # :plain: string with no transformation needed.
174 # :plain: string with no transformation needed.
175 gboptsmap = {'heads': 'nodes',
175 gboptsmap = {'heads': 'nodes',
176 'bookmarks': 'boolean',
176 'bookmarks': 'boolean',
177 'common': 'nodes',
177 'common': 'nodes',
178 'obsmarkers': 'boolean',
178 'obsmarkers': 'boolean',
179 'phases': 'boolean',
179 'phases': 'boolean',
180 'bundlecaps': 'scsv',
180 'bundlecaps': 'scsv',
181 'listkeys': 'csv',
181 'listkeys': 'csv',
182 'cg': 'boolean',
182 'cg': 'boolean',
183 'cbattempted': 'boolean',
183 'cbattempted': 'boolean',
184 'stream': 'boolean',
184 'stream': 'boolean',
185 }
185 }
186
186
187 # client side
187 # client side
188
188
189 class wirepeer(repository.legacypeer):
189 class wirepeer(repository.legacypeer):
190 """Client-side interface for communicating with a peer repository.
190 """Client-side interface for communicating with a peer repository.
191
191
192 Methods commonly call wire protocol commands of the same name.
192 Methods commonly call wire protocol commands of the same name.
193
193
194 See also httppeer.py and sshpeer.py for protocol-specific
194 See also httppeer.py and sshpeer.py for protocol-specific
195 implementations of this interface.
195 implementations of this interface.
196 """
196 """
197 # Begin of basewirepeer interface.
197 # Begin of basewirepeer interface.
198
198
199 def iterbatch(self):
199 def iterbatch(self):
200 return remoteiterbatcher(self)
200 return remoteiterbatcher(self)
201
201
202 @batchable
202 @batchable
203 def lookup(self, key):
203 def lookup(self, key):
204 self.requirecap('lookup', _('look up remote revision'))
204 self.requirecap('lookup', _('look up remote revision'))
205 f = future()
205 f = future()
206 yield {'key': encoding.fromlocal(key)}, f
206 yield {'key': encoding.fromlocal(key)}, f
207 d = f.value
207 d = f.value
208 success, data = d[:-1].split(" ", 1)
208 success, data = d[:-1].split(" ", 1)
209 if int(success):
209 if int(success):
210 yield bin(data)
210 yield bin(data)
211 else:
211 else:
212 self._abort(error.RepoError(data))
212 self._abort(error.RepoError(data))
213
213
214 @batchable
214 @batchable
215 def heads(self):
215 def heads(self):
216 f = future()
216 f = future()
217 yield {}, f
217 yield {}, f
218 d = f.value
218 d = f.value
219 try:
219 try:
220 yield decodelist(d[:-1])
220 yield decodelist(d[:-1])
221 except ValueError:
221 except ValueError:
222 self._abort(error.ResponseError(_("unexpected response:"), d))
222 self._abort(error.ResponseError(_("unexpected response:"), d))
223
223
224 @batchable
224 @batchable
225 def known(self, nodes):
225 def known(self, nodes):
226 f = future()
226 f = future()
227 yield {'nodes': encodelist(nodes)}, f
227 yield {'nodes': encodelist(nodes)}, f
228 d = f.value
228 d = f.value
229 try:
229 try:
230 yield [bool(int(b)) for b in d]
230 yield [bool(int(b)) for b in d]
231 except ValueError:
231 except ValueError:
232 self._abort(error.ResponseError(_("unexpected response:"), d))
232 self._abort(error.ResponseError(_("unexpected response:"), d))
233
233
234 @batchable
234 @batchable
235 def branchmap(self):
235 def branchmap(self):
236 f = future()
236 f = future()
237 yield {}, f
237 yield {}, f
238 d = f.value
238 d = f.value
239 try:
239 try:
240 branchmap = {}
240 branchmap = {}
241 for branchpart in d.splitlines():
241 for branchpart in d.splitlines():
242 branchname, branchheads = branchpart.split(' ', 1)
242 branchname, branchheads = branchpart.split(' ', 1)
243 branchname = encoding.tolocal(urlreq.unquote(branchname))
243 branchname = encoding.tolocal(urlreq.unquote(branchname))
244 branchheads = decodelist(branchheads)
244 branchheads = decodelist(branchheads)
245 branchmap[branchname] = branchheads
245 branchmap[branchname] = branchheads
246 yield branchmap
246 yield branchmap
247 except TypeError:
247 except TypeError:
248 self._abort(error.ResponseError(_("unexpected response:"), d))
248 self._abort(error.ResponseError(_("unexpected response:"), d))
249
249
250 @batchable
250 @batchable
251 def listkeys(self, namespace):
251 def listkeys(self, namespace):
252 if not self.capable('pushkey'):
252 if not self.capable('pushkey'):
253 yield {}, None
253 yield {}, None
254 f = future()
254 f = future()
255 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
255 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
256 yield {'namespace': encoding.fromlocal(namespace)}, f
256 yield {'namespace': encoding.fromlocal(namespace)}, f
257 d = f.value
257 d = f.value
258 self.ui.debug('received listkey for "%s": %i bytes\n'
258 self.ui.debug('received listkey for "%s": %i bytes\n'
259 % (namespace, len(d)))
259 % (namespace, len(d)))
260 yield pushkeymod.decodekeys(d)
260 yield pushkeymod.decodekeys(d)
261
261
262 @batchable
262 @batchable
263 def pushkey(self, namespace, key, old, new):
263 def pushkey(self, namespace, key, old, new):
264 if not self.capable('pushkey'):
264 if not self.capable('pushkey'):
265 yield False, None
265 yield False, None
266 f = future()
266 f = future()
267 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
267 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
268 yield {'namespace': encoding.fromlocal(namespace),
268 yield {'namespace': encoding.fromlocal(namespace),
269 'key': encoding.fromlocal(key),
269 'key': encoding.fromlocal(key),
270 'old': encoding.fromlocal(old),
270 'old': encoding.fromlocal(old),
271 'new': encoding.fromlocal(new)}, f
271 'new': encoding.fromlocal(new)}, f
272 d = f.value
272 d = f.value
273 d, output = d.split('\n', 1)
273 d, output = d.split('\n', 1)
274 try:
274 try:
275 d = bool(int(d))
275 d = bool(int(d))
276 except ValueError:
276 except ValueError:
277 raise error.ResponseError(
277 raise error.ResponseError(
278 _('push failed (unexpected response):'), d)
278 _('push failed (unexpected response):'), d)
279 for l in output.splitlines(True):
279 for l in output.splitlines(True):
280 self.ui.status(_('remote: '), l)
280 self.ui.status(_('remote: '), l)
281 yield d
281 yield d
282
282
283 def stream_out(self):
283 def stream_out(self):
284 return self._callstream('stream_out')
284 return self._callstream('stream_out')
285
285
286 def getbundle(self, source, **kwargs):
286 def getbundle(self, source, **kwargs):
287 kwargs = pycompat.byteskwargs(kwargs)
287 kwargs = pycompat.byteskwargs(kwargs)
288 self.requirecap('getbundle', _('look up remote changes'))
288 self.requirecap('getbundle', _('look up remote changes'))
289 opts = {}
289 opts = {}
290 bundlecaps = kwargs.get('bundlecaps')
290 bundlecaps = kwargs.get('bundlecaps')
291 if bundlecaps is not None:
291 if bundlecaps is not None:
292 kwargs['bundlecaps'] = sorted(bundlecaps)
292 kwargs['bundlecaps'] = sorted(bundlecaps)
293 else:
293 else:
294 bundlecaps = () # kwargs could have it to None
294 bundlecaps = () # kwargs could have it to None
295 for key, value in kwargs.iteritems():
295 for key, value in kwargs.iteritems():
296 if value is None:
296 if value is None:
297 continue
297 continue
298 keytype = gboptsmap.get(key)
298 keytype = gboptsmap.get(key)
299 if keytype is None:
299 if keytype is None:
300 raise error.ProgrammingError(
300 raise error.ProgrammingError(
301 'Unexpectedly None keytype for key %s' % key)
301 'Unexpectedly None keytype for key %s' % key)
302 elif keytype == 'nodes':
302 elif keytype == 'nodes':
303 value = encodelist(value)
303 value = encodelist(value)
304 elif keytype in ('csv', 'scsv'):
304 elif keytype in ('csv', 'scsv'):
305 value = ','.join(value)
305 value = ','.join(value)
306 elif keytype == 'boolean':
306 elif keytype == 'boolean':
307 value = '%i' % bool(value)
307 value = '%i' % bool(value)
308 elif keytype != 'plain':
308 elif keytype != 'plain':
309 raise KeyError('unknown getbundle option type %s'
309 raise KeyError('unknown getbundle option type %s'
310 % keytype)
310 % keytype)
311 opts[key] = value
311 opts[key] = value
312 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
312 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
313 if any((cap.startswith('HG2') for cap in bundlecaps)):
313 if any((cap.startswith('HG2') for cap in bundlecaps)):
314 return bundle2.getunbundler(self.ui, f)
314 return bundle2.getunbundler(self.ui, f)
315 else:
315 else:
316 return changegroupmod.cg1unpacker(f, 'UN')
316 return changegroupmod.cg1unpacker(f, 'UN')
317
317
318 def unbundle(self, cg, heads, url):
318 def unbundle(self, cg, heads, url):
319 '''Send cg (a readable file-like object representing the
319 '''Send cg (a readable file-like object representing the
320 changegroup to push, typically a chunkbuffer object) to the
320 changegroup to push, typically a chunkbuffer object) to the
321 remote server as a bundle.
321 remote server as a bundle.
322
322
323 When pushing a bundle10 stream, return an integer indicating the
323 When pushing a bundle10 stream, return an integer indicating the
324 result of the push (see changegroup.apply()).
324 result of the push (see changegroup.apply()).
325
325
326 When pushing a bundle20 stream, return a bundle20 stream.
326 When pushing a bundle20 stream, return a bundle20 stream.
327
327
328 `url` is the url the client thinks it's pushing to, which is
328 `url` is the url the client thinks it's pushing to, which is
329 visible to hooks.
329 visible to hooks.
330 '''
330 '''
331
331
332 if heads != ['force'] and self.capable('unbundlehash'):
332 if heads != ['force'] and self.capable('unbundlehash'):
333 heads = encodelist(['hashed',
333 heads = encodelist(['hashed',
334 hashlib.sha1(''.join(sorted(heads))).digest()])
334 hashlib.sha1(''.join(sorted(heads))).digest()])
335 else:
335 else:
336 heads = encodelist(heads)
336 heads = encodelist(heads)
337
337
338 if util.safehasattr(cg, 'deltaheader'):
338 if util.safehasattr(cg, 'deltaheader'):
339 # this a bundle10, do the old style call sequence
339 # this a bundle10, do the old style call sequence
340 ret, output = self._callpush("unbundle", cg, heads=heads)
340 ret, output = self._callpush("unbundle", cg, heads=heads)
341 if ret == "":
341 if ret == "":
342 raise error.ResponseError(
342 raise error.ResponseError(
343 _('push failed:'), output)
343 _('push failed:'), output)
344 try:
344 try:
345 ret = int(ret)
345 ret = int(ret)
346 except ValueError:
346 except ValueError:
347 raise error.ResponseError(
347 raise error.ResponseError(
348 _('push failed (unexpected response):'), ret)
348 _('push failed (unexpected response):'), ret)
349
349
350 for l in output.splitlines(True):
350 for l in output.splitlines(True):
351 self.ui.status(_('remote: '), l)
351 self.ui.status(_('remote: '), l)
352 else:
352 else:
353 # bundle2 push. Send a stream, fetch a stream.
353 # bundle2 push. Send a stream, fetch a stream.
354 stream = self._calltwowaystream('unbundle', cg, heads=heads)
354 stream = self._calltwowaystream('unbundle', cg, heads=heads)
355 ret = bundle2.getunbundler(self.ui, stream)
355 ret = bundle2.getunbundler(self.ui, stream)
356 return ret
356 return ret
357
357
358 # End of basewirepeer interface.
358 # End of basewirepeer interface.
359
359
360 # Begin of baselegacywirepeer interface.
360 # Begin of baselegacywirepeer interface.
361
361
362 def branches(self, nodes):
362 def branches(self, nodes):
363 n = encodelist(nodes)
363 n = encodelist(nodes)
364 d = self._call("branches", nodes=n)
364 d = self._call("branches", nodes=n)
365 try:
365 try:
366 br = [tuple(decodelist(b)) for b in d.splitlines()]
366 br = [tuple(decodelist(b)) for b in d.splitlines()]
367 return br
367 return br
368 except ValueError:
368 except ValueError:
369 self._abort(error.ResponseError(_("unexpected response:"), d))
369 self._abort(error.ResponseError(_("unexpected response:"), d))
370
370
371 def between(self, pairs):
371 def between(self, pairs):
372 batch = 8 # avoid giant requests
372 batch = 8 # avoid giant requests
373 r = []
373 r = []
374 for i in xrange(0, len(pairs), batch):
374 for i in xrange(0, len(pairs), batch):
375 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
375 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
376 d = self._call("between", pairs=n)
376 d = self._call("between", pairs=n)
377 try:
377 try:
378 r.extend(l and decodelist(l) or [] for l in d.splitlines())
378 r.extend(l and decodelist(l) or [] for l in d.splitlines())
379 except ValueError:
379 except ValueError:
380 self._abort(error.ResponseError(_("unexpected response:"), d))
380 self._abort(error.ResponseError(_("unexpected response:"), d))
381 return r
381 return r
382
382
383 def changegroup(self, nodes, kind):
383 def changegroup(self, nodes, kind):
384 n = encodelist(nodes)
384 n = encodelist(nodes)
385 f = self._callcompressable("changegroup", roots=n)
385 f = self._callcompressable("changegroup", roots=n)
386 return changegroupmod.cg1unpacker(f, 'UN')
386 return changegroupmod.cg1unpacker(f, 'UN')
387
387
388 def changegroupsubset(self, bases, heads, kind):
388 def changegroupsubset(self, bases, heads, kind):
389 self.requirecap('changegroupsubset', _('look up remote changes'))
389 self.requirecap('changegroupsubset', _('look up remote changes'))
390 bases = encodelist(bases)
390 bases = encodelist(bases)
391 heads = encodelist(heads)
391 heads = encodelist(heads)
392 f = self._callcompressable("changegroupsubset",
392 f = self._callcompressable("changegroupsubset",
393 bases=bases, heads=heads)
393 bases=bases, heads=heads)
394 return changegroupmod.cg1unpacker(f, 'UN')
394 return changegroupmod.cg1unpacker(f, 'UN')
395
395
396 # End of baselegacywirepeer interface.
396 # End of baselegacywirepeer interface.
397
397
398 def _submitbatch(self, req):
398 def _submitbatch(self, req):
399 """run batch request <req> on the server
399 """run batch request <req> on the server
400
400
401 Returns an iterator of the raw responses from the server.
401 Returns an iterator of the raw responses from the server.
402 """
402 """
403 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
403 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
404 chunk = rsp.read(1024)
404 chunk = rsp.read(1024)
405 work = [chunk]
405 work = [chunk]
406 while chunk:
406 while chunk:
407 while ';' not in chunk and chunk:
407 while ';' not in chunk and chunk:
408 chunk = rsp.read(1024)
408 chunk = rsp.read(1024)
409 work.append(chunk)
409 work.append(chunk)
410 merged = ''.join(work)
410 merged = ''.join(work)
411 while ';' in merged:
411 while ';' in merged:
412 one, merged = merged.split(';', 1)
412 one, merged = merged.split(';', 1)
413 yield unescapearg(one)
413 yield unescapearg(one)
414 chunk = rsp.read(1024)
414 chunk = rsp.read(1024)
415 work = [merged, chunk]
415 work = [merged, chunk]
416 yield unescapearg(''.join(work))
416 yield unescapearg(''.join(work))
417
417
418 def _submitone(self, op, args):
418 def _submitone(self, op, args):
419 return self._call(op, **pycompat.strkwargs(args))
419 return self._call(op, **pycompat.strkwargs(args))
420
420
421 def debugwireargs(self, one, two, three=None, four=None, five=None):
421 def debugwireargs(self, one, two, three=None, four=None, five=None):
422 # don't pass optional arguments left at their default value
422 # don't pass optional arguments left at their default value
423 opts = {}
423 opts = {}
424 if three is not None:
424 if three is not None:
425 opts[r'three'] = three
425 opts[r'three'] = three
426 if four is not None:
426 if four is not None:
427 opts[r'four'] = four
427 opts[r'four'] = four
428 return self._call('debugwireargs', one=one, two=two, **opts)
428 return self._call('debugwireargs', one=one, two=two, **opts)
429
429
430 def _call(self, cmd, **args):
430 def _call(self, cmd, **args):
431 """execute <cmd> on the server
431 """execute <cmd> on the server
432
432
433 The command is expected to return a simple string.
433 The command is expected to return a simple string.
434
434
435 returns the server reply as a string."""
435 returns the server reply as a string."""
436 raise NotImplementedError()
436 raise NotImplementedError()
437
437
438 def _callstream(self, cmd, **args):
438 def _callstream(self, cmd, **args):
439 """execute <cmd> on the server
439 """execute <cmd> on the server
440
440
441 The command is expected to return a stream. Note that if the
441 The command is expected to return a stream. Note that if the
442 command doesn't return a stream, _callstream behaves
442 command doesn't return a stream, _callstream behaves
443 differently for ssh and http peers.
443 differently for ssh and http peers.
444
444
445 returns the server reply as a file like object.
445 returns the server reply as a file like object.
446 """
446 """
447 raise NotImplementedError()
447 raise NotImplementedError()
448
448
449 def _callcompressable(self, cmd, **args):
449 def _callcompressable(self, cmd, **args):
450 """execute <cmd> on the server
450 """execute <cmd> on the server
451
451
452 The command is expected to return a stream.
452 The command is expected to return a stream.
453
453
454 The stream may have been compressed in some implementations. This
454 The stream may have been compressed in some implementations. This
455 function takes care of the decompression. This is the only difference
455 function takes care of the decompression. This is the only difference
456 with _callstream.
456 with _callstream.
457
457
458 returns the server reply as a file like object.
458 returns the server reply as a file like object.
459 """
459 """
460 raise NotImplementedError()
460 raise NotImplementedError()
461
461
462 def _callpush(self, cmd, fp, **args):
462 def _callpush(self, cmd, fp, **args):
463 """execute a <cmd> on server
463 """execute a <cmd> on server
464
464
465 The command is expected to be related to a push. Push has a special
465 The command is expected to be related to a push. Push has a special
466 return method.
466 return method.
467
467
468 returns the server reply as a (ret, output) tuple. ret is either
468 returns the server reply as a (ret, output) tuple. ret is either
469 empty (error) or a stringified int.
469 empty (error) or a stringified int.
470 """
470 """
471 raise NotImplementedError()
471 raise NotImplementedError()
472
472
473 def _calltwowaystream(self, cmd, fp, **args):
473 def _calltwowaystream(self, cmd, fp, **args):
474 """execute <cmd> on server
474 """execute <cmd> on server
475
475
476 The command will send a stream to the server and get a stream in reply.
476 The command will send a stream to the server and get a stream in reply.
477 """
477 """
478 raise NotImplementedError()
478 raise NotImplementedError()
479
479
480 def _abort(self, exception):
480 def _abort(self, exception):
481 """clearly abort the wire protocol connection and raise the exception
481 """clearly abort the wire protocol connection and raise the exception
482 """
482 """
483 raise NotImplementedError()
483 raise NotImplementedError()
484
484
485 # server side
485 # server side
486
486
487 # wire protocol command can either return a string or one of these classes.
487 # wire protocol command can either return a string or one of these classes.
488
488
489 def getdispatchrepo(repo, proto, command):
489 def getdispatchrepo(repo, proto, command):
490 """Obtain the repo used for processing wire protocol commands.
490 """Obtain the repo used for processing wire protocol commands.
491
491
492 The intent of this function is to serve as a monkeypatch point for
492 The intent of this function is to serve as a monkeypatch point for
493 extensions that need commands to operate on different repo views under
493 extensions that need commands to operate on different repo views under
494 specialized circumstances.
494 specialized circumstances.
495 """
495 """
496 return repo.filtered('served')
496 return repo.filtered('served')
497
497
498 def dispatch(repo, proto, command):
498 def dispatch(repo, proto, command):
499 repo = getdispatchrepo(repo, proto, command)
499 repo = getdispatchrepo(repo, proto, command)
500 func, spec = commands[command]
500 func, spec = commands[command]
501 args = proto.getargs(spec)
501 args = proto.getargs(spec)
502 return func(repo, proto, *args)
502 return func(repo, proto, *args)
503
503
504 def options(cmd, keys, others):
504 def options(cmd, keys, others):
505 opts = {}
505 opts = {}
506 for k in keys:
506 for k in keys:
507 if k in others:
507 if k in others:
508 opts[k] = others[k]
508 opts[k] = others[k]
509 del others[k]
509 del others[k]
510 if others:
510 if others:
511 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
511 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
512 % (cmd, ",".join(others)))
512 % (cmd, ",".join(others)))
513 return opts
513 return opts
514
514
515 def bundle1allowed(repo, action):
515 def bundle1allowed(repo, action):
516 """Whether a bundle1 operation is allowed from the server.
516 """Whether a bundle1 operation is allowed from the server.
517
517
518 Priority is:
518 Priority is:
519
519
520 1. server.bundle1gd.<action> (if generaldelta active)
520 1. server.bundle1gd.<action> (if generaldelta active)
521 2. server.bundle1.<action>
521 2. server.bundle1.<action>
522 3. server.bundle1gd (if generaldelta active)
522 3. server.bundle1gd (if generaldelta active)
523 4. server.bundle1
523 4. server.bundle1
524 """
524 """
525 ui = repo.ui
525 ui = repo.ui
526 gd = 'generaldelta' in repo.requirements
526 gd = 'generaldelta' in repo.requirements
527
527
528 if gd:
528 if gd:
529 v = ui.configbool('server', 'bundle1gd.%s' % action)
529 v = ui.configbool('server', 'bundle1gd.%s' % action)
530 if v is not None:
530 if v is not None:
531 return v
531 return v
532
532
533 v = ui.configbool('server', 'bundle1.%s' % action)
533 v = ui.configbool('server', 'bundle1.%s' % action)
534 if v is not None:
534 if v is not None:
535 return v
535 return v
536
536
537 if gd:
537 if gd:
538 v = ui.configbool('server', 'bundle1gd')
538 v = ui.configbool('server', 'bundle1gd')
539 if v is not None:
539 if v is not None:
540 return v
540 return v
541
541
542 return ui.configbool('server', 'bundle1')
542 return ui.configbool('server', 'bundle1')
543
543
544 def supportedcompengines(ui, role):
544 def supportedcompengines(ui, role):
545 """Obtain the list of supported compression engines for a request."""
545 """Obtain the list of supported compression engines for a request."""
546 assert role in (util.CLIENTROLE, util.SERVERROLE)
546 assert role in (util.CLIENTROLE, util.SERVERROLE)
547
547
548 compengines = util.compengines.supportedwireengines(role)
548 compengines = util.compengines.supportedwireengines(role)
549
549
550 # Allow config to override default list and ordering.
550 # Allow config to override default list and ordering.
551 if role == util.SERVERROLE:
551 if role == util.SERVERROLE:
552 configengines = ui.configlist('server', 'compressionengines')
552 configengines = ui.configlist('server', 'compressionengines')
553 config = 'server.compressionengines'
553 config = 'server.compressionengines'
554 else:
554 else:
555 # This is currently implemented mainly to facilitate testing. In most
555 # This is currently implemented mainly to facilitate testing. In most
556 # cases, the server should be in charge of choosing a compression engine
556 # cases, the server should be in charge of choosing a compression engine
557 # because a server has the most to lose from a sub-optimal choice. (e.g.
557 # because a server has the most to lose from a sub-optimal choice. (e.g.
558 # CPU DoS due to an expensive engine or a network DoS due to poor
558 # CPU DoS due to an expensive engine or a network DoS due to poor
559 # compression ratio).
559 # compression ratio).
560 configengines = ui.configlist('experimental',
560 configengines = ui.configlist('experimental',
561 'clientcompressionengines')
561 'clientcompressionengines')
562 config = 'experimental.clientcompressionengines'
562 config = 'experimental.clientcompressionengines'
563
563
564 # No explicit config. Filter out the ones that aren't supposed to be
564 # No explicit config. Filter out the ones that aren't supposed to be
565 # advertised and return default ordering.
565 # advertised and return default ordering.
566 if not configengines:
566 if not configengines:
567 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
567 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
568 return [e for e in compengines
568 return [e for e in compengines
569 if getattr(e.wireprotosupport(), attr) > 0]
569 if getattr(e.wireprotosupport(), attr) > 0]
570
570
571 # If compression engines are listed in the config, assume there is a good
571 # If compression engines are listed in the config, assume there is a good
572 # reason for it (like server operators wanting to achieve specific
572 # reason for it (like server operators wanting to achieve specific
573 # performance characteristics). So fail fast if the config references
573 # performance characteristics). So fail fast if the config references
574 # unusable compression engines.
574 # unusable compression engines.
575 validnames = set(e.name() for e in compengines)
575 validnames = set(e.name() for e in compengines)
576 invalidnames = set(e for e in configengines if e not in validnames)
576 invalidnames = set(e for e in configengines if e not in validnames)
577 if invalidnames:
577 if invalidnames:
578 raise error.Abort(_('invalid compression engine defined in %s: %s') %
578 raise error.Abort(_('invalid compression engine defined in %s: %s') %
579 (config, ', '.join(sorted(invalidnames))))
579 (config, ', '.join(sorted(invalidnames))))
580
580
581 compengines = [e for e in compengines if e.name() in configengines]
581 compengines = [e for e in compengines if e.name() in configengines]
582 compengines = sorted(compengines,
582 compengines = sorted(compengines,
583 key=lambda e: configengines.index(e.name()))
583 key=lambda e: configengines.index(e.name()))
584
584
585 if not compengines:
585 if not compengines:
586 raise error.Abort(_('%s config option does not specify any known '
586 raise error.Abort(_('%s config option does not specify any known '
587 'compression engines') % config,
587 'compression engines') % config,
588 hint=_('usable compression engines: %s') %
588 hint=_('usable compression engines: %s') %
589 ', '.sorted(validnames))
589 ', '.sorted(validnames))
590
590
591 return compengines
591 return compengines
592
592
593 class commandentry(object):
593 class commandentry(object):
594 """Represents a declared wire protocol command."""
594 """Represents a declared wire protocol command."""
595 def __init__(self, func, args='', transports=None):
595 def __init__(self, func, args='', transports=None):
596 self.func = func
596 self.func = func
597 self.args = args
597 self.args = args
598 self.transports = transports or set()
598 self.transports = transports or set()
599
599
600 def _merge(self, func, args):
600 def _merge(self, func, args):
601 """Merge this instance with an incoming 2-tuple.
601 """Merge this instance with an incoming 2-tuple.
602
602
603 This is called when a caller using the old 2-tuple API attempts
603 This is called when a caller using the old 2-tuple API attempts
604 to replace an instance. The incoming values are merged with
604 to replace an instance. The incoming values are merged with
605 data not captured by the 2-tuple and a new instance containing
605 data not captured by the 2-tuple and a new instance containing
606 the union of the two objects is returned.
606 the union of the two objects is returned.
607 """
607 """
608 return commandentry(func, args=args, transports=set(self.transports))
608 return commandentry(func, args=args, transports=set(self.transports))
609
609
610 # Old code treats instances as 2-tuples. So expose that interface.
610 # Old code treats instances as 2-tuples. So expose that interface.
611 def __iter__(self):
611 def __iter__(self):
612 yield self.func
612 yield self.func
613 yield self.args
613 yield self.args
614
614
615 def __getitem__(self, i):
615 def __getitem__(self, i):
616 if i == 0:
616 if i == 0:
617 return self.func
617 return self.func
618 elif i == 1:
618 elif i == 1:
619 return self.args
619 return self.args
620 else:
620 else:
621 raise IndexError('can only access elements 0 and 1')
621 raise IndexError('can only access elements 0 and 1')
622
622
623 class commanddict(dict):
623 class commanddict(dict):
624 """Container for registered wire protocol commands.
624 """Container for registered wire protocol commands.
625
625
626 It behaves like a dict. But __setitem__ is overwritten to allow silent
626 It behaves like a dict. But __setitem__ is overwritten to allow silent
627 coercion of values from 2-tuples for API compatibility.
627 coercion of values from 2-tuples for API compatibility.
628 """
628 """
629 def __setitem__(self, k, v):
629 def __setitem__(self, k, v):
630 if isinstance(v, commandentry):
630 if isinstance(v, commandentry):
631 pass
631 pass
632 # Cast 2-tuples to commandentry instances.
632 # Cast 2-tuples to commandentry instances.
633 elif isinstance(v, tuple):
633 elif isinstance(v, tuple):
634 if len(v) != 2:
634 if len(v) != 2:
635 raise ValueError('command tuples must have exactly 2 elements')
635 raise ValueError('command tuples must have exactly 2 elements')
636
636
637 # It is common for extensions to wrap wire protocol commands via
637 # It is common for extensions to wrap wire protocol commands via
638 # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
638 # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
639 # doing this aren't aware of the new API that uses objects to store
639 # doing this aren't aware of the new API that uses objects to store
640 # command entries, we automatically merge old state with new.
640 # command entries, we automatically merge old state with new.
641 if k in self:
641 if k in self:
642 v = self[k]._merge(v[0], v[1])
642 v = self[k]._merge(v[0], v[1])
643 else:
643 else:
644 # Use default values from @wireprotocommand.
644 # Use default values from @wireprotocommand.
645 v = commandentry(v[0], args=v[1],
645 v = commandentry(v[0], args=v[1],
646 transports=set(wireprototypes.TRANSPORTS))
646 transports=set(wireprototypes.TRANSPORTS))
647 else:
647 else:
648 raise ValueError('command entries must be commandentry instances '
648 raise ValueError('command entries must be commandentry instances '
649 'or 2-tuples')
649 'or 2-tuples')
650
650
651 return super(commanddict, self).__setitem__(k, v)
651 return super(commanddict, self).__setitem__(k, v)
652
652
653 def commandavailable(self, command, proto):
653 def commandavailable(self, command, proto):
654 """Determine if a command is available for the requested protocol."""
654 """Determine if a command is available for the requested protocol."""
655 assert proto.name in wireprototypes.TRANSPORTS
655 assert proto.name in wireprototypes.TRANSPORTS
656
656
657 entry = self.get(command)
657 entry = self.get(command)
658
658
659 if not entry:
659 if not entry:
660 return False
660 return False
661
661
662 if proto.name not in entry.transports:
662 if proto.name not in entry.transports:
663 return False
663 return False
664
664
665 return True
665 return True
666
666
667 # Constants specifying which transports a wire protocol command should be
667 # Constants specifying which transports a wire protocol command should be
668 # available on. For use with @wireprotocommand.
668 # available on. For use with @wireprotocommand.
669 POLICY_ALL = 'all'
669 POLICY_ALL = 'all'
670 POLICY_V1_ONLY = 'v1-only'
670 POLICY_V1_ONLY = 'v1-only'
671 POLICY_V2_ONLY = 'v2-only'
671 POLICY_V2_ONLY = 'v2-only'
672
672
673 commands = commanddict()
673 commands = commanddict()
674
674
675 # Maps wire protocol name to operation type. This is used for permissions
676 # checking. All defined @wireiprotocommand should have an entry in this
677 # dict.
678 permissions = {}
679
675 def wireprotocommand(name, args='', transportpolicy=POLICY_ALL):
680 def wireprotocommand(name, args='', transportpolicy=POLICY_ALL):
676 """Decorator to declare a wire protocol command.
681 """Decorator to declare a wire protocol command.
677
682
678 ``name`` is the name of the wire protocol command being provided.
683 ``name`` is the name of the wire protocol command being provided.
679
684
680 ``args`` is a space-delimited list of named arguments that the command
685 ``args`` is a space-delimited list of named arguments that the command
681 accepts. ``*`` is a special value that says to accept all arguments.
686 accepts. ``*`` is a special value that says to accept all arguments.
682
687
683 ``transportpolicy`` is a POLICY_* constant denoting which transports
688 ``transportpolicy`` is a POLICY_* constant denoting which transports
684 this wire protocol command should be exposed to. By default, commands
689 this wire protocol command should be exposed to. By default, commands
685 are exposed to all wire protocol transports.
690 are exposed to all wire protocol transports.
686 """
691 """
687 if transportpolicy == POLICY_ALL:
692 if transportpolicy == POLICY_ALL:
688 transports = set(wireprototypes.TRANSPORTS)
693 transports = set(wireprototypes.TRANSPORTS)
689 elif transportpolicy == POLICY_V1_ONLY:
694 elif transportpolicy == POLICY_V1_ONLY:
690 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
695 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
691 if v['version'] == 1}
696 if v['version'] == 1}
692 elif transportpolicy == POLICY_V2_ONLY:
697 elif transportpolicy == POLICY_V2_ONLY:
693 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
698 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
694 if v['version'] == 2}
699 if v['version'] == 2}
695 else:
700 else:
696 raise error.Abort(_('invalid transport policy value: %s') %
701 raise error.Abort(_('invalid transport policy value: %s') %
697 transportpolicy)
702 transportpolicy)
698
703
699 def register(func):
704 def register(func):
700 commands[name] = commandentry(func, args=args, transports=transports)
705 commands[name] = commandentry(func, args=args, transports=transports)
701 return func
706 return func
702 return register
707 return register
703
708
709 # TODO define a more appropriate permissions type to use for this.
710 permissions['batch'] = 'pull'
704 @wireprotocommand('batch', 'cmds *')
711 @wireprotocommand('batch', 'cmds *')
705 def batch(repo, proto, cmds, others):
712 def batch(repo, proto, cmds, others):
706 repo = repo.filtered("served")
713 repo = repo.filtered("served")
707 res = []
714 res = []
708 for pair in cmds.split(';'):
715 for pair in cmds.split(';'):
709 op, args = pair.split(' ', 1)
716 op, args = pair.split(' ', 1)
710 vals = {}
717 vals = {}
711 for a in args.split(','):
718 for a in args.split(','):
712 if a:
719 if a:
713 n, v = a.split('=')
720 n, v = a.split('=')
714 vals[unescapearg(n)] = unescapearg(v)
721 vals[unescapearg(n)] = unescapearg(v)
715 func, spec = commands[op]
722 func, spec = commands[op]
723
724 # If the protocol supports permissions checking, perform that
725 # checking on each batched command.
726 # TODO formalize permission checking as part of protocol interface.
727 if util.safehasattr(proto, 'checkperm'):
728 # Assume commands with no defined permissions are writes / for
729 # pushes. This is the safest from a security perspective because
730 # it doesn't allow commands with undefined semantics from
731 # bypassing permissions checks.
732 proto.checkperm(permissions.get(op, 'push'))
733
716 if spec:
734 if spec:
717 keys = spec.split()
735 keys = spec.split()
718 data = {}
736 data = {}
719 for k in keys:
737 for k in keys:
720 if k == '*':
738 if k == '*':
721 star = {}
739 star = {}
722 for key in vals.keys():
740 for key in vals.keys():
723 if key not in keys:
741 if key not in keys:
724 star[key] = vals[key]
742 star[key] = vals[key]
725 data['*'] = star
743 data['*'] = star
726 else:
744 else:
727 data[k] = vals[k]
745 data[k] = vals[k]
728 result = func(repo, proto, *[data[k] for k in keys])
746 result = func(repo, proto, *[data[k] for k in keys])
729 else:
747 else:
730 result = func(repo, proto)
748 result = func(repo, proto)
731 if isinstance(result, ooberror):
749 if isinstance(result, ooberror):
732 return result
750 return result
733
751
734 # For now, all batchable commands must return bytesresponse or
752 # For now, all batchable commands must return bytesresponse or
735 # raw bytes (for backwards compatibility).
753 # raw bytes (for backwards compatibility).
736 assert isinstance(result, (bytesresponse, bytes))
754 assert isinstance(result, (bytesresponse, bytes))
737 if isinstance(result, bytesresponse):
755 if isinstance(result, bytesresponse):
738 result = result.data
756 result = result.data
739 res.append(escapearg(result))
757 res.append(escapearg(result))
740
758
741 return bytesresponse(';'.join(res))
759 return bytesresponse(';'.join(res))
742
760
761 permissions['between'] = 'pull'
743 @wireprotocommand('between', 'pairs', transportpolicy=POLICY_V1_ONLY)
762 @wireprotocommand('between', 'pairs', transportpolicy=POLICY_V1_ONLY)
744 def between(repo, proto, pairs):
763 def between(repo, proto, pairs):
745 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
764 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
746 r = []
765 r = []
747 for b in repo.between(pairs):
766 for b in repo.between(pairs):
748 r.append(encodelist(b) + "\n")
767 r.append(encodelist(b) + "\n")
749
768
750 return bytesresponse(''.join(r))
769 return bytesresponse(''.join(r))
751
770
771 permissions['branchmap'] = 'pull'
752 @wireprotocommand('branchmap')
772 @wireprotocommand('branchmap')
753 def branchmap(repo, proto):
773 def branchmap(repo, proto):
754 branchmap = repo.branchmap()
774 branchmap = repo.branchmap()
755 heads = []
775 heads = []
756 for branch, nodes in branchmap.iteritems():
776 for branch, nodes in branchmap.iteritems():
757 branchname = urlreq.quote(encoding.fromlocal(branch))
777 branchname = urlreq.quote(encoding.fromlocal(branch))
758 branchnodes = encodelist(nodes)
778 branchnodes = encodelist(nodes)
759 heads.append('%s %s' % (branchname, branchnodes))
779 heads.append('%s %s' % (branchname, branchnodes))
760
780
761 return bytesresponse('\n'.join(heads))
781 return bytesresponse('\n'.join(heads))
762
782
783 permissions['branches'] = 'pull'
763 @wireprotocommand('branches', 'nodes', transportpolicy=POLICY_V1_ONLY)
784 @wireprotocommand('branches', 'nodes', transportpolicy=POLICY_V1_ONLY)
764 def branches(repo, proto, nodes):
785 def branches(repo, proto, nodes):
765 nodes = decodelist(nodes)
786 nodes = decodelist(nodes)
766 r = []
787 r = []
767 for b in repo.branches(nodes):
788 for b in repo.branches(nodes):
768 r.append(encodelist(b) + "\n")
789 r.append(encodelist(b) + "\n")
769
790
770 return bytesresponse(''.join(r))
791 return bytesresponse(''.join(r))
771
792
793 permissions['clonebundles'] = 'pull'
772 @wireprotocommand('clonebundles', '')
794 @wireprotocommand('clonebundles', '')
773 def clonebundles(repo, proto):
795 def clonebundles(repo, proto):
774 """Server command for returning info for available bundles to seed clones.
796 """Server command for returning info for available bundles to seed clones.
775
797
776 Clients will parse this response and determine what bundle to fetch.
798 Clients will parse this response and determine what bundle to fetch.
777
799
778 Extensions may wrap this command to filter or dynamically emit data
800 Extensions may wrap this command to filter or dynamically emit data
779 depending on the request. e.g. you could advertise URLs for the closest
801 depending on the request. e.g. you could advertise URLs for the closest
780 data center given the client's IP address.
802 data center given the client's IP address.
781 """
803 """
782 return bytesresponse(repo.vfs.tryread('clonebundles.manifest'))
804 return bytesresponse(repo.vfs.tryread('clonebundles.manifest'))
783
805
784 wireprotocaps = ['lookup', 'branchmap', 'pushkey',
806 wireprotocaps = ['lookup', 'branchmap', 'pushkey',
785 'known', 'getbundle', 'unbundlehash', 'batch']
807 'known', 'getbundle', 'unbundlehash', 'batch']
786
808
787 def _capabilities(repo, proto):
809 def _capabilities(repo, proto):
788 """return a list of capabilities for a repo
810 """return a list of capabilities for a repo
789
811
790 This function exists to allow extensions to easily wrap capabilities
812 This function exists to allow extensions to easily wrap capabilities
791 computation
813 computation
792
814
793 - returns a lists: easy to alter
815 - returns a lists: easy to alter
794 - change done here will be propagated to both `capabilities` and `hello`
816 - change done here will be propagated to both `capabilities` and `hello`
795 command without any other action needed.
817 command without any other action needed.
796 """
818 """
797 # copy to prevent modification of the global list
819 # copy to prevent modification of the global list
798 caps = list(wireprotocaps)
820 caps = list(wireprotocaps)
799
821
800 # Command of same name as capability isn't exposed to version 1 of
822 # Command of same name as capability isn't exposed to version 1 of
801 # transports. So conditionally add it.
823 # transports. So conditionally add it.
802 if commands.commandavailable('changegroupsubset', proto):
824 if commands.commandavailable('changegroupsubset', proto):
803 caps.append('changegroupsubset')
825 caps.append('changegroupsubset')
804
826
805 if streamclone.allowservergeneration(repo):
827 if streamclone.allowservergeneration(repo):
806 if repo.ui.configbool('server', 'preferuncompressed'):
828 if repo.ui.configbool('server', 'preferuncompressed'):
807 caps.append('stream-preferred')
829 caps.append('stream-preferred')
808 requiredformats = repo.requirements & repo.supportedformats
830 requiredformats = repo.requirements & repo.supportedformats
809 # if our local revlogs are just revlogv1, add 'stream' cap
831 # if our local revlogs are just revlogv1, add 'stream' cap
810 if not requiredformats - {'revlogv1'}:
832 if not requiredformats - {'revlogv1'}:
811 caps.append('stream')
833 caps.append('stream')
812 # otherwise, add 'streamreqs' detailing our local revlog format
834 # otherwise, add 'streamreqs' detailing our local revlog format
813 else:
835 else:
814 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
836 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
815 if repo.ui.configbool('experimental', 'bundle2-advertise'):
837 if repo.ui.configbool('experimental', 'bundle2-advertise'):
816 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role='server'))
838 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role='server'))
817 caps.append('bundle2=' + urlreq.quote(capsblob))
839 caps.append('bundle2=' + urlreq.quote(capsblob))
818 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
840 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
819
841
820 return proto.addcapabilities(repo, caps)
842 return proto.addcapabilities(repo, caps)
821
843
822 # If you are writing an extension and consider wrapping this function. Wrap
844 # If you are writing an extension and consider wrapping this function. Wrap
823 # `_capabilities` instead.
845 # `_capabilities` instead.
846 permissions['capabilities'] = 'pull'
824 @wireprotocommand('capabilities')
847 @wireprotocommand('capabilities')
825 def capabilities(repo, proto):
848 def capabilities(repo, proto):
826 return bytesresponse(' '.join(_capabilities(repo, proto)))
849 return bytesresponse(' '.join(_capabilities(repo, proto)))
827
850
851 permissions['changegroup'] = 'pull'
828 @wireprotocommand('changegroup', 'roots', transportpolicy=POLICY_V1_ONLY)
852 @wireprotocommand('changegroup', 'roots', transportpolicy=POLICY_V1_ONLY)
829 def changegroup(repo, proto, roots):
853 def changegroup(repo, proto, roots):
830 nodes = decodelist(roots)
854 nodes = decodelist(roots)
831 outgoing = discovery.outgoing(repo, missingroots=nodes,
855 outgoing = discovery.outgoing(repo, missingroots=nodes,
832 missingheads=repo.heads())
856 missingheads=repo.heads())
833 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
857 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
834 gen = iter(lambda: cg.read(32768), '')
858 gen = iter(lambda: cg.read(32768), '')
835 return streamres(gen=gen)
859 return streamres(gen=gen)
836
860
861 permissions['changegroupsubset'] = 'pull'
837 @wireprotocommand('changegroupsubset', 'bases heads',
862 @wireprotocommand('changegroupsubset', 'bases heads',
838 transportpolicy=POLICY_V1_ONLY)
863 transportpolicy=POLICY_V1_ONLY)
839 def changegroupsubset(repo, proto, bases, heads):
864 def changegroupsubset(repo, proto, bases, heads):
840 bases = decodelist(bases)
865 bases = decodelist(bases)
841 heads = decodelist(heads)
866 heads = decodelist(heads)
842 outgoing = discovery.outgoing(repo, missingroots=bases,
867 outgoing = discovery.outgoing(repo, missingroots=bases,
843 missingheads=heads)
868 missingheads=heads)
844 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
869 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
845 gen = iter(lambda: cg.read(32768), '')
870 gen = iter(lambda: cg.read(32768), '')
846 return streamres(gen=gen)
871 return streamres(gen=gen)
847
872
873 permissions['debugwireargs'] = 'pull'
848 @wireprotocommand('debugwireargs', 'one two *')
874 @wireprotocommand('debugwireargs', 'one two *')
849 def debugwireargs(repo, proto, one, two, others):
875 def debugwireargs(repo, proto, one, two, others):
850 # only accept optional args from the known set
876 # only accept optional args from the known set
851 opts = options('debugwireargs', ['three', 'four'], others)
877 opts = options('debugwireargs', ['three', 'four'], others)
852 return bytesresponse(repo.debugwireargs(one, two,
878 return bytesresponse(repo.debugwireargs(one, two,
853 **pycompat.strkwargs(opts)))
879 **pycompat.strkwargs(opts)))
854
880
881 permissions['getbundle'] = 'pull'
855 @wireprotocommand('getbundle', '*')
882 @wireprotocommand('getbundle', '*')
856 def getbundle(repo, proto, others):
883 def getbundle(repo, proto, others):
857 opts = options('getbundle', gboptsmap.keys(), others)
884 opts = options('getbundle', gboptsmap.keys(), others)
858 for k, v in opts.iteritems():
885 for k, v in opts.iteritems():
859 keytype = gboptsmap[k]
886 keytype = gboptsmap[k]
860 if keytype == 'nodes':
887 if keytype == 'nodes':
861 opts[k] = decodelist(v)
888 opts[k] = decodelist(v)
862 elif keytype == 'csv':
889 elif keytype == 'csv':
863 opts[k] = list(v.split(','))
890 opts[k] = list(v.split(','))
864 elif keytype == 'scsv':
891 elif keytype == 'scsv':
865 opts[k] = set(v.split(','))
892 opts[k] = set(v.split(','))
866 elif keytype == 'boolean':
893 elif keytype == 'boolean':
867 # Client should serialize False as '0', which is a non-empty string
894 # Client should serialize False as '0', which is a non-empty string
868 # so it evaluates as a True bool.
895 # so it evaluates as a True bool.
869 if v == '0':
896 if v == '0':
870 opts[k] = False
897 opts[k] = False
871 else:
898 else:
872 opts[k] = bool(v)
899 opts[k] = bool(v)
873 elif keytype != 'plain':
900 elif keytype != 'plain':
874 raise KeyError('unknown getbundle option type %s'
901 raise KeyError('unknown getbundle option type %s'
875 % keytype)
902 % keytype)
876
903
877 if not bundle1allowed(repo, 'pull'):
904 if not bundle1allowed(repo, 'pull'):
878 if not exchange.bundle2requested(opts.get('bundlecaps')):
905 if not exchange.bundle2requested(opts.get('bundlecaps')):
879 if proto.name == 'http-v1':
906 if proto.name == 'http-v1':
880 return ooberror(bundle2required)
907 return ooberror(bundle2required)
881 raise error.Abort(bundle2requiredmain,
908 raise error.Abort(bundle2requiredmain,
882 hint=bundle2requiredhint)
909 hint=bundle2requiredhint)
883
910
884 prefercompressed = True
911 prefercompressed = True
885
912
886 try:
913 try:
887 if repo.ui.configbool('server', 'disablefullbundle'):
914 if repo.ui.configbool('server', 'disablefullbundle'):
888 # Check to see if this is a full clone.
915 # Check to see if this is a full clone.
889 clheads = set(repo.changelog.heads())
916 clheads = set(repo.changelog.heads())
890 changegroup = opts.get('cg', True)
917 changegroup = opts.get('cg', True)
891 heads = set(opts.get('heads', set()))
918 heads = set(opts.get('heads', set()))
892 common = set(opts.get('common', set()))
919 common = set(opts.get('common', set()))
893 common.discard(nullid)
920 common.discard(nullid)
894 if changegroup and not common and clheads == heads:
921 if changegroup and not common and clheads == heads:
895 raise error.Abort(
922 raise error.Abort(
896 _('server has pull-based clones disabled'),
923 _('server has pull-based clones disabled'),
897 hint=_('remove --pull if specified or upgrade Mercurial'))
924 hint=_('remove --pull if specified or upgrade Mercurial'))
898
925
899 info, chunks = exchange.getbundlechunks(repo, 'serve',
926 info, chunks = exchange.getbundlechunks(repo, 'serve',
900 **pycompat.strkwargs(opts))
927 **pycompat.strkwargs(opts))
901 prefercompressed = info.get('prefercompressed', True)
928 prefercompressed = info.get('prefercompressed', True)
902 except error.Abort as exc:
929 except error.Abort as exc:
903 # cleanly forward Abort error to the client
930 # cleanly forward Abort error to the client
904 if not exchange.bundle2requested(opts.get('bundlecaps')):
931 if not exchange.bundle2requested(opts.get('bundlecaps')):
905 if proto.name == 'http-v1':
932 if proto.name == 'http-v1':
906 return ooberror(pycompat.bytestr(exc) + '\n')
933 return ooberror(pycompat.bytestr(exc) + '\n')
907 raise # cannot do better for bundle1 + ssh
934 raise # cannot do better for bundle1 + ssh
908 # bundle2 request expect a bundle2 reply
935 # bundle2 request expect a bundle2 reply
909 bundler = bundle2.bundle20(repo.ui)
936 bundler = bundle2.bundle20(repo.ui)
910 manargs = [('message', pycompat.bytestr(exc))]
937 manargs = [('message', pycompat.bytestr(exc))]
911 advargs = []
938 advargs = []
912 if exc.hint is not None:
939 if exc.hint is not None:
913 advargs.append(('hint', exc.hint))
940 advargs.append(('hint', exc.hint))
914 bundler.addpart(bundle2.bundlepart('error:abort',
941 bundler.addpart(bundle2.bundlepart('error:abort',
915 manargs, advargs))
942 manargs, advargs))
916 chunks = bundler.getchunks()
943 chunks = bundler.getchunks()
917 prefercompressed = False
944 prefercompressed = False
918
945
919 return streamres(gen=chunks, prefer_uncompressed=not prefercompressed)
946 return streamres(gen=chunks, prefer_uncompressed=not prefercompressed)
920
947
948 permissions['heads'] = 'pull'
921 @wireprotocommand('heads')
949 @wireprotocommand('heads')
922 def heads(repo, proto):
950 def heads(repo, proto):
923 h = repo.heads()
951 h = repo.heads()
924 return bytesresponse(encodelist(h) + '\n')
952 return bytesresponse(encodelist(h) + '\n')
925
953
954 permissions['hello'] = 'pull'
926 @wireprotocommand('hello')
955 @wireprotocommand('hello')
927 def hello(repo, proto):
956 def hello(repo, proto):
928 """Called as part of SSH handshake to obtain server info.
957 """Called as part of SSH handshake to obtain server info.
929
958
930 Returns a list of lines describing interesting things about the
959 Returns a list of lines describing interesting things about the
931 server, in an RFC822-like format.
960 server, in an RFC822-like format.
932
961
933 Currently, the only one defined is ``capabilities``, which consists of a
962 Currently, the only one defined is ``capabilities``, which consists of a
934 line of space separated tokens describing server abilities:
963 line of space separated tokens describing server abilities:
935
964
936 capabilities: <token0> <token1> <token2>
965 capabilities: <token0> <token1> <token2>
937 """
966 """
938 caps = capabilities(repo, proto).data
967 caps = capabilities(repo, proto).data
939 return bytesresponse('capabilities: %s\n' % caps)
968 return bytesresponse('capabilities: %s\n' % caps)
940
969
970 permissions['listkeys'] = 'pull'
941 @wireprotocommand('listkeys', 'namespace')
971 @wireprotocommand('listkeys', 'namespace')
942 def listkeys(repo, proto, namespace):
972 def listkeys(repo, proto, namespace):
943 d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
973 d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
944 return bytesresponse(pushkeymod.encodekeys(d))
974 return bytesresponse(pushkeymod.encodekeys(d))
945
975
976 permissions['lookup'] = 'pull'
946 @wireprotocommand('lookup', 'key')
977 @wireprotocommand('lookup', 'key')
947 def lookup(repo, proto, key):
978 def lookup(repo, proto, key):
948 try:
979 try:
949 k = encoding.tolocal(key)
980 k = encoding.tolocal(key)
950 c = repo[k]
981 c = repo[k]
951 r = c.hex()
982 r = c.hex()
952 success = 1
983 success = 1
953 except Exception as inst:
984 except Exception as inst:
954 r = util.forcebytestr(inst)
985 r = util.forcebytestr(inst)
955 success = 0
986 success = 0
956 return bytesresponse('%d %s\n' % (success, r))
987 return bytesresponse('%d %s\n' % (success, r))
957
988
989 permissions['known'] = 'pull'
958 @wireprotocommand('known', 'nodes *')
990 @wireprotocommand('known', 'nodes *')
959 def known(repo, proto, nodes, others):
991 def known(repo, proto, nodes, others):
960 v = ''.join(b and '1' or '0' for b in repo.known(decodelist(nodes)))
992 v = ''.join(b and '1' or '0' for b in repo.known(decodelist(nodes)))
961 return bytesresponse(v)
993 return bytesresponse(v)
962
994
995 permissions['pushkey'] = 'push'
963 @wireprotocommand('pushkey', 'namespace key old new')
996 @wireprotocommand('pushkey', 'namespace key old new')
964 def pushkey(repo, proto, namespace, key, old, new):
997 def pushkey(repo, proto, namespace, key, old, new):
965 # compatibility with pre-1.8 clients which were accidentally
998 # compatibility with pre-1.8 clients which were accidentally
966 # sending raw binary nodes rather than utf-8-encoded hex
999 # sending raw binary nodes rather than utf-8-encoded hex
967 if len(new) == 20 and util.escapestr(new) != new:
1000 if len(new) == 20 and util.escapestr(new) != new:
968 # looks like it could be a binary node
1001 # looks like it could be a binary node
969 try:
1002 try:
970 new.decode('utf-8')
1003 new.decode('utf-8')
971 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
1004 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
972 except UnicodeDecodeError:
1005 except UnicodeDecodeError:
973 pass # binary, leave unmodified
1006 pass # binary, leave unmodified
974 else:
1007 else:
975 new = encoding.tolocal(new) # normal path
1008 new = encoding.tolocal(new) # normal path
976
1009
977 with proto.mayberedirectstdio() as output:
1010 with proto.mayberedirectstdio() as output:
978 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
1011 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
979 encoding.tolocal(old), new) or False
1012 encoding.tolocal(old), new) or False
980
1013
981 output = output.getvalue() if output else ''
1014 output = output.getvalue() if output else ''
982 return bytesresponse('%d\n%s' % (int(r), output))
1015 return bytesresponse('%d\n%s' % (int(r), output))
983
1016
1017 permissions['stream_out'] = 'pull'
984 @wireprotocommand('stream_out')
1018 @wireprotocommand('stream_out')
985 def stream(repo, proto):
1019 def stream(repo, proto):
986 '''If the server supports streaming clone, it advertises the "stream"
1020 '''If the server supports streaming clone, it advertises the "stream"
987 capability with a value representing the version and flags of the repo
1021 capability with a value representing the version and flags of the repo
988 it is serving. Client checks to see if it understands the format.
1022 it is serving. Client checks to see if it understands the format.
989 '''
1023 '''
990 return streamres_legacy(streamclone.generatev1wireproto(repo))
1024 return streamres_legacy(streamclone.generatev1wireproto(repo))
991
1025
1026 permissions['unbundle'] = 'push'
992 @wireprotocommand('unbundle', 'heads')
1027 @wireprotocommand('unbundle', 'heads')
993 def unbundle(repo, proto, heads):
1028 def unbundle(repo, proto, heads):
994 their_heads = decodelist(heads)
1029 their_heads = decodelist(heads)
995
1030
996 with proto.mayberedirectstdio() as output:
1031 with proto.mayberedirectstdio() as output:
997 try:
1032 try:
998 exchange.check_heads(repo, their_heads, 'preparing changes')
1033 exchange.check_heads(repo, their_heads, 'preparing changes')
999
1034
1000 # write bundle data to temporary file because it can be big
1035 # write bundle data to temporary file because it can be big
1001 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
1036 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
1002 fp = os.fdopen(fd, pycompat.sysstr('wb+'))
1037 fp = os.fdopen(fd, pycompat.sysstr('wb+'))
1003 r = 0
1038 r = 0
1004 try:
1039 try:
1005 proto.forwardpayload(fp)
1040 proto.forwardpayload(fp)
1006 fp.seek(0)
1041 fp.seek(0)
1007 gen = exchange.readbundle(repo.ui, fp, None)
1042 gen = exchange.readbundle(repo.ui, fp, None)
1008 if (isinstance(gen, changegroupmod.cg1unpacker)
1043 if (isinstance(gen, changegroupmod.cg1unpacker)
1009 and not bundle1allowed(repo, 'push')):
1044 and not bundle1allowed(repo, 'push')):
1010 if proto.name == 'http-v1':
1045 if proto.name == 'http-v1':
1011 # need to special case http because stderr do not get to
1046 # need to special case http because stderr do not get to
1012 # the http client on failed push so we need to abuse
1047 # the http client on failed push so we need to abuse
1013 # some other error type to make sure the message get to
1048 # some other error type to make sure the message get to
1014 # the user.
1049 # the user.
1015 return ooberror(bundle2required)
1050 return ooberror(bundle2required)
1016 raise error.Abort(bundle2requiredmain,
1051 raise error.Abort(bundle2requiredmain,
1017 hint=bundle2requiredhint)
1052 hint=bundle2requiredhint)
1018
1053
1019 r = exchange.unbundle(repo, gen, their_heads, 'serve',
1054 r = exchange.unbundle(repo, gen, their_heads, 'serve',
1020 proto.client())
1055 proto.client())
1021 if util.safehasattr(r, 'addpart'):
1056 if util.safehasattr(r, 'addpart'):
1022 # The return looks streamable, we are in the bundle2 case
1057 # The return looks streamable, we are in the bundle2 case
1023 # and should return a stream.
1058 # and should return a stream.
1024 return streamres_legacy(gen=r.getchunks())
1059 return streamres_legacy(gen=r.getchunks())
1025 return pushres(r, output.getvalue() if output else '')
1060 return pushres(r, output.getvalue() if output else '')
1026
1061
1027 finally:
1062 finally:
1028 fp.close()
1063 fp.close()
1029 os.unlink(tempname)
1064 os.unlink(tempname)
1030
1065
1031 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
1066 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
1032 # handle non-bundle2 case first
1067 # handle non-bundle2 case first
1033 if not getattr(exc, 'duringunbundle2', False):
1068 if not getattr(exc, 'duringunbundle2', False):
1034 try:
1069 try:
1035 raise
1070 raise
1036 except error.Abort:
1071 except error.Abort:
1037 # The old code we moved used util.stderr directly.
1072 # The old code we moved used util.stderr directly.
1038 # We did not change it to minimise code change.
1073 # We did not change it to minimise code change.
1039 # This need to be moved to something proper.
1074 # This need to be moved to something proper.
1040 # Feel free to do it.
1075 # Feel free to do it.
1041 util.stderr.write("abort: %s\n" % exc)
1076 util.stderr.write("abort: %s\n" % exc)
1042 if exc.hint is not None:
1077 if exc.hint is not None:
1043 util.stderr.write("(%s)\n" % exc.hint)
1078 util.stderr.write("(%s)\n" % exc.hint)
1044 return pushres(0, output.getvalue() if output else '')
1079 return pushres(0, output.getvalue() if output else '')
1045 except error.PushRaced:
1080 except error.PushRaced:
1046 return pusherr(pycompat.bytestr(exc),
1081 return pusherr(pycompat.bytestr(exc),
1047 output.getvalue() if output else '')
1082 output.getvalue() if output else '')
1048
1083
1049 bundler = bundle2.bundle20(repo.ui)
1084 bundler = bundle2.bundle20(repo.ui)
1050 for out in getattr(exc, '_bundle2salvagedoutput', ()):
1085 for out in getattr(exc, '_bundle2salvagedoutput', ()):
1051 bundler.addpart(out)
1086 bundler.addpart(out)
1052 try:
1087 try:
1053 try:
1088 try:
1054 raise
1089 raise
1055 except error.PushkeyFailed as exc:
1090 except error.PushkeyFailed as exc:
1056 # check client caps
1091 # check client caps
1057 remotecaps = getattr(exc, '_replycaps', None)
1092 remotecaps = getattr(exc, '_replycaps', None)
1058 if (remotecaps is not None
1093 if (remotecaps is not None
1059 and 'pushkey' not in remotecaps.get('error', ())):
1094 and 'pushkey' not in remotecaps.get('error', ())):
1060 # no support remote side, fallback to Abort handler.
1095 # no support remote side, fallback to Abort handler.
1061 raise
1096 raise
1062 part = bundler.newpart('error:pushkey')
1097 part = bundler.newpart('error:pushkey')
1063 part.addparam('in-reply-to', exc.partid)
1098 part.addparam('in-reply-to', exc.partid)
1064 if exc.namespace is not None:
1099 if exc.namespace is not None:
1065 part.addparam('namespace', exc.namespace,
1100 part.addparam('namespace', exc.namespace,
1066 mandatory=False)
1101 mandatory=False)
1067 if exc.key is not None:
1102 if exc.key is not None:
1068 part.addparam('key', exc.key, mandatory=False)
1103 part.addparam('key', exc.key, mandatory=False)
1069 if exc.new is not None:
1104 if exc.new is not None:
1070 part.addparam('new', exc.new, mandatory=False)
1105 part.addparam('new', exc.new, mandatory=False)
1071 if exc.old is not None:
1106 if exc.old is not None:
1072 part.addparam('old', exc.old, mandatory=False)
1107 part.addparam('old', exc.old, mandatory=False)
1073 if exc.ret is not None:
1108 if exc.ret is not None:
1074 part.addparam('ret', exc.ret, mandatory=False)
1109 part.addparam('ret', exc.ret, mandatory=False)
1075 except error.BundleValueError as exc:
1110 except error.BundleValueError as exc:
1076 errpart = bundler.newpart('error:unsupportedcontent')
1111 errpart = bundler.newpart('error:unsupportedcontent')
1077 if exc.parttype is not None:
1112 if exc.parttype is not None:
1078 errpart.addparam('parttype', exc.parttype)
1113 errpart.addparam('parttype', exc.parttype)
1079 if exc.params:
1114 if exc.params:
1080 errpart.addparam('params', '\0'.join(exc.params))
1115 errpart.addparam('params', '\0'.join(exc.params))
1081 except error.Abort as exc:
1116 except error.Abort as exc:
1082 manargs = [('message', util.forcebytestr(exc))]
1117 manargs = [('message', util.forcebytestr(exc))]
1083 advargs = []
1118 advargs = []
1084 if exc.hint is not None:
1119 if exc.hint is not None:
1085 advargs.append(('hint', exc.hint))
1120 advargs.append(('hint', exc.hint))
1086 bundler.addpart(bundle2.bundlepart('error:abort',
1121 bundler.addpart(bundle2.bundlepart('error:abort',
1087 manargs, advargs))
1122 manargs, advargs))
1088 except error.PushRaced as exc:
1123 except error.PushRaced as exc:
1089 bundler.newpart('error:pushraced',
1124 bundler.newpart('error:pushraced',
1090 [('message', util.forcebytestr(exc))])
1125 [('message', util.forcebytestr(exc))])
1091 return streamres_legacy(gen=bundler.getchunks())
1126 return streamres_legacy(gen=bundler.getchunks())
@@ -1,434 +1,435 b''
1 # drawdag.py - convert ASCII revision DAG to actual changesets
1 # drawdag.py - convert ASCII revision DAG to actual changesets
2 #
2 #
3 # Copyright 2016 Facebook, Inc.
3 # Copyright 2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """
7 """
8 create changesets from an ASCII graph for testing purpose.
8 create changesets from an ASCII graph for testing purpose.
9
9
10 For example, given the following input::
10 For example, given the following input::
11
11
12 c d
12 c d
13 |/
13 |/
14 b
14 b
15 |
15 |
16 a
16 a
17
17
18 4 changesets and 4 local tags will be created.
18 4 changesets and 4 local tags will be created.
19 `hg log -G -T "{rev} {desc} (tag: {tags})"` will output::
19 `hg log -G -T "{rev} {desc} (tag: {tags})"` will output::
20
20
21 o 3 d (tag: d tip)
21 o 3 d (tag: d tip)
22 |
22 |
23 | o 2 c (tag: c)
23 | o 2 c (tag: c)
24 |/
24 |/
25 o 1 b (tag: b)
25 o 1 b (tag: b)
26 |
26 |
27 o 0 a (tag: a)
27 o 0 a (tag: a)
28
28
29 For root nodes (nodes without parents) in the graph, they can be revsets
29 For root nodes (nodes without parents) in the graph, they can be revsets
30 pointing to existing nodes. The ASCII graph could also have disconnected
30 pointing to existing nodes. The ASCII graph could also have disconnected
31 components with same names referring to the same changeset.
31 components with same names referring to the same changeset.
32
32
33 Therefore, given the repo having the 4 changesets (and tags) above, with the
33 Therefore, given the repo having the 4 changesets (and tags) above, with the
34 following ASCII graph as input::
34 following ASCII graph as input::
35
35
36 foo bar bar foo
36 foo bar bar foo
37 | / | |
37 | / | |
38 ancestor(c,d) a baz
38 ancestor(c,d) a baz
39
39
40 The result (`hg log -G -T "{desc}"`) will look like::
40 The result (`hg log -G -T "{desc}"`) will look like::
41
41
42 o foo
42 o foo
43 |\
43 |\
44 +---o bar
44 +---o bar
45 | | |
45 | | |
46 | o | baz
46 | o | baz
47 | /
47 | /
48 +---o d
48 +---o d
49 | |
49 | |
50 +---o c
50 +---o c
51 | |
51 | |
52 o | b
52 o | b
53 |/
53 |/
54 o a
54 o a
55
55
56 Note that if you take the above `hg log` output directly as input. It will work
56 Note that if you take the above `hg log` output directly as input. It will work
57 as expected - the result would be an isomorphic graph::
57 as expected - the result would be an isomorphic graph::
58
58
59 o foo
59 o foo
60 |\
60 |\
61 | | o d
61 | | o d
62 | |/
62 | |/
63 | | o c
63 | | o c
64 | |/
64 | |/
65 | | o bar
65 | | o bar
66 | |/|
66 | |/|
67 | o | b
67 | o | b
68 | |/
68 | |/
69 o / baz
69 o / baz
70 /
70 /
71 o a
71 o a
72
72
73 This is because 'o' is specially handled in the input: instead of using 'o' as
73 This is because 'o' is specially handled in the input: instead of using 'o' as
74 the node name, the word to the right will be used.
74 the node name, the word to the right will be used.
75
75
76 Some special comments could have side effects:
76 Some special comments could have side effects:
77
77
78 - Create obsmarkers
78 - Create obsmarkers
79 # replace: A -> B -> C -> D # chained 1 to 1 replacements
79 # replace: A -> B -> C -> D # chained 1 to 1 replacements
80 # split: A -> B, C # 1 to many
80 # split: A -> B, C # 1 to many
81 # prune: A, B, C # many to nothing
81 # prune: A, B, C # many to nothing
82 """
82 """
83 from __future__ import absolute_import, print_function
83 from __future__ import absolute_import, print_function
84
84
85 import collections
85 import collections
86 import itertools
86 import itertools
87 import re
87 import re
88
88
89 from mercurial.i18n import _
89 from mercurial.i18n import _
90 from mercurial import (
90 from mercurial import (
91 context,
91 context,
92 error,
92 error,
93 node,
93 node,
94 obsolete,
94 obsolete,
95 pycompat,
95 pycompat,
96 registrar,
96 registrar,
97 scmutil,
97 scmutil,
98 tags as tagsmod,
98 tags as tagsmod,
99 )
99 )
100
100
101 cmdtable = {}
101 cmdtable = {}
102 command = registrar.command(cmdtable)
102 command = registrar.command(cmdtable)
103
103
104 _pipechars = b'\\/+-|'
104 _pipechars = b'\\/+-|'
105 _nonpipechars = b''.join(pycompat.bytechr(i) for i in range(33, 127)
105 _nonpipechars = b''.join(pycompat.bytechr(i) for i in range(33, 127)
106 if pycompat.bytechr(i) not in _pipechars)
106 if pycompat.bytechr(i) not in _pipechars)
107
107
108 def _isname(ch):
108 def _isname(ch):
109 """char -> bool. return True if ch looks like part of a name, False
109 """char -> bool. return True if ch looks like part of a name, False
110 otherwise"""
110 otherwise"""
111 return ch in _nonpipechars
111 return ch in _nonpipechars
112
112
113 def _parseasciigraph(text):
113 def _parseasciigraph(text):
114 r"""str -> {str : [str]}. convert the ASCII graph to edges
114 r"""str -> {str : [str]}. convert the ASCII graph to edges
115
115
116 >>> import pprint
116 >>> import pprint
117 >>> pprint.pprint({pycompat.sysstr(k): [pycompat.sysstr(vv) for vv in v]
117 >>> pprint.pprint({pycompat.sysstr(k): [pycompat.sysstr(vv) for vv in v]
118 ... for k, v in _parseasciigraph(br'''
118 ... for k, v in _parseasciigraph(br'''
119 ... G
119 ... G
120 ... |
120 ... |
121 ... I D C F # split: B -> E, F, G
121 ... I D C F # split: B -> E, F, G
122 ... \ \| | # replace: C -> D -> H
122 ... \ \| | # replace: C -> D -> H
123 ... H B E # prune: F, I
123 ... H B E # prune: F, I
124 ... \|/
124 ... \|/
125 ... A
125 ... A
126 ... ''').items()})
126 ... ''').items()})
127 {'A': [],
127 {'A': [],
128 'B': ['A'],
128 'B': ['A'],
129 'C': ['B'],
129 'C': ['B'],
130 'D': ['B'],
130 'D': ['B'],
131 'E': ['A'],
131 'E': ['A'],
132 'F': ['E'],
132 'F': ['E'],
133 'G': ['F'],
133 'G': ['F'],
134 'H': ['A'],
134 'H': ['A'],
135 'I': ['H']}
135 'I': ['H']}
136 >>> pprint.pprint({pycompat.sysstr(k): [pycompat.sysstr(vv) for vv in v]
136 >>> pprint.pprint({pycompat.sysstr(k): [pycompat.sysstr(vv) for vv in v]
137 ... for k, v in _parseasciigraph(br'''
137 ... for k, v in _parseasciigraph(br'''
138 ... o foo
138 ... o foo
139 ... |\
139 ... |\
140 ... +---o bar
140 ... +---o bar
141 ... | | |
141 ... | | |
142 ... | o | baz
142 ... | o | baz
143 ... | /
143 ... | /
144 ... +---o d
144 ... +---o d
145 ... | |
145 ... | |
146 ... +---o c
146 ... +---o c
147 ... | |
147 ... | |
148 ... o | b
148 ... o | b
149 ... |/
149 ... |/
150 ... o a
150 ... o a
151 ... ''').items()})
151 ... ''').items()})
152 {'a': [],
152 {'a': [],
153 'b': ['a'],
153 'b': ['a'],
154 'bar': ['b', 'a'],
154 'bar': ['b', 'a'],
155 'baz': [],
155 'baz': [],
156 'c': ['b'],
156 'c': ['b'],
157 'd': ['b'],
157 'd': ['b'],
158 'foo': ['baz', 'b']}
158 'foo': ['baz', 'b']}
159 """
159 """
160 lines = text.splitlines()
160 lines = text.splitlines()
161 edges = collections.defaultdict(list) # {node: []}
161 edges = collections.defaultdict(list) # {node: []}
162
162
163 def get(y, x):
163 def get(y, x):
164 """(int, int) -> char. give a coordinate, return the char. return a
164 """(int, int) -> char. give a coordinate, return the char. return a
165 space for anything out of range"""
165 space for anything out of range"""
166 if x < 0 or y < 0:
166 if x < 0 or y < 0:
167 return b' '
167 return b' '
168 try:
168 try:
169 return lines[y][x:x + 1] or b' '
169 return lines[y][x:x + 1] or b' '
170 except IndexError:
170 except IndexError:
171 return b' '
171 return b' '
172
172
173 def getname(y, x):
173 def getname(y, x):
174 """(int, int) -> str. like get(y, x) but concatenate left and right
174 """(int, int) -> str. like get(y, x) but concatenate left and right
175 parts. if name is an 'o', try to replace it to the right"""
175 parts. if name is an 'o', try to replace it to the right"""
176 result = b''
176 result = b''
177 for i in itertools.count(0):
177 for i in itertools.count(0):
178 ch = get(y, x - i)
178 ch = get(y, x - i)
179 if not _isname(ch):
179 if not _isname(ch):
180 break
180 break
181 result = ch + result
181 result = ch + result
182 for i in itertools.count(1):
182 for i in itertools.count(1):
183 ch = get(y, x + i)
183 ch = get(y, x + i)
184 if not _isname(ch):
184 if not _isname(ch):
185 break
185 break
186 result += ch
186 result += ch
187 if result == b'o':
187 if result == b'o':
188 # special handling, find the name to the right
188 # special handling, find the name to the right
189 result = b''
189 result = b''
190 for i in itertools.count(2):
190 for i in itertools.count(2):
191 ch = get(y, x + i)
191 ch = get(y, x + i)
192 if ch == b' ' or ch in _pipechars:
192 if ch == b' ' or ch in _pipechars:
193 if result or x + i >= len(lines[y]):
193 if result or x + i >= len(lines[y]):
194 break
194 break
195 else:
195 else:
196 result += ch
196 result += ch
197 return result or b'o'
197 return result or b'o'
198 return result
198 return result
199
199
200 def parents(y, x):
200 def parents(y, x):
201 """(int, int) -> [str]. follow the ASCII edges at given position,
201 """(int, int) -> [str]. follow the ASCII edges at given position,
202 return a list of parents"""
202 return a list of parents"""
203 visited = {(y, x)}
203 visited = {(y, x)}
204 visit = []
204 visit = []
205 result = []
205 result = []
206
206
207 def follow(y, x, expected):
207 def follow(y, x, expected):
208 """conditionally append (y, x) to visit array, if it's a char
208 """conditionally append (y, x) to visit array, if it's a char
209 in excepted. 'o' in expected means an '_isname' test.
209 in excepted. 'o' in expected means an '_isname' test.
210 if '-' (or '+') is not in excepted, and get(y, x) is '-' (or '+'),
210 if '-' (or '+') is not in excepted, and get(y, x) is '-' (or '+'),
211 the next line (y + 1, x) will be checked instead."""
211 the next line (y + 1, x) will be checked instead."""
212 ch = get(y, x)
212 ch = get(y, x)
213 if any(ch == c and c not in expected for c in (b'-', b'+')):
213 if any(ch == c and c not in expected for c in (b'-', b'+')):
214 y += 1
214 y += 1
215 return follow(y + 1, x, expected)
215 return follow(y + 1, x, expected)
216 if ch in expected or (b'o' in expected and _isname(ch)):
216 if ch in expected or (b'o' in expected and _isname(ch)):
217 visit.append((y, x))
217 visit.append((y, x))
218
218
219 # -o- # starting point:
219 # -o- # starting point:
220 # /|\ # follow '-' (horizontally), and '/|\' (to the bottom)
220 # /|\ # follow '-' (horizontally), and '/|\' (to the bottom)
221 follow(y + 1, x, b'|')
221 follow(y + 1, x, b'|')
222 follow(y + 1, x - 1, b'/')
222 follow(y + 1, x - 1, b'/')
223 follow(y + 1, x + 1, b'\\')
223 follow(y + 1, x + 1, b'\\')
224 follow(y, x - 1, b'-')
224 follow(y, x - 1, b'-')
225 follow(y, x + 1, b'-')
225 follow(y, x + 1, b'-')
226
226
227 while visit:
227 while visit:
228 y, x = visit.pop()
228 y, x = visit.pop()
229 if (y, x) in visited:
229 if (y, x) in visited:
230 continue
230 continue
231 visited.add((y, x))
231 visited.add((y, x))
232 ch = get(y, x)
232 ch = get(y, x)
233 if _isname(ch):
233 if _isname(ch):
234 result.append(getname(y, x))
234 result.append(getname(y, x))
235 continue
235 continue
236 elif ch == b'|':
236 elif ch == b'|':
237 follow(y + 1, x, b'/|o')
237 follow(y + 1, x, b'/|o')
238 follow(y + 1, x - 1, b'/')
238 follow(y + 1, x - 1, b'/')
239 follow(y + 1, x + 1, b'\\')
239 follow(y + 1, x + 1, b'\\')
240 elif ch == b'+':
240 elif ch == b'+':
241 follow(y, x - 1, b'-')
241 follow(y, x - 1, b'-')
242 follow(y, x + 1, b'-')
242 follow(y, x + 1, b'-')
243 follow(y + 1, x - 1, b'/')
243 follow(y + 1, x - 1, b'/')
244 follow(y + 1, x + 1, b'\\')
244 follow(y + 1, x + 1, b'\\')
245 follow(y + 1, x, b'|')
245 follow(y + 1, x, b'|')
246 elif ch == b'\\':
246 elif ch == b'\\':
247 follow(y + 1, x + 1, b'\\|o')
247 follow(y + 1, x + 1, b'\\|o')
248 elif ch == b'/':
248 elif ch == b'/':
249 follow(y + 1, x - 1, b'/|o')
249 follow(y + 1, x - 1, b'/|o')
250 elif ch == b'-':
250 elif ch == b'-':
251 follow(y, x - 1, b'-+o')
251 follow(y, x - 1, b'-+o')
252 follow(y, x + 1, b'-+o')
252 follow(y, x + 1, b'-+o')
253 return result
253 return result
254
254
255 for y, line in enumerate(lines):
255 for y, line in enumerate(lines):
256 for x, ch in enumerate(pycompat.bytestr(line)):
256 for x, ch in enumerate(pycompat.bytestr(line)):
257 if ch == b'#': # comment
257 if ch == b'#': # comment
258 break
258 break
259 if _isname(ch):
259 if _isname(ch):
260 edges[getname(y, x)] += parents(y, x)
260 edges[getname(y, x)] += parents(y, x)
261
261
262 return dict(edges)
262 return dict(edges)
263
263
264 class simplefilectx(object):
264 class simplefilectx(object):
265 def __init__(self, path, data):
265 def __init__(self, path, data):
266 self._data = data
266 self._data = data
267 self._path = path
267 self._path = path
268
268
269 def data(self):
269 def data(self):
270 return self._data
270 return self._data
271
271
272 def filenode(self):
272 def filenode(self):
273 return None
273 return None
274
274
275 def path(self):
275 def path(self):
276 return self._path
276 return self._path
277
277
278 def renamed(self):
278 def renamed(self):
279 return None
279 return None
280
280
281 def flags(self):
281 def flags(self):
282 return b''
282 return b''
283
283
284 class simplecommitctx(context.committablectx):
284 class simplecommitctx(context.committablectx):
285 def __init__(self, repo, name, parentctxs, added):
285 def __init__(self, repo, name, parentctxs, added):
286 opts = {
286 opts = {
287 'changes': scmutil.status([], list(added), [], [], [], [], []),
287 'changes': scmutil.status([], list(added), [], [], [], [], []),
288 'date': b'0 0',
288 'date': b'0 0',
289 'extra': {b'branch': b'default'},
289 'extra': {b'branch': b'default'},
290 }
290 }
291 super(simplecommitctx, self).__init__(self, name, **opts)
291 super(simplecommitctx, self).__init__(self, name, **opts)
292 self._repo = repo
292 self._repo = repo
293 self._added = added
293 self._added = added
294 self._parents = parentctxs
294 self._parents = parentctxs
295 while len(self._parents) < 2:
295 while len(self._parents) < 2:
296 self._parents.append(repo[node.nullid])
296 self._parents.append(repo[node.nullid])
297
297
298 def filectx(self, key):
298 def filectx(self, key):
299 return simplefilectx(key, self._added[key])
299 return simplefilectx(key, self._added[key])
300
300
301 def commit(self):
301 def commit(self):
302 return self._repo.commitctx(self)
302 return self._repo.commitctx(self)
303
303
304 def _walkgraph(edges):
304 def _walkgraph(edges):
305 """yield node, parents in topologically order"""
305 """yield node, parents in topologically order"""
306 visible = set(edges.keys())
306 visible = set(edges.keys())
307 remaining = {} # {str: [str]}
307 remaining = {} # {str: [str]}
308 for k, vs in edges.items():
308 for k, vs in edges.items():
309 for v in vs:
309 for v in vs:
310 if v not in remaining:
310 if v not in remaining:
311 remaining[v] = []
311 remaining[v] = []
312 remaining[k] = vs[:]
312 remaining[k] = vs[:]
313 while remaining:
313 while remaining:
314 leafs = [k for k, v in remaining.items() if not v]
314 leafs = [k for k, v in remaining.items() if not v]
315 if not leafs:
315 if not leafs:
316 raise error.Abort(_('the graph has cycles'))
316 raise error.Abort(_('the graph has cycles'))
317 for leaf in sorted(leafs):
317 for leaf in sorted(leafs):
318 if leaf in visible:
318 if leaf in visible:
319 yield leaf, edges[leaf]
319 yield leaf, edges[leaf]
320 del remaining[leaf]
320 del remaining[leaf]
321 for k, v in remaining.items():
321 for k, v in remaining.items():
322 if leaf in v:
322 if leaf in v:
323 v.remove(leaf)
323 v.remove(leaf)
324
324
325 def _getcomments(text):
325 def _getcomments(text):
326 """
326 """
327 >>> [pycompat.sysstr(s) for s in _getcomments(br'''
327 >>> [pycompat.sysstr(s) for s in _getcomments(br'''
328 ... G
328 ... G
329 ... |
329 ... |
330 ... I D C F # split: B -> E, F, G
330 ... I D C F # split: B -> E, F, G
331 ... \ \| | # replace: C -> D -> H
331 ... \ \| | # replace: C -> D -> H
332 ... H B E # prune: F, I
332 ... H B E # prune: F, I
333 ... \|/
333 ... \|/
334 ... A
334 ... A
335 ... ''')]
335 ... ''')]
336 ['split: B -> E, F, G', 'replace: C -> D -> H', 'prune: F, I']
336 ['split: B -> E, F, G', 'replace: C -> D -> H', 'prune: F, I']
337 """
337 """
338 for line in text.splitlines():
338 for line in text.splitlines():
339 if b' # ' not in line:
339 if b' # ' not in line:
340 continue
340 continue
341 yield line.split(b' # ', 1)[1].split(b' # ')[0].strip()
341 yield line.split(b' # ', 1)[1].split(b' # ')[0].strip()
342
342
343 @command(b'debugdrawdag', [])
343 @command(b'debugdrawdag', [])
344 def debugdrawdag(ui, repo, **opts):
344 def debugdrawdag(ui, repo, **opts):
345 """read an ASCII graph from stdin and create changesets
345 """read an ASCII graph from stdin and create changesets
346
346
347 The ASCII graph is like what :hg:`log -G` outputs, with each `o` replaced
347 The ASCII graph is like what :hg:`log -G` outputs, with each `o` replaced
348 to the name of the node. The command will create dummy changesets and local
348 to the name of the node. The command will create dummy changesets and local
349 tags with those names to make the dummy changesets easier to be referred
349 tags with those names to make the dummy changesets easier to be referred
350 to.
350 to.
351
351
352 If the name of a node is a single character 'o', It will be replaced by the
352 If the name of a node is a single character 'o', It will be replaced by the
353 word to the right. This makes it easier to reuse
353 word to the right. This makes it easier to reuse
354 :hg:`log -G -T '{desc}'` outputs.
354 :hg:`log -G -T '{desc}'` outputs.
355
355
356 For root (no parents) nodes, revset can be used to query existing repo.
356 For root (no parents) nodes, revset can be used to query existing repo.
357 Note that the revset cannot have confusing characters which can be seen as
357 Note that the revset cannot have confusing characters which can be seen as
358 the part of the graph edges, like `|/+-\`.
358 the part of the graph edges, like `|/+-\`.
359 """
359 """
360 text = ui.fin.read()
360 text = ui.fin.read()
361
361
362 # parse the graph and make sure len(parents) <= 2 for each node
362 # parse the graph and make sure len(parents) <= 2 for each node
363 edges = _parseasciigraph(text)
363 edges = _parseasciigraph(text)
364 for k, v in edges.items():
364 for k, v in edges.items():
365 if len(v) > 2:
365 if len(v) > 2:
366 raise error.Abort(_('%s: too many parents: %s')
366 raise error.Abort(_('%s: too many parents: %s')
367 % (k, b' '.join(v)))
367 % (k, b' '.join(v)))
368
368
369 # parse comments to get extra file content instructions
369 # parse comments to get extra file content instructions
370 files = collections.defaultdict(dict) # {(name, path): content}
370 files = collections.defaultdict(dict) # {(name, path): content}
371 comments = list(_getcomments(text))
371 comments = list(_getcomments(text))
372 filere = re.compile(br'^(\w+)/([\w/]+)\s*=\s*(.*)$', re.M)
372 filere = re.compile(br'^(\w+)/([\w/]+)\s*=\s*(.*)$', re.M)
373 for name, path, content in filere.findall(b'\n'.join(comments)):
373 for name, path, content in filere.findall(b'\n'.join(comments)):
374 files[name][path] = content.replace(br'\n', b'\n')
374 content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
375 files[name][path] = content
375
376
376 committed = {None: node.nullid} # {name: node}
377 committed = {None: node.nullid} # {name: node}
377
378
378 # for leaf nodes, try to find existing nodes in repo
379 # for leaf nodes, try to find existing nodes in repo
379 for name, parents in edges.items():
380 for name, parents in edges.items():
380 if len(parents) == 0:
381 if len(parents) == 0:
381 try:
382 try:
382 committed[name] = scmutil.revsingle(repo, name)
383 committed[name] = scmutil.revsingle(repo, name)
383 except error.RepoLookupError:
384 except error.RepoLookupError:
384 pass
385 pass
385
386
386 # commit in topological order
387 # commit in topological order
387 for name, parents in _walkgraph(edges):
388 for name, parents in _walkgraph(edges):
388 if name in committed:
389 if name in committed:
389 continue
390 continue
390 pctxs = [repo[committed[n]] for n in parents]
391 pctxs = [repo[committed[n]] for n in parents]
391 pctxs.sort(key=lambda c: c.node())
392 pctxs.sort(key=lambda c: c.node())
392 added = {}
393 added = {}
393 if len(parents) > 1:
394 if len(parents) > 1:
394 # If it's a merge, take the files and contents from the parents
395 # If it's a merge, take the files and contents from the parents
395 for f in pctxs[1].manifest():
396 for f in pctxs[1].manifest():
396 if f not in pctxs[0].manifest():
397 if f not in pctxs[0].manifest():
397 added[f] = pctxs[1][f].data()
398 added[f] = pctxs[1][f].data()
398 else:
399 else:
399 # If it's not a merge, add a single file
400 # If it's not a merge, add a single file
400 added[name] = name
401 added[name] = name
401 # add extra file contents in comments
402 # add extra file contents in comments
402 for path, content in files.get(name, {}).items():
403 for path, content in files.get(name, {}).items():
403 added[path] = content
404 added[path] = content
404 ctx = simplecommitctx(repo, name, pctxs, added)
405 ctx = simplecommitctx(repo, name, pctxs, added)
405 n = ctx.commit()
406 n = ctx.commit()
406 committed[name] = n
407 committed[name] = n
407 tagsmod.tag(repo, [name], n, message=None, user=None, date=None,
408 tagsmod.tag(repo, [name], n, message=None, user=None, date=None,
408 local=True)
409 local=True)
409
410
410 # handle special comments
411 # handle special comments
411 with repo.wlock(), repo.lock(), repo.transaction(b'drawdag'):
412 with repo.wlock(), repo.lock(), repo.transaction(b'drawdag'):
412 getctx = lambda x: repo.unfiltered()[committed[x.strip()]]
413 getctx = lambda x: repo.unfiltered()[committed[x.strip()]]
413 for comment in comments:
414 for comment in comments:
414 rels = [] # obsolete relationships
415 rels = [] # obsolete relationships
415 args = comment.split(b':', 1)
416 args = comment.split(b':', 1)
416 if len(args) <= 1:
417 if len(args) <= 1:
417 continue
418 continue
418
419
419 cmd = args[0].strip()
420 cmd = args[0].strip()
420 arg = args[1].strip()
421 arg = args[1].strip()
421
422
422 if cmd in (b'replace', b'rebase', b'amend'):
423 if cmd in (b'replace', b'rebase', b'amend'):
423 nodes = [getctx(m) for m in arg.split(b'->')]
424 nodes = [getctx(m) for m in arg.split(b'->')]
424 for i in range(len(nodes) - 1):
425 for i in range(len(nodes) - 1):
425 rels.append((nodes[i], (nodes[i + 1],)))
426 rels.append((nodes[i], (nodes[i + 1],)))
426 elif cmd in (b'split',):
427 elif cmd in (b'split',):
427 pre, succs = arg.split(b'->')
428 pre, succs = arg.split(b'->')
428 succs = succs.split(b',')
429 succs = succs.split(b',')
429 rels.append((getctx(pre), (getctx(s) for s in succs)))
430 rels.append((getctx(pre), (getctx(s) for s in succs)))
430 elif cmd in (b'prune',):
431 elif cmd in (b'prune',):
431 for n in arg.split(b','):
432 for n in arg.split(b','):
432 rels.append((getctx(n), ()))
433 rels.append((getctx(n), ()))
433 if rels:
434 if rels:
434 obsolete.createmarkers(repo, rels, date=(0, 0), operation=cmd)
435 obsolete.createmarkers(repo, rels, date=(0, 0), operation=cmd)
@@ -1,1035 +1,1041 b''
1 $ HGMERGE=true; export HGMERGE
1 $ HGMERGE=true; export HGMERGE
2
2
3 init
3 init
4
4
5 $ hg init repo
5 $ hg init repo
6 $ cd repo
6 $ cd repo
7
7
8 commit
8 commit
9
9
10 $ echo 'a' > a
10 $ echo 'a' > a
11 $ hg ci -A -m test -u nobody -d '1 0'
11 $ hg ci -A -m test -u nobody -d '1 0'
12 adding a
12 adding a
13
13
14 annotate -c
14 annotate -c
15
15
16 $ hg annotate -c a
16 $ hg annotate -c a
17 8435f90966e4: a
17 8435f90966e4: a
18
18
19 annotate -cl
19 annotate -cl
20
20
21 $ hg annotate -cl a
21 $ hg annotate -cl a
22 8435f90966e4:1: a
22 8435f90966e4:1: a
23
23
24 annotate -d
24 annotate -d
25
25
26 $ hg annotate -d a
26 $ hg annotate -d a
27 Thu Jan 01 00:00:01 1970 +0000: a
27 Thu Jan 01 00:00:01 1970 +0000: a
28
28
29 annotate -n
29 annotate -n
30
30
31 $ hg annotate -n a
31 $ hg annotate -n a
32 0: a
32 0: a
33
33
34 annotate -nl
34 annotate -nl
35
35
36 $ hg annotate -nl a
36 $ hg annotate -nl a
37 0:1: a
37 0:1: a
38
38
39 annotate -u
39 annotate -u
40
40
41 $ hg annotate -u a
41 $ hg annotate -u a
42 nobody: a
42 nobody: a
43
43
44 annotate -cdnu
44 annotate -cdnu
45
45
46 $ hg annotate -cdnu a
46 $ hg annotate -cdnu a
47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48
48
49 annotate -cdnul
49 annotate -cdnul
50
50
51 $ hg annotate -cdnul a
51 $ hg annotate -cdnul a
52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53
53
54 annotate (JSON)
54 annotate (JSON)
55
55
56 $ hg annotate -Tjson a
56 $ hg annotate -Tjson a
57 [
57 [
58 {
58 {
59 "abspath": "a",
59 "abspath": "a",
60 "lines": [{"line": "a\n", "rev": 0}],
60 "lines": [{"line": "a\n", "rev": 0}],
61 "path": "a"
61 "path": "a"
62 }
62 }
63 ]
63 ]
64
64
65 $ hg annotate -Tjson -cdfnul a
65 $ hg annotate -Tjson -cdfnul a
66 [
66 [
67 {
67 {
68 "abspath": "a",
68 "abspath": "a",
69 "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
69 "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
70 "path": "a"
70 "path": "a"
71 }
71 }
72 ]
72 ]
73
73
74 $ cat <<EOF >>a
74 $ cat <<EOF >>a
75 > a
75 > a
76 > a
76 > a
77 > EOF
77 > EOF
78 $ hg ci -ma1 -d '1 0'
78 $ hg ci -ma1 -d '1 0'
79 $ hg cp a b
79 $ hg cp a b
80 $ hg ci -mb -d '1 0'
80 $ hg ci -mb -d '1 0'
81 $ cat <<EOF >> b
81 $ cat <<EOF >> b
82 > b4
82 > b4
83 > b5
83 > b5
84 > b6
84 > b6
85 > EOF
85 > EOF
86 $ hg ci -mb2 -d '2 0'
86 $ hg ci -mb2 -d '2 0'
87
87
88 annotate multiple files (JSON)
88 annotate multiple files (JSON)
89
89
90 $ hg annotate -Tjson a b
90 $ hg annotate -Tjson a b
91 [
91 [
92 {
92 {
93 "abspath": "a",
93 "abspath": "a",
94 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
94 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
95 "path": "a"
95 "path": "a"
96 },
96 },
97 {
97 {
98 "abspath": "b",
98 "abspath": "b",
99 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
99 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
100 "path": "b"
100 "path": "b"
101 }
101 }
102 ]
102 ]
103
103
104 annotate multiple files (template)
104 annotate multiple files (template)
105
105
106 $ hg annotate -T'== {abspath} ==\n{lines % "{rev}: {line}"}' a b
106 $ hg annotate -T'== {abspath} ==\n{lines % "{rev}: {line}"}' a b
107 == a ==
107 == a ==
108 0: a
108 0: a
109 1: a
109 1: a
110 1: a
110 1: a
111 == b ==
111 == b ==
112 0: a
112 0: a
113 1: a
113 1: a
114 1: a
114 1: a
115 3: b4
115 3: b4
116 3: b5
116 3: b5
117 3: b6
117 3: b6
118
118
119 annotate -n b
119 annotate -n b
120
120
121 $ hg annotate -n b
121 $ hg annotate -n b
122 0: a
122 0: a
123 1: a
123 1: a
124 1: a
124 1: a
125 3: b4
125 3: b4
126 3: b5
126 3: b5
127 3: b6
127 3: b6
128
128
129 annotate --no-follow b
129 annotate --no-follow b
130
130
131 $ hg annotate --no-follow b
131 $ hg annotate --no-follow b
132 2: a
132 2: a
133 2: a
133 2: a
134 2: a
134 2: a
135 3: b4
135 3: b4
136 3: b5
136 3: b5
137 3: b6
137 3: b6
138
138
139 annotate -nl b
139 annotate -nl b
140
140
141 $ hg annotate -nl b
141 $ hg annotate -nl b
142 0:1: a
142 0:1: a
143 1:2: a
143 1:2: a
144 1:3: a
144 1:3: a
145 3:4: b4
145 3:4: b4
146 3:5: b5
146 3:5: b5
147 3:6: b6
147 3:6: b6
148
148
149 annotate -nf b
149 annotate -nf b
150
150
151 $ hg annotate -nf b
151 $ hg annotate -nf b
152 0 a: a
152 0 a: a
153 1 a: a
153 1 a: a
154 1 a: a
154 1 a: a
155 3 b: b4
155 3 b: b4
156 3 b: b5
156 3 b: b5
157 3 b: b6
157 3 b: b6
158
158
159 annotate -nlf b
159 annotate -nlf b
160
160
161 $ hg annotate -nlf b
161 $ hg annotate -nlf b
162 0 a:1: a
162 0 a:1: a
163 1 a:2: a
163 1 a:2: a
164 1 a:3: a
164 1 a:3: a
165 3 b:4: b4
165 3 b:4: b4
166 3 b:5: b5
166 3 b:5: b5
167 3 b:6: b6
167 3 b:6: b6
168
168
169 $ hg up -C 2
169 $ hg up -C 2
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 $ cat <<EOF >> b
171 $ cat <<EOF >> b
172 > b4
172 > b4
173 > c
173 > c
174 > b5
174 > b5
175 > EOF
175 > EOF
176 $ hg ci -mb2.1 -d '2 0'
176 $ hg ci -mb2.1 -d '2 0'
177 created new head
177 created new head
178 $ hg merge
178 $ hg merge
179 merging b
179 merging b
180 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
180 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
181 (branch merge, don't forget to commit)
181 (branch merge, don't forget to commit)
182 $ hg ci -mmergeb -d '3 0'
182 $ hg ci -mmergeb -d '3 0'
183
183
184 annotate after merge
184 annotate after merge
185
185
186 $ hg annotate -nf b
186 $ hg annotate -nf b
187 0 a: a
187 0 a: a
188 1 a: a
188 1 a: a
189 1 a: a
189 1 a: a
190 3 b: b4
190 3 b: b4
191 4 b: c
191 4 b: c
192 3 b: b5
192 3 b: b5
193
193
194 annotate after merge with -l
194 annotate after merge with -l
195
195
196 $ hg annotate -nlf b
196 $ hg annotate -nlf b
197 0 a:1: a
197 0 a:1: a
198 1 a:2: a
198 1 a:2: a
199 1 a:3: a
199 1 a:3: a
200 3 b:4: b4
200 3 b:4: b4
201 4 b:5: c
201 4 b:5: c
202 3 b:5: b5
202 3 b:5: b5
203
203
204 $ hg up -C 1
204 $ hg up -C 1
205 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
205 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
206 $ hg cp a b
206 $ hg cp a b
207 $ cat <<EOF > b
207 $ cat <<EOF > b
208 > a
208 > a
209 > z
209 > z
210 > a
210 > a
211 > EOF
211 > EOF
212 $ hg ci -mc -d '3 0'
212 $ hg ci -mc -d '3 0'
213 created new head
213 created new head
214 $ hg merge
214 $ hg merge
215 merging b
215 merging b
216 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
216 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
217 (branch merge, don't forget to commit)
217 (branch merge, don't forget to commit)
218 $ cat <<EOF >> b
218 $ cat <<EOF >> b
219 > b4
219 > b4
220 > c
220 > c
221 > b5
221 > b5
222 > EOF
222 > EOF
223 $ echo d >> b
223 $ echo d >> b
224 $ hg ci -mmerge2 -d '4 0'
224 $ hg ci -mmerge2 -d '4 0'
225
225
226 annotate after rename merge
226 annotate after rename merge
227
227
228 $ hg annotate -nf b
228 $ hg annotate -nf b
229 0 a: a
229 0 a: a
230 6 b: z
230 6 b: z
231 1 a: a
231 1 a: a
232 3 b: b4
232 3 b: b4
233 4 b: c
233 4 b: c
234 3 b: b5
234 3 b: b5
235 7 b: d
235 7 b: d
236
236
237 annotate after rename merge with -l
237 annotate after rename merge with -l
238
238
239 $ hg annotate -nlf b
239 $ hg annotate -nlf b
240 0 a:1: a
240 0 a:1: a
241 6 b:2: z
241 6 b:2: z
242 1 a:3: a
242 1 a:3: a
243 3 b:4: b4
243 3 b:4: b4
244 4 b:5: c
244 4 b:5: c
245 3 b:5: b5
245 3 b:5: b5
246 7 b:7: d
246 7 b:7: d
247
247
248 --skip nothing (should be the same as no --skip at all)
248 --skip nothing (should be the same as no --skip at all)
249
249
250 $ hg annotate -nlf b --skip '1::0'
250 $ hg annotate -nlf b --skip '1::0'
251 0 a:1: a
251 0 a:1: a
252 6 b:2: z
252 6 b:2: z
253 1 a:3: a
253 1 a:3: a
254 3 b:4: b4
254 3 b:4: b4
255 4 b:5: c
255 4 b:5: c
256 3 b:5: b5
256 3 b:5: b5
257 7 b:7: d
257 7 b:7: d
258
258
259 --skip a modified line. Note a slight behavior difference in pure - this is
259 --skip a modified line. Note a slight behavior difference in pure - this is
260 because the pure code comes up with slightly different deltas internally.
260 because the pure code comes up with slightly different deltas internally.
261
261
262 $ hg annotate -nlf b --skip 6
262 $ hg annotate -nlf b --skip 6
263 0 a:1: a
263 0 a:1: a
264 1 a:2* z (no-pure !)
264 1 a:2* z (no-pure !)
265 0 a:1* z (pure !)
265 0 a:1* z (pure !)
266 1 a:3: a
266 1 a:3: a
267 3 b:4: b4
267 3 b:4: b4
268 4 b:5: c
268 4 b:5: c
269 3 b:5: b5
269 3 b:5: b5
270 7 b:7: d
270 7 b:7: d
271
271
272 --skip added lines (and test multiple skip)
272 --skip added lines (and test multiple skip)
273
273
274 $ hg annotate -nlf b --skip 3
274 $ hg annotate -nlf b --skip 3
275 0 a:1: a
275 0 a:1: a
276 6 b:2: z
276 6 b:2: z
277 1 a:3: a
277 1 a:3: a
278 1 a:3* b4
278 1 a:3* b4
279 4 b:5: c
279 4 b:5: c
280 1 a:3* b5
280 1 a:3* b5
281 7 b:7: d
281 7 b:7: d
282
282
283 $ hg annotate -nlf b --skip 4
283 $ hg annotate -nlf b --skip 4
284 0 a:1: a
284 0 a:1: a
285 6 b:2: z
285 6 b:2: z
286 1 a:3: a
286 1 a:3: a
287 3 b:4: b4
287 3 b:4: b4
288 1 a:3* c
288 1 a:3* c
289 3 b:5: b5
289 3 b:5: b5
290 7 b:7: d
290 7 b:7: d
291
291
292 $ hg annotate -nlf b --skip 3 --skip 4
292 $ hg annotate -nlf b --skip 3 --skip 4
293 0 a:1: a
293 0 a:1: a
294 6 b:2: z
294 6 b:2: z
295 1 a:3: a
295 1 a:3: a
296 1 a:3* b4
296 1 a:3* b4
297 1 a:3* c
297 1 a:3* c
298 1 a:3* b5
298 1 a:3* b5
299 7 b:7: d
299 7 b:7: d
300
300
301 $ hg annotate -nlf b --skip 'merge()'
301 $ hg annotate -nlf b --skip 'merge()'
302 0 a:1: a
302 0 a:1: a
303 6 b:2: z
303 6 b:2: z
304 1 a:3: a
304 1 a:3: a
305 3 b:4: b4
305 3 b:4: b4
306 4 b:5: c
306 4 b:5: c
307 3 b:5: b5
307 3 b:5: b5
308 3 b:5* d
308 3 b:5* d
309
309
310 --skip everything -- use the revision the file was introduced in
310 --skip everything -- use the revision the file was introduced in
311
311
312 $ hg annotate -nlf b --skip 'all()'
312 $ hg annotate -nlf b --skip 'all()'
313 0 a:1: a
313 0 a:1: a
314 0 a:1* z
314 0 a:1* z
315 0 a:1* a
315 0 a:1* a
316 0 a:1* b4
316 0 a:1* b4
317 0 a:1* c
317 0 a:1* c
318 0 a:1* b5
318 0 a:1* b5
319 0 a:1* d
319 0 a:1* d
320
320
321 Issue2807: alignment of line numbers with -l
321 Issue2807: alignment of line numbers with -l
322
322
323 $ echo more >> b
323 $ echo more >> b
324 $ hg ci -mmore -d '5 0'
324 $ hg ci -mmore -d '5 0'
325 $ echo more >> b
325 $ echo more >> b
326 $ hg ci -mmore -d '6 0'
326 $ hg ci -mmore -d '6 0'
327 $ echo more >> b
327 $ echo more >> b
328 $ hg ci -mmore -d '7 0'
328 $ hg ci -mmore -d '7 0'
329 $ hg annotate -nlf b
329 $ hg annotate -nlf b
330 0 a: 1: a
330 0 a: 1: a
331 6 b: 2: z
331 6 b: 2: z
332 1 a: 3: a
332 1 a: 3: a
333 3 b: 4: b4
333 3 b: 4: b4
334 4 b: 5: c
334 4 b: 5: c
335 3 b: 5: b5
335 3 b: 5: b5
336 7 b: 7: d
336 7 b: 7: d
337 8 b: 8: more
337 8 b: 8: more
338 9 b: 9: more
338 9 b: 9: more
339 10 b:10: more
339 10 b:10: more
340
340
341 linkrev vs rev
341 linkrev vs rev
342
342
343 $ hg annotate -r tip -n a
343 $ hg annotate -r tip -n a
344 0: a
344 0: a
345 1: a
345 1: a
346 1: a
346 1: a
347
347
348 linkrev vs rev with -l
348 linkrev vs rev with -l
349
349
350 $ hg annotate -r tip -nl a
350 $ hg annotate -r tip -nl a
351 0:1: a
351 0:1: a
352 1:2: a
352 1:2: a
353 1:3: a
353 1:3: a
354
354
355 Issue589: "undelete" sequence leads to crash
355 Issue589: "undelete" sequence leads to crash
356
356
357 annotate was crashing when trying to --follow something
357 annotate was crashing when trying to --follow something
358
358
359 like A -> B -> A
359 like A -> B -> A
360
360
361 generate ABA rename configuration
361 generate ABA rename configuration
362
362
363 $ echo foo > foo
363 $ echo foo > foo
364 $ hg add foo
364 $ hg add foo
365 $ hg ci -m addfoo
365 $ hg ci -m addfoo
366 $ hg rename foo bar
366 $ hg rename foo bar
367 $ hg ci -m renamefoo
367 $ hg ci -m renamefoo
368 $ hg rename bar foo
368 $ hg rename bar foo
369 $ hg ci -m renamebar
369 $ hg ci -m renamebar
370
370
371 annotate after ABA with follow
371 annotate after ABA with follow
372
372
373 $ hg annotate --follow foo
373 $ hg annotate --follow foo
374 foo: foo
374 foo: foo
375
375
376 missing file
376 missing file
377
377
378 $ hg ann nosuchfile
378 $ hg ann nosuchfile
379 abort: nosuchfile: no such file in rev e9e6b4fa872f
379 abort: nosuchfile: no such file in rev e9e6b4fa872f
380 [255]
380 [255]
381
381
382 annotate file without '\n' on last line
382 annotate file without '\n' on last line
383
383
384 $ printf "" > c
384 $ printf "" > c
385 $ hg ci -A -m test -u nobody -d '1 0'
385 $ hg ci -A -m test -u nobody -d '1 0'
386 adding c
386 adding c
387 $ hg annotate c
387 $ hg annotate c
388 $ printf "a\nb" > c
388 $ printf "a\nb" > c
389 $ hg ci -m test
389 $ hg ci -m test
390 $ hg annotate c
390 $ hg annotate c
391 [0-9]+: a (re)
391 [0-9]+: a (re)
392 [0-9]+: b (re)
392 [0-9]+: b (re)
393
393
394 Issue3841: check annotation of the file of which filelog includes
394 Issue3841: check annotation of the file of which filelog includes
395 merging between the revision and its ancestor
395 merging between the revision and its ancestor
396
396
397 to reproduce the situation with recent Mercurial, this script uses (1)
397 to reproduce the situation with recent Mercurial, this script uses (1)
398 "hg debugsetparents" to merge without ancestor check by "hg merge",
398 "hg debugsetparents" to merge without ancestor check by "hg merge",
399 and (2) the extension to allow filelog merging between the revision
399 and (2) the extension to allow filelog merging between the revision
400 and its ancestor by overriding "repo._filecommit".
400 and its ancestor by overriding "repo._filecommit".
401
401
402 $ cat > ../legacyrepo.py <<EOF
402 $ cat > ../legacyrepo.py <<EOF
403 > from __future__ import absolute_import
403 > from __future__ import absolute_import
404 > from mercurial import error, node
404 > from mercurial import error, node
405 > def reposetup(ui, repo):
405 > def reposetup(ui, repo):
406 > class legacyrepo(repo.__class__):
406 > class legacyrepo(repo.__class__):
407 > def _filecommit(self, fctx, manifest1, manifest2,
407 > def _filecommit(self, fctx, manifest1, manifest2,
408 > linkrev, tr, changelist):
408 > linkrev, tr, changelist):
409 > fname = fctx.path()
409 > fname = fctx.path()
410 > text = fctx.data()
410 > text = fctx.data()
411 > flog = self.file(fname)
411 > flog = self.file(fname)
412 > fparent1 = manifest1.get(fname, node.nullid)
412 > fparent1 = manifest1.get(fname, node.nullid)
413 > fparent2 = manifest2.get(fname, node.nullid)
413 > fparent2 = manifest2.get(fname, node.nullid)
414 > meta = {}
414 > meta = {}
415 > copy = fctx.renamed()
415 > copy = fctx.renamed()
416 > if copy and copy[0] != fname:
416 > if copy and copy[0] != fname:
417 > raise error.Abort('copying is not supported')
417 > raise error.Abort('copying is not supported')
418 > if fparent2 != node.nullid:
418 > if fparent2 != node.nullid:
419 > changelist.append(fname)
419 > changelist.append(fname)
420 > return flog.add(text, meta, tr, linkrev,
420 > return flog.add(text, meta, tr, linkrev,
421 > fparent1, fparent2)
421 > fparent1, fparent2)
422 > raise error.Abort('only merging is supported')
422 > raise error.Abort('only merging is supported')
423 > repo.__class__ = legacyrepo
423 > repo.__class__ = legacyrepo
424 > EOF
424 > EOF
425
425
426 $ cat > baz <<EOF
426 $ cat > baz <<EOF
427 > 1
427 > 1
428 > 2
428 > 2
429 > 3
429 > 3
430 > 4
430 > 4
431 > 5
431 > 5
432 > EOF
432 > EOF
433 $ hg add baz
433 $ hg add baz
434 $ hg commit -m "baz:0"
434 $ hg commit -m "baz:0"
435
435
436 $ cat > baz <<EOF
436 $ cat > baz <<EOF
437 > 1 baz:1
437 > 1 baz:1
438 > 2
438 > 2
439 > 3
439 > 3
440 > 4
440 > 4
441 > 5
441 > 5
442 > EOF
442 > EOF
443 $ hg commit -m "baz:1"
443 $ hg commit -m "baz:1"
444
444
445 $ cat > baz <<EOF
445 $ cat > baz <<EOF
446 > 1 baz:1
446 > 1 baz:1
447 > 2 baz:2
447 > 2 baz:2
448 > 3
448 > 3
449 > 4
449 > 4
450 > 5
450 > 5
451 > EOF
451 > EOF
452 $ hg debugsetparents 17 17
452 $ hg debugsetparents 17 17
453 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
453 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
454 $ hg debugindexdot .hg/store/data/baz.i
454 $ hg debugindexdot .hg/store/data/baz.i
455 digraph G {
455 digraph G {
456 -1 -> 0
456 -1 -> 0
457 0 -> 1
457 0 -> 1
458 1 -> 2
458 1 -> 2
459 1 -> 2
459 1 -> 2
460 }
460 }
461 $ hg annotate baz
461 $ hg annotate baz
462 17: 1 baz:1
462 17: 1 baz:1
463 18: 2 baz:2
463 18: 2 baz:2
464 16: 3
464 16: 3
465 16: 4
465 16: 4
466 16: 5
466 16: 5
467
467
468 $ cat > baz <<EOF
468 $ cat > baz <<EOF
469 > 1 baz:1
469 > 1 baz:1
470 > 2 baz:2
470 > 2 baz:2
471 > 3 baz:3
471 > 3 baz:3
472 > 4
472 > 4
473 > 5
473 > 5
474 > EOF
474 > EOF
475 $ hg commit -m "baz:3"
475 $ hg commit -m "baz:3"
476
476
477 $ cat > baz <<EOF
477 $ cat > baz <<EOF
478 > 1 baz:1
478 > 1 baz:1
479 > 2 baz:2
479 > 2 baz:2
480 > 3 baz:3
480 > 3 baz:3
481 > 4 baz:4
481 > 4 baz:4
482 > 5
482 > 5
483 > EOF
483 > EOF
484 $ hg debugsetparents 19 18
484 $ hg debugsetparents 19 18
485 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
485 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
486 $ hg debugindexdot .hg/store/data/baz.i
486 $ hg debugindexdot .hg/store/data/baz.i
487 digraph G {
487 digraph G {
488 -1 -> 0
488 -1 -> 0
489 0 -> 1
489 0 -> 1
490 1 -> 2
490 1 -> 2
491 1 -> 2
491 1 -> 2
492 2 -> 3
492 2 -> 3
493 3 -> 4
493 3 -> 4
494 2 -> 4
494 2 -> 4
495 }
495 }
496 $ hg annotate baz
496 $ hg annotate baz
497 17: 1 baz:1
497 17: 1 baz:1
498 18: 2 baz:2
498 18: 2 baz:2
499 19: 3 baz:3
499 19: 3 baz:3
500 20: 4 baz:4
500 20: 4 baz:4
501 16: 5
501 16: 5
502
502
503 annotate clean file
503 annotate clean file
504
504
505 $ hg annotate -ncr "wdir()" foo
505 $ hg annotate -ncr "wdir()" foo
506 11 472b18db256d : foo
506 11 472b18db256d : foo
507
507
508 annotate modified file
508 annotate modified file
509
509
510 $ echo foofoo >> foo
510 $ echo foofoo >> foo
511 $ hg annotate -r "wdir()" foo
511 $ hg annotate -r "wdir()" foo
512 11 : foo
512 11 : foo
513 20+: foofoo
513 20+: foofoo
514
514
515 $ hg annotate -cr "wdir()" foo
515 $ hg annotate -cr "wdir()" foo
516 472b18db256d : foo
516 472b18db256d : foo
517 b6bedd5477e7+: foofoo
517 b6bedd5477e7+: foofoo
518
518
519 $ hg annotate -ncr "wdir()" foo
519 $ hg annotate -ncr "wdir()" foo
520 11 472b18db256d : foo
520 11 472b18db256d : foo
521 20 b6bedd5477e7+: foofoo
521 20 b6bedd5477e7+: foofoo
522
522
523 $ hg annotate --debug -ncr "wdir()" foo
523 $ hg annotate --debug -ncr "wdir()" foo
524 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
524 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
525 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
525 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
526
526
527 $ hg annotate -udr "wdir()" foo
527 $ hg annotate -udr "wdir()" foo
528 test Thu Jan 01 00:00:00 1970 +0000: foo
528 test Thu Jan 01 00:00:00 1970 +0000: foo
529 test [A-Za-z0-9:+ ]+: foofoo (re)
529 test [A-Za-z0-9:+ ]+: foofoo (re)
530
530
531 $ hg annotate -ncr "wdir()" -Tjson foo
531 $ hg annotate -ncr "wdir()" -Tjson foo
532 [
532 [
533 {
533 {
534 "abspath": "foo",
534 "abspath": "foo",
535 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
535 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
536 "path": "foo"
536 "path": "foo"
537 }
537 }
538 ]
538 ]
539
539
540 annotate added file
540 annotate added file
541
541
542 $ echo bar > bar
542 $ echo bar > bar
543 $ hg add bar
543 $ hg add bar
544 $ hg annotate -ncr "wdir()" bar
544 $ hg annotate -ncr "wdir()" bar
545 20 b6bedd5477e7+: bar
545 20 b6bedd5477e7+: bar
546
546
547 annotate renamed file
547 annotate renamed file
548
548
549 $ hg rename foo renamefoo2
549 $ hg rename foo renamefoo2
550 $ hg annotate -ncr "wdir()" renamefoo2
550 $ hg annotate -ncr "wdir()" renamefoo2
551 11 472b18db256d : foo
551 11 472b18db256d : foo
552 20 b6bedd5477e7+: foofoo
552 20 b6bedd5477e7+: foofoo
553
553
554 annotate missing file
554 annotate missing file
555
555
556 $ rm baz
556 $ rm baz
557
557
558 $ hg annotate -ncr "wdir()" baz
558 $ hg annotate -ncr "wdir()" baz
559 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
559 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
560 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
560 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
561 [255]
561 [255]
562
562
563 annotate removed file
563 annotate removed file
564
564
565 $ hg rm baz
565 $ hg rm baz
566
566
567 $ hg annotate -ncr "wdir()" baz
567 $ hg annotate -ncr "wdir()" baz
568 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
568 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
569 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
569 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
570 [255]
570 [255]
571
571
572 $ hg revert --all --no-backup --quiet
572 $ hg revert --all --no-backup --quiet
573 $ hg id -n
573 $ hg id -n
574 20
574 20
575
575
576 Test followlines() revset; we usually check both followlines(pat, range) and
576 Test followlines() revset; we usually check both followlines(pat, range) and
577 followlines(pat, range, descend=True) to make sure both give the same result
577 followlines(pat, range, descend=True) to make sure both give the same result
578 when they should.
578 when they should.
579
579
580 $ echo a >> foo
580 $ echo a >> foo
581 $ hg ci -m 'foo: add a'
581 $ hg ci -m 'foo: add a'
582 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
582 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
583 16: baz:0
583 16: baz:0
584 19: baz:3
584 19: baz:3
585 20: baz:4
585 20: baz:4
586 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
586 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
587 16: baz:0
587 16: baz:0
588 19: baz:3
588 19: baz:3
589 20: baz:4
589 20: baz:4
590 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
590 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
591 16: baz:0
591 16: baz:0
592 19: baz:3
592 19: baz:3
593 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
593 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
594 19: baz:3
594 19: baz:3
595 20: baz:4
595 20: baz:4
596 $ printf "0\n0\n" | cat - baz > baz1
596 $ printf "0\n0\n" | cat - baz > baz1
597 $ mv baz1 baz
597 $ mv baz1 baz
598 $ hg ci -m 'added two lines with 0'
598 $ hg ci -m 'added two lines with 0'
599 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
599 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
600 16: baz:0
600 16: baz:0
601 19: baz:3
601 19: baz:3
602 20: baz:4
602 20: baz:4
603 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
603 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
604 19: baz:3
604 19: baz:3
605 20: baz:4
605 20: baz:4
606 $ echo 6 >> baz
606 $ echo 6 >> baz
607 $ hg ci -m 'added line 8'
607 $ hg ci -m 'added line 8'
608 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
608 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
609 16: baz:0
609 16: baz:0
610 19: baz:3
610 19: baz:3
611 20: baz:4
611 20: baz:4
612 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
612 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
613 19: baz:3
613 19: baz:3
614 20: baz:4
614 20: baz:4
615 $ sed 's/3/3+/' baz > baz.new
615 $ sed 's/3/3+/' baz > baz.new
616 $ mv baz.new baz
616 $ mv baz.new baz
617 $ hg ci -m 'baz:3->3+'
617 $ hg ci -m 'baz:3->3+'
618 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
618 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
619 16: baz:0
619 16: baz:0
620 19: baz:3
620 19: baz:3
621 20: baz:4
621 20: baz:4
622 24: baz:3->3+
622 24: baz:3->3+
623 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
623 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
624 19: baz:3
624 19: baz:3
625 20: baz:4
625 20: baz:4
626 24: baz:3->3+
626 24: baz:3->3+
627 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
627 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
628 22: added two lines with 0
628 22: added two lines with 0
629
629
630 file patterns are okay
630 file patterns are okay
631 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
631 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
632 22: added two lines with 0
632 22: added two lines with 0
633
633
634 renames are followed
634 renames are followed
635 $ hg mv baz qux
635 $ hg mv baz qux
636 $ sed 's/4/4+/' qux > qux.new
636 $ sed 's/4/4+/' qux > qux.new
637 $ mv qux.new qux
637 $ mv qux.new qux
638 $ hg ci -m 'qux:4->4+'
638 $ hg ci -m 'qux:4->4+'
639 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
639 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
640 16: baz:0
640 16: baz:0
641 19: baz:3
641 19: baz:3
642 20: baz:4
642 20: baz:4
643 24: baz:3->3+
643 24: baz:3->3+
644 25: qux:4->4+
644 25: qux:4->4+
645
645
646 but are missed when following children
646 but are missed when following children
647 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
647 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
648 24: baz:3->3+
648 24: baz:3->3+
649
649
650 merge
650 merge
651 $ hg up 24 --quiet
651 $ hg up 24 --quiet
652 $ echo 7 >> baz
652 $ echo 7 >> baz
653 $ hg ci -m 'one more line, out of line range'
653 $ hg ci -m 'one more line, out of line range'
654 created new head
654 created new head
655 $ sed 's/3+/3-/' baz > baz.new
655 $ sed 's/3+/3-/' baz > baz.new
656 $ mv baz.new baz
656 $ mv baz.new baz
657 $ hg ci -m 'baz:3+->3-'
657 $ hg ci -m 'baz:3+->3-'
658 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
658 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
659 16: baz:0
659 16: baz:0
660 19: baz:3
660 19: baz:3
661 20: baz:4
661 20: baz:4
662 24: baz:3->3+
662 24: baz:3->3+
663 27: baz:3+->3-
663 27: baz:3+->3-
664 $ hg merge 25
664 $ hg merge 25
665 merging baz and qux to qux
665 merging baz and qux to qux
666 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
666 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
667 (branch merge, don't forget to commit)
667 (branch merge, don't forget to commit)
668 $ hg ci -m merge
668 $ hg ci -m merge
669 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
669 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
670 16: baz:0
670 16: baz:0
671 19: baz:3
671 19: baz:3
672 20: baz:4
672 20: baz:4
673 24: baz:3->3+
673 24: baz:3->3+
674 25: qux:4->4+
674 25: qux:4->4+
675 27: baz:3+->3-
675 27: baz:3+->3-
676 28: merge
676 28: merge
677 $ hg up 25 --quiet
677 $ hg up 25 --quiet
678 $ hg merge 27
678 $ hg merge 27
679 merging qux and baz to qux
679 merging qux and baz to qux
680 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
680 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
681 (branch merge, don't forget to commit)
681 (branch merge, don't forget to commit)
682 $ hg ci -m 'merge from other side'
682 $ hg ci -m 'merge from other side'
683 created new head
683 created new head
684 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
684 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
685 16: baz:0
685 16: baz:0
686 19: baz:3
686 19: baz:3
687 20: baz:4
687 20: baz:4
688 24: baz:3->3+
688 24: baz:3->3+
689 25: qux:4->4+
689 25: qux:4->4+
690 27: baz:3+->3-
690 27: baz:3+->3-
691 29: merge from other side
691 29: merge from other side
692 $ hg up 24 --quiet
692 $ hg up 24 --quiet
693
693
694 we are missing the branch with rename when following children
694 we are missing the branch with rename when following children
695 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
695 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
696 27: baz:3+->3-
696 27: baz:3+->3-
697
697
698 we follow all branches in descending direction
698 we follow all branches in descending direction
699 $ hg up 23 --quiet
699 $ hg up 23 --quiet
700 $ sed 's/3/+3/' baz > baz.new
700 $ sed 's/3/+3/' baz > baz.new
701 $ mv baz.new baz
701 $ mv baz.new baz
702 $ hg ci -m 'baz:3->+3'
702 $ hg ci -m 'baz:3->+3'
703 created new head
703 created new head
704 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
704 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
705 @ 30: baz:3->+3
705 @ 30: baz:3->+3
706 :
706 :
707 : o 27: baz:3+->3-
707 : o 27: baz:3+->3-
708 : :
708 : :
709 : o 24: baz:3->3+
709 : o 24: baz:3->3+
710 :/
710 :/
711 o 20: baz:4
711 o 20: baz:4
712 |\
712 |\
713 | o 19: baz:3
713 | o 19: baz:3
714 |/
714 |/
715 o 18: baz:2
715 o 18: baz:2
716 :
716 :
717 o 16: baz:0
717 o 16: baz:0
718 |
718 |
719 ~
719 ~
720
720
721 Issue5595: on a merge changeset with different line ranges depending on
721 Issue5595: on a merge changeset with different line ranges depending on
722 parent, be conservative and use the surrounding interval to avoid loosing
722 parent, be conservative and use the surrounding interval to avoid loosing
723 track of possible further descendants in specified range.
723 track of possible further descendants in specified range.
724
724
725 $ hg up 23 --quiet
725 $ hg up 23 --quiet
726 $ hg cat baz -r 24
726 $ hg cat baz -r 24
727 0
727 0
728 0
728 0
729 1 baz:1
729 1 baz:1
730 2 baz:2
730 2 baz:2
731 3+ baz:3
731 3+ baz:3
732 4 baz:4
732 4 baz:4
733 5
733 5
734 6
734 6
735 $ cat > baz << EOF
735 $ cat > baz << EOF
736 > 0
736 > 0
737 > 0
737 > 0
738 > a
738 > a
739 > b
739 > b
740 > 3+ baz:3
740 > 3+ baz:3
741 > 4 baz:4
741 > 4 baz:4
742 > y
742 > y
743 > z
743 > z
744 > EOF
744 > EOF
745 $ hg ci -m 'baz: mostly rewrite with some content from 24'
745 $ hg ci -m 'baz: mostly rewrite with some content from 24'
746 created new head
746 created new head
747 $ hg merge --tool :merge-other 24
747 $ hg merge --tool :merge-other 24
748 merging baz
748 merging baz
749 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
749 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
750 (branch merge, don't forget to commit)
750 (branch merge, don't forget to commit)
751 $ hg ci -m 'merge forgetting about baz rewrite'
751 $ hg ci -m 'merge forgetting about baz rewrite'
752 $ cat > baz << EOF
752 $ cat > baz << EOF
753 > 0
753 > 0
754 > 0
754 > 0
755 > 1 baz:1
755 > 1 baz:1
756 > 2+ baz:2
756 > 2+ baz:2
757 > 3+ baz:3
757 > 3+ baz:3
758 > 4 baz:4
758 > 4 baz:4
759 > 5
759 > 5
760 > 6
760 > 6
761 > EOF
761 > EOF
762 $ hg ci -m 'baz: narrow change (2->2+)'
762 $ hg ci -m 'baz: narrow change (2->2+)'
763 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:4, startrev=20, descend=True)' --graph
763 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:4, startrev=20, descend=True)' --graph
764 @ 33: baz: narrow change (2->2+)
764 @ 33: baz: narrow change (2->2+)
765 |
765 |
766 o 32: merge forgetting about baz rewrite
766 o 32: merge forgetting about baz rewrite
767 |\
767 |\
768 | o 31: baz: mostly rewrite with some content from 24
768 | o 31: baz: mostly rewrite with some content from 24
769 | :
769 | :
770 | : o 30: baz:3->+3
770 | : o 30: baz:3->+3
771 | :/
771 | :/
772 +---o 27: baz:3+->3-
772 +---o 27: baz:3+->3-
773 | :
773 | :
774 o : 24: baz:3->3+
774 o : 24: baz:3->3+
775 :/
775 :/
776 o 20: baz:4
776 o 20: baz:4
777 |\
777 |\
778 ~ ~
778 ~ ~
779
779
780 check error cases
780 check error cases
781 $ hg up 24 --quiet
781 $ hg up 24 --quiet
782 $ hg log -r 'followlines()'
782 $ hg log -r 'followlines()'
783 hg: parse error: followlines takes at least 1 positional arguments
783 hg: parse error: followlines takes at least 1 positional arguments
784 [255]
784 [255]
785 $ hg log -r 'followlines(baz)'
785 $ hg log -r 'followlines(baz)'
786 hg: parse error: followlines requires a line range
786 hg: parse error: followlines requires a line range
787 [255]
787 [255]
788 $ hg log -r 'followlines(baz, 1)'
788 $ hg log -r 'followlines(baz, 1)'
789 hg: parse error: followlines expects a line range
789 hg: parse error: followlines expects a line range
790 [255]
790 [255]
791 $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
791 $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
792 hg: parse error: followlines expects exactly one revision
792 hg: parse error: followlines expects exactly one revision
793 [255]
793 [255]
794 $ hg log -r 'followlines("glob:*", 1:2)'
794 $ hg log -r 'followlines("glob:*", 1:2)'
795 hg: parse error: followlines expects exactly one file
795 hg: parse error: followlines expects exactly one file
796 [255]
796 [255]
797 $ hg log -r 'followlines(baz, 1:)'
797 $ hg log -r 'followlines(baz, 1:)'
798 hg: parse error: line range bounds must be integers
798 hg: parse error: line range bounds must be integers
799 [255]
799 [255]
800 $ hg log -r 'followlines(baz, :1)'
800 $ hg log -r 'followlines(baz, :1)'
801 hg: parse error: line range bounds must be integers
801 hg: parse error: line range bounds must be integers
802 [255]
802 [255]
803 $ hg log -r 'followlines(baz, x:4)'
803 $ hg log -r 'followlines(baz, x:4)'
804 hg: parse error: line range bounds must be integers
804 hg: parse error: line range bounds must be integers
805 [255]
805 [255]
806 $ hg log -r 'followlines(baz, 5:4)'
806 $ hg log -r 'followlines(baz, 5:4)'
807 hg: parse error: line range must be positive
807 hg: parse error: line range must be positive
808 [255]
808 [255]
809 $ hg log -r 'followlines(baz, 0:4)'
809 $ hg log -r 'followlines(baz, 0:4)'
810 hg: parse error: fromline must be strictly positive
810 hg: parse error: fromline must be strictly positive
811 [255]
811 [255]
812 $ hg log -r 'followlines(baz, 2:40)'
812 $ hg log -r 'followlines(baz, 2:40)'
813 abort: line range exceeds file size
813 abort: line range exceeds file size
814 [255]
814 [255]
815 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
815 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
816 hg: parse error at 43: not a prefix: [
816 hg: parse error at 43: not a prefix: [
817 (followlines(baz, 2:4, startrev=20, descend=[1])
817 (followlines(baz, 2:4, startrev=20, descend=[1])
818 ^ here)
818 ^ here)
819 [255]
819 [255]
820 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
820 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
821 hg: parse error: descend argument must be a boolean
821 hg: parse error: descend argument must be a boolean
822 [255]
822 [255]
823
823
824 Test empty annotate output
824 Test empty annotate output
825
825
826 $ printf '\0' > binary
826 $ printf '\0' > binary
827 $ touch empty
827 $ touch empty
828 $ hg ci -qAm 'add binary and empty files'
828 $ hg ci -qAm 'add binary and empty files'
829
829
830 $ hg annotate binary empty
830 $ hg annotate binary empty
831 binary: binary file
831 binary: binary file
832
832
833 $ hg annotate -Tjson binary empty
833 $ hg annotate -Tjson binary empty
834 [
834 [
835 {
835 {
836 "abspath": "binary",
836 "abspath": "binary",
837 "path": "binary"
837 "path": "binary"
838 },
838 },
839 {
839 {
840 "abspath": "empty",
840 "abspath": "empty",
841 "lines": [],
841 "lines": [],
842 "path": "empty"
842 "path": "empty"
843 }
843 }
844 ]
844 ]
845
845
846 Test annotate with whitespace options
846 Test annotate with whitespace options
847
847
848 $ cd ..
848 $ cd ..
849 $ hg init repo-ws
849 $ hg init repo-ws
850 $ cd repo-ws
850 $ cd repo-ws
851 $ cat > a <<EOF
851 $ cat > a <<EOF
852 > aa
852 > aa
853 >
853 >
854 > b b
854 > b b
855 > EOF
855 > EOF
856 $ hg ci -Am "adda"
856 $ hg ci -Am "adda"
857 adding a
857 adding a
858 $ sed 's/EOL$//g' > a <<EOF
858 $ sed 's/EOL$//g' > a <<EOF
859 > a a
859 > a a
860 >
860 >
861 > EOL
861 > EOL
862 > b b
862 > b b
863 > EOF
863 > EOF
864 $ hg ci -m "changea"
864 $ hg ci -m "changea"
865
865
866 Annotate with no option
866 Annotate with no option
867
867
868 $ hg annotate a
868 $ hg annotate a
869 1: a a
869 1: a a
870 0:
870 0:
871 1:
871 1:
872 1: b b
872 1: b b
873
873
874 Annotate with --ignore-space-change
874 Annotate with --ignore-space-change
875
875
876 $ hg annotate --ignore-space-change a
876 $ hg annotate --ignore-space-change a
877 1: a a
877 1: a a
878 1:
878 1:
879 0:
879 0:
880 0: b b
880 0: b b
881
881
882 Annotate with --ignore-all-space
882 Annotate with --ignore-all-space
883
883
884 $ hg annotate --ignore-all-space a
884 $ hg annotate --ignore-all-space a
885 0: a a
885 0: a a
886 0:
886 0:
887 1:
887 1:
888 0: b b
888 0: b b
889
889
890 Annotate with --ignore-blank-lines (similar to no options case)
890 Annotate with --ignore-blank-lines (similar to no options case)
891
891
892 $ hg annotate --ignore-blank-lines a
892 $ hg annotate --ignore-blank-lines a
893 1: a a
893 1: a a
894 0:
894 0:
895 1:
895 1:
896 1: b b
896 1: b b
897
897
898 $ cd ..
898 $ cd ..
899
899
900 Annotate with orphaned CR (issue5798)
900 Annotate with orphaned CR (issue5798)
901 -------------------------------------
901 -------------------------------------
902
902
903 $ hg init repo-cr
903 $ hg init repo-cr
904 $ cd repo-cr
904 $ cd repo-cr
905
905
906 $ substcr() {
906 $ cat <<'EOF' >> "$TESTTMP/substcr.py"
907 > sed 's/\r/[CR]/g'
907 > import sys
908 > }
908 > from mercurial import util
909 > util.setbinary(sys.stdin)
910 > util.setbinary(sys.stdout)
911 > stdin = getattr(sys.stdin, 'buffer', sys.stdin)
912 > stdout = getattr(sys.stdout, 'buffer', sys.stdout)
913 > stdout.write(stdin.read().replace(b'\r', b'[CR]'))
914 > EOF
909
915
910 >>> with open('a', 'wb') as f:
916 >>> with open('a', 'wb') as f:
911 ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g')
917 ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g')
912 $ hg ci -qAm0
918 $ hg ci -qAm0
913 >>> with open('a', 'wb') as f:
919 >>> with open('a', 'wb') as f:
914 ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g')
920 ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g')
915 $ hg ci -m1
921 $ hg ci -m1
916
922
917 $ hg annotate -r0 a | substcr
923 $ hg annotate -r0 a | $PYTHON "$TESTTMP/substcr.py"
918 0: 0a[CR]0b[CR]
924 0: 0a[CR]0b[CR]
919 0: 0c[CR]0d[CR]
925 0: 0c[CR]0d[CR]
920 0: 0e
926 0: 0e
921 0: 0f
927 0: 0f
922 0: 0g
928 0: 0g
923 $ hg annotate -r1 a | substcr
929 $ hg annotate -r1 a | $PYTHON "$TESTTMP/substcr.py"
924 0: 0a[CR]0b[CR]
930 0: 0a[CR]0b[CR]
925 1: 1c[CR]1d[CR]
931 1: 1c[CR]1d[CR]
926 0: 0e
932 0: 0e
927 1: 1f
933 1: 1f
928 0: 0g
934 0: 0g
929
935
930 $ cd ..
936 $ cd ..
931
937
932 Annotate with linkrev pointing to another branch
938 Annotate with linkrev pointing to another branch
933 ------------------------------------------------
939 ------------------------------------------------
934
940
935 create history with a filerev whose linkrev points to another branch
941 create history with a filerev whose linkrev points to another branch
936
942
937 $ hg init branchedlinkrev
943 $ hg init branchedlinkrev
938 $ cd branchedlinkrev
944 $ cd branchedlinkrev
939 $ echo A > a
945 $ echo A > a
940 $ hg commit -Am 'contentA'
946 $ hg commit -Am 'contentA'
941 adding a
947 adding a
942 $ echo B >> a
948 $ echo B >> a
943 $ hg commit -m 'contentB'
949 $ hg commit -m 'contentB'
944 $ hg up --rev 'desc(contentA)'
950 $ hg up --rev 'desc(contentA)'
945 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
951 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
946 $ echo unrelated > unrelated
952 $ echo unrelated > unrelated
947 $ hg commit -Am 'unrelated'
953 $ hg commit -Am 'unrelated'
948 adding unrelated
954 adding unrelated
949 created new head
955 created new head
950 $ hg graft -r 'desc(contentB)'
956 $ hg graft -r 'desc(contentB)'
951 grafting 1:fd27c222e3e6 "contentB"
957 grafting 1:fd27c222e3e6 "contentB"
952 $ echo C >> a
958 $ echo C >> a
953 $ hg commit -m 'contentC'
959 $ hg commit -m 'contentC'
954 $ echo W >> a
960 $ echo W >> a
955 $ hg log -G
961 $ hg log -G
956 @ changeset: 4:072f1e8df249
962 @ changeset: 4:072f1e8df249
957 | tag: tip
963 | tag: tip
958 | user: test
964 | user: test
959 | date: Thu Jan 01 00:00:00 1970 +0000
965 | date: Thu Jan 01 00:00:00 1970 +0000
960 | summary: contentC
966 | summary: contentC
961 |
967 |
962 o changeset: 3:ff38df03cc4b
968 o changeset: 3:ff38df03cc4b
963 | user: test
969 | user: test
964 | date: Thu Jan 01 00:00:00 1970 +0000
970 | date: Thu Jan 01 00:00:00 1970 +0000
965 | summary: contentB
971 | summary: contentB
966 |
972 |
967 o changeset: 2:62aaf3f6fc06
973 o changeset: 2:62aaf3f6fc06
968 | parent: 0:f0932f74827e
974 | parent: 0:f0932f74827e
969 | user: test
975 | user: test
970 | date: Thu Jan 01 00:00:00 1970 +0000
976 | date: Thu Jan 01 00:00:00 1970 +0000
971 | summary: unrelated
977 | summary: unrelated
972 |
978 |
973 | o changeset: 1:fd27c222e3e6
979 | o changeset: 1:fd27c222e3e6
974 |/ user: test
980 |/ user: test
975 | date: Thu Jan 01 00:00:00 1970 +0000
981 | date: Thu Jan 01 00:00:00 1970 +0000
976 | summary: contentB
982 | summary: contentB
977 |
983 |
978 o changeset: 0:f0932f74827e
984 o changeset: 0:f0932f74827e
979 user: test
985 user: test
980 date: Thu Jan 01 00:00:00 1970 +0000
986 date: Thu Jan 01 00:00:00 1970 +0000
981 summary: contentA
987 summary: contentA
982
988
983
989
984 Annotate should list ancestor of starting revision only
990 Annotate should list ancestor of starting revision only
985
991
986 $ hg annotate a
992 $ hg annotate a
987 0: A
993 0: A
988 3: B
994 3: B
989 4: C
995 4: C
990
996
991 $ hg annotate a -r 'wdir()'
997 $ hg annotate a -r 'wdir()'
992 0 : A
998 0 : A
993 3 : B
999 3 : B
994 4 : C
1000 4 : C
995 4+: W
1001 4+: W
996
1002
997 Even when the starting revision is the linkrev-shadowed one:
1003 Even when the starting revision is the linkrev-shadowed one:
998
1004
999 $ hg annotate a -r 3
1005 $ hg annotate a -r 3
1000 0: A
1006 0: A
1001 3: B
1007 3: B
1002
1008
1003 $ cd ..
1009 $ cd ..
1004
1010
1005 Issue5360: Deleted chunk in p1 of a merge changeset
1011 Issue5360: Deleted chunk in p1 of a merge changeset
1006
1012
1007 $ hg init repo-5360
1013 $ hg init repo-5360
1008 $ cd repo-5360
1014 $ cd repo-5360
1009 $ echo 1 > a
1015 $ echo 1 > a
1010 $ hg commit -A a -m 1
1016 $ hg commit -A a -m 1
1011 $ echo 2 >> a
1017 $ echo 2 >> a
1012 $ hg commit -m 2
1018 $ hg commit -m 2
1013 $ echo a > a
1019 $ echo a > a
1014 $ hg commit -m a
1020 $ hg commit -m a
1015 $ hg update '.^' -q
1021 $ hg update '.^' -q
1016 $ echo 3 >> a
1022 $ echo 3 >> a
1017 $ hg commit -m 3 -q
1023 $ hg commit -m 3 -q
1018 $ hg merge 2 -q
1024 $ hg merge 2 -q
1019 $ cat > a << EOF
1025 $ cat > a << EOF
1020 > b
1026 > b
1021 > 1
1027 > 1
1022 > 2
1028 > 2
1023 > 3
1029 > 3
1024 > a
1030 > a
1025 > EOF
1031 > EOF
1026 $ hg resolve --mark -q
1032 $ hg resolve --mark -q
1027 $ hg commit -m m
1033 $ hg commit -m m
1028 $ hg annotate a
1034 $ hg annotate a
1029 4: b
1035 4: b
1030 0: 1
1036 0: 1
1031 1: 2
1037 1: 2
1032 3: 3
1038 3: 3
1033 2: a
1039 2: a
1034
1040
1035 $ cd ..
1041 $ cd ..
@@ -1,412 +1,404 b''
1 #require serve
1 #require serve
2
2
3 This test is a duplicate of 'test-http.t', feel free to factor out
3 This test is a duplicate of 'test-http.t', feel free to factor out
4 parts that are not bundle1/bundle2 specific.
4 parts that are not bundle1/bundle2 specific.
5
5
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [devel]
7 > [devel]
8 > # This test is dedicated to interaction through old bundle
8 > # This test is dedicated to interaction through old bundle
9 > legacy.exchange = bundle1
9 > legacy.exchange = bundle1
10 > EOF
10 > EOF
11
11
12 $ hg init test
12 $ hg init test
13 $ cd test
13 $ cd test
14 $ echo foo>foo
14 $ echo foo>foo
15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
16 $ echo foo>foo.d/foo
16 $ echo foo>foo.d/foo
17 $ echo bar>foo.d/bAr.hg.d/BaR
17 $ echo bar>foo.d/bAr.hg.d/BaR
18 $ echo bar>foo.d/baR.d.hg/bAR
18 $ echo bar>foo.d/baR.d.hg/bAR
19 $ hg commit -A -m 1
19 $ hg commit -A -m 1
20 adding foo
20 adding foo
21 adding foo.d/bAr.hg.d/BaR
21 adding foo.d/bAr.hg.d/BaR
22 adding foo.d/baR.d.hg/bAR
22 adding foo.d/baR.d.hg/bAR
23 adding foo.d/foo
23 adding foo.d/foo
24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
26
26
27 Test server address cannot be reused
27 Test server address cannot be reused
28
28
29 $ hg serve -p $HGPORT1 2>&1
29 $ hg serve -p $HGPORT1 2>&1
30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
31 [255]
31 [255]
32
32
33 $ cd ..
33 $ cd ..
34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
35
35
36 clone via stream
36 clone via stream
37
37
38 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
38 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
39 streaming all changes
39 streaming all changes
40 6 files to transfer, 606 bytes of data
40 6 files to transfer, 606 bytes of data
41 transferred * bytes in * seconds (*/sec) (glob)
41 transferred * bytes in * seconds (*/sec) (glob)
42 searching for changes
42 searching for changes
43 no changes found
43 no changes found
44 updating to branch default
44 updating to branch default
45 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 $ hg verify -R copy
46 $ hg verify -R copy
47 checking changesets
47 checking changesets
48 checking manifests
48 checking manifests
49 crosschecking files in changesets and manifests
49 crosschecking files in changesets and manifests
50 checking files
50 checking files
51 4 files, 1 changesets, 4 total revisions
51 4 files, 1 changesets, 4 total revisions
52
52
53 try to clone via stream, should use pull instead
53 try to clone via stream, should use pull instead
54
54
55 $ hg clone --stream http://localhost:$HGPORT1/ copy2
55 $ hg clone --stream http://localhost:$HGPORT1/ copy2
56 warning: stream clone requested but server has them disabled
56 warning: stream clone requested but server has them disabled
57 requesting all changes
57 requesting all changes
58 adding changesets
58 adding changesets
59 adding manifests
59 adding manifests
60 adding file changes
60 adding file changes
61 added 1 changesets with 4 changes to 4 files
61 added 1 changesets with 4 changes to 4 files
62 new changesets 8b6053c928fe
62 new changesets 8b6053c928fe
63 updating to branch default
63 updating to branch default
64 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
65
65
66 try to clone via stream but missing requirements, so should use pull instead
66 try to clone via stream but missing requirements, so should use pull instead
67
67
68 $ cat > $TESTTMP/removesupportedformat.py << EOF
68 $ cat > $TESTTMP/removesupportedformat.py << EOF
69 > from mercurial import localrepo
69 > from mercurial import localrepo
70 > def extsetup(ui):
70 > def extsetup(ui):
71 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
71 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
72 > EOF
72 > EOF
73
73
74 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
74 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
75 warning: stream clone requested but client is missing requirements: generaldelta
75 warning: stream clone requested but client is missing requirements: generaldelta
76 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
76 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
77 requesting all changes
77 requesting all changes
78 adding changesets
78 adding changesets
79 adding manifests
79 adding manifests
80 adding file changes
80 adding file changes
81 added 1 changesets with 4 changes to 4 files
81 added 1 changesets with 4 changes to 4 files
82 new changesets 8b6053c928fe
82 new changesets 8b6053c928fe
83 updating to branch default
83 updating to branch default
84 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
85
85
86 clone via pull
86 clone via pull
87
87
88 $ hg clone http://localhost:$HGPORT1/ copy-pull
88 $ hg clone http://localhost:$HGPORT1/ copy-pull
89 requesting all changes
89 requesting all changes
90 adding changesets
90 adding changesets
91 adding manifests
91 adding manifests
92 adding file changes
92 adding file changes
93 added 1 changesets with 4 changes to 4 files
93 added 1 changesets with 4 changes to 4 files
94 new changesets 8b6053c928fe
94 new changesets 8b6053c928fe
95 updating to branch default
95 updating to branch default
96 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
96 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
97 $ hg verify -R copy-pull
97 $ hg verify -R copy-pull
98 checking changesets
98 checking changesets
99 checking manifests
99 checking manifests
100 crosschecking files in changesets and manifests
100 crosschecking files in changesets and manifests
101 checking files
101 checking files
102 4 files, 1 changesets, 4 total revisions
102 4 files, 1 changesets, 4 total revisions
103 $ cd test
103 $ cd test
104 $ echo bar > bar
104 $ echo bar > bar
105 $ hg commit -A -d '1 0' -m 2
105 $ hg commit -A -d '1 0' -m 2
106 adding bar
106 adding bar
107 $ cd ..
107 $ cd ..
108
108
109 clone over http with --update
109 clone over http with --update
110
110
111 $ hg clone http://localhost:$HGPORT1/ updated --update 0
111 $ hg clone http://localhost:$HGPORT1/ updated --update 0
112 requesting all changes
112 requesting all changes
113 adding changesets
113 adding changesets
114 adding manifests
114 adding manifests
115 adding file changes
115 adding file changes
116 added 2 changesets with 5 changes to 5 files
116 added 2 changesets with 5 changes to 5 files
117 new changesets 8b6053c928fe:5fed3813f7f5
117 new changesets 8b6053c928fe:5fed3813f7f5
118 updating to branch default
118 updating to branch default
119 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
119 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
120 $ hg log -r . -R updated
120 $ hg log -r . -R updated
121 changeset: 0:8b6053c928fe
121 changeset: 0:8b6053c928fe
122 user: test
122 user: test
123 date: Thu Jan 01 00:00:00 1970 +0000
123 date: Thu Jan 01 00:00:00 1970 +0000
124 summary: 1
124 summary: 1
125
125
126 $ rm -rf updated
126 $ rm -rf updated
127
127
128 incoming via HTTP
128 incoming via HTTP
129
129
130 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
130 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
131 adding changesets
131 adding changesets
132 adding manifests
132 adding manifests
133 adding file changes
133 adding file changes
134 added 1 changesets with 4 changes to 4 files
134 added 1 changesets with 4 changes to 4 files
135 new changesets 8b6053c928fe
135 new changesets 8b6053c928fe
136 updating to branch default
136 updating to branch default
137 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
138 $ cd partial
138 $ cd partial
139 $ touch LOCAL
139 $ touch LOCAL
140 $ hg ci -qAm LOCAL
140 $ hg ci -qAm LOCAL
141 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
141 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
142 comparing with http://localhost:$HGPORT1/
142 comparing with http://localhost:$HGPORT1/
143 searching for changes
143 searching for changes
144 2
144 2
145 $ cd ..
145 $ cd ..
146
146
147 pull
147 pull
148
148
149 $ cd copy-pull
149 $ cd copy-pull
150 $ cat >> .hg/hgrc <<EOF
150 $ cat >> .hg/hgrc <<EOF
151 > [hooks]
151 > [hooks]
152 > changegroup = sh -c "printenv.py changegroup"
152 > changegroup = sh -c "printenv.py changegroup"
153 > EOF
153 > EOF
154 $ hg pull
154 $ hg pull
155 pulling from http://localhost:$HGPORT1/
155 pulling from http://localhost:$HGPORT1/
156 searching for changes
156 searching for changes
157 adding changesets
157 adding changesets
158 adding manifests
158 adding manifests
159 adding file changes
159 adding file changes
160 added 1 changesets with 1 changes to 1 files
160 added 1 changesets with 1 changes to 1 files
161 new changesets 5fed3813f7f5
161 new changesets 5fed3813f7f5
162 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
162 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
163 (run 'hg update' to get a working copy)
163 (run 'hg update' to get a working copy)
164 $ cd ..
164 $ cd ..
165
165
166 clone from invalid URL
166 clone from invalid URL
167
167
168 $ hg clone http://localhost:$HGPORT/bad
168 $ hg clone http://localhost:$HGPORT/bad
169 abort: HTTP Error 404: Not Found
169 abort: HTTP Error 404: Not Found
170 [255]
170 [255]
171
171
172 test http authentication
172 test http authentication
173 + use the same server to test server side streaming preference
173 + use the same server to test server side streaming preference
174
174
175 $ cd test
175 $ cd test
176 $ cat << EOT > userpass.py
176 $ cat << EOT > userpass.py
177 > import base64
177 > import base64
178 > from mercurial.hgweb import common
178 > from mercurial.hgweb import common
179 > def perform_authentication(hgweb, req, op):
179 > def perform_authentication(hgweb, req, op):
180 > auth = req.env.get('HTTP_AUTHORIZATION')
180 > auth = req.env.get('HTTP_AUTHORIZATION')
181 > if not auth:
181 > if not auth:
182 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
182 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
183 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
183 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
184 > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
184 > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
185 > b'pass']:
185 > b'pass']:
186 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
186 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
187 > def extsetup():
187 > def extsetup():
188 > common.permhooks.insert(0, perform_authentication)
188 > common.permhooks.insert(0, perform_authentication)
189 > EOT
189 > EOT
190 $ hg serve --config extensions.x=userpass.py -p $HGPORT2 -d --pid-file=pid \
190 $ hg serve --config extensions.x=userpass.py -p $HGPORT2 -d --pid-file=pid \
191 > --config server.preferuncompressed=True \
191 > --config server.preferuncompressed=True \
192 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
192 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
193 $ cat pid >> $DAEMON_PIDS
193 $ cat pid >> $DAEMON_PIDS
194
194
195 $ cat << EOF > get_pass.py
195 $ cat << EOF > get_pass.py
196 > import getpass
196 > import getpass
197 > def newgetpass(arg):
197 > def newgetpass(arg):
198 > return "pass"
198 > return "pass"
199 > getpass.getpass = newgetpass
199 > getpass.getpass = newgetpass
200 > EOF
200 > EOF
201
201
202 $ hg id http://localhost:$HGPORT2/
202 $ hg id http://localhost:$HGPORT2/
203 abort: http authorization required for http://localhost:$HGPORT2/
203 abort: http authorization required for http://localhost:$HGPORT2/
204 [255]
204 [255]
205 $ hg id http://localhost:$HGPORT2/
205 $ hg id http://localhost:$HGPORT2/
206 abort: http authorization required for http://localhost:$HGPORT2/
206 abort: http authorization required for http://localhost:$HGPORT2/
207 [255]
207 [255]
208 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
208 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
209 http authorization required for http://localhost:$HGPORT2/
209 http authorization required for http://localhost:$HGPORT2/
210 realm: mercurial
210 realm: mercurial
211 user: user
211 user: user
212 password: 5fed3813f7f5
212 password: 5fed3813f7f5
213 $ hg id http://user:pass@localhost:$HGPORT2/
213 $ hg id http://user:pass@localhost:$HGPORT2/
214 5fed3813f7f5
214 5fed3813f7f5
215 $ echo '[auth]' >> .hg/hgrc
215 $ echo '[auth]' >> .hg/hgrc
216 $ echo 'l.schemes=http' >> .hg/hgrc
216 $ echo 'l.schemes=http' >> .hg/hgrc
217 $ echo 'l.prefix=lo' >> .hg/hgrc
217 $ echo 'l.prefix=lo' >> .hg/hgrc
218 $ echo 'l.username=user' >> .hg/hgrc
218 $ echo 'l.username=user' >> .hg/hgrc
219 $ echo 'l.password=pass' >> .hg/hgrc
219 $ echo 'l.password=pass' >> .hg/hgrc
220 $ hg id http://localhost:$HGPORT2/
220 $ hg id http://localhost:$HGPORT2/
221 5fed3813f7f5
221 5fed3813f7f5
222 $ hg id http://localhost:$HGPORT2/
222 $ hg id http://localhost:$HGPORT2/
223 5fed3813f7f5
223 5fed3813f7f5
224 $ hg id http://user@localhost:$HGPORT2/
224 $ hg id http://user@localhost:$HGPORT2/
225 5fed3813f7f5
225 5fed3813f7f5
226 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
226 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
227 streaming all changes
227 streaming all changes
228 7 files to transfer, 916 bytes of data
228 7 files to transfer, 916 bytes of data
229 transferred * bytes in * seconds (*/sec) (glob)
229 transferred * bytes in * seconds (*/sec) (glob)
230 searching for changes
230 searching for changes
231 no changes found
231 no changes found
232 updating to branch default
232 updating to branch default
233 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
234 --pull should override server's preferuncompressed
234 --pull should override server's preferuncompressed
235 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
235 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
236 requesting all changes
236 requesting all changes
237 adding changesets
237 adding changesets
238 adding manifests
238 adding manifests
239 adding file changes
239 adding file changes
240 added 2 changesets with 5 changes to 5 files
240 added 2 changesets with 5 changes to 5 files
241 new changesets 8b6053c928fe:5fed3813f7f5
241 new changesets 8b6053c928fe:5fed3813f7f5
242 updating to branch default
242 updating to branch default
243 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
243 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
244
244
245 $ hg id http://user2@localhost:$HGPORT2/
245 $ hg id http://user2@localhost:$HGPORT2/
246 abort: http authorization required for http://localhost:$HGPORT2/
246 abort: http authorization required for http://localhost:$HGPORT2/
247 [255]
247 [255]
248 $ hg id http://user:pass2@localhost:$HGPORT2/
248 $ hg id http://user:pass2@localhost:$HGPORT2/
249 abort: HTTP Error 403: no
249 abort: HTTP Error 403: no
250 [255]
250 [255]
251
251
252 $ hg -R dest tag -r tip top
252 $ hg -R dest tag -r tip top
253 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
253 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
254 pushing to http://user:***@localhost:$HGPORT2/
254 pushing to http://user:***@localhost:$HGPORT2/
255 searching for changes
255 searching for changes
256 remote: adding changesets
256 remote: adding changesets
257 remote: adding manifests
257 remote: adding manifests
258 remote: adding file changes
258 remote: adding file changes
259 remote: added 1 changesets with 1 changes to 1 files
259 remote: added 1 changesets with 1 changes to 1 files
260 $ hg rollback -q
260 $ hg rollback -q
261
261
262 $ sed 's/.*] "/"/' < ../access.log
262 $ sed 's/.*] "/"/' < ../access.log
263 "GET /?cmd=capabilities HTTP/1.1" 200 -
263 "GET /?cmd=capabilities HTTP/1.1" 401 -
264 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
264 "GET /?cmd=capabilities HTTP/1.1" 401 -
265 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
265 "GET /?cmd=capabilities HTTP/1.1" 401 -
266 "GET /?cmd=capabilities HTTP/1.1" 200 -
266 "GET /?cmd=capabilities HTTP/1.1" 200 -
267 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
267 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
268 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
268 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
269 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
270 "GET /?cmd=capabilities HTTP/1.1" 401 -
269 "GET /?cmd=capabilities HTTP/1.1" 200 -
271 "GET /?cmd=capabilities HTTP/1.1" 200 -
270 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
272 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
271 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
272 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
273 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
274 "GET /?cmd=capabilities HTTP/1.1" 200 -
275 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
276 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
277 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
273 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
278 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
274 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
275 "GET /?cmd=capabilities HTTP/1.1" 401 -
279 "GET /?cmd=capabilities HTTP/1.1" 200 -
276 "GET /?cmd=capabilities HTTP/1.1" 200 -
280 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
277 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
281 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
282 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
278 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
283 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
279 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
280 "GET /?cmd=capabilities HTTP/1.1" 401 -
284 "GET /?cmd=capabilities HTTP/1.1" 200 -
281 "GET /?cmd=capabilities HTTP/1.1" 200 -
285 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
282 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
286 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
287 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
283 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
288 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
284 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
285 "GET /?cmd=capabilities HTTP/1.1" 401 -
289 "GET /?cmd=capabilities HTTP/1.1" 200 -
286 "GET /?cmd=capabilities HTTP/1.1" 200 -
290 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
287 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
291 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
292 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
288 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
293 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
289 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
290 "GET /?cmd=capabilities HTTP/1.1" 401 -
294 "GET /?cmd=capabilities HTTP/1.1" 200 -
291 "GET /?cmd=capabilities HTTP/1.1" 200 -
295 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
292 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
296 "GET /?cmd=stream_out HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
297 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
293 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
298 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
294 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
299 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
295 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
300 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
296 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
297 "GET /?cmd=capabilities HTTP/1.1" 401 -
301 "GET /?cmd=capabilities HTTP/1.1" 200 -
298 "GET /?cmd=capabilities HTTP/1.1" 200 -
302 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
303 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
299 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
304 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
300 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
305 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
301 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
306 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
302 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
307 "GET /?cmd=capabilities HTTP/1.1" 200 -
303 "GET /?cmd=capabilities HTTP/1.1" 401 -
308 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
304 "GET /?cmd=capabilities HTTP/1.1" 401 -
309 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
305 "GET /?cmd=capabilities HTTP/1.1" 403 -
310 "GET /?cmd=capabilities HTTP/1.1" 200 -
306 "GET /?cmd=capabilities HTTP/1.1" 401 -
311 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
312 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
313 "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
314 "GET /?cmd=capabilities HTTP/1.1" 200 -
307 "GET /?cmd=capabilities HTTP/1.1" 200 -
315 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
308 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
316 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
309 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
318 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
310 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
319 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
311 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
320 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
312 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
321 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
313 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
322 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
314 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
323 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
315 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
324
316
325 $ cd ..
317 $ cd ..
326
318
327 clone of serve with repo in root and unserved subrepo (issue2970)
319 clone of serve with repo in root and unserved subrepo (issue2970)
328
320
329 $ hg --cwd test init sub
321 $ hg --cwd test init sub
330 $ echo empty > test/sub/empty
322 $ echo empty > test/sub/empty
331 $ hg --cwd test/sub add empty
323 $ hg --cwd test/sub add empty
332 $ hg --cwd test/sub commit -qm 'add empty'
324 $ hg --cwd test/sub commit -qm 'add empty'
333 $ hg --cwd test/sub tag -r 0 something
325 $ hg --cwd test/sub tag -r 0 something
334 $ echo sub = sub > test/.hgsub
326 $ echo sub = sub > test/.hgsub
335 $ hg --cwd test add .hgsub
327 $ hg --cwd test add .hgsub
336 $ hg --cwd test commit -qm 'add subrepo'
328 $ hg --cwd test commit -qm 'add subrepo'
337 $ hg clone http://localhost:$HGPORT noslash-clone
329 $ hg clone http://localhost:$HGPORT noslash-clone
338 requesting all changes
330 requesting all changes
339 adding changesets
331 adding changesets
340 adding manifests
332 adding manifests
341 adding file changes
333 adding file changes
342 added 3 changesets with 7 changes to 7 files
334 added 3 changesets with 7 changes to 7 files
343 new changesets 8b6053c928fe:56f9bc90cce6
335 new changesets 8b6053c928fe:56f9bc90cce6
344 updating to branch default
336 updating to branch default
345 abort: HTTP Error 404: Not Found
337 abort: HTTP Error 404: Not Found
346 [255]
338 [255]
347 $ hg clone http://localhost:$HGPORT/ slash-clone
339 $ hg clone http://localhost:$HGPORT/ slash-clone
348 requesting all changes
340 requesting all changes
349 adding changesets
341 adding changesets
350 adding manifests
342 adding manifests
351 adding file changes
343 adding file changes
352 added 3 changesets with 7 changes to 7 files
344 added 3 changesets with 7 changes to 7 files
353 new changesets 8b6053c928fe:56f9bc90cce6
345 new changesets 8b6053c928fe:56f9bc90cce6
354 updating to branch default
346 updating to branch default
355 abort: HTTP Error 404: Not Found
347 abort: HTTP Error 404: Not Found
356 [255]
348 [255]
357
349
358 check error log
350 check error log
359
351
360 $ cat error.log
352 $ cat error.log
361
353
362 Check error reporting while pulling/cloning
354 Check error reporting while pulling/cloning
363
355
364 $ $RUNTESTDIR/killdaemons.py
356 $ $RUNTESTDIR/killdaemons.py
365 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
357 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
366 $ cat hg3.pid >> $DAEMON_PIDS
358 $ cat hg3.pid >> $DAEMON_PIDS
367 $ hg clone http://localhost:$HGPORT/ abort-clone
359 $ hg clone http://localhost:$HGPORT/ abort-clone
368 requesting all changes
360 requesting all changes
369 abort: remote error:
361 abort: remote error:
370 this is an exercise
362 this is an exercise
371 [255]
363 [255]
372 $ cat error.log
364 $ cat error.log
373
365
374 disable pull-based clones
366 disable pull-based clones
375
367
376 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
368 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
377 $ cat hg4.pid >> $DAEMON_PIDS
369 $ cat hg4.pid >> $DAEMON_PIDS
378 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
370 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
379 requesting all changes
371 requesting all changes
380 abort: remote error:
372 abort: remote error:
381 server has pull-based clones disabled
373 server has pull-based clones disabled
382 [255]
374 [255]
383
375
384 ... but keep stream clones working
376 ... but keep stream clones working
385
377
386 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
378 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
387 streaming all changes
379 streaming all changes
388 * files to transfer, * of data (glob)
380 * files to transfer, * of data (glob)
389 transferred * in * seconds (* KB/sec) (glob)
381 transferred * in * seconds (* KB/sec) (glob)
390 searching for changes
382 searching for changes
391 no changes found
383 no changes found
392
384
393 ... and also keep partial clones and pulls working
385 ... and also keep partial clones and pulls working
394 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
386 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
395 adding changesets
387 adding changesets
396 adding manifests
388 adding manifests
397 adding file changes
389 adding file changes
398 added 1 changesets with 4 changes to 4 files
390 added 1 changesets with 4 changes to 4 files
399 new changesets 8b6053c928fe
391 new changesets 8b6053c928fe
400 updating to branch default
392 updating to branch default
401 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
393 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
402 $ hg pull -R test-partial-clone
394 $ hg pull -R test-partial-clone
403 pulling from http://localhost:$HGPORT1/
395 pulling from http://localhost:$HGPORT1/
404 searching for changes
396 searching for changes
405 adding changesets
397 adding changesets
406 adding manifests
398 adding manifests
407 adding file changes
399 adding file changes
408 added 2 changesets with 3 changes to 3 files
400 added 2 changesets with 3 changes to 3 files
409 new changesets 5fed3813f7f5:56f9bc90cce6
401 new changesets 5fed3813f7f5:56f9bc90cce6
410 (run 'hg update' to get a working copy)
402 (run 'hg update' to get a working copy)
411
403
412 $ cat error.log
404 $ cat error.log
@@ -1,560 +1,552 b''
1 #require killdaemons serve
1 #require killdaemons serve
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo>foo
5 $ echo foo>foo
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 $ echo foo>foo.d/foo
7 $ echo foo>foo.d/foo
8 $ echo bar>foo.d/bAr.hg.d/BaR
8 $ echo bar>foo.d/bAr.hg.d/BaR
9 $ echo bar>foo.d/baR.d.hg/bAR
9 $ echo bar>foo.d/baR.d.hg/bAR
10 $ hg commit -A -m 1
10 $ hg commit -A -m 1
11 adding foo
11 adding foo
12 adding foo.d/bAr.hg.d/BaR
12 adding foo.d/bAr.hg.d/BaR
13 adding foo.d/baR.d.hg/bAR
13 adding foo.d/baR.d.hg/bAR
14 adding foo.d/foo
14 adding foo.d/foo
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
17
17
18 Test server address cannot be reused
18 Test server address cannot be reused
19
19
20 $ hg serve -p $HGPORT1 2>&1
20 $ hg serve -p $HGPORT1 2>&1
21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
22 [255]
22 [255]
23
23
24 $ cd ..
24 $ cd ..
25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
26
26
27 clone via stream
27 clone via stream
28
28
29 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
29 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
30 streaming all changes
30 streaming all changes
31 6 files to transfer, 606 bytes of data
31 6 files to transfer, 606 bytes of data
32 transferred * bytes in * seconds (*/sec) (glob)
32 transferred * bytes in * seconds (*/sec) (glob)
33 searching for changes
33 searching for changes
34 no changes found
34 no changes found
35 updating to branch default
35 updating to branch default
36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 $ hg verify -R copy
37 $ hg verify -R copy
38 checking changesets
38 checking changesets
39 checking manifests
39 checking manifests
40 crosschecking files in changesets and manifests
40 crosschecking files in changesets and manifests
41 checking files
41 checking files
42 4 files, 1 changesets, 4 total revisions
42 4 files, 1 changesets, 4 total revisions
43
43
44 try to clone via stream, should use pull instead
44 try to clone via stream, should use pull instead
45
45
46 $ hg clone --stream http://localhost:$HGPORT1/ copy2
46 $ hg clone --stream http://localhost:$HGPORT1/ copy2
47 warning: stream clone requested but server has them disabled
47 warning: stream clone requested but server has them disabled
48 requesting all changes
48 requesting all changes
49 adding changesets
49 adding changesets
50 adding manifests
50 adding manifests
51 adding file changes
51 adding file changes
52 added 1 changesets with 4 changes to 4 files
52 added 1 changesets with 4 changes to 4 files
53 new changesets 8b6053c928fe
53 new changesets 8b6053c928fe
54 updating to branch default
54 updating to branch default
55 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
56
56
57 try to clone via stream but missing requirements, so should use pull instead
57 try to clone via stream but missing requirements, so should use pull instead
58
58
59 $ cat > $TESTTMP/removesupportedformat.py << EOF
59 $ cat > $TESTTMP/removesupportedformat.py << EOF
60 > from mercurial import localrepo
60 > from mercurial import localrepo
61 > def extsetup(ui):
61 > def extsetup(ui):
62 > localrepo.localrepository.supportedformats.remove('generaldelta')
62 > localrepo.localrepository.supportedformats.remove('generaldelta')
63 > EOF
63 > EOF
64
64
65 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
65 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
66 warning: stream clone requested but client is missing requirements: generaldelta
66 warning: stream clone requested but client is missing requirements: generaldelta
67 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
67 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
68 requesting all changes
68 requesting all changes
69 adding changesets
69 adding changesets
70 adding manifests
70 adding manifests
71 adding file changes
71 adding file changes
72 added 1 changesets with 4 changes to 4 files
72 added 1 changesets with 4 changes to 4 files
73 new changesets 8b6053c928fe
73 new changesets 8b6053c928fe
74 updating to branch default
74 updating to branch default
75 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
76
76
77 clone via pull
77 clone via pull
78
78
79 $ hg clone http://localhost:$HGPORT1/ copy-pull
79 $ hg clone http://localhost:$HGPORT1/ copy-pull
80 requesting all changes
80 requesting all changes
81 adding changesets
81 adding changesets
82 adding manifests
82 adding manifests
83 adding file changes
83 adding file changes
84 added 1 changesets with 4 changes to 4 files
84 added 1 changesets with 4 changes to 4 files
85 new changesets 8b6053c928fe
85 new changesets 8b6053c928fe
86 updating to branch default
86 updating to branch default
87 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
87 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 $ hg verify -R copy-pull
88 $ hg verify -R copy-pull
89 checking changesets
89 checking changesets
90 checking manifests
90 checking manifests
91 crosschecking files in changesets and manifests
91 crosschecking files in changesets and manifests
92 checking files
92 checking files
93 4 files, 1 changesets, 4 total revisions
93 4 files, 1 changesets, 4 total revisions
94 $ cd test
94 $ cd test
95 $ echo bar > bar
95 $ echo bar > bar
96 $ hg commit -A -d '1 0' -m 2
96 $ hg commit -A -d '1 0' -m 2
97 adding bar
97 adding bar
98 $ cd ..
98 $ cd ..
99
99
100 clone over http with --update
100 clone over http with --update
101
101
102 $ hg clone http://localhost:$HGPORT1/ updated --update 0
102 $ hg clone http://localhost:$HGPORT1/ updated --update 0
103 requesting all changes
103 requesting all changes
104 adding changesets
104 adding changesets
105 adding manifests
105 adding manifests
106 adding file changes
106 adding file changes
107 added 2 changesets with 5 changes to 5 files
107 added 2 changesets with 5 changes to 5 files
108 new changesets 8b6053c928fe:5fed3813f7f5
108 new changesets 8b6053c928fe:5fed3813f7f5
109 updating to branch default
109 updating to branch default
110 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 $ hg log -r . -R updated
111 $ hg log -r . -R updated
112 changeset: 0:8b6053c928fe
112 changeset: 0:8b6053c928fe
113 user: test
113 user: test
114 date: Thu Jan 01 00:00:00 1970 +0000
114 date: Thu Jan 01 00:00:00 1970 +0000
115 summary: 1
115 summary: 1
116
116
117 $ rm -rf updated
117 $ rm -rf updated
118
118
119 incoming via HTTP
119 incoming via HTTP
120
120
121 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
121 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
122 adding changesets
122 adding changesets
123 adding manifests
123 adding manifests
124 adding file changes
124 adding file changes
125 added 1 changesets with 4 changes to 4 files
125 added 1 changesets with 4 changes to 4 files
126 new changesets 8b6053c928fe
126 new changesets 8b6053c928fe
127 updating to branch default
127 updating to branch default
128 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
128 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
129 $ cd partial
129 $ cd partial
130 $ touch LOCAL
130 $ touch LOCAL
131 $ hg ci -qAm LOCAL
131 $ hg ci -qAm LOCAL
132 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
132 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
133 comparing with http://localhost:$HGPORT1/
133 comparing with http://localhost:$HGPORT1/
134 searching for changes
134 searching for changes
135 2
135 2
136 $ cd ..
136 $ cd ..
137
137
138 pull
138 pull
139
139
140 $ cd copy-pull
140 $ cd copy-pull
141 $ cat >> .hg/hgrc <<EOF
141 $ cat >> .hg/hgrc <<EOF
142 > [hooks]
142 > [hooks]
143 > changegroup = sh -c "printenv.py changegroup"
143 > changegroup = sh -c "printenv.py changegroup"
144 > EOF
144 > EOF
145 $ hg pull
145 $ hg pull
146 pulling from http://localhost:$HGPORT1/
146 pulling from http://localhost:$HGPORT1/
147 searching for changes
147 searching for changes
148 adding changesets
148 adding changesets
149 adding manifests
149 adding manifests
150 adding file changes
150 adding file changes
151 added 1 changesets with 1 changes to 1 files
151 added 1 changesets with 1 changes to 1 files
152 new changesets 5fed3813f7f5
152 new changesets 5fed3813f7f5
153 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
153 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
154 (run 'hg update' to get a working copy)
154 (run 'hg update' to get a working copy)
155 $ cd ..
155 $ cd ..
156
156
157 clone from invalid URL
157 clone from invalid URL
158
158
159 $ hg clone http://localhost:$HGPORT/bad
159 $ hg clone http://localhost:$HGPORT/bad
160 abort: HTTP Error 404: Not Found
160 abort: HTTP Error 404: Not Found
161 [255]
161 [255]
162
162
163 test http authentication
163 test http authentication
164 + use the same server to test server side streaming preference
164 + use the same server to test server side streaming preference
165
165
166 $ cd test
166 $ cd test
167 $ cat << EOT > userpass.py
167 $ cat << EOT > userpass.py
168 > import base64
168 > import base64
169 > from mercurial.hgweb import common
169 > from mercurial.hgweb import common
170 > def perform_authentication(hgweb, req, op):
170 > def perform_authentication(hgweb, req, op):
171 > auth = req.env.get('HTTP_AUTHORIZATION')
171 > auth = req.env.get('HTTP_AUTHORIZATION')
172 > if not auth:
172 > if not auth:
173 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
173 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
174 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
174 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
175 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
175 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
176 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
176 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
177 > def extsetup():
177 > def extsetup():
178 > common.permhooks.insert(0, perform_authentication)
178 > common.permhooks.insert(0, perform_authentication)
179 > EOT
179 > EOT
180 $ hg serve --config extensions.x=userpass.py -p $HGPORT2 -d --pid-file=pid \
180 $ hg serve --config extensions.x=userpass.py -p $HGPORT2 -d --pid-file=pid \
181 > --config server.preferuncompressed=True \
181 > --config server.preferuncompressed=True \
182 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
182 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
183 $ cat pid >> $DAEMON_PIDS
183 $ cat pid >> $DAEMON_PIDS
184
184
185 $ cat << EOF > get_pass.py
185 $ cat << EOF > get_pass.py
186 > import getpass
186 > import getpass
187 > def newgetpass(arg):
187 > def newgetpass(arg):
188 > return "pass"
188 > return "pass"
189 > getpass.getpass = newgetpass
189 > getpass.getpass = newgetpass
190 > EOF
190 > EOF
191
191
192 $ hg id http://localhost:$HGPORT2/
192 $ hg id http://localhost:$HGPORT2/
193 abort: http authorization required for http://localhost:$HGPORT2/
193 abort: http authorization required for http://localhost:$HGPORT2/
194 [255]
194 [255]
195 $ hg id http://localhost:$HGPORT2/
195 $ hg id http://localhost:$HGPORT2/
196 abort: http authorization required for http://localhost:$HGPORT2/
196 abort: http authorization required for http://localhost:$HGPORT2/
197 [255]
197 [255]
198 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
198 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
199 http authorization required for http://localhost:$HGPORT2/
199 http authorization required for http://localhost:$HGPORT2/
200 realm: mercurial
200 realm: mercurial
201 user: user
201 user: user
202 password: 5fed3813f7f5
202 password: 5fed3813f7f5
203 $ hg id http://user:pass@localhost:$HGPORT2/
203 $ hg id http://user:pass@localhost:$HGPORT2/
204 5fed3813f7f5
204 5fed3813f7f5
205 $ echo '[auth]' >> .hg/hgrc
205 $ echo '[auth]' >> .hg/hgrc
206 $ echo 'l.schemes=http' >> .hg/hgrc
206 $ echo 'l.schemes=http' >> .hg/hgrc
207 $ echo 'l.prefix=lo' >> .hg/hgrc
207 $ echo 'l.prefix=lo' >> .hg/hgrc
208 $ echo 'l.username=user' >> .hg/hgrc
208 $ echo 'l.username=user' >> .hg/hgrc
209 $ echo 'l.password=pass' >> .hg/hgrc
209 $ echo 'l.password=pass' >> .hg/hgrc
210 $ hg id http://localhost:$HGPORT2/
210 $ hg id http://localhost:$HGPORT2/
211 5fed3813f7f5
211 5fed3813f7f5
212 $ hg id http://localhost:$HGPORT2/
212 $ hg id http://localhost:$HGPORT2/
213 5fed3813f7f5
213 5fed3813f7f5
214 $ hg id http://user@localhost:$HGPORT2/
214 $ hg id http://user@localhost:$HGPORT2/
215 5fed3813f7f5
215 5fed3813f7f5
216 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
216 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
217 streaming all changes
217 streaming all changes
218 7 files to transfer, 916 bytes of data
218 7 files to transfer, 916 bytes of data
219 transferred * bytes in * seconds (*/sec) (glob)
219 transferred * bytes in * seconds (*/sec) (glob)
220 searching for changes
220 searching for changes
221 no changes found
221 no changes found
222 updating to branch default
222 updating to branch default
223 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
223 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
224 --pull should override server's preferuncompressed
224 --pull should override server's preferuncompressed
225 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
225 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
226 requesting all changes
226 requesting all changes
227 adding changesets
227 adding changesets
228 adding manifests
228 adding manifests
229 adding file changes
229 adding file changes
230 added 2 changesets with 5 changes to 5 files
230 added 2 changesets with 5 changes to 5 files
231 new changesets 8b6053c928fe:5fed3813f7f5
231 new changesets 8b6053c928fe:5fed3813f7f5
232 updating to branch default
232 updating to branch default
233 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
234
234
235 $ hg id http://user2@localhost:$HGPORT2/
235 $ hg id http://user2@localhost:$HGPORT2/
236 abort: http authorization required for http://localhost:$HGPORT2/
236 abort: http authorization required for http://localhost:$HGPORT2/
237 [255]
237 [255]
238 $ hg id http://user:pass2@localhost:$HGPORT2/
238 $ hg id http://user:pass2@localhost:$HGPORT2/
239 abort: HTTP Error 403: no
239 abort: HTTP Error 403: no
240 [255]
240 [255]
241
241
242 $ hg -R dest tag -r tip top
242 $ hg -R dest tag -r tip top
243 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
243 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
244 pushing to http://user:***@localhost:$HGPORT2/
244 pushing to http://user:***@localhost:$HGPORT2/
245 searching for changes
245 searching for changes
246 remote: adding changesets
246 remote: adding changesets
247 remote: adding manifests
247 remote: adding manifests
248 remote: adding file changes
248 remote: adding file changes
249 remote: added 1 changesets with 1 changes to 1 files
249 remote: added 1 changesets with 1 changes to 1 files
250 $ hg rollback -q
250 $ hg rollback -q
251 $ hg -R dest push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
251 $ hg -R dest push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
252 pushing to http://user:***@localhost:$HGPORT2/
252 pushing to http://user:***@localhost:$HGPORT2/
253 using http://localhost:$HGPORT2/
253 using http://localhost:$HGPORT2/
254 http auth: user user, password ****
254 http auth: user user, password ****
255 sending capabilities command
255 sending capabilities command
256 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
256 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
257 http auth: user user, password ****
257 devel-peer-request: finished in *.???? seconds (200) (glob)
258 devel-peer-request: finished in *.???? seconds (200) (glob)
258 query 1; heads
259 query 1; heads
259 sending batch command
260 sending batch command
260 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
261 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
261 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
262 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
262 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
263 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
263 devel-peer-request: 68 bytes of commands arguments in headers
264 devel-peer-request: 68 bytes of commands arguments in headers
264 devel-peer-request: finished in *.???? seconds (200) (glob)
265 devel-peer-request: finished in *.???? seconds (200) (glob)
265 searching for changes
266 searching for changes
266 all remote heads known locally
267 all remote heads known locally
267 preparing listkeys for "phases"
268 preparing listkeys for "phases"
268 sending listkeys command
269 sending listkeys command
269 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
270 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
270 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
271 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
271 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
272 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
272 devel-peer-request: 16 bytes of commands arguments in headers
273 devel-peer-request: 16 bytes of commands arguments in headers
273 http auth: user user, password ****
274 devel-peer-request: finished in *.???? seconds (200) (glob)
274 devel-peer-request: finished in *.???? seconds (200) (glob)
275 received listkey for "phases": 58 bytes
275 received listkey for "phases": 58 bytes
276 checking for updated bookmarks
276 checking for updated bookmarks
277 preparing listkeys for "bookmarks"
277 preparing listkeys for "bookmarks"
278 sending listkeys command
278 sending listkeys command
279 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
279 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
280 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
280 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
281 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
281 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
282 devel-peer-request: 19 bytes of commands arguments in headers
282 devel-peer-request: 19 bytes of commands arguments in headers
283 devel-peer-request: finished in *.???? seconds (200) (glob)
283 devel-peer-request: finished in *.???? seconds (200) (glob)
284 received listkey for "bookmarks": 0 bytes
284 received listkey for "bookmarks": 0 bytes
285 sending branchmap command
285 sending branchmap command
286 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
286 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
287 devel-peer-request: Vary X-HgProto-1
287 devel-peer-request: Vary X-HgProto-1
288 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
288 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
289 devel-peer-request: finished in *.???? seconds (200) (glob)
289 devel-peer-request: finished in *.???? seconds (200) (glob)
290 sending branchmap command
290 sending branchmap command
291 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
291 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
292 devel-peer-request: Vary X-HgProto-1
292 devel-peer-request: Vary X-HgProto-1
293 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
293 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
294 devel-peer-request: finished in *.???? seconds (200) (glob)
294 devel-peer-request: finished in *.???? seconds (200) (glob)
295 preparing listkeys for "bookmarks"
295 preparing listkeys for "bookmarks"
296 sending listkeys command
296 sending listkeys command
297 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
297 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
298 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
298 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
299 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
299 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
300 devel-peer-request: 19 bytes of commands arguments in headers
300 devel-peer-request: 19 bytes of commands arguments in headers
301 devel-peer-request: finished in *.???? seconds (200) (glob)
301 devel-peer-request: finished in *.???? seconds (200) (glob)
302 received listkey for "bookmarks": 0 bytes
302 received listkey for "bookmarks": 0 bytes
303 1 changesets found
303 1 changesets found
304 list of changesets:
304 list of changesets:
305 7f4e523d01f2cc3765ac8934da3d14db775ff872
305 7f4e523d01f2cc3765ac8934da3d14db775ff872
306 bundle2-output-bundle: "HG20", 5 parts total
306 bundle2-output-bundle: "HG20", 5 parts total
307 bundle2-output-part: "replycaps" 188 bytes payload
307 bundle2-output-part: "replycaps" 188 bytes payload
308 bundle2-output-part: "check:phases" 24 bytes payload
308 bundle2-output-part: "check:phases" 24 bytes payload
309 bundle2-output-part: "check:heads" streamed payload
309 bundle2-output-part: "check:heads" streamed payload
310 bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
310 bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
311 bundle2-output-part: "phase-heads" 24 bytes payload
311 bundle2-output-part: "phase-heads" 24 bytes payload
312 sending unbundle command
312 sending unbundle command
313 sending 996 bytes
313 sending 996 bytes
314 devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
314 devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
315 devel-peer-request: Content-length 996
315 devel-peer-request: Content-length 996
316 devel-peer-request: Content-type application/mercurial-0.1
316 devel-peer-request: Content-type application/mercurial-0.1
317 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
317 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
318 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
318 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
319 devel-peer-request: 16 bytes of commands arguments in headers
319 devel-peer-request: 16 bytes of commands arguments in headers
320 devel-peer-request: 996 bytes of data
320 devel-peer-request: 996 bytes of data
321 devel-peer-request: finished in *.???? seconds (200) (glob)
321 devel-peer-request: finished in *.???? seconds (200) (glob)
322 bundle2-input-bundle: no-transaction
322 bundle2-input-bundle: no-transaction
323 bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
323 bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
324 bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
324 bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
325 bundle2-input-part: total payload size 100
325 bundle2-input-part: total payload size 100
326 remote: adding changesets
326 remote: adding changesets
327 remote: adding manifests
327 remote: adding manifests
328 remote: adding file changes
328 remote: adding file changes
329 remote: added 1 changesets with 1 changes to 1 files
329 remote: added 1 changesets with 1 changes to 1 files
330 bundle2-input-part: "output" (advisory) supported
330 bundle2-input-part: "output" (advisory) supported
331 bundle2-input-bundle: 2 parts total
331 bundle2-input-bundle: 2 parts total
332 preparing listkeys for "phases"
332 preparing listkeys for "phases"
333 sending listkeys command
333 sending listkeys command
334 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
334 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
335 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
335 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
336 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
336 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$
337 devel-peer-request: 16 bytes of commands arguments in headers
337 devel-peer-request: 16 bytes of commands arguments in headers
338 devel-peer-request: finished in *.???? seconds (200) (glob)
338 devel-peer-request: finished in *.???? seconds (200) (glob)
339 received listkey for "phases": 15 bytes
339 received listkey for "phases": 15 bytes
340 $ hg rollback -q
340 $ hg rollback -q
341
341
342 $ sed 's/.*] "/"/' < ../access.log
342 $ sed 's/.*] "/"/' < ../access.log
343 "GET /?cmd=capabilities HTTP/1.1" 200 -
343 "GET /?cmd=capabilities HTTP/1.1" 401 -
344 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
344 "GET /?cmd=capabilities HTTP/1.1" 401 -
345 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
345 "GET /?cmd=capabilities HTTP/1.1" 401 -
346 "GET /?cmd=capabilities HTTP/1.1" 200 -
346 "GET /?cmd=capabilities HTTP/1.1" 200 -
347 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
347 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
348 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
348 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
349 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
350 "GET /?cmd=capabilities HTTP/1.1" 401 -
349 "GET /?cmd=capabilities HTTP/1.1" 200 -
351 "GET /?cmd=capabilities HTTP/1.1" 200 -
350 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
352 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
351 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
352 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
353 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
354 "GET /?cmd=capabilities HTTP/1.1" 200 -
355 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
356 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
357 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
353 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
358 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
354 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
355 "GET /?cmd=capabilities HTTP/1.1" 401 -
359 "GET /?cmd=capabilities HTTP/1.1" 200 -
356 "GET /?cmd=capabilities HTTP/1.1" 200 -
360 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
357 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
361 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
362 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
358 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
363 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
359 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
360 "GET /?cmd=capabilities HTTP/1.1" 401 -
364 "GET /?cmd=capabilities HTTP/1.1" 200 -
361 "GET /?cmd=capabilities HTTP/1.1" 200 -
365 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
362 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
366 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
367 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
363 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
368 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
364 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
365 "GET /?cmd=capabilities HTTP/1.1" 401 -
369 "GET /?cmd=capabilities HTTP/1.1" 200 -
366 "GET /?cmd=capabilities HTTP/1.1" 200 -
370 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
367 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
371 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
372 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
368 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
373 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
369 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
370 "GET /?cmd=capabilities HTTP/1.1" 401 -
374 "GET /?cmd=capabilities HTTP/1.1" 200 -
371 "GET /?cmd=capabilities HTTP/1.1" 200 -
375 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
372 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
376 "GET /?cmd=stream_out HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
377 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
373 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
378 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
374 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
379 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
375 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
376 "GET /?cmd=capabilities HTTP/1.1" 401 -
380 "GET /?cmd=capabilities HTTP/1.1" 200 -
377 "GET /?cmd=capabilities HTTP/1.1" 200 -
381 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
378 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
382 "GET /?cmd=getbundle HTTP/1.1" 401 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
383 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
379 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
384 "GET /?cmd=capabilities HTTP/1.1" 200 -
380 "GET /?cmd=capabilities HTTP/1.1" 401 -
385 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
381 "GET /?cmd=capabilities HTTP/1.1" 401 -
386 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
382 "GET /?cmd=capabilities HTTP/1.1" 403 -
387 "GET /?cmd=capabilities HTTP/1.1" 200 -
383 "GET /?cmd=capabilities HTTP/1.1" 401 -
388 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
389 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
390 "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
391 "GET /?cmd=capabilities HTTP/1.1" 200 -
384 "GET /?cmd=capabilities HTTP/1.1" 200 -
392 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
385 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
393 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
394 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
386 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
395 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
387 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
396 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
388 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
397 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
389 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
398 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
390 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
399 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
391 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
400 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
392 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
393 "GET /?cmd=capabilities HTTP/1.1" 401 -
401 "GET /?cmd=capabilities HTTP/1.1" 200 -
394 "GET /?cmd=capabilities HTTP/1.1" 200 -
402 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
395 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
403 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
404 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
396 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
405 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
397 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
406 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
398 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
407 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
399 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
408 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
400 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
409 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
401 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
410 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
402 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$
411
403
412 $ cd ..
404 $ cd ..
413
405
414 clone of serve with repo in root and unserved subrepo (issue2970)
406 clone of serve with repo in root and unserved subrepo (issue2970)
415
407
416 $ hg --cwd test init sub
408 $ hg --cwd test init sub
417 $ echo empty > test/sub/empty
409 $ echo empty > test/sub/empty
418 $ hg --cwd test/sub add empty
410 $ hg --cwd test/sub add empty
419 $ hg --cwd test/sub commit -qm 'add empty'
411 $ hg --cwd test/sub commit -qm 'add empty'
420 $ hg --cwd test/sub tag -r 0 something
412 $ hg --cwd test/sub tag -r 0 something
421 $ echo sub = sub > test/.hgsub
413 $ echo sub = sub > test/.hgsub
422 $ hg --cwd test add .hgsub
414 $ hg --cwd test add .hgsub
423 $ hg --cwd test commit -qm 'add subrepo'
415 $ hg --cwd test commit -qm 'add subrepo'
424 $ hg clone http://localhost:$HGPORT noslash-clone
416 $ hg clone http://localhost:$HGPORT noslash-clone
425 requesting all changes
417 requesting all changes
426 adding changesets
418 adding changesets
427 adding manifests
419 adding manifests
428 adding file changes
420 adding file changes
429 added 3 changesets with 7 changes to 7 files
421 added 3 changesets with 7 changes to 7 files
430 new changesets 8b6053c928fe:56f9bc90cce6
422 new changesets 8b6053c928fe:56f9bc90cce6
431 updating to branch default
423 updating to branch default
432 abort: HTTP Error 404: Not Found
424 abort: HTTP Error 404: Not Found
433 [255]
425 [255]
434 $ hg clone http://localhost:$HGPORT/ slash-clone
426 $ hg clone http://localhost:$HGPORT/ slash-clone
435 requesting all changes
427 requesting all changes
436 adding changesets
428 adding changesets
437 adding manifests
429 adding manifests
438 adding file changes
430 adding file changes
439 added 3 changesets with 7 changes to 7 files
431 added 3 changesets with 7 changes to 7 files
440 new changesets 8b6053c928fe:56f9bc90cce6
432 new changesets 8b6053c928fe:56f9bc90cce6
441 updating to branch default
433 updating to branch default
442 abort: HTTP Error 404: Not Found
434 abort: HTTP Error 404: Not Found
443 [255]
435 [255]
444
436
445 check error log
437 check error log
446
438
447 $ cat error.log
439 $ cat error.log
448
440
449 check abort error reporting while pulling/cloning
441 check abort error reporting while pulling/cloning
450
442
451 $ $RUNTESTDIR/killdaemons.py
443 $ $RUNTESTDIR/killdaemons.py
452 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
444 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
453 $ cat hg3.pid >> $DAEMON_PIDS
445 $ cat hg3.pid >> $DAEMON_PIDS
454 $ hg clone http://localhost:$HGPORT/ abort-clone
446 $ hg clone http://localhost:$HGPORT/ abort-clone
455 requesting all changes
447 requesting all changes
456 remote: abort: this is an exercise
448 remote: abort: this is an exercise
457 abort: pull failed on remote
449 abort: pull failed on remote
458 [255]
450 [255]
459 $ cat error.log
451 $ cat error.log
460
452
461 disable pull-based clones
453 disable pull-based clones
462
454
463 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
455 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
464 $ cat hg4.pid >> $DAEMON_PIDS
456 $ cat hg4.pid >> $DAEMON_PIDS
465 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
457 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
466 requesting all changes
458 requesting all changes
467 remote: abort: server has pull-based clones disabled
459 remote: abort: server has pull-based clones disabled
468 abort: pull failed on remote
460 abort: pull failed on remote
469 (remove --pull if specified or upgrade Mercurial)
461 (remove --pull if specified or upgrade Mercurial)
470 [255]
462 [255]
471
463
472 ... but keep stream clones working
464 ... but keep stream clones working
473
465
474 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
466 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
475 streaming all changes
467 streaming all changes
476 * files to transfer, * of data (glob)
468 * files to transfer, * of data (glob)
477 transferred * in * seconds (*/sec) (glob)
469 transferred * in * seconds (*/sec) (glob)
478 searching for changes
470 searching for changes
479 no changes found
471 no changes found
480 $ cat error.log
472 $ cat error.log
481
473
482 ... and also keep partial clones and pulls working
474 ... and also keep partial clones and pulls working
483 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
475 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
484 adding changesets
476 adding changesets
485 adding manifests
477 adding manifests
486 adding file changes
478 adding file changes
487 added 1 changesets with 4 changes to 4 files
479 added 1 changesets with 4 changes to 4 files
488 new changesets 8b6053c928fe
480 new changesets 8b6053c928fe
489 updating to branch default
481 updating to branch default
490 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
482 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
491 $ hg pull -R test-partial-clone
483 $ hg pull -R test-partial-clone
492 pulling from http://localhost:$HGPORT1/
484 pulling from http://localhost:$HGPORT1/
493 searching for changes
485 searching for changes
494 adding changesets
486 adding changesets
495 adding manifests
487 adding manifests
496 adding file changes
488 adding file changes
497 added 2 changesets with 3 changes to 3 files
489 added 2 changesets with 3 changes to 3 files
498 new changesets 5fed3813f7f5:56f9bc90cce6
490 new changesets 5fed3813f7f5:56f9bc90cce6
499 (run 'hg update' to get a working copy)
491 (run 'hg update' to get a working copy)
500
492
501 corrupt cookies file should yield a warning
493 corrupt cookies file should yield a warning
502
494
503 $ cat > $TESTTMP/cookies.txt << EOF
495 $ cat > $TESTTMP/cookies.txt << EOF
504 > bad format
496 > bad format
505 > EOF
497 > EOF
506
498
507 $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
499 $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
508 (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
500 (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
509 56f9bc90cce6
501 56f9bc90cce6
510
502
511 $ killdaemons.py
503 $ killdaemons.py
512
504
513 Create dummy authentication handler that looks for cookies. It doesn't do anything
505 Create dummy authentication handler that looks for cookies. It doesn't do anything
514 useful. It just raises an HTTP 500 with details about the Cookie request header.
506 useful. It just raises an HTTP 500 with details about the Cookie request header.
515 We raise HTTP 500 because its message is printed in the abort message.
507 We raise HTTP 500 because its message is printed in the abort message.
516
508
517 $ cat > cookieauth.py << EOF
509 $ cat > cookieauth.py << EOF
518 > from mercurial import util
510 > from mercurial import util
519 > from mercurial.hgweb import common
511 > from mercurial.hgweb import common
520 > def perform_authentication(hgweb, req, op):
512 > def perform_authentication(hgweb, req, op):
521 > cookie = req.env.get('HTTP_COOKIE')
513 > cookie = req.env.get('HTTP_COOKIE')
522 > if not cookie:
514 > if not cookie:
523 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'no-cookie')
515 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'no-cookie')
524 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'Cookie: %s' % cookie)
516 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'Cookie: %s' % cookie)
525 > def extsetup():
517 > def extsetup():
526 > common.permhooks.insert(0, perform_authentication)
518 > common.permhooks.insert(0, perform_authentication)
527 > EOF
519 > EOF
528
520
529 $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
521 $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
530 $ cat pid > $DAEMON_PIDS
522 $ cat pid > $DAEMON_PIDS
531
523
532 Request without cookie sent should fail due to lack of cookie
524 Request without cookie sent should fail due to lack of cookie
533
525
534 $ hg id http://localhost:$HGPORT
526 $ hg id http://localhost:$HGPORT
535 abort: HTTP Error 500: no-cookie
527 abort: HTTP Error 500: no-cookie
536 [255]
528 [255]
537
529
538 Populate a cookies file
530 Populate a cookies file
539
531
540 $ cat > cookies.txt << EOF
532 $ cat > cookies.txt << EOF
541 > # HTTP Cookie File
533 > # HTTP Cookie File
542 > # Expiration is 2030-01-01 at midnight
534 > # Expiration is 2030-01-01 at midnight
543 > .example.com TRUE / FALSE 1893456000 hgkey examplevalue
535 > .example.com TRUE / FALSE 1893456000 hgkey examplevalue
544 > EOF
536 > EOF
545
537
546 Should not send a cookie for another domain
538 Should not send a cookie for another domain
547
539
548 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
540 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
549 abort: HTTP Error 500: no-cookie
541 abort: HTTP Error 500: no-cookie
550 [255]
542 [255]
551
543
552 Add a cookie entry for our test server and verify it is sent
544 Add a cookie entry for our test server and verify it is sent
553
545
554 $ cat >> cookies.txt << EOF
546 $ cat >> cookies.txt << EOF
555 > localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue
547 > localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue
556 > EOF
548 > EOF
557
549
558 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
550 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
559 abort: HTTP Error 500: Cookie: hgkey=localhostvalue
551 abort: HTTP Error 500: Cookie: hgkey=localhostvalue
560 [255]
552 [255]
@@ -1,464 +1,464 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 This file contains testcases that tend to be related to the wire protocol part
11 This file contains testcases that tend to be related to the wire protocol part
12 of largefiles.
12 of largefiles.
13
13
14 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
14 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
15 $ mkdir "${USERCACHE}"
15 $ mkdir "${USERCACHE}"
16 $ cat >> $HGRCPATH <<EOF
16 $ cat >> $HGRCPATH <<EOF
17 > [extensions]
17 > [extensions]
18 > largefiles=
18 > largefiles=
19 > purge=
19 > purge=
20 > rebase=
20 > rebase=
21 > transplant=
21 > transplant=
22 > [phases]
22 > [phases]
23 > publish=False
23 > publish=False
24 > [largefiles]
24 > [largefiles]
25 > minsize=2
25 > minsize=2
26 > patterns=glob:**.dat
26 > patterns=glob:**.dat
27 > usercache=${USERCACHE}
27 > usercache=${USERCACHE}
28 > [web]
28 > [web]
29 > allow_archive = zip
29 > allow_archive = zip
30 > [hooks]
30 > [hooks]
31 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
31 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
32 > EOF
32 > EOF
33
33
34
34
35 #if serve
35 #if serve
36 vanilla clients not locked out from largefiles servers on vanilla repos
36 vanilla clients not locked out from largefiles servers on vanilla repos
37 $ mkdir r1
37 $ mkdir r1
38 $ cd r1
38 $ cd r1
39 $ hg init
39 $ hg init
40 $ echo c1 > f1
40 $ echo c1 > f1
41 $ hg add f1
41 $ hg add f1
42 $ hg commit -m "m1"
42 $ hg commit -m "m1"
43 Invoking status precommit hook
43 Invoking status precommit hook
44 A f1
44 A f1
45 $ cd ..
45 $ cd ..
46 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
46 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
47 $ cat hg.pid >> $DAEMON_PIDS
47 $ cat hg.pid >> $DAEMON_PIDS
48 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
48 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
49 requesting all changes
49 requesting all changes
50 adding changesets
50 adding changesets
51 adding manifests
51 adding manifests
52 adding file changes
52 adding file changes
53 added 1 changesets with 1 changes to 1 files
53 added 1 changesets with 1 changes to 1 files
54 new changesets b6eb3a2e2efe
54 new changesets b6eb3a2e2efe
55 updating to branch default
55 updating to branch default
56 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
57
57
58 largefiles clients still work with vanilla servers
58 largefiles clients still work with vanilla servers
59 $ hg serve --config extensions.largefiles=! -R r1 -d -p $HGPORT1 --pid-file hg.pid
59 $ hg serve --config extensions.largefiles=! -R r1 -d -p $HGPORT1 --pid-file hg.pid
60 $ cat hg.pid >> $DAEMON_PIDS
60 $ cat hg.pid >> $DAEMON_PIDS
61 $ hg clone http://localhost:$HGPORT1 r3
61 $ hg clone http://localhost:$HGPORT1 r3
62 requesting all changes
62 requesting all changes
63 adding changesets
63 adding changesets
64 adding manifests
64 adding manifests
65 adding file changes
65 adding file changes
66 added 1 changesets with 1 changes to 1 files
66 added 1 changesets with 1 changes to 1 files
67 new changesets b6eb3a2e2efe
67 new changesets b6eb3a2e2efe
68 updating to branch default
68 updating to branch default
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 #endif
70 #endif
71
71
72 vanilla clients locked out from largefiles http repos
72 vanilla clients locked out from largefiles http repos
73 $ mkdir r4
73 $ mkdir r4
74 $ cd r4
74 $ cd r4
75 $ hg init
75 $ hg init
76 $ echo c1 > f1
76 $ echo c1 > f1
77 $ hg add --large f1
77 $ hg add --large f1
78 $ hg commit -m "m1"
78 $ hg commit -m "m1"
79 Invoking status precommit hook
79 Invoking status precommit hook
80 A f1
80 A f1
81 $ cd ..
81 $ cd ..
82
82
83 largefiles can be pushed locally (issue3583)
83 largefiles can be pushed locally (issue3583)
84 $ hg init dest
84 $ hg init dest
85 $ cd r4
85 $ cd r4
86 $ hg outgoing ../dest
86 $ hg outgoing ../dest
87 comparing with ../dest
87 comparing with ../dest
88 searching for changes
88 searching for changes
89 changeset: 0:639881c12b4c
89 changeset: 0:639881c12b4c
90 tag: tip
90 tag: tip
91 user: test
91 user: test
92 date: Thu Jan 01 00:00:00 1970 +0000
92 date: Thu Jan 01 00:00:00 1970 +0000
93 summary: m1
93 summary: m1
94
94
95 $ hg push ../dest
95 $ hg push ../dest
96 pushing to ../dest
96 pushing to ../dest
97 searching for changes
97 searching for changes
98 adding changesets
98 adding changesets
99 adding manifests
99 adding manifests
100 adding file changes
100 adding file changes
101 added 1 changesets with 1 changes to 1 files
101 added 1 changesets with 1 changes to 1 files
102
102
103 exit code with nothing outgoing (issue3611)
103 exit code with nothing outgoing (issue3611)
104 $ hg outgoing ../dest
104 $ hg outgoing ../dest
105 comparing with ../dest
105 comparing with ../dest
106 searching for changes
106 searching for changes
107 no changes found
107 no changes found
108 [1]
108 [1]
109 $ cd ..
109 $ cd ..
110
110
111 #if serve
111 #if serve
112 $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid
112 $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid
113 $ cat hg.pid >> $DAEMON_PIDS
113 $ cat hg.pid >> $DAEMON_PIDS
114 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5
114 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5
115 abort: remote error:
115 abort: remote error:
116
116
117 This repository uses the largefiles extension.
117 This repository uses the largefiles extension.
118
118
119 Please enable it in your Mercurial config file.
119 Please enable it in your Mercurial config file.
120 [255]
120 [255]
121
121
122 used all HGPORTs, kill all daemons
122 used all HGPORTs, kill all daemons
123 $ killdaemons.py
123 $ killdaemons.py
124 #endif
124 #endif
125
125
126 vanilla clients locked out from largefiles ssh repos
126 vanilla clients locked out from largefiles ssh repos
127 $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
127 $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
128 remote:
128 remote:
129 remote: This repository uses the largefiles extension.
129 remote: This repository uses the largefiles extension.
130 remote:
130 remote:
131 remote: Please enable it in your Mercurial config file.
131 remote: Please enable it in your Mercurial config file.
132 remote:
132 remote:
133 remote: -
133 remote: -
134 abort: remote error
134 abort: remote error
135 (check previous remote output)
135 (check previous remote output)
136 [255]
136 [255]
137
137
138 #if serve
138 #if serve
139
139
140 largefiles clients refuse to push largefiles repos to vanilla servers
140 largefiles clients refuse to push largefiles repos to vanilla servers
141 $ mkdir r6
141 $ mkdir r6
142 $ cd r6
142 $ cd r6
143 $ hg init
143 $ hg init
144 $ echo c1 > f1
144 $ echo c1 > f1
145 $ hg add f1
145 $ hg add f1
146 $ hg commit -m "m1"
146 $ hg commit -m "m1"
147 Invoking status precommit hook
147 Invoking status precommit hook
148 A f1
148 A f1
149 $ cat >> .hg/hgrc <<!
149 $ cat >> .hg/hgrc <<!
150 > [web]
150 > [web]
151 > push_ssl = false
151 > push_ssl = false
152 > allow_push = *
152 > allow_push = *
153 > !
153 > !
154 $ cd ..
154 $ cd ..
155 $ hg clone r6 r7
155 $ hg clone r6 r7
156 updating to branch default
156 updating to branch default
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 $ cd r7
158 $ cd r7
159 $ echo c2 > f2
159 $ echo c2 > f2
160 $ hg add --large f2
160 $ hg add --large f2
161 $ hg commit -m "m2"
161 $ hg commit -m "m2"
162 Invoking status precommit hook
162 Invoking status precommit hook
163 A f2
163 A f2
164 $ hg verify --large
164 $ hg verify --large
165 checking changesets
165 checking changesets
166 checking manifests
166 checking manifests
167 crosschecking files in changesets and manifests
167 crosschecking files in changesets and manifests
168 checking files
168 checking files
169 2 files, 2 changesets, 2 total revisions
169 2 files, 2 changesets, 2 total revisions
170 searching 1 changesets for largefiles
170 searching 1 changesets for largefiles
171 verified existence of 1 revisions of 1 largefiles
171 verified existence of 1 revisions of 1 largefiles
172 $ hg serve --config extensions.largefiles=! -R ../r6 -d -p $HGPORT --pid-file ../hg.pid
172 $ hg serve --config extensions.largefiles=! -R ../r6 -d -p $HGPORT --pid-file ../hg.pid
173 $ cat ../hg.pid >> $DAEMON_PIDS
173 $ cat ../hg.pid >> $DAEMON_PIDS
174 $ hg push http://localhost:$HGPORT
174 $ hg push http://localhost:$HGPORT
175 pushing to http://localhost:$HGPORT/
175 pushing to http://localhost:$HGPORT/
176 searching for changes
176 searching for changes
177 abort: http://localhost:$HGPORT/ does not appear to be a largefile store
177 abort: http://localhost:$HGPORT/ does not appear to be a largefile store
178 [255]
178 [255]
179 $ cd ..
179 $ cd ..
180
180
181 putlfile errors are shown (issue3123)
181 putlfile errors are shown (issue3123)
182 Corrupt the cached largefile in r7 and move it out of the servers usercache
182 Corrupt the cached largefile in r7 and move it out of the servers usercache
183 $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 .
183 $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 .
184 $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
184 $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
185 $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8"
185 $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8"
186 $ hg init empty
186 $ hg init empty
187 $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \
187 $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \
188 > --config 'web.allow_push=*' --config web.push_ssl=False
188 > --config 'web.allow_push=*' --config web.push_ssl=False
189 $ cat hg.pid >> $DAEMON_PIDS
189 $ cat hg.pid >> $DAEMON_PIDS
190 $ hg push -R r7 http://localhost:$HGPORT1
190 $ hg push -R r7 http://localhost:$HGPORT1
191 pushing to http://localhost:$HGPORT1/
191 pushing to http://localhost:$HGPORT1/
192 searching for changes
192 searching for changes
193 remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash
193 remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash
194 abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/
194 abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/
195 [255]
195 [255]
196 $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
196 $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
197 Push of file that exists on server but is corrupted - magic healing would be nice ... but too magic
197 Push of file that exists on server but is corrupted - magic healing would be nice ... but too magic
198 $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
198 $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
199 $ hg push -R r7 http://localhost:$HGPORT1
199 $ hg push -R r7 http://localhost:$HGPORT1
200 pushing to http://localhost:$HGPORT1/
200 pushing to http://localhost:$HGPORT1/
201 searching for changes
201 searching for changes
202 remote: adding changesets
202 remote: adding changesets
203 remote: adding manifests
203 remote: adding manifests
204 remote: adding file changes
204 remote: adding file changes
205 remote: added 2 changesets with 2 changes to 2 files
205 remote: added 2 changesets with 2 changes to 2 files
206 $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
206 $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
207 server side corruption
207 server side corruption
208 $ rm -rf empty
208 $ rm -rf empty
209
209
210 Push a largefiles repository to a served empty repository
210 Push a largefiles repository to a served empty repository
211 $ hg init r8
211 $ hg init r8
212 $ echo c3 > r8/f1
212 $ echo c3 > r8/f1
213 $ hg add --large r8/f1 -R r8
213 $ hg add --large r8/f1 -R r8
214 $ hg commit -m "m1" -R r8
214 $ hg commit -m "m1" -R r8
215 Invoking status precommit hook
215 Invoking status precommit hook
216 A f1
216 A f1
217 $ hg init empty
217 $ hg init empty
218 $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \
218 $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \
219 > --config 'web.allow_push=*' --config web.push_ssl=False
219 > --config 'web.allow_push=*' --config web.push_ssl=False
220 $ cat hg.pid >> $DAEMON_PIDS
220 $ cat hg.pid >> $DAEMON_PIDS
221 $ rm "${USERCACHE}"/*
221 $ rm "${USERCACHE}"/*
222 $ hg push -R r8 http://localhost:$HGPORT2/#default
222 $ hg push -R r8 http://localhost:$HGPORT2/#default
223 pushing to http://localhost:$HGPORT2/
223 pushing to http://localhost:$HGPORT2/
224 searching for changes
224 searching for changes
225 remote: adding changesets
225 remote: adding changesets
226 remote: adding manifests
226 remote: adding manifests
227 remote: adding file changes
227 remote: adding file changes
228 remote: added 1 changesets with 1 changes to 1 files
228 remote: added 1 changesets with 1 changes to 1 files
229 $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
229 $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
230 $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
230 $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
231
231
232 Clone over http, no largefiles pulled on clone.
232 Clone over http, no largefiles pulled on clone.
233
233
234 $ hg clone http://localhost:$HGPORT2/#default http-clone -U
234 $ hg clone http://localhost:$HGPORT2/#default http-clone -U
235 adding changesets
235 adding changesets
236 adding manifests
236 adding manifests
237 adding file changes
237 adding file changes
238 added 1 changesets with 1 changes to 1 files
238 added 1 changesets with 1 changes to 1 files
239 new changesets cf03e5bb9936
239 new changesets cf03e5bb9936
240
240
241 Archive contains largefiles
241 Archive contains largefiles
242 >>> import os
242 >>> import os
243 >>> import urllib2
243 >>> import urllib2
244 >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
244 >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
245 >>> with open('archive.zip', 'w') as f:
245 >>> with open('archive.zip', 'w') as f:
246 ... f.write(urllib2.urlopen(u).read())
246 ... f.write(urllib2.urlopen(u).read())
247 $ unzip -t archive.zip
247 $ unzip -t archive.zip
248 Archive: archive.zip
248 Archive: archive.zip
249 testing: empty-default/.hg_archival.txt*OK (glob)
249 testing: empty-default/.hg_archival.txt*OK (glob)
250 testing: empty-default/f1*OK (glob)
250 testing: empty-default/f1*OK (glob)
251 No errors detected in compressed data of archive.zip.
251 No errors detected in compressed data of archive.zip.
252
252
253 test 'verify' with remotestore:
253 test 'verify' with remotestore:
254
254
255 $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90
255 $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90
256 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
256 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
257 $ hg -R http-clone verify --large --lfa
257 $ hg -R http-clone verify --large --lfa
258 checking changesets
258 checking changesets
259 checking manifests
259 checking manifests
260 crosschecking files in changesets and manifests
260 crosschecking files in changesets and manifests
261 checking files
261 checking files
262 1 files, 1 changesets, 1 total revisions
262 1 files, 1 changesets, 1 total revisions
263 searching 1 changesets for largefiles
263 searching 1 changesets for largefiles
264 changeset 0:cf03e5bb9936: f1 missing
264 changeset 0:cf03e5bb9936: f1 missing
265 verified existence of 1 revisions of 1 largefiles
265 verified existence of 1 revisions of 1 largefiles
266 [1]
266 [1]
267 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
267 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
268 $ hg -R http-clone -q verify --large --lfa
268 $ hg -R http-clone -q verify --large --lfa
269
269
270 largefiles pulled on update - a largefile missing on the server:
270 largefiles pulled on update - a largefile missing on the server:
271 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
271 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
272 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
272 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
273 getting changed largefiles
273 getting changed largefiles
274 f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/
274 f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/
275 0 largefiles updated, 0 removed
275 0 largefiles updated, 0 removed
276 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
276 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
277 $ hg -R http-clone st
277 $ hg -R http-clone st
278 ! f1
278 ! f1
279 $ hg -R http-clone up -Cqr null
279 $ hg -R http-clone up -Cqr null
280
280
281 largefiles pulled on update - a largefile corrupted on the server:
281 largefiles pulled on update - a largefile corrupted on the server:
282 $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
282 $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
283 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
283 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
284 getting changed largefiles
284 getting changed largefiles
285 f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
285 f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
286 0 largefiles updated, 0 removed
286 0 largefiles updated, 0 removed
287 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
287 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 $ hg -R http-clone st
288 $ hg -R http-clone st
289 ! f1
289 ! f1
290 $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
290 $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
291 $ [ ! -f http-clone/f1 ]
291 $ [ ! -f http-clone/f1 ]
292 $ [ ! -f http-clone-usercache ]
292 $ [ ! -f http-clone-usercache ]
293 $ hg -R http-clone verify --large --lfc
293 $ hg -R http-clone verify --large --lfc
294 checking changesets
294 checking changesets
295 checking manifests
295 checking manifests
296 crosschecking files in changesets and manifests
296 crosschecking files in changesets and manifests
297 checking files
297 checking files
298 1 files, 1 changesets, 1 total revisions
298 1 files, 1 changesets, 1 total revisions
299 searching 1 changesets for largefiles
299 searching 1 changesets for largefiles
300 verified contents of 1 revisions of 1 largefiles
300 verified contents of 1 revisions of 1 largefiles
301 $ hg -R http-clone up -Cqr null
301 $ hg -R http-clone up -Cqr null
302
302
303 largefiles pulled on update - no server side problems:
303 largefiles pulled on update - no server side problems:
304 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
304 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
305 $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true
305 $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true
306 resolving manifests
306 resolving manifests
307 branchmerge: False, force: False, partial: False
307 branchmerge: False, force: False, partial: False
308 ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
308 ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
309 .hglf/f1: remote created -> g
309 .hglf/f1: remote created -> g
310 getting .hglf/f1
310 getting .hglf/f1
311 updating: .hglf/f1 1/1 files (100.00%)
311 updating: .hglf/f1 1/1 files (100.00%)
312 getting changed largefiles
312 getting changed largefiles
313 using http://localhost:$HGPORT2/
313 using http://localhost:$HGPORT2/
314 sending capabilities command
314 sending capabilities command
315 sending batch command
315 sending batch command
316 getting largefiles: 0/1 files (0.00%)
316 getting largefiles: 0/1 files (0.00%)
317 getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
317 getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
318 sending getlfile command
318 sending getlfile command
319 found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
319 found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
320 1 largefiles updated, 0 removed
320 1 largefiles updated, 0 removed
321 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
322
322
323 $ ls http-clone-usercache/*
323 $ ls http-clone-usercache/*
324 http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90
324 http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90
325
325
326 $ rm -rf empty http-clone*
326 $ rm -rf empty http-clone*
327
327
328 used all HGPORTs, kill all daemons
328 used all HGPORTs, kill all daemons
329 $ killdaemons.py
329 $ killdaemons.py
330
330
331 largefiles should batch verify remote calls
331 largefiles should batch verify remote calls
332
332
333 $ hg init batchverifymain
333 $ hg init batchverifymain
334 $ cd batchverifymain
334 $ cd batchverifymain
335 $ echo "aaa" >> a
335 $ echo "aaa" >> a
336 $ hg add --large a
336 $ hg add --large a
337 $ hg commit -m "a"
337 $ hg commit -m "a"
338 Invoking status precommit hook
338 Invoking status precommit hook
339 A a
339 A a
340 $ echo "bbb" >> b
340 $ echo "bbb" >> b
341 $ hg add --large b
341 $ hg add --large b
342 $ hg commit -m "b"
342 $ hg commit -m "b"
343 Invoking status precommit hook
343 Invoking status precommit hook
344 A b
344 A b
345 $ cd ..
345 $ cd ..
346 $ hg serve -R batchverifymain -d -p $HGPORT --pid-file hg.pid \
346 $ hg serve -R batchverifymain -d -p $HGPORT --pid-file hg.pid \
347 > -A access.log
347 > -A access.log
348 $ cat hg.pid >> $DAEMON_PIDS
348 $ cat hg.pid >> $DAEMON_PIDS
349 $ hg clone --noupdate http://localhost:$HGPORT batchverifyclone
349 $ hg clone --noupdate http://localhost:$HGPORT batchverifyclone
350 requesting all changes
350 requesting all changes
351 adding changesets
351 adding changesets
352 adding manifests
352 adding manifests
353 adding file changes
353 adding file changes
354 added 2 changesets with 2 changes to 2 files
354 added 2 changesets with 2 changes to 2 files
355 new changesets 567253b0f523:04d19c27a332
355 new changesets 567253b0f523:04d19c27a332
356 $ hg -R batchverifyclone verify --large --lfa
356 $ hg -R batchverifyclone verify --large --lfa
357 checking changesets
357 checking changesets
358 checking manifests
358 checking manifests
359 crosschecking files in changesets and manifests
359 crosschecking files in changesets and manifests
360 checking files
360 checking files
361 2 files, 2 changesets, 2 total revisions
361 2 files, 2 changesets, 2 total revisions
362 searching 2 changesets for largefiles
362 searching 2 changesets for largefiles
363 verified existence of 2 revisions of 2 largefiles
363 verified existence of 2 revisions of 2 largefiles
364 $ tail -1 access.log
364 $ tail -1 access.log
365 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
365 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
366 $ hg -R batchverifyclone update
366 $ hg -R batchverifyclone update
367 getting changed largefiles
367 getting changed largefiles
368 2 largefiles updated, 0 removed
368 2 largefiles updated, 0 removed
369 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
369 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
370
370
371 Clear log file before next test
371 Clear log file before next test
372
372
373 $ printf "" > access.log
373 $ printf "" > access.log
374
374
375 Verify should check file on remote server only when file is not
375 Verify should check file on remote server only when file is not
376 available locally.
376 available locally.
377
377
378 $ echo "ccc" >> batchverifymain/c
378 $ echo "ccc" >> batchverifymain/c
379 $ hg -R batchverifymain status
379 $ hg -R batchverifymain status
380 ? c
380 ? c
381 $ hg -R batchverifymain add --large batchverifymain/c
381 $ hg -R batchverifymain add --large batchverifymain/c
382 $ hg -R batchverifymain commit -m "c"
382 $ hg -R batchverifymain commit -m "c"
383 Invoking status precommit hook
383 Invoking status precommit hook
384 A c
384 A c
385 $ hg -R batchverifyclone pull
385 $ hg -R batchverifyclone pull
386 pulling from http://localhost:$HGPORT/
386 pulling from http://localhost:$HGPORT/
387 searching for changes
387 searching for changes
388 adding changesets
388 adding changesets
389 adding manifests
389 adding manifests
390 adding file changes
390 adding file changes
391 added 1 changesets with 1 changes to 1 files
391 added 1 changesets with 1 changes to 1 files
392 new changesets 6bba8cb6935d
392 new changesets 6bba8cb6935d
393 (run 'hg update' to get a working copy)
393 (run 'hg update' to get a working copy)
394 $ hg -R batchverifyclone verify --lfa
394 $ hg -R batchverifyclone verify --lfa
395 checking changesets
395 checking changesets
396 checking manifests
396 checking manifests
397 crosschecking files in changesets and manifests
397 crosschecking files in changesets and manifests
398 checking files
398 checking files
399 3 files, 3 changesets, 3 total revisions
399 3 files, 3 changesets, 3 total revisions
400 searching 3 changesets for largefiles
400 searching 3 changesets for largefiles
401 verified existence of 3 revisions of 3 largefiles
401 verified existence of 3 revisions of 3 largefiles
402 $ tail -1 access.log
402 $ tail -1 access.log
403 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
403 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
404
404
405 $ killdaemons.py
405 $ killdaemons.py
406
406
407 largefiles should not ask for password again after successful authorization
407 largefiles should not ask for password again after successful authorization
408
408
409 $ hg init credentialmain
409 $ hg init credentialmain
410 $ cd credentialmain
410 $ cd credentialmain
411 $ echo "aaa" >> a
411 $ echo "aaa" >> a
412 $ hg add --large a
412 $ hg add --large a
413 $ hg commit -m "a"
413 $ hg commit -m "a"
414 Invoking status precommit hook
414 Invoking status precommit hook
415 A a
415 A a
416
416
417 Before running server clear the user cache to force clone to download
417 Before running server clear the user cache to force clone to download
418 a large file from the server rather than to get it from the cache
418 a large file from the server rather than to get it from the cache
419
419
420 $ rm "${USERCACHE}"/*
420 $ rm "${USERCACHE}"/*
421
421
422 $ cd ..
422 $ cd ..
423 $ cat << EOT > userpass.py
423 $ cat << EOT > userpass.py
424 > import base64
424 > import base64
425 > from mercurial.hgweb import common
425 > from mercurial.hgweb import common
426 > def perform_authentication(hgweb, req, op):
426 > def perform_authentication(hgweb, req, op):
427 > auth = req.env.get('HTTP_AUTHORIZATION')
427 > auth = req.env.get('HTTP_AUTHORIZATION')
428 > if not auth:
428 > if not auth:
429 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
429 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
430 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
430 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
431 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
431 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
432 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
432 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
433 > def extsetup():
433 > def extsetup():
434 > common.permhooks.insert(0, perform_authentication)
434 > common.permhooks.insert(0, perform_authentication)
435 > EOT
435 > EOT
436 $ hg serve --config extensions.x=userpass.py -R credentialmain \
436 $ hg serve --config extensions.x=userpass.py -R credentialmain \
437 > -d -p $HGPORT --pid-file hg.pid -A access.log
437 > -d -p $HGPORT --pid-file hg.pid -A access.log
438 $ cat hg.pid >> $DAEMON_PIDS
438 $ cat hg.pid >> $DAEMON_PIDS
439 $ cat << EOF > get_pass.py
439 $ cat << EOF > get_pass.py
440 > import getpass
440 > import getpass
441 > def newgetpass(arg):
441 > def newgetpass(arg):
442 > return "pass"
442 > return "pass"
443 > getpass.getpass = newgetpass
443 > getpass.getpass = newgetpass
444 > EOF
444 > EOF
445 $ hg clone --config ui.interactive=true --config extensions.getpass=get_pass.py \
445 $ hg clone --config ui.interactive=true --config extensions.getpass=get_pass.py \
446 > http://user@localhost:$HGPORT credentialclone
446 > http://user@localhost:$HGPORT credentialclone
447 requesting all changes
448 http authorization required for http://localhost:$HGPORT/
447 http authorization required for http://localhost:$HGPORT/
449 realm: mercurial
448 realm: mercurial
450 user: user
449 user: user
451 password: adding changesets
450 password: requesting all changes
451 adding changesets
452 adding manifests
452 adding manifests
453 adding file changes
453 adding file changes
454 added 1 changesets with 1 changes to 1 files
454 added 1 changesets with 1 changes to 1 files
455 new changesets 567253b0f523
455 new changesets 567253b0f523
456 updating to branch default
456 updating to branch default
457 getting changed largefiles
457 getting changed largefiles
458 1 largefiles updated, 0 removed
458 1 largefiles updated, 0 removed
459 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
459 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
460
460
461 $ killdaemons.py
461 $ killdaemons.py
462 $ rm hg.pid access.log
462 $ rm hg.pid access.log
463
463
464 #endif
464 #endif
@@ -1,1102 +1,1102 b''
1 # Initial setup
1 # Initial setup
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > lfs=
5 > lfs=
6 > [lfs]
6 > [lfs]
7 > # Test deprecated config
7 > # Test deprecated config
8 > threshold=1000B
8 > threshold=1000B
9 > EOF
9 > EOF
10
10
11 $ LONG=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
11 $ LONG=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
12
12
13 # Prepare server and enable extension
13 # Prepare server and enable extension
14 $ hg init server
14 $ hg init server
15 $ hg clone -q server client
15 $ hg clone -q server client
16 $ cd client
16 $ cd client
17
17
18 # Commit small file
18 # Commit small file
19 $ echo s > smallfile
19 $ echo s > smallfile
20 $ echo '**.py = LF' > .hgeol
20 $ echo '**.py = LF' > .hgeol
21 $ hg --config lfs.track='"size(\">1000B\")"' commit -Aqm "add small file"
21 $ hg --config lfs.track='"size(\">1000B\")"' commit -Aqm "add small file"
22 hg: parse error: unsupported file pattern: size(">1000B")
22 hg: parse error: unsupported file pattern: size(">1000B")
23 (paths must be prefixed with "path:")
23 (paths must be prefixed with "path:")
24 [255]
24 [255]
25 $ hg --config lfs.track='size(">1000B")' commit -Aqm "add small file"
25 $ hg --config lfs.track='size(">1000B")' commit -Aqm "add small file"
26
26
27 # Commit large file
27 # Commit large file
28 $ echo $LONG > largefile
28 $ echo $LONG > largefile
29 $ grep lfs .hg/requires
29 $ grep lfs .hg/requires
30 [1]
30 [1]
31 $ hg commit --traceback -Aqm "add large file"
31 $ hg commit --traceback -Aqm "add large file"
32 $ grep lfs .hg/requires
32 $ grep lfs .hg/requires
33 lfs
33 lfs
34
34
35 # Ensure metadata is stored
35 # Ensure metadata is stored
36 $ hg debugdata largefile 0
36 $ hg debugdata largefile 0
37 version https://git-lfs.github.com/spec/v1
37 version https://git-lfs.github.com/spec/v1
38 oid sha256:f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
38 oid sha256:f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
39 size 1501
39 size 1501
40 x-is-binary 0
40 x-is-binary 0
41
41
42 # Check the blobstore is populated
42 # Check the blobstore is populated
43 $ find .hg/store/lfs/objects | sort
43 $ find .hg/store/lfs/objects | sort
44 .hg/store/lfs/objects
44 .hg/store/lfs/objects
45 .hg/store/lfs/objects/f1
45 .hg/store/lfs/objects/f1
46 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
46 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
47
47
48 # Check the blob stored contains the actual contents of the file
48 # Check the blob stored contains the actual contents of the file
49 $ cat .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
49 $ cat .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
50 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
50 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
51
51
52 # Push changes to the server
52 # Push changes to the server
53
53
54 $ hg push
54 $ hg push
55 pushing to $TESTTMP/server
55 pushing to $TESTTMP/server
56 searching for changes
56 searching for changes
57 abort: lfs.url needs to be configured
57 abort: lfs.url needs to be configured
58 [255]
58 [255]
59
59
60 $ cat >> $HGRCPATH << EOF
60 $ cat >> $HGRCPATH << EOF
61 > [lfs]
61 > [lfs]
62 > url=file:$TESTTMP/dummy-remote/
62 > url=file:$TESTTMP/dummy-remote/
63 > EOF
63 > EOF
64
64
65 Push to a local non-lfs repo with the extension enabled will add the
65 Push to a local non-lfs repo with the extension enabled will add the
66 lfs requirement
66 lfs requirement
67
67
68 $ grep lfs $TESTTMP/server/.hg/requires
68 $ grep lfs $TESTTMP/server/.hg/requires
69 [1]
69 [1]
70 $ hg push -v | egrep -v '^(uncompressed| )'
70 $ hg push -v | egrep -v '^(uncompressed| )'
71 pushing to $TESTTMP/server
71 pushing to $TESTTMP/server
72 searching for changes
72 searching for changes
73 lfs: found f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b in the local lfs store
73 lfs: found f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b in the local lfs store
74 2 changesets found
74 2 changesets found
75 adding changesets
75 adding changesets
76 adding manifests
76 adding manifests
77 adding file changes
77 adding file changes
78 added 2 changesets with 3 changes to 3 files
78 added 2 changesets with 3 changes to 3 files
79 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
79 calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
80 $ grep lfs $TESTTMP/server/.hg/requires
80 $ grep lfs $TESTTMP/server/.hg/requires
81 lfs
81 lfs
82
82
83 # Unknown URL scheme
83 # Unknown URL scheme
84
84
85 $ hg push --config lfs.url=ftp://foobar
85 $ hg push --config lfs.url=ftp://foobar
86 abort: lfs: unknown url scheme: ftp
86 abort: lfs: unknown url scheme: ftp
87 [255]
87 [255]
88
88
89 $ cd ../
89 $ cd ../
90
90
91 # Initialize new client (not cloning) and setup extension
91 # Initialize new client (not cloning) and setup extension
92 $ hg init client2
92 $ hg init client2
93 $ cd client2
93 $ cd client2
94 $ cat >> .hg/hgrc <<EOF
94 $ cat >> .hg/hgrc <<EOF
95 > [paths]
95 > [paths]
96 > default = $TESTTMP/server
96 > default = $TESTTMP/server
97 > EOF
97 > EOF
98
98
99 # Pull from server
99 # Pull from server
100
100
101 Pulling a local lfs repo into a local non-lfs repo with the extension
101 Pulling a local lfs repo into a local non-lfs repo with the extension
102 enabled adds the lfs requirement
102 enabled adds the lfs requirement
103
103
104 $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
104 $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
105 $TESTTMP/server/.hg/requires:lfs
105 $TESTTMP/server/.hg/requires:lfs
106 $ hg pull default
106 $ hg pull default
107 pulling from $TESTTMP/server
107 pulling from $TESTTMP/server
108 requesting all changes
108 requesting all changes
109 adding changesets
109 adding changesets
110 adding manifests
110 adding manifests
111 adding file changes
111 adding file changes
112 added 2 changesets with 3 changes to 3 files
112 added 2 changesets with 3 changes to 3 files
113 new changesets 0ead593177f7:b88141481348
113 new changesets 0ead593177f7:b88141481348
114 (run 'hg update' to get a working copy)
114 (run 'hg update' to get a working copy)
115 $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
115 $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
116 .hg/requires:lfs
116 .hg/requires:lfs
117 $TESTTMP/server/.hg/requires:lfs
117 $TESTTMP/server/.hg/requires:lfs
118
118
119 # Check the blobstore is not yet populated
119 # Check the blobstore is not yet populated
120 $ [ -d .hg/store/lfs/objects ]
120 $ [ -d .hg/store/lfs/objects ]
121 [1]
121 [1]
122
122
123 # Update to the last revision containing the large file
123 # Update to the last revision containing the large file
124 $ hg update
124 $ hg update
125 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
125 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
126
126
127 # Check the blobstore has been populated on update
127 # Check the blobstore has been populated on update
128 $ find .hg/store/lfs/objects | sort
128 $ find .hg/store/lfs/objects | sort
129 .hg/store/lfs/objects
129 .hg/store/lfs/objects
130 .hg/store/lfs/objects/f1
130 .hg/store/lfs/objects/f1
131 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
131 .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b
132
132
133 # Check the contents of the file are fetched from blobstore when requested
133 # Check the contents of the file are fetched from blobstore when requested
134 $ hg cat -r . largefile
134 $ hg cat -r . largefile
135 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
135 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
136
136
137 # Check the file has been copied in the working copy
137 # Check the file has been copied in the working copy
138 $ cat largefile
138 $ cat largefile
139 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
139 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
140
140
141 $ cd ..
141 $ cd ..
142
142
143 # Check rename, and switch between large and small files
143 # Check rename, and switch between large and small files
144
144
145 $ hg init repo3
145 $ hg init repo3
146 $ cd repo3
146 $ cd repo3
147 $ cat >> .hg/hgrc << EOF
147 $ cat >> .hg/hgrc << EOF
148 > [lfs]
148 > [lfs]
149 > track=size(">10B")
149 > track=size(">10B")
150 > EOF
150 > EOF
151
151
152 $ echo LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS > large
152 $ echo LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS > large
153 $ echo SHORTER > small
153 $ echo SHORTER > small
154 $ hg add . -q
154 $ hg add . -q
155 $ hg commit -m 'commit with lfs content'
155 $ hg commit -m 'commit with lfs content'
156
156
157 $ hg files -r . 'set:added()'
157 $ hg files -r . 'set:added()'
158 large
158 large
159 small
159 small
160 $ hg files -r . 'set:added() & lfs()'
160 $ hg files -r . 'set:added() & lfs()'
161 large
161 large
162
162
163 $ hg mv large l
163 $ hg mv large l
164 $ hg mv small s
164 $ hg mv small s
165 $ hg status 'set:removed()'
165 $ hg status 'set:removed()'
166 R large
166 R large
167 R small
167 R small
168 $ hg status 'set:removed() & lfs()'
168 $ hg status 'set:removed() & lfs()'
169 R large
169 R large
170 $ hg commit -m 'renames'
170 $ hg commit -m 'renames'
171
171
172 $ hg files -r . 'set:copied()'
172 $ hg files -r . 'set:copied()'
173 l
173 l
174 s
174 s
175 $ hg files -r . 'set:copied() & lfs()'
175 $ hg files -r . 'set:copied() & lfs()'
176 l
176 l
177 $ hg status --change . 'set:removed()'
177 $ hg status --change . 'set:removed()'
178 R large
178 R large
179 R small
179 R small
180 $ hg status --change . 'set:removed() & lfs()'
180 $ hg status --change . 'set:removed() & lfs()'
181 R large
181 R large
182
182
183 $ echo SHORT > l
183 $ echo SHORT > l
184 $ echo BECOME-LARGER-FROM-SHORTER > s
184 $ echo BECOME-LARGER-FROM-SHORTER > s
185 $ hg commit -m 'large to small, small to large'
185 $ hg commit -m 'large to small, small to large'
186
186
187 $ echo 1 >> l
187 $ echo 1 >> l
188 $ echo 2 >> s
188 $ echo 2 >> s
189 $ hg commit -m 'random modifications'
189 $ hg commit -m 'random modifications'
190
190
191 $ echo RESTORE-TO-BE-LARGE > l
191 $ echo RESTORE-TO-BE-LARGE > l
192 $ echo SHORTER > s
192 $ echo SHORTER > s
193 $ hg commit -m 'switch large and small again'
193 $ hg commit -m 'switch large and small again'
194
194
195 # Test lfs_files template
195 # Test lfs_files template
196
196
197 $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
197 $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
198 0 large
198 0 large
199 1 l, large
199 1 l, large
200 2 s
200 2 s
201 3 s
201 3 s
202 4 l
202 4 l
203
203
204 # Push and pull the above repo
204 # Push and pull the above repo
205
205
206 $ hg --cwd .. init repo4
206 $ hg --cwd .. init repo4
207 $ hg push ../repo4
207 $ hg push ../repo4
208 pushing to ../repo4
208 pushing to ../repo4
209 searching for changes
209 searching for changes
210 adding changesets
210 adding changesets
211 adding manifests
211 adding manifests
212 adding file changes
212 adding file changes
213 added 5 changesets with 10 changes to 4 files
213 added 5 changesets with 10 changes to 4 files
214
214
215 $ hg --cwd .. init repo5
215 $ hg --cwd .. init repo5
216 $ hg --cwd ../repo5 pull ../repo3
216 $ hg --cwd ../repo5 pull ../repo3
217 pulling from ../repo3
217 pulling from ../repo3
218 requesting all changes
218 requesting all changes
219 adding changesets
219 adding changesets
220 adding manifests
220 adding manifests
221 adding file changes
221 adding file changes
222 added 5 changesets with 10 changes to 4 files
222 added 5 changesets with 10 changes to 4 files
223 new changesets fd47a419c4f7:5adf850972b9
223 new changesets fd47a419c4f7:5adf850972b9
224 (run 'hg update' to get a working copy)
224 (run 'hg update' to get a working copy)
225
225
226 $ cd ..
226 $ cd ..
227
227
228 # Test clone
228 # Test clone
229
229
230 $ hg init repo6
230 $ hg init repo6
231 $ cd repo6
231 $ cd repo6
232 $ cat >> .hg/hgrc << EOF
232 $ cat >> .hg/hgrc << EOF
233 > [lfs]
233 > [lfs]
234 > track=size(">30B")
234 > track=size(">30B")
235 > EOF
235 > EOF
236
236
237 $ echo LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES > large
237 $ echo LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES > large
238 $ echo SMALL > small
238 $ echo SMALL > small
239 $ hg commit -Aqm 'create a lfs file' large small
239 $ hg commit -Aqm 'create a lfs file' large small
240 $ hg debuglfsupload -r 'all()' -v
240 $ hg debuglfsupload -r 'all()' -v
241 lfs: found 8e92251415339ae9b148c8da89ed5ec665905166a1ab11b09dca8fad83344738 in the local lfs store
241 lfs: found 8e92251415339ae9b148c8da89ed5ec665905166a1ab11b09dca8fad83344738 in the local lfs store
242
242
243 $ cd ..
243 $ cd ..
244
244
245 $ hg clone repo6 repo7
245 $ hg clone repo6 repo7
246 updating to branch default
246 updating to branch default
247 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
248 $ cd repo7
248 $ cd repo7
249 $ hg config extensions --debug | grep lfs
249 $ hg config extensions --debug | grep lfs
250 $TESTTMP/repo7/.hg/hgrc:*: extensions.lfs= (glob)
250 $TESTTMP/repo7/.hg/hgrc:*: extensions.lfs= (glob)
251 $ cat large
251 $ cat large
252 LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES
252 LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES
253 $ cat small
253 $ cat small
254 SMALL
254 SMALL
255
255
256 $ cd ..
256 $ cd ..
257
257
258 $ hg --config extensions.share= share repo7 sharedrepo
258 $ hg --config extensions.share= share repo7 sharedrepo
259 updating working directory
259 updating working directory
260 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
260 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
261 $ hg -R sharedrepo config extensions --debug | grep lfs
261 $ hg -R sharedrepo config extensions --debug | grep lfs
262 $TESTTMP/sharedrepo/.hg/hgrc:*: extensions.lfs= (glob)
262 $TESTTMP/sharedrepo/.hg/hgrc:*: extensions.lfs= (glob)
263
263
264 # Test rename and status
264 # Test rename and status
265
265
266 $ hg init repo8
266 $ hg init repo8
267 $ cd repo8
267 $ cd repo8
268 $ cat >> .hg/hgrc << EOF
268 $ cat >> .hg/hgrc << EOF
269 > [lfs]
269 > [lfs]
270 > track=size(">10B")
270 > track=size(">10B")
271 > EOF
271 > EOF
272
272
273 $ echo THIS-IS-LFS-BECAUSE-10-BYTES > a1
273 $ echo THIS-IS-LFS-BECAUSE-10-BYTES > a1
274 $ echo SMALL > a2
274 $ echo SMALL > a2
275 $ hg commit -m a -A a1 a2
275 $ hg commit -m a -A a1 a2
276 $ hg status
276 $ hg status
277 $ hg mv a1 b1
277 $ hg mv a1 b1
278 $ hg mv a2 a1
278 $ hg mv a2 a1
279 $ hg mv b1 a2
279 $ hg mv b1 a2
280 $ hg commit -m b
280 $ hg commit -m b
281 $ hg status
281 $ hg status
282 >>> with open('a2', 'wb') as f:
282 >>> with open('a2', 'wb') as f:
283 ... f.write(b'\1\nSTART-WITH-HG-FILELOG-METADATA')
283 ... f.write(b'\1\nSTART-WITH-HG-FILELOG-METADATA')
284 >>> with open('a1', 'wb') as f:
284 >>> with open('a1', 'wb') as f:
285 ... f.write(b'\1\nMETA\n')
285 ... f.write(b'\1\nMETA\n')
286 $ hg commit -m meta
286 $ hg commit -m meta
287 $ hg status
287 $ hg status
288 $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n'
288 $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n'
289 2: | |
289 2: | |
290 1: a1 (a2)a2 (a1) | |
290 1: a1 (a2)a2 (a1) | |
291 0: | | a1 a2
291 0: | | a1 a2
292
292
293 $ for n in a1 a2; do
293 $ for n in a1 a2; do
294 > for r in 0 1 2; do
294 > for r in 0 1 2; do
295 > printf '\n%s @ %s\n' $n $r
295 > printf '\n%s @ %s\n' $n $r
296 > hg debugdata $n $r
296 > hg debugdata $n $r
297 > done
297 > done
298 > done
298 > done
299
299
300 a1 @ 0
300 a1 @ 0
301 version https://git-lfs.github.com/spec/v1
301 version https://git-lfs.github.com/spec/v1
302 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
302 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
303 size 29
303 size 29
304 x-is-binary 0
304 x-is-binary 0
305
305
306 a1 @ 1
306 a1 @ 1
307 \x01 (esc)
307 \x01 (esc)
308 copy: a2
308 copy: a2
309 copyrev: 50470ad23cf937b1f4b9f80bfe54df38e65b50d9
309 copyrev: 50470ad23cf937b1f4b9f80bfe54df38e65b50d9
310 \x01 (esc)
310 \x01 (esc)
311 SMALL
311 SMALL
312
312
313 a1 @ 2
313 a1 @ 2
314 \x01 (esc)
314 \x01 (esc)
315 \x01 (esc)
315 \x01 (esc)
316 \x01 (esc)
316 \x01 (esc)
317 META
317 META
318
318
319 a2 @ 0
319 a2 @ 0
320 SMALL
320 SMALL
321
321
322 a2 @ 1
322 a2 @ 1
323 version https://git-lfs.github.com/spec/v1
323 version https://git-lfs.github.com/spec/v1
324 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
324 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
325 size 29
325 size 29
326 x-hg-copy a1
326 x-hg-copy a1
327 x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4
327 x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4
328 x-is-binary 0
328 x-is-binary 0
329
329
330 a2 @ 2
330 a2 @ 2
331 version https://git-lfs.github.com/spec/v1
331 version https://git-lfs.github.com/spec/v1
332 oid sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
332 oid sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
333 size 32
333 size 32
334 x-is-binary 0
334 x-is-binary 0
335
335
336 # Verify commit hashes include rename metadata
336 # Verify commit hashes include rename metadata
337
337
338 $ hg log -T '{rev}:{node|short} {desc}\n'
338 $ hg log -T '{rev}:{node|short} {desc}\n'
339 2:0fae949de7fa meta
339 2:0fae949de7fa meta
340 1:9cd6bdffdac0 b
340 1:9cd6bdffdac0 b
341 0:7f96794915f7 a
341 0:7f96794915f7 a
342
342
343 $ cd ..
343 $ cd ..
344
344
345 # Test bundle
345 # Test bundle
346
346
347 $ hg init repo9
347 $ hg init repo9
348 $ cd repo9
348 $ cd repo9
349 $ cat >> .hg/hgrc << EOF
349 $ cat >> .hg/hgrc << EOF
350 > [lfs]
350 > [lfs]
351 > track=size(">10B")
351 > track=size(">10B")
352 > [diff]
352 > [diff]
353 > git=1
353 > git=1
354 > EOF
354 > EOF
355
355
356 $ for i in 0 single two three 4; do
356 $ for i in 0 single two three 4; do
357 > echo 'THIS-IS-LFS-'$i > a
357 > echo 'THIS-IS-LFS-'$i > a
358 > hg commit -m a-$i -A a
358 > hg commit -m a-$i -A a
359 > done
359 > done
360
360
361 $ hg update 2 -q
361 $ hg update 2 -q
362 $ echo 'THIS-IS-LFS-2-CHILD' > a
362 $ echo 'THIS-IS-LFS-2-CHILD' > a
363 $ hg commit -m branching -q
363 $ hg commit -m branching -q
364
364
365 $ hg bundle --base 1 bundle.hg -v
365 $ hg bundle --base 1 bundle.hg -v
366 lfs: found 5ab7a3739a5feec94a562d070a14f36dba7cad17e5484a4a89eea8e5f3166888 in the local lfs store
366 lfs: found 5ab7a3739a5feec94a562d070a14f36dba7cad17e5484a4a89eea8e5f3166888 in the local lfs store
367 lfs: found a9c7d1cd6ce2b9bbdf46ed9a862845228717b921c089d0d42e3bcaed29eb612e in the local lfs store
367 lfs: found a9c7d1cd6ce2b9bbdf46ed9a862845228717b921c089d0d42e3bcaed29eb612e in the local lfs store
368 lfs: found f693890c49c409ec33673b71e53f297681f76c1166daf33b2ad7ebf8b1d3237e in the local lfs store
368 lfs: found f693890c49c409ec33673b71e53f297681f76c1166daf33b2ad7ebf8b1d3237e in the local lfs store
369 lfs: found fda198fea753eb66a252e9856915e1f5cddbe41723bd4b695ece2604ad3c9f75 in the local lfs store
369 lfs: found fda198fea753eb66a252e9856915e1f5cddbe41723bd4b695ece2604ad3c9f75 in the local lfs store
370 4 changesets found
370 4 changesets found
371 uncompressed size of bundle content:
371 uncompressed size of bundle content:
372 * (changelog) (glob)
372 * (changelog) (glob)
373 * (manifests) (glob)
373 * (manifests) (glob)
374 * a (glob)
374 * a (glob)
375 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
375 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
376 $ hg -R bundle.hg log -p -T '{rev} {desc}\n' a
376 $ hg -R bundle.hg log -p -T '{rev} {desc}\n' a
377 5 branching
377 5 branching
378 diff --git a/a b/a
378 diff --git a/a b/a
379 --- a/a
379 --- a/a
380 +++ b/a
380 +++ b/a
381 @@ -1,1 +1,1 @@
381 @@ -1,1 +1,1 @@
382 -THIS-IS-LFS-two
382 -THIS-IS-LFS-two
383 +THIS-IS-LFS-2-CHILD
383 +THIS-IS-LFS-2-CHILD
384
384
385 4 a-4
385 4 a-4
386 diff --git a/a b/a
386 diff --git a/a b/a
387 --- a/a
387 --- a/a
388 +++ b/a
388 +++ b/a
389 @@ -1,1 +1,1 @@
389 @@ -1,1 +1,1 @@
390 -THIS-IS-LFS-three
390 -THIS-IS-LFS-three
391 +THIS-IS-LFS-4
391 +THIS-IS-LFS-4
392
392
393 3 a-three
393 3 a-three
394 diff --git a/a b/a
394 diff --git a/a b/a
395 --- a/a
395 --- a/a
396 +++ b/a
396 +++ b/a
397 @@ -1,1 +1,1 @@
397 @@ -1,1 +1,1 @@
398 -THIS-IS-LFS-two
398 -THIS-IS-LFS-two
399 +THIS-IS-LFS-three
399 +THIS-IS-LFS-three
400
400
401 2 a-two
401 2 a-two
402 diff --git a/a b/a
402 diff --git a/a b/a
403 --- a/a
403 --- a/a
404 +++ b/a
404 +++ b/a
405 @@ -1,1 +1,1 @@
405 @@ -1,1 +1,1 @@
406 -THIS-IS-LFS-single
406 -THIS-IS-LFS-single
407 +THIS-IS-LFS-two
407 +THIS-IS-LFS-two
408
408
409 1 a-single
409 1 a-single
410 diff --git a/a b/a
410 diff --git a/a b/a
411 --- a/a
411 --- a/a
412 +++ b/a
412 +++ b/a
413 @@ -1,1 +1,1 @@
413 @@ -1,1 +1,1 @@
414 -THIS-IS-LFS-0
414 -THIS-IS-LFS-0
415 +THIS-IS-LFS-single
415 +THIS-IS-LFS-single
416
416
417 0 a-0
417 0 a-0
418 diff --git a/a b/a
418 diff --git a/a b/a
419 new file mode 100644
419 new file mode 100644
420 --- /dev/null
420 --- /dev/null
421 +++ b/a
421 +++ b/a
422 @@ -0,0 +1,1 @@
422 @@ -0,0 +1,1 @@
423 +THIS-IS-LFS-0
423 +THIS-IS-LFS-0
424
424
425 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
425 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
426 $ hg -R bundle-again.hg log -p -T '{rev} {desc}\n' a
426 $ hg -R bundle-again.hg log -p -T '{rev} {desc}\n' a
427 5 branching
427 5 branching
428 diff --git a/a b/a
428 diff --git a/a b/a
429 --- a/a
429 --- a/a
430 +++ b/a
430 +++ b/a
431 @@ -1,1 +1,1 @@
431 @@ -1,1 +1,1 @@
432 -THIS-IS-LFS-two
432 -THIS-IS-LFS-two
433 +THIS-IS-LFS-2-CHILD
433 +THIS-IS-LFS-2-CHILD
434
434
435 4 a-4
435 4 a-4
436 diff --git a/a b/a
436 diff --git a/a b/a
437 --- a/a
437 --- a/a
438 +++ b/a
438 +++ b/a
439 @@ -1,1 +1,1 @@
439 @@ -1,1 +1,1 @@
440 -THIS-IS-LFS-three
440 -THIS-IS-LFS-three
441 +THIS-IS-LFS-4
441 +THIS-IS-LFS-4
442
442
443 3 a-three
443 3 a-three
444 diff --git a/a b/a
444 diff --git a/a b/a
445 --- a/a
445 --- a/a
446 +++ b/a
446 +++ b/a
447 @@ -1,1 +1,1 @@
447 @@ -1,1 +1,1 @@
448 -THIS-IS-LFS-two
448 -THIS-IS-LFS-two
449 +THIS-IS-LFS-three
449 +THIS-IS-LFS-three
450
450
451 2 a-two
451 2 a-two
452 diff --git a/a b/a
452 diff --git a/a b/a
453 --- a/a
453 --- a/a
454 +++ b/a
454 +++ b/a
455 @@ -1,1 +1,1 @@
455 @@ -1,1 +1,1 @@
456 -THIS-IS-LFS-single
456 -THIS-IS-LFS-single
457 +THIS-IS-LFS-two
457 +THIS-IS-LFS-two
458
458
459 1 a-single
459 1 a-single
460 diff --git a/a b/a
460 diff --git a/a b/a
461 --- a/a
461 --- a/a
462 +++ b/a
462 +++ b/a
463 @@ -1,1 +1,1 @@
463 @@ -1,1 +1,1 @@
464 -THIS-IS-LFS-0
464 -THIS-IS-LFS-0
465 +THIS-IS-LFS-single
465 +THIS-IS-LFS-single
466
466
467 0 a-0
467 0 a-0
468 diff --git a/a b/a
468 diff --git a/a b/a
469 new file mode 100644
469 new file mode 100644
470 --- /dev/null
470 --- /dev/null
471 +++ b/a
471 +++ b/a
472 @@ -0,0 +1,1 @@
472 @@ -0,0 +1,1 @@
473 +THIS-IS-LFS-0
473 +THIS-IS-LFS-0
474
474
475 $ cd ..
475 $ cd ..
476
476
477 # Test isbinary
477 # Test isbinary
478
478
479 $ hg init repo10
479 $ hg init repo10
480 $ cd repo10
480 $ cd repo10
481 $ cat >> .hg/hgrc << EOF
481 $ cat >> .hg/hgrc << EOF
482 > [extensions]
482 > [extensions]
483 > lfs=
483 > lfs=
484 > [lfs]
484 > [lfs]
485 > track=all()
485 > track=all()
486 > EOF
486 > EOF
487 $ $PYTHON <<'EOF'
487 $ $PYTHON <<'EOF'
488 > def write(path, content):
488 > def write(path, content):
489 > with open(path, 'wb') as f:
489 > with open(path, 'wb') as f:
490 > f.write(content)
490 > f.write(content)
491 > write('a', b'\0\0')
491 > write('a', b'\0\0')
492 > write('b', b'\1\n')
492 > write('b', b'\1\n')
493 > write('c', b'\1\n\0')
493 > write('c', b'\1\n\0')
494 > write('d', b'xx')
494 > write('d', b'xx')
495 > EOF
495 > EOF
496 $ hg add a b c d
496 $ hg add a b c d
497 $ hg diff --stat
497 $ hg diff --stat
498 a | Bin
498 a | Bin
499 b | 1 +
499 b | 1 +
500 c | Bin
500 c | Bin
501 d | 1 +
501 d | 1 +
502 4 files changed, 2 insertions(+), 0 deletions(-)
502 4 files changed, 2 insertions(+), 0 deletions(-)
503 $ hg commit -m binarytest
503 $ hg commit -m binarytest
504 $ cat > $TESTTMP/dumpbinary.py << EOF
504 $ cat > $TESTTMP/dumpbinary.py << EOF
505 > def reposetup(ui, repo):
505 > def reposetup(ui, repo):
506 > for n in 'abcd':
506 > for n in 'abcd':
507 > ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary()))
507 > ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary()))
508 > EOF
508 > EOF
509 $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace
509 $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace
510 a: binary=True
510 a: binary=True
511 b: binary=False
511 b: binary=False
512 c: binary=True
512 c: binary=True
513 d: binary=False
513 d: binary=False
514 b55353847f02 tip
514 b55353847f02 tip
515
515
516 $ cd ..
516 $ cd ..
517
517
518 # Test fctx.cmp fastpath - diff without LFS blobs
518 # Test fctx.cmp fastpath - diff without LFS blobs
519
519
520 $ hg init repo12
520 $ hg init repo12
521 $ cd repo12
521 $ cd repo12
522 $ cat >> .hg/hgrc <<EOF
522 $ cat >> .hg/hgrc <<EOF
523 > [lfs]
523 > [lfs]
524 > threshold=1
524 > threshold=1
525 > EOF
525 > EOF
526 $ cat > ../patch.diff <<EOF
526 $ cat > ../patch.diff <<EOF
527 > # HG changeset patch
527 > # HG changeset patch
528 > 2
528 > 2
529 >
529 >
530 > diff --git a/a b/a
530 > diff --git a/a b/a
531 > old mode 100644
531 > old mode 100644
532 > new mode 100755
532 > new mode 100755
533 > EOF
533 > EOF
534
534
535 $ for i in 1 2 3; do
535 $ for i in 1 2 3; do
536 > cp ../repo10/a a
536 > cp ../repo10/a a
537 > if [ $i = 3 ]; then
537 > if [ $i = 3 ]; then
538 > # make a content-only change
538 > # make a content-only change
539 > hg import -q --bypass ../patch.diff
539 > hg import -q --bypass ../patch.diff
540 > hg update -q
540 > hg update -q
541 > rm ../patch.diff
541 > rm ../patch.diff
542 > else
542 > else
543 > echo $i >> a
543 > echo $i >> a
544 > hg commit -m $i -A a
544 > hg commit -m $i -A a
545 > fi
545 > fi
546 > done
546 > done
547 $ [ -d .hg/store/lfs/objects ]
547 $ [ -d .hg/store/lfs/objects ]
548
548
549 $ cd ..
549 $ cd ..
550
550
551 $ hg clone repo12 repo13 --noupdate
551 $ hg clone repo12 repo13 --noupdate
552 $ cd repo13
552 $ cd repo13
553 $ hg log --removed -p a -T '{desc}\n' --config diff.nobinary=1 --git
553 $ hg log --removed -p a -T '{desc}\n' --config diff.nobinary=1 --git
554 2
554 2
555 diff --git a/a b/a
555 diff --git a/a b/a
556 old mode 100644
556 old mode 100644
557 new mode 100755
557 new mode 100755
558
558
559 2
559 2
560 diff --git a/a b/a
560 diff --git a/a b/a
561 Binary file a has changed
561 Binary file a has changed
562
562
563 1
563 1
564 diff --git a/a b/a
564 diff --git a/a b/a
565 new file mode 100644
565 new file mode 100644
566 Binary file a has changed
566 Binary file a has changed
567
567
568 $ [ -d .hg/store/lfs/objects ]
568 $ [ -d .hg/store/lfs/objects ]
569 [1]
569 [1]
570
570
571 $ cd ..
571 $ cd ..
572
572
573 # Test filter
573 # Test filter
574
574
575 $ hg init repo11
575 $ hg init repo11
576 $ cd repo11
576 $ cd repo11
577 $ cat >> .hg/hgrc << EOF
577 $ cat >> .hg/hgrc << EOF
578 > [lfs]
578 > [lfs]
579 > track=(**.a & size(">5B")) | (**.b & !size(">5B"))
579 > track=(**.a & size(">5B")) | (**.b & !size(">5B"))
580 > | (**.c & "path:d" & !"path:d/c.c") | size(">10B")
580 > | (**.c & "path:d" & !"path:d/c.c") | size(">10B")
581 > EOF
581 > EOF
582
582
583 $ mkdir a
583 $ mkdir a
584 $ echo aaaaaa > a/1.a
584 $ echo aaaaaa > a/1.a
585 $ echo a > a/2.a
585 $ echo a > a/2.a
586 $ echo aaaaaa > 1.b
586 $ echo aaaaaa > 1.b
587 $ echo a > 2.b
587 $ echo a > 2.b
588 $ echo a > 1.c
588 $ echo a > 1.c
589 $ mkdir d
589 $ mkdir d
590 $ echo a > d/c.c
590 $ echo a > d/c.c
591 $ echo a > d/d.c
591 $ echo a > d/d.c
592 $ echo aaaaaaaaaaaa > x
592 $ echo aaaaaaaaaaaa > x
593 $ hg add . -q
593 $ hg add . -q
594 $ hg commit -m files
594 $ hg commit -m files
595
595
596 $ for p in a/1.a a/2.a 1.b 2.b 1.c d/c.c d/d.c x; do
596 $ for p in a/1.a a/2.a 1.b 2.b 1.c d/c.c d/d.c x; do
597 > if hg debugdata $p 0 2>&1 | grep git-lfs >/dev/null; then
597 > if hg debugdata $p 0 2>&1 | grep git-lfs >/dev/null; then
598 > echo "${p}: is lfs"
598 > echo "${p}: is lfs"
599 > else
599 > else
600 > echo "${p}: not lfs"
600 > echo "${p}: not lfs"
601 > fi
601 > fi
602 > done
602 > done
603 a/1.a: is lfs
603 a/1.a: is lfs
604 a/2.a: not lfs
604 a/2.a: not lfs
605 1.b: not lfs
605 1.b: not lfs
606 2.b: is lfs
606 2.b: is lfs
607 1.c: not lfs
607 1.c: not lfs
608 d/c.c: not lfs
608 d/c.c: not lfs
609 d/d.c: is lfs
609 d/d.c: is lfs
610 x: is lfs
610 x: is lfs
611
611
612 $ cd ..
612 $ cd ..
613
613
614 # Verify the repos
614 # Verify the repos
615
615
616 $ cat > $TESTTMP/dumpflog.py << EOF
616 $ cat > $TESTTMP/dumpflog.py << EOF
617 > # print raw revision sizes, flags, and hashes for certain files
617 > # print raw revision sizes, flags, and hashes for certain files
618 > import hashlib
618 > import hashlib
619 > from mercurial.node import short
619 > from mercurial.node import short
620 > from mercurial import revlog
620 > from mercurial import revlog
621 > def hash(rawtext):
621 > def hash(rawtext):
622 > h = hashlib.sha512()
622 > h = hashlib.sha512()
623 > h.update(rawtext)
623 > h.update(rawtext)
624 > return h.hexdigest()[:4]
624 > return h.hexdigest()[:4]
625 > def reposetup(ui, repo):
625 > def reposetup(ui, repo):
626 > # these 2 files are interesting
626 > # these 2 files are interesting
627 > for name in ['l', 's']:
627 > for name in ['l', 's']:
628 > fl = repo.file(name)
628 > fl = repo.file(name)
629 > if len(fl) == 0:
629 > if len(fl) == 0:
630 > continue
630 > continue
631 > sizes = [revlog.revlog.rawsize(fl, i) for i in fl]
631 > sizes = [revlog.revlog.rawsize(fl, i) for i in fl]
632 > texts = [fl.revision(i, raw=True) for i in fl]
632 > texts = [fl.revision(i, raw=True) for i in fl]
633 > flags = [int(fl.flags(i)) for i in fl]
633 > flags = [int(fl.flags(i)) for i in fl]
634 > hashes = [hash(t) for t in texts]
634 > hashes = [hash(t) for t in texts]
635 > print(' %s: rawsizes=%r flags=%r hashes=%r'
635 > print(' %s: rawsizes=%r flags=%r hashes=%r'
636 > % (name, sizes, flags, hashes))
636 > % (name, sizes, flags, hashes))
637 > EOF
637 > EOF
638
638
639 $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \
639 $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \
640 > repo10; do
640 > repo10; do
641 > echo 'repo:' $i
641 > echo 'repo:' $i
642 > hg --cwd $i verify --config extensions.dumpflog=$TESTTMP/dumpflog.py -q
642 > hg --cwd $i verify --config extensions.dumpflog=$TESTTMP/dumpflog.py -q
643 > done
643 > done
644 repo: client
644 repo: client
645 repo: client2
645 repo: client2
646 repo: server
646 repo: server
647 repo: repo3
647 repo: repo3
648 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
648 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
649 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
649 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
650 repo: repo4
650 repo: repo4
651 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
651 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
652 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
652 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
653 repo: repo5
653 repo: repo5
654 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
654 l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d']
655 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
655 s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b']
656 repo: repo6
656 repo: repo6
657 repo: repo7
657 repo: repo7
658 repo: repo8
658 repo: repo8
659 repo: repo9
659 repo: repo9
660 repo: repo10
660 repo: repo10
661
661
662 repo13 doesn't have any cached lfs files and its source never pushed its
662 repo13 doesn't have any cached lfs files and its source never pushed its
663 files. Therefore, the files don't exist in the remote store. Use the files in
663 files. Therefore, the files don't exist in the remote store. Use the files in
664 the user cache.
664 the user cache.
665
665
666 $ test -d $TESTTMP/repo13/.hg/store/lfs/objects
666 $ test -d $TESTTMP/repo13/.hg/store/lfs/objects
667 [1]
667 [1]
668
668
669 $ hg --config extensions.share= share repo13 repo14
669 $ hg --config extensions.share= share repo13 repo14
670 updating working directory
670 updating working directory
671 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
671 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
672 $ hg -R repo14 -q verify
672 $ hg -R repo14 -q verify
673
673
674 $ hg clone repo13 repo15
674 $ hg clone repo13 repo15
675 updating to branch default
675 updating to branch default
676 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
676 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
677 $ hg -R repo15 -q verify
677 $ hg -R repo15 -q verify
678
678
679 If the source repo doesn't have the blob (maybe it was pulled or cloned with
679 If the source repo doesn't have the blob (maybe it was pulled or cloned with
680 --noupdate), the blob is still accessible via the global cache to send to the
680 --noupdate), the blob is still accessible via the global cache to send to the
681 remote store.
681 remote store.
682
682
683 $ rm -rf $TESTTMP/repo15/.hg/store/lfs
683 $ rm -rf $TESTTMP/repo15/.hg/store/lfs
684 $ hg init repo16
684 $ hg init repo16
685 $ hg -R repo15 push repo16
685 $ hg -R repo15 push repo16
686 pushing to repo16
686 pushing to repo16
687 searching for changes
687 searching for changes
688 adding changesets
688 adding changesets
689 adding manifests
689 adding manifests
690 adding file changes
690 adding file changes
691 added 3 changesets with 2 changes to 1 files
691 added 3 changesets with 2 changes to 1 files
692 $ hg -R repo15 -q verify
692 $ hg -R repo15 -q verify
693
693
694 Test damaged file scenarios. (This also damages the usercache because of the
694 Test damaged file scenarios. (This also damages the usercache because of the
695 hardlinks.)
695 hardlinks.)
696
696
697 $ echo 'damage' >> repo5/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
697 $ echo 'damage' >> repo5/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
698
698
699 Repo with damaged lfs objects in any revision will fail verification.
699 Repo with damaged lfs objects in any revision will fail verification.
700
700
701 $ hg -R repo5 verify
701 $ hg -R repo5 verify
702 checking changesets
702 checking changesets
703 checking manifests
703 checking manifests
704 crosschecking files in changesets and manifests
704 crosschecking files in changesets and manifests
705 checking files
705 checking files
706 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
706 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
707 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
707 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
708 4 files, 5 changesets, 10 total revisions
708 4 files, 5 changesets, 10 total revisions
709 2 integrity errors encountered!
709 2 integrity errors encountered!
710 (first damaged changeset appears to be 0)
710 (first damaged changeset appears to be 0)
711 [1]
711 [1]
712
712
713 Updates work after cloning a damaged repo, if the damaged lfs objects aren't in
713 Updates work after cloning a damaged repo, if the damaged lfs objects aren't in
714 the update destination. Those objects won't be added to the new repo's store
714 the update destination. Those objects won't be added to the new repo's store
715 because they aren't accessed.
715 because they aren't accessed.
716
716
717 $ hg clone -v repo5 fromcorrupt
717 $ hg clone -v repo5 fromcorrupt
718 updating to branch default
718 updating to branch default
719 resolving manifests
719 resolving manifests
720 getting l
720 getting l
721 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the usercache
721 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the usercache
722 getting s
722 getting s
723 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
723 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
724 $ test -f fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
724 $ test -f fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
725 [1]
725 [1]
726
726
727 Verify will copy/link all lfs objects into the local store that aren't already
727 Verify will copy/link all lfs objects into the local store that aren't already
728 present. Bypass the corrupted usercache to show that verify works when fed by
728 present. Bypass the corrupted usercache to show that verify works when fed by
729 the (uncorrupted) remote store.
729 the (uncorrupted) remote store.
730
730
731 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
731 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
732 repository uses revlog format 1
732 repository uses revlog format 1
733 checking changesets
733 checking changesets
734 checking manifests
734 checking manifests
735 crosschecking files in changesets and manifests
735 crosschecking files in changesets and manifests
736 checking files
736 checking files
737 lfs: adding 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e to the usercache
737 lfs: adding 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e to the usercache
738 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
738 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
739 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
739 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
740 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
740 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
741 lfs: adding 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 to the usercache
741 lfs: adding 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 to the usercache
742 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
742 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
743 lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
743 lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
744 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
744 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
745 4 files, 5 changesets, 10 total revisions
745 4 files, 5 changesets, 10 total revisions
746
746
747 Verify will not copy/link a corrupted file from the usercache into the local
747 Verify will not copy/link a corrupted file from the usercache into the local
748 store, and poison it. (The verify with a good remote now works.)
748 store, and poison it. (The verify with a good remote now works.)
749
749
750 $ rm -r fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
750 $ rm -r fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
751 $ hg -R fromcorrupt verify -v
751 $ hg -R fromcorrupt verify -v
752 repository uses revlog format 1
752 repository uses revlog format 1
753 checking changesets
753 checking changesets
754 checking manifests
754 checking manifests
755 crosschecking files in changesets and manifests
755 crosschecking files in changesets and manifests
756 checking files
756 checking files
757 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
757 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
758 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
758 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
759 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
759 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
760 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
760 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
761 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
761 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
762 4 files, 5 changesets, 10 total revisions
762 4 files, 5 changesets, 10 total revisions
763 2 integrity errors encountered!
763 2 integrity errors encountered!
764 (first damaged changeset appears to be 0)
764 (first damaged changeset appears to be 0)
765 [1]
765 [1]
766 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
766 $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v
767 repository uses revlog format 1
767 repository uses revlog format 1
768 checking changesets
768 checking changesets
769 checking manifests
769 checking manifests
770 crosschecking files in changesets and manifests
770 crosschecking files in changesets and manifests
771 checking files
771 checking files
772 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the usercache
772 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the usercache
773 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
773 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
774 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
774 lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
775 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
775 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
776 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
776 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
777 4 files, 5 changesets, 10 total revisions
777 4 files, 5 changesets, 10 total revisions
778
778
779 Damaging a file required by the update destination fails the update.
779 Damaging a file required by the update destination fails the update.
780
780
781 $ echo 'damage' >> $TESTTMP/dummy-remote/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
781 $ echo 'damage' >> $TESTTMP/dummy-remote/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
782 $ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2
782 $ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2
783 updating to branch default
783 updating to branch default
784 resolving manifests
784 resolving manifests
785 abort: corrupt remote lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
785 abort: corrupt remote lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
786 [255]
786 [255]
787
787
788 A corrupted lfs blob is not transferred from a file://remotestore to the
788 A corrupted lfs blob is not transferred from a file://remotestore to the
789 usercache or local store.
789 usercache or local store.
790
790
791 $ test -f emptycache/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
791 $ test -f emptycache/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
792 [1]
792 [1]
793 $ test -f fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
793 $ test -f fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
794 [1]
794 [1]
795
795
796 $ hg -R fromcorrupt2 verify
796 $ hg -R fromcorrupt2 verify
797 checking changesets
797 checking changesets
798 checking manifests
798 checking manifests
799 crosschecking files in changesets and manifests
799 crosschecking files in changesets and manifests
800 checking files
800 checking files
801 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
801 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
802 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
802 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
803 4 files, 5 changesets, 10 total revisions
803 4 files, 5 changesets, 10 total revisions
804 2 integrity errors encountered!
804 2 integrity errors encountered!
805 (first damaged changeset appears to be 0)
805 (first damaged changeset appears to be 0)
806 [1]
806 [1]
807
807
808 Corrupt local files are not sent upstream. (The alternate dummy remote
808 Corrupt local files are not sent upstream. (The alternate dummy remote
809 avoids the corrupt lfs object in the original remote.)
809 avoids the corrupt lfs object in the original remote.)
810
810
811 $ mkdir $TESTTMP/dummy-remote2
811 $ mkdir $TESTTMP/dummy-remote2
812 $ hg init dest
812 $ hg init dest
813 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 push -v dest
813 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 push -v dest
814 pushing to dest
814 pushing to dest
815 searching for changes
815 searching for changes
816 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
816 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
817 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
817 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
818 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
818 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
819 abort: detected corrupt lfs object: 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
819 abort: detected corrupt lfs object: 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
820 (run hg verify)
820 (run hg verify)
821 [255]
821 [255]
822
822
823 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 verify -v
823 $ hg -R fromcorrupt2 --config lfs.url=file:///$TESTTMP/dummy-remote2 verify -v
824 repository uses revlog format 1
824 repository uses revlog format 1
825 checking changesets
825 checking changesets
826 checking manifests
826 checking manifests
827 crosschecking files in changesets and manifests
827 crosschecking files in changesets and manifests
828 checking files
828 checking files
829 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
829 l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
830 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
830 lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
831 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
831 large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
832 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
832 lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
833 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
833 lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
834 4 files, 5 changesets, 10 total revisions
834 4 files, 5 changesets, 10 total revisions
835 2 integrity errors encountered!
835 2 integrity errors encountered!
836 (first damaged changeset appears to be 0)
836 (first damaged changeset appears to be 0)
837 [1]
837 [1]
838
838
839 $ cat $TESTTMP/dummy-remote2/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
839 $ cat $TESTTMP/dummy-remote2/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
840 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
840 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
841 $ cat fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
841 $ cat fromcorrupt2/.hg/store/lfs/objects/22/f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b | $TESTDIR/f --sha256
842 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
842 sha256=22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
843 $ test -f $TESTTMP/dummy-remote2/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
843 $ test -f $TESTTMP/dummy-remote2/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
844 [1]
844 [1]
845
845
846 Accessing a corrupt file will complain
846 Accessing a corrupt file will complain
847
847
848 $ hg --cwd fromcorrupt2 cat -r 0 large
848 $ hg --cwd fromcorrupt2 cat -r 0 large
849 abort: integrity check failed on data/large.i:0!
849 abort: integrity check failed on data/large.i:0!
850 [255]
850 [255]
851
851
852 lfs -> normal -> lfs round trip conversions are possible. The 'none()'
852 lfs -> normal -> lfs round trip conversions are possible. The 'none()'
853 predicate on the command line will override whatever is configured globally and
853 predicate on the command line will override whatever is configured globally and
854 locally, and ensures everything converts to a regular file. For lfs -> normal,
854 locally, and ensures everything converts to a regular file. For lfs -> normal,
855 there's no 'lfs' destination repo requirement. For normal -> lfs, there is.
855 there's no 'lfs' destination repo requirement. For normal -> lfs, there is.
856
856
857 $ hg --config extensions.convert= --config 'lfs.track=none()' \
857 $ hg --config extensions.convert= --config 'lfs.track=none()' \
858 > convert repo8 convert_normal
858 > convert repo8 convert_normal
859 initializing destination convert_normal repository
859 initializing destination convert_normal repository
860 scanning source...
860 scanning source...
861 sorting...
861 sorting...
862 converting...
862 converting...
863 2 a
863 2 a
864 1 b
864 1 b
865 0 meta
865 0 meta
866 $ grep 'lfs' convert_normal/.hg/requires
866 $ grep 'lfs' convert_normal/.hg/requires
867 [1]
867 [1]
868 $ hg --cwd convert_normal cat a1 -r 0 -T '{rawdata}'
868 $ hg --cwd convert_normal cat a1 -r 0 -T '{rawdata}'
869 THIS-IS-LFS-BECAUSE-10-BYTES
869 THIS-IS-LFS-BECAUSE-10-BYTES
870
870
871 $ hg --config extensions.convert= --config lfs.threshold=10B \
871 $ hg --config extensions.convert= --config lfs.threshold=10B \
872 > convert convert_normal convert_lfs
872 > convert convert_normal convert_lfs
873 initializing destination convert_lfs repository
873 initializing destination convert_lfs repository
874 scanning source...
874 scanning source...
875 sorting...
875 sorting...
876 converting...
876 converting...
877 2 a
877 2 a
878 1 b
878 1 b
879 0 meta
879 0 meta
880
880
881 $ hg --cwd convert_lfs cat -r 0 a1 -T '{rawdata}'
881 $ hg --cwd convert_lfs cat -r 0 a1 -T '{rawdata}'
882 version https://git-lfs.github.com/spec/v1
882 version https://git-lfs.github.com/spec/v1
883 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
883 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
884 size 29
884 size 29
885 x-is-binary 0
885 x-is-binary 0
886 $ hg --cwd convert_lfs debugdata a1 0
886 $ hg --cwd convert_lfs debugdata a1 0
887 version https://git-lfs.github.com/spec/v1
887 version https://git-lfs.github.com/spec/v1
888 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
888 oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
889 size 29
889 size 29
890 x-is-binary 0
890 x-is-binary 0
891 $ hg --cwd convert_lfs log -r 0 -T "{lfs_files % '{lfspointer % '{key}={value}\n'}'}"
891 $ hg --cwd convert_lfs log -r 0 -T "{lfs_files % '{lfspointer % '{key}={value}\n'}'}"
892 version=https://git-lfs.github.com/spec/v1
892 version=https://git-lfs.github.com/spec/v1
893 oid=sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
893 oid=sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
894 size=29
894 size=29
895 x-is-binary=0
895 x-is-binary=0
896 $ hg --cwd convert_lfs log -r 0 \
896 $ hg --cwd convert_lfs log -r 0 \
897 > -T '{lfs_files % "{get(lfspointer, "oid")}\n"}{lfs_files % "{lfspointer.oid}\n"}'
897 > -T '{lfs_files % "{get(lfspointer, "oid")}\n"}{lfs_files % "{lfspointer.oid}\n"}'
898 sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
898 sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
899 sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
899 sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
900 $ hg --cwd convert_lfs log -r 0 -T '{lfs_files % "{lfspointer}\n"}'
900 $ hg --cwd convert_lfs log -r 0 -T '{lfs_files % "{lfspointer}\n"}'
901 version=https://git-lfs.github.com/spec/v1 oid=sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024 size=29 x-is-binary=0
901 version=https://git-lfs.github.com/spec/v1 oid=sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024 size=29 x-is-binary=0
902 $ hg --cwd convert_lfs \
902 $ hg --cwd convert_lfs \
903 > log -r 'all()' -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}'
903 > log -r 'all()' -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}'
904 0: a1: 5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
904 0: a1: 5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
905 1: a2: 5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
905 1: a2: 5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
906 2: a2: 876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
906 2: a2: 876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
907
907
908 $ grep 'lfs' convert_lfs/.hg/requires
908 $ grep 'lfs' convert_lfs/.hg/requires
909 lfs
909 lfs
910
910
911 The hashes in all stages of the conversion are unchanged.
911 The hashes in all stages of the conversion are unchanged.
912
912
913 $ hg -R repo8 log -T '{node|short}\n'
913 $ hg -R repo8 log -T '{node|short}\n'
914 0fae949de7fa
914 0fae949de7fa
915 9cd6bdffdac0
915 9cd6bdffdac0
916 7f96794915f7
916 7f96794915f7
917 $ hg -R convert_normal log -T '{node|short}\n'
917 $ hg -R convert_normal log -T '{node|short}\n'
918 0fae949de7fa
918 0fae949de7fa
919 9cd6bdffdac0
919 9cd6bdffdac0
920 7f96794915f7
920 7f96794915f7
921 $ hg -R convert_lfs log -T '{node|short}\n'
921 $ hg -R convert_lfs log -T '{node|short}\n'
922 0fae949de7fa
922 0fae949de7fa
923 9cd6bdffdac0
923 9cd6bdffdac0
924 7f96794915f7
924 7f96794915f7
925
925
926 This convert is trickier, because it contains deleted files (via `hg mv`)
926 This convert is trickier, because it contains deleted files (via `hg mv`)
927
927
928 $ hg --config extensions.convert= --config lfs.threshold=1000M \
928 $ hg --config extensions.convert= --config lfs.threshold=1000M \
929 > convert repo3 convert_normal2
929 > convert repo3 convert_normal2
930 initializing destination convert_normal2 repository
930 initializing destination convert_normal2 repository
931 scanning source...
931 scanning source...
932 sorting...
932 sorting...
933 converting...
933 converting...
934 4 commit with lfs content
934 4 commit with lfs content
935 3 renames
935 3 renames
936 2 large to small, small to large
936 2 large to small, small to large
937 1 random modifications
937 1 random modifications
938 0 switch large and small again
938 0 switch large and small again
939 $ grep 'lfs' convert_normal2/.hg/requires
939 $ grep 'lfs' convert_normal2/.hg/requires
940 [1]
940 [1]
941 $ hg --cwd convert_normal2 debugdata large 0
941 $ hg --cwd convert_normal2 debugdata large 0
942 LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
942 LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
943
943
944 $ hg --config extensions.convert= --config lfs.threshold=10B \
944 $ hg --config extensions.convert= --config lfs.threshold=10B \
945 > convert convert_normal2 convert_lfs2
945 > convert convert_normal2 convert_lfs2
946 initializing destination convert_lfs2 repository
946 initializing destination convert_lfs2 repository
947 scanning source...
947 scanning source...
948 sorting...
948 sorting...
949 converting...
949 converting...
950 4 commit with lfs content
950 4 commit with lfs content
951 3 renames
951 3 renames
952 2 large to small, small to large
952 2 large to small, small to large
953 1 random modifications
953 1 random modifications
954 0 switch large and small again
954 0 switch large and small again
955 $ grep 'lfs' convert_lfs2/.hg/requires
955 $ grep 'lfs' convert_lfs2/.hg/requires
956 lfs
956 lfs
957 $ hg --cwd convert_lfs2 debugdata large 0
957 $ hg --cwd convert_lfs2 debugdata large 0
958 version https://git-lfs.github.com/spec/v1
958 version https://git-lfs.github.com/spec/v1
959 oid sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
959 oid sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
960 size 39
960 size 39
961 x-is-binary 0
961 x-is-binary 0
962
962
963 $ hg -R convert_lfs2 config --debug extensions | grep lfs
963 $ hg -R convert_lfs2 config --debug extensions | grep lfs
964 $TESTTMP/convert_lfs2/.hg/hgrc:*: extensions.lfs= (glob)
964 $TESTTMP/convert_lfs2/.hg/hgrc:*: extensions.lfs= (glob)
965
965
966 Committing deleted files works:
966 Committing deleted files works:
967
967
968 $ hg init $TESTTMP/repo-del
968 $ hg init $TESTTMP/repo-del
969 $ cd $TESTTMP/repo-del
969 $ cd $TESTTMP/repo-del
970 $ echo 1 > A
970 $ echo 1 > A
971 $ hg commit -m 'add A' -A A
971 $ hg commit -m 'add A' -A A
972 $ hg rm A
972 $ hg rm A
973 $ hg commit -m 'rm A'
973 $ hg commit -m 'rm A'
974
974
975 Bad .hglfs files will block the commit with a useful message
975 Bad .hglfs files will block the commit with a useful message
976
976
977 $ cat > .hglfs << EOF
977 $ cat > .hglfs << EOF
978 > [track]
978 > [track]
979 > **.test = size(">5B")
979 > **.test = size(">5B")
980 > bad file ... no commit
980 > bad file ... no commit
981 > EOF
981 > EOF
982
982
983 $ echo x > file.txt
983 $ echo x > file.txt
984 $ hg ci -Aqm 'should fail'
984 $ hg ci -Aqm 'should fail'
985 hg: parse error at .hglfs:3: bad file ... no commit
985 hg: parse error at .hglfs:3: bad file ... no commit
986 [255]
986 [255]
987
987
988 $ cat > .hglfs << EOF
988 $ cat > .hglfs << EOF
989 > [track]
989 > [track]
990 > **.test = size(">5B")
990 > **.test = size(">5B")
991 > ** = nonexistent()
991 > ** = nonexistent()
992 > EOF
992 > EOF
993
993
994 $ hg ci -Aqm 'should fail'
994 $ hg ci -Aqm 'should fail'
995 abort: parse error in .hglfs: unknown identifier: nonexistent
995 abort: parse error in .hglfs: unknown identifier: nonexistent
996 [255]
996 [255]
997
997
998 '**' works out to mean all files.
998 '**' works out to mean all files.
999
999
1000 $ cat > .hglfs << EOF
1000 $ cat > .hglfs << EOF
1001 > [track]
1001 > [track]
1002 > path:.hglfs = none()
1002 > path:.hglfs = none()
1003 > **.test = size(">5B")
1003 > **.test = size(">5B")
1004 > **.exclude = none()
1004 > **.exclude = none()
1005 > ** = size(">10B")
1005 > ** = size(">10B")
1006 > EOF
1006 > EOF
1007
1007
1008 The LFS policy takes effect without tracking the .hglfs file
1008 The LFS policy takes effect without tracking the .hglfs file
1009
1009
1010 $ echo 'largefile' > lfs.test
1010 $ echo 'largefile' > lfs.test
1011 $ echo '012345678901234567890' > nolfs.exclude
1011 $ echo '012345678901234567890' > nolfs.exclude
1012 $ echo '01234567890123456' > lfs.catchall
1012 $ echo '01234567890123456' > lfs.catchall
1013 $ hg add *
1013 $ hg add *
1014 $ hg ci -qm 'before add .hglfs'
1014 $ hg ci -qm 'before add .hglfs'
1015 $ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
1015 $ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
1016 2: lfs.catchall: d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9
1016 2: lfs.catchall: d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9
1017 lfs.test: 5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c
1017 lfs.test: 5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c
1018
1018
1019 The .hglfs file works when tracked
1019 The .hglfs file works when tracked
1020
1020
1021 $ echo 'largefile2' > lfs.test
1021 $ echo 'largefile2' > lfs.test
1022 $ echo '012345678901234567890a' > nolfs.exclude
1022 $ echo '012345678901234567890a' > nolfs.exclude
1023 $ echo '01234567890123456a' > lfs.catchall
1023 $ echo '01234567890123456a' > lfs.catchall
1024 $ hg ci -Aqm 'after adding .hglfs'
1024 $ hg ci -Aqm 'after adding .hglfs'
1025 $ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
1025 $ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
1026 3: lfs.catchall: 31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573
1026 3: lfs.catchall: 31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573
1027 lfs.test: 8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6
1027 lfs.test: 8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6
1028
1028
1029 The LFS policy stops when the .hglfs is gone
1029 The LFS policy stops when the .hglfs is gone
1030
1030
1031 $ mv .hglfs .hglfs_
1031 $ mv .hglfs .hglfs_
1032 $ echo 'largefile3' > lfs.test
1032 $ echo 'largefile3' > lfs.test
1033 $ echo '012345678901234567890abc' > nolfs.exclude
1033 $ echo '012345678901234567890abc' > nolfs.exclude
1034 $ echo '01234567890123456abc' > lfs.catchall
1034 $ echo '01234567890123456abc' > lfs.catchall
1035 $ hg ci -qm 'file test' -X .hglfs
1035 $ hg ci -qm 'file test' -X .hglfs
1036 $ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
1036 $ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
1037 4:
1037 4:
1038
1038
1039 $ mv .hglfs_ .hglfs
1039 $ mv .hglfs_ .hglfs
1040 $ echo '012345678901234567890abc' > lfs.test
1040 $ echo '012345678901234567890abc' > lfs.test
1041 $ hg ci -m 'back to lfs'
1041 $ hg ci -m 'back to lfs'
1042 $ hg rm lfs.test
1042 $ hg rm lfs.test
1043 $ hg ci -qm 'remove lfs'
1043 $ hg ci -qm 'remove lfs'
1044
1044
1045 {lfs_files} will list deleted files too
1045 {lfs_files} will list deleted files too
1046
1046
1047 $ hg log -T "{lfs_files % '{rev} {file}: {lfspointer.oid}\n'}"
1047 $ hg log -T "{lfs_files % '{rev} {file}: {lfspointer.oid}\n'}"
1048 6 lfs.test:
1048 6 lfs.test:
1049 5 lfs.test: sha256:43f8f41171b6f62a6b61ba4ce98a8a6c1649240a47ebafd43120aa215ac9e7f6
1049 5 lfs.test: sha256:43f8f41171b6f62a6b61ba4ce98a8a6c1649240a47ebafd43120aa215ac9e7f6
1050 3 lfs.catchall: sha256:31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573
1050 3 lfs.catchall: sha256:31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573
1051 3 lfs.test: sha256:8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6
1051 3 lfs.test: sha256:8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6
1052 2 lfs.catchall: sha256:d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9
1052 2 lfs.catchall: sha256:d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9
1053 2 lfs.test: sha256:5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c
1053 2 lfs.test: sha256:5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c
1054
1054
1055 $ hg log -r 'file("set:lfs()")' -T '{rev} {join(lfs_files, ", ")}\n'
1055 $ hg log -r 'file("set:lfs()")' -T '{rev} {join(lfs_files, ", ")}\n'
1056 2 lfs.catchall, lfs.test
1056 2 lfs.catchall, lfs.test
1057 3 lfs.catchall, lfs.test
1057 3 lfs.catchall, lfs.test
1058 5 lfs.test
1058 5 lfs.test
1059 6 lfs.test
1059 6 lfs.test
1060
1060
1061 $ cd ..
1061 $ cd ..
1062
1062
1063 Unbundling adds a requirement to a non-lfs repo, if necessary.
1063 Unbundling adds a requirement to a non-lfs repo, if necessary.
1064
1064
1065 $ hg bundle -R $TESTTMP/repo-del -qr 0 --base null nolfs.hg
1065 $ hg bundle -R $TESTTMP/repo-del -qr 0 --base null nolfs.hg
1066 $ hg bundle -R convert_lfs2 -qr tip --base null lfs.hg
1066 $ hg bundle -R convert_lfs2 -qr tip --base null lfs.hg
1067 $ hg init unbundle
1067 $ hg init unbundle
1068 $ hg pull -R unbundle -q nolfs.hg
1068 $ hg pull -R unbundle -q nolfs.hg
1069 $ grep lfs unbundle/.hg/requires
1069 $ grep lfs unbundle/.hg/requires
1070 [1]
1070 [1]
1071 $ hg pull -R unbundle -q lfs.hg
1071 $ hg pull -R unbundle -q lfs.hg
1072 $ grep lfs unbundle/.hg/requires
1072 $ grep lfs unbundle/.hg/requires
1073 lfs
1073 lfs
1074
1074
1075 $ hg init no_lfs
1075 $ hg init no_lfs
1076 $ cat >> no_lfs/.hg/hgrc <<EOF
1076 $ cat >> no_lfs/.hg/hgrc <<EOF
1077 > [experimental]
1077 > [experimental]
1078 > changegroup3 = True
1078 > changegroup3 = True
1079 > [extensions]
1079 > [extensions]
1080 > lfs=!
1080 > lfs=!
1081 > EOF
1081 > EOF
1082 $ cp -R no_lfs no_lfs2
1082 $ cp -R no_lfs no_lfs2
1083
1083
1084 Pushing from a local lfs repo to a local repo without an lfs requirement and
1084 Pushing from a local lfs repo to a local repo without an lfs requirement and
1085 with lfs disabled, fails.
1085 with lfs disabled, fails.
1086
1086
1087 $ hg push -R convert_lfs2 no_lfs
1087 $ hg push -R convert_lfs2 no_lfs
1088 pushing to no_lfs
1088 pushing to no_lfs
1089 abort: required features are not supported in the destination: lfs
1089 abort: required features are not supported in the destination: lfs
1090 [255]
1090 [255]
1091 $ grep lfs no_lfs/.hg/requires
1091 $ grep lfs no_lfs/.hg/requires
1092 [1]
1092 [1]
1093
1093
1094 Pulling from a local lfs repo to a local repo without an lfs requirement and
1094 Pulling from a local lfs repo to a local repo without an lfs requirement and
1095 with lfs disabled, fails.
1095 with lfs disabled, fails.
1096
1096
1097 $ hg pull -R no_lfs2 convert_lfs2
1097 $ hg pull -R no_lfs2 convert_lfs2
1098 pulling from convert_lfs2
1098 pulling from convert_lfs2
1099 abort: required features are not supported in the destination: lfs
1099 abort: required features are not supported in the destination: lfs
1100 [255]
1100 [255]
1101 $ grep lfs no_lfs2/.hg/requires
1101 $ grep lfs no_lfs2/.hg/requires
1102 [1]
1102 [1]
@@ -1,81 +1,79 b''
1 #require killdaemons
1 #require killdaemons
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo a > a
5 $ echo a > a
6 $ hg ci -Ama
6 $ hg ci -Ama
7 adding a
7 adding a
8 $ cd ..
8 $ cd ..
9 $ hg clone test test2
9 $ hg clone test test2
10 updating to branch default
10 updating to branch default
11 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 $ cd test2
12 $ cd test2
13 $ echo a >> a
13 $ echo a >> a
14 $ hg ci -mb
14 $ hg ci -mb
15
15
16 Cloning with a password in the URL should not save the password in .hg/hgrc:
16 Cloning with a password in the URL should not save the password in .hg/hgrc:
17
17
18 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
18 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
19 $ cat hg.pid >> $DAEMON_PIDS
19 $ cat hg.pid >> $DAEMON_PIDS
20 $ hg clone http://foo:xyzzy@localhost:$HGPORT/ test3
20 $ hg clone http://foo:xyzzy@localhost:$HGPORT/ test3
21 requesting all changes
21 requesting all changes
22 adding changesets
22 adding changesets
23 adding manifests
23 adding manifests
24 adding file changes
24 adding file changes
25 added 2 changesets with 2 changes to 1 files
25 added 2 changesets with 2 changes to 1 files
26 new changesets cb9a9f314b8b:ba677d0156c1
26 new changesets cb9a9f314b8b:ba677d0156c1
27 updating to branch default
27 updating to branch default
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 $ cat test3/.hg/hgrc
29 $ cat test3/.hg/hgrc
30 # example repository config (see 'hg help config' for more info)
30 # example repository config (see 'hg help config' for more info)
31 [paths]
31 [paths]
32 default = http://foo@localhost:$HGPORT/
32 default = http://foo@localhost:$HGPORT/
33
33
34 # path aliases to other clones of this repo in URLs or filesystem paths
34 # path aliases to other clones of this repo in URLs or filesystem paths
35 # (see 'hg help config.paths' for more info)
35 # (see 'hg help config.paths' for more info)
36 #
36 #
37 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
37 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
38 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
38 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
39 # my-clone = /home/jdoe/jdoes-clone
39 # my-clone = /home/jdoe/jdoes-clone
40
40
41 [ui]
41 [ui]
42 # name and email (local to this repository, optional), e.g.
42 # name and email (local to this repository, optional), e.g.
43 # username = Jane Doe <jdoe@example.com>
43 # username = Jane Doe <jdoe@example.com>
44 $ killdaemons.py
44 $ killdaemons.py
45
45
46 expect error, cloning not allowed
46 expect error, cloning not allowed
47
47
48 $ echo '[web]' > .hg/hgrc
48 $ echo '[web]' > .hg/hgrc
49 $ echo 'allowpull = false' >> .hg/hgrc
49 $ echo 'allowpull = false' >> .hg/hgrc
50 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
50 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
51 $ cat hg.pid >> $DAEMON_PIDS
51 $ cat hg.pid >> $DAEMON_PIDS
52 $ hg clone http://localhost:$HGPORT/ test4 # bundle2+
52 $ hg clone http://localhost:$HGPORT/ test4 # bundle2+
53 requesting all changes
54 abort: authorization failed
53 abort: authorization failed
55 [255]
54 [255]
56 $ hg clone http://localhost:$HGPORT/ test4 --config devel.legacy.exchange=bundle1
55 $ hg clone http://localhost:$HGPORT/ test4 --config devel.legacy.exchange=bundle1
57 abort: authorization failed
56 abort: authorization failed
58 [255]
57 [255]
59 $ killdaemons.py
58 $ killdaemons.py
60
59
61 serve errors
60 serve errors
62
61
63 $ cat errors.log
62 $ cat errors.log
64 $ req() {
63 $ req() {
65 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
64 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
66 > cat hg.pid >> $DAEMON_PIDS
65 > cat hg.pid >> $DAEMON_PIDS
67 > hg --cwd ../test pull http://localhost:$HGPORT/
66 > hg --cwd ../test pull http://localhost:$HGPORT/
68 > killdaemons.py hg.pid
67 > killdaemons.py hg.pid
69 > echo % serve errors
68 > echo % serve errors
70 > cat errors.log
69 > cat errors.log
71 > }
70 > }
72
71
73 expect error, pulling not allowed
72 expect error, pulling not allowed
74
73
75 $ req
74 $ req
76 pulling from http://localhost:$HGPORT/
75 pulling from http://localhost:$HGPORT/
77 searching for changes
78 abort: authorization failed
76 abort: authorization failed
79 % serve errors
77 % serve errors
80
78
81 $ cd ..
79 $ cd ..
@@ -1,352 +1,330 b''
1 #require killdaemons
1 #require killdaemons
2
2
3 #testcases bundle1 bundle2
3 #testcases bundle1 bundle2
4
4
5 #if bundle1
5 #if bundle1
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [devel]
7 > [devel]
8 > # This test is dedicated to interaction through old bundle
8 > # This test is dedicated to interaction through old bundle
9 > legacy.exchange = bundle1
9 > legacy.exchange = bundle1
10 > EOF
10 > EOF
11 #endif
11 #endif
12
12
13 $ hg init test
13 $ hg init test
14 $ cd test
14 $ cd test
15 $ echo a > a
15 $ echo a > a
16 $ hg ci -Ama
16 $ hg ci -Ama
17 adding a
17 adding a
18 $ cd ..
18 $ cd ..
19 $ hg clone test test2
19 $ hg clone test test2
20 updating to branch default
20 updating to branch default
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 $ cd test2
22 $ cd test2
23 $ echo a >> a
23 $ echo a >> a
24 $ hg ci -mb
24 $ hg ci -mb
25 $ req() {
25 $ req() {
26 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
26 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
27 > cat hg.pid >> $DAEMON_PIDS
27 > cat hg.pid >> $DAEMON_PIDS
28 > hg --cwd ../test2 push http://localhost:$HGPORT/
28 > hg --cwd ../test2 push http://localhost:$HGPORT/
29 > exitstatus=$?
29 > exitstatus=$?
30 > killdaemons.py
30 > killdaemons.py
31 > echo % serve errors
31 > echo % serve errors
32 > cat errors.log
32 > cat errors.log
33 > return $exitstatus
33 > return $exitstatus
34 > }
34 > }
35 $ cd ../test
35 $ cd ../test
36
36
37 expect ssl error
37 expect ssl error
38
38
39 $ req
39 $ req
40 pushing to http://localhost:$HGPORT/
40 pushing to http://localhost:$HGPORT/
41 searching for changes
41 searching for changes
42 abort: HTTP Error 403: ssl required
42 abort: HTTP Error 403: ssl required
43 % serve errors
43 % serve errors
44 [255]
44 [255]
45
45
46 expect authorization error
46 expect authorization error
47
47
48 $ echo '[web]' > .hg/hgrc
48 $ echo '[web]' > .hg/hgrc
49 $ echo 'push_ssl = false' >> .hg/hgrc
49 $ echo 'push_ssl = false' >> .hg/hgrc
50 $ req
50 $ req
51 pushing to http://localhost:$HGPORT/
51 pushing to http://localhost:$HGPORT/
52 searching for changes
52 searching for changes
53 abort: authorization failed
53 abort: authorization failed
54 % serve errors
54 % serve errors
55 [255]
55 [255]
56
56
57 expect authorization error: must have authorized user
57 expect authorization error: must have authorized user
58
58
59 $ echo 'allow_push = unperson' >> .hg/hgrc
59 $ echo 'allow_push = unperson' >> .hg/hgrc
60 $ req
60 $ req
61 pushing to http://localhost:$HGPORT/
61 pushing to http://localhost:$HGPORT/
62 searching for changes
62 searching for changes
63 abort: authorization failed
63 abort: authorization failed
64 % serve errors
64 % serve errors
65 [255]
65 [255]
66
66
67 expect success
67 expect success
68
68
69 $ cat > $TESTTMP/hook.sh <<'EOF'
69 $ cat > $TESTTMP/hook.sh <<'EOF'
70 > echo "phase-move: $HG_NODE: $HG_OLDPHASE -> $HG_PHASE"
70 > echo "phase-move: $HG_NODE: $HG_OLDPHASE -> $HG_PHASE"
71 > EOF
71 > EOF
72
72
73 $ cat >> .hg/hgrc <<EOF
73 $ cat >> .hg/hgrc <<EOF
74 > allow_push = *
74 > allow_push = *
75 > [hooks]
75 > [hooks]
76 > changegroup = sh -c "printenv.py changegroup 0"
76 > changegroup = sh -c "printenv.py changegroup 0"
77 > pushkey = sh -c "printenv.py pushkey 0"
77 > pushkey = sh -c "printenv.py pushkey 0"
78 > txnclose-phase.test = sh $TESTTMP/hook.sh
78 > txnclose-phase.test = sh $TESTTMP/hook.sh
79 > EOF
79 > EOF
80 $ req
80 $ req
81 pushing to http://localhost:$HGPORT/
81 pushing to http://localhost:$HGPORT/
82 searching for changes
82 searching for changes
83 remote: adding changesets
83 remote: adding changesets
84 remote: adding manifests
84 remote: adding manifests
85 remote: adding file changes
85 remote: adding file changes
86 remote: added 1 changesets with 1 changes to 1 files
86 remote: added 1 changesets with 1 changes to 1 files
87 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
87 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
88 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
88 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
89 remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !)
89 remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !)
90 remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !)
90 remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !)
91 % serve errors
91 % serve errors
92 $ hg rollback
92 $ hg rollback
93 repository tip rolled back to revision 0 (undo serve)
93 repository tip rolled back to revision 0 (undo serve)
94
94
95 expect success, server lacks the httpheader capability
95 expect success, server lacks the httpheader capability
96
96
97 $ CAP=httpheader
97 $ CAP=httpheader
98 $ . "$TESTDIR/notcapable"
98 $ . "$TESTDIR/notcapable"
99 $ req
99 $ req
100 pushing to http://localhost:$HGPORT/
100 pushing to http://localhost:$HGPORT/
101 searching for changes
101 searching for changes
102 remote: adding changesets
102 remote: adding changesets
103 remote: adding manifests
103 remote: adding manifests
104 remote: adding file changes
104 remote: adding file changes
105 remote: added 1 changesets with 1 changes to 1 files
105 remote: added 1 changesets with 1 changes to 1 files
106 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
106 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
107 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
107 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
108 remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !)
108 remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !)
109 remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !)
109 remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !)
110 % serve errors
110 % serve errors
111 $ hg rollback
111 $ hg rollback
112 repository tip rolled back to revision 0 (undo serve)
112 repository tip rolled back to revision 0 (undo serve)
113
113
114 expect success, server lacks the unbundlehash capability
114 expect success, server lacks the unbundlehash capability
115
115
116 $ CAP=unbundlehash
116 $ CAP=unbundlehash
117 $ . "$TESTDIR/notcapable"
117 $ . "$TESTDIR/notcapable"
118 $ req
118 $ req
119 pushing to http://localhost:$HGPORT/
119 pushing to http://localhost:$HGPORT/
120 searching for changes
120 searching for changes
121 remote: adding changesets
121 remote: adding changesets
122 remote: adding manifests
122 remote: adding manifests
123 remote: adding file changes
123 remote: adding file changes
124 remote: added 1 changesets with 1 changes to 1 files
124 remote: added 1 changesets with 1 changes to 1 files
125 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
125 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
126 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
126 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
127 remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !)
127 remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !)
128 remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !)
128 remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !)
129 % serve errors
129 % serve errors
130 $ hg rollback
130 $ hg rollback
131 repository tip rolled back to revision 0 (undo serve)
131 repository tip rolled back to revision 0 (undo serve)
132
132
133 expect success, pre-d1b16a746db6 server supports the unbundle capability, but
133 expect success, pre-d1b16a746db6 server supports the unbundle capability, but
134 has no parameter
134 has no parameter
135
135
136 $ cat <<EOF > notcapable-unbundleparam.py
136 $ cat <<EOF > notcapable-unbundleparam.py
137 > from mercurial import extensions, httppeer
137 > from mercurial import extensions, httppeer
138 > def capable(orig, self, name):
138 > def capable(orig, self, name):
139 > if name == 'unbundle':
139 > if name == 'unbundle':
140 > return True
140 > return True
141 > return orig(self, name)
141 > return orig(self, name)
142 > def uisetup(ui):
142 > def uisetup(ui):
143 > extensions.wrapfunction(httppeer.httppeer, 'capable', capable)
143 > extensions.wrapfunction(httppeer.httppeer, 'capable', capable)
144 > EOF
144 > EOF
145 $ cp $HGRCPATH $HGRCPATH.orig
145 $ cp $HGRCPATH $HGRCPATH.orig
146 $ cat <<EOF >> $HGRCPATH
146 $ cat <<EOF >> $HGRCPATH
147 > [extensions]
147 > [extensions]
148 > notcapable-unbundleparam = `pwd`/notcapable-unbundleparam.py
148 > notcapable-unbundleparam = `pwd`/notcapable-unbundleparam.py
149 > EOF
149 > EOF
150 $ req
150 $ req
151 pushing to http://localhost:$HGPORT/
151 pushing to http://localhost:$HGPORT/
152 searching for changes
152 searching for changes
153 remote: adding changesets
153 remote: adding changesets
154 remote: adding manifests
154 remote: adding manifests
155 remote: adding file changes
155 remote: adding file changes
156 remote: added 1 changesets with 1 changes to 1 files
156 remote: added 1 changesets with 1 changes to 1 files
157 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
157 remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
158 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
158 remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public
159 remote: changegroup hook: * (glob)
159 remote: changegroup hook: * (glob)
160 % serve errors
160 % serve errors
161 $ hg rollback
161 $ hg rollback
162 repository tip rolled back to revision 0 (undo serve)
162 repository tip rolled back to revision 0 (undo serve)
163 $ mv $HGRCPATH.orig $HGRCPATH
163 $ mv $HGRCPATH.orig $HGRCPATH
164
164
165 Test pushing to a publishing repository with a failing prepushkey hook
165 Test pushing to a publishing repository with a failing prepushkey hook
166
166
167 $ cat > .hg/hgrc <<EOF
167 $ cat > .hg/hgrc <<EOF
168 > [web]
168 > [web]
169 > push_ssl = false
169 > push_ssl = false
170 > allow_push = *
170 > allow_push = *
171 > [hooks]
171 > [hooks]
172 > prepushkey = sh -c "printenv.py prepushkey 1"
172 > prepushkey = sh -c "printenv.py prepushkey 1"
173 > [devel]
173 > [devel]
174 > legacy.exchange=phases
174 > legacy.exchange=phases
175 > EOF
175 > EOF
176
176
177 #if bundle1
177 #if bundle1
178 Bundle1 works because a) phases are updated as part of changegroup application
178 Bundle1 works because a) phases are updated as part of changegroup application
179 and b) client checks phases after the "unbundle" command. Since it sees no
179 and b) client checks phases after the "unbundle" command. Since it sees no
180 phase changes are necessary, it doesn't send the "pushkey" command and the
180 phase changes are necessary, it doesn't send the "pushkey" command and the
181 prepushkey hook never has to fire.
181 prepushkey hook never has to fire.
182
182
183 $ req
183 $ req
184 pushing to http://localhost:$HGPORT/
184 pushing to http://localhost:$HGPORT/
185 searching for changes
185 searching for changes
186 remote: adding changesets
186 remote: adding changesets
187 remote: adding manifests
187 remote: adding manifests
188 remote: adding file changes
188 remote: adding file changes
189 remote: added 1 changesets with 1 changes to 1 files
189 remote: added 1 changesets with 1 changes to 1 files
190 % serve errors
190 % serve errors
191
191
192 #endif
192 #endif
193
193
194 #if bundle2
194 #if bundle2
195 Bundle2 sends a "pushkey" bundle2 part. This runs as part of the transaction
195 Bundle2 sends a "pushkey" bundle2 part. This runs as part of the transaction
196 and fails the entire push.
196 and fails the entire push.
197 $ req
197 $ req
198 pushing to http://localhost:$HGPORT/
198 pushing to http://localhost:$HGPORT/
199 searching for changes
199 searching for changes
200 remote: adding changesets
200 remote: adding changesets
201 remote: adding manifests
201 remote: adding manifests
202 remote: adding file changes
202 remote: adding file changes
203 remote: added 1 changesets with 1 changes to 1 files
203 remote: added 1 changesets with 1 changes to 1 files
204 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
204 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
205 remote: pushkey-abort: prepushkey hook exited with status 1
205 remote: pushkey-abort: prepushkey hook exited with status 1
206 remote: transaction abort!
206 remote: transaction abort!
207 remote: rollback completed
207 remote: rollback completed
208 abort: updating ba677d0156c1 to public failed
208 abort: updating ba677d0156c1 to public failed
209 % serve errors
209 % serve errors
210 [255]
210 [255]
211
211
212 #endif
212 #endif
213
213
214 Now remove the failing prepushkey hook.
214 Now remove the failing prepushkey hook.
215
215
216 $ cat >> .hg/hgrc <<EOF
216 $ cat >> .hg/hgrc <<EOF
217 > [hooks]
217 > [hooks]
218 > prepushkey = sh -c "printenv.py prepushkey 0"
218 > prepushkey = sh -c "printenv.py prepushkey 0"
219 > EOF
219 > EOF
220
220
221 We don't need to test bundle1 because it succeeded above.
221 We don't need to test bundle1 because it succeeded above.
222
222
223 #if bundle2
223 #if bundle2
224 $ req
224 $ req
225 pushing to http://localhost:$HGPORT/
225 pushing to http://localhost:$HGPORT/
226 searching for changes
226 searching for changes
227 remote: adding changesets
227 remote: adding changesets
228 remote: adding manifests
228 remote: adding manifests
229 remote: adding file changes
229 remote: adding file changes
230 remote: added 1 changesets with 1 changes to 1 files
230 remote: added 1 changesets with 1 changes to 1 files
231 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
231 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
232 % serve errors
232 % serve errors
233 #endif
233 #endif
234
234
235 $ hg --config extensions.strip= strip -r 1:
235 $ hg --config extensions.strip= strip -r 1:
236 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
236 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
237
237
238 Now do a variant of the above, except on a non-publishing repository
238 Now do a variant of the above, except on a non-publishing repository
239
239
240 $ cat >> .hg/hgrc <<EOF
240 $ cat >> .hg/hgrc <<EOF
241 > [phases]
241 > [phases]
242 > publish = false
242 > publish = false
243 > [hooks]
243 > [hooks]
244 > prepushkey = sh -c "printenv.py prepushkey 1"
244 > prepushkey = sh -c "printenv.py prepushkey 1"
245 > EOF
245 > EOF
246
246
247 #if bundle1
247 #if bundle1
248 $ req
248 $ req
249 pushing to http://localhost:$HGPORT/
249 pushing to http://localhost:$HGPORT/
250 searching for changes
250 searching for changes
251 remote: adding changesets
251 remote: adding changesets
252 remote: adding manifests
252 remote: adding manifests
253 remote: adding file changes
253 remote: adding file changes
254 remote: added 1 changesets with 1 changes to 1 files
254 remote: added 1 changesets with 1 changes to 1 files
255 remote: prepushkey hook: HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1
255 remote: prepushkey hook: HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1
256 remote: pushkey-abort: prepushkey hook exited with status 1
256 remote: pushkey-abort: prepushkey hook exited with status 1
257 updating ba677d0156c1 to public failed!
257 updating ba677d0156c1 to public failed!
258 % serve errors
258 % serve errors
259 #endif
259 #endif
260
260
261 #if bundle2
261 #if bundle2
262 $ req
262 $ req
263 pushing to http://localhost:$HGPORT/
263 pushing to http://localhost:$HGPORT/
264 searching for changes
264 searching for changes
265 remote: adding changesets
265 remote: adding changesets
266 remote: adding manifests
266 remote: adding manifests
267 remote: adding file changes
267 remote: adding file changes
268 remote: added 1 changesets with 1 changes to 1 files
268 remote: added 1 changesets with 1 changes to 1 files
269 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
269 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
270 remote: pushkey-abort: prepushkey hook exited with status 1
270 remote: pushkey-abort: prepushkey hook exited with status 1
271 remote: transaction abort!
271 remote: transaction abort!
272 remote: rollback completed
272 remote: rollback completed
273 abort: updating ba677d0156c1 to public failed
273 abort: updating ba677d0156c1 to public failed
274 % serve errors
274 % serve errors
275 [255]
275 [255]
276 #endif
276 #endif
277
277
278 Make phases updates work
278 Make phases updates work
279
279
280 $ cat >> .hg/hgrc <<EOF
280 $ cat >> .hg/hgrc <<EOF
281 > [hooks]
281 > [hooks]
282 > prepushkey = sh -c "printenv.py prepushkey 0"
282 > prepushkey = sh -c "printenv.py prepushkey 0"
283 > EOF
283 > EOF
284
284
285 #if bundle1
285 #if bundle1
286 $ req
286 $ req
287 pushing to http://localhost:$HGPORT/
287 pushing to http://localhost:$HGPORT/
288 searching for changes
288 searching for changes
289 no changes found
289 no changes found
290 remote: prepushkey hook: HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1
290 remote: prepushkey hook: HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1
291 % serve errors
291 % serve errors
292 [1]
292 [1]
293 #endif
293 #endif
294
294
295 #if bundle2
295 #if bundle2
296 $ req
296 $ req
297 pushing to http://localhost:$HGPORT/
297 pushing to http://localhost:$HGPORT/
298 searching for changes
298 searching for changes
299 remote: adding changesets
299 remote: adding changesets
300 remote: adding manifests
300 remote: adding manifests
301 remote: adding file changes
301 remote: adding file changes
302 remote: added 1 changesets with 1 changes to 1 files
302 remote: added 1 changesets with 1 changes to 1 files
303 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
303 remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
304 % serve errors
304 % serve errors
305 #endif
305 #endif
306
306
307 $ hg --config extensions.strip= strip -r 1:
307 $ hg --config extensions.strip= strip -r 1:
308 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
308 saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg
309
309
310 expect authorization error: all users denied
311
312 $ echo '[web]' > .hg/hgrc
313 $ echo 'push_ssl = false' >> .hg/hgrc
314 $ echo 'deny_push = *' >> .hg/hgrc
315 $ req
316 pushing to http://localhost:$HGPORT/
317 searching for changes
318 abort: authorization failed
319 % serve errors
320 [255]
321
322 expect authorization error: some users denied, users must be authenticated
323
324 $ echo 'deny_push = unperson' >> .hg/hgrc
325 $ req
326 pushing to http://localhost:$HGPORT/
327 searching for changes
328 abort: authorization failed
329 % serve errors
330 [255]
331
332 #if bundle2
310 #if bundle2
333
311
334 $ cat > .hg/hgrc <<EOF
312 $ cat > .hg/hgrc <<EOF
335 > [web]
313 > [web]
336 > push_ssl = false
314 > push_ssl = false
337 > allow_push = *
315 > allow_push = *
338 > [experimental]
316 > [experimental]
339 > httppostargs=true
317 > httppostargs=true
340 > EOF
318 > EOF
341 $ req
319 $ req
342 pushing to http://localhost:$HGPORT/
320 pushing to http://localhost:$HGPORT/
343 searching for changes
321 searching for changes
344 remote: adding changesets
322 remote: adding changesets
345 remote: adding manifests
323 remote: adding manifests
346 remote: adding file changes
324 remote: adding file changes
347 remote: added 1 changesets with 1 changes to 1 files
325 remote: added 1 changesets with 1 changes to 1 files
348 % serve errors
326 % serve errors
349
327
350 #endif
328 #endif
351
329
352 $ cd ..
330 $ cd ..
@@ -1,315 +1,320 b''
1 # test revlog interaction about raw data (flagprocessor)
1 # test revlog interaction about raw data (flagprocessor)
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 import sys
5 import sys
6
6
7 from mercurial import (
7 from mercurial import (
8 encoding,
8 encoding,
9 node,
9 node,
10 revlog,
10 revlog,
11 transaction,
11 transaction,
12 vfs,
12 vfs,
13 )
13 )
14
14
15 # TESTTMP is optional. This makes it convenient to run without run-tests.py
15 # TESTTMP is optional. This makes it convenient to run without run-tests.py
16 tvfs = vfs.vfs(encoding.environ.get('TESTTMP', b'/tmp'))
16 tvfs = vfs.vfs(encoding.environ.get('TESTTMP', b'/tmp'))
17
17
18 # Enable generaldelta otherwise revlog won't use delta as expected by the test
18 # Enable generaldelta otherwise revlog won't use delta as expected by the test
19 tvfs.options = {'generaldelta': True, 'revlogv1': True}
19 tvfs.options = {'generaldelta': True, 'revlogv1': True}
20
20
21 # The test wants to control whether to use delta explicitly, based on
21 # The test wants to control whether to use delta explicitly, based on
22 # "storedeltachains".
22 # "storedeltachains".
23 revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self.storedeltachains
23 revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self.storedeltachains
24
24
25 def abort(msg):
25 def abort(msg):
26 print('abort: %s' % msg)
26 print('abort: %s' % msg)
27 # Return 0 so run-tests.py could compare the output.
27 # Return 0 so run-tests.py could compare the output.
28 sys.exit()
28 sys.exit()
29
29
30 # Register a revlog processor for flag EXTSTORED.
30 # Register a revlog processor for flag EXTSTORED.
31 #
31 #
32 # It simply prepends a fixed header, and replaces '1' to 'i'. So it has
32 # It simply prepends a fixed header, and replaces '1' to 'i'. So it has
33 # insertion and replacement, and may be interesting to test revlog's line-based
33 # insertion and replacement, and may be interesting to test revlog's line-based
34 # deltas.
34 # deltas.
35 _extheader = b'E\n'
35 _extheader = b'E\n'
36
36
37 def readprocessor(self, rawtext):
37 def readprocessor(self, rawtext):
38 # True: the returned text could be used to verify hash
38 # True: the returned text could be used to verify hash
39 text = rawtext[len(_extheader):].replace(b'i', b'1')
39 text = rawtext[len(_extheader):].replace(b'i', b'1')
40 return text, True
40 return text, True
41
41
42 def writeprocessor(self, text):
42 def writeprocessor(self, text):
43 # False: the returned rawtext shouldn't be used to verify hash
43 # False: the returned rawtext shouldn't be used to verify hash
44 rawtext = _extheader + text.replace(b'1', b'i')
44 rawtext = _extheader + text.replace(b'1', b'i')
45 return rawtext, False
45 return rawtext, False
46
46
47 def rawprocessor(self, rawtext):
47 def rawprocessor(self, rawtext):
48 # False: do not verify hash. Only the content returned by "readprocessor"
48 # False: do not verify hash. Only the content returned by "readprocessor"
49 # can be used to verify hash.
49 # can be used to verify hash.
50 return False
50 return False
51
51
52 revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
52 revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
53 (readprocessor, writeprocessor, rawprocessor))
53 (readprocessor, writeprocessor, rawprocessor))
54
54
55 # Utilities about reading and appending revlog
55 # Utilities about reading and appending revlog
56
56
57 def newtransaction():
57 def newtransaction():
58 # A transaction is required to write revlogs
58 # A transaction is required to write revlogs
59 report = lambda msg: None
59 report = lambda msg: None
60 return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
60 return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
61
61
62 def newrevlog(name=b'_testrevlog.i', recreate=False):
62 def newrevlog(name=b'_testrevlog.i', recreate=False):
63 if recreate:
63 if recreate:
64 tvfs.tryunlink(name)
64 tvfs.tryunlink(name)
65 rlog = revlog.revlog(tvfs, name)
65 rlog = revlog.revlog(tvfs, name)
66 return rlog
66 return rlog
67
67
68 def appendrev(rlog, text, tr, isext=False, isdelta=True):
68 def appendrev(rlog, text, tr, isext=False, isdelta=True):
69 '''Append a revision. If isext is True, set the EXTSTORED flag so flag
69 '''Append a revision. If isext is True, set the EXTSTORED flag so flag
70 processor will be used (and rawtext is different from text). If isdelta is
70 processor will be used (and rawtext is different from text). If isdelta is
71 True, force the revision to be a delta, otherwise it's full text.
71 True, force the revision to be a delta, otherwise it's full text.
72 '''
72 '''
73 nextrev = len(rlog)
73 nextrev = len(rlog)
74 p1 = rlog.node(nextrev - 1)
74 p1 = rlog.node(nextrev - 1)
75 p2 = node.nullid
75 p2 = node.nullid
76 if isext:
76 if isext:
77 flags = revlog.REVIDX_EXTSTORED
77 flags = revlog.REVIDX_EXTSTORED
78 else:
78 else:
79 flags = revlog.REVIDX_DEFAULT_FLAGS
79 flags = revlog.REVIDX_DEFAULT_FLAGS
80 # Change storedeltachains temporarily, to override revlog's delta decision
80 # Change storedeltachains temporarily, to override revlog's delta decision
81 rlog.storedeltachains = isdelta
81 rlog.storedeltachains = isdelta
82 try:
82 try:
83 rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
83 rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
84 return nextrev
84 return nextrev
85 except Exception as ex:
85 except Exception as ex:
86 abort('rev %d: failed to append: %s' % (nextrev, ex))
86 abort('rev %d: failed to append: %s' % (nextrev, ex))
87 finally:
87 finally:
88 # Restore storedeltachains. It is always True, see revlog.__init__
88 # Restore storedeltachains. It is always True, see revlog.__init__
89 rlog.storedeltachains = True
89 rlog.storedeltachains = True
90
90
91 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
91 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
92 '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
92 '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
93
93
94 This emulates push or pull. They use changegroup. Changegroup requires
94 This emulates push or pull. They use changegroup. Changegroup requires
95 repo to work. We don't have a repo, so a dummy changegroup is used.
95 repo to work. We don't have a repo, so a dummy changegroup is used.
96
96
97 If optimaldelta is True, use optimized delta parent, so the destination
97 If optimaldelta is True, use optimized delta parent, so the destination
98 revlog could probably reuse it. Otherwise it builds sub-optimal delta, and
98 revlog could probably reuse it. Otherwise it builds sub-optimal delta, and
99 the destination revlog needs more work to use it.
99 the destination revlog needs more work to use it.
100
100
101 This exercises some revlog.addgroup (and revlog._addrevision(text=None))
101 This exercises some revlog.addgroup (and revlog._addrevision(text=None))
102 code path, which is not covered by "appendrev" alone.
102 code path, which is not covered by "appendrev" alone.
103 '''
103 '''
104 class dummychangegroup(object):
104 class dummychangegroup(object):
105 @staticmethod
105 @staticmethod
106 def deltachunk(pnode):
106 def deltachunk(pnode):
107 pnode = pnode or node.nullid
107 pnode = pnode or node.nullid
108 parentrev = rlog.rev(pnode)
108 parentrev = rlog.rev(pnode)
109 r = parentrev + 1
109 r = parentrev + 1
110 if r >= len(rlog):
110 if r >= len(rlog):
111 return {}
111 return {}
112 if optimaldelta:
112 if optimaldelta:
113 deltaparent = parentrev
113 deltaparent = parentrev
114 else:
114 else:
115 # suboptimal deltaparent
115 # suboptimal deltaparent
116 deltaparent = min(0, parentrev)
116 deltaparent = min(0, parentrev)
117 if not rlog.candelta(deltaparent, r):
118 deltaparent = -1
117 return {'node': rlog.node(r), 'p1': pnode, 'p2': node.nullid,
119 return {'node': rlog.node(r), 'p1': pnode, 'p2': node.nullid,
118 'cs': rlog.node(rlog.linkrev(r)), 'flags': rlog.flags(r),
120 'cs': rlog.node(rlog.linkrev(r)), 'flags': rlog.flags(r),
119 'deltabase': rlog.node(deltaparent),
121 'deltabase': rlog.node(deltaparent),
120 'delta': rlog.revdiff(deltaparent, r)}
122 'delta': rlog.revdiff(deltaparent, r)}
121
123
122 def deltaiter(self):
124 def deltaiter(self):
123 chain = None
125 chain = None
124 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
126 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
125 node = chunkdata['node']
127 node = chunkdata['node']
126 p1 = chunkdata['p1']
128 p1 = chunkdata['p1']
127 p2 = chunkdata['p2']
129 p2 = chunkdata['p2']
128 cs = chunkdata['cs']
130 cs = chunkdata['cs']
129 deltabase = chunkdata['deltabase']
131 deltabase = chunkdata['deltabase']
130 delta = chunkdata['delta']
132 delta = chunkdata['delta']
131 flags = chunkdata['flags']
133 flags = chunkdata['flags']
132
134
133 chain = node
135 chain = node
134
136
135 yield (node, p1, p2, cs, deltabase, delta, flags)
137 yield (node, p1, p2, cs, deltabase, delta, flags)
136
138
137 def linkmap(lnode):
139 def linkmap(lnode):
138 return rlog.rev(lnode)
140 return rlog.rev(lnode)
139
141
140 dlog = newrevlog(destname, recreate=True)
142 dlog = newrevlog(destname, recreate=True)
141 dummydeltas = dummychangegroup().deltaiter()
143 dummydeltas = dummychangegroup().deltaiter()
142 dlog.addgroup(dummydeltas, linkmap, tr)
144 dlog.addgroup(dummydeltas, linkmap, tr)
143 return dlog
145 return dlog
144
146
145 def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
147 def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
146 '''Like addgroupcopy, but use the low level revlog._addrevision directly.
148 '''Like addgroupcopy, but use the low level revlog._addrevision directly.
147
149
148 It exercises some code paths that are hard to reach easily otherwise.
150 It exercises some code paths that are hard to reach easily otherwise.
149 '''
151 '''
150 dlog = newrevlog(destname, recreate=True)
152 dlog = newrevlog(destname, recreate=True)
151 for r in rlog:
153 for r in rlog:
152 p1 = rlog.node(r - 1)
154 p1 = rlog.node(r - 1)
153 p2 = node.nullid
155 p2 = node.nullid
154 if r == 0:
156 if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
155 text = rlog.revision(r, raw=True)
157 text = rlog.revision(r, raw=True)
156 cachedelta = None
158 cachedelta = None
157 else:
159 else:
158 # deltaparent is more interesting if it has the EXTSTORED flag.
160 # deltaparent cannot have EXTSTORED flag.
159 deltaparent = max([0] + [p for p in range(r - 2) if rlog.flags(p)])
161 deltaparent = max([-1] +
162 [p for p in range(r)
163 if rlog.flags(p) & revlog.REVIDX_EXTSTORED == 0])
160 text = None
164 text = None
161 cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
165 cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
162 flags = rlog.flags(r)
166 flags = rlog.flags(r)
163 ifh = dfh = None
167 ifh = dfh = None
164 try:
168 try:
165 ifh = dlog.opener(dlog.indexfile, 'a+')
169 ifh = dlog.opener(dlog.indexfile, 'a+')
166 if not dlog._inline:
170 if not dlog._inline:
167 dfh = dlog.opener(dlog.datafile, 'a+')
171 dfh = dlog.opener(dlog.datafile, 'a+')
168 dlog._addrevision(rlog.node(r), text, tr, r, p1, p2, flags,
172 dlog._addrevision(rlog.node(r), text, tr, r, p1, p2, flags,
169 cachedelta, ifh, dfh)
173 cachedelta, ifh, dfh)
170 finally:
174 finally:
171 if dfh is not None:
175 if dfh is not None:
172 dfh.close()
176 dfh.close()
173 if ifh is not None:
177 if ifh is not None:
174 ifh.close()
178 ifh.close()
175 return dlog
179 return dlog
176
180
177 # Utilities to generate revisions for testing
181 # Utilities to generate revisions for testing
178
182
179 def genbits(n):
183 def genbits(n):
180 '''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
184 '''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
181 i.e. the generated numbers have a width of n bits.
185 i.e. the generated numbers have a width of n bits.
182
186
183 The combination of two adjacent numbers will cover all possible cases.
187 The combination of two adjacent numbers will cover all possible cases.
184 That is to say, given any x, y where both x, and y are in range(2 ** n),
188 That is to say, given any x, y where both x, and y are in range(2 ** n),
185 there is an x followed immediately by y in the generated sequence.
189 there is an x followed immediately by y in the generated sequence.
186 '''
190 '''
187 m = 2 ** n
191 m = 2 ** n
188
192
189 # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
193 # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
190 gray = lambda x: x ^ (x >> 1)
194 gray = lambda x: x ^ (x >> 1)
191 reversegray = dict((gray(i), i) for i in range(m))
195 reversegray = dict((gray(i), i) for i in range(m))
192
196
193 # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
197 # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
194 # the next unused gray code where higher n bits equal to X.
198 # the next unused gray code where higher n bits equal to X.
195
199
196 # For gray codes whose higher bits are X, a[X] of them have been used.
200 # For gray codes whose higher bits are X, a[X] of them have been used.
197 a = [0] * m
201 a = [0] * m
198
202
199 # Iterate from 0.
203 # Iterate from 0.
200 x = 0
204 x = 0
201 yield x
205 yield x
202 for i in range(m * m):
206 for i in range(m * m):
203 x = reversegray[x]
207 x = reversegray[x]
204 y = gray(a[x] + x * m) & (m - 1)
208 y = gray(a[x] + x * m) & (m - 1)
205 assert a[x] < m
209 assert a[x] < m
206 a[x] += 1
210 a[x] += 1
207 x = y
211 x = y
208 yield x
212 yield x
209
213
210 def gentext(rev):
214 def gentext(rev):
211 '''Given a revision number, generate dummy text'''
215 '''Given a revision number, generate dummy text'''
212 return b''.join(b'%d\n' % j for j in range(-1, rev % 5))
216 return b''.join(b'%d\n' % j for j in range(-1, rev % 5))
213
217
214 def writecases(rlog, tr):
218 def writecases(rlog, tr):
215 '''Write some revisions interested to the test.
219 '''Write some revisions interested to the test.
216
220
217 The test is interested in 3 properties of a revision:
221 The test is interested in 3 properties of a revision:
218
222
219 - Is it a delta or a full text? (isdelta)
223 - Is it a delta or a full text? (isdelta)
220 This is to catch some delta application issues.
224 This is to catch some delta application issues.
221 - Does it have a flag of EXTSTORED? (isext)
225 - Does it have a flag of EXTSTORED? (isext)
222 This is to catch some flag processor issues. Especially when
226 This is to catch some flag processor issues. Especially when
223 interacted with revlog deltas.
227 interacted with revlog deltas.
224 - Is its text empty? (isempty)
228 - Is its text empty? (isempty)
225 This is less important. It is intended to try to catch some careless
229 This is less important. It is intended to try to catch some careless
226 checks like "if text" instead of "if text is None". Note: if flag
230 checks like "if text" instead of "if text is None". Note: if flag
227 processor is involved, raw text may be not empty.
231 processor is involved, raw text may be not empty.
228
232
229 Write 65 revisions. So that all combinations of the above flags for
233 Write 65 revisions. So that all combinations of the above flags for
230 adjacent revisions are covered. That is to say,
234 adjacent revisions are covered. That is to say,
231
235
232 len(set(
236 len(set(
233 (r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty)
237 (r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty)
234 for r in range(len(rlog) - 1)
238 for r in range(len(rlog) - 1)
235 )) is 64.
239 )) is 64.
236
240
237 Where "r.delta", "r.ext", and "r.empty" are booleans matching properties
241 Where "r.delta", "r.ext", and "r.empty" are booleans matching properties
238 mentioned above.
242 mentioned above.
239
243
240 Return expected [(text, rawtext)].
244 Return expected [(text, rawtext)].
241 '''
245 '''
242 result = []
246 result = []
243 for i, x in enumerate(genbits(3)):
247 for i, x in enumerate(genbits(3)):
244 isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4)
248 isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4)
245 if isempty:
249 if isempty:
246 text = b''
250 text = b''
247 else:
251 else:
248 text = gentext(i)
252 text = gentext(i)
249 rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta)
253 rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta)
250
254
251 # Verify text, rawtext, and rawsize
255 # Verify text, rawtext, and rawsize
252 if isext:
256 if isext:
253 rawtext = writeprocessor(None, text)[0]
257 rawtext = writeprocessor(None, text)[0]
254 else:
258 else:
255 rawtext = text
259 rawtext = text
256 if rlog.rawsize(rev) != len(rawtext):
260 if rlog.rawsize(rev) != len(rawtext):
257 abort('rev %d: wrong rawsize' % rev)
261 abort('rev %d: wrong rawsize' % rev)
258 if rlog.revision(rev, raw=False) != text:
262 if rlog.revision(rev, raw=False) != text:
259 abort('rev %d: wrong text' % rev)
263 abort('rev %d: wrong text' % rev)
260 if rlog.revision(rev, raw=True) != rawtext:
264 if rlog.revision(rev, raw=True) != rawtext:
261 abort('rev %d: wrong rawtext' % rev)
265 abort('rev %d: wrong rawtext' % rev)
262 result.append((text, rawtext))
266 result.append((text, rawtext))
263
267
264 # Verify flags like isdelta, isext work as expected
268 # Verify flags like isdelta, isext work as expected
265 if bool(rlog.deltaparent(rev) > -1) != isdelta:
269 # isdelta can be overridden to False if this or p1 has isext set
266 abort('rev %d: isdelta is ineffective' % rev)
270 if bool(rlog.deltaparent(rev) > -1) and not isdelta:
271 abort('rev %d: isdelta is unexpected' % rev)
267 if bool(rlog.flags(rev)) != isext:
272 if bool(rlog.flags(rev)) != isext:
268 abort('rev %d: isext is ineffective' % rev)
273 abort('rev %d: isext is ineffective' % rev)
269 return result
274 return result
270
275
271 # Main test and checking
276 # Main test and checking
272
277
273 def checkrevlog(rlog, expected):
278 def checkrevlog(rlog, expected):
274 '''Check if revlog has expected contents. expected is [(text, rawtext)]'''
279 '''Check if revlog has expected contents. expected is [(text, rawtext)]'''
275 # Test using different access orders. This could expose some issues
280 # Test using different access orders. This could expose some issues
276 # depending on revlog caching (see revlog._cache).
281 # depending on revlog caching (see revlog._cache).
277 for r0 in range(len(rlog) - 1):
282 for r0 in range(len(rlog) - 1):
278 r1 = r0 + 1
283 r1 = r0 + 1
279 for revorder in [[r0, r1], [r1, r0]]:
284 for revorder in [[r0, r1], [r1, r0]]:
280 for raworder in [[True], [False], [True, False], [False, True]]:
285 for raworder in [[True], [False], [True, False], [False, True]]:
281 nlog = newrevlog()
286 nlog = newrevlog()
282 for rev in revorder:
287 for rev in revorder:
283 for raw in raworder:
288 for raw in raworder:
284 t = nlog.revision(rev, raw=raw)
289 t = nlog.revision(rev, raw=raw)
285 if t != expected[rev][int(raw)]:
290 if t != expected[rev][int(raw)]:
286 abort('rev %d: corrupted %stext'
291 abort('rev %d: corrupted %stext'
287 % (rev, raw and 'raw' or ''))
292 % (rev, raw and 'raw' or ''))
288
293
289 def maintest():
294 def maintest():
290 expected = rl = None
295 expected = rl = None
291 with newtransaction() as tr:
296 with newtransaction() as tr:
292 rl = newrevlog(recreate=True)
297 rl = newrevlog(recreate=True)
293 expected = writecases(rl, tr)
298 expected = writecases(rl, tr)
294 checkrevlog(rl, expected)
299 checkrevlog(rl, expected)
295 print('local test passed')
300 print('local test passed')
296 # Copy via revlog.addgroup
301 # Copy via revlog.addgroup
297 rl1 = addgroupcopy(rl, tr)
302 rl1 = addgroupcopy(rl, tr)
298 checkrevlog(rl1, expected)
303 checkrevlog(rl1, expected)
299 rl2 = addgroupcopy(rl, tr, optimaldelta=False)
304 rl2 = addgroupcopy(rl, tr, optimaldelta=False)
300 checkrevlog(rl2, expected)
305 checkrevlog(rl2, expected)
301 print('addgroupcopy test passed')
306 print('addgroupcopy test passed')
302 # Copy via revlog.clone
307 # Copy via revlog.clone
303 rl3 = newrevlog(name='_destrevlog3.i', recreate=True)
308 rl3 = newrevlog(name='_destrevlog3.i', recreate=True)
304 rl.clone(tr, rl3)
309 rl.clone(tr, rl3)
305 checkrevlog(rl3, expected)
310 checkrevlog(rl3, expected)
306 print('clone test passed')
311 print('clone test passed')
307 # Copy via low-level revlog._addrevision
312 # Copy via low-level revlog._addrevision
308 rl4 = lowlevelcopy(rl, tr)
313 rl4 = lowlevelcopy(rl, tr)
309 checkrevlog(rl4, expected)
314 checkrevlog(rl4, expected)
310 print('lowlevelcopy test passed')
315 print('lowlevelcopy test passed')
311
316
312 try:
317 try:
313 maintest()
318 maintest()
314 except Exception as ex:
319 except Exception as ex:
315 abort('crashed: %s' % ex)
320 abort('crashed: %s' % ex)
General Comments 0
You need to be logged in to leave comments. Login now