bundle2: add ``pushkey`` support...
Pierre-Yves David - r21660:e87d2a12 default
@@ -1,870 +1,896 @@
1 1 # bundle2.py - generic container format to transmit arbitrary data.
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """Handling of the new bundle2 format
8 8
9 9 The goal of bundle2 is to act as an atomic packet to transmit a set of
10 10 payloads in an application agnostic way. It consists of a sequence of "parts"
11 11 that will be handed to and processed by the application layer.
12 12
13 13
14 14 General format architecture
15 15 ===========================
16 16
17 17 The format is structured as follows
18 18
19 19 - magic string
20 20 - stream level parameters
21 21 - payload parts (any number)
22 22 - end of stream marker.
23 23
24 24 The binary format
25 25 ============================
26 26
27 27 All numbers are unsigned and big-endian.
28 28
29 29 stream level parameters
30 30 ------------------------
31 31
32 32 The binary format is as follows
33 33
34 34 :params size: (16 bits integer)
35 35
36 36 The total number of bytes used by the parameters
37 37 
38 38 :params value: arbitrary number of bytes
39 39
40 40 A blob of `params size` containing the serialized version of all stream level
41 41 parameters.
42 42
43 43 The blob contains a space separated list of parameters. Parameters with value
44 44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45 45
46 46 Empty names are forbidden.
47 47
48 48 A name MUST start with a letter. If this first letter is lower case, the
49 49 parameter is advisory and can be safely ignored. However, when the first
50 50 letter is capital, the parameter is mandatory and the bundling process MUST
51 51 stop if it is not able to process it.
52 52
53 53 Stream parameters use a simple textual format for two main reasons:
54 54
55 55 - Stream level parameters should remain simple and we want to discourage any
56 56 crazy usage.
57 57 - Textual data allow easy human inspection of a bundle2 header in case of
58 58 troubles.
59 59
60 60 Any application level options MUST go into a bundle2 part instead.
61 61
62 62 Payload part
63 63 ------------------------
64 64
65 65 Binary format is as follow
66 66
67 67 :header size: (16 bits integer)
68 68
69 69 The total number of bytes used by the part header. When the header is empty
70 70 (size = 0) this is interpreted as the end of stream marker.
71 71
72 72 :header:
73 73
74 74 The header defines how to interpret the part. It contains two pieces of
75 75 data: the part type, and the part parameters.
76 76
77 77 The part type is used to route the part to an application level handler
78 78 that can interpret the payload.
79 79
80 80 Part parameters are passed to the application level handler. They are
81 81 meant to convey information that will help the application level object to
82 82 interpret the part payload.
83 83
84 84 The binary format of the header is as follows
85 85
86 86 :typesize: (one byte)
87 87
88 88 :parttype: alphanumerical part name
89 89
90 90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
91 91 to this part.
92 92
93 93 :parameters:
94 94
95 95 Part's parameter may have arbitrary content, the binary structure is::
96 96
97 97 <mandatory-count><advisory-count><param-sizes><param-data>
98 98
99 99 :mandatory-count: 1 byte, number of mandatory parameters
100 100
101 101 :advisory-count: 1 byte, number of advisory parameters
102 102
103 103 :param-sizes:
104 104
105 105 N pairs of bytes, where N is the total number of parameters. Each
106 106 pair contains (<size-of-key>, <size-of-value>) for one parameter.
107 107
108 108 :param-data:
109 109
110 110 A blob of bytes from which each parameter key and value can be
111 111 retrieved using the list of size couples stored in the previous
112 112 field.
113 113
114 114 Mandatory parameters come first, then the advisory ones.
115 115
116 116 Each parameter's key MUST be unique within the part.
117 117
118 118 :payload:
119 119
120 120 payload is a series of `<chunksize><chunkdata>`.
121 121
122 122 `chunksize` is a 32 bits integer, `chunkdata` is plain bytes (exactly as many
123 123 as `chunksize` says). The payload part is concluded by a zero size chunk.
124 124
125 125 The current implementation always produces either zero or one chunk.
126 126 This is an implementation limitation that will ultimately be lifted.
127 127
128 128 Bundle processing
129 129 ============================
130 130
131 131 Each part is processed in order using a "part handler". Handlers are
132 132 registered for a certain part type.
133 133
134 134 The matching of a part to its handler is case insensitive. The case of the
135 135 part type is used to know if a part is mandatory or advisory. If the part type
136 136 contains any uppercase char it is considered mandatory. When no handler is
137 137 known for a mandatory part, the process is aborted and an exception is raised.
138 138 If the part is advisory and no handler is known, the part is ignored. When the
139 139 process is aborted, the full bundle is still read from the stream to keep the
140 140 channel usable. But none of the parts read after an abort are processed. In the
141 141 future, dropping the stream may become an option for channels we do not care to
142 142 preserve.
143 143 """
144 144
145 145 import util
146 146 import struct
147 147 import urllib
148 148 import string
149 149 import pushkey
150 150
151 151 import changegroup, error
152 152 from i18n import _
153 153
154 154 _pack = struct.pack
155 155 _unpack = struct.unpack
156 156
157 157 _magicstring = 'HG2X'
158 158
159 159 _fstreamparamsize = '>H'
160 160 _fpartheadersize = '>H'
161 161 _fparttypesize = '>B'
162 162 _fpartid = '>I'
163 163 _fpayloadsize = '>I'
164 164 _fpartparamcount = '>BB'
165 165
166 166 preferedchunksize = 4096
167 167
168 168 def _makefpartparamsizes(nbparams):
169 169 """return a struct format to read part parameter sizes
170 170
171 171 The number parameters is variable so we need to build that format
172 172 dynamically.
173 173 """
174 174 return '>'+('BB'*nbparams)
175 175
176 176 parthandlermapping = {}
177 177
178 178 def parthandler(parttype, params=()):
179 179 """decorator that register a function as a bundle2 part handler
180 180
181 181 eg::
182 182
183 183 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
184 184 def myparttypehandler(...):
185 185 '''process a part of type "my part".'''
186 186 ...
187 187 """
188 188 def _decorator(func):
189 189 lparttype = parttype.lower() # enforce lower case matching.
190 190 assert lparttype not in parthandlermapping
191 191 parthandlermapping[lparttype] = func
192 192 func.params = frozenset(params)
193 193 return func
194 194 return _decorator
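
A hedged registration example; the part type 'b2x:example' and its 'key' parameter are invented for illustration, the rest only uses the handler API shown in this file.

@parthandler('b2x:example', ('key',))
def handleexample(op, inpart):
    """record the payload of an example part"""
    key = inpart.params['key']          # declared parameter, so accepted when mandatory
    data = inpart.read()                # always consume the payload
    op.records.add('example', {'key': key, 'data': data})
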
195 195
196 196 class unbundlerecords(object):
197 197 """keep record of what happens during and unbundle
198 198
199 199 New records are added using `records.add('cat', obj)`. Where 'cat' is a
200 200 category of record and obj is an arbitrary object.
201 201
202 202 `records['cat']` will return all entries of this category 'cat'.
203 203
204 204 Iterating on the object itself will yield `('category', obj)` tuples
205 205 for all entries.
206 206
207 207 All iterations happens in chronological order.
208 208 """
209 209
210 210 def __init__(self):
211 211 self._categories = {}
212 212 self._sequences = []
213 213 self._replies = {}
214 214
215 215 def add(self, category, entry, inreplyto=None):
216 216 """add a new record of a given category.
217 217
218 218 The entry can then be retrieved in the list returned by
219 219 self['category']."""
220 220 self._categories.setdefault(category, []).append(entry)
221 221 self._sequences.append((category, entry))
222 222 if inreplyto is not None:
223 223 self.getreplies(inreplyto).add(category, entry)
224 224
225 225 def getreplies(self, partid):
226 226 """get the subrecords that replies to a specific part"""
227 227 return self._replies.setdefault(partid, unbundlerecords())
228 228
229 229 def __getitem__(self, cat):
230 230 return tuple(self._categories.get(cat, ()))
231 231
232 232 def __iter__(self):
233 233 return iter(self._sequences)
234 234
235 235 def __len__(self):
236 236 return len(self._sequences)
237 237
238 238 def __nonzero__(self):
239 239 return bool(self._sequences)
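
A minimal usage sketch of unbundlerecords, mirroring its docstring; the category names and entries are invented.

records = unbundlerecords()
records.add('changegroup', {'return': 1})
records.add('output', 'remote: done', inreplyto=0)
assert records['changegroup'] == ({'return': 1},)
assert len(records.getreplies(0)) == 1
for category, entry in records:         # chronological iteration
    pass
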
240 240
241 241 class bundleoperation(object):
242 242 """an object that represents a single bundling process
243 243
244 244 Its purpose is to carry unbundle-related objects and states.
245 245
246 246 A new object should be created at the beginning of each bundle processing.
247 247 The object is to be returned by the processing function.
248 248
249 249 The object has very little content now; it will ultimately contain:
250 250 * an access to the repo the bundle is applied to,
251 251 * a ui object,
252 252 * a way to retrieve a transaction to add changes to the repo,
253 253 * a way to record the result of processing each part,
254 254 * a way to construct a bundle response when applicable.
255 255 """
256 256
257 257 def __init__(self, repo, transactiongetter):
258 258 self.repo = repo
259 259 self.ui = repo.ui
260 260 self.records = unbundlerecords()
261 261 self.gettransaction = transactiongetter
262 262 self.reply = None
263 263
264 264 class TransactionUnavailable(RuntimeError):
265 265 pass
266 266
267 267 def _notransaction():
268 268 """default method to get a transaction while processing a bundle
269 269
270 270 Raise an exception to highlight the fact that no transaction was expected
271 271 to be created"""
272 272 raise TransactionUnavailable()
273 273
274 274 def processbundle(repo, unbundler, transactiongetter=_notransaction):
275 275 """This function process a bundle, apply effect to/from a repo
276 276
277 277 It iterates over each part then searches for and uses the proper handling
278 278 code to process the part. Parts are processed in order.
279 279
280 280 This is a very early version of this function that will be strongly reworked
281 281 before final usage.
282 282
283 283 An unknown Mandatory part will abort the process.
284 284 """
285 285 op = bundleoperation(repo, transactiongetter)
286 286 # todo:
287 287 # - replace this with an init function soon.
288 288 # - exception catching
289 289 unbundler.params
290 290 iterparts = unbundler.iterparts()
291 291 part = None
292 292 try:
293 293 for part in iterparts:
294 294 parttype = part.type
295 295 # part key are matched lower case
296 296 key = parttype.lower()
297 297 try:
298 298 handler = parthandlermapping.get(key)
299 299 if handler is None:
300 300 raise error.BundleValueError(parttype=key)
301 301 op.ui.debug('found a handler for part %r\n' % parttype)
302 302 unknownparams = part.mandatorykeys - handler.params
303 303 if unknownparams:
304 304 unknownparams = list(unknownparams)
305 305 unknownparams.sort()
306 306 raise error.BundleValueError(parttype=key,
307 307 params=unknownparams)
308 308 except error.BundleValueError, exc:
309 309 if key != parttype: # mandatory parts
310 310 raise
311 311 op.ui.debug('ignoring unsupported advisory part %s\n' % exc)
312 312 # consuming the part
313 313 part.read()
314 314 continue
315 315
316 316
317 317 # handler is called outside the above try block so that we don't
318 318 # risk catching KeyErrors from anything other than the
319 319 # parthandlermapping lookup (any KeyError raised by handler()
320 320 # itself represents a defect of a different variety).
321 321 output = None
322 322 if op.reply is not None:
323 323 op.ui.pushbuffer(error=True)
324 324 output = ''
325 325 try:
326 326 handler(op, part)
327 327 finally:
328 328 if output is not None:
329 329 output = op.ui.popbuffer()
330 330 if output:
331 331 outpart = op.reply.newpart('b2x:output', data=output)
332 332 outpart.addparam('in-reply-to', str(part.id), mandatory=False)
333 333 part.read()
334 334 except Exception, exc:
335 335 if part is not None:
336 336 # consume the bundle content
337 337 part.read()
338 338 for part in iterparts:
339 339 # consume the bundle content
340 340 part.read()
341 341 # Small hack to let caller code distinguish exceptions from bundle2
342 342 # processing from the ones from bundle1 processing. This is mostly
343 343 # needed to handle different return codes to unbundle according to the
344 344 # type of bundle. We should probably clean up or drop this return code
345 345 # craziness in a future version.
346 346 exc.duringunbundle2 = True
347 347 raise
348 348 return op
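
A hedged sketch of how a caller might drive processbundle, loosely following what exchange.unbundle does; `repo` (a localrepository) and `fp` (the incoming stream) are assumed to be supplied by the caller, and the function name is invented.

def applybundle2(repo, fp):
    tr = [None]
    def gettransaction():
        if tr[0] is None:
            tr[0] = repo.transaction('unbundle')
        return tr[0]
    try:
        op = processbundle(repo, unbundle20(repo.ui, fp), gettransaction)
        if tr[0] is not None:
            tr[0].close()
        return op
    finally:
        if tr[0] is not None:
            tr[0].release()
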
349 349
350 350 def decodecaps(blob):
351 351 """decode a bundle2 caps bytes blob into a dictionnary
352 352
353 353 The blob is a list of capabilities (one per line)
354 354 Capabilities may have values using a line of the form::
355 355
356 356 capability=value1,value2,value3
357 357
358 358 The values are always a list."""
359 359 caps = {}
360 360 for line in blob.splitlines():
361 361 if not line:
362 362 continue
363 363 if '=' not in line:
364 364 key, vals = line, ()
365 365 else:
366 366 key, vals = line.split('=', 1)
367 367 vals = vals.split(',')
368 368 key = urllib.unquote(key)
369 369 vals = [urllib.unquote(v) for v in vals]
370 370 caps[key] = vals
371 371 return caps
372 372
373 373 def encodecaps(caps):
374 374 """encode a bundle2 caps dictionary into a bytes blob"""
375 375 chunks = []
376 376 for ca in sorted(caps):
377 377 vals = caps[ca]
378 378 ca = urllib.quote(ca)
379 379 vals = [urllib.quote(v) for v in vals]
380 380 if vals:
381 381 ca = "%s=%s" % (ca, ','.join(vals))
382 382 chunks.append(ca)
383 383 return '\n'.join(chunks)
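
A round-trip sketch for the blob format handled by these two helpers; the capability set mirrors the one advertised by localrepo in this changeset.

caps = {'HG2X': (), 'b2x:listkeys': (), 'b2x:pushkey': ()}
blob = encodecaps(caps)
# blob is 'HG2X\nb2x:listkeys\nb2x:pushkey' (sorted, one capability per line)
assert decodecaps(blob) == {'HG2X': [], 'b2x:listkeys': [], 'b2x:pushkey': []}
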
384 384
385 385 class bundle20(object):
386 386 """represent an outgoing bundle2 container
387 387
388 388 Use the `addparam` method to add stream level parameters and `newpart` to
389 389 populate it. Then call `getchunks` to retrieve all the binary chunks of
390 390 data that compose the bundle2 container."""
391 391
392 392 def __init__(self, ui, capabilities=()):
393 393 self.ui = ui
394 394 self._params = []
395 395 self._parts = []
396 396 self.capabilities = dict(capabilities)
397 397
398 398 # methods used to define the bundle2 content
399 399 def addparam(self, name, value=None):
400 400 """add a stream level parameter"""
401 401 if not name:
402 402 raise ValueError('empty parameter name')
403 403 if name[0] not in string.letters:
404 404 raise ValueError('non letter first character: %r' % name)
405 405 self._params.append((name, value))
406 406
407 407 def addpart(self, part):
408 408 """add a new part to the bundle2 container
409 409
410 410 Parts contains the actual applicative payload."""
411 411 assert part.id is None
412 412 part.id = len(self._parts) # very cheap counter
413 413 self._parts.append(part)
414 414
415 415 def newpart(self, typeid, *args, **kwargs):
416 416 """create a new part and add it to the containers
417 417
418 418 The part is directly added to the container. For now, this means
419 419 that any failure to properly initialize the part after calling
420 420 ``newpart`` should result in a failure of the whole bundling process.
421 421
422 422 You can still fall back to manually create and add if you need better
423 423 control."""
424 424 part = bundlepart(typeid, *args, **kwargs)
425 425 self.addpart(part)
426 426 return part
427 427
428 428 # methods used to generate the bundle2 stream
429 429 def getchunks(self):
430 430 self.ui.debug('start emission of %s stream\n' % _magicstring)
431 431 yield _magicstring
432 432 param = self._paramchunk()
433 433 self.ui.debug('bundle parameter: %s\n' % param)
434 434 yield _pack(_fstreamparamsize, len(param))
435 435 if param:
436 436 yield param
437 437
438 438 self.ui.debug('start of parts\n')
439 439 for part in self._parts:
440 440 self.ui.debug('bundle part: "%s"\n' % part.type)
441 441 for chunk in part.getchunks():
442 442 yield chunk
443 443 self.ui.debug('end of bundle\n')
444 444 yield '\0\0'
445 445
446 446 def _paramchunk(self):
447 447 """return a encoded version of all stream parameters"""
448 448 blocks = []
449 449 for par, value in self._params:
450 450 par = urllib.quote(par)
451 451 if value is not None:
452 452 value = urllib.quote(value)
453 453 par = '%s=%s' % (par, value)
454 454 blocks.append(par)
455 455 return ' '.join(blocks)
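
A hedged sketch of assembling an outgoing bundle with this class and writing it to disk; `someui` is assumed to be a mercurial ui instance, and the stream parameter, part content and function name are invented for illustration.

def writebundle(someui, path):
    bundler = bundle20(someui)
    bundler.addparam('evolution')          # lower case first letter: advisory parameter
    part = bundler.newpart('b2x:output', data='hello from the bundler\n')
    part.addparam('origin', 'sketch', mandatory=False)
    f = open(path, 'wb')
    try:
        for chunk in bundler.getchunks():
            f.write(chunk)
    finally:
        f.close()
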
456 456
457 457 class unpackermixin(object):
458 458 """A mixin to extract bytes and struct data from a stream"""
459 459
460 460 def __init__(self, fp):
461 461 self._fp = fp
462 462
463 463 def _unpack(self, format):
464 464 """unpack this struct format from the stream"""
465 465 data = self._readexact(struct.calcsize(format))
466 466 return _unpack(format, data)
467 467
468 468 def _readexact(self, size):
469 469 """read exactly <size> bytes from the stream"""
470 470 return changegroup.readexactly(self._fp, size)
471 471
472 472
473 473 class unbundle20(unpackermixin):
474 474 """interpret a bundle2 stream
475 475
476 476 This class is fed with a binary stream and yields parts through its
477 477 `iterparts` methods."""
478 478
479 479 def __init__(self, ui, fp, header=None):
480 480 """If header is specified, we do not read it out of the stream."""
481 481 self.ui = ui
482 482 super(unbundle20, self).__init__(fp)
483 483 if header is None:
484 484 header = self._readexact(4)
485 485 magic, version = header[0:2], header[2:4]
486 486 if magic != 'HG':
487 487 raise util.Abort(_('not a Mercurial bundle'))
488 488 if version != '2X':
489 489 raise util.Abort(_('unknown bundle version %s') % version)
490 490 self.ui.debug('start processing of %s stream\n' % header)
491 491
492 492 @util.propertycache
493 493 def params(self):
494 494 """dictionary of stream level parameters"""
495 495 self.ui.debug('reading bundle2 stream parameters\n')
496 496 params = {}
497 497 paramssize = self._unpack(_fstreamparamsize)[0]
498 498 if paramssize:
499 499 for p in self._readexact(paramssize).split(' '):
500 500 p = p.split('=', 1)
501 501 p = [urllib.unquote(i) for i in p]
502 502 if len(p) < 2:
503 503 p.append(None)
504 504 self._processparam(*p)
505 505 params[p[0]] = p[1]
506 506 return params
507 507
508 508 def _processparam(self, name, value):
509 509 """process a parameter, applying its effect if needed
510 510
511 511 Parameters starting with a lower case letter are advisory and will be
512 512 ignored when unknown. Those starting with an upper case letter are
513 513 mandatory and this function will raise a KeyError when unknown.
514 514 
515 515 Note: no options are currently supported. Any input will either be
516 516 ignored or fail.
517 517 """
518 518 if not name:
519 519 raise ValueError('empty parameter name')
520 520 if name[0] not in string.letters:
521 521 raise ValueError('non letter first character: %r' % name)
522 522 # Some logic will be later added here to try to process the option for
523 523 # a dict of known parameter.
524 524 if name[0].islower():
525 525 self.ui.debug("ignoring unknown parameter %r\n" % name)
526 526 else:
527 527 raise error.BundleValueError(params=(name,))
528 528
529 529
530 530 def iterparts(self):
531 531 """yield all parts contained in the stream"""
532 532 # make sure param have been loaded
533 533 self.params
534 534 self.ui.debug('start extraction of bundle2 parts\n')
535 535 headerblock = self._readpartheader()
536 536 while headerblock is not None:
537 537 part = unbundlepart(self.ui, headerblock, self._fp)
538 538 yield part
539 539 headerblock = self._readpartheader()
540 540 self.ui.debug('end of bundle2 stream\n')
541 541
542 542 def _readpartheader(self):
543 543 """reads a part header size and return the bytes blob
544 544
545 545 returns None if empty"""
546 546 headersize = self._unpack(_fpartheadersize)[0]
547 547 self.ui.debug('part header size: %i\n' % headersize)
548 548 if headersize:
549 549 return self._readexact(headersize)
550 550 return None
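
A hedged counterpart to the writing sketch above: reading parts back without applying them; `someui` is again assumed to be a ui instance and the function name is invented.

def dumpbundle(someui, path):
    fp = open(path, 'rb')
    try:
        unbundler = unbundle20(someui, fp)
        for part in unbundler.iterparts():
            someui.write('%s: params=%r, %d payload bytes\n'
                         % (part.type, part.params, len(part.read())))
    finally:
        fp.close()
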
551 551
552 552
553 553 class bundlepart(object):
554 554 """A bundle2 part contains application level payload
555 555
556 556 The part `type` is used to route the part to the application level
557 557 handler.
558 558
559 559 The part payload is contained in ``part.data``. It could be raw bytes or a
560 560 generator of byte chunks.
561 561
562 562 You can add parameters to the part using the ``addparam`` method.
563 563 Parameters can be either mandatory (default) or advisory. Remote side
564 564 should be able to safely ignore the advisory ones.
565 565
566 566 Both data and parameters cannot be modified after the generation has begun.
567 567 """
568 568
569 569 def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
570 570 data=''):
571 571 self.id = None
572 572 self.type = parttype
573 573 self._data = data
574 574 self._mandatoryparams = list(mandatoryparams)
575 575 self._advisoryparams = list(advisoryparams)
576 576 # checking for duplicated entries
577 577 self._seenparams = set()
578 578 for pname, __ in self._mandatoryparams + self._advisoryparams:
579 579 if pname in self._seenparams:
580 580 raise RuntimeError('duplicated params: %s' % pname)
581 581 self._seenparams.add(pname)
582 582 # status of the part's generation:
583 583 # - None: not started,
584 584 # - False: currently generated,
585 585 # - True: generation done.
586 586 self._generated = None
587 587
588 588 # methods used to define the part content
589 589 def __setdata(self, data):
590 590 if self._generated is not None:
591 591 raise error.ReadOnlyPartError('part is being generated')
592 592 self._data = data
593 593 def __getdata(self):
594 594 return self._data
595 595 data = property(__getdata, __setdata)
596 596
597 597 @property
598 598 def mandatoryparams(self):
599 599 # make it an immutable tuple to force people through ``addparam``
600 600 return tuple(self._mandatoryparams)
601 601
602 602 @property
603 603 def advisoryparams(self):
604 604 # make it an immutable tuple to force people through ``addparam``
605 605 return tuple(self._advisoryparams)
606 606
607 607 def addparam(self, name, value='', mandatory=True):
608 608 if self._generated is not None:
609 609 raise error.ReadOnlyPartError('part is being generated')
610 610 if name in self._seenparams:
611 611 raise ValueError('duplicated params: %s' % name)
612 612 self._seenparams.add(name)
613 613 params = self._advisoryparams
614 614 if mandatory:
615 615 params = self._mandatoryparams
616 616 params.append((name, value))
617 617
618 618 # methods used to generate the bundle2 stream
619 619 def getchunks(self):
620 620 if self._generated is not None:
621 621 raise RuntimeError('part can only be consumed once')
622 622 self._generated = False
623 623 #### header
624 624 ## parttype
625 625 header = [_pack(_fparttypesize, len(self.type)),
626 626 self.type, _pack(_fpartid, self.id),
627 627 ]
628 628 ## parameters
629 629 # count
630 630 manpar = self.mandatoryparams
631 631 advpar = self.advisoryparams
632 632 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
633 633 # size
634 634 parsizes = []
635 635 for key, value in manpar:
636 636 parsizes.append(len(key))
637 637 parsizes.append(len(value))
638 638 for key, value in advpar:
639 639 parsizes.append(len(key))
640 640 parsizes.append(len(value))
641 641 paramsizes = _pack(_makefpartparamsizes(len(parsizes) / 2), *parsizes)
642 642 header.append(paramsizes)
643 643 # key, value
644 644 for key, value in manpar:
645 645 header.append(key)
646 646 header.append(value)
647 647 for key, value in advpar:
648 648 header.append(key)
649 649 header.append(value)
650 650 ## finalize header
651 651 headerchunk = ''.join(header)
652 652 yield _pack(_fpartheadersize, len(headerchunk))
653 653 yield headerchunk
654 654 ## payload
655 655 for chunk in self._payloadchunks():
656 656 yield _pack(_fpayloadsize, len(chunk))
657 657 yield chunk
658 658 # end of payload
659 659 yield _pack(_fpayloadsize, 0)
660 660 self._generated = True
661 661
662 662 def _payloadchunks(self):
663 663 """yield chunks of a the part payload
664 664
665 665 Exists to handle the different methods to provide data to a part."""
666 666 # we only support fixed size data now.
667 667 # This will be improved in the future.
668 668 if util.safehasattr(self.data, 'next'):
669 669 buff = util.chunkbuffer(self.data)
670 670 chunk = buff.read(preferedchunksize)
671 671 while chunk:
672 672 yield chunk
673 673 chunk = buff.read(preferedchunksize)
674 674 elif len(self.data):
675 675 yield self.data
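
A hedged sketch of building a part by hand instead of via bundle20.newpart(); the part type and parameter names are invented.

part = bundlepart('b2x:example', data='payload bytes')
part.addparam('key', 'value')                     # mandatory by default
part.addparam('hint', 'optional', mandatory=False)
# the part still needs bundle20.addpart() to get an ``id`` assigned before
# getchunks() can serialize it
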
676 676
677 677 class unbundlepart(unpackermixin):
678 678 """a bundle part read from a bundle"""
679 679
680 680 def __init__(self, ui, header, fp):
681 681 super(unbundlepart, self).__init__(fp)
682 682 self.ui = ui
683 683 # unbundle state attr
684 684 self._headerdata = header
685 685 self._headeroffset = 0
686 686 self._initialized = False
687 687 self.consumed = False
688 688 # part data
689 689 self.id = None
690 690 self.type = None
691 691 self.mandatoryparams = None
692 692 self.advisoryparams = None
693 693 self.params = None
694 694 self.mandatorykeys = ()
695 695 self._payloadstream = None
696 696 self._readheader()
697 697
698 698 def _fromheader(self, size):
699 699 """return the next <size> byte from the header"""
700 700 offset = self._headeroffset
701 701 data = self._headerdata[offset:(offset + size)]
702 702 self._headeroffset = offset + size
703 703 return data
704 704
705 705 def _unpackheader(self, format):
706 706 """read given format from header
707 707
708 708 This automatically computes the size of the format to read.
709 709 data = self._fromheader(struct.calcsize(format))
710 710 return _unpack(format, data)
711 711
712 712 def _initparams(self, mandatoryparams, advisoryparams):
713 713 """internal function to setup all logic related parameters"""
714 714 # make it read only to prevent people touching it by mistake.
715 715 self.mandatoryparams = tuple(mandatoryparams)
716 716 self.advisoryparams = tuple(advisoryparams)
717 717 # user friendly UI
718 718 self.params = dict(self.mandatoryparams)
719 719 self.params.update(dict(self.advisoryparams))
720 720 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
721 721
722 722 def _readheader(self):
723 723 """read the header and setup the object"""
724 724 typesize = self._unpackheader(_fparttypesize)[0]
725 725 self.type = self._fromheader(typesize)
726 726 self.ui.debug('part type: "%s"\n' % self.type)
727 727 self.id = self._unpackheader(_fpartid)[0]
728 728 self.ui.debug('part id: "%s"\n' % self.id)
729 729 ## reading parameters
730 730 # param count
731 731 mancount, advcount = self._unpackheader(_fpartparamcount)
732 732 self.ui.debug('part parameters: %i\n' % (mancount + advcount))
733 733 # param size
734 734 fparamsizes = _makefpartparamsizes(mancount + advcount)
735 735 paramsizes = self._unpackheader(fparamsizes)
736 736 # make it a list of couple again
737 737 paramsizes = zip(paramsizes[::2], paramsizes[1::2])
738 738 # split mandatory from advisory
739 739 mansizes = paramsizes[:mancount]
740 740 advsizes = paramsizes[mancount:]
741 741 # retrieve param values
742 742 manparams = []
743 743 for key, value in mansizes:
744 744 manparams.append((self._fromheader(key), self._fromheader(value)))
745 745 advparams = []
746 746 for key, value in advsizes:
747 747 advparams.append((self._fromheader(key), self._fromheader(value)))
748 748 self._initparams(manparams, advparams)
749 749 ## part payload
750 750 def payloadchunks():
751 751 payloadsize = self._unpack(_fpayloadsize)[0]
752 752 self.ui.debug('payload chunk size: %i\n' % payloadsize)
753 753 while payloadsize:
754 754 yield self._readexact(payloadsize)
755 755 payloadsize = self._unpack(_fpayloadsize)[0]
756 756 self.ui.debug('payload chunk size: %i\n' % payloadsize)
757 757 self._payloadstream = util.chunkbuffer(payloadchunks())
758 758 # we read the data, tell it
759 759 self._initialized = True
760 760
761 761 def read(self, size=None):
762 762 """read payload data"""
763 763 if not self._initialized:
764 764 self._readheader()
765 765 if size is None:
766 766 data = self._payloadstream.read()
767 767 else:
768 768 data = self._payloadstream.read(size)
769 769 if size is None or len(data) < size:
770 770 self.consumed = True
771 771 return data
772 772
773 773 def bundle2caps(remote):
774 774 """return the bundlecapabilities of a peer as dict"""
775 775 raw = remote.capable('bundle2-exp')
776 776 if not raw and raw != '':
777 777 return {}
778 778 capsblob = urllib.unquote(remote.capable('bundle2-exp'))
779 779 return decodecaps(capsblob)
780 780
781 781 @parthandler('b2x:changegroup')
782 782 def handlechangegroup(op, inpart):
783 783 """apply a changegroup part on the repo
784 784
785 785 This is a very early implementation that will be massively reworked before
786 786 being inflicted on any end-user.
787 787 """
788 788 # Make sure we trigger a transaction creation
789 789 #
790 790 # The addchangegroup function will get a transaction object by itself, but
791 791 # we need to make sure we trigger the creation of a transaction object used
792 792 # for the whole processing scope.
793 793 op.gettransaction()
794 794 cg = changegroup.unbundle10(inpart, 'UN')
795 795 ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
796 796 op.records.add('changegroup', {'return': ret})
797 797 if op.reply is not None:
798 798 # This is definitely not the final form of this
799 799 # return. But one needs to start somewhere.
800 800 part = op.reply.newpart('b2x:reply:changegroup')
801 801 part.addparam('in-reply-to', str(inpart.id), mandatory=False)
802 802 part.addparam('return', '%i' % ret, mandatory=False)
803 803 assert not inpart.read()
804 804
805 805 @parthandler('b2x:reply:changegroup', ('return', 'in-reply-to'))
806 806 def handlechangegroup(op, inpart):
807 807 ret = int(inpart.params['return'])
808 808 replyto = int(inpart.params['in-reply-to'])
809 809 op.records.add('changegroup', {'return': ret}, replyto)
810 810
811 811 @parthandler('b2x:check:heads')
812 812 def handlechangegroup(op, inpart):
813 813 """check that head of the repo did not change
814 814
815 815 This is used to detect a push race when using unbundle.
816 816 This replaces the "heads" argument of unbundle."""
817 817 h = inpart.read(20)
818 818 heads = []
819 819 while len(h) == 20:
820 820 heads.append(h)
821 821 h = inpart.read(20)
822 822 assert not h
823 823 if heads != op.repo.heads():
824 824 raise error.PushRaced('repository changed while pushing - '
825 825 'please try again')
826 826
827 827 @parthandler('b2x:output')
828 828 def handleoutput(op, inpart):
829 829 """forward output captured on the server to the client"""
830 830 for line in inpart.read().splitlines():
831 831 op.ui.write(('remote: %s\n' % line))
832 832
833 833 @parthandler('b2x:replycaps')
834 834 def handlereplycaps(op, inpart):
835 835 """Notify that a reply bundle should be created
836 836
837 837 The payload contains the capabilities information for the reply"""
838 838 caps = decodecaps(inpart.read())
839 839 if op.reply is None:
840 840 op.reply = bundle20(op.ui, caps)
841 841
842 842 @parthandler('b2x:error:abort', ('message', 'hint'))
843 843 def handlereplycaps(op, inpart):
844 844 """Used to transmit abort error over the wire"""
845 845 raise util.Abort(inpart.params['message'], hint=inpart.params.get('hint'))
846 846
847 847 @parthandler('b2x:error:unsupportedcontent', ('parttype', 'params'))
848 848 def handlereplycaps(op, inpart):
849 849 """Used to transmit unknown content error over the wire"""
850 850 kwargs = {}
851 851 parttype = inpart.params.get('parttype')
852 852 if parttype is not None:
853 853 kwargs['parttype'] = parttype
854 854 params = inpart.params.get('params')
855 855 if params is not None:
856 856 kwargs['params'] = params.split('\0')
857 857
858 858 raise error.BundleValueError(**kwargs)
859 859
860 860 @parthandler('b2x:error:pushraced', ('message',))
861 861 def handlereplycaps(op, inpart):
862 862 """Used to transmit push race error over the wire"""
863 863 raise error.ResponseError(_('push failed:'), inpart.params['message'])
864 864
865 865 @parthandler('b2x:listkeys', ('namespace',))
866 866 def handlelistkeys(op, inpart):
867 867 """retrieve pushkey namespace content stored in a bundle2"""
868 868 namespace = inpart.params['namespace']
869 869 r = pushkey.decodekeys(inpart.read())
870 870 op.records.add('listkeys', (namespace, r))
871
872 @parthandler('b2x:pushkey', ('namespace', 'key', 'old', 'new'))
873 def handlepushkey(op, inpart):
874 """process a pushkey request"""
875 dec = pushkey.decode
876 namespace = dec(inpart.params['namespace'])
877 key = dec(inpart.params['key'])
878 old = dec(inpart.params['old'])
879 new = dec(inpart.params['new'])
880 ret = op.repo.pushkey(namespace, key, old, new)
881 record = {'namespace': namespace,
882 'key': key,
883 'old': old,
884 'new': new}
885 op.records.add('pushkey', record)
886 if op.reply is not None:
887 rpart = op.reply.newpart('b2x:reply:pushkey')
888 rpart.addparam('in-reply-to', str(inpart.id), mandatory=False)
889 rpart.addparam('return', '%i' % ret, mandatory=False)
890
891 @parthandler('b2x:reply:pushkey', ('return', 'in-reply-to'))
892 def handlepushkeyreply(op, inpart):
893 """retrieve the result of a pushkey request"""
894 ret = int(inpart.params['return'])
895 partid = int(inpart.params['in-reply-to'])
896 op.records.add('pushkey', {'return': ret}, partid)
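
A hedged client-side sketch of emitting the new ``b2x:pushkey`` part handled above; it assumes ``pushkey.encode`` is the counterpart of the ``pushkey.decode`` calls used by the handler, and the function name is invented.

def newpushkeypart(bundler, namespace, key, old, new):
    enc = pushkey.encode      # assumed counterpart of pushkey.decode
    part = bundler.newpart('b2x:pushkey')
    part.addparam('namespace', enc(namespace))
    part.addparam('key', enc(key))
    part.addparam('old', enc(old))
    part.addparam('new', enc(new))
    return part
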
@@ -1,1774 +1,1775 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25 """All filecache usage on repo are done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41 """propertycache that apply to unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50 """propertycache that must take filtering in account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61 """decorate method that always need to be run on unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire level function happier. We need to build a proper object
115 115 # from it in local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 bundle2caps = {'HG2X': (),
184 'b2x:listkeys': ()}
184 'b2x:listkeys': (),
185 'b2x:pushkey': ()}
185 186
186 187 # a list of (ui, featureset) functions.
187 188 # only functions defined in module of enabled extensions are invoked
188 189 featuresetupfuncs = set()
189 190
190 191 def _baserequirements(self, create):
191 192 return self.requirements[:]
192 193
193 194 def __init__(self, baseui, path=None, create=False):
194 195 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
195 196 self.wopener = self.wvfs
196 197 self.root = self.wvfs.base
197 198 self.path = self.wvfs.join(".hg")
198 199 self.origroot = path
199 200 self.auditor = pathutil.pathauditor(self.root, self._checknested)
200 201 self.vfs = scmutil.vfs(self.path)
201 202 self.opener = self.vfs
202 203 self.baseui = baseui
203 204 self.ui = baseui.copy()
204 205 self.ui.copy = baseui.copy # prevent copying repo configuration
205 206 # A list of callbacks to shape the phases if no data were found.
206 207 # Callbacks are in the form: func(repo, roots) --> processed root.
207 208 # This list is to be filled by extensions during repo setup.
208 209 self._phasedefaults = []
209 210 try:
210 211 self.ui.readconfig(self.join("hgrc"), self.root)
211 212 extensions.loadall(self.ui)
212 213 except IOError:
213 214 pass
214 215
215 216 if self.featuresetupfuncs:
216 217 self.supported = set(self._basesupported) # use private copy
217 218 extmods = set(m.__name__ for n, m
218 219 in extensions.extensions(self.ui))
219 220 for setupfunc in self.featuresetupfuncs:
220 221 if setupfunc.__module__ in extmods:
221 222 setupfunc(self.ui, self.supported)
222 223 else:
223 224 self.supported = self._basesupported
224 225
225 226 if not self.vfs.isdir():
226 227 if create:
227 228 if not self.wvfs.exists():
228 229 self.wvfs.makedirs()
229 230 self.vfs.makedir(notindexed=True)
230 231 requirements = self._baserequirements(create)
231 232 if self.ui.configbool('format', 'usestore', True):
232 233 self.vfs.mkdir("store")
233 234 requirements.append("store")
234 235 if self.ui.configbool('format', 'usefncache', True):
235 236 requirements.append("fncache")
236 237 if self.ui.configbool('format', 'dotencode', True):
237 238 requirements.append('dotencode')
238 239 # create an invalid changelog
239 240 self.vfs.append(
240 241 "00changelog.i",
241 242 '\0\0\0\2' # represents revlogv2
242 243 ' dummy changelog to prevent using the old repo layout'
243 244 )
244 245 if self.ui.configbool('format', 'generaldelta', False):
245 246 requirements.append("generaldelta")
246 247 requirements = set(requirements)
247 248 else:
248 249 raise error.RepoError(_("repository %s not found") % path)
249 250 elif create:
250 251 raise error.RepoError(_("repository %s already exists") % path)
251 252 else:
252 253 try:
253 254 requirements = scmutil.readrequires(self.vfs, self.supported)
254 255 except IOError, inst:
255 256 if inst.errno != errno.ENOENT:
256 257 raise
257 258 requirements = set()
258 259
259 260 self.sharedpath = self.path
260 261 try:
261 262 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
262 263 realpath=True)
263 264 s = vfs.base
264 265 if not vfs.exists():
265 266 raise error.RepoError(
266 267 _('.hg/sharedpath points to nonexistent directory %s') % s)
267 268 self.sharedpath = s
268 269 except IOError, inst:
269 270 if inst.errno != errno.ENOENT:
270 271 raise
271 272
272 273 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
273 274 self.spath = self.store.path
274 275 self.svfs = self.store.vfs
275 276 self.sopener = self.svfs
276 277 self.sjoin = self.store.join
277 278 self.vfs.createmode = self.store.createmode
278 279 self._applyrequirements(requirements)
279 280 if create:
280 281 self._writerequirements()
281 282
282 283
283 284 self._branchcaches = {}
284 285 self.filterpats = {}
285 286 self._datafilters = {}
286 287 self._transref = self._lockref = self._wlockref = None
287 288
288 289 # A cache for various files under .hg/ that tracks file changes,
289 290 # (used by the filecache decorator)
290 291 #
291 292 # Maps a property name to its util.filecacheentry
292 293 self._filecache = {}
293 294
294 295 # hold sets of revision to be filtered
295 296 # should be cleared when something might have changed the filter value:
296 297 # - new changesets,
297 298 # - phase change,
298 299 # - new obsolescence marker,
299 300 # - working directory parent change,
300 301 # - bookmark changes
301 302 self.filteredrevcache = {}
302 303
303 304 def close(self):
304 305 pass
305 306
306 307 def _restrictcapabilities(self, caps):
307 308 # bundle2 is not ready for prime time, drop it unless explicitly
308 309 # required by the tests (or some brave tester)
309 310 if self.ui.configbool('experimental', 'bundle2-exp', False):
310 311 caps = set(caps)
311 312 capsblob = bundle2.encodecaps(self.bundle2caps)
312 313 caps.add('bundle2-exp=' + urllib.quote(capsblob))
313 314 return caps
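
A hedged sketch of what this advertisement looks like from the client side; `remote` is assumed to be a peer object obtained elsewhere, and the decoding goes through the ``bundle2caps`` helper defined in bundle2.py.

caps = bundle2.bundle2caps(remote)   # e.g. {'HG2X': [], 'b2x:listkeys': [], 'b2x:pushkey': []}
if 'b2x:pushkey' in caps:
    pass  # the server accepts b2x:pushkey parts in an unbundle
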
314 315
315 316 def _applyrequirements(self, requirements):
316 317 self.requirements = requirements
317 318 self.sopener.options = dict((r, 1) for r in requirements
318 319 if r in self.openerreqs)
319 320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
320 321 if chunkcachesize is not None:
321 322 self.sopener.options['chunkcachesize'] = chunkcachesize
322 323
323 324 def _writerequirements(self):
324 325 reqfile = self.opener("requires", "w")
325 326 for r in sorted(self.requirements):
326 327 reqfile.write("%s\n" % r)
327 328 reqfile.close()
328 329
329 330 def _checknested(self, path):
330 331 """Determine if path is a legal nested repository."""
331 332 if not path.startswith(self.root):
332 333 return False
333 334 subpath = path[len(self.root) + 1:]
334 335 normsubpath = util.pconvert(subpath)
335 336
336 337 # XXX: Checking against the current working copy is wrong in
337 338 # the sense that it can reject things like
338 339 #
339 340 # $ hg cat -r 10 sub/x.txt
340 341 #
341 342 # if sub/ is no longer a subrepository in the working copy
342 343 # parent revision.
343 344 #
344 345 # However, it can of course also allow things that would have
345 346 # been rejected before, such as the above cat command if sub/
346 347 # is a subrepository now, but was a normal directory before.
347 348 # The old path auditor would have rejected by mistake since it
348 349 # panics when it sees sub/.hg/.
349 350 #
350 351 # All in all, checking against the working copy seems sensible
351 352 # since we want to prevent access to nested repositories on
352 353 # the filesystem *now*.
353 354 ctx = self[None]
354 355 parts = util.splitpath(subpath)
355 356 while parts:
356 357 prefix = '/'.join(parts)
357 358 if prefix in ctx.substate:
358 359 if prefix == normsubpath:
359 360 return True
360 361 else:
361 362 sub = ctx.sub(prefix)
362 363 return sub.checknested(subpath[len(prefix) + 1:])
363 364 else:
364 365 parts.pop()
365 366 return False
366 367
367 368 def peer(self):
368 369 return localpeer(self) # not cached to avoid reference cycle
369 370
370 371 def unfiltered(self):
371 372 """Return unfiltered version of the repository
372 373
373 374 Intended to be overwritten by filtered repo."""
374 375 return self
375 376
376 377 def filtered(self, name):
377 378 """Return a filtered version of a repository"""
378 379 # build a new class with the mixin and the current class
379 380 # (possibly subclass of the repo)
380 381 class proxycls(repoview.repoview, self.unfiltered().__class__):
381 382 pass
382 383 return proxycls(self, name)
383 384
384 385 @repofilecache('bookmarks')
385 386 def _bookmarks(self):
386 387 return bookmarks.bmstore(self)
387 388
388 389 @repofilecache('bookmarks.current')
389 390 def _bookmarkcurrent(self):
390 391 return bookmarks.readcurrent(self)
391 392
392 393 def bookmarkheads(self, bookmark):
393 394 name = bookmark.split('@', 1)[0]
394 395 heads = []
395 396 for mark, n in self._bookmarks.iteritems():
396 397 if mark.split('@', 1)[0] == name:
397 398 heads.append(n)
398 399 return heads
399 400
400 401 @storecache('phaseroots')
401 402 def _phasecache(self):
402 403 return phases.phasecache(self, self._phasedefaults)
403 404
404 405 @storecache('obsstore')
405 406 def obsstore(self):
406 407 store = obsolete.obsstore(self.sopener)
407 408 if store and not obsolete._enabled:
408 409 # message is rare enough to not be translated
409 410 msg = 'obsolete feature not enabled but %i markers found!\n'
410 411 self.ui.warn(msg % len(list(store)))
411 412 return store
412 413
413 414 @storecache('00changelog.i')
414 415 def changelog(self):
415 416 c = changelog.changelog(self.sopener)
416 417 if 'HG_PENDING' in os.environ:
417 418 p = os.environ['HG_PENDING']
418 419 if p.startswith(self.root):
419 420 c.readpending('00changelog.i.a')
420 421 return c
421 422
422 423 @storecache('00manifest.i')
423 424 def manifest(self):
424 425 return manifest.manifest(self.sopener)
425 426
426 427 @repofilecache('dirstate')
427 428 def dirstate(self):
428 429 warned = [0]
429 430 def validate(node):
430 431 try:
431 432 self.changelog.rev(node)
432 433 return node
433 434 except error.LookupError:
434 435 if not warned[0]:
435 436 warned[0] = True
436 437 self.ui.warn(_("warning: ignoring unknown"
437 438 " working parent %s!\n") % short(node))
438 439 return nullid
439 440
440 441 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
441 442
442 443 def __getitem__(self, changeid):
443 444 if changeid is None:
444 445 return context.workingctx(self)
445 446 return context.changectx(self, changeid)
446 447
447 448 def __contains__(self, changeid):
448 449 try:
449 450 return bool(self.lookup(changeid))
450 451 except error.RepoLookupError:
451 452 return False
452 453
453 454 def __nonzero__(self):
454 455 return True
455 456
456 457 def __len__(self):
457 458 return len(self.changelog)
458 459
459 460 def __iter__(self):
460 461 return iter(self.changelog)
461 462
462 463 def revs(self, expr, *args):
463 464 '''Return a list of revisions matching the given revset'''
464 465 expr = revset.formatspec(expr, *args)
465 466 m = revset.match(None, expr)
466 467 return m(self, revset.spanset(self))
467 468
468 469 def set(self, expr, *args):
469 470 '''
470 471 Yield a context for each matching revision, after doing arg
471 472 replacement via revset.formatspec
472 473 '''
473 474 for r in self.revs(expr, *args):
474 475 yield self[r]
475 476
476 477 def url(self):
477 478 return 'file:' + self.root
478 479
479 480 def hook(self, name, throw=False, **args):
480 481 return hook.hook(self.ui, self, name, throw, **args)
481 482
482 483 @unfilteredmethod
483 484 def _tag(self, names, node, message, local, user, date, extra={},
484 485 editor=False):
485 486 if isinstance(names, str):
486 487 names = (names,)
487 488
488 489 branches = self.branchmap()
489 490 for name in names:
490 491 self.hook('pretag', throw=True, node=hex(node), tag=name,
491 492 local=local)
492 493 if name in branches:
493 494 self.ui.warn(_("warning: tag %s conflicts with existing"
494 495 " branch name\n") % name)
495 496
496 497 def writetags(fp, names, munge, prevtags):
497 498 fp.seek(0, 2)
498 499 if prevtags and prevtags[-1] != '\n':
499 500 fp.write('\n')
500 501 for name in names:
501 502 m = munge and munge(name) or name
502 503 if (self._tagscache.tagtypes and
503 504 name in self._tagscache.tagtypes):
504 505 old = self.tags().get(name, nullid)
505 506 fp.write('%s %s\n' % (hex(old), m))
506 507 fp.write('%s %s\n' % (hex(node), m))
507 508 fp.close()
508 509
509 510 prevtags = ''
510 511 if local:
511 512 try:
512 513 fp = self.opener('localtags', 'r+')
513 514 except IOError:
514 515 fp = self.opener('localtags', 'a')
515 516 else:
516 517 prevtags = fp.read()
517 518
518 519 # local tags are stored in the current charset
519 520 writetags(fp, names, None, prevtags)
520 521 for name in names:
521 522 self.hook('tag', node=hex(node), tag=name, local=local)
522 523 return
523 524
524 525 try:
525 526 fp = self.wfile('.hgtags', 'rb+')
526 527 except IOError, e:
527 528 if e.errno != errno.ENOENT:
528 529 raise
529 530 fp = self.wfile('.hgtags', 'ab')
530 531 else:
531 532 prevtags = fp.read()
532 533
533 534 # committed tags are stored in UTF-8
534 535 writetags(fp, names, encoding.fromlocal, prevtags)
535 536
536 537 fp.close()
537 538
538 539 self.invalidatecaches()
539 540
540 541 if '.hgtags' not in self.dirstate:
541 542 self[None].add(['.hgtags'])
542 543
543 544 m = matchmod.exact(self.root, '', ['.hgtags'])
544 545 tagnode = self.commit(message, user, date, extra=extra, match=m,
545 546 editor=editor)
546 547
547 548 for name in names:
548 549 self.hook('tag', node=hex(node), tag=name, local=local)
549 550
550 551 return tagnode
551 552
552 553 def tag(self, names, node, message, local, user, date, editor=False):
553 554 '''tag a revision with one or more symbolic names.
554 555
555 556 names is a list of strings or, when adding a single tag, names may be a
556 557 string.
557 558
558 559 if local is True, the tags are stored in a per-repository file.
559 560 otherwise, they are stored in the .hgtags file, and a new
560 561 changeset is committed with the change.
561 562
562 563 keyword arguments:
563 564
564 565 local: whether to store tags in non-version-controlled file
565 566 (default False)
566 567
567 568 message: commit message to use if committing
568 569
569 570 user: name of user to use if committing
570 571
571 572 date: date tuple to use if committing'''
572 573
573 574 if not local:
574 575 for x in self.status()[:5]:
575 576 if '.hgtags' in x:
576 577 raise util.Abort(_('working copy of .hgtags is changed '
577 578 '(please commit .hgtags manually)'))
578 579
579 580 self.tags() # instantiate the cache
580 581 self._tag(names, node, message, local, user, date, editor=editor)
581 582
582 583 @filteredpropertycache
583 584 def _tagscache(self):
584 585 '''Returns a tagscache object that contains various tags related
585 586 caches.'''
586 587
587 588 # This simplifies its cache management by having one decorated
588 589 # function (this one) and the rest simply fetch things from it.
589 590 class tagscache(object):
590 591 def __init__(self):
591 592 # These two define the set of tags for this repository. tags
592 593 # maps tag name to node; tagtypes maps tag name to 'global' or
593 594 # 'local'. (Global tags are defined by .hgtags across all
594 595 # heads, and local tags are defined in .hg/localtags.)
595 596 # They constitute the in-memory cache of tags.
596 597 self.tags = self.tagtypes = None
597 598
598 599 self.nodetagscache = self.tagslist = None
599 600
600 601 cache = tagscache()
601 602 cache.tags, cache.tagtypes = self._findtags()
602 603
603 604 return cache
604 605
605 606 def tags(self):
606 607 '''return a mapping of tag to node'''
607 608 t = {}
608 609 if self.changelog.filteredrevs:
609 610 tags, tt = self._findtags()
610 611 else:
611 612 tags = self._tagscache.tags
612 613 for k, v in tags.iteritems():
613 614 try:
614 615 # ignore tags to unknown nodes
615 616 self.changelog.rev(v)
616 617 t[k] = v
617 618 except (error.LookupError, ValueError):
618 619 pass
619 620 return t
620 621
621 622 def _findtags(self):
622 623 '''Do the hard work of finding tags. Return a pair of dicts
623 624 (tags, tagtypes) where tags maps tag name to node, and tagtypes
624 625 maps tag name to a string like \'global\' or \'local\'.
625 626 Subclasses or extensions are free to add their own tags, but
626 627 should be aware that the returned dicts will be retained for the
627 628 duration of the localrepo object.'''
628 629
629 630 # XXX what tagtype should subclasses/extensions use? Currently
630 631 # mq and bookmarks add tags, but do not set the tagtype at all.
631 632 # Should each extension invent its own tag type? Should there
632 633 # be one tagtype for all such "virtual" tags? Or is the status
633 634 # quo fine?
634 635
635 636 alltags = {} # map tag name to (node, hist)
636 637 tagtypes = {}
637 638
638 639 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
639 640 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
640 641
641 642 # Build the return dicts. Have to re-encode tag names because
642 643 # the tags module always uses UTF-8 (in order not to lose info
643 644 # writing to the cache), but the rest of Mercurial wants them in
644 645 # local encoding.
645 646 tags = {}
646 647 for (name, (node, hist)) in alltags.iteritems():
647 648 if node != nullid:
648 649 tags[encoding.tolocal(name)] = node
649 650 tags['tip'] = self.changelog.tip()
650 651 tagtypes = dict([(encoding.tolocal(name), value)
651 652 for (name, value) in tagtypes.iteritems()])
652 653 return (tags, tagtypes)
653 654
654 655 def tagtype(self, tagname):
655 656 '''
656 657 return the type of the given tag. result can be:
657 658
658 659 'local' : a local tag
659 660 'global' : a global tag
660 661 None : tag does not exist
661 662 '''
662 663
663 664 return self._tagscache.tagtypes.get(tagname)
664 665
665 666 def tagslist(self):
666 667 '''return a list of tags ordered by revision'''
667 668 if not self._tagscache.tagslist:
668 669 l = []
669 670 for t, n in self.tags().iteritems():
670 671 r = self.changelog.rev(n)
671 672 l.append((r, t, n))
672 673 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
673 674
674 675 return self._tagscache.tagslist
675 676
676 677 def nodetags(self, node):
677 678 '''return the tags associated with a node'''
678 679 if not self._tagscache.nodetagscache:
679 680 nodetagscache = {}
680 681 for t, n in self._tagscache.tags.iteritems():
681 682 nodetagscache.setdefault(n, []).append(t)
682 683 for tags in nodetagscache.itervalues():
683 684 tags.sort()
684 685 self._tagscache.nodetagscache = nodetagscache
685 686 return self._tagscache.nodetagscache.get(node, [])
686 687
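# Illustrative sketch, not part of the original module: the read-only tag
# helpers defined above. The tag name 'v1.0' is hypothetical.
def _example_tag_queries(repo):
    alltags = repo.tags()              # {tagname: node}, includes 'tip'
    kind = repo.tagtype('v1.0')        # 'local', 'global', or None
    ordered = repo.tagslist()          # [(tagname, node)] ordered by revision
    ontip = repo.nodetags(repo['tip'].node())  # tags attached to one node
    return alltags, kind, ordered, ontip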
687 688 def nodebookmarks(self, node):
688 689 marks = []
689 690 for bookmark, n in self._bookmarks.iteritems():
690 691 if n == node:
691 692 marks.append(bookmark)
692 693 return sorted(marks)
693 694
694 695 def branchmap(self):
695 696 '''returns a dictionary {branch: [branchheads]} with branchheads
696 697 ordered by increasing revision number'''
697 698 branchmap.updatecache(self)
698 699 return self._branchcaches[self.filtername]
699 700
700 701 def branchtip(self, branch):
701 702 '''return the tip node for a given branch'''
702 703 try:
703 704 return self.branchmap().branchtip(branch)
704 705 except KeyError:
705 706 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
706 707
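# Illustrative sketch, not part of the original module: querying branch
# information with the methods above. 'default' is the conventional name of
# the main Mercurial branch.
def _example_branches(repo):
    bmap = repo.branchmap()            # {branch: [branchheads]}
    try:
        tip = repo.branchtip('default')
    except error.RepoLookupError:
        tip = None                     # raised for unknown branch names
    return bmap, tip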
707 708 def lookup(self, key):
708 709 return self[key].node()
709 710
710 711 def lookupbranch(self, key, remote=None):
711 712 repo = remote or self
712 713 if key in repo.branchmap():
713 714 return key
714 715
715 716 repo = (remote and remote.local()) and remote or self
716 717 return repo[key].branch()
717 718
718 719 def known(self, nodes):
719 720 nm = self.changelog.nodemap
720 721 pc = self._phasecache
721 722 result = []
722 723 for n in nodes:
723 724 r = nm.get(n)
724 725 resp = not (r is None or pc.phase(self, r) >= phases.secret)
725 726 result.append(resp)
726 727 return result
727 728
728 729 def local(self):
729 730 return self
730 731
731 732 def cancopy(self):
732 733 # so statichttprepo's override of local() works
733 734 if not self.local():
734 735 return False
735 736 if not self.ui.configbool('phases', 'publish', True):
736 737 return True
737 738 # if publishing we can't copy if there is filtered content
738 739 return not self.filtered('visible').changelog.filteredrevs
739 740
740 741 def join(self, f):
741 742 return os.path.join(self.path, f)
742 743
743 744 def wjoin(self, f):
744 745 return os.path.join(self.root, f)
745 746
746 747 def file(self, f):
747 748 if f[0] == '/':
748 749 f = f[1:]
749 750 return filelog.filelog(self.sopener, f)
750 751
751 752 def changectx(self, changeid):
752 753 return self[changeid]
753 754
754 755 def parents(self, changeid=None):
755 756 '''get list of changectxs for parents of changeid'''
756 757 return self[changeid].parents()
757 758
758 759 def setparents(self, p1, p2=nullid):
759 760 copies = self.dirstate.setparents(p1, p2)
760 761 pctx = self[p1]
761 762 if copies:
762 763 # Adjust copy records, the dirstate cannot do it, it
763 764 # requires access to parents manifests. Preserve them
764 765 # only for entries added to first parent.
765 766 for f in copies:
766 767 if f not in pctx and copies[f] in pctx:
767 768 self.dirstate.copy(copies[f], f)
768 769 if p2 == nullid:
769 770 for f, s in sorted(self.dirstate.copies().items()):
770 771 if f not in pctx and s not in pctx:
771 772 self.dirstate.copy(None, f)
772 773
773 774 def filectx(self, path, changeid=None, fileid=None):
774 775 """changeid can be a changeset revision, node, or tag.
775 776 fileid can be a file revision or node."""
776 777 return context.filectx(self, path, changeid, fileid)
777 778
778 779 def getcwd(self):
779 780 return self.dirstate.getcwd()
780 781
781 782 def pathto(self, f, cwd=None):
782 783 return self.dirstate.pathto(f, cwd)
783 784
784 785 def wfile(self, f, mode='r'):
785 786 return self.wopener(f, mode)
786 787
787 788 def _link(self, f):
788 789 return self.wvfs.islink(f)
789 790
790 791 def _loadfilter(self, filter):
791 792 if filter not in self.filterpats:
792 793 l = []
793 794 for pat, cmd in self.ui.configitems(filter):
794 795 if cmd == '!':
795 796 continue
796 797 mf = matchmod.match(self.root, '', [pat])
797 798 fn = None
798 799 params = cmd
799 800 for name, filterfn in self._datafilters.iteritems():
800 801 if cmd.startswith(name):
801 802 fn = filterfn
802 803 params = cmd[len(name):].lstrip()
803 804 break
804 805 if not fn:
805 806 fn = lambda s, c, **kwargs: util.filter(s, c)
806 807 # Wrap old filters not supporting keyword arguments
807 808 if not inspect.getargspec(fn)[2]:
808 809 oldfn = fn
809 810 fn = lambda s, c, **kwargs: oldfn(s, c)
810 811 l.append((mf, fn, params))
811 812 self.filterpats[filter] = l
812 813 return self.filterpats[filter]
813 814
814 815 def _filter(self, filterpats, filename, data):
815 816 for mf, fn, cmd in filterpats:
816 817 if mf(filename):
817 818 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
818 819 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
819 820 break
820 821
821 822 return data
822 823
823 824 @unfilteredpropertycache
824 825 def _encodefilterpats(self):
825 826 return self._loadfilter('encode')
826 827
827 828 @unfilteredpropertycache
828 829 def _decodefilterpats(self):
829 830 return self._loadfilter('decode')
830 831
831 832 def adddatafilter(self, name, filter):
832 833 self._datafilters[name] = filter
833 834
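# Illustrative sketch, not part of the original module: registering a data
# filter with adddatafilter(). As _loadfilter() shows, a registered filter is
# selected when a configured [encode]/[decode] command starts with the
# filter's name, and the remainder of the command is passed as the second
# argument. The 'upper:' name is hypothetical.
def _example_datafilter(repo):
    def upper(data, params, **kwargs):
        # kwargs carries ui, repo and filename, per _filter() above
        return data.upper()
    repo.adddatafilter('upper:', upper)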
834 835 def wread(self, filename):
835 836 if self._link(filename):
836 837 data = self.wvfs.readlink(filename)
837 838 else:
838 839 data = self.wopener.read(filename)
839 840 return self._filter(self._encodefilterpats, filename, data)
840 841
841 842 def wwrite(self, filename, data, flags):
842 843 data = self._filter(self._decodefilterpats, filename, data)
843 844 if 'l' in flags:
844 845 self.wopener.symlink(data, filename)
845 846 else:
846 847 self.wopener.write(filename, data)
847 848 if 'x' in flags:
848 849 self.wvfs.setflags(filename, False, True)
849 850
850 851 def wwritedata(self, filename, data):
851 852 return self._filter(self._decodefilterpats, filename, data)
852 853
853 854 def transaction(self, desc, report=None):
854 855 tr = self._transref and self._transref() or None
855 856 if tr and tr.running():
856 857 return tr.nest()
857 858
858 859 # abort here if the journal already exists
859 860 if self.svfs.exists("journal"):
860 861 raise error.RepoError(
861 862 _("abandoned transaction found"),
862 863 hint=_("run 'hg recover' to clean up transaction"))
863 864
864 865 def onclose():
865 866 self.store.write(tr)
866 867
867 868 self._writejournal(desc)
868 869 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
869 870 rp = report and report or self.ui.warn
870 871 tr = transaction.transaction(rp, self.sopener,
871 872 "journal",
872 873 aftertrans(renames),
873 874 self.store.createmode,
874 875 onclose)
875 876 self._transref = weakref.ref(tr)
876 877 return tr
877 878
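# Illustrative sketch, not part of the original module: the lock/transaction
# pattern used by commitctx() further below. Store writes happen inside a
# transaction; close() commits it and release() rolls back anything left open.
def _example_transaction(repo):
    lock = repo.lock()
    try:
        tr = repo.transaction('example')
        try:
            # ... append to revlogs and other store files here ...
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()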
878 879 def _journalfiles(self):
879 880 return ((self.svfs, 'journal'),
880 881 (self.vfs, 'journal.dirstate'),
881 882 (self.vfs, 'journal.branch'),
882 883 (self.vfs, 'journal.desc'),
883 884 (self.vfs, 'journal.bookmarks'),
884 885 (self.svfs, 'journal.phaseroots'))
885 886
886 887 def undofiles(self):
887 888 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
888 889
889 890 def _writejournal(self, desc):
890 891 self.opener.write("journal.dirstate",
891 892 self.opener.tryread("dirstate"))
892 893 self.opener.write("journal.branch",
893 894 encoding.fromlocal(self.dirstate.branch()))
894 895 self.opener.write("journal.desc",
895 896 "%d\n%s\n" % (len(self), desc))
896 897 self.opener.write("journal.bookmarks",
897 898 self.opener.tryread("bookmarks"))
898 899 self.sopener.write("journal.phaseroots",
899 900 self.sopener.tryread("phaseroots"))
900 901
901 902 def recover(self):
902 903 lock = self.lock()
903 904 try:
904 905 if self.svfs.exists("journal"):
905 906 self.ui.status(_("rolling back interrupted transaction\n"))
906 907 transaction.rollback(self.sopener, "journal",
907 908 self.ui.warn)
908 909 self.invalidate()
909 910 return True
910 911 else:
911 912 self.ui.warn(_("no interrupted transaction available\n"))
912 913 return False
913 914 finally:
914 915 lock.release()
915 916
916 917 def rollback(self, dryrun=False, force=False):
917 918 wlock = lock = None
918 919 try:
919 920 wlock = self.wlock()
920 921 lock = self.lock()
921 922 if self.svfs.exists("undo"):
922 923 return self._rollback(dryrun, force)
923 924 else:
924 925 self.ui.warn(_("no rollback information available\n"))
925 926 return 1
926 927 finally:
927 928 release(lock, wlock)
928 929
929 930 @unfilteredmethod # Until we get smarter cache management
930 931 def _rollback(self, dryrun, force):
931 932 ui = self.ui
932 933 try:
933 934 args = self.opener.read('undo.desc').splitlines()
934 935 (oldlen, desc, detail) = (int(args[0]), args[1], None)
935 936 if len(args) >= 3:
936 937 detail = args[2]
937 938 oldtip = oldlen - 1
938 939
939 940 if detail and ui.verbose:
940 941 msg = (_('repository tip rolled back to revision %s'
941 942 ' (undo %s: %s)\n')
942 943 % (oldtip, desc, detail))
943 944 else:
944 945 msg = (_('repository tip rolled back to revision %s'
945 946 ' (undo %s)\n')
946 947 % (oldtip, desc))
947 948 except IOError:
948 949 msg = _('rolling back unknown transaction\n')
949 950 desc = None
950 951
951 952 if not force and self['.'] != self['tip'] and desc == 'commit':
952 953 raise util.Abort(
953 954 _('rollback of last commit while not checked out '
954 955 'may lose data'), hint=_('use -f to force'))
955 956
956 957 ui.status(msg)
957 958 if dryrun:
958 959 return 0
959 960
960 961 parents = self.dirstate.parents()
961 962 self.destroying()
962 963 transaction.rollback(self.sopener, 'undo', ui.warn)
963 964 if self.vfs.exists('undo.bookmarks'):
964 965 self.vfs.rename('undo.bookmarks', 'bookmarks')
965 966 if self.svfs.exists('undo.phaseroots'):
966 967 self.svfs.rename('undo.phaseroots', 'phaseroots')
967 968 self.invalidate()
968 969
969 970 parentgone = (parents[0] not in self.changelog.nodemap or
970 971 parents[1] not in self.changelog.nodemap)
971 972 if parentgone:
972 973 self.vfs.rename('undo.dirstate', 'dirstate')
973 974 try:
974 975 branch = self.opener.read('undo.branch')
975 976 self.dirstate.setbranch(encoding.tolocal(branch))
976 977 except IOError:
977 978 ui.warn(_('named branch could not be reset: '
978 979 'current branch is still \'%s\'\n')
979 980 % self.dirstate.branch())
980 981
981 982 self.dirstate.invalidate()
982 983 parents = tuple([p.rev() for p in self.parents()])
983 984 if len(parents) > 1:
984 985 ui.status(_('working directory now based on '
985 986 'revisions %d and %d\n') % parents)
986 987 else:
987 988 ui.status(_('working directory now based on '
988 989 'revision %d\n') % parents)
989 990 # TODO: if we know which new heads may result from this rollback, pass
990 991 # them to destroy(), which will prevent the branchhead cache from being
991 992 # invalidated.
992 993 self.destroyed()
993 994 return 0
994 995
995 996 def invalidatecaches(self):
996 997
997 998 if '_tagscache' in vars(self):
998 999 # can't use delattr on proxy
999 1000 del self.__dict__['_tagscache']
1000 1001
1001 1002 self.unfiltered()._branchcaches.clear()
1002 1003 self.invalidatevolatilesets()
1003 1004
1004 1005 def invalidatevolatilesets(self):
1005 1006 self.filteredrevcache.clear()
1006 1007 obsolete.clearobscaches(self)
1007 1008
1008 1009 def invalidatedirstate(self):
1009 1010 '''Invalidates the dirstate, causing the next call to dirstate
1010 1011 to check if it was modified since the last time it was read,
1011 1012 rereading it if it has.
1012 1013
1013 1014 This is different from dirstate.invalidate() in that it doesn't
1014 1015 always reread the dirstate. Use dirstate.invalidate() if you want to
1015 1016 explicitly read the dirstate again (i.e. restoring it to a previous
1016 1017 known good state).'''
1017 1018 if hasunfilteredcache(self, 'dirstate'):
1018 1019 for k in self.dirstate._filecache:
1019 1020 try:
1020 1021 delattr(self.dirstate, k)
1021 1022 except AttributeError:
1022 1023 pass
1023 1024 delattr(self.unfiltered(), 'dirstate')
1024 1025
1025 1026 def invalidate(self):
1026 1027 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1027 1028 for k in self._filecache:
1028 1029 # dirstate is invalidated separately in invalidatedirstate()
1029 1030 if k == 'dirstate':
1030 1031 continue
1031 1032
1032 1033 try:
1033 1034 delattr(unfiltered, k)
1034 1035 except AttributeError:
1035 1036 pass
1036 1037 self.invalidatecaches()
1037 1038 self.store.invalidatecaches()
1038 1039
1039 1040 def invalidateall(self):
1040 1041 '''Fully invalidates both store and non-store parts, causing the
1041 1042 subsequent operation to reread any outside changes.'''
1042 1043 # extension should hook this to invalidate its caches
1043 1044 self.invalidate()
1044 1045 self.invalidatedirstate()
1045 1046
1046 1047 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1047 1048 try:
1048 1049 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1049 1050 except error.LockHeld, inst:
1050 1051 if not wait:
1051 1052 raise
1052 1053 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1053 1054 (desc, inst.locker))
1054 1055 # default to 600 seconds timeout
1055 1056 l = lockmod.lock(vfs, lockname,
1056 1057 int(self.ui.config("ui", "timeout", "600")),
1057 1058 releasefn, desc=desc)
1058 1059 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1059 1060 if acquirefn:
1060 1061 acquirefn()
1061 1062 return l
1062 1063
1063 1064 def _afterlock(self, callback):
1064 1065 """add a callback to the current repository lock.
1065 1066
1066 1067 The callback will be executed on lock release."""
1067 1068 l = self._lockref and self._lockref()
1068 1069 if l:
1069 1070 l.postrelease.append(callback)
1070 1071 else:
1071 1072 callback()
1072 1073
1073 1074 def lock(self, wait=True):
1074 1075 '''Lock the repository store (.hg/store) and return a weak reference
1075 1076 to the lock. Use this before modifying the store (e.g. committing or
1076 1077 stripping). If you are opening a transaction, get a lock as well.'''
1077 1078 l = self._lockref and self._lockref()
1078 1079 if l is not None and l.held:
1079 1080 l.lock()
1080 1081 return l
1081 1082
1082 1083 def unlock():
1083 1084 if hasunfilteredcache(self, '_phasecache'):
1084 1085 self._phasecache.write()
1085 1086 for k, ce in self._filecache.items():
1086 1087 if k == 'dirstate' or k not in self.__dict__:
1087 1088 continue
1088 1089 ce.refresh()
1089 1090
1090 1091 l = self._lock(self.svfs, "lock", wait, unlock,
1091 1092 self.invalidate, _('repository %s') % self.origroot)
1092 1093 self._lockref = weakref.ref(l)
1093 1094 return l
1094 1095
1095 1096 def wlock(self, wait=True):
1096 1097 '''Lock the non-store parts of the repository (everything under
1097 1098 .hg except .hg/store) and return a weak reference to the lock.
1098 1099 Use this before modifying files in .hg.'''
1099 1100 l = self._wlockref and self._wlockref()
1100 1101 if l is not None and l.held:
1101 1102 l.lock()
1102 1103 return l
1103 1104
1104 1105 def unlock():
1105 1106 self.dirstate.write()
1106 1107 self._filecache['dirstate'].refresh()
1107 1108
1108 1109 l = self._lock(self.vfs, "wlock", wait, unlock,
1109 1110 self.invalidatedirstate, _('working directory of %s') %
1110 1111 self.origroot)
1111 1112 self._wlockref = weakref.ref(l)
1112 1113 return l
1113 1114
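# Illustrative sketch, not part of the original module: taking both locks in
# the same order used by rollback() above (wlock first, then the store lock)
# and releasing them with the module-level release() helper.
def _example_locking(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()   # protects .hg outside the store
        lock = repo.lock()     # protects .hg/store
        # ... modify the working directory and the store ...
    finally:
        release(lock, wlock)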
1114 1115 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1115 1116 """
1116 1117 commit an individual file as part of a larger transaction
1117 1118 """
1118 1119
1119 1120 fname = fctx.path()
1120 1121 text = fctx.data()
1121 1122 flog = self.file(fname)
1122 1123 fparent1 = manifest1.get(fname, nullid)
1123 1124 fparent2 = fparent2o = manifest2.get(fname, nullid)
1124 1125
1125 1126 meta = {}
1126 1127 copy = fctx.renamed()
1127 1128 if copy and copy[0] != fname:
1128 1129 # Mark the new revision of this file as a copy of another
1129 1130 # file. This copy data will effectively act as a parent
1130 1131 # of this new revision. If this is a merge, the first
1131 1132 # parent will be the nullid (meaning "look up the copy data")
1132 1133 # and the second one will be the other parent. For example:
1133 1134 #
1134 1135 # 0 --- 1 --- 3 rev1 changes file foo
1135 1136 # \ / rev2 renames foo to bar and changes it
1136 1137 # \- 2 -/ rev3 should have bar with all changes and
1137 1138 # should record that bar descends from
1138 1139 # bar in rev2 and foo in rev1
1139 1140 #
1140 1141 # this allows this merge to succeed:
1141 1142 #
1142 1143 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1143 1144 # \ / merging rev3 and rev4 should use bar@rev2
1144 1145 # \- 2 --- 4 as the merge base
1145 1146 #
1146 1147
1147 1148 cfname = copy[0]
1148 1149 crev = manifest1.get(cfname)
1149 1150 newfparent = fparent2
1150 1151
1151 1152 if manifest2: # branch merge
1152 1153 if fparent2 == nullid or crev is None: # copied on remote side
1153 1154 if cfname in manifest2:
1154 1155 crev = manifest2[cfname]
1155 1156 newfparent = fparent1
1156 1157
1157 1158 # find source in nearest ancestor if we've lost track
1158 1159 if not crev:
1159 1160 self.ui.debug(" %s: searching for copy revision for %s\n" %
1160 1161 (fname, cfname))
1161 1162 for ancestor in self[None].ancestors():
1162 1163 if cfname in ancestor:
1163 1164 crev = ancestor[cfname].filenode()
1164 1165 break
1165 1166
1166 1167 if crev:
1167 1168 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1168 1169 meta["copy"] = cfname
1169 1170 meta["copyrev"] = hex(crev)
1170 1171 fparent1, fparent2 = nullid, newfparent
1171 1172 else:
1172 1173 self.ui.warn(_("warning: can't find ancestor for '%s' "
1173 1174 "copied from '%s'!\n") % (fname, cfname))
1174 1175
1175 1176 elif fparent1 == nullid:
1176 1177 fparent1, fparent2 = fparent2, nullid
1177 1178 elif fparent2 != nullid:
1178 1179 # is one parent an ancestor of the other?
1179 1180 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1180 1181 if fparent1 in fparentancestors:
1181 1182 fparent1, fparent2 = fparent2, nullid
1182 1183 elif fparent2 in fparentancestors:
1183 1184 fparent2 = nullid
1184 1185
1185 1186 # is the file changed?
1186 1187 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1187 1188 changelist.append(fname)
1188 1189 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1189 1190
1190 1191 # are just the flags changed during merge?
1191 1192 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1192 1193 changelist.append(fname)
1193 1194
1194 1195 return fparent1
1195 1196
1196 1197 @unfilteredmethod
1197 1198 def commit(self, text="", user=None, date=None, match=None, force=False,
1198 1199 editor=False, extra={}):
1199 1200 """Add a new revision to current repository.
1200 1201
1201 1202 Revision information is gathered from the working directory,
1202 1203 match can be used to filter the committed files. If editor is
1203 1204 supplied, it is called to get a commit message.
1204 1205 """
1205 1206
1206 1207 def fail(f, msg):
1207 1208 raise util.Abort('%s: %s' % (f, msg))
1208 1209
1209 1210 if not match:
1210 1211 match = matchmod.always(self.root, '')
1211 1212
1212 1213 if not force:
1213 1214 vdirs = []
1214 1215 match.explicitdir = vdirs.append
1215 1216 match.bad = fail
1216 1217
1217 1218 wlock = self.wlock()
1218 1219 try:
1219 1220 wctx = self[None]
1220 1221 merge = len(wctx.parents()) > 1
1221 1222
1222 1223 if (not force and merge and match and
1223 1224 (match.files() or match.anypats())):
1224 1225 raise util.Abort(_('cannot partially commit a merge '
1225 1226 '(do not specify files or patterns)'))
1226 1227
1227 1228 changes = self.status(match=match, clean=force)
1228 1229 if force:
1229 1230 changes[0].extend(changes[6]) # mq may commit unchanged files
1230 1231
1231 1232 # check subrepos
1232 1233 subs = []
1233 1234 commitsubs = set()
1234 1235 newstate = wctx.substate.copy()
1235 1236 # only manage subrepos and .hgsubstate if .hgsub is present
1236 1237 if '.hgsub' in wctx:
1237 1238 # we'll decide whether to track this ourselves, thanks
1238 1239 for c in changes[:3]:
1239 1240 if '.hgsubstate' in c:
1240 1241 c.remove('.hgsubstate')
1241 1242
1242 1243 # compare current state to last committed state
1243 1244 # build new substate based on last committed state
1244 1245 oldstate = wctx.p1().substate
1245 1246 for s in sorted(newstate.keys()):
1246 1247 if not match(s):
1247 1248 # ignore working copy, use old state if present
1248 1249 if s in oldstate:
1249 1250 newstate[s] = oldstate[s]
1250 1251 continue
1251 1252 if not force:
1252 1253 raise util.Abort(
1253 1254 _("commit with new subrepo %s excluded") % s)
1254 1255 if wctx.sub(s).dirty(True):
1255 1256 if not self.ui.configbool('ui', 'commitsubrepos'):
1256 1257 raise util.Abort(
1257 1258 _("uncommitted changes in subrepo %s") % s,
1258 1259 hint=_("use --subrepos for recursive commit"))
1259 1260 subs.append(s)
1260 1261 commitsubs.add(s)
1261 1262 else:
1262 1263 bs = wctx.sub(s).basestate()
1263 1264 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1264 1265 if oldstate.get(s, (None, None, None))[1] != bs:
1265 1266 subs.append(s)
1266 1267
1267 1268 # check for removed subrepos
1268 1269 for p in wctx.parents():
1269 1270 r = [s for s in p.substate if s not in newstate]
1270 1271 subs += [s for s in r if match(s)]
1271 1272 if subs:
1272 1273 if (not match('.hgsub') and
1273 1274 '.hgsub' in (wctx.modified() + wctx.added())):
1274 1275 raise util.Abort(
1275 1276 _("can't commit subrepos without .hgsub"))
1276 1277 changes[0].insert(0, '.hgsubstate')
1277 1278
1278 1279 elif '.hgsub' in changes[2]:
1279 1280 # clean up .hgsubstate when .hgsub is removed
1280 1281 if ('.hgsubstate' in wctx and
1281 1282 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1282 1283 changes[2].insert(0, '.hgsubstate')
1283 1284
1284 1285 # make sure all explicit patterns are matched
1285 1286 if not force and match.files():
1286 1287 matched = set(changes[0] + changes[1] + changes[2])
1287 1288
1288 1289 for f in match.files():
1289 1290 f = self.dirstate.normalize(f)
1290 1291 if f == '.' or f in matched or f in wctx.substate:
1291 1292 continue
1292 1293 if f in changes[3]: # missing
1293 1294 fail(f, _('file not found!'))
1294 1295 if f in vdirs: # visited directory
1295 1296 d = f + '/'
1296 1297 for mf in matched:
1297 1298 if mf.startswith(d):
1298 1299 break
1299 1300 else:
1300 1301 fail(f, _("no match under directory!"))
1301 1302 elif f not in self.dirstate:
1302 1303 fail(f, _("file not tracked!"))
1303 1304
1304 1305 cctx = context.workingctx(self, text, user, date, extra, changes)
1305 1306
1306 1307 if (not force and not extra.get("close") and not merge
1307 1308 and not cctx.files()
1308 1309 and wctx.branch() == wctx.p1().branch()):
1309 1310 return None
1310 1311
1311 1312 if merge and cctx.deleted():
1312 1313 raise util.Abort(_("cannot commit merge with missing files"))
1313 1314
1314 1315 ms = mergemod.mergestate(self)
1315 1316 for f in changes[0]:
1316 1317 if f in ms and ms[f] == 'u':
1317 1318 raise util.Abort(_("unresolved merge conflicts "
1318 1319 "(see hg help resolve)"))
1319 1320
1320 1321 if editor:
1321 1322 cctx._text = editor(self, cctx, subs)
1322 1323 edited = (text != cctx._text)
1323 1324
1324 1325 # Save commit message in case this transaction gets rolled back
1325 1326 # (e.g. by a pretxncommit hook). Leave the content alone on
1326 1327 # the assumption that the user will use the same editor again.
1327 1328 msgfn = self.savecommitmessage(cctx._text)
1328 1329
1329 1330 # commit subs and write new state
1330 1331 if subs:
1331 1332 for s in sorted(commitsubs):
1332 1333 sub = wctx.sub(s)
1333 1334 self.ui.status(_('committing subrepository %s\n') %
1334 1335 subrepo.subrelpath(sub))
1335 1336 sr = sub.commit(cctx._text, user, date)
1336 1337 newstate[s] = (newstate[s][0], sr)
1337 1338 subrepo.writestate(self, newstate)
1338 1339
1339 1340 p1, p2 = self.dirstate.parents()
1340 1341 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1341 1342 try:
1342 1343 self.hook("precommit", throw=True, parent1=hookp1,
1343 1344 parent2=hookp2)
1344 1345 ret = self.commitctx(cctx, True)
1345 1346 except: # re-raises
1346 1347 if edited:
1347 1348 self.ui.write(
1348 1349 _('note: commit message saved in %s\n') % msgfn)
1349 1350 raise
1350 1351
1351 1352 # update bookmarks, dirstate and mergestate
1352 1353 bookmarks.update(self, [p1, p2], ret)
1353 1354 cctx.markcommitted(ret)
1354 1355 ms.reset()
1355 1356 finally:
1356 1357 wlock.release()
1357 1358
1358 1359 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1359 1360 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1360 1361 self._afterlock(commithook)
1361 1362 return ret
1362 1363
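# Illustrative sketch, not part of the original module: committing the current
# working directory state with commit() above. The user string is
# hypothetical; commit() returns None when there is nothing to commit.
def _example_commit(repo):
    node = repo.commit(text='example change',
                       user='Example User <user@example.com>')
    return node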
1363 1364 @unfilteredmethod
1364 1365 def commitctx(self, ctx, error=False):
1365 1366 """Add a new revision to current repository.
1366 1367 Revision information is passed via the context argument.
1367 1368 """
1368 1369
1369 1370 tr = lock = None
1370 1371 removed = list(ctx.removed())
1371 1372 p1, p2 = ctx.p1(), ctx.p2()
1372 1373 user = ctx.user()
1373 1374
1374 1375 lock = self.lock()
1375 1376 try:
1376 1377 tr = self.transaction("commit")
1377 1378 trp = weakref.proxy(tr)
1378 1379
1379 1380 if ctx.files():
1380 1381 m1 = p1.manifest().copy()
1381 1382 m2 = p2.manifest()
1382 1383
1383 1384 # check in files
1384 1385 new = {}
1385 1386 changed = []
1386 1387 linkrev = len(self)
1387 1388 for f in sorted(ctx.modified() + ctx.added()):
1388 1389 self.ui.note(f + "\n")
1389 1390 try:
1390 1391 fctx = ctx[f]
1391 1392 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1392 1393 changed)
1393 1394 m1.set(f, fctx.flags())
1394 1395 except OSError, inst:
1395 1396 self.ui.warn(_("trouble committing %s!\n") % f)
1396 1397 raise
1397 1398 except IOError, inst:
1398 1399 errcode = getattr(inst, 'errno', errno.ENOENT)
1399 1400 if error or errcode and errcode != errno.ENOENT:
1400 1401 self.ui.warn(_("trouble committing %s!\n") % f)
1401 1402 raise
1402 1403 else:
1403 1404 removed.append(f)
1404 1405
1405 1406 # update manifest
1406 1407 m1.update(new)
1407 1408 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1408 1409 drop = [f for f in removed if f in m1]
1409 1410 for f in drop:
1410 1411 del m1[f]
1411 1412 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1412 1413 p2.manifestnode(), (new, drop))
1413 1414 files = changed + removed
1414 1415 else:
1415 1416 mn = p1.manifestnode()
1416 1417 files = []
1417 1418
1418 1419 # update changelog
1419 1420 self.changelog.delayupdate()
1420 1421 n = self.changelog.add(mn, files, ctx.description(),
1421 1422 trp, p1.node(), p2.node(),
1422 1423 user, ctx.date(), ctx.extra().copy())
1423 1424 p = lambda: self.changelog.writepending() and self.root or ""
1424 1425 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1425 1426 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1426 1427 parent2=xp2, pending=p)
1427 1428 self.changelog.finalize(trp)
1428 1429 # set the new commit to its proper phase
1429 1430 targetphase = subrepo.newcommitphase(self.ui, ctx)
1430 1431 if targetphase:
1431 1432 # retracting the boundary does not alter parent changesets.
1432 1433 # if a parent has a higher phase, the resulting phase will
1433 1434 # be compliant anyway
1434 1435 #
1435 1436 # if minimal phase was 0 we don't need to retract anything
1436 1437 phases.retractboundary(self, targetphase, [n])
1437 1438 tr.close()
1438 1439 branchmap.updatecache(self.filtered('served'))
1439 1440 return n
1440 1441 finally:
1441 1442 if tr:
1442 1443 tr.release()
1443 1444 lock.release()
1444 1445
1445 1446 @unfilteredmethod
1446 1447 def destroying(self):
1447 1448 '''Inform the repository that nodes are about to be destroyed.
1448 1449 Intended for use by strip and rollback, so there's a common
1449 1450 place for anything that has to be done before destroying history.
1450 1451
1451 1452 This is mostly useful for saving state that is in memory and waiting
1452 1453 to be flushed when the current lock is released. Because a call to
1453 1454 destroyed is imminent, the repo will be invalidated causing those
1454 1455 changes to stay in memory (waiting for the next unlock), or vanish
1455 1456 completely.
1456 1457 '''
1457 1458 # When using the same lock to commit and strip, the phasecache is left
1458 1459 # dirty after committing. Then when we strip, the repo is invalidated,
1459 1460 # causing those changes to disappear.
1460 1461 if '_phasecache' in vars(self):
1461 1462 self._phasecache.write()
1462 1463
1463 1464 @unfilteredmethod
1464 1465 def destroyed(self):
1465 1466 '''Inform the repository that nodes have been destroyed.
1466 1467 Intended for use by strip and rollback, so there's a common
1467 1468 place for anything that has to be done after destroying history.
1468 1469 '''
1469 1470 # When one tries to:
1470 1471 # 1) destroy nodes thus calling this method (e.g. strip)
1471 1472 # 2) use phasecache somewhere (e.g. commit)
1472 1473 #
1473 1474 # then 2) will fail because the phasecache contains nodes that were
1474 1475 # removed. We can either remove phasecache from the filecache,
1475 1476 # causing it to reload next time it is accessed, or simply filter
1476 1477 # the removed nodes now and write the updated cache.
1477 1478 self._phasecache.filterunknown(self)
1478 1479 self._phasecache.write()
1479 1480
1480 1481 # update the 'served' branch cache to help read-only server processes
1481 1482 # Thanks to branchcache collaboration this is done from the nearest
1482 1483 # filtered subset and it is expected to be fast.
1483 1484 branchmap.updatecache(self.filtered('served'))
1484 1485
1485 1486 # Ensure the persistent tag cache is updated. Doing it now
1486 1487 # means that the tag cache only has to worry about destroyed
1487 1488 # heads immediately after a strip/rollback. That in turn
1488 1489 # guarantees that "cachetip == currenttip" (comparing both rev
1489 1490 # and node) always means no nodes have been added or destroyed.
1490 1491
1491 1492 # XXX this is suboptimal when qrefresh'ing: we strip the current
1492 1493 # head, refresh the tag cache, then immediately add a new head.
1493 1494 # But I think doing it this way is necessary for the "instant
1494 1495 # tag cache retrieval" case to work.
1495 1496 self.invalidate()
1496 1497
1497 1498 def walk(self, match, node=None):
1498 1499 '''
1499 1500 walk recursively through the directory tree or a given
1500 1501 changeset, finding all files matched by the match
1501 1502 function
1502 1503 '''
1503 1504 return self[node].walk(match)
1504 1505
1505 1506 def status(self, node1='.', node2=None, match=None,
1506 1507 ignored=False, clean=False, unknown=False,
1507 1508 listsubrepos=False):
1508 1509 '''a convenience method that calls node1.status(node2)'''
1509 1510 return self[node1].status(node2, match, ignored, clean, unknown,
1510 1511 listsubrepos)
1511 1512
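# Illustrative sketch, not part of the original module: inspecting the working
# directory with status() and walk(). The status tuple indices
# (0=modified, 1=added, 2=removed, 3=missing) match how the tuple is used in
# commit() above.
def _example_working_state(repo):
    modified, added, removed = repo.status()[:3]
    # walk the working directory (node=None) with a match-everything matcher
    tracked = list(repo.walk(matchmod.always(repo.root, '')))
    return modified, added, removed, tracked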
1512 1513 def heads(self, start=None):
1513 1514 heads = self.changelog.heads(start)
1514 1515 # sort the output in rev descending order
1515 1516 return sorted(heads, key=self.changelog.rev, reverse=True)
1516 1517
1517 1518 def branchheads(self, branch=None, start=None, closed=False):
1518 1519 '''return a (possibly filtered) list of heads for the given branch
1519 1520
1520 1521 Heads are returned in topological order, from newest to oldest.
1521 1522 If branch is None, use the dirstate branch.
1522 1523 If start is not None, return only heads reachable from start.
1523 1524 If closed is True, return heads that are marked as closed as well.
1524 1525 '''
1525 1526 if branch is None:
1526 1527 branch = self[None].branch()
1527 1528 branches = self.branchmap()
1528 1529 if branch not in branches:
1529 1530 return []
1530 1531 # the cache returns heads ordered lowest to highest
1531 1532 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1532 1533 if start is not None:
1533 1534 # filter out the heads that cannot be reached from startrev
1534 1535 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1535 1536 bheads = [h for h in bheads if h in fbheads]
1536 1537 return bheads
1537 1538
1538 1539 def branches(self, nodes):
1539 1540 if not nodes:
1540 1541 nodes = [self.changelog.tip()]
1541 1542 b = []
1542 1543 for n in nodes:
1543 1544 t = n
1544 1545 while True:
1545 1546 p = self.changelog.parents(n)
1546 1547 if p[1] != nullid or p[0] == nullid:
1547 1548 b.append((t, n, p[0], p[1]))
1548 1549 break
1549 1550 n = p[0]
1550 1551 return b
1551 1552
1552 1553 def between(self, pairs):
1553 1554 r = []
1554 1555
1555 1556 for top, bottom in pairs:
1556 1557 n, l, i = top, [], 0
1557 1558 f = 1
1558 1559
1559 1560 while n != bottom and n != nullid:
1560 1561 p = self.changelog.parents(n)[0]
1561 1562 if i == f:
1562 1563 l.append(n)
1563 1564 f = f * 2
1564 1565 n = p
1565 1566 i += 1
1566 1567
1567 1568 r.append(l)
1568 1569
1569 1570 return r
1570 1571
1571 1572 def pull(self, remote, heads=None, force=False):
1572 1573 return exchange.pull(self, remote, heads, force)
1573 1574
1574 1575 def checkpush(self, pushop):
1575 1576 """Extensions can override this function if additional checks have
1576 1577 to be performed before pushing, or call it if they override push
1577 1578 command.
1578 1579 """
1579 1580 pass
1580 1581
1581 1582 @unfilteredpropertycache
1582 1583 def prepushoutgoinghooks(self):
1583 1584 """Return a util.hooks object consisting of "(repo, remote, outgoing)"
1584 1585 functions, which are called before pushing changesets.
1585 1586 """
1586 1587 return util.hooks()
1587 1588
1588 1589 def push(self, remote, force=False, revs=None, newbranch=False):
1589 1590 return exchange.push(self, remote, force, revs, newbranch)
1590 1591
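# Illustrative sketch, not part of the original module: the thin pull()/push()
# wrappers above simply delegate to the exchange module. 'remote' is assumed
# to be a peer repository object obtained elsewhere.
def _example_exchange(repo, remote):
    repo.pull(remote, heads=None, force=False)
    repo.push(remote, force=False, revs=None, newbranch=False)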
1591 1592 def stream_in(self, remote, requirements):
1592 1593 lock = self.lock()
1593 1594 try:
1594 1595 # Save remote branchmap. We will use it later
1595 1596 # to speed up branchcache creation
1596 1597 rbranchmap = None
1597 1598 if remote.capable("branchmap"):
1598 1599 rbranchmap = remote.branchmap()
1599 1600
1600 1601 fp = remote.stream_out()
1601 1602 l = fp.readline()
1602 1603 try:
1603 1604 resp = int(l)
1604 1605 except ValueError:
1605 1606 raise error.ResponseError(
1606 1607 _('unexpected response from remote server:'), l)
1607 1608 if resp == 1:
1608 1609 raise util.Abort(_('operation forbidden by server'))
1609 1610 elif resp == 2:
1610 1611 raise util.Abort(_('locking the remote repository failed'))
1611 1612 elif resp != 0:
1612 1613 raise util.Abort(_('the server sent an unknown error code'))
1613 1614 self.ui.status(_('streaming all changes\n'))
1614 1615 l = fp.readline()
1615 1616 try:
1616 1617 total_files, total_bytes = map(int, l.split(' ', 1))
1617 1618 except (ValueError, TypeError):
1618 1619 raise error.ResponseError(
1619 1620 _('unexpected response from remote server:'), l)
1620 1621 self.ui.status(_('%d files to transfer, %s of data\n') %
1621 1622 (total_files, util.bytecount(total_bytes)))
1622 1623 handled_bytes = 0
1623 1624 self.ui.progress(_('clone'), 0, total=total_bytes)
1624 1625 start = time.time()
1625 1626
1626 1627 tr = self.transaction(_('clone'))
1627 1628 try:
1628 1629 for i in xrange(total_files):
1629 1630 # XXX doesn't support '\n' or '\r' in filenames
1630 1631 l = fp.readline()
1631 1632 try:
1632 1633 name, size = l.split('\0', 1)
1633 1634 size = int(size)
1634 1635 except (ValueError, TypeError):
1635 1636 raise error.ResponseError(
1636 1637 _('unexpected response from remote server:'), l)
1637 1638 if self.ui.debugflag:
1638 1639 self.ui.debug('adding %s (%s)\n' %
1639 1640 (name, util.bytecount(size)))
1640 1641 # for backwards compat, name was partially encoded
1641 1642 ofp = self.sopener(store.decodedir(name), 'w')
1642 1643 for chunk in util.filechunkiter(fp, limit=size):
1643 1644 handled_bytes += len(chunk)
1644 1645 self.ui.progress(_('clone'), handled_bytes,
1645 1646 total=total_bytes)
1646 1647 ofp.write(chunk)
1647 1648 ofp.close()
1648 1649 tr.close()
1649 1650 finally:
1650 1651 tr.release()
1651 1652
1652 1653 # Writing straight to files circumvented the in-memory caches
1653 1654 self.invalidate()
1654 1655
1655 1656 elapsed = time.time() - start
1656 1657 if elapsed <= 0:
1657 1658 elapsed = 0.001
1658 1659 self.ui.progress(_('clone'), None)
1659 1660 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1660 1661 (util.bytecount(total_bytes), elapsed,
1661 1662 util.bytecount(total_bytes / elapsed)))
1662 1663
1663 1664 # new requirements = old non-format requirements +
1664 1665 # new format-related requirements
1665 1666 # from the streamed-in repository
1666 1667 requirements.update(set(self.requirements) - self.supportedformats)
1667 1668 self._applyrequirements(requirements)
1668 1669 self._writerequirements()
1669 1670
1670 1671 if rbranchmap:
1671 1672 rbheads = []
1672 1673 for bheads in rbranchmap.itervalues():
1673 1674 rbheads.extend(bheads)
1674 1675
1675 1676 if rbheads:
1676 1677 rtiprev = max((int(self.changelog.rev(node))
1677 1678 for node in rbheads))
1678 1679 cache = branchmap.branchcache(rbranchmap,
1679 1680 self[rtiprev].node(),
1680 1681 rtiprev)
1681 1682 # Try to stick it as low as possible
1682 1683 # filters above 'served' are unlikely to be fetched from a clone
1683 1684 for candidate in ('base', 'immutable', 'served'):
1684 1685 rview = self.filtered(candidate)
1685 1686 if cache.validfor(rview):
1686 1687 self._branchcaches[candidate] = cache
1687 1688 cache.write(rview)
1688 1689 break
1689 1690 self.invalidate()
1690 1691 return len(self.heads()) + 1
1691 1692 finally:
1692 1693 lock.release()
1693 1694
1694 1695 def clone(self, remote, heads=[], stream=False):
1695 1696 '''clone remote repository.
1696 1697
1697 1698 keyword arguments:
1698 1699 heads: list of revs to clone (forces use of pull)
1699 1700 stream: use streaming clone if possible'''
1700 1701
1701 1702 # now, all clients that can request uncompressed clones can
1702 1703 # read repo formats supported by all servers that can serve
1703 1704 # them.
1704 1705
1705 1706 # if revlog format changes, client will have to check version
1706 1707 # and format flags on "stream" capability, and use
1707 1708 # uncompressed only if compatible.
1708 1709
1709 1710 if not stream:
1710 1711 # if the server explicitly prefers to stream (for fast LANs)
1711 1712 stream = remote.capable('stream-preferred')
1712 1713
1713 1714 if stream and not heads:
1714 1715 # 'stream' means remote revlog format is revlogv1 only
1715 1716 if remote.capable('stream'):
1716 1717 return self.stream_in(remote, set(('revlogv1',)))
1717 1718 # otherwise, 'streamreqs' contains the remote revlog format
1718 1719 streamreqs = remote.capable('streamreqs')
1719 1720 if streamreqs:
1720 1721 streamreqs = set(streamreqs.split(','))
1721 1722 # if we support it, stream in and adjust our requirements
1722 1723 if not streamreqs - self.supportedformats:
1723 1724 return self.stream_in(remote, streamreqs)
1724 1725 return self.pull(remote, heads)
1725 1726
1726 1727 def pushkey(self, namespace, key, old, new):
1727 1728 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1728 1729 old=old, new=new)
1729 1730 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1730 1731 ret = pushkey.push(self, namespace, key, old, new)
1731 1732 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1732 1733 ret=ret)
1733 1734 return ret
1734 1735
1735 1736 def listkeys(self, namespace):
1736 1737 self.hook('prelistkeys', throw=True, namespace=namespace)
1737 1738 self.ui.debug('listing keys for "%s"\n' % namespace)
1738 1739 values = pushkey.list(self, namespace)
1739 1740 self.hook('listkeys', namespace=namespace, values=values)
1740 1741 return values
1741 1742
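# Illustrative sketch, not part of the original module: reading and writing a
# pushkey namespace through the wrappers above. 'bookmarks' is assumed to be
# one of the registered pushkey namespaces; old and new values are passed as
# strings (hex nodes for bookmarks).
def _example_pushkey(repo):
    marks = repo.listkeys('bookmarks')
    ok = repo.pushkey('bookmarks', 'feature',
                      marks.get('feature', ''), repo['tip'].hex())
    return ok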
1742 1743 def debugwireargs(self, one, two, three=None, four=None, five=None):
1743 1744 '''used to test argument passing over the wire'''
1744 1745 return "%s %s %s %s %s" % (one, two, three, four, five)
1745 1746
1746 1747 def savecommitmessage(self, text):
1747 1748 fp = self.opener('last-message.txt', 'wb')
1748 1749 try:
1749 1750 fp.write(text)
1750 1751 finally:
1751 1752 fp.close()
1752 1753 return self.pathto(fp.name[len(self.root) + 1:])
1753 1754
1754 1755 # used to avoid circular references so destructors work
1755 1756 def aftertrans(files):
1756 1757 renamefiles = [tuple(t) for t in files]
1757 1758 def a():
1758 1759 for vfs, src, dest in renamefiles:
1759 1760 try:
1760 1761 vfs.rename(src, dest)
1761 1762 except OSError: # journal file does not yet exist
1762 1763 pass
1763 1764 return a
1764 1765
1765 1766 def undoname(fn):
1766 1767 base, name = os.path.split(fn)
1767 1768 assert name.startswith('journal')
1768 1769 return os.path.join(base, name.replace('journal', 'undo', 1))
1769 1770
1770 1771 def instance(ui, path, create):
1771 1772 return localrepository(ui, util.urllocalpath(path), create)
1772 1773
1773 1774 def islocal(path):
1774 1775 return True