copies: move file input processing early...
marmoute
r43297:041f042a default
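
This changeset moves the encoding of the copy metadata (p1copies, p2copies,
filesadded, filesremoved) in changelog.add() out of the 'extra'-storage
branch: the raw inputs are now encoded right after the file list is sorted,
and the storage-specific code that follows only decides where the already
encoded values go. A rough, self-contained sketch of the resulting ordering
(simplified, hypothetical names; not the actual Mercurial code):

    def encodecopies_sketch(files, copies):
        # same idea as changelog.encodecopies(): one "<index>\0<source>"
        # entry per destination file that appears in the copies dict
        return "\n".join('%d\0%s' % (i, copies[dst])
                         for i, dst in enumerate(files) if dst in copies)

    def add_sketch(files, p1copies, copies_storage):
        extra = {}
        sortedfiles = sorted(files)
        # input processing happens early, independent of the storage backend
        if p1copies is not None:
            p1copies = encodecopies_sketch(sortedfiles, p1copies)
        # the storage decision comes afterwards and only moves values around
        if copies_storage == 'extra' and p1copies is not None:
            extra['p1copies'] = p1copies
        return extra

    print(add_sketch(['b', 'a'], {'b': 'a'}, 'extra'))  # {'p1copies': '1\x00a'}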
@@ -1,674 +1,682 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullid,
15 15 )
16 16 from .thirdparty import (
17 17 attr,
18 18 )
19 19
20 20 from . import (
21 21 encoding,
22 22 error,
23 23 pycompat,
24 24 revlog,
25 25 util,
26 26 )
27 27 from .utils import (
28 28 dateutil,
29 29 stringutil,
30 30 )
31 31
32 32 _defaultextra = {'branch': 'default'}
33 33
34 34 def _string_escape(text):
35 35 """
36 36 >>> from .pycompat import bytechr as chr
37 37 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 38 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 39 >>> s
40 40 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 41 >>> res = _string_escape(s)
42 42 >>> s == _string_unescape(res)
43 43 True
44 44 """
45 45 # subset of the string_escape codec
46 46 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
47 47 return text.replace('\0', '\\0')
48 48
49 49 def _string_unescape(text):
50 50 if '\\0' in text:
51 51 # fix up \0 without getting into trouble with \\0
52 52 text = text.replace('\\\\', '\\\\\n')
53 53 text = text.replace('\\0', '\0')
54 54 text = text.replace('\n', '')
55 55 return stringutil.unescapestr(text)
56 56
57 57 def decodeextra(text):
58 58 """
59 59 >>> from .pycompat import bytechr as chr
60 60 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
61 61 ... ).items())
62 62 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
63 63 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
64 64 ... b'baz': chr(92) + chr(0) + b'2'})
65 65 ... ).items())
66 66 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
67 67 """
68 68 extra = _defaultextra.copy()
69 69 for l in text.split('\0'):
70 70 if l:
71 71 k, v = _string_unescape(l).split(':', 1)
72 72 extra[k] = v
73 73 return extra
74 74
75 75 def encodeextra(d):
76 76 # keys must be sorted to produce a deterministic changelog entry
77 77 items = [
78 78 _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
79 79 for k in sorted(d)
80 80 ]
81 81 return "\0".join(items)
82 82
83 83 def encodecopies(files, copies):
84 84 items = []
85 85 for i, dst in enumerate(files):
86 86 if dst in copies:
87 87 items.append('%d\0%s' % (i, copies[dst]))
88 88 if len(items) != len(copies):
89 89 raise error.ProgrammingError('some copy targets missing from file list')
90 90 return "\n".join(items)
91 91
92 92 def decodecopies(files, data):
93 93 try:
94 94 copies = {}
95 95 if not data:
96 96 return copies
97 97 for l in data.split('\n'):
98 98 strindex, src = l.split('\0')
99 99 i = int(strindex)
100 100 dst = files[i]
101 101 copies[dst] = src
102 102 return copies
103 103 except (ValueError, IndexError):
104 104 # Perhaps someone had chosen the same key name (e.g. "p1copies") and
105 105 # used different syntax for the value.
106 106 return None
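
# A quick round-trip illustration of the two helpers above, assuming they are
# imported from mercurial.changelog (plain str literals here for brevity;
# Mercurial itself passes bytes):
#
#     files = ['a.txt', 'b.txt']
#     data = encodecopies(files, {'b.txt': 'a.txt'})   # -> '1\x00a.txt'
#     decodecopies(files, data)                        # -> {'b.txt': 'a.txt'}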
107 107
108 108 def encodefileindices(files, subset):
109 109 subset = set(subset)
110 110 indices = []
111 111 for i, f in enumerate(files):
112 112 if f in subset:
113 113 indices.append('%d' % i)
114 114 return '\n'.join(indices)
115 115
116 116 def decodefileindices(files, data):
117 117 try:
118 118 subset = []
119 119 if not data:
120 120 return subset
121 121 for strindex in data.split('\n'):
122 122 i = int(strindex)
123 123 if i < 0 or i >= len(files):
124 124 return None
125 125 subset.append(files[i])
126 126 return subset
127 127 except (ValueError, IndexError):
128 128 # Perhaps someone had chosen the same key name (e.g. "added") and
129 129 # used different syntax for the value.
130 130 return None
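
# The index helpers above store a subset of the file list as newline-separated
# positions into that list; a sketch of the round trip (plain str for brevity,
# Mercurial itself passes bytes):
#
#     files = ['a.txt', 'b.txt', 'c.txt']
#     data = encodefileindices(files, ['a.txt', 'c.txt'])   # -> '0\n2'
#     decodefileindices(files, data)                        # -> ['a.txt', 'c.txt']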
131 131
132 132 def stripdesc(desc):
133 133 """strip trailing whitespace and leading and trailing empty lines"""
134 134 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
135 135
136 136 class appender(object):
137 137 '''the changelog index must be updated last on disk, so we use this class
138 138 to delay writes to it'''
139 139 def __init__(self, vfs, name, mode, buf):
140 140 self.data = buf
141 141 fp = vfs(name, mode)
142 142 self.fp = fp
143 143 self.offset = fp.tell()
144 144 self.size = vfs.fstat(fp).st_size
145 145 self._end = self.size
146 146
147 147 def end(self):
148 148 return self._end
149 149 def tell(self):
150 150 return self.offset
151 151 def flush(self):
152 152 pass
153 153
154 154 @property
155 155 def closed(self):
156 156 return self.fp.closed
157 157
158 158 def close(self):
159 159 self.fp.close()
160 160
161 161 def seek(self, offset, whence=0):
162 162 '''virtual file offset spans real file and data'''
163 163 if whence == 0:
164 164 self.offset = offset
165 165 elif whence == 1:
166 166 self.offset += offset
167 167 elif whence == 2:
168 168 self.offset = self.end() + offset
169 169 if self.offset < self.size:
170 170 self.fp.seek(self.offset)
171 171
172 172 def read(self, count=-1):
173 173 '''only trick here is reads that span real file and data'''
174 174 ret = ""
175 175 if self.offset < self.size:
176 176 s = self.fp.read(count)
177 177 ret = s
178 178 self.offset += len(s)
179 179 if count > 0:
180 180 count -= len(s)
181 181 if count != 0:
182 182 doff = self.offset - self.size
183 183 self.data.insert(0, "".join(self.data))
184 184 del self.data[1:]
185 185 s = self.data[0][doff:doff + count]
186 186 self.offset += len(s)
187 187 ret += s
188 188 return ret
189 189
190 190 def write(self, s):
191 191 self.data.append(bytes(s))
192 192 self.offset += len(s)
193 193 self._end += len(s)
194 194
195 195 def __enter__(self):
196 196 self.fp.__enter__()
197 197 return self
198 198
199 199 def __exit__(self, *args):
200 200 return self.fp.__exit__(*args)
201 201
202 202 def _divertopener(opener, target):
203 203 """build an opener that writes in 'target.a' instead of 'target'"""
204 204 def _divert(name, mode='r', checkambig=False):
205 205 if name != target:
206 206 return opener(name, mode)
207 207 return opener(name + ".a", mode)
208 208 return _divert
209 209
210 210 def _delayopener(opener, target, buf):
211 211 """build an opener that stores chunks in 'buf' instead of 'target'"""
212 212 def _delay(name, mode='r', checkambig=False):
213 213 if name != target:
214 214 return opener(name, mode)
215 215 return appender(opener, name, mode, buf)
216 216 return _delay
217 217
218 218 @attr.s
219 219 class _changelogrevision(object):
220 220 # Extensions might modify _defaultextra, so let the constructor below pass
221 221 # it in
222 222 extra = attr.ib()
223 223 manifest = attr.ib(default=nullid)
224 224 user = attr.ib(default='')
225 225 date = attr.ib(default=(0, 0))
226 226 files = attr.ib(default=attr.Factory(list))
227 227 filesadded = attr.ib(default=None)
228 228 filesremoved = attr.ib(default=None)
229 229 p1copies = attr.ib(default=None)
230 230 p2copies = attr.ib(default=None)
231 231 description = attr.ib(default='')
232 232
233 233 class changelogrevision(object):
234 234 """Holds results of a parsed changelog revision.
235 235
236 236 Changelog revisions consist of multiple pieces of data, including
237 237 the manifest node, user, and date. This object exposes a view into
238 238 the parsed object.
239 239 """
240 240
241 241 __slots__ = (
242 242 r'_offsets',
243 243 r'_text',
244 244 )
245 245
246 246 def __new__(cls, text):
247 247 if not text:
248 248 return _changelogrevision(extra=_defaultextra)
249 249
250 250 self = super(changelogrevision, cls).__new__(cls)
251 251 # We could return here and implement the following as an __init__.
252 252 # But doing it here is equivalent and saves an extra function call.
253 253
254 254 # format used:
255 255 # nodeid\n : manifest node in ascii
256 256 # user\n : user, no \n or \r allowed
257 257 # time tz extra\n : date (time is int or float, timezone is int)
258 258 # : extra is metadata, encoded and separated by '\0'
259 259 # : older versions ignore it
260 260 # files\n\n : files modified by the cset, no \n or \r allowed
261 261 # (.*) : comment (free text, ideally utf-8)
262 262 #
263 263 # changelog v0 doesn't use extra
264 264
265 265 nl1 = text.index('\n')
266 266 nl2 = text.index('\n', nl1 + 1)
267 267 nl3 = text.index('\n', nl2 + 1)
268 268
269 269 # The list of files may be empty. Which means nl3 is the first of the
270 270 # double newline that precedes the description.
271 271 if text[nl3 + 1:nl3 + 2] == '\n':
272 272 doublenl = nl3
273 273 else:
274 274 doublenl = text.index('\n\n', nl3 + 1)
275 275
276 276 self._offsets = (nl1, nl2, nl3, doublenl)
277 277 self._text = text
278 278
279 279 return self
280 280
281 281 @property
282 282 def manifest(self):
283 283 return bin(self._text[0:self._offsets[0]])
284 284
285 285 @property
286 286 def user(self):
287 287 off = self._offsets
288 288 return encoding.tolocal(self._text[off[0] + 1:off[1]])
289 289
290 290 @property
291 291 def _rawdate(self):
292 292 off = self._offsets
293 293 dateextra = self._text[off[1] + 1:off[2]]
294 294 return dateextra.split(' ', 2)[0:2]
295 295
296 296 @property
297 297 def _rawextra(self):
298 298 off = self._offsets
299 299 dateextra = self._text[off[1] + 1:off[2]]
300 300 fields = dateextra.split(' ', 2)
301 301 if len(fields) != 3:
302 302 return None
303 303
304 304 return fields[2]
305 305
306 306 @property
307 307 def date(self):
308 308 raw = self._rawdate
309 309 time = float(raw[0])
310 310 # Various tools did silly things with the timezone.
311 311 try:
312 312 timezone = int(raw[1])
313 313 except ValueError:
314 314 timezone = 0
315 315
316 316 return time, timezone
317 317
318 318 @property
319 319 def extra(self):
320 320 raw = self._rawextra
321 321 if raw is None:
322 322 return _defaultextra
323 323
324 324 return decodeextra(raw)
325 325
326 326 @property
327 327 def files(self):
328 328 off = self._offsets
329 329 if off[2] == off[3]:
330 330 return []
331 331
332 332 return self._text[off[2] + 1:off[3]].split('\n')
333 333
334 334 @property
335 335 def filesadded(self):
336 336 rawindices = self.extra.get('filesadded')
337 337 return rawindices and decodefileindices(self.files, rawindices)
338 338
339 339 @property
340 340 def filesremoved(self):
341 341 rawindices = self.extra.get('filesremoved')
342 342 return rawindices and decodefileindices(self.files, rawindices)
343 343
344 344 @property
345 345 def p1copies(self):
346 346 rawcopies = self.extra.get('p1copies')
347 347 return rawcopies and decodecopies(self.files, rawcopies)
348 348
349 349 @property
350 350 def p2copies(self):
351 351 rawcopies = self.extra.get('p2copies')
352 352 return rawcopies and decodecopies(self.files, rawcopies)
353 353
354 354 @property
355 355 def description(self):
356 356 return encoding.tolocal(self._text[self._offsets[3] + 2:])
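
# For reference, a raw changelog entry as parsed by this class looks roughly
# like the following (hypothetical values; the extra field on the date line is
# optional and holds '\0'-separated key:value pairs escaped by _string_escape):
#
#     0123456789abcdef0123456789abcdef01234567      (manifest node, hex)
#     Some User <user@example.com>                  (user)
#     1570000000 0 branch:stable                    (time, timezone, extra)
#     file-a
#     file-b
#                                                   (empty line)
#     commit message text
#
# changelogrevision(text).files would then be ['file-a', 'file-b'] and
# .extra would be {'branch': 'stable'}.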
357 357
358 358 class changelog(revlog.revlog):
359 359 def __init__(self, opener, trypending=False):
360 360 """Load a changelog revlog using an opener.
361 361
362 362 If ``trypending`` is true, we attempt to load the index from a
363 363 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
364 364 The ``00changelog.i.a`` file contains index (and possibly inline
365 365 revision) data for a transaction that hasn't been finalized yet.
366 366 It exists in a separate file to facilitate readers (such as
367 367 hooks processes) accessing data before a transaction is finalized.
368 368 """
369 369 if trypending and opener.exists('00changelog.i.a'):
370 370 indexfile = '00changelog.i.a'
371 371 else:
372 372 indexfile = '00changelog.i'
373 373
374 374 datafile = '00changelog.d'
375 375 revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
376 376 checkambig=True, mmaplargeindex=True)
377 377
378 378 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
379 379 # changelogs don't benefit from generaldelta.
380 380
381 381 self.version &= ~revlog.FLAG_GENERALDELTA
382 382 self._generaldelta = False
383 383
384 384 # Delta chains for changelogs tend to be very small because entries
385 385 # tend to be small and don't delta well with each other. So disable delta
386 386 # chains.
387 387 self._storedeltachains = False
388 388
389 389 self._realopener = opener
390 390 self._delayed = False
391 391 self._delaybuf = None
392 392 self._divert = False
393 393 self.filteredrevs = frozenset()
394 394 self._copiesstorage = opener.options.get('copies-storage')
395 395
396 396 def tiprev(self):
397 397 for i in pycompat.xrange(len(self) - 1, -2, -1):
398 398 if i not in self.filteredrevs:
399 399 return i
400 400
401 401 def tip(self):
402 402 """filtered version of revlog.tip"""
403 403 return self.node(self.tiprev())
404 404
405 405 def __contains__(self, rev):
406 406 """filtered version of revlog.__contains__"""
407 407 return (0 <= rev < len(self)
408 408 and rev not in self.filteredrevs)
409 409
410 410 def __iter__(self):
411 411 """filtered version of revlog.__iter__"""
412 412 if len(self.filteredrevs) == 0:
413 413 return revlog.revlog.__iter__(self)
414 414
415 415 def filterediter():
416 416 for i in pycompat.xrange(len(self)):
417 417 if i not in self.filteredrevs:
418 418 yield i
419 419
420 420 return filterediter()
421 421
422 422 def revs(self, start=0, stop=None):
423 423 """filtered version of revlog.revs"""
424 424 for i in super(changelog, self).revs(start, stop):
425 425 if i not in self.filteredrevs:
426 426 yield i
427 427
428 428 def _checknofilteredinrevs(self, revs):
429 429 """raise the appropriate error if 'revs' contains a filtered revision
430 430
431 431 This returns a version of 'revs' to be used thereafter by the caller.
432 432 In particular, if revs is an iterator, it is converted into a set.
433 433 """
434 434 safehasattr = util.safehasattr
435 435 if safehasattr(revs, '__next__'):
436 436 # Note that inspect.isgenerator() is not true for iterators, hence the '__next__' check.
437 437 revs = set(revs)
438 438
439 439 filteredrevs = self.filteredrevs
440 440 if safehasattr(revs, 'first'): # smartset
441 441 offenders = revs & filteredrevs
442 442 else:
443 443 offenders = filteredrevs.intersection(revs)
444 444
445 445 for rev in offenders:
446 446 raise error.FilteredIndexError(rev)
447 447 return revs
448 448
449 449 def headrevs(self, revs=None):
450 450 if revs is None and self.filteredrevs:
451 451 try:
452 452 return self.index.headrevsfiltered(self.filteredrevs)
453 453 # AttributeError covers non-c-extension environments and
454 454 # old c extensions without filter handling.
455 455 except AttributeError:
456 456 return self._headrevs()
457 457
458 458 if self.filteredrevs:
459 459 revs = self._checknofilteredinrevs(revs)
460 460 return super(changelog, self).headrevs(revs)
461 461
462 462 def strip(self, *args, **kwargs):
463 463 # XXX make something better than assert
464 464 # We can't expect proper strip behavior if we are filtered.
465 465 assert not self.filteredrevs
466 466 super(changelog, self).strip(*args, **kwargs)
467 467
468 468 def rev(self, node):
469 469 """filtered version of revlog.rev"""
470 470 r = super(changelog, self).rev(node)
471 471 if r in self.filteredrevs:
472 472 raise error.FilteredLookupError(hex(node), self.indexfile,
473 473 _('filtered node'))
474 474 return r
475 475
476 476 def node(self, rev):
477 477 """filtered version of revlog.node"""
478 478 if rev in self.filteredrevs:
479 479 raise error.FilteredIndexError(rev)
480 480 return super(changelog, self).node(rev)
481 481
482 482 def linkrev(self, rev):
483 483 """filtered version of revlog.linkrev"""
484 484 if rev in self.filteredrevs:
485 485 raise error.FilteredIndexError(rev)
486 486 return super(changelog, self).linkrev(rev)
487 487
488 488 def parentrevs(self, rev):
489 489 """filtered version of revlog.parentrevs"""
490 490 if rev in self.filteredrevs:
491 491 raise error.FilteredIndexError(rev)
492 492 return super(changelog, self).parentrevs(rev)
493 493
494 494 def flags(self, rev):
495 495 """filtered version of revlog.flags"""
496 496 if rev in self.filteredrevs:
497 497 raise error.FilteredIndexError(rev)
498 498 return super(changelog, self).flags(rev)
499 499
500 500 def delayupdate(self, tr):
501 501 "delay visibility of index updates to other readers"
502 502
503 503 if not self._delayed:
504 504 if len(self) == 0:
505 505 self._divert = True
506 506 if self._realopener.exists(self.indexfile + '.a'):
507 507 self._realopener.unlink(self.indexfile + '.a')
508 508 self.opener = _divertopener(self._realopener, self.indexfile)
509 509 else:
510 510 self._delaybuf = []
511 511 self.opener = _delayopener(self._realopener, self.indexfile,
512 512 self._delaybuf)
513 513 self._delayed = True
514 514 tr.addpending('cl-%i' % id(self), self._writepending)
515 515 tr.addfinalize('cl-%i' % id(self), self._finalize)
516 516
517 517 def _finalize(self, tr):
518 518 "finalize index updates"
519 519 self._delayed = False
520 520 self.opener = self._realopener
521 521 # move redirected index data back into place
522 522 if self._divert:
523 523 assert not self._delaybuf
524 524 tmpname = self.indexfile + ".a"
525 525 nfile = self.opener.open(tmpname)
526 526 nfile.close()
527 527 self.opener.rename(tmpname, self.indexfile, checkambig=True)
528 528 elif self._delaybuf:
529 529 fp = self.opener(self.indexfile, 'a', checkambig=True)
530 530 fp.write("".join(self._delaybuf))
531 531 fp.close()
532 532 self._delaybuf = None
533 533 self._divert = False
534 534 # split when we're done
535 535 self._enforceinlinesize(tr)
536 536
537 537 def _writepending(self, tr):
538 538 "create a file containing the unfinalized state for pretxnchangegroup"
539 539 if self._delaybuf:
540 540 # make a temporary copy of the index
541 541 fp1 = self._realopener(self.indexfile)
542 542 pendingfilename = self.indexfile + ".a"
543 543 # register as a temp file to ensure cleanup on failure
544 544 tr.registertmp(pendingfilename)
545 545 # write existing data
546 546 fp2 = self._realopener(pendingfilename, "w")
547 547 fp2.write(fp1.read())
548 548 # add pending data
549 549 fp2.write("".join(self._delaybuf))
550 550 fp2.close()
551 551 # switch modes so finalize can simply rename
552 552 self._delaybuf = None
553 553 self._divert = True
554 554 self.opener = _divertopener(self._realopener, self.indexfile)
555 555
556 556 if self._divert:
557 557 return True
558 558
559 559 return False
560 560
561 561 def _enforceinlinesize(self, tr, fp=None):
562 562 if not self._delayed:
563 563 revlog.revlog._enforceinlinesize(self, tr, fp)
564 564
565 565 def read(self, node):
566 566 """Obtain data from a parsed changelog revision.
567 567
568 568 Returns a 6-tuple of:
569 569
570 570 - manifest node in binary
571 571 - author/user as a localstr
572 572 - date as a 2-tuple of (time, timezone)
573 573 - list of files
574 574 - commit message as a localstr
575 575 - dict of extra metadata
576 576
577 577 Unless you need to access all fields, consider calling
578 578 ``changelogrevision`` instead, as it is faster for partial object
579 579 access.
580 580 """
581 581 c = changelogrevision(self.revision(node))
582 582 return (
583 583 c.manifest,
584 584 c.user,
585 585 c.date,
586 586 c.files,
587 587 c.description,
588 588 c.extra
589 589 )
590 590
591 591 def changelogrevision(self, nodeorrev):
592 592 """Obtain a ``changelogrevision`` for a node or revision."""
593 593 return changelogrevision(self.revision(nodeorrev))
594 594
595 595 def readfiles(self, node):
596 596 """
597 597 short version of read that only returns the files modified by the cset
598 598 """
599 599 text = self.revision(node)
600 600 if not text:
601 601 return []
602 602 last = text.index("\n\n")
603 603 l = text[:last].split('\n')
604 604 return l[3:]
605 605
606 606 def add(self, manifest, files, desc, transaction, p1, p2,
607 607 user, date=None, extra=None, p1copies=None, p2copies=None,
608 608 filesadded=None, filesremoved=None):
609 609 # Convert to UTF-8 encoded bytestrings as the very first
610 610 # thing: calling any method on a localstr object will turn it
611 611 # into a str object and the cached UTF-8 string is thus lost.
612 612 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
613 613
614 614 user = user.strip()
615 615 # An empty username or a username with a "\n" will make the
616 616 # revision text contain two "\n\n" sequences -> corrupt
617 617 # repository since read cannot unpack the revision.
618 618 if not user:
619 619 raise error.StorageError(_("empty username"))
620 620 if "\n" in user:
621 621 raise error.StorageError(_("username %r contains a newline")
622 622 % pycompat.bytestr(user))
623 623
624 624 desc = stripdesc(desc)
625 625
626 626 if date:
627 627 parseddate = "%d %d" % dateutil.parsedate(date)
628 628 else:
629 629 parseddate = "%d %d" % dateutil.makedate()
630 630 if extra:
631 631 branch = extra.get("branch")
632 632 if branch in ("default", ""):
633 633 del extra["branch"]
634 634 elif branch in (".", "null", "tip"):
635 635 raise error.StorageError(_('the name \'%s\' is reserved')
636 636 % branch)
637 637 sortedfiles = sorted(files)
638 638 if extra is not None:
639 639 for name in ('p1copies', 'p2copies', 'filesadded', 'filesremoved'):
640 640 extra.pop(name, None)
641 if p1copies is not None:
642 p1copies = encodecopies(sortedfiles, p1copies)
643 if p2copies is not None:
644 p2copies = encodecopies(sortedfiles, p2copies)
645 if filesadded is not None:
646 filesadded = encodefileindices(sortedfiles, filesadded)
647 if filesremoved is not None:
648 filesremoved = encodefileindices(sortedfiles, filesremoved)
641 649 if self._copiesstorage == 'extra':
642 650 extrasentries = p1copies, p2copies, filesadded, filesremoved
643 651 if extra is None and any(x is not None for x in extrasentries):
644 652 extra = {}
645 653 if p1copies is not None:
646 extra['p1copies'] = encodecopies(sortedfiles, p1copies)
654 extra['p1copies'] = p1copies
647 655 if p2copies is not None:
648 extra['p2copies'] = encodecopies(sortedfiles, p2copies)
656 extra['p2copies'] = p2copies
649 657 if filesadded is not None:
650 extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
658 extra['filesadded'] = filesadded
651 659 if filesremoved is not None:
652 extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)
660 extra['filesremoved'] = filesremoved
653 661
654 662 if extra:
655 663 extra = encodeextra(extra)
656 664 parseddate = "%s %s" % (parseddate, extra)
657 665 l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
658 666 text = "\n".join(l)
659 667 return self.addrevision(text, transaction, len(self), p1, p2)
660 668
661 669 def branchinfo(self, rev):
662 670 """return the branch name and open/close state of a revision
663 671
664 672 This function exists because creating a changectx object
665 673 just to access this is costly."""
666 674 extra = self.read(rev)[5]
667 675 return encoding.tolocal(extra.get("branch")), 'close' in extra
668 676
669 677 def _nodeduplicatecallback(self, transaction, node):
670 678 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
671 679 #
672 680 # We track them in a list to preserve their order from the source bundle
673 681 duplicates = transaction.changes.setdefault('revduplicates', [])
674 682 duplicates.append(self.rev(node))
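
On the read side nothing needs to change with this patch: metadata written
into extra by add() is still exposed through the changelogrevision properties
defined above. A hedged usage sketch, assuming a repository object repo with
its changelog at repo.changelog and a changeset node node:

    cl = repo.changelog
    rev = cl.changelogrevision(node)  # parse once, access fields lazily
    rev.p1copies      # {dest: source} copies against p1, decoded from
                      # extra['p1copies'] when that key is present
    rev.filesadded    # list of files added in this changeset, decoded
                      # from extra['filesadded'] when present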