dirstate: warn on invalid parents rather than aborting...
Matt Mackall - r13032:e41e2b79 default
@@ -1,573 +1,571 @@ hgext/bookmarks.py
1 1 # Mercurial extension to provide the 'hg bookmark' command
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''track a line of development with movable markers
9 9
10 10 Bookmarks are local movable markers to changesets. Every bookmark
11 11 points to a changeset identified by its hash. If you commit a
12 12 changeset that is based on a changeset that has a bookmark on it, the
13 13 bookmark shifts to the new changeset.
14 14
15 15 It is possible to use bookmark names in every revision lookup (e.g.
16 16 :hg:`merge`, :hg:`update`).
17 17
18 18 By default, when several bookmarks point to the same changeset, they
19 19 will all move forward together. It is possible to obtain a more
20 20 git-like experience by adding the following configuration option to
21 21 your configuration file::
22 22
23 23 [bookmarks]
24 24 track.current = True
25 25
26 26 This will cause Mercurial to track the bookmark that you are currently
27 27 using, and only update it. This is similar to git's approach to
28 28 branching.
29 29 '''
30 30
31 31 from mercurial.i18n import _
32 32 from mercurial.node import nullid, nullrev, bin, hex, short
33 33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
34 34 from mercurial import revset
35 35 import os
36 36
37 37 def write(repo):
38 38 '''Write bookmarks
39 39
40 40 Write the given bookmark => hash dictionary to the .hg/bookmarks file
41 41 in a format identical to that of localtags.
42 42
43 43 We also store a backup of the previous state in undo.bookmarks that
44 44 can be copied back on rollback.
45 45 '''
46 46 refs = repo._bookmarks
47 47 if os.path.exists(repo.join('bookmarks')):
48 48 util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
49 49 if repo._bookmarkcurrent not in refs:
50 50 setcurrent(repo, None)
51 51 wlock = repo.wlock()
52 52 try:
53 53 file = repo.opener('bookmarks', 'w', atomictemp=True)
54 54 for refspec, node in refs.iteritems():
55 55 file.write("%s %s\n" % (hex(node), refspec))
56 56 file.rename()
57 57
58 58 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
59 59 try:
60 60 os.utime(repo.sjoin('00changelog.i'), None)
61 61 except OSError:
62 62 pass
63 63
64 64 finally:
65 65 wlock.release()
66 66
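write() above emits the same layout as .hg/localtags: one line per bookmark, a 40-character hex node, a space, then the name. A minimal stdlib-only sketch of that round trip (names here are illustrative, not Mercurial API):

    from binascii import hexlify

    def dumpmarks(refs):
        # refs maps bookmark name -> 20-byte binary node
        return ''.join('%s %s\n' % (hexlify(node), name)
                       for name, node in sorted(refs.iteritems()))

    def loadmarks(data):
        marks = {}
        for line in data.splitlines():
            sha, name = line.strip().split(' ', 1)
            marks[name] = sha   # the extension resolves sha via changelog.lookup
        return marks
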
67 67 def setcurrent(repo, mark):
68 68 '''Set the name of the bookmark that we are currently on
69 69
70 70 Set the name of the bookmark that we are on (hg update <bookmark>).
71 71 The name is recorded in .hg/bookmarks.current
72 72 '''
73 73 current = repo._bookmarkcurrent
74 74 if current == mark:
75 75 return
76 76
77 77 refs = repo._bookmarks
78 78
79 80 # do not clear the current bookmark if we update to the rev it points to
80 80 if (mark and mark not in refs and
81 81 current and refs[current] == repo.changectx('.').node()):
82 82 return
83 83 if mark not in refs:
84 84 mark = ''
85 85 wlock = repo.wlock()
86 86 try:
87 87 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
88 88 file.write(mark)
89 89 file.rename()
90 90 finally:
91 91 wlock.release()
92 92 repo._bookmarkcurrent = mark
93 93
94 94 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
95 95 '''track a line of development with movable markers
96 96
97 97 Bookmarks are pointers to certain commits that move when
98 98 committing. Bookmarks are local. They can be renamed, copied and
99 99 deleted. It is possible to use bookmark names in :hg:`merge` and
100 100 :hg:`update` to merge and update respectively to a given bookmark.
101 101
102 102 You can use :hg:`bookmark NAME` to set a bookmark on the working
103 103 directory's parent revision with the given name. If you specify
104 104 a revision using -r REV (where REV may be an existing bookmark),
105 105 the bookmark is assigned to that revision.
106 106
107 107 Bookmarks can be pushed and pulled between repositories (see :hg:`help
108 108 push` and :hg:`help pull`). This requires the bookmark extension to be
109 109 enabled for both the local and remote repositories.
110 110 '''
111 111 hexfn = ui.debugflag and hex or short
112 112 marks = repo._bookmarks
113 113 cur = repo.changectx('.').node()
114 114
115 115 if rename:
116 116 if rename not in marks:
117 117 raise util.Abort(_("a bookmark of this name does not exist"))
118 118 if mark in marks and not force:
119 119 raise util.Abort(_("a bookmark of the same name already exists"))
120 120 if mark is None:
121 121 raise util.Abort(_("new bookmark name required"))
122 122 marks[mark] = marks[rename]
123 123 del marks[rename]
124 124 if repo._bookmarkcurrent == rename:
125 125 setcurrent(repo, mark)
126 126 write(repo)
127 127 return
128 128
129 129 if delete:
130 130 if mark is None:
131 131 raise util.Abort(_("bookmark name required"))
132 132 if mark not in marks:
133 133 raise util.Abort(_("a bookmark of this name does not exist"))
134 134 if mark == repo._bookmarkcurrent:
135 135 setcurrent(repo, None)
136 136 del marks[mark]
137 137 write(repo)
138 138 return
139 139
140 140 if mark is not None:
141 141 if "\n" in mark:
142 142 raise util.Abort(_("bookmark name cannot contain newlines"))
143 143 mark = mark.strip()
144 144 if not mark:
145 145 raise util.Abort(_("bookmark names cannot consist entirely of "
146 146 "whitespace"))
147 147 if mark in marks and not force:
148 148 raise util.Abort(_("a bookmark of the same name already exists"))
149 149 if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
150 150 and not force):
151 151 raise util.Abort(
152 152 _("a bookmark cannot have the name of an existing branch"))
153 153 if rev:
154 154 marks[mark] = repo.lookup(rev)
155 155 else:
156 156 marks[mark] = repo.changectx('.').node()
157 157 setcurrent(repo, mark)
158 158 write(repo)
159 159 return
160 160
161 161 if mark is None:
162 162 if rev:
163 163 raise util.Abort(_("bookmark name required"))
164 164 if len(marks) == 0:
165 165 ui.status(_("no bookmarks set\n"))
166 166 else:
167 167 for bmark, n in marks.iteritems():
168 168 if ui.configbool('bookmarks', 'track.current'):
169 169 current = repo._bookmarkcurrent
170 170 if bmark == current and n == cur:
171 171 prefix, label = '*', 'bookmarks.current'
172 172 else:
173 173 prefix, label = ' ', ''
174 174 else:
175 175 if n == cur:
176 176 prefix, label = '*', 'bookmarks.current'
177 177 else:
178 178 prefix, label = ' ', ''
179 179
180 180 if ui.quiet:
181 181 ui.write("%s\n" % bmark, label=label)
182 182 else:
183 183 ui.write(" %s %-25s %d:%s\n" % (
184 184 prefix, bmark, repo.changelog.rev(n), hexfn(n)),
185 185 label=label)
186 186 return
187 187
188 188 def _revstostrip(changelog, node):
189 189 srev = changelog.rev(node)
190 190 tostrip = [srev]
191 191 saveheads = []
192 192 for r in xrange(srev, len(changelog)):
193 193 parents = changelog.parentrevs(r)
194 194 if parents[0] in tostrip or parents[1] in tostrip:
195 195 tostrip.append(r)
196 196 if parents[1] != nullrev:
197 197 for p in parents:
198 198 if p not in tostrip and p > srev:
199 199 saveheads.append(p)
200 200 return [r for r in tostrip if r not in saveheads]
201 201
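_revstostrip() walks from the stripped revision up to tip, pulling in each revision whose parent is already being stripped, and records merge parents above the strip point so they are not swept away. A toy trace under an assumed five-revision history; the fake changelog below only duck-types the methods the function actually touches:

    class fakelog(object):
        def __init__(self, parentrevs):
            self._parents = parentrevs      # list of (p1, p2) rev pairs
        def __len__(self):
            return len(self._parents)
        def rev(self, node):
            return node                     # pretend nodes are rev numbers
        def parentrevs(self, r):
            return self._parents[r]

    # history: 0 <- 1 <- 2 <- 3, plus 4 = merge(1, 3); nullrev is -1
    log = fakelog([(-1, -1), (0, -1), (1, -1), (2, -1), (1, 3)])
    print _revstostrip(log, 2)              # -> [2, 3, 4]
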
202 202 def strip(oldstrip, ui, repo, node, backup="all"):
203 203 """Strip bookmarks if revisions are stripped using
204 204 the mercurial.strip method. This usually happens during
205 205 qpush and qpop"""
206 206 revisions = _revstostrip(repo.changelog, node)
207 207 marks = repo._bookmarks
208 208 update = []
209 209 for mark, n in marks.iteritems():
210 210 if repo.changelog.rev(n) in revisions:
211 211 update.append(mark)
212 212 oldstrip(ui, repo, node, backup)
213 213 if len(update) > 0:
214 214 for m in update:
215 215 marks[m] = repo.changectx('.').node()
216 216 write(repo)
217 217
218 218 def reposetup(ui, repo):
219 219 if not repo.local():
220 220 return
221 221
222 222 class bookmark_repo(repo.__class__):
223 223
224 224 @util.propertycache
225 225 def _bookmarks(self):
226 226 '''Parse .hg/bookmarks file and return a dictionary
227 227
228 228 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
229 229 in the .hg/bookmarks file.
230 230 Read the file and return a (name=>nodeid) dictionary
231 231 '''
232 232 try:
233 233 bookmarks = {}
234 234 for line in self.opener('bookmarks'):
235 235 sha, refspec = line.strip().split(' ', 1)
236 236 bookmarks[refspec] = self.changelog.lookup(sha)
237 237 except:
238 238 pass
239 239 return bookmarks
240 240
241 241 @util.propertycache
242 242 def _bookmarkcurrent(self):
243 243 '''Get the current bookmark
244 244
245 245 If we use git-style branches we have a current bookmark that
246 246 we are on. This function returns the name of the bookmark. It
247 247 is stored in .hg/bookmarks.current
248 248 '''
249 249 mark = None
250 250 if os.path.exists(self.join('bookmarks.current')):
251 251 file = self.opener('bookmarks.current')
252 252 # No readline() in posixfile_nt, reading everything is cheap
253 253 mark = (file.readlines() or [''])[0]
254 254 if mark == '':
255 255 mark = None
256 256 file.close()
257 257 return mark
258 258
259 259 def rollback(self, *args):
260 260 if os.path.exists(self.join('undo.bookmarks')):
261 261 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
262 262 return super(bookmark_repo, self).rollback(*args)
263 263
264 264 def lookup(self, key):
265 265 if key in self._bookmarks:
266 266 key = self._bookmarks[key]
267 267 return super(bookmark_repo, self).lookup(key)
268 268
269 269 def _bookmarksupdate(self, parents, node):
270 270 marks = self._bookmarks
271 271 update = False
272 272 if ui.configbool('bookmarks', 'track.current'):
273 273 mark = self._bookmarkcurrent
274 274 if mark and marks[mark] in parents:
275 275 marks[mark] = node
276 276 update = True
277 277 else:
278 278 for mark, n in marks.items():
279 279 if n in parents:
280 280 marks[mark] = node
281 281 update = True
282 282 if update:
283 283 write(self)
284 284
285 285 def commitctx(self, ctx, error=False):
286 286 """Add a revision to the repository and
287 287 move the bookmark"""
288 288 wlock = self.wlock() # do both commit and bookmark with lock held
289 289 try:
290 290 node = super(bookmark_repo, self).commitctx(ctx, error)
291 291 if node is None:
292 292 return None
293 293 parents = self.changelog.parents(node)
294 294 if parents[1] == nullid:
295 295 parents = (parents[0],)
296 296
297 297 self._bookmarksupdate(parents, node)
298 298 return node
299 299 finally:
300 300 wlock.release()
301 301
302 302 def pull(self, remote, heads=None, force=False):
303 303 result = super(bookmark_repo, self).pull(remote, heads, force)
304 304
305 305 self.ui.debug("checking for updated bookmarks\n")
306 306 rb = remote.listkeys('bookmarks')
307 307 changed = False
308 308 for k in rb.keys():
309 309 if k in self._bookmarks:
310 310 nr, nl = rb[k], self._bookmarks[k]
311 311 if nr in self:
312 312 cr = self[nr]
313 313 cl = self[nl]
314 314 if cl.rev() >= cr.rev():
315 315 continue
316 316 if cr in cl.descendants():
317 317 self._bookmarks[k] = cr.node()
318 318 changed = True
319 319 self.ui.status(_("updating bookmark %s\n") % k)
320 320 else:
321 321 self.ui.warn(_("not updating divergent"
322 322 " bookmark %s\n") % k)
323 323 if changed:
324 324 write(repo)
325 325
326 326 return result
327 327
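The pull() override above only lets a remote bookmark move the local one forward along its own history; anything else is flagged as divergent. The rule in isolation, as a toy helper over two changectx-like objects (assumed API):

    def fastforward(cl, cr):
        # cl: locally bookmarked changectx, cr: remote one
        if cl.rev() >= cr.rev():
            return False                 # remote is not ahead
        return cr in cl.descendants()    # only then move the bookmark
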
328 328 def push(self, remote, force=False, revs=None, newbranch=False):
329 329 result = super(bookmark_repo, self).push(remote, force, revs,
330 330 newbranch)
331 331
332 332 self.ui.debug("checking for updated bookmarks\n")
333 333 rb = remote.listkeys('bookmarks')
334 334 for k in rb.keys():
335 335 if k in self._bookmarks:
336 336 nr, nl = rb[k], self._bookmarks[k]
337 337 if nr in self:
338 338 cr = self[nr]
339 339 cl = self[nl]
340 340 if cl in cr.descendants():
341 341 r = remote.pushkey('bookmarks', k, nr, nl)
342 342 if r:
343 343 self.ui.status(_("updating bookmark %s\n") % k)
344 344 else:
345 345 self.ui.warn(_('updating bookmark %s'
346 346 ' failed!\n') % k)
347 347
348 348 return result
349 349
350 350 def addchangegroup(self, *args, **kwargs):
351 parents = self.dirstate.parents()
352
353 351 result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
354 352 if result > 1:
355 353 # We have more heads than before
356 354 return result
357 355 node = self.changelog.tip()
358
356 parents = self.dirstate.parents()
359 357 self._bookmarksupdate(parents, node)
360 358 return result
361 359
362 360 def _findtags(self):
363 361 """Merge bookmarks with normal tags"""
364 362 (tags, tagtypes) = super(bookmark_repo, self)._findtags()
365 363 tags.update(self._bookmarks)
366 364 return (tags, tagtypes)
367 365
368 366 if hasattr(repo, 'invalidate'):
369 367 def invalidate(self):
370 368 super(bookmark_repo, self).invalidate()
371 369 for attr in ('_bookmarks', '_bookmarkcurrent'):
372 370 if attr in self.__dict__:
373 371 delattr(self, attr)
374 372
375 373 repo.__class__ = bookmark_repo
376 374
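The assignment above is the standard extension idiom of this era: reposetup() swaps the live repository's class for a subclass, so every override can delegate to the original behaviour through super(). Stripped to its bones (illustrative names only):

    def wrap(repo):
        class wrapped(repo.__class__):
            def lookup(self, key):
                # intercept here, then fall through to the real method
                return super(wrapped, self).lookup(key)
        repo.__class__ = wrapped
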
377 375 def listbookmarks(repo):
378 376 # We may try to list bookmarks on a repo type that does not
379 377 # support it (e.g., statichttprepository).
380 378 if not hasattr(repo, '_bookmarks'):
381 379 return {}
382 380
383 381 d = {}
384 382 for k, v in repo._bookmarks.iteritems():
385 383 d[k] = hex(v)
386 384 return d
387 385
388 386 def pushbookmark(repo, key, old, new):
389 387 w = repo.wlock()
390 388 try:
391 389 marks = repo._bookmarks
392 390 if hex(marks.get(key, '')) != old:
393 391 return False
394 392 if new == '':
395 393 del marks[key]
396 394 else:
397 395 if new not in repo:
398 396 return False
399 397 marks[key] = repo[new].node()
400 398 write(repo)
401 399 return True
402 400 finally:
403 401 w.release()
404 402
405 403 def pull(oldpull, ui, repo, source="default", **opts):
406 404 # translate bookmark args to rev args for actual pull
407 405 if opts.get('bookmark'):
408 406 # this is an unpleasant hack as pull will do this internally
409 407 source, branches = hg.parseurl(ui.expandpath(source),
410 408 opts.get('branch'))
411 409 other = hg.repository(hg.remoteui(repo, opts), source)
412 410 rb = other.listkeys('bookmarks')
413 411
414 412 for b in opts['bookmark']:
415 413 if b not in rb:
416 414 raise util.Abort(_('remote bookmark %s not found!') % b)
417 415 opts.setdefault('rev', []).append(b)
418 416
419 417 result = oldpull(ui, repo, source, **opts)
420 418
421 419 # update specified bookmarks
422 420 if opts.get('bookmark'):
423 421 for b in opts['bookmark']:
424 422 # explicit pull overrides local bookmark if any
425 423 ui.status(_("importing bookmark %s\n") % b)
426 424 repo._bookmarks[b] = repo[rb[b]].node()
427 425 write(repo)
428 426
429 427 return result
430 428
431 429 def push(oldpush, ui, repo, dest=None, **opts):
432 430 dopush = True
433 431 if opts.get('bookmark'):
434 432 dopush = False
435 433 for b in opts['bookmark']:
436 434 if b in repo._bookmarks:
437 435 dopush = True
438 436 opts.setdefault('rev', []).append(b)
439 437
440 438 result = 0
441 439 if dopush:
442 440 result = oldpush(ui, repo, dest, **opts)
443 441
444 442 if opts.get('bookmark'):
445 443 # this is an unpleasant hack as push will do this internally
446 444 dest = ui.expandpath(dest or 'default-push', dest or 'default')
447 445 dest, branches = hg.parseurl(dest, opts.get('branch'))
448 446 other = hg.repository(hg.remoteui(repo, opts), dest)
449 447 rb = other.listkeys('bookmarks')
450 448 for b in opts['bookmark']:
451 449 # explicit push overrides remote bookmark if any
452 450 if b in repo._bookmarks:
453 451 ui.status(_("exporting bookmark %s\n") % b)
454 452 new = repo[b].hex()
455 453 elif b in rb:
456 454 ui.status(_("deleting remote bookmark %s\n") % b)
457 455 new = '' # delete
458 456 else:
459 457 ui.warn(_('bookmark %s does not exist on the local '
460 458 'or remote repository!\n') % b)
461 459 return 2
462 460 old = rb.get(b, '')
463 461 r = other.pushkey('bookmarks', b, old, new)
464 462 if not r:
465 463 ui.warn(_('updating bookmark %s failed!\n') % b)
466 464 if not result:
467 465 result = 2
468 466
469 467 return result
470 468
471 469 def diffbookmarks(ui, repo, remote):
472 470 ui.status(_("searching for changed bookmarks\n"))
473 471
474 472 lmarks = repo.listkeys('bookmarks')
475 473 rmarks = remote.listkeys('bookmarks')
476 474
477 475 diff = sorted(set(rmarks) - set(lmarks))
478 476 for k in diff:
479 477 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
480 478
481 479 if len(diff) <= 0:
482 480 ui.status(_("no changed bookmarks found\n"))
483 481 return 1
484 482 return 0
485 483
486 484 def incoming(oldincoming, ui, repo, source="default", **opts):
487 485 if opts.get('bookmarks'):
488 486 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
489 487 other = hg.repository(hg.remoteui(repo, opts), source)
490 488 ui.status(_('comparing with %s\n') % url.hidepassword(source))
491 489 return diffbookmarks(ui, repo, other)
492 490 else:
493 491 return oldincoming(ui, repo, source, **opts)
494 492
495 493 def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
496 494 if opts.get('bookmarks'):
497 495 dest = ui.expandpath(dest or 'default-push', dest or 'default')
498 496 dest, branches = hg.parseurl(dest, opts.get('branch'))
499 497 other = hg.repository(hg.remoteui(repo, opts), dest)
500 498 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
501 499 return diffbookmarks(ui, other, repo)
502 500 else:
503 501 return oldoutgoing(ui, repo, dest, **opts)
504 502
505 503 def uisetup(ui):
506 504 extensions.wrapfunction(repair, "strip", strip)
507 505 if ui.configbool('bookmarks', 'track.current'):
508 506 extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
509 507
510 508 entry = extensions.wrapcommand(commands.table, 'pull', pull)
511 509 entry[1].append(('B', 'bookmark', [],
512 510 _("bookmark to import"),
513 511 _('BOOKMARK')))
514 512 entry = extensions.wrapcommand(commands.table, 'push', push)
515 513 entry[1].append(('B', 'bookmark', [],
516 514 _("bookmark to export"),
517 515 _('BOOKMARK')))
518 516 entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
519 517 entry[1].append(('B', 'bookmarks', False,
520 518 _("compare bookmark")))
521 519 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
522 520 entry[1].append(('B', 'bookmarks', False,
523 521 _("compare bookmark")))
524 522
525 523 pushkey.register('bookmarks', pushbookmark, listbookmarks)
526 524
527 525 def updatecurbookmark(orig, ui, repo, *args, **opts):
528 526 '''Set the current bookmark
529 527
530 528 If the user updates to a bookmark we update the .hg/bookmarks.current
531 529 file.
532 530 '''
533 531 res = orig(ui, repo, *args, **opts)
534 532 rev = opts['rev']
535 533 if not rev and len(args) > 0:
536 534 rev = args[0]
537 535 setcurrent(repo, rev)
538 536 return res
539 537
540 538 def bmrevset(repo, subset, x):
541 539 """``bookmark([name])``
542 540 The named bookmark or all bookmarks.
543 541 """
544 542 # i18n: "bookmark" is a keyword
545 543 args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments'))
546 544 if args:
547 545 bm = revset.getstring(args[0],
548 546 # i18n: "bookmark" is a keyword
549 547 _('the argument to bookmark must be a string'))
550 548 bmrev = listbookmarks(repo).get(bm, None)
551 549 if bmrev:
552 550 bmrev = repo.changelog.rev(bin(bmrev))
553 551 return [r for r in subset if r == bmrev]
554 552 bms = set([repo.changelog.rev(bin(r)) for r in listbookmarks(repo).values()])
555 553 return [r for r in subset if r in bms]
556 554
557 555 def extsetup(ui):
558 556 revset.symbols['bookmark'] = bmrevset
559 557
560 558 cmdtable = {
561 559 "bookmarks":
562 560 (bookmark,
563 561 [('f', 'force', False, _('force')),
564 562 ('r', 'rev', '', _('revision'), _('REV')),
565 563 ('d', 'delete', False, _('delete a given bookmark')),
566 564 ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
567 565 _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
568 566 }
569 567
570 568 colortable = {'bookmarks.current': 'green'}
571 569
572 570 # tell hggettext to extract docstrings from these functions:
573 571 i18nfunctions = [bmrevset]
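With uisetup() having registered the 'bookmarks' pushkey namespace, listbookmarks() doubles as a plain inspection helper. A hedged interactive sketch (assumes the current directory is a repository with this extension loaded):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')
    for name, hexnode in sorted(listbookmarks(repo).items()):
        print '%s -> %s' % (name, hexnode[:12])
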
@@ -1,681 +1,682 @@ mercurial/dirstate.py
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid
9 9 from i18n import _
10 10 import util, ignore, osutil, parsers
11 11 import struct, os, stat, errno
12 12 import cStringIO
13 13
14 14 _format = ">cllll"
15 15 propertycache = util.propertycache
16 16
17 17 def _finddirs(path):
18 18 pos = path.rfind('/')
19 19 while pos != -1:
20 20 yield path[:pos]
21 21 pos = path.rfind('/', 0, pos)
22 22
23 23 def _incdirs(dirs, path):
24 24 for base in _finddirs(path):
25 25 if base in dirs:
26 26 dirs[base] += 1
27 27 return
28 28 dirs[base] = 1
29 29
30 30 def _decdirs(dirs, path):
31 31 for base in _finddirs(path):
32 32 if dirs[base] > 1:
33 33 dirs[base] -= 1
34 34 return
35 35 del dirs[base]
36 36
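_incdirs() and _decdirs() keep a reference count per directory, and both rely on the same invariant: as soon as one prefix of the path is already in the map, every shorter prefix is counted too, so the loop can stop early. A short trace on toy data:

    dirs = {}
    _incdirs(dirs, 'a/b/c.txt')   # dirs == {'a': 1, 'a/b': 1}
    _incdirs(dirs, 'a/d.txt')     # 'a' found -> bumped, early return
                                  # dirs == {'a': 2, 'a/b': 1}
    _decdirs(dirs, 'a/b/c.txt')   # 'a/b' hits zero and is deleted,
                                  # then 'a' is decremented back to 1
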
37 37 class dirstate(object):
38 38
39 def __init__(self, opener, ui, root):
39 def __init__(self, opener, ui, root, validate):
40 40 '''Create a new dirstate object.
41 41
42 42 opener is an open()-like callable that can be used to open the
43 43 dirstate file; root is the root of the directory tracked by
44 44 the dirstate.
45 45 '''
46 46 self._opener = opener
47 self._validate = validate
47 48 self._root = root
48 49 self._rootdir = os.path.join(root, '')
49 50 self._dirty = False
50 51 self._dirtypl = False
51 52 self._ui = ui
52 53
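The new validate argument is the point of this changeset: parents() below maps each stored parent through it before handing the node to callers. Any callable from node to node satisfies the contract; two trivial conforming extremes (hypothetical, for illustration; the real validator built in localrepo checks the changelog and warns):

    from mercurial.node import nullid

    def acceptall(node):
        # trust whatever .hg/dirstate recorded
        return node

    def rejectall(node):
        # treat every recorded parent as unknown
        return nullid
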
53 54 @propertycache
54 55 def _map(self):
55 56 '''Return the dirstate contents as a map from filename to
56 57 (state, mode, size, time).'''
57 58 self._read()
58 59 return self._map
59 60
60 61 @propertycache
61 62 def _copymap(self):
62 63 self._read()
63 64 return self._copymap
64 65
65 66 @propertycache
66 67 def _foldmap(self):
67 68 f = {}
68 69 for name in self._map:
69 70 f[os.path.normcase(name)] = name
70 71 return f
71 72
72 73 @propertycache
73 74 def _branch(self):
74 75 try:
75 76 return self._opener("branch").read().strip() or "default"
76 77 except IOError:
77 78 return "default"
78 79
79 80 @propertycache
80 81 def _pl(self):
81 82 try:
82 83 st = self._opener("dirstate").read(40)
83 84 l = len(st)
84 85 if l == 40:
85 86 return st[:20], st[20:40]
86 87 elif l > 0 and l < 40:
87 88 raise util.Abort(_('working directory state appears damaged!'))
88 89 except IOError, err:
89 90 if err.errno != errno.ENOENT:
90 91 raise
91 92 return [nullid, nullid]
92 93
93 94 @propertycache
94 95 def _dirs(self):
95 96 dirs = {}
96 97 for f, s in self._map.iteritems():
97 98 if s[0] != 'r':
98 99 _incdirs(dirs, f)
99 100 return dirs
100 101
101 102 @propertycache
102 103 def _ignore(self):
103 104 files = [self._join('.hgignore')]
104 105 for name, path in self._ui.configitems("ui"):
105 106 if name == 'ignore' or name.startswith('ignore.'):
106 107 files.append(util.expandpath(path))
107 108 return ignore.ignore(self._root, files, self._ui.warn)
108 109
109 110 @propertycache
110 111 def _slash(self):
111 112 return self._ui.configbool('ui', 'slash') and os.sep != '/'
112 113
113 114 @propertycache
114 115 def _checklink(self):
115 116 return util.checklink(self._root)
116 117
117 118 @propertycache
118 119 def _checkexec(self):
119 120 return util.checkexec(self._root)
120 121
121 122 @propertycache
122 123 def _checkcase(self):
123 124 return not util.checkcase(self._join('.hg'))
124 125
125 126 def _join(self, f):
126 127 # much faster than os.path.join()
127 128 # it's safe because f is always a relative path
128 129 return self._rootdir + f
129 130
130 131 def flagfunc(self, fallback):
131 132 if self._checklink:
132 133 if self._checkexec:
133 134 def f(x):
134 135 p = self._join(x)
135 136 if os.path.islink(p):
136 137 return 'l'
137 138 if util.is_exec(p):
138 139 return 'x'
139 140 return ''
140 141 return f
141 142 def f(x):
142 143 if os.path.islink(self._join(x)):
143 144 return 'l'
144 145 if 'x' in fallback(x):
145 146 return 'x'
146 147 return ''
147 148 return f
148 149 if self._checkexec:
149 150 def f(x):
150 151 if 'l' in fallback(x):
151 152 return 'l'
152 153 if util.is_exec(self._join(x)):
153 154 return 'x'
154 155 return ''
155 156 return f
156 157 return fallback
157 158
158 159 def getcwd(self):
159 160 cwd = os.getcwd()
160 161 if cwd == self._root:
161 162 return ''
162 163 # self._root ends with a path separator if self._root is '/' or 'C:\'
163 164 rootsep = self._root
164 165 if not util.endswithsep(rootsep):
165 166 rootsep += os.sep
166 167 if cwd.startswith(rootsep):
167 168 return cwd[len(rootsep):]
168 169 else:
169 170 # we're outside the repo. return an absolute path.
170 171 return cwd
171 172
172 173 def pathto(self, f, cwd=None):
173 174 if cwd is None:
174 175 cwd = self.getcwd()
175 176 path = util.pathto(self._root, cwd, f)
176 177 if self._slash:
177 178 return util.normpath(path)
178 179 return path
179 180
180 181 def __getitem__(self, key):
181 182 '''Return the current state of key (a filename) in the dirstate.
182 183
183 184 States are:
184 185 n normal
185 186 m needs merging
186 187 r marked for removal
187 188 a marked for addition
188 189 ? not tracked
189 190 '''
190 191 return self._map.get(key, ("?",))[0]
191 192
192 193 def __contains__(self, key):
193 194 return key in self._map
194 195
195 196 def __iter__(self):
196 197 for x in sorted(self._map):
197 198 yield x
198 199
199 200 def parents(self):
200 return self._pl
201 return [self._validate(p) for p in self._pl]
201 202
202 203 def branch(self):
203 204 return self._branch
204 205
205 206 def setparents(self, p1, p2=nullid):
206 207 self._dirty = self._dirtypl = True
207 208 self._pl = p1, p2
208 209
209 210 def setbranch(self, branch):
210 211 if branch in ['tip', '.', 'null']:
211 212 raise util.Abort(_('the name \'%s\' is reserved') % branch)
212 213 self._branch = branch
213 214 self._opener("branch", "w").write(branch + '\n')
214 215
215 216 def _read(self):
216 217 self._map = {}
217 218 self._copymap = {}
218 219 try:
219 220 st = self._opener("dirstate").read()
220 221 except IOError, err:
221 222 if err.errno != errno.ENOENT:
222 223 raise
223 224 return
224 225 if not st:
225 226 return
226 227
227 228 p = parsers.parse_dirstate(self._map, self._copymap, st)
228 229 if not self._dirtypl:
229 230 self._pl = p
230 231
231 232 def invalidate(self):
232 233 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
233 234 if a in self.__dict__:
234 235 delattr(self, a)
235 236 self._dirty = False
236 237
237 238 def copy(self, source, dest):
238 239 """Mark dest as a copy of source. Unmark dest if source is None."""
239 240 if source == dest:
240 241 return
241 242 self._dirty = True
242 243 if source is not None:
243 244 self._copymap[dest] = source
244 245 elif dest in self._copymap:
245 246 del self._copymap[dest]
246 247
247 248 def copied(self, file):
248 249 return self._copymap.get(file, None)
249 250
250 251 def copies(self):
251 252 return self._copymap
252 253
253 254 def _droppath(self, f):
254 255 if self[f] not in "?r" and "_dirs" in self.__dict__:
255 256 _decdirs(self._dirs, f)
256 257
257 258 def _addpath(self, f, check=False):
258 259 oldstate = self[f]
259 260 if check or oldstate == "r":
260 261 if '\r' in f or '\n' in f:
261 262 raise util.Abort(
262 263 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
263 264 if f in self._dirs:
264 265 raise util.Abort(_('directory %r already in dirstate') % f)
265 266 # shadows
266 267 for d in _finddirs(f):
267 268 if d in self._dirs:
268 269 break
269 270 if d in self._map and self[d] != 'r':
270 271 raise util.Abort(
271 272 _('file %r in dirstate clashes with %r') % (d, f))
272 273 if oldstate in "?r" and "_dirs" in self.__dict__:
273 274 _incdirs(self._dirs, f)
274 275
275 276 def normal(self, f):
276 277 '''Mark a file normal and clean.'''
277 278 self._dirty = True
278 279 self._addpath(f)
279 280 s = os.lstat(self._join(f))
280 281 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
281 282 if f in self._copymap:
282 283 del self._copymap[f]
283 284
284 285 def normallookup(self, f):
285 286 '''Mark a file normal, but possibly dirty.'''
286 287 if self._pl[1] != nullid and f in self._map:
287 288 # if there is a merge going on and the file was either
288 289 # in state 'm' (-1) or coming from other parent (-2) before
289 290 # being removed, restore that state.
290 291 entry = self._map[f]
291 292 if entry[0] == 'r' and entry[2] in (-1, -2):
292 293 source = self._copymap.get(f)
293 294 if entry[2] == -1:
294 295 self.merge(f)
295 296 elif entry[2] == -2:
296 297 self.otherparent(f)
297 298 if source:
298 299 self.copy(source, f)
299 300 return
300 301 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
301 302 return
302 303 self._dirty = True
303 304 self._addpath(f)
304 305 self._map[f] = ('n', 0, -1, -1)
305 306 if f in self._copymap:
306 307 del self._copymap[f]
307 308
308 309 def otherparent(self, f):
309 310 '''Mark as coming from the other parent, always dirty.'''
310 311 if self._pl[1] == nullid:
311 312 raise util.Abort(_("setting %r to other parent "
312 313 "only allowed in merges") % f)
313 314 self._dirty = True
314 315 self._addpath(f)
315 316 self._map[f] = ('n', 0, -2, -1)
316 317 if f in self._copymap:
317 318 del self._copymap[f]
318 319
319 320 def add(self, f):
320 321 '''Mark a file added.'''
321 322 self._dirty = True
322 323 self._addpath(f, True)
323 324 self._map[f] = ('a', 0, -1, -1)
324 325 if f in self._copymap:
325 326 del self._copymap[f]
326 327
327 328 def remove(self, f):
328 329 '''Mark a file removed.'''
329 330 self._dirty = True
330 331 self._droppath(f)
331 332 size = 0
332 333 if self._pl[1] != nullid and f in self._map:
333 334 # backup the previous state
334 335 entry = self._map[f]
335 336 if entry[0] == 'm': # merge
336 337 size = -1
337 338 elif entry[0] == 'n' and entry[2] == -2: # other parent
338 339 size = -2
339 340 self._map[f] = ('r', 0, size, 0)
340 341 if size == 0 and f in self._copymap:
341 342 del self._copymap[f]
342 343
343 344 def merge(self, f):
344 345 '''Mark a file merged.'''
345 346 self._dirty = True
346 347 s = os.lstat(self._join(f))
347 348 self._addpath(f)
348 349 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
349 350 if f in self._copymap:
350 351 del self._copymap[f]
351 352
352 353 def forget(self, f):
353 354 '''Forget a file.'''
354 355 self._dirty = True
355 356 try:
356 357 self._droppath(f)
357 358 del self._map[f]
358 359 except KeyError:
359 360 self._ui.warn(_("not in dirstate: %s\n") % f)
360 361
361 362 def _normalize(self, path, knownpath):
362 363 norm_path = os.path.normcase(path)
363 364 fold_path = self._foldmap.get(norm_path, None)
364 365 if fold_path is None:
365 366 if knownpath or not os.path.lexists(os.path.join(self._root, path)):
366 367 fold_path = path
367 368 else:
368 369 fold_path = self._foldmap.setdefault(norm_path,
369 370 util.fspath(path, self._root))
370 371 return fold_path
371 372
372 373 def clear(self):
373 374 self._map = {}
374 375 if "_dirs" in self.__dict__:
375 376 delattr(self, "_dirs")
376 377 self._copymap = {}
377 378 self._pl = [nullid, nullid]
378 379 self._dirty = True
379 380
380 381 def rebuild(self, parent, files):
381 382 self.clear()
382 383 for f in files:
383 384 if 'x' in files.flags(f):
384 385 self._map[f] = ('n', 0777, -1, 0)
385 386 else:
386 387 self._map[f] = ('n', 0666, -1, 0)
387 388 self._pl = (parent, nullid)
388 389 self._dirty = True
389 390
390 391 def write(self):
391 392 if not self._dirty:
392 393 return
393 394 st = self._opener("dirstate", "w", atomictemp=True)
394 395
395 396 # use the modification time of the newly created temporary file as the
396 397 # filesystem's notion of 'now'
397 398 now = int(util.fstat(st).st_mtime)
398 399
399 400 cs = cStringIO.StringIO()
400 401 copymap = self._copymap
401 402 pack = struct.pack
402 403 write = cs.write
403 404 write("".join(self._pl))
404 405 for f, e in self._map.iteritems():
405 406 if e[0] == 'n' and e[3] == now:
406 407 # The file was last modified "simultaneously" with the current
407 408 # write to dirstate (i.e. within the same second for file-
408 409 # systems with a granularity of 1 sec). This commonly happens
409 410 # for at least a couple of files on 'update'.
410 411 # The user could change the file without changing its size
411 412 # within the same second. Invalidate the file's stat data in
412 413 # dirstate, forcing future 'status' calls to compare the
413 414 # contents of the file. This prevents mistakenly treating such
414 415 # files as clean.
415 416 e = (e[0], 0, -1, -1) # mark entry as 'unset'
416 417 self._map[f] = e
417 418
418 419 if f in copymap:
419 420 f = "%s\0%s" % (f, copymap[f])
420 421 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
421 422 write(e)
422 423 write(f)
423 424 st.write(cs.getvalue())
424 425 st.rename()
425 426 self._dirty = self._dirtypl = False
426 427
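Each entry that write() packs follows _format = ">cllll": a one-byte state, then mode, size, mtime and filename length as big-endian 32-bit ints, 17 header bytes in all, followed by the name itself. A toy record (values assumed):

    import struct

    _format = ">cllll"
    f = 'dir/file.txt'
    e = ('n', 0644, 1234, 1289000000)          # state, mode, size, mtime
    record = struct.pack(_format, e[0], e[1], e[2], e[3], len(f)) + f
    # copied files append '\0<source>' to f before packing, as above
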
427 428 def _dirignore(self, f):
428 429 if f == '.':
429 430 return False
430 431 if self._ignore(f):
431 432 return True
432 433 for p in _finddirs(f):
433 434 if self._ignore(p):
434 435 return True
435 436 return False
436 437
437 438 def walk(self, match, subrepos, unknown, ignored):
438 439 '''
439 440 Walk recursively through the directory tree, finding all files
440 441 matched by match.
441 442
442 443 Return a dict mapping filename to stat-like object (either
443 444 mercurial.osutil.stat instance or return value of os.stat()).
444 445 '''
445 446
446 447 def fwarn(f, msg):
447 448 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
448 449 return False
449 450
450 451 def badtype(mode):
451 452 kind = _('unknown')
452 453 if stat.S_ISCHR(mode):
453 454 kind = _('character device')
454 455 elif stat.S_ISBLK(mode):
455 456 kind = _('block device')
456 457 elif stat.S_ISFIFO(mode):
457 458 kind = _('fifo')
458 459 elif stat.S_ISSOCK(mode):
459 460 kind = _('socket')
460 461 elif stat.S_ISDIR(mode):
461 462 kind = _('directory')
462 463 return _('unsupported file type (type is %s)') % kind
463 464
464 465 ignore = self._ignore
465 466 dirignore = self._dirignore
466 467 if ignored:
467 468 ignore = util.never
468 469 dirignore = util.never
469 470 elif not unknown:
470 471 # if unknown and ignored are False, skip step 2
471 472 ignore = util.always
472 473 dirignore = util.always
473 474
474 475 matchfn = match.matchfn
475 476 badfn = match.bad
476 477 dmap = self._map
477 478 normpath = util.normpath
478 479 listdir = osutil.listdir
479 480 lstat = os.lstat
480 481 getkind = stat.S_IFMT
481 482 dirkind = stat.S_IFDIR
482 483 regkind = stat.S_IFREG
483 484 lnkkind = stat.S_IFLNK
484 485 join = self._join
485 486 work = []
486 487 wadd = work.append
487 488
488 489 exact = skipstep3 = False
489 490 if matchfn == match.exact: # match.exact
490 491 exact = True
491 492 dirignore = util.always # skip step 2
492 493 elif match.files() and not match.anypats(): # match.match, no patterns
493 494 skipstep3 = True
494 495
495 496 if self._checkcase:
496 497 normalize = self._normalize
497 498 skipstep3 = False
498 499 else:
499 500 normalize = lambda x, y: x
500 501
501 502 files = sorted(match.files())
502 503 subrepos.sort()
503 504 i, j = 0, 0
504 505 while i < len(files) and j < len(subrepos):
505 506 subpath = subrepos[j] + "/"
506 507 if not files[i].startswith(subpath):
507 508 i += 1
508 509 continue
509 510 while files and files[i].startswith(subpath):
510 511 del files[i]
511 512 j += 1
512 513
513 514 if not files or '.' in files:
514 515 files = ['']
515 516 results = dict.fromkeys(subrepos)
516 517 results['.hg'] = None
517 518
518 519 # step 1: find all explicit files
519 520 for ff in files:
520 521 nf = normalize(normpath(ff), False)
521 522 if nf in results:
522 523 continue
523 524
524 525 try:
525 526 st = lstat(join(nf))
526 527 kind = getkind(st.st_mode)
527 528 if kind == dirkind:
528 529 skipstep3 = False
529 530 if nf in dmap:
530 531 # file deleted on disk but still in dirstate
531 532 results[nf] = None
532 533 match.dir(nf)
533 534 if not dirignore(nf):
534 535 wadd(nf)
535 536 elif kind == regkind or kind == lnkkind:
536 537 results[nf] = st
537 538 else:
538 539 badfn(ff, badtype(kind))
539 540 if nf in dmap:
540 541 results[nf] = None
541 542 except OSError, inst:
542 543 if nf in dmap: # does it exactly match a file?
543 544 results[nf] = None
544 545 else: # does it match a directory?
545 546 prefix = nf + "/"
546 547 for fn in dmap:
547 548 if fn.startswith(prefix):
548 549 match.dir(nf)
549 550 skipstep3 = False
550 551 break
551 552 else:
552 553 badfn(ff, inst.strerror)
553 554
554 555 # step 2: visit subdirectories
555 556 while work:
556 557 nd = work.pop()
557 558 skip = None
558 559 if nd == '.':
559 560 nd = ''
560 561 else:
561 562 skip = '.hg'
562 563 try:
563 564 entries = listdir(join(nd), stat=True, skip=skip)
564 565 except OSError, inst:
565 566 if inst.errno == errno.EACCES:
566 567 fwarn(nd, inst.strerror)
567 568 continue
568 569 raise
569 570 for f, kind, st in entries:
570 571 nf = normalize(nd and (nd + "/" + f) or f, True)
571 572 if nf not in results:
572 573 if kind == dirkind:
573 574 if not ignore(nf):
574 575 match.dir(nf)
575 576 wadd(nf)
576 577 if nf in dmap and matchfn(nf):
577 578 results[nf] = None
578 579 elif kind == regkind or kind == lnkkind:
579 580 if nf in dmap:
580 581 if matchfn(nf):
581 582 results[nf] = st
582 583 elif matchfn(nf) and not ignore(nf):
583 584 results[nf] = st
584 585 elif nf in dmap and matchfn(nf):
585 586 results[nf] = None
586 587
587 588 # step 3: report unseen items in the dmap hash
588 589 if not skipstep3 and not exact:
589 590 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
590 591 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
591 592 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
592 593 st = None
593 594 results[nf] = st
594 595 for s in subrepos:
595 596 del results[s]
596 597 del results['.hg']
597 598 return results
598 599
599 600 def status(self, match, subrepos, ignored, clean, unknown):
600 601 '''Determine the status of the working copy relative to the
601 602 dirstate and return a tuple of lists (unsure, modified, added,
602 603 removed, deleted, unknown, ignored, clean), where:
603 604
604 605 unsure:
605 606 files that might have been modified since the dirstate was
606 607 written, but need to be read to be sure (size is the same
607 608 but mtime differs)
608 609 modified:
609 610 files that have definitely been modified since the dirstate
610 611 was written (different size or mode)
611 612 added:
612 613 files that have been explicitly added with hg add
613 614 removed:
614 615 files that have been explicitly removed with hg remove
615 616 deleted:
616 617 files that have been deleted through other means ("missing")
617 618 unknown:
618 619 files not in the dirstate that are not ignored
619 620 ignored:
620 621 files not in the dirstate that are ignored
621 622 (by _dirignore())
622 623 clean:
623 624 files that have definitely not been modified since the
624 625 dirstate was written
625 626 '''
626 627 listignored, listclean, listunknown = ignored, clean, unknown
627 628 lookup, modified, added, unknown, ignored = [], [], [], [], []
628 629 removed, deleted, clean = [], [], []
629 630
630 631 dmap = self._map
631 632 ladd = lookup.append # aka "unsure"
632 633 madd = modified.append
633 634 aadd = added.append
634 635 uadd = unknown.append
635 636 iadd = ignored.append
636 637 radd = removed.append
637 638 dadd = deleted.append
638 639 cadd = clean.append
639 640
640 641 lnkkind = stat.S_IFLNK
641 642
642 643 for fn, st in self.walk(match, subrepos, listunknown,
643 644 listignored).iteritems():
644 645 if fn not in dmap:
645 646 if (listignored or match.exact(fn)) and self._dirignore(fn):
646 647 if listignored:
647 648 iadd(fn)
648 649 elif listunknown:
649 650 uadd(fn)
650 651 continue
651 652
652 653 state, mode, size, time = dmap[fn]
653 654
654 655 if not st and state in "nma":
655 656 dadd(fn)
656 657 elif state == 'n':
657 658 # The "mode & lnkkind != lnkkind or self._checklink"
658 659 # lines are an expansion of "islink => checklink"
659 660 # where islink means "is this a link?" and checklink
660 661 # means "can we check links?".
661 662 if (size >= 0 and
662 663 (size != st.st_size
663 664 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
664 665 and (mode & lnkkind != lnkkind or self._checklink)
665 666 or size == -2 # other parent
666 667 or fn in self._copymap):
667 668 madd(fn)
668 669 elif (time != int(st.st_mtime)
669 670 and (mode & lnkkind != lnkkind or self._checklink)):
670 671 ladd(fn)
671 672 elif listclean:
672 673 cadd(fn)
673 674 elif state == 'm':
674 675 madd(fn)
675 676 elif state == 'a':
676 677 aadd(fn)
677 678 elif state == 'r':
678 679 radd(fn)
679 680
680 681 return (lookup, modified, added, removed, deleted, unknown, ignored,
681 682 clean)
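status() is the consumer-facing end of the walk above. A hedged usage sketch (assumes a live repo object; match.always() is the catch-all matcher of this era's API):

    from mercurial import match as matchmod

    m = matchmod.always(repo.root, '')
    unsure, modified, added, removed, deleted, unknown, ignored, clean = \
        repo.dirstate.status(m, [], False, False, False)
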
@@ -1,1904 +1,1916 @@ mercurial/localrepo.py
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None # in UTF-8
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 return dirstate.dirstate(self.opener, self.ui, self.root)
181 warned = [0]
182 def validate(node):
183 try:
184 r = self.changelog.rev(node)
185 return node
186 except error.LookupError:
187 if not warned[0]:
188 warned[0] = True
189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n" % short(node)))
191 return nullid
192
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
182 194
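This closure is the heart of the commit named in the header: a dirstate parent that no longer resolves in the changelog used to abort whole commands, and now degrades to nullid with a single warning. A self-contained toy (simplified stand-ins, not Mercurial's API) tracing that behaviour:

    nullid = '\0' * 20
    known = set(['\x11' * 20])

    class fakelog(object):
        def rev(self, node):
            if node not in known:
                raise LookupError(node)
            return 0

    warned = [0]
    log = fakelog()

    def validate(node):
        try:
            log.rev(node)
            return node
        except LookupError:
            if not warned[0]:
                warned[0] = True
                print 'warning: ignoring unknown working parent!'
            return nullid

    print validate('\x11' * 20) == '\x11' * 20   # True: known parent kept
    print validate('\x22' * 20) == nullid        # True, warns exactly once
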
183 195 def __getitem__(self, changeid):
184 196 if changeid is None:
185 197 return context.workingctx(self)
186 198 return context.changectx(self, changeid)
187 199
188 200 def __contains__(self, changeid):
189 201 try:
190 202 return bool(self.lookup(changeid))
191 203 except error.RepoLookupError:
192 204 return False
193 205
194 206 def __nonzero__(self):
195 207 return True
196 208
197 209 def __len__(self):
198 210 return len(self.changelog)
199 211
200 212 def __iter__(self):
201 213 for i in xrange(len(self)):
202 214 yield i
203 215
204 216 def url(self):
205 217 return 'file:' + self.root
206 218
207 219 def hook(self, name, throw=False, **args):
208 220 return hook.hook(self.ui, self, name, throw, **args)
209 221
210 222 tag_disallowed = ':\r\n'
211 223
212 224 def _tag(self, names, node, message, local, user, date, extra={}):
213 225 if isinstance(names, str):
214 226 allchars = names
215 227 names = (names,)
216 228 else:
217 229 allchars = ''.join(names)
218 230 for c in self.tag_disallowed:
219 231 if c in allchars:
220 232 raise util.Abort(_('%r cannot be used in a tag name') % c)
221 233
222 234 branches = self.branchmap()
223 235 for name in names:
224 236 self.hook('pretag', throw=True, node=hex(node), tag=name,
225 237 local=local)
226 238 if name in branches:
227 239 self.ui.warn(_("warning: tag %s conflicts with existing"
228 240 " branch name\n") % name)
229 241
230 242 def writetags(fp, names, munge, prevtags):
231 243 fp.seek(0, 2)
232 244 if prevtags and prevtags[-1] != '\n':
233 245 fp.write('\n')
234 246 for name in names:
235 247 m = munge and munge(name) or name
236 248 if self._tagtypes and name in self._tagtypes:
237 249 old = self._tags.get(name, nullid)
238 250 fp.write('%s %s\n' % (hex(old), m))
239 251 fp.write('%s %s\n' % (hex(node), m))
240 252 fp.close()
241 253
242 254 prevtags = ''
243 255 if local:
244 256 try:
245 257 fp = self.opener('localtags', 'r+')
246 258 except IOError:
247 259 fp = self.opener('localtags', 'a')
248 260 else:
249 261 prevtags = fp.read()
250 262
251 263 # local tags are stored in the current charset
252 264 writetags(fp, names, None, prevtags)
253 265 for name in names:
254 266 self.hook('tag', node=hex(node), tag=name, local=local)
255 267 return
256 268
257 269 try:
258 270 fp = self.wfile('.hgtags', 'rb+')
259 271 except IOError:
260 272 fp = self.wfile('.hgtags', 'ab')
261 273 else:
262 274 prevtags = fp.read()
263 275
264 276 # committed tags are stored in UTF-8
265 277 writetags(fp, names, encoding.fromlocal, prevtags)
266 278
267 279 if '.hgtags' not in self.dirstate:
268 280 self[None].add(['.hgtags'])
269 281
270 282 m = matchmod.exact(self.root, '', ['.hgtags'])
271 283 tagnode = self.commit(message, user, date, extra=extra, match=m)
272 284
273 285 for name in names:
274 286 self.hook('tag', node=hex(node), tag=name, local=local)
275 287
276 288 return tagnode
277 289
278 290 def tag(self, names, node, message, local, user, date):
279 291 '''tag a revision with one or more symbolic names.
280 292
281 293 names is a list of strings or, when adding a single tag, names may be a
282 294 string.
283 295
284 296 if local is True, the tags are stored in a per-repository file.
285 297 otherwise, they are stored in the .hgtags file, and a new
286 298 changeset is committed with the change.
287 299
288 300 keyword arguments:
289 301
290 302 local: whether to store tags in non-version-controlled file
291 303 (default False)
292 304
293 305 message: commit message to use if committing
294 306
295 307 user: name of user to use if committing
296 308
297 309 date: date tuple to use if committing'''
298 310
299 311 for x in self.status()[:5]:
300 312 if '.hgtags' in x:
301 313 raise util.Abort(_('working copy of .hgtags is changed '
302 314 '(please commit .hgtags manually)'))
303 315
304 316 self.tags() # instantiate the cache
305 317 self._tag(names, node, message, local, user, date)
306 318
307 319 def tags(self):
308 320 '''return a mapping of tag to node'''
309 321 if self._tags is None:
310 322 (self._tags, self._tagtypes) = self._findtags()
311 323
312 324 return self._tags
313 325
314 326 def _findtags(self):
315 327 '''Do the hard work of finding tags. Return a pair of dicts
316 328 (tags, tagtypes) where tags maps tag name to node, and tagtypes
317 329 maps tag name to a string like \'global\' or \'local\'.
318 330 Subclasses or extensions are free to add their own tags, but
319 331 should be aware that the returned dicts will be retained for the
320 332 duration of the localrepo object.'''
321 333
322 334 # XXX what tagtype should subclasses/extensions use? Currently
323 335 # mq and bookmarks add tags, but do not set the tagtype at all.
324 336 # Should each extension invent its own tag type? Should there
325 337 # be one tagtype for all such "virtual" tags? Or is the status
326 338 # quo fine?
327 339
328 340 alltags = {} # map tag name to (node, hist)
329 341 tagtypes = {}
330 342
331 343 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
332 344 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
333 345
334 346 # Build the return dicts. Have to re-encode tag names because
335 347 # the tags module always uses UTF-8 (in order not to lose info
336 348 # writing to the cache), but the rest of Mercurial wants them in
337 349 # local encoding.
338 350 tags = {}
339 351 for (name, (node, hist)) in alltags.iteritems():
340 352 if node != nullid:
341 353 tags[encoding.tolocal(name)] = node
342 354 tags['tip'] = self.changelog.tip()
343 355 tagtypes = dict([(encoding.tolocal(name), value)
344 356 for (name, value) in tagtypes.iteritems()])
345 357 return (tags, tagtypes)
346 358
347 359 def tagtype(self, tagname):
348 360 '''
349 361 return the type of the given tag. result can be:
350 362
351 363 'local' : a local tag
352 364 'global' : a global tag
353 365 None : tag does not exist
354 366 '''
355 367
356 368 self.tags()
357 369
358 370 return self._tagtypes.get(tagname)
359 371
360 372 def tagslist(self):
361 373 '''return a list of tags ordered by revision'''
362 374 l = []
363 375 for t, n in self.tags().iteritems():
364 376 try:
365 377 r = self.changelog.rev(n)
366 378 except:
367 379 r = -2 # sort to the beginning of the list if unknown
368 380 l.append((r, t, n))
369 381 return [(t, n) for r, t, n in sorted(l)]
370 382
371 383 def nodetags(self, node):
372 384 '''return the tags associated with a node'''
373 385 if not self.nodetagscache:
374 386 self.nodetagscache = {}
375 387 for t, n in self.tags().iteritems():
376 388 self.nodetagscache.setdefault(n, []).append(t)
377 389 for tags in self.nodetagscache.itervalues():
378 390 tags.sort()
379 391 return self.nodetagscache.get(node, [])
380 392
381 393 def _branchtags(self, partial, lrev):
382 394 # TODO: rename this function?
383 395 tiprev = len(self) - 1
384 396 if lrev != tiprev:
385 397 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
386 398 self._updatebranchcache(partial, ctxgen)
387 399 self._writebranchcache(partial, self.changelog.tip(), tiprev)
388 400
389 401 return partial
390 402
391 403 def updatebranchcache(self):
392 404 tip = self.changelog.tip()
393 405 if self._branchcache is not None and self._branchcachetip == tip:
394 406 return self._branchcache
395 407
396 408 oldtip = self._branchcachetip
397 409 self._branchcachetip = tip
398 410 if oldtip is None or oldtip not in self.changelog.nodemap:
399 411 partial, last, lrev = self._readbranchcache()
400 412 else:
401 413 lrev = self.changelog.rev(oldtip)
402 414 partial = self._branchcache
403 415
404 416 self._branchtags(partial, lrev)
405 417 # this private cache holds all heads (not just tips)
406 418 self._branchcache = partial
407 419
408 420 def branchmap(self):
409 421 '''returns a dictionary {branch: [branchheads]}'''
410 422 self.updatebranchcache()
411 423 return self._branchcache
412 424
413 425 def branchtags(self):
414 426 '''return a dict where branch names map to the tipmost head of
415 427 the branch, open heads come before closed'''
416 428 bt = {}
417 429 for bn, heads in self.branchmap().iteritems():
418 430 tip = heads[-1]
419 431 for h in reversed(heads):
420 432 if 'close' not in self.changelog.read(h)[5]:
421 433 tip = h
422 434 break
423 435 bt[bn] = tip
424 436 return bt
425 437
426 438
427 439 def _readbranchcache(self):
428 440 partial = {}
429 441 try:
430 442 f = self.opener("branchheads.cache")
431 443 lines = f.read().split('\n')
432 444 f.close()
433 445 except (IOError, OSError):
434 446 return {}, nullid, nullrev
435 447
436 448 try:
437 449 last, lrev = lines.pop(0).split(" ", 1)
438 450 last, lrev = bin(last), int(lrev)
439 451 if lrev >= len(self) or self[lrev].node() != last:
440 452 # invalidate the cache
441 453 raise ValueError('invalidating branch cache (tip differs)')
442 454 for l in lines:
443 455 if not l:
444 456 continue
445 457 node, label = l.split(" ", 1)
446 458 partial.setdefault(label.strip(), []).append(bin(node))
447 459 except KeyboardInterrupt:
448 460 raise
449 461 except Exception, inst:
450 462 if self.ui.debugflag:
451 463 self.ui.warn(str(inst), '\n')
452 464 partial, last, lrev = {}, nullid, nullrev
453 465 return partial, last, lrev
454 466
455 467 def _writebranchcache(self, branches, tip, tiprev):
456 468 try:
457 469 f = self.opener("branchheads.cache", "w", atomictemp=True)
458 470 f.write("%s %s\n" % (hex(tip), tiprev))
459 471 for label, nodes in branches.iteritems():
460 472 for node in nodes:
461 473 f.write("%s %s\n" % (hex(node), label))
462 474 f.rename()
463 475 except (IOError, OSError):
464 476 pass
465 477
466 478 def _updatebranchcache(self, partial, ctxgen):
467 479 # collect new branch entries
468 480 newbranches = {}
469 481 for c in ctxgen:
470 482 newbranches.setdefault(c.branch(), []).append(c.node())
471 483 # if older branchheads are reachable from new ones, they aren't
472 484 # really branchheads. Note checking parents is insufficient:
473 485 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
474 486 for branch, newnodes in newbranches.iteritems():
475 487 bheads = partial.setdefault(branch, [])
476 488 bheads.extend(newnodes)
477 489 if len(bheads) <= 1:
478 490 continue
479 491 # starting from tip means fewer passes over reachable
480 492 while newnodes:
481 493 latest = newnodes.pop()
482 494 if latest not in bheads:
483 495 continue
484 496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
485 497 reachable = self.changelog.reachable(latest, minbhrev)
486 498 reachable.remove(latest)
487 499 bheads = [b for b in bheads if b not in reachable]
488 500 partial[branch] = bheads
489 501
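# Worked example for the pruning above, using the graph from the comment
# (1 on branch a -> 2 on branch b -> 3 on branch a): once node 3 arrives,
# bheads for branch 'a' is [1, 3]; 1 is in reachable(3, ...), so it is
# dropped and only 3 remains a head. Checking 3's parents alone would
# miss this, since its parent is 2, which is on branch 'b'.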
490 502 def lookup(self, key):
491 503 if isinstance(key, int):
492 504 return self.changelog.node(key)
493 505 elif key == '.':
494 506 return self.dirstate.parents()[0]
495 507 elif key == 'null':
496 508 return nullid
497 509 elif key == 'tip':
498 510 return self.changelog.tip()
499 511 n = self.changelog._match(key)
500 512 if n:
501 513 return n
502 514 if key in self.tags():
503 515 return self.tags()[key]
504 516 if key in self.branchtags():
505 517 return self.branchtags()[key]
506 518 n = self.changelog._partialmatch(key)
507 519 if n:
508 520 return n
509 521
510 522 # can't find key, check if it might have come from damaged dirstate
511 523 if key in self.dirstate.parents():
512 524 raise error.Abort(_("working directory has unknown parent '%s'!")
513 525 % short(key))
514 526 try:
515 527 if len(key) == 20:
516 528 key = hex(key)
517 529 except:
518 530 pass
519 531 raise error.RepoLookupError(_("unknown revision '%s'") % key)
520 532
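# Resolution order used by lookup() above: integer revision, the symbols
# '.', 'null' and 'tip', an exact changelog match, tag names, branch
# names, then a unique hash prefix; anything else raises RepoLookupError
# (or Abort when the key is a damaged dirstate parent).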
521 533 def lookupbranch(self, key, remote=None):
522 534 repo = remote or self
523 535 if key in repo.branchmap():
524 536 return key
525 537
526 538 repo = (remote and remote.local()) and remote or self
527 539 return repo[key].branch()
528 540
529 541 def local(self):
530 542 return True
531 543
532 544 def join(self, f):
533 545 return os.path.join(self.path, f)
534 546
535 547 def wjoin(self, f):
536 548 return os.path.join(self.root, f)
537 549
538 550 def file(self, f):
539 551 if f[0] == '/':
540 552 f = f[1:]
541 553 return filelog.filelog(self.sopener, f)
542 554
543 555 def changectx(self, changeid):
544 556 return self[changeid]
545 557
546 558 def parents(self, changeid=None):
547 559 '''get list of changectxs for parents of changeid'''
548 560 return self[changeid].parents()
549 561
550 562 def filectx(self, path, changeid=None, fileid=None):
551 563 """changeid can be a changeset revision, node, or tag.
552 564 fileid can be a file revision or node."""
553 565 return context.filectx(self, path, changeid, fileid)
554 566
555 567 def getcwd(self):
556 568 return self.dirstate.getcwd()
557 569
558 570 def pathto(self, f, cwd=None):
559 571 return self.dirstate.pathto(f, cwd)
560 572
561 573 def wfile(self, f, mode='r'):
562 574 return self.wopener(f, mode)
563 575
564 576 def _link(self, f):
565 577 return os.path.islink(self.wjoin(f))
566 578
567 579 def _loadfilter(self, filter):
568 580 if filter not in self.filterpats:
569 581 l = []
570 582 for pat, cmd in self.ui.configitems(filter):
571 583 if cmd == '!':
572 584 continue
573 585 mf = matchmod.match(self.root, '', [pat])
574 586 fn = None
575 587 params = cmd
576 588 for name, filterfn in self._datafilters.iteritems():
577 589 if cmd.startswith(name):
578 590 fn = filterfn
579 591 params = cmd[len(name):].lstrip()
580 592 break
581 593 if not fn:
582 594 fn = lambda s, c, **kwargs: util.filter(s, c)
583 595 # Wrap old filters not supporting keyword arguments
584 596 if not inspect.getargspec(fn)[2]:
585 597 oldfn = fn
586 598 fn = lambda s, c, **kwargs: oldfn(s, c)
587 599 l.append((mf, fn, params))
588 600 self.filterpats[filter] = l
589 601 return self.filterpats[filter]
590 602
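# Configuration sketch for _loadfilter above. The section name is the
# `filter` argument ('encode' or 'decode' via the propertycaches below);
# each value is either a registered data filter name or a shell command
# run through util.filter, and '!' disables a pattern. The commands here
# are hypothetical:
#
#   [encode]
#   **.txt = unix2dos
#
#   [decode]
#   **.txt = dos2unix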
591 603 def _filter(self, filterpats, filename, data):
592 604 for mf, fn, cmd in filterpats:
593 605 if mf(filename):
594 606 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
595 607 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
596 608 break
597 609
598 610 return data
599 611
600 612 @propertycache
601 613 def _encodefilterpats(self):
602 614 return self._loadfilter('encode')
603 615
604 616 @propertycache
605 617 def _decodefilterpats(self):
606 618 return self._loadfilter('decode')
607 619
608 620 def adddatafilter(self, name, filter):
609 621 self._datafilters[name] = filter
610 622
611 623 def wread(self, filename):
612 624 if self._link(filename):
613 625 data = os.readlink(self.wjoin(filename))
614 626 else:
615 627 data = self.wopener(filename, 'r').read()
616 628 return self._filter(self._encodefilterpats, filename, data)
617 629
618 630 def wwrite(self, filename, data, flags):
619 631 data = self._filter(self._decodefilterpats, filename, data)
620 632 try:
621 633 os.unlink(self.wjoin(filename))
622 634 except OSError:
623 635 pass
624 636 if 'l' in flags:
625 637 self.wopener.symlink(data, filename)
626 638 else:
627 639 self.wopener(filename, 'w').write(data)
628 640 if 'x' in flags:
629 641 util.set_flags(self.wjoin(filename), False, True)
630 642
631 643 def wwritedata(self, filename, data):
632 644 return self._filter(self._decodefilterpats, filename, data)
633 645
634 646 def transaction(self, desc):
635 647 tr = self._transref and self._transref() or None
636 648 if tr and tr.running():
637 649 return tr.nest()
638 650
639 651 # abort here if the journal already exists
640 652 if os.path.exists(self.sjoin("journal")):
641 653 raise error.RepoError(
642 654 _("abandoned transaction found - run hg recover"))
643 655
644 656 # save dirstate for rollback
645 657 try:
646 658 ds = self.opener("dirstate").read()
647 659 except IOError:
648 660 ds = ""
649 661 self.opener("journal.dirstate", "w").write(ds)
650 662 self.opener("journal.branch", "w").write(self.dirstate.branch())
651 663 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
652 664
653 665 renames = [(self.sjoin("journal"), self.sjoin("undo")),
654 666 (self.join("journal.dirstate"), self.join("undo.dirstate")),
655 667 (self.join("journal.branch"), self.join("undo.branch")),
656 668 (self.join("journal.desc"), self.join("undo.desc"))]
657 669 tr = transaction.transaction(self.ui.warn, self.sopener,
658 670 self.sjoin("journal"),
659 671 aftertrans(renames),
660 672 self.store.createmode)
661 673 self._transref = weakref.ref(tr)
662 674 return tr
663 675
664 676 def recover(self):
665 677 lock = self.lock()
666 678 try:
667 679 if os.path.exists(self.sjoin("journal")):
668 680 self.ui.status(_("rolling back interrupted transaction\n"))
669 681 transaction.rollback(self.sopener, self.sjoin("journal"),
670 682 self.ui.warn)
671 683 self.invalidate()
672 684 return True
673 685 else:
674 686 self.ui.warn(_("no interrupted transaction available\n"))
675 687 return False
676 688 finally:
677 689 lock.release()
678 690
679 691 def rollback(self, dryrun=False):
680 692 wlock = lock = None
681 693 try:
682 694 wlock = self.wlock()
683 695 lock = self.lock()
684 696 if os.path.exists(self.sjoin("undo")):
685 697 try:
686 698 args = self.opener("undo.desc", "r").read().splitlines()
687 699 if len(args) >= 3 and self.ui.verbose:
688 700 desc = _("rolling back to revision %s"
689 701 " (undo %s: %s)\n") % (
690 702 int(args[0]) - 1, args[1], args[2])
691 703 elif len(args) >= 2:
692 704 desc = _("rolling back to revision %s (undo %s)\n") % (
693 705 int(args[0]) - 1, args[1])
694 706 except IOError:
695 707 desc = _("rolling back unknown transaction\n")
696 708 self.ui.status(desc)
697 709 if dryrun:
698 710 return
699 711 transaction.rollback(self.sopener, self.sjoin("undo"),
700 712 self.ui.warn)
701 713 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
702 714 try:
703 715 branch = self.opener("undo.branch").read()
704 716 self.dirstate.setbranch(branch)
705 717 except IOError:
706 718 self.ui.warn(_("Named branch could not be reset, "
707 719 "current branch still is: %s\n")
708 720 % encoding.tolocal(self.dirstate.branch()))
709 721 self.invalidate()
710 722 self.dirstate.invalidate()
711 723 self.destroyed()
712 724 else:
713 725 self.ui.warn(_("no rollback information available\n"))
714 726 return 1
715 727 finally:
716 728 release(lock, wlock)
717 729
718 730 def invalidatecaches(self):
719 731 self._tags = None
720 732 self._tagtypes = None
721 733 self.nodetagscache = None
722 734 self._branchcache = None # in UTF-8
723 735 self._branchcachetip = None
724 736
725 737 def invalidate(self):
726 738 for a in "changelog manifest".split():
727 739 if a in self.__dict__:
728 740 delattr(self, a)
729 741 self.invalidatecaches()
730 742
731 743 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
732 744 try:
733 745 l = lock.lock(lockname, 0, releasefn, desc=desc)
734 746 except error.LockHeld, inst:
735 747 if not wait:
736 748 raise
737 749 self.ui.warn(_("waiting for lock on %s held by %r\n") %
738 750 (desc, inst.locker))
739 751 # default to 600 seconds timeout
740 752 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
741 753 releasefn, desc=desc)
742 754 if acquirefn:
743 755 acquirefn()
744 756 return l
745 757
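# The 600-second lock timeout above can be overridden from the user's
# configuration; a sketch (value in seconds):
#
#   [ui]
#   timeout = 30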
746 758 def lock(self, wait=True):
747 759 '''Lock the repository store (.hg/store) and return a weak reference
748 760 to the lock. Use this before modifying the store (e.g. committing or
749 761 stripping). If you are opening a transaction, get a lock as well.'''
750 762 l = self._lockref and self._lockref()
751 763 if l is not None and l.held:
752 764 l.lock()
753 765 return l
754 766
755 767 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
756 768 _('repository %s') % self.origroot)
757 769 self._lockref = weakref.ref(l)
758 770 return l
759 771
760 772 def wlock(self, wait=True):
761 773 '''Lock the non-store parts of the repository (everything under
762 774 .hg except .hg/store) and return a weak reference to the lock.
763 775 Use this before modifying files in .hg.'''
764 776 l = self._wlockref and self._wlockref()
765 777 if l is not None and l.held:
766 778 l.lock()
767 779 return l
768 780
769 781 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
770 782 self.dirstate.invalidate, _('working directory of %s') %
771 783 self.origroot)
772 784 self._wlockref = weakref.ref(l)
773 785 return l
774 786
775 787 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
776 788 """
777 789 commit an individual file as part of a larger transaction
778 790 """
779 791
780 792 fname = fctx.path()
781 793 text = fctx.data()
782 794 flog = self.file(fname)
783 795 fparent1 = manifest1.get(fname, nullid)
784 796 fparent2 = fparent2o = manifest2.get(fname, nullid)
785 797
786 798 meta = {}
787 799 copy = fctx.renamed()
788 800 if copy and copy[0] != fname:
789 801 # Mark the new revision of this file as a copy of another
790 802 # file. This copy data will effectively act as a parent
791 803 # of this new revision. If this is a merge, the first
792 804 # parent will be the nullid (meaning "look up the copy data")
793 805 # and the second one will be the other parent. For example:
794 806 #
795 807 # 0 --- 1 --- 3 rev1 changes file foo
796 808 # \ / rev2 renames foo to bar and changes it
797 809 # \- 2 -/ rev3 should have bar with all changes and
798 810 # should record that bar descends from
799 811 # bar in rev2 and foo in rev1
800 812 #
801 813 # this allows this merge to succeed:
802 814 #
803 815 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
804 816 # \ / merging rev3 and rev4 should use bar@rev2
805 817 # \- 2 --- 4 as the merge base
806 818 #
807 819
808 820 cfname = copy[0]
809 821 crev = manifest1.get(cfname)
810 822 newfparent = fparent2
811 823
812 824 if manifest2: # branch merge
813 825 if fparent2 == nullid or crev is None: # copied on remote side
814 826 if cfname in manifest2:
815 827 crev = manifest2[cfname]
816 828 newfparent = fparent1
817 829
818 830 # find source in nearest ancestor if we've lost track
819 831 if not crev:
820 832 self.ui.debug(" %s: searching for copy revision for %s\n" %
821 833 (fname, cfname))
822 834 for ancestor in self[None].ancestors():
823 835 if cfname in ancestor:
824 836 crev = ancestor[cfname].filenode()
825 837 break
826 838
827 839 if crev:
828 840 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
829 841 meta["copy"] = cfname
830 842 meta["copyrev"] = hex(crev)
831 843 fparent1, fparent2 = nullid, newfparent
832 844 else:
833 845 self.ui.warn(_("warning: can't find ancestor for '%s' "
834 846 "copied from '%s'!\n") % (fname, cfname))
835 847
836 848 elif fparent2 != nullid:
837 849 # is one parent an ancestor of the other?
838 850 fparentancestor = flog.ancestor(fparent1, fparent2)
839 851 if fparentancestor == fparent1:
840 852 fparent1, fparent2 = fparent2, nullid
841 853 elif fparentancestor == fparent2:
842 854 fparent2 = nullid
843 855
844 856 # is the file changed?
845 857 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
846 858 changelist.append(fname)
847 859 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
848 860
849 861 # are just the flags changed during merge?
850 862 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
851 863 changelist.append(fname)
852 864
853 865 return fparent1
854 866
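# Sketch of the rename metadata recorded by _filecommit above when a copy
# source is found ('foo' is a hypothetical path):
#
#   meta = {'copy': 'foo', 'copyrev': hex(crev)}
#
# with fparent1 forced to nullid so readers know to look up the copy data.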
855 867 def commit(self, text="", user=None, date=None, match=None, force=False,
856 868 editor=False, extra={}):
857 869 """Add a new revision to current repository.
858 870
859 871 Revision information is gathered from the working directory,
860 872 match can be used to filter the committed files. If editor is
861 873 supplied, it is called to get a commit message.
862 874 """
863 875
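# Usage sketch (hypothetical values): committing everything in the
# working directory with a message, as the command layer would:
#
#   node = repo.commit(text="fix parsing bug", user="someone@example.com")
#   if node is None:
#       repo.ui.write("nothing changed\n")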
864 876 def fail(f, msg):
865 877 raise util.Abort('%s: %s' % (f, msg))
866 878
867 879 if not match:
868 880 match = matchmod.always(self.root, '')
869 881
870 882 if not force:
871 883 vdirs = []
872 884 match.dir = vdirs.append
873 885 match.bad = fail
874 886
875 887 wlock = self.wlock()
876 888 try:
877 889 wctx = self[None]
878 890 merge = len(wctx.parents()) > 1
879 891
880 892 if (not force and merge and match and
881 893 (match.files() or match.anypats())):
882 894 raise util.Abort(_('cannot partially commit a merge '
883 895 '(do not specify files or patterns)'))
884 896
885 897 changes = self.status(match=match, clean=force)
886 898 if force:
887 899 changes[0].extend(changes[6]) # mq may commit unchanged files
888 900
889 901 # check subrepos
890 902 subs = []
891 903 removedsubs = set()
892 904 for p in wctx.parents():
893 905 removedsubs.update(s for s in p.substate if match(s))
894 906 for s in wctx.substate:
895 907 removedsubs.discard(s)
896 908 if match(s) and wctx.sub(s).dirty():
897 909 subs.append(s)
898 910 if (subs or removedsubs):
899 911 if (not match('.hgsub') and
900 912 '.hgsub' in (wctx.modified() + wctx.added())):
901 913 raise util.Abort(_("can't commit subrepos without .hgsub"))
902 914 if '.hgsubstate' not in changes[0]:
903 915 changes[0].insert(0, '.hgsubstate')
904 916
905 917 # make sure all explicit patterns are matched
906 918 if not force and match.files():
907 919 matched = set(changes[0] + changes[1] + changes[2])
908 920
909 921 for f in match.files():
910 922 if f == '.' or f in matched or f in wctx.substate:
911 923 continue
912 924 if f in changes[3]: # missing
913 925 fail(f, _('file not found!'))
914 926 if f in vdirs: # visited directory
915 927 d = f + '/'
916 928 for mf in matched:
917 929 if mf.startswith(d):
918 930 break
919 931 else:
920 932 fail(f, _("no match under directory!"))
921 933 elif f not in self.dirstate:
922 934 fail(f, _("file not tracked!"))
923 935
924 936 if (not force and not extra.get("close") and not merge
925 937 and not (changes[0] or changes[1] or changes[2])
926 938 and wctx.branch() == wctx.p1().branch()):
927 939 return None
928 940
929 941 ms = mergemod.mergestate(self)
930 942 for f in changes[0]:
931 943 if f in ms and ms[f] == 'u':
932 944 raise util.Abort(_("unresolved merge conflicts "
933 945 "(see hg resolve)"))
934 946
935 947 cctx = context.workingctx(self, text, user, date, extra, changes)
936 948 if editor:
937 949 cctx._text = editor(self, cctx, subs)
938 950 edited = (text != cctx._text)
939 951
940 952 # commit subs
941 953 if subs or removedsubs:
942 954 state = wctx.substate.copy()
943 955 for s in sorted(subs):
944 956 sub = wctx.sub(s)
945 957 self.ui.status(_('committing subrepository %s\n') %
946 958 subrepo.subrelpath(sub))
947 959 sr = sub.commit(cctx._text, user, date)
948 960 state[s] = (state[s][0], sr)
949 961 subrepo.writestate(self, state)
950 962
951 963 # Save commit message in case this transaction gets rolled back
952 964 # (e.g. by a pretxncommit hook). Leave the content alone on
953 965 # the assumption that the user will use the same editor again.
954 966 msgfile = self.opener('last-message.txt', 'wb')
955 967 msgfile.write(cctx._text)
956 968 msgfile.close()
957 969
958 970 p1, p2 = self.dirstate.parents()
959 971 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
960 972 try:
961 973 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
962 974 ret = self.commitctx(cctx, True)
963 975 except:
964 976 if edited:
965 977 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
966 978 self.ui.write(
967 979 _('note: commit message saved in %s\n') % msgfn)
968 980 raise
969 981
970 982 # update dirstate and mergestate
971 983 for f in changes[0] + changes[1]:
972 984 self.dirstate.normal(f)
973 985 for f in changes[2]:
974 986 self.dirstate.forget(f)
975 987 self.dirstate.setparents(ret)
976 988 ms.reset()
977 989 finally:
978 990 wlock.release()
979 991
980 992 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
981 993 return ret
982 994
983 995 def commitctx(self, ctx, error=False):
984 996 """Add a new revision to current repository.
985 997 Revision information is passed via the context argument.
986 998 """
987 999
988 1000 tr = lock = None
989 1001 removed = list(ctx.removed())
990 1002 p1, p2 = ctx.p1(), ctx.p2()
991 1003 m1 = p1.manifest().copy()
992 1004 m2 = p2.manifest()
993 1005 user = ctx.user()
994 1006
995 1007 lock = self.lock()
996 1008 try:
997 1009 tr = self.transaction("commit")
998 1010 trp = weakref.proxy(tr)
999 1011
1000 1012 # check in files
1001 1013 new = {}
1002 1014 changed = []
1003 1015 linkrev = len(self)
1004 1016 for f in sorted(ctx.modified() + ctx.added()):
1005 1017 self.ui.note(f + "\n")
1006 1018 try:
1007 1019 fctx = ctx[f]
1008 1020 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1009 1021 changed)
1010 1022 m1.set(f, fctx.flags())
1011 1023 except OSError, inst:
1012 1024 self.ui.warn(_("trouble committing %s!\n") % f)
1013 1025 raise
1014 1026 except IOError, inst:
1015 1027 errcode = getattr(inst, 'errno', errno.ENOENT)
1016 1028 if error or errcode and errcode != errno.ENOENT:
1017 1029 self.ui.warn(_("trouble committing %s!\n") % f)
1018 1030 raise
1019 1031 else:
1020 1032 removed.append(f)
1021 1033
1022 1034 # update manifest
1023 1035 m1.update(new)
1024 1036 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1025 1037 drop = [f for f in removed if f in m1]
1026 1038 for f in drop:
1027 1039 del m1[f]
1028 1040 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1029 1041 p2.manifestnode(), (new, drop))
1030 1042
1031 1043 # update changelog
1032 1044 self.changelog.delayupdate()
1033 1045 n = self.changelog.add(mn, changed + removed, ctx.description(),
1034 1046 trp, p1.node(), p2.node(),
1035 1047 user, ctx.date(), ctx.extra().copy())
1036 1048 p = lambda: self.changelog.writepending() and self.root or ""
1037 1049 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1038 1050 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1039 1051 parent2=xp2, pending=p)
1040 1052 self.changelog.finalize(trp)
1041 1053 tr.close()
1042 1054
1043 1055 if self._branchcache:
1044 1056 self.updatebranchcache()
1045 1057 return n
1046 1058 finally:
1047 1059 if tr:
1048 1060 tr.release()
1049 1061 lock.release()
1050 1062
1051 1063 def destroyed(self):
1052 1064 '''Inform the repository that nodes have been destroyed.
1053 1065 Intended for use by strip and rollback, so there's a common
1054 1066 place for anything that has to be done after destroying history.'''
1055 1067 # XXX it might be nice if we could take the list of destroyed
1056 1068 # nodes, but I don't see an easy way for rollback() to do that
1057 1069
1058 1070 # Ensure the persistent tag cache is updated. Doing it now
1059 1071 # means that the tag cache only has to worry about destroyed
1060 1072 # heads immediately after a strip/rollback. That in turn
1061 1073 # guarantees that "cachetip == currenttip" (comparing both rev
1062 1074 # and node) always means no nodes have been added or destroyed.
1063 1075
1064 1076 # XXX this is suboptimal when qrefresh'ing: we strip the current
1065 1077 # head, refresh the tag cache, then immediately add a new head.
1066 1078 # But I think doing it this way is necessary for the "instant
1067 1079 # tag cache retrieval" case to work.
1068 1080 self.invalidatecaches()
1069 1081
1070 1082 def walk(self, match, node=None):
1071 1083 '''
1072 1084 walk recursively through the directory tree or a given
1073 1085 changeset, finding all files matched by the match
1074 1086 function
1075 1087 '''
1076 1088 return self[node].walk(match)
1077 1089
1078 1090 def status(self, node1='.', node2=None, match=None,
1079 1091 ignored=False, clean=False, unknown=False,
1080 1092 listsubrepos=False):
1081 1093 """return status of files between two nodes or node and working directory
1082 1094
1083 1095 If node1 is None, use the first dirstate parent instead.
1084 1096 If node2 is None, compare node1 with working directory.
1085 1097 """
1086 1098
1087 1099 def mfmatches(ctx):
1088 1100 mf = ctx.manifest().copy()
1089 1101 for fn in mf.keys():
1090 1102 if not match(fn):
1091 1103 del mf[fn]
1092 1104 return mf
1093 1105
1094 1106 if isinstance(node1, context.changectx):
1095 1107 ctx1 = node1
1096 1108 else:
1097 1109 ctx1 = self[node1]
1098 1110 if isinstance(node2, context.changectx):
1099 1111 ctx2 = node2
1100 1112 else:
1101 1113 ctx2 = self[node2]
1102 1114
1103 1115 working = ctx2.rev() is None
1104 1116 parentworking = working and ctx1 == self['.']
1105 1117 match = match or matchmod.always(self.root, self.getcwd())
1106 1118 listignored, listclean, listunknown = ignored, clean, unknown
1107 1119
1108 1120 # load earliest manifest first for caching reasons
1109 1121 if not working and ctx2.rev() < ctx1.rev():
1110 1122 ctx2.manifest()
1111 1123
1112 1124 if not parentworking:
1113 1125 def bad(f, msg):
1114 1126 if f not in ctx1:
1115 1127 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1116 1128 match.bad = bad
1117 1129
1118 1130 if working: # we need to scan the working dir
1119 1131 subrepos = []
1120 1132 if '.hgsub' in self.dirstate:
1121 1133 subrepos = ctx1.substate.keys()
1122 1134 s = self.dirstate.status(match, subrepos, listignored,
1123 1135 listclean, listunknown)
1124 1136 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1125 1137
1126 1138 # check for any possibly clean files
1127 1139 if parentworking and cmp:
1128 1140 fixup = []
1129 1141 # do a full compare of any files that might have changed
1130 1142 for f in sorted(cmp):
1131 1143 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1132 1144 or ctx1[f].cmp(ctx2[f])):
1133 1145 modified.append(f)
1134 1146 else:
1135 1147 fixup.append(f)
1136 1148
1137 1149 # update dirstate for files that are actually clean
1138 1150 if fixup:
1139 1151 if listclean:
1140 1152 clean += fixup
1141 1153
1142 1154 try:
1143 1155 # updating the dirstate is optional
1144 1156 # so we don't wait on the lock
1145 1157 wlock = self.wlock(False)
1146 1158 try:
1147 1159 for f in fixup:
1148 1160 self.dirstate.normal(f)
1149 1161 finally:
1150 1162 wlock.release()
1151 1163 except error.LockError:
1152 1164 pass
1153 1165
1154 1166 if not parentworking:
1155 1167 mf1 = mfmatches(ctx1)
1156 1168 if working:
1157 1169 # we are comparing working dir against non-parent
1158 1170 # generate a pseudo-manifest for the working dir
1159 1171 mf2 = mfmatches(self['.'])
1160 1172 for f in cmp + modified + added:
1161 1173 mf2[f] = None
1162 1174 mf2.set(f, ctx2.flags(f))
1163 1175 for f in removed:
1164 1176 if f in mf2:
1165 1177 del mf2[f]
1166 1178 else:
1167 1179 # we are comparing two revisions
1168 1180 deleted, unknown, ignored = [], [], []
1169 1181 mf2 = mfmatches(ctx2)
1170 1182
1171 1183 modified, added, clean = [], [], []
1172 1184 for fn in mf2:
1173 1185 if fn in mf1:
1174 1186 if (mf1.flags(fn) != mf2.flags(fn) or
1175 1187 (mf1[fn] != mf2[fn] and
1176 1188 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1177 1189 modified.append(fn)
1178 1190 elif listclean:
1179 1191 clean.append(fn)
1180 1192 del mf1[fn]
1181 1193 else:
1182 1194 added.append(fn)
1183 1195 removed = mf1.keys()
1184 1196
1185 1197 r = modified, added, removed, deleted, unknown, ignored, clean
1186 1198
1187 1199 if listsubrepos:
1188 1200 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1189 1201 if working:
1190 1202 rev2 = None
1191 1203 else:
1192 1204 rev2 = ctx2.substate[subpath][1]
1193 1205 try:
1194 1206 submatch = matchmod.narrowmatcher(subpath, match)
1195 1207 s = sub.status(rev2, match=submatch, ignored=listignored,
1196 1208 clean=listclean, unknown=listunknown,
1197 1209 listsubrepos=True)
1198 1210 for rfiles, sfiles in zip(r, s):
1199 1211 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1200 1212 except error.LookupError:
1201 1213 self.ui.status(_("skipping missing subrepository: %s\n")
1202 1214 % subpath)
1203 1215
1204 1216 [l.sort() for l in r]
1205 1217 return r
1206 1218
1207 1219 def heads(self, start=None):
1208 1220 heads = self.changelog.heads(start)
1209 1221 # sort the output in rev descending order
1210 1222 return sorted(heads, key=self.changelog.rev, reverse=True)
1211 1223
1212 1224 def branchheads(self, branch=None, start=None, closed=False):
1213 1225 '''return a (possibly filtered) list of heads for the given branch
1214 1226
1215 1227 Heads are returned in topological order, from newest to oldest.
1216 1228 If branch is None, use the dirstate branch.
1217 1229 If start is not None, return only heads reachable from start.
1218 1230 If closed is True, return heads that are marked as closed as well.
1219 1231 '''
1220 1232 if branch is None:
1221 1233 branch = self[None].branch()
1222 1234 branches = self.branchmap()
1223 1235 if branch not in branches:
1224 1236 return []
1225 1237 # the cache returns heads ordered lowest to highest
1226 1238 bheads = list(reversed(branches[branch]))
1227 1239 if start is not None:
1228 1240 # filter out the heads that cannot be reached from startrev
1229 1241 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1230 1242 bheads = [h for h in bheads if h in fbheads]
1231 1243 if not closed:
1232 1244 bheads = [h for h in bheads if
1233 1245 ('close' not in self.changelog.read(h)[5])]
1234 1246 return bheads
1235 1247
1236 1248 def branches(self, nodes):
1237 1249 if not nodes:
1238 1250 nodes = [self.changelog.tip()]
1239 1251 b = []
1240 1252 for n in nodes:
1241 1253 t = n
1242 1254 while 1:
1243 1255 p = self.changelog.parents(n)
1244 1256 if p[1] != nullid or p[0] == nullid:
1245 1257 b.append((t, n, p[0], p[1]))
1246 1258 break
1247 1259 n = p[0]
1248 1260 return b
1249 1261
1250 1262 def between(self, pairs):
1251 1263 r = []
1252 1264
1253 1265 for top, bottom in pairs:
1254 1266 n, l, i = top, [], 0
1255 1267 f = 1
1256 1268
1257 1269 while n != bottom and n != nullid:
1258 1270 p = self.changelog.parents(n)[0]
1259 1271 if i == f:
1260 1272 l.append(n)
1261 1273 f = f * 2
1262 1274 n = p
1263 1275 i += 1
1264 1276
1265 1277 r.append(l)
1266 1278
1267 1279 return r
1268 1280
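# Note on the loop above: it samples first-parent ancestors of `top` at
# exponentially growing distances (1, 2, 4, 8, ...), stopping at `bottom`
# or nullid. On a linear history t, t-1, t-2, ... the returned list is
# [t-1, t-2, t-4, t-8, ...], matching what the legacy 'between' wire
# command expects during discovery.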
1269 1281 def pull(self, remote, heads=None, force=False):
1270 1282 lock = self.lock()
1271 1283 try:
1272 1284 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1273 1285 force=force)
1274 1286 common, fetch, rheads = tmp
1275 1287 if not fetch:
1276 1288 self.ui.status(_("no changes found\n"))
1277 1289 return 0
1278 1290
1279 1291 if heads is None and fetch == [nullid]:
1280 1292 self.ui.status(_("requesting all changes\n"))
1281 1293 elif heads is None and remote.capable('changegroupsubset'):
1282 1294 # issue1320, avoid a race if remote changed after discovery
1283 1295 heads = rheads
1284 1296
1285 1297 if heads is None:
1286 1298 cg = remote.changegroup(fetch, 'pull')
1287 1299 else:
1288 1300 if not remote.capable('changegroupsubset'):
1289 1301 raise util.Abort(_("partial pull cannot be done because "
1290 1302 "other repository doesn't support "
1291 1303 "changegroupsubset."))
1292 1304 cg = remote.changegroupsubset(fetch, heads, 'pull')
1293 1305 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1294 1306 finally:
1295 1307 lock.release()
1296 1308
1297 1309 def push(self, remote, force=False, revs=None, newbranch=False):
1298 1310 '''Push outgoing changesets (limited by revs) from the current
1299 1311 repository to remote. Return an integer:
1300 1312 - 0 means HTTP error *or* nothing to push
1301 1313 - 1 means we pushed and remote head count is unchanged *or*
1302 1314 we have outgoing changesets but refused to push
1303 1315 - other values as described by addchangegroup()
1304 1316 '''
1305 1317 # there are two ways to push to remote repo:
1306 1318 #
1307 1319 # addchangegroup assumes local user can lock remote
1308 1320 # repo (local filesystem, old ssh servers).
1309 1321 #
1310 1322 # unbundle assumes local user cannot lock remote repo (new ssh
1311 1323 # servers, http servers).
1312 1324
1313 1325 lock = None
1314 1326 unbundle = remote.capable('unbundle')
1315 1327 if not unbundle:
1316 1328 lock = remote.lock()
1317 1329 try:
1318 1330 ret = discovery.prepush(self, remote, force, revs, newbranch)
1319 1331 if ret[0] is None:
1320 1332 # and here we return 0 for "nothing to push" or 1 for
1321 1333 # "something to push but I refuse"
1322 1334 return ret[1]
1323 1335
1324 1336 cg, remote_heads = ret
1325 1337 if unbundle:
1326 1338 # local repo finds heads on server, finds out what revs it must
1327 1339 # push. once revs transferred, if server finds it has
1328 1340 # different heads (someone else won commit/push race), server
1329 1341 # aborts.
1330 1342 if force:
1331 1343 remote_heads = ['force']
1332 1344 # ssh: return remote's addchangegroup()
1333 1345 # http: return remote's addchangegroup() or 0 for error
1334 1346 return remote.unbundle(cg, remote_heads, 'push')
1335 1347 else:
1336 1348 # we return an integer indicating remote head count change
1337 1349 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1338 1350 finally:
1339 1351 if lock is not None:
1340 1352 lock.release()
1341 1353
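# Decoding push()'s return value per the docstring above; a standalone
# sketch, not used by this module:
def _pushsummary(ret):
    # 0 and 1 are special-cased; everything else follows
    # addchangegroup()'s head-delta convention.
    if ret == 0:
        return "HTTP error or nothing to push"
    if ret == 1:
        return "pushed (head count unchanged) or push refused"
    if ret > 1:
        return "pushed, %d heads added" % (ret - 1)
    return "pushed, %d heads removed" % (-1 - ret)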
1342 1354 def changegroupinfo(self, nodes, source):
1343 1355 if self.ui.verbose or source == 'bundle':
1344 1356 self.ui.status(_("%d changesets found\n") % len(nodes))
1345 1357 if self.ui.debugflag:
1346 1358 self.ui.debug("list of changesets:\n")
1347 1359 for node in nodes:
1348 1360 self.ui.debug("%s\n" % hex(node))
1349 1361
1350 1362 def changegroupsubset(self, bases, heads, source, extranodes=None):
1351 1363 """Compute a changegroup consisting of all the nodes that are
1352 1364 descendants of any of the bases and ancestors of any of the heads.
1353 1365 Return a chunkbuffer object whose read() method will return
1354 1366 successive changegroup chunks.
1355 1367
1356 1368 It is fairly complex as determining which filenodes and which
1357 1369 manifest nodes need to be included for the changeset to be complete
1358 1370 is non-trivial.
1359 1371
1360 1372 Another wrinkle is doing the reverse, figuring out which changeset in
1361 1373 the changegroup a particular filenode or manifestnode belongs to.
1362 1374
1363 1375 The caller can specify some nodes that must be included in the
1364 1376 changegroup using the extranodes argument. It should be a dict
1365 1377 where the keys are the filenames (or 1 for the manifest), and the
1366 1378 values are lists of (node, linknode) tuples, where node is a wanted
1367 1379 node and linknode is the changelog node that should be transmitted as
1368 1380 the linkrev.
1369 1381 """
1370 1382
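# Shape of the extranodes argument described in the docstring above (all
# node values hypothetical):
#
#   extranodes = {
#       'some/file.txt': [(filenode, linknode)],   # per-file wanted nodes
#       1: [(manifestnode, linknode)],             # key 1 means the manifest
#   }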
1371 1383 # Set up some initial variables
1372 1384 # Make it easy to refer to self.changelog
1373 1385 cl = self.changelog
1374 1386 # Compute the list of changesets in this changegroup.
1375 1387 # Some bases may turn out to be superfluous, and some heads may be
1376 1388 # too. nodesbetween will return the minimal set of bases and heads
1377 1389 # necessary to re-create the changegroup.
1378 1390 if not bases:
1379 1391 bases = [nullid]
1380 1392 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1381 1393
1382 1394 if extranodes is None:
1383 1395 # can we go through the fast path ?
1384 1396 heads.sort()
1385 1397 allheads = self.heads()
1386 1398 allheads.sort()
1387 1399 if heads == allheads:
1388 1400 return self._changegroup(msng_cl_lst, source)
1389 1401
1390 1402 # slow path
1391 1403 self.hook('preoutgoing', throw=True, source=source)
1392 1404
1393 1405 self.changegroupinfo(msng_cl_lst, source)
1394 1406
1395 1407 # We assume that all ancestors of bases are known
1396 1408 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1397 1409
1398 1410 # Make it easy to refer to self.manifest
1399 1411 mnfst = self.manifest
1400 1412 # We don't know which manifests are missing yet
1401 1413 msng_mnfst_set = {}
1402 1414 # Nor do we know which filenodes are missing.
1403 1415 msng_filenode_set = {}
1404 1416
1405 1417 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1406 1418 junk = None
1407 1419
1408 1420 # A changeset always belongs to itself, so the changenode lookup
1409 1421 # function for a changenode is identity.
1410 1422 def identity(x):
1411 1423 return x
1412 1424
1413 1425 # A function-generating function that sets up the initial environment
1414 1426 # for the inner function.
1415 1427 def filenode_collector(changedfiles):
1416 1428 # This gathers information from each manifestnode included in the
1417 1429 # changegroup about which filenodes the manifest node references
1418 1430 # so we can include those in the changegroup too.
1419 1431 #
1420 1432 # It also remembers which changenode each filenode belongs to. It
1421 1433 # does this by assuming that a filenode belongs to the changenode of
1422 1434 # the first manifest that references it.
1423 1435 def collect_msng_filenodes(mnfstnode):
1424 1436 r = mnfst.rev(mnfstnode)
1425 1437 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1426 1438 # If the previous rev is one of the parents,
1427 1439 # we only need to see a diff.
1428 1440 deltamf = mnfst.readdelta(mnfstnode)
1429 1441 # For each line in the delta
1430 1442 for f, fnode in deltamf.iteritems():
1431 1443 # And if the file is in the list of files we care
1432 1444 # about.
1433 1445 if f in changedfiles:
1434 1446 # Get the changenode this manifest belongs to
1435 1447 clnode = msng_mnfst_set[mnfstnode]
1436 1448 # Create the set of filenodes for the file if
1437 1449 # there isn't one already.
1438 1450 ndset = msng_filenode_set.setdefault(f, {})
1439 1451 # And set the filenode's changelog node to the
1440 1452 # manifest's if it hasn't been set already.
1441 1453 ndset.setdefault(fnode, clnode)
1442 1454 else:
1443 1455 # Otherwise we need a full manifest.
1444 1456 m = mnfst.read(mnfstnode)
1445 1457 # For every file we care about.
1446 1458 for f in changedfiles:
1447 1459 fnode = m.get(f, None)
1448 1460 # If it's in the manifest
1449 1461 if fnode is not None:
1450 1462 # See comments above.
1451 1463 clnode = msng_mnfst_set[mnfstnode]
1452 1464 ndset = msng_filenode_set.setdefault(f, {})
1453 1465 ndset.setdefault(fnode, clnode)
1454 1466 return collect_msng_filenodes
1455 1467
1456 1468 # If we determine that a particular file or manifest node must be a
1457 1469 # node that the recipient of the changegroup will already have, we can
1458 1470 # also assume the recipient will have all the parents. This function
1459 1471 # prunes them from the set of missing nodes.
1460 1472 def prune(revlog, missingnodes):
1461 1473 hasset = set()
1462 1474 # If a 'missing' filenode thinks it belongs to a changenode we
1463 1475 # assume the recipient must have, then the recipient must have
1464 1476 # that filenode.
1465 1477 for n in missingnodes:
1466 1478 clrev = revlog.linkrev(revlog.rev(n))
1467 1479 if clrev in commonrevs:
1468 1480 hasset.add(n)
1469 1481 for n in hasset:
1470 1482 missingnodes.pop(n, None)
1471 1483 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1472 1484 missingnodes.pop(revlog.node(r), None)
1473 1485
1474 1486 # Add the nodes that were explicitly requested.
1475 1487 def add_extra_nodes(name, nodes):
1476 1488 if not extranodes or name not in extranodes:
1477 1489 return
1478 1490
1479 1491 for node, linknode in extranodes[name]:
1480 1492 if node not in nodes:
1481 1493 nodes[node] = linknode
1482 1494
1483 1495 # Now that we have all these utility functions to help out and
1484 1496 # logically divide up the task, generate the group.
1485 1497 def gengroup():
1486 1498 # The set of changed files starts empty.
1487 1499 changedfiles = set()
1488 1500 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1489 1501
1490 1502 # Create a changenode group generator that will call our functions
1491 1503 # back to lookup the owning changenode and collect information.
1492 1504 group = cl.group(msng_cl_lst, identity, collect)
1493 1505 for cnt, chnk in enumerate(group):
1494 1506 yield chnk
1495 1507 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1496 1508 self.ui.progress(_('bundling changes'), None)
1497 1509
1498 1510 prune(mnfst, msng_mnfst_set)
1499 1511 add_extra_nodes(1, msng_mnfst_set)
1500 1512 msng_mnfst_lst = msng_mnfst_set.keys()
1501 1513 # Sort the manifestnodes by revision number.
1502 1514 msng_mnfst_lst.sort(key=mnfst.rev)
1503 1515 # Create a generator for the manifestnodes that calls our lookup
1504 1516 # and data collection functions back.
1505 1517 group = mnfst.group(msng_mnfst_lst,
1506 1518 lambda mnode: msng_mnfst_set[mnode],
1507 1519 filenode_collector(changedfiles))
1508 1520 for cnt, chnk in enumerate(group):
1509 1521 yield chnk
1510 1522 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1511 1523 self.ui.progress(_('bundling manifests'), None)
1512 1524
1513 1525 # These are no longer needed, dereference and toss the memory for
1514 1526 # them.
1515 1527 msng_mnfst_lst = None
1516 1528 msng_mnfst_set.clear()
1517 1529
1518 1530 if extranodes:
1519 1531 for fname in extranodes:
1520 1532 if isinstance(fname, int):
1521 1533 continue
1522 1534 msng_filenode_set.setdefault(fname, {})
1523 1535 changedfiles.add(fname)
1524 1536 # Go through all our files in order sorted by name.
1525 1537 cnt = 0
1526 1538 for fname in sorted(changedfiles):
1527 1539 filerevlog = self.file(fname)
1528 1540 if not len(filerevlog):
1529 1541 raise util.Abort(_("empty or missing revlog for %s") % fname)
1530 1542 # Toss out the filenodes that the recipient isn't really
1531 1543 # missing.
1532 1544 missingfnodes = msng_filenode_set.pop(fname, {})
1533 1545 prune(filerevlog, missingfnodes)
1534 1546 add_extra_nodes(fname, missingfnodes)
1535 1547 # If any filenodes are left, generate the group for them,
1536 1548 # otherwise don't bother.
1537 1549 if missingfnodes:
1538 1550 yield changegroup.chunkheader(len(fname))
1539 1551 yield fname
1540 1552 # Sort the filenodes by their revision # (topological order)
1541 1553 nodeiter = list(missingfnodes)
1542 1554 nodeiter.sort(key=filerevlog.rev)
1543 1555 # Create a group generator and only pass in a changenode
1544 1556 # lookup function as we need to collect no information
1545 1557 # from filenodes.
1546 1558 group = filerevlog.group(nodeiter,
1547 1559 lambda fnode: missingfnodes[fnode])
1548 1560 for chnk in group:
1549 1561 self.ui.progress(
1550 1562 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1551 1563 cnt += 1
1552 1564 yield chnk
1553 1565 # Signal that no more groups are left.
1554 1566 yield changegroup.closechunk()
1555 1567 self.ui.progress(_('bundling files'), None)
1556 1568
1557 1569 if msng_cl_lst:
1558 1570 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1559 1571
1560 1572 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1561 1573
1562 1574 def changegroup(self, basenodes, source):
1563 1575 # to avoid a race we use changegroupsubset() (issue1320)
1564 1576 return self.changegroupsubset(basenodes, self.heads(), source)
1565 1577
1566 1578 def _changegroup(self, nodes, source):
1567 1579 """Compute the changegroup of all nodes that we have that a recipient
1568 1580 doesn't. Return a chunkbuffer object whose read() method will return
1569 1581 successive changegroup chunks.
1570 1582
1571 1583 This is much easier than the previous function as we can assume that
1572 1584 the recipient has any changenode we aren't sending them.
1573 1585
1574 1586 nodes is the set of nodes to send"""
1575 1587
1576 1588 self.hook('preoutgoing', throw=True, source=source)
1577 1589
1578 1590 cl = self.changelog
1579 1591 revset = set([cl.rev(n) for n in nodes])
1580 1592 self.changegroupinfo(nodes, source)
1581 1593
1582 1594 def identity(x):
1583 1595 return x
1584 1596
1585 1597 def gennodelst(log):
1586 1598 for r in log:
1587 1599 if log.linkrev(r) in revset:
1588 1600 yield log.node(r)
1589 1601
1590 1602 def lookuplinkrev_func(revlog):
1591 1603 def lookuplinkrev(n):
1592 1604 return cl.node(revlog.linkrev(revlog.rev(n)))
1593 1605 return lookuplinkrev
1594 1606
1595 1607 def gengroup():
1596 1608 '''yield a sequence of changegroup chunks (strings)'''
1597 1609 # construct a list of all changed files
1598 1610 changedfiles = set()
1599 1611 mmfs = {}
1600 1612 collect = changegroup.collector(cl, mmfs, changedfiles)
1601 1613
1602 1614 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1603 1615 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1604 1616 yield chnk
1605 1617 self.ui.progress(_('bundling changes'), None)
1606 1618
1607 1619 mnfst = self.manifest
1608 1620 nodeiter = gennodelst(mnfst)
1609 1621 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1610 1622 lookuplinkrev_func(mnfst))):
1611 1623 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1612 1624 yield chnk
1613 1625 self.ui.progress(_('bundling manifests'), None)
1614 1626
1615 1627 cnt = 0
1616 1628 for fname in sorted(changedfiles):
1617 1629 filerevlog = self.file(fname)
1618 1630 if not len(filerevlog):
1619 1631 raise util.Abort(_("empty or missing revlog for %s") % fname)
1620 1632 nodeiter = gennodelst(filerevlog)
1621 1633 nodeiter = list(nodeiter)
1622 1634 if nodeiter:
1623 1635 yield changegroup.chunkheader(len(fname))
1624 1636 yield fname
1625 1637 lookup = lookuplinkrev_func(filerevlog)
1626 1638 for chnk in filerevlog.group(nodeiter, lookup):
1627 1639 self.ui.progress(
1628 1640 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1629 1641 cnt += 1
1630 1642 yield chnk
1631 1643 self.ui.progress(_('bundling files'), None)
1632 1644
1633 1645 yield changegroup.closechunk()
1634 1646
1635 1647 if nodes:
1636 1648 self.hook('outgoing', node=hex(nodes[0]), source=source)
1637 1649
1638 1650 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1639 1651
1640 1652 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1641 1653 """Add the changegroup returned by source.read() to this repo.
1642 1654 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1643 1655 the URL of the repo where this changegroup is coming from.
1644 1656
1645 1657 Return an integer summarizing the change to this repo:
1646 1658 - nothing changed or no source: 0
1647 1659 - more heads than before: 1+added heads (2..n)
1648 1660 - fewer heads than before: -1-removed heads (-2..-n)
1649 1661 - number of heads stays the same: 1
1650 1662 """
1651 1663 def csmap(x):
1652 1664 self.ui.debug("add changeset %s\n" % short(x))
1653 1665 return len(cl)
1654 1666
1655 1667 def revmap(x):
1656 1668 return cl.rev(x)
1657 1669
1658 1670 if not source:
1659 1671 return 0
1660 1672
1661 1673 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1662 1674
1663 1675 changesets = files = revisions = 0
1664 1676 efiles = set()
1665 1677
1666 1678 # write changelog data to temp files so concurrent readers will not see
1667 1679 # an inconsistent view
1668 1680 cl = self.changelog
1669 1681 cl.delayupdate()
1670 1682 oldheads = len(cl.heads())
1671 1683
1672 1684 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1673 1685 try:
1674 1686 trp = weakref.proxy(tr)
1675 1687 # pull off the changeset group
1676 1688 self.ui.status(_("adding changesets\n"))
1677 1689 clstart = len(cl)
1678 1690 class prog(object):
1679 1691 step = _('changesets')
1680 1692 count = 1
1681 1693 ui = self.ui
1682 1694 total = None
1683 1695 def __call__(self):
1684 1696 self.ui.progress(self.step, self.count, unit=_('chunks'),
1685 1697 total=self.total)
1686 1698 self.count += 1
1687 1699 pr = prog()
1688 1700 source.callback = pr
1689 1701
1690 1702 if (cl.addgroup(source, csmap, trp) is None
1691 1703 and not emptyok):
1692 1704 raise util.Abort(_("received changelog group is empty"))
1693 1705 clend = len(cl)
1694 1706 changesets = clend - clstart
1695 1707 for c in xrange(clstart, clend):
1696 1708 efiles.update(self[c].files())
1697 1709 efiles = len(efiles)
1698 1710 self.ui.progress(_('changesets'), None)
1699 1711
1700 1712 # pull off the manifest group
1701 1713 self.ui.status(_("adding manifests\n"))
1702 1714 pr.step = _('manifests')
1703 1715 pr.count = 1
1704 1716 pr.total = changesets # manifests <= changesets
1705 1717 # no need to check for empty manifest group here:
1706 1718 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1707 1719 # no new manifest will be created and the manifest group will
1708 1720 # be empty during the pull
1709 1721 self.manifest.addgroup(source, revmap, trp)
1710 1722 self.ui.progress(_('manifests'), None)
1711 1723
1712 1724 needfiles = {}
1713 1725 if self.ui.configbool('server', 'validate', default=False):
1714 1726 # validate incoming csets have their manifests
1715 1727 for cset in xrange(clstart, clend):
1716 1728 mfest = self.changelog.read(self.changelog.node(cset))[0]
1717 1729 mfest = self.manifest.readdelta(mfest)
1718 1730 # store file nodes we must see
1719 1731 for f, n in mfest.iteritems():
1720 1732 needfiles.setdefault(f, set()).add(n)
1721 1733
1722 1734 # process the files
1723 1735 self.ui.status(_("adding file changes\n"))
1724 1736 pr.step = 'files'
1725 1737 pr.count = 1
1726 1738 pr.total = efiles
1727 1739 source.callback = None
1728 1740
1729 1741 while 1:
1730 1742 f = source.chunk()
1731 1743 if not f:
1732 1744 break
1733 1745 self.ui.debug("adding %s revisions\n" % f)
1734 1746 pr()
1735 1747 fl = self.file(f)
1736 1748 o = len(fl)
1737 1749 if fl.addgroup(source, revmap, trp) is None:
1738 1750 raise util.Abort(_("received file revlog group is empty"))
1739 1751 revisions += len(fl) - o
1740 1752 files += 1
1741 1753 if f in needfiles:
1742 1754 needs = needfiles[f]
1743 1755 for new in xrange(o, len(fl)):
1744 1756 n = fl.node(new)
1745 1757 if n in needs:
1746 1758 needs.remove(n)
1747 1759 if not needs:
1748 1760 del needfiles[f]
1749 1761 self.ui.progress(_('files'), None)
1750 1762
1751 1763 for f, needs in needfiles.iteritems():
1752 1764 fl = self.file(f)
1753 1765 for n in needs:
1754 1766 try:
1755 1767 fl.rev(n)
1756 1768 except error.LookupError:
1757 1769 raise util.Abort(
1758 1770 _('missing file data for %s:%s - run hg verify') %
1759 1771 (f, hex(n)))
1760 1772
1761 1773 newheads = len(cl.heads())
1762 1774 heads = ""
1763 1775 if oldheads and newheads != oldheads:
1764 1776 heads = _(" (%+d heads)") % (newheads - oldheads)
1765 1777
1766 1778 self.ui.status(_("added %d changesets"
1767 1779 " with %d changes to %d files%s\n")
1768 1780 % (changesets, revisions, files, heads))
1769 1781
1770 1782 if changesets > 0:
1771 1783 p = lambda: cl.writepending() and self.root or ""
1772 1784 self.hook('pretxnchangegroup', throw=True,
1773 1785 node=hex(cl.node(clstart)), source=srctype,
1774 1786 url=url, pending=p)
1775 1787
1776 1788 # make changelog see real files again
1777 1789 cl.finalize(trp)
1778 1790
1779 1791 tr.close()
1780 1792 finally:
1781 1793 tr.release()
1782 1794 if lock:
1783 1795 lock.release()
1784 1796
1785 1797 if changesets > 0:
1786 1798 # forcefully update the on-disk branch cache
1787 1799 self.ui.debug("updating the branch cache\n")
1788 1800 self.updatebranchcache()
1789 1801 self.hook("changegroup", node=hex(cl.node(clstart)),
1790 1802 source=srctype, url=url)
1791 1803
1792 1804 for i in xrange(clstart, clend):
1793 1805 self.hook("incoming", node=hex(cl.node(i)),
1794 1806 source=srctype, url=url)
1795 1807
1796 1808 # never return 0 here:
1797 1809 if newheads < oldheads:
1798 1810 return newheads - oldheads - 1
1799 1811 else:
1800 1812 return newheads - oldheads + 1
1801 1813
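# Sketch: turning addchangegroup()'s return convention (see its docstring
# above) into a signed head delta; standalone, not used by this module.
def _headdelta(ret):
    # 2..n   -> ret - 1 heads added
    # -2..-n -> ret + 1 (negative: heads removed)
    # 1 or 0 -> head count unchanged
    if ret > 1:
        return ret - 1
    if ret < 0:
        return ret + 1
    return 0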
1802 1814
1803 1815 def stream_in(self, remote, requirements):
1804 1816 fp = remote.stream_out()
1805 1817 l = fp.readline()
1806 1818 try:
1807 1819 resp = int(l)
1808 1820 except ValueError:
1809 1821 raise error.ResponseError(
1810 1822 _('Unexpected response from remote server:'), l)
1811 1823 if resp == 1:
1812 1824 raise util.Abort(_('operation forbidden by server'))
1813 1825 elif resp == 2:
1814 1826 raise util.Abort(_('locking the remote repository failed'))
1815 1827 elif resp != 0:
1816 1828 raise util.Abort(_('the server sent an unknown error code'))
1817 1829 self.ui.status(_('streaming all changes\n'))
1818 1830 l = fp.readline()
1819 1831 try:
1820 1832 total_files, total_bytes = map(int, l.split(' ', 1))
1821 1833 except (ValueError, TypeError):
1822 1834 raise error.ResponseError(
1823 1835 _('Unexpected response from remote server:'), l)
1824 1836 self.ui.status(_('%d files to transfer, %s of data\n') %
1825 1837 (total_files, util.bytecount(total_bytes)))
1826 1838 start = time.time()
1827 1839 for i in xrange(total_files):
1828 1840 # XXX doesn't support '\n' or '\r' in filenames
1829 1841 l = fp.readline()
1830 1842 try:
1831 1843 name, size = l.split('\0', 1)
1832 1844 size = int(size)
1833 1845 except (ValueError, TypeError):
1834 1846 raise error.ResponseError(
1835 1847 _('Unexpected response from remote server:'), l)
1836 1848 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1837 1849 # for backwards compat, name was partially encoded
1838 1850 ofp = self.sopener(store.decodedir(name), 'w')
1839 1851 for chunk in util.filechunkiter(fp, limit=size):
1840 1852 ofp.write(chunk)
1841 1853 ofp.close()
1842 1854 elapsed = time.time() - start
1843 1855 if elapsed <= 0:
1844 1856 elapsed = 0.001
1845 1857 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1846 1858 (util.bytecount(total_bytes), elapsed,
1847 1859 util.bytecount(total_bytes / elapsed)))
1848 1860
1849 1861 # new requirements = old non-format requirements + new format-related
1850 1862 # requirements from the streamed-in repository
1851 1863 requirements.update(set(self.requirements) - self.supportedformats)
1852 1864 self._applyrequirements(requirements)
1853 1865 self._writerequirements()
1854 1866
1855 1867 self.invalidate()
1856 1868 return len(self.heads()) + 1
1857 1869
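# Wire format consumed by stream_in above, reconstructed from the parsing
# code (sizes in bytes; filenames may not contain '\n' or '\r'):
#
#   <status>\n                      # 0 ok, 1 forbidden, 2 remote lock failed
#   <total files> <total bytes>\n
#   <name>\0<size>\n                # repeated once per file...
#   <size bytes of raw store data>  # ...each followed by its payload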
1858 1870 def clone(self, remote, heads=[], stream=False):
1859 1871 '''clone remote repository.
1860 1872
1861 1873 keyword arguments:
1862 1874 heads: list of revs to clone (forces use of pull)
1863 1875 stream: use streaming clone if possible'''
1864 1876
1865 1877 # now, all clients that can request uncompressed clones can
1866 1878 # read repo formats supported by all servers that can serve
1867 1879 # them.
1868 1880
1869 1881 # if revlog format changes, client will have to check version
1870 1882 # and format flags on "stream" capability, and use
1871 1883 # uncompressed only if compatible.
1872 1884
1873 1885 if stream and not heads:
1874 1886 # 'stream' means remote revlog format is revlogv1 only
1875 1887 if remote.capable('stream'):
1876 1888 return self.stream_in(remote, set(('revlogv1',)))
1877 1889 # otherwise, 'streamreqs' contains the remote revlog format
1878 1890 streamreqs = remote.capable('streamreqs')
1879 1891 if streamreqs:
1880 1892 streamreqs = set(streamreqs.split(','))
1881 1893 # if we support it, stream in and adjust our requirements
1882 1894 if not streamreqs - self.supportedformats:
1883 1895 return self.stream_in(remote, streamreqs)
1884 1896 return self.pull(remote, heads)
1885 1897
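# Capability sketch: 'streamreqs' carries a comma-separated requirements
# list, e.g. (hypothetically) 'revlogv1,store'. Streaming is used only
# when that set minus self.supportedformats is empty:
#
#   streamreqs = set('revlogv1,store'.split(','))
#   ok = not (streamreqs - repo.supportedformats)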
1886 1898 def pushkey(self, namespace, key, old, new):
1887 1899 return pushkey.push(self, namespace, key, old, new)
1888 1900
1889 1901 def listkeys(self, namespace):
1890 1902 return pushkey.list(self, namespace)
1891 1903
1892 1904 # used to avoid circular references so destructors work
1893 1905 def aftertrans(files):
1894 1906 renamefiles = [tuple(t) for t in files]
1895 1907 def a():
1896 1908 for src, dest in renamefiles:
1897 1909 util.rename(src, dest)
1898 1910 return a
1899 1911
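# aftertrans is handed to transaction.transaction in transaction() above;
# keeping the rename list in a plain closure rather than a bound method
# avoids a reference cycle between the repository and its transaction.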
1900 1912 def instance(ui, path, create):
1901 1913 return localrepository(ui, util.drop_scheme('file', path), create)
1902 1914
1903 1915 def islocal(path):
1904 1916 return True