##// END OF EJS Templates
tags: fix abababa case, with test case
Matt Mackall -
r4266:fe7f38dd default
parent child Browse files
Show More
@@ -1,2015 +1,2015
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __del__(self):
22 22 self.transhandle = None
23 23 def __init__(self, parentui, path=None, create=0):
24 24 repo.repository.__init__(self)
25 25 if not path:
26 26 p = os.getcwd()
27 27 while not os.path.isdir(os.path.join(p, ".hg")):
28 28 oldp = p
29 29 p = os.path.dirname(p)
30 30 if p == oldp:
31 31 raise repo.RepoError(_("There is no Mercurial repository"
32 32 " here (.hg not found)"))
33 33 path = p
34 34
35 35 self.root = os.path.realpath(path)
36 36 self.path = os.path.join(self.root, ".hg")
37 37 self.origroot = path
38 38 self.opener = util.opener(self.path)
39 39 self.wopener = util.opener(self.root)
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 os.mkdir(os.path.join(self.path, "store"))
47 47 requirements = ("revlogv1", "store")
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 else:
58 58 raise repo.RepoError(_("repository %s not found") % path)
59 59 elif create:
60 60 raise repo.RepoError(_("repository %s already exists") % path)
61 61 else:
62 62 # find requirements
63 63 try:
64 64 requirements = self.opener("requires").read().splitlines()
65 65 except IOError, inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 requirements = []
69 69 # check them
70 70 for r in requirements:
71 71 if r not in self.supported:
72 72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 73
74 74 # setup store
75 75 if "store" in requirements:
76 76 self.encodefn = util.encodefilename
77 77 self.decodefn = util.decodefilename
78 78 self.spath = os.path.join(self.path, "store")
79 79 else:
80 80 self.encodefn = lambda x: x
81 81 self.decodefn = lambda x: x
82 82 self.spath = self.path
83 83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 84
85 85 self.ui = ui.ui(parentui=parentui)
86 86 try:
87 87 self.ui.readconfig(self.join("hgrc"), self.root)
88 88 except IOError:
89 89 pass
90 90
91 91 v = self.ui.configrevlog()
92 92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 94 fl = v.get('flags', None)
95 95 flags = 0
96 96 if fl != None:
97 97 for x in fl.split():
98 98 flags |= revlog.flagstr(x)
99 99 elif self.revlogv1:
100 100 flags = revlog.REVLOG_DEFAULT_FLAGS
101 101
102 102 v = self.revlogversion | flags
103 103 self.manifest = manifest.manifest(self.sopener, v)
104 104 self.changelog = changelog.changelog(self.sopener, v)
105 105
106 106 fallback = self.ui.config('ui', 'fallbackencoding')
107 107 if fallback:
108 108 util._fallbackencoding = fallback
109 109
110 110 # the changelog might not have the inline index flag
111 111 # on. If the format of the changelog is the same as found in
112 112 # .hgrc, apply any flags found in the .hgrc as well.
113 113 # Otherwise, just version from the changelog
114 114 v = self.changelog.version
115 115 if v == self.revlogversion:
116 116 v |= flags
117 117 self.revlogversion = v
118 118
119 119 self.tagscache = None
120 120 self.branchcache = None
121 121 self.nodetagscache = None
122 122 self.encodepats = None
123 123 self.decodepats = None
124 124 self.transhandle = None
125 125
126 126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 127
128 128 def url(self):
129 129 return 'file:' + self.root
130 130
131 131 def hook(self, name, throw=False, **args):
132 132 def callhook(hname, funcname):
133 133 '''call python hook. hook is callable object, looked up as
134 134 name in python module. if callable returns "true", hook
135 135 fails, else passes. if hook raises exception, treated as
136 136 hook failure. exception propagates if throw is "true".
137 137
138 138 reason for "true" meaning "hook failed" is so that
139 139 unmodified commands (e.g. mercurial.commands.update) can
140 140 be run as hooks without wrappers to convert return values.'''
141 141
142 142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 143 d = funcname.rfind('.')
144 144 if d == -1:
145 145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 146 % (hname, funcname))
147 147 modname = funcname[:d]
148 148 try:
149 149 obj = __import__(modname)
150 150 except ImportError:
151 151 try:
152 152 # extensions are loaded with hgext_ prefix
153 153 obj = __import__("hgext_%s" % modname)
154 154 except ImportError:
155 155 raise util.Abort(_('%s hook is invalid '
156 156 '(import of "%s" failed)') %
157 157 (hname, modname))
158 158 try:
159 159 for p in funcname.split('.')[1:]:
160 160 obj = getattr(obj, p)
161 161 except AttributeError, err:
162 162 raise util.Abort(_('%s hook is invalid '
163 163 '("%s" is not defined)') %
164 164 (hname, funcname))
165 165 if not callable(obj):
166 166 raise util.Abort(_('%s hook is invalid '
167 167 '("%s" is not callable)') %
168 168 (hname, funcname))
169 169 try:
170 170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 171 except (KeyboardInterrupt, util.SignalInterrupt):
172 172 raise
173 173 except Exception, exc:
174 174 if isinstance(exc, util.Abort):
175 175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 176 (hname, exc.args[0]))
177 177 else:
178 178 self.ui.warn(_('error: %s hook raised an exception: '
179 179 '%s\n') % (hname, exc))
180 180 if throw:
181 181 raise
182 182 self.ui.print_exc()
183 183 return True
184 184 if r:
185 185 if throw:
186 186 raise util.Abort(_('%s hook failed') % hname)
187 187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 188 return r
189 189
190 190 def runhook(name, cmd):
191 191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 193 r = util.system(cmd, environ=env, cwd=self.root)
194 194 if r:
195 195 desc, r = util.explain_exit(r)
196 196 if throw:
197 197 raise util.Abort(_('%s hook %s') % (name, desc))
198 198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 199 return r
200 200
201 201 r = False
202 202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 203 if hname.split(".", 1)[0] == name and cmd]
204 204 hooks.sort()
205 205 for hname, cmd in hooks:
206 206 if cmd.startswith('python:'):
207 207 r = callhook(hname, cmd[7:].strip()) or r
208 208 else:
209 209 r = runhook(hname, cmd) or r
210 210 return r
211 211
212 212 tag_disallowed = ':\r\n'
213 213
214 214 def tag(self, name, node, message, local, user, date):
215 215 '''tag a revision with a symbolic name.
216 216
217 217 if local is True, the tag is stored in a per-repository file.
218 218 otherwise, it is stored in the .hgtags file, and a new
219 219 changeset is committed with the change.
220 220
221 221 keyword arguments:
222 222
223 223 local: whether to store tag in non-version-controlled file
224 224 (default False)
225 225
226 226 message: commit message to use if committing
227 227
228 228 user: name of user to use if committing
229 229
230 230 date: date tuple to use if committing'''
231 231
232 232 for c in self.tag_disallowed:
233 233 if c in name:
234 234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235 235
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237 237
238 238 if local:
239 239 # local tags are stored in the current charset
240 240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 241 self.hook('tag', node=hex(node), tag=name, local=local)
242 242 return
243 243
244 244 for x in self.status()[:5]:
245 245 if '.hgtags' in x:
246 246 raise util.Abort(_('working copy of .hgtags is changed '
247 247 '(please commit .hgtags manually)'))
248 248
249 249 # committed tags are stored in UTF-8
250 250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 251 self.wfile('.hgtags', 'ab').write(line)
252 252 if self.dirstate.state('.hgtags') == '?':
253 253 self.add(['.hgtags'])
254 254
255 255 self.commit(['.hgtags'], message, user, date)
256 256 self.hook('tag', node=hex(node), tag=name, local=local)
257 257
258 258 def tags(self):
259 259 '''return a mapping of tag to node'''
260 260 if self.tagscache:
261 261 return self.tagscache
262 262
263 263 globaltags = {}
264 264
265 265 def readtags(lines, fn):
266 266 filetags = {}
267 267 count = 0
268 268
269 269 def warn(msg):
270 270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
271 271
272 272 for l in lines:
273 273 count += 1
274 274 if not l:
275 275 continue
276 276 s = l.split(" ", 1)
277 277 if len(s) != 2:
278 278 warn(_("cannot parse entry"))
279 279 continue
280 280 node, key = s
281 281 key = util.tolocal(key.strip()) # stored in UTF-8
282 282 try:
283 283 bin_n = bin(node)
284 284 except TypeError:
285 285 warn(_("node '%s' is not well formed") % node)
286 286 continue
287 287 if bin_n not in self.changelog.nodemap:
288 288 warn(_("tag '%s' refers to unknown node") % key)
289 289 continue
290 290
291 h = {}
291 h = []
292 292 if key in filetags:
293 293 n, h = filetags[key]
294 h[n] = True
294 h.append(n)
295 295 filetags[key] = (bin_n, h)
296 296
297 297 for k,nh in filetags.items():
298 298 if k not in globaltags:
299 299 globaltags[k] = nh
300 300 continue
301 301 # we prefer the global tag if:
302 302 # it supercedes us OR
303 303 # mutual supercedes and it has a higher rank
304 304 # otherwise we win because we're tip-most
305 305 an, ah = nh
306 306 bn, bh = globaltags[k]
307 307 if bn != an and an in bh and \
308 308 (bn not in ah or len(bh) > len(ah)):
309 309 an = bn
310 ah.update(bh)
310 ah.append([n for n in bh if n not in ah])
311 311 globaltags[k] = an, ah
312 312
313 313 # read the tags file from each head, ending with the tip
314 314 f = None
315 315 for rev, node, fnode in self._hgtagsnodes():
316 316 f = (f and f.filectx(fnode) or
317 317 self.filectx('.hgtags', fileid=fnode))
318 318 readtags(f.data().splitlines(), f)
319 319
320 320 try:
321 321 data = util.fromlocal(self.opener("localtags").read())
322 322 # localtags are stored in the local character set
323 323 # while the internal tag table is stored in UTF-8
324 324 readtags(data.splitlines(), "localtags")
325 325 except IOError:
326 326 pass
327 327
328 328 self.tagscache = {}
329 329 for k,nh in globaltags.items():
330 330 n = nh[0]
331 331 if n != nullid:
332 332 self.tagscache[k] = n
333 333 self.tagscache['tip'] = self.changelog.tip()
334 334
335 335 return self.tagscache
336 336
337 337 def _hgtagsnodes(self):
338 338 heads = self.heads()
339 339 heads.reverse()
340 340 last = {}
341 341 ret = []
342 342 for node in heads:
343 343 c = self.changectx(node)
344 344 rev = c.rev()
345 345 try:
346 346 fnode = c.filenode('.hgtags')
347 347 except repo.LookupError:
348 348 continue
349 349 ret.append((rev, node, fnode))
350 350 if fnode in last:
351 351 ret[last[fnode]] = None
352 352 last[fnode] = len(ret) - 1
353 353 return [item for item in ret if item]
354 354
355 355 def tagslist(self):
356 356 '''return a list of tags ordered by revision'''
357 357 l = []
358 358 for t, n in self.tags().items():
359 359 try:
360 360 r = self.changelog.rev(n)
361 361 except:
362 362 r = -2 # sort to the beginning of the list if unknown
363 363 l.append((r, t, n))
364 364 l.sort()
365 365 return [(t, n) for r, t, n in l]
366 366
367 367 def nodetags(self, node):
368 368 '''return the tags associated with a node'''
369 369 if not self.nodetagscache:
370 370 self.nodetagscache = {}
371 371 for t, n in self.tags().items():
372 372 self.nodetagscache.setdefault(n, []).append(t)
373 373 return self.nodetagscache.get(node, [])
374 374
375 375 def _branchtags(self):
376 376 partial, last, lrev = self._readbranchcache()
377 377
378 378 tiprev = self.changelog.count() - 1
379 379 if lrev != tiprev:
380 380 self._updatebranchcache(partial, lrev+1, tiprev+1)
381 381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
382 382
383 383 return partial
384 384
385 385 def branchtags(self):
386 386 if self.branchcache is not None:
387 387 return self.branchcache
388 388
389 389 self.branchcache = {} # avoid recursion in changectx
390 390 partial = self._branchtags()
391 391
392 392 # the branch cache is stored on disk as UTF-8, but in the local
393 393 # charset internally
394 394 for k, v in partial.items():
395 395 self.branchcache[util.tolocal(k)] = v
396 396 return self.branchcache
397 397
398 398 def _readbranchcache(self):
399 399 partial = {}
400 400 try:
401 401 f = self.opener("branch.cache")
402 402 lines = f.read().split('\n')
403 403 f.close()
404 404 last, lrev = lines.pop(0).split(" ", 1)
405 405 last, lrev = bin(last), int(lrev)
406 406 if not (lrev < self.changelog.count() and
407 407 self.changelog.node(lrev) == last): # sanity check
408 408 # invalidate the cache
409 409 raise ValueError('Invalid branch cache: unknown tip')
410 410 for l in lines:
411 411 if not l: continue
412 412 node, label = l.split(" ", 1)
413 413 partial[label.strip()] = bin(node)
414 414 except (KeyboardInterrupt, util.SignalInterrupt):
415 415 raise
416 416 except Exception, inst:
417 417 if self.ui.debugflag:
418 418 self.ui.warn(str(inst), '\n')
419 419 partial, last, lrev = {}, nullid, nullrev
420 420 return partial, last, lrev
421 421
422 422 def _writebranchcache(self, branches, tip, tiprev):
423 423 try:
424 424 f = self.opener("branch.cache", "w")
425 425 f.write("%s %s\n" % (hex(tip), tiprev))
426 426 for label, node in branches.iteritems():
427 427 f.write("%s %s\n" % (hex(node), label))
428 428 except IOError:
429 429 pass
430 430
431 431 def _updatebranchcache(self, partial, start, end):
432 432 for r in xrange(start, end):
433 433 c = self.changectx(r)
434 434 b = c.branch()
435 435 partial[b] = c.node()
436 436
437 437 def lookup(self, key):
438 438 if key == '.':
439 439 key = self.dirstate.parents()[0]
440 440 if key == nullid:
441 441 raise repo.RepoError(_("no revision checked out"))
442 442 elif key == 'null':
443 443 return nullid
444 444 n = self.changelog._match(key)
445 445 if n:
446 446 return n
447 447 if key in self.tags():
448 448 return self.tags()[key]
449 449 if key in self.branchtags():
450 450 return self.branchtags()[key]
451 451 n = self.changelog._partialmatch(key)
452 452 if n:
453 453 return n
454 454 raise repo.RepoError(_("unknown revision '%s'") % key)
455 455
456 456 def dev(self):
457 457 return os.lstat(self.path).st_dev
458 458
459 459 def local(self):
460 460 return True
461 461
462 462 def join(self, f):
463 463 return os.path.join(self.path, f)
464 464
465 465 def sjoin(self, f):
466 466 f = self.encodefn(f)
467 467 return os.path.join(self.spath, f)
468 468
469 469 def wjoin(self, f):
470 470 return os.path.join(self.root, f)
471 471
472 472 def file(self, f):
473 473 if f[0] == '/':
474 474 f = f[1:]
475 475 return filelog.filelog(self.sopener, f, self.revlogversion)
476 476
477 477 def changectx(self, changeid=None):
478 478 return context.changectx(self, changeid)
479 479
480 480 def workingctx(self):
481 481 return context.workingctx(self)
482 482
483 483 def parents(self, changeid=None):
484 484 '''
485 485 get list of changectxs for parents of changeid or working directory
486 486 '''
487 487 if changeid is None:
488 488 pl = self.dirstate.parents()
489 489 else:
490 490 n = self.changelog.lookup(changeid)
491 491 pl = self.changelog.parents(n)
492 492 if pl[1] == nullid:
493 493 return [self.changectx(pl[0])]
494 494 return [self.changectx(pl[0]), self.changectx(pl[1])]
495 495
496 496 def filectx(self, path, changeid=None, fileid=None):
497 497 """changeid can be a changeset revision, node, or tag.
498 498 fileid can be a file revision or node."""
499 499 return context.filectx(self, path, changeid, fileid)
500 500
501 501 def getcwd(self):
502 502 return self.dirstate.getcwd()
503 503
504 504 def wfile(self, f, mode='r'):
505 505 return self.wopener(f, mode)
506 506
507 507 def wread(self, filename):
508 508 if self.encodepats == None:
509 509 l = []
510 510 for pat, cmd in self.ui.configitems("encode"):
511 511 mf = util.matcher(self.root, "", [pat], [], [])[1]
512 512 l.append((mf, cmd))
513 513 self.encodepats = l
514 514
515 515 data = self.wopener(filename, 'r').read()
516 516
517 517 for mf, cmd in self.encodepats:
518 518 if mf(filename):
519 519 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
520 520 data = util.filter(data, cmd)
521 521 break
522 522
523 523 return data
524 524
525 525 def wwrite(self, filename, data, fd=None):
526 526 if self.decodepats == None:
527 527 l = []
528 528 for pat, cmd in self.ui.configitems("decode"):
529 529 mf = util.matcher(self.root, "", [pat], [], [])[1]
530 530 l.append((mf, cmd))
531 531 self.decodepats = l
532 532
533 533 for mf, cmd in self.decodepats:
534 534 if mf(filename):
535 535 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
536 536 data = util.filter(data, cmd)
537 537 break
538 538
539 539 if fd:
540 540 return fd.write(data)
541 541 return self.wopener(filename, 'w').write(data)
542 542
543 543 def transaction(self):
544 544 tr = self.transhandle
545 545 if tr != None and tr.running():
546 546 return tr.nest()
547 547
548 548 # save dirstate for rollback
549 549 try:
550 550 ds = self.opener("dirstate").read()
551 551 except IOError:
552 552 ds = ""
553 553 self.opener("journal.dirstate", "w").write(ds)
554 554
555 555 renames = [(self.sjoin("journal"), self.sjoin("undo")),
556 556 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
557 557 tr = transaction.transaction(self.ui.warn, self.sopener,
558 558 self.sjoin("journal"),
559 559 aftertrans(renames))
560 560 self.transhandle = tr
561 561 return tr
562 562
563 563 def recover(self):
564 564 l = self.lock()
565 565 if os.path.exists(self.sjoin("journal")):
566 566 self.ui.status(_("rolling back interrupted transaction\n"))
567 567 transaction.rollback(self.sopener, self.sjoin("journal"))
568 568 self.reload()
569 569 return True
570 570 else:
571 571 self.ui.warn(_("no interrupted transaction available\n"))
572 572 return False
573 573
574 574 def rollback(self, wlock=None):
575 575 if not wlock:
576 576 wlock = self.wlock()
577 577 l = self.lock()
578 578 if os.path.exists(self.sjoin("undo")):
579 579 self.ui.status(_("rolling back last transaction\n"))
580 580 transaction.rollback(self.sopener, self.sjoin("undo"))
581 581 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
582 582 self.reload()
583 583 self.wreload()
584 584 else:
585 585 self.ui.warn(_("no rollback information available\n"))
586 586
587 587 def wreload(self):
588 588 self.dirstate.read()
589 589
590 590 def reload(self):
591 591 self.changelog.load()
592 592 self.manifest.load()
593 593 self.tagscache = None
594 594 self.nodetagscache = None
595 595
596 596 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
597 597 desc=None):
598 598 try:
599 599 l = lock.lock(lockname, 0, releasefn, desc=desc)
600 600 except lock.LockHeld, inst:
601 601 if not wait:
602 602 raise
603 603 self.ui.warn(_("waiting for lock on %s held by %r\n") %
604 604 (desc, inst.locker))
605 605 # default to 600 seconds timeout
606 606 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
607 607 releasefn, desc=desc)
608 608 if acquirefn:
609 609 acquirefn()
610 610 return l
611 611
612 612 def lock(self, wait=1):
613 613 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
614 614 desc=_('repository %s') % self.origroot)
615 615
616 616 def wlock(self, wait=1):
617 617 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
618 618 self.wreload,
619 619 desc=_('working directory of %s') % self.origroot)
620 620
621 621 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
622 622 """
623 623 commit an individual file as part of a larger transaction
624 624 """
625 625
626 626 t = self.wread(fn)
627 627 fl = self.file(fn)
628 628 fp1 = manifest1.get(fn, nullid)
629 629 fp2 = manifest2.get(fn, nullid)
630 630
631 631 meta = {}
632 632 cp = self.dirstate.copied(fn)
633 633 if cp:
634 634 # Mark the new revision of this file as a copy of another
635 635 # file. This copy data will effectively act as a parent
636 636 # of this new revision. If this is a merge, the first
637 637 # parent will be the nullid (meaning "look up the copy data")
638 638 # and the second one will be the other parent. For example:
639 639 #
640 640 # 0 --- 1 --- 3 rev1 changes file foo
641 641 # \ / rev2 renames foo to bar and changes it
642 642 # \- 2 -/ rev3 should have bar with all changes and
643 643 # should record that bar descends from
644 644 # bar in rev2 and foo in rev1
645 645 #
646 646 # this allows this merge to succeed:
647 647 #
648 648 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
649 649 # \ / merging rev3 and rev4 should use bar@rev2
650 650 # \- 2 --- 4 as the merge base
651 651 #
652 652 meta["copy"] = cp
653 653 if not manifest2: # not a branch merge
654 654 meta["copyrev"] = hex(manifest1.get(cp, nullid))
655 655 fp2 = nullid
656 656 elif fp2 != nullid: # copied on remote side
657 657 meta["copyrev"] = hex(manifest1.get(cp, nullid))
658 658 elif fp1 != nullid: # copied on local side, reversed
659 659 meta["copyrev"] = hex(manifest2.get(cp))
660 660 fp2 = fp1
661 661 else: # directory rename
662 662 meta["copyrev"] = hex(manifest1.get(cp, nullid))
663 663 self.ui.debug(_(" %s: copy %s:%s\n") %
664 664 (fn, cp, meta["copyrev"]))
665 665 fp1 = nullid
666 666 elif fp2 != nullid:
667 667 # is one parent an ancestor of the other?
668 668 fpa = fl.ancestor(fp1, fp2)
669 669 if fpa == fp1:
670 670 fp1, fp2 = fp2, nullid
671 671 elif fpa == fp2:
672 672 fp2 = nullid
673 673
674 674 # is the file unmodified from the parent? report existing entry
675 675 if fp2 == nullid and not fl.cmp(fp1, t):
676 676 return fp1
677 677
678 678 changelist.append(fn)
679 679 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
680 680
681 681 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
682 682 if p1 is None:
683 683 p1, p2 = self.dirstate.parents()
684 684 return self.commit(files=files, text=text, user=user, date=date,
685 685 p1=p1, p2=p2, wlock=wlock)
686 686
687 687 def commit(self, files=None, text="", user=None, date=None,
688 688 match=util.always, force=False, lock=None, wlock=None,
689 689 force_editor=False, p1=None, p2=None, extra={}):
690 690
691 691 commit = []
692 692 remove = []
693 693 changed = []
694 694 use_dirstate = (p1 is None) # not rawcommit
695 695 extra = extra.copy()
696 696
697 697 if use_dirstate:
698 698 if files:
699 699 for f in files:
700 700 s = self.dirstate.state(f)
701 701 if s in 'nmai':
702 702 commit.append(f)
703 703 elif s == 'r':
704 704 remove.append(f)
705 705 else:
706 706 self.ui.warn(_("%s not tracked!\n") % f)
707 707 else:
708 708 changes = self.status(match=match)[:5]
709 709 modified, added, removed, deleted, unknown = changes
710 710 commit = modified + added
711 711 remove = removed
712 712 else:
713 713 commit = files
714 714
715 715 if use_dirstate:
716 716 p1, p2 = self.dirstate.parents()
717 717 update_dirstate = True
718 718 else:
719 719 p1, p2 = p1, p2 or nullid
720 720 update_dirstate = (self.dirstate.parents()[0] == p1)
721 721
722 722 c1 = self.changelog.read(p1)
723 723 c2 = self.changelog.read(p2)
724 724 m1 = self.manifest.read(c1[0]).copy()
725 725 m2 = self.manifest.read(c2[0])
726 726
727 727 if use_dirstate:
728 728 branchname = self.workingctx().branch()
729 729 try:
730 730 branchname = branchname.decode('UTF-8').encode('UTF-8')
731 731 except UnicodeDecodeError:
732 732 raise util.Abort(_('branch name not in UTF-8!'))
733 733 else:
734 734 branchname = ""
735 735
736 736 if use_dirstate:
737 737 oldname = c1[5].get("branch") # stored in UTF-8
738 738 if not commit and not remove and not force and p2 == nullid and \
739 739 branchname == oldname:
740 740 self.ui.status(_("nothing changed\n"))
741 741 return None
742 742
743 743 xp1 = hex(p1)
744 744 if p2 == nullid: xp2 = ''
745 745 else: xp2 = hex(p2)
746 746
747 747 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
748 748
749 749 if not wlock:
750 750 wlock = self.wlock()
751 751 if not lock:
752 752 lock = self.lock()
753 753 tr = self.transaction()
754 754
755 755 # check in files
756 756 new = {}
757 757 linkrev = self.changelog.count()
758 758 commit.sort()
759 759 for f in commit:
760 760 self.ui.note(f + "\n")
761 761 try:
762 762 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
763 763 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
764 764 except IOError:
765 765 if use_dirstate:
766 766 self.ui.warn(_("trouble committing %s!\n") % f)
767 767 raise
768 768 else:
769 769 remove.append(f)
770 770
771 771 # update manifest
772 772 m1.update(new)
773 773 remove.sort()
774 774
775 775 for f in remove:
776 776 if f in m1:
777 777 del m1[f]
778 778 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
779 779
780 780 # add changeset
781 781 new = new.keys()
782 782 new.sort()
783 783
784 784 user = user or self.ui.username()
785 785 if not text or force_editor:
786 786 edittext = []
787 787 if text:
788 788 edittext.append(text)
789 789 edittext.append("")
790 790 edittext.append("HG: user: %s" % user)
791 791 if p2 != nullid:
792 792 edittext.append("HG: branch merge")
793 793 edittext.extend(["HG: changed %s" % f for f in changed])
794 794 edittext.extend(["HG: removed %s" % f for f in remove])
795 795 if not changed and not remove:
796 796 edittext.append("HG: no files changed")
797 797 edittext.append("")
798 798 # run editor in the repository root
799 799 olddir = os.getcwd()
800 800 os.chdir(self.root)
801 801 text = self.ui.edit("\n".join(edittext), user)
802 802 os.chdir(olddir)
803 803
804 804 lines = [line.rstrip() for line in text.rstrip().splitlines()]
805 805 while lines and not lines[0]:
806 806 del lines[0]
807 807 if not lines:
808 808 return None
809 809 text = '\n'.join(lines)
810 810 if branchname:
811 811 extra["branch"] = branchname
812 812 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
813 813 user, date, extra)
814 814 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
815 815 parent2=xp2)
816 816 tr.close()
817 817
818 818 if use_dirstate or update_dirstate:
819 819 self.dirstate.setparents(n)
820 820 if use_dirstate:
821 821 self.dirstate.update(new, "n")
822 822 self.dirstate.forget(remove)
823 823
824 824 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
825 825 return n
826 826
827 827 def walk(self, node=None, files=[], match=util.always, badmatch=None):
828 828 '''
829 829 walk recursively through the directory tree or a given
830 830 changeset, finding all files matched by the match
831 831 function
832 832
833 833 results are yielded in a tuple (src, filename), where src
834 834 is one of:
835 835 'f' the file was found in the directory tree
836 836 'm' the file was only in the dirstate and not in the tree
837 837 'b' file was not found and matched badmatch
838 838 '''
839 839
840 840 if node:
841 841 fdict = dict.fromkeys(files)
842 842 for fn in self.manifest.read(self.changelog.read(node)[0]):
843 843 for ffn in fdict:
844 844 # match if the file is the exact name or a directory
845 845 if ffn == fn or fn.startswith("%s/" % ffn):
846 846 del fdict[ffn]
847 847 break
848 848 if match(fn):
849 849 yield 'm', fn
850 850 for fn in fdict:
851 851 if badmatch and badmatch(fn):
852 852 if match(fn):
853 853 yield 'b', fn
854 854 else:
855 855 self.ui.warn(_('%s: No such file in rev %s\n') % (
856 856 util.pathto(self.root, self.getcwd(), fn), short(node)))
857 857 else:
858 858 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
859 859 yield src, fn
860 860
861 861 def status(self, node1=None, node2=None, files=[], match=util.always,
862 862 wlock=None, list_ignored=False, list_clean=False):
863 863 """return status of files between two nodes or node and working directory
864 864
865 865 If node1 is None, use the first dirstate parent instead.
866 866 If node2 is None, compare node1 with working directory.
867 867 """
868 868
869 869 def fcmp(fn, mf):
870 870 t1 = self.wread(fn)
871 871 return self.file(fn).cmp(mf.get(fn, nullid), t1)
872 872
873 873 def mfmatches(node):
874 874 change = self.changelog.read(node)
875 875 mf = self.manifest.read(change[0]).copy()
876 876 for fn in mf.keys():
877 877 if not match(fn):
878 878 del mf[fn]
879 879 return mf
880 880
881 881 modified, added, removed, deleted, unknown = [], [], [], [], []
882 882 ignored, clean = [], []
883 883
884 884 compareworking = False
885 885 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
886 886 compareworking = True
887 887
888 888 if not compareworking:
889 889 # read the manifest from node1 before the manifest from node2,
890 890 # so that we'll hit the manifest cache if we're going through
891 891 # all the revisions in parent->child order.
892 892 mf1 = mfmatches(node1)
893 893
894 894 # are we comparing the working directory?
895 895 if not node2:
896 896 if not wlock:
897 897 try:
898 898 wlock = self.wlock(wait=0)
899 899 except lock.LockException:
900 900 wlock = None
901 901 (lookup, modified, added, removed, deleted, unknown,
902 902 ignored, clean) = self.dirstate.status(files, match,
903 903 list_ignored, list_clean)
904 904
905 905 # are we comparing working dir against its parent?
906 906 if compareworking:
907 907 if lookup:
908 908 # do a full compare of any files that might have changed
909 909 mf2 = mfmatches(self.dirstate.parents()[0])
910 910 for f in lookup:
911 911 if fcmp(f, mf2):
912 912 modified.append(f)
913 913 else:
914 914 clean.append(f)
915 915 if wlock is not None:
916 916 self.dirstate.update([f], "n")
917 917 else:
918 918 # we are comparing working dir against non-parent
919 919 # generate a pseudo-manifest for the working dir
920 920 # XXX: create it in dirstate.py ?
921 921 mf2 = mfmatches(self.dirstate.parents()[0])
922 922 for f in lookup + modified + added:
923 923 mf2[f] = ""
924 924 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
925 925 for f in removed:
926 926 if f in mf2:
927 927 del mf2[f]
928 928 else:
929 929 # we are comparing two revisions
930 930 mf2 = mfmatches(node2)
931 931
932 932 if not compareworking:
933 933 # flush lists from dirstate before comparing manifests
934 934 modified, added, clean = [], [], []
935 935
936 936 # make sure to sort the files so we talk to the disk in a
937 937 # reasonable order
938 938 mf2keys = mf2.keys()
939 939 mf2keys.sort()
940 940 for fn in mf2keys:
941 941 if mf1.has_key(fn):
942 942 if mf1.flags(fn) != mf2.flags(fn) or \
943 943 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
944 944 modified.append(fn)
945 945 elif list_clean:
946 946 clean.append(fn)
947 947 del mf1[fn]
948 948 else:
949 949 added.append(fn)
950 950
951 951 removed = mf1.keys()
952 952
953 953 # sort and return results:
954 954 for l in modified, added, removed, deleted, unknown, ignored, clean:
955 955 l.sort()
956 956 return (modified, added, removed, deleted, unknown, ignored, clean)
957 957
958 958 def add(self, list, wlock=None):
959 959 if not wlock:
960 960 wlock = self.wlock()
961 961 for f in list:
962 962 p = self.wjoin(f)
963 963 if not os.path.exists(p):
964 964 self.ui.warn(_("%s does not exist!\n") % f)
965 965 elif not os.path.isfile(p):
966 966 self.ui.warn(_("%s not added: only files supported currently\n")
967 967 % f)
968 968 elif self.dirstate.state(f) in 'an':
969 969 self.ui.warn(_("%s already tracked!\n") % f)
970 970 else:
971 971 self.dirstate.update([f], "a")
972 972
973 973 def forget(self, list, wlock=None):
974 974 if not wlock:
975 975 wlock = self.wlock()
976 976 for f in list:
977 977 if self.dirstate.state(f) not in 'ai':
978 978 self.ui.warn(_("%s not added!\n") % f)
979 979 else:
980 980 self.dirstate.forget([f])
981 981
982 982 def remove(self, list, unlink=False, wlock=None):
983 983 if unlink:
984 984 for f in list:
985 985 try:
986 986 util.unlink(self.wjoin(f))
987 987 except OSError, inst:
988 988 if inst.errno != errno.ENOENT:
989 989 raise
990 990 if not wlock:
991 991 wlock = self.wlock()
992 992 for f in list:
993 993 p = self.wjoin(f)
994 994 if os.path.exists(p):
995 995 self.ui.warn(_("%s still exists!\n") % f)
996 996 elif self.dirstate.state(f) == 'a':
997 997 self.dirstate.forget([f])
998 998 elif f not in self.dirstate:
999 999 self.ui.warn(_("%s not tracked!\n") % f)
1000 1000 else:
1001 1001 self.dirstate.update([f], "r")
1002 1002
1003 1003 def undelete(self, list, wlock=None):
1004 1004 p = self.dirstate.parents()[0]
1005 1005 mn = self.changelog.read(p)[0]
1006 1006 m = self.manifest.read(mn)
1007 1007 if not wlock:
1008 1008 wlock = self.wlock()
1009 1009 for f in list:
1010 1010 if self.dirstate.state(f) not in "r":
1011 1011 self.ui.warn("%s not removed!\n" % f)
1012 1012 else:
1013 1013 t = self.file(f).read(m[f])
1014 1014 self.wwrite(f, t)
1015 1015 util.set_exec(self.wjoin(f), m.execf(f))
1016 1016 self.dirstate.update([f], "n")
1017 1017
1018 1018 def copy(self, source, dest, wlock=None):
1019 1019 p = self.wjoin(dest)
1020 1020 if not os.path.exists(p):
1021 1021 self.ui.warn(_("%s does not exist!\n") % dest)
1022 1022 elif not os.path.isfile(p):
1023 1023 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1024 1024 else:
1025 1025 if not wlock:
1026 1026 wlock = self.wlock()
1027 1027 if self.dirstate.state(dest) == '?':
1028 1028 self.dirstate.update([dest], "a")
1029 1029 self.dirstate.copy(source, dest)
1030 1030
1031 1031 def heads(self, start=None):
1032 1032 heads = self.changelog.heads(start)
1033 1033 # sort the output in rev descending order
1034 1034 heads = [(-self.changelog.rev(h), h) for h in heads]
1035 1035 heads.sort()
1036 1036 return [n for (r, n) in heads]
1037 1037
1038 1038 # branchlookup returns a dict giving a list of branches for
1039 1039 # each head. A branch is defined as the tag of a node or
1040 1040 # the branch of the node's parents. If a node has multiple
1041 1041 # branch tags, tags are eliminated if they are visible from other
1042 1042 # branch tags.
1043 1043 #
1044 1044 # So, for this graph: a->b->c->d->e
1045 1045 # \ /
1046 1046 # aa -----/
1047 1047 # a has tag 2.6.12
1048 1048 # d has tag 2.6.13
1049 1049 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1050 1050 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1051 1051 # from the list.
1052 1052 #
1053 1053 # It is possible that more than one head will have the same branch tag.
1054 1054 # callers need to check the result for multiple heads under the same
1055 1055 # branch tag if that is a problem for them (ie checkout of a specific
1056 1056 # branch).
1057 1057 #
1058 1058 # passing in a specific branch will limit the depth of the search
1059 1059 # through the parents. It won't limit the branches returned in the
1060 1060 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the list of branch tags
        visible from it (see the block comment above for the full
        semantics).  Passing branch= only limits the depth of the
        parent traversal, not which branches are returned."""
        if not heads:
            heads = self.heads()
        # working copy of heads we still have to walk from
        headt = [ h for h in heads ]
        chlog = self.changelog
        # branches[x] maps node x -> set (dict) of tagged nodes visible from x
        branches = {}
        # pending (second-parent, found-so-far) traversals of merge branches
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred traversal down a merge's second parent;
                # note: 'seen' deliberately carries over from the head walk
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # this tagged node is visible from everything in
                        # 'found' (and trivially from itself)
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                # defer the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of 'branches' reachability, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other tag: report its tags
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1143 1143
1144 1144 def branches(self, nodes):
1145 1145 if not nodes:
1146 1146 nodes = [self.changelog.tip()]
1147 1147 b = []
1148 1148 for n in nodes:
1149 1149 t = n
1150 1150 while 1:
1151 1151 p = self.changelog.parents(n)
1152 1152 if p[1] != nullid or p[0] == nullid:
1153 1153 b.append((t, n, p[0], p[1]))
1154 1154 break
1155 1155 n = p[0]
1156 1156 return b
1157 1157
1158 1158 def between(self, pairs):
1159 1159 r = []
1160 1160
1161 1161 for top, bottom in pairs:
1162 1162 n, l, i = top, [], 0
1163 1163 f = 1
1164 1164
1165 1165 while n != bottom:
1166 1166 p = self.changelog.parents(n)[0]
1167 1167 if i == f:
1168 1168 l.append(n)
1169 1169 f = f * 2
1170 1170 n = p
1171 1171 i += 1
1172 1172
1173 1173 r.append(l)
1174 1174
1175 1175 return r
1176 1176
1177 1177 def findincoming(self, remote, base=None, heads=None, force=False):
1178 1178 """Return list of roots of the subsets of missing nodes from remote
1179 1179
1180 1180 If base dict is specified, assume that these nodes and their parents
1181 1181 exist on the remote side and that no child of a node of base exists
1182 1182 in both remote and self.
1183 1183 Furthermore base will be updated to include the nodes that exists
1184 1184 in self and remote but no children exists in self and remote.
1185 1185 If a list of heads is specified, return only nodes which are heads
1186 1186 or ancestors of these heads.
1187 1187
1188 1188 All the ancestors of base are in self and in remote.
1189 1189 All the descendants of the list returned are missing in self.
1190 1190 (and so we know that the rest of the nodes are missing in remote, see
1191 1191 outgoing)
1192 1192 """
1193 1193 m = self.changelog.nodemap
1194 1194 search = []
1195 1195 fetch = {}
1196 1196 seen = {}
1197 1197 seenbranch = {}
1198 1198 if base == None:
1199 1199 base = {}
1200 1200
1201 1201 if not heads:
1202 1202 heads = remote.heads()
1203 1203
1204 1204 if self.changelog.tip() == nullid:
1205 1205 base[nullid] = 1
1206 1206 if heads != [nullid]:
1207 1207 return [nullid]
1208 1208 return []
1209 1209
1210 1210 # assume we're closer to the tip than the root
1211 1211 # and start by examining the heads
1212 1212 self.ui.status(_("searching for changes\n"))
1213 1213
1214 1214 unknown = []
1215 1215 for h in heads:
1216 1216 if h not in m:
1217 1217 unknown.append(h)
1218 1218 else:
1219 1219 base[h] = 1
1220 1220
1221 1221 if not unknown:
1222 1222 return []
1223 1223
1224 1224 req = dict.fromkeys(unknown)
1225 1225 reqcnt = 0
1226 1226
1227 1227 # search through remote branches
1228 1228 # a 'branch' here is a linear segment of history, with four parts:
1229 1229 # head, root, first parent, second parent
1230 1230 # (a branch always has two parents (or none) by definition)
1231 1231 unknown = remote.branches(unknown)
1232 1232 while unknown:
1233 1233 r = []
1234 1234 while unknown:
1235 1235 n = unknown.pop(0)
1236 1236 if n[0] in seen:
1237 1237 continue
1238 1238
1239 1239 self.ui.debug(_("examining %s:%s\n")
1240 1240 % (short(n[0]), short(n[1])))
1241 1241 if n[0] == nullid: # found the end of the branch
1242 1242 pass
1243 1243 elif n in seenbranch:
1244 1244 self.ui.debug(_("branch already found\n"))
1245 1245 continue
1246 1246 elif n[1] and n[1] in m: # do we know the base?
1247 1247 self.ui.debug(_("found incomplete branch %s:%s\n")
1248 1248 % (short(n[0]), short(n[1])))
1249 1249 search.append(n) # schedule branch range for scanning
1250 1250 seenbranch[n] = 1
1251 1251 else:
1252 1252 if n[1] not in seen and n[1] not in fetch:
1253 1253 if n[2] in m and n[3] in m:
1254 1254 self.ui.debug(_("found new changeset %s\n") %
1255 1255 short(n[1]))
1256 1256 fetch[n[1]] = 1 # earliest unknown
1257 1257 for p in n[2:4]:
1258 1258 if p in m:
1259 1259 base[p] = 1 # latest known
1260 1260
1261 1261 for p in n[2:4]:
1262 1262 if p not in req and p not in m:
1263 1263 r.append(p)
1264 1264 req[p] = 1
1265 1265 seen[n[0]] = 1
1266 1266
1267 1267 if r:
1268 1268 reqcnt += 1
1269 1269 self.ui.debug(_("request %d: %s\n") %
1270 1270 (reqcnt, " ".join(map(short, r))))
1271 1271 for p in xrange(0, len(r), 10):
1272 1272 for b in remote.branches(r[p:p+10]):
1273 1273 self.ui.debug(_("received %s:%s\n") %
1274 1274 (short(b[0]), short(b[1])))
1275 1275 unknown.append(b)
1276 1276
1277 1277 # do binary search on the branches we found
1278 1278 while search:
1279 1279 n = search.pop(0)
1280 1280 reqcnt += 1
1281 1281 l = remote.between([(n[0], n[1])])[0]
1282 1282 l.append(n[1])
1283 1283 p = n[0]
1284 1284 f = 1
1285 1285 for i in l:
1286 1286 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1287 1287 if i in m:
1288 1288 if f <= 2:
1289 1289 self.ui.debug(_("found new branch changeset %s\n") %
1290 1290 short(p))
1291 1291 fetch[p] = 1
1292 1292 base[i] = 1
1293 1293 else:
1294 1294 self.ui.debug(_("narrowed branch search to %s:%s\n")
1295 1295 % (short(p), short(i)))
1296 1296 search.append((p, i))
1297 1297 break
1298 1298 p, f = i, f * 2
1299 1299
1300 1300 # sanity check our fetch list
1301 1301 for f in fetch.keys():
1302 1302 if f in m:
1303 1303 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1304 1304
1305 1305 if base.keys() == [nullid]:
1306 1306 if force:
1307 1307 self.ui.warn(_("warning: repository is unrelated\n"))
1308 1308 else:
1309 1309 raise util.Abort(_("repository is unrelated"))
1310 1310
1311 1311 self.ui.debug(_("found new changesets starting at ") +
1312 1312 " ".join([short(f) for f in fetch]) + "\n")
1313 1313
1314 1314 self.ui.debug(_("%d total queries\n") % reqcnt)
1315 1315
1316 1316 return fetch.keys()
1317 1317
1318 1318 def findoutgoing(self, remote, base=None, heads=None, force=False):
1319 1319 """Return list of nodes that are roots of subsets not in remote
1320 1320
1321 1321 If base dict is specified, assume that these nodes and their parents
1322 1322 exist on the remote side.
1323 1323 If a list of heads is specified, return only nodes which are heads
1324 1324 or ancestors of these heads, and return a second element which
1325 1325 contains all remote heads which get new children.
1326 1326 """
1327 1327 if base == None:
1328 1328 base = {}
1329 1329 self.findincoming(remote, base, heads, force=force)
1330 1330
1331 1331 self.ui.debug(_("common changesets up to ")
1332 1332 + " ".join(map(short, base.keys())) + "\n")
1333 1333
1334 1334 remain = dict.fromkeys(self.changelog.nodemap)
1335 1335
1336 1336 # prune everything remote has from the tree
1337 1337 del remain[nullid]
1338 1338 remove = base.keys()
1339 1339 while remove:
1340 1340 n = remove.pop(0)
1341 1341 if n in remain:
1342 1342 del remain[n]
1343 1343 for p in self.changelog.parents(n):
1344 1344 remove.append(p)
1345 1345
1346 1346 # find every node whose parents have been pruned
1347 1347 subset = []
1348 1348 # find every remote head that will get new children
1349 1349 updated_heads = {}
1350 1350 for n in remain:
1351 1351 p1, p2 = self.changelog.parents(n)
1352 1352 if p1 not in remain and p2 not in remain:
1353 1353 subset.append(n)
1354 1354 if heads:
1355 1355 if p1 in heads:
1356 1356 updated_heads[p1] = True
1357 1357 if p2 in heads:
1358 1358 updated_heads[p2] = True
1359 1359
1360 1360 # this is the set of all roots we have to push
1361 1361 if heads:
1362 1362 return subset, updated_heads.keys()
1363 1363 else:
1364 1364 return subset
1365 1365
1366 1366 def pull(self, remote, heads=None, force=False, lock=None):
1367 1367 mylock = False
1368 1368 if not lock:
1369 1369 lock = self.lock()
1370 1370 mylock = True
1371 1371
1372 1372 try:
1373 1373 fetch = self.findincoming(remote, force=force)
1374 1374 if fetch == [nullid]:
1375 1375 self.ui.status(_("requesting all changes\n"))
1376 1376
1377 1377 if not fetch:
1378 1378 self.ui.status(_("no changes found\n"))
1379 1379 return 0
1380 1380
1381 1381 if heads is None:
1382 1382 cg = remote.changegroup(fetch, 'pull')
1383 1383 else:
1384 1384 if 'changegroupsubset' not in remote.capabilities:
1385 1385 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1386 1386 cg = remote.changegroupsubset(fetch, heads, 'pull')
1387 1387 return self.addchangegroup(cg, 'pull', remote.url())
1388 1388 finally:
1389 1389 if mylock:
1390 1390 lock.release()
1391 1391
1392 1392 def push(self, remote, force=False, revs=None):
1393 1393 # there are two ways to push to remote repo:
1394 1394 #
1395 1395 # addchangegroup assumes local user can lock remote
1396 1396 # repo (local filesystem, old ssh servers).
1397 1397 #
1398 1398 # unbundle assumes local user cannot lock remote repo (new ssh
1399 1399 # servers, http servers).
1400 1400
1401 1401 if remote.capable('unbundle'):
1402 1402 return self.push_unbundle(remote, force, revs)
1403 1403 return self.push_addchangegroup(remote, force, revs)
1404 1404
    def prepush(self, remote, force, revs):
        """Compute the changegroup for a push and sanity-check it.

        Returns (changegroup, remote_heads) on success, or (None, code)
        when there is nothing to push or when the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # discover what remote is missing; fills 'base' with common nodes
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: nothing can be a *new* head there
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads than remote
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # known remote head stays a head unless one of our
                        # outgoing heads descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # unknown remote head: remains a head after push
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1460 1460
1461 1461 def push_addchangegroup(self, remote, force, revs):
1462 1462 lock = remote.lock()
1463 1463
1464 1464 ret = self.prepush(remote, force, revs)
1465 1465 if ret[0] is not None:
1466 1466 cg, remote_heads = ret
1467 1467 return remote.addchangegroup(cg, 'push', self.url())
1468 1468 return ret[1]
1469 1469
1470 1470 def push_unbundle(self, remote, force, revs):
1471 1471 # local repo finds heads on server, finds out what revs it
1472 1472 # must push. once revs transferred, if server finds it has
1473 1473 # different heads (someone else won commit/push race), server
1474 1474 # aborts.
1475 1475
1476 1476 ret = self.prepush(remote, force, revs)
1477 1477 if ret[0] is not None:
1478 1478 cg, remote_heads = ret
1479 1479 if force: remote_heads = ['force']
1480 1480 return remote.unbundle(cg, remote_heads, 'push')
1481 1481 return ret[1]
1482 1482
1483 1483 def changegroupinfo(self, nodes):
1484 1484 self.ui.note(_("%d changesets found\n") % len(nodes))
1485 1485 if self.ui.debugflag:
1486 1486 self.ui.debug(_("List of changesets:\n"))
1487 1487 for node in nodes:
1488 1488 self.ui.debug("%s\n" % hex(node))
1489 1489
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the generated chunk stream."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                    else:
                        # Otherwise we need a full manifest.
                        m = mnfst.read(mnfstnode)
                        # For every file we care about.
                        for f in changedfiles:
                            fnode = m.get(f, None)
                            # If it's in the manifest
                            if fnode is not None:
                                # See comments above.
                                clnode = msng_mnfst_set[mnfstnode]
                                ndset = msng_filenode_set.setdefault(f, {})
                                ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1761 1761
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the generated chunk stream."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset descended from any of basenodes goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset's owning changenode is itself
        def identity(x):
            return x

        # yield revlog nodes (in revision order) whose linked changeset
        # is part of the outgoing set
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a revlog node back to the changenode it belongs to
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first; collects changed files as a side effect
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, each preceded by its name
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1828 1828
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # debug-log each incoming changeset; the returned count is used
        # by cl.addgroup below (passed as its mapping function)
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # map an incoming node to its changelog revision number
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            # head count before the pull, for the return value below
            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files: a name chunk followed by that file's
            # revision group, until an empty chunk terminates the stream
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the buffered changelog data to its real files
            cl.writedata()
        finally:
            # always tear down the appendfile wrapper, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may raise to veto the whole transaction
            # before tr.close() commits it
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-commit notification hooks: one changegroup hook for the
            # first new changeset, one incoming hook per new changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1935 1935
1936 1936
1937 1937 def stream_in(self, remote):
1938 1938 fp = remote.stream_out()
1939 1939 l = fp.readline()
1940 1940 try:
1941 1941 resp = int(l)
1942 1942 except ValueError:
1943 1943 raise util.UnexpectedOutput(
1944 1944 _('Unexpected response from remote server:'), l)
1945 1945 if resp == 1:
1946 1946 raise util.Abort(_('operation forbidden by server'))
1947 1947 elif resp == 2:
1948 1948 raise util.Abort(_('locking the remote repository failed'))
1949 1949 elif resp != 0:
1950 1950 raise util.Abort(_('the server sent an unknown error code'))
1951 1951 self.ui.status(_('streaming all changes\n'))
1952 1952 l = fp.readline()
1953 1953 try:
1954 1954 total_files, total_bytes = map(int, l.split(' ', 1))
1955 1955 except ValueError, TypeError:
1956 1956 raise util.UnexpectedOutput(
1957 1957 _('Unexpected response from remote server:'), l)
1958 1958 self.ui.status(_('%d files to transfer, %s of data\n') %
1959 1959 (total_files, util.bytecount(total_bytes)))
1960 1960 start = time.time()
1961 1961 for i in xrange(total_files):
1962 1962 # XXX doesn't support '\n' or '\r' in filenames
1963 1963 l = fp.readline()
1964 1964 try:
1965 1965 name, size = l.split('\0', 1)
1966 1966 size = int(size)
1967 1967 except ValueError, TypeError:
1968 1968 raise util.UnexpectedOutput(
1969 1969 _('Unexpected response from remote server:'), l)
1970 1970 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1971 1971 ofp = self.sopener(name, 'w')
1972 1972 for chunk in util.filechunkiter(fp, limit=size):
1973 1973 ofp.write(chunk)
1974 1974 ofp.close()
1975 1975 elapsed = time.time() - start
1976 1976 if elapsed <= 0:
1977 1977 elapsed = 0.001
1978 1978 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1979 1979 (util.bytecount(total_bytes), elapsed,
1980 1980 util.bytecount(total_bytes / elapsed)))
1981 1981 self.reload()
1982 1982 return len(self.heads()) + 1
1983 1983
1984 1984 def clone(self, remote, heads=[], stream=False):
1985 1985 '''clone remote repository.
1986 1986
1987 1987 keyword arguments:
1988 1988 heads: list of revs to clone (forces use of pull)
1989 1989 stream: use streaming clone if possible'''
1990 1990
1991 1991 # now, all clients that can request uncompressed clones can
1992 1992 # read repo formats supported by all servers that can serve
1993 1993 # them.
1994 1994
1995 1995 # if revlog format changes, client will have to check version
1996 1996 # and format flags on "stream" capability, and use
1997 1997 # uncompressed only if compatible.
1998 1998
1999 1999 if stream and not heads and remote.capable('stream'):
2000 2000 return self.stream_in(remote)
2001 2001 return self.pull(remote, heads)
2002 2002
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in files.

    The pairs are copied into plain tuples up front so the returned
    closure holds no reference back to the caller's structures.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for source, target in pending:
            util.rename(source, target)
    return run_renames
2010 2010
def instance(ui, path, create):
    """Open (or create) a localrepository for a plain or file:// path."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2013 2013
def islocal(path):
    """Repositories of this class are always local."""
    return True
@@ -1,85 +1,104
1 1 #!/bin/sh
2 2
3 3 mkdir t
4 4 cd t
5 5 hg init
6 6 hg id
7 7 echo a > a
8 8 hg add a
9 9 hg commit -m "test" -d "1000000 0"
10 10 hg co
11 11 hg identify
12 12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
13 13 hg tag -l "This is a local tag with a really long name!"
14 14 hg tags
15 15 rm .hg/localtags
16 16 echo "$T first" > .hgtags
17 17 cat .hgtags
18 18 hg add .hgtags
19 19 hg commit -m "add tags" -d "1000000 0"
20 20 hg tags
21 21 hg identify
22 22 echo bb > a
23 23 hg status
24 24 hg identify
25 25 hg co first
26 26 hg id
27 27 hg -v id
28 28 hg status
29 29 echo 1 > b
30 30 hg add b
31 31 hg commit -m "branch" -d "1000000 0"
32 32 hg id
33 33 hg merge 1
34 34 hg id
35 35 hg status
36 36
37 37 hg commit -m "merge" -d "1000000 0"
38 38
39 39 # create fake head, make sure tag not visible afterwards
40 40 cp .hgtags tags
41 41 hg tag -d "1000000 0" last
42 42 hg rm .hgtags
43 43 hg commit -m "remove" -d "1000000 0"
44 44
45 45 mv tags .hgtags
46 46 hg add .hgtags
47 47 hg commit -m "readd" -d "1000000 0"
48 48
49 49 hg tags
50 50
51 51 # invalid tags
52 52 echo "spam" >> .hgtags
53 53 echo >> .hgtags
54 54 echo "foo bar" >> .hgtags
55 55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
56 56 hg commit -m "tags" -d "1000000 0"
57 57
58 58 # report tag parse error on other head
59 59 hg up 3
60 60 echo 'x y' >> .hgtags
61 61 hg commit -m "head" -d "1000000 0"
62 62
63 63 hg tags
64 64 hg tip
65 65
66 66 # test tag precedence rules
67 67 cd ..
68 68 hg init t2
69 69 cd t2
70 70 echo foo > foo
71 71 hg add foo
72 72 hg ci -m 'add foo' -d '1000000 0' # rev 0
73 73 hg tag -d '1000000 0' bar # rev 1
74 74 echo >> foo
75 75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
76 76 hg up -C 1
77 77 hg tag -r 1 -d '1000000 0' bar # rev 3
78 78 hg up -C 1
79 79 echo >> foo
80 80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
81 81 hg tags
82 82
83 # test tag removal
83 84 hg tag --remove -d '1000000 0' bar
84 85 hg tip
85 86 hg tags
87
88 # test tag rank
89 cd ..
90 hg init t3
91 cd t3
92 echo foo > foo
93 hg add foo
94 hg ci -m 'add foo' -d '1000000 0' # rev 0
95 hg tag -d '1000000 0' bar # rev 1 bar -> 0
96 hg tag -d '1000000 0' bar # rev 2 bar -> 1
97 hg tag -d '1000000 0' -r 0 bar # rev 3 bar -> 0
98 hg tag -d '1000000 0' -r 1 bar # rev 4 bar -> 1
99 hg tag -d '1000000 0' -r 0 bar # rev 5 bar -> 0
100 hg tags
101 hg co 3
102 echo barbar > foo
103 hg ci -m 'change foo' -d '1000000 0' # rev 6
104 hg tags
@@ -1,51 +1,56
1 1 unknown
2 2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 0acdaf898367 tip
4 4 tip 0:0acdaf898367
5 5 This is a local tag with a really long name! 0:0acdaf898367
6 6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 7 tip 1:8a3ca90d111d
8 8 first 0:0acdaf898367
9 9 8a3ca90d111d tip
10 10 M a
11 11 8a3ca90d111d+ tip
12 12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 13 0acdaf898367+ first
14 14 0acdaf898367+ first
15 15 M a
16 16 8216907a933d tip
17 17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 18 (branch merge, don't forget to commit)
19 19 8216907a933d+8a3ca90d111d+ tip
20 20 M .hgtags
21 21 tip 6:e2174d339386
22 22 first 0:0acdaf898367
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
25 25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
26 26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
27 27 localtags, line 1: tag 'invalid' refers to unknown node
28 28 tip 8:4ca6f1b1a68c
29 29 first 0:0acdaf898367
30 30 changeset: 8:4ca6f1b1a68c
31 31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
32 32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
33 33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
34 34 localtags, line 1: tag 'invalid' refers to unknown node
35 35 tag: tip
36 36 parent: 3:b2ef3841386b
37 37 user: test
38 38 date: Mon Jan 12 13:46:40 1970 +0000
39 39 summary: head
40 40
41 41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 43 tip 4:36195b728445
44 44 bar 1:b204a97e6e8d
45 45 changeset: 5:57e1983b4a60
46 46 tag: tip
47 47 user: test
48 48 date: Mon Jan 12 13:46:40 1970 +0000
49 49 summary: Removed tag bar
50 50
51 51 tip 5:57e1983b4a60
52 tip 5:d8bb4d1eff25
53 bar 0:b409d9da318e
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 tip 6:b5ff9d142648
56 bar 0:b409d9da318e
General Comments 0
You need to be logged in to leave comments. Login now