##// END OF EJS Templates
use .extend instead of .append in readtags
Alexis S. L. Carvalho -
r4482:99f411ba default
parent child Browse files
Show More
@@ -1,2020 +1,2020 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
class localrepository(repo.repository):
    """Read/write access to a Mercurial repository on local disk."""
    # features advertised to remote peers
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk requirements this class knows how to handle
    supported = ('revlogv1', 'store')
    def __del__(self):
        # drop the transaction handle on teardown to break reference cycles
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui: ui object configuration is inherited from
        path: repository root; when None, search upward from the cwd
        create: when true, create a fresh repository at path

        Raises repo.RepoError when no repository is found, a required
        feature is unsupported, or create is requested on an existing repo.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from the cwd until a .hg directory is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, store files live under
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is fine
            pass

        # revlog format/flags come from the [revlog] config section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily populated caches; reload() invalidates the tag caches
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
128 128 def url(self):
129 129 return 'file:' + self.root
130 130
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name is `name` or "name.suffix".

        "python:mod.func" hooks are called in-process; anything else runs
        as a shell command with the keyword args exported as HG_* environment
        variables.  Returns a truthy value if any hook failed; with throw=True
        a failure raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: a nonzero exit status is a failure
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # run matching hooks in sorted (alphabetical) order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
211 211
212 212 tag_disallowed = ':\r\n'
213 213
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to tag if .hgtags appears in any of the status lists
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
257 257
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}

        def readtags(lines, fn):
            # parse one .hgtags-style file and merge it into globaltags;
            # fn is only used to label warning messages
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h collects nodes this tag previously pointed at
                # (its length acts as a rank when merging below)
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    continue
                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if bn != an and an in bh and \
                       (bn not in ah or len(bh) > len(ah)):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f)

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags")
        except IOError:
            pass

        # tags pointing at nullid are deletions and are dropped
        self.tagscache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
336 336
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head carrying a .hgtags file.

        heads() is reversed so the tip-most head comes last; when several
        heads share the same .hgtags filenode, only the last occurrence
        is kept (earlier entries are None'd out, then filtered).
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
354 354
355 355 def tagslist(self):
356 356 '''return a list of tags ordered by revision'''
357 357 l = []
358 358 for t, n in self.tags().items():
359 359 try:
360 360 r = self.changelog.rev(n)
361 361 except:
362 362 r = -2 # sort to the beginning of the list if unknown
363 363 l.append((r, t, n))
364 364 l.sort()
365 365 return [(t, n) for r, t, n in l]
366 366
367 367 def nodetags(self, node):
368 368 '''return the tags associated with a node'''
369 369 if not self.nodetagscache:
370 370 self.nodetagscache = {}
371 371 for t, n in self.tags().items():
372 372 self.nodetagscache.setdefault(n, []).append(t)
373 373 return self.nodetagscache.get(node, [])
374 374
375 375 def _branchtags(self):
376 376 partial, last, lrev = self._readbranchcache()
377 377
378 378 tiprev = self.changelog.count() - 1
379 379 if lrev != tiprev:
380 380 self._updatebranchcache(partial, lrev+1, tiprev+1)
381 381 self._writebranchcache(partial, self.changelog.tip(), tiprev)
382 382
383 383 return partial
384 384
385 385 def branchtags(self):
386 386 if self.branchcache is not None:
387 387 return self.branchcache
388 388
389 389 self.branchcache = {} # avoid recursion in changectx
390 390 partial = self._branchtags()
391 391
392 392 # the branch cache is stored on disk as UTF-8, but in the local
393 393 # charset internally
394 394 for k, v in partial.items():
395 395 self.branchcache[util.tolocal(k)] = v
396 396 return self.branchcache
397 397
    def _readbranchcache(self):
        """Parse .hg/branch.cache.

        Returns (partial, last, lrev): partial maps branch name -> node,
        and last/lrev identify the tip the cache was computed against.
        Any read, parse, or sanity-check failure yields an empty cache.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines record "<node hex> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: fall back to an empty one
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
425 425
426 426 def _writebranchcache(self, branches, tip, tiprev):
427 427 try:
428 428 f = self.opener("branch.cache", "w", atomictemp=True)
429 429 f.write("%s %s\n" % (hex(tip), tiprev))
430 430 for label, node in branches.iteritems():
431 431 f.write("%s %s\n" % (hex(node), label))
432 432 f.rename()
433 433 except (IOError, OSError):
434 434 pass
435 435
436 436 def _updatebranchcache(self, partial, start, end):
437 437 for r in xrange(start, end):
438 438 c = self.changectx(r)
439 439 b = c.branch()
440 440 partial[b] = c.node()
441 441
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: '.' (first dirstate parent) and 'null' specials,
        exact changelog match, tag name, branch name, then unambiguous
        partial node prefix.  Raises repo.RepoError when nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
460 460
461 461 def dev(self):
462 462 return os.lstat(self.path).st_dev
463 463
464 464 def local(self):
465 465 return True
466 466
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
469 469
470 470 def sjoin(self, f):
471 471 f = self.encodefn(f)
472 472 return os.path.join(self.spath, f)
473 473
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
476 476
477 477 def file(self, f):
478 478 if f[0] == '/':
479 479 f = f[1:]
480 480 return filelog.filelog(self.sopener, f, self.revlogversion)
481 481
    def changectx(self, changeid=None):
        # context object for the given changeset (tip-most if None)
        return context.changectx(self, changeid)
484 484
    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
487 487
488 488 def parents(self, changeid=None):
489 489 '''
490 490 get list of changectxs for parents of changeid or working directory
491 491 '''
492 492 if changeid is None:
493 493 pl = self.dirstate.parents()
494 494 else:
495 495 n = self.changelog.lookup(changeid)
496 496 pl = self.changelog.parents(n)
497 497 if pl[1] == nullid:
498 498 return [self.changectx(pl[0])]
499 499 return [self.changectx(pl[0]), self.changectx(pl[1])]
500 500
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
505 505
    def getcwd(self):
        # cwd relative to the repository root, as tracked by the dirstate
        return self.dirstate.getcwd()
508 508
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
511 511
512 512 def wread(self, filename):
513 513 if self.encodepats == None:
514 514 l = []
515 515 for pat, cmd in self.ui.configitems("encode"):
516 516 mf = util.matcher(self.root, "", [pat], [], [])[1]
517 517 l.append((mf, cmd))
518 518 self.encodepats = l
519 519
520 520 data = self.wopener(filename, 'r').read()
521 521
522 522 for mf, cmd in self.encodepats:
523 523 if mf(filename):
524 524 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
525 525 data = util.filter(data, cmd)
526 526 break
527 527
528 528 return data
529 529
530 530 def wwrite(self, filename, data, fd=None):
531 531 if self.decodepats == None:
532 532 l = []
533 533 for pat, cmd in self.ui.configitems("decode"):
534 534 mf = util.matcher(self.root, "", [pat], [], [])[1]
535 535 l.append((mf, cmd))
536 536 self.decodepats = l
537 537
538 538 for mf, cmd in self.decodepats:
539 539 if mf(filename):
540 540 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
541 541 data = util.filter(data, cmd)
542 542 break
543 543
544 544 if fd:
545 545 return fd.write(data)
546 546 return self.wopener(filename, 'w').write(data)
547 547
    def transaction(self):
        """Return a new transaction on the store, or a nested handle if
        one is already running.  The dirstate is journaled first so a
        rollback can restore it."""
        tr = self.transhandle
        if tr != None and tr.running():
            # nested transaction: reuse the one in flight
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on close, journal files are renamed to undo files for rollback()
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
567 567
568 568 def recover(self):
569 569 l = self.lock()
570 570 if os.path.exists(self.sjoin("journal")):
571 571 self.ui.status(_("rolling back interrupted transaction\n"))
572 572 transaction.rollback(self.sopener, self.sjoin("journal"))
573 573 self.reload()
574 574 return True
575 575 else:
576 576 self.ui.warn(_("no interrupted transaction available\n"))
577 577 return False
578 578
579 579 def rollback(self, wlock=None):
580 580 if not wlock:
581 581 wlock = self.wlock()
582 582 l = self.lock()
583 583 if os.path.exists(self.sjoin("undo")):
584 584 self.ui.status(_("rolling back last transaction\n"))
585 585 transaction.rollback(self.sopener, self.sjoin("undo"))
586 586 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
587 587 self.reload()
588 588 self.wreload()
589 589 else:
590 590 self.ui.warn(_("no rollback information available\n"))
591 591
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
594 594
    def reload(self):
        # re-read changelog and manifest, and drop tag caches that may
        # now be stale
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
600 600
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file `lockname` and return the lock object.

        wait: when the lock is held by someone else, retry with the
        configured ui.timeout (default 600s) instead of raising
        lock.LockHeld immediately.
        releasefn: callback invoked when the lock is released.
        acquirefn: callback invoked right after acquisition.
        desc: human-readable description used in messages.
        """
        try:
            # first try a non-blocking acquisition
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
616 616
    def lock(self, wait=1):
        # store lock; metadata is reloaded once acquired
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
620 620
    def wlock(self, wait=1):
        # working-directory lock; dirstate is written on release and
        # re-read on acquisition
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
625 625
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        manifest1/manifest2 are the manifests of the two parents;
        linkrev links the new filelog revision to the pending changeset.
        Returns the new filenode, and appends fn to changelist when the
        file actually changed.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
685 685
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit exactly `files` with explicit parents, bypassing the
        dirstate-based change detection; parents default to the
        dirstate's."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
691 691
692 692 def commit(self, files=None, text="", user=None, date=None,
693 693 match=util.always, force=False, lock=None, wlock=None,
694 694 force_editor=False, p1=None, p2=None, extra={}):
695 695
696 696 commit = []
697 697 remove = []
698 698 changed = []
699 699 use_dirstate = (p1 is None) # not rawcommit
700 700 extra = extra.copy()
701 701
702 702 if use_dirstate:
703 703 if files:
704 704 for f in files:
705 705 s = self.dirstate.state(f)
706 706 if s in 'nmai':
707 707 commit.append(f)
708 708 elif s == 'r':
709 709 remove.append(f)
710 710 else:
711 711 self.ui.warn(_("%s not tracked!\n") % f)
712 712 else:
713 713 changes = self.status(match=match)[:5]
714 714 modified, added, removed, deleted, unknown = changes
715 715 commit = modified + added
716 716 remove = removed
717 717 else:
718 718 commit = files
719 719
720 720 if use_dirstate:
721 721 p1, p2 = self.dirstate.parents()
722 722 update_dirstate = True
723 723 else:
724 724 p1, p2 = p1, p2 or nullid
725 725 update_dirstate = (self.dirstate.parents()[0] == p1)
726 726
727 727 c1 = self.changelog.read(p1)
728 728 c2 = self.changelog.read(p2)
729 729 m1 = self.manifest.read(c1[0]).copy()
730 730 m2 = self.manifest.read(c2[0])
731 731
732 732 if use_dirstate:
733 733 branchname = self.workingctx().branch()
734 734 try:
735 735 branchname = branchname.decode('UTF-8').encode('UTF-8')
736 736 except UnicodeDecodeError:
737 737 raise util.Abort(_('branch name not in UTF-8!'))
738 738 else:
739 739 branchname = ""
740 740
741 741 if use_dirstate:
742 742 oldname = c1[5].get("branch") # stored in UTF-8
743 743 if not commit and not remove and not force and p2 == nullid and \
744 744 branchname == oldname:
745 745 self.ui.status(_("nothing changed\n"))
746 746 return None
747 747
748 748 xp1 = hex(p1)
749 749 if p2 == nullid: xp2 = ''
750 750 else: xp2 = hex(p2)
751 751
752 752 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
753 753
754 754 if not wlock:
755 755 wlock = self.wlock()
756 756 if not lock:
757 757 lock = self.lock()
758 758 tr = self.transaction()
759 759
760 760 # check in files
761 761 new = {}
762 762 linkrev = self.changelog.count()
763 763 commit.sort()
764 764 for f in commit:
765 765 self.ui.note(f + "\n")
766 766 try:
767 767 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
768 768 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
769 769 except IOError:
770 770 if use_dirstate:
771 771 self.ui.warn(_("trouble committing %s!\n") % f)
772 772 raise
773 773 else:
774 774 remove.append(f)
775 775
776 776 # update manifest
777 777 m1.update(new)
778 778 remove.sort()
779 779
780 780 for f in remove:
781 781 if f in m1:
782 782 del m1[f]
783 783 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
784 784
785 785 # add changeset
786 786 new = new.keys()
787 787 new.sort()
788 788
789 789 user = user or self.ui.username()
790 790 if not text or force_editor:
791 791 edittext = []
792 792 if text:
793 793 edittext.append(text)
794 794 edittext.append("")
795 795 edittext.append("HG: user: %s" % user)
796 796 if p2 != nullid:
797 797 edittext.append("HG: branch merge")
798 798 edittext.extend(["HG: changed %s" % f for f in changed])
799 799 edittext.extend(["HG: removed %s" % f for f in remove])
800 800 if not changed and not remove:
801 801 edittext.append("HG: no files changed")
802 802 edittext.append("")
803 803 # run editor in the repository root
804 804 olddir = os.getcwd()
805 805 os.chdir(self.root)
806 806 text = self.ui.edit("\n".join(edittext), user)
807 807 os.chdir(olddir)
808 808
809 809 lines = [line.rstrip() for line in text.rstrip().splitlines()]
810 810 while lines and not lines[0]:
811 811 del lines[0]
812 812 if not lines:
813 813 return None
814 814 text = '\n'.join(lines)
815 815 if branchname:
816 816 extra["branch"] = branchname
817 817 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
818 818 user, date, extra)
819 819 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
820 820 parent2=xp2)
821 821 tr.close()
822 822
823 823 if use_dirstate or update_dirstate:
824 824 self.dirstate.setparents(n)
825 825 if use_dirstate:
826 826 self.dirstate.update(new, "n")
827 827 self.dirstate.forget(remove)
828 828
829 829 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
830 830 return n
831 831
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was never matched in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
865 865
866 866 def status(self, node1=None, node2=None, files=[], match=util.always,
867 867 wlock=None, list_ignored=False, list_clean=False):
868 868 """return status of files between two nodes or node and working directory
869 869
870 870 If node1 is None, use the first dirstate parent instead.
871 871 If node2 is None, compare node1 with working directory.
872 872 """
873 873
874 874 def fcmp(fn, mf):
875 875 t1 = self.wread(fn)
876 876 return self.file(fn).cmp(mf.get(fn, nullid), t1)
877 877
878 878 def mfmatches(node):
879 879 change = self.changelog.read(node)
880 880 mf = self.manifest.read(change[0]).copy()
881 881 for fn in mf.keys():
882 882 if not match(fn):
883 883 del mf[fn]
884 884 return mf
885 885
886 886 modified, added, removed, deleted, unknown = [], [], [], [], []
887 887 ignored, clean = [], []
888 888
889 889 compareworking = False
890 890 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
891 891 compareworking = True
892 892
893 893 if not compareworking:
894 894 # read the manifest from node1 before the manifest from node2,
895 895 # so that we'll hit the manifest cache if we're going through
896 896 # all the revisions in parent->child order.
897 897 mf1 = mfmatches(node1)
898 898
899 899 # are we comparing the working directory?
900 900 if not node2:
901 901 if not wlock:
902 902 try:
903 903 wlock = self.wlock(wait=0)
904 904 except lock.LockException:
905 905 wlock = None
906 906 (lookup, modified, added, removed, deleted, unknown,
907 907 ignored, clean) = self.dirstate.status(files, match,
908 908 list_ignored, list_clean)
909 909
910 910 # are we comparing working dir against its parent?
911 911 if compareworking:
912 912 if lookup:
913 913 # do a full compare of any files that might have changed
914 914 mf2 = mfmatches(self.dirstate.parents()[0])
915 915 for f in lookup:
916 916 if fcmp(f, mf2):
917 917 modified.append(f)
918 918 else:
919 919 clean.append(f)
920 920 if wlock is not None:
921 921 self.dirstate.update([f], "n")
922 922 else:
923 923 # we are comparing working dir against non-parent
924 924 # generate a pseudo-manifest for the working dir
925 925 # XXX: create it in dirstate.py ?
926 926 mf2 = mfmatches(self.dirstate.parents()[0])
927 927 for f in lookup + modified + added:
928 928 mf2[f] = ""
929 929 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
930 930 for f in removed:
931 931 if f in mf2:
932 932 del mf2[f]
933 933 else:
934 934 # we are comparing two revisions
935 935 mf2 = mfmatches(node2)
936 936
937 937 if not compareworking:
938 938 # flush lists from dirstate before comparing manifests
939 939 modified, added, clean = [], [], []
940 940
941 941 # make sure to sort the files so we talk to the disk in a
942 942 # reasonable order
943 943 mf2keys = mf2.keys()
944 944 mf2keys.sort()
945 945 for fn in mf2keys:
946 946 if mf1.has_key(fn):
947 947 if mf1.flags(fn) != mf2.flags(fn) or \
948 948 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
949 949 modified.append(fn)
950 950 elif list_clean:
951 951 clean.append(fn)
952 952 del mf1[fn]
953 953 else:
954 954 added.append(fn)
955 955
956 956 removed = mf1.keys()
957 957
958 958 # sort and return results:
959 959 for l in modified, added, removed, deleted, unknown, ignored, clean:
960 960 l.sort()
961 961 return (modified, added, removed, deleted, unknown, ignored, clean)
962 962
963 963 def add(self, list, wlock=None):
964 964 if not wlock:
965 965 wlock = self.wlock()
966 966 for f in list:
967 967 p = self.wjoin(f)
968 968 if not os.path.exists(p):
969 969 self.ui.warn(_("%s does not exist!\n") % f)
970 970 elif not os.path.isfile(p):
971 971 self.ui.warn(_("%s not added: only files supported currently\n")
972 972 % f)
973 973 elif self.dirstate.state(f) in 'an':
974 974 self.ui.warn(_("%s already tracked!\n") % f)
975 975 else:
976 976 self.dirstate.update([f], "a")
977 977
978 978 def forget(self, list, wlock=None):
979 979 if not wlock:
980 980 wlock = self.wlock()
981 981 for f in list:
982 982 if self.dirstate.state(f) not in 'ai':
983 983 self.ui.warn(_("%s not added!\n") % f)
984 984 else:
985 985 self.dirstate.forget([f])
986 986
    def remove(self, list, unlink=False, wlock=None):
        """Schedule the given files for removal at the next commit.

        If unlink is true, also delete the files from the working
        directory (files already gone are silently ignored).  A file
        that is still present (when unlink is false) draws a warning; a
        file that was only scheduled for add is simply forgotten; an
        untracked file draws a warning; otherwise the file is marked
        'r' (removed) in the dirstate.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone needs no deletion
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # the file must be gone before we record the removal
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the scheduled add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # mark as removed for the next commit
                self.dirstate.update([f], "r")
1007 1007
1008 1008 def undelete(self, list, wlock=None):
1009 1009 p = self.dirstate.parents()[0]
1010 1010 mn = self.changelog.read(p)[0]
1011 1011 m = self.manifest.read(mn)
1012 1012 if not wlock:
1013 1013 wlock = self.wlock()
1014 1014 for f in list:
1015 1015 if self.dirstate.state(f) not in "r":
1016 1016 self.ui.warn("%s not removed!\n" % f)
1017 1017 else:
1018 1018 t = self.file(f).read(m[f])
1019 1019 self.wwrite(f, t)
1020 1020 util.set_exec(self.wjoin(f), m.execf(f))
1021 1021 self.dirstate.update([f], "n")
1022 1022
1023 1023 def copy(self, source, dest, wlock=None):
1024 1024 p = self.wjoin(dest)
1025 1025 if not os.path.exists(p):
1026 1026 self.ui.warn(_("%s does not exist!\n") % dest)
1027 1027 elif not os.path.isfile(p):
1028 1028 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1029 1029 else:
1030 1030 if not wlock:
1031 1031 wlock = self.wlock()
1032 1032 if self.dirstate.state(dest) == '?':
1033 1033 self.dirstate.update([dest], "a")
1034 1034 self.dirstate.copy(source, dest)
1035 1035
1036 1036 def heads(self, start=None):
1037 1037 heads = self.changelog.heads(start)
1038 1038 # sort the output in rev descending order
1039 1039 heads = [(-self.changelog.rev(h), h) for h in heads]
1040 1040 heads.sort()
1041 1041 return [n for (r, n) in heads]
1042 1042
1043 1043 # branchlookup returns a dict giving a list of branches for
1044 1044 # each head. A branch is defined as the tag of a node or
1045 1045 # the branch of the node's parents. If a node has multiple
1046 1046 # branch tags, tags are eliminated if they are visible from other
1047 1047 # branch tags.
1048 1048 #
1049 1049 # So, for this graph: a->b->c->d->e
1050 1050 # \ /
1051 1051 # aa -----/
1052 1052 # a has tag 2.6.12
1053 1053 # d has tag 2.6.13
1054 1054 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1055 1055 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1056 1056 # from the list.
1057 1057 #
    # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
1062 1062 #
1063 1063 # passing in a specific branch will limit the depth of the search
1064 1064 # through the parents. It won't limit the branches returned in the
1065 1065 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the list of branch tags
        visible from it (see the block comment above for the full
        semantics).

        heads defaults to self.heads().  Passing a specific branch only
        limits the depth of the parent traversal; it does not restrict
        which branches appear in the result.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume at a second parent saved earlier, carrying the
                # tags found so far on that path
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # tag/head collected on the way down
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is found
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent for a later traversal
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of 'branches', memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other branch tag of h
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1148 1148
1149 1149 def branches(self, nodes):
1150 1150 if not nodes:
1151 1151 nodes = [self.changelog.tip()]
1152 1152 b = []
1153 1153 for n in nodes:
1154 1154 t = n
1155 1155 while 1:
1156 1156 p = self.changelog.parents(n)
1157 1157 if p[1] != nullid or p[0] == nullid:
1158 1158 b.append((t, n, p[0], p[1]))
1159 1159 break
1160 1160 n = p[0]
1161 1161 return b
1162 1162
1163 1163 def between(self, pairs):
1164 1164 r = []
1165 1165
1166 1166 for top, bottom in pairs:
1167 1167 n, l, i = top, [], 0
1168 1168 f = 1
1169 1169
1170 1170 while n != bottom:
1171 1171 p = self.changelog.parents(n)[0]
1172 1172 if i == f:
1173 1173 l.append(n)
1174 1174 f = f * 2
1175 1175 n = p
1176 1176 i += 1
1177 1177
1178 1178 r.append(l)
1179 1179
1180 1180 return r
1181 1181
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}      # earliest unknown changesets (the result set)
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # ask the remote about still-unknown parents next round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # batch the branches queries ten nodes at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # 'between' returns nodes at exponentially growing distances
            # from n[0]; appending n[1] makes the list cover the full range
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # interval of size <= 2: p is the earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # keep bisecting inside the (p, i) interval
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): short(f[:4]) hexlifies only the first 4
                # bytes of the node; short(f) was probably intended --
                # confirm before changing the message
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1322 1322
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            # findincoming fills 'base' in place with the common nodes
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1370 1370
1371 1371 def pull(self, remote, heads=None, force=False, lock=None):
1372 1372 mylock = False
1373 1373 if not lock:
1374 1374 lock = self.lock()
1375 1375 mylock = True
1376 1376
1377 1377 try:
1378 1378 fetch = self.findincoming(remote, force=force)
1379 1379 if fetch == [nullid]:
1380 1380 self.ui.status(_("requesting all changes\n"))
1381 1381
1382 1382 if not fetch:
1383 1383 self.ui.status(_("no changes found\n"))
1384 1384 return 0
1385 1385
1386 1386 if heads is None:
1387 1387 cg = remote.changegroup(fetch, 'pull')
1388 1388 else:
1389 1389 if 'changegroupsubset' not in remote.capabilities:
1390 1390 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1391 1391 cg = remote.changegroupsubset(fetch, heads, 'pull')
1392 1392 return self.addchangegroup(cg, 'pull', remote.url())
1393 1393 finally:
1394 1394 if mylock:
1395 1395 lock.release()
1396 1396
1397 1397 def push(self, remote, force=False, revs=None):
1398 1398 # there are two ways to push to remote repo:
1399 1399 #
1400 1400 # addchangegroup assumes local user can lock remote
1401 1401 # repo (local filesystem, old ssh servers).
1402 1402 #
1403 1403 # unbundle assumes local user cannot lock remote repo (new ssh
1404 1404 # servers, http servers).
1405 1405
1406 1406 if remote.capable('unbundle'):
1407 1407 return self.push_unbundle(remote, force, revs)
1408 1408 return self.push_addchangegroup(remote, force, revs)
1409 1409
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push and sanity-check the push.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or when the push would create new
        remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' with the common nodes; a non-empty
        # 'inc' means the remote has changes we have not pulled yet
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: anything goes
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # unknown remote head remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1465 1465
1466 1466 def push_addchangegroup(self, remote, force, revs):
1467 1467 lock = remote.lock()
1468 1468
1469 1469 ret = self.prepush(remote, force, revs)
1470 1470 if ret[0] is not None:
1471 1471 cg, remote_heads = ret
1472 1472 return remote.addchangegroup(cg, 'push', self.url())
1473 1473 return ret[1]
1474 1474
1475 1475 def push_unbundle(self, remote, force, revs):
1476 1476 # local repo finds heads on server, finds out what revs it
1477 1477 # must push. once revs transferred, if server finds it has
1478 1478 # different heads (someone else won commit/push race), server
1479 1479 # aborts.
1480 1480
1481 1481 ret = self.prepush(remote, force, revs)
1482 1482 if ret[0] is not None:
1483 1483 cg, remote_heads = ret
1484 1484 if force: remote_heads = ['force']
1485 1485 return remote.unbundle(cg, remote_heads, 'push')
1486 1486 return ret[1]
1487 1487
1488 1488 def changegroupinfo(self, nodes):
1489 1489 self.ui.note(_("%d changesets found\n") % len(nodes))
1490 1490 if self.ui.debugflag:
1491 1491 self.ui.debug(_("List of changesets:\n"))
1492 1492 for node in nodes:
1493 1493 self.ui.debug("%s\n" % hex(node))
1494 1494
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        'source' only tags the operation for the preoutgoing/outgoing
        hooks ('push', 'pull', ...).  Returns a util.chunkbuffer that
        yields the raw changegroup data."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1766 1766
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        As in changegroupsubset, 'source' only tags the operation for
        the preoutgoing/outgoing hooks.  Returns a util.chunkbuffer
        yielding the raw changegroup data."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revision numbers of the outgoing changesets, for fast
        # membership tests in gennodelst
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset always belongs to itself
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to the changeset that introduced it
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changesets first ...
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # ... then the manifests ...
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # ... and finally one group per changed file, in sorted order
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1833 1833
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream of changegroup chunks (as produced by
        changegroup/changegroupsubset); srctype and url are passed
        through to the hooks.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev callback for the changelog: each incoming changeset
            # links to the revision number it is about to be assigned
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # linkrev callback for manifests/files: map a changelog node
            # to its (already added) revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog tip revision before/after the add
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk instead of a filename ends the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appendfile data so the real changelog reopened
            # below sees the new revisions
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still veto (raise) before tr.close()
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-transaction notifications: one 'changegroup' hook for
            # the whole group, one 'incoming' hook per added changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1940 1940
1941 1941
1942 1942 def stream_in(self, remote):
1943 1943 fp = remote.stream_out()
1944 1944 l = fp.readline()
1945 1945 try:
1946 1946 resp = int(l)
1947 1947 except ValueError:
1948 1948 raise util.UnexpectedOutput(
1949 1949 _('Unexpected response from remote server:'), l)
1950 1950 if resp == 1:
1951 1951 raise util.Abort(_('operation forbidden by server'))
1952 1952 elif resp == 2:
1953 1953 raise util.Abort(_('locking the remote repository failed'))
1954 1954 elif resp != 0:
1955 1955 raise util.Abort(_('the server sent an unknown error code'))
1956 1956 self.ui.status(_('streaming all changes\n'))
1957 1957 l = fp.readline()
1958 1958 try:
1959 1959 total_files, total_bytes = map(int, l.split(' ', 1))
1960 1960 except ValueError, TypeError:
1961 1961 raise util.UnexpectedOutput(
1962 1962 _('Unexpected response from remote server:'), l)
1963 1963 self.ui.status(_('%d files to transfer, %s of data\n') %
1964 1964 (total_files, util.bytecount(total_bytes)))
1965 1965 start = time.time()
1966 1966 for i in xrange(total_files):
1967 1967 # XXX doesn't support '\n' or '\r' in filenames
1968 1968 l = fp.readline()
1969 1969 try:
1970 1970 name, size = l.split('\0', 1)
1971 1971 size = int(size)
1972 1972 except ValueError, TypeError:
1973 1973 raise util.UnexpectedOutput(
1974 1974 _('Unexpected response from remote server:'), l)
1975 1975 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1976 1976 ofp = self.sopener(name, 'w')
1977 1977 for chunk in util.filechunkiter(fp, limit=size):
1978 1978 ofp.write(chunk)
1979 1979 ofp.close()
1980 1980 elapsed = time.time() - start
1981 1981 if elapsed <= 0:
1982 1982 elapsed = 0.001
1983 1983 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1984 1984 (util.bytecount(total_bytes), elapsed,
1985 1985 util.bytecount(total_bytes / elapsed)))
1986 1986 self.reload()
1987 1987 return len(self.heads()) + 1
1988 1988
1989 1989 def clone(self, remote, heads=[], stream=False):
1990 1990 '''clone remote repository.
1991 1991
1992 1992 keyword arguments:
1993 1993 heads: list of revs to clone (forces use of pull)
1994 1994 stream: use streaming clone if possible'''
1995 1995
1996 1996 # now, all clients that can request uncompressed clones can
1997 1997 # read repo formats supported by all servers that can serve
1998 1998 # them.
1999 1999
2000 2000 # if revlog format changes, client will have to check version
2001 2001 # and format flags on "stream" capability, and use
2002 2002 # uncompressed only if compatible.
2003 2003
2004 2004 if stream and not heads and remote.capable('stream'):
2005 2005 return self.stream_in(remote)
2006 2006 return self.pull(remote, heads)
2007 2007
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (src, dest) pair in files.

    The pairs are snapshotted into plain tuples up front so the returned
    closure holds no reference back to the caller's structures.
    """
    pending = [(src, dest) for src, dest in files]
    def renameall():
        for source, target in pending:
            util.rename(source, target)
    return renameall
2015 2015
def instance(ui, path, create):
    """Open (or create) a localrepository at a local filesystem path,
    stripping any leading 'file:' scheme from the path first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2018 2018
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,107 +1,124 b''
1 1 #!/bin/sh
2 2
3 3 mkdir t
4 4 cd t
5 5 hg init
6 6 hg id
7 7 echo a > a
8 8 hg add a
9 9 hg commit -m "test" -d "1000000 0"
10 10 hg co
11 11 hg identify
12 12 T=`hg tip --debug | head -n 1 | cut -d : -f 3`
13 13 hg tag -l "This is a local tag with a really long name!"
14 14 hg tags
15 15 rm .hg/localtags
16 16 echo "$T first" > .hgtags
17 17 cat .hgtags
18 18 hg add .hgtags
19 19 hg commit -m "add tags" -d "1000000 0"
20 20 hg tags
21 21 hg identify
22 22 echo bb > a
23 23 hg status
24 24 hg identify
25 25 hg co first
26 26 hg id
27 27 hg -v id
28 28 hg status
29 29 echo 1 > b
30 30 hg add b
31 31 hg commit -m "branch" -d "1000000 0"
32 32 hg id
33 33 hg merge 1
34 34 hg id
35 35 hg status
36 36
37 37 hg commit -m "merge" -d "1000000 0"
38 38
39 39 # create fake head, make sure tag not visible afterwards
40 40 cp .hgtags tags
41 41 hg tag -d "1000000 0" last
42 42 hg rm .hgtags
43 43 hg commit -m "remove" -d "1000000 0"
44 44
45 45 mv tags .hgtags
46 46 hg add .hgtags
47 47 hg commit -m "readd" -d "1000000 0"
48 48
49 49 hg tags
50 50
51 51 # invalid tags
52 52 echo "spam" >> .hgtags
53 53 echo >> .hgtags
54 54 echo "foo bar" >> .hgtags
55 55 echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
56 56 hg commit -m "tags" -d "1000000 0"
57 57
58 58 # report tag parse error on other head
59 59 hg up 3
60 60 echo 'x y' >> .hgtags
61 61 hg commit -m "head" -d "1000000 0"
62 62
63 63 hg tags
64 64 hg tip
65 65
66 66 # test tag precedence rules
67 67 cd ..
68 68 hg init t2
69 69 cd t2
70 70 echo foo > foo
71 71 hg add foo
72 72 hg ci -m 'add foo' -d '1000000 0' # rev 0
73 73 hg tag -d '1000000 0' bar # rev 1
74 74 echo >> foo
75 75 hg ci -m 'change foo 1' -d '1000000 0' # rev 2
76 76 hg up -C 1
77 77 hg tag -r 1 -d '1000000 0' -f bar # rev 3
78 78 hg up -C 1
79 79 echo >> foo
80 80 hg ci -m 'change foo 2' -d '1000000 0' # rev 4
81 81 hg tags
82 82
83 83 # test tag removal
84 84 hg tag --remove -d '1000000 0' bar
85 85 hg tip
86 86 hg tags
87 87
88 88 # test tag rank
89 89 cd ..
90 90 hg init t3
91 91 cd t3
92 92 echo foo > foo
93 93 hg add foo
94 94 hg ci -m 'add foo' -d '1000000 0' # rev 0
95 95 hg tag -d '1000000 0' -f bar # rev 1 bar -> 0
96 96 hg tag -d '1000000 0' -f bar # rev 2 bar -> 1
97 97 hg tag -d '1000000 0' -fr 0 bar # rev 3 bar -> 0
98 98 hg tag -d '1000000 0' -fr 1 bar # rev 3 bar -> 1
99 99 hg tag -d '1000000 0' -fr 0 bar # rev 4 bar -> 0
100 100 hg tags
101 101 hg co 3
102 102 echo barbar > foo
103 103 hg ci -m 'change foo' -d '1000000 0' # rev 0
104 104 hg tags
105 105
106 106 hg tag -d '1000000 0' -r 3 bar # should complain
107 hg tags No newline at end of file
107 hg tags
108
109 # test tag rank with 3 heads
110 cd ..
111 hg init t4
112 cd t4
113 echo foo > foo
114 hg add
115 hg ci -m 'add foo' -d '0 0' # rev 0
116 hg tag -d '0 0' bar # rev 1 bar -> 0
117 hg tag -d '0 0' -f bar # rev 2 bar -> 1
118 hg up -qC 0
119 hg tag -d '0 0' -fr 2 bar # rev 3 bar -> 2
120 hg tags
121 hg up -qC 0
122 hg tag -d '0 0' -m 'retag rev 0' -fr 0 bar # rev 4 bar -> 0, but bar stays at 2
123 echo % bar should still point to rev 2
124 hg tags
@@ -1,59 +1,65 b''
1 1 unknown
2 2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 0acdaf898367 tip
4 4 tip 0:0acdaf898367
5 5 This is a local tag with a really long name! 0:0acdaf898367
6 6 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
7 7 tip 1:8a3ca90d111d
8 8 first 0:0acdaf898367
9 9 8a3ca90d111d tip
10 10 M a
11 11 8a3ca90d111d+ tip
12 12 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
13 13 0acdaf898367+ first
14 14 0acdaf898367+ first
15 15 M a
16 16 8216907a933d tip
17 17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 18 (branch merge, don't forget to commit)
19 19 8216907a933d+8a3ca90d111d+ tip
20 20 M .hgtags
21 21 tip 6:e2174d339386
22 22 first 0:0acdaf898367
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 .hgtags@c071f74ab5eb, line 2: cannot parse entry
25 25 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
26 26 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
27 27 localtags, line 1: tag 'invalid' refers to unknown node
28 28 tip 8:4ca6f1b1a68c
29 29 first 0:0acdaf898367
30 30 changeset: 8:4ca6f1b1a68c
31 31 .hgtags@c071f74ab5eb, line 2: cannot parse entry
32 32 .hgtags@c071f74ab5eb, line 4: node 'foo' is not well formed
33 33 .hgtags@4ca6f1b1a68c, line 2: node 'x' is not well formed
34 34 localtags, line 1: tag 'invalid' refers to unknown node
35 35 tag: tip
36 36 parent: 3:b2ef3841386b
37 37 user: test
38 38 date: Mon Jan 12 13:46:40 1970 +0000
39 39 summary: head
40 40
41 41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 43 tip 4:36195b728445
44 44 bar 1:b204a97e6e8d
45 45 changeset: 5:57e1983b4a60
46 46 tag: tip
47 47 user: test
48 48 date: Mon Jan 12 13:46:40 1970 +0000
49 49 summary: Removed tag bar
50 50
51 51 tip 5:57e1983b4a60
52 52 tip 5:d8bb4d1eff25
53 53 bar 0:b409d9da318e
54 54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 55 tip 6:b5ff9d142648
56 56 bar 0:b409d9da318e
57 57 abort: a tag named bar already exists (use -f to force)
58 58 tip 6:b5ff9d142648
59 59 bar 0:b409d9da318e
60 adding foo
61 tip 3:ca8479b4351c
62 bar 2:72b852876a42
63 % bar should still point to rev 2
64 tip 4:40af5d225513
65 bar 2:72b852876a42
General Comments 0
You need to be logged in to leave comments. Login now