Split branchtags into two additional functions....
Alexis S. L. Carvalho - r3491:23cffef5 default
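The patch pulls the cache handling out of branchtags(): reading .hg/branches.cache moves into _readbranchcache(), scanning the revisions the cache does not yet cover moves into _updatebranchcache(), and _writebranchcache() takes explicit branches/tip/tiprev arguments instead of reaching into self.branchcache. The sketch below mirrors that control flow in a self-contained form; it is an illustration of the shape of the refactor, not the real localrepo code (revision numbers stand in for node hashes, and the changelog and cache file are faked):

    # A minimal sketch of the refactored branchtags() flow.
    class BranchCacheDemo(object):
        def __init__(self, branch_per_rev):
            # one branch name (or None) per revision, oldest first
            self._branches = branch_per_rev
            self._disk = None          # simulated branches.cache
            self.branchcache = None

        def _readbranchcache(self):
            # the real method parses .hg/branches.cache and sanity-checks
            # the recorded tip before trusting the cached entries
            if self._disk is None:
                return {}, -1          # no cache: rescan from revision 0
            lrev, partial = self._disk
            return dict(partial), lrev

        def _updatebranchcache(self, partial, start, end):
            # walk only the revisions the cache does not cover yet;
            # later revisions win, as in the patched method
            for r in range(start, end):
                b = self._branches[r]
                if b:
                    partial[b] = r

        def _writebranchcache(self, branches, tiprev):
            self._disk = (tiprev, dict(branches))

        def branchtags(self):
            if self.branchcache is not None:
                return self.branchcache
            self.branchcache = {}      # placeholder to avoid recursion
            partial, lrev = self._readbranchcache()
            tiprev = len(self._branches) - 1
            if lrev != tiprev:
                self._updatebranchcache(partial, lrev + 1, tiprev + 1)
                self._writebranchcache(partial, tiprev)
            self.branchcache = partial
            return self.branchcache

    repo = BranchCacheDemo([None, "stable", None, "default"])
    assert repo.branchtags() == {"stable": 1, "default": 3}
    assert repo._disk[0] == 3          # cache persisted up to tip

In the patched method itself, _readbranchcache() additionally returns the cached tip node so that a cache written against since-rewritten history fails the sanity check (self.changelog.node(lrev) == last) and is discarded, and _writebranchcache() swallows IOError so that read-only repositories keep working.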
@@ -1,1817 +1,1825
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.configrevlog()
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just use the version from the changelog
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.branchcache = None
83 83 self.nodetagscache = None
84 84 self.encodepats = None
85 85 self.decodepats = None
86 86 self.transhandle = None
87 87
88 88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
93 93 def hook(self, name, throw=False, **args):
94 94 def callhook(hname, funcname):
95 95 '''call python hook. hook is callable object, looked up as
96 96 name in python module. if callable returns "true", hook
97 97 fails, else passes. if hook raises exception, treated as
98 98 hook failure. exception propagates if throw is "true".
99 99
100 100 reason for "true" meaning "hook failed" is so that
101 101 unmodified commands (e.g. mercurial.commands.update) can
102 102 be run as hooks without wrappers to convert return values.'''
103 103
104 104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 105 d = funcname.rfind('.')
106 106 if d == -1:
107 107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 108 % (hname, funcname))
109 109 modname = funcname[:d]
110 110 try:
111 111 obj = __import__(modname)
112 112 except ImportError:
113 113 try:
114 114 # extensions are loaded with hgext_ prefix
115 115 obj = __import__("hgext_%s" % modname)
116 116 except ImportError:
117 117 raise util.Abort(_('%s hook is invalid '
118 118 '(import of "%s" failed)') %
119 119 (hname, modname))
120 120 try:
121 121 for p in funcname.split('.')[1:]:
122 122 obj = getattr(obj, p)
123 123 except AttributeError, err:
124 124 raise util.Abort(_('%s hook is invalid '
125 125 '("%s" is not defined)') %
126 126 (hname, funcname))
127 127 if not callable(obj):
128 128 raise util.Abort(_('%s hook is invalid '
129 129 '("%s" is not callable)') %
130 130 (hname, funcname))
131 131 try:
132 132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 133 except (KeyboardInterrupt, util.SignalInterrupt):
134 134 raise
135 135 except Exception, exc:
136 136 if isinstance(exc, util.Abort):
137 137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 138 (hname, exc.args[0]))
139 139 else:
140 140 self.ui.warn(_('error: %s hook raised an exception: '
141 141 '%s\n') % (hname, exc))
142 142 if throw:
143 143 raise
144 144 self.ui.print_exc()
145 145 return True
146 146 if r:
147 147 if throw:
148 148 raise util.Abort(_('%s hook failed') % hname)
149 149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 150 return r
151 151
152 152 def runhook(name, cmd):
153 153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 155 r = util.system(cmd, environ=env, cwd=self.root)
156 156 if r:
157 157 desc, r = util.explain_exit(r)
158 158 if throw:
159 159 raise util.Abort(_('%s hook %s') % (name, desc))
160 160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 161 return r
162 162
163 163 r = False
164 164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 165 if hname.split(".", 1)[0] == name and cmd]
166 166 hooks.sort()
167 167 for hname, cmd in hooks:
168 168 if cmd.startswith('python:'):
169 169 r = callhook(hname, cmd[7:].strip()) or r
170 170 else:
171 171 r = runhook(hname, cmd) or r
172 172 return r
173 173
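# An illustrative hgrc snippet (hypothetical hook names) showing the two
# flavours the dispatcher above handles: plain commands go through
# runhook/util.system with each keyword argument exported as an HG_*
# environment variable, while the 'python:' prefix routes to callhook:
#
#   [hooks]
#   commit = echo committed $HG_NODE
#   pretxncommit.check = python:mymod.checkhook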
174 174 tag_disallowed = ':\r\n'
175 175
176 176 def tag(self, name, node, message, local, user, date):
177 177 '''tag a revision with a symbolic name.
178 178
179 179 if local is True, the tag is stored in a per-repository file.
180 180 otherwise, it is stored in the .hgtags file, and a new
181 181 changeset is committed with the change.
182 182
183 183 keyword arguments:
184 184
185 185 local: whether to store tag in non-version-controlled file
186 186 (default False)
187 187
188 188 message: commit message to use if committing
189 189
190 190 user: name of user to use if committing
191 191
192 192 date: date tuple to use if committing'''
193 193
194 194 for c in self.tag_disallowed:
195 195 if c in name:
196 196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 197
198 198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 199
200 200 if local:
201 201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203 return
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 211 if self.dirstate.state('.hgtags') == '?':
212 212 self.add(['.hgtags'])
213 213
214 214 self.commit(['.hgtags'], message, user, date)
215 215 self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = line.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 heads = self.heads()
247 247 heads.reverse()
248 248 fl = self.file(".hgtags")
249 249 for node in heads:
250 250 change = self.changelog.read(node)
251 251 rev = self.changelog.rev(node)
252 252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 253 if fn is None: continue
254 254 count = 0
255 255 for l in fl.read(fn).splitlines():
256 256 count += 1
257 257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 258 (rev, short(node), count))
259 259 try:
260 260 f = self.opener("localtags")
261 261 count = 0
262 262 for l in f:
263 263 count += 1
264 264 parsetag(l, _("localtags, line %d") % count)
265 265 except IOError:
266 266 pass
267 267
268 268 self.tagscache['tip'] = self.changelog.tip()
269 269
270 270 return self.tagscache
271 271
272 272 def tagslist(self):
273 273 '''return a list of tags ordered by revision'''
274 274 l = []
275 275 for t, n in self.tags().items():
276 276 try:
277 277 r = self.changelog.rev(n)
278 278 except:
279 279 r = -2 # sort to the beginning of the list if unknown
280 280 l.append((r, t, n))
281 281 l.sort()
282 282 return [(t, n) for r, t, n in l]
283 283
284 284 def nodetags(self, node):
285 285 '''return the tags associated with a node'''
286 286 if not self.nodetagscache:
287 287 self.nodetagscache = {}
288 288 for t, n in self.tags().items():
289 289 self.nodetagscache.setdefault(n, []).append(t)
290 290 return self.nodetagscache.get(node, [])
291 291
292 292 def branchtags(self):
293 293 if self.branchcache != None:
294 294 return self.branchcache
295 295
296 296 self.branchcache = {} # avoid recursion in changectx
297 297
298 partial, last, lrev = self._readbranchcache()
299
300 tiprev = self.changelog.count() - 1
301 if lrev != tiprev:
302 self._updatebranchcache(partial, lrev+1, tiprev+1)
303 self._writebranchcache(partial, self.changelog.tip(), tiprev)
304
305 self.branchcache = partial
306 return self.branchcache
307
308 def _readbranchcache(self):
309 partial = {}
298 310 try:
299 311 f = self.opener("branches.cache")
300 312 last, lrev = f.readline().rstrip().split(" ", 1)
301 313 last, lrev = bin(last), int(lrev)
302 314 if (lrev < self.changelog.count() and
303 315 self.changelog.node(lrev) == last): # sanity check
304 316 for l in f:
305 317 node, label = l.rstrip().split(" ", 1)
306 self.branchcache[label] = bin(node)
318 partial[label] = bin(node)
307 319 else: # invalidate the cache
308 320 last, lrev = nullid, -1
309 321 f.close()
310 322 except IOError:
311 323 last, lrev = nullid, -1
324 return partial, last, lrev
312 325
313 tip = self.changelog.count() - 1
314 if lrev != tip:
315 for r in xrange(lrev + 1, tip + 1):
316 c = self.changectx(r)
317 b = c.branch()
318 if b:
319 self.branchcache[b] = c.node()
320 self._writebranchcache()
321
322 return self.branchcache
323
324 def _writebranchcache(self):
326 def _writebranchcache(self, branches, tip, tiprev):
325 327 try:
326 328 f = self.opener("branches.cache", "w")
327 t = self.changelog.tip()
328 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
329 for label, node in self.branchcache.iteritems():
329 f.write("%s %s\n" % (hex(tip), tiprev))
330 for label, node in branches.iteritems():
330 331 f.write("%s %s\n" % (hex(node), label))
331 332 except IOError:
332 333 pass
333 334
335 def _updatebranchcache(self, partial, start, end):
336 for r in xrange(start, end):
337 c = self.changectx(r)
338 b = c.branch()
339 if b:
340 partial[b] = c.node()
341
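# For reference, the line-oriented layout _writebranchcache() produces
# (values are placeholders, one "node label" pair per tracked branch):
#
#   <hex tip node> <tip rev>
#   <hex node> <branch label>
#   ...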
334 342 def lookup(self, key):
335 343 if key == '.':
336 344 key = self.dirstate.parents()[0]
337 345 if key == nullid:
338 346 raise repo.RepoError(_("no revision checked out"))
339 347 if key in self.tags():
340 348 return self.tags()[key]
341 349 if key in self.branchtags():
342 350 return self.branchtags()[key]
343 351 try:
344 352 return self.changelog.lookup(key)
345 353 except:
346 354 raise repo.RepoError(_("unknown revision '%s'") % key)
347 355
348 356 def dev(self):
349 357 return os.lstat(self.path).st_dev
350 358
351 359 def local(self):
352 360 return True
353 361
354 362 def join(self, f):
355 363 return os.path.join(self.path, f)
356 364
357 365 def wjoin(self, f):
358 366 return os.path.join(self.root, f)
359 367
360 368 def file(self, f):
361 369 if f[0] == '/':
362 370 f = f[1:]
363 371 return filelog.filelog(self.opener, f, self.revlogversion)
364 372
365 373 def changectx(self, changeid=None):
366 374 return context.changectx(self, changeid)
367 375
368 376 def workingctx(self):
369 377 return context.workingctx(self)
370 378
371 379 def parents(self, changeid=None):
372 380 '''
373 381 get list of changectxs for parents of changeid or working directory
374 382 '''
375 383 if changeid is None:
376 384 pl = self.dirstate.parents()
377 385 else:
378 386 n = self.changelog.lookup(changeid)
379 387 pl = self.changelog.parents(n)
380 388 if pl[1] == nullid:
381 389 return [self.changectx(pl[0])]
382 390 return [self.changectx(pl[0]), self.changectx(pl[1])]
383 391
384 392 def filectx(self, path, changeid=None, fileid=None):
385 393 """changeid can be a changeset revision, node, or tag.
386 394 fileid can be a file revision or node."""
387 395 return context.filectx(self, path, changeid, fileid)
388 396
389 397 def getcwd(self):
390 398 return self.dirstate.getcwd()
391 399
392 400 def wfile(self, f, mode='r'):
393 401 return self.wopener(f, mode)
394 402
395 403 def wread(self, filename):
396 404 if self.encodepats == None:
397 405 l = []
398 406 for pat, cmd in self.ui.configitems("encode"):
399 407 mf = util.matcher(self.root, "", [pat], [], [])[1]
400 408 l.append((mf, cmd))
401 409 self.encodepats = l
402 410
403 411 data = self.wopener(filename, 'r').read()
404 412
405 413 for mf, cmd in self.encodepats:
406 414 if mf(filename):
407 415 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
408 416 data = util.filter(data, cmd)
409 417 break
410 418
411 419 return data
412 420
413 421 def wwrite(self, filename, data, fd=None):
414 422 if self.decodepats == None:
415 423 l = []
416 424 for pat, cmd in self.ui.configitems("decode"):
417 425 mf = util.matcher(self.root, "", [pat], [], [])[1]
418 426 l.append((mf, cmd))
419 427 self.decodepats = l
420 428
421 429 for mf, cmd in self.decodepats:
422 430 if mf(filename):
423 431 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
424 432 data = util.filter(data, cmd)
425 433 break
426 434
427 435 if fd:
428 436 return fd.write(data)
429 437 return self.wopener(filename, 'w').write(data)
430 438
431 439 def transaction(self):
432 440 tr = self.transhandle
433 441 if tr != None and tr.running():
434 442 return tr.nest()
435 443
436 444 # save dirstate for rollback
437 445 try:
438 446 ds = self.opener("dirstate").read()
439 447 except IOError:
440 448 ds = ""
441 449 self.opener("journal.dirstate", "w").write(ds)
442 450
443 451 tr = transaction.transaction(self.ui.warn, self.opener,
444 452 self.join("journal"),
445 453 aftertrans(self.path))
446 454 self.transhandle = tr
447 455 return tr
448 456
449 457 def recover(self):
450 458 l = self.lock()
451 459 if os.path.exists(self.join("journal")):
452 460 self.ui.status(_("rolling back interrupted transaction\n"))
453 461 transaction.rollback(self.opener, self.join("journal"))
454 462 self.reload()
455 463 return True
456 464 else:
457 465 self.ui.warn(_("no interrupted transaction available\n"))
458 466 return False
459 467
460 468 def rollback(self, wlock=None):
461 469 if not wlock:
462 470 wlock = self.wlock()
463 471 l = self.lock()
464 472 if os.path.exists(self.join("undo")):
465 473 self.ui.status(_("rolling back last transaction\n"))
466 474 transaction.rollback(self.opener, self.join("undo"))
467 475 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
468 476 self.reload()
469 477 self.wreload()
470 478 else:
471 479 self.ui.warn(_("no rollback information available\n"))
472 480
473 481 def wreload(self):
474 482 self.dirstate.read()
475 483
476 484 def reload(self):
477 485 self.changelog.load()
478 486 self.manifest.load()
479 487 self.tagscache = None
480 488 self.nodetagscache = None
481 489
482 490 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
483 491 desc=None):
484 492 try:
485 493 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
486 494 except lock.LockHeld, inst:
487 495 if not wait:
488 496 raise
489 497 self.ui.warn(_("waiting for lock on %s held by %s\n") %
490 498 (desc, inst.args[0]))
491 499 # default to 600 seconds timeout
492 500 l = lock.lock(self.join(lockname),
493 501 int(self.ui.config("ui", "timeout") or 600),
494 502 releasefn, desc=desc)
495 503 if acquirefn:
496 504 acquirefn()
497 505 return l
498 506
499 507 def lock(self, wait=1):
500 508 return self.do_lock("lock", wait, acquirefn=self.reload,
501 509 desc=_('repository %s') % self.origroot)
502 510
503 511 def wlock(self, wait=1):
504 512 return self.do_lock("wlock", wait, self.dirstate.write,
505 513 self.wreload,
506 514 desc=_('working directory of %s') % self.origroot)
507 515
508 516 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
509 517 """
510 518 commit an individual file as part of a larger transaction
511 519 """
512 520
513 521 t = self.wread(fn)
514 522 fl = self.file(fn)
515 523 fp1 = manifest1.get(fn, nullid)
516 524 fp2 = manifest2.get(fn, nullid)
517 525
518 526 meta = {}
519 527 cp = self.dirstate.copied(fn)
520 528 if cp:
521 529 meta["copy"] = cp
522 530 if not manifest2: # not a branch merge
523 531 meta["copyrev"] = hex(manifest1.get(cp, nullid))
524 532 fp2 = nullid
525 533 elif fp2 != nullid: # copied on remote side
526 534 meta["copyrev"] = hex(manifest1.get(cp, nullid))
527 535 else: # copied on local side, reversed
528 536 meta["copyrev"] = hex(manifest2.get(cp))
529 537 fp2 = nullid
530 538 self.ui.debug(_(" %s: copy %s:%s\n") %
531 539 (fn, cp, meta["copyrev"]))
532 540 fp1 = nullid
533 541 elif fp2 != nullid:
534 542 # is one parent an ancestor of the other?
535 543 fpa = fl.ancestor(fp1, fp2)
536 544 if fpa == fp1:
537 545 fp1, fp2 = fp2, nullid
538 546 elif fpa == fp2:
539 547 fp2 = nullid
540 548
541 549 # is the file unmodified from the parent? report existing entry
542 550 if fp2 == nullid and not fl.cmp(fp1, t):
543 551 return fp1
544 552
545 553 changelist.append(fn)
546 554 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
547 555
548 556 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
549 557 orig_parent = self.dirstate.parents()[0] or nullid
550 558 p1 = p1 or self.dirstate.parents()[0] or nullid
551 559 p2 = p2 or self.dirstate.parents()[1] or nullid
552 560 c1 = self.changelog.read(p1)
553 561 c2 = self.changelog.read(p2)
554 562 m1 = self.manifest.read(c1[0]).copy()
555 563 m2 = self.manifest.read(c2[0])
556 564 changed = []
557 565 removed = []
558 566
559 567 if orig_parent == p1:
560 568 update_dirstate = 1
561 569 else:
562 570 update_dirstate = 0
563 571
564 572 if not wlock:
565 573 wlock = self.wlock()
566 574 l = self.lock()
567 575 tr = self.transaction()
568 576 linkrev = self.changelog.count()
569 577 for f in files:
570 578 try:
571 579 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
572 580 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
573 581 except IOError:
574 582 try:
575 583 del m1[f]
576 584 if update_dirstate:
577 585 self.dirstate.forget([f])
578 586 removed.append(f)
579 587 except:
580 588 # deleted from p2?
581 589 pass
582 590
583 591 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
584 592 user = user or self.ui.username()
585 593 n = self.changelog.add(mnode, changed + removed, text,
586 594 tr, p1, p2, user, date)
587 595 tr.close()
588 596 if update_dirstate:
589 597 self.dirstate.setparents(n, nullid)
590 598
591 599 def commit(self, files=None, text="", user=None, date=None,
592 600 match=util.always, force=False, lock=None, wlock=None,
593 601 force_editor=False):
594 602 commit = []
595 603 remove = []
596 604 changed = []
597 605
598 606 if files:
599 607 for f in files:
600 608 s = self.dirstate.state(f)
601 609 if s in 'nmai':
602 610 commit.append(f)
603 611 elif s == 'r':
604 612 remove.append(f)
605 613 else:
606 614 self.ui.warn(_("%s not tracked!\n") % f)
607 615 else:
608 616 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
609 617 commit = modified + added
610 618 remove = removed
611 619
612 620 p1, p2 = self.dirstate.parents()
613 621 c1 = self.changelog.read(p1)
614 622 c2 = self.changelog.read(p2)
615 623 m1 = self.manifest.read(c1[0]).copy()
616 624 m2 = self.manifest.read(c2[0])
617 625
618 626 branchname = self.workingctx().branch()
619 627 oldname = c1[5].get("branch", "")
620 628
621 629 if not commit and not remove and not force and p2 == nullid and \
622 630 branchname == oldname:
623 631 self.ui.status(_("nothing changed\n"))
624 632 return None
625 633
626 634 xp1 = hex(p1)
627 635 if p2 == nullid: xp2 = ''
628 636 else: xp2 = hex(p2)
629 637
630 638 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
631 639
632 640 if not wlock:
633 641 wlock = self.wlock()
634 642 if not lock:
635 643 lock = self.lock()
636 644 tr = self.transaction()
637 645
638 646 # check in files
639 647 new = {}
640 648 linkrev = self.changelog.count()
641 649 commit.sort()
642 650 for f in commit:
643 651 self.ui.note(f + "\n")
644 652 try:
645 653 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
646 654 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
647 655 except IOError:
648 656 self.ui.warn(_("trouble committing %s!\n") % f)
649 657 raise
650 658
651 659 # update manifest
652 660 m1.update(new)
653 661 for f in remove:
654 662 if f in m1:
655 663 del m1[f]
656 664 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
657 665
658 666 # add changeset
659 667 new = new.keys()
660 668 new.sort()
661 669
662 670 user = user or self.ui.username()
663 671 if not text or force_editor:
664 672 edittext = []
665 673 if text:
666 674 edittext.append(text)
667 675 edittext.append("")
668 676 if p2 != nullid:
669 677 edittext.append("HG: branch merge")
670 678 edittext.extend(["HG: changed %s" % f for f in changed])
671 679 edittext.extend(["HG: removed %s" % f for f in remove])
672 680 if not changed and not remove:
673 681 edittext.append("HG: no files changed")
674 682 edittext.append("")
675 683 # run editor in the repository root
676 684 olddir = os.getcwd()
677 685 os.chdir(self.root)
678 686 text = self.ui.edit("\n".join(edittext), user)
679 687 os.chdir(olddir)
680 688
681 689 lines = [line.rstrip() for line in text.rstrip().splitlines()]
682 690 while lines and not lines[0]:
683 691 del lines[0]
684 692 if not lines:
685 693 return None
686 694 text = '\n'.join(lines)
687 695 extra = {}
688 696 if branchname:
689 697 extra["branch"] = branchname
690 698 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
691 699 user, date, extra)
692 700 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
693 701 parent2=xp2)
694 702 tr.close()
695 703
696 704 self.dirstate.setparents(n)
697 705 self.dirstate.update(new, "n")
698 706 self.dirstate.forget(remove)
699 707
700 708 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
701 709 return n
702 710
703 711 def walk(self, node=None, files=[], match=util.always, badmatch=None):
704 712 if node:
705 713 fdict = dict.fromkeys(files)
706 714 for fn in self.manifest.read(self.changelog.read(node)[0]):
707 715 for ffn in fdict:
708 716 # match if the file is the exact name or a directory
709 717 if ffn == fn or fn.startswith("%s/" % ffn):
710 718 del fdict[ffn]
711 719 break
712 720 if match(fn):
713 721 yield 'm', fn
714 722 for fn in fdict:
715 723 if badmatch and badmatch(fn):
716 724 if match(fn):
717 725 yield 'b', fn
718 726 else:
719 727 self.ui.warn(_('%s: No such file in rev %s\n') % (
720 728 util.pathto(self.getcwd(), fn), short(node)))
721 729 else:
722 730 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
723 731 yield src, fn
724 732
725 733 def status(self, node1=None, node2=None, files=[], match=util.always,
726 734 wlock=None, list_ignored=False, list_clean=False):
727 735 """return status of files between two nodes or node and working directory
728 736
729 737 If node1 is None, use the first dirstate parent instead.
730 738 If node2 is None, compare node1 with working directory.
731 739 """
732 740
733 741 def fcmp(fn, mf):
734 742 t1 = self.wread(fn)
735 743 return self.file(fn).cmp(mf.get(fn, nullid), t1)
736 744
737 745 def mfmatches(node):
738 746 change = self.changelog.read(node)
739 747 mf = self.manifest.read(change[0]).copy()
740 748 for fn in mf.keys():
741 749 if not match(fn):
742 750 del mf[fn]
743 751 return mf
744 752
745 753 modified, added, removed, deleted, unknown = [], [], [], [], []
746 754 ignored, clean = [], []
747 755
748 756 compareworking = False
749 757 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
750 758 compareworking = True
751 759
752 760 if not compareworking:
753 761 # read the manifest from node1 before the manifest from node2,
754 762 # so that we'll hit the manifest cache if we're going through
755 763 # all the revisions in parent->child order.
756 764 mf1 = mfmatches(node1)
757 765
758 766 # are we comparing the working directory?
759 767 if not node2:
760 768 if not wlock:
761 769 try:
762 770 wlock = self.wlock(wait=0)
763 771 except lock.LockException:
764 772 wlock = None
765 773 (lookup, modified, added, removed, deleted, unknown,
766 774 ignored, clean) = self.dirstate.status(files, match,
767 775 list_ignored, list_clean)
768 776
769 777 # are we comparing working dir against its parent?
770 778 if compareworking:
771 779 if lookup:
772 780 # do a full compare of any files that might have changed
773 781 mf2 = mfmatches(self.dirstate.parents()[0])
774 782 for f in lookup:
775 783 if fcmp(f, mf2):
776 784 modified.append(f)
777 785 else:
778 786 clean.append(f)
779 787 if wlock is not None:
780 788 self.dirstate.update([f], "n")
781 789 else:
782 790 # we are comparing working dir against non-parent
783 791 # generate a pseudo-manifest for the working dir
784 792 # XXX: create it in dirstate.py ?
785 793 mf2 = mfmatches(self.dirstate.parents()[0])
786 794 for f in lookup + modified + added:
787 795 mf2[f] = ""
788 796 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
789 797 for f in removed:
790 798 if f in mf2:
791 799 del mf2[f]
792 800 else:
793 801 # we are comparing two revisions
794 802 mf2 = mfmatches(node2)
795 803
796 804 if not compareworking:
797 805 # flush lists from dirstate before comparing manifests
798 806 modified, added, clean = [], [], []
799 807
800 808 # make sure to sort the files so we talk to the disk in a
801 809 # reasonable order
802 810 mf2keys = mf2.keys()
803 811 mf2keys.sort()
804 812 for fn in mf2keys:
805 813 if mf1.has_key(fn):
806 814 if mf1.flags(fn) != mf2.flags(fn) or \
807 815 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
808 816 modified.append(fn)
809 817 elif list_clean:
810 818 clean.append(fn)
811 819 del mf1[fn]
812 820 else:
813 821 added.append(fn)
814 822
815 823 removed = mf1.keys()
816 824
817 825 # sort and return results:
818 826 for l in modified, added, removed, deleted, unknown, ignored, clean:
819 827 l.sort()
820 828 return (modified, added, removed, deleted, unknown, ignored, clean)
821 829
822 830 def add(self, list, wlock=None):
823 831 if not wlock:
824 832 wlock = self.wlock()
825 833 for f in list:
826 834 p = self.wjoin(f)
827 835 if not os.path.exists(p):
828 836 self.ui.warn(_("%s does not exist!\n") % f)
829 837 elif not os.path.isfile(p):
830 838 self.ui.warn(_("%s not added: only files supported currently\n")
831 839 % f)
832 840 elif self.dirstate.state(f) in 'an':
833 841 self.ui.warn(_("%s already tracked!\n") % f)
834 842 else:
835 843 self.dirstate.update([f], "a")
836 844
837 845 def forget(self, list, wlock=None):
838 846 if not wlock:
839 847 wlock = self.wlock()
840 848 for f in list:
841 849 if self.dirstate.state(f) not in 'ai':
842 850 self.ui.warn(_("%s not added!\n") % f)
843 851 else:
844 852 self.dirstate.forget([f])
845 853
846 854 def remove(self, list, unlink=False, wlock=None):
847 855 if unlink:
848 856 for f in list:
849 857 try:
850 858 util.unlink(self.wjoin(f))
851 859 except OSError, inst:
852 860 if inst.errno != errno.ENOENT:
853 861 raise
854 862 if not wlock:
855 863 wlock = self.wlock()
856 864 for f in list:
857 865 p = self.wjoin(f)
858 866 if os.path.exists(p):
859 867 self.ui.warn(_("%s still exists!\n") % f)
860 868 elif self.dirstate.state(f) == 'a':
861 869 self.dirstate.forget([f])
862 870 elif f not in self.dirstate:
863 871 self.ui.warn(_("%s not tracked!\n") % f)
864 872 else:
865 873 self.dirstate.update([f], "r")
866 874
867 875 def undelete(self, list, wlock=None):
868 876 p = self.dirstate.parents()[0]
869 877 mn = self.changelog.read(p)[0]
870 878 m = self.manifest.read(mn)
871 879 if not wlock:
872 880 wlock = self.wlock()
873 881 for f in list:
874 882 if self.dirstate.state(f) not in "r":
875 883 self.ui.warn("%s not removed!\n" % f)
876 884 else:
877 885 t = self.file(f).read(m[f])
878 886 self.wwrite(f, t)
879 887 util.set_exec(self.wjoin(f), m.execf(f))
880 888 self.dirstate.update([f], "n")
881 889
882 890 def copy(self, source, dest, wlock=None):
883 891 p = self.wjoin(dest)
884 892 if not os.path.exists(p):
885 893 self.ui.warn(_("%s does not exist!\n") % dest)
886 894 elif not os.path.isfile(p):
887 895 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
888 896 else:
889 897 if not wlock:
890 898 wlock = self.wlock()
891 899 if self.dirstate.state(dest) == '?':
892 900 self.dirstate.update([dest], "a")
893 901 self.dirstate.copy(source, dest)
894 902
895 903 def heads(self, start=None):
896 904 heads = self.changelog.heads(start)
897 905 # sort the output in rev descending order
898 906 heads = [(-self.changelog.rev(h), h) for h in heads]
899 907 heads.sort()
900 908 return [n for (r, n) in heads]
901 909
902 910 # branchlookup returns a dict giving a list of branches for
903 911 # each head. A branch is defined as the tag of a node or
904 912 # the branch of the node's parents. If a node has multiple
905 913 # branch tags, tags are eliminated if they are visible from other
906 914 # branch tags.
907 915 #
908 916 # So, for this graph: a->b->c->d->e
909 917 # \ /
910 918 # aa -----/
911 919 # a has tag 2.6.12
912 920 # d has tag 2.6.13
913 921 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
914 922 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
915 923 # from the list.
916 924 #
917 925 # It is possible that more than one head will have the same branch tag.
918 926 # callers need to check the result for multiple heads under the same
919 927 # branch tag if that is a problem for them (ie checkout of a specific
920 928 # branch).
921 929 #
922 930 # passing in a specific branch will limit the depth of the search
923 931 # through the parents. It won't limit the branches returned in the
924 932 # result though.
925 933 def branchlookup(self, heads=None, branch=None):
926 934 if not heads:
927 935 heads = self.heads()
928 936 headt = [ h for h in heads ]
929 937 chlog = self.changelog
930 938 branches = {}
931 939 merges = []
932 940 seenmerge = {}
933 941
934 942 # traverse the tree once for each head, recording in the branches
935 943 # dict which tags are visible from this head. The branches
936 944 # dict also records which tags are visible from each tag
937 945 # while we traverse.
938 946 while headt or merges:
939 947 if merges:
940 948 n, found = merges.pop()
941 949 visit = [n]
942 950 else:
943 951 h = headt.pop()
944 952 visit = [h]
945 953 found = [h]
946 954 seen = {}
947 955 while visit:
948 956 n = visit.pop()
949 957 if n in seen:
950 958 continue
951 959 pp = chlog.parents(n)
952 960 tags = self.nodetags(n)
953 961 if tags:
954 962 for x in tags:
955 963 if x == 'tip':
956 964 continue
957 965 for f in found:
958 966 branches.setdefault(f, {})[n] = 1
959 967 branches.setdefault(n, {})[n] = 1
960 968 break
961 969 if n not in found:
962 970 found.append(n)
963 971 if branch in tags:
964 972 continue
965 973 seen[n] = 1
966 974 if pp[1] != nullid and n not in seenmerge:
967 975 merges.append((pp[1], [x for x in found]))
968 976 seenmerge[n] = 1
969 977 if pp[0] != nullid:
970 978 visit.append(pp[0])
971 979 # traverse the branches dict, eliminating branch tags from each
972 980 # head that are visible from another branch tag for that head.
973 981 out = {}
974 982 viscache = {}
975 983 for h in heads:
976 984 def visible(node):
977 985 if node in viscache:
978 986 return viscache[node]
979 987 ret = {}
980 988 visit = [node]
981 989 while visit:
982 990 x = visit.pop()
983 991 if x in viscache:
984 992 ret.update(viscache[x])
985 993 elif x not in ret:
986 994 ret[x] = 1
987 995 if x in branches:
988 996 visit[len(visit):] = branches[x].keys()
989 997 viscache[node] = ret
990 998 return ret
991 999 if h not in branches:
992 1000 continue
993 1001 # O(n^2), but somewhat limited. This only searches the
994 1002 # tags visible from a specific head, not all the tags in the
995 1003 # whole repo.
996 1004 for b in branches[h]:
997 1005 vis = False
998 1006 for bb in branches[h].keys():
999 1007 if b != bb:
1000 1008 if b in visible(bb):
1001 1009 vis = True
1002 1010 break
1003 1011 if not vis:
1004 1012 l = out.setdefault(h, [])
1005 1013 l[len(l):] = self.nodetags(b)
1006 1014 return out
1007 1015
1008 1016 def branches(self, nodes):
1009 1017 if not nodes:
1010 1018 nodes = [self.changelog.tip()]
1011 1019 b = []
1012 1020 for n in nodes:
1013 1021 t = n
1014 1022 while 1:
1015 1023 p = self.changelog.parents(n)
1016 1024 if p[1] != nullid or p[0] == nullid:
1017 1025 b.append((t, n, p[0], p[1]))
1018 1026 break
1019 1027 n = p[0]
1020 1028 return b
1021 1029
1022 1030 def between(self, pairs):
1023 1031 r = []
1024 1032
1025 1033 for top, bottom in pairs:
1026 1034 n, l, i = top, [], 0
1027 1035 f = 1
1028 1036
1029 1037 while n != bottom:
1030 1038 p = self.changelog.parents(n)[0]
1031 1039 if i == f:
1032 1040 l.append(n)
1033 1041 f = f * 2
1034 1042 n = p
1035 1043 i += 1
1036 1044
1037 1045 r.append(l)
1038 1046
1039 1047 return r
1040 1048
1041 1049 def findincoming(self, remote, base=None, heads=None, force=False):
1042 1050 """Return list of roots of the subsets of missing nodes from remote
1043 1051
1044 1052 If base dict is specified, assume that these nodes and their parents
1045 1053 exist on the remote side and that no child of a node of base exists
1046 1054 in both remote and self.
1047 1055 Furthermore, base will be updated to include the nodes that exist
1048 1056 in both self and remote but whose children do not exist in both.
1049 1057 If a list of heads is specified, return only nodes which are heads
1050 1058 or ancestors of these heads.
1051 1059
1052 1060 All the ancestors of base are in self and in remote.
1053 1061 All the descendants of the list returned are missing in self.
1054 1062 (and so we know that the rest of the nodes are missing in remote, see
1055 1063 outgoing)
1056 1064 """
1057 1065 m = self.changelog.nodemap
1058 1066 search = []
1059 1067 fetch = {}
1060 1068 seen = {}
1061 1069 seenbranch = {}
1062 1070 if base == None:
1063 1071 base = {}
1064 1072
1065 1073 if not heads:
1066 1074 heads = remote.heads()
1067 1075
1068 1076 if self.changelog.tip() == nullid:
1069 1077 base[nullid] = 1
1070 1078 if heads != [nullid]:
1071 1079 return [nullid]
1072 1080 return []
1073 1081
1074 1082 # assume we're closer to the tip than the root
1075 1083 # and start by examining the heads
1076 1084 self.ui.status(_("searching for changes\n"))
1077 1085
1078 1086 unknown = []
1079 1087 for h in heads:
1080 1088 if h not in m:
1081 1089 unknown.append(h)
1082 1090 else:
1083 1091 base[h] = 1
1084 1092
1085 1093 if not unknown:
1086 1094 return []
1087 1095
1088 1096 req = dict.fromkeys(unknown)
1089 1097 reqcnt = 0
1090 1098
1091 1099 # search through remote branches
1092 1100 # a 'branch' here is a linear segment of history, with four parts:
1093 1101 # head, root, first parent, second parent
1094 1102 # (a branch always has two parents (or none) by definition)
1095 1103 unknown = remote.branches(unknown)
1096 1104 while unknown:
1097 1105 r = []
1098 1106 while unknown:
1099 1107 n = unknown.pop(0)
1100 1108 if n[0] in seen:
1101 1109 continue
1102 1110
1103 1111 self.ui.debug(_("examining %s:%s\n")
1104 1112 % (short(n[0]), short(n[1])))
1105 1113 if n[0] == nullid: # found the end of the branch
1106 1114 pass
1107 1115 elif n in seenbranch:
1108 1116 self.ui.debug(_("branch already found\n"))
1109 1117 continue
1110 1118 elif n[1] and n[1] in m: # do we know the base?
1111 1119 self.ui.debug(_("found incomplete branch %s:%s\n")
1112 1120 % (short(n[0]), short(n[1])))
1113 1121 search.append(n) # schedule branch range for scanning
1114 1122 seenbranch[n] = 1
1115 1123 else:
1116 1124 if n[1] not in seen and n[1] not in fetch:
1117 1125 if n[2] in m and n[3] in m:
1118 1126 self.ui.debug(_("found new changeset %s\n") %
1119 1127 short(n[1]))
1120 1128 fetch[n[1]] = 1 # earliest unknown
1121 1129 for p in n[2:4]:
1122 1130 if p in m:
1123 1131 base[p] = 1 # latest known
1124 1132
1125 1133 for p in n[2:4]:
1126 1134 if p not in req and p not in m:
1127 1135 r.append(p)
1128 1136 req[p] = 1
1129 1137 seen[n[0]] = 1
1130 1138
1131 1139 if r:
1132 1140 reqcnt += 1
1133 1141 self.ui.debug(_("request %d: %s\n") %
1134 1142 (reqcnt, " ".join(map(short, r))))
1135 1143 for p in xrange(0, len(r), 10):
1136 1144 for b in remote.branches(r[p:p+10]):
1137 1145 self.ui.debug(_("received %s:%s\n") %
1138 1146 (short(b[0]), short(b[1])))
1139 1147 unknown.append(b)
1140 1148
1141 1149 # do binary search on the branches we found
1142 1150 while search:
1143 1151 n = search.pop(0)
1144 1152 reqcnt += 1
1145 1153 l = remote.between([(n[0], n[1])])[0]
1146 1154 l.append(n[1])
1147 1155 p = n[0]
1148 1156 f = 1
1149 1157 for i in l:
1150 1158 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1151 1159 if i in m:
1152 1160 if f <= 2:
1153 1161 self.ui.debug(_("found new branch changeset %s\n") %
1154 1162 short(p))
1155 1163 fetch[p] = 1
1156 1164 base[i] = 1
1157 1165 else:
1158 1166 self.ui.debug(_("narrowed branch search to %s:%s\n")
1159 1167 % (short(p), short(i)))
1160 1168 search.append((p, i))
1161 1169 break
1162 1170 p, f = i, f * 2
1163 1171
1164 1172 # sanity check our fetch list
1165 1173 for f in fetch.keys():
1166 1174 if f in m:
1167 1175 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1168 1176
1169 1177 if base.keys() == [nullid]:
1170 1178 if force:
1171 1179 self.ui.warn(_("warning: repository is unrelated\n"))
1172 1180 else:
1173 1181 raise util.Abort(_("repository is unrelated"))
1174 1182
1175 1183 self.ui.debug(_("found new changesets starting at ") +
1176 1184 " ".join([short(f) for f in fetch]) + "\n")
1177 1185
1178 1186 self.ui.debug(_("%d total queries\n") % reqcnt)
1179 1187
1180 1188 return fetch.keys()
1181 1189
1182 1190 def findoutgoing(self, remote, base=None, heads=None, force=False):
1183 1191 """Return list of nodes that are roots of subsets not in remote
1184 1192
1185 1193 If base dict is specified, assume that these nodes and their parents
1186 1194 exist on the remote side.
1187 1195 If a list of heads is specified, return only nodes which are heads
1188 1196 or ancestors of these heads, and return a second element which
1189 1197 contains all remote heads which get new children.
1190 1198 """
1191 1199 if base == None:
1192 1200 base = {}
1193 1201 self.findincoming(remote, base, heads, force=force)
1194 1202
1195 1203 self.ui.debug(_("common changesets up to ")
1196 1204 + " ".join(map(short, base.keys())) + "\n")
1197 1205
1198 1206 remain = dict.fromkeys(self.changelog.nodemap)
1199 1207
1200 1208 # prune everything remote has from the tree
1201 1209 del remain[nullid]
1202 1210 remove = base.keys()
1203 1211 while remove:
1204 1212 n = remove.pop(0)
1205 1213 if n in remain:
1206 1214 del remain[n]
1207 1215 for p in self.changelog.parents(n):
1208 1216 remove.append(p)
1209 1217
1210 1218 # find every node whose parents have been pruned
1211 1219 subset = []
1212 1220 # find every remote head that will get new children
1213 1221 updated_heads = {}
1214 1222 for n in remain:
1215 1223 p1, p2 = self.changelog.parents(n)
1216 1224 if p1 not in remain and p2 not in remain:
1217 1225 subset.append(n)
1218 1226 if heads:
1219 1227 if p1 in heads:
1220 1228 updated_heads[p1] = True
1221 1229 if p2 in heads:
1222 1230 updated_heads[p2] = True
1223 1231
1224 1232 # this is the set of all roots we have to push
1225 1233 if heads:
1226 1234 return subset, updated_heads.keys()
1227 1235 else:
1228 1236 return subset
1229 1237
1230 1238 def pull(self, remote, heads=None, force=False, lock=None):
1231 1239 mylock = False
1232 1240 if not lock:
1233 1241 lock = self.lock()
1234 1242 mylock = True
1235 1243
1236 1244 try:
1237 1245 fetch = self.findincoming(remote, force=force)
1238 1246 if fetch == [nullid]:
1239 1247 self.ui.status(_("requesting all changes\n"))
1240 1248
1241 1249 if not fetch:
1242 1250 self.ui.status(_("no changes found\n"))
1243 1251 return 0
1244 1252
1245 1253 if heads is None:
1246 1254 cg = remote.changegroup(fetch, 'pull')
1247 1255 else:
1248 1256 if 'changegroupsubset' not in remote.capabilities:
1249 1257 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1250 1258 cg = remote.changegroupsubset(fetch, heads, 'pull')
1251 1259 return self.addchangegroup(cg, 'pull', remote.url())
1252 1260 finally:
1253 1261 if mylock:
1254 1262 lock.release()
1255 1263
1256 1264 def push(self, remote, force=False, revs=None):
1257 1265 # there are two ways to push to remote repo:
1258 1266 #
1259 1267 # addchangegroup assumes local user can lock remote
1260 1268 # repo (local filesystem, old ssh servers).
1261 1269 #
1262 1270 # unbundle assumes local user cannot lock remote repo (new ssh
1263 1271 # servers, http servers).
1264 1272
1265 1273 if remote.capable('unbundle'):
1266 1274 return self.push_unbundle(remote, force, revs)
1267 1275 return self.push_addchangegroup(remote, force, revs)
1268 1276
1269 1277 def prepush(self, remote, force, revs):
1270 1278 base = {}
1271 1279 remote_heads = remote.heads()
1272 1280 inc = self.findincoming(remote, base, remote_heads, force=force)
1273 1281 if not force and inc:
1274 1282 self.ui.warn(_("abort: unsynced remote changes!\n"))
1275 1283 self.ui.status(_("(did you forget to sync?"
1276 1284 " use push -f to force)\n"))
1277 1285 return None, 1
1278 1286
1279 1287 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1280 1288 if revs is not None:
1281 1289 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1282 1290 else:
1283 1291 bases, heads = update, self.changelog.heads()
1284 1292
1285 1293 if not bases:
1286 1294 self.ui.status(_("no changes found\n"))
1287 1295 return None, 1
1288 1296 elif not force:
1289 1297 # FIXME we don't properly detect creation of new heads
1290 1298 # in the push -r case, assume the user knows what he's doing
1291 1299 if not revs and len(remote_heads) < len(heads) \
1292 1300 and remote_heads != [nullid]:
1293 1301 self.ui.warn(_("abort: push creates new remote branches!\n"))
1294 1302 self.ui.status(_("(did you forget to merge?"
1295 1303 " use push -f to force)\n"))
1296 1304 return None, 1
1297 1305
1298 1306 if revs is None:
1299 1307 cg = self.changegroup(update, 'push')
1300 1308 else:
1301 1309 cg = self.changegroupsubset(update, revs, 'push')
1302 1310 return cg, remote_heads
1303 1311
1304 1312 def push_addchangegroup(self, remote, force, revs):
1305 1313 lock = remote.lock()
1306 1314
1307 1315 ret = self.prepush(remote, force, revs)
1308 1316 if ret[0] is not None:
1309 1317 cg, remote_heads = ret
1310 1318 return remote.addchangegroup(cg, 'push', self.url())
1311 1319 return ret[1]
1312 1320
1313 1321 def push_unbundle(self, remote, force, revs):
1314 1322 # local repo finds heads on server, finds out what revs it
1315 1323 # must push. once revs transferred, if server finds it has
1316 1324 # different heads (someone else won commit/push race), server
1317 1325 # aborts.
1318 1326
1319 1327 ret = self.prepush(remote, force, revs)
1320 1328 if ret[0] is not None:
1321 1329 cg, remote_heads = ret
1322 1330 if force: remote_heads = ['force']
1323 1331 return remote.unbundle(cg, remote_heads, 'push')
1324 1332 return ret[1]
1325 1333
1326 1334 def changegroupsubset(self, bases, heads, source):
1327 1335 """This function generates a changegroup consisting of all the nodes
1328 1336 that are descendants of any of the bases, and ancestors of any of
1329 1337 the heads.
1330 1338
1331 1339 It is fairly complex as determining which filenodes and which
1332 1340 manifest nodes need to be included for the changeset to be complete
1333 1341 is non-trivial.
1334 1342
1335 1343 Another wrinkle is doing the reverse, figuring out which changeset in
1336 1344 the changegroup a particular filenode or manifestnode belongs to."""
1337 1345
1338 1346 self.hook('preoutgoing', throw=True, source=source)
1339 1347
1340 1348 # Set up some initial variables
1341 1349 # Make it easy to refer to self.changelog
1342 1350 cl = self.changelog
1343 1351 # msng is short for missing - compute the list of changesets in this
1344 1352 # changegroup.
1345 1353 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1346 1354 # Some bases may turn out to be superfluous, and some heads may be
1347 1355 # too. nodesbetween will return the minimal set of bases and heads
1348 1356 # necessary to re-create the changegroup.
1349 1357
1350 1358 # Known heads are the list of heads that it is assumed the recipient
1351 1359 # of this changegroup will know about.
1352 1360 knownheads = {}
1353 1361 # We assume that all parents of bases are known heads.
1354 1362 for n in bases:
1355 1363 for p in cl.parents(n):
1356 1364 if p != nullid:
1357 1365 knownheads[p] = 1
1358 1366 knownheads = knownheads.keys()
1359 1367 if knownheads:
1360 1368 # Now that we know what heads are known, we can compute which
1361 1369 # changesets are known. The recipient must know about all
1362 1370 # changesets required to reach the known heads from the null
1363 1371 # changeset.
1364 1372 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1365 1373 junk = None
1366 1374 # Transform the list into an ersatz set.
1367 1375 has_cl_set = dict.fromkeys(has_cl_set)
1368 1376 else:
1369 1377 # If there were no known heads, the recipient cannot be assumed to
1370 1378 # know about any changesets.
1371 1379 has_cl_set = {}
1372 1380
1373 1381 # Make it easy to refer to self.manifest
1374 1382 mnfst = self.manifest
1375 1383 # We don't know which manifests are missing yet
1376 1384 msng_mnfst_set = {}
1377 1385 # Nor do we know which filenodes are missing.
1378 1386 msng_filenode_set = {}
1379 1387
1380 1388 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1381 1389 junk = None
1382 1390
1383 1391 # A changeset always belongs to itself, so the changenode lookup
1384 1392 # function for a changenode is identity.
1385 1393 def identity(x):
1386 1394 return x
1387 1395
1388 1396 # A function generating function. Sets up an environment for the
1389 1397 # inner function.
1390 1398 def cmp_by_rev_func(revlog):
1391 1399 # Compare two nodes by their revision number in the environment's
1392 1400 # revision history. Since the revision number both represents the
1393 1401 # most efficient order to read the nodes in, and represents a
1394 1402 # topological sorting of the nodes, this function is often useful.
1395 1403 def cmp_by_rev(a, b):
1396 1404 return cmp(revlog.rev(a), revlog.rev(b))
1397 1405 return cmp_by_rev
1398 1406
1399 1407 # If we determine that a particular file or manifest node must be a
1400 1408 # node that the recipient of the changegroup will already have, we can
1401 1409 # also assume the recipient will have all the parents. This function
1402 1410 # prunes them from the set of missing nodes.
1403 1411 def prune_parents(revlog, hasset, msngset):
1404 1412 haslst = hasset.keys()
1405 1413 haslst.sort(cmp_by_rev_func(revlog))
1406 1414 for node in haslst:
1407 1415 parentlst = [p for p in revlog.parents(node) if p != nullid]
1408 1416 while parentlst:
1409 1417 n = parentlst.pop()
1410 1418 if n not in hasset:
1411 1419 hasset[n] = 1
1412 1420 p = [p for p in revlog.parents(n) if p != nullid]
1413 1421 parentlst.extend(p)
1414 1422 for n in hasset:
1415 1423 msngset.pop(n, None)
1416 1424
1417 1425 # This is a function generating function used to set up an environment
1418 1426 # for the inner function to execute in.
1419 1427 def manifest_and_file_collector(changedfileset):
1420 1428 # This is an information gathering function that gathers
1421 1429 # information from each changeset node that goes out as part of
1422 1430 # the changegroup. The information gathered is a list of which
1423 1431 # manifest nodes are potentially required (the recipient may
1424 1432 # already have them) and the total list of all files which were
1425 1433 # changed in any changeset in the changegroup.
1426 1434 #
1427 1435 # We also remember the first changenode we saw any manifest
1428 1436 # referenced by so we can later determine which changenode 'owns'
1429 1437 # the manifest.
1430 1438 def collect_manifests_and_files(clnode):
1431 1439 c = cl.read(clnode)
1432 1440 for f in c[3]:
1433 1441 # This is to make sure we only have one instance of each
1434 1442 # filename string for each filename.
1435 1443 changedfileset.setdefault(f, f)
1436 1444 msng_mnfst_set.setdefault(c[0], clnode)
1437 1445 return collect_manifests_and_files
1438 1446
1439 1447 # Figure out which manifest nodes (of the ones we think might be part
1440 1448 # of the changegroup) the recipient must know about and remove them
1441 1449 # from the changegroup.
1442 1450 def prune_manifests():
1443 1451 has_mnfst_set = {}
1444 1452 for n in msng_mnfst_set:
1445 1453 # If a 'missing' manifest thinks it belongs to a changenode
1446 1454 # the recipient is assumed to have, obviously the recipient
1447 1455 # must have that manifest.
1448 1456 linknode = cl.node(mnfst.linkrev(n))
1449 1457 if linknode in has_cl_set:
1450 1458 has_mnfst_set[n] = 1
1451 1459 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1452 1460
1453 1461 # Use the information collected in collect_manifests_and_files to say
1454 1462 # which changenode any manifestnode belongs to.
1455 1463 def lookup_manifest_link(mnfstnode):
1456 1464 return msng_mnfst_set[mnfstnode]
1457 1465
1458 1466 # A function generating function that sets up the initial environment
1459 1467 # for the inner function.
1460 1468 def filenode_collector(changedfiles):
1461 1469 next_rev = [0]
1462 1470 # This gathers information from each manifestnode included in the
1463 1471 # changegroup about which filenodes the manifest node references
1464 1472 # so we can include those in the changegroup too.
1465 1473 #
1466 1474 # It also remembers which changenode each filenode belongs to. It
1467 1475 # does this by assuming a filenode belongs to the changenode
1468 1476 # that the first manifest referencing it belongs to.
1469 1477 def collect_msng_filenodes(mnfstnode):
1470 1478 r = mnfst.rev(mnfstnode)
1471 1479 if r == next_rev[0]:
1472 1480 # If the last rev we looked at was the one just previous,
1473 1481 # we only need to see a diff.
1474 1482 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1475 1483 # For each line in the delta
1476 1484 for dline in delta.splitlines():
1477 1485 # get the filename and filenode for that line
1478 1486 f, fnode = dline.split('\0')
1479 1487 fnode = bin(fnode[:40])
1480 1488 f = changedfiles.get(f, None)
1481 1489 # And if the file is in the list of files we care
1482 1490 # about.
1483 1491 if f is not None:
1484 1492 # Get the changenode this manifest belongs to
1485 1493 clnode = msng_mnfst_set[mnfstnode]
1486 1494 # Create the set of filenodes for the file if
1487 1495 # there isn't one already.
1488 1496 ndset = msng_filenode_set.setdefault(f, {})
1489 1497 # And set the filenode's changelog node to the
1490 1498 # manifest's if it hasn't been set already.
1491 1499 ndset.setdefault(fnode, clnode)
1492 1500 else:
1493 1501 # Otherwise we need a full manifest.
1494 1502 m = mnfst.read(mnfstnode)
1495 1503 # For every file we care about.
1496 1504 for f in changedfiles:
1497 1505 fnode = m.get(f, None)
1498 1506 # If it's in the manifest
1499 1507 if fnode is not None:
1500 1508 # See comments above.
1501 1509 clnode = msng_mnfst_set[mnfstnode]
1502 1510 ndset = msng_filenode_set.setdefault(f, {})
1503 1511 ndset.setdefault(fnode, clnode)
1504 1512 # Remember the revision we hope to see next.
1505 1513 next_rev[0] = r + 1
1506 1514 return collect_msng_filenodes
1507 1515
1508 1516 # We have a list of filenodes we think we need for a file, let's remove
1509 1517 # all those we know the recipient must have.
1510 1518 def prune_filenodes(f, filerevlog):
1511 1519 msngset = msng_filenode_set[f]
1512 1520 hasset = {}
1513 1521 # If a 'missing' filenode thinks it belongs to a changenode we
1514 1522 # assume the recipient must have, then the recipient must have
1515 1523 # that filenode.
1516 1524 for n in msngset:
1517 1525 clnode = cl.node(filerevlog.linkrev(n))
1518 1526 if clnode in has_cl_set:
1519 1527 hasset[n] = 1
1520 1528 prune_parents(filerevlog, hasset, msngset)
1521 1529
1522 1530 # A function generating function that sets up a context for the
1523 1531 # inner function.
1524 1532 def lookup_filenode_link_func(fname):
1525 1533 msngset = msng_filenode_set[fname]
1526 1534 # Lookup the changenode the filenode belongs to.
1527 1535 def lookup_filenode_link(fnode):
1528 1536 return msngset[fnode]
1529 1537 return lookup_filenode_link
1530 1538
1531 1539 # Now that we have all these utility functions to help out and
1532 1540 # logically divide up the task, generate the group.
1533 1541 def gengroup():
1534 1542 # The set of changed files starts empty.
1535 1543 changedfiles = {}
1536 1544 # Create a changenode group generator that will call our functions
1537 1545 # back to lookup the owning changenode and collect information.
1538 1546 group = cl.group(msng_cl_lst, identity,
1539 1547 manifest_and_file_collector(changedfiles))
1540 1548 for chnk in group:
1541 1549 yield chnk
1542 1550
1543 1551 # The list of manifests has been collected by the generator
1544 1552 # calling our functions back.
1545 1553 prune_manifests()
1546 1554 msng_mnfst_lst = msng_mnfst_set.keys()
1547 1555 # Sort the manifestnodes by revision number.
1548 1556 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1549 1557 # Create a generator for the manifestnodes that calls our lookup
1550 1558 # and data collection functions back.
1551 1559 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1552 1560 filenode_collector(changedfiles))
1553 1561 for chnk in group:
1554 1562 yield chnk
1555 1563
1556 1564 # These are no longer needed, dereference and toss the memory for
1557 1565 # them.
1558 1566 msng_mnfst_lst = None
1559 1567 msng_mnfst_set.clear()
1560 1568
1561 1569 changedfiles = changedfiles.keys()
1562 1570 changedfiles.sort()
1563 1571 # Go through all our files in order sorted by name.
1564 1572 for fname in changedfiles:
1565 1573 filerevlog = self.file(fname)
1566 1574 # Toss out the filenodes that the recipient isn't really
1567 1575 # missing.
1568 1576 if msng_filenode_set.has_key(fname):
1569 1577 prune_filenodes(fname, filerevlog)
1570 1578 msng_filenode_lst = msng_filenode_set[fname].keys()
1571 1579 else:
1572 1580 msng_filenode_lst = []
1573 1581 # If any filenodes are left, generate the group for them,
1574 1582 # otherwise don't bother.
1575 1583 if len(msng_filenode_lst) > 0:
1576 1584 yield changegroup.genchunk(fname)
1577 1585 # Sort the filenodes by their revision #
1578 1586 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1579 1587 # Create a group generator and only pass in a changenode
1580 1588 # lookup function, since we don't need to collect any
1581 1589 # information from the filenodes themselves.
1582 1590 group = filerevlog.group(msng_filenode_lst,
1583 1591 lookup_filenode_link_func(fname))
1584 1592 for chnk in group:
1585 1593 yield chnk
1586 1594 if msng_filenode_set.has_key(fname):
1587 1595 # Don't need this anymore; toss it to free memory.
1588 1596 del msng_filenode_set[fname]
1589 1597 # Signal that no more groups are left.
1590 1598 yield changegroup.closechunk()
1591 1599
1592 1600 if msng_cl_lst:
1593 1601 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1594 1602
1595 1603 return util.chunkbuffer(gengroup())
1596 1604
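For readers unfamiliar with the stream these generators emit: each chunk produced by changegroup.genchunk appears to be a 4-byte big-endian length (which counts the length field itself) followed by the payload, and changegroup.closechunk emits a zero length to end a group. A hedged sketch of that framing, under the assumption that this description of the format is right:

    import struct

    # Sketch of the assumed chunk framing (mirroring what
    # genchunk/closechunk produce): a 4-byte big-endian length that
    # includes itself, then the payload.
    def frame(data):
        return struct.pack(">l", len(data) + 4) + data

    # A zero-length header marks the end of a group.
    def close():
        return struct.pack(">l", 0)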
1597 1605 def changegroup(self, basenodes, source):
1598 1606 """Generate a changegroup of all nodes that we have that a recipient
1599 1607 doesn't.
1600 1608
1601 1609 This is much easier than the previous function as we can assume that
1602 1610 the recipient has any changenode we aren't sending them."""
1603 1611
1604 1612 self.hook('preoutgoing', throw=True, source=source)
1605 1613
1606 1614 cl = self.changelog
1607 1615 nodes = cl.nodesbetween(basenodes, None)[0]
1608 1616 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1609 1617
1610 1618 def identity(x):
1611 1619 return x
1612 1620
1613 1621 def gennodelst(revlog):
1614 1622 for r in xrange(0, revlog.count()):
1615 1623 n = revlog.node(r)
1616 1624 if revlog.linkrev(n) in revset:
1617 1625 yield n
1618 1626
1619 1627 def changed_file_collector(changedfileset):
1620 1628 def collect_changed_files(clnode):
1621 1629 c = cl.read(clnode)
1622 1630 for fname in c[3]:
1623 1631 changedfileset[fname] = 1
1624 1632 return collect_changed_files
1625 1633
1626 1634 def lookuprevlink_func(revlog):
1627 1635 def lookuprevlink(n):
1628 1636 return cl.node(revlog.linkrev(n))
1629 1637 return lookuprevlink
1630 1638
1631 1639 def gengroup():
1632 1640 # construct a list of all changed files
1633 1641 changedfiles = {}
1634 1642
1635 1643 for chnk in cl.group(nodes, identity,
1636 1644 changed_file_collector(changedfiles)):
1637 1645 yield chnk
1638 1646 changedfiles = changedfiles.keys()
1639 1647 changedfiles.sort()
1640 1648
1641 1649 mnfst = self.manifest
1642 1650 nodeiter = gennodelst(mnfst)
1643 1651 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1644 1652 yield chnk
1645 1653
1646 1654 for fname in changedfiles:
1647 1655 filerevlog = self.file(fname)
1648 1656 nodeiter = gennodelst(filerevlog)
1649 1657 nodeiter = list(nodeiter)
1650 1658 if nodeiter:
1651 1659 yield changegroup.genchunk(fname)
1652 1660 lookup = lookuprevlink_func(filerevlog)
1653 1661 for chnk in filerevlog.group(nodeiter, lookup):
1654 1662 yield chnk
1655 1663
1656 1664 yield changegroup.closechunk()
1657 1665
1658 1666 if nodes:
1659 1667 self.hook('outgoing', node=hex(nodes[0]), source=source)
1660 1668
1661 1669 return util.chunkbuffer(gengroup())
1662 1670
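Both changegroup and changegroupsubset hand back a util.chunkbuffer, so a caller can drain it with read() to, say, spool the group to disk. A hedged usage sketch; 'repo' and 'base' are assumed to exist and the filename is illustrative:

    # Illustrative only: spool a changegroup to a file.
    cg = repo.changegroup(base, 'bundle')
    f = open('changes.cg', 'wb')
    try:
        while 1:
            chunk = cg.read(4096)
            if not chunk:
                break
            f.write(chunk)
    finally:
        f.close()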
1663 1671 def addchangegroup(self, source, srctype, url):
1664 1672 """add changegroup to repo.
1665 1673 returns number of heads modified or added + 1."""
1666 1674
1667 1675 def csmap(x):
1668 1676 self.ui.debug(_("add changeset %s\n") % short(x))
1669 1677 return cl.count()
1670 1678
1671 1679 def revmap(x):
1672 1680 return cl.rev(x)
1673 1681
1674 1682 if not source:
1675 1683 return 0
1676 1684
1677 1685 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1678 1686
1679 1687 changesets = files = revisions = 0
1680 1688
1681 1689 tr = self.transaction()
1682 1690
1683 1691 # write changelog data to temp files so concurrent readers will not
1684 1692 # see an inconsistent view
1685 1693 cl = None
1686 1694 try:
1687 1695 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1688 1696
1689 1697 oldheads = len(cl.heads())
1690 1698
1691 1699 # pull off the changeset group
1692 1700 self.ui.status(_("adding changesets\n"))
1693 1701 cor = cl.count() - 1
1694 1702 chunkiter = changegroup.chunkiter(source)
1695 1703 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1696 1704 raise util.Abort(_("received changelog group is empty"))
1697 1705 cnr = cl.count() - 1
1698 1706 changesets = cnr - cor
1699 1707
1700 1708 # pull off the manifest group
1701 1709 self.ui.status(_("adding manifests\n"))
1702 1710 chunkiter = changegroup.chunkiter(source)
1703 1711 # no need to check for empty manifest group here:
1704 1712 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1705 1713 # no new manifest will be created and the manifest group will
1706 1714 # be empty during the pull
1707 1715 self.manifest.addgroup(chunkiter, revmap, tr)
1708 1716
1709 1717 # process the files
1710 1718 self.ui.status(_("adding file changes\n"))
1711 1719 while 1:
1712 1720 f = changegroup.getchunk(source)
1713 1721 if not f:
1714 1722 break
1715 1723 self.ui.debug(_("adding %s revisions\n") % f)
1716 1724 fl = self.file(f)
1717 1725 o = fl.count()
1718 1726 chunkiter = changegroup.chunkiter(source)
1719 1727 if fl.addgroup(chunkiter, revmap, tr) is None:
1720 1728 raise util.Abort(_("received file revlog group is empty"))
1721 1729 revisions += fl.count() - o
1722 1730 files += 1
1723 1731
1724 1732 cl.writedata()
1725 1733 finally:
1726 1734 if cl:
1727 1735 cl.cleanup()
1728 1736
1729 1737 # make changelog see real files again
1730 1738 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1731 1739 self.changelog.checkinlinesize(tr)
1732 1740
1733 1741 newheads = len(self.changelog.heads())
1734 1742 heads = ""
1735 1743 if oldheads and newheads != oldheads:
1736 1744 heads = _(" (%+d heads)") % (newheads - oldheads)
1737 1745
1738 1746 self.ui.status(_("added %d changesets"
1739 1747 " with %d changes to %d files%s\n")
1740 1748 % (changesets, revisions, files, heads))
1741 1749
1742 1750 if changesets > 0:
1743 1751 self.hook('pretxnchangegroup', throw=True,
1744 1752 node=hex(self.changelog.node(cor+1)), source=srctype,
1745 1753 url=url)
1746 1754
1747 1755 tr.close()
1748 1756
1749 1757 if changesets > 0:
1750 1758 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1751 1759 source=srctype, url=url)
1752 1760
1753 1761 for i in xrange(cor + 1, cnr + 1):
1754 1762 self.hook("incoming", node=hex(self.changelog.node(i)),
1755 1763 source=srctype, url=url)
1756 1764
1757 1765 return newheads - oldheads + 1
1758 1766
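A hedged sketch of how a pull-side caller might interpret that return value: roughly, 0 for an empty source, 1 when changesets arrived without changing the head count, and greater than 1 when new heads appeared. The names 'fp' and the URL here are assumptions, not part of this module:

    # Illustrative only: reacting to addchangegroup's return value.
    modheads = repo.addchangegroup(fp, 'pull', 'http://example.com/repo')
    if modheads == 0:
        repo.ui.status('no changes found\n')
    elif modheads > 1:
        repo.ui.status('(repository now has multiple heads)\n')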
1759 1767
1760 1768 def stream_in(self, remote):
1761 1769 fp = remote.stream_out()
1762 1770 resp = int(fp.readline())
1763 1771 if resp != 0:
1764 1772 raise util.Abort(_('operation forbidden by server'))
1765 1773 self.ui.status(_('streaming all changes\n'))
1766 1774 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1767 1775 self.ui.status(_('%d files to transfer, %s of data\n') %
1768 1776 (total_files, util.bytecount(total_bytes)))
1769 1777 start = time.time()
1770 1778 for i in xrange(total_files):
1771 1779 name, size = fp.readline().split('\0', 1)
1772 1780 size = int(size)
1773 1781 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1774 1782 ofp = self.opener(name, 'w')
1775 1783 for chunk in util.filechunkiter(fp, limit=size):
1776 1784 ofp.write(chunk)
1777 1785 ofp.close()
1778 1786 elapsed = time.time() - start
1779 1787 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1780 1788 (util.bytecount(total_bytes), elapsed,
1781 1789 util.bytecount(total_bytes / elapsed)))
1782 1790 self.reload()
1783 1791 return len(self.heads()) + 1
1784 1792
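The wire format stream_in consumes appears to be: a status line ('0' for success), a line carrying the file count and total byte count, then for each file a 'name\0size' header line followed by exactly size bytes of raw store data. A minimal server-side sketch under that assumption (emit_stream and entries are hypothetical names):

    # Illustrative only: producing the stream that stream_in consumes.
    def emit_stream(fp, entries):
        # entries: assumed list of (name, data) pairs for files under .hg
        fp.write('0\n')                          # status: 0 == allowed
        total = sum([len(d) for n, d in entries])
        fp.write('%d %d\n' % (len(entries), total))
        for name, data in entries:
            fp.write('%s\0%d\n' % (name, len(data)))  # per-file header
            fp.write(data)                            # raw bytes follow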
1785 1793 def clone(self, remote, heads=[], stream=False):
1786 1794 '''clone remote repository.
1787 1795
1788 1796 keyword arguments:
1789 1797 heads: list of revs to clone (forces use of pull)
1790 1798 stream: use streaming clone if possible'''
1791 1799
1792 1800 # now, all clients that can request uncompressed clones can
1793 1801 # read repo formats supported by all servers that can serve
1794 1802 # them.
1795 1803
1796 1804 # if revlog format changes, client will have to check version
1797 1805 # and format flags on "stream" capability, and use
1798 1806 # uncompressed only if compatible.
1799 1807
1800 1808 if stream and not heads and remote.capable('stream'):
1801 1809 return self.stream_in(remote)
1802 1810 return self.pull(remote, heads)
1803 1811
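The branching logic in clone() reduces to a single predicate; restated in isolation for emphasis (illustrative, not part of the module):

    # Streaming is only attempted when the caller didn't pin specific
    # heads and the server advertises the 'stream' capability;
    # otherwise clone() degrades to an ordinary pull.
    use_stream = stream and not heads and remote.capable('stream')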
1804 1812 # used to avoid circular references so destructors work
1805 1813 def aftertrans(base):
1806 1814 p = base
1807 1815 def a():
1808 1816 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1809 1817 util.rename(os.path.join(p, "journal.dirstate"),
1810 1818 os.path.join(p, "undo.dirstate"))
1811 1819 return a
1812 1820
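A hedged sketch of how aftertrans is presumably wired up elsewhere in this class: the returned callable is handed to the transaction as its after-close hook, so a successfully closed transaction renames the journal files into the undo files that rollback reads. The exact constructor call shown is an assumption about the surrounding code:

    # Illustrative only: plausible use of aftertrans inside the repo.
    tr = transaction.transaction(self.ui.warn, self.opener,
                                 self.join("journal"),
                                 aftertrans(self.path))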
1813 1821 def instance(ui, path, create):
1814 1822 return localrepository(ui, util.drop_scheme('file', path), create)
1815 1823
1816 1824 def islocal(path):
1817 1825 return True