##// END OF EJS Templates
rawcommit: add removed files to the changelog file list...
Alexis S. L. Carvalho -
r3377:9fe62e2d default
parent child Browse files
Show More
@@ -1,1760 +1,1763 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
    def __del__(self):
        # Drop the transaction handle at teardown so a pending
        # transaction object does not outlive the repository.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at *path*.

        If no path is given, search upward from the current directory
        for a .hg directory.  Raises repo.RepoError if the repository
        is missing (or, with create=1, already exists)."""
        repo.repository.__init__(self)
        if not path:
            # walk up the directory tree looking for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener for files under .hg, wopener for the working directory
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a missing per-repo hgrc is fine
            pass

        # determine the revlog format version and flags from hgrc
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on.  If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches (see tags()/nodetags()/wread()/wwrite())
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 88
89 89 def url(self):
90 90 return 'file:' + self.root
91 91
    def hook(self, name, throw=False, **args):
        """Run all configured hooks for event *name*.

        Hooks come from the [hooks] config section.  'python:mod.func'
        entries are called in-process; anything else is run as a shell
        command with the keyword args exported as HG_* environment
        variables.  Returns the combined (OR-ed) failure status; with
        throw=True a failing hook raises util.Abort instead."""
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # user interrupts always propagate
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external hook command with HG_* env vars set
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # collect hooks whose name (before any '.suffix') matches
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
172 172
    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse characters that would corrupt the tags file format
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags live outside history, in .hg/localtags
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to clobber uncommitted .hgtags edits; check the
        # modified/added/removed/deleted/unknown lists from status()
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
215 215
216 216 def tags(self):
217 217 '''return a mapping of tag to node'''
218 218 if not self.tagscache:
219 219 self.tagscache = {}
220 220
221 221 def parsetag(line, context):
222 222 if not line:
223 223 return
224 224 s = l.split(" ", 1)
225 225 if len(s) != 2:
226 226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 227 return
228 228 node, key = s
229 229 key = key.strip()
230 230 try:
231 231 bin_n = bin(node)
232 232 except TypeError:
233 233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 234 (context, node))
235 235 return
236 236 if bin_n not in self.changelog.nodemap:
237 237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 238 (context, key))
239 239 return
240 240 self.tagscache[key] = bin_n
241 241
242 242 # read the tags file from each head, ending with the tip,
243 243 # and add each tag found to the map, with "newer" ones
244 244 # taking precedence
245 245 heads = self.heads()
246 246 heads.reverse()
247 247 fl = self.file(".hgtags")
248 248 for node in heads:
249 249 change = self.changelog.read(node)
250 250 rev = self.changelog.rev(node)
251 251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 252 if fn is None: continue
253 253 count = 0
254 254 for l in fl.read(fn).splitlines():
255 255 count += 1
256 256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 257 (rev, short(node), count))
258 258 try:
259 259 f = self.opener("localtags")
260 260 count = 0
261 261 for l in f:
262 262 count += 1
263 263 parsetag(l, _("localtags, line %d") % count)
264 264 except IOError:
265 265 pass
266 266
267 267 self.tagscache['tip'] = self.changelog.tip()
268 268
269 269 return self.tagscache
270 270
271 271 def tagslist(self):
272 272 '''return a list of tags ordered by revision'''
273 273 l = []
274 274 for t, n in self.tags().items():
275 275 try:
276 276 r = self.changelog.rev(n)
277 277 except:
278 278 r = -2 # sort to the beginning of the list if unknown
279 279 l.append((r, t, n))
280 280 l.sort()
281 281 return [(t, n) for r, t, n in l]
282 282
283 283 def nodetags(self, node):
284 284 '''return the tags associated with a node'''
285 285 if not self.nodetagscache:
286 286 self.nodetagscache = {}
287 287 for t, n in self.tags().items():
288 288 self.nodetagscache.setdefault(n, []).append(t)
289 289 return self.nodetagscache.get(node, [])
290 290
    def lookup(self, key):
        """Resolve *key* (a tag, or anything changelog.lookup accepts)
        to a binary node.

        '.' means the first parent of the working directory.  Raises
        repo.RepoError if nothing is checked out or the key is unknown."""
        try:
            return self.tags()[key]
        except KeyError:
            if key == '.':
                key = self.dirstate.parents()[0]
                if key == nullid:
                    raise repo.RepoError(_("no revision checked out"))
            try:
                return self.changelog.lookup(key)
            except:
                # NOTE(review): bare except converts *any* lookup error
                # into RepoError, including programming errors
                raise repo.RepoError(_("unknown revision '%s'") % key)
303 303
    def dev(self):
        # device number of the .hg directory, via lstat
        return os.lstat(self.path).st_dev
306 306
    def local(self):
        # this is a local, on-disk repository
        return True
309 309
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
312 312
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
315 315
    def file(self, f):
        # return the filelog for path f (one leading '/' is stripped)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)
320 320
    def changectx(self, changeid=None):
        # return a changeset context for changeid
        return context.changectx(self, changeid)
323 323
    def workingctx(self):
        # return a context for the working directory
        return context.workingctx(self)
326 326
327 327 def parents(self, changeid=None):
328 328 '''
329 329 get list of changectxs for parents of changeid or working directory
330 330 '''
331 331 if changeid is None:
332 332 pl = self.dirstate.parents()
333 333 else:
334 334 n = self.changelog.lookup(changeid)
335 335 pl = self.changelog.parents(n)
336 336 if pl[1] == nullid:
337 337 return [self.changectx(pl[0])]
338 338 return [self.changectx(pl[0]), self.changectx(pl[1])]
339 339
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
344 344
    def getcwd(self):
        # delegate to the dirstate's notion of the current directory
        return self.dirstate.getcwd()
347 347
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
350 350
    def wread(self, filename):
        """Read *filename* from the working directory, applying the
        first matching [encode] filter from the configuration."""
        if self.encodepats == None:
            # compile and cache the [encode] pattern matchers once
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break  # only the first matching filter applies

        return data
368 368
    def wwrite(self, filename, data, fd=None):
        """Write *data* to *filename* in the working directory (or to
        the already-open file *fd*), applying the first matching
        [decode] filter from the configuration."""
        if self.decodepats == None:
            # compile and cache the [decode] pattern matchers once
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break  # only the first matching filter applies

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
386 386
    def transaction(self):
        """Return a transaction for writing to the store.

        If a transaction is already running, return a nested handle on
        it instead of starting a new one."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
404 404
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back."""
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
415 415
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the undo journal,
        restoring the saved dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
428 428
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
431 431
    def reload(self):
        """Re-read changelog and manifest and drop the tag caches."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
437 437
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file and return the lock object.

        With wait true, retry with a timeout when the lock is held;
        acquirefn (if given) runs once the lock is held, releasefn on
        release."""
        try:
            # first try without waiting
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
454 454
    def lock(self, wait=1):
        # repository (store) lock; reloads changelog/manifest on acquire
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
458 458
    def wlock(self, wait=1):
        # working-directory lock; dirstate is written on release and
        # re-read on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
463 463
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node (or the existing one when the file
        is unmodified).  Appends fn to changelist when a new revision
        is actually added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file was copied/renamed: record copy source and the
            # source revision in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
503 503
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit *files* with the given metadata and explicit parents,
        bypassing the usual working-directory status checks."""
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []
        removed = []

        # only move the dirstate forward when committing on top of the
        # current first parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                try:
                    # file missing from the working dir: treat as removed
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                    removed.append(f)
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        # removed files go into the changelog file list too
        n = self.changelog.add(mnode, changed + removed, text,
                               tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
543 546
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit outstanding changes and return the new changeset node,
        or None when nothing changed or the commit message came back
        empty from the editor."""
        commit = []     # files to check in
        remove = []     # files to drop from the manifest
        changed = []    # files that actually changed (set by filecommit)

        if files:
            # explicit file list: classify each file by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) may legitimately commit with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # no message given (or editor explicitly requested): build
            # a template listing the changes and run the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # move the working directory onto the new changeset
        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
647 650
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for files matched by *match*:
        from the manifest of *node* when given (source 'm', or 'b' for
        badmatch hits), otherwise from a dirstate walk of the working
        directory."""
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe: break immediately after mutating fdict
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
669 672
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean); ignored/clean are only populated when requested.
        """

        def fcmp(fn, mf):
            # compare working-dir contents of fn against its manifest entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    # best effort: status works without the lock too
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # mark as clean so it is not re-checked
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
766 769
    def add(self, list, wlock=None):
        """Schedule the given files for addition at the next commit;
        missing files, non-regular files and already-tracked files are
        skipped with a warning."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
781 784
782 785 def forget(self, list, wlock=None):
783 786 if not wlock:
784 787 wlock = self.wlock()
785 788 for f in list:
786 789 if self.dirstate.state(f) not in 'ai':
787 790 self.ui.warn(_("%s not added!\n") % f)
788 791 else:
789 792 self.dirstate.forget([f])
790 793
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit; with
        unlink=True delete them from the working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
811 814
812 815 def undelete(self, list, wlock=None):
813 816 p = self.dirstate.parents()[0]
814 817 mn = self.changelog.read(p)[0]
815 818 m = self.manifest.read(mn)
816 819 if not wlock:
817 820 wlock = self.wlock()
818 821 for f in list:
819 822 if self.dirstate.state(f) not in "r":
820 823 self.ui.warn("%s not removed!\n" % f)
821 824 else:
822 825 t = self.file(f).read(m[f])
823 826 self.wwrite(f, t)
824 827 util.set_exec(self.wjoin(f), m.execf(f))
825 828 self.dirstate.update([f], "n")
826 829
    def copy(self, source, dest, wlock=None):
        """Record *dest* as a copy of *source* in the dirstate; *dest*
        must already exist as a regular file in the working directory."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
839 842
840 843 def heads(self, start=None):
841 844 heads = self.changelog.heads(start)
842 845 # sort the output in rev descending order
843 846 heads = [(-self.changelog.rev(h), h) for h in heads]
844 847 heads.sort()
845 848 return [n for (r, n) in heads]
846 849
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a pending second-parent walk with its found list
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # this tagged node is visible from everything
                        # found so far on this walk
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent with a copy of found
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from node (memoized)
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
952 955
953 956 def branches(self, nodes):
954 957 if not nodes:
955 958 nodes = [self.changelog.tip()]
956 959 b = []
957 960 for n in nodes:
958 961 t = n
959 962 while 1:
960 963 p = self.changelog.parents(n)
961 964 if p[1] != nullid or p[0] == nullid:
962 965 b.append((t, n, p[0], p[1]))
963 966 break
964 967 n = p[0]
965 968 return b
966 969
967 970 def between(self, pairs):
968 971 r = []
969 972
970 973 for top, bottom in pairs:
971 974 n, l, i = top, [], 0
972 975 f = 1
973 976
974 977 while n != bottom:
975 978 p = self.changelog.parents(n)[0]
976 979 if i == f:
977 980 l.append(n)
978 981 f = f * 2
979 982 n = p
980 983 i += 1
981 984
982 985 r.append(l)
983 986
984 987 return r
985 988
986 989 def findincoming(self, remote, base=None, heads=None, force=False):
987 990 """Return list of roots of the subsets of missing nodes from remote
988 991
989 992 If base dict is specified, assume that these nodes and their parents
990 993 exist on the remote side and that no child of a node of base exists
991 994 in both remote and self.
992 995 Furthermore base will be updated to include the nodes that exists
993 996 in self and remote but no children exists in self and remote.
994 997 If a list of heads is specified, return only nodes which are heads
995 998 or ancestors of these heads.
996 999
997 1000 All the ancestors of base are in self and in remote.
998 1001 All the descendants of the list returned are missing in self.
999 1002 (and so we know that the rest of the nodes are missing in remote, see
1000 1003 outgoing)
1001 1004 """
1002 1005 m = self.changelog.nodemap
1003 1006 search = []
1004 1007 fetch = {}
1005 1008 seen = {}
1006 1009 seenbranch = {}
1007 1010 if base == None:
1008 1011 base = {}
1009 1012
1010 1013 if not heads:
1011 1014 heads = remote.heads()
1012 1015
1013 1016 if self.changelog.tip() == nullid:
1014 1017 base[nullid] = 1
1015 1018 if heads != [nullid]:
1016 1019 return [nullid]
1017 1020 return []
1018 1021
1019 1022 # assume we're closer to the tip than the root
1020 1023 # and start by examining the heads
1021 1024 self.ui.status(_("searching for changes\n"))
1022 1025
1023 1026 unknown = []
1024 1027 for h in heads:
1025 1028 if h not in m:
1026 1029 unknown.append(h)
1027 1030 else:
1028 1031 base[h] = 1
1029 1032
1030 1033 if not unknown:
1031 1034 return []
1032 1035
1033 1036 req = dict.fromkeys(unknown)
1034 1037 reqcnt = 0
1035 1038
1036 1039 # search through remote branches
1037 1040 # a 'branch' here is a linear segment of history, with four parts:
1038 1041 # head, root, first parent, second parent
1039 1042 # (a branch always has two parents (or none) by definition)
1040 1043 unknown = remote.branches(unknown)
1041 1044 while unknown:
1042 1045 r = []
1043 1046 while unknown:
1044 1047 n = unknown.pop(0)
1045 1048 if n[0] in seen:
1046 1049 continue
1047 1050
1048 1051 self.ui.debug(_("examining %s:%s\n")
1049 1052 % (short(n[0]), short(n[1])))
1050 1053 if n[0] == nullid: # found the end of the branch
1051 1054 pass
1052 1055 elif n in seenbranch:
1053 1056 self.ui.debug(_("branch already found\n"))
1054 1057 continue
1055 1058 elif n[1] and n[1] in m: # do we know the base?
1056 1059 self.ui.debug(_("found incomplete branch %s:%s\n")
1057 1060 % (short(n[0]), short(n[1])))
1058 1061 search.append(n) # schedule branch range for scanning
1059 1062 seenbranch[n] = 1
1060 1063 else:
1061 1064 if n[1] not in seen and n[1] not in fetch:
1062 1065 if n[2] in m and n[3] in m:
1063 1066 self.ui.debug(_("found new changeset %s\n") %
1064 1067 short(n[1]))
1065 1068 fetch[n[1]] = 1 # earliest unknown
1066 1069 for p in n[2:4]:
1067 1070 if p in m:
1068 1071 base[p] = 1 # latest known
1069 1072
1070 1073 for p in n[2:4]:
1071 1074 if p not in req and p not in m:
1072 1075 r.append(p)
1073 1076 req[p] = 1
1074 1077 seen[n[0]] = 1
1075 1078
1076 1079 if r:
1077 1080 reqcnt += 1
1078 1081 self.ui.debug(_("request %d: %s\n") %
1079 1082 (reqcnt, " ".join(map(short, r))))
1080 1083 for p in range(0, len(r), 10):
1081 1084 for b in remote.branches(r[p:p+10]):
1082 1085 self.ui.debug(_("received %s:%s\n") %
1083 1086 (short(b[0]), short(b[1])))
1084 1087 unknown.append(b)
1085 1088
1086 1089 # do binary search on the branches we found
1087 1090 while search:
1088 1091 n = search.pop(0)
1089 1092 reqcnt += 1
1090 1093 l = remote.between([(n[0], n[1])])[0]
1091 1094 l.append(n[1])
1092 1095 p = n[0]
1093 1096 f = 1
1094 1097 for i in l:
1095 1098 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1096 1099 if i in m:
1097 1100 if f <= 2:
1098 1101 self.ui.debug(_("found new branch changeset %s\n") %
1099 1102 short(p))
1100 1103 fetch[p] = 1
1101 1104 base[i] = 1
1102 1105 else:
1103 1106 self.ui.debug(_("narrowed branch search to %s:%s\n")
1104 1107 % (short(p), short(i)))
1105 1108 search.append((p, i))
1106 1109 break
1107 1110 p, f = i, f * 2
1108 1111
1109 1112 # sanity check our fetch list
1110 1113 for f in fetch.keys():
1111 1114 if f in m:
1112 1115 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1113 1116
1114 1117 if base.keys() == [nullid]:
1115 1118 if force:
1116 1119 self.ui.warn(_("warning: repository is unrelated\n"))
1117 1120 else:
1118 1121 raise util.Abort(_("repository is unrelated"))
1119 1122
1120 1123 self.ui.debug(_("found new changesets starting at ") +
1121 1124 " ".join([short(f) for f in fetch]) + "\n")
1122 1125
1123 1126 self.ui.debug(_("%d total queries\n") % reqcnt)
1124 1127
1125 1128 return fetch.keys()
1126 1129
1127 1130 def findoutgoing(self, remote, base=None, heads=None, force=False):
1128 1131 """Return list of nodes that are roots of subsets not in remote
1129 1132
1130 1133 If base dict is specified, assume that these nodes and their parents
1131 1134 exist on the remote side.
1132 1135 If a list of heads is specified, return only nodes which are heads
1133 1136 or ancestors of these heads, and return a second element which
1134 1137 contains all remote heads which get new children.
1135 1138 """
1136 1139 if base == None:
1137 1140 base = {}
1138 1141 self.findincoming(remote, base, heads, force=force)
1139 1142
1140 1143 self.ui.debug(_("common changesets up to ")
1141 1144 + " ".join(map(short, base.keys())) + "\n")
1142 1145
1143 1146 remain = dict.fromkeys(self.changelog.nodemap)
1144 1147
1145 1148 # prune everything remote has from the tree
1146 1149 del remain[nullid]
1147 1150 remove = base.keys()
1148 1151 while remove:
1149 1152 n = remove.pop(0)
1150 1153 if n in remain:
1151 1154 del remain[n]
1152 1155 for p in self.changelog.parents(n):
1153 1156 remove.append(p)
1154 1157
1155 1158 # find every node whose parents have been pruned
1156 1159 subset = []
1157 1160 # find every remote head that will get new children
1158 1161 updated_heads = {}
1159 1162 for n in remain:
1160 1163 p1, p2 = self.changelog.parents(n)
1161 1164 if p1 not in remain and p2 not in remain:
1162 1165 subset.append(n)
1163 1166 if heads:
1164 1167 if p1 in heads:
1165 1168 updated_heads[p1] = True
1166 1169 if p2 in heads:
1167 1170 updated_heads[p2] = True
1168 1171
1169 1172 # this is the set of all roots we have to push
1170 1173 if heads:
1171 1174 return subset, updated_heads.keys()
1172 1175 else:
1173 1176 return subset
1174 1177
1175 1178 def pull(self, remote, heads=None, force=False, lock=None):
1176 1179 mylock = False
1177 1180 if not lock:
1178 1181 lock = self.lock()
1179 1182 mylock = True
1180 1183
1181 1184 try:
1182 1185 fetch = self.findincoming(remote, force=force)
1183 1186 if fetch == [nullid]:
1184 1187 self.ui.status(_("requesting all changes\n"))
1185 1188
1186 1189 if not fetch:
1187 1190 self.ui.status(_("no changes found\n"))
1188 1191 return 0
1189 1192
1190 1193 if heads is None:
1191 1194 cg = remote.changegroup(fetch, 'pull')
1192 1195 else:
1193 1196 cg = remote.changegroupsubset(fetch, heads, 'pull')
1194 1197 return self.addchangegroup(cg, 'pull', remote.url())
1195 1198 finally:
1196 1199 if mylock:
1197 1200 lock.release()
1198 1201
1199 1202 def push(self, remote, force=False, revs=None):
1200 1203 # there are two ways to push to remote repo:
1201 1204 #
1202 1205 # addchangegroup assumes local user can lock remote
1203 1206 # repo (local filesystem, old ssh servers).
1204 1207 #
1205 1208 # unbundle assumes local user cannot lock remote repo (new ssh
1206 1209 # servers, http servers).
1207 1210
1208 1211 if remote.capable('unbundle'):
1209 1212 return self.push_unbundle(remote, force, revs)
1210 1213 return self.push_addchangegroup(remote, force, revs)
1211 1214
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or
        (None, status) when nothing needs pushing or the push must be
        refused (unsynced remote changes, or the push would create new
        remote heads without force).
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills base with the nodes common to both sides
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to the requested revisions
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1246 1249
1247 1250 def push_addchangegroup(self, remote, force, revs):
1248 1251 lock = remote.lock()
1249 1252
1250 1253 ret = self.prepush(remote, force, revs)
1251 1254 if ret[0] is not None:
1252 1255 cg, remote_heads = ret
1253 1256 return remote.addchangegroup(cg, 'push', self.url())
1254 1257 return ret[1]
1255 1258
1256 1259 def push_unbundle(self, remote, force, revs):
1257 1260 # local repo finds heads on server, finds out what revs it
1258 1261 # must push. once revs transferred, if server finds it has
1259 1262 # different heads (someone else won commit/push race), server
1260 1263 # aborts.
1261 1264
1262 1265 ret = self.prepush(remote, force, revs)
1263 1266 if ret[0] is not None:
1264 1267 cg, remote_heads = ret
1265 1268 if force: remote_heads = ['force']
1266 1269 return remote.unbundle(cg, remote_heads, 'push')
1267 1270 return ret[1]
1268 1271
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog nodes; source is an opaque
        tag ('push', 'pull', ...) passed through to the hooks.  Returns a
        util.chunkbuffer wrapping the generated stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1539 1542
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes is a list of changelog nodes the recipient already has;
        source is an opaque tag passed through to the hooks.  Returns a
        util.chunkbuffer wrapping the generated stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # everything after the bases goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the outgoing revision numbers, for linkrev tests
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changeset looks itself up
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node back to its owning changelog node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # an empty chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1605 1608
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a chunked changegroup stream (see the changegroup
        module); srctype ('push', 'pull', ...) and url are passed
        through to the hooks.
        """

        # progress callback for the changelog: report the node, then
        # return the rev the next changeset will get (its linkrev)
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # map a changelog node to its rev; used as the linkrev lookup
        # for manifest and file revlogs
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1  # rev of the old tip
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1  # rev of the new tip
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the buffered changelog data to the real files
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # give hooks a chance to veto before the transaction commits
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook call per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1701 1704
1702 1705
1703 1706 def stream_in(self, remote):
1704 1707 fp = remote.stream_out()
1705 1708 resp = int(fp.readline())
1706 1709 if resp != 0:
1707 1710 raise util.Abort(_('operation forbidden by server'))
1708 1711 self.ui.status(_('streaming all changes\n'))
1709 1712 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1710 1713 self.ui.status(_('%d files to transfer, %s of data\n') %
1711 1714 (total_files, util.bytecount(total_bytes)))
1712 1715 start = time.time()
1713 1716 for i in xrange(total_files):
1714 1717 name, size = fp.readline().split('\0', 1)
1715 1718 size = int(size)
1716 1719 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1717 1720 ofp = self.opener(name, 'w')
1718 1721 for chunk in util.filechunkiter(fp, limit=size):
1719 1722 ofp.write(chunk)
1720 1723 ofp.close()
1721 1724 elapsed = time.time() - start
1722 1725 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1723 1726 (util.bytecount(total_bytes), elapsed,
1724 1727 util.bytecount(total_bytes / elapsed)))
1725 1728 self.reload()
1726 1729 return len(self.heads()) + 1
1727 1730
1728 1731 def clone(self, remote, heads=[], stream=False):
1729 1732 '''clone remote repository.
1730 1733
1731 1734 keyword arguments:
1732 1735 heads: list of revs to clone (forces use of pull)
1733 1736 stream: use streaming clone if possible'''
1734 1737
1735 1738 # now, all clients that can request uncompressed clones can
1736 1739 # read repo formats supported by all servers that can serve
1737 1740 # them.
1738 1741
1739 1742 # if revlog format changes, client will have to check version
1740 1743 # and format flags on "stream" capability, and use
1741 1744 # uncompressed only if compatible.
1742 1745
1743 1746 if stream and not heads and remote.capable('stream'):
1744 1747 return self.stream_in(remote)
1745 1748 return self.pull(remote, heads)
1746 1749
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that, after a transaction completes, renames
    the journal files under *base* to the corresponding undo files.

    A plain closure (not a bound method) is returned so the repository
    object is not captured, avoiding circular references.
    """
    journal_dir = base
    def a():
        util.rename(os.path.join(journal_dir, "journal"),
                    os.path.join(journal_dir, "undo"))
        util.rename(os.path.join(journal_dir, "journal.dirstate"),
                    os.path.join(journal_dir, "undo.dirstate"))
    return a
1755 1758
def instance(ui, path, create):
    """Repository factory: open (or create) a local repository at
    *path*, accepting plain paths as well as file:// URLs."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1758 1761
def islocal(path):
    """A localrepository is, by definition, always local."""
    return True
@@ -1,33 +1,33 b''
1 1 #!/bin/sh
2 2 hg --debug init
3 3 echo this is a1 > a
4 4 hg add a
5 5 hg commit -m0 -d "1000000 0"
6 6 echo this is b1 > b
7 7 hg add b
8 8 hg commit -m1 -d "1000000 0"
9 9 hg manifest 1
10 10 echo this is c1 > c
11 11 hg rawcommit -p 1 -d "1000000 0" -m2 c
12 12 hg manifest 2
13 hg parents
13 hg -v parents
14 14 rm b
15 15 hg rawcommit -p 2 -d "1000000 0" -m3 b
16 16 hg manifest 3
17 hg parents
17 hg -v parents
18 18 echo this is a22 > a
19 19 hg rawcommit -p 3 -d "1000000 0" -m4 a
20 20 hg manifest 4
21 hg parents
21 hg -v parents
22 22 echo this is c22 > c
23 23 hg rawcommit -p 1 -d "1000000 0" -m5 c
24 24 hg manifest 5
25 hg parents
25 hg -v parents
26 26 # merge, but no files changed
27 27 hg rawcommit -p 4 -p 5 -d "1000000 0" -m6
28 28 hg manifest 6
29 hg parents
29 hg -v parents
30 30 # no changes what-so-ever
31 31 hg rawcommit -p 6 -d "1000000 0" -m7
32 32 hg manifest 7
33 hg parents
33 hg -v parents
@@ -1,59 +1,77 b''
1 1 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
2 2 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
3 3 (the rawcommit command is deprecated)
4 4 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
5 5 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
6 6 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
7 7 changeset: 2:e110db3db549
8 8 tag: tip
9 9 user: test
10 10 date: Mon Jan 12 13:46:40 1970 +0000
11 summary: 2
11 files: c
12 description:
13 2
14
12 15
13 16 (the rawcommit command is deprecated)
14 17 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
15 18 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
16 changeset: 3:0f9843914735
19 changeset: 3:20652cf30cc0
17 20 tag: tip
18 21 user: test
19 22 date: Mon Jan 12 13:46:40 1970 +0000
20 summary: 3
23 files: b
24 description:
25 3
26
21 27
22 28 (the rawcommit command is deprecated)
23 29 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
24 30 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
25 changeset: 4:909a3d1d3ee1
31 changeset: 4:42556b925639
26 32 tag: tip
27 33 user: test
28 34 date: Mon Jan 12 13:46:40 1970 +0000
29 summary: 4
35 files: a
36 description:
37 4
38
30 39
31 40 (the rawcommit command is deprecated)
32 41 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
33 42 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
34 43 3570202ceac2b52517df64ebd0a062cb0d8fe33a 644 c
35 changeset: 4:909a3d1d3ee1
44 changeset: 4:42556b925639
36 45 user: test
37 46 date: Mon Jan 12 13:46:40 1970 +0000
38 summary: 4
47 files: a
48 description:
49 4
50
39 51
40 52 (the rawcommit command is deprecated)
41 53 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
42 54 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
43 changeset: 6:725fdd0728db
55 changeset: 6:8a0c9254b0ab
44 56 tag: tip
45 parent: 4:909a3d1d3ee1
57 parent: 4:42556b925639
46 58 parent: 5:f56d4c64ab98
47 59 user: test
48 60 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: 6
61 files:
62 description:
63 6
64
50 65
51 66 (the rawcommit command is deprecated)
52 67 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
53 68 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
54 changeset: 7:2c11b55105cb
69 changeset: 7:a5a6e1f312b9
55 70 tag: tip
56 71 user: test
57 72 date: Mon Jan 12 13:46:40 1970 +0000
58 summary: 7
73 files:
74 description:
75 7
59 76
77
General Comments 0
You need to be logged in to leave comments. Login now