##// END OF EJS Templates
Implement revlogng....
mason@suse.com -
r2072:74d3f533 default
parent child Browse files
Show More
@@ -1,57 +1,58 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from revlog import *
9 9 from i18n import gettext as _
10 10 from demandload import demandload
11 11 demandload(globals(), "os time util")
12 12
class changelog(revlog):
    """Revlog subclass storing and parsing changelog entries.

    An entry is a text blob of the form (see add()):
    manifest-hex\nuser\ntime timezone\nfile...\n\ndescription
    """

    def __init__(self, opener, defversion=0):
        # defversion selects the revlog format (revlogng) used when a
        # new 00changelog is created; 0 keeps the original format.
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d",
                        defversion)

    def extract(self, text):
        """Parse an entry into
        (manifest node, user, (time, timezone), files, description)."""
        if not text:
            return (nullid, "", (0, 0), [], "")
        last = text.index("\n\n")
        desc = text[last + 2:]
        l = text[:last].splitlines()
        manifest = bin(l[0])
        user = l[1]
        date = l[2].split(' ')
        time = float(date.pop(0))
        try:
            # various tools did silly things with the time zone field.
            # Fixed: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; only parse failures should
            # fall back to UTC.
            timezone = int(date[0])
        except (ValueError, IndexError, TypeError):
            timezone = 0
        files = l[3:]
        return (manifest, user, (time, timezone), files, desc)

    def read(self, node):
        """Return the parsed entry for the given changelog node."""
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Build a new changelog entry and store it as a revision.

        Returns the new node.  Raises ValueError if an explicit date
        string does not parse or does not fit in signed 32 bits.
        """
        if date:
            # validate explicit (probably user-specified) date and
            # time zone offset. values must fit in signed 32 bits for
            # current 32-bit linux runtimes.
            try:
                when, offset = map(int, date.split(' '))
            except ValueError:
                raise ValueError(_('invalid date: %r') % date)
            if abs(when) > 0x7fffffff:
                raise ValueError(_('date exceeds 32 bits: %d') % when)
            if abs(offset) >= 43200:
                raise ValueError(_('impossible time zone offset: %d') % offset)
        else:
            date = "%d %d" % util.makedate()
        list.sort()
        l = [hex(manifest), user, date] + list + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, self.count(), p1, p2)
@@ -1,3468 +1,3469 @@
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from node import *
10 10 from i18n import gettext as _
11 11 demandload(globals(), "os re sys signal shutil imp urllib pdb")
12 12 demandload(globals(), "fancyopts ui hg util lock revlog templater bundlerepo")
13 13 demandload(globals(), "fnmatch hgweb mdiff random signal tempfile time")
14 14 demandload(globals(), "traceback errno socket version struct atexit sets bz2")
15 15 demandload(globals(), "changegroup")
16 16
class UnknownCommand(Exception):
    """Raised when the requested command is not in the command table."""
class AmbiguousCommand(Exception):
    """Raised when a command shortcut matches more than one command."""
21 21
def filterfiles(filters, files):
    """Return the members of files selected by filters.

    A file is kept if it is literally listed in filters, or if it
    lives under a filter entry treated as a directory prefix.  A file
    matching both ways appears twice, as in the original.
    """
    matched = [f for f in files if f in filters]
    for prefix in filters:
        if prefix and not prefix.endswith("/"):
            prefix += "/"
        matched += [f for f in files if f.startswith(prefix)]
    return matched
30 30
def relpath(repo, args):
    """Interpret each path in args relative to the repo working dir.

    When the repository reports no working subdirectory, args are
    returned unchanged.
    """
    cwd = repo.getcwd()
    if not cwd:
        return args
    return [util.normpath(os.path.join(cwd, arg)) for arg in args]
36 36
def matchpats(repo, pats=None, opts=None, head=''):
    """Build a command matcher for the given patterns and options.

    Returns whatever util.cmdmatcher returns for the repository root,
    working directory, patterns and include/exclude options.

    Fixed: the old signature used mutable defaults (pats=[], opts={})
    while mutating opts in place, so the shared default dict could be
    modified across calls (and opts['include'] would KeyError on the
    bare default).  Callers passing their own opts see the same
    in-place anchoring behavior as before.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    cwd = repo.getcwd()
    if not pats and cwd:
        # anchor bare --include/--exclude patterns at the current
        # working directory, then match from the repository root
        opts['include'] = [os.path.join(cwd, i) for i in opts['include']]
        opts['exclude'] = [os.path.join(cwd, x) for x in opts['exclude']]
        cwd = ''
    return util.cmdmatcher(repo.root, cwd, pats or ['.'], opts.get('include'),
                           opts.get('exclude'), head)
45 45
def makewalk(repo, pats, opts, node=None, head='', badmatch=None):
    """Set up a repository walk over the given patterns.

    Returns (files, matchfn, iterator); the iterator yields
    (src, abspath, relpath, exact) tuples.
    """
    files, matchfn, anypats = matchpats(repo, pats, opts, head)
    exact = dict.fromkeys(files)
    # renamed from "walk" to avoid shadowing the module-level walk()
    def iterate():
        for src, fn in repo.walk(node=node, files=files, match=matchfn,
                                 badmatch=badmatch):
            yield src, fn, util.pathto(repo.getcwd(), fn), fn in exact
    return files, matchfn, iterate()
54 54
def walk(repo, pats, opts, node=None, head='', badmatch=None):
    """Convenience wrapper around makewalk that yields its results."""
    dummy_files, dummy_match, results = makewalk(repo, pats, opts, node,
                                                 head, badmatch)
    for result in results:
        yield result
59 59
def walkchangerevs(ui, repo, pats, opts):
    '''Iterate over files and the revs they changed in.

    Callers most commonly need to iterate backwards over the history
    it is interested in.  Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order.  Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an (iterator, getchange, matchfn) tuple.  The
    getchange function returns the changelog entry for a numeric
    revision.  The iterator yields 3-tuples.  They will be of one of
    the following forms:

    "window", incrementing, lastrev: stepping through a window,
    positive if walking forwards through revs, last rev in the
    sequence iterated over - use to reset state for the current window

    "add", rev, fns: out-of-order traversal of the given file names
    fns, which changed during revision rev - use to gather data for
    possible display

    "iter", rev, None: in-order traversal of the revs earlier iterated
    over with "add" - use to display data'''

    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        # yield (start, size) windows covering the range in either
        # direction, doubling the window size up to sizelimit
        if start < end:
            while start < end:
                yield start, min(windowsize, end-start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start-end-1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    files, matchfn, anypats = matchpats(repo, pats, opts)

    if repo.changelog.count() == 0:
        return [], False, matchfn

    revs = map(int, revrange(ui, repo, opts['rev'] or ['tip:0']))
    wanted = {}
    slowpath = anypats
    fncache = {}

    # memoize parsed changelog entries per revision
    chcache = {}
    def getchange(rev):
        ch = chcache.get(rev)
        if ch is None:
            chcache[rev] = ch = repo.changelog.read(repo.lookup(str(rev)))
        return ch

    if not slowpath and not files:
        # No files, no patterns.  Display all revs.
        wanted = dict(zip(revs, revs))
    if not slowpath:
        # Only files, no patterns.  Check the history of each file.
        def filerevgen(filelog):
            for i, window in increasing_windows(filelog.count()-1, -1):
                revs = []
                for j in xrange(i - window, i + 1):
                    revs.append(filelog.linkrev(filelog.node(j)))
                revs.reverse()
                for rev in revs:
                    yield rev

        minrev, maxrev = min(revs), max(revs)
        for file_ in files:
            filelog = repo.file(file_)
            # A zero count may be a directory or deleted file, so
            # try to find matching entries on the slow path.
            if filelog.count() == 0:
                slowpath = True
                break
            for rev in filerevgen(filelog):
                if rev <= maxrev:
                    if rev < minrev:
                        break
                    fncache.setdefault(rev, [])
                    fncache[rev].append(file_)
                    wanted[rev] = 1
    if slowpath:
        # The slow path checks files modified in every changeset.
        def changerevgen():
            for i, window in increasing_windows(repo.changelog.count()-1, -1):
                for j in xrange(i - window, i + 1):
                    yield j, getchange(j)[3]

        for rev, changefiles in changerevgen():
            matches = filter(matchfn, changefiles)
            if matches:
                fncache[rev] = matches
                wanted[rev] = 1

    def iterate():
        for i, window in increasing_windows(0, len(revs)):
            yield 'window', revs[0] < revs[-1], revs[-1]
            nrevs = [rev for rev in revs[i:i+window]
                     if rev in wanted]
            srevs = list(nrevs)
            srevs.sort()
            for rev in srevs:
                fns = fncache.get(rev) or filter(matchfn, getchange(rev)[3])
                yield 'add', rev, fns
            for rev in nrevs:
                yield 'iter', rev, None
    return iterate(), getchange, matchfn
174 174
revrangesep = ':'

def revrange(ui, repo, revs, revlog=None):
    """Yield revision as strings from a list of revision specifications."""
    if revlog is None:
        revlog = repo.changelog
    revcount = revlog.count()
    def fix(val, defval):
        # resolve one revision spec to a number: empty -> default,
        # then integer parsing (with negative-index wrapping), then
        # symbolic lookup in the changelog, then in the given revlog
        if not val:
            return defval
        try:
            num = int(val)
            if str(num) != val:
                raise ValueError
            if num < 0:
                num += revcount
            if num < 0:
                num = 0
            elif num >= revcount:
                raise ValueError
        except ValueError:
            try:
                num = repo.changelog.rev(repo.lookup(val))
            except KeyError:
                try:
                    num = revlog.rev(revlog.lookup(val))
                except KeyError:
                    raise util.Abort(_('invalid revision identifier %s'), val)
        return num
    seen = {}
    for spec in revs:
        if spec.find(revrangesep) >= 0:
            start, end = spec.split(revrangesep, 1)
            start = fix(start, 0)
            end = fix(end, revcount - 1)
            # walk the range in whichever direction it was given
            step = start > end and -1 or 1
            for rev in xrange(start, end+step, step):
                if rev in seen:
                    continue
                seen[rev] = 1
                yield str(rev)
        else:
            rev = fix(spec, None)
            if rev in seen:
                continue
            seen[rev] = 1
            yield str(rev)
222 222
def make_filename(repo, r, pat, node=None,
                  total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output file name pattern.

    Escapes are enabled only when the matching argument was supplied:
    %% literal percent, %b repo basename, %H/%h/%R node forms,
    %r zero-padded rev, %N total, %n (padded) seqno, %s/%d/%p
    pathname forms.  Raises util.Abort on an unknown escape.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(r.rev(node)),
        'h': lambda: short(node),
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node and revwidth is not None:
            expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        pieces = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = expander[pat[i]]()
            pieces.append(c)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output file name"),
                         inst.args[0])
266 266
def make_file(repo, r, pat, node=None,
              total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return an open file-like object for an output/input pattern.

    An empty pattern or '-' means stdout (write modes) or stdin; a
    file-like object compatible with mode is passed straight through;
    anything else is expanded via make_filename and opened.
    """
    if not pat or pat == '-':
        return 'w' in mode and sys.stdout or sys.stdin
    if hasattr(pat, 'write') and 'w' in mode:
        return pat
    if hasattr(pat, 'read') and 'r' in mode:
        return pat
    fname = make_filename(repo, r, pat, node, total, seqno, revwidth,
                          pathname)
    return open(fname, mode)
278 278
def write_bundle(cg, filename=None, compress=True):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    class nocompress(object):
        # pass-through stand-in for a bz2 compressor
        def compress(self, x):
            return x
        def flush(self):
            return ""

    fh = None
    cleanup = None
    try:
        if filename:
            if os.path.exists(filename):
                raise util.Abort(_("file '%s' already exists"), filename)
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(suffix=".hg", prefix="hg-bundle-")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        if compress:
            fh.write("HG10")
            z = bz2.BZ2Compressor(9)
        else:
            fh.write("HG10UN")
            z = nocompress()
        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkiter is the end of the changegroup
        empty = False
        while not empty:
            empty = True
            for chunk in changegroup.chunkiter(cg):
                empty = False
                fh.write(z.compress(changegroup.genchunk(chunk)))
        fh.write(z.compress(changegroup.closechunk()))
        fh.write(z.flush())
        # success: disarm the error-path cleanup below
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
330 330
def dodiff(fp, ui, repo, node1, node2, files=None, match=util.always,
           changes=None, text=False, opts={}):
    """Write a unified diff between two repository states to fp.

    node1 defaults to the working directory's first parent; a false
    node2 means diffing against the working directory itself.
    """
    if not node1:
        node1 = repo.dirstate.parents()[0]
    # reading the data for node1 early allows it to play nicely
    # with repo.changes and the revlog cache.
    change = repo.changelog.read(node1)
    mmap = repo.manifest.read(change[0])
    date1 = util.datestr(change[2])

    if not changes:
        changes = repo.changes(node1, node2, files, match=match)
    modified, added, removed, deleted, unknown = changes
    if files:
        modified, added, removed = map(lambda x: filterfiles(files, x),
                                       (modified, added, removed))

    if not modified and not added and not removed:
        return

    # pick the reader for the "new" side: a committed node or the
    # working directory
    if node2:
        change = repo.changelog.read(node2)
        mmap2 = repo.manifest.read(change[0])
        date2 = util.datestr(change[2])
        def read(f):
            return repo.file(f).read(mmap2[f])
    else:
        date2 = util.datestr()
        def read(f):
            return repo.wread(f)

    if ui.quiet:
        r = None
    else:
        hexfunc = ui.verbose and hex or short
        r = [hexfunc(node) for node in [node1, node2] if node]

    diffopts = ui.diffopts()
    showfunc = opts.get('show_function') or diffopts['showfunc']
    ignorews = opts.get('ignore_all_space') or diffopts['ignorews']
    for f in modified:
        to = None
        if f in mmap:
            to = repo.file(f).read(mmap[f])
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
                               showfunc=showfunc, ignorews=ignorews))
    for f in added:
        to = None
        tn = read(f)
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
                               showfunc=showfunc, ignorews=ignorews))
    for f in removed:
        to = repo.file(f).read(mmap[f])
        tn = None
        fp.write(mdiff.unidiff(to, date1, tn, date2, f, r, text=text,
                               showfunc=showfunc, ignorews=ignorews))
388 388
def trimuser(ui, name, rev, revcache):
    """Trim the name of the user who committed a change, memoizing
    the shortened form per revision in revcache."""
    short_name = revcache.get(rev)
    if short_name is None:
        short_name = revcache[rev] = ui.shortuser(name)
    return short_name
395 395
396 396 class changeset_templater(object):
397 397 '''use templater module to format changeset information.'''
398 398
399 399 def __init__(self, ui, repo, mapfile):
400 400 self.t = templater.templater(mapfile, templater.common_filters,
401 401 cache={'parent': '{rev}:{node|short} ',
402 402 'manifest': '{rev}:{node|short}'})
403 403 self.ui = ui
404 404 self.repo = repo
405 405
406 406 def use_template(self, t):
407 407 '''set template string to use'''
408 408 self.t.cache['changeset'] = t
409 409
410 410 def write(self, thing, header=False):
411 411 '''write expanded template.
412 412 uses in-order recursive traverse of iterators.'''
413 413 for t in thing:
414 414 if hasattr(t, '__iter__'):
415 415 self.write(t, header=header)
416 416 elif header:
417 417 self.ui.write_header(t)
418 418 else:
419 419 self.ui.write(t)
420 420
421 421 def write_header(self, thing):
422 422 self.write(thing, header=True)
423 423
424 424 def show(self, rev=0, changenode=None, brinfo=None):
425 425 '''show a single changeset or file revision'''
426 426 log = self.repo.changelog
427 427 if changenode is None:
428 428 changenode = log.node(rev)
429 429 elif not rev:
430 430 rev = log.rev(changenode)
431 431
432 432 changes = log.read(changenode)
433 433
434 434 def showlist(name, values, plural=None, **args):
435 435 '''expand set of values.
436 436 name is name of key in template map.
437 437 values is list of strings or dicts.
438 438 plural is plural of name, if not simply name + 's'.
439 439
440 440 expansion works like this, given name 'foo'.
441 441
442 442 if values is empty, expand 'no_foos'.
443 443
444 444 if 'foo' not in template map, return values as a string,
445 445 joined by space.
446 446
447 447 expand 'start_foos'.
448 448
449 449 for each value, expand 'foo'. if 'last_foo' in template
450 450 map, expand it instead of 'foo' for last key.
451 451
452 452 expand 'end_foos'.
453 453 '''
454 454 if plural: names = plural
455 455 else: names = name + 's'
456 456 if not values:
457 457 noname = 'no_' + names
458 458 if noname in self.t:
459 459 yield self.t(noname, **args)
460 460 return
461 461 if name not in self.t:
462 462 if isinstance(values[0], str):
463 463 yield ' '.join(values)
464 464 else:
465 465 for v in values:
466 466 yield dict(v, **args)
467 467 return
468 468 startname = 'start_' + names
469 469 if startname in self.t:
470 470 yield self.t(startname, **args)
471 471 vargs = args.copy()
472 472 def one(v, tag=name):
473 473 try:
474 474 vargs.update(v)
475 475 except (AttributeError, ValueError):
476 476 try:
477 477 for a, b in v:
478 478 vargs[a] = b
479 479 except ValueError:
480 480 vargs[name] = v
481 481 return self.t(tag, **vargs)
482 482 lastname = 'last_' + name
483 483 if lastname in self.t:
484 484 last = values.pop()
485 485 else:
486 486 last = None
487 487 for v in values:
488 488 yield one(v)
489 489 if last is not None:
490 490 yield one(last, tag=lastname)
491 491 endname = 'end_' + names
492 492 if endname in self.t:
493 493 yield self.t(endname, **args)
494 494
495 495 if brinfo:
496 496 def showbranches(**args):
497 497 if changenode in brinfo:
498 498 for x in showlist('branch', brinfo[changenode],
499 499 plural='branches', **args):
500 500 yield x
501 501 else:
502 502 showbranches = ''
503 503
504 504 if self.ui.debugflag:
505 505 def showmanifest(**args):
506 506 args = args.copy()
507 507 args.update(dict(rev=self.repo.manifest.rev(changes[0]),
508 508 node=hex(changes[0])))
509 509 yield self.t('manifest', **args)
510 510 else:
511 511 showmanifest = ''
512 512
513 513 def showparents(**args):
514 514 parents = [[('rev', log.rev(p)), ('node', hex(p))]
515 515 for p in log.parents(changenode)
516 516 if self.ui.debugflag or p != nullid]
517 517 if (not self.ui.debugflag and len(parents) == 1 and
518 518 parents[0][0][1] == rev - 1):
519 519 return
520 520 for x in showlist('parent', parents, **args):
521 521 yield x
522 522
523 523 def showtags(**args):
524 524 for x in showlist('tag', self.repo.nodetags(changenode), **args):
525 525 yield x
526 526
527 527 if self.ui.debugflag:
528 528 files = self.repo.changes(log.parents(changenode)[0], changenode)
529 529 def showfiles(**args):
530 530 for x in showlist('file', files[0], **args): yield x
531 531 def showadds(**args):
532 532 for x in showlist('file_add', files[1], **args): yield x
533 533 def showdels(**args):
534 534 for x in showlist('file_del', files[2], **args): yield x
535 535 else:
536 536 def showfiles(**args):
537 537 for x in showlist('file', changes[3], **args): yield x
538 538 showadds = ''
539 539 showdels = ''
540 540
541 541 props = {
542 542 'author': changes[1],
543 543 'branches': showbranches,
544 544 'date': changes[2],
545 545 'desc': changes[4],
546 546 'file_adds': showadds,
547 547 'file_dels': showdels,
548 548 'files': showfiles,
549 549 'manifest': showmanifest,
550 550 'node': hex(changenode),
551 551 'parents': showparents,
552 552 'rev': rev,
553 553 'tags': showtags,
554 554 }
555 555
556 556 try:
557 557 if self.ui.debugflag and 'header_debug' in self.t:
558 558 key = 'header_debug'
559 559 elif self.ui.quiet and 'header_quiet' in self.t:
560 560 key = 'header_quiet'
561 561 elif self.ui.verbose and 'header_verbose' in self.t:
562 562 key = 'header_verbose'
563 563 elif 'header' in self.t:
564 564 key = 'header'
565 565 else:
566 566 key = ''
567 567 if key:
568 568 self.write_header(self.t(key, **props))
569 569 if self.ui.debugflag and 'changeset_debug' in self.t:
570 570 key = 'changeset_debug'
571 571 elif self.ui.quiet and 'changeset_quiet' in self.t:
572 572 key = 'changeset_quiet'
573 573 elif self.ui.verbose and 'changeset_verbose' in self.t:
574 574 key = 'changeset_verbose'
575 575 else:
576 576 key = 'changeset'
577 577 self.write(self.t(key, **props))
578 578 except KeyError, inst:
579 579 raise util.Abort(_("%s: no key named '%s'") % (self.t.mapfile,
580 580 inst.args[0]))
581 581 except SyntaxError, inst:
582 582 raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
583 583
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo

    def show(self, rev=0, changenode=None, brinfo=None):
        '''show a single changeset or file revision'''
        log = self.repo.changelog
        if changenode is None:
            changenode = log.node(rev)
        elif not rev:
            rev = log.rev(changenode)

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        changes = log.read(changenode)
        date = util.datestr(changes[2])

        # the trivial parent (rev - 1) is omitted in non-debug mode
        parents = [(log.rev(p), self.ui.verbose and hex(p) or short(p))
                   for p in log.parents(changenode)
                   if self.ui.debugflag or p != nullid]
        if (not self.ui.debugflag and len(parents) == 1 and
            parents[0][0] == rev-1):
            parents = []

        if self.ui.verbose:
            self.ui.write(_("changeset: %d:%s\n") % (rev, hex(changenode)))
        else:
            self.ui.write(_("changeset: %d:%s\n") % (rev, short(changenode)))

        for tag in self.repo.nodetags(changenode):
            self.ui.status(_("tag: %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent)

        if brinfo and changenode in brinfo:
            br = brinfo[changenode]
            self.ui.write(_("branch: %s\n") % " ".join(br))

        self.ui.debug(_("manifest: %d:%s\n") %
                      (self.repo.manifest.rev(changes[0]), hex(changes[0])))
        self.ui.status(_("user: %s\n") % changes[1])
        self.ui.status(_("date: %s\n") % date)

        if self.ui.debugflag:
            files = self.repo.changes(log.parents(changenode)[0], changenode)
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.note("%-12s %s\n" % (key, " ".join(value)))
        else:
            self.ui.note(_("files: %s\n") % " ".join(changes[3]))

        description = changes[4].strip()
        if description:
            if self.ui.verbose:
                self.ui.status(_("description:\n"))
                self.ui.status(description)
                self.ui.status("\n\n")
            else:
                self.ui.status(_("summary: %s\n") %
                               description.splitlines()[0])
        self.ui.status("\n")
651 651
652 652 def show_changeset(ui, repo, opts):
653 653 '''show one changeset. uses template or regular display. caller
654 654 can pass in 'style' and 'template' options in opts.'''
655 655
656 656 tmpl = opts.get('template')
657 657 if tmpl:
658 658 tmpl = templater.parsestring(tmpl, quoted=False)
659 659 else:
660 660 tmpl = ui.config('ui', 'logtemplate')
661 661 if tmpl: tmpl = templater.parsestring(tmpl)
662 662 mapfile = opts.get('style') or ui.config('ui', 'style')
663 663 if tmpl or mapfile:
664 664 if mapfile:
665 665 if not os.path.isfile(mapfile):
666 666 mapname = templater.templatepath('map-cmdline.' + mapfile)
667 667 if not mapname: mapname = templater.templatepath(mapfile)
668 668 if mapname: mapfile = mapname
669 669 try:
670 670 t = changeset_templater(ui, repo, mapfile)
671 671 except SyntaxError, inst:
672 672 raise util.Abort(inst.args[0])
673 673 if tmpl: t.use_template(tmpl)
674 674 return t
675 675 return changeset_printer(ui, repo)
676 676
def show_version(ui):
    """output version and copyright information"""
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % version.get_version())
    ui.status(_(
        "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
        ))
687 687
def help_(ui, cmd=None, with_version=False):
    """show help for a given command or all commands"""
    option_lists = []
    if cmd and cmd != 'shortlist':
        # help for one specific command
        if with_version:
            show_version(ui)
            ui.write('\n')
        aliases, i = find(cmd)
        # synopsis
        ui.write("%s\n\n" % i[2])

        # description
        doc = i[0].__doc__
        if not doc:
            doc = _("(No help text available)")
        if ui.quiet:
            doc = doc.splitlines(0)[0]
        ui.write("%s\n" % doc.rstrip())

        if not ui.quiet:
            # aliases
            if len(aliases) > 1:
                ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

            # options
            if i[1]:
                option_lists.append(("options", i[1]))

    else:
        # program name
        if ui.verbose or with_version:
            show_version(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if cmd == "shortlist":
            ui.status(_('basic commands (use "hg help" '
                        'for the full list or option "-v" for details):\n\n'))
        elif ui.verbose:
            ui.status(_('list of commands:\n\n'))
        else:
            ui.status(_('list of commands (use "hg help -v" '
                        'to show aliases and global options):\n\n'))

        # collect one-line summaries keyed by primary command name
        h = {}
        cmds = {}
        for c, e in table.items():
            f = c.split("|")[0]
            if cmd == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = e[0].__doc__
            if not doc:
                doc = _("(No help text available)")
            h[f] = doc.splitlines(0)[0].rstrip()
            cmds[f] = c.lstrip("^")

        fns = h.keys()
        fns.sort()
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

    # global options
    if ui.verbose:
        option_lists.append(("global options", globalopts))

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s:\n" % title, None))
        for shortopt, longopt, default, desc in options:
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                               "%s%s" % (desc,
                                         default
                                         and _(" (default: %s)") % default
                                         or "")))

    if opt_output:
        opts_len = max([len(line[0]) for line in opt_output if line[1]])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
782 782
783 783 # Commands start here, listed alphabetically
784 784
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the repository.

    The files will be added to the repository at the next commit.

    If no names are given, add all files in the repository.
    """
    names = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        if exact:
            # explicitly named: always add, but only chatter verbosely
            if ui.verbose:
                ui.status(_('adding %s\n') % rel)
            names.append(abs)
        elif repo.dirstate.state(abs) == '?':
            ui.status(_('adding %s\n') % rel)
            names.append(abs)
    repo.add(names)
805 805
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the repository.

    New files are ignored if they match any of the patterns in .hgignore. As
    with add, these changes take effect at the next commit.
    """
    return addremove_lock(ui, repo, pats, opts)
815 815
def addremove_lock(ui, repo, pats, opts, wlock=None):
    """Schedule unknown files for addition and vanished files for
    removal, optionally reusing a pre-acquired working-dir lock."""
    # renamed locals: "add"/"remove" shadowed sibling command functions
    to_add, to_remove = [], []
    for src, abs, rel, exact in walk(repo, pats, opts):
        if src == 'f' and repo.dirstate.state(abs) == '?':
            to_add.append(abs)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % ((pats and rel) or abs))
        if repo.dirstate.state(abs) != 'r' and not os.path.exists(rel):
            to_remove.append(abs)
            if ui.verbose or not exact:
                ui.status(_('removing %s\n') % ((pats and rel) or abs))
    repo.add(to_add, wlock=wlock)
    repo.remove(to_remove, wlock=wlock)
829 829
def annotate(ui, repo, *pats, **opts):
    """show changeset information per file line

    List changes in files, showing the revision id responsible for each line

    This command is useful to discover who did a change or when a change took
    place.

    Without the -a option, annotate will avoid processing files it
    detects as binary. With -a, annotate will generate an annotation
    anyway, probably with undesirable results.
    """
    def getnode(rev):
        return short(repo.changelog.node(rev))

    ucache = {}
    def getname(rev):
        cl = repo.changelog.read(repo.changelog.node(rev))
        return trimuser(ui, cl[1], rev, ucache)

    dcache = {}
    def getdate(rev):
        # memoize formatted dates per revision
        datestr = dcache.get(rev)
        if datestr is None:
            cl = repo.changelog.read(repo.changelog.node(rev))
            datestr = dcache[rev] = util.datestr(cl[2])
        return datestr

    if not pats:
        raise util.Abort(_('at least one file name or pattern required'))

    opmap = [['user', getname], ['number', str], ['changeset', getnode],
             ['date', getdate]]
    if not opts['user'] and not opts['changeset'] and not opts['date']:
        # default to revision numbers when no column was requested
        opts['number'] = 1

    if opts['rev']:
        node = repo.changelog.lookup(opts['rev'])
    else:
        node = repo.dirstate.parents()[0]
    change = repo.changelog.read(node)
    mmap = repo.manifest.read(change[0])

    for src, abs, rel, exact in walk(repo, pats, opts, node=node):
        f = repo.file(abs)
        if not opts['text'] and util.binary(f.read(mmap[abs])):
            ui.write(_("%s: binary file\n") % ((pats and rel) or abs))
            continue

        lines = f.annotate(mmap[abs])
        pieces = []

        for o, f in opmap:
            if opts[o]:
                l = [f(n) for n, dummy in lines]
                if l:
                    # right-align each column to its widest entry
                    m = max(map(len, l))
                    pieces.append(["%*s" % (m, x) for x in l])

        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
892 892
def bundle(ui, repo, fname, dest="default-push", **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting all changesets
    not found in the other repository.

    This file can then be transferred using conventional means and
    applied to another repository with the unbundle command. This is
    useful when native push and pull are not available or when
    exporting an entire repository is undesirable. The standard file
    extension is ".hg".

    Unlike import/export, this exactly preserves all changeset
    contents including permissions, rename data, and revision history.
    """
    # resolve a symbolic path (e.g. a [paths] alias) into a real location
    target = ui.expandpath(dest)
    remote = hg.repository(ui, target)
    # everything the remote does not have goes into the bundle
    outgoing = repo.findoutgoing(remote, force=opts['force'])
    group = repo.changegroup(outgoing, 'bundle')
    write_bundle(group, fname)
913 913
def cat(ui, repo, file1, *pats, **opts):
    """output the latest or given revisions of files

    Print the specified files as they were at the given revision.
    If no revision is given then the tip is used.

    Output may be to a file, in which case the name of the file is
    given using a format string.  The formatting rules are the same as
    for the export command, with the following additions:

    %s   basename of file being printed
    %d   dirname of file being printed, or '.' if in repo root
    %p   root-relative path name of file being printed
    """
    # Resolve the revision to read from: explicit --rev, otherwise tip.
    rev = opts['rev']
    if rev:
        node = repo.lookup(rev)
    else:
        node = repo.changelog.tip()
    change = repo.changelog.read(node)
    # The manifest maps file path -> file node at that revision.
    # (The dead "mf = {}" pre-initialization from the old code is removed:
    # mf was always unconditionally reassigned here.)
    mf = repo.manifest.read(change[0])
    for src, abs, rel, exact in walk(repo, (file1,) + pats, opts, node):
        r = repo.file(abs)
        n = mf[abs]
        # make_file honors the %s/%d/%p format escapes in --output
        fp = make_file(repo, r, opts['output'], node=n, pathname=abs)
        fp.write(r.read(n))
941 941
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem.  Some filesystems,
    such as AFS, implement hardlinking incorrectly, but do not report
    errors.  In these cases, use the --pull option to avoid
    hardlinking.

    See pull for valid source format details.
    """
    if dest is None:
        dest = os.path.basename(os.path.normpath(source))

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists"), dest)

    dest = os.path.realpath(dest)

    # Removes the partially-created destination directory if the clone
    # fails before close() is called (which disarms the cleanup).
    class Dircleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
            os.mkdir(dir_)
        def close(self):
            self.dir_ = None
        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    source = ui.expandpath(source)

    d = Dircleanup(dest)
    abspath = source
    other = hg.repository(ui, source)

    # A local repository (dev() != -1) can be cloned by direct file copy
    # (hardlinks where possible) unless --pull or --rev forces a pull.
    copy = False
    if other.dev() != -1:
        abspath = os.path.abspath(source)
        if not opts['pull'] and not opts['rev']:
            copy = True

    if copy:
        try:
            # we use a lock here because if we race with commit, we
            # can end up with extra data in the cloned revlogs that's
            # not pointed to by changesets, thus causing verify to
            # fail
            l1 = other.lock()
        except lock.LockException:
            # can't lock the source: fall back to a pull-based clone
            copy = False

    if copy:
        # we lock here to avoid premature writing to the target
        os.mkdir(os.path.join(dest, ".hg"))
        l2 = lock.lock(os.path.join(dest, ".hg", "lock"))

        files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
        for f in files.split():
            src = os.path.join(source, ".hg", f)
            dst = os.path.join(dest, ".hg", f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                # missing pieces (e.g. no .d file) are normal; anything
                # else is a real error
                if inst.errno != errno.ENOENT:
                    raise

        repo = hg.repository(ui, dest)

    else:
        # pull-based clone, optionally limited to the --rev heads
        revs = None
        if opts['rev']:
            if not other.local():
                error = _("clone -r not supported yet for remote repositories.")
                raise util.Abort(error)
            else:
                revs = [other.lookup(rev) for rev in opts['rev']]
        repo = hg.repository(ui, dest, create=1)
        repo.pull(other, heads = revs)

    # record the source as the default path for future pulls
    f = repo.opener("hgrc", "w", text=True)
    f.write("[paths]\n")
    f.write("default = %s\n" % abspath)
    f.close()

    if not opts['noupdate']:
        update(repo.ui, repo)

    # clone succeeded: disarm the directory cleanup
    d.close()
1044 1044
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    will be committed.

    If no commit message is specified, the editor configured in your hgrc
    or in the EDITOR environment variable is started to enter a message.
    """
    message = opts['message']
    logfile = opts['logfile']

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        # read the commit message from a file, or stdin for "-"
        try:
            if logfile == '-':
                message = sys.stdin.read()
            else:
                message = open(logfile).read()
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))

    if opts['addremove']:
        addremove(ui, repo, *pats, **opts)
    fns, match, anypats = matchpats(repo, pats, opts)
    if pats:
        # restrict the commit to the files matched by the patterns
        modified, added, removed, deleted, unknown = (
            repo.changes(files=fns, match=match))
        files = modified + added + removed
    else:
        # empty list means "commit everything outstanding"
        files = []
    try:
        repo.commit(files, message, opts['user'], opts['date'], match)
    except ValueError, inst:
        # e.g. an invalid --date; surface it as a clean abort
        raise util.Abort(str(inst))
1085 1085
1086 1086 def docopy(ui, repo, pats, opts, wlock):
1087 1087 # called with the repo lock held
1088 1088 cwd = repo.getcwd()
1089 1089 errors = 0
1090 1090 copied = []
1091 1091 targets = {}
1092 1092
1093 1093 def okaytocopy(abs, rel, exact):
1094 1094 reasons = {'?': _('is not managed'),
1095 1095 'a': _('has been marked for add'),
1096 1096 'r': _('has been marked for remove')}
1097 1097 state = repo.dirstate.state(abs)
1098 1098 reason = reasons.get(state)
1099 1099 if reason:
1100 1100 if state == 'a':
1101 1101 origsrc = repo.dirstate.copied(abs)
1102 1102 if origsrc is not None:
1103 1103 return origsrc
1104 1104 if exact:
1105 1105 ui.warn(_('%s: not copying - file %s\n') % (rel, reason))
1106 1106 else:
1107 1107 return abs
1108 1108
1109 1109 def copy(origsrc, abssrc, relsrc, target, exact):
1110 1110 abstarget = util.canonpath(repo.root, cwd, target)
1111 1111 reltarget = util.pathto(cwd, abstarget)
1112 1112 prevsrc = targets.get(abstarget)
1113 1113 if prevsrc is not None:
1114 1114 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1115 1115 (reltarget, abssrc, prevsrc))
1116 1116 return
1117 1117 if (not opts['after'] and os.path.exists(reltarget) or
1118 1118 opts['after'] and repo.dirstate.state(abstarget) not in '?r'):
1119 1119 if not opts['force']:
1120 1120 ui.warn(_('%s: not overwriting - file exists\n') %
1121 1121 reltarget)
1122 1122 return
1123 1123 if not opts['after']:
1124 1124 os.unlink(reltarget)
1125 1125 if opts['after']:
1126 1126 if not os.path.exists(reltarget):
1127 1127 return
1128 1128 else:
1129 1129 targetdir = os.path.dirname(reltarget) or '.'
1130 1130 if not os.path.isdir(targetdir):
1131 1131 os.makedirs(targetdir)
1132 1132 try:
1133 1133 restore = repo.dirstate.state(abstarget) == 'r'
1134 1134 if restore:
1135 1135 repo.undelete([abstarget], wlock)
1136 1136 try:
1137 1137 shutil.copyfile(relsrc, reltarget)
1138 1138 shutil.copymode(relsrc, reltarget)
1139 1139 restore = False
1140 1140 finally:
1141 1141 if restore:
1142 1142 repo.remove([abstarget], wlock)
1143 1143 except shutil.Error, inst:
1144 1144 raise util.Abort(str(inst))
1145 1145 except IOError, inst:
1146 1146 if inst.errno == errno.ENOENT:
1147 1147 ui.warn(_('%s: deleted in working copy\n') % relsrc)
1148 1148 else:
1149 1149 ui.warn(_('%s: cannot copy - %s\n') %
1150 1150 (relsrc, inst.strerror))
1151 1151 errors += 1
1152 1152 return
1153 1153 if ui.verbose or not exact:
1154 1154 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1155 1155 targets[abstarget] = abssrc
1156 1156 if abstarget != origsrc:
1157 1157 repo.copy(origsrc, abstarget, wlock)
1158 1158 copied.append((abssrc, relsrc, exact))
1159 1159
1160 1160 def targetpathfn(pat, dest, srcs):
1161 1161 if os.path.isdir(pat):
1162 1162 abspfx = util.canonpath(repo.root, cwd, pat)
1163 1163 if destdirexists:
1164 1164 striplen = len(os.path.split(abspfx)[0])
1165 1165 else:
1166 1166 striplen = len(abspfx)
1167 1167 if striplen:
1168 1168 striplen += len(os.sep)
1169 1169 res = lambda p: os.path.join(dest, p[striplen:])
1170 1170 elif destdirexists:
1171 1171 res = lambda p: os.path.join(dest, os.path.basename(p))
1172 1172 else:
1173 1173 res = lambda p: dest
1174 1174 return res
1175 1175
1176 1176 def targetpathafterfn(pat, dest, srcs):
1177 1177 if util.patkind(pat, None)[0]:
1178 1178 # a mercurial pattern
1179 1179 res = lambda p: os.path.join(dest, os.path.basename(p))
1180 1180 else:
1181 1181 abspfx = util.canonpath(repo.root, cwd, pat)
1182 1182 if len(abspfx) < len(srcs[0][0]):
1183 1183 # A directory. Either the target path contains the last
1184 1184 # component of the source path or it does not.
1185 1185 def evalpath(striplen):
1186 1186 score = 0
1187 1187 for s in srcs:
1188 1188 t = os.path.join(dest, s[0][striplen:])
1189 1189 if os.path.exists(t):
1190 1190 score += 1
1191 1191 return score
1192 1192
1193 1193 striplen = len(abspfx)
1194 1194 if striplen:
1195 1195 striplen += len(os.sep)
1196 1196 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1197 1197 score = evalpath(striplen)
1198 1198 striplen1 = len(os.path.split(abspfx)[0])
1199 1199 if striplen1:
1200 1200 striplen1 += len(os.sep)
1201 1201 if evalpath(striplen1) > score:
1202 1202 striplen = striplen1
1203 1203 res = lambda p: os.path.join(dest, p[striplen:])
1204 1204 else:
1205 1205 # a file
1206 1206 if destdirexists:
1207 1207 res = lambda p: os.path.join(dest, os.path.basename(p))
1208 1208 else:
1209 1209 res = lambda p: dest
1210 1210 return res
1211 1211
1212 1212
1213 1213 pats = list(pats)
1214 1214 if not pats:
1215 1215 raise util.Abort(_('no source or destination specified'))
1216 1216 if len(pats) == 1:
1217 1217 raise util.Abort(_('no destination specified'))
1218 1218 dest = pats.pop()
1219 1219 destdirexists = os.path.isdir(dest)
1220 1220 if (len(pats) > 1 or util.patkind(pats[0], None)[0]) and not destdirexists:
1221 1221 raise util.Abort(_('with multiple sources, destination must be an '
1222 1222 'existing directory'))
1223 1223 if opts['after']:
1224 1224 tfn = targetpathafterfn
1225 1225 else:
1226 1226 tfn = targetpathfn
1227 1227 copylist = []
1228 1228 for pat in pats:
1229 1229 srcs = []
1230 1230 for tag, abssrc, relsrc, exact in walk(repo, [pat], opts):
1231 1231 origsrc = okaytocopy(abssrc, relsrc, exact)
1232 1232 if origsrc:
1233 1233 srcs.append((origsrc, abssrc, relsrc, exact))
1234 1234 if not srcs:
1235 1235 continue
1236 1236 copylist.append((tfn(pat, dest, srcs), srcs))
1237 1237 if not copylist:
1238 1238 raise util.Abort(_('no files to copy'))
1239 1239
1240 1240 for targetpath, srcs in copylist:
1241 1241 for origsrc, abssrc, relsrc, exact in srcs:
1242 1242 copy(origsrc, abssrc, relsrc, targetpath(abssrc), exact)
1243 1243
1244 1244 if errors:
1245 1245 ui.warn(_('(consider using --after)\n'))
1246 1246 return errors, copied
1247 1247
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files.  If dest is a
    directory, copies are put in that directory.  If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory.  If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record copied files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    # grab the working-directory lock and delegate to the shared worker
    wlock = repo.wlock(0)
    nerrors, copied = docopy(ui, repo, pats, opts, wlock)
    # the error count doubles as the command's exit status
    return nerrors
1268 1268
def debugancestor(ui, index, rev1, rev2):
    """find the ancestor revision of two revisions in a given index"""
    # open the index as a standalone revlog (no data file, default version)
    opener = util.opener(os.getcwd(), audit=False)
    r = revlog.revlog(opener, index, "", 0)
    anc = r.ancestor(r.lookup(rev1), r.lookup(rev2))
    ui.write("%d:%s\n" % (r.rev(anc), hex(anc)))
1274 1274
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts['options']:
        # list the short/long option names instead of command names
        tables = [globalopts]
        if cmd:
            aliases, entry = find(cmd)
            tables.append(entry[1])
        names = []
        for table in tables:
            for opt in table:
                if opt[0]:
                    names.append('-%s' % opt[0])
                names.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(names))
        return

    # otherwise list the commands that could complete 'cmd'
    candidates = findpossible(cmd).keys()
    candidates.sort()
    ui.write("%s\n" % "\n".join(candidates))
1295 1295
def debugrebuildstate(ui, repo, rev=None):
    """rebuild the dirstate as it would look like for the given revision"""
    # default to tip when no revision was given
    if rev:
        node = repo.lookup(rev)
    else:
        node = repo.changelog.tip()
    manifest_node = repo.changelog.read(node)[0]
    flags = repo.manifest.readflags(manifest_node)
    # hold the working-dir lock while the dirstate is rewritten
    wlock = repo.wlock()
    repo.dirstate.rebuild(node, flags.iteritems())
1307 1307
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    # the dirstate must agree with the manifests of both parents
    m1n = repo.changelog.read(parent1)[0]
    m2n = repo.changelog.read(parent2)[0]
    m1 = repo.manifest.read(m1n)
    m2 = repo.manifest.read(m2n)
    errors = 0
    # pass 1: every tracked dirstate entry must be backed by a manifest
    # entry appropriate for its state (n=normal, r=removed, a=added, m=merged)
    for f in dc:
        state = repo.dirstate.state(f)
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # pass 2: every file in the first parent's manifest must be tracked
    for f in m1:
        state = repo.dirstate.state(f)
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
1340 1340
def debugconfig(ui, repo):
    """show combined config settings from all hgrc files"""
    # walkconfig yields (section, name, value) triples
    for item in ui.walkconfig():
        ui.write('%s.%s=%s\n' % item)
1345 1345
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    # a missing second parent defaults to the null revision
    p1 = repo.lookup(rev1)
    p2 = repo.lookup(rev2 or hex(nullid))
    repo.dirstate.setparents(p1, p2)
1357 1357
def debugstate(ui, repo):
    """show the contents of the current dirstate"""
    repo.dirstate.read()
    dc = repo.dirstate.map
    keys = dc.keys()
    keys.sort()
    # each entry is (state, mode, size, mtime); print them sorted by path
    for file_ in keys:
        ui.write("%c %3o %10d %s %s\n"
                 % (dc[file_][0], dc[file_][1] & 0777, dc[file_][2],
                    time.strftime("%x %X",
                                  time.localtime(dc[file_][3])), file_))
    # also dump recorded copy sources (destination -> source map)
    for f in repo.dirstate.copies:
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copies[f], f))
1371 1371
def debugdata(ui, file_, rev):
    """dump the contents of an data file revision"""
    # derive the index name from the ".d" data file name
    opener = util.opener(os.getcwd(), audit=False)
    r = revlog.revlog(opener, file_[:-2] + ".i", file_, 0)
    try:
        # lookup and revision may both raise KeyError for a bad id
        ui.write(r.revision(r.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s'), rev)
1380 1380
def debugindex(ui, file_):
    """dump the contents of an index file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_, "", 0)
    ui.write(" rev offset length base linkrev" +
             " nodeid p1 p2\n")
    # use the revlog accessors (start/length/base/linkrev/parents) so this
    # works with both the old index format and revlogng
    for i in range(r.count()):
        node = r.node(i)
        pp = r.parents(node)
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), r.base(i), r.linkrev(node),
            short(node), short(pp[0]), short(pp[1])))
1391 1392
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    rl = revlog.revlog(util.opener(os.getcwd(), audit=False), file_, "", 0)
    ui.write("digraph G {\n")
    for rev in range(rl.count()):
        entry = rl.index[rev]
        # edge from the first parent (index slot 4)
        ui.write("\t%d -> %d\n" % (rl.rev(entry[4]), rev))
        # second parent (slot 5) only when it is not the null node
        if entry[5] != nullid:
            ui.write("\t%d -> %d\n" % (rl.rev(entry[5]), rev))
    ui.write("}\n")
1402 1403
def debugrename(ui, repo, file, rev=None):
    """dump rename information"""
    r = repo.file(relpath(repo, [file])[0])
    if rev:
        try:
            # assume all revision numbers are for changesets
            n = repo.lookup(rev)
            change = repo.changelog.read(n)
            m = repo.manifest.read(change[0])
            n = m[relpath(repo, [file])[0]]
        except (hg.RepoError, KeyError):
            # not a changeset id, or file absent there: treat rev as a
            # filelog revision instead
            n = r.lookup(rev)
    else:
        n = r.tip()
    # renamed() returns (old path, old node) or a false value
    m = r.renamed(n)
    if m:
        ui.write(_("renamed from %s:%s\n") % (m[0], hex(m[1])))
    else:
        ui.write(_("not renamed\n"))
1422 1423
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matched = list(walk(repo, pats, opts))
    if not matched:
        return
    # pad the abs and rel columns to the widest entry of each
    abswidth = max([len(abs) for (src, abs, rel, exact) in matched])
    relwidth = max([len(rel) for (src, abs, rel, exact) in matched])
    fmt = '%%s %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for src, abs, rel, exact in matched:
        line = fmt % (src, abs, rel, exact and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
1434 1435
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.
    """
    # None means "working directory" / "working directory's parent"
    node1, node2 = None, None
    revs = [repo.lookup(x) for x in opts['rev']]

    if len(revs) > 0:
        node1 = revs[0]
    if len(revs) > 1:
        node2 = revs[1]
    if len(revs) > 2:
        raise util.Abort(_("too many revisions to diff"))

    fns, matchfn, anypats = matchpats(repo, pats, opts)

    dodiff(sys.stdout, ui, repo, node1, node2, fns, match=matchfn,
           text=opts['text'], opts=opts)
1466 1467
def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
    # Write one changeset as a patch (header + diff); helper for export.
    node = repo.lookup(changeset)
    parents = [p for p in repo.changelog.parents(node) if p != nullid]
    if opts['switch_parent']:
        # diff against the second parent instead of the first
        parents.reverse()
    prev = (parents and parents[0]) or nullid
    change = repo.changelog.read(node)

    # make_file honors the %h/%n/%r etc. escapes in --output
    fp = make_file(repo, repo.changelog, opts['output'],
                   node=node, total=total, seqno=seqno,
                   revwidth=revwidth)
    if fp != sys.stdout:
        ui.note("%s\n" % fp.name)

    fp.write("# HG changeset patch\n")
    fp.write("# User %s\n" % change[1])
    fp.write("# Node ID %s\n" % hex(node))
    fp.write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        fp.write("# Parent %s\n" % hex(parents[1]))
    # change[4] is the commit message
    fp.write(change[4].rstrip())
    fp.write("\n\n")

    dodiff(fp, ui, repo, prev, node, text=opts['text'])
    if fp != sys.stdout:
        fp.close()
1493 1494
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent and commit comment.

    Output may be to a file, in which case the name of the file is
    given using a format string.  The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.

    With the --switch-parent option, the diff will be against the second
    parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = list(revrange(ui, repo, changesets))
    total = len(revs)
    # widest revision string, used to zero-pad %r in output names
    revwidth = max([len(rev) for rev in revs])
    if total > 1:
        ui.note(_("Exporting patches:\n"))
    else:
        ui.note(_("Exporting patch:\n"))
    seqno = 0
    for cset in revs:
        seqno += 1
        doexport(ui, repo, cset, seqno, total, revwidth, opts)
1532 1533
def forget(ui, repo, *pats, **opts):
    """don't add the specified files on the next commit

    Undo an 'hg add' scheduled for the next commit.
    """
    forgotten = []
    for src, abs, rel, exact in walk(repo, pats, opts):
        # only files in the 'added' state can be forgotten
        if repo.dirstate.state(abs) != 'a':
            continue
        forgotten.append(abs)
        if ui.verbose or not exact:
            ui.status(_('forgetting %s\n') % ((pats and rel) or abs))
    repo.forget(forgotten)
1545 1546
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep.  It only accepts
    Python/Perl regexps.  It searches repository history, not the
    working directory.  It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match.  To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts['ignore_case']:
        reflags |= re.I
    regexp = re.compile(pattern, reflags)
    sep, eol = ':', '\n'
    if opts['print0']:
        # NUL-separated output for xargs -0 style consumers
        sep = eol = '\0'

    # cache filelog objects per file name
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # yield (linenum, colstart, colend, line) for every match in body
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            lend = body.find('\n', mend)
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
            begin = lend + 1

    # a matched line; equality/hash are by line text only so that the
    # symmetric difference below detects lines whose match status changed
    class linestate(object):
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __eq__(self, other):
            return self.line == other.line
        def __hash__(self):
            return hash(self.line)

    # matches[rev][fn] maps linestate -> linestate for one window
    matches = {}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, {})
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m[s] = s

    # FIXME: prev isn't used, why ?
    prev = {}
    ucache = {}
    def display(fn, rev, states, prevstates):
        # print the lines whose match status changed between prevstates
        # and states; returns (added, removed) counts
        diff = list(sets.Set(states).symmetric_difference(sets.Set(prevstates)))
        diff.sort(lambda x, y: cmp(x.linenum, y.linenum))
        counts = {'-': 0, '+': 0}
        filerevmatches = {}
        for l in diff:
            if incrementing or not opts['all']:
                change = ((l in prevstates) and '-') or '+'
                r = rev
            else:
                # walking backwards: attribute the change to the later rev
                change = ((l in states) and '-') or '+'
                r = prev[fn]
            cols = [fn, str(rev)]
            if opts['line_number']:
                cols.append(str(l.linenum))
            if opts['all']:
                cols.append(change)
            if opts['user']:
                cols.append(trimuser(ui, getchange(rev)[1], rev,
                                     ucache))
            if opts['files_with_matches']:
                # print each (file, rev) pair at most once
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            counts[change] += 1
        return counts['+'], counts['-']

    # fstate carries each file's match set across windows; skip marks
    # files whose first match was already reported (non --all mode)
    fstate = {}
    skip = {}
    changeiter, getchange, matchfn = walkchangerevs(ui, repo, pats, opts)
    count = 0
    incrementing = False
    for st, rev, fns in changeiter:
        if st == 'window':
            # new revision window: direction flag and fresh match store
            incrementing = rev
            matches.clear()
        elif st == 'add':
            change = repo.changelog.read(repo.lookup(str(rev)))
            mf = repo.manifest.read(change[0])
            matches[rev] = {}
            for fn in fns:
                if fn in skip:
                    continue
                fstate.setdefault(fn, {})
                try:
                    grepbody(fn, rev, getfile(fn).read(mf[fn]))
                except KeyError:
                    # file not present in this revision's manifest
                    pass
        elif st == 'iter':
            states = matches[rev].items()
            states.sort()
            for fn, m in states:
                if fn in skip:
                    continue
                if incrementing or not opts['all'] or fstate[fn]:
                    pos, neg = display(fn, rev, m, fstate[fn])
                    count += pos + neg
                    if pos and not opts['all']:
                        skip[fn] = True
                fstate[fn] = m
                prev[fn] = rev

    if not incrementing:
        # backwards walk: flush the final state of every file
        fstate = fstate.items()
        fstate.sort()
        for fn, state in fstate:
            if fn in skip:
                continue
            display(fn, rev, {}, state)
    # grep-style exit code: 0 if anything matched, 1 otherwise
    return (count == 0 and 1) or 0
1686 1687
def heads(ui, repo, **opts):
    """show current repository heads

    Show all repository head changesets.

    Repository "heads" are changesets that don't have children
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.
    """
    # limit to heads reachable from --rev when given
    if opts['rev']:
        headlist = repo.heads(repo.lookup(opts['rev']))
    else:
        headlist = repo.heads()
    branchinfo = None
    if opts['branches']:
        branchinfo = repo.branchlookup(headlist)
    displayer = show_changeset(ui, repo, opts)
    for node in headlist:
        displayer.show(changenode=node, brinfo=branchinfo)
1706 1707
def identify(ui, repo):
    """print information about the working copy

    Print a short summary of the current state of the repo.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, followed by a list of tags for this revision.
    """
    parents = [p for p in repo.dirstate.parents() if p != nullid]
    if not parents:
        ui.write(_("unknown\n"))
        return

    # full hashes with -v, short forms otherwise
    hexfunc = ui.verbose and hex or short
    modified, added, removed, deleted, unknown = repo.changes()
    dirty = (modified or added or removed or deleted) and "+" or ""
    ids = '+'.join([hexfunc(p) for p in parents])
    output = ["%s%s" % (ids, dirty)]

    if not ui.quiet:
        # multiple tags for a single parent separated by '/',
        # tags for multiple parents separated by ' + '
        parenttags = []
        for p in parents:
            tags = repo.nodetags(p)
            if tags:
                parenttags.append('/'.join(tags))
        if parenttags:
            output.append(' + '.join(parenttags))

    ui.write("%s\n" % ' '.join(output))
1736 1737
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    If a patch looks like a mail message (its first line starts with
    "From " or looks like an RFC822 header), it will not be applied
    unless the -f option is used. The importer neither parses nor
    discards mail headers, so use -f only to override the "mailness"
    safety check, not to import a real mail message.
    """
    patches = (patch1,) + patches

    # Refuse to apply onto a dirty working directory unless forced.
    if not opts['force']:
        modified, added, removed, deleted, unknown = repo.changes()
        if modified or added or removed or deleted:
            raise util.Abort(_("outstanding uncommitted changes"))

    d = opts["base"]        # directory patch file names are relative to
    strip = opts["strip"]   # number of leading path components to strip

    # a line that looks like a mail header: "From " or "Some-Word:"
    mailre = re.compile(r'(?:From |[\w-]+:)')

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'(?:Index:[ \t]|diff[ \t]|RCS file: |' +
                        'retrieving revision [0-9]+(\.[0-9]+)*$|' +
                        '(---|\*\*\*)[ \t])')

    for patch in patches:
        ui.status(_("applying %s\n") % patch)
        pf = os.path.join(d, patch)

        message = []      # commit message lines gathered from the header
        user = None       # author from "# User" when importing hg export output
        hgpatch = False   # currently inside an "# HG changeset patch" header?
        for line in file(pf):
            line = line.rstrip()
            # "mailness" check applies only before any message text was seen
            if (not message and not hgpatch and
                mailre.match(line) and not opts['force']):
                if len(line) > 35:
                    line = line[:32] + '...'
                raise util.Abort(_('first line looks like a '
                                   'mail header: ') + line)
            if diffre.match(line):
                # the diff body starts here; stop scanning the header
                break
            elif hgpatch:
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                    ui.debug(_('User: %s\n') % user)
                elif not line.startswith("# ") and line:
                    # first non-"#" line terminates the hg header block
                    message.append(line)
                    hgpatch = False
            elif line == '# HG changeset patch':
                hgpatch = True
                message = []            # We may have collected garbage
            else:
                message.append(line)

        # make sure message isn't empty
        if not message:
            message = _("imported patch %s\n") % patch
        else:
            message = "%s\n" % '\n'.join(message)
        ui.debug(_('message:\n%s\n') % message)

        # apply the patch and commit exactly the files it touched
        files = util.patch(strip, pf, ui)

        if len(files) > 0:
            addremove(ui, repo, *files)
        repo.commit(files, message, user)
1812 1813
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would be pulled if a pull
    was requested.

    For remote repository, using --bundle avoids downloading the changesets
    twice if the incoming is followed by a pull.

    See pull for valid source format details.
    """
    source = ui.expandpath(source)
    # propagate command-line transport settings into the config
    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, source)
    incoming = repo.findincoming(other, force=opts["force"])
    if not incoming:
        ui.status(_("no changes found\n"))
        return

    # cleanup holds the path of a temporary bundle to delete on exit;
    # it is reset to None when the user asked to keep the bundle file.
    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            cg = other.changegroup(incoming, "incoming")
            fname = cleanup = write_bundle(cg, fname, compress=other.local())
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)

        o = other.changelog.nodesbetween(incoming)[0]
        if opts['newest_first']:
            o.reverse()
        displayer = show_changeset(ui, other, opts)
        for n in o:
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            # skip merge changesets when --no-merges was given
            if opts['no_merges'] and len(parents) == 2:
                continue
            displayer.show(changenode=n)
            if opts['patch']:
                # diff against the first parent (or null for a root)
                prev = (parents and parents[0]) or nullid
                dodiff(ui, ui, other, prev, n)
                ui.write("\n")
    finally:
        # close the (possibly bundle-backed) remote and drop the temp file
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
1869 1870
def init(ui, dest="."):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.
    """
    # Make the target directory first when it is missing.
    if not os.path.exists(dest):
        os.mkdir(dest)
    # Constructing the repository with create=1 writes out .hg/.
    hg.repository(ui, dest, create=1)
1881 1882
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the current directory and its
    subdirectories. To search an entire repository, move to the root
    of the repository.

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    # -0 terminates names with NUL for safe consumption by xargs -0.
    if opts['print0']:
        end = '\0'
    else:
        end = '\n'
    node = None
    if opts['rev']:
        node = repo.lookup(opts['rev'])

    for src, abs, rel, exact in walk(repo, pats, opts, node=node,
                                     head='(?:.*/|)'):
        # Without an explicit revision, skip files the dirstate
        # does not know about.
        if not node and repo.dirstate.state(abs) == '?':
            continue
        if opts['fullpath']:
            name = os.path.join(repo.root, abs)
        elif pats and rel:
            name = rel
        else:
            name = abs
        ui.write(name, end)
1915 1916
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire project.

    By default this command outputs: changeset id and hash, tags,
    non-trivial parents, user, date and time, and a summary for each
    commit. When the -v/--verbose switch is used, the list of changed
    files and full commit message is shown.
    """
    class dui(object):
        # Implement and delegate some ui protocol.  Save hunks of
        # output for later display in the desired order.
        # Anything not overridden here falls through to the real ui
        # via __getattr__ (verbose/quiet/debugflag in particular).
        def __init__(self, ui):
            self.ui = ui
            self.hunk = {}      # rev -> list of buffered write() arg tuples
            self.header = {}    # rev -> list of buffered write_header() args
        def bump(self, rev):
            # start buffering output for a new revision
            self.rev = rev
            self.hunk[rev] = []
            self.header[rev] = []
        def note(self, *args):
            if self.verbose:
                self.write(*args)
        def status(self, *args):
            if not self.quiet:
                self.write(*args)
        def write(self, *args):
            self.hunk[self.rev].append(args)
        def write_header(self, *args):
            self.header[self.rev].append(args)
        def debug(self, *args):
            if self.debugflag:
                self.write(*args)
        def __getattr__(self, key):
            return getattr(self.ui, key)

    # changeiter yields ('window', ...), ('add', rev, fns), ('iter', rev, ...)
    changeiter, getchange, matchfn = walkchangerevs(ui, repo, pats, opts)

    if opts['limit']:
        try:
            limit = int(opts['limit'])
        except ValueError:
            raise util.Abort(_('limit must be a positive integer'))
        if limit <= 0: raise util.Abort(_('limit must be positive'))
    else:
        limit = sys.maxint
    count = 0

    displayer = show_changeset(ui, repo, opts)
    for st, rev, fns in changeiter:
        if st == 'window':
            # fresh buffer per window so output can be reordered
            du = dui(ui)
            displayer.ui = du
        elif st == 'add':
            du.bump(rev)
            changenode = repo.changelog.node(rev)
            parents = [p for p in repo.changelog.parents(changenode)
                       if p != nullid]
            if opts['no_merges'] and len(parents) == 2:
                continue
            if opts['only_merges'] and len(parents) != 2:
                continue

            if opts['keyword']:
                # every keyword must appear in user, description, or the
                # first 20 changed file names (case-insensitive)
                changes = getchange(rev)
                miss = 0
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            br = None
            if opts['branches']:
                br = repo.branchlookup([repo.changelog.node(rev)])

            displayer.show(rev, brinfo=br)
            if opts['patch']:
                prev = (parents and parents[0]) or nullid
                dodiff(du, du, repo, prev, changenode, match=matchfn)
                du.write("\n\n")
        elif st == 'iter':
            # flush the buffered hunks for this rev in display order
            if count == limit: break
            if du.header[rev]:
                for args in du.header[rev]:
                    # NOTE(review): 'ui' here is the real ui object, not the
                    # dui buffer; this assumes the ui class also provides
                    # write_header -- confirm against the ui module.
                    ui.write_header(*args)
            if du.hunk[rev]:
                count += 1
                for args in du.hunk[rev]:
                    ui.write(*args)
2010 2011
def manifest(ui, repo, rev=None):
    """output the latest or given revision of the project manifest

    Print a list of version controlled files for the given revision.

    The manifest is the list of files being version controlled. If no revision
    is given then the tip is used.
    """
    if not rev:
        node = repo.manifest.tip()
    else:
        try:
            # assume all revision numbers are for changesets
            node = repo.changelog.read(repo.lookup(rev))[0]
        except hg.RepoError:
            # otherwise treat it as a manifest revision
            node = repo.manifest.lookup(rev)
    contents = repo.manifest.read(node)
    flags = repo.manifest.readflags(node)
    names = contents.keys()
    names.sort()

    for name in names:
        # executable bit renders as 755, everything else as 644
        if flags[name]:
            mode = "755"
        else:
            mode = "644"
        ui.write("%40s %3s %s\n" % (hex(contents[name]), mode, name))
2036 2037
def merge(ui, repo, node=None, **opts):
    """Merge working directory with another revision

    Merge the contents of the current working directory and the
    requested revision. Files that changed between either parent are
    marked as changed for the next commit and a commit must be
    performed before any further updates are allowed.
    """
    # Merging is just an update with the merge flag forced on.
    return update(ui, repo, node=node, merge=True, **opts)
2046 2047
def outgoing(ui, repo, dest="default-push", **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repository or
    the default push location. These are the changesets that would be pushed
    if a push was requested.

    See pull for valid destination format details.
    """
    dest = ui.expandpath(dest)
    # propagate command-line transport settings into the config
    for key in ('ssh', 'remotecmd'):
        if opts[key]:
            ui.setconfig("ui", key, opts[key])

    other = hg.repository(ui, dest)
    roots = repo.findoutgoing(other, force=opts['force'])
    if not roots:
        ui.status(_("no changes found\n"))
        return
    nodes = repo.changelog.nodesbetween(roots)[0]
    if opts['newest_first']:
        nodes.reverse()
    shower = show_changeset(ui, repo, opts)
    for node in nodes:
        parents = [p for p in repo.changelog.parents(node) if p != nullid]
        # skip merge changesets when --no-merges was given
        if opts['no_merges'] and len(parents) == 2:
            continue
        shower.show(changenode=node)
        if opts['patch']:
            # diff against the first parent (or null for a root changeset)
            if parents:
                base = parents[0]
            else:
                base = nullid
            dodiff(ui, ui, repo, base, node)
            ui.write("\n")
2080 2081
def parents(ui, repo, rev=None, branches=None, **opts):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions.
    """
    if not rev:
        plist = repo.dirstate.parents()
    else:
        plist = repo.changelog.parents(repo.lookup(rev))

    brinfo = None
    if branches is not None:
        brinfo = repo.branchlookup(plist)
    shower = show_changeset(ui, repo, opts)
    for node in plist:
        # the null id means "no parent"; never display it
        if node != nullid:
            shower.show(changenode=node, brinfo=brinfo)
2098 2099
def paths(ui, repo, search=None):
    """show definition of symbolic path names

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    """
    entries = ui.configitems("paths")
    if not search:
        # no name given: dump every configured path
        for name, path in entries:
            ui.write("%s = %s\n" % (name, path))
        return
    # look for an exact name match; report failure with exit status 1
    for name, path in entries:
        if name == search:
            ui.write("%s\n" % path)
            return
    ui.warn(_("not found!\n"))
    return 1
2118 2119
def postincoming(ui, repo, modheads, optupdate):
    # Shared tail for pull/unbundle: decide whether to auto-update the
    # working directory and what hint to print.
    if modheads == 0:
        # nothing came in; stay silent
        return
    if optupdate:
        if modheads == 1:
            # exactly one head changed, so an automatic update is safe
            return update(ui, repo)
        ui.status(_("not updating, since new heads added\n"))
    if modheads > 1:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
2131 2132
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Valid URLs are of the form:

      local/filesystem/path
      http://[user@]host[:port][/path]
      https://[user@]host[:port][/path]
      ssh://[user@]host[:port][/path]

    Some notes about using SSH with Mercurial:
    - SSH requires an accessible shell account on the destination machine
      and a copy of hg in the remote path or specified with remotecmd.
    - /path is relative to the remote user's home directory by default.
      Use two slashes at the start of a path to specify an absolute path.
    - Mercurial doesn't use its own compression via SSH; the right thing
      to do is to configure it in your ~/.ssh/ssh_config, e.g.:
        Host *.mylocalnetwork.example.com
          Compression off
        Host *
          Compression on
      Alternatively specify "ssh -C" as your ssh command in your hgrc or
      with the --ssh command line option.
    """
    source = ui.expandpath(source)
    ui.status(_('pulling from %s\n') % (source))

    # propagate command-line transport settings into the config
    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, source)
    revs = None
    # -r needs server-side name lookup, which only local repos support
    if opts['rev'] and not other.local():
        raise util.Abort(_("pull -r doesn't work for remote repositories yet"))
    elif opts['rev']:
        revs = [other.lookup(rev) for rev in opts['rev']]
    modheads = repo.pull(other, heads=revs, force=opts['force'])
    # possibly update the working dir and print follow-up hints
    return postincoming(ui, repo, modheads, opts['update'])
2178 2179
def push(ui, repo, dest="default-push", **opts):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates that
    the client has forgotten to sync and merge before pushing.

    Valid URLs are of the form:

      local/filesystem/path
      ssh://[user@]host[:port][/path]

    Look at the help text for the pull command for important details
    about ssh:// URLs.
    """
    dest = ui.expandpath(dest)
    ui.status('pushing to %s\n' % (dest))

    # propagate command-line transport settings into the config
    if opts['ssh']:
        ui.setconfig("ui", "ssh", opts['ssh'])
    if opts['remotecmd']:
        ui.setconfig("ui", "remotecmd", opts['remotecmd'])

    other = hg.repository(ui, dest)
    revs = None
    if opts['rev']:
        revs = [repo.lookup(rev) for rev in opts['rev']]
    # repo.push returns 0 on failure, so invert it for the exit status
    r = repo.push(other, opts['force'], revs=revs)
    return r == 0
2215 2216
2216 2217 def rawcommit(ui, repo, *flist, **rc):
2217 2218 """raw commit interface (DEPRECATED)
2218 2219
2219 2220 (DEPRECATED)
2220 2221 Lowlevel commit, for use in helper scripts.
2221 2222
2222 2223 This command is not intended to be used by normal users, as it is
2223 2224 primarily useful for importing from other SCMs.
2224 2225
2225 2226 This command is now deprecated and will be removed in a future
2226 2227 release, please use debugsetparents and commit instead.
2227 2228 """
2228 2229
2229 2230 ui.warn(_("(the rawcommit command is deprecated)\n"))
2230 2231
2231 2232 message = rc['message']
2232 2233 if not message and rc['logfile']:
2233 2234 try:
2234 2235 message = open(rc['logfile']).read()
2235 2236 except IOError:
2236 2237 pass
2237 2238 if not message and not rc['logfile']:
2238 2239 raise util.Abort(_("missing commit message"))
2239 2240
2240 2241 files = relpath(repo, list(flist))
2241 2242 if rc['files']:
2242 2243 files += open(rc['files']).read().splitlines()
2243 2244
2244 2245 rc['parent'] = map(repo.lookup, rc['parent'])
2245 2246
2246 2247 try:
2247 2248 repo.rawcommit(files, message, rc['user'], rc['date'], *rc['parent'])
2248 2249 except ValueError, inst:
2249 2250 raise util.Abort(str(inst))
2250 2251
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # Only verify when the journal rollback actually did something.
    rolled_back = repo.recover()
    if not rolled_back:
        return False
    return repo.verify()
2262 2263
def remove(ui, repo, pat, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This command schedules the files to be removed at the next commit.
    This only removes files from the current branch, not from the
    entire project history. If the files still exist in the working
    directory, they will be deleted from it.
    """
    names = []
    def okaytoremove(abs, rel, exact):
        # Returns True only when nothing blocks the removal; any other
        # path through this function returns None (falsy).
        modified, added, removed, deleted, unknown = repo.changes(files=[abs])
        reason = None
        if modified and not opts['force']:
            reason = _('is modified')
        elif added:
            reason = _('has been marked for add')
        elif unknown:
            reason = _('is not managed')
        if reason:
            # only warn for names the user spelled out explicitly;
            # pattern-matched (inexact) files are skipped silently
            if exact:
                ui.warn(_('not removing %s: file %s\n') % (rel, reason))
        else:
            return True
    for src, abs, rel, exact in walk(repo, (pat,) + pats, opts):
        if okaytoremove(abs, rel, exact):
            if ui.verbose or not exact:
                ui.status(_('removing %s\n') % rel)
            names.append(abs)
    # unlink=True also deletes the files from the working directory
    repo.remove(names, unlink=True)
2294 2295
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit.

    NOTE: This command should be treated as experimental. While it
    should properly record rename files, this information is not yet
    fully used by merge, nor fully reported by log.
    """
    wlock = repo.wlock(0)
    # perform the copy half, then schedule each copied source for removal
    errs, copied = docopy(ui, repo, pats, opts, wlock)
    removing = []
    for abs, rel, exact in copied:
        if ui.verbose or not exact:
            ui.status(_('removing %s\n') % rel)
        removing.append(abs)
    repo.remove(removing, True, wlock)
    return errs
2321 2322
def revert(ui, repo, *pats, **opts):
    """revert modified files or dirs back to their unmodified states

    In its default mode, it reverts any uncommitted modifications made
    to the named files or directories. This restores the contents of
    the affected files to an unmodified state.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.

    Using the -r option, it reverts the given files or directories to
    their state as of an earlier revision. This can be helpful to "roll
    back" some or all of a change that should not have been committed.

    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the current working directory.

    If a file has been deleted, it is recreated. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.

    If no arguments are given, all files in the repository are reverted.
    """
    # target revision defaults to the first working-directory parent
    parent = repo.dirstate.parents()[0]
    node = opts['rev'] and repo.lookup(opts['rev']) or parent
    mf = repo.manifest.read(repo.changelog.read(node)[0])

    wlock = repo.wlock()

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    names = {}          # abs -> (rel, exact) for every candidate file
    target_only = {}    # files present only in the target manifest

    # walk dirstate.

    for src, abs, rel, exact in walk(repo, pats, opts, badmatch=mf.has_key):
        names[abs] = (rel, exact)
        if src == 'b':
            target_only[abs] = True

    # walk target manifest.

    for src, abs, rel, exact in walk(repo, pats, opts, node=node,
                                     badmatch=names.has_key):
        if abs in names: continue
        names[abs] = (rel, exact)
        target_only[abs] = True

    changes = repo.changes(match=names.has_key, wlock=wlock)
    # dicts here give O(1) "abs in table" tests in the dispatch loop
    modified, added, removed, deleted, unknown = map(dict.fromkeys, changes)

    # each action is (list of files to act on, status message template)
    revert = ([], _('reverting %s\n'))
    add = ([], _('adding %s\n'))
    remove = ([], _('removing %s\n'))
    forget = ([], _('forgetting %s\n'))
    undelete = ([], _('undeleting %s\n'))
    update = {}

    disptable = (
        # dispatch table:
        #   file state
        #   action if in target manifest
        #   action if not in target manifest
        #   make backup if in target manifest
        #   make backup if not in target manifest
        (modified, revert, remove, True, True),
        (added, revert, forget, True, True),
        (removed, undelete, None, False, False),
        (deleted, revert, remove, False, False),
        (unknown, add, None, True, False),
        (target_only, add, None, False, False),
        )

    entries = names.items()
    entries.sort()

    for abs, (rel, exact) in entries:
        in_mf = abs in mf
        def handle(xlist, dobackup):
            # record the file under an action and optionally back it up
            xlist[0].append(abs)
            if dobackup and not opts['no_backup'] and os.path.exists(rel):
                bakname = "%s.orig" % rel
                ui.note(_('saving current version of %s as %s\n') %
                        (rel, bakname))
                shutil.copyfile(rel, bakname)
                shutil.copymode(rel, bakname)
            if ui.verbose or not exact:
                ui.status(xlist[1] % rel)
        for table, hitlist, misslist, backuphit, backupmiss in disptable:
            if abs not in table: continue
            # file has changed in dirstate
            if in_mf:
                handle(hitlist, backuphit)
            elif misslist is not None:
                handle(misslist, backupmiss)
            else:
                if exact: ui.warn(_('file not managed: %s\n' % rel))
            break
        else:
            # file has not changed in dirstate (for-else: no table matched)
            if node == parent:
                if exact: ui.warn(_('no changes needed to %s\n' % rel))
                continue
            if not in_mf:
                handle(remove, False)
            update[abs] = True

    # apply the collected actions: forget first, then let update() do
    # the file content work, then fix up the dirstate records
    repo.dirstate.forget(forget[0])
    r = repo.update(node, False, True, update.has_key, False, wlock=wlock)
    repo.dirstate.update(add[0], 'a')
    repo.dirstate.update(undelete[0], 'n')
    repo.dirstate.update(remove[0], 'r')
    return r
2439 2440
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
2446 2447
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the "-A" and "-E" options to log to files.
    """

    # --stdio: speak the ssh serve protocol on stdin/stdout instead of HTTP
    if opts["stdio"]:
        fin, fout = sys.stdin, sys.stdout
        # anything printed by library code must not corrupt the protocol
        sys.stdout = sys.stderr

        # Prevent insertion/deletion of CRs
        util.set_binary(fin)
        util.set_binary(fout)

        def getarg():
            # read one "name length\n" line followed by length bytes
            argline = fin.readline()[:-1]
            arg, l = argline.split()
            val = fin.read(int(l))
            return arg, val
        def respond(v):
            # length-prefixed reply
            fout.write("%d\n" % len(v))
            fout.write(v)
            fout.flush()

        lock = None

        while 1:
            cmd = fin.readline()[:-1]
            if cmd == '':
                # EOF: client closed the connection
                return
            # NOTE(review): the first three commands are tested with plain
            # 'if' while the rest use 'elif'; harmless since the command
            # strings are distinct, but 'elif' throughout would be clearer.
            if cmd == "heads":
                h = repo.heads()
                respond(" ".join(map(hex, h)) + "\n")
            if cmd == "lock":
                lock = repo.lock()
                respond("")
            if cmd == "unlock":
                if lock:
                    lock.release()
                lock = None
                respond("")
            elif cmd == "branches":
                arg, nodes = getarg()
                nodes = map(bin, nodes.split(" "))
                r = []
                for b in repo.branches(nodes):
                    r.append(" ".join(map(hex, b)) + "\n")
                respond("".join(r))
            elif cmd == "between":
                arg, pairs = getarg()
                pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
                r = []
                for b in repo.between(pairs):
                    r.append(" ".join(map(hex, b)) + "\n")
                respond("".join(r))
            elif cmd == "changegroup":
                nodes = []
                arg, roots = getarg()
                nodes = map(bin, roots.split(" "))

                # stream the changegroup raw, in 4k chunks
                cg = repo.changegroup(nodes, 'serve')
                while 1:
                    d = cg.read(4096)
                    if not d:
                        break
                    fout.write(d)

                fout.flush()

            elif cmd == "addchangegroup":
                # pushing requires the client to hold the repo lock
                if not lock:
                    respond("not locked")
                    continue
                respond("")

                r = repo.addchangegroup(fin)
                respond(str(r))

    # HTTP mode: forward relevant options into the [web] config section
    optlist = "name templates style address port ipv6 accesslog errorlog"
    for o in optlist.split():
        if opts[o]:
            ui.setconfig("web", o, opts[o])

    # --daemon: re-exec ourselves detached, then wait for the child to
    # signal readiness over a pipe before the parent exits
    if opts['daemon'] and not opts['daemon_pipefds']:
        rfd, wfd = os.pipe()
        args = sys.argv[:]
        args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
        pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                         args[0], args)
        os.close(wfd)
        os.read(rfd, 1)
        os._exit(0)

    try:
        httpd = hgweb.create_server(repo)
    except socket.error, inst:
        raise util.Abort(_('cannot start server: ') + inst.args[1])

    if ui.verbose:
        addr, port = httpd.socket.getsockname()
        if addr == '0.0.0.0':
            # wildcard bind: show the machine's hostname instead
            addr = socket.gethostname()
        else:
            try:
                addr = socket.gethostbyaddr(addr)[0]
            except socket.error:
                pass
        if port != 80:
            ui.status(_('listening at http://%s:%d/\n') % (addr, port))
        else:
            ui.status(_('listening at http://%s/\n') % addr)

    if opts['pid_file']:
        fp = open(opts['pid_file'], 'w')
        fp.write(str(os.getpid()))
        fp.close()

    # child side of --daemon: tell the parent we are up, then detach
    # stdio from the terminal by pointing fds 0-2 at the null device
    if opts['daemon_pipefds']:
        rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
        os.close(rfd)
        os.write(wfd, 'y')
        os.close(wfd)
        sys.stdout.flush()
        sys.stderr.flush()
        fd = os.open(util.nulldev, os.O_RDWR)
        if fd != 0: os.dup2(fd, 0)
        if fd != 1: os.dup2(fd, 1)
        if fd != 2: os.dup2(fd, 2)
        if fd not in (0, 1, 2): os.close(fd)

    httpd.serve_forever()
2581 2582
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show changed files in the repository. If names are
    given, only files that match are shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    ! = deleted, but still tracked
    ? = not tracked
    I = ignored (not shown by default)
    """

    show_ignored = bool(opts['ignored'])
    files, matchfn, anypats = matchpats(repo, pats, opts)
    # make paths relative to the cwd only when patterns were given
    cwd = (pats and repo.getcwd()) or ''
    statuslists = []
    for names in repo.changes(files=files, match=matchfn,
                              show_ignored=show_ignored):
        statuslists.append([util.pathto(cwd, name) for name in names])
    modified, added, removed, deleted, unknown, ignored = statuslists

    changetypes = [('modified', 'M', modified),
                   ('added', 'A', added),
                   ('removed', 'R', removed),
                   ('deleted', '!', deleted),
                   ('unknown', '?', unknown),
                   ('ignored', 'I', ignored)]

    end = (opts['print0'] and '\0') or '\n'

    # show only the categories the user asked for; all of them by default
    selected = [ct for ct in changetypes if opts[ct[0]]] or changetypes
    for opt, char, changes in selected:
        if opts['no_status']:
            format = "%%s%s" % end
        else:
            format = "%s %%s%s" % (char, end)

        for name in changes:
            ui.write(format % name)
2623 2624
2624 2625 def tag(ui, repo, name, rev_=None, **opts):
2625 2626 """add a tag for the current tip or a given revision
2626 2627
2627 2628 Name a particular revision using <name>.
2628 2629
2629 2630 Tags are used to name particular revisions of the repository and are
2630 2631 very useful to compare different revision, to go back to significant
2631 2632 earlier versions or to mark branch points as releases, etc.
2632 2633
2633 2634 If no revision is given, the tip is used.
2634 2635
2635 2636 To facilitate version control, distribution, and merging of tags,
2636 2637 they are stored as a file named ".hgtags" which is managed
2637 2638 similarly to other project files and can be hand-edited if
2638 2639 necessary. The file '.hg/localtags' is used for local tags (not
2639 2640 shared among repositories).
2640 2641 """
2641 2642 if name == "tip":
2642 2643 raise util.Abort(_("the name 'tip' is reserved"))
2643 2644 if rev_ is not None:
2644 2645 ui.warn(_("use of 'hg tag NAME [REV]' is deprecated, "
2645 2646 "please use 'hg tag [-r REV] NAME' instead\n"))
2646 2647 if opts['rev']:
2647 2648 raise util.Abort(_("use only one form to specify the revision"))
2648 2649 if opts['rev']:
2649 2650 rev_ = opts['rev']
2650 2651 if rev_:
2651 2652 r = hex(repo.lookup(rev_))
2652 2653 else:
2653 2654 r = hex(repo.changelog.tip())
2654 2655
2655 2656 disallowed = (revrangesep, '\r', '\n')
2656 2657 for c in disallowed:
2657 2658 if name.find(c) >= 0:
2658 2659 raise util.Abort(_("%s cannot be used in a tag name") % repr(c))
2659 2660
2660 2661 repo.hook('pretag', throw=True, node=r, tag=name,
2661 2662 local=int(not not opts['local']))
2662 2663
2663 2664 if opts['local']:
2664 2665 repo.opener("localtags", "a").write("%s %s\n" % (r, name))
2665 2666 repo.hook('tag', node=r, tag=name, local=1)
2666 2667 return
2667 2668
2668 2669 for x in repo.changes():
2669 2670 if ".hgtags" in x:
2670 2671 raise util.Abort(_("working copy of .hgtags is changed "
2671 2672 "(please commit .hgtags manually)"))
2672 2673
2673 2674 repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
2674 2675 if repo.dirstate.state(".hgtags") == '?':
2675 2676 repo.add([".hgtags"])
2676 2677
2677 2678 message = (opts['message'] or
2678 2679 _("Added tag %s for changeset %s") % (name, r))
2679 2680 try:
2680 2681 repo.commit([".hgtags"], message, opts['user'], opts['date'])
2681 2682 repo.hook('tag', node=r, tag=name, local=0)
2682 2683 except ValueError, inst:
2683 2684 raise util.Abort(str(inst))
2684 2685
def tags(ui, repo):
    """list repository tags

    List the repository tags.

    This lists both regular and local tags.
    """

    taglist = repo.tagslist()
    taglist.reverse()
    for tagname, node in taglist:
        try:
            info = "%5d:%s" % (repo.changelog.rev(node), hex(node))
        except KeyError:
            # the tagged node is not present in this repository
            info = "    ?:?"
        if ui.quiet:
            ui.write("%s\n" % tagname)
        else:
            ui.write("%-30s %s\n" % (tagname, info))
2704 2705
def tip(ui, repo, **opts):
    """show the tip revision

    Show the tip revision.
    """
    tipnode = repo.changelog.tip()
    brinfo = None
    if opts['branches']:
        brinfo = repo.branchlookup([tipnode])
    show_changeset(ui, repo, opts).show(changenode=tipnode, brinfo=brinfo)
    if opts['patch']:
        # diff against the first parent of tip
        prev = repo.changelog.parents(tipnode)[0]
        dodiff(ui, ui, repo, prev, tipnode)
2717 2718
def unbundle(ui, repo, fname, **opts):
    """apply a changegroup file

    Apply a compressed changegroup file generated by the bundle
    command.
    """
    f = urllib.urlopen(fname)

    # the six-byte magic identifies the bundle format and compression
    header = f.read(6)
    if not header.startswith("HG"):
        raise util.Abort(_("%s: not a Mercurial bundle file") % fname)
    if not header.startswith("HG10"):
        raise util.Abort(_("%s: unknown bundle version") % fname)
    if header == "HG10BZ":
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the stream omits the "BZ" prefix; prime the decompressor
            zd.decompress("BZ")
            for chunk in f:
                yield zd.decompress(chunk)
    elif header == "HG10UN":
        # uncompressed bundle: pass chunks straight through
        def generator(f):
            for chunk in f:
                yield chunk
    else:
        raise util.Abort(_("%s: unknown bundle compression type")
                         % fname)
    gen = generator(util.filechunkiter(f, 4096))
    modheads = repo.addchangegroup(util.chunkbuffer(gen))
    return postincoming(ui, repo, modheads, opts['update'])
2747 2748
def undo(ui, repo):
    """undo the last commit or pull

    Roll back the last pull or commit transaction on the
    repository, restoring the project to its earlier state.

    This command should be used with care. There is only one level of
    undo and there is no redo.

    This command is not intended for use on public repositories. Once
    a change is visible for pull by other users, undoing it locally is
    ineffective.
    """
    # all the work is delegated to the repository object's rollback logic
    repo.undo()
2762 2763
def update(ui, repo, node=None, merge=False, clean=False, force=None,
           branch=None, **opts):
    """update or merge working directory

    Update the working directory to the specified revision.

    If there are no outstanding changes in the working directory and
    there is a linear relationship between the current version and the
    requested version, the result is the requested version.

    Otherwise the result is a merge between the contents of the
    current working directory and the requested version. Files that
    changed between either parent are marked as changed for the next
    commit and a commit must be performed before any further updates
    are allowed.

    By default, update will refuse to run if doing so would require
    merging or discarding local changes.
    """
    if branch:
        # resolve a branch name to a head; refuse if it is ambiguous
        br = repo.branchlookup(branch=branch)
        heads = [h for h in br if branch in br[h]]
        if len(heads) > 1:
            ui.warn(_("Found multiple heads for %s\n") % branch)
            for h in heads:
                show_changeset(ui, repo, opts).show(changenode=h, brinfo=br)
            return 1
        elif len(heads) == 1:
            node = heads[0]
            ui.warn(_("Using head %s for branch %s\n") % (short(node), branch))
        else:
            ui.warn(_("branch %s not found\n") % (branch))
            return 1
    else:
        node = (node and repo.lookup(node)) or repo.changelog.tip()
    return repo.update(node, allow=merge, force=clean, forcemerge=force)
2802 2803
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # delegate entirely to the repository object; its return value is
    # used as the command's exit status (presumably 0 when clean --
    # confirm against the repository implementation)
    return repo.verify()
2814 2815
2815 2816 # Command options and aliases are listed here, alphabetically
2816 2817
# Command table: maps a command spec string to a
# (function, option table, synopsis) tuple.  The spec is
# "name|alias1|alias2..."; a leading "^" marks the command for the
# short help list.  Each option entry is
# (short flag, long name, default value, help text).
# Fixes the user-visible typo "commiter" -> "committer" in the help
# text of the commit and tag -u options.
table = {
    "^add":
    (add,
     [('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg add [OPTION]... [FILE]...')),
    "addremove":
    (addremove,
     [('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg addremove [OPTION]... [FILE]...')),
    "^annotate":
    (annotate,
     [('r', 'rev', '', _('annotate the specified revision')),
      ('a', 'text', None, _('treat all files as text')),
      ('u', 'user', None, _('list the author')),
      ('d', 'date', None, _('list the date')),
      ('n', 'number', None, _('list the revision number (default)')),
      ('c', 'changeset', None, _('list the changeset')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg annotate [-r REV] [-a] [-u] [-d] [-n] [-c] FILE...')),
    "bundle":
    (bundle,
     [('f', 'force', None,
       _('run even when remote repository is unrelated'))],
     _('hg bundle FILE DEST')),
    "cat":
    (cat,
     [('o', 'output', '', _('print output to file with formatted name')),
      ('r', 'rev', '', _('print the given revision')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg cat [OPTION]... FILE...')),
    "^clone":
    (clone,
     [('U', 'noupdate', None, _('do not update the new working directory')),
      ('r', 'rev', [],
       _('a changeset you would like to have after cloning')),
      ('', 'pull', None, _('use pull protocol to copy metadata')),
      ('e', 'ssh', '', _('specify ssh command to use')),
      ('', 'remotecmd', '',
       _('specify hg command to run on the remote side'))],
     _('hg clone [OPTION]... SOURCE [DEST]')),
    "^commit|ci":
    (commit,
     [('A', 'addremove', None, _('run addremove during commit')),
      ('m', 'message', '', _('use <text> as commit message')),
      ('l', 'logfile', '', _('read the commit message from <file>')),
      ('d', 'date', '', _('record datecode as commit date')),
      ('u', 'user', '', _('record user as committer')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg commit [OPTION]... [FILE]...')),
    "copy|cp":
    (copy,
     [('A', 'after', None, _('record a copy that has already occurred')),
      ('f', 'force', None,
       _('forcibly copy over an existing managed file')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg copy [OPTION]... [SOURCE]... DEST')),
    "debugancestor": (debugancestor, [], _('debugancestor INDEX REV1 REV2')),
    "debugcomplete":
    (debugcomplete,
     [('o', 'options', None, _('show the command options'))],
     _('debugcomplete [-o] CMD')),
    "debugrebuildstate":
    (debugrebuildstate,
     [('r', 'rev', '', _('revision to rebuild to'))],
     _('debugrebuildstate [-r REV] [REV]')),
    "debugcheckstate": (debugcheckstate, [], _('debugcheckstate')),
    "debugconfig": (debugconfig, [], _('debugconfig')),
    "debugsetparents": (debugsetparents, [], _('debugsetparents REV1 [REV2]')),
    "debugstate": (debugstate, [], _('debugstate')),
    "debugdata": (debugdata, [], _('debugdata FILE REV')),
    "debugindex": (debugindex, [], _('debugindex FILE')),
    "debugindexdot": (debugindexdot, [], _('debugindexdot FILE')),
    "debugrename": (debugrename, [], _('debugrename FILE [REV]')),
    "debugwalk":
    (debugwalk,
     [('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('debugwalk [OPTION]... [FILE]...')),
    "^diff":
    (diff,
     [('r', 'rev', [], _('revision')),
      ('a', 'text', None, _('treat all files as text')),
      ('p', 'show-function', None,
       _('show which function each change is in')),
      ('w', 'ignore-all-space', None,
       _('ignore white space when comparing lines')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg diff [-a] [-I] [-X] [-r REV1 [-r REV2]] [FILE]...')),
    "^export":
    (export,
     [('o', 'output', '', _('print output to file with formatted name')),
      ('a', 'text', None, _('treat all files as text')),
      ('', 'switch-parent', None, _('diff against the second parent'))],
     _('hg export [-a] [-o OUTFILESPEC] REV...')),
    "forget":
    (forget,
     [('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg forget [OPTION]... FILE...')),
    "grep":
    (grep,
     [('0', 'print0', None, _('end fields with NUL')),
      ('', 'all', None, _('print all revisions that match')),
      ('i', 'ignore-case', None, _('ignore case when matching')),
      ('l', 'files-with-matches', None,
       _('print only filenames and revs that match')),
      ('n', 'line-number', None, _('print matching line numbers')),
      ('r', 'rev', [], _('search in given revision range')),
      ('u', 'user', None, _('print user who committed change')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg grep [OPTION]... PATTERN [FILE]...')),
    "heads":
    (heads,
     [('b', 'branches', None, _('show branches')),
      ('', 'style', '', _('display using template map file')),
      ('r', 'rev', '', _('show only heads which are descendants of rev')),
      ('', 'template', '', _('display with template'))],
     _('hg heads [-b] [-r <rev>]')),
    "help": (help_, [], _('hg help [COMMAND]')),
    "identify|id": (identify, [], _('hg identify')),
    "import|patch":
    (import_,
     [('p', 'strip', 1,
       _('directory strip option for patch. This has the same\n') +
       _('meaning as the corresponding patch option')),
      ('b', 'base', '', _('base path')),
      ('f', 'force', None,
       _('skip check for outstanding uncommitted changes'))],
     _('hg import [-p NUM] [-b BASE] [-f] PATCH...')),
    "incoming|in":
    (incoming,
     [('M', 'no-merges', None, _('do not show merges')),
      ('f', 'force', None,
       _('run even when remote repository is unrelated')),
      ('', 'style', '', _('display using template map file')),
      ('n', 'newest-first', None, _('show newest record first')),
      ('', 'bundle', '', _('file to store the bundles into')),
      ('p', 'patch', None, _('show patch')),
      ('', 'template', '', _('display with template')),
      ('e', 'ssh', '', _('specify ssh command to use')),
      ('', 'remotecmd', '',
       _('specify hg command to run on the remote side'))],
     _('hg incoming [-p] [-n] [-M] [--bundle FILENAME] [SOURCE]')),
    "^init": (init, [], _('hg init [DEST]')),
    "locate":
    (locate,
     [('r', 'rev', '', _('search the repository as it stood at rev')),
      ('0', 'print0', None,
       _('end filenames with NUL, for use with xargs')),
      ('f', 'fullpath', None,
       _('print complete paths from the filesystem root')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg locate [OPTION]... [PATTERN]...')),
    "^log|history":
    (log,
     [('b', 'branches', None, _('show branches')),
      ('k', 'keyword', [], _('search for a keyword')),
      ('l', 'limit', '', _('limit number of changes displayed')),
      ('r', 'rev', [], _('show the specified revision or range')),
      ('M', 'no-merges', None, _('do not show merges')),
      ('', 'style', '', _('display using template map file')),
      ('m', 'only-merges', None, _('show only merges')),
      ('p', 'patch', None, _('show patch')),
      ('', 'template', '', _('display with template')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg log [OPTION]... [FILE]')),
    "manifest": (manifest, [], _('hg manifest [REV]')),
    "merge":
    (merge,
     [('b', 'branch', '', _('merge with head of a specific branch')),
      ('', 'style', '', _('display using template map file')),
      ('f', 'force', None, _('force a merge with outstanding changes')),
      ('', 'template', '', _('display with template'))],
     _('hg merge [-b TAG] [-f] [REV]')),
    "outgoing|out":
    (outgoing,
     [('M', 'no-merges', None, _('do not show merges')),
      ('f', 'force', None,
       _('run even when remote repository is unrelated')),
      ('p', 'patch', None, _('show patch')),
      ('', 'style', '', _('display using template map file')),
      ('n', 'newest-first', None, _('show newest record first')),
      ('', 'template', '', _('display with template')),
      ('e', 'ssh', '', _('specify ssh command to use')),
      ('', 'remotecmd', '',
       _('specify hg command to run on the remote side'))],
     _('hg outgoing [-M] [-p] [-n] [DEST]')),
    "^parents":
    (parents,
     [('b', 'branches', None, _('show branches')),
      ('', 'style', '', _('display using template map file')),
      ('', 'template', '', _('display with template'))],
     _('hg parents [-b] [REV]')),
    "paths": (paths, [], _('hg paths [NAME]')),
    "^pull":
    (pull,
     [('u', 'update', None,
       _('update the working directory to tip after pull')),
      ('e', 'ssh', '', _('specify ssh command to use')),
      ('f', 'force', None,
       _('run even when remote repository is unrelated')),
      ('r', 'rev', [], _('a specific revision you would like to pull')),
      ('', 'remotecmd', '',
       _('specify hg command to run on the remote side'))],
     _('hg pull [-u] [-e FILE] [-r REV]... [--remotecmd FILE] [SOURCE]')),
    "^push":
    (push,
     [('f', 'force', None, _('force push')),
      ('e', 'ssh', '', _('specify ssh command to use')),
      ('r', 'rev', [], _('a specific revision you would like to push')),
      ('', 'remotecmd', '',
       _('specify hg command to run on the remote side'))],
     _('hg push [-f] [-e FILE] [-r REV]... [--remotecmd FILE] [DEST]')),
    "debugrawcommit|rawcommit":
    (rawcommit,
     [('p', 'parent', [], _('parent')),
      ('d', 'date', '', _('date code')),
      ('u', 'user', '', _('user')),
      ('F', 'files', '', _('file list')),
      ('m', 'message', '', _('commit message')),
      ('l', 'logfile', '', _('commit message file'))],
     _('hg debugrawcommit [OPTION]... [FILE]...')),
    "recover": (recover, [], _('hg recover')),
    "^remove|rm":
    (remove,
     [('f', 'force', None, _('remove file even if modified')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg remove [OPTION]... FILE...')),
    "rename|mv":
    (rename,
     [('A', 'after', None, _('record a rename that has already occurred')),
      ('f', 'force', None,
       _('forcibly copy over an existing managed file')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg rename [OPTION]... SOURCE... DEST')),
    "^revert":
    (revert,
     [('r', 'rev', '', _('revision to revert to')),
      ('', 'no-backup', None, _('do not save backup copies of files')),
      ('I', 'include', [], _('include names matching given patterns')),
      ('X', 'exclude', [], _('exclude names matching given patterns'))],
     _('hg revert [-r REV] [NAME]...')),
    "root": (root, [], _('hg root')),
    "^serve":
    (serve,
     [('A', 'accesslog', '', _('name of access log file to write to')),
      ('d', 'daemon', None, _('run server in background')),
      ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
      ('E', 'errorlog', '', _('name of error log file to write to')),
      ('p', 'port', 0, _('port to use (default: 8000)')),
      ('a', 'address', '', _('address to use')),
      ('n', 'name', '',
       _('name to show in web pages (default: working dir)')),
      ('', 'pid-file', '', _('name of file to write process ID to')),
      ('', 'stdio', None, _('for remote clients')),
      ('t', 'templates', '', _('web templates to use')),
      ('', 'style', '', _('template style to use')),
      ('6', 'ipv6', None, _('use IPv6 in addition to IPv4'))],
     _('hg serve [OPTION]...')),
    "^status|st":
    (status,
     [('m', 'modified', None, _('show only modified files')),
      ('a', 'added', None, _('show only added files')),
      ('r', 'removed', None, _('show only removed files')),
      ('d', 'deleted', None, _('show only deleted (but tracked) files')),
      ('u', 'unknown', None, _('show only unknown (not tracked) files')),
      ('i', 'ignored', None, _('show ignored files')),
      ('n', 'no-status', None, _('hide status prefix')),
      ('0', 'print0', None,
       _('end filenames with NUL, for use with xargs')),
      ('I', 'include', [], _('include names matching the given patterns')),
      ('X', 'exclude', [], _('exclude names matching the given patterns'))],
     _('hg status [OPTION]... [FILE]...')),
    "tag":
    (tag,
     [('l', 'local', None, _('make the tag local')),
      ('m', 'message', '', _('message for tag commit log entry')),
      ('d', 'date', '', _('record datecode as commit date')),
      ('u', 'user', '', _('record user as committer')),
      ('r', 'rev', '', _('revision to tag'))],
     _('hg tag [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME')),
    "tags": (tags, [], _('hg tags')),
    "tip":
    (tip,
     [('b', 'branches', None, _('show branches')),
      ('', 'style', '', _('display using template map file')),
      ('p', 'patch', None, _('show patch')),
      ('', 'template', '', _('display with template'))],
     _('hg tip [-b] [-p]')),
    "unbundle":
    (unbundle,
     [('u', 'update', None,
       _('update the working directory to tip after unbundle'))],
     _('hg unbundle [-u] FILE')),
    "undo": (undo, [], _('hg undo')),
    "^update|up|checkout|co":
    (update,
     [('b', 'branch', '', _('checkout the head of a specific branch')),
      ('', 'style', '', _('display using template map file')),
      ('m', 'merge', None, _('allow merging of branches')),
      ('C', 'clean', None, _('overwrite locally modified files')),
      ('f', 'force', None, _('force a merge with outstanding changes')),
      ('', 'template', '', _('display with template'))],
     _('hg update [-b TAG] [-m] [-C] [-f] [REV]')),
    "verify": (verify, [], _('hg verify')),
    "version": (show_version, [], _('hg version')),
}
3134 3135
# Options accepted by every command, before or after the command name.
# Same entry format as the command table: (short, long, default, help).
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or symbolic path name')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]
3151 3152
# space-separated names of commands that run without any repository
norepo = ("clone init version help debugancestor debugcomplete debugdata"
          " debugindex debugindexdot")
# commands that use a repository when one is present but tolerate its absence
optionalrepo = ("paths debugconfig")
3155 3156
def findpossible(cmd):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command
    """
    choice = {}
    debugchoice = {}
    for spec in table.keys():
        aliases = spec.lstrip("^").split("|")
        if cmd in aliases:
            # exact match always lands in the visible choices
            choice[cmd] = (aliases, table[spec])
            continue
        hidden = aliases[0].startswith("debug")
        for alias in aliases:
            if alias.startswith(cmd):
                # prefix matches on debug commands are kept separately so
                # they do not shadow ordinary commands
                if hidden:
                    debugchoice[alias] = (aliases, table[spec])
                else:
                    choice[alias] = (aliases, table[spec])
                break

    # fall back to debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice
3180 3181
def find(cmd):
    """Return (aliases, command table entry) for command string.

    Raises AmbiguousCommand when cmd prefixes several commands and
    UnknownCommand when it matches none.
    """
    choice = findpossible(cmd)

    # use the ``in`` operator instead of the deprecated dict.has_key()
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise UnknownCommand(cmd)
3197 3198
# raised by the catchterm() signal handler so termination signals can be
# handled like a normal keyboard interrupt
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
3200 3201
def catchterm(*args):
    # signal-handler signature: (signum, frame); both are ignored and the
    # signal is converted into a catchable exception
    raise SignalInterrupt
3203 3204
def run():
    # command-line entry point: dispatch argv (minus the program name)
    # and use its return value as the process exit status
    sys.exit(dispatch(sys.argv[1:]))
3206 3207
# args are (command-or-None, getopt error instance)
class ParseError(Exception):
    """Exception raised on errors in parsing the command line."""
3209 3210
3210 3211 def parse(ui, args):
3211 3212 options = {}
3212 3213 cmdoptions = {}
3213 3214
3214 3215 try:
3215 3216 args = fancyopts.fancyopts(args, globalopts, options)
3216 3217 except fancyopts.getopt.GetoptError, inst:
3217 3218 raise ParseError(None, inst)
3218 3219
3219 3220 if args:
3220 3221 cmd, args = args[0], args[1:]
3221 3222 aliases, i = find(cmd)
3222 3223 cmd = aliases[0]
3223 3224 defaults = ui.config("defaults", cmd)
3224 3225 if defaults:
3225 3226 args = defaults.split() + args
3226 3227 c = list(i[1])
3227 3228 else:
3228 3229 cmd = None
3229 3230 c = []
3230 3231
3231 3232 # combine global options into local
3232 3233 for o in globalopts:
3233 3234 c.append((o[0], o[1], options[o[1]], o[3]))
3234 3235
3235 3236 try:
3236 3237 args = fancyopts.fancyopts(args, c, cmdoptions)
3237 3238 except fancyopts.getopt.GetoptError, inst:
3238 3239 raise ParseError(cmd, inst)
3239 3240
3240 3241 # separate global options back out
3241 3242 for o in globalopts:
3242 3243 n = o[1]
3243 3244 options[n] = cmdoptions[n]
3244 3245 del cmdoptions[n]
3245 3246
3246 3247 return (cmd, cmd and i[0] or None, args, options, cmdoptions)
3247 3248
3248 3249 def dispatch(args):
3249 3250 signal.signal(signal.SIGTERM, catchterm)
3250 3251 try:
3251 3252 signal.signal(signal.SIGHUP, catchterm)
3252 3253 except AttributeError:
3253 3254 pass
3254 3255
3255 3256 try:
3256 3257 u = ui.ui()
3257 3258 except util.Abort, inst:
3258 3259 sys.stderr.write(_("abort: %s\n") % inst)
3259 3260 sys.exit(1)
3260 3261
3261 3262 external = []
3262 3263 for x in u.extensions():
3263 3264 def on_exception(exc, inst):
3264 3265 u.warn(_("*** failed to import extension %s\n") % x[1])
3265 3266 u.warn("%s\n" % inst)
3266 3267 if "--traceback" in sys.argv[1:]:
3267 3268 traceback.print_exc()
3268 3269 if x[1]:
3269 3270 try:
3270 3271 mod = imp.load_source(x[0], x[1])
3271 3272 except Exception, inst:
3272 3273 on_exception(Exception, inst)
3273 3274 continue
3274 3275 else:
3275 3276 def importh(name):
3276 3277 mod = __import__(name)
3277 3278 components = name.split('.')
3278 3279 for comp in components[1:]:
3279 3280 mod = getattr(mod, comp)
3280 3281 return mod
3281 3282 try:
3282 3283 try:
3283 3284 mod = importh("hgext." + x[0])
3284 3285 except ImportError:
3285 3286 mod = importh(x[0])
3286 3287 except Exception, inst:
3287 3288 on_exception(Exception, inst)
3288 3289 continue
3289 3290
3290 3291 external.append(mod)
3291 3292 for x in external:
3292 3293 cmdtable = getattr(x, 'cmdtable', {})
3293 3294 for t in cmdtable:
3294 3295 if t in table:
3295 3296 u.warn(_("module %s overrides %s\n") % (x.__name__, t))
3296 3297 table.update(cmdtable)
3297 3298
3298 3299 try:
3299 3300 cmd, func, args, options, cmdoptions = parse(u, args)
3300 3301 if options["time"]:
3301 3302 def get_times():
3302 3303 t = os.times()
3303 3304 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
3304 3305 t = (t[0], t[1], t[2], t[3], time.clock())
3305 3306 return t
3306 3307 s = get_times()
3307 3308 def print_time():
3308 3309 t = get_times()
3309 3310 u.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
3310 3311 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
3311 3312 atexit.register(print_time)
3312 3313
3313 3314 u.updateopts(options["verbose"], options["debug"], options["quiet"],
3314 3315 not options["noninteractive"])
3315 3316
3316 3317 # enter the debugger before command execution
3317 3318 if options['debugger']:
3318 3319 pdb.set_trace()
3319 3320
3320 3321 try:
3321 3322 if options['cwd']:
3322 3323 try:
3323 3324 os.chdir(options['cwd'])
3324 3325 except OSError, inst:
3325 3326 raise util.Abort('%s: %s' %
3326 3327 (options['cwd'], inst.strerror))
3327 3328
3328 3329 path = u.expandpath(options["repository"]) or ""
3329 3330 repo = path and hg.repository(u, path=path) or None
3330 3331
3331 3332 if options['help']:
3332 3333 help_(u, cmd, options['version'])
3333 3334 sys.exit(0)
3334 3335 elif options['version']:
3335 3336 show_version(u)
3336 3337 sys.exit(0)
3337 3338 elif not cmd:
3338 3339 help_(u, 'shortlist')
3339 3340 sys.exit(0)
3340 3341
3341 3342 if cmd not in norepo.split():
3342 3343 try:
3343 3344 if not repo:
3344 3345 repo = hg.repository(u, path=path)
3345 3346 u = repo.ui
3346 3347 for x in external:
3347 3348 if hasattr(x, 'reposetup'):
3348 3349 x.reposetup(u, repo)
3349 3350 except hg.RepoError:
3350 3351 if cmd not in optionalrepo.split():
3351 3352 raise
3352 3353 d = lambda: func(u, repo, *args, **cmdoptions)
3353 3354 else:
3354 3355 d = lambda: func(u, *args, **cmdoptions)
3355 3356
3356 3357 try:
3357 3358 if options['profile']:
3358 3359 import hotshot, hotshot.stats
3359 3360 prof = hotshot.Profile("hg.prof")
3360 3361 try:
3361 3362 try:
3362 3363 return prof.runcall(d)
3363 3364 except:
3364 3365 try:
3365 3366 u.warn(_('exception raised - generating '
3366 3367 'profile anyway\n'))
3367 3368 except:
3368 3369 pass
3369 3370 raise
3370 3371 finally:
3371 3372 prof.close()
3372 3373 stats = hotshot.stats.load("hg.prof")
3373 3374 stats.strip_dirs()
3374 3375 stats.sort_stats('time', 'calls')
3375 3376 stats.print_stats(40)
3376 3377 else:
3377 3378 return d()
3378 3379 finally:
3379 3380 u.flush()
3380 3381 except:
3381 3382 # enter the debugger when we hit an exception
3382 3383 if options['debugger']:
3383 3384 pdb.post_mortem(sys.exc_info()[2])
3384 3385 if options['traceback']:
3385 3386 traceback.print_exc()
3386 3387 raise
3387 3388 except ParseError, inst:
3388 3389 if inst.args[0]:
3389 3390 u.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
3390 3391 help_(u, inst.args[0])
3391 3392 else:
3392 3393 u.warn(_("hg: %s\n") % inst.args[1])
3393 3394 help_(u, 'shortlist')
3394 3395 sys.exit(-1)
3395 3396 except AmbiguousCommand, inst:
3396 3397 u.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
3397 3398 (inst.args[0], " ".join(inst.args[1])))
3398 3399 sys.exit(1)
3399 3400 except UnknownCommand, inst:
3400 3401 u.warn(_("hg: unknown command '%s'\n") % inst.args[0])
3401 3402 help_(u, 'shortlist')
3402 3403 sys.exit(1)
3403 3404 except hg.RepoError, inst:
3404 3405 u.warn(_("abort: "), inst, "!\n")
3405 3406 except lock.LockHeld, inst:
3406 3407 if inst.errno == errno.ETIMEDOUT:
3407 3408 reason = _('timed out waiting for lock held by %s') % inst.locker
3408 3409 else:
3409 3410 reason = _('lock held by %s') % inst.locker
3410 3411 u.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
3411 3412 except lock.LockUnavailable, inst:
3412 3413 u.warn(_("abort: could not lock %s: %s\n") %
3413 3414 (inst.desc or inst.filename, inst.strerror))
3414 3415 except revlog.RevlogError, inst:
3415 3416 u.warn(_("abort: "), inst, "!\n")
3416 3417 except SignalInterrupt:
3417 3418 u.warn(_("killed!\n"))
3418 3419 except KeyboardInterrupt:
3419 3420 try:
3420 3421 u.warn(_("interrupted!\n"))
3421 3422 except IOError, inst:
3422 3423 if inst.errno == errno.EPIPE:
3423 3424 if u.debugflag:
3424 3425 u.warn(_("\nbroken pipe\n"))
3425 3426 else:
3426 3427 raise
3427 3428 except IOError, inst:
3428 3429 if hasattr(inst, "code"):
3429 3430 u.warn(_("abort: %s\n") % inst)
3430 3431 elif hasattr(inst, "reason"):
3431 3432 u.warn(_("abort: error: %s\n") % inst.reason[1])
3432 3433 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
3433 3434 if u.debugflag:
3434 3435 u.warn(_("broken pipe\n"))
3435 3436 elif getattr(inst, "strerror", None):
3436 3437 if getattr(inst, "filename", None):
3437 3438 u.warn(_("abort: %s - %s\n") % (inst.strerror, inst.filename))
3438 3439 else:
3439 3440 u.warn(_("abort: %s\n") % inst.strerror)
3440 3441 else:
3441 3442 raise
3442 3443 except OSError, inst:
3443 3444 if hasattr(inst, "filename"):
3444 3445 u.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
3445 3446 else:
3446 3447 u.warn(_("abort: %s\n") % inst.strerror)
3447 3448 except util.Abort, inst:
3448 3449 u.warn(_('abort: '), inst.args[0] % inst.args[1:], '\n')
3449 3450 sys.exit(1)
3450 3451 except TypeError, inst:
3451 3452 # was this an argument error?
3452 3453 tb = traceback.extract_tb(sys.exc_info()[2])
3453 3454 if len(tb) > 2: # no
3454 3455 raise
3455 3456 u.debug(inst, "\n")
3456 3457 u.warn(_("%s: invalid arguments\n") % cmd)
3457 3458 help_(u, cmd)
3458 3459 except SystemExit:
3459 3460 # don't catch this in the catch-all below
3460 3461 raise
3461 3462 except:
3462 3463 u.warn(_("** unknown exception encountered, details follow\n"))
3463 3464 u.warn(_("** report bug details to mercurial@selenic.com\n"))
3464 3465 u.warn(_("** Mercurial Distributed SCM (version %s)\n")
3465 3466 % version.get_version())
3466 3467 raise
3467 3468
3468 3469 sys.exit(-1)
@@ -1,107 +1,108
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os
9 9 from revlog import *
10 10 from demandload import *
11 11 demandload(globals(), "bdiff")
12 12
13 13 class filelog(revlog):
14 def __init__(self, opener, path):
14 def __init__(self, opener, path, defversion=0):
15 15 revlog.__init__(self, opener,
16 16 os.path.join("data", self.encodedir(path + ".i")),
17 os.path.join("data", self.encodedir(path + ".d")))
17 os.path.join("data", self.encodedir(path + ".d")),
18 defversion)
18 19
19 20 # This avoids a collision between a file named foo and a dir named
20 21 # foo.i or foo.d
21 22 def encodedir(self, path):
22 23 return (path
23 24 .replace(".hg/", ".hg.hg/")
24 25 .replace(".i/", ".i.hg/")
25 26 .replace(".d/", ".d.hg/"))
26 27
27 28 def decodedir(self, path):
28 29 return (path
29 30 .replace(".d.hg/", ".d/")
30 31 .replace(".i.hg/", ".i/")
31 32 .replace(".hg.hg/", ".hg/"))
32 33
33 34 def read(self, node):
34 35 t = self.revision(node)
35 36 if not t.startswith('\1\n'):
36 37 return t
37 38 s = t.find('\1\n', 2)
38 39 return t[s+2:]
39 40
40 41 def readmeta(self, node):
41 42 t = self.revision(node)
42 43 if not t.startswith('\1\n'):
43 44 return {}
44 45 s = t.find('\1\n', 2)
45 46 mt = t[2:s]
46 47 m = {}
47 48 for l in mt.splitlines():
48 49 k, v = l.split(": ", 1)
49 50 m[k] = v
50 51 return m
51 52
52 53 def add(self, text, meta, transaction, link, p1=None, p2=None):
53 54 if meta or text.startswith('\1\n'):
54 55 mt = ""
55 56 if meta:
56 57 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
57 58 text = "\1\n%s\1\n%s" % ("".join(mt), text)
58 59 return self.addrevision(text, transaction, link, p1, p2)
59 60
60 61 def renamed(self, node):
61 62 if self.parents(node)[0] != nullid:
62 63 return False
63 64 m = self.readmeta(node)
64 65 if m and m.has_key("copy"):
65 66 return (m["copy"], bin(m["copyrev"]))
66 67 return False
67 68
68 69 def annotate(self, node):
69 70
70 71 def decorate(text, rev):
71 72 return ([rev] * len(text.splitlines()), text)
72 73
73 74 def pair(parent, child):
74 75 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
75 76 child[0][b1:b2] = parent[0][a1:a2]
76 77 return child
77 78
78 79 # find all ancestors
79 80 needed = {node:1}
80 81 visit = [node]
81 82 while visit:
82 83 n = visit.pop(0)
83 84 for p in self.parents(n):
84 85 if p not in needed:
85 86 needed[p] = 1
86 87 visit.append(p)
87 88 else:
88 89 # count how many times we'll use this
89 90 needed[p] += 1
90 91
91 92 # sort by revision which is a topological order
92 93 visit = [ (self.rev(n), n) for n in needed.keys() ]
93 94 visit.sort()
94 95 hist = {}
95 96
96 97 for r,n in visit:
97 98 curr = decorate(self.read(n), self.linkrev(n))
98 99 for p in self.parents(n):
99 100 if p != nullid:
100 101 curr = pair(hist[p], curr)
101 102 # trim the history of unneeded revs
102 103 needed[p] -= 1
103 104 if not needed[p]:
104 105 del hist[p]
105 106 hist[n] = curr
106 107
107 108 return zip(hist[n][0], hist[n][1].splitlines(1))
@@ -1,1956 +1,1963
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")
15 15
16 16 class localrepository(object):
17 17 def __del__(self):
18 18 self.transhandle = None
19 19 def __init__(self, parentui, path=None, create=0):
20 20 if not path:
21 21 p = os.getcwd()
22 22 while not os.path.isdir(os.path.join(p, ".hg")):
23 23 oldp = p
24 24 p = os.path.dirname(p)
25 25 if p == oldp:
26 26 raise repo.RepoError(_("no repo found"))
27 27 path = p
28 28 self.path = os.path.join(path, ".hg")
29 29
30 30 if not create and not os.path.isdir(self.path):
31 31 raise repo.RepoError(_("repository %s not found") % path)
32 32
33 33 self.root = os.path.abspath(path)
34 34 self.origroot = path
35 35 self.ui = ui.ui(parentui=parentui)
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 self.manifest = manifest.manifest(self.opener)
39 self.changelog = changelog.changelog(self.opener)
38
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 except IOError:
42 pass
43
44 v = self.ui.revlogopts
45 self.revlogversion = int(v.get('format', 0))
46 for x in v.get('flags', "").split():
47 self.revlogversion |= revlog.flagstr(x)
48
49 self.manifest = manifest.manifest(self.opener, self.revlogversion)
50 self.changelog = changelog.changelog(self.opener, self.revlogversion)
51 self.revlogversion = self.changelog.version
40 52 self.tagscache = None
41 53 self.nodetagscache = None
42 54 self.encodepats = None
43 55 self.decodepats = None
44 56 self.transhandle = None
45 57
46 58 if create:
47 59 os.mkdir(self.path)
48 60 os.mkdir(self.join("data"))
49 61
50 62 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
51 try:
52 self.ui.readconfig(self.join("hgrc"), self.root)
53 except IOError:
54 pass
55
56 63 def hook(self, name, throw=False, **args):
57 64 def runhook(name, cmd):
58 65 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
59 66 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
60 67 [(k.upper(), v) for k, v in args.iteritems()])
61 68 r = util.system(cmd, environ=env, cwd=self.root)
62 69 if r:
63 70 desc, r = util.explain_exit(r)
64 71 if throw:
65 72 raise util.Abort(_('%s hook %s') % (name, desc))
66 73 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
67 74 return False
68 75 return True
69 76
70 77 r = True
71 78 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
72 79 if hname.split(".", 1)[0] == name and cmd]
73 80 hooks.sort()
74 81 for hname, cmd in hooks:
75 82 r = runhook(hname, cmd) and r
76 83 return r
77 84
78 85 def tags(self):
79 86 '''return a mapping of tag to node'''
80 87 if not self.tagscache:
81 88 self.tagscache = {}
82 89
83 90 def parsetag(line, context):
84 91 if not line:
85 92 return
86 93 s = l.split(" ", 1)
87 94 if len(s) != 2:
88 95 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
89 96 return
90 97 node, key = s
91 98 try:
92 99 bin_n = bin(node)
93 100 except TypeError:
94 101 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
95 102 return
96 103 if bin_n not in self.changelog.nodemap:
97 104 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
98 105 return
99 106 self.tagscache[key.strip()] = bin_n
100 107
101 108 # read each head of the tags file, ending with the tip
102 109 # and add each tag found to the map, with "newer" ones
103 110 # taking precedence
104 111 fl = self.file(".hgtags")
105 112 h = fl.heads()
106 113 h.reverse()
107 114 for r in h:
108 115 count = 0
109 116 for l in fl.read(r).splitlines():
110 117 count += 1
111 118 parsetag(l, ".hgtags:%d" % count)
112 119
113 120 try:
114 121 f = self.opener("localtags")
115 122 count = 0
116 123 for l in f:
117 124 count += 1
118 125 parsetag(l, "localtags:%d" % count)
119 126 except IOError:
120 127 pass
121 128
122 129 self.tagscache['tip'] = self.changelog.tip()
123 130
124 131 return self.tagscache
125 132
126 133 def tagslist(self):
127 134 '''return a list of tags ordered by revision'''
128 135 l = []
129 136 for t, n in self.tags().items():
130 137 try:
131 138 r = self.changelog.rev(n)
132 139 except:
133 140 r = -2 # sort to the beginning of the list if unknown
134 141 l.append((r, t, n))
135 142 l.sort()
136 143 return [(t, n) for r, t, n in l]
137 144
138 145 def nodetags(self, node):
139 146 '''return the tags associated with a node'''
140 147 if not self.nodetagscache:
141 148 self.nodetagscache = {}
142 149 for t, n in self.tags().items():
143 150 self.nodetagscache.setdefault(n, []).append(t)
144 151 return self.nodetagscache.get(node, [])
145 152
146 153 def lookup(self, key):
147 154 try:
148 155 return self.tags()[key]
149 156 except KeyError:
150 157 try:
151 158 return self.changelog.lookup(key)
152 159 except:
153 160 raise repo.RepoError(_("unknown revision '%s'") % key)
154 161
155 162 def dev(self):
156 163 return os.stat(self.path).st_dev
157 164
158 165 def local(self):
159 166 return True
160 167
161 168 def join(self, f):
162 169 return os.path.join(self.path, f)
163 170
164 171 def wjoin(self, f):
165 172 return os.path.join(self.root, f)
166 173
167 174 def file(self, f):
168 175 if f[0] == '/':
169 176 f = f[1:]
170 return filelog.filelog(self.opener, f)
177 return filelog.filelog(self.opener, f, self.revlogversion)
171 178
172 179 def getcwd(self):
173 180 return self.dirstate.getcwd()
174 181
175 182 def wfile(self, f, mode='r'):
176 183 return self.wopener(f, mode)
177 184
178 185 def wread(self, filename):
179 186 if self.encodepats == None:
180 187 l = []
181 188 for pat, cmd in self.ui.configitems("encode"):
182 189 mf = util.matcher(self.root, "", [pat], [], [])[1]
183 190 l.append((mf, cmd))
184 191 self.encodepats = l
185 192
186 193 data = self.wopener(filename, 'r').read()
187 194
188 195 for mf, cmd in self.encodepats:
189 196 if mf(filename):
190 197 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
191 198 data = util.filter(data, cmd)
192 199 break
193 200
194 201 return data
195 202
196 203 def wwrite(self, filename, data, fd=None):
197 204 if self.decodepats == None:
198 205 l = []
199 206 for pat, cmd in self.ui.configitems("decode"):
200 207 mf = util.matcher(self.root, "", [pat], [], [])[1]
201 208 l.append((mf, cmd))
202 209 self.decodepats = l
203 210
204 211 for mf, cmd in self.decodepats:
205 212 if mf(filename):
206 213 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
207 214 data = util.filter(data, cmd)
208 215 break
209 216
210 217 if fd:
211 218 return fd.write(data)
212 219 return self.wopener(filename, 'w').write(data)
213 220
214 221 def transaction(self):
215 222 tr = self.transhandle
216 223 if tr != None and tr.running():
217 224 return tr.nest()
218 225
219 226 # save dirstate for undo
220 227 try:
221 228 ds = self.opener("dirstate").read()
222 229 except IOError:
223 230 ds = ""
224 231 self.opener("journal.dirstate", "w").write(ds)
225 232
226 233 tr = transaction.transaction(self.ui.warn, self.opener,
227 234 self.join("journal"),
228 235 aftertrans(self.path))
229 236 self.transhandle = tr
230 237 return tr
231 238
232 239 def recover(self):
233 240 l = self.lock()
234 241 if os.path.exists(self.join("journal")):
235 242 self.ui.status(_("rolling back interrupted transaction\n"))
236 243 transaction.rollback(self.opener, self.join("journal"))
237 244 self.reload()
238 245 return True
239 246 else:
240 247 self.ui.warn(_("no interrupted transaction available\n"))
241 248 return False
242 249
243 250 def undo(self, wlock=None):
244 251 if not wlock:
245 252 wlock = self.wlock()
246 253 l = self.lock()
247 254 if os.path.exists(self.join("undo")):
248 255 self.ui.status(_("rolling back last transaction\n"))
249 256 transaction.rollback(self.opener, self.join("undo"))
250 257 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
251 258 self.reload()
252 259 self.wreload()
253 260 else:
254 261 self.ui.warn(_("no undo information available\n"))
255 262
256 263 def wreload(self):
257 264 self.dirstate.read()
258 265
259 266 def reload(self):
260 267 self.changelog.load()
261 268 self.manifest.load()
262 269 self.tagscache = None
263 270 self.nodetagscache = None
264 271
265 272 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
266 273 desc=None):
267 274 try:
268 275 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
269 276 except lock.LockHeld, inst:
270 277 if not wait:
271 278 raise
272 279 self.ui.warn(_("waiting for lock on %s held by %s\n") %
273 280 (desc, inst.args[0]))
274 281 # default to 600 seconds timeout
275 282 l = lock.lock(self.join(lockname),
276 283 int(self.ui.config("ui", "timeout") or 600),
277 284 releasefn, desc=desc)
278 285 if acquirefn:
279 286 acquirefn()
280 287 return l
281 288
282 289 def lock(self, wait=1):
283 290 return self.do_lock("lock", wait, acquirefn=self.reload,
284 291 desc=_('repository %s') % self.origroot)
285 292
286 293 def wlock(self, wait=1):
287 294 return self.do_lock("wlock", wait, self.dirstate.write,
288 295 self.wreload,
289 296 desc=_('working directory of %s') % self.origroot)
290 297
291 298 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
292 299 "determine whether a new filenode is needed"
293 300 fp1 = manifest1.get(filename, nullid)
294 301 fp2 = manifest2.get(filename, nullid)
295 302
296 303 if fp2 != nullid:
297 304 # is one parent an ancestor of the other?
298 305 fpa = filelog.ancestor(fp1, fp2)
299 306 if fpa == fp1:
300 307 fp1, fp2 = fp2, nullid
301 308 elif fpa == fp2:
302 309 fp2 = nullid
303 310
304 311 # is the file unmodified from the parent? report existing entry
305 312 if fp2 == nullid and text == filelog.read(fp1):
306 313 return (fp1, None, None)
307 314
308 315 return (None, fp1, fp2)
309 316
310 317 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
311 318 orig_parent = self.dirstate.parents()[0] or nullid
312 319 p1 = p1 or self.dirstate.parents()[0] or nullid
313 320 p2 = p2 or self.dirstate.parents()[1] or nullid
314 321 c1 = self.changelog.read(p1)
315 322 c2 = self.changelog.read(p2)
316 323 m1 = self.manifest.read(c1[0])
317 324 mf1 = self.manifest.readflags(c1[0])
318 325 m2 = self.manifest.read(c2[0])
319 326 changed = []
320 327
321 328 if orig_parent == p1:
322 329 update_dirstate = 1
323 330 else:
324 331 update_dirstate = 0
325 332
326 333 if not wlock:
327 334 wlock = self.wlock()
328 335 l = self.lock()
329 336 tr = self.transaction()
330 337 mm = m1.copy()
331 338 mfm = mf1.copy()
332 339 linkrev = self.changelog.count()
333 340 for f in files:
334 341 try:
335 342 t = self.wread(f)
336 343 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
337 344 r = self.file(f)
338 345 mfm[f] = tm
339 346
340 347 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
341 348 if entry:
342 349 mm[f] = entry
343 350 continue
344 351
345 352 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
346 353 changed.append(f)
347 354 if update_dirstate:
348 355 self.dirstate.update([f], "n")
349 356 except IOError:
350 357 try:
351 358 del mm[f]
352 359 del mfm[f]
353 360 if update_dirstate:
354 361 self.dirstate.forget([f])
355 362 except:
356 363 # deleted from p2?
357 364 pass
358 365
359 366 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
360 367 user = user or self.ui.username()
361 368 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
362 369 tr.close()
363 370 if update_dirstate:
364 371 self.dirstate.setparents(n, nullid)
365 372
366 373 def commit(self, files=None, text="", user=None, date=None,
367 374 match=util.always, force=False, lock=None, wlock=None):
368 375 commit = []
369 376 remove = []
370 377 changed = []
371 378
372 379 if files:
373 380 for f in files:
374 381 s = self.dirstate.state(f)
375 382 if s in 'nmai':
376 383 commit.append(f)
377 384 elif s == 'r':
378 385 remove.append(f)
379 386 else:
380 387 self.ui.warn(_("%s not tracked!\n") % f)
381 388 else:
382 389 modified, added, removed, deleted, unknown = self.changes(match=match)
383 390 commit = modified + added
384 391 remove = removed
385 392
386 393 p1, p2 = self.dirstate.parents()
387 394 c1 = self.changelog.read(p1)
388 395 c2 = self.changelog.read(p2)
389 396 m1 = self.manifest.read(c1[0])
390 397 mf1 = self.manifest.readflags(c1[0])
391 398 m2 = self.manifest.read(c2[0])
392 399
393 400 if not commit and not remove and not force and p2 == nullid:
394 401 self.ui.status(_("nothing changed\n"))
395 402 return None
396 403
397 404 xp1 = hex(p1)
398 405 if p2 == nullid: xp2 = ''
399 406 else: xp2 = hex(p2)
400 407
401 408 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
402 409
403 410 if not wlock:
404 411 wlock = self.wlock()
405 412 if not lock:
406 413 lock = self.lock()
407 414 tr = self.transaction()
408 415
409 416 # check in files
410 417 new = {}
411 418 linkrev = self.changelog.count()
412 419 commit.sort()
413 420 for f in commit:
414 421 self.ui.note(f + "\n")
415 422 try:
416 423 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
417 424 t = self.wread(f)
418 425 except IOError:
419 426 self.ui.warn(_("trouble committing %s!\n") % f)
420 427 raise
421 428
422 429 r = self.file(f)
423 430
424 431 meta = {}
425 432 cp = self.dirstate.copied(f)
426 433 if cp:
427 434 meta["copy"] = cp
428 435 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
429 436 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
430 437 fp1, fp2 = nullid, nullid
431 438 else:
432 439 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
433 440 if entry:
434 441 new[f] = entry
435 442 continue
436 443
437 444 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
438 445 # remember what we've added so that we can later calculate
439 446 # the files to pull from a set of changesets
440 447 changed.append(f)
441 448
442 449 # update manifest
443 450 m1 = m1.copy()
444 451 m1.update(new)
445 452 for f in remove:
446 453 if f in m1:
447 454 del m1[f]
448 455 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
449 456 (new, remove))
450 457
451 458 # add changeset
452 459 new = new.keys()
453 460 new.sort()
454 461
455 462 user = user or self.ui.username()
456 463 if not text:
457 464 edittext = [""]
458 465 if p2 != nullid:
459 466 edittext.append("HG: branch merge")
460 467 edittext.extend(["HG: changed %s" % f for f in changed])
461 468 edittext.extend(["HG: removed %s" % f for f in remove])
462 469 if not changed and not remove:
463 470 edittext.append("HG: no files changed")
464 471 edittext.append("")
465 472 # run editor in the repository root
466 473 olddir = os.getcwd()
467 474 os.chdir(self.root)
468 475 edittext = self.ui.edit("\n".join(edittext), user)
469 476 os.chdir(olddir)
470 477 if not edittext.rstrip():
471 478 return None
472 479 text = edittext
473 480
474 481 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
475 482 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
476 483 parent2=xp2)
477 484 tr.close()
478 485
479 486 self.dirstate.setparents(n)
480 487 self.dirstate.update(new, "n")
481 488 self.dirstate.forget(remove)
482 489
483 490 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
484 491 return n
485 492
486 493 def walk(self, node=None, files=[], match=util.always, badmatch=None):
487 494 if node:
488 495 fdict = dict.fromkeys(files)
489 496 for fn in self.manifest.read(self.changelog.read(node)[0]):
490 497 fdict.pop(fn, None)
491 498 if match(fn):
492 499 yield 'm', fn
493 500 for fn in fdict:
494 501 if badmatch and badmatch(fn):
495 502 if match(fn):
496 503 yield 'b', fn
497 504 else:
498 505 self.ui.warn(_('%s: No such file in rev %s\n') % (
499 506 util.pathto(self.getcwd(), fn), short(node)))
500 507 else:
501 508 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
502 509 yield src, fn
503 510
504 511 def changes(self, node1=None, node2=None, files=[], match=util.always,
505 512 wlock=None, show_ignored=None):
506 513 """return changes between two nodes or node and working directory
507 514
508 515 If node1 is None, use the first dirstate parent instead.
509 516 If node2 is None, compare node1 with working directory.
510 517 """
511 518
512 519 def fcmp(fn, mf):
513 520 t1 = self.wread(fn)
514 521 t2 = self.file(fn).read(mf.get(fn, nullid))
515 522 return cmp(t1, t2)
516 523
517 524 def mfmatches(node):
518 525 change = self.changelog.read(node)
519 526 mf = dict(self.manifest.read(change[0]))
520 527 for fn in mf.keys():
521 528 if not match(fn):
522 529 del mf[fn]
523 530 return mf
524 531
525 532 if node1:
526 533 # read the manifest from node1 before the manifest from node2,
527 534 # so that we'll hit the manifest cache if we're going through
528 535 # all the revisions in parent->child order.
529 536 mf1 = mfmatches(node1)
530 537
531 538 # are we comparing the working directory?
532 539 if not node2:
533 540 if not wlock:
534 541 try:
535 542 wlock = self.wlock(wait=0)
536 543 except lock.LockException:
537 544 wlock = None
538 545 lookup, modified, added, removed, deleted, unknown, ignored = (
539 546 self.dirstate.changes(files, match, show_ignored))
540 547
541 548 # are we comparing working dir against its parent?
542 549 if not node1:
543 550 if lookup:
544 551 # do a full compare of any files that might have changed
545 552 mf2 = mfmatches(self.dirstate.parents()[0])
546 553 for f in lookup:
547 554 if fcmp(f, mf2):
548 555 modified.append(f)
549 556 elif wlock is not None:
550 557 self.dirstate.update([f], "n")
551 558 else:
552 559 # we are comparing working dir against non-parent
553 560 # generate a pseudo-manifest for the working dir
554 561 mf2 = mfmatches(self.dirstate.parents()[0])
555 562 for f in lookup + modified + added:
556 563 mf2[f] = ""
557 564 for f in removed:
558 565 if f in mf2:
559 566 del mf2[f]
560 567 else:
561 568 # we are comparing two revisions
562 569 deleted, unknown, ignored = [], [], []
563 570 mf2 = mfmatches(node2)
564 571
565 572 if node1:
566 573 # flush lists from dirstate before comparing manifests
567 574 modified, added = [], []
568 575
569 576 for fn in mf2:
570 577 if mf1.has_key(fn):
571 578 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
572 579 modified.append(fn)
573 580 del mf1[fn]
574 581 else:
575 582 added.append(fn)
576 583
577 584 removed = mf1.keys()
578 585
579 586 # sort and return results:
580 587 for l in modified, added, removed, deleted, unknown, ignored:
581 588 l.sort()
582 589 if show_ignored is None:
583 590 return (modified, added, removed, deleted, unknown)
584 591 else:
585 592 return (modified, added, removed, deleted, unknown, ignored)
586 593
587 594 def add(self, list, wlock=None):
588 595 if not wlock:
589 596 wlock = self.wlock()
590 597 for f in list:
591 598 p = self.wjoin(f)
592 599 if not os.path.exists(p):
593 600 self.ui.warn(_("%s does not exist!\n") % f)
594 601 elif not os.path.isfile(p):
595 602 self.ui.warn(_("%s not added: only files supported currently\n")
596 603 % f)
597 604 elif self.dirstate.state(f) in 'an':
598 605 self.ui.warn(_("%s already tracked!\n") % f)
599 606 else:
600 607 self.dirstate.update([f], "a")
601 608
602 609 def forget(self, list, wlock=None):
603 610 if not wlock:
604 611 wlock = self.wlock()
605 612 for f in list:
606 613 if self.dirstate.state(f) not in 'ai':
607 614 self.ui.warn(_("%s not added!\n") % f)
608 615 else:
609 616 self.dirstate.forget([f])
610 617
611 618 def remove(self, list, unlink=False, wlock=None):
612 619 if unlink:
613 620 for f in list:
614 621 try:
615 622 util.unlink(self.wjoin(f))
616 623 except OSError, inst:
617 624 if inst.errno != errno.ENOENT:
618 625 raise
619 626 if not wlock:
620 627 wlock = self.wlock()
621 628 for f in list:
622 629 p = self.wjoin(f)
623 630 if os.path.exists(p):
624 631 self.ui.warn(_("%s still exists!\n") % f)
625 632 elif self.dirstate.state(f) == 'a':
626 633 self.dirstate.forget([f])
627 634 elif f not in self.dirstate:
628 635 self.ui.warn(_("%s not tracked!\n") % f)
629 636 else:
630 637 self.dirstate.update([f], "r")
631 638
632 639 def undelete(self, list, wlock=None):
633 640 p = self.dirstate.parents()[0]
634 641 mn = self.changelog.read(p)[0]
635 642 mf = self.manifest.readflags(mn)
636 643 m = self.manifest.read(mn)
637 644 if not wlock:
638 645 wlock = self.wlock()
639 646 for f in list:
640 647 if self.dirstate.state(f) not in "r":
641 648 self.ui.warn("%s not removed!\n" % f)
642 649 else:
643 650 t = self.file(f).read(m[f])
644 651 self.wwrite(f, t)
645 652 util.set_exec(self.wjoin(f), mf[f])
646 653 self.dirstate.update([f], "n")
647 654
648 655 def copy(self, source, dest, wlock=None):
649 656 p = self.wjoin(dest)
650 657 if not os.path.exists(p):
651 658 self.ui.warn(_("%s does not exist!\n") % dest)
652 659 elif not os.path.isfile(p):
653 660 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
654 661 else:
655 662 if not wlock:
656 663 wlock = self.wlock()
657 664 if self.dirstate.state(dest) == '?':
658 665 self.dirstate.update([dest], "a")
659 666 self.dirstate.copy(source, dest)
660 667
661 668 def heads(self, start=None):
662 669 heads = self.changelog.heads(start)
663 670 # sort the output in rev descending order
664 671 heads = [(-self.changelog.rev(h), h) for h in heads]
665 672 heads.sort()
666 673 return [n for (r, n) in heads]
667 674
668 675 # branchlookup returns a dict giving a list of branches for
669 676 # each head. A branch is defined as the tag of a node or
670 677 # the branch of the node's parents. If a node has multiple
671 678 # branch tags, tags are eliminated if they are visible from other
672 679 # branch tags.
673 680 #
674 681 # So, for this graph: a->b->c->d->e
675 682 # \ /
676 683 # aa -----/
677 684 # a has tag 2.6.12
678 685 # d has tag 2.6.13
679 686 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
680 687 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
681 688 # from the list.
682 689 #
683 690 # It is possible that more than one head will have the same branch tag.
684 691 # callers need to check the result for multiple heads under the same
685 692 # branch tag if that is a problem for them (ie checkout of a specific
686 693 # branch).
687 694 #
688 695 # passing in a specific branch will limit the depth of the search
689 696 # through the parents. It won't limit the branches returned in the
690 697 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags it belongs to.

        heads defaults to self.heads().  branch, when given, stops the
        walk below a head once that tag is seen, limiting search depth
        (it does not limit which branches appear in the result).
        Returns {headnode: [tagname, ...]}; see the comment block above
        this method for the elimination rules.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a previously queued second-parent branch; note
                # that 'seen' is only reset when starting a fresh head,
                # so merge branches share the current head's seen set.
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # tag/head collected so far on this walk
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # reached the depth-limiting tag: stop descending here
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of 'branches' from node, memoized
                # across heads in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
773 780
774 781 def branches(self, nodes):
775 782 if not nodes:
776 783 nodes = [self.changelog.tip()]
777 784 b = []
778 785 for n in nodes:
779 786 t = n
780 787 while n:
781 788 p = self.changelog.parents(n)
782 789 if p[1] != nullid or p[0] == nullid:
783 790 b.append((t, n, p[0], p[1]))
784 791 break
785 792 n = p[0]
786 793 return b
787 794
788 795 def between(self, pairs):
789 796 r = []
790 797
791 798 for top, bottom in pairs:
792 799 n, l, i = top, [], 0
793 800 f = 1
794 801
795 802 while n != bottom:
796 803 p = self.changelog.parents(n)[0]
797 804 if i == f:
798 805 l.append(n)
799 806 f = f * 2
800 807 n = p
801 808 i += 1
802 809
803 810 r.append(l)
804 811
805 812 return r
806 813
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return the roots of the changesets missing from this repo.

        remote is a repository peer (answering heads()/branches()/
        between()).  If base is a dict it is filled in, as a side
        effect, with the nodes found to be common to both sides.
        heads restricts discovery to the given remote heads; force
        allows proceeding when the repositories are unrelated.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        # every remote head is already known locally: nothing incoming
        if not unknown:
            return []

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue both parents for the next batched request,
                    # deduplicating via rep
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask the remote in batches of 10 branch queries
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # narrowed to adjacent nodes: p is the earliest
                        # unknown changeset on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
931 938
932 939 def findoutgoing(self, remote, base=None, heads=None, force=False):
933 940 """Return list of nodes that are roots of subsets not in remote
934 941
935 942 If base dict is specified, assume that these nodes and their parents
936 943 exist on the remote side.
937 944 If a list of heads is specified, return only nodes which are heads
938 945 or ancestors of these heads, and return a second element which
939 946 contains all remote heads which get new children.
940 947 """
941 948 if base == None:
942 949 base = {}
943 950 self.findincoming(remote, base, heads, force=force)
944 951
945 952 self.ui.debug(_("common changesets up to ")
946 953 + " ".join(map(short, base.keys())) + "\n")
947 954
948 955 remain = dict.fromkeys(self.changelog.nodemap)
949 956
950 957 # prune everything remote has from the tree
951 958 del remain[nullid]
952 959 remove = base.keys()
953 960 while remove:
954 961 n = remove.pop(0)
955 962 if n in remain:
956 963 del remain[n]
957 964 for p in self.changelog.parents(n):
958 965 remove.append(p)
959 966
960 967 # find every node whose parents have been pruned
961 968 subset = []
962 969 # find every remote head that will get new children
963 970 updated_heads = {}
964 971 for n in remain:
965 972 p1, p2 = self.changelog.parents(n)
966 973 if p1 not in remain and p2 not in remain:
967 974 subset.append(n)
968 975 if heads:
969 976 if p1 in heads:
970 977 updated_heads[p1] = True
971 978 if p2 in heads:
972 979 updated_heads[p2] = True
973 980
974 981 # this is the set of all roots we have to push
975 982 if heads:
976 983 return subset, updated_heads.keys()
977 984 else:
978 985 return subset
979 986
980 987 def pull(self, remote, heads=None, force=False):
981 988 l = self.lock()
982 989
983 990 # if we have an empty repo, fetch everything
984 991 if self.changelog.tip() == nullid:
985 992 self.ui.status(_("requesting all changes\n"))
986 993 fetch = [nullid]
987 994 else:
988 995 fetch = self.findincoming(remote, force=force)
989 996
990 997 if not fetch:
991 998 self.ui.status(_("no changes found\n"))
992 999 return 0
993 1000
994 1001 if heads is None:
995 1002 cg = remote.changegroup(fetch, 'pull')
996 1003 else:
997 1004 cg = remote.changegroupsubset(fetch, heads, 'pull')
998 1005 return self.addchangegroup(cg)
999 1006
1000 1007 def push(self, remote, force=False, revs=None):
1001 1008 lock = remote.lock()
1002 1009
1003 1010 base = {}
1004 1011 remote_heads = remote.heads()
1005 1012 inc = self.findincoming(remote, base, remote_heads, force=force)
1006 1013 if not force and inc:
1007 1014 self.ui.warn(_("abort: unsynced remote changes!\n"))
1008 1015 self.ui.status(_("(did you forget to sync?"
1009 1016 " use push -f to force)\n"))
1010 1017 return 1
1011 1018
1012 1019 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1013 1020 if revs is not None:
1014 1021 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1015 1022 else:
1016 1023 bases, heads = update, self.changelog.heads()
1017 1024
1018 1025 if not bases:
1019 1026 self.ui.status(_("no changes found\n"))
1020 1027 return 1
1021 1028 elif not force:
1022 1029 if revs is not None:
1023 1030 updated_heads = {}
1024 1031 for base in msng_cl:
1025 1032 for parent in self.changelog.parents(base):
1026 1033 if parent in remote_heads:
1027 1034 updated_heads[parent] = True
1028 1035 updated_heads = updated_heads.keys()
1029 1036 if len(updated_heads) < len(heads):
1030 1037 self.ui.warn(_("abort: push creates new remote branches!\n"))
1031 1038 self.ui.status(_("(did you forget to merge?"
1032 1039 " use push -f to force)\n"))
1033 1040 return 1
1034 1041
1035 1042 if revs is None:
1036 1043 cg = self.changegroup(update, 'push')
1037 1044 else:
1038 1045 cg = self.changegroupsubset(update, revs, 'push')
1039 1046 return remote.addchangegroup(cg)
1040 1047
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        source is a free-form string passed to the 'preoutgoing' and
        'outgoing' hooks.  Returns a util.chunkbuffer over the generated
        stream."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1310 1317
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        source is a free-form string passed to the 'preoutgoing' and
        'outgoing' hooks.  Returns a util.chunkbuffer over the stream."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of basenodes goes out; revset is the set of
        # their revision numbers, used to filter the other revlogs
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # outgoing changesets are their own changelog link
        def identity(x):
            return x

        # yield the nodes of a revlog whose linked changeset is outgoing,
        # in revision (storage) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # build a callback that records every file touched by an
        # outgoing changeset into changedfileset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # build a lookup mapping a revlog node to its changelog node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # file groups are preceded by their name chunk
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1374 1381
    def addchangegroup(self, source):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a chunked changegroup stream as produced by
        changegroup()/changegroupsubset().  Fires the prechangegroup,
        pretxnchangegroup, changegroup and incoming hooks."""

        # incoming changesets link to the changelog position they will
        # occupy once appended
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # manifest/file revisions link back to their changeset's rev
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = appendfile.appendchangelog(self.opener)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = cl.tip()    # tip before the group is added
        chunkiter = changegroup.chunkiter(source)
        cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(cl.rev, (cn, co))
        if cn == nullid:
            # empty group: no new changesets
            cnr = cor
        changesets = cnr - cor

        mf = appendfile.appendmanifest(self.opener)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = mf.tip()
        chunkiter = changegroup.chunkiter(source)
        # return values of tip/addgroup are not used further here
        mo = mf.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                # empty chunk terminates the stream of file groups
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        # write order here is important so concurrent readers will see
        # consistent view of repo
        mf.writedata()
        cl.writedata()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener)
        self.manifest = manifest.manifest(self.opener)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            # one 'incoming' hook invocation per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

        return newheads - oldheads + 1
1463 1470
1464 1471 def update(self, node, allow=False, force=False, choose=None,
1465 1472 moddirstate=True, forcemerge=False, wlock=None):
1466 1473 pl = self.dirstate.parents()
1467 1474 if not force and pl[1] != nullid:
1468 1475 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1469 1476 return 1
1470 1477
1471 1478 err = False
1472 1479
1473 1480 p1, p2 = pl[0], node
1474 1481 pa = self.changelog.ancestor(p1, p2)
1475 1482 m1n = self.changelog.read(p1)[0]
1476 1483 m2n = self.changelog.read(p2)[0]
1477 1484 man = self.manifest.ancestor(m1n, m2n)
1478 1485 m1 = self.manifest.read(m1n)
1479 1486 mf1 = self.manifest.readflags(m1n)
1480 1487 m2 = self.manifest.read(m2n).copy()
1481 1488 mf2 = self.manifest.readflags(m2n)
1482 1489 ma = self.manifest.read(man)
1483 1490 mfa = self.manifest.readflags(man)
1484 1491
1485 1492 modified, added, removed, deleted, unknown = self.changes()
1486 1493
1487 1494 # is this a jump, or a merge? i.e. is there a linear path
1488 1495 # from p1 to p2?
1489 1496 linear_path = (pa == p1 or pa == p2)
1490 1497
1491 1498 if allow and linear_path:
1492 1499 raise util.Abort(_("there is nothing to merge, "
1493 1500 "just use 'hg update'"))
1494 1501 if allow and not forcemerge:
1495 1502 if modified or added or removed:
1496 1503 raise util.Abort(_("outstanding uncommitted changes"))
1497 1504 if not forcemerge and not force:
1498 1505 for f in unknown:
1499 1506 if f in m2:
1500 1507 t1 = self.wread(f)
1501 1508 t2 = self.file(f).read(m2[f])
1502 1509 if cmp(t1, t2) != 0:
1503 1510 raise util.Abort(_("'%s' already exists in the working"
1504 1511 " dir and differs from remote") % f)
1505 1512
1506 1513 # resolve the manifest to determine which files
1507 1514 # we care about merging
1508 1515 self.ui.note(_("resolving manifests\n"))
1509 1516 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1510 1517 (force, allow, moddirstate, linear_path))
1511 1518 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1512 1519 (short(man), short(m1n), short(m2n)))
1513 1520
1514 1521 merge = {}
1515 1522 get = {}
1516 1523 remove = []
1517 1524
1518 1525 # construct a working dir manifest
1519 1526 mw = m1.copy()
1520 1527 mfw = mf1.copy()
1521 1528 umap = dict.fromkeys(unknown)
1522 1529
1523 1530 for f in added + modified + unknown:
1524 1531 mw[f] = ""
1525 1532 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1526 1533
1527 1534 if moddirstate and not wlock:
1528 1535 wlock = self.wlock()
1529 1536
1530 1537 for f in deleted + removed:
1531 1538 if f in mw:
1532 1539 del mw[f]
1533 1540
1534 1541 # If we're jumping between revisions (as opposed to merging),
1535 1542 # and if neither the working directory nor the target rev has
1536 1543 # the file, then we need to remove it from the dirstate, to
1537 1544 # prevent the dirstate from listing the file when it is no
1538 1545 # longer in the manifest.
1539 1546 if moddirstate and linear_path and f not in m2:
1540 1547 self.dirstate.forget((f,))
1541 1548
1542 1549 # Compare manifests
1543 1550 for f, n in mw.iteritems():
1544 1551 if choose and not choose(f):
1545 1552 continue
1546 1553 if f in m2:
1547 1554 s = 0
1548 1555
1549 1556 # is the wfile new since m1, and match m2?
1550 1557 if f not in m1:
1551 1558 t1 = self.wread(f)
1552 1559 t2 = self.file(f).read(m2[f])
1553 1560 if cmp(t1, t2) == 0:
1554 1561 n = m2[f]
1555 1562 del t1, t2
1556 1563
1557 1564 # are files different?
1558 1565 if n != m2[f]:
1559 1566 a = ma.get(f, nullid)
1560 1567 # are both different from the ancestor?
1561 1568 if n != a and m2[f] != a:
1562 1569 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1563 1570 # merge executable bits
1564 1571 # "if we changed or they changed, change in merge"
1565 1572 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1566 1573 mode = ((a^b) | (a^c)) ^ a
1567 1574 merge[f] = (m1.get(f, nullid), m2[f], mode)
1568 1575 s = 1
1569 1576 # are we clobbering?
1570 1577 # is remote's version newer?
1571 1578 # or are we going back in time?
1572 1579 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1573 1580 self.ui.debug(_(" remote %s is newer, get\n") % f)
1574 1581 get[f] = m2[f]
1575 1582 s = 1
1576 1583 elif f in umap:
1577 1584 # this unknown file is the same as the checkout
1578 1585 get[f] = m2[f]
1579 1586
1580 1587 if not s and mfw[f] != mf2[f]:
1581 1588 if force:
1582 1589 self.ui.debug(_(" updating permissions for %s\n") % f)
1583 1590 util.set_exec(self.wjoin(f), mf2[f])
1584 1591 else:
1585 1592 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1586 1593 mode = ((a^b) | (a^c)) ^ a
1587 1594 if mode != b:
1588 1595 self.ui.debug(_(" updating permissions for %s\n")
1589 1596 % f)
1590 1597 util.set_exec(self.wjoin(f), mode)
1591 1598 del m2[f]
1592 1599 elif f in ma:
1593 1600 if n != ma[f]:
1594 1601 r = _("d")
1595 1602 if not force and (linear_path or allow):
1596 1603 r = self.ui.prompt(
1597 1604 (_(" local changed %s which remote deleted\n") % f) +
1598 1605 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1599 1606 if r == _("d"):
1600 1607 remove.append(f)
1601 1608 else:
1602 1609 self.ui.debug(_("other deleted %s\n") % f)
1603 1610 remove.append(f) # other deleted it
1604 1611 else:
1605 1612 # file is created on branch or in working directory
1606 1613 if force and f not in umap:
1607 1614 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1608 1615 remove.append(f)
1609 1616 elif n == m1.get(f, nullid): # same as parent
1610 1617 if p2 == pa: # going backwards?
1611 1618 self.ui.debug(_("remote deleted %s\n") % f)
1612 1619 remove.append(f)
1613 1620 else:
1614 1621 self.ui.debug(_("local modified %s, keeping\n") % f)
1615 1622 else:
1616 1623 self.ui.debug(_("working dir created %s, keeping\n") % f)
1617 1624
1618 1625 for f, n in m2.iteritems():
1619 1626 if choose and not choose(f):
1620 1627 continue
1621 1628 if f[0] == "/":
1622 1629 continue
1623 1630 if f in ma and n != ma[f]:
1624 1631 r = _("k")
1625 1632 if not force and (linear_path or allow):
1626 1633 r = self.ui.prompt(
1627 1634 (_("remote changed %s which local deleted\n") % f) +
1628 1635 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1629 1636 if r == _("k"):
1630 1637 get[f] = n
1631 1638 elif f not in ma:
1632 1639 self.ui.debug(_("remote created %s\n") % f)
1633 1640 get[f] = n
1634 1641 else:
1635 1642 if force or p2 == pa: # going backwards?
1636 1643 self.ui.debug(_("local deleted %s, recreating\n") % f)
1637 1644 get[f] = n
1638 1645 else:
1639 1646 self.ui.debug(_("local deleted %s\n") % f)
1640 1647
1641 1648 del mw, m1, m2, ma
1642 1649
1643 1650 if force:
1644 1651 for f in merge:
1645 1652 get[f] = merge[f][1]
1646 1653 merge = {}
1647 1654
1648 1655 if linear_path or force:
1649 1656 # we don't need to do any magic, just jump to the new rev
1650 1657 branch_merge = False
1651 1658 p1, p2 = p2, nullid
1652 1659 else:
1653 1660 if not allow:
1654 1661 self.ui.status(_("this update spans a branch"
1655 1662 " affecting the following files:\n"))
1656 1663 fl = merge.keys() + get.keys()
1657 1664 fl.sort()
1658 1665 for f in fl:
1659 1666 cf = ""
1660 1667 if f in merge:
1661 1668 cf = _(" (resolve)")
1662 1669 self.ui.status(" %s%s\n" % (f, cf))
1663 1670 self.ui.warn(_("aborting update spanning branches!\n"))
1664 1671 self.ui.status(_("(use 'hg merge' to merge across branches"
1665 1672 " or 'hg update -C' to lose changes)\n"))
1666 1673 return 1
1667 1674 branch_merge = True
1668 1675
1669 1676 # get the files we don't need to change
1670 1677 files = get.keys()
1671 1678 files.sort()
1672 1679 for f in files:
1673 1680 if f[0] == "/":
1674 1681 continue
1675 1682 self.ui.note(_("getting %s\n") % f)
1676 1683 t = self.file(f).read(get[f])
1677 1684 self.wwrite(f, t)
1678 1685 util.set_exec(self.wjoin(f), mf2[f])
1679 1686 if moddirstate:
1680 1687 if branch_merge:
1681 1688 self.dirstate.update([f], 'n', st_mtime=-1)
1682 1689 else:
1683 1690 self.dirstate.update([f], 'n')
1684 1691
1685 1692 # merge the tricky bits
1686 1693 failedmerge = []
1687 1694 files = merge.keys()
1688 1695 files.sort()
1689 1696 xp1 = hex(p1)
1690 1697 xp2 = hex(p2)
1691 1698 for f in files:
1692 1699 self.ui.status(_("merging %s\n") % f)
1693 1700 my, other, flag = merge[f]
1694 1701 ret = self.merge3(f, my, other, xp1, xp2)
1695 1702 if ret:
1696 1703 err = True
1697 1704 failedmerge.append(f)
1698 1705 util.set_exec(self.wjoin(f), flag)
1699 1706 if moddirstate:
1700 1707 if branch_merge:
1701 1708 # We've done a branch merge, mark this file as merged
1702 1709 # so that we properly record the merger later
1703 1710 self.dirstate.update([f], 'm')
1704 1711 else:
1705 1712 # We've update-merged a locally modified file, so
1706 1713 # we set the dirstate to emulate a normal checkout
1707 1714 # of that file some time in the past. Thus our
1708 1715 # merge will appear as a normal local file
1709 1716 # modification.
1710 1717 f_len = len(self.file(f).read(other))
1711 1718 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1712 1719
1713 1720 remove.sort()
1714 1721 for f in remove:
1715 1722 self.ui.note(_("removing %s\n") % f)
1716 1723 util.audit_path(f)
1717 1724 try:
1718 1725 util.unlink(self.wjoin(f))
1719 1726 except OSError, inst:
1720 1727 if inst.errno != errno.ENOENT:
1721 1728 self.ui.warn(_("update failed to remove %s: %s!\n") %
1722 1729 (f, inst.strerror))
1723 1730 if moddirstate:
1724 1731 if branch_merge:
1725 1732 self.dirstate.update(remove, 'r')
1726 1733 else:
1727 1734 self.dirstate.forget(remove)
1728 1735
1729 1736 if moddirstate:
1730 1737 self.dirstate.setparents(p1, p2)
1731 1738
1732 1739 stat = ((len(get), _("updated")),
1733 1740 (len(merge) - len(failedmerge), _("merged")),
1734 1741 (len(remove), _("removed")),
1735 1742 (len(failedmerge), _("unresolved")))
1736 1743 note = ", ".join([_("%d files %s") % s for s in stat])
1737 1744 self.ui.note("%s\n" % note)
1738 1745 if moddirstate and branch_merge:
1739 1746 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1740 1747
1741 1748 return err
1742 1749
1743 1750 def merge3(self, fn, my, other, p1, p2):
1744 1751 """perform a 3-way merge in the working directory"""
1745 1752
1746 1753 def temp(prefix, node):
1747 1754 pre = "%s~%s." % (os.path.basename(fn), prefix)
1748 1755 (fd, name) = tempfile.mkstemp("", pre)
1749 1756 f = os.fdopen(fd, "wb")
1750 1757 self.wwrite(fn, fl.read(node), f)
1751 1758 f.close()
1752 1759 return name
1753 1760
1754 1761 fl = self.file(fn)
1755 1762 base = fl.ancestor(my, other)
1756 1763 a = self.wjoin(fn)
1757 1764 b = temp("base", base)
1758 1765 c = temp("other", other)
1759 1766
1760 1767 self.ui.note(_("resolving %s\n") % fn)
1761 1768 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1762 1769 (fn, short(my), short(other), short(base)))
1763 1770
1764 1771 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1765 1772 or "hgmerge")
1766 1773 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1767 1774 environ={'HG_FILE': fn,
1768 1775 'HG_MY_NODE': p1,
1769 1776 'HG_OTHER_NODE': p2,
1770 1777 'HG_FILE_MY_NODE': hex(my),
1771 1778 'HG_FILE_OTHER_NODE': hex(other),
1772 1779 'HG_FILE_BASE_NODE': hex(base)})
1773 1780 if r:
1774 1781 self.ui.warn(_("merging %s failed!\n") % fn)
1775 1782
1776 1783 os.unlink(b)
1777 1784 os.unlink(c)
1778 1785 return r
1779 1786
1780 1787 def verify(self):
1781 1788 filelinkrevs = {}
1782 1789 filenodes = {}
1783 1790 changesets = revisions = files = 0
1784 1791 errors = [0]
1785 1792 neededmanifests = {}
1786 1793
1787 1794 def err(msg):
1788 1795 self.ui.warn(msg + "\n")
1789 1796 errors[0] += 1
1790 1797
1791 1798 def checksize(obj, name):
1792 1799 d = obj.checksize()
1793 1800 if d[0]:
1794 1801 err(_("%s data length off by %d bytes") % (name, d[0]))
1795 1802 if d[1]:
1796 1803 err(_("%s index contains %d extra bytes") % (name, d[1]))
1797 1804
1798 1805 seen = {}
1799 1806 self.ui.status(_("checking changesets\n"))
1800 1807 checksize(self.changelog, "changelog")
1801 1808
1802 1809 for i in range(self.changelog.count()):
1803 1810 changesets += 1
1804 1811 n = self.changelog.node(i)
1805 1812 l = self.changelog.linkrev(n)
1806 1813 if l != i:
1807 1814 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1808 1815 if n in seen:
1809 1816 err(_("duplicate changeset at revision %d") % i)
1810 1817 seen[n] = 1
1811 1818
1812 1819 for p in self.changelog.parents(n):
1813 1820 if p not in self.changelog.nodemap:
1814 1821 err(_("changeset %s has unknown parent %s") %
1815 1822 (short(n), short(p)))
1816 1823 try:
1817 1824 changes = self.changelog.read(n)
1818 1825 except KeyboardInterrupt:
1819 1826 self.ui.warn(_("interrupted"))
1820 1827 raise
1821 1828 except Exception, inst:
1822 1829 err(_("unpacking changeset %s: %s") % (short(n), inst))
1823 1830 continue
1824 1831
1825 1832 neededmanifests[changes[0]] = n
1826 1833
1827 1834 for f in changes[3]:
1828 1835 filelinkrevs.setdefault(f, []).append(i)
1829 1836
1830 1837 seen = {}
1831 1838 self.ui.status(_("checking manifests\n"))
1832 1839 checksize(self.manifest, "manifest")
1833 1840
1834 1841 for i in range(self.manifest.count()):
1835 1842 n = self.manifest.node(i)
1836 1843 l = self.manifest.linkrev(n)
1837 1844
1838 1845 if l < 0 or l >= self.changelog.count():
1839 1846 err(_("bad manifest link (%d) at revision %d") % (l, i))
1840 1847
1841 1848 if n in neededmanifests:
1842 1849 del neededmanifests[n]
1843 1850
1844 1851 if n in seen:
1845 1852 err(_("duplicate manifest at revision %d") % i)
1846 1853
1847 1854 seen[n] = 1
1848 1855
1849 1856 for p in self.manifest.parents(n):
1850 1857 if p not in self.manifest.nodemap:
1851 1858 err(_("manifest %s has unknown parent %s") %
1852 1859 (short(n), short(p)))
1853 1860
1854 1861 try:
1855 1862 delta = mdiff.patchtext(self.manifest.delta(n))
1856 1863 except KeyboardInterrupt:
1857 1864 self.ui.warn(_("interrupted"))
1858 1865 raise
1859 1866 except Exception, inst:
1860 1867 err(_("unpacking manifest %s: %s") % (short(n), inst))
1861 1868 continue
1862 1869
1863 1870 try:
1864 1871 ff = [ l.split('\0') for l in delta.splitlines() ]
1865 1872 for f, fn in ff:
1866 1873 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1867 1874 except (ValueError, TypeError), inst:
1868 1875 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1869 1876
1870 1877 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1871 1878
1872 1879 for m, c in neededmanifests.items():
1873 1880 err(_("Changeset %s refers to unknown manifest %s") %
1874 1881 (short(m), short(c)))
1875 1882 del neededmanifests
1876 1883
1877 1884 for f in filenodes:
1878 1885 if f not in filelinkrevs:
1879 1886 err(_("file %s in manifest but not in changesets") % f)
1880 1887
1881 1888 for f in filelinkrevs:
1882 1889 if f not in filenodes:
1883 1890 err(_("file %s in changeset but not in manifest") % f)
1884 1891
1885 1892 self.ui.status(_("checking files\n"))
1886 1893 ff = filenodes.keys()
1887 1894 ff.sort()
1888 1895 for f in ff:
1889 1896 if f == "/dev/null":
1890 1897 continue
1891 1898 files += 1
1892 1899 if not f:
1893 1900 err(_("file without name in manifest %s") % short(n))
1894 1901 continue
1895 1902 fl = self.file(f)
1896 1903 checksize(fl, f)
1897 1904
1898 1905 nodes = {nullid: 1}
1899 1906 seen = {}
1900 1907 for i in range(fl.count()):
1901 1908 revisions += 1
1902 1909 n = fl.node(i)
1903 1910
1904 1911 if n in seen:
1905 1912 err(_("%s: duplicate revision %d") % (f, i))
1906 1913 if n not in filenodes[f]:
1907 1914 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1908 1915 else:
1909 1916 del filenodes[f][n]
1910 1917
1911 1918 flr = fl.linkrev(n)
1912 1919 if flr not in filelinkrevs.get(f, []):
1913 1920 err(_("%s:%s points to unexpected changeset %d")
1914 1921 % (f, short(n), flr))
1915 1922 else:
1916 1923 filelinkrevs[f].remove(flr)
1917 1924
1918 1925 # verify contents
1919 1926 try:
1920 1927 t = fl.read(n)
1921 1928 except KeyboardInterrupt:
1922 1929 self.ui.warn(_("interrupted"))
1923 1930 raise
1924 1931 except Exception, inst:
1925 1932 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1926 1933
1927 1934 # verify parents
1928 1935 (p1, p2) = fl.parents(n)
1929 1936 if p1 not in nodes:
1930 1937 err(_("file %s:%s unknown parent 1 %s") %
1931 1938 (f, short(n), short(p1)))
1932 1939 if p2 not in nodes:
1933 1940 err(_("file %s:%s unknown parent 2 %s") %
1934 1941 (f, short(n), short(p1)))
1935 1942 nodes[n] = 1
1936 1943
1937 1944 # cross-check
1938 1945 for node in filenodes[f]:
1939 1946 err(_("node %s in manifests not in %s") % (hex(node), f))
1940 1947
1941 1948 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1942 1949 (files, changesets, revisions))
1943 1950
1944 1951 if errors[0]:
1945 1952 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1946 1953 return 1
1947 1954
1948 1955 # used to avoid circular references so destructors work
1949 1956 def aftertrans(base):
1950 1957 p = base
1951 1958 def a():
1952 1959 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1953 1960 util.rename(os.path.join(p, "journal.dirstate"),
1954 1961 os.path.join(p, "undo.dirstate"))
1955 1962 return a
1956 1963
@@ -1,175 +1,176
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct
9 9 from revlog import *
10 10 from i18n import gettext as _
11 11 from demandload import *
12 12 demandload(globals(), "bisect array")
13 13
14 14 class manifest(revlog):
15 def __init__(self, opener):
15 def __init__(self, opener, defversion=0):
16 16 self.mapcache = None
17 17 self.listcache = None
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
18 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
19 defversion)
19 20
20 21 def read(self, node):
21 22 if node == nullid: return {} # don't upset local cache
22 23 if self.mapcache and self.mapcache[0] == node:
23 24 return self.mapcache[1]
24 25 text = self.revision(node)
25 26 map = {}
26 27 flag = {}
27 28 self.listcache = array.array('c', text)
28 29 lines = text.splitlines(1)
29 30 for l in lines:
30 31 (f, n) = l.split('\0')
31 32 map[f] = bin(n[:40])
32 33 flag[f] = (n[40:-1] == "x")
33 34 self.mapcache = (node, map, flag)
34 35 return map
35 36
36 37 def readflags(self, node):
37 38 if node == nullid: return {} # don't upset local cache
38 39 if not self.mapcache or self.mapcache[0] != node:
39 40 self.read(node)
40 41 return self.mapcache[2]
41 42
42 43 def diff(self, a, b):
43 44 return mdiff.textdiff(str(a), str(b))
44 45
45 46 def add(self, map, flags, transaction, link, p1=None, p2=None,
46 47 changed=None):
47 48
48 49 # returns a tuple (start, end). If the string is found
49 50 # m[start:end] are the line containing that string. If start == end
50 51 # the string was not found and they indicate the proper sorted
51 52 # insertion point. This was taken from bisect_left, and modified
52 53 # to find line start/end as it goes along.
53 54 #
54 55 # m should be a buffer or a string
55 56 # s is a string
56 57 #
57 58 def manifestsearch(m, s, lo=0, hi=None):
58 59 def advance(i, c):
59 60 while i < lenm and m[i] != c:
60 61 i += 1
61 62 return i
62 63 lenm = len(m)
63 64 if not hi:
64 65 hi = lenm
65 66 while lo < hi:
66 67 mid = (lo + hi) // 2
67 68 start = mid
68 69 while start > 0 and m[start-1] != '\n':
69 70 start -= 1
70 71 end = advance(start, '\0')
71 72 if m[start:end] < s:
72 73 # we know that after the null there are 40 bytes of sha1
73 74 # this translates to the bisect lo = mid + 1
74 75 lo = advance(end + 40, '\n') + 1
75 76 else:
76 77 # this translates to the bisect hi = mid
77 78 hi = start
78 79 end = advance(lo, '\0')
79 80 found = m[lo:end]
80 81 if cmp(s, found) == 0:
81 82 # we know that after the null there are 40 bytes of sha1
82 83 end = advance(end + 40, '\n')
83 84 return (lo, end+1)
84 85 else:
85 86 return (lo, lo)
86 87
87 88 # apply the changes collected during the bisect loop to our addlist
88 89 # return a delta suitable for addrevision
89 90 def addlistdelta(addlist, x):
90 91 # start from the bottom up
91 92 # so changes to the offsets don't mess things up.
92 93 i = len(x)
93 94 while i > 0:
94 95 i -= 1
95 96 start = x[i][0]
96 97 end = x[i][1]
97 98 if x[i][2]:
98 99 addlist[start:end] = array.array('c', x[i][2])
99 100 else:
100 101 del addlist[start:end]
101 102 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
102 103 for d in x ])
103 104
104 105 # if we're using the listcache, make sure it is valid and
105 106 # parented by the same node we're diffing against
106 107 if not changed or not self.listcache or not p1 or \
107 108 self.mapcache[0] != p1:
108 109 files = map.keys()
109 110 files.sort()
110 111
111 112 # if this is changed to support newlines in filenames,
112 113 # be sure to check the templates/ dir again (especially *-raw.tmpl)
113 114 text = ["%s\000%s%s\n" %
114 115 (f, hex(map[f]), flags[f] and "x" or '')
115 116 for f in files]
116 117 self.listcache = array.array('c', "".join(text))
117 118 cachedelta = None
118 119 else:
119 120 addlist = self.listcache
120 121
121 122 # combine the changed lists into one list for sorting
122 123 work = [[x, 0] for x in changed[0]]
123 124 work[len(work):] = [[x, 1] for x in changed[1]]
124 125 work.sort()
125 126
126 127 delta = []
127 128 dstart = None
128 129 dend = None
129 130 dline = [""]
130 131 start = 0
131 132 # zero copy representation of addlist as a buffer
132 133 addbuf = buffer(addlist)
133 134
134 135 # start with a readonly loop that finds the offset of
135 136 # each line and creates the deltas
136 137 for w in work:
137 138 f = w[0]
138 139 # bs will either be the index of the item or the insert point
139 140 start, end = manifestsearch(addbuf, f, start)
140 141 if w[1] == 0:
141 142 l = "%s\000%s%s\n" % (f, hex(map[f]),
142 143 flags[f] and "x" or '')
143 144 else:
144 145 l = ""
145 146 if start == end and w[1] == 1:
146 147 # item we want to delete was not found, error out
147 148 raise AssertionError(
148 149 _("failed to remove %s from manifest\n") % f)
149 150 if dstart != None and dstart <= start and dend >= start:
150 151 if dend < end:
151 152 dend = end
152 153 if l:
153 154 dline.append(l)
154 155 else:
155 156 if dstart != None:
156 157 delta.append([dstart, dend, "".join(dline)])
157 158 dstart = start
158 159 dend = end
159 160 dline = [l]
160 161
161 162 if dstart != None:
162 163 delta.append([dstart, dend, "".join(dline)])
163 164 # apply the delta to the addlist, and get a delta for addrevision
164 165 cachedelta = addlistdelta(addlist, delta)
165 166
166 167 # the delta is only valid if we've been processing the tip revision
167 168 if self.mapcache[0] != self.tip():
168 169 cachedelta = None
169 170 self.listcache = addlist
170 171
171 172 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
172 173 p2, cachedelta)
173 174 self.mapcache = (n, map, flags)
174 175
175 176 return n
@@ -1,883 +1,962
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 17 demandload(globals(), "sha struct zlib")
18 18
19 # revlog version strings
20 REVLOGV0 = 0
21 REVLOGNG = 1
22
19 23 def hash(text, p1, p2):
20 24 """generate a hash from the given text and its parent hashes
21 25
22 26 This hash combines both the current file contents and its history
23 27 in a manner that makes it easy to distinguish nodes with the same
24 28 content in the revision graph.
25 29 """
26 30 l = [p1, p2]
27 31 l.sort()
28 32 s = sha.new(l[0])
29 33 s.update(l[1])
30 34 s.update(text)
31 35 return s.digest()
32 36
33 37 def compress(text):
34 38 """ generate a possibly-compressed representation of text """
35 39 if not text: return ("", text)
36 40 if len(text) < 44:
37 41 if text[0] == '\0': return ("", text)
38 42 return ('u', text)
39 43 bin = zlib.compress(text)
40 44 if len(bin) > len(text):
41 45 if text[0] == '\0': return ("", text)
42 46 return ('u', text)
43 47 return ("", bin)
44 48
45 49 def decompress(bin):
46 50 """ decompress the given input """
47 51 if not bin: return bin
48 52 t = bin[0]
49 53 if t == '\0': return bin
50 54 if t == 'x': return zlib.decompress(bin)
51 55 if t == 'u': return bin[1:]
52 56 raise RevlogError(_("unknown compression type %r") % t)
53 57
54 indexformat = ">4l20s20s20s"
58 indexformatv0 = ">4l20s20s20s"
59 # index ng:
60 # 6 bytes offset
61 # 2 bytes flags
62 # 4 bytes compressed length
63 # 4 bytes uncompressed length
64 # 4 bytes: base rev
65 # 4 bytes link rev
66 # 4 bytes parent 1 rev
67 # 4 bytes parent 2 rev
68 # 32 bytes: nodeid
69 indexformatng = ">Qiiiiii20s12x"
70 versionformat = ">i"
55 71
56 72 class lazyparser(object):
57 73 """
58 74 this class avoids the need to parse the entirety of large indices
59 75
60 76 By default we parse and load 1000 entries at a time.
61 77
62 78 If no position is specified, we load the whole index, and replace
63 79 the lazy objects in revlog with the underlying objects for
64 80 efficiency in cases where we look at most of the nodes.
65 81 """
66 def __init__(self, data, revlog):
82 def __init__(self, data, revlog, indexformat):
67 83 self.data = data
68 84 self.s = struct.calcsize(indexformat)
85 self.indexformat = indexformat
69 86 self.l = len(data)/self.s
70 87 self.index = [None] * self.l
71 88 self.map = {nullid: -1}
72 89 self.all = 0
73 90 self.revlog = revlog
74 91
75 def trunc(self, pos):
76 self.l = pos/self.s
77
78 92 def load(self, pos=None):
79 93 if self.all: return
80 94 if pos is not None:
81 95 block = pos / 1000
82 96 i = block * 1000
83 97 end = min(self.l, i + 1000)
84 98 else:
85 99 self.all = 1
86 100 i = 0
87 101 end = self.l
88 102 self.revlog.index = self.index
89 103 self.revlog.nodemap = self.map
90 104
91 105 while i < end:
92 d = self.data[i * self.s: (i + 1) * self.s]
93 e = struct.unpack(indexformat, d)
94 self.index[i] = e
95 self.map[e[6]] = i
106 if not self.index[i]:
107 d = self.data[i * self.s: (i + 1) * self.s]
108 e = struct.unpack(self.indexformat, d)
109 self.index[i] = e
110 self.map[e[-1]] = i
96 111 i += 1
97 112
98 113 class lazyindex(object):
99 114 """a lazy version of the index array"""
100 115 def __init__(self, parser):
101 116 self.p = parser
102 117 def __len__(self):
103 118 return len(self.p.index)
104 119 def load(self, pos):
105 120 if pos < 0:
106 121 pos += len(self.p.index)
107 122 self.p.load(pos)
108 123 return self.p.index[pos]
109 124 def __getitem__(self, pos):
110 125 return self.p.index[pos] or self.load(pos)
126 def __setitem__(self, pos, item):
127 self.p.index[pos] = item
111 128 def __delitem__(self, pos):
112 129 del self.p.index[pos]
113 130 def append(self, e):
114 131 self.p.index.append(e)
115 def trunc(self, pos):
116 self.p.trunc(pos)
117 132
118 133 class lazymap(object):
119 134 """a lazy version of the node map"""
120 135 def __init__(self, parser):
121 136 self.p = parser
122 137 def load(self, key):
123 138 if self.p.all: return
124 139 n = self.p.data.find(key)
125 140 if n < 0:
126 141 raise KeyError(key)
127 142 pos = n / self.p.s
128 143 self.p.load(pos)
129 144 def __contains__(self, key):
130 145 self.p.load()
131 146 return key in self.p.map
132 147 def __iter__(self):
133 148 yield nullid
134 149 for i in xrange(self.p.l):
135 150 try:
136 yield self.p.index[i][6]
151 yield self.p.index[i][-1]
137 152 except:
138 153 self.p.load(i)
139 yield self.p.index[i][6]
154 yield self.p.index[i][-1]
140 155 def __getitem__(self, key):
141 156 try:
142 157 return self.p.map[key]
143 158 except KeyError:
144 159 try:
145 160 self.load(key)
146 161 return self.p.map[key]
147 162 except KeyError:
148 163 raise KeyError("node " + hex(key))
149 164 def __setitem__(self, key, val):
150 165 self.p.map[key] = val
151 166 def __delitem__(self, key):
152 167 del self.p.map[key]
153 168
154 169 class RevlogError(Exception): pass
155 170
156 171 class revlog(object):
157 172 """
158 173 the underlying revision storage object
159 174
160 175 A revlog consists of two parts, an index and the revision data.
161 176
162 177 The index is a file with a fixed record size containing
163 178 information on each revision, includings its nodeid (hash), the
164 179 nodeids of its parents, the position and offset of its data within
165 180 the data file, and the revision it's based on. Finally, each entry
166 181 contains a linkrev entry that can serve as a pointer to external
167 182 data.
168 183
169 184 The revision data itself is a linear collection of data chunks.
170 185 Each chunk represents a revision and is usually represented as a
171 186 delta against the previous chunk. To bound lookup time, runs of
172 187 deltas are limited to about 2 times the length of the original
173 188 version data. This makes retrieval of a version proportional to
174 189 its size, or O(1) relative to the number of revisions.
175 190
176 191 Both pieces of the revlog are written to in an append-only
177 192 fashion, which means we never need to rewrite a file to insert or
178 193 remove data, and can use some simple techniques to avoid the need
179 194 for locking while reading.
180 195 """
181 def __init__(self, opener, indexfile, datafile):
196 def __init__(self, opener, indexfile, datafile, defversion=0):
182 197 """
183 198 create a revlog object
184 199
185 200 opener is a function that abstracts the file opening operation
186 201 and can be used to implement COW semantics or the like.
187 202 """
188 203 self.indexfile = indexfile
189 204 self.datafile = datafile
190 205 self.opener = opener
191 206
192 207 self.indexstat = None
193 208 self.cache = None
194 209 self.chunkcache = None
210 self.defversion = defversion
195 211 self.load()
196 212
197 213 def load(self):
214 v = self.defversion
198 215 try:
199 216 f = self.opener(self.indexfile)
217 i = f.read()
200 218 except IOError, inst:
201 219 if inst.errno != errno.ENOENT:
202 220 raise
203 221 i = ""
204 222 else:
205 223 try:
206 224 st = os.fstat(f.fileno())
207 225 except AttributeError, inst:
208 226 st = None
209 227 else:
210 228 oldst = self.indexstat
211 229 if (oldst and st.st_dev == oldst.st_dev
212 230 and st.st_ino == oldst.st_ino
213 231 and st.st_mtime == oldst.st_mtime
214 232 and st.st_ctime == oldst.st_ctime):
215 233 return
216 self.indexstat = st
217 i = f.read()
234 self.indexstat = st
235 if len(i) > 0:
236 v = struct.unpack(versionformat, i[:4])[0]
237 if v != 0:
238 flags = v & ~0xFFFF
239 fmt = v & 0xFFFF
240 if fmt != REVLOGNG or (flags & ~(REVLOGNGINLINEDATA)):
241 raise RevlogError(
242 _("unknown version format %d or flags %x on %s") %
243 (v, flags, self.indexfile))
244 self.version = v
245 if v == 0:
246 self.indexformat = indexformatv0
247 else:
248 self.indexformat = indexformatng
218 249
219 if i and i[:4] != "\0\0\0\0":
220 raise RevlogError(_("incompatible revlog signature on %s") %
221 self.indexfile)
222
223 if len(i) > 10000:
224 # big index, let's parse it on demand
225 parser = lazyparser(i, self)
226 self.index = lazyindex(parser)
227 self.nodemap = lazymap(parser)
250 if i:
251 if st and st.st_size > 10000:
252 # big index, let's parse it on demand
253 parser = lazyparser(i, self, self.indexformat)
254 self.index = lazyindex(parser)
255 self.nodemap = lazymap(parser)
256 else:
257 self.parseindex(i)
258 if self.version != 0:
259 e = list(self.index[0])
260 type = self.ngtype(e[0])
261 e[0] = self.offset_type(0, type)
262 self.index[0] = e
228 263 else:
229 s = struct.calcsize(indexformat)
230 l = len(i) / s
231 self.index = [None] * l
232 m = [None] * l
264 self.nodemap = { nullid: -1}
265 self.index = []
266
267
268 def parseindex(self, data):
269 s = struct.calcsize(self.indexformat)
270 l = len(data)
271 self.index = []
272 self.nodemap = {nullid: -1}
273 off = 0
274 n = 0
275 while off < l:
276 e = struct.unpack(self.indexformat, data[off:off + s])
277 self.index.append(e)
278 self.nodemap[e[-1]] = n
279 n += 1
280 off += s
233 281
234 n = 0
235 for f in xrange(0, l * s, s):
236 # offset, size, base, linkrev, p1, p2, nodeid
237 e = struct.unpack(indexformat, i[f:f + s])
238 m[n] = (e[6], n)
239 self.index[n] = e
240 n += 1
282 def ngoffset(self, q):
283 if q & 0xFFFF:
284 raise RevlogError(_('%s: incompatible revision flag %x') %
285 (self.indexfile, type))
286 return long(q >> 16)
287
288 def ngtype(self, q):
289 return int(q & 0xFFFF)
241 290
242 self.nodemap = dict(m)
243 self.nodemap[nullid] = -1
291 def offset_type(self, offset, type):
292 return long(long(offset) << 16 | type)
293
294 def loadindexmap(self):
295 """loads both the map and the index from the lazy parser"""
296 if isinstance(self.index, lazyindex):
297 p = self.index.p
298 p.load()
244 299
245 300 def tip(self): return self.node(len(self.index) - 1)
246 301 def count(self): return len(self.index)
247 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
302 def node(self, rev):
303 return (rev < 0) and nullid or self.index[rev][-1]
248 304 def rev(self, node):
249 305 try:
250 306 return self.nodemap[node]
251 307 except KeyError:
252 308 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
253 def linkrev(self, node): return self.index[self.rev(node)][3]
309 def linkrev(self, node): return self.index[self.rev(node)][-4]
254 310 def parents(self, node):
255 311 if node == nullid: return (nullid, nullid)
256 return self.index[self.rev(node)][4:6]
312 r = self.rev(node)
313 d = self.index[r][-3:-1]
314 if self.version == 0:
315 return d
316 return [ self.node(x) for x in d ]
317 def start(self, rev):
318 if rev < 0:
319 return -1
320 if self.version != 0:
321 return self.ngoffset(self.index[rev][0])
322 return self.index[rev][0]
323 def end(self, rev): return self.start(rev) + self.length(rev)
257 324
258 def start(self, rev): return (rev < 0) and -1 or self.index[rev][0]
259 325 def length(self, rev):
260 326 if rev < 0:
261 327 return 0
262 328 else:
263 329 return self.index[rev][1]
264 def end(self, rev): return self.start(rev) + self.length(rev)
265 def base(self, rev): return (rev < 0) and rev or self.index[rev][2]
330 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
266 331
267 332 def reachable(self, rev, stop=None):
268 333 reachable = {}
269 334 visit = [rev]
270 335 reachable[rev] = 1
271 336 if stop:
272 337 stopn = self.rev(stop)
273 338 else:
274 339 stopn = 0
275 340 while visit:
276 341 n = visit.pop(0)
277 342 if n == stop:
278 343 continue
279 344 if n == nullid:
280 345 continue
281 346 for p in self.parents(n):
282 347 if self.rev(p) < stopn:
283 348 continue
284 349 if p not in reachable:
285 350 reachable[p] = 1
286 351 visit.append(p)
287 352 return reachable
288 353
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list bases and heads after all the unreachable ones have been
        pruned. Element 0 contains a topologically sorted list of all

        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads[:]
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
439 504
440 505 def heads(self, start=None):
441 506 """return the list of all nodes that have no children
442 507
443 508 if start is specified, only heads that are descendants of
444 509 start will be returned
445 510
446 511 """
447 512 if start is None:
448 513 start = nullid
449 514 reachable = {start: 1}
450 515 heads = {start: 1}
451 516 startrev = self.rev(start)
452 517
453 518 for r in xrange(startrev + 1, self.count()):
454 519 n = self.node(r)
455 520 for pn in self.parents(n):
456 521 if pn in reachable:
457 522 reachable[n] = 1
458 523 heads[n] = 1
459 524 if pn in heads:
460 525 del heads[pn]
461 526 return heads.keys()
462 527
463 528 def children(self, node):
464 529 """find the children of a given node"""
465 530 c = []
466 531 p = self.rev(node)
467 532 for r in range(p + 1, self.count()):
468 533 n = self.node(r)
469 534 for pn in self.parents(n):
470 535 if pn == node:
471 536 c.append(n)
472 537 continue
473 538 elif pn == nullid:
474 539 continue
475 540 return c
476 541
477 542 def lookup(self, id):
478 543 """locate a node based on revision number or subset of hex nodeid"""
479 544 try:
480 545 rev = int(id)
481 546 if str(rev) != id: raise ValueError
482 547 if rev < 0: rev = self.count() + rev
483 548 if rev < 0 or rev >= self.count(): raise ValueError
484 549 return self.node(rev)
485 550 except (ValueError, OverflowError):
486 551 c = []
487 552 for n in self.nodemap:
488 553 if hex(n).startswith(id):
489 554 c.append(n)
490 555 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
491 556 if len(c) < 1: raise RevlogError(_("No match found"))
492 557 return c[0]
493 558
494 559 return None
495 560
496 561 def diff(self, a, b):
497 562 """return a delta between two revisions"""
498 563 return mdiff.textdiff(a, b)
499 564
500 565 def patches(self, t, pl):
501 566 """apply a list of patches to a string"""
502 567 return mdiff.patches(t, pl)
503 568
    def chunk(self, rev, df=None, cachelen=4096):
        """Return the decompressed data chunk stored for revision rev.

        df - an already-open data file handle to reuse; if not given the
             data file is opened on demand
        cachelen - minimum number of bytes to read ahead into the chunk
             cache, so neighbouring chunks can be served from memory
        """
        start, length = self.start(rev), self.length(rev)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            # wanted range falls outside the cache: refill it
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])
532 597
533 598 def delta(self, node):
534 599 """return or calculate a delta between a node and its predecessor"""
535 600 r = self.rev(node)
536 601 return self.revdiff(r - 1, r)
537 602
538 603 def revdiff(self, rev1, rev2):
539 604 """return or calculate a delta between two revisions"""
540 605 b1 = self.base(rev1)
541 606 b2 = self.base(rev2)
542 607 if b1 == b2 and rev1 + 1 == rev2:
543 608 return self.chunk(rev2)
544 609 else:
545 610 return self.diff(self.revision(self.node(rev1)),
546 611 self.revision(self.node(rev2)))
547 612
    def revision(self, node):
        """Return the uncompressed text of the given revision.

        The text is rebuilt by applying the chain of deltas on top of the
        nearest base snapshot, then verified against the node hash.
        """
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # keep one data file handle open across all the chunk reads below
        df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        # integrity check: a node id is the hash of the text and its parents
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text
578 645
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        # already present (e.g. two branches made the same change)
        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        # index entry layout differs between revlog version 0 and revlogng
        if self.version == 0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, n * len(entry))
        f = self.opener(self.datafile, "a")
        if data[0]:
            f.write(data[0])
        f.write(data[1])
        f = self.opener(self.indexfile, "a")

        # NOTE(review): for the very first revlogng entry the version number
        # is written first and the entry's leading 4 bytes are dropped --
        # presumably the version is folded into the first offset field;
        # confirm against the index parsing code.
        if len(self.index) == 1 and self.version != 0:
            l = struct.pack(versionformat, self.version)
            f.write(l)
            entry = entry[4:]

        f.write(entry)

        self.cache = (node, n, text)
        return node
640 718
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        # group the ancestors by distance, yielding (distance, {nodes}) pairs
        def generations(node):
            sg, s = None, {}
            for g,n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n:1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [ n for n in gx[1] if n in gy[1] ]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()
698 776
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas: each chunk is node + p1 + p2 + linknode, followed by
        # the delta from the previous revision in the list
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()
733 811
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = measure = 0
        if r:
            end = self.end(t)

        # register current file sizes with the transaction so a rollback
        # can truncate both files back
        ifh = self.opener(self.indexfile, "a+")
        transaction.add(self.indexfile, ifh.tell())
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            # each chunk starts with node, p1, p2, linknode (20 bytes each)
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p1))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                measure = len(text)
            else:
                # append the delta as-is; the entry layout depends on the
                # revlog format version (see addrevision)
                if self.version == 0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), -1, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        if node is None:
            raise RevlogError(_("group to be added is empty"))
        return node
822 899
    def strip(self, rev, minlink):
        """Remove revision rev and all later revisions from the revlog.

        minlink is the oldest linkrev we are allowed to strip away; rev is
        advanced past entries that belong to older changesets.
        """
        if self.count() == 0 or rev >= self.count():
            return

        # deleting entries needs the fully loaded index and nodemap,
        # not the lazy on-demand versions
        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        # (the linkrev field is 4 slots from the end of the entry in both
        # the version-0 and revlogng index layouts)
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        df = self.opener(self.datafile, "a")
        df.truncate(end)
        end = rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]
853 932
    def checksize(self):
        """Check on-disk file sizes against the index.

        Returns (dd, di): dd is the difference between the actual and
        expected data file size, di is the number of trailing bytes in the
        index file that do not form a whole entry.  Missing files count
        as zero difference.
        """
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
882 961
883 962
@@ -1,47 +1,48
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from demandload import demandload
11 11 demandload(globals(), "changelog filelog httprangereader")
12 12 demandload(globals(), "localrepo manifest os urllib urllib2")
13 13
class rangereader(httprangereader.httprangereader):
    """HTTP range reader that maps urllib2 errors to IOError.

    Callers of a file-like read() expect IOError on failure, so HTTP and
    URL errors are translated here.
    """
    def read(self, size=None):
        try:
            return httprangereader.httprangereader.read(self, size)
        except urllib2.HTTPError, inst:
            raise IOError(None, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])
22 22
def opener(base):
    """return a function that opens files over http"""
    root = base
    def open_http(path, mode="r"):
        # mode is accepted for interface compatibility only:
        # static-http access is read-only
        url = os.path.join(root, urllib.quote(path))
        return rangereader(url)
    return open_http
30 30
class statichttprepository(localrepo.localrepository):
    """Read-only repository accessed via plain (static) HTTP.

    Note: deliberately does not call localrepo's __init__; it sets up only
    the attributes needed for read access.
    """
    def __init__(self, ui, path):
        self.path = (path + "/.hg")
        self.ui = ui
        # static http repos use the default revlog format
        # (presumably they cannot honour [revlog] config -- TODO confirm)
        self.revlogversion = 0
        self.opener = opener(self.path)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

    def dev(self):
        # no local filesystem device backs this repository
        return -1

    def local(self):
        return False
@@ -1,257 +1,264
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import ConfigParser
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "errno os re socket sys tempfile util")
12 12
class ui(object):
    """User interface and configuration access.

    A ui object is either the root (parentui is None), which reads the
    hgrc files itself, or a child that delegates unknown attributes to
    its parent via __getattr__.
    """
    def __init__(self, verbose=False, debug=False, quiet=False,
                 interactive=True, parentui=None):
        self.overlay = {}
        if parentui is None:
            # this is the parent of all ui children
            self.parentui = None
            self.cdata = ConfigParser.SafeConfigParser()
            self.readconfig(util.rcpath())

            self.quiet = self.configbool("ui", "quiet")
            self.verbose = self.configbool("ui", "verbose")
            self.debugflag = self.configbool("ui", "debug")
            self.interactive = self.configbool("ui", "interactive", True)

            self.updateopts(verbose, debug, quiet, interactive)
            self.diffcache = None
            self.header = []
            self.prev_header = []
            # [revlog] section options, e.g. the revlog format version
            self.revlogopts = self.configrevlog()
        else:
            # parentui may point to an ui object which is already a child
            self.parentui = parentui.parentui or parentui
            parent_cdata = self.parentui.cdata
            self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
            # make interpolation work
            for section in parent_cdata.sections():
                self.cdata.add_section(section)
                for name, value in parent_cdata.items(section, raw=True):
                    self.cdata.set(section, name, value)

    def __getattr__(self, key):
        # child uis fall back to their parent for anything not set locally
        return getattr(self.parentui, key)

    def updateopts(self, verbose=False, debug=False, quiet=False,
                   interactive=True):
        self.quiet = (self.quiet or quiet) and not verbose and not debug
        self.verbose = (self.verbose or verbose) or debug
        self.debugflag = (self.debugflag or debug)
        self.interactive = (self.interactive and interactive)

    def readconfig(self, fn, root=None):
        if isinstance(fn, basestring):
            fn = [fn]
        for f in fn:
            try:
                self.cdata.read(f)
            except ConfigParser.ParsingError, inst:
                raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
        # translate paths relative to root (or home) into absolute paths
        if root is None:
            root = os.path.expanduser('~')
        for name, path in self.configitems("paths"):
            if path and path.find("://") == -1 and not os.path.isabs(path):
                self.cdata.set("paths", name, os.path.join(root, path))

    def setconfig(self, section, name, val):
        # overlay entries take precedence over anything read from files
        self.overlay[(section, name)] = val

    def config(self, section, name, default=None):
        if self.overlay.has_key((section, name)):
            return self.overlay[(section, name)]
        if self.cdata.has_option(section, name):
            try:
                return self.cdata.get(section, name)
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration:\n%s") % inst)
        if self.parentui is None:
            return default
        else:
            return self.parentui.config(section, name, default)

    def configbool(self, section, name, default=False):
        if self.overlay.has_key((section, name)):
            return self.overlay[(section, name)]
        if self.cdata.has_option(section, name):
            try:
                return self.cdata.getboolean(section, name)
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration:\n%s") % inst)
        if self.parentui is None:
            return default
        else:
            return self.parentui.configbool(section, name, default)

    def configitems(self, section):
        items = {}
        if self.parentui is not None:
            items = dict(self.parentui.configitems(section))
        if self.cdata.has_section(section):
            try:
                items.update(dict(self.cdata.items(section)))
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration:\n%s") % inst)
        x = items.items()
        x.sort()
        return x

    def walkconfig(self, seen=None):
        # yield (section, name, value) for every known option, overlay
        # first, then file config, then the parent's options
        if seen is None:
            seen = {}
        for (section, name), value in self.overlay.iteritems():
            yield section, name, value
            seen[section, name] = 1
        for section in self.cdata.sections():
            for name, value in self.cdata.items(section):
                if (section, name) in seen: continue
                yield section, name, value.replace('\n', '\\n')
                seen[section, name] = 1
        if self.parentui is not None:
            for parent in self.parentui.walkconfig(seen):
                yield parent

    def extensions(self):
        return self.configitems("extensions")

    def hgignorefiles(self):
        # "ignore" and "ignore.*" keys in [ui] name extra ignore files
        result = []
        cfgitems = self.configitems("ui")
        for key, value in cfgitems:
            if key == 'ignore' or key.startswith('ignore.'):
                path = os.path.expanduser(value)
                result.append(path)
        return result

    def configrevlog(self):
        """Return the [revlog] section as a dict with lowercased keys."""
        ret = {}
        for x in self.configitems("revlog"):
            k = x[0].lower()
            ret[k] = x[1]
        return ret
    def diffopts(self):
        if self.diffcache:
            return self.diffcache
        ret = { 'showfunc' : True, 'ignorews' : False}
        for x in self.configitems("diff"):
            k = x[0].lower()
            v = x[1]
            if v:
                v = v.lower()
                if v == 'true':
                    value = True
                else:
                    value = False
                ret[k] = value
        self.diffcache = ret
        return ret

    def username(self):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
        and stop searching if one of these is set.
        Abort if found username is an empty string to force specifying
        the commit user elsewhere, e.g. with command line option or repo
        hgrc.
        If not found, use $LOGNAME or $USERNAME +"@full.hostname".
        """
        user = os.environ.get("HGUSER")
        if user is None:
            user = self.config("ui", "username")
        if user is None:
            user = os.environ.get("EMAIL")
        if user is None:
            user = os.environ.get("LOGNAME") or os.environ.get("USERNAME")
            if user:
                user = "%s@%s" % (user, socket.getfqdn())
        if not user:
            raise util.Abort(_("Please specify a username."))
        return user

    def shortuser(self, user):
        """Return a short representation of a user name or email address."""
        if not self.verbose: user = util.shortuser(user)
        return user

    def expandpath(self, loc):
        """Return repository location relative to cwd or from [paths]"""
        if loc.find("://") != -1 or os.path.exists(loc):
            return loc

        return self.config("paths", loc, loc)

    def write(self, *args):
        # emit any pending header exactly once before normal output
        if self.header:
            if self.header != self.prev_header:
                self.prev_header = self.header
                self.write(*self.header)
            self.header = []
        for a in args:
            sys.stdout.write(str(a))

    def write_header(self, *args):
        for a in args:
            self.header.append(str(a))

    def write_err(self, *args):
        try:
            # keep stdout and stderr ordered
            if not sys.stdout.closed: sys.stdout.flush()
            for a in args:
                sys.stderr.write(str(a))
        except IOError, inst:
            if inst.errno != errno.EPIPE:
                raise

    def flush(self):
        try: sys.stdout.flush()
        except: pass
        try: sys.stderr.flush()
        except: pass

    def readline(self):
        return sys.stdin.readline()[:-1]
    def prompt(self, msg, pat, default="y"):
        if not self.interactive: return default
        while 1:
            self.write(msg, " ")
            r = self.readline()
            if re.match(pat, r):
                return r
            else:
                self.write(_("unrecognized response\n"))
    def status(self, *msg):
        if not self.quiet: self.write(*msg)
    def warn(self, *msg):
        self.write_err(*msg)
    def note(self, *msg):
        if self.verbose: self.write(*msg)
    def debug(self, *msg):
        if self.debugflag: self.write(*msg)
    def edit(self, text, user):
        # run the user's editor on a temp file and return the result with
        # "HG:" comment lines stripped
        (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt")
        try:
            f = os.fdopen(fd, "w")
            f.write(text)
            f.close()

            editor = (os.environ.get("HGEDITOR") or
                      self.config("ui", "editor") or
                      os.environ.get("EDITOR", "vi"))

            util.system("%s \"%s\"" % (editor, name),
                        environ={'HGUSER': user},
                        onerr=util.Abort, errprefix=_("edit failed"))

            f = open(name)
            t = f.read()
            f.close()
            t = re.sub("(?m)^HG:.*\n", "", t)
        finally:
            os.unlink(name)

        return t
General Comments 0
You need to be logged in to leave comments. Login now