add blank line after copyright notices and after header
Martin Geisler
r8228:eee2319c default
@@ -1,51 +1,52 b''
 #!/usr/bin/env python
 #
 # Copyright 2005-2007 by Intevation GmbH <intevation@intevation.de>
+#
 # Author(s):
 # Thomas Arendsen Hein <thomas@intevation.de>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
 
 """
 hg-ssh - a wrapper for ssh access to a limited set of mercurial repos
 
 To be used in ~/.ssh/authorized_keys with the "command" option, see sshd(8):
 command="hg-ssh path/to/repo1 /path/to/repo2 ~/repo3 ~user/repo4" ssh-dss ...
 (probably together with these other useful options:
  no-port-forwarding,no-X11-forwarding,no-agent-forwarding)
 
 This allows pull/push over ssh to to the repositories given as arguments.
 
 If all your repositories are subdirectories of a common directory, you can
 allow shorter paths with:
 command="cd path/to/my/repositories && hg-ssh repo1 subdir/repo2"
 
 You can use pattern matching of your normal shell, e.g.:
 command="cd repos && hg-ssh user/thomas/* projects/{mercurial,foo}"
 """
 
 # enable importing on demand to reduce startup time
 from mercurial import demandimport; demandimport.enable()
 
 from mercurial import dispatch
 
 import sys, os
 
 cwd = os.getcwd()
 allowed_paths = [os.path.normpath(os.path.join(cwd, os.path.expanduser(path)))
                  for path in sys.argv[1:]]
 orig_cmd = os.getenv('SSH_ORIGINAL_COMMAND', '?')
 
 if orig_cmd.startswith('hg -R ') and orig_cmd.endswith(' serve --stdio'):
     path = orig_cmd[6:-14]
     repo = os.path.normpath(os.path.join(cwd, os.path.expanduser(path)))
     if repo in allowed_paths:
         dispatch.dispatch(['-R', repo, 'serve', '--stdio'])
     else:
         sys.stderr.write("Illegal repository %r\n" % repo)
         sys.exit(-1)
 else:
     sys.stderr.write("Illegal command %r\n" % orig_cmd)
     sys.exit(-1)
 
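
The hunk above only adds a blank comment line, but the check it sits next to is the heart of hg-ssh: a repository is served only when SSH_ORIGINAL_COMMAND has the exact form 'hg -R <path> serve --stdio' and the normalized path was listed on the wrapper's command line; the slice orig_cmd[6:-14] strips the leading 'hg -R ' and the trailing ' serve --stdio'. A minimal stand-alone sketch of that check, with made-up paths (not part of the change):

    import os

    # hypothetical values, for illustration only
    orig_cmd = "hg -R path/to/repo1 serve --stdio"
    cwd = "/home/hg"
    allowed_paths = [os.path.normpath(os.path.join(cwd, p))
                     for p in ["path/to/repo1"]]

    path = orig_cmd[6:-14]          # -> "path/to/repo1"
    repo = os.path.normpath(os.path.join(cwd, os.path.expanduser(path)))
    print(repo in allowed_paths)    # -> True, so this repo would be served
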
@@ -1,41 +1,42 b''
 # Mercurial extension to provide the 'hg children' command
 #
 # Copyright 2007 by Intevation GmbH <intevation@intevation.de>
+#
 # Author(s):
 # Thomas Arendsen Hein <thomas@intevation.de>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
 
 from mercurial import cmdutil
 from mercurial.commands import templateopts
 from mercurial.i18n import _
 
 
 def children(ui, repo, file_=None, **opts):
     """show the children of the given or working directory revision
 
     Print the children of the working directory's revisions. If a
     revision is given via --rev/-r, the children of that revision will
     be printed. If a file argument is given, revision in which the
     file was last changed (after the working directory revision or the
     argument to --rev if given) is printed.
     """
     rev = opts.get('rev')
     if file_:
         ctx = repo.filectx(file_, changeid=rev)
     else:
         ctx = repo[rev]
 
     displayer = cmdutil.show_changeset(ui, repo, opts)
     for cctx in ctx.children():
         displayer.show(cctx)
 
 
 cmdtable = {
     "children":
         (children,
          [('r', 'rev', '', _('show children of the specified revision')),
          ] + templateopts,
          _('hg children [-r REV] [FILE]')),
 }
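
Again only the comment line is new. For context, the extension resolves a changeset context (from --rev, a file argument, or the working directory parent) and then prints ctx.children(). A rough sketch of the same lookup using the Mercurial 1.x API assumed by this file; it presumes it is run from inside a working copy and is not part of the change:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '.')     # repository in the current directory
    ctx = repo[None].parents()[0]             # working directory parent
    for child in ctx.children():              # same call the command uses
        print('%d:%s' % (child.rev(), child))
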
@@ -1,162 +1,163 b''
 # churn.py - create a graph of revisions count grouped by template
 #
 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
+
 '''command to show certain statistics about revision history'''
 
 from mercurial.i18n import _
 from mercurial import patch, cmdutil, util, templater
 import sys
 import time, datetime
 
 def maketemplater(ui, repo, tmpl):
     tmpl = templater.parsestring(tmpl, quoted=False)
     try:
         t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
     except SyntaxError, inst:
         raise util.Abort(inst.args[0])
     t.use_template(tmpl)
     return t
 
 def changedlines(ui, repo, ctx1, ctx2, fns):
     lines = 0
     fmatch = cmdutil.match(repo, pats=fns)
     diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
     for l in diff.split('\n'):
         if (l.startswith("+") and not l.startswith("+++ ") or
             l.startswith("-") and not l.startswith("--- ")):
             lines += 1
     return lines
 
 def countrate(ui, repo, amap, *pats, **opts):
     """Calculate stats"""
     if opts.get('dateformat'):
         def getkey(ctx):
             t, tz = ctx.date()
             date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
             return date.strftime(opts['dateformat'])
     else:
         tmpl = opts.get('template', '{author|email}')
         tmpl = maketemplater(ui, repo, tmpl)
         def getkey(ctx):
             ui.pushbuffer()
             tmpl.show(ctx)
             return ui.popbuffer()
 
     count = pct = 0
     rate = {}
     df = False
     if opts.get('date'):
         df = util.matchdate(opts['date'])
 
     get = util.cachefunc(lambda r: repo[r].changeset())
     changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
     for st, rev, fns in changeiter:
         if not st == 'add':
             continue
         if df and not df(get(rev)[2][0]): # doesn't match date format
             continue
 
         ctx = repo[rev]
         key = getkey(ctx)
         key = amap.get(key, key) # alias remap
         if opts.get('changesets'):
             rate[key] = rate.get(key, 0) + 1
         else:
             parents = ctx.parents()
             if len(parents) > 1:
                 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
                 continue
 
             ctx1 = parents[0]
             lines = changedlines(ui, repo, ctx1, ctx, fns)
             rate[key] = rate.get(key, 0) + lines
 
         if opts.get('progress'):
             count += 1
             newpct = int(100.0 * count / max(len(repo), 1))
             if pct < newpct:
                 pct = newpct
                 ui.write("\r" + _("generating stats: %d%%") % pct)
                 sys.stdout.flush()
 
     if opts.get('progress'):
         ui.write("\r")
         sys.stdout.flush()
 
     return rate
 
 
 def churn(ui, repo, *pats, **opts):
     '''graph count of revisions grouped by template
 
     Will graph count of changed lines or revisions grouped by template
     or alternatively by date, if dateformat is used. In this case it
     will override template.
 
     By default statistics are counted for number of changed lines.
 
     Examples:
 
       # display count of changed lines for every committer
       hg churn -t '{author|email}'
 
       # display daily activity graph
       hg churn -f '%H' -s -c
 
       # display activity of developers by month
       hg churn -f '%Y-%m' -s -c
 
       # display count of lines changed in every year
       hg churn -f '%Y' -s
 
     The map file format used to specify aliases is fairly simple:
 
     <alias email> <actual email>'''
     def pad(s, l):
         return (s + " " * l)[:l]
 
     amap = {}
     aliases = opts.get('aliases')
     if aliases:
         for l in open(aliases, "r"):
             l = l.strip()
             alias, actual = l.split()
             amap[alias] = actual
 
     rate = countrate(ui, repo, amap, *pats, **opts).items()
     if not rate:
         return
 
     sortfn = ((not opts.get('sort')) and (lambda a, b: cmp(b[1], a[1])) or None)
     rate.sort(sortfn)
 
     maxcount = float(max([v for k, v in rate]))
     maxname = max([len(k) for k, v in rate])
 
     ttywidth = util.termwidth()
     ui.debug(_("assuming %i character terminal\n") % ttywidth)
     width = ttywidth - maxname - 2 - 6 - 2 - 2
 
     for date, count in rate:
         print "%s %6d %s" % (pad(date, maxname), count,
                              "*" * int(count * width / maxcount))
 
 
 cmdtable = {
     "churn":
         (churn,
          [('r', 'rev', [], _('count rate for the specified revision or range')),
           ('d', 'date', '', _('count rate for revisions matching date spec')),
           ('t', 'template', '{author|email}', _('template to group changesets')),
           ('f', 'dateformat', '',
            _('strftime-compatible format for grouping by date')),
           ('c', 'changesets', False, _('count rate by number of changesets')),
           ('s', 'sort', False, _('sort by key (default: sort by count)')),
           ('', 'aliases', '', _('file with email aliases')),
           ('', 'progress', None, _('show progress'))],
          _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")),
 }
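
The added blank line aside, changedlines() above defines what churn actually counts when it is not in --changesets mode: every '+' or '-' line of a diff except the '+++ ' and '--- ' file headers. A self-contained illustration of that counting rule on a hard-coded diff (the sample diff is invented and this snippet is not part of the change):

    # lines of a made-up diff, for illustration only
    sample = [
        "--- a/foo.py",
        "+++ b/foo.py",
        "@@ -1,2 +1,2 @@",
        "-print 1",
        "+print 2",
        " unchanged",
    ]

    lines = 0
    for l in sample:
        if (l.startswith("+") and not l.startswith("+++ ") or
            l.startswith("-") and not l.startswith("--- ")):
            lines += 1
    print(lines)   # -> 2: one added and one removed line are counted
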
@@ -1,261 +1,262 b''
 # convert.py Foreign SCM converter
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
+
 '''converting foreign VCS repositories to Mercurial'''
 
 import convcmd
 import cvsps
 import subversion
 from mercurial import commands
 from mercurial.i18n import _
 
 # Commands definition was moved elsewhere to ease demandload job.
 
 def convert(ui, src, dest=None, revmapfile=None, **opts):
     """convert a foreign SCM repository to a Mercurial one.
 
     Accepted source formats [identifiers]:
     - Mercurial [hg]
     - CVS [cvs]
     - Darcs [darcs]
     - git [git]
     - Subversion [svn]
     - Monotone [mtn]
     - GNU Arch [gnuarch]
     - Bazaar [bzr]
     - Perforce [p4]
 
     Accepted destination formats [identifiers]:
     - Mercurial [hg]
     - Subversion [svn] (history on branches is not preserved)
 
     If no revision is given, all revisions will be converted.
     Otherwise, convert will only import up to the named revision
     (given in a format understood by the source).
 
     If no destination directory name is specified, it defaults to the
     basename of the source with '-hg' appended. If the destination
     repository doesn't exist, it will be created.
 
     If <REVMAP> isn't given, it will be put in a default location
     (<dest>/.hg/shamap by default). The <REVMAP> is a simple text file
     that maps each source commit ID to the destination ID for that
     revision, like so:
     <source ID> <destination ID>
 
     If the file doesn't exist, it's automatically created. It's
     updated on each commit copied, so convert-repo can be interrupted
     and can be run repeatedly to copy new commits.
 
     The [username mapping] file is a simple text file that maps each
     source commit author to a destination commit author. It is handy
     for source SCMs that use unix logins to identify authors (eg:
     CVS). One line per author mapping and the line format is:
     srcauthor=whatever string you want
 
     The filemap is a file that allows filtering and remapping of files
     and directories. Comment lines start with '#'. Each line can
     contain one of the following directives:
 
       include path/to/file
 
       exclude path/to/file
 
       rename from/file to/file
 
     The 'include' directive causes a file, or all files under a
     directory, to be included in the destination repository, and the
     exclusion of all other files and directories not explicitely included.
     The 'exclude' directive causes files or directories to be omitted.
     The 'rename' directive renames a file or directory. To rename from
     a subdirectory into the root of the repository, use '.' as the
     path to rename to.
 
     The splicemap is a file that allows insertion of synthetic
     history, letting you specify the parents of a revision. This is
     useful if you want to e.g. give a Subversion merge two parents, or
     graft two disconnected series of history together. Each entry
     contains a key, followed by a space, followed by one or two
     comma-separated values. The key is the revision ID in the source
     revision control system whose parents should be modified (same
     format as a key in .hg/shamap). The values are the revision IDs
     (in either the source or destination revision control system) that
     should be used as the new parents for that node.
 
     Mercurial Source
     -----------------
 
     --config convert.hg.ignoreerrors=False (boolean)
         ignore integrity errors when reading. Use it to fix Mercurial
         repositories with missing revlogs, by converting from and to
         Mercurial.
     --config convert.hg.saverev=False (boolean)
         store original revision ID in changeset (forces target IDs to
         change)
     --config convert.hg.startrev=0 (hg revision identifier)
         convert start revision and its descendants
 
     CVS Source
     ----------
 
     CVS source will use a sandbox (i.e. a checked-out copy) from CVS
     to indicate the starting point of what will be converted. Direct
     access to the repository files is not needed, unless of course the
     repository is :local:. The conversion uses the top level directory
     in the sandbox to find the CVS repository, and then uses CVS rlog
     commands to find files to convert. This means that unless a
     filemap is given, all files under the starting directory will be
     converted, and that any directory reorganisation in the CVS
     sandbox is ignored.
 
     Because CVS does not have changesets, it is necessary to collect
     individual commits to CVS and merge them into changesets. CVS
     source uses its internal changeset merging code by default but can
     be configured to call the external 'cvsps' program by setting:
         --config convert.cvsps='cvsps -A -u --cvs-direct -q'
     This is a legacy option and may be removed in future.
 
     The options shown are the defaults.
 
     Internal cvsps is selected by setting
         --config convert.cvsps=builtin
     and has a few more configurable options:
         --config convert.cvsps.cache=True (boolean)
             Set to False to disable remote log caching, for testing and
             debugging purposes.
         --config convert.cvsps.fuzz=60 (integer)
             Specify the maximum time (in seconds) that is allowed
             between commits with identical user and log message in a
             single changeset. When very large files were checked in as
             part of a changeset then the default may not be long
             enough.
         --config convert.cvsps.mergeto='{{mergetobranch ([-\w]+)}}'
             Specify a regular expression to which commit log messages
             are matched. If a match occurs, then the conversion
             process will insert a dummy revision merging the branch on
             which this log message occurs to the branch indicated in
             the regex.
         --config convert.cvsps.mergefrom='{{mergefrombranch ([-\w]+)}}'
             Specify a regular expression to which commit log messages
             are matched. If a match occurs, then the conversion
             process will add the most recent revision on the branch
             indicated in the regex as the second parent of the
             changeset.
 
     The hgext/convert/cvsps wrapper script allows the builtin
     changeset merging code to be run without doing a conversion. Its
     parameters and output are similar to that of cvsps 2.1.
 
     Subversion Source
     -----------------
 
     Subversion source detects classical trunk/branches/tags layouts.
     By default, the supplied "svn://repo/path/" source URL is
     converted as a single branch. If "svn://repo/path/trunk" exists it
     replaces the default branch. If "svn://repo/path/branches" exists,
     its subdirectories are listed as possible branches. If
     "svn://repo/path/tags" exists, it is looked for tags referencing
     converted branches. Default "trunk", "branches" and "tags" values
     can be overriden with following options. Set them to paths
     relative to the source URL, or leave them blank to disable
     autodetection.
 
     --config convert.svn.branches=branches (directory name)
         specify the directory containing branches
     --config convert.svn.tags=tags (directory name)
         specify the directory containing tags
     --config convert.svn.trunk=trunk (directory name)
         specify the name of the trunk branch
 
     Source history can be retrieved starting at a specific revision,
     instead of being integrally converted. Only single branch
     conversions are supported.
 
     --config convert.svn.startrev=0 (svn revision number)
         specify start Subversion revision.
 
     Perforce Source
     ---------------
 
     The Perforce (P4) importer can be given a p4 depot path or a
     client specification as source. It will convert all files in the
     source to a flat Mercurial repository, ignoring labels, branches
     and integrations. Note that when a depot path is given you then
     usually should specify a target directory, because otherwise the
     target may be named ...-hg.
 
     It is possible to limit the amount of source history to be
     converted by specifying an initial Perforce revision.
 
     --config convert.p4.startrev=0 (perforce changelist number)
         specify initial Perforce revision.
 
 
     Mercurial Destination
     ---------------------
 
     --config convert.hg.clonebranches=False (boolean)
         dispatch source branches in separate clones.
     --config convert.hg.tagsbranch=default (branch name)
         tag revisions branch name
     --config convert.hg.usebranchnames=True (boolean)
         preserve branch names
 
     """
     return convcmd.convert(ui, src, dest, revmapfile, **opts)
 
 def debugsvnlog(ui, **opts):
     return subversion.debugsvnlog(ui, **opts)
 
 def debugcvsps(ui, *args, **opts):
     '''create changeset information from CVS
 
     This command is intended as a debugging tool for the CVS to
     Mercurial converter, and can be used as a direct replacement for
     cvsps.
 
     Hg debugcvsps reads the CVS rlog for current directory (or any
     named directory) in the CVS repository, and converts the log to a
     series of changesets based on matching commit log entries and
     dates.'''
     return cvsps.debugcvsps(ui, *args, **opts)
 
 commands.norepo += " convert debugsvnlog debugcvsps"
 
 cmdtable = {
     "convert":
         (convert,
          [('A', 'authors', '', _('username mapping filename')),
           ('d', 'dest-type', '', _('destination repository type')),
           ('', 'filemap', '', _('remap file names using contents of file')),
           ('r', 'rev', '', _('import up to target revision REV')),
           ('s', 'source-type', '', _('source repository type')),
           ('', 'splicemap', '', _('splice synthesized history into place')),
           ('', 'datesort', None, _('try to sort changesets by date'))],
          _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
     "debugsvnlog":
         (debugsvnlog,
          [],
          'hg debugsvnlog'),
     "debugcvsps":
         (debugcvsps,
          [
           # Main options shared with cvsps-2.1
           ('b', 'branches', [], _('only return changes on specified branches')),
           ('p', 'prefix', '', _('prefix to remove from file names')),
           ('r', 'revisions', [], _('only return changes after or between specified tags')),
           ('u', 'update-cache', None, _("update cvs log cache")),
           ('x', 'new-cache', None, _("create new cvs log cache")),
           ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
           ('', 'root', '', _('specify cvsroot')),
           # Options specific to builtin cvsps
           ('', 'parents', '', _('show parent changesets')),
           ('', 'ancestors', '', _('show current changeset in ancestor branches')),
           # Options that are ignored for compatibility with cvsps-2.1
           ('A', 'cvs-direct', None, _('ignored for compatibility')),
          ],
          _('hg debugcvsps [OPTION]... [PATH]...')),
 }
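
The docstring above documents the mapping files one directive at a time. As a combined illustration, a hypothetical filemap that pulls a subproject out of a larger tree might look like the following; the paths are invented and the file is not part of the change, it only exercises the documented 'include', 'exclude' and 'rename' directives and '#' comments:

    # keep only the library, drop its tests, and make it the new root
    include lib
    exclude lib/tests
    rename lib .
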
@@ -1,146 +1,147 b''
 # fetch.py - pull and merge remote changes
 #
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.
+
 '''pulling, updating and merging in one command'''
 
 from mercurial.i18n import _
 from mercurial.node import nullid, short
 from mercurial import commands, cmdutil, hg, util, url
 from mercurial.lock import release
 
 def fetch(ui, repo, source='default', **opts):
     '''pull changes from a remote repository, merge new changes if needed.
 
     This finds all changes from the repository at the specified path
     or URL and adds them to the local repository.
 
     If the pulled changes add a new branch head, the head is
     automatically merged, and the result of the merge is committed.
     Otherwise, the working directory is updated to include the new
     changes.
 
     When a merge occurs, the newly pulled changes are assumed to be
     "authoritative". The head of the new changes is used as the first
     parent, with local changes as the second. To switch the merge
     order, use --switch-parent.
 
     See 'hg help dates' for a list of formats valid for -d/--date.
     '''
 
     date = opts.get('date')
     if date:
         opts['date'] = util.parsedate(date)
 
     parent, p2 = repo.dirstate.parents()
     branch = repo.dirstate.branch()
     branchnode = repo.branchtags().get(branch)
     if parent != branchnode:
         raise util.Abort(_('working dir not at branch tip '
                            '(use "hg update" to check out branch tip)'))
 
     if p2 != nullid:
         raise util.Abort(_('outstanding uncommitted merge'))
 
     wlock = lock = None
     try:
         wlock = repo.wlock()
         lock = repo.lock()
         mod, add, rem, del_ = repo.status()[:4]
 
         if mod or add or rem:
             raise util.Abort(_('outstanding uncommitted changes'))
         if del_:
             raise util.Abort(_('working directory is missing some files'))
         bheads = repo.branchheads(branch)
         bheads = [head for head in bheads if len(repo[head].children()) == 0]
         if len(bheads) > 1:
             raise util.Abort(_('multiple heads in this branch '
                                '(use "hg heads ." and "hg merge" to merge)'))
 
         other = hg.repository(cmdutil.remoteui(repo, opts),
                               ui.expandpath(source))
         ui.status(_('pulling from %s\n') %
                   url.hidepassword(ui.expandpath(source)))
         revs = None
         if opts['rev']:
             if not other.local():
                 raise util.Abort(_("fetch -r doesn't work for remote "
                                    "repositories yet"))
             else:
                 revs = [other.lookup(rev) for rev in opts['rev']]
 
         # Are there any changes at all?
         modheads = repo.pull(other, heads=revs)
         if modheads == 0:
             return 0
 
         # Is this a simple fast-forward along the current branch?
         newheads = repo.branchheads(branch)
         newheads = [head for head in newheads if len(repo[head].children()) == 0]
         newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
         if len(newheads) == 1:
             if newchildren[0] != parent:
                 return hg.clean(repo, newchildren[0])
             else:
                 return
 
         # Are there more than one additional branch heads?
         newchildren = [n for n in newchildren if n != parent]
         newparent = parent
         if newchildren:
             newparent = newchildren[0]
             hg.clean(repo, newparent)
         newheads = [n for n in newheads if n != newparent]
         if len(newheads) > 1:
             ui.status(_('not merging with %d other new branch heads '
                         '(use "hg heads ." and "hg merge" to merge them)\n') %
                       (len(newheads) - 1))
             return
 
         # Otherwise, let's merge.
         err = False
         if newheads:
             # By default, we consider the repository we're pulling
             # *from* as authoritative, so we merge our changes into
             # theirs.
             if opts['switch_parent']:
                 firstparent, secondparent = newparent, newheads[0]
             else:
                 firstparent, secondparent = newheads[0], newparent
             ui.status(_('updating to %d:%s\n') %
                       (repo.changelog.rev(firstparent),
                        short(firstparent)))
             hg.clean(repo, firstparent)
             ui.status(_('merging with %d:%s\n') %
                       (repo.changelog.rev(secondparent), short(secondparent)))
             err = hg.merge(repo, secondparent, remind=False)
 
         if not err:
             mod, add, rem = repo.status()[:3]
             message = (cmdutil.logmessage(opts) or
                        (_('Automated merge with %s') %
                         url.removeauth(other.url())))
             force_editor = opts.get('force_editor') or opts.get('edit')
             n = repo.commit(mod + add + rem, message,
                             opts['user'], opts['date'], force=True,
                             force_editor=force_editor)
             ui.status(_('new changeset %d:%s merges remote changes '
                         'with local\n') % (repo.changelog.rev(n),
                                            short(n)))
 
     finally:
         release(lock, wlock)
 
 cmdtable = {
     'fetch':
         (fetch,
          [('r', 'rev', [], _('a specific revision you would like to pull')),
           ('e', 'edit', None, _('edit commit message')),
           ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
           ('', 'switch-parent', None, _('switch parents when merging')),
          ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
          _('hg fetch [SOURCE]')),
 }
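
Beyond the added blank line, the merge-order rule described in fetch's docstring is implemented near the end of the function: the pulled head becomes the first parent unless --switch-parent is given. A trivial stand-alone sketch of that selection, with placeholder node names (not part of the change):

    # placeholder values, for illustration only
    newparent, newhead = 'local-head', 'pulled-head'
    switch_parent = False                 # i.e. --switch-parent not given

    if switch_parent:
        firstparent, secondparent = newparent, newhead
    else:
        firstparent, secondparent = newhead, newparent
    print('%s %s' % (firstparent, secondparent))   # -> pulled-head local-head
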
@@ -1,415 +1,416 b''
1 # ASCII graph log extension for Mercurial
1 # ASCII graph log extension for Mercurial
2 #
2 #
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7 '''show revision graphs in terminal windows
8 '''show revision graphs in terminal windows
8
9
9 This extension adds a --graph option to the incoming, outgoing and log
10 This extension adds a --graph option to the incoming, outgoing and log
10 commands. When this options is given, an ascii representation of the
11 commands. When this options is given, an ascii representation of the
11 revision graph is also shown.
12 revision graph is also shown.
12 '''
13 '''
13
14
14 import os
15 import os
15 from mercurial.cmdutil import revrange, show_changeset
16 from mercurial.cmdutil import revrange, show_changeset
16 from mercurial.commands import templateopts
17 from mercurial.commands import templateopts
17 from mercurial.i18n import _
18 from mercurial.i18n import _
18 from mercurial.node import nullrev
19 from mercurial.node import nullrev
19 from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
20 from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
20 from mercurial import hg, url, util
21 from mercurial import hg, url, util
21
22
22 def revisions(repo, start, stop):
23 def revisions(repo, start, stop):
23 """cset DAG generator yielding (rev, node, [parents]) tuples
24 """cset DAG generator yielding (rev, node, [parents]) tuples
24
25
25 This generator function walks through the revision history from revision
26 This generator function walks through the revision history from revision
26 start to revision stop (which must be less than or equal to start).
27 start to revision stop (which must be less than or equal to start).
27 """
28 """
28 assert start >= stop
29 assert start >= stop
29 cur = start
30 cur = start
30 while cur >= stop:
31 while cur >= stop:
31 ctx = repo[cur]
32 ctx = repo[cur]
32 parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev]
33 parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev]
33 parents.sort()
34 parents.sort()
34 yield (ctx, parents)
35 yield (ctx, parents)
35 cur -= 1
36 cur -= 1
36
37
37 def filerevs(repo, path, start, stop):
38 def filerevs(repo, path, start, stop):
38 """file cset DAG generator yielding (rev, node, [parents]) tuples
39 """file cset DAG generator yielding (rev, node, [parents]) tuples
39
40
40 This generator function walks through the revision history of a single
41 This generator function walks through the revision history of a single
41 file from revision start to revision stop (which must be less than or
42 file from revision start to revision stop (which must be less than or
42 equal to start).
43 equal to start).
43 """
44 """
44 assert start >= stop
45 assert start >= stop
45 filerev = len(repo.file(path)) - 1
46 filerev = len(repo.file(path)) - 1
46 while filerev >= 0:
47 while filerev >= 0:
47 fctx = repo.filectx(path, fileid=filerev)
48 fctx = repo.filectx(path, fileid=filerev)
48 parents = [f.linkrev() for f in fctx.parents() if f.path() == path]
49 parents = [f.linkrev() for f in fctx.parents() if f.path() == path]
49 parents.sort()
50 parents.sort()
50 if fctx.rev() <= start:
51 if fctx.rev() <= start:
51 yield (fctx, parents)
52 yield (fctx, parents)
52 if fctx.rev() <= stop:
53 if fctx.rev() <= stop:
53 break
54 break
54 filerev -= 1
55 filerev -= 1
55
56
56 def grapher(nodes):
57 def grapher(nodes):
57 """grapher for asciigraph on a list of nodes and their parents
58 """grapher for asciigraph on a list of nodes and their parents
58
59
59 nodes must generate tuples (node, parents, char, lines) where
60 nodes must generate tuples (node, parents, char, lines) where
60 - parents must generate the parents of node, in sorted order,
61 - parents must generate the parents of node, in sorted order,
61 and max length 2,
62 and max length 2,
62 - char is the char to print as the node symbol, and
63 - char is the char to print as the node symbol, and
63 - lines are the lines to display next to the node.
64 - lines are the lines to display next to the node.
64 """
65 """
65 seen = []
66 seen = []
66 for node, parents, char, lines in nodes:
67 for node, parents, char, lines in nodes:
67 if node not in seen:
68 if node not in seen:
68 seen.append(node)
69 seen.append(node)
69 nodeidx = seen.index(node)
70 nodeidx = seen.index(node)
70
71
71 knownparents = []
72 knownparents = []
72 newparents = []
73 newparents = []
73 for parent in parents:
74 for parent in parents:
74 if parent in seen:
75 if parent in seen:
75 knownparents.append(parent)
76 knownparents.append(parent)
76 else:
77 else:
77 newparents.append(parent)
78 newparents.append(parent)
78
79
79 ncols = len(seen)
80 ncols = len(seen)
80 nextseen = seen[:]
81 nextseen = seen[:]
81 nextseen[nodeidx:nodeidx + 1] = newparents
82 nextseen[nodeidx:nodeidx + 1] = newparents
82 edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
83 edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
83
84
84 if len(newparents) > 0:
85 if len(newparents) > 0:
85 edges.append((nodeidx, nodeidx))
86 edges.append((nodeidx, nodeidx))
86 if len(newparents) > 1:
87 if len(newparents) > 1:
87 edges.append((nodeidx, nodeidx + 1))
88 edges.append((nodeidx, nodeidx + 1))
88 nmorecols = len(nextseen) - ncols
89 nmorecols = len(nextseen) - ncols
89 seen = nextseen
90 seen = nextseen
90 yield (char, lines, nodeidx, edges, ncols, nmorecols)
91 yield (char, lines, nodeidx, edges, ncols, nmorecols)
91
92
92 def fix_long_right_edges(edges):
93 def fix_long_right_edges(edges):
93 for (i, (start, end)) in enumerate(edges):
94 for (i, (start, end)) in enumerate(edges):
94 if end > start:
95 if end > start:
95 edges[i] = (start, end + 1)
96 edges[i] = (start, end + 1)
96
97
97 def get_nodeline_edges_tail(
98 def get_nodeline_edges_tail(
98 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
99 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
99 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
100 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
100 # Still going in the same non-vertical direction.
101 # Still going in the same non-vertical direction.
101 if n_columns_diff == -1:
102 if n_columns_diff == -1:
102 start = max(node_index + 1, p_node_index)
103 start = max(node_index + 1, p_node_index)
103 tail = ["|", " "] * (start - node_index - 1)
104 tail = ["|", " "] * (start - node_index - 1)
104 tail.extend(["/", " "] * (n_columns - start))
105 tail.extend(["/", " "] * (n_columns - start))
105 return tail
106 return tail
106 else:
107 else:
107 return ["\\", " "] * (n_columns - node_index - 1)
108 return ["\\", " "] * (n_columns - node_index - 1)
108 else:
109 else:
109 return ["|", " "] * (n_columns - node_index - 1)
110 return ["|", " "] * (n_columns - node_index - 1)
110
111
111 def draw_edges(edges, nodeline, interline):
112 def draw_edges(edges, nodeline, interline):
112 for (start, end) in edges:
113 for (start, end) in edges:
113 if start == end + 1:
114 if start == end + 1:
114 interline[2 * end + 1] = "/"
115 interline[2 * end + 1] = "/"
115 elif start == end - 1:
116 elif start == end - 1:
116 interline[2 * start + 1] = "\\"
117 interline[2 * start + 1] = "\\"
117 elif start == end:
118 elif start == end:
118 interline[2 * start] = "|"
119 interline[2 * start] = "|"
119 else:
120 else:
120 nodeline[2 * end] = "+"
121 nodeline[2 * end] = "+"
121 if start > end:
122 if start > end:
122 (start, end) = (end,start)
123 (start, end) = (end,start)
123 for i in range(2 * start + 1, 2 * end):
124 for i in range(2 * start + 1, 2 * end):
124 if nodeline[i] != "+":
125 if nodeline[i] != "+":
125 nodeline[i] = "-"
126 nodeline[i] = "-"
126
127
127 def get_padding_line(ni, n_columns, edges):
128 def get_padding_line(ni, n_columns, edges):
128 line = []
129 line = []
129 line.extend(["|", " "] * ni)
130 line.extend(["|", " "] * ni)
130 if (ni, ni - 1) in edges or (ni, ni) in edges:
131 if (ni, ni - 1) in edges or (ni, ni) in edges:
131 # (ni, ni - 1) (ni, ni)
132 # (ni, ni - 1) (ni, ni)
132 # | | | | | | | |
133 # | | | | | | | |
133 # +---o | | o---+
134 # +---o | | o---+
134 # | | c | | c | |
135 # | | c | | c | |
135 # | |/ / | |/ /
136 # | |/ / | |/ /
136 # | | | | | |
137 # | | | | | |
137 c = "|"
138 c = "|"
138 else:
139 else:
139 c = " "
140 c = " "
140 line.extend([c, " "])
141 line.extend([c, " "])
141 line.extend(["|", " "] * (n_columns - ni - 1))
142 line.extend(["|", " "] * (n_columns - ni - 1))
142 return line
143 return line
143
144
144 def ascii(ui, grapher):
145 def ascii(ui, grapher):
145 """prints an ASCII graph of the DAG returned by the grapher
146 """prints an ASCII graph of the DAG returned by the grapher
146
147
147 grapher is a generator that emits tuples with the following elements:
148 grapher is a generator that emits tuples with the following elements:
148
149
149 - Character to use as node's symbol.
150 - Character to use as node's symbol.
150 - List of lines to display as the node's text.
151 - List of lines to display as the node's text.
151 - Column of the current node in the set of ongoing edges.
152 - Column of the current node in the set of ongoing edges.
152 - Edges; a list of (col, next_col) indicating the edges between
153 - Edges; a list of (col, next_col) indicating the edges between
153 the current node and its parents.
154 the current node and its parents.
154 - Number of columns (ongoing edges) in the current revision.
155 - Number of columns (ongoing edges) in the current revision.
155 - The difference between the number of columns (ongoing edges)
156 - The difference between the number of columns (ongoing edges)
156 in the next revision and the number of columns (ongoing edges)
157 in the next revision and the number of columns (ongoing edges)
157 in the current revision. That is: -1 means one column removed;
158 in the current revision. That is: -1 means one column removed;
158 0 means no columns added or removed; 1 means one column added.
159 0 means no columns added or removed; 1 means one column added.
159 """
160 """
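# As a purely illustrative example of one such tuple: a merge changeset
# sitting in the second of three ongoing columns, whose two parents are
# already tracked in columns 0 and 2, could arrive as
#   ('o', ['changeset: 12:...'], 1, [(1, 0), (1, 1)], 3, -1)
# (one column disappears because two edges converge on this node, so the
# second edge target is that parent's index in the *next* column set).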
160 prev_n_columns_diff = 0
161 prev_n_columns_diff = 0
161 prev_node_index = 0
162 prev_node_index = 0
162 for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher:
163 for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher:
163
164
164 assert -2 < n_columns_diff < 2
165 assert -2 < n_columns_diff < 2
165 if n_columns_diff == -1:
166 if n_columns_diff == -1:
166 # Transform
167 # Transform
167 #
168 #
168 # | | | | | |
169 # | | | | | |
169 # o | | into o---+
170 # o | | into o---+
170 # |X / |/ /
171 # |X / |/ /
171 # | | | |
172 # | | | |
172 fix_long_right_edges(edges)
173 fix_long_right_edges(edges)
173
174
174 # add_padding_line says whether to rewrite
175 # add_padding_line says whether to rewrite
175 #
176 #
176 # | | | | | | | |
177 # | | | | | | | |
177 # | o---+ into | o---+
178 # | o---+ into | o---+
178 # | / / | | | # <--- padding line
179 # | / / | | | # <--- padding line
179 # o | | | / /
180 # o | | | / /
180 # o | |
181 # o | |
181 add_padding_line = (len(node_lines) > 2 and
182 add_padding_line = (len(node_lines) > 2 and
182 n_columns_diff == -1 and
183 n_columns_diff == -1 and
183 [x for (x, y) in edges if x + 1 < y])
184 [x for (x, y) in edges if x + 1 < y])
184
185
185 # fix_nodeline_tail says whether to rewrite
186 # fix_nodeline_tail says whether to rewrite
186 #
187 #
187 # | | o | | | | o | |
188 # | | o | | | | o | |
188 # | | |/ / | | |/ /
189 # | | |/ / | | |/ /
189 # | o | | into | o / / # <--- fixed nodeline tail
190 # | o | | into | o / / # <--- fixed nodeline tail
190 # | |/ / | |/ /
191 # | |/ / | |/ /
191 # o | | o | |
192 # o | | o | |
192 fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line
193 fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line
193
194
194 # nodeline is the line containing the node character (typically o)
195 # nodeline is the line containing the node character (typically o)
195 nodeline = ["|", " "] * node_index
196 nodeline = ["|", " "] * node_index
196 nodeline.extend([node_ch, " "])
197 nodeline.extend([node_ch, " "])
197
198
198 nodeline.extend(
199 nodeline.extend(
199 get_nodeline_edges_tail(
200 get_nodeline_edges_tail(
200 node_index, prev_node_index, n_columns, n_columns_diff,
201 node_index, prev_node_index, n_columns, n_columns_diff,
201 prev_n_columns_diff, fix_nodeline_tail))
202 prev_n_columns_diff, fix_nodeline_tail))
202
203
203 # shift_interline is the line containing the non-vertical
204 # shift_interline is the line containing the non-vertical
204 # edges between this entry and the next
205 # edges between this entry and the next
205 shift_interline = ["|", " "] * node_index
206 shift_interline = ["|", " "] * node_index
206 if n_columns_diff == -1:
207 if n_columns_diff == -1:
207 n_spaces = 1
208 n_spaces = 1
208 edge_ch = "/"
209 edge_ch = "/"
209 elif n_columns_diff == 0:
210 elif n_columns_diff == 0:
210 n_spaces = 2
211 n_spaces = 2
211 edge_ch = "|"
212 edge_ch = "|"
212 else:
213 else:
213 n_spaces = 3
214 n_spaces = 3
214 edge_ch = "\\"
215 edge_ch = "\\"
215 shift_interline.extend(n_spaces * [" "])
216 shift_interline.extend(n_spaces * [" "])
216 shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1))
217 shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1))
217
218
218 # draw edges from the current node to its parents
219 # draw edges from the current node to its parents
219 draw_edges(edges, nodeline, shift_interline)
220 draw_edges(edges, nodeline, shift_interline)
220
221
221 # lines is the list of all graph lines to print
222 # lines is the list of all graph lines to print
222 lines = [nodeline]
223 lines = [nodeline]
223 if add_padding_line:
224 if add_padding_line:
224 lines.append(get_padding_line(node_index, n_columns, edges))
225 lines.append(get_padding_line(node_index, n_columns, edges))
225 lines.append(shift_interline)
226 lines.append(shift_interline)
226
227
227 # make sure that there are as many graph lines as there are
228 # make sure that there are as many graph lines as there are
228 # log strings
229 # log strings
229 while len(node_lines) < len(lines):
230 while len(node_lines) < len(lines):
230 node_lines.append("")
231 node_lines.append("")
231 if len(lines) < len(node_lines):
232 if len(lines) < len(node_lines):
232 extra_interline = ["|", " "] * (n_columns + n_columns_diff)
233 extra_interline = ["|", " "] * (n_columns + n_columns_diff)
233 while len(lines) < len(node_lines):
234 while len(lines) < len(node_lines):
234 lines.append(extra_interline)
235 lines.append(extra_interline)
235
236
236 # print lines
237 # print lines
237 indentation_level = max(n_columns, n_columns + n_columns_diff)
238 indentation_level = max(n_columns, n_columns + n_columns_diff)
238 for (line, logstr) in zip(lines, node_lines):
239 for (line, logstr) in zip(lines, node_lines):
239 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
240 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
240 ui.write(ln.rstrip() + '\n')
241 ui.write(ln.rstrip() + '\n')
241
242
242 # ... and start over
243 # ... and start over
243 prev_node_index = node_index
244 prev_node_index = node_index
244 prev_n_columns_diff = n_columns_diff
245 prev_n_columns_diff = n_columns_diff
245
246
246 def get_revs(repo, rev_opt):
247 def get_revs(repo, rev_opt):
247 if rev_opt:
248 if rev_opt:
248 revs = revrange(repo, rev_opt)
249 revs = revrange(repo, rev_opt)
249 return (max(revs), min(revs))
250 return (max(revs), min(revs))
250 else:
251 else:
251 return (len(repo) - 1, 0)
252 return (len(repo) - 1, 0)
252
253
253 def check_unsupported_flags(opts):
254 def check_unsupported_flags(opts):
254 for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
255 for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
255 "only_merges", "user", "only_branch", "prune", "newest_first",
256 "only_merges", "user", "only_branch", "prune", "newest_first",
256 "no_merges", "include", "exclude"]:
257 "no_merges", "include", "exclude"]:
257 if op in opts and opts[op]:
258 if op in opts and opts[op]:
258 raise util.Abort(_("--graph option is incompatible with --%s") % op)
259 raise util.Abort(_("--graph option is incompatible with --%s") % op)
259
260
260 def graphlog(ui, repo, path=None, **opts):
261 def graphlog(ui, repo, path=None, **opts):
261 """show revision history alongside an ASCII revision graph
262 """show revision history alongside an ASCII revision graph
262
263
263 Print a revision history alongside a revision graph drawn with
264 Print a revision history alongside a revision graph drawn with
264 ASCII characters.
265 ASCII characters.
265
266
266 Nodes printed as an @ character are parents of the working
267 Nodes printed as an @ character are parents of the working
267 directory.
268 directory.
268 """
269 """
269
270
270 check_unsupported_flags(opts)
271 check_unsupported_flags(opts)
271 limit = cmdutil.loglimit(opts)
272 limit = cmdutil.loglimit(opts)
272 start, stop = get_revs(repo, opts["rev"])
273 start, stop = get_revs(repo, opts["rev"])
273 stop = max(stop, start - limit + 1)
274 stop = max(stop, start - limit + 1)
274 if start == nullrev:
275 if start == nullrev:
275 return
276 return
276
277
277 if path:
278 if path:
278 path = util.canonpath(repo.root, os.getcwd(), path)
279 path = util.canonpath(repo.root, os.getcwd(), path)
279 if path: # could be reset in canonpath
280 if path: # could be reset in canonpath
280 revdag = filerevs(repo, path, start, stop)
281 revdag = filerevs(repo, path, start, stop)
281 else:
282 else:
282 revdag = revisions(repo, start, stop)
283 revdag = revisions(repo, start, stop)
283
284
284 graphdag = graphabledag(ui, repo, revdag, opts)
285 graphdag = graphabledag(ui, repo, revdag, opts)
285 ascii(ui, grapher(graphdag))
286 ascii(ui, grapher(graphdag))
286
287
287 def graphrevs(repo, nodes, opts):
288 def graphrevs(repo, nodes, opts):
288 include = set(nodes)
289 include = set(nodes)
289 limit = cmdutil.loglimit(opts)
290 limit = cmdutil.loglimit(opts)
290 count = 0
291 count = 0
291 for node in reversed(nodes):
292 for node in reversed(nodes):
292 if count >= limit:
293 if count >= limit:
293 break
294 break
294 ctx = repo[node]
295 ctx = repo[node]
295 parents = [p.rev() for p in ctx.parents() if p.node() in include]
296 parents = [p.rev() for p in ctx.parents() if p.node() in include]
296 parents.sort()
297 parents.sort()
297 yield (ctx, parents)
298 yield (ctx, parents)
298 count += 1
299 count += 1
299
300
300 def graphabledag(ui, repo, revdag, opts):
301 def graphabledag(ui, repo, revdag, opts):
301 showparents = [ctx.node() for ctx in repo[None].parents()]
302 showparents = [ctx.node() for ctx in repo[None].parents()]
302 displayer = show_changeset(ui, repo, opts, buffered=True)
303 displayer = show_changeset(ui, repo, opts, buffered=True)
303 for (ctx, parents) in revdag:
304 for (ctx, parents) in revdag:
304 displayer.show(ctx)
305 displayer.show(ctx)
305 lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1]
306 lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1]
306 char = ctx.node() in showparents and '@' or 'o'
307 char = ctx.node() in showparents and '@' or 'o'
307 yield (ctx.rev(), parents, char, lines)
308 yield (ctx.rev(), parents, char, lines)
308
309
309 def goutgoing(ui, repo, dest=None, **opts):
310 def goutgoing(ui, repo, dest=None, **opts):
310 """show the outgoing changesets alongside an ASCII revision graph
311 """show the outgoing changesets alongside an ASCII revision graph
311
312
312 Print the outgoing changesets alongside a revision graph drawn with
313 Print the outgoing changesets alongside a revision graph drawn with
313 ASCII characters.
314 ASCII characters.
314
315
315 Nodes printed as an @ character are parents of the working
316 Nodes printed as an @ character are parents of the working
316 directory.
317 directory.
317 """
318 """
318
319
319 check_unsupported_flags(opts)
320 check_unsupported_flags(opts)
320 dest, revs, checkout = hg.parseurl(
321 dest, revs, checkout = hg.parseurl(
321 ui.expandpath(dest or 'default-push', dest or 'default'),
322 ui.expandpath(dest or 'default-push', dest or 'default'),
322 opts.get('rev'))
323 opts.get('rev'))
323 if revs:
324 if revs:
324 revs = [repo.lookup(rev) for rev in revs]
325 revs = [repo.lookup(rev) for rev in revs]
325 other = hg.repository(cmdutil.remoteui(ui, opts), dest)
326 other = hg.repository(cmdutil.remoteui(ui, opts), dest)
326 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
327 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
327 o = repo.findoutgoing(other, force=opts.get('force'))
328 o = repo.findoutgoing(other, force=opts.get('force'))
328 if not o:
329 if not o:
329 ui.status(_("no changes found\n"))
330 ui.status(_("no changes found\n"))
330 return
331 return
331
332
332 o = repo.changelog.nodesbetween(o, revs)[0]
333 o = repo.changelog.nodesbetween(o, revs)[0]
333 revdag = graphrevs(repo, o, opts)
334 revdag = graphrevs(repo, o, opts)
334 graphdag = graphabledag(ui, repo, revdag, opts)
335 graphdag = graphabledag(ui, repo, revdag, opts)
335 ascii(ui, grapher(graphdag))
336 ascii(ui, grapher(graphdag))
336
337
337 def gincoming(ui, repo, source="default", **opts):
338 def gincoming(ui, repo, source="default", **opts):
338 """show the incoming changesets alongside an ASCII revision graph
339 """show the incoming changesets alongside an ASCII revision graph
339
340
340 Print the incoming changesets alongside a revision graph drawn with
341 Print the incoming changesets alongside a revision graph drawn with
341 ASCII characters.
342 ASCII characters.
342
343
343 Nodes printed as an @ character are parents of the working
344 Nodes printed as an @ character are parents of the working
344 directory.
345 directory.
345 """
346 """
346
347
347 check_unsupported_flags(opts)
348 check_unsupported_flags(opts)
348 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
349 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
349 other = hg.repository(cmdutil.remoteui(repo, opts), source)
350 other = hg.repository(cmdutil.remoteui(repo, opts), source)
350 ui.status(_('comparing with %s\n') % url.hidepassword(source))
351 ui.status(_('comparing with %s\n') % url.hidepassword(source))
351 if revs:
352 if revs:
352 revs = [other.lookup(rev) for rev in revs]
353 revs = [other.lookup(rev) for rev in revs]
353 incoming = repo.findincoming(other, heads=revs, force=opts["force"])
354 incoming = repo.findincoming(other, heads=revs, force=opts["force"])
354 if not incoming:
355 if not incoming:
355 try:
356 try:
356 os.unlink(opts["bundle"])
357 os.unlink(opts["bundle"])
357 except:
358 except:
358 pass
359 pass
359 ui.status(_("no changes found\n"))
360 ui.status(_("no changes found\n"))
360 return
361 return
361
362
362 cleanup = None
363 cleanup = None
363 try:
364 try:
364
365
365 fname = opts["bundle"]
366 fname = opts["bundle"]
366 if fname or not other.local():
367 if fname or not other.local():
367 # create a bundle (uncompressed if other repo is not local)
368 # create a bundle (uncompressed if other repo is not local)
368 if revs is None:
369 if revs is None:
369 cg = other.changegroup(incoming, "incoming")
370 cg = other.changegroup(incoming, "incoming")
370 else:
371 else:
371 cg = other.changegroupsubset(incoming, revs, 'incoming')
372 cg = other.changegroupsubset(incoming, revs, 'incoming')
372 bundletype = other.local() and "HG10BZ" or "HG10UN"
373 bundletype = other.local() and "HG10BZ" or "HG10UN"
373 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
374 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
374 # keep written bundle?
375 # keep written bundle?
375 if opts["bundle"]:
376 if opts["bundle"]:
376 cleanup = None
377 cleanup = None
377 if not other.local():
378 if not other.local():
378 # use the created uncompressed bundlerepo
379 # use the created uncompressed bundlerepo
379 other = bundlerepo.bundlerepository(ui, repo.root, fname)
380 other = bundlerepo.bundlerepository(ui, repo.root, fname)
380
381
381 chlist = other.changelog.nodesbetween(incoming, revs)[0]
382 chlist = other.changelog.nodesbetween(incoming, revs)[0]
382 revdag = graphrevs(other, chlist, opts)
383 revdag = graphrevs(other, chlist, opts)
383 graphdag = graphabledag(ui, repo, revdag, opts)
384 graphdag = graphabledag(ui, repo, revdag, opts)
384 ascii(ui, grapher(graphdag))
385 ascii(ui, grapher(graphdag))
385
386
386 finally:
387 finally:
387 if hasattr(other, 'close'):
388 if hasattr(other, 'close'):
388 other.close()
389 other.close()
389 if cleanup:
390 if cleanup:
390 os.unlink(cleanup)
391 os.unlink(cleanup)
391
392
392 def uisetup(ui):
393 def uisetup(ui):
393 '''Initialize the extension.'''
394 '''Initialize the extension.'''
394 _wrapcmd(ui, 'log', commands.table, graphlog)
395 _wrapcmd(ui, 'log', commands.table, graphlog)
395 _wrapcmd(ui, 'incoming', commands.table, gincoming)
396 _wrapcmd(ui, 'incoming', commands.table, gincoming)
396 _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
397 _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
397
398
398 def _wrapcmd(ui, cmd, table, wrapfn):
399 def _wrapcmd(ui, cmd, table, wrapfn):
399 '''wrap the command'''
400 '''wrap the command'''
400 def graph(orig, *args, **kwargs):
401 def graph(orig, *args, **kwargs):
401 if kwargs['graph']:
402 if kwargs['graph']:
402 return wrapfn(*args, **kwargs)
403 return wrapfn(*args, **kwargs)
403 return orig(*args, **kwargs)
404 return orig(*args, **kwargs)
404 entry = extensions.wrapcommand(table, cmd, graph)
405 entry = extensions.wrapcommand(table, cmd, graph)
405 entry[1].append(('G', 'graph', None, _("show the revision DAG")))
406 entry[1].append(('G', 'graph', None, _("show the revision DAG")))
406
407
407 cmdtable = {
408 cmdtable = {
408 "glog":
409 "glog":
409 (graphlog,
410 (graphlog,
410 [('l', 'limit', '', _('limit number of changes displayed')),
411 [('l', 'limit', '', _('limit number of changes displayed')),
411 ('p', 'patch', False, _('show patch')),
412 ('p', 'patch', False, _('show patch')),
412 ('r', 'rev', [], _('show the specified revision or range')),
413 ('r', 'rev', [], _('show the specified revision or range')),
413 ] + templateopts,
414 ] + templateopts,
414 _('hg glog [OPTION]... [FILE]')),
415 _('hg glog [OPTION]... [FILE]')),
415 }
416 }
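For orientation, a minimal setup exercising both entry points defined above might look like this (a sketch only; it assumes the extension is shipped with your Mercurial as hgext.graphlog):

[extensions]
graphlog =

$ hg glog -l 5              # the dedicated command from cmdtable
$ hg log --graph -l 5       # the same graph via the option added in uisetup()
$ hg incoming --graph       # incoming/outgoing are wrapped as well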
@@ -1,358 +1,359 b''
1 # Minimal support for git commands on an hg repository
1 # Minimal support for git commands on an hg repository
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7 '''browsing the repository in a graphical way
8 '''browsing the repository in a graphical way
8
9
9 The hgk extension allows browsing the history of a repository in a
10 The hgk extension allows browsing the history of a repository in a
10 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
11 distributed with Mercurial.)
12 distributed with Mercurial.)
12
13
13 hgk consists of two parts: a Tcl script that does the displaying and
14 hgk consists of two parts: a Tcl script that does the displaying and
14 querying of information, and an extension to Mercurial named hgk.py,
15 querying of information, and an extension to Mercurial named hgk.py,
15 which provides hooks for hgk to get information. hgk can be found in
16 which provides hooks for hgk to get information. hgk can be found in
16 the contrib directory, and hgk.py can be found in the hgext directory.
17 the contrib directory, and hgk.py can be found in the hgext directory.
17
18
18 To load the hgk.py extension, add it to your .hgrc file (you have to
19 To load the hgk.py extension, add it to your .hgrc file (you have to
19 use your global $HOME/.hgrc file, not one in a repository). You can
20 use your global $HOME/.hgrc file, not one in a repository). You can
20 specify an absolute path:
21 specify an absolute path:
21
22
22 [extensions]
23 [extensions]
23 hgk=/usr/local/lib/hgk.py
24 hgk=/usr/local/lib/hgk.py
24
25
25 Mercurial can also scan the default Python library path for a file
26 Mercurial can also scan the default Python library path for a file
26 named 'hgk.py' if you set hgk empty:
27 named 'hgk.py' if you set hgk empty:
27
28
28 [extensions]
29 [extensions]
29 hgk=
30 hgk=
30
31
31 The hg view command will launch the hgk Tcl script. For this command
32 The hg view command will launch the hgk Tcl script. For this command
32 to work, hgk must be in your search path. Alternatively, you can specify
33 to work, hgk must be in your search path. Alternatively, you can specify
33 the path to hgk in your .hgrc file:
34 the path to hgk in your .hgrc file:
34
35
35 [hgk]
36 [hgk]
36 path=/location/of/hgk
37 path=/location/of/hgk
37
38
38 hgk can make use of the extdiff extension to visualize revisions.
39 hgk can make use of the extdiff extension to visualize revisions.
39 Assuming you have already configured the extdiff vdiff command, just add:
40 Assuming you have already configured the extdiff vdiff command, just add:
40
41
41 [hgk]
42 [hgk]
42 vdiff=vdiff
43 vdiff=vdiff
43
44
44 The revision context menu will now display additional entries to fire
45 The revision context menu will now display additional entries to fire
45 vdiff on the hovered and selected revisions.'''
46 vdiff on the hovered and selected revisions.'''
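Putting the snippets above together, a complete ~/.hgrc for hgk with extdiff-based visual diffs could look like the following (the [extdiff] section and the kdiff3 program are illustrative assumptions; any extdiff command named vdiff works):

[extensions]
hgk =
extdiff =

[extdiff]
cmd.vdiff = kdiff3

[hgk]
path = /usr/local/bin/hgk
vdiff = vdiff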
46
47
47 import os
48 import os
48 from mercurial import commands, util, patch, revlog, cmdutil
49 from mercurial import commands, util, patch, revlog, cmdutil
49 from mercurial.node import nullid, nullrev, short
50 from mercurial.node import nullid, nullrev, short
50 from mercurial.i18n import _
51 from mercurial.i18n import _
51
52
52 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
53 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
53 """diff trees from two commits"""
54 """diff trees from two commits"""
54 def __difftree(repo, node1, node2, files=[]):
55 def __difftree(repo, node1, node2, files=[]):
55 assert node2 is not None
56 assert node2 is not None
56 mmap = repo[node1].manifest()
57 mmap = repo[node1].manifest()
57 mmap2 = repo[node2].manifest()
58 mmap2 = repo[node2].manifest()
58 m = cmdutil.match(repo, files)
59 m = cmdutil.match(repo, files)
59 modified, added, removed = repo.status(node1, node2, m)[:3]
60 modified, added, removed = repo.status(node1, node2, m)[:3]
60 empty = short(nullid)
61 empty = short(nullid)
61
62
62 for f in modified:
63 for f in modified:
63 # TODO get file permissions
64 # TODO get file permissions
64 ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
65 ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
65 (short(mmap[f]), short(mmap2[f]), f, f))
66 (short(mmap[f]), short(mmap2[f]), f, f))
66 for f in added:
67 for f in added:
67 ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
68 ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
68 (empty, short(mmap2[f]), f, f))
69 (empty, short(mmap2[f]), f, f))
69 for f in removed:
70 for f in removed:
70 ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
71 ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
71 (short(mmap[f]), empty, f, f))
72 (short(mmap[f]), empty, f, f))
72 ##
73 ##
73
74
74 while True:
75 while True:
75 if opts['stdin']:
76 if opts['stdin']:
76 try:
77 try:
77 line = raw_input().split(' ')
78 line = raw_input().split(' ')
78 node1 = line[0]
79 node1 = line[0]
79 if len(line) > 1:
80 if len(line) > 1:
80 node2 = line[1]
81 node2 = line[1]
81 else:
82 else:
82 node2 = None
83 node2 = None
83 except EOFError:
84 except EOFError:
84 break
85 break
85 node1 = repo.lookup(node1)
86 node1 = repo.lookup(node1)
86 if node2:
87 if node2:
87 node2 = repo.lookup(node2)
88 node2 = repo.lookup(node2)
88 else:
89 else:
89 node2 = node1
90 node2 = node1
90 node1 = repo.changelog.parents(node1)[0]
91 node1 = repo.changelog.parents(node1)[0]
91 if opts['patch']:
92 if opts['patch']:
92 if opts['pretty']:
93 if opts['pretty']:
93 catcommit(ui, repo, node2, "")
94 catcommit(ui, repo, node2, "")
94 m = cmdutil.match(repo, files)
95 m = cmdutil.match(repo, files)
95 chunks = patch.diff(repo, node1, node2, match=m,
96 chunks = patch.diff(repo, node1, node2, match=m,
96 opts=patch.diffopts(ui, {'git': True}))
97 opts=patch.diffopts(ui, {'git': True}))
97 for chunk in chunks:
98 for chunk in chunks:
98 repo.ui.write(chunk)
99 repo.ui.write(chunk)
99 else:
100 else:
100 __difftree(repo, node1, node2, files=files)
101 __difftree(repo, node1, node2, files=files)
101 if not opts['stdin']:
102 if not opts['stdin']:
102 break
103 break
103
104
104 def catcommit(ui, repo, n, prefix, ctx=None):
105 def catcommit(ui, repo, n, prefix, ctx=None):
105 nlprefix = '\n' + prefix;
106 nlprefix = '\n' + prefix;
106 if ctx is None:
107 if ctx is None:
107 ctx = repo[n]
108 ctx = repo[n]
108 ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
109 ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
109 for p in ctx.parents():
110 for p in ctx.parents():
110 ui.write("parent %s\n" % p)
111 ui.write("parent %s\n" % p)
111
112
112 date = ctx.date()
113 date = ctx.date()
113 description = ctx.description().replace("\0", "")
114 description = ctx.description().replace("\0", "")
114 lines = description.splitlines()
115 lines = description.splitlines()
115 if lines and lines[-1].startswith('committer:'):
116 if lines and lines[-1].startswith('committer:'):
116 committer = lines[-1].split(': ')[1].rstrip()
117 committer = lines[-1].split(': ')[1].rstrip()
117 else:
118 else:
118 committer = ctx.user()
119 committer = ctx.user()
119
120
120 ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
121 ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
121 ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
122 ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
122 ui.write("revision %d\n" % ctx.rev())
123 ui.write("revision %d\n" % ctx.rev())
123 ui.write("branch %s\n\n" % ctx.branch())
124 ui.write("branch %s\n\n" % ctx.branch())
124
125
125 if prefix != "":
126 if prefix != "":
126 ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
127 ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
127 else:
128 else:
128 ui.write(description + "\n")
129 ui.write(description + "\n")
129 if prefix:
130 if prefix:
130 ui.write('\0')
131 ui.write('\0')
131
132
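To make the format written above concrete, the metadata block emitted by catcommit() for a merge changeset has roughly this shape (every value here is invented for illustration):

tree 1234567890ab
parent fedcba987654
parent 0123456789ab
author John Doe <john@example.com> 1221591409 -7200
committer John Doe <john@example.com> 1221591409 -7200
revision 6789
branch default

commit message text follows the blank line after the branch field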
132 def base(ui, repo, node1, node2):
133 def base(ui, repo, node1, node2):
133 """output common ancestor information"""
134 """output common ancestor information"""
134 node1 = repo.lookup(node1)
135 node1 = repo.lookup(node1)
135 node2 = repo.lookup(node2)
136 node2 = repo.lookup(node2)
136 n = repo.changelog.ancestor(node1, node2)
137 n = repo.changelog.ancestor(node1, node2)
137 ui.write(short(n) + "\n")
138 ui.write(short(n) + "\n")
138
139
139 def catfile(ui, repo, type=None, r=None, **opts):
140 def catfile(ui, repo, type=None, r=None, **opts):
140 """cat a specific revision"""
141 """cat a specific revision"""
141 # in stdin mode, every line except the commit is prefixed with two
142 # in stdin mode, every line except the commit is prefixed with two
142 # spaces. This way our caller can find the commit without magic
143 # spaces. This way our caller can find the commit without magic
143 # strings
144 # strings
144 #
145 #
145 prefix = ""
146 prefix = ""
146 if opts['stdin']:
147 if opts['stdin']:
147 try:
148 try:
148 (type, r) = raw_input().split(' ');
149 (type, r) = raw_input().split(' ');
149 prefix = " "
150 prefix = " "
150 except EOFError:
151 except EOFError:
151 return
152 return
152
153
153 else:
154 else:
154 if not type or not r:
155 if not type or not r:
155 ui.warn(_("cat-file: type or revision not supplied\n"))
156 ui.warn(_("cat-file: type or revision not supplied\n"))
156 commands.help_(ui, 'cat-file')
157 commands.help_(ui, 'cat-file')
157
158
158 while r:
159 while r:
159 if type != "commit":
160 if type != "commit":
160 ui.warn(_("aborting hg cat-file only understands commits\n"))
161 ui.warn(_("aborting hg cat-file only understands commits\n"))
161 return 1;
162 return 1;
162 n = repo.lookup(r)
163 n = repo.lookup(r)
163 catcommit(ui, repo, n, prefix)
164 catcommit(ui, repo, n, prefix)
164 if opts['stdin']:
165 if opts['stdin']:
165 try:
166 try:
166 (type, r) = raw_input().split(' ');
167 (type, r) = raw_input().split(' ');
167 except EOFError:
168 except EOFError:
168 break
169 break
169 else:
170 else:
170 break
171 break
171
172
172 # git rev-tree is a confusing thing. You can supply a number of
173 # git rev-tree is a confusing thing. You can supply a number of
173 # commit sha1s on the command line, and it walks the commit history
174 # commit sha1s on the command line, and it walks the commit history
174 # telling you which commits are reachable from the supplied ones via
175 # telling you which commits are reachable from the supplied ones via
175 # a bitmask based on arg position.
176 # a bitmask based on arg position.
176 # You can specify a commit to stop at by starting the sha1 with ^.
177 # You can specify a commit to stop at by starting the sha1 with ^.
177 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
178 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
178 def chlogwalk():
179 def chlogwalk():
179 count = len(repo)
180 count = len(repo)
180 i = count
181 i = count
181 l = [0] * 100
182 l = [0] * 100
182 chunk = 100
183 chunk = 100
183 while True:
184 while True:
184 if chunk > i:
185 if chunk > i:
185 chunk = i
186 chunk = i
186 i = 0
187 i = 0
187 else:
188 else:
188 i -= chunk
189 i -= chunk
189
190
190 for x in xrange(0, chunk):
191 for x in xrange(0, chunk):
191 if i + x >= count:
192 if i + x >= count:
192 l[chunk - x:] = [0] * (chunk - x)
193 l[chunk - x:] = [0] * (chunk - x)
193 break
194 break
194 if full != None:
195 if full != None:
195 l[x] = repo[i + x]
196 l[x] = repo[i + x]
196 l[x].changeset() # force reading
197 l[x].changeset() # force reading
197 else:
198 else:
198 l[x] = 1
199 l[x] = 1
199 for x in xrange(chunk-1, -1, -1):
200 for x in xrange(chunk-1, -1, -1):
200 if l[x] != 0:
201 if l[x] != 0:
201 yield (i + x, full != None and l[x] or None)
202 yield (i + x, full != None and l[x] or None)
202 if i == 0:
203 if i == 0:
203 break
204 break
204
205
205 # calculate and return the reachability bitmask for sha
206 # calculate and return the reachability bitmask for sha
206 def is_reachable(ar, reachable, sha):
207 def is_reachable(ar, reachable, sha):
207 if len(ar) == 0:
208 if len(ar) == 0:
208 return 1
209 return 1
209 mask = 0
210 mask = 0
210 for i in xrange(len(ar)):
211 for i in xrange(len(ar)):
211 if sha in reachable[i]:
212 if sha in reachable[i]:
212 mask |= 1 << i
213 mask |= 1 << i
213
214
214 return mask
215 return mask
215
216
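# Illustrative reading of the mask: with two root commits supplied on the
# command line, a changeset reachable only from the second gets mask
# 0b10 == 2, one reachable from both gets 0b11 == 3, and an unreachable
# one gets 0 (and is therefore skipped by the walk below).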
216 reachable = []
217 reachable = []
217 stop_sha1 = []
218 stop_sha1 = []
218 want_sha1 = []
219 want_sha1 = []
219 count = 0
220 count = 0
220
221
221 # figure out which commits they are asking for and which ones they
222 # figure out which commits they are asking for and which ones they
222 # want us to stop on
223 # want us to stop on
223 for i in xrange(len(args)):
224 for i in xrange(len(args)):
224 if args[i].startswith('^'):
225 if args[i].startswith('^'):
225 s = repo.lookup(args[i][1:])
226 s = repo.lookup(args[i][1:])
226 stop_sha1.append(s)
227 stop_sha1.append(s)
227 want_sha1.append(s)
228 want_sha1.append(s)
228 elif args[i] != 'HEAD':
229 elif args[i] != 'HEAD':
229 want_sha1.append(repo.lookup(args[i]))
230 want_sha1.append(repo.lookup(args[i]))
230
231
231 # calculate the graph for the supplied commits
232 # calculate the graph for the supplied commits
232 for i in xrange(len(want_sha1)):
233 for i in xrange(len(want_sha1)):
233 reachable.append({});
234 reachable.append({});
234 n = want_sha1[i];
235 n = want_sha1[i];
235 visit = [n];
236 visit = [n];
236 reachable[i][n] = 1
237 reachable[i][n] = 1
237 while visit:
238 while visit:
238 n = visit.pop(0)
239 n = visit.pop(0)
239 if n in stop_sha1:
240 if n in stop_sha1:
240 continue
241 continue
241 for p in repo.changelog.parents(n):
242 for p in repo.changelog.parents(n):
242 if p not in reachable[i]:
243 if p not in reachable[i]:
243 reachable[i][p] = 1
244 reachable[i][p] = 1
244 visit.append(p)
245 visit.append(p)
245 if p in stop_sha1:
246 if p in stop_sha1:
246 continue
247 continue
247
248
248 # walk the repository looking for commits that are in our
249 # walk the repository looking for commits that are in our
249 # reachability graph
250 # reachability graph
250 for i, ctx in chlogwalk():
251 for i, ctx in chlogwalk():
251 n = repo.changelog.node(i)
252 n = repo.changelog.node(i)
252 mask = is_reachable(want_sha1, reachable, n)
253 mask = is_reachable(want_sha1, reachable, n)
253 if mask:
254 if mask:
254 parentstr = ""
255 parentstr = ""
255 if parents:
256 if parents:
256 pp = repo.changelog.parents(n)
257 pp = repo.changelog.parents(n)
257 if pp[0] != nullid:
258 if pp[0] != nullid:
258 parentstr += " " + short(pp[0])
259 parentstr += " " + short(pp[0])
259 if pp[1] != nullid:
260 if pp[1] != nullid:
260 parentstr += " " + short(pp[1])
261 parentstr += " " + short(pp[1])
261 if not full:
262 if not full:
262 ui.write("%s%s\n" % (short(n), parentstr))
263 ui.write("%s%s\n" % (short(n), parentstr))
263 elif full == "commit":
264 elif full == "commit":
264 ui.write("%s%s\n" % (short(n), parentstr))
265 ui.write("%s%s\n" % (short(n), parentstr))
265 catcommit(ui, repo, n, ' ', ctx)
266 catcommit(ui, repo, n, ' ', ctx)
266 else:
267 else:
267 (p1, p2) = repo.changelog.parents(n)
268 (p1, p2) = repo.changelog.parents(n)
268 (h, h1, h2) = map(short, (n, p1, p2))
269 (h, h1, h2) = map(short, (n, p1, p2))
269 (i1, i2) = map(repo.changelog.rev, (p1, p2))
270 (i1, i2) = map(repo.changelog.rev, (p1, p2))
270
271
271 date = ctx.date()[0]
272 date = ctx.date()[0]
272 ui.write("%s %s:%s" % (date, h, mask))
273 ui.write("%s %s:%s" % (date, h, mask))
273 mask = is_reachable(want_sha1, reachable, p1)
274 mask = is_reachable(want_sha1, reachable, p1)
274 if i1 != nullrev and mask > 0:
275 if i1 != nullrev and mask > 0:
275 ui.write("%s:%s " % (h1, mask))
276 ui.write("%s:%s " % (h1, mask))
276 mask = is_reachable(want_sha1, reachable, p2)
277 mask = is_reachable(want_sha1, reachable, p2)
277 if i2 != nullrev and mask > 0:
278 if i2 != nullrev and mask > 0:
278 ui.write("%s:%s " % (h2, mask))
279 ui.write("%s:%s " % (h2, mask))
279 ui.write("\n")
280 ui.write("\n")
280 if maxnr and count >= maxnr:
281 if maxnr and count >= maxnr:
281 break
282 break
282 count += 1
283 count += 1
283
284
284 def revparse(ui, repo, *revs, **opts):
285 def revparse(ui, repo, *revs, **opts):
285 """parse given revisions"""
286 """parse given revisions"""
286 def revstr(rev):
287 def revstr(rev):
287 if rev == 'HEAD':
288 if rev == 'HEAD':
288 rev = 'tip'
289 rev = 'tip'
289 return revlog.hex(repo.lookup(rev))
290 return revlog.hex(repo.lookup(rev))
290
291
291 for r in revs:
292 for r in revs:
292 revrange = r.split(':', 1)
293 revrange = r.split(':', 1)
293 ui.write('%s\n' % revstr(revrange[0]))
294 ui.write('%s\n' % revstr(revrange[0]))
294 if len(revrange) == 2:
295 if len(revrange) == 2:
295 ui.write('^%s\n' % revstr(revrange[1]))
296 ui.write('^%s\n' % revstr(revrange[1]))
296
297
297 # git rev-list tries to order things by date, and has the ability to stop
298 # git rev-list tries to order things by date, and has the ability to stop
298 # at a given commit without walking the whole repo. TODO add the stop
299 # at a given commit without walking the whole repo. TODO add the stop
299 # parameter
300 # parameter
300 def revlist(ui, repo, *revs, **opts):
301 def revlist(ui, repo, *revs, **opts):
301 """print revisions"""
302 """print revisions"""
302 if opts['header']:
303 if opts['header']:
303 full = "commit"
304 full = "commit"
304 else:
305 else:
305 full = None
306 full = None
306 copy = [x for x in revs]
307 copy = [x for x in revs]
307 revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
308 revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
308
309
309 def config(ui, repo, **opts):
310 def config(ui, repo, **opts):
310 """print extension options"""
311 """print extension options"""
311 def writeopt(name, value):
312 def writeopt(name, value):
312 ui.write('k=%s\nv=%s\n' % (name, value))
313 ui.write('k=%s\nv=%s\n' % (name, value))
313
314
314 writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
315 writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
315
316
316
317
317 def view(ui, repo, *etc, **opts):
318 def view(ui, repo, *etc, **opts):
318 "start interactive history viewer"
319 "start interactive history viewer"
319 os.chdir(repo.root)
320 os.chdir(repo.root)
320 optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
321 optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
321 cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
322 cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
322 ui.debug(_("running %s\n") % cmd)
323 ui.debug(_("running %s\n") % cmd)
323 util.system(cmd)
324 util.system(cmd)
324
325
325 cmdtable = {
326 cmdtable = {
326 "^view":
327 "^view":
327 (view,
328 (view,
328 [('l', 'limit', '', _('limit number of changes displayed'))],
329 [('l', 'limit', '', _('limit number of changes displayed'))],
329 _('hg view [-l LIMIT] [REVRANGE]')),
330 _('hg view [-l LIMIT] [REVRANGE]')),
330 "debug-diff-tree":
331 "debug-diff-tree":
331 (difftree,
332 (difftree,
332 [('p', 'patch', None, _('generate patch')),
333 [('p', 'patch', None, _('generate patch')),
333 ('r', 'recursive', None, _('recursive')),
334 ('r', 'recursive', None, _('recursive')),
334 ('P', 'pretty', None, _('pretty')),
335 ('P', 'pretty', None, _('pretty')),
335 ('s', 'stdin', None, _('stdin')),
336 ('s', 'stdin', None, _('stdin')),
336 ('C', 'copy', None, _('detect copies')),
337 ('C', 'copy', None, _('detect copies')),
337 ('S', 'search', "", _('search'))],
338 ('S', 'search', "", _('search'))],
338 _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
339 _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
339 "debug-cat-file":
340 "debug-cat-file":
340 (catfile,
341 (catfile,
341 [('s', 'stdin', None, _('stdin'))],
342 [('s', 'stdin', None, _('stdin'))],
342 _('hg debug-cat-file [OPTION]... TYPE FILE')),
343 _('hg debug-cat-file [OPTION]... TYPE FILE')),
343 "debug-config":
344 "debug-config":
344 (config, [], _('hg debug-config')),
345 (config, [], _('hg debug-config')),
345 "debug-merge-base":
346 "debug-merge-base":
346 (base, [], _('hg debug-merge-base node node')),
347 (base, [], _('hg debug-merge-base node node')),
347 "debug-rev-parse":
348 "debug-rev-parse":
348 (revparse,
349 (revparse,
349 [('', 'default', '', _('ignored'))],
350 [('', 'default', '', _('ignored'))],
350 _('hg debug-rev-parse REV')),
351 _('hg debug-rev-parse REV')),
351 "debug-rev-list":
352 "debug-rev-list":
352 (revlist,
353 (revlist,
353 [('H', 'header', None, _('header')),
354 [('H', 'header', None, _('header')),
354 ('t', 'topo-order', None, _('topo-order')),
355 ('t', 'topo-order', None, _('topo-order')),
355 ('p', 'parents', None, _('parents')),
356 ('p', 'parents', None, _('parents')),
356 ('n', 'max-count', 0, _('max-count'))],
357 ('n', 'max-count', 0, _('max-count'))],
357 _('hg debug-rev-list [options] revs')),
358 _('hg debug-rev-list [options] revs')),
358 }
359 }
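The table above is mostly plumbing for the hgk Tcl script, but the commands can also be run by hand. A few hedged invocations based purely on the synopses above (REV1 and REV2 are placeholders):

$ hg view -l 100
$ hg debug-merge-base REV1 REV2
$ hg debug-rev-list --header --max-count 5 tip
$ hg debug-diff-tree -p REV1 REV2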
@@ -1,96 +1,97 b''
1 # Mercurial extension to make it easy to refer to the parent of a revision
1 # Mercurial extension to make it easy to refer to the parent of a revision
2 #
2 #
3 # Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
3 # Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7 '''\
8 '''\
8 use suffixes to refer to ancestor revisions
9 use suffixes to refer to ancestor revisions
9
10
10 This extension allows you to use git-style suffixes to refer to the
11 This extension allows you to use git-style suffixes to refer to the
11 ancestors of a specific revision.
12 ancestors of a specific revision.
12
13
13 For example, if you can refer to a revision as "foo", then:
14 For example, if you can refer to a revision as "foo", then:
14
15
15 - foo^N = Nth parent of foo:
16 - foo^N = Nth parent of foo:
16 foo^0 = foo
17 foo^0 = foo
17 foo^1 = first parent of foo
18 foo^1 = first parent of foo
18 foo^2 = second parent of foo
19 foo^2 = second parent of foo
19 foo^ = foo^1
20 foo^ = foo^1
20
21
21 - foo~N = Nth ancestor of foo, following first parents only
22 - foo~N = Nth ancestor of foo, following first parents only
22 foo~0 = foo
23 foo~0 = foo
23 foo~1 = foo^1 = foo^ = first parent of foo
24 foo~1 = foo^1 = foo^ = first parent of foo
24 foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo
25 foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo
25 '''
26 '''
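In practice the suffixes compose with any revision identifier Mercurial already accepts, for example (the revision names below are placeholders):

$ hg log -r "tip~3"        # third ancestor of tip along first parents
$ hg diff -r "1.0^2"       # second parent of the changeset tagged 1.0 (a merge)
$ hg update "somebranch^"  # first parent of the head of somebranch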
26 from mercurial import error
27 from mercurial import error
27
28
28 def reposetup(ui, repo):
29 def reposetup(ui, repo):
29 if not repo.local():
30 if not repo.local():
30 return
31 return
31
32
32 class parentrevspecrepo(repo.__class__):
33 class parentrevspecrepo(repo.__class__):
33 def lookup(self, key):
34 def lookup(self, key):
34 try:
35 try:
35 _super = super(parentrevspecrepo, self)
36 _super = super(parentrevspecrepo, self)
36 return _super.lookup(key)
37 return _super.lookup(key)
37 except error.RepoError:
38 except error.RepoError:
38 pass
39 pass
39
40
40 circ = key.find('^')
41 circ = key.find('^')
41 tilde = key.find('~')
42 tilde = key.find('~')
42 if circ < 0 and tilde < 0:
43 if circ < 0 and tilde < 0:
43 raise
44 raise
44 elif circ >= 0 and tilde >= 0:
45 elif circ >= 0 and tilde >= 0:
45 end = min(circ, tilde)
46 end = min(circ, tilde)
46 else:
47 else:
47 end = max(circ, tilde)
48 end = max(circ, tilde)
48
49
49 cl = self.changelog
50 cl = self.changelog
50 base = key[:end]
51 base = key[:end]
51 try:
52 try:
52 node = _super.lookup(base)
53 node = _super.lookup(base)
53 except error.RepoError:
54 except error.RepoError:
54 # eek - reraise the first error
55 # eek - reraise the first error
55 return _super.lookup(key)
56 return _super.lookup(key)
56
57
57 rev = cl.rev(node)
58 rev = cl.rev(node)
58 suffix = key[end:]
59 suffix = key[end:]
59 i = 0
60 i = 0
60 while i < len(suffix):
61 while i < len(suffix):
61 # foo^N => Nth parent of foo
62 # foo^N => Nth parent of foo
62 # foo^0 == foo
63 # foo^0 == foo
63 # foo^1 == foo^ == 1st parent of foo
64 # foo^1 == foo^ == 1st parent of foo
64 # foo^2 == 2nd parent of foo
65 # foo^2 == 2nd parent of foo
65 if suffix[i] == '^':
66 if suffix[i] == '^':
66 j = i + 1
67 j = i + 1
67 p = cl.parentrevs(rev)
68 p = cl.parentrevs(rev)
68 if j < len(suffix) and suffix[j].isdigit():
69 if j < len(suffix) and suffix[j].isdigit():
69 j += 1
70 j += 1
70 n = int(suffix[i+1:j])
71 n = int(suffix[i+1:j])
71 if n > 2 or n == 2 and p[1] == -1:
72 if n > 2 or n == 2 and p[1] == -1:
72 raise
73 raise
73 else:
74 else:
74 n = 1
75 n = 1
75 if n:
76 if n:
76 rev = p[n - 1]
77 rev = p[n - 1]
77 i = j
78 i = j
78 # foo~N => Nth ancestor of foo, following first parents only
79 # foo~N => Nth ancestor of foo, following first parents only
79 # foo~0 = foo
80 # foo~0 = foo
80 # foo~1 = foo^1 == foo^ == 1st parent of foo
81 # foo~1 = foo^1 == foo^ == 1st parent of foo
81 # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo
82 # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo
82 elif suffix[i] == '~':
83 elif suffix[i] == '~':
83 j = i + 1
84 j = i + 1
84 while j < len(suffix) and suffix[j].isdigit():
85 while j < len(suffix) and suffix[j].isdigit():
85 j += 1
86 j += 1
86 if j == i + 1:
87 if j == i + 1:
87 raise
88 raise
88 n = int(suffix[i+1:j])
89 n = int(suffix[i+1:j])
89 for k in xrange(n):
90 for k in xrange(n):
90 rev = cl.parentrevs(rev)[0]
91 rev = cl.parentrevs(rev)[0]
91 i = j
92 i = j
92 else:
93 else:
93 raise
94 raise
94 return cl.node(rev)
95 return cl.node(rev)
95
96
96 repo.__class__ = parentrevspecrepo
97 repo.__class__ = parentrevspecrepo
@@ -1,125 +1,126 b''
1 # win32mbcs.py -- MBCS filename support for Mercurial
1 # win32mbcs.py -- MBCS filename support for Mercurial
2 #
2 #
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
4 #
4 #
5 # Version: 0.2
5 # Version: 0.2
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2, incorporated herein by reference.
9 # GNU General Public License version 2, incorporated herein by reference.
10 #
10 #
11
11 """allow to use MBCS path with problematic encoding.
12 """allow to use MBCS path with problematic encoding.
12
13
13 Some MBCS encodings are not well suited to certain path operations
14 Some MBCS encodings are not well suited to certain path operations
14 (e.g. splitting paths, case conversion) on their encoded bytes. We call
15 (e.g. splitting paths, case conversion) on their encoded bytes. We call
15 such an encoding (e.g. shift_jis and big5) a "problematic encoding".
16 such an encoding (e.g. shift_jis and big5) a "problematic encoding".
16 This extension can be used to fix the issue with those encodings by
17 This extension can be used to fix the issue with those encodings by
17 wrapping some functions so that paths are converted to unicode strings
18 wrapping some functions so that paths are converted to unicode strings
18 before the path operation.
19 before the path operation.
19
20
20 This extension is useful for:
21 This extension is useful for:
21 * Japanese Windows users using shift_jis encoding.
22 * Japanese Windows users using shift_jis encoding.
22 * Chinese Windows users using big5 encoding.
23 * Chinese Windows users using big5 encoding.
23 * All users who use a repository with one of the problematic encodings on
24 * All users who use a repository with one of the problematic encodings on
24 a case-insensitive file system.
25 a case-insensitive file system.
25
26
26 This extension is not needed for:
27 This extension is not needed for:
27 * Any user who uses only ASCII characters in paths.
28 * Any user who uses only ASCII characters in paths.
28 * Any user who does not use any of the problematic encodings.
29 * Any user who does not use any of the problematic encodings.
29
30
30 Note that there are some limitations on using this extension:
31 Note that there are some limitations on using this extension:
31 * You should use a single encoding in one repository.
32 * You should use a single encoding in one repository.
32 * You should set the same encoding for the repository via the locale or
33 * You should set the same encoding for the repository via the locale or
33 HGENCODING.
34 HGENCODING.
34
35
35 To use this extension, enable the extension in .hg/hgrc or ~/.hgrc:
36 To use this extension, enable the extension in .hg/hgrc or ~/.hgrc:
36
37
37 [extensions]
38 [extensions]
38 hgext.win32mbcs =
39 hgext.win32mbcs =
39
40
40 Path encoding conversions are done between unicode and
41 Path encoding conversions are done between unicode and
41 encoding.encoding, which is decided by Mercurial from the current locale
42 encoding.encoding, which is decided by Mercurial from the current locale
42 setting or HGENCODING.
43 setting or HGENCODING.
43
44
44 """
45 """
45
46
46 import os
47 import os
47 from mercurial.i18n import _
48 from mercurial.i18n import _
48 from mercurial import util, encoding
49 from mercurial import util, encoding
49
50
50 def decode(arg):
51 def decode(arg):
51 if isinstance(arg, str):
52 if isinstance(arg, str):
52 uarg = arg.decode(encoding.encoding)
53 uarg = arg.decode(encoding.encoding)
53 if arg == uarg.encode(encoding.encoding):
54 if arg == uarg.encode(encoding.encoding):
54 return uarg
55 return uarg
55 raise UnicodeError("Not local encoding")
56 raise UnicodeError("Not local encoding")
56 elif isinstance(arg, tuple):
57 elif isinstance(arg, tuple):
57 return tuple(map(decode, arg))
58 return tuple(map(decode, arg))
58 elif isinstance(arg, list):
59 elif isinstance(arg, list):
59 return map(decode, arg)
60 return map(decode, arg)
60 return arg
61 return arg
61
62
62 def encode(arg):
63 def encode(arg):
63 if isinstance(arg, unicode):
64 if isinstance(arg, unicode):
64 return arg.encode(encoding.encoding)
65 return arg.encode(encoding.encoding)
65 elif isinstance(arg, tuple):
66 elif isinstance(arg, tuple):
66 return tuple(map(encode, arg))
67 return tuple(map(encode, arg))
67 elif isinstance(arg, list):
68 elif isinstance(arg, list):
68 return map(encode, arg)
69 return map(encode, arg)
69 return arg
70 return arg
70
71
71 def wrapper(func, args):
72 def wrapper(func, args):
72 # check argument is unicode, then call original
73 # check argument is unicode, then call original
73 for arg in args:
74 for arg in args:
74 if isinstance(arg, unicode):
75 if isinstance(arg, unicode):
75 return func(*args)
76 return func(*args)
76
77
77 try:
78 try:
78 # convert arguments to unicode, call func, then convert back
79 # convert arguments to unicode, call func, then convert back
79 return encode(func(*decode(args)))
80 return encode(func(*decode(args)))
80 except UnicodeError:
81 except UnicodeError:
81 # If the arguments are not encoded with encoding.encoding, abort
82 # If the arguments are not encoded with encoding.encoding, abort
82 # rather than let the original function mangle the path.
83 # rather than let the original function mangle the path.
83 raise util.Abort(_("[win32mbcs] filename conversion fail with"
84 raise util.Abort(_("[win32mbcs] filename conversion fail with"
84 " %s encoding\n") % (encoding.encoding))
85 " %s encoding\n") % (encoding.encoding))
85
86
86 def wrapname(name):
87 def wrapname(name):
87 idx = name.rfind('.')
88 idx = name.rfind('.')
88 module = name[:idx]
89 module = name[:idx]
89 name = name[idx+1:]
90 name = name[idx+1:]
90 module = eval(module)
91 module = eval(module)
91 func = getattr(module, name)
92 func = getattr(module, name)
92 def f(*args):
93 def f(*args):
93 return wrapper(func, args)
94 return wrapper(func, args)
94 try:
95 try:
95 f.__name__ = func.__name__ # fail with python23
96 f.__name__ = func.__name__ # fail with python23
96 except Exception:
97 except Exception:
97 pass
98 pass
98 setattr(module, name, f)
99 setattr(module, name, f)
99
100
100 # List of functions to be wrapped.
101 # List of functions to be wrapped.
101 # NOTE: os.path.dirname() and os.path.basename() are safe because
102 # NOTE: os.path.dirname() and os.path.basename() are safe because
102 # they use result of os.path.split()
103 # they use result of os.path.split()
103 funcs = '''os.path.join os.path.split os.path.splitext
104 funcs = '''os.path.join os.path.split os.path.splitext
104 os.path.splitunc os.path.normpath os.path.normcase os.makedirs
105 os.path.splitunc os.path.normpath os.path.normcase os.makedirs
105 util.endswithsep util.splitpath util.checkcase util.fspath'''
106 util.endswithsep util.splitpath util.checkcase util.fspath'''
106
107
107 # codec and alias names of sjis and big5 to be faked.
108 # codec and alias names of sjis and big5 to be faked.
108 problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
109 problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
109 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
110 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
110 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
111 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
111 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213'''
112 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213'''
112
113
113 def reposetup(ui, repo):
114 def reposetup(ui, repo):
114 # TODO: decide use of config section for this extension
115 # TODO: decide use of config section for this extension
115 if not os.path.supports_unicode_filenames:
116 if not os.path.supports_unicode_filenames:
116 ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
117 ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
117 return
118 return
118
119
119 # fake is only for relevant environment.
120 # fake is only for relevant environment.
120 if encoding.encoding.lower() in problematic_encodings.split():
121 if encoding.encoding.lower() in problematic_encodings.split():
121 for f in funcs.split():
122 for f in funcs.split():
122 wrapname(f)
123 wrapname(f)
123 ui.debug(_("[win32mbcs] activated with encoding: %s\n")
124 ui.debug(_("[win32mbcs] activated with encoding: %s\n")
124 % encoding.encoding)
125 % encoding.encoding)
125
126
@@ -1,144 +1,145 b''
1 # changelog bisection for mercurial
1 # changelog bisection for mercurial
2 #
2 #
3 # Copyright 2007 Matt Mackall
3 # Copyright 2007 Matt Mackall
4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
5 #
5 # Inspired by git bisect, extension skeleton taken from mq.py.
6 # Inspired by git bisect, extension skeleton taken from mq.py.
6 #
7 #
7 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2, incorporated herein by reference.
9 # GNU General Public License version 2, incorporated herein by reference.
9
10
10 import os
11 import os
11 from i18n import _
12 from i18n import _
12 from node import short, hex
13 from node import short, hex
13 import util
14 import util
14
15
15 def bisect(changelog, state):
16 def bisect(changelog, state):
16 """find the next node (if any) for testing during a bisect search.
17 """find the next node (if any) for testing during a bisect search.
17 returns a (nodes, number, good) tuple.
18 returns a (nodes, number, good) tuple.
18
19
19 'nodes' is the final result of the bisect if 'number' is 0.
20 'nodes' is the final result of the bisect if 'number' is 0.
20 Otherwise 'number' indicates the remaining possible candidates for
21 Otherwise 'number' indicates the remaining possible candidates for
21 the search and 'nodes' contains the next bisect target.
22 the search and 'nodes' contains the next bisect target.
22 'good' is True if bisect is searching for a first good changeset, False
23 'good' is True if bisect is searching for a first good changeset, False
23 if searching for a first bad one.
24 if searching for a first bad one.
24 """
25 """
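# Illustratively, a caller looping on this function might see
#   ([node], 5, 0)  -> test 'node' next, roughly five candidates remain,
#                      and we are hunting the first bad changeset
#   ([node], 0, 0)  -> finished: 'node' is the first bad changeset
# (values invented; 'good' is true when hunting the first good one instead)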

    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        # build ancestors array
        ancestors = [[]] * (len(changelog) + 1) # an extra for [-1]

        # clear good revs from array
        for node in goodrevs:
            ancestors[node] = None
        for rev in xrange(len(changelog), -1, -1):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = 0
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = 1
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))
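    # Added commentary: when buildancestors returns a usable array,
    #   ancestors[rev] is None  means rev is a good rev or an ancestor of one,
    #                           so it cannot contain the change being hunted;
    #   ancestors[rev] == []    means rev has not been ruled out yet; whether
    #                           it is a real candidate also requires it to be
    #                           an ancestor of 'badrev', which the walk below
    #                           checks.  The per-candidate ancestor lists are
    #                           filled in lazily by the scoring loop further
    #                           down.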

    # build children dict
    children = {}
    visit = [badrev]
    candidates = []
    while visit:
        rev = visit.pop(0)
        if ancestors[rev] == []:
            candidates.append(rev)
        for prev in clparents(rev):
            if prev != -1:
                if prev in children:
                    children[prev].append(rev)
                else:
                    children[prev] = [rev]
                    visit.append(prev)
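    # Added commentary: 'candidates' now holds every ancestor of 'badrev'
    # (including badrev itself) that the good revs did not rule out, and
    # 'children' maps each visited rev to the visited revs it is a parent of,
    # which is the edge direction the poisoning pass below propagates along.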

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2
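    # Added commentary: a candidate with x ancestors among the candidates
    # (counting itself) and y = tot - x non-ancestors eliminates at least
    # min(x, y) candidates whichever way its test comes out, so tot // 2 is
    # the best achievable worst-case split.  E.g. with tot == 7, a rev
    # scoring min(3, 4) == 3 hits 'perfect' and is accepted immediately by
    # the loop below.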

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = {}
    for rev in candidates:
        if rev in poison:
            for c in children.get(rev, []):
                poison[c] = True # poison children
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip: # all downhill from here?
            for c in children.get(rev, []):
                poison[c] = True # poison children
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)


def load_state(repo):
    state = {'good': [], 'bad': [], 'skip': []}
    if os.path.exists(repo.join("bisect.state")):
        for l in repo.opener("bisect.state"):
            kind, node = l[:-1].split()
            node = repo.lookup(node)
            if kind not in state:
                raise util.Abort(_("unknown bisect kind %s") % kind)
            state[kind].append(node)
    return state
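# Added commentary: the bisect.state file read above and written by
# save_state below holds one "<kind> <node>" pair per line, where kind is
# 'good', 'bad' or 'skip'; save_state always writes full hex node ids, e.g.
# (hypothetical values)
#   bad 0f2c8a...
#   good 91b5d4...
#   skip 77ae03...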


def save_state(repo, state):
    f = repo.opener("bisect.state", "w", atomictemp=True)
    wlock = repo.wlock()
    try:
        for kind in state:
            for node in state[kind]:
                f.write("%s %s\n" % (kind, hex(node)))
        f.rename()
    finally:
        wlock.release()
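# Minimal usage sketch (added for illustration; 'repo' is assumed to be an
# existing local repository object and the revision names are placeholders):
#
#   state = load_state(repo)
#   state['bad'].append(repo.lookup('tip'))
#   state['good'].append(repo.lookup('0'))
#   save_state(repo, state)
#   nodes, changesets, good = bisect(repo.changelog, state)
#   if changesets == 0:
#       pass  # 'nodes' is the final answer
#   else:
#       pass  # 'nodes[0]' is the next changeset to update to and test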