@@ -1,51 +1,52 @@
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 by Intevation GmbH <intevation@intevation.de> |
|
4 | # | |
|
4 | 5 | # Author(s): |
|
5 | 6 | # Thomas Arendsen Hein <thomas@intevation.de> |
|
6 | 7 | # |
|
7 | 8 | # This software may be used and distributed according to the terms of the |
|
8 | 9 | # GNU General Public License version 2, incorporated herein by reference. |
|
9 | 10 | |
|
10 | 11 | """ |
|
11 | 12 | hg-ssh - a wrapper for ssh access to a limited set of mercurial repos |
|
12 | 13 | |
|
13 | 14 | To be used in ~/.ssh/authorized_keys with the "command" option, see sshd(8): |
|
14 | 15 | command="hg-ssh path/to/repo1 /path/to/repo2 ~/repo3 ~user/repo4" ssh-dss ... |
|
15 | 16 | (probably together with these other useful options: |
|
16 | 17 | no-port-forwarding,no-X11-forwarding,no-agent-forwarding) |
|
17 | 18 | |
|
18 | 19 | This allows pull/push over ssh to the repositories given as arguments. |
|
19 | 20 | |
|
20 | 21 | If all your repositories are subdirectories of a common directory, you can |
|
21 | 22 | allow shorter paths with: |
|
22 | 23 | command="cd path/to/my/repositories && hg-ssh repo1 subdir/repo2" |
|
23 | 24 | |
|
24 | 25 | You can use pattern matching of your normal shell, e.g.: |
|
25 | 26 | command="cd repos && hg-ssh user/thomas/* projects/{mercurial,foo}" |
|
26 | 27 | """ |
|
27 | 28 | |
|
28 | 29 | # enable importing on demand to reduce startup time |
|
29 | 30 | from mercurial import demandimport; demandimport.enable() |
|
30 | 31 | |
|
31 | 32 | from mercurial import dispatch |
|
32 | 33 | |
|
33 | 34 | import sys, os |
|
34 | 35 | |
|
35 | 36 | cwd = os.getcwd() |
|
36 | 37 | allowed_paths = [os.path.normpath(os.path.join(cwd, os.path.expanduser(path))) |
|
37 | 38 | for path in sys.argv[1:]] |
|
38 | 39 | orig_cmd = os.getenv('SSH_ORIGINAL_COMMAND', '?') |
|
39 | 40 | |
|
40 | 41 | if orig_cmd.startswith('hg -R ') and orig_cmd.endswith(' serve --stdio'): |
|
41 | 42 | path = orig_cmd[6:-14] |
|
42 | 43 | repo = os.path.normpath(os.path.join(cwd, os.path.expanduser(path))) |
|
43 | 44 | if repo in allowed_paths: |
|
44 | 45 | dispatch.dispatch(['-R', repo, 'serve', '--stdio']) |
|
45 | 46 | else: |
|
46 | 47 | sys.stderr.write("Illegal repository %r\n" % repo) |
|
47 | 48 | sys.exit(-1) |
|
48 | 49 | else: |
|
49 | 50 | sys.stderr.write("Illegal command %r\n" % orig_cmd) |
|
50 | 51 | sys.exit(-1) |
|
51 | 52 |
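For reference, a single authorized_keys entry that combines the "command" option, the cd trick and the hardening options described in the hg-ssh docstring above might look like the following sketch (the repository names, key blob and comment are placeholders):

    command="cd ~/repos && hg-ssh project1 team/*",no-port-forwarding,no-X11-forwarding,no-agent-forwarding ssh-dss AAAAB3NzaC1... user@example.com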
@@ -1,41 +1,42 @@
|
1 | 1 | # Mercurial extension to provide the 'hg children' command |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2007 by Intevation GmbH <intevation@intevation.de> |
|
4 | # | |
|
4 | 5 | # Author(s): |
|
5 | 6 | # Thomas Arendsen Hein <thomas@intevation.de> |
|
6 | 7 | # |
|
7 | 8 | # This software may be used and distributed according to the terms of the |
|
8 | 9 | # GNU General Public License version 2, incorporated herein by reference. |
|
9 | 10 | |
|
10 | 11 | from mercurial import cmdutil |
|
11 | 12 | from mercurial.commands import templateopts |
|
12 | 13 | from mercurial.i18n import _ |
|
13 | 14 | |
|
14 | 15 | |
|
15 | 16 | def children(ui, repo, file_=None, **opts): |
|
16 | 17 | """show the children of the given or working directory revision |
|
17 | 18 | |
|
18 | 19 | Print the children of the working directory's revision. If a |
|
19 | 20 | revision is given via --rev/-r, the children of that revision will |
|
20 | 21 | be printed. If a file argument is given, the revision in which the |
|
21 | 22 | file was last changed (after the working directory revision or the |
|
22 | 23 | argument to --rev if given) is printed. |
|
23 | 24 | """ |
|
24 | 25 | rev = opts.get('rev') |
|
25 | 26 | if file_: |
|
26 | 27 | ctx = repo.filectx(file_, changeid=rev) |
|
27 | 28 | else: |
|
28 | 29 | ctx = repo[rev] |
|
29 | 30 | |
|
30 | 31 | displayer = cmdutil.show_changeset(ui, repo, opts) |
|
31 | 32 | for cctx in ctx.children(): |
|
32 | 33 | displayer.show(cctx) |
|
33 | 34 | |
|
34 | 35 | |
|
35 | 36 | cmdtable = { |
|
36 | 37 | "children": |
|
37 | 38 | (children, |
|
38 | 39 | [('r', 'rev', '', _('show children of the specified revision')), |
|
39 | 40 | ] + templateopts, |
|
40 | 41 | _('hg children [-r REV] [FILE]')), |
|
41 | 42 | } |
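A quick way to exercise the command defined above is to enable the bundled extension and run it against a revision; this is only a sketch, and the revision number and file name are made up:

    [extensions]
    hgext.children =

    $ hg children -r 42
    $ hg children -r 42 README

Per the docstring, the second form prints the revision in which README was last changed after revision 42, rather than the children of revision 42 itself.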
@@ -1,162 +1,163 @@
|
1 | 1 | # churn.py - create a graph of revisions count grouped by template |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net> |
|
4 | 4 | # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2, incorporated herein by reference. |
|
8 | ||
|
8 | 9 | '''command to show certain statistics about revision history''' |
|
9 | 10 | |
|
10 | 11 | from mercurial.i18n import _ |
|
11 | 12 | from mercurial import patch, cmdutil, util, templater |
|
12 | 13 | import sys |
|
13 | 14 | import time, datetime |
|
14 | 15 | |
|
15 | 16 | def maketemplater(ui, repo, tmpl): |
|
16 | 17 | tmpl = templater.parsestring(tmpl, quoted=False) |
|
17 | 18 | try: |
|
18 | 19 | t = cmdutil.changeset_templater(ui, repo, False, None, None, False) |
|
19 | 20 | except SyntaxError, inst: |
|
20 | 21 | raise util.Abort(inst.args[0]) |
|
21 | 22 | t.use_template(tmpl) |
|
22 | 23 | return t |
|
23 | 24 | |
|
24 | 25 | def changedlines(ui, repo, ctx1, ctx2, fns): |
|
25 | 26 | lines = 0 |
|
26 | 27 | fmatch = cmdutil.match(repo, pats=fns) |
|
27 | 28 | diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch)) |
|
28 | 29 | for l in diff.split('\n'): |
|
29 | 30 | if (l.startswith("+") and not l.startswith("+++ ") or |
|
30 | 31 | l.startswith("-") and not l.startswith("--- ")): |
|
31 | 32 | lines += 1 |
|
32 | 33 | return lines |
|
33 | 34 | |
|
34 | 35 | def countrate(ui, repo, amap, *pats, **opts): |
|
35 | 36 | """Calculate stats""" |
|
36 | 37 | if opts.get('dateformat'): |
|
37 | 38 | def getkey(ctx): |
|
38 | 39 | t, tz = ctx.date() |
|
39 | 40 | date = datetime.datetime(*time.gmtime(float(t) - tz)[:6]) |
|
40 | 41 | return date.strftime(opts['dateformat']) |
|
41 | 42 | else: |
|
42 | 43 | tmpl = opts.get('template', '{author|email}') |
|
43 | 44 | tmpl = maketemplater(ui, repo, tmpl) |
|
44 | 45 | def getkey(ctx): |
|
45 | 46 | ui.pushbuffer() |
|
46 | 47 | tmpl.show(ctx) |
|
47 | 48 | return ui.popbuffer() |
|
48 | 49 | |
|
49 | 50 | count = pct = 0 |
|
50 | 51 | rate = {} |
|
51 | 52 | df = False |
|
52 | 53 | if opts.get('date'): |
|
53 | 54 | df = util.matchdate(opts['date']) |
|
54 | 55 | |
|
55 | 56 | get = util.cachefunc(lambda r: repo[r].changeset()) |
|
56 | 57 | changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts) |
|
57 | 58 | for st, rev, fns in changeiter: |
|
58 | 59 | if not st == 'add': |
|
59 | 60 | continue |
|
60 | 61 | if df and not df(get(rev)[2][0]): # doesn't match date format |
|
61 | 62 | continue |
|
62 | 63 | |
|
63 | 64 | ctx = repo[rev] |
|
64 | 65 | key = getkey(ctx) |
|
65 | 66 | key = amap.get(key, key) # alias remap |
|
66 | 67 | if opts.get('changesets'): |
|
67 | 68 | rate[key] = rate.get(key, 0) + 1 |
|
68 | 69 | else: |
|
69 | 70 | parents = ctx.parents() |
|
70 | 71 | if len(parents) > 1: |
|
71 | 72 | ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,)) |
|
72 | 73 | continue |
|
73 | 74 | |
|
74 | 75 | ctx1 = parents[0] |
|
75 | 76 | lines = changedlines(ui, repo, ctx1, ctx, fns) |
|
76 | 77 | rate[key] = rate.get(key, 0) + lines |
|
77 | 78 | |
|
78 | 79 | if opts.get('progress'): |
|
79 | 80 | count += 1 |
|
80 | 81 | newpct = int(100.0 * count / max(len(repo), 1)) |
|
81 | 82 | if pct < newpct: |
|
82 | 83 | pct = newpct |
|
83 | 84 | ui.write("\r" + _("generating stats: %d%%") % pct) |
|
84 | 85 | sys.stdout.flush() |
|
85 | 86 | |
|
86 | 87 | if opts.get('progress'): |
|
87 | 88 | ui.write("\r") |
|
88 | 89 | sys.stdout.flush() |
|
89 | 90 | |
|
90 | 91 | return rate |
|
91 | 92 | |
|
92 | 93 | |
|
93 | 94 | def churn(ui, repo, *pats, **opts): |
|
94 | 95 | '''graph count of revisions grouped by template |
|
95 | 96 | |
|
96 | 97 | Will graph the count of changed lines or revisions, grouped by |
|
97 | 98 | template or, alternatively, by date if --dateformat is used; in |
|
98 | 99 | that case the template is overridden. |
|
99 | 100 | |
|
100 | 101 | By default, statistics are counted as the number of changed lines. |
|
101 | 102 | |
|
102 | 103 | Examples: |
|
103 | 104 | |
|
104 | 105 | # display count of changed lines for every committer |
|
105 | 106 | hg churn -t '{author|email}' |
|
106 | 107 | |
|
107 | 108 | # display daily activity graph |
|
108 | 109 | hg churn -f '%H' -s -c |
|
109 | 110 | |
|
110 | 111 | # display activity of developers by month |
|
111 | 112 | hg churn -f '%Y-%m' -s -c |
|
112 | 113 | |
|
113 | 114 | # display count of lines changed in every year |
|
114 | 115 | hg churn -f '%Y' -s |
|
115 | 116 | |
|
116 | 117 | The map file format used to specify aliases is fairly simple: |
|
117 | 118 | |
|
118 | 119 | <alias email> <actual email>''' |
|
119 | 120 | def pad(s, l): |
|
120 | 121 | return (s + " " * l)[:l] |
|
121 | 122 | |
|
122 | 123 | amap = {} |
|
123 | 124 | aliases = opts.get('aliases') |
|
124 | 125 | if aliases: |
|
125 | 126 | for l in open(aliases, "r"): |
|
126 | 127 | l = l.strip() |
|
127 | 128 | alias, actual = l.split() |
|
128 | 129 | amap[alias] = actual |
|
129 | 130 | |
|
130 | 131 | rate = countrate(ui, repo, amap, *pats, **opts).items() |
|
131 | 132 | if not rate: |
|
132 | 133 | return |
|
133 | 134 | |
|
134 | 135 | sortfn = ((not opts.get('sort')) and (lambda a, b: cmp(b[1], a[1])) or None) |
|
135 | 136 | rate.sort(sortfn) |
|
136 | 137 | |
|
137 | 138 | maxcount = float(max([v for k, v in rate])) |
|
138 | 139 | maxname = max([len(k) for k, v in rate]) |
|
139 | 140 | |
|
140 | 141 | ttywidth = util.termwidth() |
|
141 | 142 | ui.debug(_("assuming %i character terminal\n") % ttywidth) |
|
142 | 143 | width = ttywidth - maxname - 2 - 6 - 2 - 2 |
|
143 | 144 | |
|
144 | 145 | for date, count in rate: |
|
145 | 146 | print "%s %6d %s" % (pad(date, maxname), count, |
|
146 | 147 | "*" * int(count * width / maxcount)) |
|
147 | 148 | |
|
148 | 149 | |
|
149 | 150 | cmdtable = { |
|
150 | 151 | "churn": |
|
151 | 152 | (churn, |
|
152 | 153 | [('r', 'rev', [], _('count rate for the specified revision or range')), |
|
153 | 154 | ('d', 'date', '', _('count rate for revisions matching date spec')), |
|
154 | 155 | ('t', 'template', '{author|email}', _('template to group changesets')), |
|
155 | 156 | ('f', 'dateformat', '', |
|
156 | 157 | _('strftime-compatible format for grouping by date')), |
|
157 | 158 | ('c', 'changesets', False, _('count rate by number of changesets')), |
|
158 | 159 | ('s', 'sort', False, _('sort by key (default: sort by count)')), |
|
159 | 160 | ('', 'aliases', '', _('file with email aliases')), |
|
160 | 161 | ('', 'progress', None, _('show progress'))], |
|
161 | 162 | _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")), |
|
162 | 163 | } |
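The --aliases file mentioned in the churn docstring is a plain-text map with one '<alias email> <actual email>' pair per line, split on whitespace. A hypothetical example and a matching invocation (the addresses and the file name are made up):

    jd@example.com john.doe@example.com
    john@old.example.com john.doe@example.com

    $ hg churn --aliases aliases.txt -t '{author|email}' --progress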
@@ -1,261 +1,262 @@
|
1 | 1 | # convert.py Foreign SCM converter |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | ||
|
7 | 8 | '''converting foreign VCS repositories to Mercurial''' |
|
8 | 9 | |
|
9 | 10 | import convcmd |
|
10 | 11 | import cvsps |
|
11 | 12 | import subversion |
|
12 | 13 | from mercurial import commands |
|
13 | 14 | from mercurial.i18n import _ |
|
14 | 15 | |
|
15 | 16 | # Command definitions were moved elsewhere to ease the demandload job. |
|
16 | 17 | |
|
17 | 18 | def convert(ui, src, dest=None, revmapfile=None, **opts): |
|
18 | 19 | """convert a foreign SCM repository to a Mercurial one. |
|
19 | 20 | |
|
20 | 21 | Accepted source formats [identifiers]: |
|
21 | 22 | - Mercurial [hg] |
|
22 | 23 | - CVS [cvs] |
|
23 | 24 | - Darcs [darcs] |
|
24 | 25 | - git [git] |
|
25 | 26 | - Subversion [svn] |
|
26 | 27 | - Monotone [mtn] |
|
27 | 28 | - GNU Arch [gnuarch] |
|
28 | 29 | - Bazaar [bzr] |
|
29 | 30 | - Perforce [p4] |
|
30 | 31 | |
|
31 | 32 | Accepted destination formats [identifiers]: |
|
32 | 33 | - Mercurial [hg] |
|
33 | 34 | - Subversion [svn] (history on branches is not preserved) |
|
34 | 35 | |
|
35 | 36 | If no revision is given, all revisions will be converted. |
|
36 | 37 | Otherwise, convert will only import up to the named revision |
|
37 | 38 | (given in a format understood by the source). |
|
38 | 39 | |
|
39 | 40 | If no destination directory name is specified, it defaults to the |
|
40 | 41 | basename of the source with '-hg' appended. If the destination |
|
41 | 42 | repository doesn't exist, it will be created. |
|
42 | 43 | |
|
43 | 44 | If <REVMAP> isn't given, it will be put in a default location |
|
44 | 45 | (<dest>/.hg/shamap by default). The <REVMAP> is a simple text file |
|
45 | 46 | that maps each source commit ID to the destination ID for that |
|
46 | 47 | revision, like so: |
|
47 | 48 | <source ID> <destination ID> |
|
48 | 49 | |
|
49 | 50 | If the file doesn't exist, it's automatically created. It's |
|
50 | 51 | updated on each commit copied, so convert-repo can be interrupted |
|
51 | 52 | and can be run repeatedly to copy new commits. |
|
52 | 53 | |
|
53 | 54 | The [username mapping] file is a simple text file that maps each |
|
54 | 55 | source commit author to a destination commit author. It is handy |
|
55 | 56 | for source SCMs that use unix logins to identify authors (eg: |
|
56 | 57 | CVS). One line per author mapping and the line format is: |
|
57 | 58 | srcauthor=whatever string you want |
|
58 | 59 | |
|
59 | 60 | The filemap is a file that allows filtering and remapping of files |
|
60 | 61 | and directories. Comment lines start with '#'. Each line can |
|
61 | 62 | contain one of the following directives: |
|
62 | 63 | |
|
63 | 64 | include path/to/file |
|
64 | 65 | |
|
65 | 66 | exclude path/to/file |
|
66 | 67 | |
|
67 | 68 | rename from/file to/file |
|
68 | 69 | |
|
69 | 70 | The 'include' directive causes a file, or all files under a |
|
70 | 71 | directory, to be included in the destination repository, and the |
|
71 | 72 | exclusion of all other files and directories not explicitly included. |
|
72 | 73 | The 'exclude' directive causes files or directories to be omitted. |
|
73 | 74 | The 'rename' directive renames a file or directory. To rename from |
|
74 | 75 | a subdirectory into the root of the repository, use '.' as the |
|
75 | 76 | path to rename to. |
|
76 | 77 | |
|
77 | 78 | The splicemap is a file that allows insertion of synthetic |
|
78 | 79 | history, letting you specify the parents of a revision. This is |
|
79 | 80 | useful if you want to e.g. give a Subversion merge two parents, or |
|
80 | 81 | graft two disconnected series of history together. Each entry |
|
81 | 82 | contains a key, followed by a space, followed by one or two |
|
82 | 83 | comma-separated values. The key is the revision ID in the source |
|
83 | 84 | revision control system whose parents should be modified (same |
|
84 | 85 | format as a key in .hg/shamap). The values are the revision IDs |
|
85 | 86 | (in either the source or destination revision control system) that |
|
86 | 87 | should be used as the new parents for that node. |
|
87 | 88 | |
|
88 | 89 | Mercurial Source |
|
89 | 90 | ----------------- |
|
90 | 91 | |
|
91 | 92 | --config convert.hg.ignoreerrors=False (boolean) |
|
92 | 93 | ignore integrity errors when reading. Use it to fix Mercurial |
|
93 | 94 | repositories with missing revlogs, by converting from and to |
|
94 | 95 | Mercurial. |
|
95 | 96 | --config convert.hg.saverev=False (boolean) |
|
96 | 97 | store original revision ID in changeset (forces target IDs to |
|
97 | 98 | change) |
|
98 | 99 | --config convert.hg.startrev=0 (hg revision identifier) |
|
99 | 100 | convert start revision and its descendants |
|
100 | 101 | |
|
101 | 102 | CVS Source |
|
102 | 103 | ---------- |
|
103 | 104 | |
|
104 | 105 | CVS source will use a sandbox (i.e. a checked-out copy) from CVS |
|
105 | 106 | to indicate the starting point of what will be converted. Direct |
|
106 | 107 | access to the repository files is not needed, unless of course the |
|
107 | 108 | repository is :local:. The conversion uses the top level directory |
|
108 | 109 | in the sandbox to find the CVS repository, and then uses CVS rlog |
|
109 | 110 | commands to find files to convert. This means that unless a |
|
110 | 111 | filemap is given, all files under the starting directory will be |
|
111 | 112 | converted, and that any directory reorganisation in the CVS |
|
112 | 113 | sandbox is ignored. |
|
113 | 114 | |
|
114 | 115 | Because CVS does not have changesets, it is necessary to collect |
|
115 | 116 | individual commits to CVS and merge them into changesets. CVS |
|
116 | 117 | source uses its internal changeset merging code by default but can |
|
117 | 118 | be configured to call the external 'cvsps' program by setting: |
|
118 | 119 | --config convert.cvsps='cvsps -A -u --cvs-direct -q' |
|
119 | 120 | This is a legacy option and may be removed in future. |
|
120 | 121 | |
|
121 | 122 | The options shown are the defaults. |
|
122 | 123 | |
|
123 | 124 | Internal cvsps is selected by setting |
|
124 | 125 | --config convert.cvsps=builtin |
|
125 | 126 | and has a few more configurable options: |
|
126 | 127 | --config convert.cvsps.cache=True (boolean) |
|
127 | 128 | Set to False to disable remote log caching, for testing and |
|
128 | 129 | debugging purposes. |
|
129 | 130 | --config convert.cvsps.fuzz=60 (integer) |
|
130 | 131 | Specify the maximum time (in seconds) that is allowed |
|
131 | 132 | between commits with identical user and log message in a |
|
132 | 133 | single changeset. When very large files were checked in as |
|
133 | 134 | part of a changeset, the default may not be long |
|
134 | 135 | enough. |
|
135 | 136 | --config convert.cvsps.mergeto='{{mergetobranch ([-\w]+)}}' |
|
136 | 137 | Specify a regular expression to which commit log messages |
|
137 | 138 | are matched. If a match occurs, then the conversion |
|
138 | 139 | process will insert a dummy revision merging the branch on |
|
139 | 140 | which this log message occurs to the branch indicated in |
|
140 | 141 | the regex. |
|
141 | 142 | --config convert.cvsps.mergefrom='{{mergefrombranch ([-\w]+)}}' |
|
142 | 143 | Specify a regular expression to which commit log messages |
|
143 | 144 | are matched. If a match occurs, then the conversion |
|
144 | 145 | process will add the most recent revision on the branch |
|
145 | 146 | indicated in the regex as the second parent of the |
|
146 | 147 | changeset. |
|
147 | 148 | |
|
148 | 149 | The hgext/convert/cvsps wrapper script allows the builtin |
|
149 | 150 | changeset merging code to be run without doing a conversion. Its |
|
150 | 151 | parameters and output are similar to that of cvsps 2.1. |
|
151 | 152 | |
|
152 | 153 | Subversion Source |
|
153 | 154 | ----------------- |
|
154 | 155 | |
|
155 | 156 | Subversion source detects classical trunk/branches/tags layouts. |
|
156 | 157 | By default, the supplied "svn://repo/path/" source URL is |
|
157 | 158 | converted as a single branch. If "svn://repo/path/trunk" exists it |
|
158 | 159 | replaces the default branch. If "svn://repo/path/branches" exists, |
|
159 | 160 | its subdirectories are listed as possible branches. If |
|
160 | 161 | "svn://repo/path/tags" exists, it is searched for tags referencing |
|
161 | 162 | converted branches. Default "trunk", "branches" and "tags" values |
|
162 | 163 | can be overridden with the following options. Set them to paths |
|
163 | 164 | relative to the source URL, or leave them blank to disable |
|
164 | 165 | autodetection. |
|
165 | 166 | |
|
166 | 167 | --config convert.svn.branches=branches (directory name) |
|
167 | 168 | specify the directory containing branches |
|
168 | 169 | --config convert.svn.tags=tags (directory name) |
|
169 | 170 | specify the directory containing tags |
|
170 | 171 | --config convert.svn.trunk=trunk (directory name) |
|
171 | 172 | specify the name of the trunk branch |
|
172 | 173 | |
|
173 | 174 | Source history can be retrieved starting at a specific revision, |
|
174 | 175 | instead of being integrally converted. Only single branch |
|
175 | 176 | conversions are supported. |
|
176 | 177 | |
|
177 | 178 | --config convert.svn.startrev=0 (svn revision number) |
|
178 | 179 | specify start Subversion revision. |
|
179 | 180 | |
|
180 | 181 | Perforce Source |
|
181 | 182 | --------------- |
|
182 | 183 | |
|
183 | 184 | The Perforce (P4) importer can be given a p4 depot path or a |
|
184 | 185 | client specification as source. It will convert all files in the |
|
185 | 186 | source to a flat Mercurial repository, ignoring labels, branches |
|
186 | 187 | and integrations. Note that when a depot path is given, you should |
|
187 | 188 | usually specify a target directory, because otherwise the |
|
188 | 189 | target may be named ...-hg. |
|
189 | 190 | |
|
190 | 191 | It is possible to limit the amount of source history to be |
|
191 | 192 | converted by specifying an initial Perforce revision. |
|
192 | 193 | |
|
193 | 194 | --config convert.p4.startrev=0 (perforce changelist number) |
|
194 | 195 | specify initial Perforce revision. |
|
195 | 196 | |
|
196 | 197 | |
|
197 | 198 | Mercurial Destination |
|
198 | 199 | --------------------- |
|
199 | 200 | |
|
200 | 201 | --config convert.hg.clonebranches=False (boolean) |
|
201 | 202 | dispatch source branches in separate clones. |
|
202 | 203 | --config convert.hg.tagsbranch=default (branch name) |
|
203 | 204 | tag revisions branch name |
|
204 | 205 | --config convert.hg.usebranchnames=True (boolean) |
|
205 | 206 | preserve branch names |
|
206 | 207 | |
|
207 | 208 | """ |
|
208 | 209 | return convcmd.convert(ui, src, dest, revmapfile, **opts) |
|
209 | 210 | |
|
210 | 211 | def debugsvnlog(ui, **opts): |
|
211 | 212 | return subversion.debugsvnlog(ui, **opts) |
|
212 | 213 | |
|
213 | 214 | def debugcvsps(ui, *args, **opts): |
|
214 | 215 | '''create changeset information from CVS |
|
215 | 216 | |
|
216 | 217 | This command is intended as a debugging tool for the CVS to |
|
217 | 218 | Mercurial converter, and can be used as a direct replacement for |
|
218 | 219 | cvsps. |
|
219 | 220 | |
|
220 | 221 | Hg debugcvsps reads the CVS rlog for the current directory (or any |
|
221 | 222 | named directory) in the CVS repository, and converts the log to a |
|
222 | 223 | series of changesets based on matching commit log entries and |
|
223 | 224 | dates.''' |
|
224 | 225 | return cvsps.debugcvsps(ui, *args, **opts) |
|
225 | 226 | |
|
226 | 227 | commands.norepo += " convert debugsvnlog debugcvsps" |
|
227 | 228 | |
|
228 | 229 | cmdtable = { |
|
229 | 230 | "convert": |
|
230 | 231 | (convert, |
|
231 | 232 | [('A', 'authors', '', _('username mapping filename')), |
|
232 | 233 | ('d', 'dest-type', '', _('destination repository type')), |
|
233 | 234 | ('', 'filemap', '', _('remap file names using contents of file')), |
|
234 | 235 | ('r', 'rev', '', _('import up to target revision REV')), |
|
235 | 236 | ('s', 'source-type', '', _('source repository type')), |
|
236 | 237 | ('', 'splicemap', '', _('splice synthesized history into place')), |
|
237 | 238 | ('', 'datesort', None, _('try to sort changesets by date'))], |
|
238 | 239 | _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')), |
|
239 | 240 | "debugsvnlog": |
|
240 | 241 | (debugsvnlog, |
|
241 | 242 | [], |
|
242 | 243 | 'hg debugsvnlog'), |
|
243 | 244 | "debugcvsps": |
|
244 | 245 | (debugcvsps, |
|
245 | 246 | [ |
|
246 | 247 | # Main options shared with cvsps-2.1 |
|
247 | 248 | ('b', 'branches', [], _('only return changes on specified branches')), |
|
248 | 249 | ('p', 'prefix', '', _('prefix to remove from file names')), |
|
249 | 250 | ('r', 'revisions', [], _('only return changes after or between specified tags')), |
|
250 | 251 | ('u', 'update-cache', None, _("update cvs log cache")), |
|
251 | 252 | ('x', 'new-cache', None, _("create new cvs log cache")), |
|
252 | 253 | ('z', 'fuzz', 60, _('set commit time fuzz in seconds')), |
|
253 | 254 | ('', 'root', '', _('specify cvsroot')), |
|
254 | 255 | # Options specific to builtin cvsps |
|
255 | 256 | ('', 'parents', '', _('show parent changesets')), |
|
256 | 257 | ('', 'ancestors', '', _('show current changeset in ancestor branches')), |
|
257 | 258 | # Options that are ignored for compatibility with cvsps-2.1 |
|
258 | 259 | ('A', 'cvs-direct', None, _('ignored for compatibility')), |
|
259 | 260 | ], |
|
260 | 261 | _('hg debugcvsps [OPTION]... [PATH]...')), |
|
261 | 262 | } |
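The filemap format described in the convert docstring can be illustrated with a short, hypothetical example and an invocation that uses it (the Subversion URL, the paths and the destination name are placeholders):

    include src
    exclude src/tests
    rename src .

    $ hg convert --filemap filemap.txt -s svn svn://svn.example.org/project project-hg

With this map only 'src' is converted, its 'tests' subdirectory is dropped, and the remaining contents are moved to the root of the new repository.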
@@ -1,146 +1,147 @@
|
1 | 1 | # fetch.py - pull and merge remote changes |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | ||
|
7 | 8 | '''pulling, updating and merging in one command''' |
|
8 | 9 | |
|
9 | 10 | from mercurial.i18n import _ |
|
10 | 11 | from mercurial.node import nullid, short |
|
11 | 12 | from mercurial import commands, cmdutil, hg, util, url |
|
12 | 13 | from mercurial.lock import release |
|
13 | 14 | |
|
14 | 15 | def fetch(ui, repo, source='default', **opts): |
|
15 | 16 | '''pull changes from a remote repository, merge new changes if needed. |
|
16 | 17 | |
|
17 | 18 | This finds all changes from the repository at the specified path |
|
18 | 19 | or URL and adds them to the local repository. |
|
19 | 20 | |
|
20 | 21 | If the pulled changes add a new branch head, the head is |
|
21 | 22 | automatically merged, and the result of the merge is committed. |
|
22 | 23 | Otherwise, the working directory is updated to include the new |
|
23 | 24 | changes. |
|
24 | 25 | |
|
25 | 26 | When a merge occurs, the newly pulled changes are assumed to be |
|
26 | 27 | "authoritative". The head of the new changes is used as the first |
|
27 | 28 | parent, with local changes as the second. To switch the merge |
|
28 | 29 | order, use --switch-parent. |
|
29 | 30 | |
|
30 | 31 | See 'hg help dates' for a list of formats valid for -d/--date. |
|
31 | 32 | ''' |
|
32 | 33 | |
|
33 | 34 | date = opts.get('date') |
|
34 | 35 | if date: |
|
35 | 36 | opts['date'] = util.parsedate(date) |
|
36 | 37 | |
|
37 | 38 | parent, p2 = repo.dirstate.parents() |
|
38 | 39 | branch = repo.dirstate.branch() |
|
39 | 40 | branchnode = repo.branchtags().get(branch) |
|
40 | 41 | if parent != branchnode: |
|
41 | 42 | raise util.Abort(_('working dir not at branch tip ' |
|
42 | 43 | '(use "hg update" to check out branch tip)')) |
|
43 | 44 | |
|
44 | 45 | if p2 != nullid: |
|
45 | 46 | raise util.Abort(_('outstanding uncommitted merge')) |
|
46 | 47 | |
|
47 | 48 | wlock = lock = None |
|
48 | 49 | try: |
|
49 | 50 | wlock = repo.wlock() |
|
50 | 51 | lock = repo.lock() |
|
51 | 52 | mod, add, rem, del_ = repo.status()[:4] |
|
52 | 53 | |
|
53 | 54 | if mod or add or rem: |
|
54 | 55 | raise util.Abort(_('outstanding uncommitted changes')) |
|
55 | 56 | if del_: |
|
56 | 57 | raise util.Abort(_('working directory is missing some files')) |
|
57 | 58 | bheads = repo.branchheads(branch) |
|
58 | 59 | bheads = [head for head in bheads if len(repo[head].children()) == 0] |
|
59 | 60 | if len(bheads) > 1: |
|
60 | 61 | raise util.Abort(_('multiple heads in this branch ' |
|
61 | 62 | '(use "hg heads ." and "hg merge" to merge)')) |
|
62 | 63 | |
|
63 | 64 | other = hg.repository(cmdutil.remoteui(repo, opts), |
|
64 | 65 | ui.expandpath(source)) |
|
65 | 66 | ui.status(_('pulling from %s\n') % |
|
66 | 67 | url.hidepassword(ui.expandpath(source))) |
|
67 | 68 | revs = None |
|
68 | 69 | if opts['rev']: |
|
69 | 70 | if not other.local(): |
|
70 | 71 | raise util.Abort(_("fetch -r doesn't work for remote " |
|
71 | 72 | "repositories yet")) |
|
72 | 73 | else: |
|
73 | 74 | revs = [other.lookup(rev) for rev in opts['rev']] |
|
74 | 75 | |
|
75 | 76 | # Are there any changes at all? |
|
76 | 77 | modheads = repo.pull(other, heads=revs) |
|
77 | 78 | if modheads == 0: |
|
78 | 79 | return 0 |
|
79 | 80 | |
|
80 | 81 | # Is this a simple fast-forward along the current branch? |
|
81 | 82 | newheads = repo.branchheads(branch) |
|
82 | 83 | newheads = [head for head in newheads if len(repo[head].children()) == 0] |
|
83 | 84 | newchildren = repo.changelog.nodesbetween([parent], newheads)[2] |
|
84 | 85 | if len(newheads) == 1: |
|
85 | 86 | if newchildren[0] != parent: |
|
86 | 87 | return hg.clean(repo, newchildren[0]) |
|
87 | 88 | else: |
|
88 | 89 | return |
|
89 | 90 | |
|
90 | 91 | # Are there more than one additional branch heads? |
|
91 | 92 | newchildren = [n for n in newchildren if n != parent] |
|
92 | 93 | newparent = parent |
|
93 | 94 | if newchildren: |
|
94 | 95 | newparent = newchildren[0] |
|
95 | 96 | hg.clean(repo, newparent) |
|
96 | 97 | newheads = [n for n in newheads if n != newparent] |
|
97 | 98 | if len(newheads) > 1: |
|
98 | 99 | ui.status(_('not merging with %d other new branch heads ' |
|
99 | 100 | '(use "hg heads ." and "hg merge" to merge them)\n') % |
|
100 | 101 | (len(newheads) - 1)) |
|
101 | 102 | return |
|
102 | 103 | |
|
103 | 104 | # Otherwise, let's merge. |
|
104 | 105 | err = False |
|
105 | 106 | if newheads: |
|
106 | 107 | # By default, we consider the repository we're pulling |
|
107 | 108 | # *from* as authoritative, so we merge our changes into |
|
108 | 109 | # theirs. |
|
109 | 110 | if opts['switch_parent']: |
|
110 | 111 | firstparent, secondparent = newparent, newheads[0] |
|
111 | 112 | else: |
|
112 | 113 | firstparent, secondparent = newheads[0], newparent |
|
113 | 114 | ui.status(_('updating to %d:%s\n') % |
|
114 | 115 | (repo.changelog.rev(firstparent), |
|
115 | 116 | short(firstparent))) |
|
116 | 117 | hg.clean(repo, firstparent) |
|
117 | 118 | ui.status(_('merging with %d:%s\n') % |
|
118 | 119 | (repo.changelog.rev(secondparent), short(secondparent))) |
|
119 | 120 | err = hg.merge(repo, secondparent, remind=False) |
|
120 | 121 | |
|
121 | 122 | if not err: |
|
122 | 123 | mod, add, rem = repo.status()[:3] |
|
123 | 124 | message = (cmdutil.logmessage(opts) or |
|
124 | 125 | (_('Automated merge with %s') % |
|
125 | 126 | url.removeauth(other.url()))) |
|
126 | 127 | force_editor = opts.get('force_editor') or opts.get('edit') |
|
127 | 128 | n = repo.commit(mod + add + rem, message, |
|
128 | 129 | opts['user'], opts['date'], force=True, |
|
129 | 130 | force_editor=force_editor) |
|
130 | 131 | ui.status(_('new changeset %d:%s merges remote changes ' |
|
131 | 132 | 'with local\n') % (repo.changelog.rev(n), |
|
132 | 133 | short(n))) |
|
133 | 134 | |
|
134 | 135 | finally: |
|
135 | 136 | release(lock, wlock) |
|
136 | 137 | |
|
137 | 138 | cmdtable = { |
|
138 | 139 | 'fetch': |
|
139 | 140 | (fetch, |
|
140 | 141 | [('r', 'rev', [], _('a specific revision you would like to pull')), |
|
141 | 142 | ('e', 'edit', None, _('edit commit message')), |
|
142 | 143 | ('', 'force-editor', None, _('edit commit message (DEPRECATED)')), |
|
143 | 144 | ('', 'switch-parent', None, _('switch parents when merging')), |
|
144 | 145 | ] + commands.commitopts + commands.commitopts2 + commands.remoteopts, |
|
145 | 146 | _('hg fetch [SOURCE]')), |
|
146 | 147 | } |
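A minimal way to use the command defined above, assuming the bundled extension is enabled and with a placeholder source URL:

    [extensions]
    hgext.fetch =

    $ hg fetch https://hg.example.org/project

Per the docstring, this pulls from the source, merges automatically if the pull added a new branch head, and commits the result; --switch-parent reverses the parent order of that merge.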
@@ -1,415 +1,416 @@
|
1 | 1 | # ASCII graph log extension for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | ||
|
7 | 8 | '''show revision graphs in terminal windows |
|
8 | 9 | |
|
9 | 10 | This extension adds a --graph option to the incoming, outgoing and log |
|
10 | 11 | commands. When this option is given, an ASCII representation of the |
|
11 | 12 | revision graph is also shown. |
|
12 | 13 | ''' |
|
13 | 14 | |
|
14 | 15 | import os |
|
15 | 16 | from mercurial.cmdutil import revrange, show_changeset |
|
16 | 17 | from mercurial.commands import templateopts |
|
17 | 18 | from mercurial.i18n import _ |
|
18 | 19 | from mercurial.node import nullrev |
|
19 | 20 | from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions |
|
20 | 21 | from mercurial import hg, url, util |
|
21 | 22 | |
|
22 | 23 | def revisions(repo, start, stop): |
|
23 | 24 | """cset DAG generator yielding (rev, node, [parents]) tuples |
|
24 | 25 | |
|
25 | 26 | This generator function walks through the revision history from revision |
|
26 | 27 | start to revision stop (which must be less than or equal to start). |
|
27 | 28 | """ |
|
28 | 29 | assert start >= stop |
|
29 | 30 | cur = start |
|
30 | 31 | while cur >= stop: |
|
31 | 32 | ctx = repo[cur] |
|
32 | 33 | parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev] |
|
33 | 34 | parents.sort() |
|
34 | 35 | yield (ctx, parents) |
|
35 | 36 | cur -= 1 |
|
36 | 37 | |
|
37 | 38 | def filerevs(repo, path, start, stop): |
|
38 | 39 | """file cset DAG generator yielding (rev, node, [parents]) tuples |
|
39 | 40 | |
|
40 | 41 | This generator function walks through the revision history of a single |
|
41 | 42 | file from revision start to revision stop (which must be less than or |
|
42 | 43 | equal to start). |
|
43 | 44 | """ |
|
44 | 45 | assert start >= stop |
|
45 | 46 | filerev = len(repo.file(path)) - 1 |
|
46 | 47 | while filerev >= 0: |
|
47 | 48 | fctx = repo.filectx(path, fileid=filerev) |
|
48 | 49 | parents = [f.linkrev() for f in fctx.parents() if f.path() == path] |
|
49 | 50 | parents.sort() |
|
50 | 51 | if fctx.rev() <= start: |
|
51 | 52 | yield (fctx, parents) |
|
52 | 53 | if fctx.rev() <= stop: |
|
53 | 54 | break |
|
54 | 55 | filerev -= 1 |
|
55 | 56 | |
|
56 | 57 | def grapher(nodes): |
|
57 | 58 | """grapher for asciigraph on a list of nodes and their parents |
|
58 | 59 | |
|
59 | 60 | nodes must generate tuples (node, parents, char, lines) where |
|
60 | 61 | - parents must generate the parents of node, in sorted order, |
|
61 | 62 | and max length 2, |
|
62 | 63 | - char is the char to print as the node symbol, and |
|
63 | 64 | - lines are the lines to display next to the node. |
|
64 | 65 | """ |
|
65 | 66 | seen = [] |
|
66 | 67 | for node, parents, char, lines in nodes: |
|
67 | 68 | if node not in seen: |
|
68 | 69 | seen.append(node) |
|
69 | 70 | nodeidx = seen.index(node) |
|
70 | 71 | |
|
71 | 72 | knownparents = [] |
|
72 | 73 | newparents = [] |
|
73 | 74 | for parent in parents: |
|
74 | 75 | if parent in seen: |
|
75 | 76 | knownparents.append(parent) |
|
76 | 77 | else: |
|
77 | 78 | newparents.append(parent) |
|
78 | 79 | |
|
79 | 80 | ncols = len(seen) |
|
80 | 81 | nextseen = seen[:] |
|
81 | 82 | nextseen[nodeidx:nodeidx + 1] = newparents |
|
82 | 83 | edges = [(nodeidx, nextseen.index(p)) for p in knownparents] |
|
83 | 84 | |
|
84 | 85 | if len(newparents) > 0: |
|
85 | 86 | edges.append((nodeidx, nodeidx)) |
|
86 | 87 | if len(newparents) > 1: |
|
87 | 88 | edges.append((nodeidx, nodeidx + 1)) |
|
88 | 89 | nmorecols = len(nextseen) - ncols |
|
89 | 90 | seen = nextseen |
|
90 | 91 | yield (char, lines, nodeidx, edges, ncols, nmorecols) |
|
91 | 92 | |
|
92 | 93 | def fix_long_right_edges(edges): |
|
93 | 94 | for (i, (start, end)) in enumerate(edges): |
|
94 | 95 | if end > start: |
|
95 | 96 | edges[i] = (start, end + 1) |
|
96 | 97 | |
|
97 | 98 | def get_nodeline_edges_tail( |
|
98 | 99 | node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail): |
|
99 | 100 | if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0: |
|
100 | 101 | # Still going in the same non-vertical direction. |
|
101 | 102 | if n_columns_diff == -1: |
|
102 | 103 | start = max(node_index + 1, p_node_index) |
|
103 | 104 | tail = ["|", " "] * (start - node_index - 1) |
|
104 | 105 | tail.extend(["/", " "] * (n_columns - start)) |
|
105 | 106 | return tail |
|
106 | 107 | else: |
|
107 | 108 | return ["\\", " "] * (n_columns - node_index - 1) |
|
108 | 109 | else: |
|
109 | 110 | return ["|", " "] * (n_columns - node_index - 1) |
|
110 | 111 | |
|
111 | 112 | def draw_edges(edges, nodeline, interline): |
|
112 | 113 | for (start, end) in edges: |
|
113 | 114 | if start == end + 1: |
|
114 | 115 | interline[2 * end + 1] = "/" |
|
115 | 116 | elif start == end - 1: |
|
116 | 117 | interline[2 * start + 1] = "\\" |
|
117 | 118 | elif start == end: |
|
118 | 119 | interline[2 * start] = "|" |
|
119 | 120 | else: |
|
120 | 121 | nodeline[2 * end] = "+" |
|
121 | 122 | if start > end: |
|
122 | 123 | (start, end) = (end,start) |
|
123 | 124 | for i in range(2 * start + 1, 2 * end): |
|
124 | 125 | if nodeline[i] != "+": |
|
125 | 126 | nodeline[i] = "-" |
|
126 | 127 | |
|
127 | 128 | def get_padding_line(ni, n_columns, edges): |
|
128 | 129 | line = [] |
|
129 | 130 | line.extend(["|", " "] * ni) |
|
130 | 131 | if (ni, ni - 1) in edges or (ni, ni) in edges: |
|
131 | 132 | # (ni, ni - 1) (ni, ni) |
|
132 | 133 | # | | | | | | | | |
|
133 | 134 | # +---o | | o---+ |
|
134 | 135 | # | | c | | c | | |
|
135 | 136 | # | |/ / | |/ / |
|
136 | 137 | # | | | | | | |
|
137 | 138 | c = "|" |
|
138 | 139 | else: |
|
139 | 140 | c = " " |
|
140 | 141 | line.extend([c, " "]) |
|
141 | 142 | line.extend(["|", " "] * (n_columns - ni - 1)) |
|
142 | 143 | return line |
|
143 | 144 | |
|
144 | 145 | def ascii(ui, grapher): |
|
145 | 146 | """prints an ASCII graph of the DAG returned by the grapher |
|
146 | 147 | |
|
147 | 148 | grapher is a generator that emits tuples with the following elements: |
|
148 | 149 | |
|
149 | 150 | - Character to use as node's symbol. |
|
150 | 151 | - List of lines to display as the node's text. |
|
151 | 152 | - Column of the current node in the set of ongoing edges. |
|
152 | 153 | - Edges; a list of (col, next_col) indicating the edges between |
|
153 | 154 | the current node and its parents. |
|
154 | 155 | - Number of columns (ongoing edges) in the current revision. |
|
155 | 156 | - The difference between the number of columns (ongoing edges) |
|
156 | 157 | in the next revision and the number of columns (ongoing edges) |
|
157 | 158 | in the current revision. That is: -1 means one column removed; |
|
158 | 159 | 0 means no columns added or removed; 1 means one column added. |
|
159 | 160 | """ |
|
160 | 161 | prev_n_columns_diff = 0 |
|
161 | 162 | prev_node_index = 0 |
|
162 | 163 | for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher: |
|
163 | 164 | |
|
164 | 165 | assert -2 < n_columns_diff < 2 |
|
165 | 166 | if n_columns_diff == -1: |
|
166 | 167 | # Transform |
|
167 | 168 | # |
|
168 | 169 | # | | | | | | |
|
169 | 170 | # o | | into o---+ |
|
170 | 171 | # |X / |/ / |
|
171 | 172 | # | | | | |
|
172 | 173 | fix_long_right_edges(edges) |
|
173 | 174 | |
|
174 | 175 | # add_padding_line says whether to rewrite |
|
175 | 176 | # |
|
176 | 177 | # | | | | | | | | |
|
177 | 178 | # | o---+ into | o---+ |
|
178 | 179 | # | / / | | | # <--- padding line |
|
179 | 180 | # o | | | / / |
|
180 | 181 | # o | | |
|
181 | 182 | add_padding_line = (len(node_lines) > 2 and |
|
182 | 183 | n_columns_diff == -1 and |
|
183 | 184 | [x for (x, y) in edges if x + 1 < y]) |
|
184 | 185 | |
|
185 | 186 | # fix_nodeline_tail says whether to rewrite |
|
186 | 187 | # |
|
187 | 188 | # | | o | | | | o | | |
|
188 | 189 | # | | |/ / | | |/ / |
|
189 | 190 | # | o | | into | o / / # <--- fixed nodeline tail |
|
190 | 191 | # | |/ / | |/ / |
|
191 | 192 | # o | | o | | |
|
192 | 193 | fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line |
|
193 | 194 | |
|
194 | 195 | # nodeline is the line containing the node character (typically o) |
|
195 | 196 | nodeline = ["|", " "] * node_index |
|
196 | 197 | nodeline.extend([node_ch, " "]) |
|
197 | 198 | |
|
198 | 199 | nodeline.extend( |
|
199 | 200 | get_nodeline_edges_tail( |
|
200 | 201 | node_index, prev_node_index, n_columns, n_columns_diff, |
|
201 | 202 | prev_n_columns_diff, fix_nodeline_tail)) |
|
202 | 203 | |
|
203 | 204 | # shift_interline is the line containing the non-vertical |
|
204 | 205 | # edges between this entry and the next |
|
205 | 206 | shift_interline = ["|", " "] * node_index |
|
206 | 207 | if n_columns_diff == -1: |
|
207 | 208 | n_spaces = 1 |
|
208 | 209 | edge_ch = "/" |
|
209 | 210 | elif n_columns_diff == 0: |
|
210 | 211 | n_spaces = 2 |
|
211 | 212 | edge_ch = "|" |
|
212 | 213 | else: |
|
213 | 214 | n_spaces = 3 |
|
214 | 215 | edge_ch = "\\" |
|
215 | 216 | shift_interline.extend(n_spaces * [" "]) |
|
216 | 217 | shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1)) |
|
217 | 218 | |
|
218 | 219 | # draw edges from the current node to its parents |
|
219 | 220 | draw_edges(edges, nodeline, shift_interline) |
|
220 | 221 | |
|
221 | 222 | # lines is the list of all graph lines to print |
|
222 | 223 | lines = [nodeline] |
|
223 | 224 | if add_padding_line: |
|
224 | 225 | lines.append(get_padding_line(node_index, n_columns, edges)) |
|
225 | 226 | lines.append(shift_interline) |
|
226 | 227 | |
|
227 | 228 | # make sure that there are as many graph lines as there are |
|
228 | 229 | # log strings |
|
229 | 230 | while len(node_lines) < len(lines): |
|
230 | 231 | node_lines.append("") |
|
231 | 232 | if len(lines) < len(node_lines): |
|
232 | 233 | extra_interline = ["|", " "] * (n_columns + n_columns_diff) |
|
233 | 234 | while len(lines) < len(node_lines): |
|
234 | 235 | lines.append(extra_interline) |
|
235 | 236 | |
|
236 | 237 | # print lines |
|
237 | 238 | indentation_level = max(n_columns, n_columns + n_columns_diff) |
|
238 | 239 | for (line, logstr) in zip(lines, node_lines): |
|
239 | 240 | ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr) |
|
240 | 241 | ui.write(ln.rstrip() + '\n') |
|
241 | 242 | |
|
242 | 243 | # ... and start over |
|
243 | 244 | prev_node_index = node_index |
|
244 | 245 | prev_n_columns_diff = n_columns_diff |
|
245 | 246 | |
|
246 | 247 | def get_revs(repo, rev_opt): |
|
247 | 248 | if rev_opt: |
|
248 | 249 | revs = revrange(repo, rev_opt) |
|
249 | 250 | return (max(revs), min(revs)) |
|
250 | 251 | else: |
|
251 | 252 | return (len(repo) - 1, 0) |
|
252 | 253 | |
|
253 | 254 | def check_unsupported_flags(opts): |
|
254 | 255 | for op in ["follow", "follow_first", "date", "copies", "keyword", "remove", |
|
255 | 256 | "only_merges", "user", "only_branch", "prune", "newest_first", |
|
256 | 257 | "no_merges", "include", "exclude"]: |
|
257 | 258 | if op in opts and opts[op]: |
|
258 | 259 | raise util.Abort(_("--graph option is incompatible with --%s") % op) |
|
259 | 260 | |
|
260 | 261 | def graphlog(ui, repo, path=None, **opts): |
|
261 | 262 | """show revision history alongside an ASCII revision graph |
|
262 | 263 | |
|
263 | 264 | Print a revision history alongside a revision graph drawn with |
|
264 | 265 | ASCII characters. |
|
265 | 266 | |
|
266 | 267 | Nodes printed as an @ character are parents of the working |
|
267 | 268 | directory. |
|
268 | 269 | """ |
|
269 | 270 | |
|
270 | 271 | check_unsupported_flags(opts) |
|
271 | 272 | limit = cmdutil.loglimit(opts) |
|
272 | 273 | start, stop = get_revs(repo, opts["rev"]) |
|
273 | 274 | stop = max(stop, start - limit + 1) |
|
274 | 275 | if start == nullrev: |
|
275 | 276 | return |
|
276 | 277 | |
|
277 | 278 | if path: |
|
278 | 279 | path = util.canonpath(repo.root, os.getcwd(), path) |
|
279 | 280 | if path: # could be reset in canonpath |
|
280 | 281 | revdag = filerevs(repo, path, start, stop) |
|
281 | 282 | else: |
|
282 | 283 | revdag = revisions(repo, start, stop) |
|
283 | 284 | |
|
284 | 285 | graphdag = graphabledag(ui, repo, revdag, opts) |
|
285 | 286 | ascii(ui, grapher(graphdag)) |
|
286 | 287 | |
|
287 | 288 | def graphrevs(repo, nodes, opts): |
|
288 | 289 | include = set(nodes) |
|
289 | 290 | limit = cmdutil.loglimit(opts) |
|
290 | 291 | count = 0 |
|
291 | 292 | for node in reversed(nodes): |
|
292 | 293 | if count >= limit: |
|
293 | 294 | break |
|
294 | 295 | ctx = repo[node] |
|
295 | 296 | parents = [p.rev() for p in ctx.parents() if p.node() in include] |
|
296 | 297 | parents.sort() |
|
297 | 298 | yield (ctx, parents) |
|
298 | 299 | count += 1 |
|
299 | 300 | |
|
300 | 301 | def graphabledag(ui, repo, revdag, opts): |
|
301 | 302 | showparents = [ctx.node() for ctx in repo[None].parents()] |
|
302 | 303 | displayer = show_changeset(ui, repo, opts, buffered=True) |
|
303 | 304 | for (ctx, parents) in revdag: |
|
304 | 305 | displayer.show(ctx) |
|
305 | 306 | lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1] |
|
306 | 307 | char = ctx.node() in showparents and '@' or 'o' |
|
307 | 308 | yield (ctx.rev(), parents, char, lines) |
|
308 | 309 | |
|
309 | 310 | def goutgoing(ui, repo, dest=None, **opts): |
|
310 | 311 | """show the outgoing changesets alongside an ASCII revision graph |
|
311 | 312 | |
|
312 | 313 | Print the outgoing changesets alongside a revision graph drawn with |
|
313 | 314 | ASCII characters. |
|
314 | 315 | |
|
315 | 316 | Nodes printed as an @ character are parents of the working |
|
316 | 317 | directory. |
|
317 | 318 | """ |
|
318 | 319 | |
|
319 | 320 | check_unsupported_flags(opts) |
|
320 | 321 | dest, revs, checkout = hg.parseurl( |
|
321 | 322 | ui.expandpath(dest or 'default-push', dest or 'default'), |
|
322 | 323 | opts.get('rev')) |
|
323 | 324 | if revs: |
|
324 | 325 | revs = [repo.lookup(rev) for rev in revs] |
|
325 | 326 | other = hg.repository(cmdutil.remoteui(ui, opts), dest) |
|
326 | 327 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) |
|
327 | 328 | o = repo.findoutgoing(other, force=opts.get('force')) |
|
328 | 329 | if not o: |
|
329 | 330 | ui.status(_("no changes found\n")) |
|
330 | 331 | return |
|
331 | 332 | |
|
332 | 333 | o = repo.changelog.nodesbetween(o, revs)[0] |
|
333 | 334 | revdag = graphrevs(repo, o, opts) |
|
334 | 335 | graphdag = graphabledag(ui, repo, revdag, opts) |
|
335 | 336 | ascii(ui, grapher(graphdag)) |
|
336 | 337 | |
|
337 | 338 | def gincoming(ui, repo, source="default", **opts): |
|
338 | 339 | """show the incoming changesets alongside an ASCII revision graph |
|
339 | 340 | |
|
340 | 341 | Print the incoming changesets alongside a revision graph drawn with |
|
341 | 342 | ASCII characters. |
|
342 | 343 | |
|
343 | 344 | Nodes printed as an @ character are parents of the working |
|
344 | 345 | directory. |
|
345 | 346 | """ |
|
346 | 347 | |
|
347 | 348 | check_unsupported_flags(opts) |
|
348 | 349 | source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev')) |
|
349 | 350 | other = hg.repository(cmdutil.remoteui(repo, opts), source) |
|
350 | 351 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) |
|
351 | 352 | if revs: |
|
352 | 353 | revs = [other.lookup(rev) for rev in revs] |
|
353 | 354 | incoming = repo.findincoming(other, heads=revs, force=opts["force"]) |
|
354 | 355 | if not incoming: |
|
355 | 356 | try: |
|
356 | 357 | os.unlink(opts["bundle"]) |
|
357 | 358 | except: |
|
358 | 359 | pass |
|
359 | 360 | ui.status(_("no changes found\n")) |
|
360 | 361 | return |
|
361 | 362 | |
|
362 | 363 | cleanup = None |
|
363 | 364 | try: |
|
364 | 365 | |
|
365 | 366 | fname = opts["bundle"] |
|
366 | 367 | if fname or not other.local(): |
|
367 | 368 | # create a bundle (uncompressed if other repo is not local) |
|
368 | 369 | if revs is None: |
|
369 | 370 | cg = other.changegroup(incoming, "incoming") |
|
370 | 371 | else: |
|
371 | 372 | cg = other.changegroupsubset(incoming, revs, 'incoming') |
|
372 | 373 | bundletype = other.local() and "HG10BZ" or "HG10UN" |
|
373 | 374 | fname = cleanup = changegroup.writebundle(cg, fname, bundletype) |
|
374 | 375 | # keep written bundle? |
|
375 | 376 | if opts["bundle"]: |
|
376 | 377 | cleanup = None |
|
377 | 378 | if not other.local(): |
|
378 | 379 | # use the created uncompressed bundlerepo |
|
379 | 380 | other = bundlerepo.bundlerepository(ui, repo.root, fname) |
|
380 | 381 | |
|
381 | 382 | chlist = other.changelog.nodesbetween(incoming, revs)[0] |
|
382 | 383 | revdag = graphrevs(other, chlist, opts) |
|
383 | 384 | graphdag = graphabledag(ui, repo, revdag, opts) |
|
384 | 385 | ascii(ui, grapher(graphdag)) |
|
385 | 386 | |
|
386 | 387 | finally: |
|
387 | 388 | if hasattr(other, 'close'): |
|
388 | 389 | other.close() |
|
389 | 390 | if cleanup: |
|
390 | 391 | os.unlink(cleanup) |
|
391 | 392 | |
|
392 | 393 | def uisetup(ui): |
|
393 | 394 | '''Initialize the extension.''' |
|
394 | 395 | _wrapcmd(ui, 'log', commands.table, graphlog) |
|
395 | 396 | _wrapcmd(ui, 'incoming', commands.table, gincoming) |
|
396 | 397 | _wrapcmd(ui, 'outgoing', commands.table, goutgoing) |
|
397 | 398 | |
|
398 | 399 | def _wrapcmd(ui, cmd, table, wrapfn): |
|
399 | 400 | '''wrap the command''' |
|
400 | 401 | def graph(orig, *args, **kwargs): |
|
401 | 402 | if kwargs['graph']: |
|
402 | 403 | return wrapfn(*args, **kwargs) |
|
403 | 404 | return orig(*args, **kwargs) |
|
404 | 405 | entry = extensions.wrapcommand(table, cmd, graph) |
|
405 | 406 | entry[1].append(('G', 'graph', None, _("show the revision DAG"))) |
|
406 | 407 | |
|
407 | 408 | cmdtable = { |
|
408 | 409 | "glog": |
|
409 | 410 | (graphlog, |
|
410 | 411 | [('l', 'limit', '', _('limit number of changes displayed')), |
|
411 | 412 | ('p', 'patch', False, _('show patch')), |
|
412 | 413 | ('r', 'rev', [], _('show the specified revision or range')), |
|
413 | 414 | ] + templateopts, |
|
414 | 415 | _('hg glog [OPTION]... [FILE]')), |
|
415 | 416 | } |
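Once the extension is enabled, both the glog command from the table above and the -G/--graph flag that uisetup() adds to log, incoming and outgoing become available. A few illustrative invocations (the revision numbers and URL are arbitrary):

    [extensions]
    hgext.graphlog =

    $ hg glog -l 10
    $ hg log --graph -r 120:100
    $ hg incoming --graph https://hg.example.org/project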
@@ -1,358 +1,359 @@
|
1 | 1 | # Minimal support for git commands on an hg repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | ||
|
7 | 8 | '''browsing the repository in a graphical way |
|
8 | 9 | |
|
9 | 10 | The hgk extension allows browsing the history of a repository in a |
|
10 | 11 | graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not |
|
11 | 12 | distributed with Mercurial.) |
|
12 | 13 | |
|
13 | 14 | hgk consists of two parts: a Tcl script that does the displaying and |
|
14 | 15 | querying of information, and an extension to mercurial named hgk.py, |
|
15 | 16 | which provides hooks for hgk to get information. hgk can be found in |
|
16 | 17 | the contrib directory, and hgk.py can be found in the hgext directory. |
|
17 | 18 | |
|
18 | 19 | To load the hgk.py extension, add it to your .hgrc file (you have to |
|
19 | 20 | use your global $HOME/.hgrc file, not one in a repository). You can |
|
20 | 21 | specify an absolute path: |
|
21 | 22 | |
|
22 | 23 | [extensions] |
|
23 | 24 | hgk=/usr/local/lib/hgk.py |
|
24 | 25 | |
|
25 | 26 | Mercurial can also scan the default python library path for a file |
|
26 | 27 | named 'hgk.py' if you set hgk empty: |
|
27 | 28 | |
|
28 | 29 | [extensions] |
|
29 | 30 | hgk= |
|
30 | 31 | |
|
31 | 32 | The hg view command will launch the hgk Tcl script. For this command |
|
32 | 33 | to work, hgk must be in your search path. Alternately, you can specify |
|
33 | 34 | the path to hgk in your .hgrc file: |
|
34 | 35 | |
|
35 | 36 | [hgk] |
|
36 | 37 | path=/location/of/hgk |
|
37 | 38 | |
|
38 | 39 | hgk can make use of the extdiff extension to visualize revisions. |
|
39 | 40 | Assuming you had already configured extdiff vdiff command, just add: |
|
40 | 41 | |
|
41 | 42 | [hgk] |
|
42 | 43 | vdiff=vdiff |
|
43 | 44 | |
|
44 | 45 | The revisions context menu will now display additional entries to fire |
|
45 | 46 | vdiff on hovered and selected revisions.''' |
|
46 | 47 | |
|
47 | 48 | import os |
|
48 | 49 | from mercurial import commands, util, patch, revlog, cmdutil |
|
49 | 50 | from mercurial.node import nullid, nullrev, short |
|
50 | 51 | from mercurial.i18n import _ |
|
51 | 52 | |
|
52 | 53 | def difftree(ui, repo, node1=None, node2=None, *files, **opts): |
|
53 | 54 | """diff trees from two commits""" |
|
54 | 55 | def __difftree(repo, node1, node2, files=[]): |
|
55 | 56 | assert node2 is not None |
|
56 | 57 | mmap = repo[node1].manifest() |
|
57 | 58 | mmap2 = repo[node2].manifest() |
|
58 | 59 | m = cmdutil.match(repo, files) |
|
59 | 60 | modified, added, removed = repo.status(node1, node2, m)[:3] |
|
60 | 61 | empty = short(nullid) |
|
61 | 62 | |
|
62 | 63 | for f in modified: |
|
63 | 64 | # TODO get file permissions |
|
64 | 65 | ui.write(":100664 100664 %s %s M\t%s\t%s\n" % |
|
65 | 66 | (short(mmap[f]), short(mmap2[f]), f, f)) |
|
66 | 67 | for f in added: |
|
67 | 68 | ui.write(":000000 100664 %s %s N\t%s\t%s\n" % |
|
68 | 69 | (empty, short(mmap2[f]), f, f)) |
|
69 | 70 | for f in removed: |
|
70 | 71 | ui.write(":100664 000000 %s %s D\t%s\t%s\n" % |
|
71 | 72 | (short(mmap[f]), empty, f, f)) |
|
72 | 73 | ## |
|
73 | 74 | |
|
74 | 75 | while True: |
|
75 | 76 | if opts['stdin']: |
|
76 | 77 | try: |
|
77 | 78 | line = raw_input().split(' ') |
|
78 | 79 | node1 = line[0] |
|
79 | 80 | if len(line) > 1: |
|
80 | 81 | node2 = line[1] |
|
81 | 82 | else: |
|
82 | 83 | node2 = None |
|
83 | 84 | except EOFError: |
|
84 | 85 | break |
|
85 | 86 | node1 = repo.lookup(node1) |
|
86 | 87 | if node2: |
|
87 | 88 | node2 = repo.lookup(node2) |
|
88 | 89 | else: |
|
89 | 90 | node2 = node1 |
|
90 | 91 | node1 = repo.changelog.parents(node1)[0] |
|
91 | 92 | if opts['patch']: |
|
92 | 93 | if opts['pretty']: |
|
93 | 94 | catcommit(ui, repo, node2, "") |
|
94 | 95 | m = cmdutil.match(repo, files) |
|
95 | 96 | chunks = patch.diff(repo, node1, node2, match=m, |
|
96 | 97 | opts=patch.diffopts(ui, {'git': True})) |
|
97 | 98 | for chunk in chunks: |
|
98 | 99 | repo.ui.write(chunk) |
|
99 | 100 | else: |
|
100 | 101 | __difftree(repo, node1, node2, files=files) |
|
101 | 102 | if not opts['stdin']: |
|
102 | 103 | break |
|
103 | 104 | |
|
104 | 105 | def catcommit(ui, repo, n, prefix, ctx=None): |
|
105 | 106 | nlprefix = '\n' + prefix; |
|
106 | 107 | if ctx is None: |
|
107 | 108 | ctx = repo[n] |
|
108 | 109 | ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ?? |
|
109 | 110 | for p in ctx.parents(): |
|
110 | 111 | ui.write("parent %s\n" % p) |
|
111 | 112 | |
|
112 | 113 | date = ctx.date() |
|
113 | 114 | description = ctx.description().replace("\0", "") |
|
114 | 115 | lines = description.splitlines() |
|
115 | 116 | if lines and lines[-1].startswith('committer:'): |
|
116 | 117 | committer = lines[-1].split(': ')[1].rstrip() |
|
117 | 118 | else: |
|
118 | 119 | committer = ctx.user() |
|
119 | 120 | |
|
120 | 121 | ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])) |
|
121 | 122 | ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1])) |
|
122 | 123 | ui.write("revision %d\n" % ctx.rev()) |
|
123 | 124 | ui.write("branch %s\n\n" % ctx.branch()) |
|
124 | 125 | |
|
125 | 126 | if prefix != "": |
|
126 | 127 | ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip())) |
|
127 | 128 | else: |
|
128 | 129 | ui.write(description + "\n") |
|
129 | 130 | if prefix: |
|
130 | 131 | ui.write('\0') |
|
131 | 132 | |
|
132 | 133 | def base(ui, repo, node1, node2): |
|
133 | 134 | """output common ancestor information""" |
|
134 | 135 | node1 = repo.lookup(node1) |
|
135 | 136 | node2 = repo.lookup(node2) |
|
136 | 137 | n = repo.changelog.ancestor(node1, node2) |
|
137 | 138 | ui.write(short(n) + "\n") |
|
138 | 139 | |
|
139 | 140 | def catfile(ui, repo, type=None, r=None, **opts): |
|
140 | 141 | """cat a specific revision""" |
|
141 | 142 | # in stdin mode, every line except the commit is prefixed with two |
|
142 | 143 | # spaces. This way the our caller can find the commit without magic |
|
143 | 144 | # strings |
|
144 | 145 | # |
|
145 | 146 | prefix = "" |
|
146 | 147 | if opts['stdin']: |
|
147 | 148 | try: |
|
148 | 149 | (type, r) = raw_input().split(' '); |
|
149 | 150 | prefix = " " |
|
150 | 151 | except EOFError: |
|
151 | 152 | return |
|
152 | 153 | |
|
153 | 154 | else: |
|
154 | 155 | if not type or not r: |
|
155 | 156 | ui.warn(_("cat-file: type or revision not supplied\n")) |
|
156 | 157 | commands.help_(ui, 'cat-file') |
|
157 | 158 | |
|
158 | 159 | while r: |
|
159 | 160 | if type != "commit": |
|
160 | 161 | ui.warn(_("aborting hg cat-file only understands commits\n")) |
|
161 | 162 | return 1
|
162 | 163 | n = repo.lookup(r) |
|
163 | 164 | catcommit(ui, repo, n, prefix) |
|
164 | 165 | if opts['stdin']: |
|
165 | 166 | try: |
|
166 | 167 | (type, r) = raw_input().split(' ')
|
167 | 168 | except EOFError: |
|
168 | 169 | break |
|
169 | 170 | else: |
|
170 | 171 | break |
|
171 | 172 | |
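
A hypothetical invocation of the cat-file command above (illustration only, not part of this change; it assumes the hgk extension is enabled). In --stdin mode each input line is "<type> <rev>", and only the commit type is accepted:

    echo "commit tip" | hg debug-cat-file --stdin
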
|
172 | 173 | # git rev-tree is a confusing thing. You can supply a number of |
|
173 | 174 | # commit sha1s on the command line, and it walks the commit history |
|
174 | 175 | # telling you which commits are reachable from the supplied ones via |
|
175 | 176 | # a bitmask based on arg position. |
|
176 | 177 | # You can specify a commit to stop at by prefixing its sha1 with '^'
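
As a plain-Python illustration of the bitmask convention described in the comment above (a standalone sketch, not part of this change): each positional argument owns one bit, and a commit's mask is the OR of the bits of the arguments that can reach it.

    # Minimal sketch: bit i of the mask is set when the node is reachable
    # from the i-th command line argument.
    def mask_for(node, reachable):
        mask = 0
        for i in xrange(len(reachable)):
            if node in reachable[i]:
                mask |= 1 << i
        return mask

    # With two arguments, a node reachable only from the second one gets
    # mask 0b10 == 2.
    print mask_for('b', [set(['a']), set(['a', 'b'])])  # prints 2
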
|
177 | 178 | def revtree(ui, args, repo, full="tree", maxnr=0, parents=False): |
|
178 | 179 | def chlogwalk(): |
|
179 | 180 | count = len(repo) |
|
180 | 181 | i = count |
|
181 | 182 | l = [0] * 100 |
|
182 | 183 | chunk = 100 |
|
183 | 184 | while True: |
|
184 | 185 | if chunk > i: |
|
185 | 186 | chunk = i |
|
186 | 187 | i = 0 |
|
187 | 188 | else: |
|
188 | 189 | i -= chunk |
|
189 | 190 | |
|
190 | 191 | for x in xrange(0, chunk): |
|
191 | 192 | if i + x >= count: |
|
192 | 193 | l[chunk - x:] = [0] * (chunk - x) |
|
193 | 194 | break |
|
194 | 195 | if full != None: |
|
195 | 196 | l[x] = repo[i + x] |
|
196 | 197 | l[x].changeset() # force reading |
|
197 | 198 | else: |
|
198 | 199 | l[x] = 1 |
|
199 | 200 | for x in xrange(chunk-1, -1, -1): |
|
200 | 201 | if l[x] != 0: |
|
201 | 202 | yield (i + x, full != None and l[x] or None) |
|
202 | 203 | if i == 0: |
|
203 | 204 | break |
|
204 | 205 | |
|
205 | 206 | # calculate and return the reachability bitmask for sha |
|
206 | 207 | def is_reachable(ar, reachable, sha): |
|
207 | 208 | if len(ar) == 0: |
|
208 | 209 | return 1 |
|
209 | 210 | mask = 0 |
|
210 | 211 | for i in xrange(len(ar)): |
|
211 | 212 | if sha in reachable[i]: |
|
212 | 213 | mask |= 1 << i |
|
213 | 214 | |
|
214 | 215 | return mask |
|
215 | 216 | |
|
216 | 217 | reachable = [] |
|
217 | 218 | stop_sha1 = [] |
|
218 | 219 | want_sha1 = [] |
|
219 | 220 | count = 0 |
|
220 | 221 | |
|
221 | 222 | # figure out which commits they are asking for and which ones they |
|
222 | 223 | # want us to stop on |
|
223 | 224 | for i in xrange(len(args)): |
|
224 | 225 | if args[i].startswith('^'): |
|
225 | 226 | s = repo.lookup(args[i][1:]) |
|
226 | 227 | stop_sha1.append(s) |
|
227 | 228 | want_sha1.append(s) |
|
228 | 229 | elif args[i] != 'HEAD': |
|
229 | 230 | want_sha1.append(repo.lookup(args[i])) |
|
230 | 231 | |
|
231 | 232 | # calculate the graph for the supplied commits |
|
232 | 233 | for i in xrange(len(want_sha1)): |
|
233 | 234 | reachable.append({})

234 | 235 | n = want_sha1[i]

235 | 236 | visit = [n]
|
236 | 237 | reachable[i][n] = 1 |
|
237 | 238 | while visit: |
|
238 | 239 | n = visit.pop(0) |
|
239 | 240 | if n in stop_sha1: |
|
240 | 241 | continue |
|
241 | 242 | for p in repo.changelog.parents(n): |
|
242 | 243 | if p not in reachable[i]: |
|
243 | 244 | reachable[i][p] = 1 |
|
244 | 245 | visit.append(p) |
|
245 | 246 | if p in stop_sha1: |
|
246 | 247 | continue |
|
247 | 248 | |
|
248 | 249 | # walk the repository looking for commits that are in our |
|
249 | 250 | # reachability graph |
|
250 | 251 | for i, ctx in chlogwalk(): |
|
251 | 252 | n = repo.changelog.node(i) |
|
252 | 253 | mask = is_reachable(want_sha1, reachable, n) |
|
253 | 254 | if mask: |
|
254 | 255 | parentstr = "" |
|
255 | 256 | if parents: |
|
256 | 257 | pp = repo.changelog.parents(n) |
|
257 | 258 | if pp[0] != nullid: |
|
258 | 259 | parentstr += " " + short(pp[0]) |
|
259 | 260 | if pp[1] != nullid: |
|
260 | 261 | parentstr += " " + short(pp[1]) |
|
261 | 262 | if not full: |
|
262 | 263 | ui.write("%s%s\n" % (short(n), parentstr)) |
|
263 | 264 | elif full == "commit": |
|
264 | 265 | ui.write("%s%s\n" % (short(n), parentstr)) |
|
265 | 266 | catcommit(ui, repo, n, ' ', ctx) |
|
266 | 267 | else: |
|
267 | 268 | (p1, p2) = repo.changelog.parents(n) |
|
268 | 269 | (h, h1, h2) = map(short, (n, p1, p2)) |
|
269 | 270 | (i1, i2) = map(repo.changelog.rev, (p1, p2)) |
|
270 | 271 | |
|
271 | 272 | date = ctx.date()[0] |
|
272 | 273 | ui.write("%s %s:%s" % (date, h, mask)) |
|
273 | 274 | mask = is_reachable(want_sha1, reachable, p1) |
|
274 | 275 | if i1 != nullrev and mask > 0: |
|
275 | 276 | ui.write("%s:%s " % (h1, mask))
|
276 | 277 | mask = is_reachable(want_sha1, reachable, p2) |
|
277 | 278 | if i2 != nullrev and mask > 0: |
|
278 | 279 | ui.write("%s:%s " % (h2, mask)) |
|
279 | 280 | ui.write("\n") |
|
280 | 281 | if maxnr and count >= maxnr: |
|
281 | 282 | break |
|
282 | 283 | count += 1 |
|
283 | 284 | |
|
284 | 285 | def revparse(ui, repo, *revs, **opts): |
|
285 | 286 | """parse given revisions""" |
|
286 | 287 | def revstr(rev): |
|
287 | 288 | if rev == 'HEAD': |
|
288 | 289 | rev = 'tip' |
|
289 | 290 | return revlog.hex(repo.lookup(rev)) |
|
290 | 291 | |
|
291 | 292 | for r in revs: |
|
292 | 293 | revrange = r.split(':', 1) |
|
293 | 294 | ui.write('%s\n' % revstr(revrange[0])) |
|
294 | 295 | if len(revrange) == 2: |
|
295 | 296 | ui.write('^%s\n' % revstr(revrange[1])) |
|
296 | 297 | |
|
297 | 298 | # git rev-list tries to order things by date, and has the ability to stop |
|
298 | 299 | # at a given commit without walking the whole repo. TODO add the stop |
|
299 | 300 | # parameter |
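
A hypothetical invocation of the rev-list command defined below (STOPREV is a placeholder for whatever revision's ancestors you want to exclude):

    hg debug-rev-list --header --parents -n 10 tip ^STOPREV
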
|
300 | 301 | def revlist(ui, repo, *revs, **opts): |
|
301 | 302 | """print revisions""" |
|
302 | 303 | if opts['header']: |
|
303 | 304 | full = "commit" |
|
304 | 305 | else: |
|
305 | 306 | full = None |
|
306 | 307 | copy = [x for x in revs] |
|
307 | 308 | revtree(ui, copy, repo, full, opts['max_count'], opts['parents']) |
|
308 | 309 | |
|
309 | 310 | def config(ui, repo, **opts): |
|
310 | 311 | """print extension options""" |
|
311 | 312 | def writeopt(name, value): |
|
312 | 313 | ui.write('k=%s\nv=%s\n' % (name, value)) |
|
313 | 314 | |
|
314 | 315 | writeopt('vdiff', ui.config('hgk', 'vdiff', '')) |
|
315 | 316 | |
|
316 | 317 | |
|
317 | 318 | def view(ui, repo, *etc, **opts): |
|
318 | 319 | "start interactive history viewer" |
|
319 | 320 | os.chdir(repo.root) |
|
320 | 321 | optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v]) |
|
321 | 322 | cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc)) |
|
322 | 323 | ui.debug(_("running %s\n") % cmd) |
|
323 | 324 | util.system(cmd) |
|
324 | 325 | |
|
325 | 326 | cmdtable = { |
|
326 | 327 | "^view": |
|
327 | 328 | (view, |
|
328 | 329 | [('l', 'limit', '', _('limit number of changes displayed'))], |
|
329 | 330 | _('hg view [-l LIMIT] [REVRANGE]')), |
|
330 | 331 | "debug-diff-tree": |
|
331 | 332 | (difftree, |
|
332 | 333 | [('p', 'patch', None, _('generate patch')), |
|
333 | 334 | ('r', 'recursive', None, _('recursive')), |
|
334 | 335 | ('P', 'pretty', None, _('pretty')), |
|
335 | 336 | ('s', 'stdin', None, _('stdin')), |
|
336 | 337 | ('C', 'copy', None, _('detect copies')), |
|
337 | 338 | ('S', 'search', "", _('search'))], |
|
338 | 339 | _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')), |
|
339 | 340 | "debug-cat-file": |
|
340 | 341 | (catfile, |
|
341 | 342 | [('s', 'stdin', None, _('stdin'))], |
|
342 | 343 | _('hg debug-cat-file [OPTION]... TYPE FILE')), |
|
343 | 344 | "debug-config": |
|
344 | 345 | (config, [], _('hg debug-config')), |
|
345 | 346 | "debug-merge-base": |
|
346 | 347 | (base, [], _('hg debug-merge-base node node')), |
|
347 | 348 | "debug-rev-parse": |
|
348 | 349 | (revparse, |
|
349 | 350 | [('', 'default', '', _('ignored'))], |
|
350 | 351 | _('hg debug-rev-parse REV')), |
|
351 | 352 | "debug-rev-list": |
|
352 | 353 | (revlist, |
|
353 | 354 | [('H', 'header', None, _('header')), |
|
354 | 355 | ('t', 'topo-order', None, _('topo-order')), |
|
355 | 356 | ('p', 'parents', None, _('parents')), |
|
356 | 357 | ('n', 'max-count', 0, _('max-count'))], |
|
357 | 358 | _('hg debug-rev-list [options] revs')), |
|
358 | 359 | } |
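
For context, the 'hgk.path' and 'hgk.vdiff' options read by view() and config() above correspond to an hgrc section along these lines (a sketch; both values are hypothetical examples rather than defaults mandated by this change):

    [extensions]
    hgext.hgk =

    [hgk]
    path = /usr/local/bin/hgk
    vdiff = vdiff
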
@@ -1,96 +1,97 | |||
|
1 | 1 | # Mercurial extension to make it easy to refer to the parent of a revision |
|
2 | 2 | # |
|
3 | 3 | # Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | ||
|
7 | 8 | '''\ |
|
8 | 9 | use suffixes to refer to ancestor revisions |
|
9 | 10 | |
|
10 | 11 | This extension allows you to use git-style suffixes to refer to the |
|
11 | 12 | ancestors of a specific revision. |
|
12 | 13 | |
|
13 | 14 | For example, if you can refer to a revision as "foo", then: |
|
14 | 15 | |
|
15 | 16 | - foo^N = Nth parent of foo: |
|
16 | 17 | foo^0 = foo |
|
17 | 18 | foo^1 = first parent of foo |
|
18 | 19 | foo^2 = second parent of foo |
|
19 | 20 | foo^ = foo^1 |
|
20 | 21 | |
|
21 | 22 | - foo~N = Nth first grandparent of foo |
|
22 | 23 | foo~0 = foo |
|
23 | 24 | foo~1 = foo^1 = foo^ = first parent of foo |
|
24 | 25 | foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo |
|
25 | 26 | ''' |
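
A couple of hypothetical command lines using the suffix syntax described above (assuming the extension is enabled under [extensions] and the quoted revisions exist; MERGEREV is a placeholder for some merge changeset):

    hg log -r 'tip~2'                     # first parent of the first parent of tip
    hg diff -r 'MERGEREV^2' -r MERGEREV   # compare against the second parent
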
|
26 | 27 | from mercurial import error |
|
27 | 28 | |
|
28 | 29 | def reposetup(ui, repo): |
|
29 | 30 | if not repo.local(): |
|
30 | 31 | return |
|
31 | 32 | |
|
32 | 33 | class parentrevspecrepo(repo.__class__): |
|
33 | 34 | def lookup(self, key): |
|
34 | 35 | try: |
|
35 | 36 | _super = super(parentrevspecrepo, self) |
|
36 | 37 | return _super.lookup(key) |
|
37 | 38 | except error.RepoError: |
|
38 | 39 | pass |
|
39 | 40 | |
|
40 | 41 | circ = key.find('^') |
|
41 | 42 | tilde = key.find('~') |
|
42 | 43 | if circ < 0 and tilde < 0: |
|
43 | 44 | raise |
|
44 | 45 | elif circ >= 0 and tilde >= 0: |
|
45 | 46 | end = min(circ, tilde) |
|
46 | 47 | else: |
|
47 | 48 | end = max(circ, tilde) |
|
48 | 49 | |
|
49 | 50 | cl = self.changelog |
|
50 | 51 | base = key[:end] |
|
51 | 52 | try: |
|
52 | 53 | node = _super.lookup(base) |
|
53 | 54 | except error.RepoError: |
|
54 | 55 | # eek - reraise the first error |
|
55 | 56 | return _super.lookup(key) |
|
56 | 57 | |
|
57 | 58 | rev = cl.rev(node) |
|
58 | 59 | suffix = key[end:] |
|
59 | 60 | i = 0 |
|
60 | 61 | while i < len(suffix): |
|
61 | 62 | # foo^N => Nth parent of foo |
|
62 | 63 | # foo^0 == foo |
|
63 | 64 | # foo^1 == foo^ == 1st parent of foo |
|
64 | 65 | # foo^2 == 2nd parent of foo |
|
65 | 66 | if suffix[i] == '^': |
|
66 | 67 | j = i + 1 |
|
67 | 68 | p = cl.parentrevs(rev) |
|
68 | 69 | if j < len(suffix) and suffix[j].isdigit(): |
|
69 | 70 | j += 1 |
|
70 | 71 | n = int(suffix[i+1:j]) |
|
71 | 72 | if n > 2 or n == 2 and p[1] == -1: |
|
72 | 73 | raise |
|
73 | 74 | else: |
|
74 | 75 | n = 1 |
|
75 | 76 | if n: |
|
76 | 77 | rev = p[n - 1] |
|
77 | 78 | i = j |
|
78 | 79 | # foo~N => Nth first grandparent of foo |
|
79 | 80 | # foo~0 = foo |
|
80 | 81 | # foo~1 = foo^1 == foo^ == 1st parent of foo |
|
81 | 82 | # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo |
|
82 | 83 | elif suffix[i] == '~': |
|
83 | 84 | j = i + 1 |
|
84 | 85 | while j < len(suffix) and suffix[j].isdigit(): |
|
85 | 86 | j += 1 |
|
86 | 87 | if j == i + 1: |
|
87 | 88 | raise |
|
88 | 89 | n = int(suffix[i+1:j]) |
|
89 | 90 | for k in xrange(n): |
|
90 | 91 | rev = cl.parentrevs(rev)[0] |
|
91 | 92 | i = j |
|
92 | 93 | else: |
|
93 | 94 | raise |
|
94 | 95 | return cl.node(rev) |
|
95 | 96 | |
|
96 | 97 | repo.__class__ = parentrevspecrepo |
@@ -1,125 +1,126 | |||
|
1 | 1 | # win32mbcs.py -- MBCS filename support for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # Version: 0.2 |
|
6 | 6 | # Author: Shun-ichi Goto <shunichi.goto@gmail.com> |
|
7 | 7 | # |
|
8 | 8 | # This software may be used and distributed according to the terms of the |
|
9 | 9 | # GNU General Public License version 2, incorporated herein by reference. |
|
10 | 10 | # |
|
11 | ||
|
11 | 12 | """allow the use of MBCS paths with problematic encodings.
|
12 | 13 | |
|
13 | 14 | Some MBCS encodings do not work well for some path operations (e.g.

14 | 15 | path splitting, case conversion, etc.) on their encoded bytes. We call

15 | 16 | such an encoding (e.g. shift_jis and big5) a "problematic encoding".

16 | 17 | This extension can be used to fix the issue with those encodings by

17 | 18 | wrapping some functions to convert arguments to unicode strings before

18 | 19 | path operations.
|
19 | 20 | |
|
20 | 21 | This extension is useful for:
|
21 | 22 | * Japanese Windows users using shift_jis encoding. |
|
22 | 23 | * Chinese Windows users using big5 encoding. |
|
23 | 24 | * All users who use a repository with one of the problematic encodings on

24 | 25 | a case-insensitive file system.
|
25 | 26 | |
|
26 | 27 | This extension is not needed for: |
|
27 | 28 | * Any user who uses only ASCII characters in paths.

28 | 29 | * Any user who does not use any of the problematic encodings.
|
29 | 30 | |
|
30 | 31 | Note that there are some limitations on using this extension: |
|
31 | 32 | * You should use a single encoding in one repository.

32 | 33 | * You should set the same encoding for the repository via locale or
|
33 | 34 | HGENCODING. |
|
34 | 35 | |
|
35 | 36 | To use this extension, enable it in .hg/hgrc or ~/.hgrc:
|
36 | 37 | |
|
37 | 38 | [extensions] |
|
38 | 39 | hgext.win32mbcs = |
|
39 | 40 | |
|
40 | 41 | Path encoding conversions are done between unicode and

41 | 42 | encoding.encoding, which is determined by Mercurial from the current

42 | 43 | locale setting or HGENCODING.
|
43 | 44 | |
|
44 | 45 | """ |
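
To make the "problematic encoding" issue concrete, here is a standalone Python 2 sketch (not part of this extension): the second byte of some shift_jis characters equals the backslash path separator, so byte-oriented splitting cuts a character in half, while the unicode form splits correctly.

    # Standalone illustration; ntpath is used so the example behaves the same
    # on any platform.
    import ntpath

    name = u'\u30bd\u30d5\u30c8'          # three katakana characters
    raw = name.encode('shift_jis')        # '\x83\x5c\x83\x74\x83\x67'
    print ntpath.split('dir\\' + raw)     # wrong: first character split across head/tail
    print ntpath.split(u'dir\\' + name)   # right: (u'dir', u'\u30bd\u30d5\u30c8')
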
|
45 | 46 | |
|
46 | 47 | import os |
|
47 | 48 | from mercurial.i18n import _ |
|
48 | 49 | from mercurial import util, encoding |
|
49 | 50 | |
|
50 | 51 | def decode(arg): |
|
51 | 52 | if isinstance(arg, str): |
|
52 | 53 | uarg = arg.decode(encoding.encoding) |
|
53 | 54 | if arg == uarg.encode(encoding.encoding): |
|
54 | 55 | return uarg |
|
55 | 56 | raise UnicodeError("Not local encoding") |
|
56 | 57 | elif isinstance(arg, tuple): |
|
57 | 58 | return tuple(map(decode, arg)) |
|
58 | 59 | elif isinstance(arg, list): |
|
59 | 60 | return map(decode, arg) |
|
60 | 61 | return arg |
|
61 | 62 | |
|
62 | 63 | def encode(arg): |
|
63 | 64 | if isinstance(arg, unicode): |
|
64 | 65 | return arg.encode(encoding.encoding) |
|
65 | 66 | elif isinstance(arg, tuple): |
|
66 | 67 | return tuple(map(encode, arg)) |
|
67 | 68 | elif isinstance(arg, list): |
|
68 | 69 | return map(encode, arg) |
|
69 | 70 | return arg |
|
70 | 71 | |
|
71 | 72 | def wrapper(func, args): |
|
72 | 73 | # check argument is unicode, then call original |
|
73 | 74 | for arg in args: |
|
74 | 75 | if isinstance(arg, unicode): |
|
75 | 76 | return func(*args) |
|
76 | 77 | |
|
77 | 78 | try: |
|
78 | 79 | # convert arguments to unicode, call func, then convert back |
|
79 | 80 | return encode(func(*decode(args))) |
|
80 | 81 | except UnicodeError: |
|
81 | 82 | # If the argument is not encoded with encoding.encoding, report it

82 | 83 | # and abort instead of calling the original function.
|
83 | 84 | raise util.Abort(_("[win32mbcs] filename conversion failed with"
|
84 | 85 | " %s encoding\n") % (encoding.encoding)) |
|
85 | 86 | |
|
86 | 87 | def wrapname(name): |
|
87 | 88 | idx = name.rfind('.') |
|
88 | 89 | module = name[:idx] |
|
89 | 90 | name = name[idx+1:] |
|
90 | 91 | module = eval(module) |
|
91 | 92 | func = getattr(module, name) |
|
92 | 93 | def f(*args): |
|
93 | 94 | return wrapper(func, args) |
|
94 | 95 | try: |
|
95 | 96 | f.__name__ = func.__name__ # fails with Python 2.3
|
96 | 97 | except Exception: |
|
97 | 98 | pass |
|
98 | 99 | setattr(module, name, f) |
|
99 | 100 | |
|
100 | 101 | # List of functions to be wrapped. |
|
101 | 102 | # NOTE: os.path.dirname() and os.path.basename() are safe because |
|
102 | 103 | # they use the result of os.path.split()
|
103 | 104 | funcs = '''os.path.join os.path.split os.path.splitext |
|
104 | 105 | os.path.splitunc os.path.normpath os.path.normcase os.makedirs |
|
105 | 106 | util.endswithsep util.splitpath util.checkcase util.fspath''' |
|
106 | 107 | |
|
107 | 108 | # codec and alias names of sjis and big5 to be faked. |
|
108 | 109 | problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs |
|
109 | 110 | hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis |
|
110 | 111 | sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004 |
|
111 | 112 | shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213''' |
|
112 | 113 | |
|
113 | 114 | def reposetup(ui, repo): |
|
114 | 115 | # TODO: decide use of config section for this extension |
|
115 | 116 | if not os.path.supports_unicode_filenames: |
|
116 | 117 | ui.warn(_("[win32mbcs] cannot activate on this platform.\n")) |
|
117 | 118 | return |
|
118 | 119 | |
|
119 | 120 | # wrap the functions only in the relevant (problematic) environment.
|
120 | 121 | if encoding.encoding.lower() in problematic_encodings.split(): |
|
121 | 122 | for f in funcs.split(): |
|
122 | 123 | wrapname(f) |
|
123 | 124 | ui.debug(_("[win32mbcs] activated with encoding: %s\n") |
|
124 | 125 | % encoding.encoding) |
|
125 | 126 |
@@ -1,144 +1,145 | |||
|
1 | 1 | # changelog bisection for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2007 Matt Mackall |
|
4 | 4 | # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> |
|
5 | # | |
|
5 | 6 | # Inspired by git bisect, extension skeleton taken from mq.py. |
|
6 | 7 | # |
|
7 | 8 | # This software may be used and distributed according to the terms of the |
|
8 | 9 | # GNU General Public License version 2, incorporated herein by reference. |
|
9 | 10 | |
|
10 | 11 | import os |
|
11 | 12 | from i18n import _ |
|
12 | 13 | from node import short, hex |
|
13 | 14 | import util |
|
14 | 15 | |
|
15 | 16 | def bisect(changelog, state): |
|
16 | 17 | """find the next node (if any) for testing during a bisect search. |
|
17 | 18 | returns a (nodes, number, good) tuple. |
|
18 | 19 | |
|
19 | 20 | 'nodes' is the final result of the bisect if 'number' is 0. |
|
20 | 21 | Otherwise 'number' indicates the remaining possible candidates for |
|
21 | 22 | the search and 'nodes' contains the next bisect target. |
|
22 | 23 | 'good' is True if bisect is searching for a first good changeset, False |
|
23 | 24 | if searching for a first bad one. |
|
24 | 25 | """ |
|
25 | 26 | |
|
26 | 27 | clparents = changelog.parentrevs |
|
27 | 28 | skip = set([changelog.rev(n) for n in state['skip']]) |
|
28 | 29 | |
|
29 | 30 | def buildancestors(bad, good): |
|
30 | 31 | # only the earliest bad revision matters |
|
31 | 32 | badrev = min([changelog.rev(n) for n in bad]) |
|
32 | 33 | goodrevs = [changelog.rev(n) for n in good] |
|
33 | 34 | # build ancestors array |
|
34 | 35 | ancestors = [[]] * (len(changelog) + 1) # an extra for [-1] |
|
35 | 36 | |
|
36 | 37 | # clear good revs from array |
|
37 | 38 | for node in goodrevs: |
|
38 | 39 | ancestors[node] = None |
|
39 | 40 | for rev in xrange(len(changelog), -1, -1): |
|
40 | 41 | if ancestors[rev] is None: |
|
41 | 42 | for prev in clparents(rev): |
|
42 | 43 | ancestors[prev] = None |
|
43 | 44 | |
|
44 | 45 | if ancestors[badrev] is None: |
|
45 | 46 | return badrev, None |
|
46 | 47 | return badrev, ancestors |
|
47 | 48 | |
|
48 | 49 | good = 0 |
|
49 | 50 | badrev, ancestors = buildancestors(state['bad'], state['good']) |
|
50 | 51 | if not ancestors: # looking for bad to good transition? |
|
51 | 52 | good = 1 |
|
52 | 53 | badrev, ancestors = buildancestors(state['good'], state['bad']) |
|
53 | 54 | bad = changelog.node(badrev) |
|
54 | 55 | if not ancestors: # now we're confused |
|
55 | 56 | raise util.Abort(_("Inconsistent state, %s:%s is good and bad") |
|
56 | 57 | % (badrev, short(bad))) |
|
57 | 58 | |
|
58 | 59 | # build children dict |
|
59 | 60 | children = {} |
|
60 | 61 | visit = [badrev] |
|
61 | 62 | candidates = [] |
|
62 | 63 | while visit: |
|
63 | 64 | rev = visit.pop(0) |
|
64 | 65 | if ancestors[rev] == []: |
|
65 | 66 | candidates.append(rev) |
|
66 | 67 | for prev in clparents(rev): |
|
67 | 68 | if prev != -1: |
|
68 | 69 | if prev in children: |
|
69 | 70 | children[prev].append(rev) |
|
70 | 71 | else: |
|
71 | 72 | children[prev] = [rev] |
|
72 | 73 | visit.append(prev) |
|
73 | 74 | |
|
74 | 75 | candidates.sort() |
|
75 | 76 | # have we narrowed it down to one entry? |
|
76 | 77 | # or have all other possible candidates besides 'bad' been skipped?
|
77 | 78 | tot = len(candidates) |
|
78 | 79 | unskipped = [c for c in candidates if (c not in skip) and (c != badrev)] |
|
79 | 80 | if tot == 1 or not unskipped: |
|
80 | 81 | return ([changelog.node(rev) for rev in candidates], 0, good) |
|
81 | 82 | perfect = tot // 2 |
|
82 | 83 | |
|
83 | 84 | # find the best node to test |
|
84 | 85 | best_rev = None |
|
85 | 86 | best_len = -1 |
|
86 | 87 | poison = {} |
|
87 | 88 | for rev in candidates: |
|
88 | 89 | if rev in poison: |
|
89 | 90 | for c in children.get(rev, []): |
|
90 | 91 | poison[c] = True # poison children |
|
91 | 92 | continue |
|
92 | 93 | |
|
93 | 94 | a = ancestors[rev] or [rev] |
|
94 | 95 | ancestors[rev] = None |
|
95 | 96 | |
|
96 | 97 | x = len(a) # number of ancestors |
|
97 | 98 | y = tot - x # number of non-ancestors |
|
98 | 99 | value = min(x, y) # how good is this test? |
|
99 | 100 | if value > best_len and rev not in skip: |
|
100 | 101 | best_len = value |
|
101 | 102 | best_rev = rev |
|
102 | 103 | if value == perfect: # found a perfect candidate? quit early |
|
103 | 104 | break |
|
104 | 105 | |
|
105 | 106 | if y < perfect and rev not in skip: # all downhill from here? |
|
106 | 107 | for c in children.get(rev, []): |
|
107 | 108 | poison[c] = True # poison children |
|
108 | 109 | continue |
|
109 | 110 | |
|
110 | 111 | for c in children.get(rev, []): |
|
111 | 112 | if ancestors[c]: |
|
112 | 113 | ancestors[c] = list(set(ancestors[c] + a)) |
|
113 | 114 | else: |
|
114 | 115 | ancestors[c] = a + [c] |
|
115 | 116 | |
|
116 | 117 | assert best_rev is not None |
|
117 | 118 | best_node = changelog.node(best_rev) |
|
118 | 119 | |
|
119 | 120 | return ([best_node], tot, good) |
|
120 | 121 | |
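
A rough driver sketch for the bisect() function above (hypothetical, not part of this change; it assumes the current directory is a repository with a bisect session in progress and uses load_state() from just below):

    # Hypothetical driver for bisect() -- illustration only.
    from mercurial import hg, ui as uimod, hbisect
    from mercurial.node import short

    repo = hg.repository(uimod.ui(), '.')      # assumes cwd is the repository root
    state = hbisect.load_state(repo)           # reads .hg/bisect.state (see below)
    nodes, remaining, good = hbisect.bisect(repo.changelog, state)
    if remaining == 0:
        print "result:", ", ".join(short(n) for n in nodes)
    else:
        print "test %s next (%d candidates remaining)" % (short(nodes[0]), remaining)
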
|
121 | 122 | |
|
122 | 123 | def load_state(repo): |
|
123 | 124 | state = {'good': [], 'bad': [], 'skip': []} |
|
124 | 125 | if os.path.exists(repo.join("bisect.state")): |
|
125 | 126 | for l in repo.opener("bisect.state"): |
|
126 | 127 | kind, node = l[:-1].split() |
|
127 | 128 | node = repo.lookup(node) |
|
128 | 129 | if kind not in state: |
|
129 | 130 | raise util.Abort(_("unknown bisect kind %s") % kind) |
|
130 | 131 | state[kind].append(node) |
|
131 | 132 | return state |
|
132 | 133 | |
|
133 | 134 | |
|
134 | 135 | def save_state(repo, state): |
|
135 | 136 | f = repo.opener("bisect.state", "w", atomictemp=True) |
|
136 | 137 | wlock = repo.wlock() |
|
137 | 138 | try: |
|
138 | 139 | for kind in state: |
|
139 | 140 | for node in state[kind]: |
|
140 | 141 | f.write("%s %s\n" % (kind, hex(node))) |
|
141 | 142 | f.rename() |
|
142 | 143 | finally: |
|
143 | 144 | wlock.release() |
|
144 | 145 |