@@ -1,161 +1,161 @@
 # churn.py - create a graph of revisions count grouped by template
 #
 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 '''command to show certain statistics about revision history'''

 from mercurial.i18n import _
 from mercurial import patch, cmdutil, util, templater
-import
+import sys
 import time, datetime

 def maketemplater(ui, repo, tmpl):
     tmpl = templater.parsestring(tmpl, quoted=False)
     try:
         t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
     except SyntaxError, inst:
         raise util.Abort(inst.args[0])
     t.use_template(tmpl)
     return t

 def changedlines(ui, repo, ctx1, ctx2):
     lines = 0
     diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node()))
     for l in diff.split('\n'):
         if (l.startswith("+") and not l.startswith("+++ ") or
             l.startswith("-") and not l.startswith("--- ")):
             lines += 1
     return lines

 def countrate(ui, repo, amap, *pats, **opts):
     """Calculate stats"""
     if opts.get('dateformat'):
         def getkey(ctx):
             t, tz = ctx.date()
             date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
             return date.strftime(opts['dateformat'])
     else:
         tmpl = opts.get('template', '{author|email}')
         tmpl = maketemplater(ui, repo, tmpl)
         def getkey(ctx):
             ui.pushbuffer()
             tmpl.show(ctx)
             return ui.popbuffer()

     count = pct = 0
     rate = {}
     df = False
     if opts.get('date'):
         df = util.matchdate(opts['date'])

     get = util.cachefunc(lambda r: repo[r].changeset())
     changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
     for st, rev, fns in changeiter:
         if not st == 'add':
             continue
         if df and not df(get(rev)[2][0]): # doesn't match date format
             continue

         ctx = repo[rev]
         key = getkey(ctx)
         key = amap.get(key, key) # alias remap
         if opts.get('changesets'):
             rate[key] = rate.get(key, 0) + 1
         else:
             parents = ctx.parents()
             if len(parents) > 1:
                 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
                 continue

             ctx1 = parents[0]
             lines = changedlines(ui, repo, ctx1, ctx)
             rate[key] = rate.get(key, 0) + lines

         if opts.get('progress'):
             count += 1
             newpct = int(100.0 * count / max(len(repo), 1))
             if pct < newpct:
                 pct = newpct
                 ui.write(_("\rgenerating stats: %d%%") % pct)
                 sys.stdout.flush()

     if opts.get('progress'):
         ui.write("\r")
         sys.stdout.flush()

     return rate


 def churn(ui, repo, *pats, **opts):
     '''graph count of revisions grouped by template

     Will graph count of changed lines or revisions grouped by template or
     alternatively by date, if dateformat is used. In this case it will override
     template.

     By default statistics are counted for number of changed lines.

     Examples:

       # display count of changed lines for every committer
       hg churn -t '{author|email}'

       # display daily activity graph
       hg churn -f '%H' -s -c

       # display activity of developers by month
       hg churn -f '%Y-%m' -s -c

       # display count of lines changed in every year
       hg churn -f '%Y' -s

     The map file format used to specify aliases is fairly simple:

     <alias email> <actual email>'''
     def pad(s, l):
         return (s + " " * l)[:l]

     amap = {}
     aliases = opts.get('aliases')
     if aliases:
         for l in open(aliases, "r"):
             l = l.strip()
             alias, actual = l.split()
             amap[alias] = actual

     rate = countrate(ui, repo, amap, *pats, **opts).items()
     if not rate:
         return

     sortfn = ((not opts.get('sort')) and (lambda a, b: cmp(b[1], a[1])) or None)
     rate.sort(sortfn)

     maxcount = float(max([v for k, v in rate]))
     maxname = max([len(k) for k, v in rate])

     ttywidth = util.termwidth()
     ui.debug(_("assuming %i character terminal\n") % ttywidth)
     width = ttywidth - maxname - 2 - 6 - 2 - 2

     for date, count in rate:
         print "%s %6d %s" % (pad(date, maxname), count,
                              "*" * int(count * width / maxcount))


 cmdtable = {
     "churn":
         (churn,
          [('r', 'rev', [], _('count rate for the specified revision or range')),
           ('d', 'date', '', _('count rate for revs matching date spec')),
           ('t', 'template', '{author|email}', _('template to group changesets')),
           ('f', 'dateformat', '',
            _('strftime-compatible format for grouping by date')),
           ('c', 'changesets', False, _('count rate by number of changesets')),
           ('s', 'sort', False, _('sort by key (default: sort by count)')),
           ('', 'aliases', '', _('file with email aliases')),
           ('', 'progress', None, _('show progress'))],
          _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")),
 }
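
The alias map read by churn's --aliases option is only sketched in the docstring above (`<alias email> <actual email>`). Since the loader does a bare `alias, actual = l.split()`, each non-empty line must contain exactly two whitespace-separated tokens. A minimal illustrative map and invocation; the file name and addresses here are invented for the example:

    bob@localhost bob@example.com
    jrh@devbox jrandom@example.com

    $ hg churn --aliases aliases.txt -t '{author|email}'
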
@@ -1,250 +1,251 @@
 # convert.py Foreign SCM converter
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 '''converting foreign VCS repositories to Mercurial'''

 import convcmd
 import cvsps
+import subversion
 from mercurial import commands
 from mercurial.i18n import _

 # Commands definition was moved elsewhere to ease demandload job.

 def convert(ui, src, dest=None, revmapfile=None, **opts):
     """convert a foreign SCM repository to a Mercurial one.

     Accepted source formats [identifiers]:
     - Mercurial [hg]
     - CVS [cvs]
     - Darcs [darcs]
     - git [git]
     - Subversion [svn]
     - Monotone [mtn]
     - GNU Arch [gnuarch]
     - Bazaar [bzr]
     - Perforce [p4]

     Accepted destination formats [identifiers]:
     - Mercurial [hg]
     - Subversion [svn] (history on branches is not preserved)

     If no revision is given, all revisions will be converted. Otherwise,
     convert will only import up to the named revision (given in a format
     understood by the source).

     If no destination directory name is specified, it defaults to the
     basename of the source with '-hg' appended. If the destination
     repository doesn't exist, it will be created.

     If <REVMAP> isn't given, it will be put in a default location
     (<dest>/.hg/shamap by default). The <REVMAP> is a simple text
     file that maps each source commit ID to the destination ID for
     that revision, like so:
     <source ID> <destination ID>

     If the file doesn't exist, it's automatically created. It's updated
     on each commit copied, so convert-repo can be interrupted and can
     be run repeatedly to copy new commits.

     The [username mapping] file is a simple text file that maps each source
     commit author to a destination commit author. It is handy for source SCMs
     that use unix logins to identify authors (e.g. CVS). One line per author
     mapping and the line format is:
     srcauthor=whatever string you want

     The filemap is a file that allows filtering and remapping of files
     and directories. Comment lines start with '#'. Each line can
     contain one of the following directives:

     include path/to/file

     exclude path/to/file

     rename from/file to/file

     The 'include' directive causes a file, or all files under a
     directory, to be included in the destination repository, and the
     exclusion of all other files and dirs not explicitly included.
     The 'exclude' directive causes files or directories to be omitted.
     The 'rename' directive renames a file or directory. To rename from a
     subdirectory into the root of the repository, use '.' as the path to
     rename to.

     The splicemap is a file that allows insertion of synthetic
     history, letting you specify the parents of a revision. This is
     useful if you want to e.g. give a Subversion merge two parents, or
     graft two disconnected series of history together. Each entry
     contains a key, followed by a space, followed by one or two
     values, separated by spaces. The key is the revision ID in the
     source revision control system whose parents should be modified
     (same format as a key in .hg/shamap). The values are the revision
     IDs (in either the source or destination revision control system)
     that should be used as the new parents for that node.

     Mercurial Source
     ----------------

     --config convert.hg.ignoreerrors=False (boolean)
         ignore integrity errors when reading. Use it to fix Mercurial
         repositories with missing revlogs, by converting from and to
         Mercurial.
     --config convert.hg.saverev=False (boolean)
         store original revision ID in changeset (forces target IDs to change)
     --config convert.hg.startrev=0 (hg revision identifier)
         convert start revision and its descendants

     CVS Source
     ----------

     CVS source will use a sandbox (i.e. a checked-out copy) from CVS
     to indicate the starting point of what will be converted. Direct
     access to the repository files is not needed, unless of course
     the repository is :local:. The conversion uses the top level
     directory in the sandbox to find the CVS repository, and then uses
     CVS rlog commands to find files to convert. This means that unless
     a filemap is given, all files under the starting directory will be
     converted, and that any directory reorganisation in the CVS
     sandbox is ignored.

     Because CVS does not have changesets, it is necessary to collect
     individual commits to CVS and merge them into changesets. CVS
     source uses its internal changeset merging code by default but can
     be configured to call the external 'cvsps' program by setting:
         --config convert.cvsps='cvsps -A -u --cvs-direct -q'
     This is a legacy option and may be removed in the future.

     The options shown are the defaults.

     Internal cvsps is selected by setting
         --config convert.cvsps=builtin
     and has a few more configurable options:
         --config convert.cvsps.fuzz=60 (integer)
             Specify the maximum time (in seconds) that is allowed between
             commits with identical user and log message in a single
             changeset. When very large files were checked in as part
             of a changeset then the default may not be long enough.
         --config convert.cvsps.mergeto='{{mergetobranch ([-\w]+)}}'
             Specify a regular expression to which commit log messages are
             matched. If a match occurs, then the conversion process will
             insert a dummy revision merging the branch on which this log
             message occurs to the branch indicated in the regex.
         --config convert.cvsps.mergefrom='{{mergefrombranch ([-\w]+)}}'
             Specify a regular expression to which commit log messages are
             matched. If a match occurs, then the conversion process will
             add the most recent revision on the branch indicated in the
             regex as the second parent of the changeset.

     The hgext/convert/cvsps wrapper script allows the builtin changeset
     merging code to be run without doing a conversion. Its parameters and
     output are similar to that of cvsps 2.1.

     Subversion Source
     -----------------

     Subversion source detects classical trunk/branches/tags layouts.
     By default, the supplied "svn://repo/path/" source URL is
     converted as a single branch. If "svn://repo/path/trunk" exists
     it replaces the default branch. If "svn://repo/path/branches"
     exists, its subdirectories are listed as possible branches. If
     "svn://repo/path/tags" exists, it is searched for tags referencing
     converted branches. Default "trunk", "branches" and "tags" values
     can be overridden with the following options. Set them to paths
     relative to the source URL, or leave them blank to disable
     autodetection.

     --config convert.svn.branches=branches (directory name)
         specify the directory containing branches
     --config convert.svn.tags=tags (directory name)
         specify the directory containing tags
     --config convert.svn.trunk=trunk (directory name)
         specify the name of the trunk branch

     Source history can be retrieved starting at a specific revision,
     instead of being integrally converted. Only single branch
     conversions are supported.

     --config convert.svn.startrev=0 (svn revision number)
         specify start Subversion revision.

     Perforce Source
     ---------------

     The Perforce (P4) importer can be given a p4 depot path or a client
     specification as source. It will convert all files in the source to
     a flat Mercurial repository, ignoring labels, branches and integrations.
     Note that when a depot path is given, you should usually specify a
     target directory, because otherwise the target may be named ...-hg.

     It is possible to limit the amount of source history to be converted
     by specifying an initial Perforce revision.

     --config convert.p4.startrev=0 (perforce changelist number)
         specify initial Perforce revision.


     Mercurial Destination
     ---------------------

     --config convert.hg.clonebranches=False (boolean)
         dispatch source branches in separate clones.
     --config convert.hg.tagsbranch=default (branch name)
         tag revisions branch name
     --config convert.hg.usebranchnames=True (boolean)
         preserve branch names

     """
     return convcmd.convert(ui, src, dest, revmapfile, **opts)

 def debugsvnlog(ui, **opts):
-    return
+    return subversion.debugsvnlog(ui, **opts)

 def debugcvsps(ui, *args, **opts):
     '''create changeset information from CVS

     This command is intended as a debugging tool for the CVS to Mercurial
     converter, and can be used as a direct replacement for cvsps.

     Hg debugcvsps reads the CVS rlog for current directory (or any named
     directory) in the CVS repository, and converts the log to a series of
     changesets based on matching commit log entries and dates.'''
     return cvsps.debugcvsps(ui, *args, **opts)

 commands.norepo += " convert debugsvnlog debugcvsps"

 cmdtable = {
     "convert":
         (convert,
          [('A', 'authors', '', _('username mapping filename')),
           ('d', 'dest-type', '', _('destination repository type')),
           ('', 'filemap', '', _('remap file names using contents of file')),
           ('r', 'rev', '', _('import up to target revision REV')),
           ('s', 'source-type', '', _('source repository type')),
           ('', 'splicemap', '', _('splice synthesized history into place')),
           ('', 'datesort', None, _('try to sort changesets by date'))],
          _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
     "debugsvnlog":
         (debugsvnlog,
          [],
          'hg debugsvnlog'),
     "debugcvsps":
         (debugcvsps,
          [
           # Main options shared with cvsps-2.1
           ('b', 'branches', [], _('only return changes on specified branches')),
           ('p', 'prefix', '', _('prefix to remove from file names')),
           ('r', 'revisions', [], _('only return changes after or between specified tags')),
           ('u', 'update-cache', None, _("update cvs log cache")),
           ('x', 'new-cache', None, _("create new cvs log cache")),
           ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
           ('', 'root', '', _('specify cvsroot')),
           # Options specific to builtin cvsps
           ('', 'parents', '', _('show parent changesets')),
           ('', 'ancestors', '', _('show current changeset in ancestor branches')),
           # Options that are ignored for compatibility with cvsps-2.1
           ('A', 'cvs-direct', None, _('ignored for compatibility')),
          ],
          _('hg debugcvsps [OPTION]... [PATH]...')),
 }
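
As a worked illustration of the filemap and splicemap formats the convert docstring describes, here is a hedged sketch of each; every path and revision ID below is invented. A filemap using the three documented directives, with '#' comments:

    # keep lib/, drop its tests, hoist src/ to the repository root
    include lib
    exclude lib/tests
    rename src .

A splicemap entry is a key (a source revision ID, in the same format as a .hg/shamap key) followed by one or two parent IDs, separated by spaces (the convcmd.py code below also accepts commas):

    0123456789abcdef... fedcba9876543210... 02468ace13579bdf...
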
@@ -1,341 +1,341 b'' | |||||
1 | # convcmd - convert extension commands definition |
|
1 | # convcmd - convert extension commands definition | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from common import NoRepo, MissingTool, SKIPREV, mapfile |
|
8 | from common import NoRepo, MissingTool, SKIPREV, mapfile | |
9 | from cvs import convert_cvs |
|
9 | from cvs import convert_cvs | |
10 | from darcs import darcs_source |
|
10 | from darcs import darcs_source | |
11 | from git import convert_git |
|
11 | from git import convert_git | |
12 | from hg import mercurial_source, mercurial_sink |
|
12 | from hg import mercurial_source, mercurial_sink | |
13 |
from subversion import |
|
13 | from subversion import svn_source, svn_sink | |
14 | from monotone import monotone_source |
|
14 | from monotone import monotone_source | |
15 | from gnuarch import gnuarch_source |
|
15 | from gnuarch import gnuarch_source | |
16 | from bzr import bzr_source |
|
16 | from bzr import bzr_source | |
17 | from p4 import p4_source |
|
17 | from p4 import p4_source | |
18 | import filemap |
|
18 | import filemap | |
19 |
|
19 | |||
20 | import os, shutil |
|
20 | import os, shutil | |
21 | from mercurial import hg, util |
|
21 | from mercurial import hg, util | |
22 | from mercurial.i18n import _ |
|
22 | from mercurial.i18n import _ | |
23 |
|
23 | |||
24 | orig_encoding = 'ascii' |
|
24 | orig_encoding = 'ascii' | |
25 |
|
25 | |||
26 | def recode(s): |
|
26 | def recode(s): | |
27 | if isinstance(s, unicode): |
|
27 | if isinstance(s, unicode): | |
28 | return s.encode(orig_encoding, 'replace') |
|
28 | return s.encode(orig_encoding, 'replace') | |
29 | else: |
|
29 | else: | |
30 | return s.decode('utf-8').encode(orig_encoding, 'replace') |
|
30 | return s.decode('utf-8').encode(orig_encoding, 'replace') | |
31 |
|
31 | |||
32 | source_converters = [ |
|
32 | source_converters = [ | |
33 | ('cvs', convert_cvs), |
|
33 | ('cvs', convert_cvs), | |
34 | ('git', convert_git), |
|
34 | ('git', convert_git), | |
35 | ('svn', svn_source), |
|
35 | ('svn', svn_source), | |
36 | ('hg', mercurial_source), |
|
36 | ('hg', mercurial_source), | |
37 | ('darcs', darcs_source), |
|
37 | ('darcs', darcs_source), | |
38 | ('mtn', monotone_source), |
|
38 | ('mtn', monotone_source), | |
39 | ('gnuarch', gnuarch_source), |
|
39 | ('gnuarch', gnuarch_source), | |
40 | ('bzr', bzr_source), |
|
40 | ('bzr', bzr_source), | |
41 | ('p4', p4_source), |
|
41 | ('p4', p4_source), | |
42 | ] |
|
42 | ] | |
43 |
|
43 | |||
44 | sink_converters = [ |
|
44 | sink_converters = [ | |
45 | ('hg', mercurial_sink), |
|
45 | ('hg', mercurial_sink), | |
46 | ('svn', svn_sink), |
|
46 | ('svn', svn_sink), | |
47 | ] |
|
47 | ] | |
48 |
|
48 | |||
49 | def convertsource(ui, path, type, rev): |
|
49 | def convertsource(ui, path, type, rev): | |
50 | exceptions = [] |
|
50 | exceptions = [] | |
51 | for name, source in source_converters: |
|
51 | for name, source in source_converters: | |
52 | try: |
|
52 | try: | |
53 | if not type or name == type: |
|
53 | if not type or name == type: | |
54 | return source(ui, path, rev) |
|
54 | return source(ui, path, rev) | |
55 | except (NoRepo, MissingTool), inst: |
|
55 | except (NoRepo, MissingTool), inst: | |
56 | exceptions.append(inst) |
|
56 | exceptions.append(inst) | |
57 | if not ui.quiet: |
|
57 | if not ui.quiet: | |
58 | for inst in exceptions: |
|
58 | for inst in exceptions: | |
59 | ui.write("%s\n" % inst) |
|
59 | ui.write("%s\n" % inst) | |
60 | raise util.Abort(_('%s: missing or unsupported repository') % path) |
|
60 | raise util.Abort(_('%s: missing or unsupported repository') % path) | |
61 |
|
61 | |||
62 | def convertsink(ui, path, type): |
|
62 | def convertsink(ui, path, type): | |
63 | for name, sink in sink_converters: |
|
63 | for name, sink in sink_converters: | |
64 | try: |
|
64 | try: | |
65 | if not type or name == type: |
|
65 | if not type or name == type: | |
66 | return sink(ui, path) |
|
66 | return sink(ui, path) | |
67 | except NoRepo, inst: |
|
67 | except NoRepo, inst: | |
68 | ui.note(_("convert: %s\n") % inst) |
|
68 | ui.note(_("convert: %s\n") % inst) | |
69 | raise util.Abort(_('%s: unknown repository type') % path) |
|
69 | raise util.Abort(_('%s: unknown repository type') % path) | |
70 |
|
70 | |||
71 | class converter(object): |
|
71 | class converter(object): | |
72 | def __init__(self, ui, source, dest, revmapfile, opts): |
|
72 | def __init__(self, ui, source, dest, revmapfile, opts): | |
73 |
|
73 | |||
74 | self.source = source |
|
74 | self.source = source | |
75 | self.dest = dest |
|
75 | self.dest = dest | |
76 | self.ui = ui |
|
76 | self.ui = ui | |
77 | self.opts = opts |
|
77 | self.opts = opts | |
78 | self.commitcache = {} |
|
78 | self.commitcache = {} | |
79 | self.authors = {} |
|
79 | self.authors = {} | |
80 | self.authorfile = None |
|
80 | self.authorfile = None | |
81 |
|
81 | |||
82 | self.map = mapfile(ui, revmapfile) |
|
82 | self.map = mapfile(ui, revmapfile) | |
83 |
|
83 | |||
84 | # Read first the dst author map if any |
|
84 | # Read first the dst author map if any | |
85 | authorfile = self.dest.authorfile() |
|
85 | authorfile = self.dest.authorfile() | |
86 | if authorfile and os.path.exists(authorfile): |
|
86 | if authorfile and os.path.exists(authorfile): | |
87 | self.readauthormap(authorfile) |
|
87 | self.readauthormap(authorfile) | |
88 | # Extend/Override with new author map if necessary |
|
88 | # Extend/Override with new author map if necessary | |
89 | if opts.get('authors'): |
|
89 | if opts.get('authors'): | |
90 | self.readauthormap(opts.get('authors')) |
|
90 | self.readauthormap(opts.get('authors')) | |
91 | self.authorfile = self.dest.authorfile() |
|
91 | self.authorfile = self.dest.authorfile() | |
92 |
|
92 | |||
93 | self.splicemap = mapfile(ui, opts.get('splicemap')) |
|
93 | self.splicemap = mapfile(ui, opts.get('splicemap')) | |
94 |
|
94 | |||
95 | def walktree(self, heads): |
|
95 | def walktree(self, heads): | |
96 | '''Return a mapping that identifies the uncommitted parents of every |
|
96 | '''Return a mapping that identifies the uncommitted parents of every | |
97 | uncommitted changeset.''' |
|
97 | uncommitted changeset.''' | |
98 | visit = heads |
|
98 | visit = heads | |
99 | known = {} |
|
99 | known = {} | |
100 | parents = {} |
|
100 | parents = {} | |
101 | while visit: |
|
101 | while visit: | |
102 | n = visit.pop(0) |
|
102 | n = visit.pop(0) | |
103 | if n in known or n in self.map: continue |
|
103 | if n in known or n in self.map: continue | |
104 | known[n] = 1 |
|
104 | known[n] = 1 | |
105 | commit = self.cachecommit(n) |
|
105 | commit = self.cachecommit(n) | |
106 | parents[n] = [] |
|
106 | parents[n] = [] | |
107 | for p in commit.parents: |
|
107 | for p in commit.parents: | |
108 | parents[n].append(p) |
|
108 | parents[n].append(p) | |
109 | visit.append(p) |
|
109 | visit.append(p) | |
110 |
|
110 | |||
111 | return parents |
|
111 | return parents | |
112 |
|
112 | |||
113 | def toposort(self, parents): |
|
113 | def toposort(self, parents): | |
114 | '''Return an ordering such that every uncommitted changeset is |
|
114 | '''Return an ordering such that every uncommitted changeset is | |
115 | preceeded by all its uncommitted ancestors.''' |
|
115 | preceeded by all its uncommitted ancestors.''' | |
116 | visit = parents.keys() |
|
116 | visit = parents.keys() | |
117 | seen = {} |
|
117 | seen = {} | |
118 | children = {} |
|
118 | children = {} | |
119 | actives = [] |
|
119 | actives = [] | |
120 |
|
120 | |||
121 | while visit: |
|
121 | while visit: | |
122 | n = visit.pop(0) |
|
122 | n = visit.pop(0) | |
123 | if n in seen: continue |
|
123 | if n in seen: continue | |
124 | seen[n] = 1 |
|
124 | seen[n] = 1 | |
125 | # Ensure that nodes without parents are present in the 'children' |
|
125 | # Ensure that nodes without parents are present in the 'children' | |
126 | # mapping. |
|
126 | # mapping. | |
127 | children.setdefault(n, []) |
|
127 | children.setdefault(n, []) | |
128 | hasparent = False |
|
128 | hasparent = False | |
129 | for p in parents[n]: |
|
129 | for p in parents[n]: | |
130 | if not p in self.map: |
|
130 | if not p in self.map: | |
131 | visit.append(p) |
|
131 | visit.append(p) | |
132 | hasparent = True |
|
132 | hasparent = True | |
133 | children.setdefault(p, []).append(n) |
|
133 | children.setdefault(p, []).append(n) | |
134 | if not hasparent: |
|
134 | if not hasparent: | |
135 | actives.append(n) |
|
135 | actives.append(n) | |
136 |
|
136 | |||
137 | del seen |
|
137 | del seen | |
138 | del visit |
|
138 | del visit | |
139 |
|
139 | |||
140 | if self.opts.get('datesort'): |
|
140 | if self.opts.get('datesort'): | |
141 | dates = {} |
|
141 | dates = {} | |
142 | def getdate(n): |
|
142 | def getdate(n): | |
143 | if n not in dates: |
|
143 | if n not in dates: | |
144 | dates[n] = util.parsedate(self.commitcache[n].date) |
|
144 | dates[n] = util.parsedate(self.commitcache[n].date) | |
145 | return dates[n] |
|
145 | return dates[n] | |
146 |
|
146 | |||
147 | def picknext(nodes): |
|
147 | def picknext(nodes): | |
148 | return min([(getdate(n), n) for n in nodes])[1] |
|
148 | return min([(getdate(n), n) for n in nodes])[1] | |
149 | else: |
|
149 | else: | |
150 | prev = [None] |
|
150 | prev = [None] | |
151 | def picknext(nodes): |
|
151 | def picknext(nodes): | |
152 | # Return the first eligible child of the previously converted |
|
152 | # Return the first eligible child of the previously converted | |
153 | # revision, or any of them. |
|
153 | # revision, or any of them. | |
154 | next = nodes[0] |
|
154 | next = nodes[0] | |
155 | for n in nodes: |
|
155 | for n in nodes: | |
156 | if prev[0] in parents[n]: |
|
156 | if prev[0] in parents[n]: | |
157 | next = n |
|
157 | next = n | |
158 | break |
|
158 | break | |
159 | prev[0] = next |
|
159 | prev[0] = next | |
160 | return next |
|
160 | return next | |
161 |
|
161 | |||
162 | s = [] |
|
162 | s = [] | |
163 | pendings = {} |
|
163 | pendings = {} | |
164 | while actives: |
|
164 | while actives: | |
165 | n = picknext(actives) |
|
165 | n = picknext(actives) | |
166 | actives.remove(n) |
|
166 | actives.remove(n) | |
167 | s.append(n) |
|
167 | s.append(n) | |
168 |
|
168 | |||
169 | # Update dependents list |
|
169 | # Update dependents list | |
170 | for c in children.get(n, []): |
|
170 | for c in children.get(n, []): | |
171 | if c not in pendings: |
|
171 | if c not in pendings: | |
172 | pendings[c] = [p for p in parents[c] if p not in self.map] |
|
172 | pendings[c] = [p for p in parents[c] if p not in self.map] | |
173 | try: |
|
173 | try: | |
174 | pendings[c].remove(n) |
|
174 | pendings[c].remove(n) | |
175 | except ValueError: |
|
175 | except ValueError: | |
176 | raise util.Abort(_('cycle detected between %s and %s') |
|
176 | raise util.Abort(_('cycle detected between %s and %s') | |
177 | % (recode(c), recode(n))) |
|
177 | % (recode(c), recode(n))) | |
178 | if not pendings[c]: |
|
178 | if not pendings[c]: | |
179 | # Parents are converted, node is eligible |
|
179 | # Parents are converted, node is eligible | |
180 | actives.insert(0, c) |
|
180 | actives.insert(0, c) | |
181 | pendings[c] = None |
|
181 | pendings[c] = None | |
182 |
|
182 | |||
183 | if len(s) != len(parents): |
|
183 | if len(s) != len(parents): | |
184 | raise util.Abort(_("not all revisions were sorted")) |
|
184 | raise util.Abort(_("not all revisions were sorted")) | |
185 |
|
185 | |||
186 | return s |
|
186 | return s | |
187 |
|
187 | |||
188 | def writeauthormap(self): |
|
188 | def writeauthormap(self): | |
189 | authorfile = self.authorfile |
|
189 | authorfile = self.authorfile | |
190 | if authorfile: |
|
190 | if authorfile: | |
191 | self.ui.status(_('Writing author map file %s\n') % authorfile) |
|
191 | self.ui.status(_('Writing author map file %s\n') % authorfile) | |
192 | ofile = open(authorfile, 'w+') |
|
192 | ofile = open(authorfile, 'w+') | |
193 | for author in self.authors: |
|
193 | for author in self.authors: | |
194 | ofile.write("%s=%s\n" % (author, self.authors[author])) |
|
194 | ofile.write("%s=%s\n" % (author, self.authors[author])) | |
195 | ofile.close() |
|
195 | ofile.close() | |
196 |
|
196 | |||
197 | def readauthormap(self, authorfile): |
|
197 | def readauthormap(self, authorfile): | |
198 | afile = open(authorfile, 'r') |
|
198 | afile = open(authorfile, 'r') | |
199 | for line in afile: |
|
199 | for line in afile: | |
200 | if line.strip() == '': |
|
200 | if line.strip() == '': | |
201 | continue |
|
201 | continue | |
202 | try: |
|
202 | try: | |
203 | srcauthor, dstauthor = line.split('=', 1) |
|
203 | srcauthor, dstauthor = line.split('=', 1) | |
204 | srcauthor = srcauthor.strip() |
|
204 | srcauthor = srcauthor.strip() | |
205 | dstauthor = dstauthor.strip() |
|
205 | dstauthor = dstauthor.strip() | |
206 | if srcauthor in self.authors and dstauthor != self.authors[srcauthor]: |
|
206 | if srcauthor in self.authors and dstauthor != self.authors[srcauthor]: | |
207 | self.ui.status( |
|
207 | self.ui.status( | |
208 | _('Overriding mapping for author %s, was %s, will be %s\n') |
|
208 | _('Overriding mapping for author %s, was %s, will be %s\n') | |
209 | % (srcauthor, self.authors[srcauthor], dstauthor)) |
|
209 | % (srcauthor, self.authors[srcauthor], dstauthor)) | |
210 | else: |
|
210 | else: | |
211 | self.ui.debug(_('mapping author %s to %s\n') |
|
211 | self.ui.debug(_('mapping author %s to %s\n') | |
212 | % (srcauthor, dstauthor)) |
|
212 | % (srcauthor, dstauthor)) | |
213 | self.authors[srcauthor] = dstauthor |
|
213 | self.authors[srcauthor] = dstauthor | |
214 | except IndexError: |
|
214 | except IndexError: | |
215 | self.ui.warn( |
|
215 | self.ui.warn( | |
216 | _('Ignoring bad line in author map file %s: %s\n') |
|
216 | _('Ignoring bad line in author map file %s: %s\n') | |
217 | % (authorfile, line.rstrip())) |
|
217 | % (authorfile, line.rstrip())) | |
218 | afile.close() |
|
218 | afile.close() | |
219 |
|
219 | |||
220 | def cachecommit(self, rev): |
|
220 | def cachecommit(self, rev): | |
221 | commit = self.source.getcommit(rev) |
|
221 | commit = self.source.getcommit(rev) | |
222 | commit.author = self.authors.get(commit.author, commit.author) |
|
222 | commit.author = self.authors.get(commit.author, commit.author) | |
223 | self.commitcache[rev] = commit |
|
223 | self.commitcache[rev] = commit | |
224 | return commit |
|
224 | return commit | |
225 |
|
225 | |||
226 | def copy(self, rev): |
|
226 | def copy(self, rev): | |
227 | commit = self.commitcache[rev] |
|
227 | commit = self.commitcache[rev] | |
228 |
|
228 | |||
229 | changes = self.source.getchanges(rev) |
|
229 | changes = self.source.getchanges(rev) | |
230 | if isinstance(changes, basestring): |
|
230 | if isinstance(changes, basestring): | |
231 | if changes == SKIPREV: |
|
231 | if changes == SKIPREV: | |
232 | dest = SKIPREV |
|
232 | dest = SKIPREV | |
233 | else: |
|
233 | else: | |
234 | dest = self.map[changes] |
|
234 | dest = self.map[changes] | |
235 | self.map[rev] = dest |
|
235 | self.map[rev] = dest | |
236 | return |
|
236 | return | |
237 | files, copies = changes |
|
237 | files, copies = changes | |
238 | pbranches = [] |
|
238 | pbranches = [] | |
239 | if commit.parents: |
|
239 | if commit.parents: | |
240 | for prev in commit.parents: |
|
240 | for prev in commit.parents: | |
241 | if prev not in self.commitcache: |
|
241 | if prev not in self.commitcache: | |
242 | self.cachecommit(prev) |
|
242 | self.cachecommit(prev) | |
243 | pbranches.append((self.map[prev], |
|
243 | pbranches.append((self.map[prev], | |
244 | self.commitcache[prev].branch)) |
|
244 | self.commitcache[prev].branch)) | |
245 | self.dest.setbranch(commit.branch, pbranches) |
|
245 | self.dest.setbranch(commit.branch, pbranches) | |
246 | try: |
|
246 | try: | |
247 | parents = self.splicemap[rev].replace(',', ' ').split() |
|
247 | parents = self.splicemap[rev].replace(',', ' ').split() | |
248 | self.ui.status(_('spliced in %s as parents of %s\n') % |
|
248 | self.ui.status(_('spliced in %s as parents of %s\n') % | |
249 | (parents, rev)) |
|
249 | (parents, rev)) | |
250 | parents = [self.map.get(p, p) for p in parents] |
|
250 | parents = [self.map.get(p, p) for p in parents] | |
251 | except KeyError: |
|
251 | except KeyError: | |
252 | parents = [b[0] for b in pbranches] |
|
252 | parents = [b[0] for b in pbranches] | |
253 | newnode = self.dest.putcommit(files, copies, parents, commit, self.source) |
|
253 | newnode = self.dest.putcommit(files, copies, parents, commit, self.source) | |
254 | self.source.converted(rev, newnode) |
|
254 | self.source.converted(rev, newnode) | |
255 | self.map[rev] = newnode |
|
255 | self.map[rev] = newnode | |
256 |
|
256 | |||
257 | def convert(self): |
|
257 | def convert(self): | |
258 |
|
258 | |||
259 | try: |
|
259 | try: | |
260 | self.source.before() |
|
260 | self.source.before() | |
261 | self.dest.before() |
|
261 | self.dest.before() | |
262 | self.source.setrevmap(self.map) |
|
262 | self.source.setrevmap(self.map) | |
263 | self.ui.status(_("scanning source...\n")) |
|
263 | self.ui.status(_("scanning source...\n")) | |
264 | heads = self.source.getheads() |
|
264 | heads = self.source.getheads() | |
265 | parents = self.walktree(heads) |
|
265 | parents = self.walktree(heads) | |
266 | self.ui.status(_("sorting...\n")) |
|
266 | self.ui.status(_("sorting...\n")) | |
267 | t = self.toposort(parents) |
|
267 | t = self.toposort(parents) | |
268 | num = len(t) |
|
268 | num = len(t) | |
269 | c = None |
|
269 | c = None | |
270 |
|
270 | |||
271 | self.ui.status(_("converting...\n")) |
|
271 | self.ui.status(_("converting...\n")) | |
272 | for c in t: |
|
272 | for c in t: | |
273 | num -= 1 |
|
273 | num -= 1 | |
274 | desc = self.commitcache[c].desc |
|
274 | desc = self.commitcache[c].desc | |
275 | if "\n" in desc: |
|
275 | if "\n" in desc: | |
276 | desc = desc.splitlines()[0] |
|
276 | desc = desc.splitlines()[0] | |
277 | # convert log message to local encoding without using |
|
277 | # convert log message to local encoding without using | |
278 | # tolocal() because util._encoding conver() use it as |
|
278 | # tolocal() because util._encoding conver() use it as | |
279 | # 'utf-8' |
|
279 | # 'utf-8' | |
280 | self.ui.status("%d %s\n" % (num, recode(desc))) |
|
280 | self.ui.status("%d %s\n" % (num, recode(desc))) | |
281 | self.ui.note(_("source: %s\n") % recode(c)) |
|
281 | self.ui.note(_("source: %s\n") % recode(c)) | |
282 | self.copy(c) |
|
282 | self.copy(c) | |
283 |
|
283 | |||
284 | tags = self.source.gettags() |
|
284 | tags = self.source.gettags() | |
285 | ctags = {} |
|
285 | ctags = {} | |
286 | for k in tags: |
|
286 | for k in tags: | |
287 | v = tags[k] |
|
287 | v = tags[k] | |
288 | if self.map.get(v, SKIPREV) != SKIPREV: |
|
288 | if self.map.get(v, SKIPREV) != SKIPREV: | |
289 | ctags[k] = self.map[v] |
|
289 | ctags[k] = self.map[v] | |
290 |
|
290 | |||
291 | if c and ctags: |
|
291 | if c and ctags: | |
292 | nrev = self.dest.puttags(ctags) |
|
292 | nrev = self.dest.puttags(ctags) | |
293 | # write another hash correspondence to override the previous |
|
293 | # write another hash correspondence to override the previous | |
294 | # one so we don't end up with extra tag heads |
|
294 | # one so we don't end up with extra tag heads | |
295 | if nrev: |
|
295 | if nrev: | |
296 | self.map[c] = nrev |
|
296 | self.map[c] = nrev | |
297 |
|
297 | |||
298 | self.writeauthormap() |
|
298 | self.writeauthormap() | |
299 | finally: |
|
299 | finally: | |
300 | self.cleanup() |
|
300 | self.cleanup() | |
301 |
|
301 | |||
302 | def cleanup(self): |
|
302 | def cleanup(self): | |
303 | try: |
|
303 | try: | |
304 | self.dest.after() |
|
304 | self.dest.after() | |
305 | finally: |
|
305 | finally: | |
306 | self.source.after() |
|
306 | self.source.after() | |
307 | self.map.close() |
|
307 | self.map.close() | |
308 |
|
308 | |||
309 | def convert(ui, src, dest=None, revmapfile=None, **opts): |
|
309 | def convert(ui, src, dest=None, revmapfile=None, **opts): | |
310 | global orig_encoding |
|
310 | global orig_encoding | |
311 | orig_encoding = util._encoding |
|
311 | orig_encoding = util._encoding | |
312 | util._encoding = 'UTF-8' |
|
312 | util._encoding = 'UTF-8' | |
313 |
|
313 | |||
314 | if not dest: |
|
314 | if not dest: | |
315 | dest = hg.defaultdest(src) + "-hg" |
|
315 | dest = hg.defaultdest(src) + "-hg" | |
316 | ui.status(_("assuming destination %s\n") % dest) |
|
316 | ui.status(_("assuming destination %s\n") % dest) | |
317 |
|
317 | |||
318 | destc = convertsink(ui, dest, opts.get('dest_type')) |
|
318 | destc = convertsink(ui, dest, opts.get('dest_type')) | |
319 |
|
319 | |||
320 | try: |
|
320 | try: | |
321 | srcc = convertsource(ui, src, opts.get('source_type'), |
|
321 | srcc = convertsource(ui, src, opts.get('source_type'), | |
322 | opts.get('rev')) |
|
322 | opts.get('rev')) | |
323 | except Exception: |
|
323 | except Exception: | |
324 | for path in destc.created: |
|
324 | for path in destc.created: | |
325 | shutil.rmtree(path, True) |
|
325 | shutil.rmtree(path, True) | |
326 | raise |
|
326 | raise | |
327 |
|
327 | |||
328 | fmap = opts.get('filemap') |
|
328 | fmap = opts.get('filemap') | |
329 | if fmap: |
|
329 | if fmap: | |
330 | srcc = filemap.filemap_source(ui, srcc, fmap) |
|
330 | srcc = filemap.filemap_source(ui, srcc, fmap) | |
331 | destc.setfilemapmode(True) |
|
331 | destc.setfilemapmode(True) | |
332 |
|
332 | |||
333 | if not revmapfile: |
|
333 | if not revmapfile: | |
334 | try: |
|
334 | try: | |
335 | revmapfile = destc.revmapfile() |
|
335 | revmapfile = destc.revmapfile() | |
336 | except: |
|
336 | except: | |
337 | revmapfile = os.path.join(dest, "map") |
|
337 | revmapfile = os.path.join(dest, "map") | |
338 |
|
338 | |||
339 | c = converter(ui, srcc, destc, revmapfile, opts) |
|
339 | c = converter(ui, srcc, destc, revmapfile, opts) | |
340 | c.convert() |
|
340 | c.convert() | |
341 |
|
341 |
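Before the copy loop above runs, the converter toposorts the parent map returned by walktree() so every changeset is converted after its parents. A self-contained sketch of that ordering step, using Kahn's algorithm over a made-up {node: [parents]} map (the real toposort() also honors date-sorting options):

    import sys

    def toposort(parents):
        # count of not-yet-converted parents per node
        pending = dict((n, len(ps)) for n, ps in parents.items())
        children = {}
        for n, ps in parents.items():
            for p in ps:
                children.setdefault(p, []).append(n)
        ready = [n for n, c in pending.items() if c == 0]
        order = []
        while ready:
            n = ready.pop()
            order.append(n)
            for child in children.get(n, []):
                pending[child] -= 1
                if pending[child] == 0:
                    ready.append(child)
        return order

    # made-up history: b and c branch off a, d merges them
    sample = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}
    sys.stdout.write(' '.join(toposort(sample)) + '\n')

Whatever order is printed, 'a' always comes first and 'd' last, which is exactly the invariant the copy loop relies on.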
@@ -1,201 +1,201 b'' | |||||
1 | # monotone support for the convert extension |
|
1 | # monotone support for the convert extension | |
2 |
|
2 | |||
3 | import os, re |
|
3 | import os, re | |
4 | from mercurial import util |
|
4 | from mercurial import util | |
5 |
from common import NoRepo, |
|
5 | from common import NoRepo, commit, converter_source, checktool | |
6 | from common import commandline |
|
6 | from common import commandline | |
7 | from mercurial.i18n import _ |
|
7 | from mercurial.i18n import _ | |
8 |
|
8 | |||
9 | class monotone_source(converter_source, commandline): |
|
9 | class monotone_source(converter_source, commandline): | |
10 | def __init__(self, ui, path=None, rev=None): |
|
10 | def __init__(self, ui, path=None, rev=None): | |
11 | converter_source.__init__(self, ui, path, rev) |
|
11 | converter_source.__init__(self, ui, path, rev) | |
12 | commandline.__init__(self, ui, 'mtn') |
|
12 | commandline.__init__(self, ui, 'mtn') | |
13 |
|
13 | |||
14 | self.ui = ui |
|
14 | self.ui = ui | |
15 | self.path = path |
|
15 | self.path = path | |
16 |
|
16 | |||
17 | # regular expressions for parsing monotone output |
|
17 | # regular expressions for parsing monotone output | |
18 | space = r'\s*' |
|
18 | space = r'\s*' | |
19 | name = r'\s+"((?:\\"|[^"])*)"\s*' |
|
19 | name = r'\s+"((?:\\"|[^"])*)"\s*' | |
20 | value = name |
|
20 | value = name | |
21 | revision = r'\s+\[(\w+)\]\s*' |
|
21 | revision = r'\s+\[(\w+)\]\s*' | |
22 | lines = r'(?:.|\n)+' |
|
22 | lines = r'(?:.|\n)+' | |
23 |
|
23 | |||
24 | self.dir_re = re.compile(space + "dir" + name) |
|
24 | self.dir_re = re.compile(space + "dir" + name) | |
25 | self.file_re = re.compile(space + "file" + name + "content" + revision) |
|
25 | self.file_re = re.compile(space + "file" + name + "content" + revision) | |
26 | self.add_file_re = re.compile(space + "add_file" + name + "content" + revision) |
|
26 | self.add_file_re = re.compile(space + "add_file" + name + "content" + revision) | |
27 | self.patch_re = re.compile(space + "patch" + name + "from" + revision + "to" + revision) |
|
27 | self.patch_re = re.compile(space + "patch" + name + "from" + revision + "to" + revision) | |
28 | self.rename_re = re.compile(space + "rename" + name + "to" + name) |
|
28 | self.rename_re = re.compile(space + "rename" + name + "to" + name) | |
29 | self.delete_re = re.compile(space + "delete" + name) |
|
29 | self.delete_re = re.compile(space + "delete" + name) | |
30 | self.tag_re = re.compile(space + "tag" + name + "revision" + revision) |
|
30 | self.tag_re = re.compile(space + "tag" + name + "revision" + revision) | |
31 | self.cert_re = re.compile(lines + space + "name" + name + "value" + value) |
|
31 | self.cert_re = re.compile(lines + space + "name" + name + "value" + value) | |
32 |
|
32 | |||
33 | attr = space + "file" + lines + space + "attr" + space |
|
33 | attr = space + "file" + lines + space + "attr" + space | |
34 | self.attr_execute_re = re.compile(attr + '"mtn:execute"' + space + '"true"') |
|
34 | self.attr_execute_re = re.compile(attr + '"mtn:execute"' + space + '"true"') | |
35 |
|
35 | |||
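These fragments compose into full stanza matchers, which is easiest to see by running one; the manifest snippet below is made up, but the pattern pieces are exactly the ones defined above:

    import re, sys

    space = r'\s*'
    name = r'\s+"((?:\\"|[^"])*)"\s*'
    revision = r'\s+\[(\w+)\]\s*'
    file_re = re.compile(space + "file" + name + "content" + revision)

    stanza = 'file "src/widget.py"\n content [deadbeefcafe]'
    m = file_re.match(stanza)
    if m:
        sys.stdout.write('path=%s node=%s\n' % m.groups())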
36 | # cached data |
|
36 | # cached data | |
37 | self.manifest_rev = None |
|
37 | self.manifest_rev = None | |
38 | self.manifest = None |
|
38 | self.manifest = None | |
39 | self.files = None |
|
39 | self.files = None | |
40 | self.dirs = None |
|
40 | self.dirs = None | |
41 |
|
41 | |||
42 | norepo = NoRepo(_("%s does not look like a monotone repo") % path) |
|
42 | norepo = NoRepo(_("%s does not look like a monotone repo") % path) | |
43 | if not os.path.exists(path): |
|
43 | if not os.path.exists(path): | |
44 | raise norepo |
|
44 | raise norepo | |
45 |
|
45 | |||
46 | checktool('mtn', abort=False) |
|
46 | checktool('mtn', abort=False) | |
47 |
|
47 | |||
48 | # test if there are any revisions |
|
48 | # test if there are any revisions | |
49 | self.rev = None |
|
49 | self.rev = None | |
50 | try: |
|
50 | try: | |
51 | self.getheads() |
|
51 | self.getheads() | |
52 | except: |
|
52 | except: | |
53 | raise norepo |
|
53 | raise norepo | |
54 | self.rev = rev |
|
54 | self.rev = rev | |
55 |
|
55 | |||
56 | def mtnrun(self, *args, **kwargs): |
|
56 | def mtnrun(self, *args, **kwargs): | |
57 | kwargs['d'] = self.path |
|
57 | kwargs['d'] = self.path | |
58 | return self.run0('automate', *args, **kwargs) |
|
58 | return self.run0('automate', *args, **kwargs) | |
59 |
|
59 | |||
60 | def mtnloadmanifest(self, rev): |
|
60 | def mtnloadmanifest(self, rev): | |
61 | if self.manifest_rev == rev: |
|
61 | if self.manifest_rev == rev: | |
62 | return |
|
62 | return | |
63 | self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n") |
|
63 | self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n") | |
64 | self.manifest_rev = rev |
|
64 | self.manifest_rev = rev | |
65 | self.files = {} |
|
65 | self.files = {} | |
66 | self.dirs = {} |
|
66 | self.dirs = {} | |
67 |
|
67 | |||
68 | for e in self.manifest: |
|
68 | for e in self.manifest: | |
69 | m = self.file_re.match(e) |
|
69 | m = self.file_re.match(e) | |
70 | if m: |
|
70 | if m: | |
71 | attr = "" |
|
71 | attr = "" | |
72 | name = m.group(1) |
|
72 | name = m.group(1) | |
73 | node = m.group(2) |
|
73 | node = m.group(2) | |
74 | if self.attr_execute_re.match(e): |
|
74 | if self.attr_execute_re.match(e): | |
75 | attr += "x" |
|
75 | attr += "x" | |
76 | self.files[name] = (node, attr) |
|
76 | self.files[name] = (node, attr) | |
77 | m = self.dir_re.match(e) |
|
77 | m = self.dir_re.match(e) | |
78 | if m: |
|
78 | if m: | |
79 | self.dirs[m.group(1)] = True |
|
79 | self.dirs[m.group(1)] = True | |
80 |
|
80 | |||
81 | def mtnisfile(self, name, rev): |
|
81 | def mtnisfile(self, name, rev): | |
82 | # a non-file could be a directory or a deleted or renamed file |
|
82 | # a non-file could be a directory or a deleted or renamed file | |
83 | self.mtnloadmanifest(rev) |
|
83 | self.mtnloadmanifest(rev) | |
84 | try: |
|
84 | try: | |
85 | self.files[name] |
|
85 | self.files[name] | |
86 | return True |
|
86 | return True | |
87 | except KeyError: |
|
87 | except KeyError: | |
88 | return False |
|
88 | return False | |
89 |
|
89 | |||
90 | def mtnisdir(self, name, rev): |
|
90 | def mtnisdir(self, name, rev): | |
91 | self.mtnloadmanifest(rev) |
|
91 | self.mtnloadmanifest(rev) | |
92 | try: |
|
92 | try: | |
93 | self.dirs[name] |
|
93 | self.dirs[name] | |
94 | return True |
|
94 | return True | |
95 | except KeyError: |
|
95 | except KeyError: | |
96 | return False |
|
96 | return False | |
97 |
|
97 | |||
98 | def mtngetcerts(self, rev): |
|
98 | def mtngetcerts(self, rev): | |
99 | certs = {"author":"<missing>", "date":"<missing>", |
|
99 | certs = {"author":"<missing>", "date":"<missing>", | |
100 | "changelog":"<missing>", "branch":"<missing>"} |
|
100 | "changelog":"<missing>", "branch":"<missing>"} | |
101 | cert_list = self.mtnrun("certs", rev).split('\n\n key "') |
|
101 | cert_list = self.mtnrun("certs", rev).split('\n\n key "') | |
102 | for e in cert_list: |
|
102 | for e in cert_list: | |
103 | m = self.cert_re.match(e) |
|
103 | m = self.cert_re.match(e) | |
104 | if m: |
|
104 | if m: | |
105 | name, value = m.groups() |
|
105 | name, value = m.groups() | |
106 | value = value.replace(r'\"', '"') |
|
106 | value = value.replace(r'\"', '"') | |
107 | value = value.replace(r'\\', '\\') |
|
107 | value = value.replace(r'\\', '\\') | |
108 | certs[name] = value |
|
108 | certs[name] = value | |
109 | return certs |
|
109 | return certs | |
110 |
|
110 | |||
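The cert values come back with monotone-style escaping, which mtngetcerts() undoes with the two replace() calls above. A small made-up example of that decoding:

    import sys

    raw = r'fix the \"wait\" loop in C:\\src'   # as captured by cert_re
    value = raw.replace(r'\"', '"')
    value = value.replace(r'\\', '\\')
    sys.stdout.write(value + '\n')              # fix the "wait" loop in C:\src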
111 | def mtnrenamefiles(self, files, fromdir, todir): |
|
111 | def mtnrenamefiles(self, files, fromdir, todir): | |
112 | renamed = {} |
|
112 | renamed = {} | |
113 | for tofile in files: |
|
113 | for tofile in files: | |
114 | suffix = tofile[len(todir):] # lstrip() strips a char set, not a prefix |
|
114 | suffix = tofile[len(todir):] # lstrip() strips a char set, not a prefix | |
115 | if todir + suffix == tofile: |
|
115 | if todir + suffix == tofile: | |
116 | renamed[tofile] = (fromdir + suffix).lstrip("/") |
|
116 | renamed[tofile] = (fromdir + suffix).lstrip("/") | |
117 | return renamed |
|
117 | return renamed | |
118 |
|
118 | |||
119 |
|
119 | |||
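Since lstrip() strips a character set rather than a prefix (the reason for the slice above), here is a self-contained sketch, on made-up paths, of the mapping mtnrenamefiles() is meant to produce for a directory rename:

    import sys

    files = ['lib/new/util.py', 'lib/new/io.py', 'docs/readme']
    fromdir, todir = 'lib/old', 'lib/new'

    renamed = {}
    for tofile in files:
        suffix = tofile[len(todir):]            # prefix removal, not lstrip()
        if todir + suffix == tofile:
            renamed[tofile] = (fromdir + suffix).lstrip('/')
    for new, old in sorted(renamed.items()):
        sys.stdout.write('%s <- %s\n' % (new, old))

Only the two files under lib/new are mapped back to lib/old; docs/readme fails the prefix check and is left alone.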
120 | # implement the converter_source interface: |
|
120 | # implement the converter_source interface: | |
121 |
|
121 | |||
122 | def getheads(self): |
|
122 | def getheads(self): | |
123 | if not self.rev: |
|
123 | if not self.rev: | |
124 | return self.mtnrun("leaves").splitlines() |
|
124 | return self.mtnrun("leaves").splitlines() | |
125 | else: |
|
125 | else: | |
126 | return [self.rev] |
|
126 | return [self.rev] | |
127 |
|
127 | |||
128 | def getchanges(self, rev): |
|
128 | def getchanges(self, rev): | |
129 | #revision = self.mtncmd("get_revision %s" % rev).split("\n\n") |
|
129 | #revision = self.mtncmd("get_revision %s" % rev).split("\n\n") | |
130 | revision = self.mtnrun("get_revision", rev).split("\n\n") |
|
130 | revision = self.mtnrun("get_revision", rev).split("\n\n") | |
131 | files = {} |
|
131 | files = {} | |
132 | copies = {} |
|
132 | copies = {} | |
133 | for e in revision: |
|
133 | for e in revision: | |
134 | m = self.add_file_re.match(e) |
|
134 | m = self.add_file_re.match(e) | |
135 | if m: |
|
135 | if m: | |
136 | files[m.group(1)] = rev |
|
136 | files[m.group(1)] = rev | |
137 | m = self.patch_re.match(e) |
|
137 | m = self.patch_re.match(e) | |
138 | if m: |
|
138 | if m: | |
139 | files[m.group(1)] = rev |
|
139 | files[m.group(1)] = rev | |
140 |
|
140 | |||
141 | # Delete/rename is handled later when the convert engine |
|
141 | # Delete/rename is handled later when the convert engine | |
142 | # discovers an IOError exception from getfile, |
|
142 | # discovers an IOError exception from getfile, | |
143 | # but only if we add the "from" file to the list of changes. |
|
143 | # but only if we add the "from" file to the list of changes. | |
144 | m = self.delete_re.match(e) |
|
144 | m = self.delete_re.match(e) | |
145 | if m: |
|
145 | if m: | |
146 | files[m.group(1)] = rev |
|
146 | files[m.group(1)] = rev | |
147 | m = self.rename_re.match(e) |
|
147 | m = self.rename_re.match(e) | |
148 | if m: |
|
148 | if m: | |
149 | toname = m.group(2) |
|
149 | toname = m.group(2) | |
150 | fromname = m.group(1) |
|
150 | fromname = m.group(1) | |
151 | if self.mtnisfile(toname, rev): |
|
151 | if self.mtnisfile(toname, rev): | |
152 | copies[toname] = fromname |
|
152 | copies[toname] = fromname | |
153 | files[toname] = rev |
|
153 | files[toname] = rev | |
154 | files[fromname] = rev |
|
154 | files[fromname] = rev | |
155 | if self.mtnisdir(toname, rev): |
|
155 | if self.mtnisdir(toname, rev): | |
156 | renamed = self.mtnrenamefiles(self.files, fromname, toname) |
|
156 | renamed = self.mtnrenamefiles(self.files, fromname, toname) | |
157 | for tofile, fromfile in renamed.items(): |
|
157 | for tofile, fromfile in renamed.items(): | |
158 | self.ui.debug(_("copying file in renamed dir from '%s' to '%s'") % (fromfile, tofile), '\n') |
|
158 | self.ui.debug(_("copying file in renamed dir from '%s' to '%s'") % (fromfile, tofile), '\n') | |
159 | files[tofile] = rev |
|
159 | files[tofile] = rev | |
160 | for fromfile in renamed.values(): |
|
160 | for fromfile in renamed.values(): | |
161 | files[fromfile] = rev |
|
161 | files[fromfile] = rev | |
162 | return (files.items(), copies) |
|
162 | return (files.items(), copies) | |
163 |
|
163 | |||
164 | def getmode(self, name, rev): |
|
164 | def getmode(self, name, rev): | |
165 | self.mtnloadmanifest(rev) |
|
165 | self.mtnloadmanifest(rev) | |
166 | try: |
|
166 | try: | |
167 | node, attr = self.files[name] |
|
167 | node, attr = self.files[name] | |
168 | return attr |
|
168 | return attr | |
169 | except KeyError: |
|
169 | except KeyError: | |
170 | return "" |
|
170 | return "" | |
171 |
|
171 | |||
172 | def getfile(self, name, rev): |
|
172 | def getfile(self, name, rev): | |
173 | if not self.mtnisfile(name, rev): |
|
173 | if not self.mtnisfile(name, rev): | |
174 | raise IOError() # file was deleted or renamed |
|
174 | raise IOError() # file was deleted or renamed | |
175 | try: |
|
175 | try: | |
176 | return self.mtnrun("get_file_of", name, r=rev) |
|
176 | return self.mtnrun("get_file_of", name, r=rev) | |
177 | except: |
|
177 | except: | |
178 | raise IOError() # file was deleted or renamed |
|
178 | raise IOError() # file was deleted or renamed | |
179 |
|
179 | |||
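The IOError raised here is effectively a protocol: the convert engine treats a failing getfile() as "this path no longer exists in this revision" and records a deletion. A hedged, self-contained sketch of that contract with stub objects (names and data are made up):

    import sys

    class stubsource(object):
        contents = {('hello.txt', 'rev2'): 'hi\n'}
        def getfile(self, name, rev):
            try:
                return self.contents[(name, rev)]
            except KeyError:
                raise IOError()          # deleted or renamed away in rev

    src = stubsource()
    for name in ['hello.txt', 'old.txt']:
        try:
            data = src.getfile(name, 'rev2')
            sys.stdout.write('%s: %d bytes\n' % (name, len(data)))
        except IOError:
            sys.stdout.write('%s: removed\n' % name)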
180 | def getcommit(self, rev): |
|
180 | def getcommit(self, rev): | |
181 | certs = self.mtngetcerts(rev) |
|
181 | certs = self.mtngetcerts(rev) | |
182 | return commit( |
|
182 | return commit( | |
183 | author=certs["author"], |
|
183 | author=certs["author"], | |
184 | date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")), |
|
184 | date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")), | |
185 | desc=certs["changelog"], |
|
185 | desc=certs["changelog"], | |
186 | rev=rev, |
|
186 | rev=rev, | |
187 | parents=self.mtnrun("parents", rev).splitlines(), |
|
187 | parents=self.mtnrun("parents", rev).splitlines(), | |
188 | branch=certs["branch"]) |
|
188 | branch=certs["branch"]) | |
189 |
|
189 | |||
190 | def gettags(self): |
|
190 | def gettags(self): | |
191 | tags = {} |
|
191 | tags = {} | |
192 | for e in self.mtnrun("tags").split("\n\n"): |
|
192 | for e in self.mtnrun("tags").split("\n\n"): | |
193 | m = self.tag_re.match(e) |
|
193 | m = self.tag_re.match(e) | |
194 | if m: |
|
194 | if m: | |
195 | tags[m.group(1)] = m.group(2) |
|
195 | tags[m.group(1)] = m.group(2) | |
196 | return tags |
|
196 | return tags | |
197 |
|
197 | |||
198 | def getchangedfiles(self, rev, i): |
|
198 | def getchangedfiles(self, rev, i): | |
199 | # This function is only needed to support --filemap |
|
199 | # This function is only needed to support --filemap | |
200 | # ... and we don't support that |
|
200 | # ... and we don't support that | |
201 | raise NotImplementedError() |
|
201 | raise NotImplementedError() |
@@ -1,422 +1,421 b'' | |||||
1 | # ASCII graph log extension for Mercurial |
|
1 | # ASCII graph log extension for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> |
|
3 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of |
|
5 | # This software may be used and distributed according to the terms of | |
6 | # the GNU General Public License, incorporated herein by reference. |
|
6 | # the GNU General Public License, incorporated herein by reference. | |
7 | '''show revision graphs in terminal windows |
|
7 | '''show revision graphs in terminal windows | |
8 |
|
8 | |||
9 | This extension adds a --graph option to the incoming, outgoing and log |
|
9 | This extension adds a --graph option to the incoming, outgoing and log | |
10 | commands. When this option is given, an ASCII representation of the |
|
10 | commands. When this option is given, an ASCII representation of the | |
11 | revision graph is also shown. |
|
11 | revision graph is also shown. | |
12 | ''' |
|
12 | ''' | |
13 |
|
13 | |||
14 | import os |
|
14 | import os | |
15 | import sys |
|
|||
16 | from mercurial.cmdutil import revrange, show_changeset |
|
15 | from mercurial.cmdutil import revrange, show_changeset | |
17 | from mercurial.commands import templateopts |
|
16 | from mercurial.commands import templateopts | |
18 | from mercurial.i18n import _ |
|
17 | from mercurial.i18n import _ | |
19 | from mercurial.node import nullrev |
|
18 | from mercurial.node import nullrev | |
20 | from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions |
|
19 | from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions | |
21 | from mercurial import hg, url, util |
|
20 | from mercurial import hg, url, util | |
22 |
|
21 | |||
23 | def revisions(repo, start, stop): |
|
22 | def revisions(repo, start, stop): | |
24 | """cset DAG generator yielding (rev, node, [parents]) tuples |
|
23 | """cset DAG generator yielding (rev, node, [parents]) tuples | |
25 |
|
24 | |||
26 | This generator function walks through the revision history from revision |
|
25 | This generator function walks through the revision history from revision | |
27 | start to revision stop (which must be less than or equal to start). |
|
26 | start to revision stop (which must be less than or equal to start). | |
28 | """ |
|
27 | """ | |
29 | assert start >= stop |
|
28 | assert start >= stop | |
30 | cur = start |
|
29 | cur = start | |
31 | while cur >= stop: |
|
30 | while cur >= stop: | |
32 | ctx = repo[cur] |
|
31 | ctx = repo[cur] | |
33 | parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev] |
|
32 | parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev] | |
34 | parents.sort() |
|
33 | parents.sort() | |
35 | yield (ctx, parents) |
|
34 | yield (ctx, parents) | |
36 | cur -= 1 |
|
35 | cur -= 1 | |
37 |
|
36 | |||
38 | def filerevs(repo, path, start, stop): |
|
37 | def filerevs(repo, path, start, stop): | |
39 | """file cset DAG generator yielding (rev, node, [parents]) tuples |
|
38 | """file cset DAG generator yielding (rev, node, [parents]) tuples | |
40 |
|
39 | |||
41 | This generator function walks through the revision history of a single |
|
40 | This generator function walks through the revision history of a single | |
42 | file from revision start to revision stop (which must be less than or |
|
41 | file from revision start to revision stop (which must be less than or | |
43 | equal to start). |
|
42 | equal to start). | |
44 | """ |
|
43 | """ | |
45 | assert start >= stop |
|
44 | assert start >= stop | |
46 | filerev = len(repo.file(path)) - 1 |
|
45 | filerev = len(repo.file(path)) - 1 | |
47 | while filerev >= 0: |
|
46 | while filerev >= 0: | |
48 | fctx = repo.filectx(path, fileid=filerev) |
|
47 | fctx = repo.filectx(path, fileid=filerev) | |
49 | parents = [f.linkrev() for f in fctx.parents() if f.path() == path] |
|
48 | parents = [f.linkrev() for f in fctx.parents() if f.path() == path] | |
50 | parents.sort() |
|
49 | parents.sort() | |
51 | if fctx.rev() <= start: |
|
50 | if fctx.rev() <= start: | |
52 | yield (fctx, parents) |
|
51 | yield (fctx, parents) | |
53 | if fctx.rev() <= stop: |
|
52 | if fctx.rev() <= stop: | |
54 | break |
|
53 | break | |
55 | filerev -= 1 |
|
54 | filerev -= 1 | |
56 |
|
55 | |||
57 | def grapher(nodes): |
|
56 | def grapher(nodes): | |
58 | """grapher for asciigraph on a list of nodes and their parents |
|
57 | """grapher for asciigraph on a list of nodes and their parents | |
59 |
|
58 | |||
60 | nodes must generate tuples (node, parents, char, lines) where |
|
59 | nodes must generate tuples (node, parents, char, lines) where | |
61 | - parents must generate the parents of node, in sorted order, |
|
60 | - parents must generate the parents of node, in sorted order, | |
62 | and max length 2, |
|
61 | and max length 2, | |
63 | - char is the char to print as the node symbol, and |
|
62 | - char is the char to print as the node symbol, and | |
64 | - lines are the lines to display next to the node. |
|
63 | - lines are the lines to display next to the node. | |
65 | """ |
|
64 | """ | |
66 | seen = [] |
|
65 | seen = [] | |
67 | for node, parents, char, lines in nodes: |
|
66 | for node, parents, char, lines in nodes: | |
68 | if node not in seen: |
|
67 | if node not in seen: | |
69 | seen.append(node) |
|
68 | seen.append(node) | |
70 | nodeidx = seen.index(node) |
|
69 | nodeidx = seen.index(node) | |
71 |
|
70 | |||
72 | knownparents = [] |
|
71 | knownparents = [] | |
73 | newparents = [] |
|
72 | newparents = [] | |
74 | for parent in parents: |
|
73 | for parent in parents: | |
75 | if parent in seen: |
|
74 | if parent in seen: | |
76 | knownparents.append(parent) |
|
75 | knownparents.append(parent) | |
77 | else: |
|
76 | else: | |
78 | newparents.append(parent) |
|
77 | newparents.append(parent) | |
79 |
|
78 | |||
80 | ncols = len(seen) |
|
79 | ncols = len(seen) | |
81 | nextseen = seen[:] |
|
80 | nextseen = seen[:] | |
82 | nextseen[nodeidx:nodeidx + 1] = newparents |
|
81 | nextseen[nodeidx:nodeidx + 1] = newparents | |
83 | edges = [(nodeidx, nextseen.index(p)) for p in knownparents] |
|
82 | edges = [(nodeidx, nextseen.index(p)) for p in knownparents] | |
84 |
|
83 | |||
85 | if len(newparents) > 0: |
|
84 | if len(newparents) > 0: | |
86 | edges.append((nodeidx, nodeidx)) |
|
85 | edges.append((nodeidx, nodeidx)) | |
87 | if len(newparents) > 1: |
|
86 | if len(newparents) > 1: | |
88 | edges.append((nodeidx, nodeidx + 1)) |
|
87 | edges.append((nodeidx, nodeidx + 1)) | |
89 | nmorecols = len(nextseen) - ncols |
|
88 | nmorecols = len(nextseen) - ncols | |
90 | seen = nextseen |
|
89 | seen = nextseen | |
91 | yield (char, lines, nodeidx, edges, ncols, nmorecols) |
|
90 | yield (char, lines, nodeidx, edges, ncols, nmorecols) | |
92 |
|
91 | |||
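The heart of grapher() is the 'seen' list: one entry per ongoing edge, with a node's unseen parents spliced in at the node's own column. A simplified, self-contained sketch of just that bookkeeping on a made-up DAG (the real generator also emits the node symbol, the extra same-column edges for new parents, and the text lines):

    import sys

    def columns(nodes):
        seen = []
        for node, parents in nodes:
            if node not in seen:
                seen.append(node)
            idx = seen.index(node)
            newparents = [p for p in parents if p not in seen]
            nextseen = seen[:]
            nextseen[idx:idx + 1] = newparents   # node's column becomes its parents'
            edges = [(idx, nextseen.index(p)) for p in parents if p in seen]
            yield node, idx, len(seen), edges
            seen = nextseen

    # rev 4 merges 2 and 3, which both descend from 1 (made-up history)
    for node, idx, ncols, edges in columns([(4, [2, 3]), (3, [1]), (2, [1]), (1, [])]):
        sys.stdout.write('rev %d at column %d of %d, edges %r\n'
                         % (node, idx, ncols, edges))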
93 | def fix_long_right_edges(edges): |
|
92 | def fix_long_right_edges(edges): | |
94 | for (i, (start, end)) in enumerate(edges): |
|
93 | for (i, (start, end)) in enumerate(edges): | |
95 | if end > start: |
|
94 | if end > start: | |
96 | edges[i] = (start, end + 1) |
|
95 | edges[i] = (start, end + 1) | |
97 |
|
96 | |||
98 | def get_nodeline_edges_tail( |
|
97 | def get_nodeline_edges_tail( | |
99 | node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail): |
|
98 | node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail): | |
100 | if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0: |
|
99 | if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0: | |
101 | # Still going in the same non-vertical direction. |
|
100 | # Still going in the same non-vertical direction. | |
102 | if n_columns_diff == -1: |
|
101 | if n_columns_diff == -1: | |
103 | start = max(node_index + 1, p_node_index) |
|
102 | start = max(node_index + 1, p_node_index) | |
104 | tail = ["|", " "] * (start - node_index - 1) |
|
103 | tail = ["|", " "] * (start - node_index - 1) | |
105 | tail.extend(["/", " "] * (n_columns - start)) |
|
104 | tail.extend(["/", " "] * (n_columns - start)) | |
106 | return tail |
|
105 | return tail | |
107 | else: |
|
106 | else: | |
108 | return ["\\", " "] * (n_columns - node_index - 1) |
|
107 | return ["\\", " "] * (n_columns - node_index - 1) | |
109 | else: |
|
108 | else: | |
110 | return ["|", " "] * (n_columns - node_index - 1) |
|
109 | return ["|", " "] * (n_columns - node_index - 1) | |
111 |
|
110 | |||
112 | def draw_edges(edges, nodeline, interline): |
|
111 | def draw_edges(edges, nodeline, interline): | |
113 | for (start, end) in edges: |
|
112 | for (start, end) in edges: | |
114 | if start == end + 1: |
|
113 | if start == end + 1: | |
115 | interline[2 * end + 1] = "/" |
|
114 | interline[2 * end + 1] = "/" | |
116 | elif start == end - 1: |
|
115 | elif start == end - 1: | |
117 | interline[2 * start + 1] = "\\" |
|
116 | interline[2 * start + 1] = "\\" | |
118 | elif start == end: |
|
117 | elif start == end: | |
119 | interline[2 * start] = "|" |
|
118 | interline[2 * start] = "|" | |
120 | else: |
|
119 | else: | |
121 | nodeline[2 * end] = "+" |
|
120 | nodeline[2 * end] = "+" | |
122 | if start > end: |
|
121 | if start > end: | |
123 | (start, end) = (end, start) |
|
122 | (start, end) = (end, start) | |
124 | for i in range(2 * start + 1, 2 * end): |
|
123 | for i in range(2 * start + 1, 2 * end): | |
125 | if nodeline[i] != "+": |
|
124 | if nodeline[i] != "+": | |
126 | nodeline[i] = "-" |
|
125 | nodeline[i] = "-" | |
127 |
|
126 | |||
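A quick way to see what draw_edges() above produces is to run its three simple cases by hand on two-column line buffers; this inlines just those cases (the long-range '+'/'-' case is omitted):

    import sys

    def draw(edges, nodeline, interline):
        for start, end in edges:
            if start == end + 1:
                interline[2 * end + 1] = "/"     # parent is one column left
            elif start == end - 1:
                interline[2 * start + 1] = "\\"  # parent is one column right
            elif start == end:
                interline[2 * start] = "|"       # straight down

    nodeline = ["o", " ", "|", " "]
    interline = [" ", " "] * 2
    draw([(0, 0), (1, 0)], nodeline, interline)
    sys.stdout.write("".join(nodeline) + "\n")   # o |
    sys.stdout.write("".join(interline) + "\n")  # |/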
128 | def get_padding_line(ni, n_columns, edges): |
|
127 | def get_padding_line(ni, n_columns, edges): | |
129 | line = [] |
|
128 | line = [] | |
130 | line.extend(["|", " "] * ni) |
|
129 | line.extend(["|", " "] * ni) | |
131 | if (ni, ni - 1) in edges or (ni, ni) in edges: |
|
130 | if (ni, ni - 1) in edges or (ni, ni) in edges: | |
132 | # (ni, ni - 1) (ni, ni) |
|
131 | # (ni, ni - 1) (ni, ni) | |
133 | # | | | | | | | | |
|
132 | # | | | | | | | | | |
134 | # +---o | | o---+ |
|
133 | # +---o | | o---+ | |
135 | # | | c | | c | | |
|
134 | # | | c | | c | | | |
136 | # | |/ / | |/ / |
|
135 | # | |/ / | |/ / | |
137 | # | | | | | | |
|
136 | # | | | | | | | |
138 | c = "|" |
|
137 | c = "|" | |
139 | else: |
|
138 | else: | |
140 | c = " " |
|
139 | c = " " | |
141 | line.extend([c, " "]) |
|
140 | line.extend([c, " "]) | |
142 | line.extend(["|", " "] * (n_columns - ni - 1)) |
|
141 | line.extend(["|", " "] * (n_columns - ni - 1)) | |
143 | return line |
|
142 | return line | |
144 |
|
143 | |||
145 | def ascii(ui, grapher): |
|
144 | def ascii(ui, grapher): | |
146 | """prints an ASCII graph of the DAG returned by the grapher |
|
145 | """prints an ASCII graph of the DAG returned by the grapher | |
147 |
|
146 | |||
148 | grapher is a generator that emits tuples with the following elements: |
|
147 | grapher is a generator that emits tuples with the following elements: | |
149 |
|
148 | |||
150 | - Character to use as node's symbol. |
|
149 | - Character to use as node's symbol. | |
151 | - List of lines to display as the node's text. |
|
150 | - List of lines to display as the node's text. | |
152 | - Column of the current node in the set of ongoing edges. |
|
151 | - Column of the current node in the set of ongoing edges. | |
153 | - Edges; a list of (col, next_col) indicating the edges between |
|
152 | - Edges; a list of (col, next_col) indicating the edges between | |
154 | the current node and its parents. |
|
153 | the current node and its parents. | |
155 | - Number of columns (ongoing edges) in the current revision. |
|
154 | - Number of columns (ongoing edges) in the current revision. | |
156 | - The difference between the number of columns (ongoing edges) |
|
155 | - The difference between the number of columns (ongoing edges) | |
157 | in the next revision and the number of columns (ongoing edges) |
|
156 | in the next revision and the number of columns (ongoing edges) | |
158 | in the current revision. That is: -1 means one column removed; |
|
157 | in the current revision. That is: -1 means one column removed; | |
159 | 0 means no columns added or removed; 1 means one column added. |
|
158 | 0 means no columns added or removed; 1 means one column added. | |
160 | """ |
|
159 | """ | |
161 | prev_n_columns_diff = 0 |
|
160 | prev_n_columns_diff = 0 | |
162 | prev_node_index = 0 |
|
161 | prev_node_index = 0 | |
163 | for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher: |
|
162 | for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher: | |
164 |
|
163 | |||
165 | assert -2 < n_columns_diff < 2 |
|
164 | assert -2 < n_columns_diff < 2 | |
166 | if n_columns_diff == -1: |
|
165 | if n_columns_diff == -1: | |
167 | # Transform |
|
166 | # Transform | |
168 | # |
|
167 | # | |
169 | # | | | | | | |
|
168 | # | | | | | | | |
170 | # o | | into o---+ |
|
169 | # o | | into o---+ | |
171 | # |X / |/ / |
|
170 | # |X / |/ / | |
172 | # | | | | |
|
171 | # | | | | | |
173 | fix_long_right_edges(edges) |
|
172 | fix_long_right_edges(edges) | |
174 |
|
173 | |||
175 | # add_padding_line says whether to rewrite |
|
174 | # add_padding_line says whether to rewrite | |
176 | # |
|
175 | # | |
177 | # | | | | | | | | |
|
176 | # | | | | | | | | | |
178 | # | o---+ into | o---+ |
|
177 | # | o---+ into | o---+ | |
179 | # | / / | | | # <--- padding line |
|
178 | # | / / | | | # <--- padding line | |
180 | # o | | | / / |
|
179 | # o | | | / / | |
181 | # o | | |
|
180 | # o | | | |
182 | add_padding_line = (len(node_lines) > 2 and |
|
181 | add_padding_line = (len(node_lines) > 2 and | |
183 | n_columns_diff == -1 and |
|
182 | n_columns_diff == -1 and | |
184 | [x for (x, y) in edges if x + 1 < y]) |
|
183 | [x for (x, y) in edges if x + 1 < y]) | |
185 |
|
184 | |||
186 | # fix_nodeline_tail says whether to rewrite |
|
185 | # fix_nodeline_tail says whether to rewrite | |
187 | # |
|
186 | # | |
188 | # | | o | | | | o | | |
|
187 | # | | o | | | | o | | | |
189 | # | | |/ / | | |/ / |
|
188 | # | | |/ / | | |/ / | |
190 | # | o | | into | o / / # <--- fixed nodeline tail |
|
189 | # | o | | into | o / / # <--- fixed nodeline tail | |
191 | # | |/ / | |/ / |
|
190 | # | |/ / | |/ / | |
192 | # o | | o | | |
|
191 | # o | | o | | | |
193 | fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line |
|
192 | fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line | |
194 |
|
193 | |||
195 | # nodeline is the line containing the node character (typically o) |
|
194 | # nodeline is the line containing the node character (typically o) | |
196 | nodeline = ["|", " "] * node_index |
|
195 | nodeline = ["|", " "] * node_index | |
197 | nodeline.extend([node_ch, " "]) |
|
196 | nodeline.extend([node_ch, " "]) | |
198 |
|
197 | |||
199 | nodeline.extend( |
|
198 | nodeline.extend( | |
200 | get_nodeline_edges_tail( |
|
199 | get_nodeline_edges_tail( | |
201 | node_index, prev_node_index, n_columns, n_columns_diff, |
|
200 | node_index, prev_node_index, n_columns, n_columns_diff, | |
202 | prev_n_columns_diff, fix_nodeline_tail)) |
|
201 | prev_n_columns_diff, fix_nodeline_tail)) | |
203 |
|
202 | |||
204 | # shift_interline is the line containing the non-vertical |
|
203 | # shift_interline is the line containing the non-vertical | |
205 | # edges between this entry and the next |
|
204 | # edges between this entry and the next | |
206 | shift_interline = ["|", " "] * node_index |
|
205 | shift_interline = ["|", " "] * node_index | |
207 | if n_columns_diff == -1: |
|
206 | if n_columns_diff == -1: | |
208 | n_spaces = 1 |
|
207 | n_spaces = 1 | |
209 | edge_ch = "/" |
|
208 | edge_ch = "/" | |
210 | elif n_columns_diff == 0: |
|
209 | elif n_columns_diff == 0: | |
211 | n_spaces = 2 |
|
210 | n_spaces = 2 | |
212 | edge_ch = "|" |
|
211 | edge_ch = "|" | |
213 | else: |
|
212 | else: | |
214 | n_spaces = 3 |
|
213 | n_spaces = 3 | |
215 | edge_ch = "\\" |
|
214 | edge_ch = "\\" | |
216 | shift_interline.extend(n_spaces * [" "]) |
|
215 | shift_interline.extend(n_spaces * [" "]) | |
217 | shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1)) |
|
216 | shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1)) | |
218 |
|
217 | |||
219 | # draw edges from the current node to its parents |
|
218 | # draw edges from the current node to its parents | |
220 | draw_edges(edges, nodeline, shift_interline) |
|
219 | draw_edges(edges, nodeline, shift_interline) | |
221 |
|
220 | |||
222 | # lines is the list of all graph lines to print |
|
221 | # lines is the list of all graph lines to print | |
223 | lines = [nodeline] |
|
222 | lines = [nodeline] | |
224 | if add_padding_line: |
|
223 | if add_padding_line: | |
225 | lines.append(get_padding_line(node_index, n_columns, edges)) |
|
224 | lines.append(get_padding_line(node_index, n_columns, edges)) | |
226 | lines.append(shift_interline) |
|
225 | lines.append(shift_interline) | |
227 |
|
226 | |||
228 | # make sure that there are as many graph lines as there are |
|
227 | # make sure that there are as many graph lines as there are | |
229 | # log strings |
|
228 | # log strings | |
230 | while len(node_lines) < len(lines): |
|
229 | while len(node_lines) < len(lines): | |
231 | node_lines.append("") |
|
230 | node_lines.append("") | |
232 | if len(lines) < len(node_lines): |
|
231 | if len(lines) < len(node_lines): | |
233 | extra_interline = ["|", " "] * (n_columns + n_columns_diff) |
|
232 | extra_interline = ["|", " "] * (n_columns + n_columns_diff) | |
234 | while len(lines) < len(node_lines): |
|
233 | while len(lines) < len(node_lines): | |
235 | lines.append(extra_interline) |
|
234 | lines.append(extra_interline) | |
236 |
|
235 | |||
237 | # print lines |
|
236 | # print lines | |
238 | indentation_level = max(n_columns, n_columns + n_columns_diff) |
|
237 | indentation_level = max(n_columns, n_columns + n_columns_diff) | |
239 | for (line, logstr) in zip(lines, node_lines): |
|
238 | for (line, logstr) in zip(lines, node_lines): | |
240 | ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr) |
|
239 | ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr) | |
241 | ui.write(ln.rstrip() + '\n') |
|
240 | ui.write(ln.rstrip() + '\n') | |
242 |
|
241 | |||
243 | # ... and start over |
|
242 | # ... and start over | |
244 | prev_node_index = node_index |
|
243 | prev_node_index = node_index | |
245 | prev_n_columns_diff = n_columns_diff |
|
244 | prev_n_columns_diff = n_columns_diff | |
246 |
|
245 | |||
247 | def get_revs(repo, rev_opt): |
|
246 | def get_revs(repo, rev_opt): | |
248 | if rev_opt: |
|
247 | if rev_opt: | |
249 | revs = revrange(repo, rev_opt) |
|
248 | revs = revrange(repo, rev_opt) | |
250 | return (max(revs), min(revs)) |
|
249 | return (max(revs), min(revs)) | |
251 | else: |
|
250 | else: | |
252 | return (len(repo) - 1, 0) |
|
251 | return (len(repo) - 1, 0) | |
253 |
|
252 | |||
254 | def check_unsupported_flags(opts): |
|
253 | def check_unsupported_flags(opts): | |
255 | for op in ["follow", "follow_first", "date", "copies", "keyword", "remove", |
|
254 | for op in ["follow", "follow_first", "date", "copies", "keyword", "remove", | |
256 | "only_merges", "user", "only_branch", "prune", "newest_first", |
|
255 | "only_merges", "user", "only_branch", "prune", "newest_first", | |
257 | "no_merges", "include", "exclude"]: |
|
256 | "no_merges", "include", "exclude"]: | |
258 | if op in opts and opts[op]: |
|
257 | if op in opts and opts[op]: | |
259 | raise util.Abort(_("--graph option is incompatible with --%s") % op) |
|
258 | raise util.Abort(_("--graph option is incompatible with --%s") % op) | |
260 |
|
259 | |||
261 | def graphlog(ui, repo, path=None, **opts): |
|
260 | def graphlog(ui, repo, path=None, **opts): | |
262 | """show revision history alongside an ASCII revision graph |
|
261 | """show revision history alongside an ASCII revision graph | |
263 |
|
262 | |||
264 | Print a revision history alongside a revision graph drawn with |
|
263 | Print a revision history alongside a revision graph drawn with | |
265 | ASCII characters. |
|
264 | ASCII characters. | |
266 |
|
265 | |||
267 | Nodes printed as an @ character are parents of the working |
|
266 | Nodes printed as an @ character are parents of the working | |
268 | directory. |
|
267 | directory. | |
269 | """ |
|
268 | """ | |
270 |
|
269 | |||
271 | check_unsupported_flags(opts) |
|
270 | check_unsupported_flags(opts) | |
272 | limit = cmdutil.loglimit(opts) |
|
271 | limit = cmdutil.loglimit(opts) | |
273 | start, stop = get_revs(repo, opts["rev"]) |
|
272 | start, stop = get_revs(repo, opts["rev"]) | |
274 | stop = max(stop, start - limit + 1) |
|
273 | stop = max(stop, start - limit + 1) | |
275 | if start == nullrev: |
|
274 | if start == nullrev: | |
276 | return |
|
275 | return | |
277 |
|
276 | |||
278 | if path: |
|
277 | if path: | |
279 | path = util.canonpath(repo.root, os.getcwd(), path) |
|
278 | path = util.canonpath(repo.root, os.getcwd(), path) | |
280 | if path: # could be reset in canonpath |
|
279 | if path: # could be reset in canonpath | |
281 | revdag = filerevs(repo, path, start, stop) |
|
280 | revdag = filerevs(repo, path, start, stop) | |
282 | else: |
|
281 | else: | |
283 | revdag = revisions(repo, start, stop) |
|
282 | revdag = revisions(repo, start, stop) | |
284 |
|
283 | |||
285 | graphdag = graphabledag(ui, repo, revdag, opts) |
|
284 | graphdag = graphabledag(ui, repo, revdag, opts) | |
286 | ascii(ui, grapher(graphdag)) |
|
285 | ascii(ui, grapher(graphdag)) | |
287 |
|
286 | |||
288 | def graphrevs(repo, nodes, opts): |
|
287 | def graphrevs(repo, nodes, opts): | |
289 | nodes.reverse() |
|
288 | nodes.reverse() | |
290 | include = util.set(nodes) |
|
289 | include = util.set(nodes) | |
291 | limit = cmdutil.loglimit(opts) |
|
290 | limit = cmdutil.loglimit(opts) | |
292 | count = 0 |
|
291 | count = 0 | |
293 | for node in nodes: |
|
292 | for node in nodes: | |
294 | if count >= limit: |
|
293 | if count >= limit: | |
295 | break |
|
294 | break | |
296 | ctx = repo[node] |
|
295 | ctx = repo[node] | |
297 | parents = [p.rev() for p in ctx.parents() if p.node() in include] |
|
296 | parents = [p.rev() for p in ctx.parents() if p.node() in include] | |
298 | parents.sort() |
|
297 | parents.sort() | |
299 | yield (ctx, parents) |
|
298 | yield (ctx, parents) | |
300 | count += 1 |
|
299 | count += 1 | |
301 |
|
300 | |||
302 | def graphabledag(ui, repo, revdag, opts): |
|
301 | def graphabledag(ui, repo, revdag, opts): | |
303 | showparents = [ctx.node() for ctx in repo[None].parents()] |
|
302 | showparents = [ctx.node() for ctx in repo[None].parents()] | |
304 | displayer = show_changeset(ui, repo, opts, buffered=True) |
|
303 | displayer = show_changeset(ui, repo, opts, buffered=True) | |
305 | for (ctx, parents) in revdag: |
|
304 | for (ctx, parents) in revdag: | |
306 | displayer.show(ctx) |
|
305 | displayer.show(ctx) | |
307 | lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1] |
|
306 | lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1] | |
308 | char = ctx.node() in showparents and '@' or 'o' |
|
307 | char = ctx.node() in showparents and '@' or 'o' | |
309 | yield (ctx.rev(), parents, char, lines) |
|
308 | yield (ctx.rev(), parents, char, lines) | |
310 |
|
309 | |||
311 | def goutgoing(ui, repo, dest=None, **opts): |
|
310 | def goutgoing(ui, repo, dest=None, **opts): | |
312 | """show the outgoing changesets alongside an ASCII revision graph |
|
311 | """show the outgoing changesets alongside an ASCII revision graph | |
313 |
|
312 | |||
314 | Print the outgoing changesets alongside a revision graph drawn with |
|
313 | Print the outgoing changesets alongside a revision graph drawn with | |
315 | ASCII characters. |
|
314 | ASCII characters. | |
316 |
|
315 | |||
317 | Nodes printed as an @ character are parents of the working |
|
316 | Nodes printed as an @ character are parents of the working | |
318 | directory. |
|
317 | directory. | |
319 | """ |
|
318 | """ | |
320 |
|
319 | |||
321 | check_unsupported_flags(opts) |
|
320 | check_unsupported_flags(opts) | |
322 | dest, revs, checkout = hg.parseurl( |
|
321 | dest, revs, checkout = hg.parseurl( | |
323 | ui.expandpath(dest or 'default-push', dest or 'default'), |
|
322 | ui.expandpath(dest or 'default-push', dest or 'default'), | |
324 | opts.get('rev')) |
|
323 | opts.get('rev')) | |
325 | cmdutil.setremoteconfig(ui, opts) |
|
324 | cmdutil.setremoteconfig(ui, opts) | |
326 | if revs: |
|
325 | if revs: | |
327 | revs = [repo.lookup(rev) for rev in revs] |
|
326 | revs = [repo.lookup(rev) for rev in revs] | |
328 | other = hg.repository(ui, dest) |
|
327 | other = hg.repository(ui, dest) | |
329 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) |
|
328 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) | |
330 | o = repo.findoutgoing(other, force=opts.get('force')) |
|
329 | o = repo.findoutgoing(other, force=opts.get('force')) | |
331 | if not o: |
|
330 | if not o: | |
332 | ui.status(_("no changes found\n")) |
|
331 | ui.status(_("no changes found\n")) | |
333 | return |
|
332 | return | |
334 |
|
333 | |||
335 | o = repo.changelog.nodesbetween(o, revs)[0] |
|
334 | o = repo.changelog.nodesbetween(o, revs)[0] | |
336 | revdag = graphrevs(repo, o, opts) |
|
335 | revdag = graphrevs(repo, o, opts) | |
337 | graphdag = graphabledag(ui, repo, revdag, opts) |
|
336 | graphdag = graphabledag(ui, repo, revdag, opts) | |
338 | ascii(ui, grapher(graphdag)) |
|
337 | ascii(ui, grapher(graphdag)) | |
339 |
|
338 | |||
340 | def gincoming(ui, repo, source="default", **opts): |
|
339 | def gincoming(ui, repo, source="default", **opts): | |
341 | """show the incoming changesets alongside an ASCII revision graph |
|
340 | """show the incoming changesets alongside an ASCII revision graph | |
342 |
|
341 | |||
343 | Print the incoming changesets alongside a revision graph drawn with |
|
342 | Print the incoming changesets alongside a revision graph drawn with | |
344 | ASCII characters. |
|
343 | ASCII characters. | |
345 |
|
344 | |||
346 | Nodes printed as an @ character are parents of the working |
|
345 | Nodes printed as an @ character are parents of the working | |
347 | directory. |
|
346 | directory. | |
348 | """ |
|
347 | """ | |
349 |
|
348 | |||
350 | check_unsupported_flags(opts) |
|
349 | check_unsupported_flags(opts) | |
351 | source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev')) |
|
350 | source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev')) | |
352 | cmdutil.setremoteconfig(ui, opts) |
|
351 | cmdutil.setremoteconfig(ui, opts) | |
353 |
|
352 | |||
354 | other = hg.repository(ui, source) |
|
353 | other = hg.repository(ui, source) | |
355 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) |
|
354 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) | |
356 | if revs: |
|
355 | if revs: | |
357 | revs = [other.lookup(rev) for rev in revs] |
|
356 | revs = [other.lookup(rev) for rev in revs] | |
358 | incoming = repo.findincoming(other, heads=revs, force=opts["force"]) |
|
357 | incoming = repo.findincoming(other, heads=revs, force=opts["force"]) | |
359 | if not incoming: |
|
358 | if not incoming: | |
360 | try: |
|
359 | try: | |
361 | os.unlink(opts["bundle"]) |
|
360 | os.unlink(opts["bundle"]) | |
362 | except: |
|
361 | except: | |
363 | pass |
|
362 | pass | |
364 | ui.status(_("no changes found\n")) |
|
363 | ui.status(_("no changes found\n")) | |
365 | return |
|
364 | return | |
366 |
|
365 | |||
367 | cleanup = None |
|
366 | cleanup = None | |
368 | try: |
|
367 | try: | |
369 |
|
368 | |||
370 | fname = opts["bundle"] |
|
369 | fname = opts["bundle"] | |
371 | if fname or not other.local(): |
|
370 | if fname or not other.local(): | |
372 | # create a bundle (uncompressed if other repo is not local) |
|
371 | # create a bundle (uncompressed if other repo is not local) | |
373 | if revs is None: |
|
372 | if revs is None: | |
374 | cg = other.changegroup(incoming, "incoming") |
|
373 | cg = other.changegroup(incoming, "incoming") | |
375 | else: |
|
374 | else: | |
376 | cg = other.changegroupsubset(incoming, revs, 'incoming') |
|
375 | cg = other.changegroupsubset(incoming, revs, 'incoming') | |
377 | bundletype = other.local() and "HG10BZ" or "HG10UN" |
|
376 | bundletype = other.local() and "HG10BZ" or "HG10UN" | |
378 | fname = cleanup = changegroup.writebundle(cg, fname, bundletype) |
|
377 | fname = cleanup = changegroup.writebundle(cg, fname, bundletype) | |
379 | # keep written bundle? |
|
378 | # keep written bundle? | |
380 | if opts["bundle"]: |
|
379 | if opts["bundle"]: | |
381 | cleanup = None |
|
380 | cleanup = None | |
382 | if not other.local(): |
|
381 | if not other.local(): | |
383 | # use the created uncompressed bundlerepo |
|
382 | # use the created uncompressed bundlerepo | |
384 | other = bundlerepo.bundlerepository(ui, repo.root, fname) |
|
383 | other = bundlerepo.bundlerepository(ui, repo.root, fname) | |
385 |
|
384 | |||
386 | chlist = other.changelog.nodesbetween(incoming, revs)[0] |
|
385 | chlist = other.changelog.nodesbetween(incoming, revs)[0] | |
387 | revdag = graphrevs(other, chlist, opts) |
|
386 | revdag = graphrevs(other, chlist, opts) | |
388 | other_parents = [] |
|
387 | other_parents = [] | |
389 | displayer = show_changeset(ui, other, opts, buffered=True) |
|
388 | displayer = show_changeset(ui, other, opts, buffered=True) | |
390 | graphdag = graphabledag(ui, repo, revdag, opts) |
|
389 | graphdag = graphabledag(ui, repo, revdag, opts) | |
391 | ascii(ui, grapher(graphdag)) |
|
390 | ascii(ui, grapher(graphdag)) | |
392 |
|
391 | |||
393 | finally: |
|
392 | finally: | |
394 | if hasattr(other, 'close'): |
|
393 | if hasattr(other, 'close'): | |
395 | other.close() |
|
394 | other.close() | |
396 | if cleanup: |
|
395 | if cleanup: | |
397 | os.unlink(cleanup) |
|
396 | os.unlink(cleanup) | |
398 |
|
397 | |||
399 | def uisetup(ui): |
|
398 | def uisetup(ui): | |
400 | '''Initialize the extension.''' |
|
399 | '''Initialize the extension.''' | |
401 | _wrapcmd(ui, 'log', commands.table, graphlog) |
|
400 | _wrapcmd(ui, 'log', commands.table, graphlog) | |
402 | _wrapcmd(ui, 'incoming', commands.table, gincoming) |
|
401 | _wrapcmd(ui, 'incoming', commands.table, gincoming) | |
403 | _wrapcmd(ui, 'outgoing', commands.table, goutgoing) |
|
402 | _wrapcmd(ui, 'outgoing', commands.table, goutgoing) | |
404 |
|
403 | |||
405 | def _wrapcmd(ui, cmd, table, wrapfn): |
|
404 | def _wrapcmd(ui, cmd, table, wrapfn): | |
406 | '''wrap the command''' |
|
405 | '''wrap the command''' | |
407 | def graph(orig, *args, **kwargs): |
|
406 | def graph(orig, *args, **kwargs): | |
408 | if kwargs['graph']: |
|
407 | if kwargs['graph']: | |
409 | return wrapfn(*args, **kwargs) |
|
408 | return wrapfn(*args, **kwargs) | |
410 | return orig(*args, **kwargs) |
|
409 | return orig(*args, **kwargs) | |
411 | entry = extensions.wrapcommand(table, cmd, graph) |
|
410 | entry = extensions.wrapcommand(table, cmd, graph) | |
412 | entry[1].append(('G', 'graph', None, _("show the revision DAG"))) |
|
411 | entry[1].append(('G', 'graph', None, _("show the revision DAG"))) | |
413 |
|
412 | |||
414 | cmdtable = { |
|
413 | cmdtable = { | |
415 | "glog": |
|
414 | "glog": | |
416 | (graphlog, |
|
415 | (graphlog, | |
417 | [('l', 'limit', '', _('limit number of changes displayed')), |
|
416 | [('l', 'limit', '', _('limit number of changes displayed')), | |
418 | ('p', 'patch', False, _('show patch')), |
|
417 | ('p', 'patch', False, _('show patch')), | |
419 | ('r', 'rev', [], _('show the specified revision or range')), |
|
418 | ('r', 'rev', [], _('show the specified revision or range')), | |
420 | ] + templateopts, |
|
419 | ] + templateopts, | |
421 | _('hg glog [OPTION]... [FILE]')), |
|
420 | _('hg glog [OPTION]... [FILE]')), | |
422 | } |
|
421 | } |
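The uisetup()/_wrapcmd() pair above follows a common extension pattern: wrap an existing command, add a flag, and dispatch on it. Stripped of the Mercurial plumbing, the dispatch looks like this (the stub functions are made up for illustration):

    import sys

    def wrapcommand(orig, wrapfn):
        def graph(*args, **kwargs):
            if kwargs.get('graph'):
                return wrapfn(*args, **kwargs)
            return orig(*args, **kwargs)
        return graph

    def log(**opts):
        sys.stdout.write('plain log\n')

    def graphlog(**opts):
        sys.stdout.write('log with ASCII graph\n')

    cmd = wrapcommand(log, graphlog)
    cmd(graph=False)    # -> plain log
    cmd(graph=True)     # -> log with ASCII graph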
@@ -1,291 +1,290 b'' | |||||
1 | # notify.py - email notifications for mercurial |
|
1 | # notify.py - email notifications for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | '''hook extension to email notifications on commits/pushes |
|
8 | '''hook extension to email notifications on commits/pushes | |
9 |
|
9 | |||
10 | Subscriptions can be managed through hgrc. Default mode is to print |
|
10 | Subscriptions can be managed through hgrc. Default mode is to print | |
11 | messages to stdout, for testing and configuring. |
|
11 | messages to stdout, for testing and configuring. | |
12 |
|
12 | |||
13 | To use, configure notify extension and enable in hgrc like this: |
|
13 | To use, configure notify extension and enable in hgrc like this: | |
14 |
|
14 | |||
15 | [extensions] |
|
15 | [extensions] | |
16 | hgext.notify = |
|
16 | hgext.notify = | |
17 |
|
17 | |||
18 | [hooks] |
|
18 | [hooks] | |
19 | # one email for each incoming changeset |
|
19 | # one email for each incoming changeset | |
20 | incoming.notify = python:hgext.notify.hook |
|
20 | incoming.notify = python:hgext.notify.hook | |
21 | # batch emails when many changesets incoming at one time |
|
21 | # batch emails when many changesets incoming at one time | |
22 | changegroup.notify = python:hgext.notify.hook |
|
22 | changegroup.notify = python:hgext.notify.hook | |
23 |
|
23 | |||
24 | [notify] |
|
24 | [notify] | |
25 | # config items go in here |
|
25 | # config items go in here | |
26 |
|
26 | |||
27 | config items: |
|
27 | config items: | |
28 |
|
28 | |||
29 | REQUIRED: |
|
29 | REQUIRED: | |
30 | config = /path/to/file # file containing subscriptions |
|
30 | config = /path/to/file # file containing subscriptions | |
31 |
|
31 | |||
32 | OPTIONAL: |
|
32 | OPTIONAL: | |
33 | test = True # print messages to stdout for testing |
|
33 | test = True # print messages to stdout for testing | |
34 | strip = 3 # number of slashes to strip for url paths |
|
34 | strip = 3 # number of slashes to strip for url paths | |
35 | domain = example.com # domain to use if committer missing domain |
|
35 | domain = example.com # domain to use if committer missing domain | |
36 | style = ... # style file to use when formatting email |
|
36 | style = ... # style file to use when formatting email | |
37 | template = ... # template to use when formatting email |
|
37 | template = ... # template to use when formatting email | |
38 | incoming = ... # template to use when run as incoming hook |
|
38 | incoming = ... # template to use when run as incoming hook | |
39 | changegroup = ... # template when run as changegroup hook |
|
39 | changegroup = ... # template when run as changegroup hook | |
40 | maxdiff = 300 # max lines of diffs to include (0=none, -1=all) |
|
40 | maxdiff = 300 # max lines of diffs to include (0=none, -1=all) | |
41 | maxsubject = 67 # truncate subject line longer than this |
|
41 | maxsubject = 67 # truncate subject line longer than this | |
42 | diffstat = True # add a diffstat before the diff content |
|
42 | diffstat = True # add a diffstat before the diff content | |
43 | sources = serve # notify if source of incoming changes in this list |
|
43 | sources = serve # notify if source of incoming changes in this list | |
44 | # (serve == ssh or http, push, pull, bundle) |
|
44 | # (serve == ssh or http, push, pull, bundle) | |
45 | [email] |
|
45 | [email] | |
46 | from = user@host.com # email address to send as if none given |
|
46 | from = user@host.com # email address to send as if none given | |
47 | [web] |
|
47 | [web] | |
48 | baseurl = http://hgserver/... # root of hg web site for browsing commits |
|
48 | baseurl = http://hgserver/... # root of hg web site for browsing commits | |
49 |
|
49 | |||
50 | the notify config file has the same format as a regular hgrc. it has |
|
50 | the notify config file has the same format as a regular hgrc. it has | |
51 | two sections, so you can express subscriptions in whichever way is |
|
51 | two sections, so you can express subscriptions in whichever way is | |
52 | handier for you. |
|
52 | handier for you. | |
53 |
|
53 | |||
54 | [usersubs] |
|
54 | [usersubs] | |
55 | # key is subscriber email, value is ","-separated list of glob patterns |
|
55 | # key is subscriber email, value is ","-separated list of glob patterns | |
56 | user@host = pattern |
|
56 | user@host = pattern | |
57 |
|
57 | |||
58 | [reposubs] |
|
58 | [reposubs] | |
59 | # key is glob pattern, value is ","-separated list of subscriber emails |
|
59 | # key is glob pattern, value is ","-separated list of subscriber emails | |
60 | pattern = user@host |
|
60 | pattern = user@host | |
61 |
|
61 | |||
62 | glob patterns are matched against path to repo root. |
|
62 | glob patterns are matched against path to repo root. | |
63 |
|
63 | |||
64 | if you like, you can put the notify config file in a repo that users |
|
64 | if you like, you can put the notify config file in a repo that users | |
65 | can push changes to; that way they can manage their own subscriptions.''' |
|
65 | can push changes to; that way they can manage their own subscriptions.''' | |
66 |
|
66 | |||
67 | from mercurial.i18n import _ |
|
67 | from mercurial.i18n import _ | |
68 | from mercurial.node import bin, short |
|
|||
69 | from mercurial import patch, cmdutil, templater, util, mail |
|
68 | from mercurial import patch, cmdutil, templater, util, mail | |
70 | import email.Parser, fnmatch, socket, time |
|
69 | import email.Parser, fnmatch, socket, time | |
71 |
|
70 | |||
72 | # template for single changeset can include email headers. |
|
71 | # template for single changeset can include email headers. | |
73 | single_template = ''' |
|
72 | single_template = ''' | |
74 | Subject: changeset in {webroot}: {desc|firstline|strip} |
|
73 | Subject: changeset in {webroot}: {desc|firstline|strip} | |
75 | From: {author} |
|
74 | From: {author} | |
76 |
|
75 | |||
77 | changeset {node|short} in {root} |
|
76 | changeset {node|short} in {root} | |
78 | details: {baseurl}{webroot}?cmd=changeset;node={node|short} |
|
77 | details: {baseurl}{webroot}?cmd=changeset;node={node|short} | |
79 | description: |
|
78 | description: | |
80 | \t{desc|tabindent|strip} |
|
79 | \t{desc|tabindent|strip} | |
81 | '''.lstrip() |
|
80 | '''.lstrip() | |
82 |
|
81 | |||
83 | # template for multiple changesets should not contain email headers, |
|
82 | # template for multiple changesets should not contain email headers, | |
84 | # because only first set of headers will be used and result will look |
|
83 | # because only first set of headers will be used and result will look | |
85 | # strange. |
|
84 | # strange. | |
86 | multiple_template = ''' |
|
85 | multiple_template = ''' | |
87 | changeset {node|short} in {root} |
|
86 | changeset {node|short} in {root} | |
88 | details: {baseurl}{webroot}?cmd=changeset;node={node|short} |
|
87 | details: {baseurl}{webroot}?cmd=changeset;node={node|short} | |
89 | summary: {desc|firstline} |
|
88 | summary: {desc|firstline} | |
90 | ''' |
|
89 | ''' | |
91 |
|
90 | |||
92 | deftemplates = { |
|
91 | deftemplates = { | |
93 | 'changegroup': multiple_template, |
|
92 | 'changegroup': multiple_template, | |
94 | } |
|
93 | } | |
95 |
|
94 | |||
96 | class notifier(object): |
|
95 | class notifier(object): | |
97 | '''email notification class.''' |
|
96 | '''email notification class.''' | |
98 |
|
97 | |||
99 | def __init__(self, ui, repo, hooktype): |
|
98 | def __init__(self, ui, repo, hooktype): | |
100 | self.ui = ui |
|
99 | self.ui = ui | |
101 | cfg = self.ui.config('notify', 'config') |
|
100 | cfg = self.ui.config('notify', 'config') | |
102 | if cfg: |
|
101 | if cfg: | |
103 | self.ui.readsections(cfg, 'usersubs', 'reposubs') |
|
102 | self.ui.readsections(cfg, 'usersubs', 'reposubs') | |
104 | self.repo = repo |
|
103 | self.repo = repo | |
105 | self.stripcount = int(self.ui.config('notify', 'strip', 0)) |
|
104 | self.stripcount = int(self.ui.config('notify', 'strip', 0)) | |
106 | self.root = self.strip(self.repo.root) |
|
105 | self.root = self.strip(self.repo.root) | |
107 | self.domain = self.ui.config('notify', 'domain') |
|
106 | self.domain = self.ui.config('notify', 'domain') | |
108 | self.test = self.ui.configbool('notify', 'test', True) |
|
107 | self.test = self.ui.configbool('notify', 'test', True) | |
109 | self.charsets = mail._charsets(self.ui) |
|
108 | self.charsets = mail._charsets(self.ui) | |
110 | self.subs = self.subscribers() |
|
109 | self.subs = self.subscribers() | |
111 |
|
110 | |||
112 | mapfile = self.ui.config('notify', 'style') |
|
111 | mapfile = self.ui.config('notify', 'style') | |
113 | template = (self.ui.config('notify', hooktype) or |
|
112 | template = (self.ui.config('notify', hooktype) or | |
114 | self.ui.config('notify', 'template')) |
|
113 | self.ui.config('notify', 'template')) | |
115 | self.t = cmdutil.changeset_templater(self.ui, self.repo, |
|
114 | self.t = cmdutil.changeset_templater(self.ui, self.repo, | |
116 | False, None, mapfile, False) |
|
115 | False, None, mapfile, False) | |
117 | if not mapfile and not template: |
|
116 | if not mapfile and not template: | |
118 | template = deftemplates.get(hooktype) or single_template |
|
117 | template = deftemplates.get(hooktype) or single_template | |
119 | if template: |
|
118 | if template: | |
120 | template = templater.parsestring(template, quoted=False) |
|
119 | template = templater.parsestring(template, quoted=False) | |
121 | self.t.use_template(template) |
|
120 | self.t.use_template(template) | |
122 |
|
121 | |||
123 | def strip(self, path): |
|
122 | def strip(self, path): | |
124 | '''strip leading slashes from local path, turn into web-safe path.''' |
|
123 | '''strip leading slashes from local path, turn into web-safe path.''' | |
125 |
|
124 | |||
126 | path = util.pconvert(path) |
|
125 | path = util.pconvert(path) | |
127 | count = self.stripcount |
|
126 | count = self.stripcount | |
128 | while count > 0: |
|
127 | while count > 0: | |
129 | c = path.find('/') |
|
128 | c = path.find('/') | |
130 | if c == -1: |
|
129 | if c == -1: | |
131 | break |
|
130 | break | |
132 | path = path[c+1:] |
|
131 | path = path[c+1:] | |
133 | count -= 1 |
|
132 | count -= 1 | |
134 | return path |
|
133 | return path | |
135 |
|
134 | |||
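
strip() above is how a local repository root becomes the {webroot} seen in the templates: the path is normalized to forward slashes, then notify.strip leading components are dropped. The same loop as a standalone sketch (the webroot() name is ours):

    def webroot(path, stripcount):
        # drop `stripcount` leading components from a '/'-separated path
        path = path.replace('\\', '/')   # stand-in for util.pconvert
        while stripcount > 0:
            c = path.find('/')
            if c == -1:
                break
            path = path[c + 1:]
            stripcount -= 1
        return path

    print(webroot('srv/hg/project', 2))   # -> 'project'
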
136 | def fixmail(self, addr): |
|
135 | def fixmail(self, addr): | |
137 | '''try to clean up email addresses.''' |
|
136 | '''try to clean up email addresses.''' | |
138 |
|
137 | |||
139 | addr = util.email(addr.strip()) |
|
138 | addr = util.email(addr.strip()) | |
140 | if self.domain: |
|
139 | if self.domain: | |
141 | a = addr.find('@localhost') |
|
140 | a = addr.find('@localhost') | |
142 | if a != -1: |
|
141 | if a != -1: | |
143 | addr = addr[:a] |
|
142 | addr = addr[:a] | |
144 | if '@' not in addr: |
|
143 | if '@' not in addr: | |
145 | return addr + '@' + self.domain |
|
144 | return addr + '@' + self.domain | |
146 | return addr |
|
145 | return addr | |
147 |
|
146 | |||
148 | def subscribers(self): |
|
147 | def subscribers(self): | |
149 | '''return list of email addresses of subscribers to this repo.''' |
|
148 | '''return list of email addresses of subscribers to this repo.''' | |
150 | subs = {} |
|
149 | subs = {} | |
151 | for user, pats in self.ui.configitems('usersubs'): |
|
150 | for user, pats in self.ui.configitems('usersubs'): | |
152 | for pat in pats.split(','): |
|
151 | for pat in pats.split(','): | |
153 | if fnmatch.fnmatch(self.repo.root, pat.strip()): |
|
152 | if fnmatch.fnmatch(self.repo.root, pat.strip()): | |
154 | subs[self.fixmail(user)] = 1 |
|
153 | subs[self.fixmail(user)] = 1 | |
155 | for pat, users in self.ui.configitems('reposubs'): |
|
154 | for pat, users in self.ui.configitems('reposubs'): | |
156 | if fnmatch.fnmatch(self.repo.root, pat): |
|
155 | if fnmatch.fnmatch(self.repo.root, pat): | |
157 | for user in users.split(','): |
|
156 | for user in users.split(','): | |
158 | subs[self.fixmail(user)] = 1 |
|
157 | subs[self.fixmail(user)] = 1 | |
159 | subs = util.sort(subs) |
|
158 | subs = util.sort(subs) | |
160 | return [mail.addressencode(self.ui, s, self.charsets, self.test) |
|
159 | return [mail.addressencode(self.ui, s, self.charsets, self.test) | |
161 | for s in subs] |
|
160 | for s in subs] | |
162 |
|
161 | |||
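
subscribers() above glob-matches the repository root against every pattern in the [usersubs] section (user = patterns) and the [reposubs] section (pattern = users). The matching itself is plain fnmatch, as this self-contained sketch with invented configuration data shows:

    import fnmatch

    usersubs = {                        # [usersubs]: user = comma-separated globs
        'alice@example.com': '/srv/hg/*, /home/*/repos/*',
        'bob@example.com': '/srv/hg/project-x',
    }
    root = '/srv/hg/project-x'
    subs = sorted(user for user, pats in usersubs.items()
                  if any(fnmatch.fnmatch(root, pat.strip())
                         for pat in pats.split(',')))
    print(subs)   # ['alice@example.com', 'bob@example.com']
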
163 | def url(self, path=None): |
|
162 | def url(self, path=None): | |
164 | return self.ui.config('web', 'baseurl') + (path or self.root) |
|
163 | return self.ui.config('web', 'baseurl') + (path or self.root) | |
165 |
|
164 | |||
166 | def node(self, ctx): |
|
165 | def node(self, ctx): | |
167 | '''format one changeset.''' |
|
166 | '''format one changeset.''' | |
168 | self.t.show(ctx, changes=ctx.changeset(), |
|
167 | self.t.show(ctx, changes=ctx.changeset(), | |
169 | baseurl=self.ui.config('web', 'baseurl'), |
|
168 | baseurl=self.ui.config('web', 'baseurl'), | |
170 | root=self.repo.root, webroot=self.root) |
|
169 | root=self.repo.root, webroot=self.root) | |
171 |
|
170 | |||
172 | def skipsource(self, source): |
|
171 | def skipsource(self, source): | |
173 | '''true if incoming changes from this source should be skipped.''' |
|
172 | '''true if incoming changes from this source should be skipped.''' | |
174 | ok_sources = self.ui.config('notify', 'sources', 'serve').split() |
|
173 | ok_sources = self.ui.config('notify', 'sources', 'serve').split() | |
175 | return source not in ok_sources |
|
174 | return source not in ok_sources | |
176 |
|
175 | |||
177 | def send(self, ctx, count, data): |
|
176 | def send(self, ctx, count, data): | |
178 | '''send message.''' |
|
177 | '''send message.''' | |
179 |
|
178 | |||
180 | p = email.Parser.Parser() |
|
179 | p = email.Parser.Parser() | |
181 | msg = p.parsestr(data) |
|
180 | msg = p.parsestr(data) | |
182 |
|
181 | |||
183 | # store sender and subject |
|
182 | # store sender and subject | |
184 | sender, subject = msg['From'], msg['Subject'] |
|
183 | sender, subject = msg['From'], msg['Subject'] | |
185 | del msg['From'], msg['Subject'] |
|
184 | del msg['From'], msg['Subject'] | |
186 | # store remaining headers |
|
185 | # store remaining headers | |
187 | headers = msg.items() |
|
186 | headers = msg.items() | |
188 | # create fresh mime message from msg body |
|
187 | # create fresh mime message from msg body | |
189 | text = msg.get_payload() |
|
188 | text = msg.get_payload() | |
190 | # for notification prefer readability over data precision |
|
189 | # for notification prefer readability over data precision | |
191 | msg = mail.mimeencode(self.ui, text, self.charsets, self.test) |
|
190 | msg = mail.mimeencode(self.ui, text, self.charsets, self.test) | |
192 | # reinstate custom headers |
|
191 | # reinstate custom headers | |
193 | for k, v in headers: |
|
192 | for k, v in headers: | |
194 | msg[k] = v |
|
193 | msg[k] = v | |
195 |
|
194 | |||
196 | msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") |
|
195 | msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") | |
197 |
|
196 | |||
198 | # try to make subject line exist and be useful |
|
197 | # try to make subject line exist and be useful | |
199 | if not subject: |
|
198 | if not subject: | |
200 | if count > 1: |
|
199 | if count > 1: | |
201 | subject = _('%s: %d new changesets') % (self.root, count) |
|
200 | subject = _('%s: %d new changesets') % (self.root, count) | |
202 | else: |
|
201 | else: | |
203 | s = ctx.description().lstrip().split('\n', 1)[0].rstrip() |
|
202 | s = ctx.description().lstrip().split('\n', 1)[0].rstrip() | |
204 | subject = '%s: %s' % (self.root, s) |
|
203 | subject = '%s: %s' % (self.root, s) | |
205 | maxsubject = int(self.ui.config('notify', 'maxsubject', 67)) |
|
204 | maxsubject = int(self.ui.config('notify', 'maxsubject', 67)) | |
206 | if maxsubject and len(subject) > maxsubject: |
|
205 | if maxsubject and len(subject) > maxsubject: | |
207 | subject = subject[:maxsubject-3] + '...' |
|
206 | subject = subject[:maxsubject-3] + '...' | |
208 | msg['Subject'] = mail.headencode(self.ui, subject, |
|
207 | msg['Subject'] = mail.headencode(self.ui, subject, | |
209 | self.charsets, self.test) |
|
208 | self.charsets, self.test) | |
210 |
|
209 | |||
211 | # try to make message have proper sender |
|
210 | # try to make message have proper sender | |
212 | if not sender: |
|
211 | if not sender: | |
213 | sender = self.ui.config('email', 'from') or self.ui.username() |
|
212 | sender = self.ui.config('email', 'from') or self.ui.username() | |
214 | if '@' not in sender or '@localhost' in sender: |
|
213 | if '@' not in sender or '@localhost' in sender: | |
215 | sender = self.fixmail(sender) |
|
214 | sender = self.fixmail(sender) | |
216 | msg['From'] = mail.addressencode(self.ui, sender, |
|
215 | msg['From'] = mail.addressencode(self.ui, sender, | |
217 | self.charsets, self.test) |
|
216 | self.charsets, self.test) | |
218 |
|
217 | |||
219 | msg['X-Hg-Notification'] = 'changeset %s' % ctx |
|
218 | msg['X-Hg-Notification'] = 'changeset %s' % ctx | |
220 | if not msg['Message-Id']: |
|
219 | if not msg['Message-Id']: | |
221 | msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' % |
|
220 | msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' % | |
222 | (ctx, int(time.time()), |
|
221 | (ctx, int(time.time()), | |
223 | hash(self.repo.root), socket.getfqdn())) |
|
222 | hash(self.repo.root), socket.getfqdn())) | |
224 | msg['To'] = ', '.join(self.subs) |
|
223 | msg['To'] = ', '.join(self.subs) | |
225 |
|
224 | |||
226 | msgtext = msg.as_string(0) |
|
225 | msgtext = msg.as_string(0) | |
227 | if self.test: |
|
226 | if self.test: | |
228 | self.ui.write(msgtext) |
|
227 | self.ui.write(msgtext) | |
229 | if not msgtext.endswith('\n'): |
|
228 | if not msgtext.endswith('\n'): | |
230 | self.ui.write('\n') |
|
229 | self.ui.write('\n') | |
231 | else: |
|
230 | else: | |
232 | self.ui.status(_('notify: sending %d subscribers %d changes\n') % |
|
231 | self.ui.status(_('notify: sending %d subscribers %d changes\n') % | |
233 | (len(self.subs), count)) |
|
232 | (len(self.subs), count)) | |
234 | mail.sendmail(self.ui, util.email(msg['From']), |
|
233 | mail.sendmail(self.ui, util.email(msg['From']), | |
235 | self.subs, msgtext) |
|
234 | self.subs, msgtext) | |
236 |
|
235 | |||
237 | def diff(self, ctx, ref=None): |
|
236 | def diff(self, ctx, ref=None): | |
238 |
|
237 | |||
239 | maxdiff = int(self.ui.config('notify', 'maxdiff', 300)) |
|
238 | maxdiff = int(self.ui.config('notify', 'maxdiff', 300)) | |
240 | prev = ctx.parents()[0].node() |
|
239 | prev = ctx.parents()[0].node() | |
241 | ref = ref and ref.node() or ctx.node() |
|
240 | ref = ref and ref.node() or ctx.node() | |
242 | chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui)) |
|
241 | chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui)) | |
243 | difflines = ''.join(chunks).splitlines() |
|
242 | difflines = ''.join(chunks).splitlines() | |
244 |
|
243 | |||
245 | if self.ui.configbool('notify', 'diffstat', True): |
|
244 | if self.ui.configbool('notify', 'diffstat', True): | |
246 | s = patch.diffstat(difflines) |
|
245 | s = patch.diffstat(difflines) | |
247 | # s may be empty; don't include the header if it is |
|
246 | # s may be empty; don't include the header if it is | |
248 | if s: |
|
247 | if s: | |
249 | self.ui.write('\ndiffstat:\n\n%s' % s) |
|
248 | self.ui.write('\ndiffstat:\n\n%s' % s) | |
250 |
|
249 | |||
251 | if maxdiff == 0: |
|
250 | if maxdiff == 0: | |
252 | return |
|
251 | return | |
253 | elif maxdiff > 0 and len(difflines) > maxdiff: |
|
252 | elif maxdiff > 0 and len(difflines) > maxdiff: | |
254 | msg = _('\ndiffs (truncated from %d to %d lines):\n\n') |
|
253 | msg = _('\ndiffs (truncated from %d to %d lines):\n\n') | |
255 | self.ui.write(msg % (len(difflines), maxdiff)) |
|
254 | self.ui.write(msg % (len(difflines), maxdiff)) | |
256 | difflines = difflines[:maxdiff] |
|
255 | difflines = difflines[:maxdiff] | |
257 | elif difflines: |
|
256 | elif difflines: | |
258 | self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines)) |
|
257 | self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines)) | |
259 |
|
258 | |||
260 | self.ui.write("\n".join(difflines)) |
|
259 | self.ui.write("\n".join(difflines)) | |
261 |
|
260 | |||
262 | def hook(ui, repo, hooktype, node=None, source=None, **kwargs): |
|
261 | def hook(ui, repo, hooktype, node=None, source=None, **kwargs): | |
263 | '''send email notifications to interested subscribers. |
|
262 | '''send email notifications to interested subscribers. | |
264 |
|
263 | |||
265 | if used as changegroup hook, send one email for all changesets in |
|
264 | if used as changegroup hook, send one email for all changesets in | |
266 | changegroup. else send one email per changeset.''' |
|
265 | changegroup. else send one email per changeset.''' | |
267 |
|
266 | |||
268 | n = notifier(ui, repo, hooktype) |
|
267 | n = notifier(ui, repo, hooktype) | |
269 | ctx = repo[node] |
|
268 | ctx = repo[node] | |
270 |
|
269 | |||
271 | if not n.subs: |
|
270 | if not n.subs: | |
272 | ui.debug(_('notify: no subscribers to repo %s\n') % n.root) |
|
271 | ui.debug(_('notify: no subscribers to repo %s\n') % n.root) | |
273 | return |
|
272 | return | |
274 | if n.skipsource(source): |
|
273 | if n.skipsource(source): | |
275 | ui.debug(_('notify: changes have source "%s" - skipping\n') % source) |
|
274 | ui.debug(_('notify: changes have source "%s" - skipping\n') % source) | |
276 | return |
|
275 | return | |
277 |
|
276 | |||
278 | ui.pushbuffer() |
|
277 | ui.pushbuffer() | |
279 | if hooktype == 'changegroup': |
|
278 | if hooktype == 'changegroup': | |
280 | start, end = ctx.rev(), len(repo) |
|
279 | start, end = ctx.rev(), len(repo) | |
281 | count = end - start |
|
280 | count = end - start | |
282 | for rev in xrange(start, end): |
|
281 | for rev in xrange(start, end): | |
283 | n.node(repo[rev]) |
|
282 | n.node(repo[rev]) | |
284 | n.diff(ctx, repo['tip']) |
|
283 | n.diff(ctx, repo['tip']) | |
285 | else: |
|
284 | else: | |
286 | count = 1 |
|
285 | count = 1 | |
287 | n.node(ctx) |
|
286 | n.node(ctx) | |
288 | n.diff(ctx) |
|
287 | n.diff(ctx) | |
289 |
|
288 | |||
290 | data = ui.popbuffer() |
|
289 | data = ui.popbuffer() | |
291 | n.send(ctx, count, data) |
|
290 | n.send(ctx, count, data) |
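
The subtlest step in notifier.send() above is the round-trip through the email parser: the rendered template may carry its own headers, so the buffered text is parsed once, the body is re-wrapped as a fresh MIME part, and the remaining headers are copied back on top. A minimal sketch of that pattern against the modern stdlib API (the sample message text is invented):

    import email.parser
    from email.mime.text import MIMEText

    rendered = 'Subject: changeset in proj: fix bug\nX-Custom: yes\n\nchangeset abc123\n'
    parsed = email.parser.Parser().parsestr(rendered)
    subject = parsed['Subject']           # headers handled specially are pulled out
    del parsed['Subject']
    headers = parsed.items()              # whatever custom headers remain
    msg = MIMEText(parsed.get_payload())  # fresh message built from the body only
    for k, v in headers:
        msg[k] = v                        # reinstate the custom headers
    msg['Subject'] = subject
    print(msg.as_string())
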
@@ -1,97 +1,96 @@ | |||||
1 | # Mercurial extension to make it easy to refer to the parent of a revision |
|
1 | # Mercurial extension to make it easy to refer to the parent of a revision | |
2 | # |
|
2 | # | |
3 | # Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> |
|
3 | # Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 | '''\ |
|
7 | '''\ | |
8 | use suffixes to refer to ancestor revisions |
|
8 | use suffixes to refer to ancestor revisions | |
9 |
|
9 | |||
10 | This extension allows you to use git-style suffixes to refer to |
|
10 | This extension allows you to use git-style suffixes to refer to | |
11 | the ancestors of a specific revision. |
|
11 | the ancestors of a specific revision. | |
12 |
|
12 | |||
13 | For example, if you can refer to a revision as "foo", then: |
|
13 | For example, if you can refer to a revision as "foo", then: | |
14 |
|
14 | |||
15 | - foo^N = Nth parent of foo: |
|
15 | - foo^N = Nth parent of foo: | |
16 | foo^0 = foo |
|
16 | foo^0 = foo | |
17 | foo^1 = first parent of foo |
|
17 | foo^1 = first parent of foo | |
18 | foo^2 = second parent of foo |
|
18 | foo^2 = second parent of foo | |
19 | foo^ = foo^1 |
|
19 | foo^ = foo^1 | |
20 |
|
20 | |||
21 | - foo~N = Nth first-parent ancestor of foo |
|
21 | - foo~N = Nth first-parent ancestor of foo | |
22 | foo~0 = foo |
|
22 | foo~0 = foo | |
23 | foo~1 = foo^1 = foo^ = first parent of foo |
|
23 | foo~1 = foo^1 = foo^ = first parent of foo | |
24 | foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo |
|
24 | foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo | |
25 | ''' |
|
25 | ''' | |
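
The lookup() override below walks such a suffix one character at a time. The same state machine can be exercised over a plain parent table, independent of any repository (resolve() and the sample graph are ours; -1 marks a missing parent):

    def resolve(rev, suffix, parents):
        i = 0
        while i < len(suffix):
            if suffix[i] == '^':                 # ^N: Nth parent of rev
                j = i + 1
                if j < len(suffix) and suffix[j].isdigit():
                    j += 1
                n = int(suffix[i + 1:j]) if j > i + 1 else 1
                if n:
                    rev = parents[rev][n - 1]
                i = j
            elif suffix[i] == '~':               # ~N: follow first parents N times
                j = i + 1
                while j < len(suffix) and suffix[j].isdigit():
                    j += 1
                n = int(suffix[i + 1:j])
                for _ in range(n):
                    rev = parents[rev][0]
                i = j
            else:
                raise ValueError(suffix[i])
        return rev

    # 0 <- 1 <- 2 <- 4 and 1 <- 3 <- 4 (rev 4 is a merge)
    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}
    print(resolve(4, '^2~2', parents))   # 4's 2nd parent is 3; two steps up: 0
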
26 | import mercurial.repo |
|
|||
27 | from mercurial import error |
|
26 | from mercurial import error | |
28 |
|
27 | |||
29 | def reposetup(ui, repo): |
|
28 | def reposetup(ui, repo): | |
30 | if not repo.local(): |
|
29 | if not repo.local(): | |
31 | return |
|
30 | return | |
32 |
|
31 | |||
33 | class parentrevspecrepo(repo.__class__): |
|
32 | class parentrevspecrepo(repo.__class__): | |
34 | def lookup(self, key): |
|
33 | def lookup(self, key): | |
35 | try: |
|
34 | try: | |
36 | _super = super(parentrevspecrepo, self) |
|
35 | _super = super(parentrevspecrepo, self) | |
37 | return _super.lookup(key) |
|
36 | return _super.lookup(key) | |
38 | except error.RepoError: |
|
37 | except error.RepoError: | |
39 | pass |
|
38 | pass | |
40 |
|
39 | |||
41 | circ = key.find('^') |
|
40 | circ = key.find('^') | |
42 | tilde = key.find('~') |
|
41 | tilde = key.find('~') | |
43 | if circ < 0 and tilde < 0: |
|
42 | if circ < 0 and tilde < 0: | |
44 | raise |
|
43 | raise | |
45 | elif circ >= 0 and tilde >= 0: |
|
44 | elif circ >= 0 and tilde >= 0: | |
46 | end = min(circ, tilde) |
|
45 | end = min(circ, tilde) | |
47 | else: |
|
46 | else: | |
48 | end = max(circ, tilde) |
|
47 | end = max(circ, tilde) | |
49 |
|
48 | |||
50 | cl = self.changelog |
|
49 | cl = self.changelog | |
51 | base = key[:end] |
|
50 | base = key[:end] | |
52 | try: |
|
51 | try: | |
53 | node = _super.lookup(base) |
|
52 | node = _super.lookup(base) | |
54 | except error.RepoError: |
|
53 | except error.RepoError: | |
55 | # eek - reraise the first error |
|
54 | # eek - reraise the first error | |
56 | return _super.lookup(key) |
|
55 | return _super.lookup(key) | |
57 |
|
56 | |||
58 | rev = cl.rev(node) |
|
57 | rev = cl.rev(node) | |
59 | suffix = key[end:] |
|
58 | suffix = key[end:] | |
60 | i = 0 |
|
59 | i = 0 | |
61 | while i < len(suffix): |
|
60 | while i < len(suffix): | |
62 | # foo^N => Nth parent of foo |
|
61 | # foo^N => Nth parent of foo | |
63 | # foo^0 == foo |
|
62 | # foo^0 == foo | |
64 | # foo^1 == foo^ == 1st parent of foo |
|
63 | # foo^1 == foo^ == 1st parent of foo | |
65 | # foo^2 == 2nd parent of foo |
|
64 | # foo^2 == 2nd parent of foo | |
66 | if suffix[i] == '^': |
|
65 | if suffix[i] == '^': | |
67 | j = i + 1 |
|
66 | j = i + 1 | |
68 | p = cl.parentrevs(rev) |
|
67 | p = cl.parentrevs(rev) | |
69 | if j < len(suffix) and suffix[j].isdigit(): |
|
68 | if j < len(suffix) and suffix[j].isdigit(): | |
70 | j += 1 |
|
69 | j += 1 | |
71 | n = int(suffix[i+1:j]) |
|
70 | n = int(suffix[i+1:j]) | |
72 | if n > 2 or n == 2 and p[1] == -1: |
|
71 | if n > 2 or n == 2 and p[1] == -1: | |
73 | raise |
|
72 | raise | |
74 | else: |
|
73 | else: | |
75 | n = 1 |
|
74 | n = 1 | |
76 | if n: |
|
75 | if n: | |
77 | rev = p[n - 1] |
|
76 | rev = p[n - 1] | |
78 | i = j |
|
77 | i = j | |
79 | # foo~N => Nth first-parent ancestor of foo |
|
78 | # foo~N => Nth first-parent ancestor of foo | |
80 | # foo~0 = foo |
|
79 | # foo~0 = foo | |
81 | # foo~1 = foo^1 == foo^ == 1st parent of foo |
|
80 | # foo~1 = foo^1 == foo^ == 1st parent of foo | |
82 | # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo |
|
81 | # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo | |
83 | elif suffix[i] == '~': |
|
82 | elif suffix[i] == '~': | |
84 | j = i + 1 |
|
83 | j = i + 1 | |
85 | while j < len(suffix) and suffix[j].isdigit(): |
|
84 | while j < len(suffix) and suffix[j].isdigit(): | |
86 | j += 1 |
|
85 | j += 1 | |
87 | if j == i + 1: |
|
86 | if j == i + 1: | |
88 | raise |
|
87 | raise | |
89 | n = int(suffix[i+1:j]) |
|
88 | n = int(suffix[i+1:j]) | |
90 | for k in xrange(n): |
|
89 | for k in xrange(n): | |
91 | rev = cl.parentrevs(rev)[0] |
|
90 | rev = cl.parentrevs(rev)[0] | |
92 | i = j |
|
91 | i = j | |
93 | else: |
|
92 | else: | |
94 | raise |
|
93 | raise | |
95 | return cl.node(rev) |
|
94 | return cl.node(rev) | |
96 |
|
95 | |||
97 | repo.__class__ = parentrevspecrepo |
|
96 | repo.__class__ = parentrevspecrepo |
@@ -1,144 +1,144 @@ | |||||
1 | # win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users |
|
1 | # win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users | |
2 | # |
|
2 | # | |
3 | # This software may be used and distributed according to the terms |
|
3 | # This software may be used and distributed according to the terms | |
4 | # of the GNU General Public License, incorporated herein by reference. |
|
4 | # of the GNU General Public License, incorporated herein by reference. | |
5 | # |
|
5 | # | |
6 | # To perform automatic newline conversion, use: |
|
6 | # To perform automatic newline conversion, use: | |
7 | # |
|
7 | # | |
8 | # [extensions] |
|
8 | # [extensions] | |
9 | # hgext.win32text = |
|
9 | # hgext.win32text = | |
10 | # [encode] |
|
10 | # [encode] | |
11 | # ** = cleverencode: |
|
11 | # ** = cleverencode: | |
12 | # # or ** = macencode: |
|
12 | # # or ** = macencode: | |
13 | # [decode] |
|
13 | # [decode] | |
14 | # ** = cleverdecode: |
|
14 | # ** = cleverdecode: | |
15 | # # or ** = macdecode: |
|
15 | # # or ** = macdecode: | |
16 | # |
|
16 | # | |
17 | # If not doing conversion, to make sure you do not commit CRLF/CR by accident: |
|
17 | # If not doing conversion, to make sure you do not commit CRLF/CR by accident: | |
18 | # |
|
18 | # | |
19 | # [hooks] |
|
19 | # [hooks] | |
20 | # pretxncommit.crlf = python:hgext.win32text.forbidcrlf |
|
20 | # pretxncommit.crlf = python:hgext.win32text.forbidcrlf | |
21 | # # or pretxncommit.cr = python:hgext.win32text.forbidcr |
|
21 | # # or pretxncommit.cr = python:hgext.win32text.forbidcr | |
22 | # |
|
22 | # | |
23 | # To do the same check on a server to prevent CRLF/CR from being pushed or |
|
23 | # To do the same check on a server to prevent CRLF/CR from being pushed or | |
24 | # pulled: |
|
24 | # pulled: | |
25 | # |
|
25 | # | |
26 | # [hooks] |
|
26 | # [hooks] | |
27 | # pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf |
|
27 | # pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf | |
28 | # # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr |
|
28 | # # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr | |
29 |
|
29 | |||
30 | from mercurial.i18n import _ |
|
30 | from mercurial.i18n import _ | |
31 |
from mercurial.node import |
|
31 | from mercurial.node import short | |
32 | from mercurial import util |
|
32 | from mercurial import util | |
33 | import re |
|
33 | import re | |
34 |
|
34 | |||
35 | # regexp for single LF without CR preceding. |
|
35 | # regexp for single LF without CR preceding. | |
36 | re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE) |
|
36 | re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE) | |
37 |
|
37 | |||
38 | newlinestr = {'\r\n': 'CRLF', '\r': 'CR'} |
|
38 | newlinestr = {'\r\n': 'CRLF', '\r': 'CR'} | |
39 | filterstr = {'\r\n': 'clever', '\r': 'mac'} |
|
39 | filterstr = {'\r\n': 'clever', '\r': 'mac'} | |
40 |
|
40 | |||
41 | def checknewline(s, newline, ui=None, repo=None, filename=None): |
|
41 | def checknewline(s, newline, ui=None, repo=None, filename=None): | |
42 | # warn if the file already has 'newline' in the repository. |
|
42 | # warn if the file already has 'newline' in the repository. | |
43 | # it might cause unexpected eol conversion. |
|
43 | # it might cause unexpected eol conversion. | |
44 | # see issue 302: |
|
44 | # see issue 302: | |
45 | # http://www.selenic.com/mercurial/bts/issue302 |
|
45 | # http://www.selenic.com/mercurial/bts/issue302 | |
46 | if newline in s and ui and filename and repo: |
|
46 | if newline in s and ui and filename and repo: | |
47 | ui.warn(_('WARNING: %s already has %s line endings\n' |
|
47 | ui.warn(_('WARNING: %s already has %s line endings\n' | |
48 | 'and does not need EOL conversion by the win32text plugin.\n' |
|
48 | 'and does not need EOL conversion by the win32text plugin.\n' | |
49 | 'Before your next commit, please reconsider your ' |
|
49 | 'Before your next commit, please reconsider your ' | |
50 | 'encode/decode settings in \nMercurial.ini or %s.\n') % |
|
50 | 'encode/decode settings in \nMercurial.ini or %s.\n') % | |
51 | (filename, newlinestr[newline], repo.join('hgrc'))) |
|
51 | (filename, newlinestr[newline], repo.join('hgrc'))) | |
52 |
|
52 | |||
53 | def dumbdecode(s, cmd, **kwargs): |
|
53 | def dumbdecode(s, cmd, **kwargs): | |
54 | checknewline(s, '\r\n', **kwargs) |
|
54 | checknewline(s, '\r\n', **kwargs) | |
55 | # replace single LF to CRLF |
|
55 | # replace single LF to CRLF | |
56 | return re_single_lf.sub('\\1\r\n', s) |
|
56 | return re_single_lf.sub('\\1\r\n', s) | |
57 |
|
57 | |||
58 | def dumbencode(s, cmd): |
|
58 | def dumbencode(s, cmd): | |
59 | return s.replace('\r\n', '\n') |
|
59 | return s.replace('\r\n', '\n') | |
60 |
|
60 | |||
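
re_single_lf above only matches an LF that is not already preceded by a CR, which is what lets dumbdecode add carriage returns idempotently. A quick check of that behavior:

    import re

    re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
    text = 'one\ntwo\r\nthree\n'
    print(repr(re_single_lf.sub('\\1\r\n', text)))
    # 'one\r\ntwo\r\nthree\r\n' -- lone LFs gain a CR, existing CRLFs are untouched
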
61 | def macdumbdecode(s, cmd, **kwargs): |
|
61 | def macdumbdecode(s, cmd, **kwargs): | |
62 | checknewline(s, '\r', **kwargs) |
|
62 | checknewline(s, '\r', **kwargs) | |
63 | return s.replace('\n', '\r') |
|
63 | return s.replace('\n', '\r') | |
64 |
|
64 | |||
65 | def macdumbencode(s, cmd): |
|
65 | def macdumbencode(s, cmd): | |
66 | return s.replace('\r', '\n') |
|
66 | return s.replace('\r', '\n') | |
67 |
|
67 | |||
68 | def cleverdecode(s, cmd, **kwargs): |
|
68 | def cleverdecode(s, cmd, **kwargs): | |
69 | if not util.binary(s): |
|
69 | if not util.binary(s): | |
70 | return dumbdecode(s, cmd, **kwargs) |
|
70 | return dumbdecode(s, cmd, **kwargs) | |
71 | return s |
|
71 | return s | |
72 |
|
72 | |||
73 | def cleverencode(s, cmd): |
|
73 | def cleverencode(s, cmd): | |
74 | if not util.binary(s): |
|
74 | if not util.binary(s): | |
75 | return dumbencode(s, cmd) |
|
75 | return dumbencode(s, cmd) | |
76 | return s |
|
76 | return s | |
77 |
|
77 | |||
78 | def macdecode(s, cmd, **kwargs): |
|
78 | def macdecode(s, cmd, **kwargs): | |
79 | if not util.binary(s): |
|
79 | if not util.binary(s): | |
80 | return macdumbdecode(s, cmd, **kwargs) |
|
80 | return macdumbdecode(s, cmd, **kwargs) | |
81 | return s |
|
81 | return s | |
82 |
|
82 | |||
83 | def macencode(s, cmd): |
|
83 | def macencode(s, cmd): | |
84 | if not util.binary(s): |
|
84 | if not util.binary(s): | |
85 | return macdumbencode(s, cmd) |
|
85 | return macdumbencode(s, cmd) | |
86 | return s |
|
86 | return s | |
87 |
|
87 | |||
88 | _filters = { |
|
88 | _filters = { | |
89 | 'dumbdecode:': dumbdecode, |
|
89 | 'dumbdecode:': dumbdecode, | |
90 | 'dumbencode:': dumbencode, |
|
90 | 'dumbencode:': dumbencode, | |
91 | 'cleverdecode:': cleverdecode, |
|
91 | 'cleverdecode:': cleverdecode, | |
92 | 'cleverencode:': cleverencode, |
|
92 | 'cleverencode:': cleverencode, | |
93 | 'macdumbdecode:': macdumbdecode, |
|
93 | 'macdumbdecode:': macdumbdecode, | |
94 | 'macdumbencode:': macdumbencode, |
|
94 | 'macdumbencode:': macdumbencode, | |
95 | 'macdecode:': macdecode, |
|
95 | 'macdecode:': macdecode, | |
96 | 'macencode:': macencode, |
|
96 | 'macencode:': macencode, | |
97 | } |
|
97 | } | |
98 |
|
98 | |||
99 | def forbidnewline(ui, repo, hooktype, node, newline, **kwargs): |
|
99 | def forbidnewline(ui, repo, hooktype, node, newline, **kwargs): | |
100 | halt = False |
|
100 | halt = False | |
101 | for rev in xrange(repo[node].rev(), len(repo)): |
|
101 | for rev in xrange(repo[node].rev(), len(repo)): | |
102 | c = repo[rev] |
|
102 | c = repo[rev] | |
103 | for f in c.files(): |
|
103 | for f in c.files(): | |
104 | if f not in c: |
|
104 | if f not in c: | |
105 | continue |
|
105 | continue | |
106 | data = c[f].data() |
|
106 | data = c[f].data() | |
107 | if not util.binary(data) and newline in data: |
|
107 | if not util.binary(data) and newline in data: | |
108 | if not halt: |
|
108 | if not halt: | |
109 | ui.warn(_('Attempt to commit or push text file(s) ' |
|
109 | ui.warn(_('Attempt to commit or push text file(s) ' | |
110 | 'using %s line endings\n') % |
|
110 | 'using %s line endings\n') % | |
111 | newlinestr[newline]) |
|
111 | newlinestr[newline]) | |
112 | ui.warn(_('in %s: %s\n') % (short(c.node()), f)) |
|
112 | ui.warn(_('in %s: %s\n') % (short(c.node()), f)) | |
113 | halt = True |
|
113 | halt = True | |
114 | if halt and hooktype == 'pretxnchangegroup': |
|
114 | if halt and hooktype == 'pretxnchangegroup': | |
115 | crlf = newlinestr[newline].lower() |
|
115 | crlf = newlinestr[newline].lower() | |
116 | filter = filterstr[newline] |
|
116 | filter = filterstr[newline] | |
117 | ui.warn(_('\nTo prevent this mistake in your local repository,\n' |
|
117 | ui.warn(_('\nTo prevent this mistake in your local repository,\n' | |
118 | 'add to Mercurial.ini or .hg/hgrc:\n' |
|
118 | 'add to Mercurial.ini or .hg/hgrc:\n' | |
119 | '\n' |
|
119 | '\n' | |
120 | '[hooks]\n' |
|
120 | '[hooks]\n' | |
121 | 'pretxncommit.%s = python:hgext.win32text.forbid%s\n' |
|
121 | 'pretxncommit.%s = python:hgext.win32text.forbid%s\n' | |
122 | '\n' |
|
122 | '\n' | |
123 | 'and also consider adding:\n' |
|
123 | 'and also consider adding:\n' | |
124 | '\n' |
|
124 | '\n' | |
125 | '[extensions]\n' |
|
125 | '[extensions]\n' | |
126 | 'hgext.win32text =\n' |
|
126 | 'hgext.win32text =\n' | |
127 | '[encode]\n' |
|
127 | '[encode]\n' | |
128 | '** = %sencode:\n' |
|
128 | '** = %sencode:\n' | |
129 | '[decode]\n' |
|
129 | '[decode]\n' | |
130 | '** = %sdecode:\n') % (crlf, crlf, filter, filter)) |
|
130 | '** = %sdecode:\n') % (crlf, crlf, filter, filter)) | |
131 | return halt |
|
131 | return halt | |
132 |
|
132 | |||
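
The core test in forbidnewline() above is just `not util.binary(data) and newline in data`, applied to every file touched by every incoming changeset. Modeled outside a repository, with a crude NUL-byte check standing in for util.binary:

    def binary(data):
        # rough stand-in for mercurial's util.binary heuristic
        return b'\0' in data

    newline = b'\r\n'
    files = {b'ok.txt': b'a\nb\n', b'bad.txt': b'a\r\nb\r\n', b'img.bin': b'\x89x\0y'}
    for name, data in sorted(files.items()):
        if not binary(data) and newline in data:
            print('%s has CRLF line endings' % name.decode())
    # bad.txt has CRLF line endings
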
133 | def forbidcrlf(ui, repo, hooktype, node, **kwargs): |
|
133 | def forbidcrlf(ui, repo, hooktype, node, **kwargs): | |
134 | return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs) |
|
134 | return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs) | |
135 |
|
135 | |||
136 | def forbidcr(ui, repo, hooktype, node, **kwargs): |
|
136 | def forbidcr(ui, repo, hooktype, node, **kwargs): | |
137 | return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs) |
|
137 | return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs) | |
138 |
|
138 | |||
139 | def reposetup(ui, repo): |
|
139 | def reposetup(ui, repo): | |
140 | if not repo.local(): |
|
140 | if not repo.local(): | |
141 | return |
|
141 | return | |
142 | for name, fn in _filters.iteritems(): |
|
142 | for name, fn in _filters.iteritems(): | |
143 | repo.adddatafilter(name, fn) |
|
143 | repo.adddatafilter(name, fn) | |
144 |
|
144 |
@@ -1,298 +1,298 @@ | |||||
1 | """ |
|
1 | """ | |
2 | bundlerepo.py - repository class for viewing uncompressed bundles |
|
2 | bundlerepo.py - repository class for viewing uncompressed bundles | |
3 |
|
3 | |||
4 | This provides a read-only repository interface to bundles as if |
|
4 | This provides a read-only repository interface to bundles as if | |
5 | they were part of the actual repository. |
|
5 | they were part of the actual repository. | |
6 |
|
6 | |||
7 | Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> |
|
7 | Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> | |
8 |
|
8 | |||
9 | This software may be used and distributed according to the terms |
|
9 | This software may be used and distributed according to the terms | |
10 | of the GNU General Public License, incorporated herein by reference. |
|
10 | of the GNU General Public License, incorporated herein by reference. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 |
from node import |
|
13 | from node import nullid | |
14 | from i18n import _ |
|
14 | from i18n import _ | |
15 | import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff |
|
15 | import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff | |
16 |
import |
|
16 | import localrepo, changelog, manifest, filelog, revlog, error | |
17 |
|
17 | |||
18 | class bundlerevlog(revlog.revlog): |
|
18 | class bundlerevlog(revlog.revlog): | |
19 | def __init__(self, opener, indexfile, bundlefile, |
|
19 | def __init__(self, opener, indexfile, bundlefile, | |
20 | linkmapper=None): |
|
20 | linkmapper=None): | |
21 | # How it works: |
|
21 | # How it works: | |
22 | # to retrieve a revision, we need to know the offset of |
|
22 | # to retrieve a revision, we need to know the offset of | |
23 | # the revision in the bundlefile (an opened file). |
|
23 | # the revision in the bundlefile (an opened file). | |
24 | # |
|
24 | # | |
25 | # We store this offset in the index (start), to differentiate a |
|
25 | # We store this offset in the index (start), to differentiate a | |
26 | # rev in the bundle and from a rev in the revlog, we check |
|
26 | # rev in the bundle and from a rev in the revlog, we check | |
27 | # len(index[r]). If the tuple is bigger than 7, it is a bundle |
|
27 | # len(index[r]). If the tuple is bigger than 7, it is a bundle | |
28 | # (it is bigger since we store the node to which the delta is) |
|
28 | # (it is bigger since we store the node to which the delta is) | |
29 | # |
|
29 | # | |
30 | revlog.revlog.__init__(self, opener, indexfile) |
|
30 | revlog.revlog.__init__(self, opener, indexfile) | |
31 | self.bundlefile = bundlefile |
|
31 | self.bundlefile = bundlefile | |
32 | self.basemap = {} |
|
32 | self.basemap = {} | |
33 | def chunkpositer(): |
|
33 | def chunkpositer(): | |
34 | for chunk in changegroup.chunkiter(bundlefile): |
|
34 | for chunk in changegroup.chunkiter(bundlefile): | |
35 | pos = bundlefile.tell() |
|
35 | pos = bundlefile.tell() | |
36 | yield chunk, pos - len(chunk) |
|
36 | yield chunk, pos - len(chunk) | |
37 | n = len(self) |
|
37 | n = len(self) | |
38 | prev = None |
|
38 | prev = None | |
39 | for chunk, start in chunkpositer(): |
|
39 | for chunk, start in chunkpositer(): | |
40 | size = len(chunk) |
|
40 | size = len(chunk) | |
41 | if size < 80: |
|
41 | if size < 80: | |
42 | raise util.Abort(_("invalid changegroup")) |
|
42 | raise util.Abort(_("invalid changegroup")) | |
43 | start += 80 |
|
43 | start += 80 | |
44 | size -= 80 |
|
44 | size -= 80 | |
45 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
45 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) | |
46 | if node in self.nodemap: |
|
46 | if node in self.nodemap: | |
47 | prev = node |
|
47 | prev = node | |
48 | continue |
|
48 | continue | |
49 | for p in (p1, p2): |
|
49 | for p in (p1, p2): | |
50 | if not p in self.nodemap: |
|
50 | if not p in self.nodemap: | |
51 | raise error.LookupError(p1, self.indexfile, |
|
51 | raise error.LookupError(p1, self.indexfile, | |
52 | _("unknown parent")) |
|
52 | _("unknown parent")) | |
53 | if linkmapper is None: |
|
53 | if linkmapper is None: | |
54 | link = n |
|
54 | link = n | |
55 | else: |
|
55 | else: | |
56 | link = linkmapper(cs) |
|
56 | link = linkmapper(cs) | |
57 |
|
57 | |||
58 | if not prev: |
|
58 | if not prev: | |
59 | prev = p1 |
|
59 | prev = p1 | |
60 | # start, size, full unc. size, base (unused), link, p1, p2, node |
|
60 | # start, size, full unc. size, base (unused), link, p1, p2, node | |
61 | e = (revlog.offset_type(start, 0), size, -1, -1, link, |
|
61 | e = (revlog.offset_type(start, 0), size, -1, -1, link, | |
62 | self.rev(p1), self.rev(p2), node) |
|
62 | self.rev(p1), self.rev(p2), node) | |
63 | self.basemap[n] = prev |
|
63 | self.basemap[n] = prev | |
64 | self.index.insert(-1, e) |
|
64 | self.index.insert(-1, e) | |
65 | self.nodemap[node] = n |
|
65 | self.nodemap[node] = n | |
66 | prev = node |
|
66 | prev = node | |
67 | n += 1 |
|
67 | n += 1 | |
68 |
|
68 | |||
69 | def bundle(self, rev): |
|
69 | def bundle(self, rev): | |
70 | """is rev from the bundle""" |
|
70 | """is rev from the bundle""" | |
71 | if rev < 0: |
|
71 | if rev < 0: | |
72 | return False |
|
72 | return False | |
73 | return rev in self.basemap |
|
73 | return rev in self.basemap | |
74 | def bundlebase(self, rev): return self.basemap[rev] |
|
74 | def bundlebase(self, rev): return self.basemap[rev] | |
75 | def chunk(self, rev, df=None, cachelen=4096): |
|
75 | def chunk(self, rev, df=None, cachelen=4096): | |
76 | # Warning: in case of bundle, the diff is against bundlebase, |
|
76 | # Warning: in case of bundle, the diff is against bundlebase, | |
77 | # not against rev - 1 |
|
77 | # not against rev - 1 | |
78 | # XXX: could use some caching |
|
78 | # XXX: could use some caching | |
79 | if not self.bundle(rev): |
|
79 | if not self.bundle(rev): | |
80 | return revlog.revlog.chunk(self, rev, df) |
|
80 | return revlog.revlog.chunk(self, rev, df) | |
81 | self.bundlefile.seek(self.start(rev)) |
|
81 | self.bundlefile.seek(self.start(rev)) | |
82 | return self.bundlefile.read(self.length(rev)) |
|
82 | return self.bundlefile.read(self.length(rev)) | |
83 |
|
83 | |||
84 | def revdiff(self, rev1, rev2): |
|
84 | def revdiff(self, rev1, rev2): | |
85 | """return or calculate a delta between two revisions""" |
|
85 | """return or calculate a delta between two revisions""" | |
86 | if self.bundle(rev1) and self.bundle(rev2): |
|
86 | if self.bundle(rev1) and self.bundle(rev2): | |
87 | # hot path for bundle |
|
87 | # hot path for bundle | |
88 | revb = self.rev(self.bundlebase(rev2)) |
|
88 | revb = self.rev(self.bundlebase(rev2)) | |
89 | if revb == rev1: |
|
89 | if revb == rev1: | |
90 | return self.chunk(rev2) |
|
90 | return self.chunk(rev2) | |
91 | elif not self.bundle(rev1) and not self.bundle(rev2): |
|
91 | elif not self.bundle(rev1) and not self.bundle(rev2): | |
92 | return revlog.revlog.revdiff(self, rev1, rev2) |
|
92 | return revlog.revlog.revdiff(self, rev1, rev2) | |
93 |
|
93 | |||
94 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
94 | return mdiff.textdiff(self.revision(self.node(rev1)), | |
95 | self.revision(self.node(rev2))) |
|
95 | self.revision(self.node(rev2))) | |
96 |
|
96 | |||
97 | def revision(self, node): |
|
97 | def revision(self, node): | |
98 | """return an uncompressed revision of a given""" |
|
98 | """return an uncompressed revision of a given""" | |
99 | if node == nullid: return "" |
|
99 | if node == nullid: return "" | |
100 |
|
100 | |||
101 | text = None |
|
101 | text = None | |
102 | chain = [] |
|
102 | chain = [] | |
103 | iter_node = node |
|
103 | iter_node = node | |
104 | rev = self.rev(iter_node) |
|
104 | rev = self.rev(iter_node) | |
105 | # reconstruct the revision if it is from a changegroup |
|
105 | # reconstruct the revision if it is from a changegroup | |
106 | while self.bundle(rev): |
|
106 | while self.bundle(rev): | |
107 | if self._cache and self._cache[0] == iter_node: |
|
107 | if self._cache and self._cache[0] == iter_node: | |
108 | text = self._cache[2] |
|
108 | text = self._cache[2] | |
109 | break |
|
109 | break | |
110 | chain.append(rev) |
|
110 | chain.append(rev) | |
111 | iter_node = self.bundlebase(rev) |
|
111 | iter_node = self.bundlebase(rev) | |
112 | rev = self.rev(iter_node) |
|
112 | rev = self.rev(iter_node) | |
113 | if text is None: |
|
113 | if text is None: | |
114 | text = revlog.revlog.revision(self, iter_node) |
|
114 | text = revlog.revlog.revision(self, iter_node) | |
115 |
|
115 | |||
116 | while chain: |
|
116 | while chain: | |
117 | delta = self.chunk(chain.pop()) |
|
117 | delta = self.chunk(chain.pop()) | |
118 | text = mdiff.patches(text, [delta]) |
|
118 | text = mdiff.patches(text, [delta]) | |
119 |
|
119 | |||
120 | p1, p2 = self.parents(node) |
|
120 | p1, p2 = self.parents(node) | |
121 | if node != revlog.hash(text, p1, p2): |
|
121 | if node != revlog.hash(text, p1, p2): | |
122 | raise error.RevlogError(_("integrity check failed on %s:%d") |
|
122 | raise error.RevlogError(_("integrity check failed on %s:%d") | |
123 | % (self.datafile, self.rev(node))) |
|
123 | % (self.datafile, self.rev(node))) | |
124 |
|
124 | |||
125 | self._cache = (node, self.rev(node), text) |
|
125 | self._cache = (node, self.rev(node), text) | |
126 | return text |
|
126 | return text | |
127 |
|
127 | |||
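
revision() above collects the basemap chain back to the first revision that lives in the real revlog, then replays the deltas oldest-first. The shape of that walk, with a toy (start, end, replacement) delta format standing in for mdiff patches:

    def apply_delta(text, delta):
        # apply (start, end, replacement) edits back-to-front so offsets stay valid
        for start, end, repl in sorted(delta, reverse=True):
            text = text[:start] + repl + text[end:]
        return text

    fulltext = 'line1\n'                            # base stored in the real revlog
    basemap = {'c': 'b', 'b': 'a'}                  # bundle rev -> its delta base
    deltas = {'b': [(6, 6, 'line2\n')], 'c': [(12, 12, 'line3\n')]}

    node, chain = 'c', []
    while node in basemap:                          # walk back to a real base
        chain.append(node)
        node = basemap[node]
    text = fulltext
    while chain:                                    # replay deltas oldest-first
        text = apply_delta(text, deltas[chain.pop()])
    print(text)                                     # line1, line2, line3
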
128 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): |
|
128 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): | |
129 | raise NotImplementedError |
|
129 | raise NotImplementedError | |
130 | def addgroup(self, revs, linkmapper, transaction): |
|
130 | def addgroup(self, revs, linkmapper, transaction): | |
131 | raise NotImplementedError |
|
131 | raise NotImplementedError | |
132 | def strip(self, rev, minlink): |
|
132 | def strip(self, rev, minlink): | |
133 | raise NotImplementedError |
|
133 | raise NotImplementedError | |
134 | def checksize(self): |
|
134 | def checksize(self): | |
135 | raise NotImplementedError |
|
135 | raise NotImplementedError | |
136 |
|
136 | |||
137 | class bundlechangelog(bundlerevlog, changelog.changelog): |
|
137 | class bundlechangelog(bundlerevlog, changelog.changelog): | |
138 | def __init__(self, opener, bundlefile): |
|
138 | def __init__(self, opener, bundlefile): | |
139 | changelog.changelog.__init__(self, opener) |
|
139 | changelog.changelog.__init__(self, opener) | |
140 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile) |
|
140 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile) | |
141 |
|
141 | |||
142 | class bundlemanifest(bundlerevlog, manifest.manifest): |
|
142 | class bundlemanifest(bundlerevlog, manifest.manifest): | |
143 | def __init__(self, opener, bundlefile, linkmapper): |
|
143 | def __init__(self, opener, bundlefile, linkmapper): | |
144 | manifest.manifest.__init__(self, opener) |
|
144 | manifest.manifest.__init__(self, opener) | |
145 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, |
|
145 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, | |
146 | linkmapper) |
|
146 | linkmapper) | |
147 |
|
147 | |||
148 | class bundlefilelog(bundlerevlog, filelog.filelog): |
|
148 | class bundlefilelog(bundlerevlog, filelog.filelog): | |
149 | def __init__(self, opener, path, bundlefile, linkmapper): |
|
149 | def __init__(self, opener, path, bundlefile, linkmapper): | |
150 | filelog.filelog.__init__(self, opener, path) |
|
150 | filelog.filelog.__init__(self, opener, path) | |
151 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, |
|
151 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, | |
152 | linkmapper) |
|
152 | linkmapper) | |
153 |
|
153 | |||
154 | class bundlerepository(localrepo.localrepository): |
|
154 | class bundlerepository(localrepo.localrepository): | |
155 | def __init__(self, ui, path, bundlename): |
|
155 | def __init__(self, ui, path, bundlename): | |
156 | self._tempparent = None |
|
156 | self._tempparent = None | |
157 | try: |
|
157 | try: | |
158 | localrepo.localrepository.__init__(self, ui, path) |
|
158 | localrepo.localrepository.__init__(self, ui, path) | |
159 | except error.RepoError: |
|
159 | except error.RepoError: | |
160 | self._tempparent = tempfile.mkdtemp() |
|
160 | self._tempparent = tempfile.mkdtemp() | |
161 |
|
|
161 | localrepo.instance(ui,self._tempparent,1) | |
162 | localrepo.localrepository.__init__(self, ui, self._tempparent) |
|
162 | localrepo.localrepository.__init__(self, ui, self._tempparent) | |
163 |
|
163 | |||
164 | if path: |
|
164 | if path: | |
165 | self._url = 'bundle:' + path + '+' + bundlename |
|
165 | self._url = 'bundle:' + path + '+' + bundlename | |
166 | else: |
|
166 | else: | |
167 | self._url = 'bundle:' + bundlename |
|
167 | self._url = 'bundle:' + bundlename | |
168 |
|
168 | |||
169 | self.tempfile = None |
|
169 | self.tempfile = None | |
170 | self.bundlefile = open(bundlename, "rb") |
|
170 | self.bundlefile = open(bundlename, "rb") | |
171 | header = self.bundlefile.read(6) |
|
171 | header = self.bundlefile.read(6) | |
172 | if not header.startswith("HG"): |
|
172 | if not header.startswith("HG"): | |
173 | raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename) |
|
173 | raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename) | |
174 | elif not header.startswith("HG10"): |
|
174 | elif not header.startswith("HG10"): | |
175 | raise util.Abort(_("%s: unknown bundle version") % bundlename) |
|
175 | raise util.Abort(_("%s: unknown bundle version") % bundlename) | |
176 | elif (header == "HG10BZ") or (header == "HG10GZ"): |
|
176 | elif (header == "HG10BZ") or (header == "HG10GZ"): | |
177 | fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", |
|
177 | fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", | |
178 | suffix=".hg10un", dir=self.path) |
|
178 | suffix=".hg10un", dir=self.path) | |
179 | self.tempfile = temp |
|
179 | self.tempfile = temp | |
180 | fptemp = os.fdopen(fdtemp, 'wb') |
|
180 | fptemp = os.fdopen(fdtemp, 'wb') | |
181 | def generator(f): |
|
181 | def generator(f): | |
182 | if header == "HG10BZ": |
|
182 | if header == "HG10BZ": | |
183 | zd = bz2.BZ2Decompressor() |
|
183 | zd = bz2.BZ2Decompressor() | |
184 | zd.decompress("BZ") |
|
184 | zd.decompress("BZ") | |
185 | elif header == "HG10GZ": |
|
185 | elif header == "HG10GZ": | |
186 | zd = zlib.decompressobj() |
|
186 | zd = zlib.decompressobj() | |
187 | for chunk in f: |
|
187 | for chunk in f: | |
188 | yield zd.decompress(chunk) |
|
188 | yield zd.decompress(chunk) | |
189 | gen = generator(util.filechunkiter(self.bundlefile, 4096)) |
|
189 | gen = generator(util.filechunkiter(self.bundlefile, 4096)) | |
190 |
|
190 | |||
191 | try: |
|
191 | try: | |
192 | fptemp.write("HG10UN") |
|
192 | fptemp.write("HG10UN") | |
193 | for chunk in gen: |
|
193 | for chunk in gen: | |
194 | fptemp.write(chunk) |
|
194 | fptemp.write(chunk) | |
195 | finally: |
|
195 | finally: | |
196 | fptemp.close() |
|
196 | fptemp.close() | |
197 | self.bundlefile.close() |
|
197 | self.bundlefile.close() | |
198 |
|
198 | |||
199 | self.bundlefile = open(self.tempfile, "rb") |
|
199 | self.bundlefile = open(self.tempfile, "rb") | |
200 | # seek right after the header |
|
200 | # seek right after the header | |
201 | self.bundlefile.seek(6) |
|
201 | self.bundlefile.seek(6) | |
202 | elif header == "HG10UN": |
|
202 | elif header == "HG10UN": | |
203 | # nothing to do |
|
203 | # nothing to do | |
204 | pass |
|
204 | pass | |
205 | else: |
|
205 | else: | |
206 | raise util.Abort(_("%s: unknown bundle compression type") |
|
206 | raise util.Abort(_("%s: unknown bundle compression type") | |
207 | % bundlename) |
|
207 | % bundlename) | |
208 | # dict with the mapping 'filename' -> position in the bundle |
|
208 | # dict with the mapping 'filename' -> position in the bundle | |
209 | self.bundlefilespos = {} |
|
209 | self.bundlefilespos = {} | |
210 |
|
210 | |||
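
Everything in __init__ above is driven by the six-byte header: HG10UN bundles are used in place, while HG10BZ/HG10GZ are streamed through a decompressor into an uncompressed temporary file. The zlib branch, reduced to its essentials over an in-memory bundle (the payload is invented):

    import zlib

    payload = zlib.compress(b'fake changegroup data')
    bundle = b'HG10GZ' + payload

    header, rest = bundle[:6], bundle[6:]
    assert header.startswith(b'HG10')
    if header == b'HG10GZ':
        zd = zlib.decompressobj()
        data = zd.decompress(rest) + zd.flush()
    elif header == b'HG10UN':
        data = rest
    print(data)   # b'fake changegroup data'
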
211 | def __getattr__(self, name): |
|
211 | def __getattr__(self, name): | |
212 | if name == 'changelog': |
|
212 | if name == 'changelog': | |
213 | self.changelog = bundlechangelog(self.sopener, self.bundlefile) |
|
213 | self.changelog = bundlechangelog(self.sopener, self.bundlefile) | |
214 | self.manstart = self.bundlefile.tell() |
|
214 | self.manstart = self.bundlefile.tell() | |
215 | return self.changelog |
|
215 | return self.changelog | |
216 | elif name == 'manifest': |
|
216 | elif name == 'manifest': | |
217 | self.bundlefile.seek(self.manstart) |
|
217 | self.bundlefile.seek(self.manstart) | |
218 | self.manifest = bundlemanifest(self.sopener, self.bundlefile, |
|
218 | self.manifest = bundlemanifest(self.sopener, self.bundlefile, | |
219 | self.changelog.rev) |
|
219 | self.changelog.rev) | |
220 | self.filestart = self.bundlefile.tell() |
|
220 | self.filestart = self.bundlefile.tell() | |
221 | return self.manifest |
|
221 | return self.manifest | |
222 | elif name == 'manstart': |
|
222 | elif name == 'manstart': | |
223 | self.changelog |
|
223 | self.changelog | |
224 | return self.manstart |
|
224 | return self.manstart | |
225 | elif name == 'filestart': |
|
225 | elif name == 'filestart': | |
226 | self.manifest |
|
226 | self.manifest | |
227 | return self.filestart |
|
227 | return self.filestart | |
228 | else: |
|
228 | else: | |
229 | return localrepo.localrepository.__getattr__(self, name) |
|
229 | return localrepo.localrepository.__getattr__(self, name) | |
230 |
|
230 | |||
231 | def url(self): |
|
231 | def url(self): | |
232 | return self._url |
|
232 | return self._url | |
233 |
|
233 | |||
234 | def file(self, f): |
|
234 | def file(self, f): | |
235 | if not self.bundlefilespos: |
|
235 | if not self.bundlefilespos: | |
236 | self.bundlefile.seek(self.filestart) |
|
236 | self.bundlefile.seek(self.filestart) | |
237 | while 1: |
|
237 | while 1: | |
238 | chunk = changegroup.getchunk(self.bundlefile) |
|
238 | chunk = changegroup.getchunk(self.bundlefile) | |
239 | if not chunk: |
|
239 | if not chunk: | |
240 | break |
|
240 | break | |
241 | self.bundlefilespos[chunk] = self.bundlefile.tell() |
|
241 | self.bundlefilespos[chunk] = self.bundlefile.tell() | |
242 | for c in changegroup.chunkiter(self.bundlefile): |
|
242 | for c in changegroup.chunkiter(self.bundlefile): | |
243 | pass |
|
243 | pass | |
244 |
|
244 | |||
245 | if f[0] == '/': |
|
245 | if f[0] == '/': | |
246 | f = f[1:] |
|
246 | f = f[1:] | |
247 | if f in self.bundlefilespos: |
|
247 | if f in self.bundlefilespos: | |
248 | self.bundlefile.seek(self.bundlefilespos[f]) |
|
248 | self.bundlefile.seek(self.bundlefilespos[f]) | |
249 | return bundlefilelog(self.sopener, f, self.bundlefile, |
|
249 | return bundlefilelog(self.sopener, f, self.bundlefile, | |
250 | self.changelog.rev) |
|
250 | self.changelog.rev) | |
251 | else: |
|
251 | else: | |
252 | return filelog.filelog(self.sopener, f) |
|
252 | return filelog.filelog(self.sopener, f) | |
253 |
|
253 | |||
254 | def close(self): |
|
254 | def close(self): | |
255 | """Close assigned bundle file immediately.""" |
|
255 | """Close assigned bundle file immediately.""" | |
256 | self.bundlefile.close() |
|
256 | self.bundlefile.close() | |
257 |
|
257 | |||
258 | def __del__(self): |
|
258 | def __del__(self): | |
259 | bundlefile = getattr(self, 'bundlefile', None) |
|
259 | bundlefile = getattr(self, 'bundlefile', None) | |
260 | if bundlefile and not bundlefile.closed: |
|
260 | if bundlefile and not bundlefile.closed: | |
261 | bundlefile.close() |
|
261 | bundlefile.close() | |
262 | tempfile = getattr(self, 'tempfile', None) |
|
262 | tempfile = getattr(self, 'tempfile', None) | |
263 | if tempfile is not None: |
|
263 | if tempfile is not None: | |
264 | os.unlink(tempfile) |
|
264 | os.unlink(tempfile) | |
265 | if self._tempparent: |
|
265 | if self._tempparent: | |
266 | shutil.rmtree(self._tempparent, True) |
|
266 | shutil.rmtree(self._tempparent, True) | |
267 |
|
267 | |||
268 | def cancopy(self): |
|
268 | def cancopy(self): | |
269 | return False |
|
269 | return False | |
270 |
|
270 | |||
271 | def getcwd(self): |
|
271 | def getcwd(self): | |
272 | return os.getcwd() # always outside the repo |
|
272 | return os.getcwd() # always outside the repo | |
273 |
|
273 | |||
274 | def instance(ui, path, create): |
|
274 | def instance(ui, path, create): | |
275 | if create: |
|
275 | if create: | |
276 | raise util.Abort(_('cannot create new bundle repository')) |
|
276 | raise util.Abort(_('cannot create new bundle repository')) | |
277 | parentpath = ui.config("bundle", "mainreporoot", "") |
|
277 | parentpath = ui.config("bundle", "mainreporoot", "") | |
278 | if parentpath: |
|
278 | if parentpath: | |
279 | # Try to make the full path relative so we get a nice, short URL. |
|
279 | # Try to make the full path relative so we get a nice, short URL. | |
280 | # In particular, we don't want temp dir names in test outputs. |
|
280 | # In particular, we don't want temp dir names in test outputs. | |
281 | cwd = os.getcwd() |
|
281 | cwd = os.getcwd() | |
282 | if parentpath == cwd: |
|
282 | if parentpath == cwd: | |
283 | parentpath = '' |
|
283 | parentpath = '' | |
284 | else: |
|
284 | else: | |
285 | cwd = os.path.join(cwd,'') |
|
285 | cwd = os.path.join(cwd,'') | |
286 | if parentpath.startswith(cwd): |
|
286 | if parentpath.startswith(cwd): | |
287 | parentpath = parentpath[len(cwd):] |
|
287 | parentpath = parentpath[len(cwd):] | |
288 | path = util.drop_scheme('file', path) |
|
288 | path = util.drop_scheme('file', path) | |
289 | if path.startswith('bundle:'): |
|
289 | if path.startswith('bundle:'): | |
290 | path = util.drop_scheme('bundle', path) |
|
290 | path = util.drop_scheme('bundle', path) | |
291 | s = path.split("+", 1) |
|
291 | s = path.split("+", 1) | |
292 | if len(s) == 1: |
|
292 | if len(s) == 1: | |
293 | repopath, bundlename = parentpath, s[0] |
|
293 | repopath, bundlename = parentpath, s[0] | |
294 | else: |
|
294 | else: | |
295 | repopath, bundlename = s |
|
295 | repopath, bundlename = s | |
296 | else: |
|
296 | else: | |
297 | repopath, bundlename = parentpath, path |
|
297 | repopath, bundlename = parentpath, path | |
298 | return bundlerepository(ui, repopath, bundlename) |
|
298 | return bundlerepository(ui, repopath, bundlename) |
@@ -1,233 +1,232 @@ | |||||
1 | # copies.py - copy detection for Mercurial |
|
1 | # copies.py - copy detection for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2008 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import nullid, nullrev |
|
|||
9 | from i18n import _ |
|
8 | from i18n import _ | |
10 | import util, heapq |
|
9 | import util, heapq | |
11 |
|
10 | |||
12 | def _nonoverlap(d1, d2, d3): |
|
11 | def _nonoverlap(d1, d2, d3): | |
13 | "Return list of elements in d1 not in d2 or d3" |
|
12 | "Return list of elements in d1 not in d2 or d3" | |
14 | return util.sort([d for d in d1 if d not in d3 and d not in d2]) |
|
13 | return util.sort([d for d in d1 if d not in d3 and d not in d2]) | |
15 |
|
14 | |||
16 | def _dirname(f): |
|
15 | def _dirname(f): | |
17 | s = f.rfind("/") |
|
16 | s = f.rfind("/") | |
18 | if s == -1: |
|
17 | if s == -1: | |
19 | return "" |
|
18 | return "" | |
20 | return f[:s] |
|
19 | return f[:s] | |
21 |
|
20 | |||
22 | def _dirs(files): |
|
21 | def _dirs(files): | |
23 | d = {} |
|
22 | d = {} | |
24 | for f in files: |
|
23 | for f in files: | |
25 | f = _dirname(f) |
|
24 | f = _dirname(f) | |
26 | while f not in d: |
|
25 | while f not in d: | |
27 | d[f] = True |
|
26 | d[f] = True | |
28 | f = _dirname(f) |
|
27 | f = _dirname(f) | |
29 | return d |
|
28 | return d | |
30 |
|
29 | |||
31 | def _findoldnames(fctx, limit): |
|
30 | def _findoldnames(fctx, limit): | |
32 | "find files that path was copied from, back to linkrev limit" |
|
31 | "find files that path was copied from, back to linkrev limit" | |
33 | old = {} |
|
32 | old = {} | |
34 | seen = {} |
|
33 | seen = {} | |
35 | orig = fctx.path() |
|
34 | orig = fctx.path() | |
36 | visit = [(fctx, 0)] |
|
35 | visit = [(fctx, 0)] | |
37 | while visit: |
|
36 | while visit: | |
38 | fc, depth = visit.pop() |
|
37 | fc, depth = visit.pop() | |
39 | s = str(fc) |
|
38 | s = str(fc) | |
40 | if s in seen: |
|
39 | if s in seen: | |
41 | continue |
|
40 | continue | |
42 | seen[s] = 1 |
|
41 | seen[s] = 1 | |
43 | if fc.path() != orig and fc.path() not in old: |
|
42 | if fc.path() != orig and fc.path() not in old: | |
44 | old[fc.path()] = (depth, fc.path()) # remember depth |
|
43 | old[fc.path()] = (depth, fc.path()) # remember depth | |
45 | if fc.rev() < limit and fc.rev() is not None: |
|
44 | if fc.rev() < limit and fc.rev() is not None: | |
46 | continue |
|
45 | continue | |
47 | visit += [(p, depth - 1) for p in fc.parents()] |
|
46 | visit += [(p, depth - 1) for p in fc.parents()] | |
48 |
|
47 | |||
49 | # return old names sorted by depth |
|
48 | # return old names sorted by depth | |
50 | return [o[1] for o in util.sort(old.values())] |
|
49 | return [o[1] for o in util.sort(old.values())] | |
51 |
|
50 | |||
52 | def _findlimit(repo, a, b): |
|
51 | def _findlimit(repo, a, b): | |
53 | "find the earliest revision that's an ancestor of a or b but not both" |
|
52 | "find the earliest revision that's an ancestor of a or b but not both" | |
54 | # basic idea: |
|
53 | # basic idea: | |
55 | # - mark a and b with different sides |
|
54 | # - mark a and b with different sides | |
56 | # - if a parent's children are all on the same side, the parent is |
|
55 | # - if a parent's children are all on the same side, the parent is | |
57 | # on that side, otherwise it is on no side |
|
56 | # on that side, otherwise it is on no side | |
58 | # - walk the graph in topological order with the help of a heap; |
|
57 | # - walk the graph in topological order with the help of a heap; | |
59 | # - add unseen parents to side map |
|
58 | # - add unseen parents to side map | |
60 | # - clear side of any parent that has children on different sides |
|
59 | # - clear side of any parent that has children on different sides | |
61 | # - track number of interesting revs that might still be on a side |
|
60 | # - track number of interesting revs that might still be on a side | |
62 | # - track the lowest interesting rev seen |
|
61 | # - track the lowest interesting rev seen | |
63 | # - quit when interesting revs is zero |
|
62 | # - quit when interesting revs is zero | |
64 |
|
63 | |||
65 | cl = repo.changelog |
|
64 | cl = repo.changelog | |
66 | working = len(cl) # pseudo rev for the working directory |
|
65 | working = len(cl) # pseudo rev for the working directory | |
67 | if a is None: |
|
66 | if a is None: | |
68 | a = working |
|
67 | a = working | |
69 | if b is None: |
|
68 | if b is None: | |
70 | b = working |
|
69 | b = working | |
71 |
|
70 | |||
72 | side = {a: -1, b: 1} |
|
71 | side = {a: -1, b: 1} | |
73 | visit = [-a, -b] |
|
72 | visit = [-a, -b] | |
74 | heapq.heapify(visit) |
|
73 | heapq.heapify(visit) | |
75 | interesting = len(visit) |
|
74 | interesting = len(visit) | |
76 | limit = working |
|
75 | limit = working | |
77 |
|
76 | |||
78 | while interesting: |
|
77 | while interesting: | |
79 | r = -heapq.heappop(visit) |
|
78 | r = -heapq.heappop(visit) | |
80 | if r == working: |
|
79 | if r == working: | |
81 | parents = [cl.rev(p) for p in repo.dirstate.parents()] |
|
80 | parents = [cl.rev(p) for p in repo.dirstate.parents()] | |
82 | else: |
|
81 | else: | |
83 | parents = cl.parentrevs(r) |
|
82 | parents = cl.parentrevs(r) | |
84 | for p in parents: |
|
83 | for p in parents: | |
85 | if p not in side: |
|
84 | if p not in side: | |
86 | # first time we see p; add it to visit |
|
85 | # first time we see p; add it to visit | |
87 | side[p] = side[r] |
|
86 | side[p] = side[r] | |
88 | if side[p]: |
|
87 | if side[p]: | |
89 | interesting += 1 |
|
88 | interesting += 1 | |
90 | heapq.heappush(visit, -p) |
|
89 | heapq.heappush(visit, -p) | |
91 | elif side[p] and side[p] != side[r]: |
|
90 | elif side[p] and side[p] != side[r]: | |
92 | # p was interesting but now we know better |
|
91 | # p was interesting but now we know better | |
93 | side[p] = 0 |
|
92 | side[p] = 0 | |
94 | interesting -= 1 |
|
93 | interesting -= 1 | |
95 | if side[r]: |
|
94 | if side[r]: | |
96 | limit = r # lowest rev visited |
|
95 | limit = r # lowest rev visited | |
97 | interesting -= 1 |
|
96 | interesting -= 1 | |
98 | return limit |
|
97 | return limit | |
99 |
|
98 | |||
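The comment block above is the whole algorithm, and it is small enough to try in isolation. A minimal standalone sketch under stated assumptions: the toy parent map, the helper name and the example DAG are invented, and the working-directory pseudo revision and null-revision bookkeeping of the real code are simplified away.

    import heapq

    def findlimit(parents, a, b):
        # parents: invented map rev -> tuple of parent revs, -1 = null
        side = {a: -1, b: 1}           # mark a and b with different sides
        visit = [-a, -b]               # negated revs: pop highest rev first
        heapq.heapify(visit)
        interesting = len(visit)
        limit = max(a, b)
        while interesting:
            r = -heapq.heappop(visit)
            for p in parents.get(r, ()):
                if p == -1:
                    continue           # simplification: skip the null parent
                if p not in side:
                    side[p] = side[r]  # first sighting: inherit r's side
                    if side[p]:
                        interesting += 1
                    heapq.heappush(visit, -p)
                elif side[p] and side[p] != side[r]:
                    side[p] = 0        # children on both sides: clear it
                    interesting -= 1
            if side[r]:
                limit = r              # lowest one-sided rev seen so far
                interesting -= 1
        return limit

    # DAG: 0 <- 1 <- 2 and 0 <- 3.  Ancestors of 2: {0,1,2}; of 3: {0,3}.
    dag = {0: (-1,), 1: (0,), 2: (1,), 3: (0,)}
    print(findlimit(dag, 2, 3))        # -> 1, the earliest one-sided rev

Walking revs from highest to lowest guarantees every child is processed before its parents, which is the topological order the comments call for.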
100 | def copies(repo, c1, c2, ca, checkdirs=False): |
|
99 | def copies(repo, c1, c2, ca, checkdirs=False): | |
101 | """ |
|
100 | """ | |
102 | Find moves and copies between context c1 and c2 |
|
101 | Find moves and copies between context c1 and c2 | |
103 | """ |
|
102 | """ | |
104 | # avoid silly behavior for update from empty dir |
|
103 | # avoid silly behavior for update from empty dir | |
105 | if not c1 or not c2 or c1 == c2: |
|
104 | if not c1 or not c2 or c1 == c2: | |
106 | return {}, {} |
|
105 | return {}, {} | |
107 |
|
106 | |||
108 | # avoid silly behavior for parent -> working dir |
|
107 | # avoid silly behavior for parent -> working dir | |
109 | if c2.node() == None and c1.node() == repo.dirstate.parents()[0]: |
|
108 | if c2.node() == None and c1.node() == repo.dirstate.parents()[0]: | |
110 | return repo.dirstate.copies(), {} |
|
109 | return repo.dirstate.copies(), {} | |
111 |
|
110 | |||
112 | limit = _findlimit(repo, c1.rev(), c2.rev()) |
|
111 | limit = _findlimit(repo, c1.rev(), c2.rev()) | |
113 | m1 = c1.manifest() |
|
112 | m1 = c1.manifest() | |
114 | m2 = c2.manifest() |
|
113 | m2 = c2.manifest() | |
115 | ma = ca.manifest() |
|
114 | ma = ca.manifest() | |
116 |
|
115 | |||
117 | def makectx(f, n): |
|
116 | def makectx(f, n): | |
118 | if len(n) != 20: # in a working context? |
|
117 | if len(n) != 20: # in a working context? | |
119 | if c1.rev() is None: |
|
118 | if c1.rev() is None: | |
120 | return c1.filectx(f) |
|
119 | return c1.filectx(f) | |
121 | return c2.filectx(f) |
|
120 | return c2.filectx(f) | |
122 | return repo.filectx(f, fileid=n) |
|
121 | return repo.filectx(f, fileid=n) | |
123 | ctx = util.cachefunc(makectx) |
|
122 | ctx = util.cachefunc(makectx) | |
124 |
|
123 | |||
125 | copy = {} |
|
124 | copy = {} | |
126 | fullcopy = {} |
|
125 | fullcopy = {} | |
127 | diverge = {} |
|
126 | diverge = {} | |
128 |
|
127 | |||
129 | def checkcopies(f, m1, m2): |
|
128 | def checkcopies(f, m1, m2): | |
130 | '''check possible copies of f from m1 to m2''' |
|
129 | '''check possible copies of f from m1 to m2''' | |
131 | c1 = ctx(f, m1[f]) |
|
130 | c1 = ctx(f, m1[f]) | |
132 | for of in _findoldnames(c1, limit): |
|
131 | for of in _findoldnames(c1, limit): | |
133 | fullcopy[f] = of # remember for dir rename detection |
|
132 | fullcopy[f] = of # remember for dir rename detection | |
134 | if of in m2: # original file not in other manifest? |
|
133 | if of in m2: # original file not in other manifest? | |
135 | # if the original file is unchanged on the other branch, |
|
134 | # if the original file is unchanged on the other branch, | |
136 | # no merge needed |
|
135 | # no merge needed | |
137 | if m2[of] != ma.get(of): |
|
136 | if m2[of] != ma.get(of): | |
138 | c2 = ctx(of, m2[of]) |
|
137 | c2 = ctx(of, m2[of]) | |
139 | ca = c1.ancestor(c2) |
|
138 | ca = c1.ancestor(c2) | |
140 | # related and name changed on only one side? |
|
139 | # related and name changed on only one side? | |
141 | if ca and (ca.path() == f or ca.path() == c2.path()): |
|
140 | if ca and (ca.path() == f or ca.path() == c2.path()): | |
142 | if c1 != ca or c2 != ca: # merge needed? |
|
141 | if c1 != ca or c2 != ca: # merge needed? | |
143 | copy[f] = of |
|
142 | copy[f] = of | |
144 | elif of in ma: |
|
143 | elif of in ma: | |
145 | diverge.setdefault(of, []).append(f) |
|
144 | diverge.setdefault(of, []).append(f) | |
146 |
|
145 | |||
147 | repo.ui.debug(_(" searching for copies back to rev %d\n") % limit) |
|
146 | repo.ui.debug(_(" searching for copies back to rev %d\n") % limit) | |
148 |
|
147 | |||
149 | u1 = _nonoverlap(m1, m2, ma) |
|
148 | u1 = _nonoverlap(m1, m2, ma) | |
150 | u2 = _nonoverlap(m2, m1, ma) |
|
149 | u2 = _nonoverlap(m2, m1, ma) | |
151 |
|
150 | |||
152 | if u1: |
|
151 | if u1: | |
153 | repo.ui.debug(_(" unmatched files in local:\n %s\n") |
|
152 | repo.ui.debug(_(" unmatched files in local:\n %s\n") | |
154 | % "\n ".join(u1)) |
|
153 | % "\n ".join(u1)) | |
155 | if u2: |
|
154 | if u2: | |
156 | repo.ui.debug(_(" unmatched files in other:\n %s\n") |
|
155 | repo.ui.debug(_(" unmatched files in other:\n %s\n") | |
157 | % "\n ".join(u2)) |
|
156 | % "\n ".join(u2)) | |
158 |
|
157 | |||
159 | for f in u1: |
|
158 | for f in u1: | |
160 | checkcopies(f, m1, m2) |
|
159 | checkcopies(f, m1, m2) | |
161 | for f in u2: |
|
160 | for f in u2: | |
162 | checkcopies(f, m2, m1) |
|
161 | checkcopies(f, m2, m1) | |
163 |
|
162 | |||
164 | diverge2 = {} |
|
163 | diverge2 = {} | |
165 | for of, fl in diverge.items(): |
|
164 | for of, fl in diverge.items(): | |
166 | if len(fl) == 1: |
|
165 | if len(fl) == 1: | |
167 | del diverge[of] # not actually divergent |
|
166 | del diverge[of] # not actually divergent | |
168 | else: |
|
167 | else: | |
169 | diverge2.update(dict.fromkeys(fl)) # reverse map for below |
|
168 | diverge2.update(dict.fromkeys(fl)) # reverse map for below | |
170 |
|
169 | |||
171 | if fullcopy: |
|
170 | if fullcopy: | |
172 | repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n")) |
|
171 | repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n")) | |
173 | for f in fullcopy: |
|
172 | for f in fullcopy: | |
174 | note = "" |
|
173 | note = "" | |
175 | if f in copy: note += "*" |
|
174 | if f in copy: note += "*" | |
176 | if f in diverge2: note += "!" |
|
175 | if f in diverge2: note += "!" | |
177 | repo.ui.debug(_(" %s -> %s %s\n") % (f, fullcopy[f], note)) |
|
176 | repo.ui.debug(_(" %s -> %s %s\n") % (f, fullcopy[f], note)) | |
178 | del diverge2 |
|
177 | del diverge2 | |
179 |
|
178 | |||
180 | if not fullcopy or not checkdirs: |
|
179 | if not fullcopy or not checkdirs: | |
181 | return copy, diverge |
|
180 | return copy, diverge | |
182 |
|
181 | |||
183 | repo.ui.debug(_(" checking for directory renames\n")) |
|
182 | repo.ui.debug(_(" checking for directory renames\n")) | |
184 |
|
183 | |||
185 | # generate a directory move map |
|
184 | # generate a directory move map | |
186 | d1, d2 = _dirs(m1), _dirs(m2) |
|
185 | d1, d2 = _dirs(m1), _dirs(m2) | |
187 | invalid = {} |
|
186 | invalid = {} | |
188 | dirmove = {} |
|
187 | dirmove = {} | |
189 |
|
188 | |||
190 | # examine each file copy for a potential directory move, which is |
|
189 | # examine each file copy for a potential directory move, which is | |
191 | # when all the files in a directory are moved to a new directory |
|
190 | # when all the files in a directory are moved to a new directory | |
192 | for dst, src in fullcopy.iteritems(): |
|
191 | for dst, src in fullcopy.iteritems(): | |
193 | dsrc, ddst = _dirname(src), _dirname(dst) |
|
192 | dsrc, ddst = _dirname(src), _dirname(dst) | |
194 | if dsrc in invalid: |
|
193 | if dsrc in invalid: | |
195 | # already seen to be uninteresting |
|
194 | # already seen to be uninteresting | |
196 | continue |
|
195 | continue | |
197 | elif dsrc in d1 and ddst in d1: |
|
196 | elif dsrc in d1 and ddst in d1: | |
198 | # directory wasn't entirely moved locally |
|
197 | # directory wasn't entirely moved locally | |
199 | invalid[dsrc] = True |
|
198 | invalid[dsrc] = True | |
200 | elif dsrc in d2 and ddst in d2: |
|
199 | elif dsrc in d2 and ddst in d2: | |
201 | # directory wasn't entirely moved remotely |
|
200 | # directory wasn't entirely moved remotely | |
202 | invalid[dsrc] = True |
|
201 | invalid[dsrc] = True | |
203 | elif dsrc in dirmove and dirmove[dsrc] != ddst: |
|
202 | elif dsrc in dirmove and dirmove[dsrc] != ddst: | |
204 | # files from the same directory moved to two different places |
|
203 | # files from the same directory moved to two different places | |
205 | invalid[dsrc] = True |
|
204 | invalid[dsrc] = True | |
206 | else: |
|
205 | else: | |
207 | # looks good so far |
|
206 | # looks good so far | |
208 | dirmove[dsrc + "/"] = ddst + "/" |
|
207 | dirmove[dsrc + "/"] = ddst + "/" | |
209 |
|
208 | |||
210 | for i in invalid: |
|
209 | for i in invalid: | |
211 | if i in dirmove: |
|
210 | if i in dirmove: | |
212 | del dirmove[i] |
|
211 | del dirmove[i] | |
213 | del d1, d2, invalid |
|
212 | del d1, d2, invalid | |
214 |
|
213 | |||
215 | if not dirmove: |
|
214 | if not dirmove: | |
216 | return copy, diverge |
|
215 | return copy, diverge | |
217 |
|
216 | |||
218 | for d in dirmove: |
|
217 | for d in dirmove: | |
219 | repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d])) |
|
218 | repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d])) | |
220 |
|
219 | |||
221 | # check unaccounted nonoverlapping files against directory moves |
|
220 | # check unaccounted nonoverlapping files against directory moves | |
222 | for f in u1 + u2: |
|
221 | for f in u1 + u2: | |
223 | if f not in fullcopy: |
|
222 | if f not in fullcopy: | |
224 | for d in dirmove: |
|
223 | for d in dirmove: | |
225 | if f.startswith(d): |
|
224 | if f.startswith(d): | |
226 | # new file added in a directory that was moved, move it |
|
225 | # new file added in a directory that was moved, move it | |
227 | df = dirmove[d] + f[len(d):] |
|
226 | df = dirmove[d] + f[len(d):] | |
228 | if df not in copy: |
|
227 | if df not in copy: | |
229 | copy[f] = df |
|
228 | copy[f] = df | |
230 | repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f])) |
|
229 | repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f])) | |
231 | break |
|
230 | break | |
232 |
|
231 | |||
233 | return copy, diverge |
|
232 | return copy, diverge |
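To make the directory-rename half of copies() above concrete, here is a toy rerun of its dirmove loop with invented names; dirname mirrors _dirname, and the dst -> src orientation matches fullcopy in the code.

    def dirname(f):
        s = f.rfind("/")
        return f[:s] if s != -1 else ""

    fullcopy = {"b/x": "a/x", "b/y": "a/y"}  # dst -> src
    d1 = {"": True, "b": True}               # dirs still present locally
    d2 = {"": True, "a": True}               # dirs still present remotely

    invalid, dirmove = {}, {}
    for dst, src in fullcopy.items():
        dsrc, ddst = dirname(src), dirname(dst)
        if dsrc in invalid:
            continue                          # already uninteresting
        elif dsrc in d1 and ddst in d1:
            invalid[dsrc] = True              # not entirely moved locally
        elif dsrc in d2 and ddst in d2:
            invalid[dsrc] = True              # not entirely moved remotely
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            invalid[dsrc] = True              # moved to two different places
        else:
            dirmove[dsrc + "/"] = ddst + "/"  # looks good so far

    print(dirmove)                            # -> {'a/': 'b/'}

With that map, a file that exists only as a/z on the unmoved side would be grafted to b/z by the final loop of copies().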
@@ -1,417 +1,417 b'' | |||||
1 | # dispatch.py - command dispatching for mercurial |
|
1 | # dispatch.py - command dispatching for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from i18n import _ |
|
8 | from i18n import _ | |
9 | import os, sys, atexit, signal, pdb, socket, errno, shlex, time |
|
9 | import os, sys, atexit, signal, pdb, socket, errno, shlex, time | |
10 | import util, commands, hg |
|
10 | import util, commands, hg, fancyopts, extensions, hook, error | |
11 | import cmdutil |
|
11 | import cmdutil | |
12 | import ui as _ui |
|
12 | import ui as _ui | |
13 |
|
13 | |||
14 | def run(): |
|
14 | def run(): | |
15 | "run the command in sys.argv" |
|
15 | "run the command in sys.argv" | |
16 | sys.exit(dispatch(sys.argv[1:])) |
|
16 | sys.exit(dispatch(sys.argv[1:])) | |
17 |
|
17 | |||
18 | def dispatch(args): |
|
18 | def dispatch(args): | |
19 | "run the command specified in args" |
|
19 | "run the command specified in args" | |
20 | try: |
|
20 | try: | |
21 | u = _ui.ui(traceback='--traceback' in args) |
|
21 | u = _ui.ui(traceback='--traceback' in args) | |
22 | except util.Abort, inst: |
|
22 | except util.Abort, inst: | |
23 | sys.stderr.write(_("abort: %s\n") % inst) |
|
23 | sys.stderr.write(_("abort: %s\n") % inst) | |
24 | return -1 |
|
24 | return -1 | |
25 | return _runcatch(u, args) |
|
25 | return _runcatch(u, args) | |
26 |
|
26 | |||
27 | def _runcatch(ui, args): |
|
27 | def _runcatch(ui, args): | |
28 | def catchterm(*args): |
|
28 | def catchterm(*args): | |
29 | raise error.SignalInterrupt |
|
29 | raise error.SignalInterrupt | |
30 |
|
30 | |||
31 | for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM': |
|
31 | for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM': | |
32 | num = getattr(signal, name, None) |
|
32 | num = getattr(signal, name, None) | |
33 | if num: signal.signal(num, catchterm) |
|
33 | if num: signal.signal(num, catchterm) | |
34 |
|
34 | |||
35 | try: |
|
35 | try: | |
36 | try: |
|
36 | try: | |
37 | # enter the debugger before command execution |
|
37 | # enter the debugger before command execution | |
38 | if '--debugger' in args: |
|
38 | if '--debugger' in args: | |
39 | pdb.set_trace() |
|
39 | pdb.set_trace() | |
40 | try: |
|
40 | try: | |
41 | return _dispatch(ui, args) |
|
41 | return _dispatch(ui, args) | |
42 | finally: |
|
42 | finally: | |
43 | ui.flush() |
|
43 | ui.flush() | |
44 | except: |
|
44 | except: | |
45 | # enter the debugger when we hit an exception |
|
45 | # enter the debugger when we hit an exception | |
46 | if '--debugger' in args: |
|
46 | if '--debugger' in args: | |
47 | pdb.post_mortem(sys.exc_info()[2]) |
|
47 | pdb.post_mortem(sys.exc_info()[2]) | |
48 | ui.print_exc() |
|
48 | ui.print_exc() | |
49 | raise |
|
49 | raise | |
50 |
|
50 | |||
51 | # Global exception handling, alphabetically |
|
51 | # Global exception handling, alphabetically | |
52 | # Mercurial-specific first, followed by built-in and library exceptions |
|
52 | # Mercurial-specific first, followed by built-in and library exceptions | |
53 | except error.AmbiguousCommand, inst: |
|
53 | except error.AmbiguousCommand, inst: | |
54 | ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") % |
|
54 | ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") % | |
55 | (inst.args[0], " ".join(inst.args[1]))) |
|
55 | (inst.args[0], " ".join(inst.args[1]))) | |
56 | except error.LockHeld, inst: |
|
56 | except error.LockHeld, inst: | |
57 | if inst.errno == errno.ETIMEDOUT: |
|
57 | if inst.errno == errno.ETIMEDOUT: | |
58 | reason = _('timed out waiting for lock held by %s') % inst.locker |
|
58 | reason = _('timed out waiting for lock held by %s') % inst.locker | |
59 | else: |
|
59 | else: | |
60 | reason = _('lock held by %s') % inst.locker |
|
60 | reason = _('lock held by %s') % inst.locker | |
61 | ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) |
|
61 | ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) | |
62 | except error.LockUnavailable, inst: |
|
62 | except error.LockUnavailable, inst: | |
63 | ui.warn(_("abort: could not lock %s: %s\n") % |
|
63 | ui.warn(_("abort: could not lock %s: %s\n") % | |
64 | (inst.desc or inst.filename, inst.strerror)) |
|
64 | (inst.desc or inst.filename, inst.strerror)) | |
65 | except error.ParseError, inst: |
|
65 | except error.ParseError, inst: | |
66 | if inst.args[0]: |
|
66 | if inst.args[0]: | |
67 | ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) |
|
67 | ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) | |
68 | commands.help_(ui, inst.args[0]) |
|
68 | commands.help_(ui, inst.args[0]) | |
69 | else: |
|
69 | else: | |
70 | ui.warn(_("hg: %s\n") % inst.args[1]) |
|
70 | ui.warn(_("hg: %s\n") % inst.args[1]) | |
71 | commands.help_(ui, 'shortlist') |
|
71 | commands.help_(ui, 'shortlist') | |
72 | except error.RepoError, inst: |
|
72 | except error.RepoError, inst: | |
73 | ui.warn(_("abort: %s!\n") % inst) |
|
73 | ui.warn(_("abort: %s!\n") % inst) | |
74 | except error.ResponseError, inst: |
|
74 | except error.ResponseError, inst: | |
75 | ui.warn(_("abort: %s") % inst.args[0]) |
|
75 | ui.warn(_("abort: %s") % inst.args[0]) | |
76 | if not isinstance(inst.args[1], basestring): |
|
76 | if not isinstance(inst.args[1], basestring): | |
77 | ui.warn(" %r\n" % (inst.args[1],)) |
|
77 | ui.warn(" %r\n" % (inst.args[1],)) | |
78 | elif not inst.args[1]: |
|
78 | elif not inst.args[1]: | |
79 | ui.warn(_(" empty string\n")) |
|
79 | ui.warn(_(" empty string\n")) | |
80 | else: |
|
80 | else: | |
81 | ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) |
|
81 | ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) | |
82 | except error.RevlogError, inst: |
|
82 | except error.RevlogError, inst: | |
83 | ui.warn(_("abort: %s!\n") % inst) |
|
83 | ui.warn(_("abort: %s!\n") % inst) | |
84 | except error.SignalInterrupt: |
|
84 | except error.SignalInterrupt: | |
85 | ui.warn(_("killed!\n")) |
|
85 | ui.warn(_("killed!\n")) | |
86 | except error.UnknownCommand, inst: |
|
86 | except error.UnknownCommand, inst: | |
87 | ui.warn(_("hg: unknown command '%s'\n") % inst.args[0]) |
|
87 | ui.warn(_("hg: unknown command '%s'\n") % inst.args[0]) | |
88 | commands.help_(ui, 'shortlist') |
|
88 | commands.help_(ui, 'shortlist') | |
89 | except util.Abort, inst: |
|
89 | except util.Abort, inst: | |
90 | ui.warn(_("abort: %s\n") % inst) |
|
90 | ui.warn(_("abort: %s\n") % inst) | |
91 | except ImportError, inst: |
|
91 | except ImportError, inst: | |
92 | m = str(inst).split()[-1] |
|
92 | m = str(inst).split()[-1] | |
93 | ui.warn(_("abort: could not import module %s!\n") % m) |
|
93 | ui.warn(_("abort: could not import module %s!\n") % m) | |
94 | if m in "mpatch bdiff".split(): |
|
94 | if m in "mpatch bdiff".split(): | |
95 | ui.warn(_("(did you forget to compile extensions?)\n")) |
|
95 | ui.warn(_("(did you forget to compile extensions?)\n")) | |
96 | elif m in "zlib".split(): |
|
96 | elif m in "zlib".split(): | |
97 | ui.warn(_("(is your Python install correct?)\n")) |
|
97 | ui.warn(_("(is your Python install correct?)\n")) | |
98 | except IOError, inst: |
|
98 | except IOError, inst: | |
99 | if hasattr(inst, "code"): |
|
99 | if hasattr(inst, "code"): | |
100 | ui.warn(_("abort: %s\n") % inst) |
|
100 | ui.warn(_("abort: %s\n") % inst) | |
101 | elif hasattr(inst, "reason"): |
|
101 | elif hasattr(inst, "reason"): | |
102 | try: # usually it is in the form (errno, strerror) |
|
102 | try: # usually it is in the form (errno, strerror) | |
103 | reason = inst.reason.args[1] |
|
103 | reason = inst.reason.args[1] | |
104 | except: # it might be anything, for example a string |
|
104 | except: # it might be anything, for example a string | |
105 | reason = inst.reason |
|
105 | reason = inst.reason | |
106 | ui.warn(_("abort: error: %s\n") % reason) |
|
106 | ui.warn(_("abort: error: %s\n") % reason) | |
107 | elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE: |
|
107 | elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE: | |
108 | if ui.debugflag: |
|
108 | if ui.debugflag: | |
109 | ui.warn(_("broken pipe\n")) |
|
109 | ui.warn(_("broken pipe\n")) | |
110 | elif getattr(inst, "strerror", None): |
|
110 | elif getattr(inst, "strerror", None): | |
111 | if getattr(inst, "filename", None): |
|
111 | if getattr(inst, "filename", None): | |
112 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) |
|
112 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) | |
113 | else: |
|
113 | else: | |
114 | ui.warn(_("abort: %s\n") % inst.strerror) |
|
114 | ui.warn(_("abort: %s\n") % inst.strerror) | |
115 | else: |
|
115 | else: | |
116 | raise |
|
116 | raise | |
117 | except OSError, inst: |
|
117 | except OSError, inst: | |
118 | if getattr(inst, "filename", None): |
|
118 | if getattr(inst, "filename", None): | |
119 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) |
|
119 | ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) | |
120 | else: |
|
120 | else: | |
121 | ui.warn(_("abort: %s\n") % inst.strerror) |
|
121 | ui.warn(_("abort: %s\n") % inst.strerror) | |
122 | except KeyboardInterrupt: |
|
122 | except KeyboardInterrupt: | |
123 | try: |
|
123 | try: | |
124 | ui.warn(_("interrupted!\n")) |
|
124 | ui.warn(_("interrupted!\n")) | |
125 | except IOError, inst: |
|
125 | except IOError, inst: | |
126 | if inst.errno == errno.EPIPE: |
|
126 | if inst.errno == errno.EPIPE: | |
127 | if ui.debugflag: |
|
127 | if ui.debugflag: | |
128 | ui.warn(_("\nbroken pipe\n")) |
|
128 | ui.warn(_("\nbroken pipe\n")) | |
129 | else: |
|
129 | else: | |
130 | raise |
|
130 | raise | |
131 | except MemoryError: |
|
131 | except MemoryError: | |
132 | ui.warn(_("abort: out of memory\n")) |
|
132 | ui.warn(_("abort: out of memory\n")) | |
133 | except SystemExit, inst: |
|
133 | except SystemExit, inst: | |
134 | # Commands shouldn't sys.exit directly, but give a return code. |
|
134 | # Commands shouldn't sys.exit directly, but give a return code. | |
135 | # Just in case, catch this and pass the exit code to the caller. |
|
135 | # Just in case, catch this and pass the exit code to the caller. | |
136 | return inst.code |
|
136 | return inst.code | |
137 | except socket.error, inst: |
|
137 | except socket.error, inst: | |
138 | ui.warn(_("abort: %s\n") % inst.args[-1]) |
|
138 | ui.warn(_("abort: %s\n") % inst.args[-1]) | |
139 | except: |
|
139 | except: | |
140 | ui.warn(_("** unknown exception encountered, details follow\n")) |
|
140 | ui.warn(_("** unknown exception encountered, details follow\n")) | |
141 | ui.warn(_("** report bug details to " |
|
141 | ui.warn(_("** report bug details to " | |
142 | "http://www.selenic.com/mercurial/bts\n")) |
|
142 | "http://www.selenic.com/mercurial/bts\n")) | |
143 | ui.warn(_("** or mercurial@selenic.com\n")) |
|
143 | ui.warn(_("** or mercurial@selenic.com\n")) | |
144 | ui.warn(_("** Mercurial Distributed SCM (version %s)\n") |
|
144 | ui.warn(_("** Mercurial Distributed SCM (version %s)\n") | |
145 | % util.version()) |
|
145 | % util.version()) | |
146 | ui.warn(_("** Extensions loaded: %s\n") |
|
146 | ui.warn(_("** Extensions loaded: %s\n") | |
147 | % ", ".join([x[0] for x in extensions.extensions()])) |
|
147 | % ", ".join([x[0] for x in extensions.extensions()])) | |
148 | raise |
|
148 | raise | |
149 |
|
149 | |||
150 | return -1 |
|
150 | return -1 | |
151 |
|
151 | |||
152 | def _findrepo(p): |
|
152 | def _findrepo(p): | |
153 | while not os.path.isdir(os.path.join(p, ".hg")): |
|
153 | while not os.path.isdir(os.path.join(p, ".hg")): | |
154 | oldp, p = p, os.path.dirname(p) |
|
154 | oldp, p = p, os.path.dirname(p) | |
155 | if p == oldp: |
|
155 | if p == oldp: | |
156 | return None |
|
156 | return None | |
157 |
|
157 | |||
158 | return p |
|
158 | return p | |
159 |
|
159 | |||
160 | def _parse(ui, args): |
|
160 | def _parse(ui, args): | |
161 | options = {} |
|
161 | options = {} | |
162 | cmdoptions = {} |
|
162 | cmdoptions = {} | |
163 |
|
163 | |||
164 | try: |
|
164 | try: | |
165 | args = fancyopts.fancyopts(args, commands.globalopts, options) |
|
165 | args = fancyopts.fancyopts(args, commands.globalopts, options) | |
166 | except fancyopts.getopt.GetoptError, inst: |
|
166 | except fancyopts.getopt.GetoptError, inst: | |
167 | raise error.ParseError(None, inst) |
|
167 | raise error.ParseError(None, inst) | |
168 |
|
168 | |||
169 | if args: |
|
169 | if args: | |
170 | cmd, args = args[0], args[1:] |
|
170 | cmd, args = args[0], args[1:] | |
171 | aliases, i = cmdutil.findcmd(cmd, commands.table, |
|
171 | aliases, i = cmdutil.findcmd(cmd, commands.table, | |
172 | ui.config("ui", "strict")) |
|
172 | ui.config("ui", "strict")) | |
173 | cmd = aliases[0] |
|
173 | cmd = aliases[0] | |
174 | defaults = ui.config("defaults", cmd) |
|
174 | defaults = ui.config("defaults", cmd) | |
175 | if defaults: |
|
175 | if defaults: | |
176 | args = shlex.split(defaults) + args |
|
176 | args = shlex.split(defaults) + args | |
177 | c = list(i[1]) |
|
177 | c = list(i[1]) | |
178 | else: |
|
178 | else: | |
179 | cmd = None |
|
179 | cmd = None | |
180 | c = [] |
|
180 | c = [] | |
181 |
|
181 | |||
182 | # combine global options into local |
|
182 | # combine global options into local | |
183 | for o in commands.globalopts: |
|
183 | for o in commands.globalopts: | |
184 | c.append((o[0], o[1], options[o[1]], o[3])) |
|
184 | c.append((o[0], o[1], options[o[1]], o[3])) | |
185 |
|
185 | |||
186 | try: |
|
186 | try: | |
187 | args = fancyopts.fancyopts(args, c, cmdoptions, True) |
|
187 | args = fancyopts.fancyopts(args, c, cmdoptions, True) | |
188 | except fancyopts.getopt.GetoptError, inst: |
|
188 | except fancyopts.getopt.GetoptError, inst: | |
189 | raise error.ParseError(cmd, inst) |
|
189 | raise error.ParseError(cmd, inst) | |
190 |
|
190 | |||
191 | # separate global options back out |
|
191 | # separate global options back out | |
192 | for o in commands.globalopts: |
|
192 | for o in commands.globalopts: | |
193 | n = o[1] |
|
193 | n = o[1] | |
194 | options[n] = cmdoptions[n] |
|
194 | options[n] = cmdoptions[n] | |
195 | del cmdoptions[n] |
|
195 | del cmdoptions[n] | |
196 |
|
196 | |||
197 | return (cmd, cmd and i[0] or None, args, options, cmdoptions) |
|
197 | return (cmd, cmd and i[0] or None, args, options, cmdoptions) | |
198 |
|
198 | |||
199 | def _parseconfig(config): |
|
199 | def _parseconfig(config): | |
200 | """parse the --config options from the command line""" |
|
200 | """parse the --config options from the command line""" | |
201 | parsed = [] |
|
201 | parsed = [] | |
202 | for cfg in config: |
|
202 | for cfg in config: | |
203 | try: |
|
203 | try: | |
204 | name, value = cfg.split('=', 1) |
|
204 | name, value = cfg.split('=', 1) | |
205 | section, name = name.split('.', 1) |
|
205 | section, name = name.split('.', 1) | |
206 | if not section or not name: |
|
206 | if not section or not name: | |
207 | raise IndexError |
|
207 | raise IndexError | |
208 | parsed.append((section, name, value)) |
|
208 | parsed.append((section, name, value)) | |
209 | except (IndexError, ValueError): |
|
209 | except (IndexError, ValueError): | |
210 | raise util.Abort(_('malformed --config option: %s') % cfg) |
|
210 | raise util.Abort(_('malformed --config option: %s') % cfg) | |
211 | return parsed |
|
211 | return parsed | |
212 |
|
212 | |||
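For instance (values invented), each well-formed item becomes a (section, name, value) tuple, and anything missing the dot or the equals sign aborts:

    print(_parseconfig(['ui.username=me', 'diff.git=1']))
    # -> [('ui', 'username', 'me'), ('diff', 'git', '1')]
    _parseconfig(['nodot'])  # raises util.Abort: malformed --config option: nodot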
213 | def _earlygetopt(aliases, args): |
|
213 | def _earlygetopt(aliases, args): | |
214 | """Return list of values for an option (or aliases). |
|
214 | """Return list of values for an option (or aliases). | |
215 |
|
215 | |||
216 | The values are listed in the order they appear in args. |
|
216 | The values are listed in the order they appear in args. | |
217 | The options and values are removed from args. |
|
217 | The options and values are removed from args. | |
218 | """ |
|
218 | """ | |
219 | try: |
|
219 | try: | |
220 | argcount = args.index("--") |
|
220 | argcount = args.index("--") | |
221 | except ValueError: |
|
221 | except ValueError: | |
222 | argcount = len(args) |
|
222 | argcount = len(args) | |
223 | shortopts = [opt for opt in aliases if len(opt) == 2] |
|
223 | shortopts = [opt for opt in aliases if len(opt) == 2] | |
224 | values = [] |
|
224 | values = [] | |
225 | pos = 0 |
|
225 | pos = 0 | |
226 | while pos < argcount: |
|
226 | while pos < argcount: | |
227 | if args[pos] in aliases: |
|
227 | if args[pos] in aliases: | |
228 | if pos + 1 >= argcount: |
|
228 | if pos + 1 >= argcount: | |
229 | # ignore and let getopt report an error if there is no value |
|
229 | # ignore and let getopt report an error if there is no value | |
230 | break |
|
230 | break | |
231 | del args[pos] |
|
231 | del args[pos] | |
232 | values.append(args.pop(pos)) |
|
232 | values.append(args.pop(pos)) | |
233 | argcount -= 2 |
|
233 | argcount -= 2 | |
234 | elif args[pos][:2] in shortopts: |
|
234 | elif args[pos][:2] in shortopts: | |
235 | # short option can have no following space, e.g. hg log -Rfoo |
|
235 | # short option can have no following space, e.g. hg log -Rfoo | |
236 | values.append(args.pop(pos)[2:]) |
|
236 | values.append(args.pop(pos)[2:]) | |
237 | argcount -= 1 |
|
237 | argcount -= 1 | |
238 | else: |
|
238 | else: | |
239 | pos += 1 |
|
239 | pos += 1 | |
240 | return values |
|
240 | return values | |
241 |
|
241 | |||
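A hypothetical call showing the behaviour the docstring above promises, including the glued short form (-Rbar) and the fact that everything after "--" is left alone:

    args = ['log', '-R', 'foo', '-Rbar', '--', '-R', 'baz']
    print(_earlygetopt(['-R', '--repository', '--repo'], args))
    # -> ['foo', 'bar']              (values in order of appearance)
    print(args)
    # -> ['log', '--', '-R', 'baz']  (options consumed only before "--")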
242 | def runcommand(lui, repo, cmd, fullargs, ui, options, d): |
|
242 | def runcommand(lui, repo, cmd, fullargs, ui, options, d): | |
243 | # run pre-hook, and abort if it fails |
|
243 | # run pre-hook, and abort if it fails | |
244 | ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs)) |
|
244 | ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs)) | |
245 | if ret: |
|
245 | if ret: | |
246 | return ret |
|
246 | return ret | |
247 | ret = _runcommand(ui, options, cmd, d) |
|
247 | ret = _runcommand(ui, options, cmd, d) | |
248 | # run post-hook, passing command result |
|
248 | # run post-hook, passing command result | |
249 | hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), |
|
249 | hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), | |
250 | result = ret) |
|
250 | result = ret) | |
251 | return ret |
|
251 | return ret | |
252 |
|
252 | |||
253 | _loaded = {} |
|
253 | _loaded = {} | |
254 | def _dispatch(ui, args): |
|
254 | def _dispatch(ui, args): | |
255 | # read --config before doing anything else |
|
255 | # read --config before doing anything else | |
256 | # (e.g. to change trust settings for reading .hg/hgrc) |
|
256 | # (e.g. to change trust settings for reading .hg/hgrc) | |
257 | config = _earlygetopt(['--config'], args) |
|
257 | config = _earlygetopt(['--config'], args) | |
258 | if config: |
|
258 | if config: | |
259 | ui.updateopts(config=_parseconfig(config)) |
|
259 | ui.updateopts(config=_parseconfig(config)) | |
260 |
|
260 | |||
261 | # check for cwd |
|
261 | # check for cwd | |
262 | cwd = _earlygetopt(['--cwd'], args) |
|
262 | cwd = _earlygetopt(['--cwd'], args) | |
263 | if cwd: |
|
263 | if cwd: | |
264 | os.chdir(cwd[-1]) |
|
264 | os.chdir(cwd[-1]) | |
265 |
|
265 | |||
266 | # read the local repository .hgrc into a local ui object |
|
266 | # read the local repository .hgrc into a local ui object | |
267 | path = _findrepo(os.getcwd()) or "" |
|
267 | path = _findrepo(os.getcwd()) or "" | |
268 | if not path: |
|
268 | if not path: | |
269 | lui = ui |
|
269 | lui = ui | |
270 | if path: |
|
270 | if path: | |
271 | try: |
|
271 | try: | |
272 | lui = _ui.ui(parentui=ui) |
|
272 | lui = _ui.ui(parentui=ui) | |
273 | lui.readconfig(os.path.join(path, ".hg", "hgrc")) |
|
273 | lui.readconfig(os.path.join(path, ".hg", "hgrc")) | |
274 | except IOError: |
|
274 | except IOError: | |
275 | pass |
|
275 | pass | |
276 |
|
276 | |||
277 | # now we can expand paths, even ones in .hg/hgrc |
|
277 | # now we can expand paths, even ones in .hg/hgrc | |
278 | rpath = _earlygetopt(["-R", "--repository", "--repo"], args) |
|
278 | rpath = _earlygetopt(["-R", "--repository", "--repo"], args) | |
279 | if rpath: |
|
279 | if rpath: | |
280 | path = lui.expandpath(rpath[-1]) |
|
280 | path = lui.expandpath(rpath[-1]) | |
281 | lui = _ui.ui(parentui=ui) |
|
281 | lui = _ui.ui(parentui=ui) | |
282 | lui.readconfig(os.path.join(path, ".hg", "hgrc")) |
|
282 | lui.readconfig(os.path.join(path, ".hg", "hgrc")) | |
283 |
|
283 | |||
284 | extensions.loadall(lui) |
|
284 | extensions.loadall(lui) | |
285 | for name, module in extensions.extensions(): |
|
285 | for name, module in extensions.extensions(): | |
286 | if name in _loaded: |
|
286 | if name in _loaded: | |
287 | continue |
|
287 | continue | |
288 |
|
288 | |||
289 | # setup extensions |
|
289 | # setup extensions | |
290 | # TODO this should be generalized to scheme, where extensions can |
|
290 | # TODO this should be generalized to scheme, where extensions can | |
291 | # redepend on other extensions. then we should toposort them, and |
|
291 | # redepend on other extensions. then we should toposort them, and | |
292 | # do initialization in correct order |
|
292 | # do initialization in correct order | |
293 | extsetup = getattr(module, 'extsetup', None) |
|
293 | extsetup = getattr(module, 'extsetup', None) | |
294 | if extsetup: |
|
294 | if extsetup: | |
295 | extsetup() |
|
295 | extsetup() | |
296 |
|
296 | |||
297 | cmdtable = getattr(module, 'cmdtable', {}) |
|
297 | cmdtable = getattr(module, 'cmdtable', {}) | |
298 | overrides = [cmd for cmd in cmdtable if cmd in commands.table] |
|
298 | overrides = [cmd for cmd in cmdtable if cmd in commands.table] | |
299 | if overrides: |
|
299 | if overrides: | |
300 | ui.warn(_("extension '%s' overrides commands: %s\n") |
|
300 | ui.warn(_("extension '%s' overrides commands: %s\n") | |
301 | % (name, " ".join(overrides))) |
|
301 | % (name, " ".join(overrides))) | |
302 | commands.table.update(cmdtable) |
|
302 | commands.table.update(cmdtable) | |
303 | _loaded[name] = 1 |
|
303 | _loaded[name] = 1 | |
304 | # check for fallback encoding |
|
304 | # check for fallback encoding | |
305 | fallback = lui.config('ui', 'fallbackencoding') |
|
305 | fallback = lui.config('ui', 'fallbackencoding') | |
306 | if fallback: |
|
306 | if fallback: | |
307 | util._fallbackencoding = fallback |
|
307 | util._fallbackencoding = fallback | |
308 |
|
308 | |||
309 | fullargs = args |
|
309 | fullargs = args | |
310 | cmd, func, args, options, cmdoptions = _parse(lui, args) |
|
310 | cmd, func, args, options, cmdoptions = _parse(lui, args) | |
311 |
|
311 | |||
312 | if options["config"]: |
|
312 | if options["config"]: | |
313 | raise util.Abort(_("Option --config may not be abbreviated!")) |
|
313 | raise util.Abort(_("Option --config may not be abbreviated!")) | |
314 | if options["cwd"]: |
|
314 | if options["cwd"]: | |
315 | raise util.Abort(_("Option --cwd may not be abbreviated!")) |
|
315 | raise util.Abort(_("Option --cwd may not be abbreviated!")) | |
316 | if options["repository"]: |
|
316 | if options["repository"]: | |
317 | raise util.Abort(_( |
|
317 | raise util.Abort(_( | |
318 | "Option -R has to be separated from other options (i.e. not -qR) " |
|
318 | "Option -R has to be separated from other options (i.e. not -qR) " | |
319 | "and --repository may only be abbreviated as --repo!")) |
|
319 | "and --repository may only be abbreviated as --repo!")) | |
320 |
|
320 | |||
321 | if options["encoding"]: |
|
321 | if options["encoding"]: | |
322 | util._encoding = options["encoding"] |
|
322 | util._encoding = options["encoding"] | |
323 | if options["encodingmode"]: |
|
323 | if options["encodingmode"]: | |
324 | util._encodingmode = options["encodingmode"] |
|
324 | util._encodingmode = options["encodingmode"] | |
325 | if options["time"]: |
|
325 | if options["time"]: | |
326 | def get_times(): |
|
326 | def get_times(): | |
327 | t = os.times() |
|
327 | t = os.times() | |
328 | if t[4] == 0.0: # Windows leaves this as zero, so use time.clock() |
|
328 | if t[4] == 0.0: # Windows leaves this as zero, so use time.clock() | |
329 | t = (t[0], t[1], t[2], t[3], time.clock()) |
|
329 | t = (t[0], t[1], t[2], t[3], time.clock()) | |
330 | return t |
|
330 | return t | |
331 | s = get_times() |
|
331 | s = get_times() | |
332 | def print_time(): |
|
332 | def print_time(): | |
333 | t = get_times() |
|
333 | t = get_times() | |
334 | ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % |
|
334 | ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % | |
335 | (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) |
|
335 | (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) | |
336 | atexit.register(print_time) |
|
336 | atexit.register(print_time) | |
337 |
|
337 | |||
338 | ui.updateopts(options["verbose"], options["debug"], options["quiet"], |
|
338 | ui.updateopts(options["verbose"], options["debug"], options["quiet"], | |
339 | not options["noninteractive"], options["traceback"]) |
|
339 | not options["noninteractive"], options["traceback"]) | |
340 |
|
340 | |||
341 | if options['help']: |
|
341 | if options['help']: | |
342 | return commands.help_(ui, cmd, options['version']) |
|
342 | return commands.help_(ui, cmd, options['version']) | |
343 | elif options['version']: |
|
343 | elif options['version']: | |
344 | return commands.version_(ui) |
|
344 | return commands.version_(ui) | |
345 | elif not cmd: |
|
345 | elif not cmd: | |
346 | return commands.help_(ui, 'shortlist') |
|
346 | return commands.help_(ui, 'shortlist') | |
347 |
|
347 | |||
348 | repo = None |
|
348 | repo = None | |
349 | if cmd not in commands.norepo.split(): |
|
349 | if cmd not in commands.norepo.split(): | |
350 | try: |
|
350 | try: | |
351 | repo = hg.repository(ui, path=path) |
|
351 | repo = hg.repository(ui, path=path) | |
352 | ui = repo.ui |
|
352 | ui = repo.ui | |
353 | if not repo.local(): |
|
353 | if not repo.local(): | |
354 | raise util.Abort(_("repository '%s' is not local") % path) |
|
354 | raise util.Abort(_("repository '%s' is not local") % path) | |
355 | ui.setconfig("bundle", "mainreporoot", repo.root) |
|
355 | ui.setconfig("bundle", "mainreporoot", repo.root) | |
356 | except error.RepoError: |
|
356 | except error.RepoError: | |
357 | if cmd not in commands.optionalrepo.split(): |
|
357 | if cmd not in commands.optionalrepo.split(): | |
358 | if args and not path: # try to infer -R from command args |
|
358 | if args and not path: # try to infer -R from command args | |
359 | repos = map(_findrepo, args) |
|
359 | repos = map(_findrepo, args) | |
360 | guess = repos[0] |
|
360 | guess = repos[0] | |
361 | if guess and repos.count(guess) == len(repos): |
|
361 | if guess and repos.count(guess) == len(repos): | |
362 | return _dispatch(ui, ['--repository', guess] + fullargs) |
|
362 | return _dispatch(ui, ['--repository', guess] + fullargs) | |
363 | if not path: |
|
363 | if not path: | |
364 | raise error.RepoError(_("There is no Mercurial repository" |
|
364 | raise error.RepoError(_("There is no Mercurial repository" | |
365 | " here (.hg not found)")) |
|
365 | " here (.hg not found)")) | |
366 | raise |
|
366 | raise | |
367 | args.insert(0, repo) |
|
367 | args.insert(0, repo) | |
368 | elif rpath: |
|
368 | elif rpath: | |
369 | ui.warn("warning: --repository ignored\n") |
|
369 | ui.warn("warning: --repository ignored\n") | |
370 |
|
370 | |||
371 | d = lambda: util.checksignature(func)(ui, *args, **cmdoptions) |
|
371 | d = lambda: util.checksignature(func)(ui, *args, **cmdoptions) | |
372 | return runcommand(lui, repo, cmd, fullargs, ui, options, d) |
|
372 | return runcommand(lui, repo, cmd, fullargs, ui, options, d) | |
373 |
|
373 | |||
374 | def _runcommand(ui, options, cmd, cmdfunc): |
|
374 | def _runcommand(ui, options, cmd, cmdfunc): | |
375 | def checkargs(): |
|
375 | def checkargs(): | |
376 | try: |
|
376 | try: | |
377 | return cmdfunc() |
|
377 | return cmdfunc() | |
378 | except error.SignatureError: |
|
378 | except error.SignatureError: | |
379 | raise error.ParseError(cmd, _("invalid arguments")) |
|
379 | raise error.ParseError(cmd, _("invalid arguments")) | |
380 |
|
380 | |||
381 | if options['profile']: |
|
381 | if options['profile']: | |
382 | import hotshot, hotshot.stats |
|
382 | import hotshot, hotshot.stats | |
383 | prof = hotshot.Profile("hg.prof") |
|
383 | prof = hotshot.Profile("hg.prof") | |
384 | try: |
|
384 | try: | |
385 | try: |
|
385 | try: | |
386 | return prof.runcall(checkargs) |
|
386 | return prof.runcall(checkargs) | |
387 | except: |
|
387 | except: | |
388 | try: |
|
388 | try: | |
389 | ui.warn(_('exception raised - generating ' |
|
389 | ui.warn(_('exception raised - generating ' | |
390 | 'profile anyway\n')) |
|
390 | 'profile anyway\n')) | |
391 | except: |
|
391 | except: | |
392 | pass |
|
392 | pass | |
393 | raise |
|
393 | raise | |
394 | finally: |
|
394 | finally: | |
395 | prof.close() |
|
395 | prof.close() | |
396 | stats = hotshot.stats.load("hg.prof") |
|
396 | stats = hotshot.stats.load("hg.prof") | |
397 | stats.strip_dirs() |
|
397 | stats.strip_dirs() | |
398 | stats.sort_stats('time', 'calls') |
|
398 | stats.sort_stats('time', 'calls') | |
399 | stats.print_stats(40) |
|
399 | stats.print_stats(40) | |
400 | elif options['lsprof']: |
|
400 | elif options['lsprof']: | |
401 | try: |
|
401 | try: | |
402 | from mercurial import lsprof |
|
402 | from mercurial import lsprof | |
403 | except ImportError: |
|
403 | except ImportError: | |
404 | raise util.Abort(_( |
|
404 | raise util.Abort(_( | |
405 | 'lsprof not available - install from ' |
|
405 | 'lsprof not available - install from ' | |
406 | 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/')) |
|
406 | 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/')) | |
407 | p = lsprof.Profiler() |
|
407 | p = lsprof.Profiler() | |
408 | p.enable(subcalls=True) |
|
408 | p.enable(subcalls=True) | |
409 | try: |
|
409 | try: | |
410 | return checkargs() |
|
410 | return checkargs() | |
411 | finally: |
|
411 | finally: | |
412 | p.disable() |
|
412 | p.disable() | |
413 | stats = lsprof.Stats(p.getstats()) |
|
413 | stats = lsprof.Stats(p.getstats()) | |
414 | stats.sort() |
|
414 | stats.sort() | |
415 | stats.pprint(top=10, file=sys.stderr, climit=5) |
|
415 | stats.pprint(top=10, file=sys.stderr, climit=5) | |
416 | else: |
|
416 | else: | |
417 | return checkargs() |
|
417 | return checkargs() |
@@ -1,221 +1,221 b'' | |||||
1 | # filemerge.py - file-level merge handling for Mercurial |
|
1 | # filemerge.py - file-level merge handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import |
|
8 | from node import short | |
9 | from i18n import _ |
|
9 | from i18n import _ | |
10 | import util, os, tempfile, simplemerge, re, filecmp |
|
10 | import util, os, tempfile, simplemerge, re, filecmp | |
11 |
|
11 | |||
12 | def _toolstr(ui, tool, part, default=""): |
|
12 | def _toolstr(ui, tool, part, default=""): | |
13 | return ui.config("merge-tools", tool + "." + part, default) |
|
13 | return ui.config("merge-tools", tool + "." + part, default) | |
14 |
|
14 | |||
15 | def _toolbool(ui, tool, part, default=False): |
|
15 | def _toolbool(ui, tool, part, default=False): | |
16 | return ui.configbool("merge-tools", tool + "." + part, default) |
|
16 | return ui.configbool("merge-tools", tool + "." + part, default) | |
17 |
|
17 | |||
18 | def _findtool(ui, tool): |
|
18 | def _findtool(ui, tool): | |
19 | if tool in ("internal:fail", "internal:local", "internal:other"): |
|
19 | if tool in ("internal:fail", "internal:local", "internal:other"): | |
20 | return tool |
|
20 | return tool | |
21 | k = _toolstr(ui, tool, "regkey") |
|
21 | k = _toolstr(ui, tool, "regkey") | |
22 | if k: |
|
22 | if k: | |
23 | p = util.lookup_reg(k, _toolstr(ui, tool, "regname")) |
|
23 | p = util.lookup_reg(k, _toolstr(ui, tool, "regname")) | |
24 | if p: |
|
24 | if p: | |
25 | p = util.find_exe(p + _toolstr(ui, tool, "regappend")) |
|
25 | p = util.find_exe(p + _toolstr(ui, tool, "regappend")) | |
26 | if p: |
|
26 | if p: | |
27 | return p |
|
27 | return p | |
28 | return util.find_exe(_toolstr(ui, tool, "executable", tool)) |
|
28 | return util.find_exe(_toolstr(ui, tool, "executable", tool)) | |
29 |
|
29 | |||
30 | def _picktool(repo, ui, path, binary, symlink): |
|
30 | def _picktool(repo, ui, path, binary, symlink): | |
31 | def check(tool, pat, symlink, binary): |
|
31 | def check(tool, pat, symlink, binary): | |
32 | tmsg = tool |
|
32 | tmsg = tool | |
33 | if pat: |
|
33 | if pat: | |
34 | tmsg += " specified for " + pat |
|
34 | tmsg += " specified for " + pat | |
35 | if not _findtool(ui, tool): |
|
35 | if not _findtool(ui, tool): | |
36 | if pat: # explicitly requested tool deserves a warning |
|
36 | if pat: # explicitly requested tool deserves a warning | |
37 | ui.warn(_("couldn't find merge tool %s\n") % tmsg) |
|
37 | ui.warn(_("couldn't find merge tool %s\n") % tmsg) | |
38 | else: # configured but non-existing tools are more silent |
|
38 | else: # configured but non-existing tools are more silent | |
39 | ui.note(_("couldn't find merge tool %s\n") % tmsg) |
|
39 | ui.note(_("couldn't find merge tool %s\n") % tmsg) | |
40 | elif symlink and not _toolbool(ui, tool, "symlink"): |
|
40 | elif symlink and not _toolbool(ui, tool, "symlink"): | |
41 | ui.warn(_("tool %s can't handle symlinks\n") % tmsg) |
|
41 | ui.warn(_("tool %s can't handle symlinks\n") % tmsg) | |
42 | elif binary and not _toolbool(ui, tool, "binary"): |
|
42 | elif binary and not _toolbool(ui, tool, "binary"): | |
43 | ui.warn(_("tool %s can't handle binary\n") % tmsg) |
|
43 | ui.warn(_("tool %s can't handle binary\n") % tmsg) | |
44 | elif not util.gui() and _toolbool(ui, tool, "gui"): |
|
44 | elif not util.gui() and _toolbool(ui, tool, "gui"): | |
45 | ui.warn(_("tool %s requires a GUI\n") % tmsg) |
|
45 | ui.warn(_("tool %s requires a GUI\n") % tmsg) | |
46 | else: |
|
46 | else: | |
47 | return True |
|
47 | return True | |
48 | return False |
|
48 | return False | |
49 |
|
49 | |||
50 | # HGMERGE takes precedence |
|
50 | # HGMERGE takes precedence | |
51 | hgmerge = os.environ.get("HGMERGE") |
|
51 | hgmerge = os.environ.get("HGMERGE") | |
52 | if hgmerge: |
|
52 | if hgmerge: | |
53 | return (hgmerge, hgmerge) |
|
53 | return (hgmerge, hgmerge) | |
54 |
|
54 | |||
55 | # then patterns |
|
55 | # then patterns | |
56 | for pat, tool in ui.configitems("merge-patterns"): |
|
56 | for pat, tool in ui.configitems("merge-patterns"): | |
57 | mf = util.matcher(repo.root, "", [pat], [], [])[1] |
|
57 | mf = util.matcher(repo.root, "", [pat], [], [])[1] | |
58 | if mf(path) and check(tool, pat, symlink, False): |
|
58 | if mf(path) and check(tool, pat, symlink, False): | |
59 | toolpath = _findtool(ui, tool) |
|
59 | toolpath = _findtool(ui, tool) | |
60 | return (tool, '"' + toolpath + '"') |
|
60 | return (tool, '"' + toolpath + '"') | |
61 |
|
61 | |||
62 | # then merge tools |
|
62 | # then merge tools | |
63 | tools = {} |
|
63 | tools = {} | |
64 | for k,v in ui.configitems("merge-tools"): |
|
64 | for k,v in ui.configitems("merge-tools"): | |
65 | t = k.split('.')[0] |
|
65 | t = k.split('.')[0] | |
66 | if t not in tools: |
|
66 | if t not in tools: | |
67 | tools[t] = int(_toolstr(ui, t, "priority", "0")) |
|
67 | tools[t] = int(_toolstr(ui, t, "priority", "0")) | |
68 | names = tools.keys() |
|
68 | names = tools.keys() | |
69 | tools = util.sort([(-p,t) for t,p in tools.items()]) |
|
69 | tools = util.sort([(-p,t) for t,p in tools.items()]) | |
70 | uimerge = ui.config("ui", "merge") |
|
70 | uimerge = ui.config("ui", "merge") | |
71 | if uimerge: |
|
71 | if uimerge: | |
72 | if uimerge not in names: |
|
72 | if uimerge not in names: | |
73 | return (uimerge, uimerge) |
|
73 | return (uimerge, uimerge) | |
74 | tools.insert(0, (None, uimerge)) # highest priority |
|
74 | tools.insert(0, (None, uimerge)) # highest priority | |
75 | tools.append((None, "hgmerge")) # the old default, if found |
|
75 | tools.append((None, "hgmerge")) # the old default, if found | |
76 | for p,t in tools: |
|
76 | for p,t in tools: | |
77 | if check(t, None, symlink, binary): |
|
77 | if check(t, None, symlink, binary): | |
78 | toolpath = _findtool(ui, t) |
|
78 | toolpath = _findtool(ui, t) | |
79 | return (t, '"' + toolpath + '"') |
|
79 | return (t, '"' + toolpath + '"') | |
80 | # internal merge as last resort |
|
80 | # internal merge as last resort | |
81 | return (not (symlink or binary) and "internal:merge" or None, None) |
|
81 | return (not (symlink or binary) and "internal:merge" or None, None) | |
82 |
|
82 | |||
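So the selection order above is: $HGMERGE, then the first matching merge-patterns entry, then the merge-tools entries by descending priority, with ui.merge forced to the front and the legacy hgmerge appended as a fallback. The priority ordering is just a sort on negated keys; in isolation, assuming util.sort behaves like the built-in sorted (tool names invented):

    tools = {"kdiff3": 7, "meld": 0, "vimdiff": -10}
    print(sorted([(-p, t) for t, p in tools.items()]))
    # -> [(-7, 'kdiff3'), (0, 'meld'), (10, 'vimdiff')], highest priority first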
83 | def _eoltype(data): |
|
83 | def _eoltype(data): | |
84 | "Guess the EOL type of a file" |
|
84 | "Guess the EOL type of a file" | |
85 | if '\0' in data: # binary |
|
85 | if '\0' in data: # binary | |
86 | return None |
|
86 | return None | |
87 | if '\r\n' in data: # Windows |
|
87 | if '\r\n' in data: # Windows | |
88 | return '\r\n' |
|
88 | return '\r\n' | |
89 | if '\r' in data: # Old Mac |
|
89 | if '\r' in data: # Old Mac | |
90 | return '\r' |
|
90 | return '\r' | |
91 | if '\n' in data: # UNIX |
|
91 | if '\n' in data: # UNIX | |
92 | return '\n' |
|
92 | return '\n' | |
93 | return None # unknown |
|
93 | return None # unknown | |
94 |
|
94 | |||
95 | def _matcheol(file, origfile): |
|
95 | def _matcheol(file, origfile): | |
96 | "Convert EOL markers in a file to match origfile" |
|
96 | "Convert EOL markers in a file to match origfile" | |
97 | tostyle = _eoltype(open(origfile, "rb").read()) |
|
97 | tostyle = _eoltype(open(origfile, "rb").read()) | |
98 | if tostyle: |
|
98 | if tostyle: | |
99 | data = open(file, "rb").read() |
|
99 | data = open(file, "rb").read() | |
100 | style = _eoltype(data) |
|
100 | style = _eoltype(data) | |
101 | if style: |
|
101 | if style: | |
102 | newdata = data.replace(style, tostyle) |
|
102 | newdata = data.replace(style, tostyle) | |
103 | if newdata != data: |
|
103 | if newdata != data: | |
104 | open(file, "wb").write(newdata) |
|
104 | open(file, "wb").write(newdata) | |
105 |
|
105 | |||
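A few sanity checks for the EOL guesser above, assuming the Python 2 of this codebase where str is bytes:

    assert _eoltype("foo\r\nbar\r\n") == "\r\n"  # Windows
    assert _eoltype("foo\rbar\r") == "\r"        # old Mac
    assert _eoltype("foo\nbar\n") == "\n"        # UNIX
    assert _eoltype("fo\0o") is None             # binary: no conversion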
106 | def filemerge(repo, mynode, orig, fcd, fco, fca): |
|
106 | def filemerge(repo, mynode, orig, fcd, fco, fca): | |
107 | """perform a 3-way merge in the working directory |
|
107 | """perform a 3-way merge in the working directory | |
108 |
|
108 | |||
109 | mynode = parent node before merge |
|
109 | mynode = parent node before merge | |
110 | orig = original local filename before merge |
|
110 | orig = original local filename before merge | |
111 | fco = other file context |
|
111 | fco = other file context | |
112 | fca = ancestor file context |
|
112 | fca = ancestor file context | |
113 | fcd = local file context for current/destination file |
|
113 | fcd = local file context for current/destination file | |
114 | """ |
|
114 | """ | |
115 |
|
115 | |||
116 | def temp(prefix, ctx): |
|
116 | def temp(prefix, ctx): | |
117 | pre = "%s~%s." % (os.path.basename(ctx.path()), prefix) |
|
117 | pre = "%s~%s." % (os.path.basename(ctx.path()), prefix) | |
118 | (fd, name) = tempfile.mkstemp(prefix=pre) |
|
118 | (fd, name) = tempfile.mkstemp(prefix=pre) | |
119 | data = repo.wwritedata(ctx.path(), ctx.data()) |
|
119 | data = repo.wwritedata(ctx.path(), ctx.data()) | |
120 | f = os.fdopen(fd, "wb") |
|
120 | f = os.fdopen(fd, "wb") | |
121 | f.write(data) |
|
121 | f.write(data) | |
122 | f.close() |
|
122 | f.close() | |
123 | return name |
|
123 | return name | |
124 |
|
124 | |||
125 | def isbin(ctx): |
|
125 | def isbin(ctx): | |
126 | try: |
|
126 | try: | |
127 | return util.binary(ctx.data()) |
|
127 | return util.binary(ctx.data()) | |
128 | except IOError: |
|
128 | except IOError: | |
129 | return False |
|
129 | return False | |
130 |
|
130 | |||
131 | if not fco.cmp(fcd.data()): # files identical? |
|
131 | if not fco.cmp(fcd.data()): # files identical? | |
132 | return None |
|
132 | return None | |
133 |
|
133 | |||
134 | ui = repo.ui |
|
134 | ui = repo.ui | |
135 | fd = fcd.path() |
|
135 | fd = fcd.path() | |
136 | binary = isbin(fcd) or isbin(fco) or isbin(fca) |
|
136 | binary = isbin(fcd) or isbin(fco) or isbin(fca) | |
137 | symlink = 'l' in fcd.flags() + fco.flags() |
|
137 | symlink = 'l' in fcd.flags() + fco.flags() | |
138 | tool, toolpath = _picktool(repo, ui, fd, binary, symlink) |
|
138 | tool, toolpath = _picktool(repo, ui, fd, binary, symlink) | |
139 | ui.debug(_("picked tool '%s' for %s (binary %s symlink %s)\n") % |
|
139 | ui.debug(_("picked tool '%s' for %s (binary %s symlink %s)\n") % | |
140 | (tool, fd, binary, symlink)) |
|
140 | (tool, fd, binary, symlink)) | |
141 |
|
141 | |||
142 | if not tool: |
|
142 | if not tool: | |
143 | tool = "internal:local" |
|
143 | tool = "internal:local" | |
144 | if ui.prompt(_(" no tool found to merge %s\n" |
|
144 | if ui.prompt(_(" no tool found to merge %s\n" | |
145 | "keep (l)ocal or take (o)ther?") % fd, |
|
145 | "keep (l)ocal or take (o)ther?") % fd, | |
146 | _("[lo]"), _("l")) != _("l"): |
|
146 | _("[lo]"), _("l")) != _("l"): | |
147 | tool = "internal:other" |
|
147 | tool = "internal:other" | |
148 | if tool == "internal:local": |
|
148 | if tool == "internal:local": | |
149 | return 0 |
|
149 | return 0 | |
150 | if tool == "internal:other": |
|
150 | if tool == "internal:other": | |
151 | repo.wwrite(fd, fco.data(), fco.flags()) |
|
151 | repo.wwrite(fd, fco.data(), fco.flags()) | |
152 | return 0 |
|
152 | return 0 | |
153 | if tool == "internal:fail": |
|
153 | if tool == "internal:fail": | |
154 | return 1 |
|
154 | return 1 | |
155 |
|
155 | |||
156 | # do the actual merge |
|
156 | # do the actual merge | |
157 | a = repo.wjoin(fd) |
|
157 | a = repo.wjoin(fd) | |
158 | b = temp("base", fca) |
|
158 | b = temp("base", fca) | |
159 | c = temp("other", fco) |
|
159 | c = temp("other", fco) | |
160 | out = "" |
|
160 | out = "" | |
161 | back = a + ".orig" |
|
161 | back = a + ".orig" | |
162 | util.copyfile(a, back) |
|
162 | util.copyfile(a, back) | |
163 |
|
163 | |||
164 | if orig != fco.path(): |
|
164 | if orig != fco.path(): | |
165 | repo.ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd)) |
|
165 | repo.ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd)) | |
166 | else: |
|
166 | else: | |
167 | repo.ui.status(_("merging %s\n") % fd) |
|
167 | repo.ui.status(_("merging %s\n") % fd) | |
168 |
|
168 | |||
169 | repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcd, fco, fca)) |
|
169 | repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcd, fco, fca)) | |
170 |
|
170 | |||
171 | # do we attempt to simplemerge first? |
|
171 | # do we attempt to simplemerge first? | |
172 | if _toolbool(ui, tool, "premerge", not (binary or symlink)): |
|
172 | if _toolbool(ui, tool, "premerge", not (binary or symlink)): | |
173 | r = simplemerge.simplemerge(a, b, c, quiet=True) |
|
173 | r = simplemerge.simplemerge(a, b, c, quiet=True) | |
174 | if not r: |
|
174 | if not r: | |
175 | ui.debug(_(" premerge successful\n")) |
|
175 | ui.debug(_(" premerge successful\n")) | |
176 | os.unlink(back) |
|
176 | os.unlink(back) | |
177 | os.unlink(b) |
|
177 | os.unlink(b) | |
178 | os.unlink(c) |
|
178 | os.unlink(c) | |
179 | return 0 |
|
179 | return 0 | |
180 | util.copyfile(back, a) # restore from backup and try again |
|
180 | util.copyfile(back, a) # restore from backup and try again | |
181 |
|
181 | |||
182 | env = dict(HG_FILE=fd, |
|
182 | env = dict(HG_FILE=fd, | |
183 | HG_MY_NODE=short(mynode), |
|
183 | HG_MY_NODE=short(mynode), | |
184 | HG_OTHER_NODE=str(fco.changectx()), |
|
184 | HG_OTHER_NODE=str(fco.changectx()), | |
185 | HG_MY_ISLINK='l' in fcd.flags(), |
|
185 | HG_MY_ISLINK='l' in fcd.flags(), | |
186 | HG_OTHER_ISLINK='l' in fco.flags(), |
|
186 | HG_OTHER_ISLINK='l' in fco.flags(), | |
187 | HG_BASE_ISLINK='l' in fca.flags()) |
|
187 | HG_BASE_ISLINK='l' in fca.flags()) | |
188 |
|
188 | |||
189 | if tool == "internal:merge": |
|
189 | if tool == "internal:merge": | |
190 | r = simplemerge.simplemerge(a, b, c, label=['local', 'other']) |
|
190 | r = simplemerge.simplemerge(a, b, c, label=['local', 'other']) | |
191 | else: |
|
191 | else: | |
192 | args = _toolstr(ui, tool, "args", '$local $base $other') |
|
192 | args = _toolstr(ui, tool, "args", '$local $base $other') | |
193 | if "$output" in args: |
|
193 | if "$output" in args: | |
194 | out, a = a, back # read input from backup, write to original |
|
194 | out, a = a, back # read input from backup, write to original | |
195 | replace = dict(local=a, base=b, other=c, output=out) |
|
195 | replace = dict(local=a, base=b, other=c, output=out) | |
196 | args = re.sub("\$(local|base|other|output)", |
|
196 | args = re.sub("\$(local|base|other|output)", | |
197 | lambda x: '"%s"' % replace[x.group()[1:]], args) |
|
197 | lambda x: '"%s"' % replace[x.group()[1:]], args) | |
198 | r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env) |
|
198 | r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env) | |
199 |
|
199 | |||
200 | if not r and _toolbool(ui, tool, "checkconflicts"): |
|
200 | if not r and _toolbool(ui, tool, "checkconflicts"): | |
201 | if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()): |
|
201 | if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()): | |
202 | r = 1 |
|
202 | r = 1 | |
203 |
|
203 | |||
204 | if not r and _toolbool(ui, tool, "checkchanged"): |
|
204 | if not r and _toolbool(ui, tool, "checkchanged"): | |
205 | if filecmp.cmp(repo.wjoin(fd), back): |
|
205 | if filecmp.cmp(repo.wjoin(fd), back): | |
206 | if ui.prompt(_(" output file %s appears unchanged\n" |
|
206 | if ui.prompt(_(" output file %s appears unchanged\n" | |
207 | "was merge successful (yn)?") % fd, |
|
207 | "was merge successful (yn)?") % fd, | |
208 | _("[yn]"), _("n")) != _("y"): |
|
208 | _("[yn]"), _("n")) != _("y"): | |
209 | r = 1 |
|
209 | r = 1 | |
210 |
|
210 | |||
211 | if _toolbool(ui, tool, "fixeol"): |
|
211 | if _toolbool(ui, tool, "fixeol"): | |
212 | _matcheol(repo.wjoin(fd), back) |
|
212 | _matcheol(repo.wjoin(fd), back) | |
213 |
|
213 | |||
214 | if r: |
|
214 | if r: | |
215 | repo.ui.warn(_("merging %s failed!\n") % fd) |
|
215 | repo.ui.warn(_("merging %s failed!\n") % fd) | |
216 | else: |
|
216 | else: | |
217 | os.unlink(back) |
|
217 | os.unlink(back) | |
218 |
|
218 | |||
219 | os.unlink(b) |
|
219 | os.unlink(b) | |
220 | os.unlink(c) |
|
220 | os.unlink(c) | |
221 | return r |
|
221 | return r |
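
The block above builds the external merge-tool command line by expanding the $local, $base, $other and $output placeholders in the tool's configured args string and handing the result to util.system(). A minimal standalone sketch of that expansion step (expandargs and the file names are illustrative, not part of Mercurial's API):

import re

def expandargs(args, local, base, other, output=""):
    # Map each placeholder name to its concrete, quoted file name.
    replace = {"local": local, "base": base, "other": other, "output": output}
    # The same substitution performed above: $name -> "filename".
    return re.sub(r"\$(local|base|other|output)",
                  lambda m: '"%s"' % replace[m.group(1)], args)

# e.g. an args template as it might appear in a [merge-tools] hgrc section
cmdline = expandargs("$base $local $other -o $output",
                     local="file.txt", base="file.txt.base",
                     other="file.txt.other", output="file.txt")
assert cmdline == '"file.txt.base" "file.txt" "file.txt.other" -o "file.txt"'
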
@@ -1,76 +1,75 b'' | |||||
1 | # Revision graph generator for Mercurial |
|
1 | # Revision graph generator for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl> |
|
3 | # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl> | |
4 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> |
|
4 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of |
|
6 | # This software may be used and distributed according to the terms of | |
7 | # the GNU General Public License, incorporated herein by reference. |
|
7 | # the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 |
from node import nullrev |
|
9 | from node import nullrev | |
10 | import ui, hg, util, templatefilters |
|
|||
11 |
|
10 | |||
12 | def graph(repo, start_rev, stop_rev): |
|
11 | def graph(repo, start_rev, stop_rev): | |
13 | """incremental revision grapher |
|
12 | """incremental revision grapher | |
14 |
|
13 | |||
15 | This generator function walks through the revision history from |
|
14 | This generator function walks through the revision history from | |
16 | revision start_rev to revision stop_rev (which must be less than |
|
15 | revision start_rev to revision stop_rev (which must be less than | |
17 | or equal to start_rev) and for each revision emits tuples with the |
|
16 | or equal to start_rev) and for each revision emits tuples with the | |
18 | following elements: |
|
17 | following elements: | |
19 |
|
18 | |||
20 | - Current node |
|
19 | - Current node | |
21 | - Column and color for the current node |
|
20 | - Column and color for the current node | |
22 | - Edges; a list of (col, next_col, color) indicating the edges between |
|
21 | - Edges; a list of (col, next_col, color) indicating the edges between | |
23 | the current node and its parents. |
|
22 | the current node and its parents. | |
24 | - First line of the changeset description |
|
23 | - First line of the changeset description | |
25 | - The changeset author |
|
24 | - The changeset author | |
26 | - The changeset date/time |
|
25 | - The changeset date/time | |
27 | """ |
|
26 | """ | |
28 |
|
27 | |||
29 | if start_rev == nullrev and not stop_rev: |
|
28 | if start_rev == nullrev and not stop_rev: | |
30 | return |
|
29 | return | |
31 |
|
30 | |||
32 | assert start_rev >= stop_rev |
|
31 | assert start_rev >= stop_rev | |
33 | assert stop_rev >= 0 |
|
32 | assert stop_rev >= 0 | |
34 | curr_rev = start_rev |
|
33 | curr_rev = start_rev | |
35 | revs = [] |
|
34 | revs = [] | |
36 | cl = repo.changelog |
|
35 | cl = repo.changelog | |
37 | colors = {} |
|
36 | colors = {} | |
38 | new_color = 1 |
|
37 | new_color = 1 | |
39 |
|
38 | |||
40 | while curr_rev >= stop_rev: |
|
39 | while curr_rev >= stop_rev: | |
41 | # Compute revs and next_revs |
|
40 | # Compute revs and next_revs | |
42 | if curr_rev not in revs: |
|
41 | if curr_rev not in revs: | |
43 | revs.append(curr_rev) # new head |
|
42 | revs.append(curr_rev) # new head | |
44 | colors[curr_rev] = new_color |
|
43 | colors[curr_rev] = new_color | |
45 | new_color += 1 |
|
44 | new_color += 1 | |
46 |
|
45 | |||
47 | idx = revs.index(curr_rev) |
|
46 | idx = revs.index(curr_rev) | |
48 | color = colors.pop(curr_rev) |
|
47 | color = colors.pop(curr_rev) | |
49 | next = revs[:] |
|
48 | next = revs[:] | |
50 |
|
49 | |||
51 | # Add parents to next_revs |
|
50 | # Add parents to next_revs | |
52 | parents = [x for x in cl.parentrevs(curr_rev) if x != nullrev] |
|
51 | parents = [x for x in cl.parentrevs(curr_rev) if x != nullrev] | |
53 | addparents = [p for p in parents if p not in next] |
|
52 | addparents = [p for p in parents if p not in next] | |
54 | next[idx:idx + 1] = addparents |
|
53 | next[idx:idx + 1] = addparents | |
55 |
|
54 | |||
56 | # Set colors for the parents |
|
55 | # Set colors for the parents | |
57 | for i, p in enumerate(addparents): |
|
56 | for i, p in enumerate(addparents): | |
58 | if not i: |
|
57 | if not i: | |
59 | colors[p] = color |
|
58 | colors[p] = color | |
60 | else: |
|
59 | else: | |
61 | colors[p] = new_color |
|
60 | colors[p] = new_color | |
62 | new_color += 1 |
|
61 | new_color += 1 | |
63 |
|
62 | |||
64 | # Add edges to the graph |
|
63 | # Add edges to the graph | |
65 | edges = [] |
|
64 | edges = [] | |
66 | for col, r in enumerate(revs): |
|
65 | for col, r in enumerate(revs): | |
67 | if r in next: |
|
66 | if r in next: | |
68 | edges.append((col, next.index(r), colors[r])) |
|
67 | edges.append((col, next.index(r), colors[r])) | |
69 | elif r == curr_rev: |
|
68 | elif r == curr_rev: | |
70 | for p in parents: |
|
69 | for p in parents: | |
71 | edges.append((col, next.index(p), colors[p])) |
|
70 | edges.append((col, next.index(p), colors[p])) | |
72 |
|
71 | |||
73 | # Yield and move on |
|
72 | # Yield and move on | |
74 | yield (repo[curr_rev], (idx, color), edges) |
|
73 | yield (repo[curr_rev], (idx, color), edges) | |
75 | revs = next |
|
74 | revs = next | |
76 | curr_rev -= 1 |
|
75 | curr_rev -= 1 |
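
graph() above yields one tuple per revision: the changeset context, its (column, color) placement, and the edges connecting this row to the next. A rough sketch of how a consumer could turn such tuples into a one-row-per-revision text rendering (render_row and the sample rows are made up for illustration; a real caller would iterate graph(repo, start_rev, stop_rev) directly):

def render_row(width, column, edges):
    # One cell per column: 'o' marks the node, '|' an edge passing through.
    cells = [" "] * width
    for col, nextcol, color in edges:
        cells[col] = "|"
    cells[column] = "o"
    return " ".join(cells)

# Rows shaped like graph()'s output: (ctx, (column, color), edges).
rows = [
    ("tip",   (0, 1), [(0, 0, 1), (1, 1, 2)]),
    ("merge", (1, 2), [(0, 0, 1), (1, 0, 1)]),
    ("base",  (0, 1), [(0, 0, 1)]),
]
for name, (col, color), edges in rows:
    width = max(max(e[0], e[1]) for e in edges) + 1
    print("%s  %s" % (render_row(width, col, edges), name))
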
@@ -1,313 +1,312 b'' | |||||
1 | # hgweb/hgweb_mod.py - Web interface for a repository. |
|
1 | # hgweb/hgweb_mod.py - Web interface for a repository. | |
2 | # |
|
2 | # | |
3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 |
import os |
|
9 | import os | |
10 | from mercurial.node import hex, nullid |
|
|||
11 | from mercurial import ui, hg, util, hook, error |
|
10 | from mercurial import ui, hg, util, hook, error | |
12 | from mercurial import templater, templatefilters |
|
11 | from mercurial import templater, templatefilters | |
13 | from common import get_mtime, style_map, ErrorResponse |
|
12 | from common import get_mtime, style_map, ErrorResponse | |
14 | from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR |
|
13 | from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR | |
15 | from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED |
|
14 | from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED | |
16 | from request import wsgirequest |
|
15 | from request import wsgirequest | |
17 | import webcommands, protocol, webutil |
|
16 | import webcommands, protocol, webutil | |
18 |
|
17 | |||
19 | perms = { |
|
18 | perms = { | |
20 | 'changegroup': 'pull', |
|
19 | 'changegroup': 'pull', | |
21 | 'changegroupsubset': 'pull', |
|
20 | 'changegroupsubset': 'pull', | |
22 | 'unbundle': 'push', |
|
21 | 'unbundle': 'push', | |
23 | 'stream_out': 'pull', |
|
22 | 'stream_out': 'pull', | |
24 | } |
|
23 | } | |
25 |
|
24 | |||
26 | class hgweb(object): |
|
25 | class hgweb(object): | |
27 | def __init__(self, repo, name=None): |
|
26 | def __init__(self, repo, name=None): | |
28 | if isinstance(repo, str): |
|
27 | if isinstance(repo, str): | |
29 | parentui = ui.ui(report_untrusted=False, interactive=False) |
|
28 | parentui = ui.ui(report_untrusted=False, interactive=False) | |
30 | self.repo = hg.repository(parentui, repo) |
|
29 | self.repo = hg.repository(parentui, repo) | |
31 | else: |
|
30 | else: | |
32 | self.repo = repo |
|
31 | self.repo = repo | |
33 |
|
32 | |||
34 | hook.redirect(True) |
|
33 | hook.redirect(True) | |
35 | self.mtime = -1 |
|
34 | self.mtime = -1 | |
36 | self.reponame = name |
|
35 | self.reponame = name | |
37 | self.archives = 'zip', 'gz', 'bz2' |
|
36 | self.archives = 'zip', 'gz', 'bz2' | |
38 | self.stripecount = 1 |
|
37 | self.stripecount = 1 | |
39 | # a repo owner may set web.templates in .hg/hgrc to get any file |
|
38 | # a repo owner may set web.templates in .hg/hgrc to get any file | |
40 | # readable by the user running the CGI script |
|
39 | # readable by the user running the CGI script | |
41 | self.templatepath = self.config("web", "templates", |
|
40 | self.templatepath = self.config("web", "templates", | |
42 | templater.templatepath(), |
|
41 | templater.templatepath(), | |
43 | untrusted=False) |
|
42 | untrusted=False) | |
44 |
|
43 | |||
45 | # The CGI scripts are often run by a user different from the repo owner. |
|
44 | # The CGI scripts are often run by a user different from the repo owner. | |
46 | # Trust the settings from the .hg/hgrc files by default. |
|
45 | # Trust the settings from the .hg/hgrc files by default. | |
47 | def config(self, section, name, default=None, untrusted=True): |
|
46 | def config(self, section, name, default=None, untrusted=True): | |
48 | return self.repo.ui.config(section, name, default, |
|
47 | return self.repo.ui.config(section, name, default, | |
49 | untrusted=untrusted) |
|
48 | untrusted=untrusted) | |
50 |
|
49 | |||
51 | def configbool(self, section, name, default=False, untrusted=True): |
|
50 | def configbool(self, section, name, default=False, untrusted=True): | |
52 | return self.repo.ui.configbool(section, name, default, |
|
51 | return self.repo.ui.configbool(section, name, default, | |
53 | untrusted=untrusted) |
|
52 | untrusted=untrusted) | |
54 |
|
53 | |||
55 | def configlist(self, section, name, default=None, untrusted=True): |
|
54 | def configlist(self, section, name, default=None, untrusted=True): | |
56 | return self.repo.ui.configlist(section, name, default, |
|
55 | return self.repo.ui.configlist(section, name, default, | |
57 | untrusted=untrusted) |
|
56 | untrusted=untrusted) | |
58 |
|
57 | |||
59 | def refresh(self): |
|
58 | def refresh(self): | |
60 | mtime = get_mtime(self.repo.root) |
|
59 | mtime = get_mtime(self.repo.root) | |
61 | if mtime != self.mtime: |
|
60 | if mtime != self.mtime: | |
62 | self.mtime = mtime |
|
61 | self.mtime = mtime | |
63 | self.repo = hg.repository(self.repo.ui, self.repo.root) |
|
62 | self.repo = hg.repository(self.repo.ui, self.repo.root) | |
64 | self.maxchanges = int(self.config("web", "maxchanges", 10)) |
|
63 | self.maxchanges = int(self.config("web", "maxchanges", 10)) | |
65 | self.stripecount = int(self.config("web", "stripes", 1)) |
|
64 | self.stripecount = int(self.config("web", "stripes", 1)) | |
66 | self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) |
|
65 | self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) | |
67 | self.maxfiles = int(self.config("web", "maxfiles", 10)) |
|
66 | self.maxfiles = int(self.config("web", "maxfiles", 10)) | |
68 | self.allowpull = self.configbool("web", "allowpull", True) |
|
67 | self.allowpull = self.configbool("web", "allowpull", True) | |
69 | self.encoding = self.config("web", "encoding", util._encoding) |
|
68 | self.encoding = self.config("web", "encoding", util._encoding) | |
70 |
|
69 | |||
71 | def run(self): |
|
70 | def run(self): | |
72 | if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): |
|
71 | if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): | |
73 | raise RuntimeError("This function is only intended to be called while running as a CGI script.") |
|
72 | raise RuntimeError("This function is only intended to be called while running as a CGI script.") | |
74 | import mercurial.hgweb.wsgicgi as wsgicgi |
|
73 | import mercurial.hgweb.wsgicgi as wsgicgi | |
75 | wsgicgi.launch(self) |
|
74 | wsgicgi.launch(self) | |
76 |
|
75 | |||
77 | def __call__(self, env, respond): |
|
76 | def __call__(self, env, respond): | |
78 | req = wsgirequest(env, respond) |
|
77 | req = wsgirequest(env, respond) | |
79 | return self.run_wsgi(req) |
|
78 | return self.run_wsgi(req) | |
80 |
|
79 | |||
81 | def run_wsgi(self, req): |
|
80 | def run_wsgi(self, req): | |
82 |
|
81 | |||
83 | self.refresh() |
|
82 | self.refresh() | |
84 |
|
83 | |||
85 | # process this if it's a protocol request |
|
84 | # process this if it's a protocol request | |
86 | # protocol bits don't need to create any URLs |
|
85 | # protocol bits don't need to create any URLs | |
87 | # and the clients always use the old URL structure |
|
86 | # and the clients always use the old URL structure | |
88 |
|
87 | |||
89 | cmd = req.form.get('cmd', [''])[0] |
|
88 | cmd = req.form.get('cmd', [''])[0] | |
90 | if cmd and cmd in protocol.__all__: |
|
89 | if cmd and cmd in protocol.__all__: | |
91 | try: |
|
90 | try: | |
92 | if cmd in perms: |
|
91 | if cmd in perms: | |
93 | try: |
|
92 | try: | |
94 | self.check_perm(req, perms[cmd]) |
|
93 | self.check_perm(req, perms[cmd]) | |
95 | except ErrorResponse, inst: |
|
94 | except ErrorResponse, inst: | |
96 | if cmd == 'unbundle': |
|
95 | if cmd == 'unbundle': | |
97 | req.drain() |
|
96 | req.drain() | |
98 | raise |
|
97 | raise | |
99 | method = getattr(protocol, cmd) |
|
98 | method = getattr(protocol, cmd) | |
100 | return method(self.repo, req) |
|
99 | return method(self.repo, req) | |
101 | except ErrorResponse, inst: |
|
100 | except ErrorResponse, inst: | |
102 | req.respond(inst, protocol.HGTYPE) |
|
101 | req.respond(inst, protocol.HGTYPE) | |
103 | if not inst.message: |
|
102 | if not inst.message: | |
104 | return [] |
|
103 | return [] | |
105 | return '0\n%s\n' % inst.message, |
|
104 | return '0\n%s\n' % inst.message, | |
106 |
|
105 | |||
107 | # work with CGI variables to create coherent structure |
|
106 | # work with CGI variables to create coherent structure | |
108 | # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME |
|
107 | # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME | |
109 |
|
108 | |||
110 | req.url = req.env['SCRIPT_NAME'] |
|
109 | req.url = req.env['SCRIPT_NAME'] | |
111 | if not req.url.endswith('/'): |
|
110 | if not req.url.endswith('/'): | |
112 | req.url += '/' |
|
111 | req.url += '/' | |
113 | if 'REPO_NAME' in req.env: |
|
112 | if 'REPO_NAME' in req.env: | |
114 | req.url += req.env['REPO_NAME'] + '/' |
|
113 | req.url += req.env['REPO_NAME'] + '/' | |
115 |
|
114 | |||
116 | if 'PATH_INFO' in req.env: |
|
115 | if 'PATH_INFO' in req.env: | |
117 | parts = req.env['PATH_INFO'].strip('/').split('/') |
|
116 | parts = req.env['PATH_INFO'].strip('/').split('/') | |
118 | repo_parts = req.env.get('REPO_NAME', '').split('/') |
|
117 | repo_parts = req.env.get('REPO_NAME', '').split('/') | |
119 | if parts[:len(repo_parts)] == repo_parts: |
|
118 | if parts[:len(repo_parts)] == repo_parts: | |
120 | parts = parts[len(repo_parts):] |
|
119 | parts = parts[len(repo_parts):] | |
121 | query = '/'.join(parts) |
|
120 | query = '/'.join(parts) | |
122 | else: |
|
121 | else: | |
123 | query = req.env['QUERY_STRING'].split('&', 1)[0] |
|
122 | query = req.env['QUERY_STRING'].split('&', 1)[0] | |
124 | query = query.split(';', 1)[0] |
|
123 | query = query.split(';', 1)[0] | |
125 |
|
124 | |||
126 | # translate user-visible url structure to internal structure |
|
125 | # translate user-visible url structure to internal structure | |
127 |
|
126 | |||
128 | args = query.split('/', 2) |
|
127 | args = query.split('/', 2) | |
129 | if 'cmd' not in req.form and args and args[0]: |
|
128 | if 'cmd' not in req.form and args and args[0]: | |
130 |
|
129 | |||
131 | cmd = args.pop(0) |
|
130 | cmd = args.pop(0) | |
132 | style = cmd.rfind('-') |
|
131 | style = cmd.rfind('-') | |
133 | if style != -1: |
|
132 | if style != -1: | |
134 | req.form['style'] = [cmd[:style]] |
|
133 | req.form['style'] = [cmd[:style]] | |
135 | cmd = cmd[style+1:] |
|
134 | cmd = cmd[style+1:] | |
136 |
|
135 | |||
137 | # avoid accepting e.g. style parameter as command |
|
136 | # avoid accepting e.g. style parameter as command | |
138 | if hasattr(webcommands, cmd): |
|
137 | if hasattr(webcommands, cmd): | |
139 | req.form['cmd'] = [cmd] |
|
138 | req.form['cmd'] = [cmd] | |
140 | else: |
|
139 | else: | |
141 | cmd = '' |
|
140 | cmd = '' | |
142 |
|
141 | |||
143 | if cmd == 'static': |
|
142 | if cmd == 'static': | |
144 | req.form['file'] = ['/'.join(args)] |
|
143 | req.form['file'] = ['/'.join(args)] | |
145 | else: |
|
144 | else: | |
146 | if args and args[0]: |
|
145 | if args and args[0]: | |
147 | node = args.pop(0) |
|
146 | node = args.pop(0) | |
148 | req.form['node'] = [node] |
|
147 | req.form['node'] = [node] | |
149 | if args: |
|
148 | if args: | |
150 | req.form['file'] = args |
|
149 | req.form['file'] = args | |
151 |
|
150 | |||
152 | if cmd == 'archive': |
|
151 | if cmd == 'archive': | |
153 | fn = req.form['node'][0] |
|
152 | fn = req.form['node'][0] | |
154 | for type_, spec in self.archive_specs.iteritems(): |
|
153 | for type_, spec in self.archive_specs.iteritems(): | |
155 | ext = spec[2] |
|
154 | ext = spec[2] | |
156 | if fn.endswith(ext): |
|
155 | if fn.endswith(ext): | |
157 | req.form['node'] = [fn[:-len(ext)]] |
|
156 | req.form['node'] = [fn[:-len(ext)]] | |
158 | req.form['type'] = [type_] |
|
157 | req.form['type'] = [type_] | |
159 |
|
158 | |||
160 | # process the web interface request |
|
159 | # process the web interface request | |
161 |
|
160 | |||
162 | try: |
|
161 | try: | |
163 | tmpl = self.templater(req) |
|
162 | tmpl = self.templater(req) | |
164 | ctype = tmpl('mimetype', encoding=self.encoding) |
|
163 | ctype = tmpl('mimetype', encoding=self.encoding) | |
165 | ctype = templater.stringify(ctype) |
|
164 | ctype = templater.stringify(ctype) | |
166 |
|
165 | |||
167 | # check read permissions for non-static content |
|
166 | # check read permissions for non-static content | |
168 | if cmd != 'static': |
|
167 | if cmd != 'static': | |
169 | self.check_perm(req, None) |
|
168 | self.check_perm(req, None) | |
170 |
|
169 | |||
171 | if cmd == '': |
|
170 | if cmd == '': | |
172 | req.form['cmd'] = [tmpl.cache['default']] |
|
171 | req.form['cmd'] = [tmpl.cache['default']] | |
173 | cmd = req.form['cmd'][0] |
|
172 | cmd = req.form['cmd'][0] | |
174 |
|
173 | |||
175 | if cmd not in webcommands.__all__: |
|
174 | if cmd not in webcommands.__all__: | |
176 | msg = 'no such method: %s' % cmd |
|
175 | msg = 'no such method: %s' % cmd | |
177 | raise ErrorResponse(HTTP_BAD_REQUEST, msg) |
|
176 | raise ErrorResponse(HTTP_BAD_REQUEST, msg) | |
178 | elif cmd == 'file' and 'raw' in req.form.get('style', []): |
|
177 | elif cmd == 'file' and 'raw' in req.form.get('style', []): | |
179 | self.ctype = ctype |
|
178 | self.ctype = ctype | |
180 | content = webcommands.rawfile(self, req, tmpl) |
|
179 | content = webcommands.rawfile(self, req, tmpl) | |
181 | else: |
|
180 | else: | |
182 | content = getattr(webcommands, cmd)(self, req, tmpl) |
|
181 | content = getattr(webcommands, cmd)(self, req, tmpl) | |
183 | req.respond(HTTP_OK, ctype) |
|
182 | req.respond(HTTP_OK, ctype) | |
184 |
|
183 | |||
185 | return content |
|
184 | return content | |
186 |
|
185 | |||
187 | except error.LookupError, err: |
|
186 | except error.LookupError, err: | |
188 | req.respond(HTTP_NOT_FOUND, ctype) |
|
187 | req.respond(HTTP_NOT_FOUND, ctype) | |
189 | msg = str(err) |
|
188 | msg = str(err) | |
190 | if 'manifest' not in msg: |
|
189 | if 'manifest' not in msg: | |
191 | msg = 'revision not found: %s' % err.name |
|
190 | msg = 'revision not found: %s' % err.name | |
192 | return tmpl('error', error=msg) |
|
191 | return tmpl('error', error=msg) | |
193 | except (error.RepoError, error.RevlogError), inst: |
|
192 | except (error.RepoError, error.RevlogError), inst: | |
194 | req.respond(HTTP_SERVER_ERROR, ctype) |
|
193 | req.respond(HTTP_SERVER_ERROR, ctype) | |
195 | return tmpl('error', error=str(inst)) |
|
194 | return tmpl('error', error=str(inst)) | |
196 | except ErrorResponse, inst: |
|
195 | except ErrorResponse, inst: | |
197 | req.respond(inst, ctype) |
|
196 | req.respond(inst, ctype) | |
198 | return tmpl('error', error=inst.message) |
|
197 | return tmpl('error', error=inst.message) | |
199 |
|
198 | |||
200 | def templater(self, req): |
|
199 | def templater(self, req): | |
201 |
|
200 | |||
202 | # determine scheme, port and server name |
|
201 | # determine scheme, port and server name | |
203 | # this is needed to create absolute urls |
|
202 | # this is needed to create absolute urls | |
204 |
|
203 | |||
205 | proto = req.env.get('wsgi.url_scheme') |
|
204 | proto = req.env.get('wsgi.url_scheme') | |
206 | if proto == 'https': |
|
205 | if proto == 'https': | |
207 | proto = 'https' |
|
206 | proto = 'https' | |
208 | default_port = "443" |
|
207 | default_port = "443" | |
209 | else: |
|
208 | else: | |
210 | proto = 'http' |
|
209 | proto = 'http' | |
211 | default_port = "80" |
|
210 | default_port = "80" | |
212 |
|
211 | |||
213 | port = req.env["SERVER_PORT"] |
|
212 | port = req.env["SERVER_PORT"] | |
214 | port = port != default_port and (":" + port) or "" |
|
213 | port = port != default_port and (":" + port) or "" | |
215 | urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) |
|
214 | urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) | |
216 | staticurl = self.config("web", "staticurl") or req.url + 'static/' |
|
215 | staticurl = self.config("web", "staticurl") or req.url + 'static/' | |
217 | if not staticurl.endswith('/'): |
|
216 | if not staticurl.endswith('/'): | |
218 | staticurl += '/' |
|
217 | staticurl += '/' | |
219 |
|
218 | |||
220 | # some functions for the templater |
|
219 | # some functions for the templater | |
221 |
|
220 | |||
222 | def header(**map): |
|
221 | def header(**map): | |
223 | yield tmpl('header', encoding=self.encoding, **map) |
|
222 | yield tmpl('header', encoding=self.encoding, **map) | |
224 |
|
223 | |||
225 | def footer(**map): |
|
224 | def footer(**map): | |
226 | yield tmpl("footer", **map) |
|
225 | yield tmpl("footer", **map) | |
227 |
|
226 | |||
228 | def motd(**map): |
|
227 | def motd(**map): | |
229 | yield self.config("web", "motd", "") |
|
228 | yield self.config("web", "motd", "") | |
230 |
|
229 | |||
231 | # figure out which style to use |
|
230 | # figure out which style to use | |
232 |
|
231 | |||
233 | vars = {} |
|
232 | vars = {} | |
234 | style = self.config("web", "style", "paper") |
|
233 | style = self.config("web", "style", "paper") | |
235 | if 'style' in req.form: |
|
234 | if 'style' in req.form: | |
236 | style = req.form['style'][0] |
|
235 | style = req.form['style'][0] | |
237 | vars['style'] = style |
|
236 | vars['style'] = style | |
238 |
|
237 | |||
239 | start = req.url[-1] == '?' and '&' or '?' |
|
238 | start = req.url[-1] == '?' and '&' or '?' | |
240 | sessionvars = webutil.sessionvars(vars, start) |
|
239 | sessionvars = webutil.sessionvars(vars, start) | |
241 | mapfile = style_map(self.templatepath, style) |
|
240 | mapfile = style_map(self.templatepath, style) | |
242 |
|
241 | |||
243 | if not self.reponame: |
|
242 | if not self.reponame: | |
244 | self.reponame = (self.config("web", "name") |
|
243 | self.reponame = (self.config("web", "name") | |
245 | or req.env.get('REPO_NAME') |
|
244 | or req.env.get('REPO_NAME') | |
246 | or req.url.strip('/') or self.repo.root) |
|
245 | or req.url.strip('/') or self.repo.root) | |
247 |
|
246 | |||
248 | # create the templater |
|
247 | # create the templater | |
249 |
|
248 | |||
250 | tmpl = templater.templater(mapfile, templatefilters.filters, |
|
249 | tmpl = templater.templater(mapfile, templatefilters.filters, | |
251 | defaults={"url": req.url, |
|
250 | defaults={"url": req.url, | |
252 | "staticurl": staticurl, |
|
251 | "staticurl": staticurl, | |
253 | "urlbase": urlbase, |
|
252 | "urlbase": urlbase, | |
254 | "repo": self.reponame, |
|
253 | "repo": self.reponame, | |
255 | "header": header, |
|
254 | "header": header, | |
256 | "footer": footer, |
|
255 | "footer": footer, | |
257 | "motd": motd, |
|
256 | "motd": motd, | |
258 | "sessionvars": sessionvars |
|
257 | "sessionvars": sessionvars | |
259 | }) |
|
258 | }) | |
260 | return tmpl |
|
259 | return tmpl | |
261 |
|
260 | |||
262 | def archivelist(self, nodeid): |
|
261 | def archivelist(self, nodeid): | |
263 | allowed = self.configlist("web", "allow_archive") |
|
262 | allowed = self.configlist("web", "allow_archive") | |
264 | for i, spec in self.archive_specs.iteritems(): |
|
263 | for i, spec in self.archive_specs.iteritems(): | |
265 | if i in allowed or self.configbool("web", "allow" + i): |
|
264 | if i in allowed or self.configbool("web", "allow" + i): | |
266 | yield {"type" : i, "extension" : spec[2], "node" : nodeid} |
|
265 | yield {"type" : i, "extension" : spec[2], "node" : nodeid} | |
267 |
|
266 | |||
268 | archive_specs = { |
|
267 | archive_specs = { | |
269 | 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None), |
|
268 | 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None), | |
270 | 'gz': ('application/x-tar', 'tgz', '.tar.gz', None), |
|
269 | 'gz': ('application/x-tar', 'tgz', '.tar.gz', None), | |
271 | 'zip': ('application/zip', 'zip', '.zip', None), |
|
270 | 'zip': ('application/zip', 'zip', '.zip', None), | |
272 | } |
|
271 | } | |
273 |
|
272 | |||
274 | def check_perm(self, req, op): |
|
273 | def check_perm(self, req, op): | |
275 | '''Check permission for operation based on request data (including |
|
274 | '''Check permission for operation based on request data (including | |
276 | authentication info). Return if op allowed, else raise an ErrorResponse |
|
275 | authentication info). Return if op allowed, else raise an ErrorResponse | |
277 | exception.''' |
|
276 | exception.''' | |
278 |
|
277 | |||
279 | user = req.env.get('REMOTE_USER') |
|
278 | user = req.env.get('REMOTE_USER') | |
280 |
|
279 | |||
281 | deny_read = self.configlist('web', 'deny_read') |
|
280 | deny_read = self.configlist('web', 'deny_read') | |
282 | if deny_read and (not user or deny_read == ['*'] or user in deny_read): |
|
281 | if deny_read and (not user or deny_read == ['*'] or user in deny_read): | |
283 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') |
|
282 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') | |
284 |
|
283 | |||
285 | allow_read = self.configlist('web', 'allow_read') |
|
284 | allow_read = self.configlist('web', 'allow_read') | |
286 | result = (not allow_read) or (allow_read == ['*']) |
|
285 | result = (not allow_read) or (allow_read == ['*']) | |
287 | if not (result or user in allow_read): |
|
286 | if not (result or user in allow_read): | |
288 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') |
|
287 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') | |
289 |
|
288 | |||
290 | if op == 'pull' and not self.allowpull: |
|
289 | if op == 'pull' and not self.allowpull: | |
291 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized') |
|
290 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized') | |
292 | elif op == 'pull' or op is None: # op is None for interface requests |
|
291 | elif op == 'pull' or op is None: # op is None for interface requests | |
293 | return |
|
292 | return | |
294 |
|
293 | |||
295 | # enforce that you can only push using POST requests |
|
294 | # enforce that you can only push using POST requests | |
296 | if req.env['REQUEST_METHOD'] != 'POST': |
|
295 | if req.env['REQUEST_METHOD'] != 'POST': | |
297 | msg = 'push requires POST request' |
|
296 | msg = 'push requires POST request' | |
298 | raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg) |
|
297 | raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg) | |
299 |
|
298 | |||
300 | # require ssl by default for pushing, auth info cannot be sniffed |
|
299 | # require ssl by default for pushing, auth info cannot be sniffed | |
301 | # and replayed |
|
300 | # and replayed | |
302 | scheme = req.env.get('wsgi.url_scheme') |
|
301 | scheme = req.env.get('wsgi.url_scheme') | |
303 | if self.configbool('web', 'push_ssl', True) and scheme != 'https': |
|
302 | if self.configbool('web', 'push_ssl', True) and scheme != 'https': | |
304 | raise ErrorResponse(HTTP_OK, 'ssl required') |
|
303 | raise ErrorResponse(HTTP_OK, 'ssl required') | |
305 |
|
304 | |||
306 | deny = self.configlist('web', 'deny_push') |
|
305 | deny = self.configlist('web', 'deny_push') | |
307 | if deny and (not user or deny == ['*'] or user in deny): |
|
306 | if deny and (not user or deny == ['*'] or user in deny): | |
308 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') |
|
307 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') | |
309 |
|
308 | |||
310 | allow = self.configlist('web', 'allow_push') |
|
309 | allow = self.configlist('web', 'allow_push') | |
311 | result = allow and (allow == ['*'] or user in allow) |
|
310 | result = allow and (allow == ['*'] or user in allow) | |
312 | if not result: |
|
311 | if not result: | |
313 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') |
|
312 | raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') |
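
check_perm() above is essentially set logic over the web.deny_*/allow_* lists and the authenticated user, with deny taking precedence and '*' acting as a wildcard. A distilled, framework-free version of just the push decision (the function name is illustrative; the real method additionally enforces the POST and push_ssl checks shown above):

def push_allowed(user, allow_push, deny_push):
    # Deny wins: no authenticated user, a '*' wildcard, or an explicit match
    # blocks the push outright.
    if deny_push and (not user or deny_push == ['*'] or user in deny_push):
        return False
    # Otherwise the user must be covered by allow_push ('*' or a listed name).
    return bool(allow_push) and (allow_push == ['*'] or user in allow_push)

assert push_allowed('alice', allow_push=['*'], deny_push=[])
assert not push_allowed('mallory', allow_push=['alice'], deny_push=[])
assert not push_allowed('alice', allow_push=['*'], deny_push=['alice'])
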
@@ -1,659 +1,659 b'' | |||||
1 | # |
|
1 | # | |
2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | import os, mimetypes, re, cgi, copy |
|
8 | import os, mimetypes, re, cgi, copy | |
9 | import webutil |
|
9 | import webutil | |
10 | from mercurial import error, archival, templatefilters |
|
10 | from mercurial import error, archival, templatefilters | |
11 |
from mercurial.node import short, hex |
|
11 | from mercurial.node import short, hex | |
12 |
from mercurial.util import binary |
|
12 | from mercurial.util import binary | |
13 | from common import paritygen, staticfile, get_contact, ErrorResponse |
|
13 | from common import paritygen, staticfile, get_contact, ErrorResponse | |
14 | from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND |
|
14 | from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND | |
15 | from mercurial import graphmod, util |
|
15 | from mercurial import graphmod, util | |
16 |
|
16 | |||
17 | # __all__ is populated with the allowed commands. Be sure to add to it if |
|
17 | # __all__ is populated with the allowed commands. Be sure to add to it if | |
18 | # you're adding a new command, or the new command won't work. |
|
18 | # you're adding a new command, or the new command won't work. | |
19 |
|
19 | |||
20 | __all__ = [ |
|
20 | __all__ = [ | |
21 | 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev', |
|
21 | 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev', | |
22 | 'manifest', 'tags', 'summary', 'filediff', 'diff', 'annotate', 'filelog', |
|
22 | 'manifest', 'tags', 'summary', 'filediff', 'diff', 'annotate', 'filelog', | |
23 | 'archive', 'static', 'graph', |
|
23 | 'archive', 'static', 'graph', | |
24 | ] |
|
24 | ] | |
25 |
|
25 | |||
26 | def log(web, req, tmpl): |
|
26 | def log(web, req, tmpl): | |
27 | if 'file' in req.form and req.form['file'][0]: |
|
27 | if 'file' in req.form and req.form['file'][0]: | |
28 | return filelog(web, req, tmpl) |
|
28 | return filelog(web, req, tmpl) | |
29 | else: |
|
29 | else: | |
30 | return changelog(web, req, tmpl) |
|
30 | return changelog(web, req, tmpl) | |
31 |
|
31 | |||
32 | def rawfile(web, req, tmpl): |
|
32 | def rawfile(web, req, tmpl): | |
33 | path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) |
|
33 | path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) | |
34 | if not path: |
|
34 | if not path: | |
35 | content = manifest(web, req, tmpl) |
|
35 | content = manifest(web, req, tmpl) | |
36 | req.respond(HTTP_OK, web.ctype) |
|
36 | req.respond(HTTP_OK, web.ctype) | |
37 | return content |
|
37 | return content | |
38 |
|
38 | |||
39 | try: |
|
39 | try: | |
40 | fctx = webutil.filectx(web.repo, req) |
|
40 | fctx = webutil.filectx(web.repo, req) | |
41 | except error.LookupError, inst: |
|
41 | except error.LookupError, inst: | |
42 | try: |
|
42 | try: | |
43 | content = manifest(web, req, tmpl) |
|
43 | content = manifest(web, req, tmpl) | |
44 | req.respond(HTTP_OK, web.ctype) |
|
44 | req.respond(HTTP_OK, web.ctype) | |
45 | return content |
|
45 | return content | |
46 | except ErrorResponse: |
|
46 | except ErrorResponse: | |
47 | raise inst |
|
47 | raise inst | |
48 |
|
48 | |||
49 | path = fctx.path() |
|
49 | path = fctx.path() | |
50 | text = fctx.data() |
|
50 | text = fctx.data() | |
51 | mt = mimetypes.guess_type(path)[0] |
|
51 | mt = mimetypes.guess_type(path)[0] | |
52 | if mt is None: |
|
52 | if mt is None: | |
53 | mt = binary(text) and 'application/octet-stream' or 'text/plain' |
|
53 | mt = binary(text) and 'application/octet-stream' or 'text/plain' | |
54 |
|
54 | |||
55 | req.respond(HTTP_OK, mt, path, len(text)) |
|
55 | req.respond(HTTP_OK, mt, path, len(text)) | |
56 | return [text] |
|
56 | return [text] | |
57 |
|
57 | |||
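
rawfile() above first asks the mimetypes module for a content type and only falls back on sniffing the data when the extension is unknown; the `X and A or B` form is the pre-Python-2.5 spelling of a conditional expression. A small self-contained illustration (detectbinary is a stand-in for mercurial.util.binary, which looks for NUL bytes):

import mimetypes

def detectbinary(data):
    # Stand-in for mercurial.util.binary: data containing NUL bytes is binary.
    return b"\0" in data

def guess_ctype(path, data):
    mt = mimetypes.guess_type(path)[0]
    if mt is None:
        mt = detectbinary(data) and "application/octet-stream" or "text/plain"
    return mt

assert guess_ctype("README", b"just text\n") == "text/plain"
assert guess_ctype("logo.png", b"\x89PNG\r\n") == "image/png"
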
58 | def _filerevision(web, tmpl, fctx): |
|
58 | def _filerevision(web, tmpl, fctx): | |
59 | f = fctx.path() |
|
59 | f = fctx.path() | |
60 | text = fctx.data() |
|
60 | text = fctx.data() | |
61 | parity = paritygen(web.stripecount) |
|
61 | parity = paritygen(web.stripecount) | |
62 |
|
62 | |||
63 | if binary(text): |
|
63 | if binary(text): | |
64 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' |
|
64 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' | |
65 | text = '(binary:%s)' % mt |
|
65 | text = '(binary:%s)' % mt | |
66 |
|
66 | |||
67 | def lines(): |
|
67 | def lines(): | |
68 | for lineno, t in enumerate(text.splitlines(1)): |
|
68 | for lineno, t in enumerate(text.splitlines(1)): | |
69 | yield {"line": t, |
|
69 | yield {"line": t, | |
70 | "lineid": "l%d" % (lineno + 1), |
|
70 | "lineid": "l%d" % (lineno + 1), | |
71 | "linenumber": "% 6d" % (lineno + 1), |
|
71 | "linenumber": "% 6d" % (lineno + 1), | |
72 | "parity": parity.next()} |
|
72 | "parity": parity.next()} | |
73 |
|
73 | |||
74 | return tmpl("filerevision", |
|
74 | return tmpl("filerevision", | |
75 | file=f, |
|
75 | file=f, | |
76 | path=webutil.up(f), |
|
76 | path=webutil.up(f), | |
77 | text=lines(), |
|
77 | text=lines(), | |
78 | rev=fctx.rev(), |
|
78 | rev=fctx.rev(), | |
79 | node=hex(fctx.node()), |
|
79 | node=hex(fctx.node()), | |
80 | author=fctx.user(), |
|
80 | author=fctx.user(), | |
81 | date=fctx.date(), |
|
81 | date=fctx.date(), | |
82 | desc=fctx.description(), |
|
82 | desc=fctx.description(), | |
83 | branch=webutil.nodebranchnodefault(fctx), |
|
83 | branch=webutil.nodebranchnodefault(fctx), | |
84 | parent=webutil.parents(fctx), |
|
84 | parent=webutil.parents(fctx), | |
85 | child=webutil.children(fctx), |
|
85 | child=webutil.children(fctx), | |
86 | rename=webutil.renamelink(fctx), |
|
86 | rename=webutil.renamelink(fctx), | |
87 | permissions=fctx.manifest().flags(f)) |
|
87 | permissions=fctx.manifest().flags(f)) | |
88 |
|
88 | |||
89 | def file(web, req, tmpl): |
|
89 | def file(web, req, tmpl): | |
90 | path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) |
|
90 | path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) | |
91 | if not path: |
|
91 | if not path: | |
92 | return manifest(web, req, tmpl) |
|
92 | return manifest(web, req, tmpl) | |
93 | try: |
|
93 | try: | |
94 | return _filerevision(web, tmpl, webutil.filectx(web.repo, req)) |
|
94 | return _filerevision(web, tmpl, webutil.filectx(web.repo, req)) | |
95 | except error.LookupError, inst: |
|
95 | except error.LookupError, inst: | |
96 | try: |
|
96 | try: | |
97 | return manifest(web, req, tmpl) |
|
97 | return manifest(web, req, tmpl) | |
98 | except ErrorResponse: |
|
98 | except ErrorResponse: | |
99 | raise inst |
|
99 | raise inst | |
100 |
|
100 | |||
101 | def _search(web, tmpl, query): |
|
101 | def _search(web, tmpl, query): | |
102 |
|
102 | |||
103 | def changelist(**map): |
|
103 | def changelist(**map): | |
104 | cl = web.repo.changelog |
|
104 | cl = web.repo.changelog | |
105 | count = 0 |
|
105 | count = 0 | |
106 | qw = query.lower().split() |
|
106 | qw = query.lower().split() | |
107 |
|
107 | |||
108 | def revgen(): |
|
108 | def revgen(): | |
109 | for i in xrange(len(cl) - 1, 0, -100): |
|
109 | for i in xrange(len(cl) - 1, 0, -100): | |
110 | l = [] |
|
110 | l = [] | |
111 | for j in xrange(max(0, i - 100), i + 1): |
|
111 | for j in xrange(max(0, i - 100), i + 1): | |
112 | ctx = web.repo[j] |
|
112 | ctx = web.repo[j] | |
113 | l.append(ctx) |
|
113 | l.append(ctx) | |
114 | l.reverse() |
|
114 | l.reverse() | |
115 | for e in l: |
|
115 | for e in l: | |
116 | yield e |
|
116 | yield e | |
117 |
|
117 | |||
118 | for ctx in revgen(): |
|
118 | for ctx in revgen(): | |
119 | miss = 0 |
|
119 | miss = 0 | |
120 | for q in qw: |
|
120 | for q in qw: | |
121 | if not (q in ctx.user().lower() or |
|
121 | if not (q in ctx.user().lower() or | |
122 | q in ctx.description().lower() or |
|
122 | q in ctx.description().lower() or | |
123 | q in " ".join(ctx.files()).lower()): |
|
123 | q in " ".join(ctx.files()).lower()): | |
124 | miss = 1 |
|
124 | miss = 1 | |
125 | break |
|
125 | break | |
126 | if miss: |
|
126 | if miss: | |
127 | continue |
|
127 | continue | |
128 |
|
128 | |||
129 | count += 1 |
|
129 | count += 1 | |
130 | n = ctx.node() |
|
130 | n = ctx.node() | |
131 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) |
|
131 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) | |
132 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) |
|
132 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) | |
133 |
|
133 | |||
134 | yield tmpl('searchentry', |
|
134 | yield tmpl('searchentry', | |
135 | parity=parity.next(), |
|
135 | parity=parity.next(), | |
136 | author=ctx.user(), |
|
136 | author=ctx.user(), | |
137 | parent=webutil.parents(ctx), |
|
137 | parent=webutil.parents(ctx), | |
138 | child=webutil.children(ctx), |
|
138 | child=webutil.children(ctx), | |
139 | changelogtag=showtags, |
|
139 | changelogtag=showtags, | |
140 | desc=ctx.description(), |
|
140 | desc=ctx.description(), | |
141 | date=ctx.date(), |
|
141 | date=ctx.date(), | |
142 | files=files, |
|
142 | files=files, | |
143 | rev=ctx.rev(), |
|
143 | rev=ctx.rev(), | |
144 | node=hex(n), |
|
144 | node=hex(n), | |
145 | tags=webutil.nodetagsdict(web.repo, n), |
|
145 | tags=webutil.nodetagsdict(web.repo, n), | |
146 | inbranch=webutil.nodeinbranch(web.repo, ctx), |
|
146 | inbranch=webutil.nodeinbranch(web.repo, ctx), | |
147 | branches=webutil.nodebranchdict(web.repo, ctx)) |
|
147 | branches=webutil.nodebranchdict(web.repo, ctx)) | |
148 |
|
148 | |||
149 | if count >= web.maxchanges: |
|
149 | if count >= web.maxchanges: | |
150 | break |
|
150 | break | |
151 |
|
151 | |||
152 | cl = web.repo.changelog |
|
152 | cl = web.repo.changelog | |
153 | parity = paritygen(web.stripecount) |
|
153 | parity = paritygen(web.stripecount) | |
154 |
|
154 | |||
155 | return tmpl('search', |
|
155 | return tmpl('search', | |
156 | query=query, |
|
156 | query=query, | |
157 | node=hex(cl.tip()), |
|
157 | node=hex(cl.tip()), | |
158 | entries=changelist, |
|
158 | entries=changelist, | |
159 | archives=web.archivelist("tip")) |
|
159 | archives=web.archivelist("tip")) | |
160 |
|
160 | |||
161 | def changelog(web, req, tmpl, shortlog = False): |
|
161 | def changelog(web, req, tmpl, shortlog = False): | |
162 | if 'node' in req.form: |
|
162 | if 'node' in req.form: | |
163 | ctx = webutil.changectx(web.repo, req) |
|
163 | ctx = webutil.changectx(web.repo, req) | |
164 | else: |
|
164 | else: | |
165 | if 'rev' in req.form: |
|
165 | if 'rev' in req.form: | |
166 | hi = req.form['rev'][0] |
|
166 | hi = req.form['rev'][0] | |
167 | else: |
|
167 | else: | |
168 | hi = len(web.repo) - 1 |
|
168 | hi = len(web.repo) - 1 | |
169 | try: |
|
169 | try: | |
170 | ctx = web.repo[hi] |
|
170 | ctx = web.repo[hi] | |
171 | except error.RepoError: |
|
171 | except error.RepoError: | |
172 | return _search(web, tmpl, hi) # XXX redirect to 404 page? |
|
172 | return _search(web, tmpl, hi) # XXX redirect to 404 page? | |
173 |
|
173 | |||
174 | def changelist(limit=0, **map): |
|
174 | def changelist(limit=0, **map): | |
175 | cl = web.repo.changelog |
|
175 | cl = web.repo.changelog | |
176 | l = [] # build a list in forward order for efficiency |
|
176 | l = [] # build a list in forward order for efficiency | |
177 | for i in xrange(start, end): |
|
177 | for i in xrange(start, end): | |
178 | ctx = web.repo[i] |
|
178 | ctx = web.repo[i] | |
179 | n = ctx.node() |
|
179 | n = ctx.node() | |
180 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) |
|
180 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) | |
181 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) |
|
181 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) | |
182 |
|
182 | |||
183 | l.insert(0, {"parity": parity.next(), |
|
183 | l.insert(0, {"parity": parity.next(), | |
184 | "author": ctx.user(), |
|
184 | "author": ctx.user(), | |
185 | "parent": webutil.parents(ctx, i - 1), |
|
185 | "parent": webutil.parents(ctx, i - 1), | |
186 | "child": webutil.children(ctx, i + 1), |
|
186 | "child": webutil.children(ctx, i + 1), | |
187 | "changelogtag": showtags, |
|
187 | "changelogtag": showtags, | |
188 | "desc": ctx.description(), |
|
188 | "desc": ctx.description(), | |
189 | "date": ctx.date(), |
|
189 | "date": ctx.date(), | |
190 | "files": files, |
|
190 | "files": files, | |
191 | "rev": i, |
|
191 | "rev": i, | |
192 | "node": hex(n), |
|
192 | "node": hex(n), | |
193 | "tags": webutil.nodetagsdict(web.repo, n), |
|
193 | "tags": webutil.nodetagsdict(web.repo, n), | |
194 | "inbranch": webutil.nodeinbranch(web.repo, ctx), |
|
194 | "inbranch": webutil.nodeinbranch(web.repo, ctx), | |
195 | "branches": webutil.nodebranchdict(web.repo, ctx) |
|
195 | "branches": webutil.nodebranchdict(web.repo, ctx) | |
196 | }) |
|
196 | }) | |
197 |
|
197 | |||
198 | if limit > 0: |
|
198 | if limit > 0: | |
199 | l = l[:limit] |
|
199 | l = l[:limit] | |
200 |
|
200 | |||
201 | for e in l: |
|
201 | for e in l: | |
202 | yield e |
|
202 | yield e | |
203 |
|
203 | |||
204 | maxchanges = shortlog and web.maxshortchanges or web.maxchanges |
|
204 | maxchanges = shortlog and web.maxshortchanges or web.maxchanges | |
205 | cl = web.repo.changelog |
|
205 | cl = web.repo.changelog | |
206 | count = len(cl) |
|
206 | count = len(cl) | |
207 | pos = ctx.rev() |
|
207 | pos = ctx.rev() | |
208 | start = max(0, pos - maxchanges + 1) |
|
208 | start = max(0, pos - maxchanges + 1) | |
209 | end = min(count, start + maxchanges) |
|
209 | end = min(count, start + maxchanges) | |
210 | pos = end - 1 |
|
210 | pos = end - 1 | |
211 | parity = paritygen(web.stripecount, offset=start-end) |
|
211 | parity = paritygen(web.stripecount, offset=start-end) | |
212 |
|
212 | |||
213 | changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx) |
|
213 | changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx) | |
214 |
|
214 | |||
215 | return tmpl(shortlog and 'shortlog' or 'changelog', |
|
215 | return tmpl(shortlog and 'shortlog' or 'changelog', | |
216 | changenav=changenav, |
|
216 | changenav=changenav, | |
217 | node=hex(ctx.node()), |
|
217 | node=hex(ctx.node()), | |
218 | rev=pos, changesets=count, |
|
218 | rev=pos, changesets=count, | |
219 | entries=lambda **x: changelist(limit=0,**x), |
|
219 | entries=lambda **x: changelist(limit=0,**x), | |
220 | latestentry=lambda **x: changelist(limit=1,**x), |
|
220 | latestentry=lambda **x: changelist(limit=1,**x), | |
221 | archives=web.archivelist("tip")) |
|
221 | archives=web.archivelist("tip")) | |
222 |
|
222 | |||
223 | def shortlog(web, req, tmpl): |
|
223 | def shortlog(web, req, tmpl): | |
224 | return changelog(web, req, tmpl, shortlog = True) |
|
224 | return changelog(web, req, tmpl, shortlog = True) | |
225 |
|
225 | |||
226 | def changeset(web, req, tmpl): |
|
226 | def changeset(web, req, tmpl): | |
227 | ctx = webutil.changectx(web.repo, req) |
|
227 | ctx = webutil.changectx(web.repo, req) | |
228 | showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node()) |
|
228 | showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node()) | |
229 | showbranch = webutil.nodebranchnodefault(ctx) |
|
229 | showbranch = webutil.nodebranchnodefault(ctx) | |
230 |
|
230 | |||
231 | files = [] |
|
231 | files = [] | |
232 | parity = paritygen(web.stripecount) |
|
232 | parity = paritygen(web.stripecount) | |
233 | for f in ctx.files(): |
|
233 | for f in ctx.files(): | |
234 | template = f in ctx and 'filenodelink' or 'filenolink' |
|
234 | template = f in ctx and 'filenodelink' or 'filenolink' | |
235 | files.append(tmpl(template, |
|
235 | files.append(tmpl(template, | |
236 | node=ctx.hex(), file=f, |
|
236 | node=ctx.hex(), file=f, | |
237 | parity=parity.next())) |
|
237 | parity=parity.next())) | |
238 |
|
238 | |||
239 | parity = paritygen(web.stripecount) |
|
239 | parity = paritygen(web.stripecount) | |
240 | diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity) |
|
240 | diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity) | |
241 | return tmpl('changeset', |
|
241 | return tmpl('changeset', | |
242 | diff=diffs, |
|
242 | diff=diffs, | |
243 | rev=ctx.rev(), |
|
243 | rev=ctx.rev(), | |
244 | node=ctx.hex(), |
|
244 | node=ctx.hex(), | |
245 | parent=webutil.parents(ctx), |
|
245 | parent=webutil.parents(ctx), | |
246 | child=webutil.children(ctx), |
|
246 | child=webutil.children(ctx), | |
247 | changesettag=showtags, |
|
247 | changesettag=showtags, | |
248 | changesetbranch=showbranch, |
|
248 | changesetbranch=showbranch, | |
249 | author=ctx.user(), |
|
249 | author=ctx.user(), | |
250 | desc=ctx.description(), |
|
250 | desc=ctx.description(), | |
251 | date=ctx.date(), |
|
251 | date=ctx.date(), | |
252 | files=files, |
|
252 | files=files, | |
253 | archives=web.archivelist(ctx.hex()), |
|
253 | archives=web.archivelist(ctx.hex()), | |
254 | tags=webutil.nodetagsdict(web.repo, ctx.node()), |
|
254 | tags=webutil.nodetagsdict(web.repo, ctx.node()), | |
255 | branch=webutil.nodebranchnodefault(ctx), |
|
255 | branch=webutil.nodebranchnodefault(ctx), | |
256 | inbranch=webutil.nodeinbranch(web.repo, ctx), |
|
256 | inbranch=webutil.nodeinbranch(web.repo, ctx), | |
257 | branches=webutil.nodebranchdict(web.repo, ctx)) |
|
257 | branches=webutil.nodebranchdict(web.repo, ctx)) | |
258 |
|
258 | |||
259 | rev = changeset |
|
259 | rev = changeset | |
260 |
|
260 | |||
261 | def manifest(web, req, tmpl): |
|
261 | def manifest(web, req, tmpl): | |
262 | ctx = webutil.changectx(web.repo, req) |
|
262 | ctx = webutil.changectx(web.repo, req) | |
263 | path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) |
|
263 | path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) | |
264 | mf = ctx.manifest() |
|
264 | mf = ctx.manifest() | |
265 | node = ctx.node() |
|
265 | node = ctx.node() | |
266 |
|
266 | |||
267 | files = {} |
|
267 | files = {} | |
268 | dirs = {} |
|
268 | dirs = {} | |
269 | parity = paritygen(web.stripecount) |
|
269 | parity = paritygen(web.stripecount) | |
270 |
|
270 | |||
271 | if path and path[-1] != "/": |
|
271 | if path and path[-1] != "/": | |
272 | path += "/" |
|
272 | path += "/" | |
273 | l = len(path) |
|
273 | l = len(path) | |
274 | abspath = "/" + path |
|
274 | abspath = "/" + path | |
275 |
|
275 | |||
276 | for f, n in mf.iteritems(): |
|
276 | for f, n in mf.iteritems(): | |
277 | if f[:l] != path: |
|
277 | if f[:l] != path: | |
278 | continue |
|
278 | continue | |
279 | remain = f[l:] |
|
279 | remain = f[l:] | |
280 | elements = remain.split('/') |
|
280 | elements = remain.split('/') | |
281 | if len(elements) == 1: |
|
281 | if len(elements) == 1: | |
282 | files[remain] = f |
|
282 | files[remain] = f | |
283 | else: |
|
283 | else: | |
284 | h = dirs # need to retain ref to dirs (root) |
|
284 | h = dirs # need to retain ref to dirs (root) | |
285 | for elem in elements[0:-1]: |
|
285 | for elem in elements[0:-1]: | |
286 | if elem not in h: |
|
286 | if elem not in h: | |
287 | h[elem] = {} |
|
287 | h[elem] = {} | |
288 | h = h[elem] |
|
288 | h = h[elem] | |
289 | if len(h) > 1: |
|
289 | if len(h) > 1: | |
290 | break |
|
290 | break | |
291 | h[None] = None # denotes files present |
|
291 | h[None] = None # denotes files present | |
292 |
|
292 | |||
293 | if mf and not files and not dirs: |
|
293 | if mf and not files and not dirs: | |
294 | raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) |
|
294 | raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) | |
295 |
|
295 | |||
296 | def filelist(**map): |
|
296 | def filelist(**map): | |
297 | for f in util.sort(files): |
|
297 | for f in util.sort(files): | |
298 | full = files[f] |
|
298 | full = files[f] | |
299 |
|
299 | |||
300 | fctx = ctx.filectx(full) |
|
300 | fctx = ctx.filectx(full) | |
301 | yield {"file": full, |
|
301 | yield {"file": full, | |
302 | "parity": parity.next(), |
|
302 | "parity": parity.next(), | |
303 | "basename": f, |
|
303 | "basename": f, | |
304 | "date": fctx.date(), |
|
304 | "date": fctx.date(), | |
305 | "size": fctx.size(), |
|
305 | "size": fctx.size(), | |
306 | "permissions": mf.flags(full)} |
|
306 | "permissions": mf.flags(full)} | |
307 |
|
307 | |||
308 | def dirlist(**map): |
|
308 | def dirlist(**map): | |
309 | for d in util.sort(dirs): |
|
309 | for d in util.sort(dirs): | |
310 |
|
310 | |||
311 | emptydirs = [] |
|
311 | emptydirs = [] | |
312 | h = dirs[d] |
|
312 | h = dirs[d] | |
313 | while isinstance(h, dict) and len(h) == 1: |
|
313 | while isinstance(h, dict) and len(h) == 1: | |
314 | k,v = h.items()[0] |
|
314 | k,v = h.items()[0] | |
315 | if v: |
|
315 | if v: | |
316 | emptydirs.append(k) |
|
316 | emptydirs.append(k) | |
317 | h = v |
|
317 | h = v | |
318 |
|
318 | |||
319 | path = "%s%s" % (abspath, d) |
|
319 | path = "%s%s" % (abspath, d) | |
320 | yield {"parity": parity.next(), |
|
320 | yield {"parity": parity.next(), | |
321 | "path": path, |
|
321 | "path": path, | |
322 | "emptydirs": "/".join(emptydirs), |
|
322 | "emptydirs": "/".join(emptydirs), | |
323 | "basename": d} |
|
323 | "basename": d} | |
324 |
|
324 | |||
325 | return tmpl("manifest", |
|
325 | return tmpl("manifest", | |
326 | rev=ctx.rev(), |
|
326 | rev=ctx.rev(), | |
327 | node=hex(node), |
|
327 | node=hex(node), | |
328 | path=abspath, |
|
328 | path=abspath, | |
329 | up=webutil.up(abspath), |
|
329 | up=webutil.up(abspath), | |
330 | upparity=parity.next(), |
|
330 | upparity=parity.next(), | |
331 | fentries=filelist, |
|
331 | fentries=filelist, | |
332 | dentries=dirlist, |
|
332 | dentries=dirlist, | |
333 | archives=web.archivelist(hex(node)), |
|
333 | archives=web.archivelist(hex(node)), | |
334 | tags=webutil.nodetagsdict(web.repo, node), |
|
334 | tags=webutil.nodetagsdict(web.repo, node), | |
335 | inbranch=webutil.nodeinbranch(web.repo, ctx), |
|
335 | inbranch=webutil.nodeinbranch(web.repo, ctx), | |
336 | branches=webutil.nodebranchdict(web.repo, ctx)) |
|
336 | branches=webutil.nodebranchdict(web.repo, ctx)) | |
337 |
|
337 | |||
338 | def tags(web, req, tmpl): |
|
338 | def tags(web, req, tmpl): | |
339 | i = web.repo.tagslist() |
|
339 | i = web.repo.tagslist() | |
340 | i.reverse() |
|
340 | i.reverse() | |
341 | parity = paritygen(web.stripecount) |
|
341 | parity = paritygen(web.stripecount) | |
342 |
|
342 | |||
343 | def entries(notip=False,limit=0, **map): |
|
343 | def entries(notip=False,limit=0, **map): | |
344 | count = 0 |
|
344 | count = 0 | |
345 | for k, n in i: |
|
345 | for k, n in i: | |
346 | if notip and k == "tip": |
|
346 | if notip and k == "tip": | |
347 | continue |
|
347 | continue | |
348 | if limit > 0 and count >= limit: |
|
348 | if limit > 0 and count >= limit: | |
349 | continue |
|
349 | continue | |
350 | count = count + 1 |
|
350 | count = count + 1 | |
351 | yield {"parity": parity.next(), |
|
351 | yield {"parity": parity.next(), | |
352 | "tag": k, |
|
352 | "tag": k, | |
353 | "date": web.repo[n].date(), |
|
353 | "date": web.repo[n].date(), | |
354 | "node": hex(n)} |
|
354 | "node": hex(n)} | |
355 |
|
355 | |||
356 | return tmpl("tags", |
|
356 | return tmpl("tags", | |
357 | node=hex(web.repo.changelog.tip()), |
|
357 | node=hex(web.repo.changelog.tip()), | |
358 | entries=lambda **x: entries(False,0, **x), |
|
358 | entries=lambda **x: entries(False,0, **x), | |
359 | entriesnotip=lambda **x: entries(True,0, **x), |
|
359 | entriesnotip=lambda **x: entries(True,0, **x), | |
360 | latestentry=lambda **x: entries(True,1, **x)) |
|
360 | latestentry=lambda **x: entries(True,1, **x)) | |
361 |
|
361 | |||
def summary(web, req, tmpl):
    i = web.repo.tagslist()
    i.reverse()

    def tagentries(**map):
        parity = paritygen(web.stripecount)
        count = 0
        for k, n in i:
            if k == "tip": # skip tip
                continue

            count += 1
            if count > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=parity.next(),
                       tag=k,
                       node=hex(n),
                       date=web.repo[n].date())

    def branches(**map):
        parity = paritygen(web.stripecount)

        b = web.repo.branchtags()
        l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()]
        for r, n, t in util.sort(l):
            yield {'parity': parity.next(),
                   'branch': t,
                   'node': hex(n),
                   'date': web.repo[n].date()}

    def changelist(**map):
        parity = paritygen(web.stripecount, offset=start-end)
        l = [] # build a list in forward order for efficiency
        for i in xrange(start, end):
            ctx = web.repo[i]
            n = ctx.node()
            hn = hex(n)

            l.insert(0, tmpl(
                'shortlogentry',
                parity=parity.next(),
                author=ctx.user(),
                desc=ctx.description(),
                date=ctx.date(),
                rev=i,
                node=hn,
                tags=webutil.nodetagsdict(web.repo, n),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx)))

        yield l

    cl = web.repo.changelog
    count = len(cl)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=cl.read(cl.tip())[2],
                tags=tagentries,
                branches=branches,
                shortlog=changelist,
                node=hex(cl.tip()),
                archives=web.archivelist("tip"))

|
430 | |||
431 | def filediff(web, req, tmpl): |
|
431 | def filediff(web, req, tmpl): | |
432 | fctx, ctx = None, None |
|
432 | fctx, ctx = None, None | |
433 | try: |
|
433 | try: | |
434 | fctx = webutil.filectx(web.repo, req) |
|
434 | fctx = webutil.filectx(web.repo, req) | |
435 | except LookupError: |
|
435 | except LookupError: | |
436 | ctx = webutil.changectx(web.repo, req) |
|
436 | ctx = webutil.changectx(web.repo, req) | |
437 | path = webutil.cleanpath(web.repo, req.form['file'][0]) |
|
437 | path = webutil.cleanpath(web.repo, req.form['file'][0]) | |
438 | if path not in ctx.files(): |
|
438 | if path not in ctx.files(): | |
439 | raise |
|
439 | raise | |
440 |
|
440 | |||
441 | if fctx is not None: |
|
441 | if fctx is not None: | |
442 | n = fctx.node() |
|
442 | n = fctx.node() | |
443 | path = fctx.path() |
|
443 | path = fctx.path() | |
444 | else: |
|
444 | else: | |
445 | n = ctx.node() |
|
445 | n = ctx.node() | |
446 | # path already defined in except clause |
|
446 | # path already defined in except clause | |
447 |
|
447 | |||
448 | parity = paritygen(web.stripecount) |
|
448 | parity = paritygen(web.stripecount) | |
449 | diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity) |
|
449 | diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity) | |
450 | rename = fctx and webutil.renamelink(fctx) or [] |
|
450 | rename = fctx and webutil.renamelink(fctx) or [] | |
451 | ctx = fctx and fctx or ctx |
|
451 | ctx = fctx and fctx or ctx | |
452 | return tmpl("filediff", |
|
452 | return tmpl("filediff", | |
453 | file=path, |
|
453 | file=path, | |
454 | node=hex(n), |
|
454 | node=hex(n), | |
455 | rev=ctx.rev(), |
|
455 | rev=ctx.rev(), | |
456 | date=ctx.date(), |
|
456 | date=ctx.date(), | |
457 | desc=ctx.description(), |
|
457 | desc=ctx.description(), | |
458 | author=ctx.user(), |
|
458 | author=ctx.user(), | |
459 | rename=rename, |
|
459 | rename=rename, | |
460 | branch=webutil.nodebranchnodefault(ctx), |
|
460 | branch=webutil.nodebranchnodefault(ctx), | |
461 | parent=webutil.parents(ctx), |
|
461 | parent=webutil.parents(ctx), | |
462 | child=webutil.children(ctx), |
|
462 | child=webutil.children(ctx), | |
463 | diff=diffs) |
|
463 | diff=diffs) | |
464 |
|
464 | |||
465 | diff = filediff |
|
465 | diff = filediff | |
466 |
|
466 | |||
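# The "a and b or c" expressions above predate the conditional
# expression added in Python 2.5, which Mercurial could not yet require.
# A sketch of the equivalence (the helper name is illustrative only);
# the old idiom silently picks `c` whenever `b` is falsy, which is why
# renamelink's result is guarded with an explicit `or []` fallback:
def _pick(fctx, ctx):
    return fctx and fctx or ctx    # same as: fctx if fctx else ctx
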
def annotate(web, req, tmpl):
    fctx = webutil.filectx(web.repo, req)
    f = fctx.path()
    parity = paritygen(web.stripecount)

    def annotate(**map):
        last = None
        if binary(fctx.data()):
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            if last != fnode:
                last = fnode

            yield {"parity": parity.next(),
                   "node": hex(f.node()),
                   "rev": f.rev(),
                   "author": f.user(),
                   "desc": f.description(),
                   "file": f.path(),
                   "targetline": targetline,
                   "line": l,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1)}

    return tmpl("fileannotate",
                file=f,
                annotate=annotate,
                path=webutil.up(f),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                rename=webutil.renamelink(fctx),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                permissions=fctx.manifest().flags(f))

def filelog(web, req, tmpl):

    try:
        fctx = webutil.filectx(web.repo, req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        f = webutil.cleanpath(web.repo, req.form['file'][0])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    count = fctx.filerev() + 1
    pagelen = web.maxshortchanges
    start = max(0, fctx.filerev() - pagelen + 1) # first rev on this page
    end = min(count, start + pagelen) # last rev on this page
    parity = paritygen(web.stripecount, offset=start-end)

    def entries(limit=0, **map):
        l = []

        repo = web.repo
        for i in xrange(start, end):
            iterfctx = fctx.filectx(i)

            l.insert(0, {"parity": parity.next(),
                         "filerev": i,
                         "file": f,
                         "node": hex(iterfctx.node()),
                         "author": iterfctx.user(),
                         "date": iterfctx.date(),
                         "rename": webutil.renamelink(iterfctx),
                         "parent": webutil.parents(iterfctx),
                         "child": webutil.children(iterfctx),
                         "desc": iterfctx.description(),
                         "tags": webutil.nodetagsdict(repo, iterfctx.node()),
                         "branch": webutil.nodebranchnodefault(iterfctx),
                         "inbranch": webutil.nodeinbranch(repo, iterfctx),
                         "branches": webutil.nodebranchdict(repo, iterfctx)})

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    nodefunc = lambda x: fctx.filectx(fileid=x)
    nav = webutil.revnavgen(end - 1, pagelen, count, nodefunc)
    return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav,
                entries=lambda **x: entries(limit=0, **x),
                latestentry=lambda **x: entries(limit=1, **x))

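# A worked example of the paging arithmetic above, with assumed values:
# a file that has 60 revisions, viewed at its newest filerev (59), with
# web.maxshortchanges = 10:
#   count = 59 + 1               # 60 file revisions in total
#   start = max(0, 59 - 10 + 1)  # 50, first filerev on this page
#   end   = min(60, 50 + 10)     # 60, one past the last filerev shown
# entries() then walks xrange(50, 60), front-inserting each row so the
# newest revision is yielded first.
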
def archive(web, req, tmpl):
    type_ = req.form.get('type', [None])[0]
    allowed = web.configlist("web", "allow_archive")
    key = req.form['node'][0]

    if type_ not in web.archives:
        msg = 'Unsupported archive type: %s' % type_
        raise ErrorResponse(HTTP_NOT_FOUND, msg)

    if not (type_ in allowed or
            web.configbool("web", "allow" + type_, False)):
        msg = 'Archive type not allowed: %s' % type_
        raise ErrorResponse(HTTP_FORBIDDEN, msg)

    reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
    cnode = web.repo.lookup(key)
    arch_version = key
    if cnode == key or key == 'tip':
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)
    mimetype, artype, extension, encoding = web.archive_specs[type_]
    headers = [
        ('Content-Type', mimetype),
        ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
        ]
    if encoding:
        headers.append(('Content-Encoding', encoding))
    req.header(headers)
    req.respond(HTTP_OK)
    archival.archive(web.repo, req, cnode, artype, prefix=name)
    return []

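# The archive types hgweb knows about are gz, zip and bz2. Per the
# check above, a repository owner can enable them one at a time (for
# example "allowgz = True" in the [web] section of hgrc) or all at once
# with a list such as "allow_archive = gz, zip, bz2".
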
def static(web, req, tmpl):
    fname = req.form['file'][0]
    # a repo owner may set web.static in .hg/hgrc to get any file
    # readable by the user running the CGI script
    static = web.config("web", "static", None, untrusted=False)
    if not static:
        tp = web.templatepath
        if isinstance(tp, str):
            tp = [tp]
        static = [os.path.join(p, 'static') for p in tp]
    return [staticfile(static, fname, req)]

def graph(web, req, tmpl):
    rev = webutil.changectx(web.repo, req).rev()
    bg_height = 39

    revcount = 25
    if 'revcount' in req.form:
        revcount = int(req.form.get('revcount', [revcount])[0])
        tmpl.defaults['sessionvars']['revcount'] = revcount

    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = revcount / 2
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    max_rev = len(web.repo) - 1
    revcount = min(max_rev, revcount)
    revnode = web.repo.changelog.node(rev)
    revnode_hex = hex(revnode)
    uprev = min(max_rev, rev + revcount)
    downrev = max(0, rev - revcount)
    count = len(web.repo)
    changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)

    tree = list(graphmod.graph(web.repo, rev, downrev))
    canvasheight = (len(tree) + 1) * bg_height - 27
    data = []
    for i, (ctx, vtx, edges) in enumerate(tree):
        node = short(ctx.node())
        age = templatefilters.age(ctx.date())
        desc = templatefilters.firstline(ctx.description())
        desc = cgi.escape(desc)
        user = cgi.escape(templatefilters.person(ctx.user()))
        branch = ctx.branch()
        branch = branch, web.repo.branchtags().get(branch) == ctx.node()
        data.append((node, vtx, edges, desc, user, age, branch, ctx.tags()))

    return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
                lessvars=lessvars, morevars=morevars, downrev=downrev,
                canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
                node=revnode_hex, changenav=changenav)
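
# Worked example for the canvas sizing above, with assumed values: a
# full page gives len(tree) = 25 graph rows, so
#   canvasheight = (25 + 1) * 39 - 27 = 987
# pixels: one bg_height stripe per row, plus one extra row, minus a
# small fixed margin.
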
@@ -1,1394 +1,1394 b''
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from i18n import _
from node import hex, nullid, short
import base85, cmdutil, mdiff, util, diffhelpers, copies
import cStringIO, email.Parser, os, re, math
import sys, tempfile, zlib

gitre = re.compile('diff --git a/(.*) b/(.*)')

class PatchError(Exception):
    pass

class NoHunks(PatchError):
    pass

# helper functions

def copyfile(src, dst, basedir):
    abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
    if os.path.exists(absdst):
        raise util.Abort(_("cannot create %s: destination already exists") %
                         dst)

    dstdir = os.path.dirname(absdst)
    if dstdir and not os.path.isdir(dstdir):
        try:
            os.makedirs(dstdir)
        except IOError:
            raise util.Abort(
                _("cannot create %s: unable to create destination directory")
                % dst)

    util.copyfile(abssrc, absdst)

# public functions

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'(---|\*\*\*)[ \t])', re.MULTILINE)

    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend+1:].lstrip()
            subject = subject.replace('\n\t', ' ')
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                ignoretext = False

                ui.debug(_('found patch at byte %d\n') % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch'):
                        ui.debug(_('patch generated by hg export\n'))
                        hgpatch = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatch:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[10:])
                    elif line == '---' and gitsendmail:
                        ignoretext = True
                    if not line.startswith('# ') and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except:
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2

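# A minimal usage sketch for extract(), not part of the module itself;
# the mail text and the ui argument (a mercurial.ui.ui instance) are
# assumed for illustration:
def _extract_example(ui):
    mail = ("From: Jane Doe <jane@example.com>\n"
            "Subject: [PATCH] fix frobnication\n"
            "\n"
            "make it newer\n"
            "\n"
            "diff -r 000000000000 a.txt\n"
            "--- a/a.txt\n"
            "+++ b/a.txt\n"
            "@@ -1,1 +1,1 @@\n"
            "-old\n"
            "+new\n")
    (fname, message, user, date, branch,
     nodeid, p1, p2) = extract(ui, cStringIO.StringIO(mail))
    # fname now names a temp file holding just the diff; per the
    # docstring, the caller must unlink it when done.
    if fname:
        os.unlink(fname)
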
GP_PATCH  = 1 << 0  # we have to run patch
GP_FILTER = 1 << 1  # there's some copy/rename operation
GP_BINARY = 1 << 2  # there's a binary patch

class patchmeta:
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.lineno = 0
        self.binary = False

    def setmode(self, mode):
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

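# setmode() takes the octal file mode git records at the end of
# "new file mode"/"deleted file mode" lines; only the symlink and exec
# bits are kept. A quick sketch (values assumed):
#   gp = patchmeta('tool.sh')
#   gp.setmode(int('100755', 8))  # regular executable file
#   gp.mode                       # -> (0, 64): islink falsy, isexec truthy
#   gp.setmode(int('120000', 8))  # symlink
#   gp.mode[0]                    # -> nonzero, islink truthy
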
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    # Can have a git patch with only metadata, causing patch to complain
    dopatch = 0

    lineno = 0
    for line in lr:
        lineno += 1
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                src, dst = m.group(1, 2)
                gp = patchmeta(dst)
                gp.lineno = lineno
        elif gp:
            if line.startswith('--- '):
                if gp.op in ('COPY', 'RENAME'):
                    dopatch |= GP_FILTER
                gitpatches.append(gp)
                gp = None
                dopatch |= GP_PATCH
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:].rstrip()
            elif line.startswith('rename to '):
                gp.path = line[10:].rstrip()
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:].rstrip()
            elif line.startswith('copy to '):
                gp.path = line[8:].rstrip()
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
                # is the deleted file a symlink?
                gp.setmode(int(line.rstrip()[-6:], 8))
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line.rstrip()[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line.rstrip()[-6:], 8))
            elif line.startswith('GIT binary patch'):
                dopatch |= GP_BINARY
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    if not gitpatches:
        dopatch = GP_PATCH

    return (dopatch, gitpatches)

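# A minimal usage sketch, with an assumed input: readgitpatch() walks
# any iterable of lines and returns one patchmeta per file touched,
# plus the GP_* action flags defined above.
def _readgitpatch_example():
    sample = ['diff --git a/old.txt b/new.txt\n',
              'rename from old.txt\n',
              'rename to new.txt\n',
              '--- a/old.txt\n',
              '+++ b/new.txt\n']
    dopatch, metas = readgitpatch(iter(sample))
    # metas[0].op == 'RENAME', metas[0].oldpath == 'old.txt' and
    # metas[0].path == 'new.txt'; dopatch has GP_PATCH and GP_FILTER set.
    return dopatch, metas
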
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')

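# A quick illustration of what unidesc captures (groups 1, 3, 4 and 6
# feed read_unified_hunk below; the length groups come back as None for
# the short "@@ -start +start @@" form, hence the == None defaults):
#   unidesc.match('@@ -12,5 +12,6 @@').group(1, 3, 4, 6)
#       -> ('12', '5', '12', '6')
#   unidesc.match('@@ -1 +1 @@').group(1, 3, 4, 6)
#       -> ('1', None, '1', None)
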
class patchfile:
    def __init__(self, ui, fname, opener, missing=False):
        self.fname = fname
        self.opener = opener
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = missing
        if not missing:
            try:
                self.lines = self.readlines(fname)
                self.exists = True
            except IOError:
                pass
        else:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def readlines(self, fname):
        fp = self.opener(fname, 'r')
        try:
            return fp.readlines()
        finally:
            fp.close()

    def writelines(self, fname, lines):
        fp = self.opener(fname, 'w')
        try:
            fp.writelines(lines)
        finally:
            fp.close()

    def unlink(self, fname):
        os.unlink(fname)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        def sorter(a, b):
            vala = abs(a - linenum)
            valb = abs(b - linenum)
            return cmp(vala, valb)

        try:
            cand = self.hash[l]
        except KeyError:
            return []

        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(sorter)
        return cand

    def hashlines(self):
        self.hash = {}
        for x in xrange(len(self.lines)):
            s = self.lines[x]
            self.hash.setdefault(s, []).append(x)

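# A minimal sketch of the two helpers above (pf stands for a patchfile
# instance; values assumed):
#   pf.lines = ['a\n', 'b\n', 'a\n']
#   pf.hashlines()           # pf.hash == {'a\n': [0, 2], 'b\n': [1]}
#   pf.findlines('a\n', 2)   # -> [2, 0], candidates closest to line 2 first
#   pf.findlines('c\n', 0)   # -> [], unknown lines give no candidates
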
    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.

        if not self.rej:
            return

        fname = self.fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (len(self.rej), self.hunks, fname))

        def rejlines():
            base = os.path.basename(self.fname)
            yield "--- %s\n+++ %s\n" % (base, base)
            for x in self.rej:
                for l in x.hunk:
                    yield l
                    if l[-1] != '\n':
                        yield "\n\ No newline at end of file\n"

        self.writelines(fname, rejlines())

    def write(self, dest=None):
        if not self.dirty:
            return
        if not dest:
            dest = self.fname
        self.writelines(dest, self.lines)

    def close(self):
        self.write()
        self.write_rej()

    def apply(self, h, reverse):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1
        if reverse:
            h.reverse()

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and h.createfile():
            self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, githunk):
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = 1
            return 0

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        if diffhelpers.testhunk(old, self.lines, start) == 0:
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = 1
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hashlines()
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start

        for fuzzlen in xrange(3):
            for toponly in [ True, False ]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.dirty = 1
                        if fuzzlen:
                            fuzzstr = "with fuzz %d " % fuzzlen
                            f = self.ui.warn
                            self.printfile(True)
                        else:
                            fuzzstr = ""
                            f = self.ui.note
                        offset = l - orig_start - fuzzlen
                        if offset == 1:
                            linestr = "line"
                        else:
                            linestr = "lines"
                        f(_("Hunk #%d succeeded at %d %s(offset %d %s).\n") %
                          (h.number, l+1, fuzzstr, offset, linestr))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(h)
        return -1

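# diffhelpers is a C extension module; a pure-Python sketch of the
# testhunk(old, lines, start) call used by apply() above. 0 means the
# hunk's old lines, minus their one-character '-'/' ' prefixes, match
# the file contents at position `start`:
def _testhunk_sketch(old, lines, start):
    if start + len(old) > len(lines):
        return -1
    for i, l in enumerate(old):
        if lines[start + i] != l[1:]:
            return -1
    return 0
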
class hunk:
    def __init__(self, desc, num, lr, context, create=False, remove=False):
        self.number = num
        self.desc = desc
        self.hunk = [ desc ]
        self.a = []
        self.b = []
        if context:
            self.read_context_hunk(lr)
        else:
            self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        if self.lena == None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb == None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1

474 | def read_context_hunk(self, lr): |
|
474 | def read_context_hunk(self, lr): | |
475 | self.desc = lr.readline() |
|
475 | self.desc = lr.readline() | |
476 | m = contextdesc.match(self.desc) |
|
476 | m = contextdesc.match(self.desc) | |
477 | if not m: |
|
477 | if not m: | |
478 | raise PatchError(_("bad hunk #%d") % self.number) |
|
478 | raise PatchError(_("bad hunk #%d") % self.number) | |
479 | foo, self.starta, foo2, aend, foo3 = m.groups() |
|
479 | foo, self.starta, foo2, aend, foo3 = m.groups() | |
480 | self.starta = int(self.starta) |
|
480 | self.starta = int(self.starta) | |
481 | if aend == None: |
|
481 | if aend == None: | |
482 | aend = self.starta |
|
482 | aend = self.starta | |
483 | self.lena = int(aend) - self.starta |
|
483 | self.lena = int(aend) - self.starta | |
484 | if self.starta: |
|
484 | if self.starta: | |
485 | self.lena += 1 |
|
485 | self.lena += 1 | |
486 | for x in xrange(self.lena): |
|
486 | for x in xrange(self.lena): | |
487 | l = lr.readline() |
|
487 | l = lr.readline() | |
488 | if l.startswith('---'): |
|
488 | if l.startswith('---'): | |
489 | lr.push(l) |
|
489 | lr.push(l) | |
490 | break |
|
490 | break | |
491 | s = l[2:] |
|
491 | s = l[2:] | |
492 | if l.startswith('- ') or l.startswith('! '): |
|
492 | if l.startswith('- ') or l.startswith('! '): | |
493 | u = '-' + s |
|
493 | u = '-' + s | |
494 | elif l.startswith(' '): |
|
494 | elif l.startswith(' '): | |
495 | u = ' ' + s |
|
495 | u = ' ' + s | |
496 | else: |
|
496 | else: | |
497 | raise PatchError(_("bad hunk #%d old text line %d") % |
|
497 | raise PatchError(_("bad hunk #%d old text line %d") % | |
498 | (self.number, x)) |
|
498 | (self.number, x)) | |
499 | self.a.append(u) |
|
499 | self.a.append(u) | |
500 | self.hunk.append(u) |
|
500 | self.hunk.append(u) | |
501 |
|
501 | |||
502 | l = lr.readline() |
|
502 | l = lr.readline() | |
503 | if l.startswith('\ '): |
|
503 | if l.startswith('\ '): | |
504 | s = self.a[-1][:-1] |
|
504 | s = self.a[-1][:-1] | |
505 | self.a[-1] = s |
|
505 | self.a[-1] = s | |
506 | self.hunk[-1] = s |
|
506 | self.hunk[-1] = s | |
507 | l = lr.readline() |
|
507 | l = lr.readline() | |
508 | m = contextdesc.match(l) |
|
508 | m = contextdesc.match(l) | |
509 | if not m: |
|
509 | if not m: | |
510 | raise PatchError(_("bad hunk #%d") % self.number) |
|
510 | raise PatchError(_("bad hunk #%d") % self.number) | |
511 | foo, self.startb, foo2, bend, foo3 = m.groups() |
|
511 | foo, self.startb, foo2, bend, foo3 = m.groups() | |
512 | self.startb = int(self.startb) |
|
512 | self.startb = int(self.startb) | |
513 | if bend == None: |
|
513 | if bend == None: | |
514 | bend = self.startb |
|
514 | bend = self.startb | |
515 | self.lenb = int(bend) - self.startb |
|
515 | self.lenb = int(bend) - self.startb | |
516 | if self.startb: |
|
516 | if self.startb: | |
517 | self.lenb += 1 |
|
517 | self.lenb += 1 | |
518 | hunki = 1 |
|
518 | hunki = 1 | |
519 | for x in xrange(self.lenb): |
|
519 | for x in xrange(self.lenb): | |
520 | l = lr.readline() |
|
520 | l = lr.readline() | |
521 | if l.startswith('\ '): |
|
521 | if l.startswith('\ '): | |
522 | s = self.b[-1][:-1] |
|
522 | s = self.b[-1][:-1] | |
523 | self.b[-1] = s |
|
523 | self.b[-1] = s | |
524 | self.hunk[hunki-1] = s |
|
524 | self.hunk[hunki-1] = s | |
525 | continue |
|
525 | continue | |
526 | if not l: |
|
526 | if not l: | |
527 | lr.push(l) |
|
527 | lr.push(l) | |
528 | break |
|
528 | break | |
529 | s = l[2:] |
|
529 | s = l[2:] | |
530 | if l.startswith('+ ') or l.startswith('! '): |
|
530 | if l.startswith('+ ') or l.startswith('! '): | |
531 | u = '+' + s |
|
531 | u = '+' + s | |
532 | elif l.startswith(' '): |
|
532 | elif l.startswith(' '): | |
533 | u = ' ' + s |
|
533 | u = ' ' + s | |
534 | elif len(self.b) == 0: |
|
534 | elif len(self.b) == 0: | |
535 | # this can happen when the hunk does not add any lines |
|
535 | # this can happen when the hunk does not add any lines | |
536 | lr.push(l) |
|
536 | lr.push(l) | |
537 | break |
|
537 | break | |
538 | else: |
|
538 | else: | |
539 | raise PatchError(_("bad hunk #%d old text line %d") % |
|
539 | raise PatchError(_("bad hunk #%d old text line %d") % | |
540 | (self.number, x)) |
|
540 | (self.number, x)) | |
541 | self.b.append(s) |
|
541 | self.b.append(s) | |
542 | while True: |
|
542 | while True: | |
543 | if hunki >= len(self.hunk): |
|
543 | if hunki >= len(self.hunk): | |
544 | h = "" |
|
544 | h = "" | |
545 | else: |
|
545 | else: | |
546 | h = self.hunk[hunki] |
|
546 | h = self.hunk[hunki] | |
547 | hunki += 1 |
|
547 | hunki += 1 | |
548 | if h == u: |
|
548 | if h == u: | |
549 | break |
|
549 | break | |
550 | elif h.startswith('-'): |
|
550 | elif h.startswith('-'): | |
551 | continue |
|
551 | continue | |
552 | else: |
|
552 | else: | |
553 | self.hunk.insert(hunki-1, u) |
|
553 | self.hunk.insert(hunki-1, u) | |
554 | break |
|
554 | break | |
555 |
|
555 | |||
556 | if not self.a: |
|
556 | if not self.a: | |
557 | # this happens when lines were only added to the hunk |
|
557 | # this happens when lines were only added to the hunk | |
558 | for x in self.hunk: |
|
558 | for x in self.hunk: | |
559 | if x.startswith('-') or x.startswith(' '): |
|
559 | if x.startswith('-') or x.startswith(' '): | |
560 | self.a.append(x) |
|
560 | self.a.append(x) | |
561 | if not self.b: |
|
561 | if not self.b: | |
562 | # this happens when lines were only deleted from the hunk |
|
562 | # this happens when lines were only deleted from the hunk | |
563 | for x in self.hunk: |
|
563 | for x in self.hunk: | |
564 | if x.startswith('+') or x.startswith(' '): |
|
564 | if x.startswith('+') or x.startswith(' '): | |
565 | self.b.append(x[1:]) |
|
565 | self.b.append(x[1:]) | |
566 | # @@ -start,len +start,len @@ |
|
566 | # @@ -start,len +start,len @@ | |
567 | self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, |
|
567 | self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, | |
568 | self.startb, self.lenb) |
|
568 | self.startb, self.lenb) | |
569 | self.hunk[0] = self.desc |
|
569 | self.hunk[0] = self.desc | |
570 |
|
570 | |||
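The block above parses both halves of a context-format hunk. As an illustration (the hunk text below is made up, not from this changeset), this is the input shape it expects and how the '--- 12,14 ----' line turns into startb/lenb:

# '! ' marks changed lines, '+ '/'- ' additions/removals, '  ' plain context
samplehunk = '''***************
*** 12,14 ****
  keep me
! old version of this line
  keep me too
--- 12,14 ----
  keep me
! new version of this line
  keep me too
'''
# contextdesc matches '--- 12,14 ----', giving startb=12 and bend=14;
# lenb = int(bend) - startb, plus one when startb is nonzero: 14 - 12 + 1 = 3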
571 | def reverse(self): |
|
571 | def reverse(self): | |
572 | self.create, self.remove = self.remove, self.create |
|
572 | self.create, self.remove = self.remove, self.create | |
573 | origlena = self.lena |
|
573 | origlena = self.lena | |
574 | origstarta = self.starta |
|
574 | origstarta = self.starta | |
575 | self.lena = self.lenb |
|
575 | self.lena = self.lenb | |
576 | self.starta = self.startb |
|
576 | self.starta = self.startb | |
577 | self.lenb = origlena |
|
577 | self.lenb = origlena | |
578 | self.startb = origstarta |
|
578 | self.startb = origstarta | |
579 | self.a = [] |
|
579 | self.a = [] | |
580 | self.b = [] |
|
580 | self.b = [] | |
581 | # self.hunk[0] is the @@ description |
|
581 | # self.hunk[0] is the @@ description | |
582 | for x in xrange(1, len(self.hunk)): |
|
582 | for x in xrange(1, len(self.hunk)): | |
583 | o = self.hunk[x] |
|
583 | o = self.hunk[x] | |
584 | if o.startswith('-'): |
|
584 | if o.startswith('-'): | |
585 | n = '+' + o[1:] |
|
585 | n = '+' + o[1:] | |
586 | self.b.append(o[1:]) |
|
586 | self.b.append(o[1:]) | |
587 | elif o.startswith('+'): |
|
587 | elif o.startswith('+'): | |
588 | n = '-' + o[1:] |
|
588 | n = '-' + o[1:] | |
589 | self.a.append(n) |
|
589 | self.a.append(n) | |
590 | else: |
|
590 | else: | |
591 | n = o |
|
591 | n = o | |
592 | self.b.append(o[1:]) |
|
592 | self.b.append(o[1:]) | |
593 | self.a.append(o) |
|
593 | self.a.append(o) | |
594 | self.hunk[x] = n |

594 | self.hunk[x] = n | |
595 |
|
595 | |||
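A standalone sketch of the per-line swap reverse() performs, on toy data (not from the original file); note that the rewritten line n, not the original o, is what must be stored back into the hunk:

hunklines = ['@@ -1,2 +1,2 @@\n', ' ctx\n', '-old\n', '+new\n']
a, b = [], []
for x in xrange(1, len(hunklines)):
    o = hunklines[x]
    if o.startswith('-'):       # a removal becomes an addition
        n = '+' + o[1:]
        b.append(o[1:])
    elif o.startswith('+'):     # an addition becomes a removal
        n = '-' + o[1:]
        a.append(n)
    else:                       # context lines are shared by both sides
        n = o
        b.append(o[1:])
        a.append(o)
    hunklines[x] = n
# hunklines is now ['@@ -1,2 +1,2 @@\n', ' ctx\n', '+old\n', '-new\n']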
596 | def fix_newline(self): |
|
596 | def fix_newline(self): | |
597 | diffhelpers.fix_newline(self.hunk, self.a, self.b) |
|
597 | diffhelpers.fix_newline(self.hunk, self.a, self.b) | |
598 |
|
598 | |||
599 | def complete(self): |
|
599 | def complete(self): | |
600 | return len(self.a) == self.lena and len(self.b) == self.lenb |
|
600 | return len(self.a) == self.lena and len(self.b) == self.lenb | |
601 |
|
601 | |||
602 | def createfile(self): |
|
602 | def createfile(self): | |
603 | return self.starta == 0 and self.lena == 0 and self.create |
|
603 | return self.starta == 0 and self.lena == 0 and self.create | |
604 |
|
604 | |||
605 | def rmfile(self): |
|
605 | def rmfile(self): | |
606 | return self.startb == 0 and self.lenb == 0 and self.remove |
|
606 | return self.startb == 0 and self.lenb == 0 and self.remove | |
607 |
|
607 | |||
608 | def fuzzit(self, l, fuzz, toponly): |
|
608 | def fuzzit(self, l, fuzz, toponly): | |
609 | # this removes context lines from the top and bottom of list 'l'. It |
|
609 | # this removes context lines from the top and bottom of list 'l'. It | |
610 | # checks the hunk to make sure only context lines are removed, and then |
|
610 | # checks the hunk to make sure only context lines are removed, and then | |
611 | # returns a new shortened list of lines. |
|
611 | # returns a new shortened list of lines. | |
612 | fuzz = min(fuzz, len(l)-1) |
|
612 | fuzz = min(fuzz, len(l)-1) | |
613 | if fuzz: |
|
613 | if fuzz: | |
614 | top = 0 |
|
614 | top = 0 | |
615 | bot = 0 |
|
615 | bot = 0 | |
616 | hlen = len(self.hunk) |
|
616 | hlen = len(self.hunk) | |
617 | for x in xrange(hlen-1): |
|
617 | for x in xrange(hlen-1): | |
618 | # the hunk starts with the @@ line, so use x+1 |
|
618 | # the hunk starts with the @@ line, so use x+1 | |
619 | if self.hunk[x+1][0] == ' ': |
|
619 | if self.hunk[x+1][0] == ' ': | |
620 | top += 1 |
|
620 | top += 1 | |
621 | else: |
|
621 | else: | |
622 | break |
|
622 | break | |
623 | if not toponly: |
|
623 | if not toponly: | |
624 | for x in xrange(hlen-1): |
|
624 | for x in xrange(hlen-1): | |
625 | if self.hunk[hlen-bot-1][0] == ' ': |
|
625 | if self.hunk[hlen-bot-1][0] == ' ': | |
626 | bot += 1 |
|
626 | bot += 1 | |
627 | else: |
|
627 | else: | |
628 | break |
|
628 | break | |
629 |
|
629 | |||
630 | # top and bot now count context in the hunk |
|
630 | # top and bot now count context in the hunk | |
631 | # adjust them if either one is short |
|
631 | # adjust them if either one is short | |
632 | context = max(top, bot, 3) |
|
632 | context = max(top, bot, 3) | |
633 | if bot < context: |
|
633 | if bot < context: | |
634 | bot = max(0, fuzz - (context - bot)) |
|
634 | bot = max(0, fuzz - (context - bot)) | |
635 | else: |
|
635 | else: | |
636 | bot = min(fuzz, bot) |
|
636 | bot = min(fuzz, bot) | |
637 | if top < context: |
|
637 | if top < context: | |
638 | top = max(0, fuzz - (context - top)) |
|
638 | top = max(0, fuzz - (context - top)) | |
639 | else: |
|
639 | else: | |
640 | top = min(fuzz, top) |
|
640 | top = min(fuzz, top) | |
641 |
|
641 | |||
642 | return l[top:len(l)-bot] |
|
642 | return l[top:len(l)-bot] | |
643 | return l |
|
643 | return l | |
644 |
|
644 | |||
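A worked example of the trimming arithmetic, with toy numbers (not from the original): a hunk whose body has two leading and one trailing context line, fuzzed by 2:

lines = ['ctx1', 'ctx2', 'changed', 'ctx3']  # stand-in for self.a or self.b
top, bot, fuzz = 2, 1, 2                     # as counted from self.hunk
context = max(top, bot, 3)                   # 3
bot = max(0, fuzz - (context - bot))         # 0: trailing context too short
top = max(0, fuzz - (context - top))         # 1: drop one leading line
trimmed = lines[top:len(lines) - bot]        # ['ctx2', 'changed', 'ctx3']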
645 | def old(self, fuzz=0, toponly=False): |
|
645 | def old(self, fuzz=0, toponly=False): | |
646 | return self.fuzzit(self.a, fuzz, toponly) |
|
646 | return self.fuzzit(self.a, fuzz, toponly) | |
647 |
|
647 | |||
648 | def newctrl(self): |
|
648 | def newctrl(self): | |
649 | res = [] |
|
649 | res = [] | |
650 | for x in self.hunk: |
|
650 | for x in self.hunk: | |
651 | c = x[0] |
|
651 | c = x[0] | |
652 | if c == ' ' or c == '+': |
|
652 | if c == ' ' or c == '+': | |
653 | res.append(x) |
|
653 | res.append(x) | |
654 | return res |
|
654 | return res | |
655 |
|
655 | |||
656 | def new(self, fuzz=0, toponly=False): |
|
656 | def new(self, fuzz=0, toponly=False): | |
657 | return self.fuzzit(self.b, fuzz, toponly) |
|
657 | return self.fuzzit(self.b, fuzz, toponly) | |
658 |
|
658 | |||
659 | class githunk(object): |
|
659 | class githunk(object): | |
660 | """A git hunk""" |
|
660 | """A git hunk""" | |
661 | def __init__(self, gitpatch): |
|
661 | def __init__(self, gitpatch): | |
662 | self.gitpatch = gitpatch |
|
662 | self.gitpatch = gitpatch | |
663 | self.text = None |
|
663 | self.text = None | |
664 | self.hunk = [] |
|
664 | self.hunk = [] | |
665 |
|
665 | |||
666 | def createfile(self): |
|
666 | def createfile(self): | |
667 | return self.gitpatch.op in ('ADD', 'RENAME', 'COPY') |
|
667 | return self.gitpatch.op in ('ADD', 'RENAME', 'COPY') | |
668 |
|
668 | |||
669 | def rmfile(self): |
|
669 | def rmfile(self): | |
670 | return self.gitpatch.op == 'DELETE' |
|
670 | return self.gitpatch.op == 'DELETE' | |
671 |
|
671 | |||
672 | def complete(self): |
|
672 | def complete(self): | |
673 | return self.text is not None |
|
673 | return self.text is not None | |
674 |
|
674 | |||
675 | def new(self): |
|
675 | def new(self): | |
676 | return [self.text] |
|
676 | return [self.text] | |
677 |
|
677 | |||
678 | class binhunk(githunk): |
|
678 | class binhunk(githunk): | |
679 | 'A binary patch file. Only understands literals so far.' |
|
679 | 'A binary patch file. Only understands literals so far.' | |
680 | def __init__(self, gitpatch): |
|
680 | def __init__(self, gitpatch): | |
681 | super(binhunk, self).__init__(gitpatch) |
|
681 | super(binhunk, self).__init__(gitpatch) | |
682 | self.hunk = ['GIT binary patch\n'] |
|
682 | self.hunk = ['GIT binary patch\n'] | |
683 |
|
683 | |||
684 | def extract(self, lr): |
|
684 | def extract(self, lr): | |
685 | line = lr.readline() |
|
685 | line = lr.readline() | |
686 | self.hunk.append(line) |
|
686 | self.hunk.append(line) | |
687 | while line and not line.startswith('literal '): |
|
687 | while line and not line.startswith('literal '): | |
688 | line = lr.readline() |
|
688 | line = lr.readline() | |
689 | self.hunk.append(line) |
|
689 | self.hunk.append(line) | |
690 | if not line: |
|
690 | if not line: | |
691 | raise PatchError(_('could not extract binary patch')) |
|
691 | raise PatchError(_('could not extract binary patch')) | |
692 | size = int(line[8:].rstrip()) |
|
692 | size = int(line[8:].rstrip()) | |
693 | dec = [] |
|
693 | dec = [] | |
694 | line = lr.readline() |
|
694 | line = lr.readline() | |
695 | self.hunk.append(line) |
|
695 | self.hunk.append(line) | |
696 | while len(line) > 1: |
|
696 | while len(line) > 1: | |
697 | l = line[0] |
|
697 | l = line[0] | |
698 | if 'A' <= l <= 'Z': |

698 | if 'A' <= l <= 'Z': | |
699 | l = ord(l) - ord('A') + 1 |
|
699 | l = ord(l) - ord('A') + 1 | |
700 | else: |
|
700 | else: | |
701 | l = ord(l) - ord('a') + 27 |
|
701 | l = ord(l) - ord('a') + 27 | |
702 | dec.append(base85.b85decode(line[1:-1])[:l]) |
|
702 | dec.append(base85.b85decode(line[1:-1])[:l]) | |
703 | line = lr.readline() |
|
703 | line = lr.readline() | |
704 | self.hunk.append(line) |
|
704 | self.hunk.append(line) | |
705 | text = zlib.decompress(''.join(dec)) |
|
705 | text = zlib.decompress(''.join(dec)) | |
706 | if len(text) != size: |
|
706 | if len(text) != size: | |
707 | raise PatchError(_('binary patch is %d bytes, not %d') % |
|
707 | raise PatchError(_('binary patch is %d bytes, not %d') % | |
708 | (len(text), size)) |

708 | (len(text), size)) | |
709 | self.text = text |
|
709 | self.text = text | |
710 |
|
710 | |||
711 | class symlinkhunk(githunk): |
|
711 | class symlinkhunk(githunk): | |
712 | """A git symlink hunk""" |
|
712 | """A git symlink hunk""" | |
713 | def __init__(self, gitpatch, hunk): |
|
713 | def __init__(self, gitpatch, hunk): | |
714 | super(symlinkhunk, self).__init__(gitpatch) |
|
714 | super(symlinkhunk, self).__init__(gitpatch) | |
715 | self.hunk = hunk |
|
715 | self.hunk = hunk | |
716 |
|
716 | |||
717 | def complete(self): |
|
717 | def complete(self): | |
718 | return True |
|
718 | return True | |
719 |
|
719 | |||
720 | def fix_newline(self): |
|
720 | def fix_newline(self): | |
721 | return |
|
721 | return | |
722 |
|
722 | |||
723 | def parsefilename(str): |
|
723 | def parsefilename(str): | |
724 | # --- filename \t|space stuff |
|
724 | # --- filename \t|space stuff | |
725 | s = str[4:].rstrip('\r\n') |
|
725 | s = str[4:].rstrip('\r\n') | |
726 | i = s.find('\t') |
|
726 | i = s.find('\t') | |
727 | if i < 0: |
|
727 | if i < 0: | |
728 | i = s.find(' ') |
|
728 | i = s.find(' ') | |
729 | if i < 0: |
|
729 | if i < 0: | |
730 | return s |
|
730 | return s | |
731 | return s[:i] |
|
731 | return s[:i] | |
732 |
|
732 | |||
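A few made-up header lines show the behaviour implemented above (tab wins over space as the terminator):

parsefilename('--- a/src/foo.c\t2008-08-01 12:00:00')  # -> 'a/src/foo.c'
parsefilename('+++ b/src/foo.c Mon Aug  4')            # -> 'b/src/foo.c'
parsefilename('--- /dev/null')                         # -> '/dev/null'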
733 | def selectfile(afile_orig, bfile_orig, hunk, strip, reverse): |
|
733 | def selectfile(afile_orig, bfile_orig, hunk, strip, reverse): | |
734 | def pathstrip(path, count=1): |
|
734 | def pathstrip(path, count=1): | |
735 | pathlen = len(path) |
|
735 | pathlen = len(path) | |
736 | i = 0 |
|
736 | i = 0 | |
737 | if count == 0: |
|
737 | if count == 0: | |
738 | return '', path.rstrip() |
|
738 | return '', path.rstrip() | |
739 | while count > 0: |
|
739 | while count > 0: | |
740 | i = path.find('/', i) |
|
740 | i = path.find('/', i) | |
741 | if i == -1: |
|
741 | if i == -1: | |
742 | raise PatchError(_("unable to strip away %d dirs from %s") % |
|
742 | raise PatchError(_("unable to strip away %d dirs from %s") % | |
743 | (count, path)) |
|
743 | (count, path)) | |
744 | i += 1 |
|
744 | i += 1 | |
745 | # consume '//' in the path |
|
745 | # consume '//' in the path | |
746 | while i < pathlen - 1 and path[i] == '/': |
|
746 | while i < pathlen - 1 and path[i] == '/': | |
747 | i += 1 |
|
747 | i += 1 | |
748 | count -= 1 |
|
748 | count -= 1 | |
749 | return path[:i].lstrip(), path[i:].rstrip() |
|
749 | return path[:i].lstrip(), path[i:].rstrip() | |
750 |
|
750 | |||
751 | nulla = afile_orig == "/dev/null" |
|
751 | nulla = afile_orig == "/dev/null" | |
752 | nullb = bfile_orig == "/dev/null" |
|
752 | nullb = bfile_orig == "/dev/null" | |
753 | abase, afile = pathstrip(afile_orig, strip) |
|
753 | abase, afile = pathstrip(afile_orig, strip) | |
754 | gooda = not nulla and util.lexists(afile) |
|
754 | gooda = not nulla and util.lexists(afile) | |
755 | bbase, bfile = pathstrip(bfile_orig, strip) |
|
755 | bbase, bfile = pathstrip(bfile_orig, strip) | |
756 | if afile == bfile: |
|
756 | if afile == bfile: | |
757 | goodb = gooda |
|
757 | goodb = gooda | |
758 | else: |
|
758 | else: | |
759 | goodb = not nullb and os.path.exists(bfile) |
|
759 | goodb = not nullb and os.path.exists(bfile) | |
760 | createfunc = hunk.createfile |
|
760 | createfunc = hunk.createfile | |
761 | if reverse: |
|
761 | if reverse: | |
762 | createfunc = hunk.rmfile |
|
762 | createfunc = hunk.rmfile | |
763 | missing = not goodb and not gooda and not createfunc() |
|
763 | missing = not goodb and not gooda and not createfunc() | |
764 | # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the |
|
764 | # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the | |
765 | # diff is between a file and its backup. In this case, the original |
|
765 | # diff is between a file and its backup. In this case, the original | |
766 | # file should be patched (see original mpatch code). |
|
766 | # file should be patched (see original mpatch code). | |
767 | isbackup = (abase == bbase and bfile.startswith(afile)) |
|
767 | isbackup = (abase == bbase and bfile.startswith(afile)) | |
768 | fname = None |
|
768 | fname = None | |
769 | if not missing: |
|
769 | if not missing: | |
770 | if gooda and goodb: |
|
770 | if gooda and goodb: | |
771 | fname = isbackup and afile or bfile |
|
771 | fname = isbackup and afile or bfile | |
772 | elif gooda: |
|
772 | elif gooda: | |
773 | fname = afile |
|
773 | fname = afile | |
774 |
|
774 | |||
775 | if not fname: |
|
775 | if not fname: | |
776 | if not nullb: |
|
776 | if not nullb: | |
777 | fname = isbackup and afile or bfile |
|
777 | fname = isbackup and afile or bfile | |
778 | elif not nulla: |
|
778 | elif not nulla: | |
779 | fname = afile |
|
779 | fname = afile | |
780 | else: |
|
780 | else: | |
781 | raise PatchError(_("undefined source and destination files")) |
|
781 | raise PatchError(_("undefined source and destination files")) | |
782 |
|
782 | |||
783 | return fname, missing |
|
783 | return fname, missing | |
784 |
|
784 | |||
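pathstrip above mirrors patch(1)'s -p option: strip that many leading directory components and return the stripped prefix plus the remainder. Some made-up inputs, with strip=1 (the default for hg patches):

# pathstrip('a/lib/util.py', 1)   -> ('a/', 'lib/util.py')
# pathstrip('a//lib/util.py', 1)  -> ('a//', 'lib/util.py')   # '//' consumed
# pathstrip('util.py', 1)         raises PatchError: nothing left to strip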
785 | class linereader: |
|
785 | class linereader: | |
786 | # simple class to allow pushing lines back into the input stream |
|
786 | # simple class to allow pushing lines back into the input stream | |
787 | def __init__(self, fp): |
|
787 | def __init__(self, fp): | |
788 | self.fp = fp |
|
788 | self.fp = fp | |
789 | self.buf = [] |
|
789 | self.buf = [] | |
790 |
|
790 | |||
791 | def push(self, line): |
|
791 | def push(self, line): | |
792 | if line is not None: |
|
792 | if line is not None: | |
793 | self.buf.append(line) |
|
793 | self.buf.append(line) | |
794 |
|
794 | |||
795 | def readline(self): |
|
795 | def readline(self): | |
796 | if self.buf: |
|
796 | if self.buf: | |
797 | return self.buf.pop(0) |
|
797 | return self.buf.pop(0) | |
798 | return self.fp.readline() |
|
798 | return self.fp.readline() | |
799 |
|
799 | |||
800 | def __iter__(self): |
|
800 | def __iter__(self): | |
801 | while True: |

801 | while True: | |
802 | l = self.readline() |
|
802 | l = self.readline() | |
803 | if not l: |
|
803 | if not l: | |
804 | break |
|
804 | break | |
805 | yield l |
|
805 | yield l | |
806 |
|
806 | |||
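A small usage sketch; cStringIO (already used by scangitpatch below) stands in for any file object:

import cStringIO
lr = linereader(cStringIO.StringIO('one\ntwo\n'))
first = lr.readline()   # 'one\n'
lr.push(first)          # hand it back
lr.readline()           # 'one\n' again: pushed lines win over the stream
lr.readline()           # 'two\n'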
807 | def scangitpatch(lr, firstline): |
|
807 | def scangitpatch(lr, firstline): | |
808 | """ |
|
808 | """ | |
809 | Git patches can emit: |
|
809 | Git patches can emit: | |
810 | - rename a to b |
|
810 | - rename a to b | |
811 | - change b |
|
811 | - change b | |
812 | - copy a to c |
|
812 | - copy a to c | |
813 | - change c |
|
813 | - change c | |
814 |
|
814 | |||
815 | We cannot apply this sequence as-is: the renamed 'a' could not be |

815 | We cannot apply this sequence as-is: the renamed 'a' could not be | |
816 | found, because it would have been renamed already. And we cannot copy |

816 | found, because it would have been renamed already. And we cannot copy | |
817 | from 'b' instead because 'b' would have been changed already. So |
|
817 | from 'b' instead because 'b' would have been changed already. So | |
818 | we scan the git patch for copy and rename commands so we can |
|
818 | we scan the git patch for copy and rename commands so we can | |
819 | perform the copies ahead of time. |
|
819 | perform the copies ahead of time. | |
820 | """ |
|
820 | """ | |
821 | pos = 0 |
|
821 | pos = 0 | |
822 | try: |
|
822 | try: | |
823 | pos = lr.fp.tell() |
|
823 | pos = lr.fp.tell() | |
824 | fp = lr.fp |
|
824 | fp = lr.fp | |
825 | except IOError: |
|
825 | except IOError: | |
826 | fp = cStringIO.StringIO(lr.fp.read()) |
|
826 | fp = cStringIO.StringIO(lr.fp.read()) | |
827 | gitlr = linereader(fp) |
|
827 | gitlr = linereader(fp) | |
828 | gitlr.push(firstline) |
|
828 | gitlr.push(firstline) | |
829 | (dopatch, gitpatches) = readgitpatch(gitlr) |
|
829 | (dopatch, gitpatches) = readgitpatch(gitlr) | |
830 | fp.seek(pos) |
|
830 | fp.seek(pos) | |
831 | return dopatch, gitpatches |
|
831 | return dopatch, gitpatches | |
832 |
|
832 | |||
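This pre-scan is what lets the applier copy 'a' to 'c' while 'a' is still pristine. A compressed sketch of the idea, mirroring the 'git' branch of applydiff below (gitpatches is assumed to come from readgitpatch):

for gp in gitpatches:
    if gp.op in ('COPY', 'RENAME'):
        # perform the copy up front; the hunks that modify the target
        # are only applied afterwards, in patch order
        copyfile(gp.oldpath, gp.path, os.getcwd())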
833 | def iterhunks(ui, fp, sourcefile=None): |
|
833 | def iterhunks(ui, fp, sourcefile=None): | |
834 | """Read a patch and yield the following events: |
|
834 | """Read a patch and yield the following events: | |
835 | - ("file", afile, bfile, firsthunk): select a new target file. |
|
835 | - ("file", afile, bfile, firsthunk): select a new target file. | |
836 | - ("hunk", hunk): a new hunk is ready to be applied, follows a |
|
836 | - ("hunk", hunk): a new hunk is ready to be applied, follows a | |
837 | "file" event. |
|
837 | "file" event. | |
838 | - ("git", gitchanges): current diff is in git format, gitchanges |
|
838 | - ("git", gitchanges): current diff is in git format, gitchanges | |
839 | maps filenames to gitpatch records. Unique event. |
|
839 | maps filenames to gitpatch records. Unique event. | |
840 | """ |
|
840 | """ | |
841 | changed = {} |
|
841 | changed = {} | |
842 | current_hunk = None |
|
842 | current_hunk = None | |
843 | afile = "" |
|
843 | afile = "" | |
844 | bfile = "" |
|
844 | bfile = "" | |
845 | state = None |
|
845 | state = None | |
846 | hunknum = 0 |
|
846 | hunknum = 0 | |
847 | emitfile = False |
|
847 | emitfile = False | |
848 | git = False |
|
848 | git = False | |
849 |
|
849 | |||
850 | # our states |
|
850 | # our states | |
851 | BFILE = 1 |
|
851 | BFILE = 1 | |
852 | context = None |
|
852 | context = None | |
853 | lr = linereader(fp) |
|
853 | lr = linereader(fp) | |
854 | dopatch = True |
|
854 | dopatch = True | |
855 | # gitworkdone is True if a git operation (copy, rename, ...) was |
|
855 | # gitworkdone is True if a git operation (copy, rename, ...) was | |
856 | # performed already for the current file. Useful when the file |
|
856 | # performed already for the current file. Useful when the file | |
857 | # section may have no hunk. |
|
857 | # section may have no hunk. | |
858 | gitworkdone = False |
|
858 | gitworkdone = False | |
859 |
|
859 | |||
860 | while True: |
|
860 | while True: | |
861 | newfile = False |
|
861 | newfile = False | |
862 | x = lr.readline() |
|
862 | x = lr.readline() | |
863 | if not x: |
|
863 | if not x: | |
864 | break |
|
864 | break | |
865 | if current_hunk: |
|
865 | if current_hunk: | |
866 | if x.startswith('\ '): |
|
866 | if x.startswith('\ '): | |
867 | current_hunk.fix_newline() |
|
867 | current_hunk.fix_newline() | |
868 | yield 'hunk', current_hunk |
|
868 | yield 'hunk', current_hunk | |
869 | current_hunk = None |
|
869 | current_hunk = None | |
870 | gitworkdone = False |
|
870 | gitworkdone = False | |
871 | if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or |
|
871 | if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or | |
872 | ((context or context is None) and x.startswith('***************')))): |

872 | ((context or context is None) and x.startswith('***************')))): | |
873 | try: |
|
873 | try: | |
874 | if context is None and x.startswith('***************'): |

874 | if context is None and x.startswith('***************'): | |
875 | context = True |
|
875 | context = True | |
876 | gpatch = changed.get(bfile) |
|
876 | gpatch = changed.get(bfile) | |
877 | create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD' |
|
877 | create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD' | |
878 | remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE' |
|
878 | remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE' | |
879 | current_hunk = hunk(x, hunknum + 1, lr, context, create, remove) |
|
879 | current_hunk = hunk(x, hunknum + 1, lr, context, create, remove) | |
880 | if remove: |
|
880 | if remove: | |
881 | gpatch = changed.get(afile[2:]) |
|
881 | gpatch = changed.get(afile[2:]) | |
882 | if gpatch and gpatch.mode[0]: |
|
882 | if gpatch and gpatch.mode[0]: | |
883 | current_hunk = symlinkhunk(gpatch, current_hunk) |
|
883 | current_hunk = symlinkhunk(gpatch, current_hunk) | |
884 | except PatchError, err: |
|
884 | except PatchError, err: | |
885 | ui.debug(str(err) + '\n') |

885 | ui.debug(str(err) + '\n') | |
886 | current_hunk = None |
|
886 | current_hunk = None | |
887 | continue |
|
887 | continue | |
888 | hunknum += 1 |
|
888 | hunknum += 1 | |
889 | if emitfile: |
|
889 | if emitfile: | |
890 | emitfile = False |
|
890 | emitfile = False | |
891 | yield 'file', (afile, bfile, current_hunk) |
|
891 | yield 'file', (afile, bfile, current_hunk) | |
892 | elif state == BFILE and x.startswith('GIT binary patch'): |
|
892 | elif state == BFILE and x.startswith('GIT binary patch'): | |
893 | current_hunk = binhunk(changed[bfile]) |
|
893 | current_hunk = binhunk(changed[bfile]) | |
894 | hunknum += 1 |
|
894 | hunknum += 1 | |
895 | if emitfile: |
|
895 | if emitfile: | |
896 | emitfile = False |
|
896 | emitfile = False | |
897 | yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk) |
|
897 | yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk) | |
898 | current_hunk.extract(lr) |
|
898 | current_hunk.extract(lr) | |
899 | elif x.startswith('diff --git'): |
|
899 | elif x.startswith('diff --git'): | |
900 | # check for git diff, scanning the whole patch file if needed |
|
900 | # check for git diff, scanning the whole patch file if needed | |
901 | m = gitre.match(x) |
|
901 | m = gitre.match(x) | |
902 | if m: |
|
902 | if m: | |
903 | afile, bfile = m.group(1, 2) |
|
903 | afile, bfile = m.group(1, 2) | |
904 | if not git: |
|
904 | if not git: | |
905 | git = True |
|
905 | git = True | |
906 | dopatch, gitpatches = scangitpatch(lr, x) |
|
906 | dopatch, gitpatches = scangitpatch(lr, x) | |
907 | yield 'git', gitpatches |
|
907 | yield 'git', gitpatches | |
908 | for gp in gitpatches: |
|
908 | for gp in gitpatches: | |
909 | changed[gp.path] = gp |
|
909 | changed[gp.path] = gp | |
910 | # else error? |
|
910 | # else error? | |
911 | # copy/rename + modify should modify target, not source |
|
911 | # copy/rename + modify should modify target, not source | |
912 | gp = changed.get(bfile) |
|
912 | gp = changed.get(bfile) | |
913 | if gp and gp.op in ('COPY', 'DELETE', 'RENAME'): |
|
913 | if gp and gp.op in ('COPY', 'DELETE', 'RENAME'): | |
914 | afile = bfile |
|
914 | afile = bfile | |
915 | gitworkdone = True |
|
915 | gitworkdone = True | |
916 | newfile = True |
|
916 | newfile = True | |
917 | elif x.startswith('---'): |
|
917 | elif x.startswith('---'): | |
918 | # check for a unified diff |
|
918 | # check for a unified diff | |
919 | l2 = lr.readline() |
|
919 | l2 = lr.readline() | |
920 | if not l2.startswith('+++'): |
|
920 | if not l2.startswith('+++'): | |
921 | lr.push(l2) |
|
921 | lr.push(l2) | |
922 | continue |
|
922 | continue | |
923 | newfile = True |
|
923 | newfile = True | |
924 | context = False |
|
924 | context = False | |
925 | afile = parsefilename(x) |
|
925 | afile = parsefilename(x) | |
926 | bfile = parsefilename(l2) |
|
926 | bfile = parsefilename(l2) | |
927 | elif x.startswith('***'): |
|
927 | elif x.startswith('***'): | |
928 | # check for a context diff |
|
928 | # check for a context diff | |
929 | l2 = lr.readline() |
|
929 | l2 = lr.readline() | |
930 | if not l2.startswith('---'): |
|
930 | if not l2.startswith('---'): | |
931 | lr.push(l2) |
|
931 | lr.push(l2) | |
932 | continue |
|
932 | continue | |
933 | l3 = lr.readline() |
|
933 | l3 = lr.readline() | |
934 | lr.push(l3) |
|
934 | lr.push(l3) | |
935 | if not l3.startswith("***************"): |
|
935 | if not l3.startswith("***************"): | |
936 | lr.push(l2) |
|
936 | lr.push(l2) | |
937 | continue |
|
937 | continue | |
938 | newfile = True |
|
938 | newfile = True | |
939 | context = True |
|
939 | context = True | |
940 | afile = parsefilename(x) |
|
940 | afile = parsefilename(x) | |
941 | bfile = parsefilename(l2) |
|
941 | bfile = parsefilename(l2) | |
942 |
|
942 | |||
943 | if newfile: |
|
943 | if newfile: | |
944 | emitfile = True |
|
944 | emitfile = True | |
945 | state = BFILE |
|
945 | state = BFILE | |
946 | hunknum = 0 |
|
946 | hunknum = 0 | |
947 | if current_hunk: |
|
947 | if current_hunk: | |
948 | if current_hunk.complete(): |
|
948 | if current_hunk.complete(): | |
949 | yield 'hunk', current_hunk |
|
949 | yield 'hunk', current_hunk | |
950 | else: |
|
950 | else: | |
951 | raise PatchError(_("malformed patch %s %s") % (afile, |
|
951 | raise PatchError(_("malformed patch %s %s") % (afile, | |
952 | current_hunk.desc)) |
|
952 | current_hunk.desc)) | |
953 |
|
953 | |||
954 | if hunknum == 0 and dopatch and not gitworkdone: |
|
954 | if hunknum == 0 and dopatch and not gitworkdone: | |
955 | raise NoHunks |
|
955 | raise NoHunks | |
956 |
|
956 | |||
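A minimal consumer of the event stream, assuming ui and an open patch file fp (applydiff below is the real consumer):

touched = []
for state, values in iterhunks(ui, fp):
    if state == 'file':
        afile, bfile, firsthunk = values
        touched.append((afile, bfile))
    elif state == 'git':
        gitmeta = values    # copy/rename/mode metadata from the header
    # each 'hunk' event carries one hunk object, in application order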
957 | def applydiff(ui, fp, changed, strip=1, sourcefile=None, reverse=False): |
|
957 | def applydiff(ui, fp, changed, strip=1, sourcefile=None, reverse=False): | |
958 | """reads a patch from fp and tries to apply it. The dict 'changed' is |
|
958 | """reads a patch from fp and tries to apply it. The dict 'changed' is | |
959 | filled in with all of the filenames changed by the patch. Returns 0 |
|
959 | filled in with all of the filenames changed by the patch. Returns 0 | |
960 | for a clean patch, -1 if any rejects were found and 1 if there was |
|
960 | for a clean patch, -1 if any rejects were found and 1 if there was | |
961 | any fuzz.""" |
|
961 | any fuzz.""" | |
962 |
|
962 | |||
963 | rejects = 0 |
|
963 | rejects = 0 | |
964 | err = 0 |
|
964 | err = 0 | |
965 | current_file = None |
|
965 | current_file = None | |
966 | gitpatches = None |
|
966 | gitpatches = None | |
967 | opener = util.opener(os.getcwd()) |
|
967 | opener = util.opener(os.getcwd()) | |
968 |
|
968 | |||
969 | def closefile(): |
|
969 | def closefile(): | |
970 | if not current_file: |
|
970 | if not current_file: | |
971 | return 0 |
|
971 | return 0 | |
972 | current_file.close() |
|
972 | current_file.close() | |
973 | return len(current_file.rej) |
|
973 | return len(current_file.rej) | |
974 |
|
974 | |||
975 | for state, values in iterhunks(ui, fp, sourcefile): |
|
975 | for state, values in iterhunks(ui, fp, sourcefile): | |
976 | if state == 'hunk': |
|
976 | if state == 'hunk': | |
977 | if not current_file: |
|
977 | if not current_file: | |
978 | continue |
|
978 | continue | |
979 | current_hunk = values |
|
979 | current_hunk = values | |
980 | ret = current_file.apply(current_hunk, reverse) |
|
980 | ret = current_file.apply(current_hunk, reverse) | |
981 | if ret >= 0: |
|
981 | if ret >= 0: | |
982 | changed.setdefault(current_file.fname, None) |
|
982 | changed.setdefault(current_file.fname, None) | |
983 | if ret > 0: |
|
983 | if ret > 0: | |
984 | err = 1 |
|
984 | err = 1 | |
985 | elif state == 'file': |
|
985 | elif state == 'file': | |
986 | rejects += closefile() |
|
986 | rejects += closefile() | |
987 | afile, bfile, first_hunk = values |
|
987 | afile, bfile, first_hunk = values | |
988 | try: |
|
988 | try: | |
989 | if sourcefile: |
|
989 | if sourcefile: | |
990 | current_file = patchfile(ui, sourcefile, opener) |
|
990 | current_file = patchfile(ui, sourcefile, opener) | |
991 | else: |
|
991 | else: | |
992 | current_file, missing = selectfile(afile, bfile, first_hunk, |
|
992 | current_file, missing = selectfile(afile, bfile, first_hunk, | |
993 | strip, reverse) |
|
993 | strip, reverse) | |
994 | current_file = patchfile(ui, current_file, opener, missing) |
|
994 | current_file = patchfile(ui, current_file, opener, missing) | |
995 | except PatchError, err: |
|
995 | except PatchError, err: | |
996 | ui.warn(str(err) + '\n') |
|
996 | ui.warn(str(err) + '\n') | |
997 | current_file, current_hunk = None, None |
|
997 | current_file, current_hunk = None, None | |
998 | rejects += 1 |
|
998 | rejects += 1 | |
999 | continue |
|
999 | continue | |
1000 | elif state == 'git': |
|
1000 | elif state == 'git': | |
1001 | gitpatches = values |
|
1001 | gitpatches = values | |
1002 | cwd = os.getcwd() |
|
1002 | cwd = os.getcwd() | |
1003 | for gp in gitpatches: |
|
1003 | for gp in gitpatches: | |
1004 | if gp.op in ('COPY', 'RENAME'): |
|
1004 | if gp.op in ('COPY', 'RENAME'): | |
1005 | copyfile(gp.oldpath, gp.path, cwd) |
|
1005 | copyfile(gp.oldpath, gp.path, cwd) | |
1006 | changed[gp.path] = gp |
|
1006 | changed[gp.path] = gp | |
1007 | else: |
|
1007 | else: | |
1008 | raise util.Abort(_('unsupported parser state: %s') % state) |
|
1008 | raise util.Abort(_('unsupported parser state: %s') % state) | |
1009 |
|
1009 | |||
1010 | rejects += closefile() |
|
1010 | rejects += closefile() | |
1011 |
|
1011 | |||
1012 | if rejects: |
|
1012 | if rejects: | |
1013 | return -1 |
|
1013 | return -1 | |
1014 | return err |
|
1014 | return err | |
1015 |
|
1015 | |||
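A hedged sketch of dispatching on that return convention; the messages are made up (internalpatch below performs the same ret < 0 check):

ret = applydiff(ui, fp, changed, strip=1)
if ret < 0:
    ui.warn('patch left rejects behind\n')    # hypothetical wording
elif ret > 0:
    ui.status('patch applied with fuzz\n')    # hypothetical wording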
1016 | def diffopts(ui, opts={}, untrusted=False): |
|
1016 | def diffopts(ui, opts={}, untrusted=False): | |
1017 | def get(key, name=None, getter=ui.configbool): |
|
1017 | def get(key, name=None, getter=ui.configbool): | |
1018 | return (opts.get(key) or |
|
1018 | return (opts.get(key) or | |
1019 | getter('diff', name or key, None, untrusted=untrusted)) |
|
1019 | getter('diff', name or key, None, untrusted=untrusted)) | |
1020 | return mdiff.diffopts( |
|
1020 | return mdiff.diffopts( | |
1021 | text=opts.get('text'), |
|
1021 | text=opts.get('text'), | |
1022 | git=get('git'), |
|
1022 | git=get('git'), | |
1023 | nodates=get('nodates'), |
|
1023 | nodates=get('nodates'), | |
1024 | showfunc=get('show_function', 'showfunc'), |
|
1024 | showfunc=get('show_function', 'showfunc'), | |
1025 | ignorews=get('ignore_all_space', 'ignorews'), |
|
1025 | ignorews=get('ignore_all_space', 'ignorews'), | |
1026 | ignorewsamount=get('ignore_space_change', 'ignorewsamount'), |
|
1026 | ignorewsamount=get('ignore_space_change', 'ignorewsamount'), | |
1027 | ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), |
|
1027 | ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), | |
1028 | context=get('unified', getter=ui.config)) |
|
1028 | context=get('unified', getter=ui.config)) | |
1029 |
|
1029 | |||
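The 'or' chain in get() means an explicit option short-circuits the [diff] section of hgrc, while a false option value falls through to the config. An illustration with made-up settings:

# with 'git = True' under [diff] in hgrc and no command-line flag:
opts = diffopts(ui)                   # opts.git is True, from the config
# a true option value wins without consulting hgrc:
opts = diffopts(ui, {'showfunc': 1})  # showfunc enabled regardless of config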
1030 | def updatedir(ui, repo, patches, similarity=0): |
|
1030 | def updatedir(ui, repo, patches, similarity=0): | |
1031 | '''Update dirstate after patch application according to metadata''' |
|
1031 | '''Update dirstate after patch application according to metadata''' | |
1032 | if not patches: |
|
1032 | if not patches: | |
1033 | return |
|
1033 | return | |
1034 | copies = [] |
|
1034 | copies = [] | |
1035 | removes = {} |
|
1035 | removes = {} | |
1036 | cfiles = patches.keys() |
|
1036 | cfiles = patches.keys() | |
1037 | cwd = repo.getcwd() |
|
1037 | cwd = repo.getcwd() | |
1038 | if cwd: |
|
1038 | if cwd: | |
1039 | cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()] |
|
1039 | cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()] | |
1040 | for f in patches: |
|
1040 | for f in patches: | |
1041 | gp = patches[f] |
|
1041 | gp = patches[f] | |
1042 | if not gp: |
|
1042 | if not gp: | |
1043 | continue |
|
1043 | continue | |
1044 | if gp.op == 'RENAME': |
|
1044 | if gp.op == 'RENAME': | |
1045 | copies.append((gp.oldpath, gp.path)) |
|
1045 | copies.append((gp.oldpath, gp.path)) | |
1046 | removes[gp.oldpath] = 1 |
|
1046 | removes[gp.oldpath] = 1 | |
1047 | elif gp.op == 'COPY': |
|
1047 | elif gp.op == 'COPY': | |
1048 | copies.append((gp.oldpath, gp.path)) |
|
1048 | copies.append((gp.oldpath, gp.path)) | |
1049 | elif gp.op == 'DELETE': |
|
1049 | elif gp.op == 'DELETE': | |
1050 | removes[gp.path] = 1 |
|
1050 | removes[gp.path] = 1 | |
1051 | for src, dst in copies: |
|
1051 | for src, dst in copies: | |
1052 | repo.copy(src, dst) |
|
1052 | repo.copy(src, dst) | |
1053 | removes = removes.keys() |
|
1053 | removes = removes.keys() | |
1054 | if (not similarity) and removes: |
|
1054 | if (not similarity) and removes: | |
1055 | repo.remove(util.sort(removes), True) |
|
1055 | repo.remove(util.sort(removes), True) | |
1056 | for f in patches: |
|
1056 | for f in patches: | |
1057 | gp = patches[f] |
|
1057 | gp = patches[f] | |
1058 | if gp and gp.mode: |
|
1058 | if gp and gp.mode: | |
1059 | islink, isexec = gp.mode |
|
1059 | islink, isexec = gp.mode | |
1060 | dst = repo.wjoin(gp.path) |
|
1060 | dst = repo.wjoin(gp.path) | |
1061 | # patch won't create empty files |
|
1061 | # patch won't create empty files | |
1062 | if gp.op == 'ADD' and not os.path.exists(dst): |
|
1062 | if gp.op == 'ADD' and not os.path.exists(dst): | |
1063 | flags = (isexec and 'x' or '') + (islink and 'l' or '') |
|
1063 | flags = (isexec and 'x' or '') + (islink and 'l' or '') | |
1064 | repo.wwrite(gp.path, '', flags) |
|
1064 | repo.wwrite(gp.path, '', flags) | |
1065 | elif gp.op != 'DELETE': |
|
1065 | elif gp.op != 'DELETE': | |
1066 | util.set_flags(dst, islink, isexec) |
|
1066 | util.set_flags(dst, islink, isexec) | |
1067 | cmdutil.addremove(repo, cfiles, similarity=similarity) |
|
1067 | cmdutil.addremove(repo, cfiles, similarity=similarity) | |
1068 | files = patches.keys() |
|
1068 | files = patches.keys() | |
1069 | files.extend([r for r in removes if r not in files]) |
|
1069 | files.extend([r for r in removes if r not in files]) | |
1070 | return util.sort(files) |
|
1070 | return util.sort(files) | |
1071 |
|
1071 | |||
1072 | def externalpatch(patcher, args, patchname, ui, strip, cwd, files): |
|
1072 | def externalpatch(patcher, args, patchname, ui, strip, cwd, files): | |
1073 | """use <patcher> to apply <patchname> to the working directory. |
|
1073 | """use <patcher> to apply <patchname> to the working directory. | |
1074 | returns whether patch was applied with fuzz factor.""" |
|
1074 | returns whether patch was applied with fuzz factor.""" | |
1075 |
|
1075 | |||
1076 | fuzz = False |
|
1076 | fuzz = False | |
1077 | if cwd: |
|
1077 | if cwd: | |
1078 | args.append('-d %s' % util.shellquote(cwd)) |
|
1078 | args.append('-d %s' % util.shellquote(cwd)) | |
1079 | fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, |
|
1079 | fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, | |
1080 | util.shellquote(patchname))) |
|
1080 | util.shellquote(patchname))) | |
1081 |
|
1081 | |||
1082 | for line in fp: |
|
1082 | for line in fp: | |
1083 | line = line.rstrip() |
|
1083 | line = line.rstrip() | |
1084 | ui.note(line + '\n') |
|
1084 | ui.note(line + '\n') | |
1085 | if line.startswith('patching file '): |
|
1085 | if line.startswith('patching file '): | |
1086 | pf = util.parse_patch_output(line) |
|
1086 | pf = util.parse_patch_output(line) | |
1087 | printed_file = False |
|
1087 | printed_file = False | |
1088 | files.setdefault(pf, None) |
|
1088 | files.setdefault(pf, None) | |
1089 | elif line.find('with fuzz') >= 0: |
|
1089 | elif line.find('with fuzz') >= 0: | |
1090 | fuzz = True |
|
1090 | fuzz = True | |
1091 | if not printed_file: |
|
1091 | if not printed_file: | |
1092 | ui.warn(pf + '\n') |
|
1092 | ui.warn(pf + '\n') | |
1093 | printed_file = True |
|
1093 | printed_file = True | |
1094 | ui.warn(line + '\n') |
|
1094 | ui.warn(line + '\n') | |
1095 | elif line.find('saving rejects to file') >= 0: |
|
1095 | elif line.find('saving rejects to file') >= 0: | |
1096 | ui.warn(line + '\n') |
|
1096 | ui.warn(line + '\n') | |
1097 | elif line.find('FAILED') >= 0: |
|
1097 | elif line.find('FAILED') >= 0: | |
1098 | if not printed_file: |
|
1098 | if not printed_file: | |
1099 | ui.warn(pf + '\n') |
|
1099 | ui.warn(pf + '\n') | |
1100 | printed_file = True |
|
1100 | printed_file = True | |
1101 | ui.warn(line + '\n') |
|
1101 | ui.warn(line + '\n') | |
1102 | code = fp.close() |
|
1102 | code = fp.close() | |
1103 | if code: |
|
1103 | if code: | |
1104 | raise PatchError(_("patch command failed: %s") % |
|
1104 | raise PatchError(_("patch command failed: %s") % | |
1105 | util.explain_exit(code)[0]) |
|
1105 | util.explain_exit(code)[0]) | |
1106 | return fuzz |
|
1106 | return fuzz | |
1107 |
|
1107 | |||
1108 | def internalpatch(patchobj, ui, strip, cwd, files={}): |
|
1108 | def internalpatch(patchobj, ui, strip, cwd, files={}): | |
1109 | """use builtin patch to apply <patchobj> to the working directory. |
|
1109 | """use builtin patch to apply <patchobj> to the working directory. | |
1110 | returns whether patch was applied with fuzz factor.""" |
|
1110 | returns whether patch was applied with fuzz factor.""" | |
1111 | try: |
|
1111 | try: | |
1112 | fp = file(patchobj, 'rb') |
|
1112 | fp = file(patchobj, 'rb') | |
1113 | except TypeError: |
|
1113 | except TypeError: | |
1114 | fp = patchobj |
|
1114 | fp = patchobj | |
1115 | if cwd: |
|
1115 | if cwd: | |
1116 | curdir = os.getcwd() |
|
1116 | curdir = os.getcwd() | |
1117 | os.chdir(cwd) |
|
1117 | os.chdir(cwd) | |
1118 | try: |
|
1118 | try: | |
1119 | ret = applydiff(ui, fp, files, strip=strip) |
|
1119 | ret = applydiff(ui, fp, files, strip=strip) | |
1120 | finally: |
|
1120 | finally: | |
1121 | if cwd: |
|
1121 | if cwd: | |
1122 | os.chdir(curdir) |
|
1122 | os.chdir(curdir) | |
1123 | if ret < 0: |
|
1123 | if ret < 0: | |
1124 | raise PatchError |
|
1124 | raise PatchError | |
1125 | return ret > 0 |
|
1125 | return ret > 0 | |
1126 |
|
1126 | |||
1127 | def patch(patchname, ui, strip=1, cwd=None, files={}): |
|
1127 | def patch(patchname, ui, strip=1, cwd=None, files={}): | |
1128 | """apply <patchname> to the working directory. |
|
1128 | """apply <patchname> to the working directory. | |
1129 | returns whether patch was applied with fuzz factor.""" |
|
1129 | returns whether patch was applied with fuzz factor.""" | |
1130 | patcher = ui.config('ui', 'patch') |
|
1130 | patcher = ui.config('ui', 'patch') | |
1131 | args = [] |
|
1131 | args = [] | |
1132 | try: |
|
1132 | try: | |
1133 | if patcher: |
|
1133 | if patcher: | |
1134 | return externalpatch(patcher, args, patchname, ui, strip, cwd, |
|
1134 | return externalpatch(patcher, args, patchname, ui, strip, cwd, | |
1135 | files) |
|
1135 | files) | |
1136 | else: |
|
1136 | else: | |
1137 | try: |
|
1137 | try: | |
1138 | return internalpatch(patchname, ui, strip, cwd, files) |
|
1138 | return internalpatch(patchname, ui, strip, cwd, files) | |
1139 | except NoHunks: |
|
1139 | except NoHunks: | |
1140 | patcher = util.find_exe('gpatch') or util.find_exe('patch') or 'patch' |
|
1140 | patcher = util.find_exe('gpatch') or util.find_exe('patch') or 'patch' | |
1141 | ui.debug(_('no valid hunks found; trying with %r instead\n') % |
|
1141 | ui.debug(_('no valid hunks found; trying with %r instead\n') % | |
1142 | patcher) |
|
1142 | patcher) | |
1143 | if util.needbinarypatch(): |
|
1143 | if util.needbinarypatch(): | |
1144 | args.append('--binary') |
|
1144 | args.append('--binary') | |
1145 | return externalpatch(patcher, args, patchname, ui, strip, cwd, |
|
1145 | return externalpatch(patcher, args, patchname, ui, strip, cwd, | |
1146 | files) |
|
1146 | files) | |
1147 | except PatchError, err: |
|
1147 | except PatchError, err: | |
1148 | s = str(err) |
|
1148 | s = str(err) | |
1149 | if s: |
|
1149 | if s: | |
1150 | raise util.Abort(s) |
|
1150 | raise util.Abort(s) | |
1151 | else: |
|
1151 | else: | |
1152 | raise util.Abort(_('patch failed to apply')) |
|
1152 | raise util.Abort(_('patch failed to apply')) | |
1153 |
|
1153 | |||
1154 | def b85diff(to, tn): |
|
1154 | def b85diff(to, tn): | |
1155 | '''print base85-encoded binary diff''' |
|
1155 | '''print base85-encoded binary diff''' | |
1156 | def gitindex(text): |
|
1156 | def gitindex(text): | |
1157 | if not text: |
|
1157 | if not text: | |
1158 | return '0' * 40 |
|
1158 | return '0' * 40 | |
1159 | l = len(text) |
|
1159 | l = len(text) | |
1160 | s = util.sha1('blob %d\0' % l) |
|
1160 | s = util.sha1('blob %d\0' % l) | |
1161 | s.update(text) |
|
1161 | s.update(text) | |
1162 | return s.hexdigest() |
|
1162 | return s.hexdigest() | |
1163 |
|
1163 | |||
1164 | def fmtline(line): |
|
1164 | def fmtline(line): | |
1165 | l = len(line) |
|
1165 | l = len(line) | |
1166 | if l <= 26: |
|
1166 | if l <= 26: | |
1167 | l = chr(ord('A') + l - 1) |
|
1167 | l = chr(ord('A') + l - 1) | |
1168 | else: |
|
1168 | else: | |
1169 | l = chr(l - 26 + ord('a') - 1) |
|
1169 | l = chr(l - 26 + ord('a') - 1) | |
1170 | return '%c%s\n' % (l, base85.b85encode(line, True)) |
|
1170 | return '%c%s\n' % (l, base85.b85encode(line, True)) | |
1171 |
|
1171 | |||
1172 | def chunk(text, csize=52): |
|
1172 | def chunk(text, csize=52): | |
1173 | l = len(text) |
|
1173 | l = len(text) | |
1174 | i = 0 |
|
1174 | i = 0 | |
1175 | while i < l: |
|
1175 | while i < l: | |
1176 | yield text[i:i+csize] |
|
1176 | yield text[i:i+csize] | |
1177 | i += csize |
|
1177 | i += csize | |
1178 |
|
1178 | |||
1179 | tohash = gitindex(to) |
|
1179 | tohash = gitindex(to) | |
1180 | tnhash = gitindex(tn) |
|
1180 | tnhash = gitindex(tn) | |
1181 | if tohash == tnhash: |
|
1181 | if tohash == tnhash: | |
1182 | return "" |
|
1182 | return "" | |
1183 |
|
1183 | |||
1184 | # TODO: deltas |
|
1184 | # TODO: deltas | |
1185 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % |
|
1185 | ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % | |
1186 | (tohash, tnhash, len(tn))] |
|
1186 | (tohash, tnhash, len(tn))] | |
1187 | for l in chunk(zlib.compress(tn)): |
|
1187 | for l in chunk(zlib.compress(tn)): | |
1188 | ret.append(fmtline(l)) |
|
1188 | ret.append(fmtline(l)) | |
1189 | ret.append('\n') |
|
1189 | ret.append('\n') | |
1190 | return ''.join(ret) |
|
1190 | return ''.join(ret) | |
1191 |
|
1191 | |||
1192 | def _addmodehdr(header, omode, nmode): |
|
1192 | def _addmodehdr(header, omode, nmode): | |
1193 | if omode != nmode: |
|
1193 | if omode != nmode: | |
1194 | header.append('old mode %s\n' % omode) |
|
1194 | header.append('old mode %s\n' % omode) | |
1195 | header.append('new mode %s\n' % nmode) |
|
1195 | header.append('new mode %s\n' % nmode) | |
1196 |
|
1196 | |||
1197 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None): |
|
1197 | def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None): | |
1198 | '''yields diff of changes to files between two nodes, or node and |
|
1198 | '''yields diff of changes to files between two nodes, or node and | |
1199 | working directory. |
|
1199 | working directory. | |
1200 |
|
1200 | |||
1201 | if node1 is None, use first dirstate parent instead. |
|
1201 | if node1 is None, use first dirstate parent instead. | |
1202 | if node2 is None, compare node1 with working directory.''' |
|
1202 | if node2 is None, compare node1 with working directory.''' | |
1203 |
|
1203 | |||
1204 | if opts is None: |
|
1204 | if opts is None: | |
1205 | opts = mdiff.defaultopts |
|
1205 | opts = mdiff.defaultopts | |
1206 |
|
1206 | |||
1207 | if not node1: |
|
1207 | if not node1: | |
1208 | node1 = repo.dirstate.parents()[0] |
|
1208 | node1 = repo.dirstate.parents()[0] | |
1209 |
|
1209 | |||
1210 | flcache = {} |
|
1210 | flcache = {} | |
1211 | def getfilectx(f, ctx): |
|
1211 | def getfilectx(f, ctx): | |
1212 | flctx = ctx.filectx(f, filelog=flcache.get(f)) |
|
1212 | flctx = ctx.filectx(f, filelog=flcache.get(f)) | |
1213 | if f not in flcache: |
|
1213 | if f not in flcache: | |
1214 | flcache[f] = flctx._filelog |
|
1214 | flcache[f] = flctx._filelog | |
1215 | return flctx |
|
1215 | return flctx | |
1216 |
|
1216 | |||
1217 | ctx1 = repo[node1] |
|
1217 | ctx1 = repo[node1] | |
1218 | ctx2 = repo[node2] |
|
1218 | ctx2 = repo[node2] | |
1219 |
|
1219 | |||
1220 | if not changes: |
|
1220 | if not changes: | |
1221 | changes = repo.status(ctx1, ctx2, match=match) |
|
1221 | changes = repo.status(ctx1, ctx2, match=match) | |
1222 | modified, added, removed = changes[:3] |
|
1222 | modified, added, removed = changes[:3] | |
1223 |
|
1223 | |||
1224 | if not modified and not added and not removed: |
|
1224 | if not modified and not added and not removed: | |
1225 | return |
|
1225 | return | |
1226 |
|
1226 | |||
1227 | date1 = util.datestr(ctx1.date()) |
|
1227 | date1 = util.datestr(ctx1.date()) | |
1228 | man1 = ctx1.manifest() |
|
1228 | man1 = ctx1.manifest() | |
1229 |
|
1229 | |||
1230 | if repo.ui.quiet: |
|
1230 | if repo.ui.quiet: | |
1231 | r = None |
|
1231 | r = None | |
1232 | else: |
|
1232 | else: | |
1233 | hexfunc = repo.ui.debugflag and hex or short |
|
1233 | hexfunc = repo.ui.debugflag and hex or short | |
1234 | r = [hexfunc(node) for node in [node1, node2] if node] |
|
1234 | r = [hexfunc(node) for node in [node1, node2] if node] | |
1235 |
|
1235 | |||
1236 | if opts.git: |
|
1236 | if opts.git: | |
1237 | copy, diverge = copies.copies(repo, ctx1, ctx2, repo[nullid]) |
|
1237 | copy, diverge = copies.copies(repo, ctx1, ctx2, repo[nullid]) | |
1238 | for k, v in copy.items(): |
|
1238 | for k, v in copy.items(): | |
1239 | copy[v] = k |
|
1239 | copy[v] = k | |
1240 |
|
1240 | |||
1241 | gone = {} |
|
1241 | gone = {} | |
1242 | gitmode = {'l': '120000', 'x': '100755', '': '100644'} |
|
1242 | gitmode = {'l': '120000', 'x': '100755', '': '100644'} | |
1243 |
|
1243 | |||
1244 | for f in util.sort(modified + added + removed): |
|
1244 | for f in util.sort(modified + added + removed): | |
1245 | to = None |
|
1245 | to = None | |
1246 | tn = None |
|
1246 | tn = None | |
1247 | dodiff = True |
|
1247 | dodiff = True | |
1248 | header = [] |
|
1248 | header = [] | |
1249 | if f in man1: |
|
1249 | if f in man1: | |
1250 | to = getfilectx(f, ctx1).data() |
|
1250 | to = getfilectx(f, ctx1).data() | |
1251 | if f not in removed: |
|
1251 | if f not in removed: | |
1252 | tn = getfilectx(f, ctx2).data() |
|
1252 | tn = getfilectx(f, ctx2).data() | |
1253 | a, b = f, f |
|
1253 | a, b = f, f | |
1254 | if opts.git: |
|
1254 | if opts.git: | |
1255 | if f in added: |
|
1255 | if f in added: | |
1256 | mode = gitmode[ctx2.flags(f)] |
|
1256 | mode = gitmode[ctx2.flags(f)] | |
1257 | if f in copy: |
|
1257 | if f in copy: | |
1258 | a = copy[f] |
|
1258 | a = copy[f] | |
1259 | omode = gitmode[man1.flags(a)] |
|
1259 | omode = gitmode[man1.flags(a)] | |
1260 | _addmodehdr(header, omode, mode) |
|
1260 | _addmodehdr(header, omode, mode) | |
1261 | if a in removed and a not in gone: |
|
1261 | if a in removed and a not in gone: | |
1262 | op = 'rename' |
|
1262 | op = 'rename' | |
1263 | gone[a] = 1 |
|
1263 | gone[a] = 1 | |
1264 | else: |
|
1264 | else: | |
1265 | op = 'copy' |
|
1265 | op = 'copy' | |
1266 | header.append('%s from %s\n' % (op, a)) |
|
1266 | header.append('%s from %s\n' % (op, a)) | |
1267 | header.append('%s to %s\n' % (op, f)) |
|
1267 | header.append('%s to %s\n' % (op, f)) | |
1268 | to = getfilectx(a, ctx1).data() |
|
1268 | to = getfilectx(a, ctx1).data() | |
1269 | else: |
|
1269 | else: | |
1270 | header.append('new file mode %s\n' % mode) |
|
1270 | header.append('new file mode %s\n' % mode) | |
1271 | if util.binary(tn): |
|
1271 | if util.binary(tn): | |
1272 | dodiff = 'binary' |
|
1272 | dodiff = 'binary' | |
1273 | elif f in removed: |
|
1273 | elif f in removed: | |
1274 | # have we already reported a copy above? |
|
1274 | # have we already reported a copy above? | |
1275 | if f in copy and copy[f] in added and copy[copy[f]] == f: |
|
1275 | if f in copy and copy[f] in added and copy[copy[f]] == f: | |
1276 | dodiff = False |
|
1276 | dodiff = False | |
1277 | else: |
|
1277 | else: | |
1278 | header.append('deleted file mode %s\n' % |
|
1278 | header.append('deleted file mode %s\n' % | |
1279 | gitmode[man1.flags(f)]) |
|
1279 | gitmode[man1.flags(f)]) | |
1280 | else: |
|
1280 | else: | |
1281 | omode = gitmode[man1.flags(f)] |
|
1281 | omode = gitmode[man1.flags(f)] | |
1282 | nmode = gitmode[ctx2.flags(f)] |
|
1282 | nmode = gitmode[ctx2.flags(f)] | |
1283 | _addmodehdr(header, omode, nmode) |
|
1283 | _addmodehdr(header, omode, nmode) | |
1284 | if util.binary(to) or util.binary(tn): |
|
1284 | if util.binary(to) or util.binary(tn): | |
1285 | dodiff = 'binary' |
|
1285 | dodiff = 'binary' | |
1286 | r = None |
|
1286 | r = None | |
1287 | header.insert(0, mdiff.diffline(r, a, b, opts)) |
|
1287 | header.insert(0, mdiff.diffline(r, a, b, opts)) | |
1288 | if dodiff: |
|
1288 | if dodiff: | |
1289 | if dodiff == 'binary': |
|
1289 | if dodiff == 'binary': | |
1290 | text = b85diff(to, tn) |
|
1290 | text = b85diff(to, tn) | |
1291 | else: |
|
1291 | else: | |
1292 | text = mdiff.unidiff(to, date1, |
|
1292 | text = mdiff.unidiff(to, date1, | |
1293 | # ctx2 date may be dynamic |
|
1293 | # ctx2 date may be dynamic | |
1294 | tn, util.datestr(ctx2.date()), |
|
1294 | tn, util.datestr(ctx2.date()), | |
1295 | a, b, r, opts=opts) |
|
1295 | a, b, r, opts=opts) | |
1296 | if header and (text or len(header) > 1): |
|
1296 | if header and (text or len(header) > 1): | |
1297 | yield ''.join(header) |
|
1297 | yield ''.join(header) | |
1298 | if text: |
|
1298 | if text: | |
1299 | yield text |
|
1299 | yield text | |
1300 |
|
1300 | |||
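Since diff() yields header and text chunks, callers simply stream or join them. A minimal sketch assuming an existing repo object:

# git-style diff of the working directory against its first parent
for chunk in diff(repo, opts=diffopts(repo.ui, {'git': True})):
    sys.stdout.write(chunk)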
1301 | def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False, |
|
1301 | def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False, | |
1302 | opts=None): |
|
1302 | opts=None): | |
1303 | '''export changesets as hg patches.''' |
|
1303 | '''export changesets as hg patches.''' | |
1304 |
|
1304 | |||
1305 | total = len(revs) |
|
1305 | total = len(revs) | |
1306 | revwidth = max([len(str(rev)) for rev in revs]) |
|
1306 | revwidth = max([len(str(rev)) for rev in revs]) | |
1307 |
|
1307 | |||
1308 | def single(rev, seqno, fp): |
|
1308 | def single(rev, seqno, fp): | |
1309 | ctx = repo[rev] |
|
1309 | ctx = repo[rev] | |
1310 | node = ctx.node() |
|
1310 | node = ctx.node() | |
1311 | parents = [p.node() for p in ctx.parents() if p] |
|
1311 | parents = [p.node() for p in ctx.parents() if p] | |
1312 | branch = ctx.branch() |
|
1312 | branch = ctx.branch() | |
1313 | if switch_parent: |
|
1313 | if switch_parent: | |
1314 | parents.reverse() |
|
1314 | parents.reverse() | |
1315 | prev = (parents and parents[0]) or nullid |
|
1315 | prev = (parents and parents[0]) or nullid | |
1316 |
|
1316 | |||
1317 | if not fp: |
|
1317 | if not fp: | |
1318 | fp = cmdutil.make_file(repo, template, node, total=total, |
|
1318 | fp = cmdutil.make_file(repo, template, node, total=total, | |
1319 | seqno=seqno, revwidth=revwidth, |
|
1319 | seqno=seqno, revwidth=revwidth, | |
1320 | mode='ab') |
|
1320 | mode='ab') | |
1321 | if fp != sys.stdout and hasattr(fp, 'name'): |
|
1321 | if fp != sys.stdout and hasattr(fp, 'name'): | |
1322 | repo.ui.note("%s\n" % fp.name) |
|
1322 | repo.ui.note("%s\n" % fp.name) | |
1323 |
|
1323 | |||
1324 | fp.write("# HG changeset patch\n") |
|
1324 | fp.write("# HG changeset patch\n") | |
1325 | fp.write("# User %s\n" % ctx.user()) |
|
1325 | fp.write("# User %s\n" % ctx.user()) | |
1326 | fp.write("# Date %d %d\n" % ctx.date()) |
|
1326 | fp.write("# Date %d %d\n" % ctx.date()) | |
1327 | if branch and (branch != 'default'): |
|
1327 | if branch and (branch != 'default'): | |
1328 | fp.write("# Branch %s\n" % branch) |
|
1328 | fp.write("# Branch %s\n" % branch) | |
1329 | fp.write("# Node ID %s\n" % hex(node)) |
|
1329 | fp.write("# Node ID %s\n" % hex(node)) | |
1330 | fp.write("# Parent %s\n" % hex(prev)) |
|
1330 | fp.write("# Parent %s\n" % hex(prev)) | |
1331 | if len(parents) > 1: |
|
1331 | if len(parents) > 1: | |
1332 | fp.write("# Parent %s\n" % hex(parents[1])) |
|
1332 | fp.write("# Parent %s\n" % hex(parents[1])) | |
1333 | fp.write(ctx.description().rstrip()) |
|
1333 | fp.write(ctx.description().rstrip()) | |
1334 | fp.write("\n\n") |
|
1334 | fp.write("\n\n") | |
1335 |
|
1335 | |||
1336 | for chunk in diff(repo, prev, node, opts=opts): |
|
1336 | for chunk in diff(repo, prev, node, opts=opts): | |
1337 | fp.write(chunk) |
|
1337 | fp.write(chunk) | |
1338 |
|
1338 | |||
1339 | for seqno, rev in enumerate(revs): |
|
1339 | for seqno, rev in enumerate(revs): | |
1340 | single(rev, seqno+1, fp) |
|
1340 | single(rev, seqno+1, fp) | |
1341 |
|
1341 | |||
1342 | def diffstatdata(lines): |
|
1342 | def diffstatdata(lines): | |
1343 | filename, adds, removes = None, 0, 0 |
|
1343 | filename, adds, removes = None, 0, 0 | |
1344 | for line in lines: |
|
1344 | for line in lines: | |
1345 | if line.startswith('diff'): |
|
1345 | if line.startswith('diff'): | |
1346 | if filename: |
|
1346 | if filename: | |
1347 | yield (filename, adds, removes) |
|
1347 | yield (filename, adds, removes) | |
1348 | # set numbers to 0 anyway when starting new file |
|
1348 | # set numbers to 0 anyway when starting new file | |
1349 | adds, removes = 0, 0 |
|
1349 | adds, removes = 0, 0 | |
1350 | if line.startswith('diff --git'): |
|
1350 | if line.startswith('diff --git'): | |
1351 | filename = gitre.search(line).group(1) |
|
1351 | filename = gitre.search(line).group(1) | |
1352 | else: |
|
1352 | else: | |
1353 | # format: "diff -r ... -r ... file name" |
|
1353 | # format: "diff -r ... -r ... file name" | |
1354 | filename = line.split(None, 5)[-1] |
|
1354 | filename = line.split(None, 5)[-1] | |
1355 | elif line.startswith('+') and not line.startswith('+++'): |
|
1355 | elif line.startswith('+') and not line.startswith('+++'): | |
1356 | adds += 1 |
|
1356 | adds += 1 | |
1357 | elif line.startswith('-') and not line.startswith('---'): |
|
1357 | elif line.startswith('-') and not line.startswith('---'): | |
1358 | removes += 1 |
|
1358 | removes += 1 | |
1359 | if filename: |
|
1359 | if filename: | |
1360 | yield (filename, adds, removes) |
|
1360 | yield (filename, adds, removes) | |
1361 |
|
1361 | |||
1362 | def diffstat(lines, width=80): |
|
1362 | def diffstat(lines, width=80): | |
1363 | output = [] |
|
1363 | output = [] | |
1364 | stats = list(diffstatdata(lines)) |
|
1364 | stats = list(diffstatdata(lines)) | |
1365 |
|
1365 | |||
1366 | maxtotal, maxname = 0, 0 |
|
1366 | maxtotal, maxname = 0, 0 | |
1367 | totaladds, totalremoves = 0, 0 |
|
1367 | totaladds, totalremoves = 0, 0 | |
1368 | for filename, adds, removes in stats: |
|
1368 | for filename, adds, removes in stats: | |
1369 | totaladds += adds |
|
1369 | totaladds += adds | |
1370 | totalremoves += removes |
|
1370 | totalremoves += removes | |
1371 | maxname = max(maxname, len(filename)) |
|
1371 | maxname = max(maxname, len(filename)) | |
1372 | maxtotal = max(maxtotal, adds+removes) |
|
1372 | maxtotal = max(maxtotal, adds+removes) | |
1373 |
|
1373 | |||
1374 | countwidth = len(str(maxtotal)) |
|
1374 | countwidth = len(str(maxtotal)) | |
1375 | graphwidth = width - countwidth - maxname |
|
1375 | graphwidth = width - countwidth - maxname | |
1376 | if graphwidth < 10: |
|
1376 | if graphwidth < 10: | |
1377 | graphwidth = 10 |
|
1377 | graphwidth = 10 | |
1378 |
|
1378 | |||
1379 | factor = max(int(math.ceil(float(maxtotal) / graphwidth)), 1) |
|
1379 | factor = max(int(math.ceil(float(maxtotal) / graphwidth)), 1) | |
1380 |
|
1380 | |||
1381 | for filename, adds, removes in stats: |
|
1381 | for filename, adds, removes in stats: | |
1382 | # If diffstat runs out of room it doesn't print anything, which |
|
1382 | # If diffstat runs out of room it doesn't print anything, which | |
1383 | # isn't very useful, so always print at least one + or - if there |
|
1383 | # isn't very useful, so always print at least one + or - if there | |
1384 | # were at least some changes |
|
1384 | # were at least some changes | |
1385 | pluses = '+' * max(adds/factor, int(bool(adds))) |
|
1385 | pluses = '+' * max(adds/factor, int(bool(adds))) | |
1386 | minuses = '-' * max(removes/factor, int(bool(removes))) |
|
1386 | minuses = '-' * max(removes/factor, int(bool(removes))) | |
1387 | output.append(' %-*s | %*.d %s%s\n' % (maxname, filename, countwidth, |
|
1387 | output.append(' %-*s | %*.d %s%s\n' % (maxname, filename, countwidth, | |
1388 | adds+removes, pluses, minuses)) |
|
1388 | adds+removes, pluses, minuses)) | |
1389 |
|
1389 | |||
1390 | if stats: |
|
1390 | if stats: | |
1391 | output.append(' %d files changed, %d insertions(+), %d deletions(-)\n' |
|
1391 | output.append(' %d files changed, %d insertions(+), %d deletions(-)\n' | |
1392 | % (len(stats), totaladds, totalremoves)) |
|
1392 | % (len(stats), totaladds, totalremoves)) | |
1393 |
|
1393 | |||
1394 | return ''.join(output) |
|
1394 | return ''.join(output) |
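The histogram logic above divides every file's change count by a common factor so the widest row fits the terminal, while the int(bool(...)) terms guarantee at least one '+' or '-' for any nonzero count. A minimal self-contained sketch of that scaling, using a made-up diff ('somefile.py' is illustrative; Python 3 floor division stands in for the Python 2 `/` above):

import math

sample = [
    'diff -r aaa -r bbb somefile.py',
    '--- a/somefile.py',
    '+++ b/somefile.py',
    '+added line one',
    '+added line two',
    '-removed line',
]

adds = sum(1 for l in sample if l.startswith('+') and not l.startswith('+++'))
removes = sum(1 for l in sample if l.startswith('-') and not l.startswith('---'))

width = 20                      # room left over for the +/- graph
factor = max(int(math.ceil(float(adds + removes) / width)), 1)
# always print at least one marker when there was any change at all
pluses = '+' * max(adds // factor, int(bool(adds)))
minuses = '-' * max(removes // factor, int(bool(removes)))
print(' somefile.py | %d %s%s' % (adds + removes, pluses, minuses))

Running it prints ' somefile.py | 3 ++-'.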
@@ -1,81 +1,81 b'' | |||||
1 | # parsers.py - Python implementation of parsers.c |
|
1 | # parsers.py - Python implementation of parsers.c | |
2 | # |
|
2 | # | |
3 | # Copyright 2009 Matt Mackall <mpm@selenic.com> and others |
|
3 | # Copyright 2009 Matt Mackall <mpm@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import bin, nullid, nullrev |
|
8 | from node import bin, nullid, nullrev | |
9 | import revlog, dirstate, struct, util, zlib |
|
9 | import revlog, dirstate, struct, util, zlib | |
10 |
|
10 | |||
11 | _pack = struct.pack |
|
11 | _pack = struct.pack | |
12 | _unpack = struct.unpack |
|
12 | _unpack = struct.unpack | |
13 | _compress = zlib.compress |
|
13 | _compress = zlib.compress | |
14 | _decompress = zlib.decompress |
|
14 | _decompress = zlib.decompress | |
15 | _sha = util.sha1 |
|
15 | _sha = util.sha1 | |
16 |
|
16 | |||
17 | def parse_manifest(mfdict, fdict, lines): |
|
17 | def parse_manifest(mfdict, fdict, lines): | |
18 | for l in lines.splitlines(): |
|
18 | for l in lines.splitlines(): | |
19 | f, n = l.split('\0') |
|
19 | f, n = l.split('\0') | |
20 | if len(n) > 40: |
|
20 | if len(n) > 40: | |
21 | fdict[f] = n[40:] |
|
21 | fdict[f] = n[40:] | |
22 | mfdict[f] = bin(n[:40]) |
|
22 | mfdict[f] = bin(n[:40]) | |
23 | else: |
|
23 | else: | |
24 | mfdict[f] = bin(n) |
|
24 | mfdict[f] = bin(n) | |
25 |
|
25 | |||
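Each manifest line above is "filename NUL 40-hex-nodeid", and anything after the 40 hex digits is treated as flag characters. A round-trip sketch under that assumption, with bytes.fromhex playing the role of node.bin:

line = 'some/file.py\0' + 'ab' * 20 + 'x'   # 40 hex chars plus an 'x' flag
f, n = line.split('\0')
flags = n[40:] if len(n) > 40 else ''
nodeid = bytes.fromhex(n[:40])              # stand-in for bin(n[:40])
print(f, repr(flags), nodeid.hex())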
26 | def parse_index(data, inline): |
|
26 | def parse_index(data, inline): | |
27 | indexformatng = revlog.indexformatng |
|
27 | indexformatng = revlog.indexformatng | |
28 | s = struct.calcsize(indexformatng) |
|
28 | s = struct.calcsize(indexformatng) | |
29 | index = [] |
|
29 | index = [] | |
30 | cache = None |
|
30 | cache = None | |
31 | nodemap = {nullid: nullrev} |
|
31 | nodemap = {nullid: nullrev} | |
32 | n = off = 0 |
|
32 | n = off = 0 | |
33 | # if we're not using lazymap, always read the whole index |
|
33 | # if we're not using lazymap, always read the whole index | |
34 | l = len(data) - s |
|
34 | l = len(data) - s | |
35 | append = index.append |
|
35 | append = index.append | |
36 | if inline: |
|
36 | if inline: | |
37 | cache = (0, data) |
|
37 | cache = (0, data) | |
38 | while off <= l: |
|
38 | while off <= l: | |
39 | e = _unpack(indexformatng, data[off:off + s]) |
|
39 | e = _unpack(indexformatng, data[off:off + s]) | |
40 | nodemap[e[7]] = n |
|
40 | nodemap[e[7]] = n | |
41 | append(e) |
|
41 | append(e) | |
42 | n += 1 |
|
42 | n += 1 | |
43 | if e[1] < 0: |
|
43 | if e[1] < 0: | |
44 | break |
|
44 | break | |
45 | off += e[1] + s |
|
45 | off += e[1] + s | |
46 | else: |
|
46 | else: | |
47 | while off <= l: |
|
47 | while off <= l: | |
48 | e = _unpack(indexformatng, data[off:off + s]) |
|
48 | e = _unpack(indexformatng, data[off:off + s]) | |
49 | nodemap[e[7]] = n |
|
49 | nodemap[e[7]] = n | |
50 | append(e) |
|
50 | append(e) | |
51 | n += 1 |
|
51 | n += 1 | |
52 | off += s |
|
52 | off += s | |
53 |
|
53 | |||
54 | e = list(index[0]) |
|
54 | e = list(index[0]) | |
55 | type = revlog.gettype(e[0]) |
|
55 | type = revlog.gettype(e[0]) | |
56 | e[0] = revlog.offset_type(0, type) |
|
56 | e[0] = revlog.offset_type(0, type) | |
57 | index[0] = tuple(e) |
|
57 | index[0] = tuple(e) | |
58 |
|
58 | |||
59 | # add the magic null revision at -1 |
|
59 | # add the magic null revision at -1 | |
60 | index.append((0, 0, 0, -1, -1, -1, -1, nullid)) |
|
60 | index.append((0, 0, 0, -1, -1, -1, -1, nullid)) | |
61 |
|
61 | |||
62 | return index, nodemap, cache |
|
62 | return index, nodemap, cache | |
63 |
|
63 | |||
64 | def parse_dirstate(dmap, copymap, st): |
|
64 | def parse_dirstate(dmap, copymap, st): | |
65 | parents = [st[:20], st[20: 40]] |
|
65 | parents = [st[:20], st[20: 40]] | |
66 | # deref fields so they will be local in loop |
|
66 | # deref fields so they will be local in loop | |
67 | e_size = struct.calcsize(dirstate._format) |
|
67 | e_size = struct.calcsize(dirstate._format) | |
68 | pos1 = 40 |
|
68 | pos1 = 40 | |
69 | l = len(st) |
|
69 | l = len(st) | |
70 |
|
70 | |||
71 | # the inner loop |
|
71 | # the inner loop | |
72 | while pos1 < l: |
|
72 | while pos1 < l: | |
73 | pos2 = pos1 + e_size |
|
73 | pos2 = pos1 + e_size | |
74 | e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster |
|
74 | e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster | |
75 | pos1 = pos2 + e[4] |
|
75 | pos1 = pos2 + e[4] | |
76 | f = st[pos2:pos1] |
|
76 | f = st[pos2:pos1] | |
77 | if '\0' in f: |
|
77 | if '\0' in f: | |
78 | f, c = f.split('\0') |
|
78 | f, c = f.split('\0') | |
79 | copymap[f] = c |
|
79 | copymap[f] = c | |
80 | dmap[f] = e[:4] |
|
80 | dmap[f] = e[:4] | |
81 | return parents |
|
81 | return parents |
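parse_dirstate reads two 20-byte parent hashes and then fixed-size ">cllll" records (state, mode, size, mtime, filename length), each followed by the filename itself. A self-contained round-trip along those lines (a sketch, not the real dirstate module; Python 3, so the buffer is bytes):

import struct

_format = ">cllll"
fname = b"dir/file.py"
entry = struct.pack(_format, b"n", 0o644, 123, 1000000000, len(fname)) + fname
st = b"\x11" * 20 + b"\x22" * 20 + entry   # two fake parents, one record

parents = [st[:20], st[20:40]]
e_size = struct.calcsize(_format)
pos1 = 40
dmap = {}
while pos1 < len(st):
    pos2 = pos1 + e_size
    e = struct.unpack(_format, st[pos1:pos2])
    pos1 = pos2 + e[4]                     # e[4] is the filename length
    f = st[pos2:pos1]
    dmap[f] = e[:4]
print(dmap)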
@@ -1,42 +1,43 b'' | |||||
1 | # repo.py - repository base classes for mercurial |
|
1 | # repo.py - repository base classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | from i18n import _ |
|
9 | from i18n import _ | |
|
10 | import error | |||
10 |
|
11 | |||
11 | class repository(object): |
|
12 | class repository(object): | |
12 | def capable(self, name): |
|
13 | def capable(self, name): | |
13 | '''tell whether repo supports named capability. |
|
14 | '''tell whether repo supports named capability. | |
14 | return False if not supported. |
|
15 | return False if not supported. | |
15 | if boolean capability, return True. |
|
16 | if boolean capability, return True. | |
16 | if string capability, return string.''' |
|
17 | if string capability, return string.''' | |
17 | if name in self.capabilities: |
|
18 | if name in self.capabilities: | |
18 | return True |
|
19 | return True | |
19 | name_eq = name + '=' |
|
20 | name_eq = name + '=' | |
20 | for cap in self.capabilities: |
|
21 | for cap in self.capabilities: | |
21 | if cap.startswith(name_eq): |
|
22 | if cap.startswith(name_eq): | |
22 | return cap[len(name_eq):] |
|
23 | return cap[len(name_eq):] | |
23 | return False |
|
24 | return False | |
24 |
|
25 | |||
25 | def requirecap(self, name, purpose): |
|
26 | def requirecap(self, name, purpose): | |
26 | '''raise an exception if the given capability is not present''' |
|
27 | '''raise an exception if the given capability is not present''' | |
27 | if not self.capable(name): |
|
28 | if not self.capable(name): | |
28 | raise error.CapabilityError( |
|
29 | raise error.CapabilityError( | |
29 | _('cannot %s; remote repository does not ' |
|
30 | _('cannot %s; remote repository does not ' | |
30 | 'support the %r capability') % (purpose, name)) |
|
31 | 'support the %r capability') % (purpose, name)) | |
31 |
|
32 | |||
32 | def local(self): |
|
33 | def local(self): | |
33 | return False |
|
34 | return False | |
34 |
|
35 | |||
35 | def cancopy(self): |
|
36 | def cancopy(self): | |
36 | return self.local() |
|
37 | return self.local() | |
37 |
|
38 | |||
38 | def rjoin(self, path): |
|
39 | def rjoin(self, path): | |
39 | url = self.url() |
|
40 | url = self.url() | |
40 | if url.endswith('/'): |
|
41 | if url.endswith('/'): | |
41 | return url + path |
|
42 | return url + path | |
42 | return url + '/' + path |
|
43 | return url + '/' + path |
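capable above treats a bare name as a boolean capability and "name=value" as a string capability. A minimal sketch of that lookup (the capability strings are modeled on, not quoted from, the hg wire protocol):

class fakerepo(object):
    capabilities = ['lookup', 'unbundle=HG10GZ,HG10BZ,HG10UN']

    def capable(self, name):
        if name in self.capabilities:
            return True
        name_eq = name + '='
        for cap in self.capabilities:
            if cap.startswith(name_eq):
                return cap[len(name_eq):]
        return False

repo = fakerepo()
print(repo.capable('lookup'))     # True  (boolean capability)
print(repo.capable('unbundle'))   # 'HG10GZ,HG10BZ,HG10UN'
print(repo.capable('stream'))     # False (not supported)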
@@ -1,1360 +1,1361 b'' | |||||
1 | """ |
|
1 | """ | |
2 | revlog.py - storage back-end for mercurial |
|
2 | revlog.py - storage back-end for mercurial | |
3 |
|
3 | |||
4 | This provides efficient delta storage with O(1) retrieve and append |
|
4 | This provides efficient delta storage with O(1) retrieve and append | |
5 | and O(changes) merge between branches |
|
5 | and O(changes) merge between branches | |
6 |
|
6 | |||
7 | Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
7 | Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
8 |
|
8 | |||
9 | This software may be used and distributed according to the terms |
|
9 | This software may be used and distributed according to the terms | |
10 | of the GNU General Public License, incorporated herein by reference. |
|
10 | of the GNU General Public License, incorporated herein by reference. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 | from node import bin, hex, nullid, nullrev, short |
|
13 | # import stuff from node for others to import from revlog | |
|
14 | from node import bin, hex, nullid, nullrev, short #@UnusedImport | |||
14 | from i18n import _ |
|
15 | from i18n import _ | |
15 | import changegroup, errno, ancestor, mdiff, parsers |
|
16 | import changegroup, errno, ancestor, mdiff, parsers | |
16 | import struct, util, zlib, error |
|
17 | import struct, util, zlib, error | |
17 |
|
18 | |||
18 | _pack = struct.pack |
|
19 | _pack = struct.pack | |
19 | _unpack = struct.unpack |
|
20 | _unpack = struct.unpack | |
20 | _compress = zlib.compress |
|
21 | _compress = zlib.compress | |
21 | _decompress = zlib.decompress |
|
22 | _decompress = zlib.decompress | |
22 | _sha = util.sha1 |
|
23 | _sha = util.sha1 | |
23 |
|
24 | |||
24 | # revlog flags |
|
25 | # revlog flags | |
25 | REVLOGV0 = 0 |
|
26 | REVLOGV0 = 0 | |
26 | REVLOGNG = 1 |
|
27 | REVLOGNG = 1 | |
27 | REVLOGNGINLINEDATA = (1 << 16) |
|
28 | REVLOGNGINLINEDATA = (1 << 16) | |
28 | REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA |
|
29 | REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA | |
29 | REVLOG_DEFAULT_FORMAT = REVLOGNG |
|
30 | REVLOG_DEFAULT_FORMAT = REVLOGNG | |
30 | REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS |
|
31 | REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS | |
31 |
|
32 | |||
32 | RevlogError = error.RevlogError |
|
33 | RevlogError = error.RevlogError | |
33 | LookupError = error.LookupError |
|
34 | LookupError = error.LookupError | |
34 |
|
35 | |||
35 | def getoffset(q): |
|
36 | def getoffset(q): | |
36 | return int(q >> 16) |
|
37 | return int(q >> 16) | |
37 |
|
38 | |||
38 | def gettype(q): |
|
39 | def gettype(q): | |
39 | return int(q & 0xFFFF) |
|
40 | return int(q & 0xFFFF) | |
40 |
|
41 | |||
41 | def offset_type(offset, type): |
|
42 | def offset_type(offset, type): | |
42 | return long(long(offset) << 16 | type) |
|
43 | return long(long(offset) << 16 | type) | |
43 |
|
44 | |||
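The first index field packs a 48-bit data-file offset and a 16-bit type into a single integer; getoffset and gettype recover the two halves. A quick round-trip (Python 3, where the long() call above is unnecessary):

def offset_type(offset, type):
    return (offset << 16) | type

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

q = offset_type(123456789, 3)
assert getoffset(q) == 123456789 and gettype(q) == 3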
44 | def hash(text, p1, p2): |
|
45 | def hash(text, p1, p2): | |
45 | """generate a hash from the given text and its parent hashes |
|
46 | """generate a hash from the given text and its parent hashes | |
46 |
|
47 | |||
47 | This hash combines both the current file contents and its history |
|
48 | This hash combines both the current file contents and its history | |
48 | in a manner that makes it easy to distinguish nodes with the same |
|
49 | in a manner that makes it easy to distinguish nodes with the same | |
49 | content in the revision graph. |
|
50 | content in the revision graph. | |
50 | """ |
|
51 | """ | |
51 | l = [p1, p2] |
|
52 | l = [p1, p2] | |
52 | l.sort() |
|
53 | l.sort() | |
53 | s = _sha(l[0]) |
|
54 | s = _sha(l[0]) | |
54 | s.update(l[1]) |
|
55 | s.update(l[1]) | |
55 | s.update(text) |
|
56 | s.update(text) | |
56 | return s.digest() |
|
57 | return s.digest() | |
57 |
|
58 | |||
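The nodeid is a SHA-1 over the two parent hashes in sorted order followed by the text, so swapping the parents yields the same node. A sketch with hashlib standing in for util.sha1 (Python 3 bytes throughout):

import hashlib

def hash(text, p1, p2):
    l = sorted([p1, p2])               # parent order must not matter
    s = hashlib.sha1(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

p1, p2 = b'\x01' * 20, b'\x02' * 20
assert hash(b'contents', p1, p2) == hash(b'contents', p2, p1)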
58 | def compress(text): |
|
59 | def compress(text): | |
59 | """ generate a possibly-compressed representation of text """ |
|
60 | """ generate a possibly-compressed representation of text """ | |
60 | if not text: |
|
61 | if not text: | |
61 | return ("", text) |
|
62 | return ("", text) | |
62 | l = len(text) |
|
63 | l = len(text) | |
63 | bin = None |
|
64 | bin = None | |
64 | if l < 44: |
|
65 | if l < 44: | |
65 | pass |
|
66 | pass | |
66 | elif l > 1000000: |
|
67 | elif l > 1000000: | |
67 | # zlib makes an internal copy, thus doubling memory usage for |
|
68 | # zlib makes an internal copy, thus doubling memory usage for | |
68 | # large files, so lets do this in pieces |
|
69 | # large files, so lets do this in pieces | |
69 | z = zlib.compressobj() |
|
70 | z = zlib.compressobj() | |
70 | p = [] |
|
71 | p = [] | |
71 | pos = 0 |
|
72 | pos = 0 | |
72 | while pos < l: |
|
73 | while pos < l: | |
73 | pos2 = pos + 2**20 |
|
74 | pos2 = pos + 2**20 | |
74 | p.append(z.compress(text[pos:pos2])) |
|
75 | p.append(z.compress(text[pos:pos2])) | |
75 | pos = pos2 |
|
76 | pos = pos2 | |
76 | p.append(z.flush()) |
|
77 | p.append(z.flush()) | |
77 | if sum(map(len, p)) < l: |
|
78 | if sum(map(len, p)) < l: | |
78 | bin = "".join(p) |
|
79 | bin = "".join(p) | |
79 | else: |
|
80 | else: | |
80 | bin = _compress(text) |
|
81 | bin = _compress(text) | |
81 | if bin is None or len(bin) > l: |
|
82 | if bin is None or len(bin) > l: | |
82 | if text[0] == '\0': |
|
83 | if text[0] == '\0': | |
83 | return ("", text) |
|
84 | return ("", text) | |
84 | return ('u', text) |
|
85 | return ('u', text) | |
85 | return ("", bin) |
|
86 | return ("", bin) | |
86 |
|
87 | |||
87 | def decompress(bin): |
|
88 | def decompress(bin): | |
88 | """ decompress the given input """ |
|
89 | """ decompress the given input """ | |
89 | if not bin: |
|
90 | if not bin: | |
90 | return bin |
|
91 | return bin | |
91 | t = bin[0] |
|
92 | t = bin[0] | |
92 | if t == '\0': |
|
93 | if t == '\0': | |
93 | return bin |
|
94 | return bin | |
94 | if t == 'x': |
|
95 | if t == 'x': | |
95 | return _decompress(bin) |
|
96 | return _decompress(bin) | |
96 | if t == 'u': |
|
97 | if t == 'u': | |
97 | return bin[1:] |
|
98 | return bin[1:] | |
98 | raise RevlogError(_("unknown compression type %r") % t) |
|
99 | raise RevlogError(_("unknown compression type %r") % t) | |
99 |
|
100 | |||
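The one-byte prefix distinguishes the three storage forms: zlib output (which always begins with 'x'), 'u' for text stored uncompressed, and the bare text when it already starts with a NUL. A round-trip sketch of the decompress side in Python 3 (bytes literals where the original used str):

import zlib

def decompress(bin):
    if not bin:
        return bin
    t = bin[0:1]
    if t == b'\0':
        return bin                      # NUL-prefixed text stored verbatim
    if t == b'x':
        return zlib.decompress(bin)     # zlib stream, 'x' is its first byte
    if t == b'u':
        return bin[1:]                  # explicit uncompressed marker
    raise ValueError('unknown compression type %r' % t)

text = b'some revision text ' * 10
assert decompress(zlib.compress(text)) == text   # compressed path
assert decompress(b'u' + text) == text           # stored path
assert decompress(b'\0rest') == b'\0rest'        # NUL-prefixed path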
100 | class lazyparser(object): |
|
101 | class lazyparser(object): | |
101 | """ |
|
102 | """ | |
102 | this class avoids the need to parse the entirety of large indices |
|
103 | this class avoids the need to parse the entirety of large indices | |
103 | """ |
|
104 | """ | |
104 |
|
105 | |||
105 | # lazyparser is not safe to use on windows if win32 extensions not |
|
106 | # lazyparser is not safe to use on windows if win32 extensions not | |
106 | # available. it keeps file handle open, which make it not possible |
|
107 | # available. it keeps file handle open, which make it not possible | |
107 | # to break hardlinks on local cloned repos. |
|
108 | # to break hardlinks on local cloned repos. | |
108 |
|
109 | |||
109 | def __init__(self, dataf, size): |
|
110 | def __init__(self, dataf, size): | |
110 | self.dataf = dataf |
|
111 | self.dataf = dataf | |
111 | self.s = struct.calcsize(indexformatng) |
|
112 | self.s = struct.calcsize(indexformatng) | |
112 | self.datasize = size |
|
113 | self.datasize = size | |
113 | self.l = size/self.s |
|
114 | self.l = size/self.s | |
114 | self.index = [None] * self.l |
|
115 | self.index = [None] * self.l | |
115 | self.map = {nullid: nullrev} |
|
116 | self.map = {nullid: nullrev} | |
116 | self.allmap = 0 |
|
117 | self.allmap = 0 | |
117 | self.all = 0 |
|
118 | self.all = 0 | |
118 | self.mapfind_count = 0 |
|
119 | self.mapfind_count = 0 | |
119 |
|
120 | |||
120 | def loadmap(self): |
|
121 | def loadmap(self): | |
121 | """ |
|
122 | """ | |
122 | during a commit, we need to make sure the rev being added is |
|
123 | during a commit, we need to make sure the rev being added is | |
123 | not a duplicate. This requires loading the entire index, |
|
124 | not a duplicate. This requires loading the entire index, | |
124 | which is fairly slow. loadmap can load up just the node map, |
|
125 | which is fairly slow. loadmap can load up just the node map, | |
125 | which takes much less time. |
|
126 | which takes much less time. | |
126 | """ |
|
127 | """ | |
127 | if self.allmap: |
|
128 | if self.allmap: | |
128 | return |
|
129 | return | |
129 | end = self.datasize |
|
130 | end = self.datasize | |
130 | self.allmap = 1 |
|
131 | self.allmap = 1 | |
131 | cur = 0 |
|
132 | cur = 0 | |
132 | count = 0 |
|
133 | count = 0 | |
133 | blocksize = self.s * 256 |
|
134 | blocksize = self.s * 256 | |
134 | self.dataf.seek(0) |
|
135 | self.dataf.seek(0) | |
135 | while cur < end: |
|
136 | while cur < end: | |
136 | data = self.dataf.read(blocksize) |
|
137 | data = self.dataf.read(blocksize) | |
137 | off = 0 |
|
138 | off = 0 | |
138 | for x in xrange(256): |
|
139 | for x in xrange(256): | |
139 | n = data[off + ngshaoffset:off + ngshaoffset + 20] |
|
140 | n = data[off + ngshaoffset:off + ngshaoffset + 20] | |
140 | self.map[n] = count |
|
141 | self.map[n] = count | |
141 | count += 1 |
|
142 | count += 1 | |
142 | if count >= self.l: |
|
143 | if count >= self.l: | |
143 | break |
|
144 | break | |
144 | off += self.s |
|
145 | off += self.s | |
145 | cur += blocksize |
|
146 | cur += blocksize | |
146 |
|
147 | |||
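loadmap below scans the index file in fixed-size blocks and pulls only the 20-byte nodeid out of each record, which is why it is so much cheaper than a full parse. The same blockwise pattern, self-contained (a toy 32-byte record with the sha in the last 20 bytes, not the real revlog layout):

import io, struct

recfmt = ">12x20s"              # toy record: 12 pad bytes, then a 20-byte sha
s = struct.calcsize(recfmt)     # 32 bytes per record
data = b''.join(struct.pack(recfmt, bytes([i]) * 20) for i in range(256))

f = io.BytesIO(data)
nodemap, count = {}, 0
blocksize = s * 64              # read a block of records at a time
while count < 256:
    block = f.read(blocksize)
    for off in range(0, len(block), s):
        nodemap[block[off + 12:off + 32]] = count   # slice out the nodeid
        count += 1
print(len(nodemap))             # 256 nodeids mapped to their revisions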
147 | def loadblock(self, blockstart, blocksize, data=None): |
|
148 | def loadblock(self, blockstart, blocksize, data=None): | |
148 | if self.all: |
|
149 | if self.all: | |
149 | return |
|
150 | return | |
150 | if data is None: |
|
151 | if data is None: | |
151 | self.dataf.seek(blockstart) |
|
152 | self.dataf.seek(blockstart) | |
152 | if blockstart + blocksize > self.datasize: |
|
153 | if blockstart + blocksize > self.datasize: | |
153 | # the revlog may have grown since we've started running, |
|
154 | # the revlog may have grown since we've started running, | |
154 | # but we don't have space in self.index for more entries. |
|
155 | # but we don't have space in self.index for more entries. | |
155 | # limit blocksize so that we don't get too much data. |
|
156 | # limit blocksize so that we don't get too much data. | |
156 | blocksize = max(self.datasize - blockstart, 0) |
|
157 | blocksize = max(self.datasize - blockstart, 0) | |
157 | data = self.dataf.read(blocksize) |
|
158 | data = self.dataf.read(blocksize) | |
158 | lend = len(data) / self.s |
|
159 | lend = len(data) / self.s | |
159 | i = blockstart / self.s |
|
160 | i = blockstart / self.s | |
160 | off = 0 |
|
161 | off = 0 | |
161 | # lazyindex supports __delitem__ |
|
162 | # lazyindex supports __delitem__ | |
162 | if lend > len(self.index) - i: |
|
163 | if lend > len(self.index) - i: | |
163 | lend = len(self.index) - i |
|
164 | lend = len(self.index) - i | |
164 | for x in xrange(lend): |
|
165 | for x in xrange(lend): | |
165 | if self.index[i + x] == None: |
|
166 | if self.index[i + x] == None: | |
166 | b = data[off : off + self.s] |
|
167 | b = data[off : off + self.s] | |
167 | self.index[i + x] = b |
|
168 | self.index[i + x] = b | |
168 | n = b[ngshaoffset:ngshaoffset + 20] |
|
169 | n = b[ngshaoffset:ngshaoffset + 20] | |
169 | self.map[n] = i + x |
|
170 | self.map[n] = i + x | |
170 | off += self.s |
|
171 | off += self.s | |
171 |
|
172 | |||
172 | def findnode(self, node): |
|
173 | def findnode(self, node): | |
173 | """search backwards through the index file for a specific node""" |
|
174 | """search backwards through the index file for a specific node""" | |
174 | if self.allmap: |
|
175 | if self.allmap: | |
175 | return None |
|
176 | return None | |
176 |
|
177 | |||
177 | # hg log will cause many many searches for the manifest |
|
178 | # hg log will cause many many searches for the manifest | |
178 | # nodes. After we get called a few times, just load the whole |
|
179 | # nodes. After we get called a few times, just load the whole | |
179 | # thing. |
|
180 | # thing. | |
180 | if self.mapfind_count > 8: |
|
181 | if self.mapfind_count > 8: | |
181 | self.loadmap() |
|
182 | self.loadmap() | |
182 | if node in self.map: |
|
183 | if node in self.map: | |
183 | return node |
|
184 | return node | |
184 | return None |
|
185 | return None | |
185 | self.mapfind_count += 1 |
|
186 | self.mapfind_count += 1 | |
186 | last = self.l - 1 |
|
187 | last = self.l - 1 | |
187 | while self.index[last] != None: |
|
188 | while self.index[last] != None: | |
188 | if last == 0: |
|
189 | if last == 0: | |
189 | self.all = 1 |
|
190 | self.all = 1 | |
190 | self.allmap = 1 |
|
191 | self.allmap = 1 | |
191 | return None |
|
192 | return None | |
192 | last -= 1 |
|
193 | last -= 1 | |
193 | end = (last + 1) * self.s |
|
194 | end = (last + 1) * self.s | |
194 | blocksize = self.s * 256 |
|
195 | blocksize = self.s * 256 | |
195 | while end >= 0: |
|
196 | while end >= 0: | |
196 | start = max(end - blocksize, 0) |
|
197 | start = max(end - blocksize, 0) | |
197 | self.dataf.seek(start) |
|
198 | self.dataf.seek(start) | |
198 | data = self.dataf.read(end - start) |
|
199 | data = self.dataf.read(end - start) | |
199 | findend = end - start |
|
200 | findend = end - start | |
200 | while True: |
|
201 | while True: | |
201 | # we're searching backwards, so we have to make sure |
|
202 | # we're searching backwards, so we have to make sure | |
202 | # we don't find a changeset where this node is a parent |
|
203 | # we don't find a changeset where this node is a parent | |
203 | off = data.find(node, 0, findend) |
|
204 | off = data.find(node, 0, findend) | |
204 | findend = off |
|
205 | findend = off | |
205 | if off >= 0: |
|
206 | if off >= 0: | |
206 | i = off / self.s |
|
207 | i = off / self.s | |
207 | off = i * self.s |
|
208 | off = i * self.s | |
208 | n = data[off + ngshaoffset:off + ngshaoffset + 20] |
|
209 | n = data[off + ngshaoffset:off + ngshaoffset + 20] | |
209 | if n == node: |
|
210 | if n == node: | |
210 | self.map[n] = i + start / self.s |
|
211 | self.map[n] = i + start / self.s | |
211 | return node |
|
212 | return node | |
212 | else: |
|
213 | else: | |
213 | break |
|
214 | break | |
214 | end -= blocksize |
|
215 | end -= blocksize | |
215 | return None |
|
216 | return None | |
216 |
|
217 | |||
217 | def loadindex(self, i=None, end=None): |
|
218 | def loadindex(self, i=None, end=None): | |
218 | if self.all: |
|
219 | if self.all: | |
219 | return |
|
220 | return | |
220 | all = False |
|
221 | all = False | |
221 | if i == None: |
|
222 | if i == None: | |
222 | blockstart = 0 |
|
223 | blockstart = 0 | |
223 | blocksize = (65536 / self.s) * self.s |
|
224 | blocksize = (65536 / self.s) * self.s | |
224 | end = self.datasize |
|
225 | end = self.datasize | |
225 | all = True |
|
226 | all = True | |
226 | else: |
|
227 | else: | |
227 | if end: |
|
228 | if end: | |
228 | blockstart = i * self.s |
|
229 | blockstart = i * self.s | |
229 | end = end * self.s |
|
230 | end = end * self.s | |
230 | blocksize = end - blockstart |
|
231 | blocksize = end - blockstart | |
231 | else: |
|
232 | else: | |
232 | blockstart = (i & ~1023) * self.s |
|
233 | blockstart = (i & ~1023) * self.s | |
233 | blocksize = self.s * 1024 |
|
234 | blocksize = self.s * 1024 | |
234 | end = blockstart + blocksize |
|
235 | end = blockstart + blocksize | |
235 | while blockstart < end: |
|
236 | while blockstart < end: | |
236 | self.loadblock(blockstart, blocksize) |
|
237 | self.loadblock(blockstart, blocksize) | |
237 | blockstart += blocksize |
|
238 | blockstart += blocksize | |
238 | if all: |
|
239 | if all: | |
239 | self.all = True |
|
240 | self.all = True | |
240 |
|
241 | |||
241 | class lazyindex(object): |
|
242 | class lazyindex(object): | |
242 | """a lazy version of the index array""" |
|
243 | """a lazy version of the index array""" | |
243 | def __init__(self, parser): |
|
244 | def __init__(self, parser): | |
244 | self.p = parser |
|
245 | self.p = parser | |
245 | def __len__(self): |
|
246 | def __len__(self): | |
246 | return len(self.p.index) |
|
247 | return len(self.p.index) | |
247 | def load(self, pos): |
|
248 | def load(self, pos): | |
248 | if pos < 0: |
|
249 | if pos < 0: | |
249 | pos += len(self.p.index) |
|
250 | pos += len(self.p.index) | |
250 | self.p.loadindex(pos) |
|
251 | self.p.loadindex(pos) | |
251 | return self.p.index[pos] |
|
252 | return self.p.index[pos] | |
252 | def __getitem__(self, pos): |
|
253 | def __getitem__(self, pos): | |
253 | return _unpack(indexformatng, self.p.index[pos] or self.load(pos)) |
|
254 | return _unpack(indexformatng, self.p.index[pos] or self.load(pos)) | |
254 | def __setitem__(self, pos, item): |
|
255 | def __setitem__(self, pos, item): | |
255 | self.p.index[pos] = _pack(indexformatng, *item) |
|
256 | self.p.index[pos] = _pack(indexformatng, *item) | |
256 | def __delitem__(self, pos): |
|
257 | def __delitem__(self, pos): | |
257 | del self.p.index[pos] |
|
258 | del self.p.index[pos] | |
258 | def insert(self, pos, e): |
|
259 | def insert(self, pos, e): | |
259 | self.p.index.insert(pos, _pack(indexformatng, *e)) |
|
260 | self.p.index.insert(pos, _pack(indexformatng, *e)) | |
260 | def append(self, e): |
|
261 | def append(self, e): | |
261 | self.p.index.append(_pack(indexformatng, *e)) |
|
262 | self.p.index.append(_pack(indexformatng, *e)) | |
262 |
|
263 | |||
263 | class lazymap(object): |
|
264 | class lazymap(object): | |
264 | """a lazy version of the node map""" |
|
265 | """a lazy version of the node map""" | |
265 | def __init__(self, parser): |
|
266 | def __init__(self, parser): | |
266 | self.p = parser |
|
267 | self.p = parser | |
267 | def load(self, key): |
|
268 | def load(self, key): | |
268 | n = self.p.findnode(key) |
|
269 | n = self.p.findnode(key) | |
269 | if n == None: |
|
270 | if n == None: | |
270 | raise KeyError(key) |
|
271 | raise KeyError(key) | |
271 | def __contains__(self, key): |
|
272 | def __contains__(self, key): | |
272 | if key in self.p.map: |
|
273 | if key in self.p.map: | |
273 | return True |
|
274 | return True | |
274 | self.p.loadmap() |
|
275 | self.p.loadmap() | |
275 | return key in self.p.map |
|
276 | return key in self.p.map | |
276 | def __iter__(self): |
|
277 | def __iter__(self): | |
277 | yield nullid |
|
278 | yield nullid | |
278 | for i in xrange(self.p.l): |
|
279 | for i in xrange(self.p.l): | |
279 | ret = self.p.index[i] |
|
280 | ret = self.p.index[i] | |
280 | if not ret: |
|
281 | if not ret: | |
281 | self.p.loadindex(i) |
|
282 | self.p.loadindex(i) | |
282 | ret = self.p.index[i] |
|
283 | ret = self.p.index[i] | |
283 | if isinstance(ret, str): |
|
284 | if isinstance(ret, str): | |
284 | ret = _unpack(indexformatng, ret) |
|
285 | ret = _unpack(indexformatng, ret) | |
285 | yield ret[7] |
|
286 | yield ret[7] | |
286 | def __getitem__(self, key): |
|
287 | def __getitem__(self, key): | |
287 | try: |
|
288 | try: | |
288 | return self.p.map[key] |
|
289 | return self.p.map[key] | |
289 | except KeyError: |
|
290 | except KeyError: | |
290 | try: |
|
291 | try: | |
291 | self.load(key) |
|
292 | self.load(key) | |
292 | return self.p.map[key] |
|
293 | return self.p.map[key] | |
293 | except KeyError: |
|
294 | except KeyError: | |
294 | raise KeyError("node " + hex(key)) |
|
295 | raise KeyError("node " + hex(key)) | |
295 | def __setitem__(self, key, val): |
|
296 | def __setitem__(self, key, val): | |
296 | self.p.map[key] = val |
|
297 | self.p.map[key] = val | |
297 | def __delitem__(self, key): |
|
298 | def __delitem__(self, key): | |
298 | del self.p.map[key] |
|
299 | del self.p.map[key] | |
299 |
|
300 | |||
300 | indexformatv0 = ">4l20s20s20s" |
|
301 | indexformatv0 = ">4l20s20s20s" | |
301 | v0shaoffset = 56 |
|
302 | v0shaoffset = 56 | |
302 |
|
303 | |||
303 | class revlogoldio(object): |
|
304 | class revlogoldio(object): | |
304 | def __init__(self): |
|
305 | def __init__(self): | |
305 | self.size = struct.calcsize(indexformatv0) |
|
306 | self.size = struct.calcsize(indexformatv0) | |
306 |
|
307 | |||
307 | def parseindex(self, fp, inline): |
|
308 | def parseindex(self, fp, inline): | |
308 | s = self.size |
|
309 | s = self.size | |
309 | index = [] |
|
310 | index = [] | |
310 | nodemap = {nullid: nullrev} |
|
311 | nodemap = {nullid: nullrev} | |
311 | n = off = 0 |
|
312 | n = off = 0 | |
312 | data = fp.read() |
|
313 | data = fp.read() | |
313 | l = len(data) |
|
314 | l = len(data) | |
314 | while off + s <= l: |
|
315 | while off + s <= l: | |
315 | cur = data[off:off + s] |
|
316 | cur = data[off:off + s] | |
316 | off += s |
|
317 | off += s | |
317 | e = _unpack(indexformatv0, cur) |
|
318 | e = _unpack(indexformatv0, cur) | |
318 | # transform to revlogv1 format |
|
319 | # transform to revlogv1 format | |
319 | e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], |
|
320 | e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], | |
320 | nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) |
|
321 | nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) | |
321 | index.append(e2) |
|
322 | index.append(e2) | |
322 | nodemap[e[6]] = n |
|
323 | nodemap[e[6]] = n | |
323 | n += 1 |
|
324 | n += 1 | |
324 |
|
325 | |||
325 | return index, nodemap, None |
|
326 | return index, nodemap, None | |
326 |
|
327 | |||
327 | def packentry(self, entry, node, version, rev): |
|
328 | def packentry(self, entry, node, version, rev): | |
328 | e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], |
|
329 | e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], | |
329 | node(entry[5]), node(entry[6]), entry[7]) |
|
330 | node(entry[5]), node(entry[6]), entry[7]) | |
330 | return _pack(indexformatv0, *e2) |
|
331 | return _pack(indexformatv0, *e2) | |
331 |
|
332 | |||
332 | # index ng: |
|
333 | # index ng: | |
333 | # 6 bytes offset |
|
334 | # 6 bytes offset | |
334 | # 2 bytes flags |
|
335 | # 2 bytes flags | |
335 | # 4 bytes compressed length |
|
336 | # 4 bytes compressed length | |
336 | # 4 bytes uncompressed length |
|
337 | # 4 bytes uncompressed length | |
337 | # 4 bytes: base rev |
|
338 | # 4 bytes: base rev | |
338 | # 4 bytes link rev |
|
339 | # 4 bytes link rev | |
339 | # 4 bytes parent 1 rev |
|
340 | # 4 bytes parent 1 rev | |
340 | # 4 bytes parent 2 rev |
|
341 | # 4 bytes parent 2 rev | |
341 | # 32 bytes: nodeid |
|
342 | # 32 bytes: nodeid | |
342 | indexformatng = ">Qiiiiii20s12x" |
|
343 | indexformatng = ">Qiiiiii20s12x" | |
343 | ngshaoffset = 32 |
|
344 | ngshaoffset = 32 | |
344 | versionformat = ">I" |
|
345 | versionformat = ">I" | |
345 |
|
346 | |||
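Per the layout comment above, each "ng" index entry is 64 bytes: the packed offset/flags quad, the compressed and uncompressed lengths, the base, link, and parent revisions, and a 20-byte nodeid padded out to 32 bytes. A pack/unpack sketch against that format string:

import struct

indexformatng = ">Qiiiiii20s12x"
print(struct.calcsize(indexformatng))    # 64 bytes per index entry

node = b'\xab' * 20
entry = (0 << 16 | 0, 30, 120, 0, 0, -1, -1, node)  # offset_type, lens, revs
packed = struct.pack(indexformatng, *entry)
assert struct.unpack(indexformatng, packed) == entry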
346 | class revlogio(object): |
|
347 | class revlogio(object): | |
347 | def __init__(self): |
|
348 | def __init__(self): | |
348 | self.size = struct.calcsize(indexformatng) |
|
349 | self.size = struct.calcsize(indexformatng) | |
349 |
|
350 | |||
350 | def parseindex(self, fp, inline): |
|
351 | def parseindex(self, fp, inline): | |
351 | try: |
|
352 | try: | |
352 | size = util.fstat(fp).st_size |
|
353 | size = util.fstat(fp).st_size | |
353 | except AttributeError: |
|
354 | except AttributeError: | |
354 | size = 0 |
|
355 | size = 0 | |
355 |
|
356 | |||
356 | if util.openhardlinks() and not inline and size > 1000000: |
|
357 | if util.openhardlinks() and not inline and size > 1000000: | |
357 | # big index, let's parse it on demand |
|
358 | # big index, let's parse it on demand | |
358 | parser = lazyparser(fp, size) |
|
359 | parser = lazyparser(fp, size) | |
359 | index = lazyindex(parser) |
|
360 | index = lazyindex(parser) | |
360 | nodemap = lazymap(parser) |
|
361 | nodemap = lazymap(parser) | |
361 | e = list(index[0]) |
|
362 | e = list(index[0]) | |
362 | type = gettype(e[0]) |
|
363 | type = gettype(e[0]) | |
363 | e[0] = offset_type(0, type) |
|
364 | e[0] = offset_type(0, type) | |
364 | index[0] = e |
|
365 | index[0] = e | |
365 | return index, nodemap, None |
|
366 | return index, nodemap, None | |
366 |
|
367 | |||
367 | data = fp.read() |
|
368 | data = fp.read() | |
368 | # call the C implementation to parse the index data |
|
369 | # call the C implementation to parse the index data | |
369 | index, nodemap, cache = parsers.parse_index(data, inline) |
|
370 | index, nodemap, cache = parsers.parse_index(data, inline) | |
370 | return index, nodemap, cache |
|
371 | return index, nodemap, cache | |
371 |
|
372 | |||
372 | def packentry(self, entry, node, version, rev): |
|
373 | def packentry(self, entry, node, version, rev): | |
373 | p = _pack(indexformatng, *entry) |
|
374 | p = _pack(indexformatng, *entry) | |
374 | if rev == 0: |
|
375 | if rev == 0: | |
375 | p = _pack(versionformat, version) + p[4:] |
|
376 | p = _pack(versionformat, version) + p[4:] | |
376 | return p |
|
377 | return p | |
377 |
|
378 | |||
378 | class revlog(object): |
|
379 | class revlog(object): | |
379 | """ |
|
380 | """ | |
380 | the underlying revision storage object |
|
381 | the underlying revision storage object | |
381 |
|
382 | |||
382 | A revlog consists of two parts, an index and the revision data. |
|
383 | A revlog consists of two parts, an index and the revision data. | |
383 |
|
384 | |||
384 | The index is a file with a fixed record size containing |
|
385 | The index is a file with a fixed record size containing | |
385 | information on each revision, including its nodeid (hash), the |
|
386 | information on each revision, including its nodeid (hash), the | |
386 | nodeids of its parents, the position and offset of its data within |
|
387 | nodeids of its parents, the position and offset of its data within | |
387 | the data file, and the revision it's based on. Finally, each entry |
|
388 | the data file, and the revision it's based on. Finally, each entry | |
388 | contains a linkrev entry that can serve as a pointer to external |
|
389 | contains a linkrev entry that can serve as a pointer to external | |
389 | data. |
|
390 | data. | |
390 |
|
391 | |||
391 | The revision data itself is a linear collection of data chunks. |
|
392 | The revision data itself is a linear collection of data chunks. | |
392 | Each chunk represents a revision and is usually represented as a |
|
393 | Each chunk represents a revision and is usually represented as a | |
393 | delta against the previous chunk. To bound lookup time, runs of |
|
394 | delta against the previous chunk. To bound lookup time, runs of | |
394 | deltas are limited to about 2 times the length of the original |
|
395 | deltas are limited to about 2 times the length of the original | |
395 | version data. This makes retrieval of a version proportional to |
|
396 | version data. This makes retrieval of a version proportional to | |
396 | its size, or O(1) relative to the number of revisions. |
|
397 | its size, or O(1) relative to the number of revisions. | |
397 |
|
398 | |||
398 | Both pieces of the revlog are written to in an append-only |
|
399 | Both pieces of the revlog are written to in an append-only | |
399 | fashion, which means we never need to rewrite a file to insert or |
|
400 | fashion, which means we never need to rewrite a file to insert or | |
400 | remove data, and can use some simple techniques to avoid the need |
|
401 | remove data, and can use some simple techniques to avoid the need | |
401 | for locking while reading. |
|
402 | for locking while reading. | |
402 | """ |
|
403 | """ | |
403 | def __init__(self, opener, indexfile): |
|
404 | def __init__(self, opener, indexfile): | |
404 | """ |
|
405 | """ | |
405 | create a revlog object |
|
406 | create a revlog object | |
406 |
|
407 | |||
407 | opener is a function that abstracts the file opening operation |
|
408 | opener is a function that abstracts the file opening operation | |
408 | and can be used to implement COW semantics or the like. |
|
409 | and can be used to implement COW semantics or the like. | |
409 | """ |
|
410 | """ | |
410 | self.indexfile = indexfile |
|
411 | self.indexfile = indexfile | |
411 | self.datafile = indexfile[:-2] + ".d" |
|
412 | self.datafile = indexfile[:-2] + ".d" | |
412 | self.opener = opener |
|
413 | self.opener = opener | |
413 | self._cache = None |
|
414 | self._cache = None | |
414 | self._chunkcache = None |
|
415 | self._chunkcache = None | |
415 | self.nodemap = {nullid: nullrev} |
|
416 | self.nodemap = {nullid: nullrev} | |
416 | self.index = [] |
|
417 | self.index = [] | |
417 |
|
418 | |||
418 | v = REVLOG_DEFAULT_VERSION |
|
419 | v = REVLOG_DEFAULT_VERSION | |
419 | if hasattr(opener, "defversion"): |
|
420 | if hasattr(opener, "defversion"): | |
420 | v = opener.defversion |
|
421 | v = opener.defversion | |
421 | if v & REVLOGNG: |
|
422 | if v & REVLOGNG: | |
422 | v |= REVLOGNGINLINEDATA |
|
423 | v |= REVLOGNGINLINEDATA | |
423 |
|
424 | |||
424 | i = "" |
|
425 | i = "" | |
425 | try: |
|
426 | try: | |
426 | f = self.opener(self.indexfile) |
|
427 | f = self.opener(self.indexfile) | |
427 | i = f.read(4) |
|
428 | i = f.read(4) | |
428 | f.seek(0) |
|
429 | f.seek(0) | |
429 | if len(i) > 0: |
|
430 | if len(i) > 0: | |
430 | v = struct.unpack(versionformat, i)[0] |
|
431 | v = struct.unpack(versionformat, i)[0] | |
431 | except IOError, inst: |
|
432 | except IOError, inst: | |
432 | if inst.errno != errno.ENOENT: |
|
433 | if inst.errno != errno.ENOENT: | |
433 | raise |
|
434 | raise | |
434 |
|
435 | |||
435 | self.version = v |
|
436 | self.version = v | |
436 | self._inline = v & REVLOGNGINLINEDATA |
|
437 | self._inline = v & REVLOGNGINLINEDATA | |
437 | flags = v & ~0xFFFF |
|
438 | flags = v & ~0xFFFF | |
438 | fmt = v & 0xFFFF |
|
439 | fmt = v & 0xFFFF | |
439 | if fmt == REVLOGV0 and flags: |
|
440 | if fmt == REVLOGV0 and flags: | |
440 | raise RevlogError(_("index %s unknown flags %#04x for format v0") |
|
441 | raise RevlogError(_("index %s unknown flags %#04x for format v0") | |
441 | % (self.indexfile, flags >> 16)) |
|
442 | % (self.indexfile, flags >> 16)) | |
442 | elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA: |
|
443 | elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA: | |
443 | raise RevlogError(_("index %s unknown flags %#04x for revlogng") |
|
444 | raise RevlogError(_("index %s unknown flags %#04x for revlogng") | |
444 | % (self.indexfile, flags >> 16)) |
|
445 | % (self.indexfile, flags >> 16)) | |
445 | elif fmt > REVLOGNG: |
|
446 | elif fmt > REVLOGNG: | |
446 | raise RevlogError(_("index %s unknown format %d") |
|
447 | raise RevlogError(_("index %s unknown format %d") | |
447 | % (self.indexfile, fmt)) |
|
448 | % (self.indexfile, fmt)) | |
448 |
|
449 | |||
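As the validation above shows, the first four bytes of the index double as a version word: the low 16 bits carry the format, the high bits carry feature flags such as inline data. Decoding the default version under that split (constants copied from the top of the file):

REVLOGNG = 1
REVLOGNGINLINEDATA = 1 << 16
v = REVLOGNG | REVLOGNGINLINEDATA    # REVLOG_DEFAULT_VERSION

fmt = v & 0xFFFF                     # low 16 bits: revlog format
flags = v & ~0xFFFF                  # high bits: feature flags
print(fmt == REVLOGNG)                       # True
print(bool(flags & REVLOGNGINLINEDATA))      # True: data is inline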
449 | self._io = revlogio() |
|
450 | self._io = revlogio() | |
450 | if self.version == REVLOGV0: |
|
451 | if self.version == REVLOGV0: | |
451 | self._io = revlogoldio() |
|
452 | self._io = revlogoldio() | |
452 | if i: |
|
453 | if i: | |
453 | d = self._io.parseindex(f, self._inline) |
|
454 | d = self._io.parseindex(f, self._inline) | |
454 | self.index, self.nodemap, self._chunkcache = d |
|
455 | self.index, self.nodemap, self._chunkcache = d | |
455 |
|
456 | |||
456 | # add the magic null revision at -1 (if it hasn't been done already) |
|
457 | # add the magic null revision at -1 (if it hasn't been done already) | |
457 | if (self.index == [] or isinstance(self.index, lazyindex) or |
|
458 | if (self.index == [] or isinstance(self.index, lazyindex) or | |
458 | self.index[-1][7] != nullid) : |
|
459 | self.index[-1][7] != nullid) : | |
459 | self.index.append((0, 0, 0, -1, -1, -1, -1, nullid)) |
|
460 | self.index.append((0, 0, 0, -1, -1, -1, -1, nullid)) | |
460 |
|
461 | |||
461 | def _loadindex(self, start, end): |
|
462 | def _loadindex(self, start, end): | |
462 | """load a block of indexes all at once from the lazy parser""" |
|
463 | """load a block of indexes all at once from the lazy parser""" | |
463 | if isinstance(self.index, lazyindex): |
|
464 | if isinstance(self.index, lazyindex): | |
464 | self.index.p.loadindex(start, end) |
|
465 | self.index.p.loadindex(start, end) | |
465 |
|
466 | |||
466 | def _loadindexmap(self): |
|
467 | def _loadindexmap(self): | |
467 | """loads both the map and the index from the lazy parser""" |
|
468 | """loads both the map and the index from the lazy parser""" | |
468 | if isinstance(self.index, lazyindex): |
|
469 | if isinstance(self.index, lazyindex): | |
469 | p = self.index.p |
|
470 | p = self.index.p | |
470 | p.loadindex() |
|
471 | p.loadindex() | |
471 | self.nodemap = p.map |
|
472 | self.nodemap = p.map | |
472 |
|
473 | |||
473 | def _loadmap(self): |
|
474 | def _loadmap(self): | |
474 | """loads the map from the lazy parser""" |
|
475 | """loads the map from the lazy parser""" | |
475 | if isinstance(self.nodemap, lazymap): |
|
476 | if isinstance(self.nodemap, lazymap): | |
476 | self.nodemap.p.loadmap() |
|
477 | self.nodemap.p.loadmap() | |
477 | self.nodemap = self.nodemap.p.map |
|
478 | self.nodemap = self.nodemap.p.map | |
478 |
|
479 | |||
479 | def tip(self): |
|
480 | def tip(self): | |
480 | return self.node(len(self.index) - 2) |
|
481 | return self.node(len(self.index) - 2) | |
481 | def __len__(self): |
|
482 | def __len__(self): | |
482 | return len(self.index) - 1 |
|
483 | return len(self.index) - 1 | |
483 | def __iter__(self): |
|
484 | def __iter__(self): | |
484 | for i in xrange(len(self)): |
|
485 | for i in xrange(len(self)): | |
485 | yield i |
|
486 | yield i | |
486 | def rev(self, node): |
|
487 | def rev(self, node): | |
487 | try: |
|
488 | try: | |
488 | return self.nodemap[node] |
|
489 | return self.nodemap[node] | |
489 | except KeyError: |
|
490 | except KeyError: | |
490 | raise LookupError(node, self.indexfile, _('no node')) |
|
491 | raise LookupError(node, self.indexfile, _('no node')) | |
491 | def node(self, rev): |
|
492 | def node(self, rev): | |
492 | return self.index[rev][7] |
|
493 | return self.index[rev][7] | |
493 | def linkrev(self, rev): |
|
494 | def linkrev(self, rev): | |
494 | return self.index[rev][4] |
|
495 | return self.index[rev][4] | |
495 | def parents(self, node): |
|
496 | def parents(self, node): | |
496 | i = self.index |
|
497 | i = self.index | |
497 | d = i[self.rev(node)] |
|
498 | d = i[self.rev(node)] | |
498 | return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline |
|
499 | return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline | |
499 | def parentrevs(self, rev): |
|
500 | def parentrevs(self, rev): | |
500 | return self.index[rev][5:7] |
|
501 | return self.index[rev][5:7] | |
501 | def start(self, rev): |
|
502 | def start(self, rev): | |
502 | return int(self.index[rev][0] >> 16) |
|
503 | return int(self.index[rev][0] >> 16) | |
503 | def end(self, rev): |
|
504 | def end(self, rev): | |
504 | return self.start(rev) + self.length(rev) |
|
505 | return self.start(rev) + self.length(rev) | |
505 | def length(self, rev): |
|
506 | def length(self, rev): | |
506 | return self.index[rev][1] |
|
507 | return self.index[rev][1] | |
507 | def base(self, rev): |
|
508 | def base(self, rev): | |
508 | return self.index[rev][3] |
|
509 | return self.index[rev][3] | |
509 |
|
510 | |||
510 | def size(self, rev): |
|
511 | def size(self, rev): | |
511 | """return the length of the uncompressed text for a given revision""" |
|
512 | """return the length of the uncompressed text for a given revision""" | |
512 | l = self.index[rev][2] |
|
513 | l = self.index[rev][2] | |
513 | if l >= 0: |
|
514 | if l >= 0: | |
514 | return l |
|
515 | return l | |
515 |
|
516 | |||
516 | t = self.revision(self.node(rev)) |
|
517 | t = self.revision(self.node(rev)) | |
517 | return len(t) |
|
518 | return len(t) | |
518 |
|
519 | |||
519 | # alternate implementation, The advantage to this code is it |
|
520 | # alternate implementation, The advantage to this code is it | |
520 | # will be faster for a single revision. But, the results are not |
|
521 | # will be faster for a single revision. But, the results are not | |
521 | # cached, so finding the size of every revision will be slower. |
|
522 | # cached, so finding the size of every revision will be slower. | |
522 | """ |
|
523 | """ | |
523 | if self.cache and self.cache[1] == rev: |
|
524 | if self.cache and self.cache[1] == rev: | |
524 | return len(self.cache[2]) |
|
525 | return len(self.cache[2]) | |
525 |
|
526 | |||
526 | base = self.base(rev) |
|
527 | base = self.base(rev) | |
527 | if self.cache and self.cache[1] >= base and self.cache[1] < rev: |
|
528 | if self.cache and self.cache[1] >= base and self.cache[1] < rev: | |
528 | base = self.cache[1] |
|
529 | base = self.cache[1] | |
529 | text = self.cache[2] |
|
530 | text = self.cache[2] | |
530 | else: |
|
531 | else: | |
531 | text = self.revision(self.node(base)) |
|
532 | text = self.revision(self.node(base)) | |
532 |
|
533 | |||
533 | l = len(text) |
|
534 | l = len(text) | |
534 | for x in xrange(base + 1, rev + 1): |
|
535 | for x in xrange(base + 1, rev + 1): | |
535 | l = mdiff.patchedsize(l, self.chunk(x)) |
|
536 | l = mdiff.patchedsize(l, self.chunk(x)) | |
536 | return l |
|
537 | return l | |
537 | """ |
|
538 | """ | |
538 |
|
539 | |||
539 | def reachable(self, node, stop=None): |
|
540 | def reachable(self, node, stop=None): | |
540 | """return a hash of all nodes ancestral to a given node, including |
|
541 | """return a hash of all nodes ancestral to a given node, including | |
541 | the node itself, stopping when stop is matched""" |
|
542 | the node itself, stopping when stop is matched""" | |
542 | reachable = {} |
|
543 | reachable = {} | |
543 | visit = [node] |
|
544 | visit = [node] | |
544 | reachable[node] = 1 |
|
545 | reachable[node] = 1 | |
545 | if stop: |
|
546 | if stop: | |
546 | stopn = self.rev(stop) |
|
547 | stopn = self.rev(stop) | |
547 | else: |
|
548 | else: | |
548 | stopn = 0 |
|
549 | stopn = 0 | |
549 | while visit: |
|
550 | while visit: | |
550 | n = visit.pop(0) |
|
551 | n = visit.pop(0) | |
551 | if n == stop: |
|
552 | if n == stop: | |
552 | continue |
|
553 | continue | |
553 | if n == nullid: |
|
554 | if n == nullid: | |
554 | continue |
|
555 | continue | |
555 | for p in self.parents(n): |
|
556 | for p in self.parents(n): | |
556 | if self.rev(p) < stopn: |
|
557 | if self.rev(p) < stopn: | |
557 | continue |
|
558 | continue | |
558 | if p not in reachable: |
|
559 | if p not in reachable: | |
559 | reachable[p] = 1 |
|
560 | reachable[p] = 1 | |
560 | visit.append(p) |
|
561 | visit.append(p) | |
561 | return reachable |
|
562 | return reachable | |
562 |
|
563 | |||
563 | def ancestors(self, *revs): |
|
564 | def ancestors(self, *revs): | |
564 | 'Generate the ancestors of revs using a breadth-first visit' |
|
565 | 'Generate the ancestors of revs using a breadth-first visit' | |
565 | visit = list(revs) |
|
566 | visit = list(revs) | |
566 | seen = util.set([nullrev]) |
|
567 | seen = util.set([nullrev]) | |
567 | while visit: |
|
568 | while visit: | |
568 | for parent in self.parentrevs(visit.pop(0)): |
|
569 | for parent in self.parentrevs(visit.pop(0)): | |
569 | if parent not in seen: |
|
570 | if parent not in seen: | |
570 | visit.append(parent) |
|
571 | visit.append(parent) | |
571 | seen.add(parent) |
|
572 | seen.add(parent) | |
572 | yield parent |
|
573 | yield parent | |
573 |
|
574 | |||
574 | def descendants(self, *revs): |
|
575 | def descendants(self, *revs): | |
575 | 'Generate the descendants of revs in topological order' |
|
576 | 'Generate the descendants of revs in topological order' | |
576 | seen = util.set(revs) |
|
577 | seen = util.set(revs) | |
577 | for i in xrange(min(revs) + 1, len(self)): |
|
578 | for i in xrange(min(revs) + 1, len(self)): | |
578 | for x in self.parentrevs(i): |
|
579 | for x in self.parentrevs(i): | |
579 | if x != nullrev and x in seen: |
|
580 | if x != nullrev and x in seen: | |
580 | seen.add(i) |
|
581 | seen.add(i) | |
581 | yield i |
|
582 | yield i | |
582 | break |
|
583 | break | |
583 |
|
584 | |||
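The same breadth-first walk works over any parent table. A toy sketch with a hypothetical history (rev -> (p1, p2), nullrev marking a missing parent; not a real repository):

nullrev = -1
parentrevs = {0: (nullrev, nullrev), 1: (0, nullrev),
              2: (0, nullrev), 3: (1, 2)}        # rev 3 merges 1 and 2

def ancestors(*revs):
    visit, seen = list(revs), set([nullrev])
    while visit:
        for parent in parentrevs[visit.pop(0)]:
            if parent not in seen:
                visit.append(parent)
                seen.add(parent)
                yield parent

print(sorted(ancestors(3)))   # [0, 1, 2] -- every ancestor of the merge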
584 | def findmissing(self, common=None, heads=None): |
|
585 | def findmissing(self, common=None, heads=None): | |
585 | ''' |
|
586 | ''' | |
586 | returns the topologically sorted list of nodes from the set: |
|
587 | returns the topologically sorted list of nodes from the set: | |
587 | missing = (ancestors(heads) \ ancestors(common)) |
|
588 | missing = (ancestors(heads) \ ancestors(common)) | |
588 |
|
589 | |||
589 | where ancestors() is the set of ancestors from heads, heads included |
|
590 | where ancestors() is the set of ancestors from heads, heads included | |
590 |
|
591 | |||
591 | if heads is None, the heads of the revlog are used |
|
592 | if heads is None, the heads of the revlog are used | |
592 | if common is None, nullid is assumed to be a common node |
|
593 | if common is None, nullid is assumed to be a common node | |
593 | ''' |
|
594 | ''' | |
594 | if common is None: |
|
595 | if common is None: | |
595 | common = [nullid] |
|
596 | common = [nullid] | |
596 | if heads is None: |
|
597 | if heads is None: | |
597 | heads = self.heads() |
|
598 | heads = self.heads() | |
598 |
|
599 | |||
599 | common = [self.rev(n) for n in common] |
|
600 | common = [self.rev(n) for n in common] | |
600 | heads = [self.rev(n) for n in heads] |
|
601 | heads = [self.rev(n) for n in heads] | |
601 |
|
602 | |||
602 | # we want the ancestors, but inclusive |
|
603 | # we want the ancestors, but inclusive | |
603 | has = dict.fromkeys(self.ancestors(*common)) |
|
604 | has = dict.fromkeys(self.ancestors(*common)) | |
604 | has[nullrev] = None |
|
605 | has[nullrev] = None | |
605 | for r in common: |
|
606 | for r in common: | |
606 | has[r] = None |
|
607 | has[r] = None | |
607 |
|
608 | |||
608 | # take all ancestors from heads that aren't in has |
|
609 | # take all ancestors from heads that aren't in has | |
609 | missing = {} |
|
610 | missing = {} | |
610 | visit = [r for r in heads if r not in has] |
|
611 | visit = [r for r in heads if r not in has] | |
611 | while visit: |
|
612 | while visit: | |
612 | r = visit.pop(0) |
|
613 | r = visit.pop(0) | |
613 | if r in missing: |
|
614 | if r in missing: | |
614 | continue |
|
615 | continue | |
615 | else: |
|
616 | else: | |
616 | missing[r] = None |
|
617 | missing[r] = None | |
617 | for p in self.parentrevs(r): |
|
618 | for p in self.parentrevs(r): | |
618 | if p not in has: |
|
619 | if p not in has: | |
619 | visit.append(p) |
|
620 | visit.append(p) | |
620 | missing = missing.keys() |
|
621 | missing = missing.keys() | |
621 | missing.sort() |
|
622 | missing.sort() | |
622 | return [self.node(r) for r in missing] |
|
623 | return [self.node(r) for r in missing] | |
623 |
|
624 | |||
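findmissing is set subtraction over inclusive ancestor sets: everything reachable from heads minus everything reachable from common. A worked sketch on the same toy history as above, self-contained:

nullrev = -1
parentrevs = {0: (nullrev, nullrev), 1: (0, nullrev),
              2: (0, nullrev), 3: (1, 2)}

def inclusive_ancestors(rev):
    seen, visit = set([rev]), [rev]     # "we want the ancestors, but inclusive"
    while visit:
        for p in parentrevs[visit.pop(0)]:
            if p != nullrev and p not in seen:
                seen.add(p)
                visit.append(p)
    return seen

has = inclusive_ancestors(1)               # ancestors(common), common = rev 1
missing = inclusive_ancestors(3) - has     # ancestors(heads) \ has, head = rev 3
print(sorted(missing))                     # [2, 3] -- what a pull would fetch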
624 | def nodesbetween(self, roots=None, heads=None): |
|
625 | def nodesbetween(self, roots=None, heads=None): | |
625 | """Return a tuple containing three elements. Elements 1 and 2 contain |
|
626 | """Return a tuple containing three elements. Elements 1 and 2 contain | |
626 | a final list bases and heads after all the unreachable ones have been |
|
627 | a final list bases and heads after all the unreachable ones have been | |
627 | pruned. Element 0 contains a topologically sorted list of all |
|
628 | pruned. Element 0 contains a topologically sorted list of all | |
628 |
|
629 | |||
629 | nodes that satisfy these constraints: |
|
630 | nodes that satisfy these constraints: | |
630 | 1. All nodes must be descended from a node in roots (the nodes on |
|
631 | 1. All nodes must be descended from a node in roots (the nodes on | |
631 | roots are considered descended from themselves). |
|
632 | roots are considered descended from themselves). | |
632 | 2. All nodes must also be ancestors of a node in heads (the nodes in |
|
633 | 2. All nodes must also be ancestors of a node in heads (the nodes in | |
633 | heads are considered to be their own ancestors). |
|
634 | heads are considered to be their own ancestors). | |
634 |
|
635 | |||
635 | If roots is unspecified, nullid is assumed as the only root. |
|
636 | If roots is unspecified, nullid is assumed as the only root. | |
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads.keys()
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since it wasn't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary whose
        # values don't matter).
        descendants = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant.  (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants[n] = 1
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

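    # A minimal usage sketch (illustrative only; 'rl', 'a' and 'b' name an
    # assumed revlog instance and two binary node ids):
    #
    #     between, roots, heads = rl.nodesbetween([a], [b])
    #
    # 'between' holds every node that is both a descendant of 'a' and an
    # ancestor of 'b', in topological order; 'roots' and 'heads' are the
    # members of the input lists that actually bound that range.
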
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = len(self)
            if not count:
                return [nullid]
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = dict.fromkeys([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable[r] = 1
                        heads[r] = 1
                    if p in heads and p not in stoprevs:
                        del heads[p]

        return [self.node(r) for r in heads]

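    # Note on the fast path above (an editorial gloss, not original text):
    # index entry fields 5 and 6 are the two parent revisions, so a single
    # linear pass that clears ishead[] for every revision named as a parent
    # leaves exactly the childless revisions marked.
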
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

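    # Usage sketch (illustrative; 'rl' names an assumed revlog instance):
    #
    #     kids = rl.children(rl.node(5))   # nodes whose parent is rev 5
    #
    # Note the linear scan: revlogs record only parent pointers, so finding
    # children requires walking every revision after the given one.
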
    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) / 2 # grab an even number of digits
                bin_id = bin(id[:l*2])
                nl = [n for n in self.nodemap if n[:l] == bin_id]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

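    # Resolution order sketch (illustrative; 'rl' names an assumed instance
    # and "a3f1c2" is a hypothetical hex prefix):
    #
    #     rl.lookup(10)         # integer revision number
    #     rl.lookup("10")       # str(rev)
    #     rl.lookup("a3f1c2")   # unambiguous hex node prefix
    #
    # _match handles the exact forms first; _partialmatch then tries hex
    # prefixes, and any ambiguity or miss surfaces as a LookupError.
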
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def chunk(self, rev, df=None):
        def loadcache(df):
            if not df:
                if self._inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self._chunkcache = (start, df.read(cache_length))

        start, length = self.start(rev), self.length(rev)
        if self._inline:
            start += (rev + 1) * self._io.size
        end = start + length

        offset = 0
        if not self._chunkcache:
            cache_length = max(65536, length)
            loadcache(df)
        else:
            cache_start = self._chunkcache[0]
            cache_length = len(self._chunkcache[1])
            cache_end = cache_start + cache_length
            if start >= cache_start and end <= cache_end:
                # it is cached
                offset = start - cache_start
            else:
                cache_length = max(65536, length)
                loadcache(df)

        # avoid copying large chunks
        c = self._chunkcache[1]
        if cache_length != length:
            c = c[offset:offset + length]

        return decompress(c)

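    # The cache policy above: on a miss, read at least 64KB (65536 bytes)
    # starting at the requested chunk's offset, so reconstructing a delta
    # chain of many small chunks usually touches the file only once.
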
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
            return self.chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return str(self._cache[2])

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        df = None

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            base = self._cache[1]
            text = str(self._cache[2])
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base + 1:
                df = self.opener(self.datafile)
        else:
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base:
                df = self.opener(self.datafile)
            text = self.chunk(base, df=df)

        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text

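    # Reconstruction sketch: a revision is stored as a full snapshot at
    # base(rev) plus a chain of deltas.  The code above is morally
    #
    #     text = chunk(base)
    #     for r in xrange(base + 1, rev + 1):
    #         text = patch(text, chunk(r))
    #
    # followed by a check that hash(text, p1, p2) matches the stored node.
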
    def checkinlinesize(self, tr, fp=None):
        if not self._inline:
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        try:
            calc = self._io.size
            for r in self:
                start = self.start(r) + (r + 1) * calc
                length = self.length(r)
                fp.seek(start)
                d = fp.read(length)
                df.write(d)
        finally:
            df.close()

        fp.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self._chunkcache = None

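    # Migration trigger: an inline revlog keeps its data interleaved with
    # the index until the file passes 128KB (131072 bytes); beyond that the
    # data is copied out to the separate data file and the index is
    # rewritten without the REVLOGNGINLINEDATA flag.
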
    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node

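    # The snapshot heuristic above: 'dist' approximates the number of bytes
    # that must be read to reconstruct this revision through its delta
    # chain.  Once dist > 2 * len(text), a full compressed snapshot is
    # stored instead, bounding reconstruction cost at the price of space.
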
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes.  The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets.  The delta parent is always the first parent
        (parents()[0]).
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()

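    # Wire format sketch: each chunk is a length header followed by an
    # 80-byte metadata block (node, p1, p2, and the link node returned by
    # lookup()) and then the delta itself; deltas over 1MB (2**20) are
    # streamed in 256KB (2**18) slices to keep memory use flat.
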
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if chain not in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

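    # Inverse of group(): each incoming chunk starts with the same 80-byte
    # header (node, p1, p2, changeset node for the linkrev); the first
    # delta applies against p1, later ones against the node just added,
    # and a full text is materialized whenever the chain grows past twice
    # the text size or the incoming delta isn't against the previous node.
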
    def strip(self, minlink):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = None
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

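    # Usage note (editorial gloss): strip() truncates both the index and
    # data files in place; callers are expected to have saved any revisions
    # above the cut that must survive, and to re-add them afterwards.
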
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual / s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [ self.indexfile ]
        if not self._inline:
            res.append(self.datafile)
        return res
@@ -1,247 +1,247 @@
# sshrepo.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex
from i18n import _
import repo, re, util, error

class remotelock(object):
    def __init__(self, repo):
        self.repo = repo
    def release(self):
        self.repo.unlock()
        self.repo = None
    def __del__(self):
        if self.repo:
            self.release()

class sshrepository(repo.repository):
    def __init__(self, ui, path, create=0):
        self._url = path
        self.ui = ui

        m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
        if not m:
            self.abort(error.RepoError(_("couldn't parse location %s") % path))

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7) or "."

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")

        args = util.sshargs(sshcmd, self.host, self.user, self.port)

        if create:
            cmd = '%s %s "%s init %s"'
            cmd = cmd % (sshcmd, args, remotecmd, self.path)

            ui.note(_('running %s\n') % cmd)
            res = util.system(cmd)
            if res != 0:
                self.abort(error.RepoError(_("could not create remote repo")))

        self.validate_repo(ui, sshcmd, args, remotecmd)

    def url(self):
        return self._url

    def validate_repo(self, ui, sshcmd, args, remotecmd):
        # clean up any previous run
        self.cleanup()

        cmd = '%s %s "%s -R %s serve --stdio"'
        cmd = cmd % (sshcmd, args, remotecmd, self.path)

        cmd = util.quotecommand(cmd)
        ui.note(_('running %s\n') % cmd)
        self.pipeo, self.pipei, self.pipee = util.popen3(cmd, 'b')

        # skip any noise generated by remote shell
        self.do_cmd("hello")
        r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
        lines = ["", "dummy"]
        max_noise = 500
        while lines[-1] and max_noise:
            l = r.readline()
            self.readerr()
            if lines[-1] == "1\n" and l == "\n":
                break
            if l:
                ui.debug(_("remote: "), l)
            lines.append(l)
            max_noise -= 1
        else:
            self.abort(error.RepoError(_("no suitable response from remote hg")))

        self.capabilities = util.set()
        lines.reverse()
        for l in lines:
            if l.startswith("capabilities:"):
                self.capabilities.update(l[:-1].split(":")[1].split())
                break

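    # Handshake sketch: the client sends "hello" plus a "between" query on
    # the null range ("0"*40 twice), then discards up to max_noise lines of
    # shell banner output until the expected "1\n" followed by "\n" reply
    # appears; the capabilities line, if any, is fished out of the buffered
    # noise afterwards.
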
    def readerr(self):
        while 1:
            size = util.fstat(self.pipee).st_size
            if size == 0:
                break
            l = self.pipee.readline()
            if not l:
                break
            self.ui.status(_("remote: "), l)

    def abort(self, exception):
        self.cleanup()
        raise exception

    def cleanup(self):
        try:
            self.pipeo.close()
            self.pipei.close()
            # read the error descriptor until EOF
            for l in self.pipee:
                self.ui.status(_("remote: "), l)
            self.pipee.close()
        except:
            pass

    __del__ = cleanup

    def do_cmd(self, cmd, **args):
        self.ui.debug(_("sending %s command\n") % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.iteritems():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

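    # Framing sketch for one command as written to the remote's stdin
    # (here the "between" call issued during the handshake; the value is
    # two 40-character null hashes joined by "-", hence 81 bytes):
    #
    #     between\n
    #     pairs 81\n
    #     <81 bytes of argument data>
    #
    # i.e. the command name, then one "key length" line plus the raw value
    # for each keyword argument.
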
125 | def call(self, cmd, **args): |
|
125 | def call(self, cmd, **args): | |
126 | self.do_cmd(cmd, **args) |
|
126 | self.do_cmd(cmd, **args) | |
127 | return self._recv() |
|
127 | return self._recv() | |
128 |
|
128 | |||
    def _recv(self):
        l = self.pipei.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            self.abort(error.ResponseError(_("unexpected response:"), l))
        return self.pipei.read(l)

    def _send(self, data, flush=False):
        self.pipeo.write("%d\n" % len(data))
        if data:
            self.pipeo.write(data)
        if flush:
            self.pipeo.flush()
        self.readerr()
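_send and _recv implement a simple length-prefixed framing: a decimal byte count terminated by a newline, then exactly that many payload bytes. The round trip can be sketched with an in-memory file standing in for the ssh pipes:

# Sketch of the length-prefixed framing, with StringIO standing in for
# self.pipeo/self.pipei (no readerr() side channel here).
try:
    from cStringIO import StringIO   # Python 2, as used by this codebase
except ImportError:
    from io import StringIO          # fallback so the sketch stays runnable
pipe = StringIO()
data = "hello"
pipe.write("%d\n" % len(data))       # what _send() emits: the frame header
pipe.write(data)                     # then the payload itself
pipe.seek(0)
l = int(pipe.readline())             # what _recv() does: read the length...
assert pipe.read(l) == "hello"       # ...then exactly l bytes of payload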
    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")
    def lookup(self, key):
        self.requirecap('lookup', _('look up remote revision'))
        d = self.call("lookup", key=key)
        success, data = d[:-1].split(" ", 1)
        if int(success):
            return bin(data)
        else:
            self.abort(error.RepoError(data))
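A lookup reply is a single line of the form "<success> <payload>": "1" with the 40-character hex node on success, "0" with an error message otherwise. Parsing it in isolation, with a placeholder node:

# Sketch of the lookup reply parsing; the node value is a placeholder.
d = "1 " + "a" * 40 + "\n"
success, data = d[:-1].split(" ", 1)   # drop "\n", split off the status
assert int(success) == 1 and len(data) == 40
# the real method converts data with bin() before returning it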
    def heads(self):
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.abort(error.ResponseError(_("unexpected response:"), d))
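A heads reply is one newline-terminated line of space-separated hex nodes, which the method decodes with bin. Sketched with placeholder nodes:

# Sketch of the heads reply decoding (placeholder nodes; the real
# method maps bin() over the result).
d = "a" * 40 + " " + "b" * 40 + "\n"
heads = d[:-1].split(" ")
assert len(heads) == 2 and all(len(h) == 40 for h in heads)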
    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
            return br
        except:
            self.abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        n = " ".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()]
            return p
        except:
            self.abort(error.ResponseError(_("unexpected response:"), d))
    def changegroup(self, nodes, kind):
        n = " ".join(map(hex, nodes))
        return self.do_cmd("changegroup", roots=n)

    def changegroupsubset(self, bases, heads, kind):
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = " ".join(map(hex, bases))
        heads = " ".join(map(hex, heads))
        return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
    def unbundle(self, cg, heads, source):
        d = self.call("unbundle", heads=' '.join(map(hex, heads)))
        if d:
            # remote may send "unsynced changes"
            self.abort(error.RepoError(_("push refused: %s") % d))

        while 1:
            d = cg.read(4096)
            if not d:
                break
            self._send(d)

        self._send("", flush=True)

        r = self._recv()
        if r:
            # remote may send "unsynced changes"
            self.abort(error.RepoError(_("push failed: %s") % r))

        r = self._recv()
        try:
            return int(r)
        except:
            self.abort(error.ResponseError(_("unexpected response:"), r))
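The push side of unbundle streams the changegroup in 4096-byte chunks through _send, then sends an empty frame as the end-of-stream marker before reading the two replies. The chunking can be sketched with a file-like stand-in for the changegroup:

# Sketch of the unbundle streaming loop; the changegroup content is
# fabricated here purely to show the framing.
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
cg = StringIO("x" * 10000)
frames = []
while 1:
    d = cg.read(4096)
    if not d:
        break
    frames.append("%d\n%s" % (len(d), d))   # per-chunk frame, as in _send()
frames.append("0\n")                        # empty frame: end of stream
assert [f.split("\n", 1)[0] for f in frames] == ["4096", "4096", "1808", "0"]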
    def addchangegroup(self, cg, source, url):
        d = self.call("addchangegroup")
        if d:
            self.abort(error.RepoError(_("push refused: %s") % d))
        while 1:
            d = cg.read(4096)
            if not d:
                break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        r = self._recv()
        if not r:
            return 1
        try:
            return int(r)
        except:
            self.abort(error.ResponseError(_("unexpected response:"), r))
    def stream_out(self):
        return self.do_cmd('stream_out')

instance = sshrepository
@@ -1,117 +1,117 @@
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from i18n import _
import changelog, byterange, url, error
import localrepo, manifest, util, store
import urllib, urllib2, errno
class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urllib2.Request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
        except urllib2.HTTPError, inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

        if bytes:
            data = data[:bytes]
        return data
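read turns the current position and requested byte count into an HTTP Range header; when bytes is None the range is left open-ended. The header construction in isolation:

# Sketch of the Range header built by httprangereader.read() for a
# seek to 65536 followed by read(4096).
pos, count = 65536, 4096
end = ''
if count:
    end = pos + count - 1
header = 'bytes=%d-%s' % (pos, end)
assert header == 'bytes=65536-69631'   # with count=None: 'bytes=65536-'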
def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    def opener(base):
        """return a function that opens files over http"""
        p = base
        def o(path, mode="r"):
            f = "/".join((p, urllib.quote(path)))
            return httprangereader(f, urlopener)
        return o

    return opener
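build_opener returns a factory of openers: each call to the outer closure binds a base URL, and the inner o joins that base with a quoted relative path. The same closure shape, reduced to the URL arithmetic (the function name and host below are hypothetical):

# Sketch of the closure pattern build_opener() returns, reduced to the
# URL joining (example.com is a placeholder).
import urllib
def makeopener(base):
    def o(path, mode="r"):
        return "/".join((base, urllib.quote(path)))
    return o
o = makeopener("http://example.com/repo/.hg")
assert o("store/00changelog.i") == "http://example.com/repo/.hg/store/00changelog.i"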
class statichttprepository(localrepo.localrepository):
    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")

        opener = build_opener(ui, authinfo)
        self.opener = opener(self.path)

        # find requirements
        try:
            requirements = self.opener("requires").read().splitlines()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # check if it is a non-empty old-style repository
            try:
                self.opener("00changelog.i").read(1)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # we do not care about empty old-style repositories here
                msg = _("'%s' does not appear to be an hg repository") % path
                raise error.RepoError(msg)
            requirements = []

        # check them
        for r in requirements:
            if r not in self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)
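The "requires" file is fetched over HTTP like any other repository file, split into one requirement per line, and every entry must appear in self.supported. With a hypothetical file body and an illustrative supported set, not the real values:

# Sketch of the requirements check; both the file body and the
# supported set below are illustrative.
data = "revlogv1\nstore\n"
requirements = data.splitlines()
supported = set(["revlogv1", "store", "fncache"])
for r in requirements:
    if r not in supported:
        raise Exception("requirement '%s' not supported" % r)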
        # setup store
        def pjoin(a, b):
            return a + '/' + b
        self.store = store.store(requirements, self.path, opener, pjoin)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
    def url(self):
        return self._url

    def local(self):
        return False

    def lock(self, wait=True):
        raise util.Abort(_('cannot lock static-http repository'))
def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
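instance is the entry point Mercurial calls for static-http:// URLs; path[7:] strips the seven-character "static-" prefix so the repository class works against a plain http URL underneath:

# Sketch of the scheme rewriting in instance() (placeholder host).
path = "static-http://example.com/repo"
assert len("static-") == 7
assert path[7:] == "http://example.com/repo"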