cleanup: drop unused imports
Peter Arrenbrecht
r7873:4a4c7f6a default
@@ -1,161 +1,161 @@
1 1 # churn.py - create a graph of revisions count grouped by template
2 2 #
3 3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8 '''command to show certain statistics about revision history'''
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import patch, cmdutil, util, templater
12 - import os, sys
12 + import sys
13 13 import time, datetime
14 14
15 15 def maketemplater(ui, repo, tmpl):
16 16 tmpl = templater.parsestring(tmpl, quoted=False)
17 17 try:
18 18 t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
19 19 except SyntaxError, inst:
20 20 raise util.Abort(inst.args[0])
21 21 t.use_template(tmpl)
22 22 return t
23 23
24 24 def changedlines(ui, repo, ctx1, ctx2):
25 25 lines = 0
26 26 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node()))
27 27 for l in diff.split('\n'):
28 28 if (l.startswith("+") and not l.startswith("+++ ") or
29 29 l.startswith("-") and not l.startswith("--- ")):
30 30 lines += 1
31 31 return lines
32 32
33 33 def countrate(ui, repo, amap, *pats, **opts):
34 34 """Calculate stats"""
35 35 if opts.get('dateformat'):
36 36 def getkey(ctx):
37 37 t, tz = ctx.date()
38 38 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
39 39 return date.strftime(opts['dateformat'])
40 40 else:
41 41 tmpl = opts.get('template', '{author|email}')
42 42 tmpl = maketemplater(ui, repo, tmpl)
43 43 def getkey(ctx):
44 44 ui.pushbuffer()
45 45 tmpl.show(ctx)
46 46 return ui.popbuffer()
47 47
48 48 count = pct = 0
49 49 rate = {}
50 50 df = False
51 51 if opts.get('date'):
52 52 df = util.matchdate(opts['date'])
53 53
54 54 get = util.cachefunc(lambda r: repo[r].changeset())
55 55 changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
56 56 for st, rev, fns in changeiter:
57 57 if not st == 'add':
58 58 continue
59 59 if df and not df(get(rev)[2][0]): # doesn't match date format
60 60 continue
61 61
62 62 ctx = repo[rev]
63 63 key = getkey(ctx)
64 64 key = amap.get(key, key) # alias remap
65 65 if opts.get('changesets'):
66 66 rate[key] = rate.get(key, 0) + 1
67 67 else:
68 68 parents = ctx.parents()
69 69 if len(parents) > 1:
70 70 ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
71 71 continue
72 72
73 73 ctx1 = parents[0]
74 74 lines = changedlines(ui, repo, ctx1, ctx)
75 75 rate[key] = rate.get(key, 0) + lines
76 76
77 77 if opts.get('progress'):
78 78 count += 1
79 79 newpct = int(100.0 * count / max(len(repo), 1))
80 80 if pct < newpct:
81 81 pct = newpct
82 82 ui.write(_("\rgenerating stats: %d%%") % pct)
83 83 sys.stdout.flush()
84 84
85 85 if opts.get('progress'):
86 86 ui.write("\r")
87 87 sys.stdout.flush()
88 88
89 89 return rate
90 90
91 91
92 92 def churn(ui, repo, *pats, **opts):
93 93 '''graph count of revisions grouped by template
94 94
95 95 Will graph count of changed lines or revisions grouped by template or,
96 96 alternatively, by date if dateformat is used; in that case it overrides
97 97 the template.
98 98
99 99 By default statistics are counted for number of changed lines.
100 100
101 101 Examples:
102 102
103 103 # display count of changed lines for every committer
104 104 hg churn -t '{author|email}'
105 105
106 106 # display daily activity graph
107 107 hg churn -f '%H' -s -c
108 108
109 109 # display activity of developers by month
110 110 hg churn -f '%Y-%m' -s -c
111 111
112 112 # display count of lines changed in every year
113 113 hg churn -f '%Y' -s
114 114
115 115 The map file format used to specify aliases is fairly simple:
116 116
117 117 <alias email> <actual email>'''
118 118 def pad(s, l):
119 119 return (s + " " * l)[:l]
120 120
121 121 amap = {}
122 122 aliases = opts.get('aliases')
123 123 if aliases:
124 124 for l in open(aliases, "r"):
125 125 l = l.strip()
126 126 alias, actual = l.split()
127 127 amap[alias] = actual
128 128
129 129 rate = countrate(ui, repo, amap, *pats, **opts).items()
130 130 if not rate:
131 131 return
132 132
133 133 sortfn = ((not opts.get('sort')) and (lambda a, b: cmp(b[1], a[1])) or None)
134 134 rate.sort(sortfn)
135 135
136 136 maxcount = float(max([v for k, v in rate]))
137 137 maxname = max([len(k) for k, v in rate])
138 138
139 139 ttywidth = util.termwidth()
140 140 ui.debug(_("assuming %i character terminal\n") % ttywidth)
141 141 width = ttywidth - maxname - 2 - 6 - 2 - 2
142 142
143 143 for date, count in rate:
144 144 print "%s %6d %s" % (pad(date, maxname), count,
145 145 "*" * int(count * width / maxcount))
146 146
147 147
148 148 cmdtable = {
149 149 "churn":
150 150 (churn,
151 151 [('r', 'rev', [], _('count rate for the specified revision or range')),
152 152 ('d', 'date', '', _('count rate for revs matching date spec')),
153 153 ('t', 'template', '{author|email}', _('template to group changesets')),
154 154 ('f', 'dateformat', '',
155 155 _('strftime-compatible format for grouping by date')),
156 156 ('c', 'changesets', False, _('count rate by number of changesets')),
157 157 ('s', 'sort', False, _('sort by key (default: sort by count)')),
158 158 ('', 'aliases', '', _('file with email aliases')),
159 159 ('', 'progress', None, _('show progress'))],
160 160 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")),
161 161 }
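A note on the alias map consumed above: each line must contain exactly two whitespace-separated addresses, because the loop in churn() unpacks l.split() into an (alias, actual) pair, so blank lines or inline comments would abort the command. A minimal, purely hypothetical map file and invocation (addresses invented):

    bob@build-box.example bob@example.com
    rsmith@example.org bob@example.com

    hg churn --aliases emailmap -t '{author|email}'

Both lines fold the aliased addresses into the canonical bob@example.com before the per-key counts are accumulated.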
@@ -1,250 +1,251 @@
1 1 # convert.py Foreign SCM converter
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7 '''converting foreign VCS repositories to Mercurial'''
8 8
9 9 import convcmd
10 10 import cvsps
11 + import subversion
11 12 from mercurial import commands
12 13 from mercurial.i18n import _
13 14
14 15 # Commands definition was moved elsewhere to ease demandload job.
15 16
16 17 def convert(ui, src, dest=None, revmapfile=None, **opts):
17 18 """convert a foreign SCM repository to a Mercurial one.
18 19
19 20 Accepted source formats [identifiers]:
20 21 - Mercurial [hg]
21 22 - CVS [cvs]
22 23 - Darcs [darcs]
23 24 - git [git]
24 25 - Subversion [svn]
25 26 - Monotone [mtn]
26 27 - GNU Arch [gnuarch]
27 28 - Bazaar [bzr]
28 29 - Perforce [p4]
29 30
30 31 Accepted destination formats [identifiers]:
31 32 - Mercurial [hg]
32 33 - Subversion [svn] (history on branches is not preserved)
33 34
34 35 If no revision is given, all revisions will be converted. Otherwise,
35 36 convert will only import up to the named revision (given in a format
36 37 understood by the source).
37 38
38 39 If no destination directory name is specified, it defaults to the
39 40 basename of the source with '-hg' appended. If the destination
40 41 repository doesn't exist, it will be created.
41 42
42 43 If <REVMAP> isn't given, it will be put in a default location
43 44 (<dest>/.hg/shamap by default). The <REVMAP> is a simple text
44 45 file that maps each source commit ID to the destination ID for
45 46 that revision, like so:
46 47 <source ID> <destination ID>
47 48
48 49 If the file doesn't exist, it's automatically created. It's updated
49 50 on each commit copied, so convert-repo can be interrupted and can
50 51 be run repeatedly to copy new commits.
51 52
52 53 The [username mapping] file is a simple text file that maps each source
53 54 commit author to a destination commit author. It is handy for source SCMs
54 55 that use unix logins to identify authors (e.g. CVS). One line per author
55 56 mapping and the line format is:
56 57 srcauthor=whatever string you want
57 58
58 59 The filemap is a file that allows filtering and remapping of files
59 60 and directories. Comment lines start with '#'. Each line can
60 61 contain one of the following directives:
61 62
62 63 include path/to/file
63 64
64 65 exclude path/to/file
65 66
66 67 rename from/file to/file
67 68
68 69 The 'include' directive causes a file, or all files under a
69 70 directory, to be included in the destination repository, and the
70 71 exclusion of all other files and dirs not explicitly included.
71 72 The 'exclude' directive causes files or directories to be omitted.
72 73 The 'rename' directive renames a file or directory. To rename from a
73 74 subdirectory into the root of the repository, use '.' as the path to
74 75 rename to.
75 76
76 77 The splicemap is a file that allows insertion of synthetic
77 78 history, letting you specify the parents of a revision. This is
78 79 useful if you want to e.g. give a Subversion merge two parents, or
79 80 graft two disconnected series of history together. Each entry
80 81 contains a key, followed by a space, followed by one or two
81 82 values, separated by spaces. The key is the revision ID in the
82 83 source revision control system whose parents should be modified
83 84 (same format as a key in .hg/shamap). The values are the revision
84 85 IDs (in either the source or destination revision control system)
85 86 that should be used as the new parents for that node.
86 87
87 88 Mercurial Source
88 89 -----------------
89 90
90 91 --config convert.hg.ignoreerrors=False (boolean)
91 92 ignore integrity errors when reading. Use it to fix Mercurial
92 93 repositories with missing revlogs, by converting from and to
93 94 Mercurial.
94 95 --config convert.hg.saverev=False (boolean)
95 96 store original revision ID in changeset (forces target IDs to change)
96 97 --config convert.hg.startrev=0 (hg revision identifier)
97 98 convert start revision and its descendants
98 99
99 100 CVS Source
100 101 ----------
101 102
102 103 CVS source will use a sandbox (i.e. a checked-out copy) from CVS
103 104 to indicate the starting point of what will be converted. Direct
104 105 access to the repository files is not needed, unless of course
105 106 the repository is :local:. The conversion uses the top level
106 107 directory in the sandbox to find the CVS repository, and then uses
107 108 CVS rlog commands to find files to convert. This means that unless
108 109 a filemap is given, all files under the starting directory will be
109 110 converted, and that any directory reorganisation in the CVS
110 111 sandbox is ignored.
111 112
112 113 Because CVS does not have changesets, it is necessary to collect
113 114 individual commits to CVS and merge them into changesets. CVS
114 115 source uses its internal changeset merging code by default but can
115 116 be configured to call the external 'cvsps' program by setting:
116 117 --config convert.cvsps='cvsps -A -u --cvs-direct -q'
117 118 This is a legacy option and may be removed in future.
118 119
119 120 The options shown are the defaults.
120 121
121 122 Internal cvsps is selected by setting
122 123 --config convert.cvsps=builtin
123 124 and has a few more configurable options:
124 125 --config convert.cvsps.fuzz=60 (integer)
125 126 Specify the maximum time (in seconds) that is allowed between
126 127 commits with identical user and log message in a single
127 128 changeset. If very large files were checked in as part
128 129 of a changeset, the default may not be long enough.
129 130 --config convert.cvsps.mergeto='{{mergetobranch ([-\w]+)}}'
130 131 Specify a regular expression to which commit log messages are
131 132 matched. If a match occurs, then the conversion process will
132 133 insert a dummy revision merging the branch on which this log
133 134 message occurs to the branch indicated in the regex.
134 135 --config convert.cvsps.mergefrom='{{mergefrombranch ([-\w]+)}}'
135 136 Specify a regular expression to which commit log messages are
136 137 matched. If a match occurs, then the conversion process will
137 138 add the most recent revision on the branch indicated in the
138 139 regex as the second parent of the changeset.
139 140
140 141 The hgext/convert/cvsps wrapper script allows the builtin changeset
141 142 merging code to be run without doing a conversion. Its parameters and
142 143 output are similar to that of cvsps 2.1.
143 144
144 145 Subversion Source
145 146 -----------------
146 147
147 148 Subversion source detects classical trunk/branches/tags layouts.
148 149 By default, the supplied "svn://repo/path/" source URL is
149 150 converted as a single branch. If "svn://repo/path/trunk" exists
150 151 it replaces the default branch. If "svn://repo/path/branches"
151 152 exists, its subdirectories are listed as possible branches. If
152 153 "svn://repo/path/tags" exists, it is looked for tags referencing
153 154 converted branches. Default "trunk", "branches" and "tags" values
154 155 can be overriden with following options. Set them to paths
155 156 relative to the source URL, or leave them blank to disable
156 157 autodetection.
157 158
158 159 --config convert.svn.branches=branches (directory name)
159 160 specify the directory containing branches
160 161 --config convert.svn.tags=tags (directory name)
161 162 specify the directory containing tags
162 163 --config convert.svn.trunk=trunk (directory name)
163 164 specify the name of the trunk branch
164 165
165 166 Source history can be retrieved starting at a specific revision,
166 167 instead of being integrally converted. Only single branch
167 168 conversions are supported.
168 169
169 170 --config convert.svn.startrev=0 (svn revision number)
170 171 specify start Subversion revision.
171 172
172 173 Perforce Source
173 174 ---------------
174 175
175 176 The Perforce (P4) importer can be given a p4 depot path or a client
176 177 specification as source. It will convert all files in the source to
177 178 a flat Mercurial repository, ignoring labels, branches and integrations.
178 179 Note that when a depot path is given, you usually should specify a
179 180 target directory, because otherwise the target may be named ...-hg.
180 181
181 182 It is possible to limit the amount of source history to be converted
182 183 by specifying an initial Perforce revision.
183 184
184 185 --config convert.p4.startrev=0 (perforce changelist number)
185 186 specify initial Perforce revision.
186 187
187 188
188 189 Mercurial Destination
189 190 ---------------------
190 191
191 192 --config convert.hg.clonebranches=False (boolean)
192 193 dispatch source branches in separate clones.
193 194 --config convert.hg.tagsbranch=default (branch name)
194 195 tag revisions branch name
195 196 --config convert.hg.usebranchnames=True (boolean)
196 197 preserve branch names
197 198
198 199 """
199 200 return convcmd.convert(ui, src, dest, revmapfile, **opts)
200 201
201 202 def debugsvnlog(ui, **opts):
202 - return convcmd.debugsvnlog(ui, **opts)
203 + return subversion.debugsvnlog(ui, **opts)
203 204
204 205 def debugcvsps(ui, *args, **opts):
205 206 '''create changeset information from CVS
206 207
207 208 This command is intended as a debugging tool for the CVS to Mercurial
208 209 converter, and can be used as a direct replacement for cvsps.
209 210
210 211 Hg debugcvsps reads the CVS rlog for the current directory (or any named
211 212 directory) in the CVS repository, and converts the log to a series of
212 213 changesets based on matching commit log entries and dates.'''
213 214 return cvsps.debugcvsps(ui, *args, **opts)
214 215
215 216 commands.norepo += " convert debugsvnlog debugcvsps"
216 217
217 218 cmdtable = {
218 219 "convert":
219 220 (convert,
220 221 [('A', 'authors', '', _('username mapping filename')),
221 222 ('d', 'dest-type', '', _('destination repository type')),
222 223 ('', 'filemap', '', _('remap file names using contents of file')),
223 224 ('r', 'rev', '', _('import up to target revision REV')),
224 225 ('s', 'source-type', '', _('source repository type')),
225 226 ('', 'splicemap', '', _('splice synthesized history into place')),
226 227 ('', 'datesort', None, _('try to sort changesets by date'))],
227 228 _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
228 229 "debugsvnlog":
229 230 (debugsvnlog,
230 231 [],
231 232 'hg debugsvnlog'),
232 233 "debugcvsps":
233 234 (debugcvsps,
234 235 [
235 236 # Main options shared with cvsps-2.1
236 237 ('b', 'branches', [], _('only return changes on specified branches')),
237 238 ('p', 'prefix', '', _('prefix to remove from file names')),
238 239 ('r', 'revisions', [], _('only return changes after or between specified tags')),
239 240 ('u', 'update-cache', None, _("update cvs log cache")),
240 241 ('x', 'new-cache', None, _("create new cvs log cache")),
241 242 ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
242 243 ('', 'root', '', _('specify cvsroot')),
243 244 # Options specific to builtin cvsps
244 245 ('', 'parents', '', _('show parent changesets')),
245 246 ('', 'ancestors', '', _('show current changeset in ancestor branches')),
246 247 # Options that are ignored for compatibility with cvsps-2.1
247 248 ('A', 'cvs-direct', None, _('ignored for compatibility')),
248 249 ],
249 250 _('hg debugcvsps [OPTION]... [PATH]...')),
250 251 }
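To make the filemap directives in the help text above concrete, here is a hypothetical map (paths invented) that keeps only src/, drops its tests, and hoists src/ to the root of the converted repository:

    include src
    exclude src/tests
    rename src .

It would be passed as hg convert --filemap <mapfile> SOURCE, with include establishing the whitelist, exclude carving out a subtree, and rename with '.' relocating the remainder to the repository root.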
@@ -1,341 +1,341 @@
1 1 # convcmd - convert extension commands definition
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from common import NoRepo, MissingTool, SKIPREV, mapfile
9 9 from cvs import convert_cvs
10 10 from darcs import darcs_source
11 11 from git import convert_git
12 12 from hg import mercurial_source, mercurial_sink
13 - from subversion import debugsvnlog, svn_source, svn_sink
13 + from subversion import svn_source, svn_sink
14 14 from monotone import monotone_source
15 15 from gnuarch import gnuarch_source
16 16 from bzr import bzr_source
17 17 from p4 import p4_source
18 18 import filemap
19 19
20 20 import os, shutil
21 21 from mercurial import hg, util
22 22 from mercurial.i18n import _
23 23
24 24 orig_encoding = 'ascii'
25 25
26 26 def recode(s):
27 27 if isinstance(s, unicode):
28 28 return s.encode(orig_encoding, 'replace')
29 29 else:
30 30 return s.decode('utf-8').encode(orig_encoding, 'replace')
31 31
32 32 source_converters = [
33 33 ('cvs', convert_cvs),
34 34 ('git', convert_git),
35 35 ('svn', svn_source),
36 36 ('hg', mercurial_source),
37 37 ('darcs', darcs_source),
38 38 ('mtn', monotone_source),
39 39 ('gnuarch', gnuarch_source),
40 40 ('bzr', bzr_source),
41 41 ('p4', p4_source),
42 42 ]
43 43
44 44 sink_converters = [
45 45 ('hg', mercurial_sink),
46 46 ('svn', svn_sink),
47 47 ]
48 48
49 49 def convertsource(ui, path, type, rev):
50 50 exceptions = []
51 51 for name, source in source_converters:
52 52 try:
53 53 if not type or name == type:
54 54 return source(ui, path, rev)
55 55 except (NoRepo, MissingTool), inst:
56 56 exceptions.append(inst)
57 57 if not ui.quiet:
58 58 for inst in exceptions:
59 59 ui.write("%s\n" % inst)
60 60 raise util.Abort(_('%s: missing or unsupported repository') % path)
61 61
62 62 def convertsink(ui, path, type):
63 63 for name, sink in sink_converters:
64 64 try:
65 65 if not type or name == type:
66 66 return sink(ui, path)
67 67 except NoRepo, inst:
68 68 ui.note(_("convert: %s\n") % inst)
69 69 raise util.Abort(_('%s: unknown repository type') % path)
70 70
71 71 class converter(object):
72 72 def __init__(self, ui, source, dest, revmapfile, opts):
73 73
74 74 self.source = source
75 75 self.dest = dest
76 76 self.ui = ui
77 77 self.opts = opts
78 78 self.commitcache = {}
79 79 self.authors = {}
80 80 self.authorfile = None
81 81
82 82 self.map = mapfile(ui, revmapfile)
83 83
84 84 # Read first the dst author map if any
85 85 authorfile = self.dest.authorfile()
86 86 if authorfile and os.path.exists(authorfile):
87 87 self.readauthormap(authorfile)
88 88 # Extend/Override with new author map if necessary
89 89 if opts.get('authors'):
90 90 self.readauthormap(opts.get('authors'))
91 91 self.authorfile = self.dest.authorfile()
92 92
93 93 self.splicemap = mapfile(ui, opts.get('splicemap'))
94 94
95 95 def walktree(self, heads):
96 96 '''Return a mapping that identifies the uncommitted parents of every
97 97 uncommitted changeset.'''
98 98 visit = heads
99 99 known = {}
100 100 parents = {}
101 101 while visit:
102 102 n = visit.pop(0)
103 103 if n in known or n in self.map: continue
104 104 known[n] = 1
105 105 commit = self.cachecommit(n)
106 106 parents[n] = []
107 107 for p in commit.parents:
108 108 parents[n].append(p)
109 109 visit.append(p)
110 110
111 111 return parents
112 112
113 113 def toposort(self, parents):
114 114 '''Return an ordering such that every uncommitted changeset is
115 115 preceded by all its uncommitted ancestors.'''
116 116 visit = parents.keys()
117 117 seen = {}
118 118 children = {}
119 119 actives = []
120 120
121 121 while visit:
122 122 n = visit.pop(0)
123 123 if n in seen: continue
124 124 seen[n] = 1
125 125 # Ensure that nodes without parents are present in the 'children'
126 126 # mapping.
127 127 children.setdefault(n, [])
128 128 hasparent = False
129 129 for p in parents[n]:
130 130 if not p in self.map:
131 131 visit.append(p)
132 132 hasparent = True
133 133 children.setdefault(p, []).append(n)
134 134 if not hasparent:
135 135 actives.append(n)
136 136
137 137 del seen
138 138 del visit
139 139
140 140 if self.opts.get('datesort'):
141 141 dates = {}
142 142 def getdate(n):
143 143 if n not in dates:
144 144 dates[n] = util.parsedate(self.commitcache[n].date)
145 145 return dates[n]
146 146
147 147 def picknext(nodes):
148 148 return min([(getdate(n), n) for n in nodes])[1]
149 149 else:
150 150 prev = [None]
151 151 def picknext(nodes):
152 152 # Return the first eligible child of the previously converted
153 153 # revision, or any of them.
154 154 next = nodes[0]
155 155 for n in nodes:
156 156 if prev[0] in parents[n]:
157 157 next = n
158 158 break
159 159 prev[0] = next
160 160 return next
161 161
162 162 s = []
163 163 pendings = {}
164 164 while actives:
165 165 n = picknext(actives)
166 166 actives.remove(n)
167 167 s.append(n)
168 168
169 169 # Update dependents list
170 170 for c in children.get(n, []):
171 171 if c not in pendings:
172 172 pendings[c] = [p for p in parents[c] if p not in self.map]
173 173 try:
174 174 pendings[c].remove(n)
175 175 except ValueError:
176 176 raise util.Abort(_('cycle detected between %s and %s')
177 177 % (recode(c), recode(n)))
178 178 if not pendings[c]:
179 179 # Parents are converted, node is eligible
180 180 actives.insert(0, c)
181 181 pendings[c] = None
182 182
183 183 if len(s) != len(parents):
184 184 raise util.Abort(_("not all revisions were sorted"))
185 185
186 186 return s
187 187
188 188 def writeauthormap(self):
189 189 authorfile = self.authorfile
190 190 if authorfile:
191 191 self.ui.status(_('Writing author map file %s\n') % authorfile)
192 192 ofile = open(authorfile, 'w+')
193 193 for author in self.authors:
194 194 ofile.write("%s=%s\n" % (author, self.authors[author]))
195 195 ofile.close()
196 196
197 197 def readauthormap(self, authorfile):
198 198 afile = open(authorfile, 'r')
199 199 for line in afile:
200 200 if line.strip() == '':
201 201 continue
202 202 try:
203 203 srcauthor, dstauthor = line.split('=', 1)
204 204 srcauthor = srcauthor.strip()
205 205 dstauthor = dstauthor.strip()
206 206 if srcauthor in self.authors and dstauthor != self.authors[srcauthor]:
207 207 self.ui.status(
208 208 _('Overriding mapping for author %s, was %s, will be %s\n')
209 209 % (srcauthor, self.authors[srcauthor], dstauthor))
210 210 else:
211 211 self.ui.debug(_('mapping author %s to %s\n')
212 212 % (srcauthor, dstauthor))
213 213 self.authors[srcauthor] = dstauthor
214 214 except IndexError:
215 215 self.ui.warn(
216 216 _('Ignoring bad line in author map file %s: %s\n')
217 217 % (authorfile, line.rstrip()))
218 218 afile.close()
219 219
220 220 def cachecommit(self, rev):
221 221 commit = self.source.getcommit(rev)
222 222 commit.author = self.authors.get(commit.author, commit.author)
223 223 self.commitcache[rev] = commit
224 224 return commit
225 225
226 226 def copy(self, rev):
227 227 commit = self.commitcache[rev]
228 228
229 229 changes = self.source.getchanges(rev)
230 230 if isinstance(changes, basestring):
231 231 if changes == SKIPREV:
232 232 dest = SKIPREV
233 233 else:
234 234 dest = self.map[changes]
235 235 self.map[rev] = dest
236 236 return
237 237 files, copies = changes
238 238 pbranches = []
239 239 if commit.parents:
240 240 for prev in commit.parents:
241 241 if prev not in self.commitcache:
242 242 self.cachecommit(prev)
243 243 pbranches.append((self.map[prev],
244 244 self.commitcache[prev].branch))
245 245 self.dest.setbranch(commit.branch, pbranches)
246 246 try:
247 247 parents = self.splicemap[rev].replace(',', ' ').split()
248 248 self.ui.status(_('spliced in %s as parents of %s\n') %
249 249 (parents, rev))
250 250 parents = [self.map.get(p, p) for p in parents]
251 251 except KeyError:
252 252 parents = [b[0] for b in pbranches]
253 253 newnode = self.dest.putcommit(files, copies, parents, commit, self.source)
254 254 self.source.converted(rev, newnode)
255 255 self.map[rev] = newnode
256 256
257 257 def convert(self):
258 258
259 259 try:
260 260 self.source.before()
261 261 self.dest.before()
262 262 self.source.setrevmap(self.map)
263 263 self.ui.status(_("scanning source...\n"))
264 264 heads = self.source.getheads()
265 265 parents = self.walktree(heads)
266 266 self.ui.status(_("sorting...\n"))
267 267 t = self.toposort(parents)
268 268 num = len(t)
269 269 c = None
270 270
271 271 self.ui.status(_("converting...\n"))
272 272 for c in t:
273 273 num -= 1
274 274 desc = self.commitcache[c].desc
275 275 if "\n" in desc:
276 276 desc = desc.splitlines()[0]
277 277 # convert log message to local encoding without using
278 278 # tolocal(), because convert() sets util._encoding to
279 279 # 'utf-8'
280 280 self.ui.status("%d %s\n" % (num, recode(desc)))
281 281 self.ui.note(_("source: %s\n") % recode(c))
282 282 self.copy(c)
283 283
284 284 tags = self.source.gettags()
285 285 ctags = {}
286 286 for k in tags:
287 287 v = tags[k]
288 288 if self.map.get(v, SKIPREV) != SKIPREV:
289 289 ctags[k] = self.map[v]
290 290
291 291 if c and ctags:
292 292 nrev = self.dest.puttags(ctags)
293 293 # write another hash correspondence to override the previous
294 294 # one so we don't end up with extra tag heads
295 295 if nrev:
296 296 self.map[c] = nrev
297 297
298 298 self.writeauthormap()
299 299 finally:
300 300 self.cleanup()
301 301
302 302 def cleanup(self):
303 303 try:
304 304 self.dest.after()
305 305 finally:
306 306 self.source.after()
307 307 self.map.close()
308 308
309 309 def convert(ui, src, dest=None, revmapfile=None, **opts):
310 310 global orig_encoding
311 311 orig_encoding = util._encoding
312 312 util._encoding = 'UTF-8'
313 313
314 314 if not dest:
315 315 dest = hg.defaultdest(src) + "-hg"
316 316 ui.status(_("assuming destination %s\n") % dest)
317 317
318 318 destc = convertsink(ui, dest, opts.get('dest_type'))
319 319
320 320 try:
321 321 srcc = convertsource(ui, src, opts.get('source_type'),
322 322 opts.get('rev'))
323 323 except Exception:
324 324 for path in destc.created:
325 325 shutil.rmtree(path, True)
326 326 raise
327 327
328 328 fmap = opts.get('filemap')
329 329 if fmap:
330 330 srcc = filemap.filemap_source(ui, srcc, fmap)
331 331 destc.setfilemapmode(True)
332 332
333 333 if not revmapfile:
334 334 try:
335 335 revmapfile = destc.revmapfile()
336 336 except:
337 337 revmapfile = os.path.join(destc, "map")
338 338
339 339 c = converter(ui, srcc, destc, revmapfile, opts)
340 340 c.convert()
341 341
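The ordering logic in converter.toposort() above is a Kahn-style topological sort: a node becomes active once all of its unconverted parents have been emitted. A standalone sketch of the same idea, assuming a closed parents mapping (every listed parent is also a key, i.e. nothing is already in the revmap):

    def toposort_sketch(parents):
        # parents: node -> list of parent nodes
        children, pending, actives = {}, {}, []
        for n in parents:
            children.setdefault(n, [])
            pending[n] = len(parents[n])
            if not parents[n]:
                actives.append(n)
            for p in parents[n]:
                children.setdefault(p, []).append(n)
        order = []
        while actives:
            n = actives.pop(0)
            order.append(n)
            for c in children[n]:
                pending[c] -= 1
                if not pending[c]:
                    actives.append(c)
        if len(order) != len(parents):
            raise ValueError('cycle detected')  # mirrors the util.Abort above
        return order

    toposort_sketch({'a': [], 'b': ['a'], 'c': ['a', 'b']})  # -> ['a', 'b', 'c']

The real method layers two picknext() policies on top of this: date order when --datesort is given, otherwise a preference for children of the previously converted revision.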
@@ -1,201 +1,201 @@
1 1 # monotone support for the convert extension
2 2
3 - import os, re, time
3 + import os, re
4 4 from mercurial import util
5 - from common import NoRepo, MissingTool, commit, converter_source, checktool
5 + from common import NoRepo, commit, converter_source, checktool
6 6 from common import commandline
7 7 from mercurial.i18n import _
8 8
9 9 class monotone_source(converter_source, commandline):
10 10 def __init__(self, ui, path=None, rev=None):
11 11 converter_source.__init__(self, ui, path, rev)
12 12 commandline.__init__(self, ui, 'mtn')
13 13
14 14 self.ui = ui
15 15 self.path = path
16 16
17 17 # regular expressions for parsing monotone output
18 18 space = r'\s*'
19 19 name = r'\s+"((?:\\"|[^"])*)"\s*'
20 20 value = name
21 21 revision = r'\s+\[(\w+)\]\s*'
22 22 lines = r'(?:.|\n)+'
23 23
24 24 self.dir_re = re.compile(space + "dir" + name)
25 25 self.file_re = re.compile(space + "file" + name + "content" + revision)
26 26 self.add_file_re = re.compile(space + "add_file" + name + "content" + revision)
27 27 self.patch_re = re.compile(space + "patch" + name + "from" + revision + "to" + revision)
28 28 self.rename_re = re.compile(space + "rename" + name + "to" + name)
29 29 self.delete_re = re.compile(space + "delete" + name)
30 30 self.tag_re = re.compile(space + "tag" + name + "revision" + revision)
31 31 self.cert_re = re.compile(lines + space + "name" + name + "value" + value)
32 32
33 33 attr = space + "file" + lines + space + "attr" + space
34 34 self.attr_execute_re = re.compile(attr + '"mtn:execute"' + space + '"true"')
35 35
36 36 # cached data
37 37 self.manifest_rev = None
38 38 self.manifest = None
39 39 self.files = None
40 40 self.dirs = None
41 41
42 42 norepo = NoRepo (_("%s does not look like a monotone repo") % path)
43 43 if not os.path.exists(path):
44 44 raise norepo
45 45
46 46 checktool('mtn', abort=False)
47 47
48 48 # test if there are any revisions
49 49 self.rev = None
50 50 try:
51 51 self.getheads()
52 52 except:
53 53 raise norepo
54 54 self.rev = rev
55 55
56 56 def mtnrun(self, *args, **kwargs):
57 57 kwargs['d'] = self.path
58 58 return self.run0('automate', *args, **kwargs)
59 59
60 60 def mtnloadmanifest(self, rev):
61 61 if self.manifest_rev == rev:
62 62 return
63 63 self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
64 64 self.manifest_rev = rev
65 65 self.files = {}
66 66 self.dirs = {}
67 67
68 68 for e in self.manifest:
69 69 m = self.file_re.match(e)
70 70 if m:
71 71 attr = ""
72 72 name = m.group(1)
73 73 node = m.group(2)
74 74 if self.attr_execute_re.match(e):
75 75 attr += "x"
76 76 self.files[name] = (node, attr)
77 77 m = self.dir_re.match(e)
78 78 if m:
79 79 self.dirs[m.group(1)] = True
80 80
81 81 def mtnisfile(self, name, rev):
82 82 # a non-file could be a directory or a deleted or renamed file
83 83 self.mtnloadmanifest(rev)
84 84 try:
85 85 self.files[name]
86 86 return True
87 87 except KeyError:
88 88 return False
89 89
90 90 def mtnisdir(self, name, rev):
91 91 self.mtnloadmanifest(rev)
92 92 try:
93 93 self.dirs[name]
94 94 return True
95 95 except KeyError:
96 96 return False
97 97
98 98 def mtngetcerts(self, rev):
99 99 certs = {"author":"<missing>", "date":"<missing>",
100 100 "changelog":"<missing>", "branch":"<missing>"}
101 101 cert_list = self.mtnrun("certs", rev).split('\n\n key "')
102 102 for e in cert_list:
103 103 m = self.cert_re.match(e)
104 104 if m:
105 105 name, value = m.groups()
106 106 value = value.replace(r'\"', '"')
107 107 value = value.replace(r'\\', '\\')
108 108 certs[name] = value
109 109 return certs
110 110
111 111 def mtnrenamefiles(self, files, fromdir, todir):
112 112 renamed = {}
113 113 for tofile in files:
114 114 suffix = tofile.lstrip(todir)
115 115 if todir + suffix == tofile:
116 116 renamed[tofile] = (fromdir + suffix).lstrip("/")
117 117 return renamed
118 118
119 119
120 120 # implement the converter_source interface:
121 121
122 122 def getheads(self):
123 123 if not self.rev:
124 124 return self.mtnrun("leaves").splitlines()
125 125 else:
126 126 return [self.rev]
127 127
128 128 def getchanges(self, rev):
129 129 #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
130 130 revision = self.mtnrun("get_revision", rev).split("\n\n")
131 131 files = {}
132 132 copies = {}
133 133 for e in revision:
134 134 m = self.add_file_re.match(e)
135 135 if m:
136 136 files[m.group(1)] = rev
137 137 m = self.patch_re.match(e)
138 138 if m:
139 139 files[m.group(1)] = rev
140 140
141 141 # Delete/rename is handled later when the convert engine
142 142 # discovers an IOError exception from getfile,
143 143 # but only if we add the "from" file to the list of changes.
144 144 m = self.delete_re.match(e)
145 145 if m:
146 146 files[m.group(1)] = rev
147 147 m = self.rename_re.match(e)
148 148 if m:
149 149 toname = m.group(2)
150 150 fromname = m.group(1)
151 151 if self.mtnisfile(toname, rev):
152 152 copies[toname] = fromname
153 153 files[toname] = rev
154 154 files[fromname] = rev
155 155 if self.mtnisdir(toname, rev):
156 156 renamed = self.mtnrenamefiles(self.files, fromname, toname)
157 157 for tofile, fromfile in renamed.items():
158 158 self.ui.debug (_("copying file in renamed dir from '%s' to '%s'") % (fromfile, tofile), '\n')
159 159 files[tofile] = rev
160 160 for fromfile in renamed.values():
161 161 files[fromfile] = rev
162 162 return (files.items(), copies)
163 163
164 164 def getmode(self, name, rev):
165 165 self.mtnloadmanifest(rev)
166 166 try:
167 167 node, attr = self.files[name]
168 168 return attr
169 169 except KeyError:
170 170 return ""
171 171
172 172 def getfile(self, name, rev):
173 173 if not self.mtnisfile(name, rev):
174 174 raise IOError() # file was deleted or renamed
175 175 try:
176 176 return self.mtnrun("get_file_of", name, r=rev)
177 177 except:
178 178 raise IOError() # file was deleted or renamed
179 179
180 180 def getcommit(self, rev):
181 181 certs = self.mtngetcerts(rev)
182 182 return commit(
183 183 author=certs["author"],
184 184 date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
185 185 desc=certs["changelog"],
186 186 rev=rev,
187 187 parents=self.mtnrun("parents", rev).splitlines(),
188 188 branch=certs["branch"])
189 189
190 190 def gettags(self):
191 191 tags = {}
192 192 for e in self.mtnrun("tags").split("\n\n"):
193 193 m = self.tag_re.match(e)
194 194 if m:
195 195 tags[m.group(1)] = m.group(2)
196 196 return tags
197 197
198 198 def getchangedfiles(self, rev, i):
199 199 # This function is only needed to support --filemap
200 200 # ... and we don't support that
201 201 raise NotImplementedError()
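One subtlety in mtnrenamefiles() above: str.lstrip() strips a set of characters, not a prefix, so tofile.lstrip(todir) can over-strip. The todir + suffix == tofile guard rejects most bad results but still admits stray siblings (for example 'dirt/x' when todir is 'dir', since lstrip removes only the characters 'd', 'i', 'r' and the round-trip check then passes). A prefix-safe sketch of the same mapping, names hypothetical:

    def renamefiles(files, fromdir, todir):
        # match only todir itself or paths strictly under todir/
        renamed = {}
        for tofile in files:
            if tofile == todir or tofile.startswith(todir + '/'):
                suffix = tofile[len(todir):]
                # lstrip('/') is safe here: single-character set
                renamed[tofile] = (fromdir + suffix).lstrip('/')
        return renamed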
@@ -1,422 +1,421 @@
1 1 # ASCII graph log extension for Mercurial
2 2 #
3 3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of
6 6 # the GNU General Public License, incorporated herein by reference.
7 7 '''show revision graphs in terminal windows
8 8
9 9 This extension adds a --graph option to the incoming, outgoing and log
10 10 commands. When this option is given, an ASCII representation of the
11 11 revision graph is also shown.
12 12 '''
13 13
14 14 import os
15 - import sys
16 15 from mercurial.cmdutil import revrange, show_changeset
17 - from mercurial.commands import templateopts, logopts, remoteopts
16 + from mercurial.commands import templateopts
18 17 from mercurial.i18n import _
19 18 from mercurial.node import nullrev
20 19 from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
21 - from mercurial import hg, ui, url, util
20 + from mercurial import hg, url, util
22 21
23 22 def revisions(repo, start, stop):
24 23 """cset DAG generator yielding (rev, node, [parents]) tuples
25 24
26 25 This generator function walks through the revision history from revision
27 26 start to revision stop (which must be less than or equal to start).
28 27 """
29 28 assert start >= stop
30 29 cur = start
31 30 while cur >= stop:
32 31 ctx = repo[cur]
33 32 parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev]
34 33 parents.sort()
35 34 yield (ctx, parents)
36 35 cur -= 1
37 36
38 37 def filerevs(repo, path, start, stop):
39 38 """file cset DAG generator yielding (rev, node, [parents]) tuples
40 39
41 40 This generator function walks through the revision history of a single
42 41 file from revision start to revision stop (which must be less than or
43 42 equal to start).
44 43 """
45 44 assert start >= stop
46 45 filerev = len(repo.file(path)) - 1
47 46 while filerev >= 0:
48 47 fctx = repo.filectx(path, fileid=filerev)
49 48 parents = [f.linkrev() for f in fctx.parents() if f.path() == path]
50 49 parents.sort()
51 50 if fctx.rev() <= start:
52 51 yield (fctx, parents)
53 52 if fctx.rev() <= stop:
54 53 break
55 54 filerev -= 1
56 55
57 56 def grapher(nodes):
58 57 """grapher for asciigraph on a list of nodes and their parents
59 58
60 59 nodes must generate tuples (node, parents, char, lines) where
61 60 - parents must generate the parents of node, in sorted order,
62 61 and max length 2,
63 62 - char is the char to print as the node symbol, and
64 63 - lines are the lines to display next to the node.
65 64 """
66 65 seen = []
67 66 for node, parents, char, lines in nodes:
68 67 if node not in seen:
69 68 seen.append(node)
70 69 nodeidx = seen.index(node)
71 70
72 71 knownparents = []
73 72 newparents = []
74 73 for parent in parents:
75 74 if parent in seen:
76 75 knownparents.append(parent)
77 76 else:
78 77 newparents.append(parent)
79 78
80 79 ncols = len(seen)
81 80 nextseen = seen[:]
82 81 nextseen[nodeidx:nodeidx + 1] = newparents
83 82 edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
84 83
85 84 if len(newparents) > 0:
86 85 edges.append((nodeidx, nodeidx))
87 86 if len(newparents) > 1:
88 87 edges.append((nodeidx, nodeidx + 1))
89 88 nmorecols = len(nextseen) - ncols
90 89 seen = nextseen
91 90 yield (char, lines, nodeidx, edges, ncols, nmorecols)
92 91
93 92 def fix_long_right_edges(edges):
94 93 for (i, (start, end)) in enumerate(edges):
95 94 if end > start:
96 95 edges[i] = (start, end + 1)
97 96
98 97 def get_nodeline_edges_tail(
99 98 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
100 99 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
101 100 # Still going in the same non-vertical direction.
102 101 if n_columns_diff == -1:
103 102 start = max(node_index + 1, p_node_index)
104 103 tail = ["|", " "] * (start - node_index - 1)
105 104 tail.extend(["/", " "] * (n_columns - start))
106 105 return tail
107 106 else:
108 107 return ["\\", " "] * (n_columns - node_index - 1)
109 108 else:
110 109 return ["|", " "] * (n_columns - node_index - 1)
111 110
112 111 def draw_edges(edges, nodeline, interline):
113 112 for (start, end) in edges:
114 113 if start == end + 1:
115 114 interline[2 * end + 1] = "/"
116 115 elif start == end - 1:
117 116 interline[2 * start + 1] = "\\"
118 117 elif start == end:
119 118 interline[2 * start] = "|"
120 119 else:
121 120 nodeline[2 * end] = "+"
122 121 if start > end:
123 122 (start, end) = (end,start)
124 123 for i in range(2 * start + 1, 2 * end):
125 124 if nodeline[i] != "+":
126 125 nodeline[i] = "-"
127 126
128 127 def get_padding_line(ni, n_columns, edges):
129 128 line = []
130 129 line.extend(["|", " "] * ni)
131 130 if (ni, ni - 1) in edges or (ni, ni) in edges:
132 131 # (ni, ni - 1) (ni, ni)
133 132 # | | | | | | | |
134 133 # +---o | | o---+
135 134 # | | c | | c | |
136 135 # | |/ / | |/ /
137 136 # | | | | | |
138 137 c = "|"
139 138 else:
140 139 c = " "
141 140 line.extend([c, " "])
142 141 line.extend(["|", " "] * (n_columns - ni - 1))
143 142 return line
144 143
145 144 def ascii(ui, grapher):
146 145 """prints an ASCII graph of the DAG returned by the grapher
147 146
148 147 grapher is a generator that emits tuples with the following elements:
149 148
150 149 - Character to use as node's symbol.
151 150 - List of lines to display as the node's text.
152 151 - Column of the current node in the set of ongoing edges.
153 152 - Edges; a list of (col, next_col) indicating the edges between
154 153 the current node and its parents.
155 154 - Number of columns (ongoing edges) in the current revision.
156 155 - The difference between the number of columns (ongoing edges)
157 156 in the next revision and the number of columns (ongoing edges)
158 157 in the current revision. That is: -1 means one column removed;
159 158 0 means no columns added or removed; 1 means one column added.
160 159 """
161 160 prev_n_columns_diff = 0
162 161 prev_node_index = 0
163 162 for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher:
164 163
165 164 assert -2 < n_columns_diff < 2
166 165 if n_columns_diff == -1:
167 166 # Transform
168 167 #
169 168 # | | | | | |
170 169 # o | | into o---+
171 170 # |X / |/ /
172 171 # | | | |
173 172 fix_long_right_edges(edges)
174 173
175 174 # add_padding_line says whether to rewrite
176 175 #
177 176 # | | | | | | | |
178 177 # | o---+ into | o---+
179 178 # | / / | | | # <--- padding line
180 179 # o | | | / /
181 180 # o | |
182 181 add_padding_line = (len(node_lines) > 2 and
183 182 n_columns_diff == -1 and
184 183 [x for (x, y) in edges if x + 1 < y])
185 184
186 185 # fix_nodeline_tail says whether to rewrite
187 186 #
188 187 # | | o | | | | o | |
189 188 # | | |/ / | | |/ /
190 189 # | o | | into | o / / # <--- fixed nodeline tail
191 190 # | |/ / | |/ /
192 191 # o | | o | |
193 192 fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line
194 193
195 194 # nodeline is the line containing the node character (typically o)
196 195 nodeline = ["|", " "] * node_index
197 196 nodeline.extend([node_ch, " "])
198 197
199 198 nodeline.extend(
200 199 get_nodeline_edges_tail(
201 200 node_index, prev_node_index, n_columns, n_columns_diff,
202 201 prev_n_columns_diff, fix_nodeline_tail))
203 202
204 203 # shift_interline is the line containing the non-vertical
205 204 # edges between this entry and the next
206 205 shift_interline = ["|", " "] * node_index
207 206 if n_columns_diff == -1:
208 207 n_spaces = 1
209 208 edge_ch = "/"
210 209 elif n_columns_diff == 0:
211 210 n_spaces = 2
212 211 edge_ch = "|"
213 212 else:
214 213 n_spaces = 3
215 214 edge_ch = "\\"
216 215 shift_interline.extend(n_spaces * [" "])
217 216 shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1))
218 217
219 218 # draw edges from the current node to its parents
220 219 draw_edges(edges, nodeline, shift_interline)
221 220
222 221 # lines is the list of all graph lines to print
223 222 lines = [nodeline]
224 223 if add_padding_line:
225 224 lines.append(get_padding_line(node_index, n_columns, edges))
226 225 lines.append(shift_interline)
227 226
228 227 # make sure that there are as many graph lines as there are
229 228 # log strings
230 229 while len(node_lines) < len(lines):
231 230 node_lines.append("")
232 231 if len(lines) < len(node_lines):
233 232 extra_interline = ["|", " "] * (n_columns + n_columns_diff)
234 233 while len(lines) < len(node_lines):
235 234 lines.append(extra_interline)
236 235
237 236 # print lines
238 237 indentation_level = max(n_columns, n_columns + n_columns_diff)
239 238 for (line, logstr) in zip(lines, node_lines):
240 239 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
241 240 ui.write(ln.rstrip() + '\n')
242 241
243 242 # ... and start over
244 243 prev_node_index = node_index
245 244 prev_n_columns_diff = n_columns_diff
246 245
247 246 def get_revs(repo, rev_opt):
248 247 if rev_opt:
249 248 revs = revrange(repo, rev_opt)
250 249 return (max(revs), min(revs))
251 250 else:
252 251 return (len(repo) - 1, 0)
253 252
254 253 def check_unsupported_flags(opts):
255 254 for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
256 255 "only_merges", "user", "only_branch", "prune", "newest_first",
257 256 "no_merges", "include", "exclude"]:
258 257 if op in opts and opts[op]:
259 258 raise util.Abort(_("--graph option is incompatible with --%s") % op)
260 259
261 260 def graphlog(ui, repo, path=None, **opts):
262 261 """show revision history alongside an ASCII revision graph
263 262
264 263 Print a revision history alongside a revision graph drawn with
265 264 ASCII characters.
266 265
267 266 Nodes printed as an @ character are parents of the working
268 267 directory.
269 268 """
270 269
271 270 check_unsupported_flags(opts)
272 271 limit = cmdutil.loglimit(opts)
273 272 start, stop = get_revs(repo, opts["rev"])
274 273 stop = max(stop, start - limit + 1)
275 274 if start == nullrev:
276 275 return
277 276
278 277 if path:
279 278 path = util.canonpath(repo.root, os.getcwd(), path)
280 279 if path: # could be reset in canonpath
281 280 revdag = filerevs(repo, path, start, stop)
282 281 else:
283 282 revdag = revisions(repo, start, stop)
284 283
285 284 graphdag = graphabledag(ui, repo, revdag, opts)
286 285 ascii(ui, grapher(graphdag))
287 286
288 287 def graphrevs(repo, nodes, opts):
289 288 nodes.reverse()
290 289 include = util.set(nodes)
291 290 limit = cmdutil.loglimit(opts)
292 291 count = 0
293 292 for node in nodes:
294 293 if count >= limit:
295 294 break
296 295 ctx = repo[node]
297 296 parents = [p.rev() for p in ctx.parents() if p.node() in include]
298 297 parents.sort()
299 298 yield (ctx, parents)
300 299 count += 1
301 300
302 301 def graphabledag(ui, repo, revdag, opts):
303 302 showparents = [ctx.node() for ctx in repo[None].parents()]
304 303 displayer = show_changeset(ui, repo, opts, buffered=True)
305 304 for (ctx, parents) in revdag:
306 305 displayer.show(ctx)
307 306 lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1]
308 307 char = ctx.node() in showparents and '@' or 'o'
309 308 yield (ctx.rev(), parents, char, lines)
310 309
311 310 def goutgoing(ui, repo, dest=None, **opts):
312 311 """show the outgoing changesets alongside an ASCII revision graph
313 312
314 313 Print the outgoing changesets alongside a revision graph drawn with
315 314 ASCII characters.
316 315
317 316 Nodes printed as an @ character are parents of the working
318 317 directory.
319 318 """
320 319
321 320 check_unsupported_flags(opts)
322 321 dest, revs, checkout = hg.parseurl(
323 322 ui.expandpath(dest or 'default-push', dest or 'default'),
324 323 opts.get('rev'))
325 324 cmdutil.setremoteconfig(ui, opts)
326 325 if revs:
327 326 revs = [repo.lookup(rev) for rev in revs]
328 327 other = hg.repository(ui, dest)
329 328 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
330 329 o = repo.findoutgoing(other, force=opts.get('force'))
331 330 if not o:
332 331 ui.status(_("no changes found\n"))
333 332 return
334 333
335 334 o = repo.changelog.nodesbetween(o, revs)[0]
336 335 revdag = graphrevs(repo, o, opts)
337 336 graphdag = graphabledag(ui, repo, revdag, opts)
338 337 ascii(ui, grapher(graphdag))
339 338
340 339 def gincoming(ui, repo, source="default", **opts):
341 340 """show the incoming changesets alongside an ASCII revision graph
342 341
343 342 Print the incoming changesets alongside a revision graph drawn with
344 343 ASCII characters.
345 344
346 345 Nodes printed as an @ character are parents of the working
347 346 directory.
348 347 """
349 348
350 349 check_unsupported_flags(opts)
351 350 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
352 351 cmdutil.setremoteconfig(ui, opts)
353 352
354 353 other = hg.repository(ui, source)
355 354 ui.status(_('comparing with %s\n') % url.hidepassword(source))
356 355 if revs:
357 356 revs = [other.lookup(rev) for rev in revs]
358 357 incoming = repo.findincoming(other, heads=revs, force=opts["force"])
359 358 if not incoming:
360 359 try:
361 360 os.unlink(opts["bundle"])
362 361 except:
363 362 pass
364 363 ui.status(_("no changes found\n"))
365 364 return
366 365
367 366 cleanup = None
368 367 try:
369 368
370 369 fname = opts["bundle"]
371 370 if fname or not other.local():
372 371 # create a bundle (uncompressed if other repo is not local)
373 372 if revs is None:
374 373 cg = other.changegroup(incoming, "incoming")
375 374 else:
376 375 cg = other.changegroupsubset(incoming, revs, 'incoming')
377 376 bundletype = other.local() and "HG10BZ" or "HG10UN"
378 377 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
379 378 # keep written bundle?
380 379 if opts["bundle"]:
381 380 cleanup = None
382 381 if not other.local():
383 382 # use the created uncompressed bundlerepo
384 383 other = bundlerepo.bundlerepository(ui, repo.root, fname)
385 384
386 385 chlist = other.changelog.nodesbetween(incoming, revs)[0]
387 386 revdag = graphrevs(other, chlist, opts)
388 387 other_parents = []
389 388 displayer = show_changeset(ui, other, opts, buffered=True)
390 389 graphdag = graphabledag(ui, repo, revdag, opts)
391 390 ascii(ui, grapher(graphdag))
392 391
393 392 finally:
394 393 if hasattr(other, 'close'):
395 394 other.close()
396 395 if cleanup:
397 396 os.unlink(cleanup)
398 397
399 398 def uisetup(ui):
400 399 '''Initialize the extension.'''
401 400 _wrapcmd(ui, 'log', commands.table, graphlog)
402 401 _wrapcmd(ui, 'incoming', commands.table, gincoming)
403 402 _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
404 403
405 404 def _wrapcmd(ui, cmd, table, wrapfn):
406 405 '''wrap the command'''
407 406 def graph(orig, *args, **kwargs):
408 407 if kwargs['graph']:
409 408 return wrapfn(*args, **kwargs)
410 409 return orig(*args, **kwargs)
411 410 entry = extensions.wrapcommand(table, cmd, graph)
412 411 entry[1].append(('G', 'graph', None, _("show the revision DAG")))
413 412
414 413 cmdtable = {
415 414 "glog":
416 415 (graphlog,
417 416 [('l', 'limit', '', _('limit number of changes displayed')),
418 417 ('p', 'patch', False, _('show patch')),
419 418 ('r', 'rev', [], _('show the specified revision or range')),
420 419 ] + templateopts,
421 420 _('hg glog [OPTION]... [FILE]')),
422 421 }
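The uisetup()/_wrapcmd() pair above is the general recipe for grafting a flag onto an existing command via extensions.wrapcommand(). A minimal sketch of the same pattern for a hypothetical --stats flag on hg log (the flag and handler are invented for illustration; the API calls are the ones used above):

    from mercurial import commands, extensions
    from mercurial.i18n import _

    def uisetup(ui):
        entry = extensions.wrapcommand(commands.table, 'log', statslog)
        entry[1].append(('', 'stats', None, _('also show changeset count')))

    def statslog(orig, ui, repo, *args, **opts):
        if opts.get('stats'):
            ui.write('changesets: %d\n' % len(repo))
        # always defer to the wrapped command for the actual log output
        return orig(ui, repo, *args, **opts)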
@@ -1,291 +1,290 @@
1 1 # notify.py - email notifications for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''hook extension to email notifications on commits/pushes
9 9
10 10 Subscriptions can be managed through hgrc. Default mode is to print
11 11 messages to stdout, for testing and configuring.
12 12
13 13 To use, configure notify extension and enable in hgrc like this:
14 14
15 15 [extensions]
16 16 hgext.notify =
17 17
18 18 [hooks]
19 19 # one email for each incoming changeset
20 20 incoming.notify = python:hgext.notify.hook
21 21 # batch emails when many changesets incoming at one time
22 22 changegroup.notify = python:hgext.notify.hook
23 23
24 24 [notify]
25 25 # config items go in here
26 26
27 27 config items:
28 28
29 29 REQUIRED:
30 30 config = /path/to/file # file containing subscriptions
31 31
32 32 OPTIONAL:
33 33 test = True # print messages to stdout for testing
34 34 strip = 3 # number of slashes to strip for url paths
35 35 domain = example.com # domain to use if committer missing domain
36 36 style = ... # style file to use when formatting email
37 37 template = ... # template to use when formatting email
38 38 incoming = ... # template to use when run as incoming hook
39 39 changegroup = ... # template when run as changegroup hook
40 40 maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
41 41 maxsubject = 67 # truncate subject line longer than this
42 42 diffstat = True # add a diffstat before the diff content
43 43 sources = serve # notify if source of incoming changes in this list
44 44 # (serve == ssh or http, push, pull, bundle)
45 45 [email]
46 46 from = user@host.com # email address to send as if none given
47 47 [web]
48 48 baseurl = http://hgserver/... # root of hg web site for browsing commits
49 49
50 50 notify config file has same format as regular hgrc. it has two
51 51 sections so you can express subscriptions in whatever way is handier
52 52 for you.
53 53
54 54 [usersubs]
55 55 # key is subscriber email, value is ","-separated list of glob patterns
56 56 user@host = pattern
57 57
58 58 [reposubs]
59 59 # key is glob pattern, value is ","-separated list of subscriber emails
60 60 pattern = user@host
61 61
62 62 glob patterns are matched against path to repo root.
63 63
64 64 if you like, you can put the notify config file in a repo that users can
65 65 push changes to; that way they can manage their own subscriptions.'''
66 66
67 67 from mercurial.i18n import _
68 - from mercurial.node import bin, short
69 68 from mercurial import patch, cmdutil, templater, util, mail
70 69 import email.Parser, fnmatch, socket, time
71 70
72 71 # template for single changeset can include email headers.
73 72 single_template = '''
74 73 Subject: changeset in {webroot}: {desc|firstline|strip}
75 74 From: {author}
76 75
77 76 changeset {node|short} in {root}
78 77 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
79 78 description:
80 79 \t{desc|tabindent|strip}
81 80 '''.lstrip()
82 81
83 82 # template for multiple changesets should not contain email headers,
84 83 # because only first set of headers will be used and result will look
85 84 # strange.
86 85 multiple_template = '''
87 86 changeset {node|short} in {root}
88 87 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
89 88 summary: {desc|firstline}
90 89 '''
91 90
92 91 deftemplates = {
93 92 'changegroup': multiple_template,
94 93 }
95 94
96 95 class notifier(object):
97 96 '''email notification class.'''
98 97
99 98 def __init__(self, ui, repo, hooktype):
100 99 self.ui = ui
101 100 cfg = self.ui.config('notify', 'config')
102 101 if cfg:
103 102 self.ui.readsections(cfg, 'usersubs', 'reposubs')
104 103 self.repo = repo
105 104 self.stripcount = int(self.ui.config('notify', 'strip', 0))
106 105 self.root = self.strip(self.repo.root)
107 106 self.domain = self.ui.config('notify', 'domain')
108 107 self.test = self.ui.configbool('notify', 'test', True)
109 108 self.charsets = mail._charsets(self.ui)
110 109 self.subs = self.subscribers()
111 110
112 111 mapfile = self.ui.config('notify', 'style')
113 112 template = (self.ui.config('notify', hooktype) or
114 113 self.ui.config('notify', 'template'))
115 114 self.t = cmdutil.changeset_templater(self.ui, self.repo,
116 115 False, None, mapfile, False)
117 116 if not mapfile and not template:
118 117 template = deftemplates.get(hooktype) or single_template
119 118 if template:
120 119 template = templater.parsestring(template, quoted=False)
121 120 self.t.use_template(template)
122 121
123 122 def strip(self, path):
124 123 '''strip leading slashes from local path, turn into web-safe path.'''
125 124
126 125 path = util.pconvert(path)
127 126 count = self.stripcount
128 127 while count > 0:
129 128 c = path.find('/')
130 129 if c == -1:
131 130 break
132 131 path = path[c+1:]
133 132 count -= 1
134 133 return path
135 134
136 135 def fixmail(self, addr):
137 136 '''try to clean up email addresses.'''
138 137
139 138 addr = util.email(addr.strip())
140 139 if self.domain:
141 140 a = addr.find('@localhost')
142 141 if a != -1:
143 142 addr = addr[:a]
144 143 if '@' not in addr:
145 144 return addr + '@' + self.domain
146 145 return addr
147 146
148 147 def subscribers(self):
149 148 '''return list of email addresses of subscribers to this repo.'''
150 149 subs = {}
151 150 for user, pats in self.ui.configitems('usersubs'):
152 151 for pat in pats.split(','):
153 152 if fnmatch.fnmatch(self.repo.root, pat.strip()):
154 153 subs[self.fixmail(user)] = 1
155 154 for pat, users in self.ui.configitems('reposubs'):
156 155 if fnmatch.fnmatch(self.repo.root, pat):
157 156 for user in users.split(','):
158 157 subs[self.fixmail(user)] = 1
159 158 subs = util.sort(subs)
160 159 return [mail.addressencode(self.ui, s, self.charsets, self.test)
161 160 for s in subs]
162 161
163 162 def url(self, path=None):
164 163 return self.ui.config('web', 'baseurl') + (path or self.root)
165 164
166 165 def node(self, ctx):
167 166 '''format one changeset.'''
168 167 self.t.show(ctx, changes=ctx.changeset(),
169 168 baseurl=self.ui.config('web', 'baseurl'),
170 169 root=self.repo.root, webroot=self.root)
171 170
172 171 def skipsource(self, source):
173 172 '''true if incoming changes from this source should be skipped.'''
174 173 ok_sources = self.ui.config('notify', 'sources', 'serve').split()
175 174 return source not in ok_sources
176 175
177 176 def send(self, ctx, count, data):
178 177 '''send message.'''
179 178
180 179 p = email.Parser.Parser()
181 180 msg = p.parsestr(data)
182 181
183 182 # store sender and subject
184 183 sender, subject = msg['From'], msg['Subject']
185 184 del msg['From'], msg['Subject']
186 185 # store remaining headers
187 186 headers = msg.items()
188 187 # create fresh mime message from msg body
189 188 text = msg.get_payload()
190 189 # for notification prefer readability over data precision
191 190 msg = mail.mimeencode(self.ui, text, self.charsets, self.test)
192 191 # reinstate custom headers
193 192 for k, v in headers:
194 193 msg[k] = v
195 194
196 195 msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
197 196
198 197 # try to make subject line exist and be useful
199 198 if not subject:
200 199 if count > 1:
201 200 subject = _('%s: %d new changesets') % (self.root, count)
202 201 else:
203 202 s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
204 203 subject = '%s: %s' % (self.root, s)
205 204 maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
206 205 if maxsubject and len(subject) > maxsubject:
207 206 subject = subject[:maxsubject-3] + '...'
208 207 msg['Subject'] = mail.headencode(self.ui, subject,
209 208 self.charsets, self.test)
210 209
211 210 # try to make message have proper sender
212 211 if not sender:
213 212 sender = self.ui.config('email', 'from') or self.ui.username()
214 213 if '@' not in sender or '@localhost' in sender:
215 214 sender = self.fixmail(sender)
216 215 msg['From'] = mail.addressencode(self.ui, sender,
217 216 self.charsets, self.test)
218 217
219 218 msg['X-Hg-Notification'] = 'changeset %s' % ctx
220 219 if not msg['Message-Id']:
221 220 msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
222 221 (ctx, int(time.time()),
223 222 hash(self.repo.root), socket.getfqdn()))
224 223 msg['To'] = ', '.join(self.subs)
225 224
226 225 msgtext = msg.as_string(0)
227 226 if self.test:
228 227 self.ui.write(msgtext)
229 228 if not msgtext.endswith('\n'):
230 229 self.ui.write('\n')
231 230 else:
232 231 self.ui.status(_('notify: sending %d subscribers %d changes\n') %
233 232 (len(self.subs), count))
234 233 mail.sendmail(self.ui, util.email(msg['From']),
235 234 self.subs, msgtext)
236 235
237 236 def diff(self, ctx, ref=None):
238 237
239 238 maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
240 239 prev = ctx.parents()[0].node()
241 240 ref = ref and ref.node() or ctx.node()
242 241 chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui))
243 242 difflines = ''.join(chunks).splitlines()
244 243
245 244 if self.ui.configbool('notify', 'diffstat', True):
246 245 s = patch.diffstat(difflines)
247 246 # s may be None, don't include the header if it is
248 247 if s:
249 248 self.ui.write('\ndiffstat:\n\n%s' % s)
250 249
251 250 if maxdiff == 0:
252 251 return
253 252 elif maxdiff > 0 and len(difflines) > maxdiff:
254 253 msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
255 254 self.ui.write(msg % (len(difflines), maxdiff))
256 255 difflines = difflines[:maxdiff]
257 256 elif difflines:
258 257 self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
259 258
260 259 self.ui.write("\n".join(difflines))
261 260
262 261 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
263 262 '''send email notifications to interested subscribers.
264 263
265 264 if used as changegroup hook, send one email for all changesets in
266 265 changegroup. else send one email per changeset.'''
267 266
268 267 n = notifier(ui, repo, hooktype)
269 268 ctx = repo[node]
270 269
271 270 if not n.subs:
272 271 ui.debug(_('notify: no subscribers to repo %s\n') % n.root)
273 272 return
274 273 if n.skipsource(source):
275 274 ui.debug(_('notify: changes have source "%s" - skipping\n') % source)
276 275 return
277 276
278 277 ui.pushbuffer()
279 278 if hooktype == 'changegroup':
280 279 start, end = ctx.rev(), len(repo)
281 280 count = end - start
282 281 for rev in xrange(start, end):
283 282 n.node(repo[rev])
284 283 n.diff(ctx, repo['tip'])
285 284 else:
286 285 count = 1
287 286 n.node(ctx)
288 287 n.diff(ctx)
289 288
290 289 data = ui.popbuffer()
291 290 n.send(ctx, count, data)
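
For context, a hook like this is wired up through hgrc; the snippet below is only an illustrative sketch (paths and addresses are invented), using the [usersubs]/[reposubs] sections that subscribers() reads and the notify options referenced above:

    [extensions]
    hgext.notify =

    [hooks]
    changegroup.notify = python:hgext.notify.hook

    [notify]
    # stay in test mode (print messages instead of sending) until verified
    test = True
    sources = serve push
    strip = 2

    [usersubs]
    # subscriber = comma-separated glob patterns of repository paths
    alice@example.com = /var/hg/repos/*

    [reposubs]
    # glob pattern = comma-separated subscribers
    /var/hg/repos/project = bob@example.com
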
@@ -1,97 +1,96 b''
1 1 # Mercurial extension to make it easy to refer to the parent of a revision
2 2 #
3 3 # Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7 '''\
8 8 use suffixes to refer to ancestor revisions
9 9
10 10 This extension allows you to use git-style suffixes to refer to
11 11 the ancestors of a specific revision.
12 12
13 13 For example, if you can refer to a revision as "foo", then:
14 14
15 15 - foo^N = Nth parent of foo:
16 16 foo^0 = foo
17 17 foo^1 = first parent of foo
18 18 foo^2 = second parent of foo
19 19 foo^ = foo^1
20 20
21 21 - foo~N = Nth first-parent ancestor of foo
22 22 foo~0 = foo
23 23 foo~1 = foo^1 = foo^ = first parent of foo
24 24 foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo
25 25 '''
26 import mercurial.repo
27 26 from mercurial import error
28 27
29 28 def reposetup(ui, repo):
30 29 if not repo.local():
31 30 return
32 31
33 32 class parentrevspecrepo(repo.__class__):
34 33 def lookup(self, key):
35 34 try:
36 35 _super = super(parentrevspecrepo, self)
37 36 return _super.lookup(key)
38 37 except error.RepoError:
39 38 pass
40 39
41 40 circ = key.find('^')
42 41 tilde = key.find('~')
43 42 if circ < 0 and tilde < 0:
44 43 raise
45 44 elif circ >= 0 and tilde >= 0:
46 45 end = min(circ, tilde)
47 46 else:
48 47 end = max(circ, tilde)
49 48
50 49 cl = self.changelog
51 50 base = key[:end]
52 51 try:
53 52 node = _super.lookup(base)
54 53 except error.RepoError:
55 54 # eek - reraise the first error
56 55 return _super.lookup(key)
57 56
58 57 rev = cl.rev(node)
59 58 suffix = key[end:]
60 59 i = 0
61 60 while i < len(suffix):
62 61 # foo^N => Nth parent of foo
63 62 # foo^0 == foo
64 63 # foo^1 == foo^ == 1st parent of foo
65 64 # foo^2 == 2nd parent of foo
66 65 if suffix[i] == '^':
67 66 j = i + 1
68 67 p = cl.parentrevs(rev)
69 68 if j < len(suffix) and suffix[j].isdigit():
70 69 j += 1
71 70 n = int(suffix[i+1:j])
72 71 if n > 2 or n == 2 and p[1] == -1:
73 72 raise
74 73 else:
75 74 n = 1
76 75 if n:
77 76 rev = p[n - 1]
78 77 i = j
79 78 # foo~N => Nth first-parent ancestor of foo
80 79 # foo~0 = foo
81 80 # foo~1 = foo^1 == foo^ == 1st parent of foo
82 81 # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo
83 82 elif suffix[i] == '~':
84 83 j = i + 1
85 84 while j < len(suffix) and suffix[j].isdigit():
86 85 j += 1
87 86 if j == i + 1:
88 87 raise
89 88 n = int(suffix[i+1:j])
90 89 for k in xrange(n):
91 90 rev = cl.parentrevs(rev)[0]
92 91 i = j
93 92 else:
94 93 raise
95 94 return cl.node(rev)
96 95
97 96 repo.__class__ = parentrevspecrepo
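
The suffix grammar documented at the top of the file can be exercised without a repository; here is a toy re-implementation over a plain parent table (all names are invented, and the real extension's error checking is omitted):

    def resolve(rev, suffix, parentrevs):
        # parentrevs: dict mapping rev -> (p1, p2), -1 meaning no parent
        i = 0
        while i < len(suffix):
            j = i + 1
            while j < len(suffix) and suffix[j].isdigit():
                j += 1
            n = int(suffix[i + 1:j]) if j > i + 1 else 1
            if suffix[i] == '^':        # foo^N: Nth parent (N defaults to 1)
                if n:
                    rev = parentrevs[rev][n - 1]
            elif suffix[i] == '~':      # foo~N: follow first parents N times
                for _ in range(n):
                    rev = parentrevs[rev][0]
            i = j
        return rev

    #     3 (merge)
    #    / \
    #   1   2    parents of the merge are 1 and 2; both fork off rev 0
    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
    print(resolve(3, '^2', parents))   # -> 2 (second parent)
    print(resolve(3, '~2', parents))   # -> 0 (first grandparent)
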
@@ -1,144 +1,144 b''
1 1 # win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
2 2 #
3 3 # This software may be used and distributed according to the terms
4 4 # of the GNU General Public License, incorporated herein by reference.
5 5 #
6 6 # To perform automatic newline conversion, use:
7 7 #
8 8 # [extensions]
9 9 # hgext.win32text =
10 10 # [encode]
11 11 # ** = cleverencode:
12 12 # # or ** = macencode:
13 13 # [decode]
14 14 # ** = cleverdecode:
15 15 # # or ** = macdecode:
16 16 #
17 17 # If not doing conversion, to make sure you do not commit CRLF/CR by accident:
18 18 #
19 19 # [hooks]
20 20 # pretxncommit.crlf = python:hgext.win32text.forbidcrlf
21 21 # # or pretxncommit.cr = python:hgext.win32text.forbidcr
22 22 #
23 23 # To do the same check on a server to prevent CRLF/CR from being pushed or
24 24 # pulled:
25 25 #
26 26 # [hooks]
27 27 # pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
28 28 # # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
29 29
30 30 from mercurial.i18n import _
31 from mercurial.node import bin, short
31 from mercurial.node import short
32 32 from mercurial import util
33 33 import re
34 34
35 35 # regexp for single LF without CR preceding.
36 36 re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
37 37
38 38 newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
39 39 filterstr = {'\r\n': 'clever', '\r': 'mac'}
40 40
41 41 def checknewline(s, newline, ui=None, repo=None, filename=None):
42 42 # warn if already has 'newline' in repository.
43 43 # it might cause unexpected eol conversion.
44 44 # see issue 302:
45 45 # http://www.selenic.com/mercurial/bts/issue302
46 46 if newline in s and ui and filename and repo:
47 47 ui.warn(_('WARNING: %s already has %s line endings\n'
48 48 'and does not need EOL conversion by the win32text plugin.\n'
49 49 'Before your next commit, please reconsider your '
50 50 'encode/decode settings in \nMercurial.ini or %s.\n') %
51 51 (filename, newlinestr[newline], repo.join('hgrc')))
52 52
53 53 def dumbdecode(s, cmd, **kwargs):
54 54 checknewline(s, '\r\n', **kwargs)
55 55 # replace single LF to CRLF
56 56 return re_single_lf.sub('\\1\r\n', s)
57 57
58 58 def dumbencode(s, cmd):
59 59 return s.replace('\r\n', '\n')
60 60
61 61 def macdumbdecode(s, cmd, **kwargs):
62 62 checknewline(s, '\r', **kwargs)
63 63 return s.replace('\n', '\r')
64 64
65 65 def macdumbencode(s, cmd):
66 66 return s.replace('\r', '\n')
67 67
68 68 def cleverdecode(s, cmd, **kwargs):
69 69 if not util.binary(s):
70 70 return dumbdecode(s, cmd, **kwargs)
71 71 return s
72 72
73 73 def cleverencode(s, cmd):
74 74 if not util.binary(s):
75 75 return dumbencode(s, cmd)
76 76 return s
77 77
78 78 def macdecode(s, cmd, **kwargs):
79 79 if not util.binary(s):
80 80 return macdumbdecode(s, cmd, **kwargs)
81 81 return s
82 82
83 83 def macencode(s, cmd):
84 84 if not util.binary(s):
85 85 return macdumbencode(s, cmd)
86 86 return s
87 87
88 88 _filters = {
89 89 'dumbdecode:': dumbdecode,
90 90 'dumbencode:': dumbencode,
91 91 'cleverdecode:': cleverdecode,
92 92 'cleverencode:': cleverencode,
93 93 'macdumbdecode:': macdumbdecode,
94 94 'macdumbencode:': macdumbencode,
95 95 'macdecode:': macdecode,
96 96 'macencode:': macencode,
97 97 }
98 98
99 99 def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
100 100 halt = False
101 101 for rev in xrange(repo[node].rev(), len(repo)):
102 102 c = repo[rev]
103 103 for f in c.files():
104 104 if f not in c:
105 105 continue
106 106 data = c[f].data()
107 107 if not util.binary(data) and newline in data:
108 108 if not halt:
109 109 ui.warn(_('Attempt to commit or push text file(s) '
110 110 'using %s line endings\n') %
111 111 newlinestr[newline])
112 112 ui.warn(_('in %s: %s\n') % (short(c.node()), f))
113 113 halt = True
114 114 if halt and hooktype == 'pretxnchangegroup':
115 115 crlf = newlinestr[newline].lower()
116 116 filter = filterstr[newline]
117 117 ui.warn(_('\nTo prevent this mistake in your local repository,\n'
118 118 'add to Mercurial.ini or .hg/hgrc:\n'
119 119 '\n'
120 120 '[hooks]\n'
121 121 'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
122 122 '\n'
123 123 'and also consider adding:\n'
124 124 '\n'
125 125 '[extensions]\n'
126 126 'hgext.win32text =\n'
127 127 '[encode]\n'
128 128 '** = %sencode:\n'
129 129 '[decode]\n'
130 130 '** = %sdecode:\n') % (crlf, crlf, filter, filter))
131 131 return halt
132 132
133 133 def forbidcrlf(ui, repo, hooktype, node, **kwargs):
134 134 return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
135 135
136 136 def forbidcr(ui, repo, hooktype, node, **kwargs):
137 137 return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
138 138
139 139 def reposetup(ui, repo):
140 140 if not repo.local():
141 141 return
142 142 for name, fn in _filters.iteritems():
143 143 repo.adddatafilter(name, fn)
144 144
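
A quick, repository-free check of the core conversion pair above (the regexp is copied from this module):

    import re

    re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)

    def dumbdecode(s):
        # LF -> CRLF, leaving existing CRLF pairs alone
        return re_single_lf.sub('\\1\r\n', s)

    def dumbencode(s):
        return s.replace('\r\n', '\n')

    text = 'one\ntwo\r\nthree\n'
    decoded = dumbdecode(text)
    print(repr(decoded))                            # 'one\r\ntwo\r\nthree\r\n'
    print(dumbencode(decoded) == dumbencode(text))  # True: round-trips
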
@@ -1,298 +1,298 b''
1 1 """
2 2 bundlerepo.py - repository class for viewing uncompressed bundles
3 3
4 4 This provides a read-only repository interface to bundles as if
5 5 they were part of the actual repository.
6 6
7 7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 from node import hex, nullid, short
13 from node import nullid
14 14 from i18n import _
15 15 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
16 import repo, localrepo, changelog, manifest, filelog, revlog, context, error
16 import localrepo, changelog, manifest, filelog, revlog, error
17 17
18 18 class bundlerevlog(revlog.revlog):
19 19 def __init__(self, opener, indexfile, bundlefile,
20 20 linkmapper=None):
21 21 # How it works:
22 22 # to retrieve a revision, we need to know the offset of
23 23 # the revision in the bundlefile (an opened file).
24 24 #
25 25 # We store this offset in the index (start). To differentiate a
26 26 # rev in the bundle from a rev in the revlog, we check
27 27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
28 28 # (it is bigger since we store the node to which the delta is)
29 29 #
30 30 revlog.revlog.__init__(self, opener, indexfile)
31 31 self.bundlefile = bundlefile
32 32 self.basemap = {}
33 33 def chunkpositer():
34 34 for chunk in changegroup.chunkiter(bundlefile):
35 35 pos = bundlefile.tell()
36 36 yield chunk, pos - len(chunk)
37 37 n = len(self)
38 38 prev = None
39 39 for chunk, start in chunkpositer():
40 40 size = len(chunk)
41 41 if size < 80:
42 42 raise util.Abort(_("invalid changegroup"))
43 43 start += 80
44 44 size -= 80
45 45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
46 46 if node in self.nodemap:
47 47 prev = node
48 48 continue
49 49 for p in (p1, p2):
50 50 if not p in self.nodemap:
51 51 raise error.LookupError(p1, self.indexfile,
52 52 _("unknown parent"))
53 53 if linkmapper is None:
54 54 link = n
55 55 else:
56 56 link = linkmapper(cs)
57 57
58 58 if not prev:
59 59 prev = p1
60 60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 62 self.rev(p1), self.rev(p2), node)
63 63 self.basemap[n] = prev
64 64 self.index.insert(-1, e)
65 65 self.nodemap[node] = n
66 66 prev = node
67 67 n += 1
68 68
69 69 def bundle(self, rev):
70 70 """is rev from the bundle"""
71 71 if rev < 0:
72 72 return False
73 73 return rev in self.basemap
74 74 def bundlebase(self, rev): return self.basemap[rev]
75 75 def chunk(self, rev, df=None, cachelen=4096):
76 76 # Warning: in case of bundle, the diff is against bundlebase,
77 77 # not against rev - 1
78 78 # XXX: could use some caching
79 79 if not self.bundle(rev):
80 80 return revlog.revlog.chunk(self, rev, df)
81 81 self.bundlefile.seek(self.start(rev))
82 82 return self.bundlefile.read(self.length(rev))
83 83
84 84 def revdiff(self, rev1, rev2):
85 85 """return or calculate a delta between two revisions"""
86 86 if self.bundle(rev1) and self.bundle(rev2):
87 87 # hot path for bundle
88 88 revb = self.rev(self.bundlebase(rev2))
89 89 if revb == rev1:
90 90 return self.chunk(rev2)
91 91 elif not self.bundle(rev1) and not self.bundle(rev2):
92 92 return revlog.revlog.revdiff(self, rev1, rev2)
93 93
94 94 return mdiff.textdiff(self.revision(self.node(rev1)),
95 95 self.revision(self.node(rev2)))
96 96
97 97 def revision(self, node):
98 98 """return an uncompressed revision of a given"""
99 99 if node == nullid: return ""
100 100
101 101 text = None
102 102 chain = []
103 103 iter_node = node
104 104 rev = self.rev(iter_node)
105 105 # reconstruct the revision if it is from a changegroup
106 106 while self.bundle(rev):
107 107 if self._cache and self._cache[0] == iter_node:
108 108 text = self._cache[2]
109 109 break
110 110 chain.append(rev)
111 111 iter_node = self.bundlebase(rev)
112 112 rev = self.rev(iter_node)
113 113 if text is None:
114 114 text = revlog.revlog.revision(self, iter_node)
115 115
116 116 while chain:
117 117 delta = self.chunk(chain.pop())
118 118 text = mdiff.patches(text, [delta])
119 119
120 120 p1, p2 = self.parents(node)
121 121 if node != revlog.hash(text, p1, p2):
122 122 raise error.RevlogError(_("integrity check failed on %s:%d")
123 123 % (self.datafile, self.rev(node)))
124 124
125 125 self._cache = (node, self.rev(node), text)
126 126 return text
127 127
128 128 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
129 129 raise NotImplementedError
130 130 def addgroup(self, revs, linkmapper, transaction):
131 131 raise NotImplementedError
132 132 def strip(self, rev, minlink):
133 133 raise NotImplementedError
134 134 def checksize(self):
135 135 raise NotImplementedError
136 136
137 137 class bundlechangelog(bundlerevlog, changelog.changelog):
138 138 def __init__(self, opener, bundlefile):
139 139 changelog.changelog.__init__(self, opener)
140 140 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
141 141
142 142 class bundlemanifest(bundlerevlog, manifest.manifest):
143 143 def __init__(self, opener, bundlefile, linkmapper):
144 144 manifest.manifest.__init__(self, opener)
145 145 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
146 146 linkmapper)
147 147
148 148 class bundlefilelog(bundlerevlog, filelog.filelog):
149 149 def __init__(self, opener, path, bundlefile, linkmapper):
150 150 filelog.filelog.__init__(self, opener, path)
151 151 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
152 152 linkmapper)
153 153
154 154 class bundlerepository(localrepo.localrepository):
155 155 def __init__(self, ui, path, bundlename):
156 156 self._tempparent = None
157 157 try:
158 158 localrepo.localrepository.__init__(self, ui, path)
159 159 except error.RepoError:
160 160 self._tempparent = tempfile.mkdtemp()
161 tmprepo = localrepo.instance(ui,self._tempparent,1)
161 localrepo.instance(ui,self._tempparent,1)
162 162 localrepo.localrepository.__init__(self, ui, self._tempparent)
163 163
164 164 if path:
165 165 self._url = 'bundle:' + path + '+' + bundlename
166 166 else:
167 167 self._url = 'bundle:' + bundlename
168 168
169 169 self.tempfile = None
170 170 self.bundlefile = open(bundlename, "rb")
171 171 header = self.bundlefile.read(6)
172 172 if not header.startswith("HG"):
173 173 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
174 174 elif not header.startswith("HG10"):
175 175 raise util.Abort(_("%s: unknown bundle version") % bundlename)
176 176 elif (header == "HG10BZ") or (header == "HG10GZ"):
177 177 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
178 178 suffix=".hg10un", dir=self.path)
179 179 self.tempfile = temp
180 180 fptemp = os.fdopen(fdtemp, 'wb')
181 181 def generator(f):
182 182 if header == "HG10BZ":
183 183 zd = bz2.BZ2Decompressor()
184 184 zd.decompress("BZ")
185 185 elif header == "HG10GZ":
186 186 zd = zlib.decompressobj()
187 187 for chunk in f:
188 188 yield zd.decompress(chunk)
189 189 gen = generator(util.filechunkiter(self.bundlefile, 4096))
190 190
191 191 try:
192 192 fptemp.write("HG10UN")
193 193 for chunk in gen:
194 194 fptemp.write(chunk)
195 195 finally:
196 196 fptemp.close()
197 197 self.bundlefile.close()
198 198
199 199 self.bundlefile = open(self.tempfile, "rb")
200 200 # seek right after the header
201 201 self.bundlefile.seek(6)
202 202 elif header == "HG10UN":
203 203 # nothing to do
204 204 pass
205 205 else:
206 206 raise util.Abort(_("%s: unknown bundle compression type")
207 207 % bundlename)
208 208 # dict with the mapping 'filename' -> position in the bundle
209 209 self.bundlefilespos = {}
210 210
211 211 def __getattr__(self, name):
212 212 if name == 'changelog':
213 213 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
214 214 self.manstart = self.bundlefile.tell()
215 215 return self.changelog
216 216 elif name == 'manifest':
217 217 self.bundlefile.seek(self.manstart)
218 218 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
219 219 self.changelog.rev)
220 220 self.filestart = self.bundlefile.tell()
221 221 return self.manifest
222 222 elif name == 'manstart':
223 223 self.changelog
224 224 return self.manstart
225 225 elif name == 'filestart':
226 226 self.manifest
227 227 return self.filestart
228 228 else:
229 229 return localrepo.localrepository.__getattr__(self, name)
230 230
231 231 def url(self):
232 232 return self._url
233 233
234 234 def file(self, f):
235 235 if not self.bundlefilespos:
236 236 self.bundlefile.seek(self.filestart)
237 237 while 1:
238 238 chunk = changegroup.getchunk(self.bundlefile)
239 239 if not chunk:
240 240 break
241 241 self.bundlefilespos[chunk] = self.bundlefile.tell()
242 242 for c in changegroup.chunkiter(self.bundlefile):
243 243 pass
244 244
245 245 if f[0] == '/':
246 246 f = f[1:]
247 247 if f in self.bundlefilespos:
248 248 self.bundlefile.seek(self.bundlefilespos[f])
249 249 return bundlefilelog(self.sopener, f, self.bundlefile,
250 250 self.changelog.rev)
251 251 else:
252 252 return filelog.filelog(self.sopener, f)
253 253
254 254 def close(self):
255 255 """Close assigned bundle file immediately."""
256 256 self.bundlefile.close()
257 257
258 258 def __del__(self):
259 259 bundlefile = getattr(self, 'bundlefile', None)
260 260 if bundlefile and not bundlefile.closed:
261 261 bundlefile.close()
262 262 tempfile = getattr(self, 'tempfile', None)
263 263 if tempfile is not None:
264 264 os.unlink(tempfile)
265 265 if self._tempparent:
266 266 shutil.rmtree(self._tempparent, True)
267 267
268 268 def cancopy(self):
269 269 return False
270 270
271 271 def getcwd(self):
272 272 return os.getcwd() # always outside the repo
273 273
274 274 def instance(ui, path, create):
275 275 if create:
276 276 raise util.Abort(_('cannot create new bundle repository'))
277 277 parentpath = ui.config("bundle", "mainreporoot", "")
278 278 if parentpath:
279 279 # Try to make the full path relative so we get a nice, short URL.
280 280 # In particular, we don't want temp dir names in test outputs.
281 281 cwd = os.getcwd()
282 282 if parentpath == cwd:
283 283 parentpath = ''
284 284 else:
285 285 cwd = os.path.join(cwd,'')
286 286 if parentpath.startswith(cwd):
287 287 parentpath = parentpath[len(cwd):]
288 288 path = util.drop_scheme('file', path)
289 289 if path.startswith('bundle:'):
290 290 path = util.drop_scheme('bundle', path)
291 291 s = path.split("+", 1)
292 292 if len(s) == 1:
293 293 repopath, bundlename = parentpath, s[0]
294 294 else:
295 295 repopath, bundlename = s
296 296 else:
297 297 repopath, bundlename = parentpath, path
298 298 return bundlerepository(ui, repopath, bundlename)
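
The 'bundle:' URL handling at the end of instance() reduces to a little string surgery; a condensed sketch (the cwd-relativizing of parentpath and the drop_scheme('file', ...) step are skipped):

    def parsebundleurl(path, parentpath=''):
        # 'bundle:repo+file.hg' -> ('repo', 'file.hg')
        # 'bundle:file.hg' and plain 'file.hg' -> (parentpath, 'file.hg')
        if path.startswith('bundle:'):
            path = path[len('bundle:'):]
            s = path.split('+', 1)
            if len(s) == 1:
                return parentpath, s[0]
            return s[0], s[1]
        return parentpath, path

    print(parsebundleurl('bundle:/repos/main+incoming.hg'))
    # -> ('/repos/main', 'incoming.hg')
    print(parsebundleurl('incoming.hg', '/repos/main'))
    # -> ('/repos/main', 'incoming.hg')
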
@@ -1,233 +1,232 b''
1 1 # copies.py - copy detection for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 from node import nullid, nullrev
9 8 from i18n import _
10 9 import util, heapq
11 10
12 11 def _nonoverlap(d1, d2, d3):
13 12 "Return list of elements in d1 not in d2 or d3"
14 13 return util.sort([d for d in d1 if d not in d3 and d not in d2])
15 14
16 15 def _dirname(f):
17 16 s = f.rfind("/")
18 17 if s == -1:
19 18 return ""
20 19 return f[:s]
21 20
22 21 def _dirs(files):
23 22 d = {}
24 23 for f in files:
25 24 f = _dirname(f)
26 25 while f not in d:
27 26 d[f] = True
28 27 f = _dirname(f)
29 28 return d
30 29
31 30 def _findoldnames(fctx, limit):
32 31 "find files that path was copied from, back to linkrev limit"
33 32 old = {}
34 33 seen = {}
35 34 orig = fctx.path()
36 35 visit = [(fctx, 0)]
37 36 while visit:
38 37 fc, depth = visit.pop()
39 38 s = str(fc)
40 39 if s in seen:
41 40 continue
42 41 seen[s] = 1
43 42 if fc.path() != orig and fc.path() not in old:
44 43 old[fc.path()] = (depth, fc.path()) # remember depth
45 44 if fc.rev() < limit and fc.rev() is not None:
46 45 continue
47 46 visit += [(p, depth - 1) for p in fc.parents()]
48 47
49 48 # return old names sorted by depth
50 49 return [o[1] for o in util.sort(old.values())]
51 50
52 51 def _findlimit(repo, a, b):
53 52 "find the earliest revision that's an ancestor of a or b but not both"
54 53 # basic idea:
55 54 # - mark a and b with different sides
56 55 # - if a parent's children are all on the same side, the parent is
57 56 # on that side, otherwise it is on no side
58 57 # - walk the graph in topological order with the help of a heap;
59 58 # - add unseen parents to side map
60 59 # - clear side of any parent that has children on different sides
61 60 # - track number of interesting revs that might still be on a side
62 61 # - track the lowest interesting rev seen
63 62 # - quit when interesting revs is zero
64 63
65 64 cl = repo.changelog
66 65 working = len(cl) # pseudo rev for the working directory
67 66 if a is None:
68 67 a = working
69 68 if b is None:
70 69 b = working
71 70
72 71 side = {a: -1, b: 1}
73 72 visit = [-a, -b]
74 73 heapq.heapify(visit)
75 74 interesting = len(visit)
76 75 limit = working
77 76
78 77 while interesting:
79 78 r = -heapq.heappop(visit)
80 79 if r == working:
81 80 parents = [cl.rev(p) for p in repo.dirstate.parents()]
82 81 else:
83 82 parents = cl.parentrevs(r)
84 83 for p in parents:
85 84 if p not in side:
86 85 # first time we see p; add it to visit
87 86 side[p] = side[r]
88 87 if side[p]:
89 88 interesting += 1
90 89 heapq.heappush(visit, -p)
91 90 elif side[p] and side[p] != side[r]:
92 91 # p was interesting but now we know better
93 92 side[p] = 0
94 93 interesting -= 1
95 94 if side[r]:
96 95 limit = r # lowest rev visited
97 96 interesting -= 1
98 97 return limit
99 98
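
The side-marking walk is easier to follow on a toy graph. A self-contained transcription (a plain parent table replaces the changelog, and the working-directory pseudo-rev is dropped):

    import heapq

    def findlimit(parentrevs, a, b):
        # parentrevs: list indexed by rev -> (p1, p2), -1 meaning no parent
        side = {a: -1, b: 1}            # -1: only under a, 1: only under b
        visit = [-a, -b]                # negated revs: heapq pops highest rev
        heapq.heapify(visit)
        interesting = len(visit)
        limit = max(a, b)
        while interesting:
            r = -heapq.heappop(visit)
            for p in parentrevs[r]:
                if p == -1:
                    continue
                if p not in side:
                    side[p] = side[r]   # first visit: inherit r's side
                    if side[p]:
                        interesting += 1
                    heapq.heappush(visit, -p)
                elif side[p] and side[p] != side[r]:
                    side[p] = 0         # reachable from both: clear its side
                    interesting -= 1
            if side[r]:
                limit = r               # lowest one-sided rev seen so far
                interesting -= 1
        return limit

    # 0 - 1 - 2, with 3 and 4 both branching off rev 2
    parents = [(-1, -1), (0, -1), (1, -1), (2, -1), (2, -1)]
    print(findlimit(parents, 3, 4))     # -> 3: revs 0..2 are ancestors of both
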
100 99 def copies(repo, c1, c2, ca, checkdirs=False):
101 100 """
102 101 Find moves and copies between context c1 and c2
103 102 """
104 103 # avoid silly behavior for update from empty dir
105 104 if not c1 or not c2 or c1 == c2:
106 105 return {}, {}
107 106
108 107 # avoid silly behavior for parent -> working dir
109 108 if c2.node() == None and c1.node() == repo.dirstate.parents()[0]:
110 109 return repo.dirstate.copies(), {}
111 110
112 111 limit = _findlimit(repo, c1.rev(), c2.rev())
113 112 m1 = c1.manifest()
114 113 m2 = c2.manifest()
115 114 ma = ca.manifest()
116 115
117 116 def makectx(f, n):
118 117 if len(n) != 20: # in a working context?
119 118 if c1.rev() is None:
120 119 return c1.filectx(f)
121 120 return c2.filectx(f)
122 121 return repo.filectx(f, fileid=n)
123 122 ctx = util.cachefunc(makectx)
124 123
125 124 copy = {}
126 125 fullcopy = {}
127 126 diverge = {}
128 127
129 128 def checkcopies(f, m1, m2):
130 129 '''check possible copies of f from m1 to m2'''
131 130 c1 = ctx(f, m1[f])
132 131 for of in _findoldnames(c1, limit):
133 132 fullcopy[f] = of # remember for dir rename detection
134 133 if of in m2: # original file not in other manifest?
135 134 # if the original file is unchanged on the other branch,
136 135 # no merge needed
137 136 if m2[of] != ma.get(of):
138 137 c2 = ctx(of, m2[of])
139 138 ca = c1.ancestor(c2)
140 139 # related and named changed on only one side?
141 140 if ca and (ca.path() == f or ca.path() == c2.path()):
142 141 if c1 != ca or c2 != ca: # merge needed?
143 142 copy[f] = of
144 143 elif of in ma:
145 144 diverge.setdefault(of, []).append(f)
146 145
147 146 repo.ui.debug(_(" searching for copies back to rev %d\n") % limit)
148 147
149 148 u1 = _nonoverlap(m1, m2, ma)
150 149 u2 = _nonoverlap(m2, m1, ma)
151 150
152 151 if u1:
153 152 repo.ui.debug(_(" unmatched files in local:\n %s\n")
154 153 % "\n ".join(u1))
155 154 if u2:
156 155 repo.ui.debug(_(" unmatched files in other:\n %s\n")
157 156 % "\n ".join(u2))
158 157
159 158 for f in u1:
160 159 checkcopies(f, m1, m2)
161 160 for f in u2:
162 161 checkcopies(f, m2, m1)
163 162
164 163 diverge2 = {}
165 164 for of, fl in diverge.items():
166 165 if len(fl) == 1:
167 166 del diverge[of] # not actually divergent
168 167 else:
169 168 diverge2.update(dict.fromkeys(fl)) # reverse map for below
170 169
171 170 if fullcopy:
172 171 repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n"))
173 172 for f in fullcopy:
174 173 note = ""
175 174 if f in copy: note += "*"
176 175 if f in diverge2: note += "!"
177 176 repo.ui.debug(_(" %s -> %s %s\n") % (f, fullcopy[f], note))
178 177 del diverge2
179 178
180 179 if not fullcopy or not checkdirs:
181 180 return copy, diverge
182 181
183 182 repo.ui.debug(_(" checking for directory renames\n"))
184 183
185 184 # generate a directory move map
186 185 d1, d2 = _dirs(m1), _dirs(m2)
187 186 invalid = {}
188 187 dirmove = {}
189 188
190 189 # examine each file copy for a potential directory move, which is
191 190 # when all the files in a directory are moved to a new directory
192 191 for dst, src in fullcopy.iteritems():
193 192 dsrc, ddst = _dirname(src), _dirname(dst)
194 193 if dsrc in invalid:
195 194 # already seen to be uninteresting
196 195 continue
197 196 elif dsrc in d1 and ddst in d1:
198 197 # directory wasn't entirely moved locally
199 198 invalid[dsrc] = True
200 199 elif dsrc in d2 and ddst in d2:
201 200 # directory wasn't entirely moved remotely
202 201 invalid[dsrc] = True
203 202 elif dsrc in dirmove and dirmove[dsrc] != ddst:
204 203 # files from the same directory moved to two different places
205 204 invalid[dsrc] = True
206 205 else:
207 206 # looks good so far
208 207 dirmove[dsrc + "/"] = ddst + "/"
209 208
210 209 for i in invalid:
211 210 if i in dirmove:
212 211 del dirmove[i]
213 212 del d1, d2, invalid
214 213
215 214 if not dirmove:
216 215 return copy, diverge
217 216
218 217 for d in dirmove:
219 218 repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d]))
220 219
221 220 # check unaccounted nonoverlapping files against directory moves
222 221 for f in u1 + u2:
223 222 if f not in fullcopy:
224 223 for d in dirmove:
225 224 if f.startswith(d):
226 225 # new file added in a directory that was moved, move it
227 226 df = dirmove[d] + f[len(d):]
228 227 if df not in copy:
229 228 copy[f] = df
230 229 repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f]))
231 230 break
232 231
233 232 return copy, diverge
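
The final fixup above is a plain prefix rewrite driven by the dirmove table; for instance (the table below is invented):

    dirmove = {'src/old/': 'src/new/'}

    def applydirmove(f, dirmove):
        # a file added in a moved directory follows the move
        for d in dirmove:
            if f.startswith(d):
                return dirmove[d] + f[len(d):]
        return f

    print(applydirmove('src/old/extra.c', dirmove))   # -> 'src/new/extra.c'
    print(applydirmove('docs/readme', dirmove))       # -> 'docs/readme'
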
@@ -1,417 +1,417 b''
1 1 # dispatch.py - command dispatching for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import _
9 9 import os, sys, atexit, signal, pdb, socket, errno, shlex, time
10 import util, commands, hg, lock, fancyopts, extensions, hook, error
10 import util, commands, hg, fancyopts, extensions, hook, error
11 11 import cmdutil
12 12 import ui as _ui
13 13
14 14 def run():
15 15 "run the command in sys.argv"
16 16 sys.exit(dispatch(sys.argv[1:]))
17 17
18 18 def dispatch(args):
19 19 "run the command specified in args"
20 20 try:
21 21 u = _ui.ui(traceback='--traceback' in args)
22 22 except util.Abort, inst:
23 23 sys.stderr.write(_("abort: %s\n") % inst)
24 24 return -1
25 25 return _runcatch(u, args)
26 26
27 27 def _runcatch(ui, args):
28 28 def catchterm(*args):
29 29 raise error.SignalInterrupt
30 30
31 31 for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
32 32 num = getattr(signal, name, None)
33 33 if num: signal.signal(num, catchterm)
34 34
35 35 try:
36 36 try:
37 37 # enter the debugger before command execution
38 38 if '--debugger' in args:
39 39 pdb.set_trace()
40 40 try:
41 41 return _dispatch(ui, args)
42 42 finally:
43 43 ui.flush()
44 44 except:
45 45 # enter the debugger when we hit an exception
46 46 if '--debugger' in args:
47 47 pdb.post_mortem(sys.exc_info()[2])
48 48 ui.print_exc()
49 49 raise
50 50
51 51 # Global exception handling, alphabetically
52 52 # Mercurial-specific first, followed by built-in and library exceptions
53 53 except error.AmbiguousCommand, inst:
54 54 ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
55 55 (inst.args[0], " ".join(inst.args[1])))
56 56 except error.LockHeld, inst:
57 57 if inst.errno == errno.ETIMEDOUT:
58 58 reason = _('timed out waiting for lock held by %s') % inst.locker
59 59 else:
60 60 reason = _('lock held by %s') % inst.locker
61 61 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
62 62 except error.LockUnavailable, inst:
63 63 ui.warn(_("abort: could not lock %s: %s\n") %
64 64 (inst.desc or inst.filename, inst.strerror))
65 65 except error.ParseError, inst:
66 66 if inst.args[0]:
67 67 ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
68 68 commands.help_(ui, inst.args[0])
69 69 else:
70 70 ui.warn(_("hg: %s\n") % inst.args[1])
71 71 commands.help_(ui, 'shortlist')
72 72 except error.RepoError, inst:
73 73 ui.warn(_("abort: %s!\n") % inst)
74 74 except error.ResponseError, inst:
75 75 ui.warn(_("abort: %s") % inst.args[0])
76 76 if not isinstance(inst.args[1], basestring):
77 77 ui.warn(" %r\n" % (inst.args[1],))
78 78 elif not inst.args[1]:
79 79 ui.warn(_(" empty string\n"))
80 80 else:
81 81 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
82 82 except error.RevlogError, inst:
83 83 ui.warn(_("abort: %s!\n") % inst)
84 84 except error.SignalInterrupt:
85 85 ui.warn(_("killed!\n"))
86 86 except error.UnknownCommand, inst:
87 87 ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
88 88 commands.help_(ui, 'shortlist')
89 89 except util.Abort, inst:
90 90 ui.warn(_("abort: %s\n") % inst)
91 91 except ImportError, inst:
92 92 m = str(inst).split()[-1]
93 93 ui.warn(_("abort: could not import module %s!\n") % m)
94 94 if m in "mpatch bdiff".split():
95 95 ui.warn(_("(did you forget to compile extensions?)\n"))
96 96 elif m in "zlib".split():
97 97 ui.warn(_("(is your Python install correct?)\n"))
98 98 except IOError, inst:
99 99 if hasattr(inst, "code"):
100 100 ui.warn(_("abort: %s\n") % inst)
101 101 elif hasattr(inst, "reason"):
102 102 try: # usually it is in the form (errno, strerror)
103 103 reason = inst.reason.args[1]
104 104 except: # it might be anything, for example a string
105 105 reason = inst.reason
106 106 ui.warn(_("abort: error: %s\n") % reason)
107 107 elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
108 108 if ui.debugflag:
109 109 ui.warn(_("broken pipe\n"))
110 110 elif getattr(inst, "strerror", None):
111 111 if getattr(inst, "filename", None):
112 112 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
113 113 else:
114 114 ui.warn(_("abort: %s\n") % inst.strerror)
115 115 else:
116 116 raise
117 117 except OSError, inst:
118 118 if getattr(inst, "filename", None):
119 119 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
120 120 else:
121 121 ui.warn(_("abort: %s\n") % inst.strerror)
122 122 except KeyboardInterrupt:
123 123 try:
124 124 ui.warn(_("interrupted!\n"))
125 125 except IOError, inst:
126 126 if inst.errno == errno.EPIPE:
127 127 if ui.debugflag:
128 128 ui.warn(_("\nbroken pipe\n"))
129 129 else:
130 130 raise
131 131 except MemoryError:
132 132 ui.warn(_("abort: out of memory\n"))
133 133 except SystemExit, inst:
134 134 # Commands shouldn't sys.exit directly, but give a return code.
135 135 # Just in case, catch this and pass the exit code to the caller.
136 136 return inst.code
137 137 except socket.error, inst:
138 138 ui.warn(_("abort: %s\n") % inst.args[-1])
139 139 except:
140 140 ui.warn(_("** unknown exception encountered, details follow\n"))
141 141 ui.warn(_("** report bug details to "
142 142 "http://www.selenic.com/mercurial/bts\n"))
143 143 ui.warn(_("** or mercurial@selenic.com\n"))
144 144 ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
145 145 % util.version())
146 146 ui.warn(_("** Extensions loaded: %s\n")
147 147 % ", ".join([x[0] for x in extensions.extensions()]))
148 148 raise
149 149
150 150 return -1
151 151
152 152 def _findrepo(p):
153 153 while not os.path.isdir(os.path.join(p, ".hg")):
154 154 oldp, p = p, os.path.dirname(p)
155 155 if p == oldp:
156 156 return None
157 157
158 158 return p
159 159
160 160 def _parse(ui, args):
161 161 options = {}
162 162 cmdoptions = {}
163 163
164 164 try:
165 165 args = fancyopts.fancyopts(args, commands.globalopts, options)
166 166 except fancyopts.getopt.GetoptError, inst:
167 167 raise error.ParseError(None, inst)
168 168
169 169 if args:
170 170 cmd, args = args[0], args[1:]
171 171 aliases, i = cmdutil.findcmd(cmd, commands.table,
172 172 ui.config("ui", "strict"))
173 173 cmd = aliases[0]
174 174 defaults = ui.config("defaults", cmd)
175 175 if defaults:
176 176 args = shlex.split(defaults) + args
177 177 c = list(i[1])
178 178 else:
179 179 cmd = None
180 180 c = []
181 181
182 182 # combine global options into local
183 183 for o in commands.globalopts:
184 184 c.append((o[0], o[1], options[o[1]], o[3]))
185 185
186 186 try:
187 187 args = fancyopts.fancyopts(args, c, cmdoptions, True)
188 188 except fancyopts.getopt.GetoptError, inst:
189 189 raise error.ParseError(cmd, inst)
190 190
191 191 # separate global options back out
192 192 for o in commands.globalopts:
193 193 n = o[1]
194 194 options[n] = cmdoptions[n]
195 195 del cmdoptions[n]
196 196
197 197 return (cmd, cmd and i[0] or None, args, options, cmdoptions)
198 198
199 199 def _parseconfig(config):
200 200 """parse the --config options from the command line"""
201 201 parsed = []
202 202 for cfg in config:
203 203 try:
204 204 name, value = cfg.split('=', 1)
205 205 section, name = name.split('.', 1)
206 206 if not section or not name:
207 207 raise IndexError
208 208 parsed.append((section, name, value))
209 209 except (IndexError, ValueError):
210 210 raise util.Abort(_('malformed --config option: %s') % cfg)
211 211 return parsed
212 212
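
The two-step split is what lets values contain '=' and names contain further dots; for example:

    cfg = 'hooks.changegroup.notify=python:hgext.notify.hook'
    name, value = cfg.split('=', 1)
    section, name = name.split('.', 1)
    print((section, name, value))
    # -> ('hooks', 'changegroup.notify', 'python:hgext.notify.hook')
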
213 213 def _earlygetopt(aliases, args):
214 214 """Return list of values for an option (or aliases).
215 215
216 216 The values are listed in the order they appear in args.
217 217 The options and values are removed from args.
218 218 """
219 219 try:
220 220 argcount = args.index("--")
221 221 except ValueError:
222 222 argcount = len(args)
223 223 shortopts = [opt for opt in aliases if len(opt) == 2]
224 224 values = []
225 225 pos = 0
226 226 while pos < argcount:
227 227 if args[pos] in aliases:
228 228 if pos + 1 >= argcount:
229 229 # ignore and let getopt report an error if there is no value
230 230 break
231 231 del args[pos]
232 232 values.append(args.pop(pos))
233 233 argcount -= 2
234 234 elif args[pos][:2] in shortopts:
235 235 # short option can have no following space, e.g. hg log -Rfoo
236 236 values.append(args.pop(pos)[2:])
237 237 argcount -= 1
238 238 else:
239 239 pos += 1
240 240 return values
241 241
242 242 def runcommand(lui, repo, cmd, fullargs, ui, options, d):
243 243 # run pre-hook, and abort if it fails
244 244 ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
245 245 if ret:
246 246 return ret
247 247 ret = _runcommand(ui, options, cmd, d)
248 248 # run post-hook, passing command result
249 249 hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
250 250 result = ret)
251 251 return ret
252 252
253 253 _loaded = {}
254 254 def _dispatch(ui, args):
255 255 # read --config before doing anything else
256 256 # (e.g. to change trust settings for reading .hg/hgrc)
257 257 config = _earlygetopt(['--config'], args)
258 258 if config:
259 259 ui.updateopts(config=_parseconfig(config))
260 260
261 261 # check for cwd
262 262 cwd = _earlygetopt(['--cwd'], args)
263 263 if cwd:
264 264 os.chdir(cwd[-1])
265 265
266 266 # read the local repository .hgrc into a local ui object
267 267 path = _findrepo(os.getcwd()) or ""
268 268 if not path:
269 269 lui = ui
270 270 if path:
271 271 try:
272 272 lui = _ui.ui(parentui=ui)
273 273 lui.readconfig(os.path.join(path, ".hg", "hgrc"))
274 274 except IOError:
275 275 pass
276 276
277 277 # now we can expand paths, even ones in .hg/hgrc
278 278 rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
279 279 if rpath:
280 280 path = lui.expandpath(rpath[-1])
281 281 lui = _ui.ui(parentui=ui)
282 282 lui.readconfig(os.path.join(path, ".hg", "hgrc"))
283 283
284 284 extensions.loadall(lui)
285 285 for name, module in extensions.extensions():
286 286 if name in _loaded:
287 287 continue
288 288
289 289 # setup extensions
290 290 # TODO this should be generalized to scheme, where extensions can
291 291 # redepend on other extensions. then we should toposort them, and
292 292 # do initialization in correct order
293 293 extsetup = getattr(module, 'extsetup', None)
294 294 if extsetup:
295 295 extsetup()
296 296
297 297 cmdtable = getattr(module, 'cmdtable', {})
298 298 overrides = [cmd for cmd in cmdtable if cmd in commands.table]
299 299 if overrides:
300 300 ui.warn(_("extension '%s' overrides commands: %s\n")
301 301 % (name, " ".join(overrides)))
302 302 commands.table.update(cmdtable)
303 303 _loaded[name] = 1
304 304 # check for fallback encoding
305 305 fallback = lui.config('ui', 'fallbackencoding')
306 306 if fallback:
307 307 util._fallbackencoding = fallback
308 308
309 309 fullargs = args
310 310 cmd, func, args, options, cmdoptions = _parse(lui, args)
311 311
312 312 if options["config"]:
313 313 raise util.Abort(_("Option --config may not be abbreviated!"))
314 314 if options["cwd"]:
315 315 raise util.Abort(_("Option --cwd may not be abbreviated!"))
316 316 if options["repository"]:
317 317 raise util.Abort(_(
318 318 "Option -R has to be separated from other options (i.e. not -qR) "
319 319 "and --repository may only be abbreviated as --repo!"))
320 320
321 321 if options["encoding"]:
322 322 util._encoding = options["encoding"]
323 323 if options["encodingmode"]:
324 324 util._encodingmode = options["encodingmode"]
325 325 if options["time"]:
326 326 def get_times():
327 327 t = os.times()
328 328 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
329 329 t = (t[0], t[1], t[2], t[3], time.clock())
330 330 return t
331 331 s = get_times()
332 332 def print_time():
333 333 t = get_times()
334 334 ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
335 335 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
336 336 atexit.register(print_time)
337 337
338 338 ui.updateopts(options["verbose"], options["debug"], options["quiet"],
339 339 not options["noninteractive"], options["traceback"])
340 340
341 341 if options['help']:
342 342 return commands.help_(ui, cmd, options['version'])
343 343 elif options['version']:
344 344 return commands.version_(ui)
345 345 elif not cmd:
346 346 return commands.help_(ui, 'shortlist')
347 347
348 348 repo = None
349 349 if cmd not in commands.norepo.split():
350 350 try:
351 351 repo = hg.repository(ui, path=path)
352 352 ui = repo.ui
353 353 if not repo.local():
354 354 raise util.Abort(_("repository '%s' is not local") % path)
355 355 ui.setconfig("bundle", "mainreporoot", repo.root)
356 356 except error.RepoError:
357 357 if cmd not in commands.optionalrepo.split():
358 358 if args and not path: # try to infer -R from command args
359 359 repos = map(_findrepo, args)
360 360 guess = repos[0]
361 361 if guess and repos.count(guess) == len(repos):
362 362 return _dispatch(ui, ['--repository', guess] + fullargs)
363 363 if not path:
364 364 raise error.RepoError(_("There is no Mercurial repository"
365 365 " here (.hg not found)"))
366 366 raise
367 367 args.insert(0, repo)
368 368 elif rpath:
369 369 ui.warn("warning: --repository ignored\n")
370 370
371 371 d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
372 372 return runcommand(lui, repo, cmd, fullargs, ui, options, d)
373 373
374 374 def _runcommand(ui, options, cmd, cmdfunc):
375 375 def checkargs():
376 376 try:
377 377 return cmdfunc()
378 378 except error.SignatureError:
379 379 raise error.ParseError(cmd, _("invalid arguments"))
380 380
381 381 if options['profile']:
382 382 import hotshot, hotshot.stats
383 383 prof = hotshot.Profile("hg.prof")
384 384 try:
385 385 try:
386 386 return prof.runcall(checkargs)
387 387 except:
388 388 try:
389 389 ui.warn(_('exception raised - generating '
390 390 'profile anyway\n'))
391 391 except:
392 392 pass
393 393 raise
394 394 finally:
395 395 prof.close()
396 396 stats = hotshot.stats.load("hg.prof")
397 397 stats.strip_dirs()
398 398 stats.sort_stats('time', 'calls')
399 399 stats.print_stats(40)
400 400 elif options['lsprof']:
401 401 try:
402 402 from mercurial import lsprof
403 403 except ImportError:
404 404 raise util.Abort(_(
405 405 'lsprof not available - install from '
406 406 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
407 407 p = lsprof.Profiler()
408 408 p.enable(subcalls=True)
409 409 try:
410 410 return checkargs()
411 411 finally:
412 412 p.disable()
413 413 stats = lsprof.Stats(p.getstats())
414 414 stats.sort()
415 415 stats.pprint(top=10, file=sys.stderr, climit=5)
416 416 else:
417 417 return checkargs()
@@ -1,221 +1,221 b''
1 1 # filemerge.py - file-level merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 from node import nullrev, short
8 from node import short
9 9 from i18n import _
10 10 import util, os, tempfile, simplemerge, re, filecmp
11 11
12 12 def _toolstr(ui, tool, part, default=""):
13 13 return ui.config("merge-tools", tool + "." + part, default)
14 14
15 15 def _toolbool(ui, tool, part, default=False):
16 16 return ui.configbool("merge-tools", tool + "." + part, default)
17 17
18 18 def _findtool(ui, tool):
19 19 if tool in ("internal:fail", "internal:local", "internal:other"):
20 20 return tool
21 21 k = _toolstr(ui, tool, "regkey")
22 22 if k:
23 23 p = util.lookup_reg(k, _toolstr(ui, tool, "regname"))
24 24 if p:
25 25 p = util.find_exe(p + _toolstr(ui, tool, "regappend"))
26 26 if p:
27 27 return p
28 28 return util.find_exe(_toolstr(ui, tool, "executable", tool))
29 29
30 30 def _picktool(repo, ui, path, binary, symlink):
31 31 def check(tool, pat, symlink, binary):
32 32 tmsg = tool
33 33 if pat:
34 34 tmsg += " specified for " + pat
35 35 if not _findtool(ui, tool):
36 36 if pat: # explicitly requested tool deserves a warning
37 37 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
38 38 else: # configured but non-existing tools are more silent
39 39 ui.note(_("couldn't find merge tool %s\n") % tmsg)
40 40 elif symlink and not _toolbool(ui, tool, "symlink"):
41 41 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
42 42 elif binary and not _toolbool(ui, tool, "binary"):
43 43 ui.warn(_("tool %s can't handle binary\n") % tmsg)
44 44 elif not util.gui() and _toolbool(ui, tool, "gui"):
45 45 ui.warn(_("tool %s requires a GUI\n") % tmsg)
46 46 else:
47 47 return True
48 48 return False
49 49
50 50 # HGMERGE takes precedence
51 51 hgmerge = os.environ.get("HGMERGE")
52 52 if hgmerge:
53 53 return (hgmerge, hgmerge)
54 54
55 55 # then patterns
56 56 for pat, tool in ui.configitems("merge-patterns"):
57 57 mf = util.matcher(repo.root, "", [pat], [], [])[1]
58 58 if mf(path) and check(tool, pat, symlink, False):
59 59 toolpath = _findtool(ui, tool)
60 60 return (tool, '"' + toolpath + '"')
61 61
62 62 # then merge tools
63 63 tools = {}
64 64 for k,v in ui.configitems("merge-tools"):
65 65 t = k.split('.')[0]
66 66 if t not in tools:
67 67 tools[t] = int(_toolstr(ui, t, "priority", "0"))
68 68 names = tools.keys()
69 69 tools = util.sort([(-p,t) for t,p in tools.items()])
70 70 uimerge = ui.config("ui", "merge")
71 71 if uimerge:
72 72 if uimerge not in names:
73 73 return (uimerge, uimerge)
74 74 tools.insert(0, (None, uimerge)) # highest priority
75 75 tools.append((None, "hgmerge")) # the old default, if found
76 76 for p,t in tools:
77 77 if check(t, None, symlink, binary):
78 78 toolpath = _findtool(ui, t)
79 79 return (t, '"' + toolpath + '"')
80 80 # internal merge as last resort
81 81 return (not (symlink or binary) and "internal:merge" or None, None)
82 82
83 83 def _eoltype(data):
84 84 "Guess the EOL type of a file"
85 85 if '\0' in data: # binary
86 86 return None
87 87 if '\r\n' in data: # Windows
88 88 return '\r\n'
89 89 if '\r' in data: # Old Mac
90 90 return '\r'
91 91 if '\n' in data: # UNIX
92 92 return '\n'
93 93 return None # unknown
94 94
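
The ordering of the checks matters: CRLF is tested before bare CR or LF, so a mixed file reports Windows endings. With the function above:

    print(repr(_eoltype('a\r\nb\r\n')))   # '\r\n' (Windows)
    print(repr(_eoltype('a\rb\r')))       # '\r'   (old Mac)
    print(repr(_eoltype('a\nb\n')))       # '\n'   (UNIX)
    print(repr(_eoltype('a\r\nb\n')))     # '\r\n' (mixed: CRLF wins)
    print(repr(_eoltype('\0binary')))     # None   (binary)
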
95 95 def _matcheol(file, origfile):
96 96 "Convert EOL markers in a file to match origfile"
97 97 tostyle = _eoltype(open(origfile, "rb").read())
98 98 if tostyle:
99 99 data = open(file, "rb").read()
100 100 style = _eoltype(data)
101 101 if style:
102 102 newdata = data.replace(style, tostyle)
103 103 if newdata != data:
104 104 open(file, "wb").write(newdata)
105 105
106 106 def filemerge(repo, mynode, orig, fcd, fco, fca):
107 107 """perform a 3-way merge in the working directory
108 108
109 109 mynode = parent node before merge
110 110 orig = original local filename before merge
111 111 fco = other file context
112 112 fca = ancestor file context
113 113 fcd = local file context for current/destination file
114 114 """
115 115
116 116 def temp(prefix, ctx):
117 117 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
118 118 (fd, name) = tempfile.mkstemp(prefix=pre)
119 119 data = repo.wwritedata(ctx.path(), ctx.data())
120 120 f = os.fdopen(fd, "wb")
121 121 f.write(data)
122 122 f.close()
123 123 return name
124 124
125 125 def isbin(ctx):
126 126 try:
127 127 return util.binary(ctx.data())
128 128 except IOError:
129 129 return False
130 130
131 131 if not fco.cmp(fcd.data()): # files identical?
132 132 return None
133 133
134 134 ui = repo.ui
135 135 fd = fcd.path()
136 136 binary = isbin(fcd) or isbin(fco) or isbin(fca)
137 137 symlink = 'l' in fcd.flags() + fco.flags()
138 138 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
139 139 ui.debug(_("picked tool '%s' for %s (binary %s symlink %s)\n") %
140 140 (tool, fd, binary, symlink))
141 141
142 142 if not tool:
143 143 tool = "internal:local"
144 144 if ui.prompt(_(" no tool found to merge %s\n"
145 145 "keep (l)ocal or take (o)ther?") % fd,
146 146 _("[lo]"), _("l")) != _("l"):
147 147 tool = "internal:other"
148 148 if tool == "internal:local":
149 149 return 0
150 150 if tool == "internal:other":
151 151 repo.wwrite(fd, fco.data(), fco.flags())
152 152 return 0
153 153 if tool == "internal:fail":
154 154 return 1
155 155
156 156 # do the actual merge
157 157 a = repo.wjoin(fd)
158 158 b = temp("base", fca)
159 159 c = temp("other", fco)
160 160 out = ""
161 161 back = a + ".orig"
162 162 util.copyfile(a, back)
163 163
164 164 if orig != fco.path():
165 165 repo.ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
166 166 else:
167 167 repo.ui.status(_("merging %s\n") % fd)
168 168
169 169 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcd, fco, fca))
170 170
171 171 # do we attempt to simplemerge first?
172 172 if _toolbool(ui, tool, "premerge", not (binary or symlink)):
173 173 r = simplemerge.simplemerge(a, b, c, quiet=True)
174 174 if not r:
175 175 ui.debug(_(" premerge successful\n"))
176 176 os.unlink(back)
177 177 os.unlink(b)
178 178 os.unlink(c)
179 179 return 0
180 180 util.copyfile(back, a) # restore from backup and try again
181 181
182 182 env = dict(HG_FILE=fd,
183 183 HG_MY_NODE=short(mynode),
184 184 HG_OTHER_NODE=str(fco.changectx()),
185 185 HG_MY_ISLINK='l' in fcd.flags(),
186 186 HG_OTHER_ISLINK='l' in fco.flags(),
187 187 HG_BASE_ISLINK='l' in fca.flags())
188 188
189 189 if tool == "internal:merge":
190 190 r = simplemerge.simplemerge(a, b, c, label=['local', 'other'])
191 191 else:
192 192 args = _toolstr(ui, tool, "args", '$local $base $other')
193 193 if "$output" in args:
194 194 out, a = a, back # read input from backup, write to original
195 195 replace = dict(local=a, base=b, other=c, output=out)
196 196 args = re.sub("\$(local|base|other|output)",
197 197 lambda x: '"%s"' % replace[x.group()[1:]], args)
198 198 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
199 199
200 200 if not r and _toolbool(ui, tool, "checkconflicts"):
201 201 if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()):
202 202 r = 1
203 203
204 204 if not r and _toolbool(ui, tool, "checkchanged"):
205 205 if filecmp.cmp(repo.wjoin(fd), back):
206 206 if ui.prompt(_(" output file %s appears unchanged\n"
207 207 "was merge successful (yn)?") % fd,
208 208 _("[yn]"), _("n")) != _("y"):
209 209 r = 1
210 210
211 211 if _toolbool(ui, tool, "fixeol"):
212 212 _matcheol(repo.wjoin(fd), back)
213 213
214 214 if r:
215 215 repo.ui.warn(_("merging %s failed!\n") % fd)
216 216 else:
217 217 os.unlink(back)
218 218
219 219 os.unlink(b)
220 220 os.unlink(c)
221 221 return r
@@ -1,76 +1,75 b''
1 1 # Revision graph generator for Mercurial
2 2 #
3 3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of
7 7 # the GNU General Public License, incorporated herein by reference.
8 8
9 from node import nullrev, short
10 import ui, hg, util, templatefilters
9 from node import nullrev
11 10
12 11 def graph(repo, start_rev, stop_rev):
13 12 """incremental revision grapher
14 13
15 14 This generator function walks through the revision history from
16 15 revision start_rev to revision stop_rev (which must be less than
17 16 or equal to start_rev) and for each revision emits tuples with the
18 17 following elements:
19 18
20 19 - Current node
21 20 - Column and color for the current node
22 21 - Edges; a list of (col, next_col, color) indicating the edges between
23 22 the current node and its parents.
24 23 - First line of the changeset description
25 24 - The changeset author
26 25 - The changeset date/time
27 26 """
28 27
29 28 if start_rev == nullrev and not stop_rev:
30 29 return
31 30
32 31 assert start_rev >= stop_rev
33 32 assert stop_rev >= 0
34 33 curr_rev = start_rev
35 34 revs = []
36 35 cl = repo.changelog
37 36 colors = {}
38 37 new_color = 1
39 38
40 39 while curr_rev >= stop_rev:
41 40 # Compute revs and next_revs
42 41 if curr_rev not in revs:
43 42 revs.append(curr_rev) # new head
44 43 colors[curr_rev] = new_color
45 44 new_color += 1
46 45
47 46 idx = revs.index(curr_rev)
48 47 color = colors.pop(curr_rev)
49 48 next = revs[:]
50 49
51 50 # Add parents to next_revs
52 51 parents = [x for x in cl.parentrevs(curr_rev) if x != nullrev]
53 52 addparents = [p for p in parents if p not in next]
54 53 next[idx:idx + 1] = addparents
55 54
56 55 # Set colors for the parents
57 56 for i, p in enumerate(addparents):
58 57 if not i:
59 58 colors[p] = color
60 59 else:
61 60 colors[p] = new_color
62 61 new_color += 1
63 62
64 63 # Add edges to the graph
65 64 edges = []
66 65 for col, r in enumerate(revs):
67 66 if r in next:
68 67 edges.append((col, next.index(r), colors[r]))
69 68 elif r == curr_rev:
70 69 for p in parents:
71 70 edges.append((col, next.index(p), colors[p]))
72 71
73 72 # Yield and move on
74 73 yield (repo[curr_rev], (idx, color), edges)
75 74 revs = next
76 75 curr_rev -= 1
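
The column/color bookkeeping does not actually need a repository; a stripped-down rendition over a plain parent table (all names here are invented) shows the shape of the yielded rows:

    nullrev = -1

    def toygraph(parentrevs, start_rev, stop_rev=0):
        # parentrevs: list indexed by rev -> (p1, p2)
        revs, colors, new_color = [], {}, 1
        for curr_rev in range(start_rev, stop_rev - 1, -1):
            if curr_rev not in revs:
                revs.append(curr_rev)           # new head
                colors[curr_rev] = new_color
                new_color += 1
            idx = revs.index(curr_rev)
            color = colors.pop(curr_rev)
            next = revs[:]
            parents = [p for p in parentrevs[curr_rev] if p != nullrev]
            addparents = [p for p in parents if p not in next]
            next[idx:idx + 1] = addparents
            for i, p in enumerate(addparents):  # first parent keeps the color
                if not i:
                    colors[p] = color
                else:
                    colors[p] = new_color
                    new_color += 1
            edges = []
            for col, r in enumerate(revs):
                if r in next:
                    edges.append((col, next.index(r), colors[r]))
                elif r == curr_rev:
                    for p in parents:
                        edges.append((col, next.index(p), colors[p]))
            yield curr_rev, (idx, color), edges
            revs = next

    # rev 3 merges two branches that fork at rev 0
    parents = [(-1, -1), (0, -1), (0, -1), (1, 2)]
    for row in toygraph(parents, 3):
        print(row)
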
@@ -1,313 +1,312 b''
1 1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 import os, mimetypes
10 from mercurial.node import hex, nullid
9 import os
11 10 from mercurial import ui, hg, util, hook, error
12 11 from mercurial import templater, templatefilters
13 12 from common import get_mtime, style_map, ErrorResponse
14 13 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
15 14 from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
16 15 from request import wsgirequest
17 16 import webcommands, protocol, webutil
18 17
19 18 perms = {
20 19 'changegroup': 'pull',
21 20 'changegroupsubset': 'pull',
22 21 'unbundle': 'push',
23 22 'stream_out': 'pull',
24 23 }
25 24
26 25 class hgweb(object):
27 26 def __init__(self, repo, name=None):
28 27 if isinstance(repo, str):
29 28 parentui = ui.ui(report_untrusted=False, interactive=False)
30 29 self.repo = hg.repository(parentui, repo)
31 30 else:
32 31 self.repo = repo
33 32
34 33 hook.redirect(True)
35 34 self.mtime = -1
36 35 self.reponame = name
37 36 self.archives = 'zip', 'gz', 'bz2'
38 37 self.stripecount = 1
39 38 # a repo owner may set web.templates in .hg/hgrc to get any file
40 39 # readable by the user running the CGI script
41 40 self.templatepath = self.config("web", "templates",
42 41 templater.templatepath(),
43 42 untrusted=False)
44 43
45 44 # The CGI scripts are often run by a user different from the repo owner.
46 45 # Trust the settings from the .hg/hgrc files by default.
47 46 def config(self, section, name, default=None, untrusted=True):
48 47 return self.repo.ui.config(section, name, default,
49 48 untrusted=untrusted)
50 49
51 50 def configbool(self, section, name, default=False, untrusted=True):
52 51 return self.repo.ui.configbool(section, name, default,
53 52 untrusted=untrusted)
54 53
55 54 def configlist(self, section, name, default=None, untrusted=True):
56 55 return self.repo.ui.configlist(section, name, default,
57 56 untrusted=untrusted)
58 57
59 58 def refresh(self):
60 59 mtime = get_mtime(self.repo.root)
61 60 if mtime != self.mtime:
62 61 self.mtime = mtime
63 62 self.repo = hg.repository(self.repo.ui, self.repo.root)
64 63 self.maxchanges = int(self.config("web", "maxchanges", 10))
65 64 self.stripecount = int(self.config("web", "stripes", 1))
66 65 self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
67 66 self.maxfiles = int(self.config("web", "maxfiles", 10))
68 67 self.allowpull = self.configbool("web", "allowpull", True)
69 68 self.encoding = self.config("web", "encoding", util._encoding)
70 69
71 70 def run(self):
72 71 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
73 72 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
74 73 import mercurial.hgweb.wsgicgi as wsgicgi
75 74 wsgicgi.launch(self)
76 75
77 76 def __call__(self, env, respond):
78 77 req = wsgirequest(env, respond)
79 78 return self.run_wsgi(req)
80 79
81 80 def run_wsgi(self, req):
82 81
83 82 self.refresh()
84 83
85 84 # process this if it's a protocol request
86 85 # protocol bits don't need to create any URLs
87 86 # and the clients always use the old URL structure
88 87
89 88 cmd = req.form.get('cmd', [''])[0]
90 89 if cmd and cmd in protocol.__all__:
91 90 try:
92 91 if cmd in perms:
93 92 try:
94 93 self.check_perm(req, perms[cmd])
95 94 except ErrorResponse, inst:
96 95 if cmd == 'unbundle':
97 96 req.drain()
98 97 raise
99 98 method = getattr(protocol, cmd)
100 99 return method(self.repo, req)
101 100 except ErrorResponse, inst:
102 101 req.respond(inst, protocol.HGTYPE)
103 102 if not inst.message:
104 103 return []
105 104 return '0\n%s\n' % inst.message,
106 105
107 106 # work with CGI variables to create coherent structure
108 107 # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
109 108
110 109 req.url = req.env['SCRIPT_NAME']
111 110 if not req.url.endswith('/'):
112 111 req.url += '/'
113 112 if 'REPO_NAME' in req.env:
114 113 req.url += req.env['REPO_NAME'] + '/'
115 114
116 115 if 'PATH_INFO' in req.env:
117 116 parts = req.env['PATH_INFO'].strip('/').split('/')
118 117 repo_parts = req.env.get('REPO_NAME', '').split('/')
119 118 if parts[:len(repo_parts)] == repo_parts:
120 119 parts = parts[len(repo_parts):]
121 120 query = '/'.join(parts)
122 121 else:
123 122 query = req.env['QUERY_STRING'].split('&', 1)[0]
124 123 query = query.split(';', 1)[0]
125 124
126 125 # translate user-visible url structure to internal structure
127 126
128 127 args = query.split('/', 2)
129 128 if 'cmd' not in req.form and args and args[0]:
130 129
131 130 cmd = args.pop(0)
132 131 style = cmd.rfind('-')
133 132 if style != -1:
134 133 req.form['style'] = [cmd[:style]]
135 134 cmd = cmd[style+1:]
136 135
137 136 # avoid accepting e.g. style parameter as command
138 137 if hasattr(webcommands, cmd):
139 138 req.form['cmd'] = [cmd]
140 139 else:
141 140 cmd = ''
142 141
143 142 if cmd == 'static':
144 143 req.form['file'] = ['/'.join(args)]
145 144 else:
146 145 if args and args[0]:
147 146 node = args.pop(0)
148 147 req.form['node'] = [node]
149 148 if args:
150 149 req.form['file'] = args
151 150
152 151 if cmd == 'archive':
153 152 fn = req.form['node'][0]
154 153 for type_, spec in self.archive_specs.iteritems():
155 154 ext = spec[2]
156 155 if fn.endswith(ext):
157 156 req.form['node'] = [fn[:-len(ext)]]
158 157 req.form['type'] = [type_]
159 158
160 159 # process the web interface request
161 160
162 161 try:
163 162 tmpl = self.templater(req)
164 163 ctype = tmpl('mimetype', encoding=self.encoding)
165 164 ctype = templater.stringify(ctype)
166 165
167 166 # check read permissions for non-static content
168 167 if cmd != 'static':
169 168 self.check_perm(req, None)
170 169
171 170 if cmd == '':
172 171 req.form['cmd'] = [tmpl.cache['default']]
173 172 cmd = req.form['cmd'][0]
174 173
175 174 if cmd not in webcommands.__all__:
176 175 msg = 'no such method: %s' % cmd
177 176 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
178 177 elif cmd == 'file' and 'raw' in req.form.get('style', []):
179 178 self.ctype = ctype
180 179 content = webcommands.rawfile(self, req, tmpl)
181 180 else:
182 181 content = getattr(webcommands, cmd)(self, req, tmpl)
183 182 req.respond(HTTP_OK, ctype)
184 183
185 184 return content
186 185
187 186 except error.LookupError, err:
188 187 req.respond(HTTP_NOT_FOUND, ctype)
189 188 msg = str(err)
190 189 if 'manifest' not in msg:
191 190 msg = 'revision not found: %s' % err.name
192 191 return tmpl('error', error=msg)
193 192 except (error.RepoError, error.RevlogError), inst:
194 193 req.respond(HTTP_SERVER_ERROR, ctype)
195 194 return tmpl('error', error=str(inst))
196 195 except ErrorResponse, inst:
197 196 req.respond(inst, ctype)
198 197 return tmpl('error', error=inst.message)
199 198
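run_wsgi above translates the user-visible URL into the same form fields the old query-string interface used: the first path segment may carry a 'style-command' pair, the next segment a node, and the rest a file path. A condensed sketch of just that translation (parseurl is a hypothetical name; the real code also validates the command against webcommands and special-cases 'static' and 'archive'):

    def parseurl(query):
        form = {}
        args = query.split('/', 2)
        if args and args[0]:
            cmd = args.pop(0)
            dash = cmd.rfind('-')
            if dash != -1:
                form['style'] = [cmd[:dash]]
                cmd = cmd[dash + 1:]
            form['cmd'] = [cmd]
            if args and args[0]:
                form['node'] = [args.pop(0)]
            if args:
                form['file'] = args
        return form

    print(parseurl('raw-file/tip/README'))
    # style ['raw'], cmd ['file'], node ['tip'], file ['README']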
200 199 def templater(self, req):
201 200
202 201 # determine scheme, port and server name
203 202 # this is needed to create absolute urls
204 203
205 204 proto = req.env.get('wsgi.url_scheme')
206 205 if proto == 'https':
207 206 proto = 'https'
208 207 default_port = "443"
209 208 else:
210 209 proto = 'http'
211 210 default_port = "80"
212 211
213 212 port = req.env["SERVER_PORT"]
214 213 port = port != default_port and (":" + port) or ""
215 214 urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
216 215 staticurl = self.config("web", "staticurl") or req.url + 'static/'
217 216 if not staticurl.endswith('/'):
218 217 staticurl += '/'
219 218
220 219 # some functions for the templater
221 220
222 221 def header(**map):
223 222 yield tmpl('header', encoding=self.encoding, **map)
224 223
225 224 def footer(**map):
226 225 yield tmpl("footer", **map)
227 226
228 227 def motd(**map):
229 228 yield self.config("web", "motd", "")
230 229
231 230 # figure out which style to use
232 231
233 232 vars = {}
234 233 style = self.config("web", "style", "paper")
235 234 if 'style' in req.form:
236 235 style = req.form['style'][0]
237 236 vars['style'] = style
238 237
239 238 start = req.url[-1] == '?' and '&' or '?'
240 239 sessionvars = webutil.sessionvars(vars, start)
241 240 mapfile = style_map(self.templatepath, style)
242 241
243 242 if not self.reponame:
244 243 self.reponame = (self.config("web", "name")
245 244 or req.env.get('REPO_NAME')
246 245 or req.url.strip('/') or self.repo.root)
247 246
248 247 # create the templater
249 248
250 249 tmpl = templater.templater(mapfile, templatefilters.filters,
251 250 defaults={"url": req.url,
252 251 "staticurl": staticurl,
253 252 "urlbase": urlbase,
254 253 "repo": self.reponame,
255 254 "header": header,
256 255 "footer": footer,
257 256 "motd": motd,
258 257 "sessionvars": sessionvars
259 258 })
260 259 return tmpl
261 260
262 261 def archivelist(self, nodeid):
263 262 allowed = self.configlist("web", "allow_archive")
264 263 for i, spec in self.archive_specs.iteritems():
265 264 if i in allowed or self.configbool("web", "allow" + i):
266 265 yield {"type" : i, "extension" : spec[2], "node" : nodeid}
267 266
268 267 archive_specs = {
269 268 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
270 269 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
271 270 'zip': ('application/zip', 'zip', '.zip', None),
272 271 }
273 272
274 273 def check_perm(self, req, op):
275 274 '''Check permission for operation based on request data (including
276 275 authentication info). Return if op allowed, else raise an ErrorResponse
277 276 exception.'''
278 277
279 278 user = req.env.get('REMOTE_USER')
280 279
281 280 deny_read = self.configlist('web', 'deny_read')
282 281 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
283 282 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
284 283
285 284 allow_read = self.configlist('web', 'allow_read')
286 285 result = (not allow_read) or (allow_read == ['*'])
287 286 if not (result or user in allow_read):
288 287 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
289 288
290 289 if op == 'pull' and not self.allowpull:
291 290 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
292 291 elif op == 'pull' or op is None: # op is None for interface requests
293 292 return
294 293
295 294 # enforce that you can only push using POST requests
296 295 if req.env['REQUEST_METHOD'] != 'POST':
297 296 msg = 'push requires POST request'
298 297 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
299 298
300 299 # require ssl by default for pushing, auth info cannot be sniffed
301 300 # and replayed
302 301 scheme = req.env.get('wsgi.url_scheme')
303 302 if self.configbool('web', 'push_ssl', True) and scheme != 'https':
304 303 raise ErrorResponse(HTTP_OK, 'ssl required')
305 304
306 305 deny = self.configlist('web', 'deny_push')
307 306 if deny and (not user or deny == ['*'] or user in deny):
308 307 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
309 308
310 309 allow = self.configlist('web', 'allow_push')
311 310 result = allow and (allow == ['*'] or user in allow)
312 311 if not result:
313 312 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
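check_perm applies the same allow/deny list pattern twice, once for reads and once for pushes; the difference is what an empty allow list means. A condensed sketch of the list logic (allowed is a hypothetical helper, shown with the read semantics):

    def allowed(user, allow, deny):
        # deny wins; '*' matches everyone; an empty allow list
        # means "allow all" for reads (for pushes, as above, an
        # empty allow_push denies instead)
        if deny and (not user or deny == ['*'] or user in deny):
            return False
        return not allow or allow == ['*'] or user in allow

    print(allowed('alice', ['alice', 'bob'], []))  # True
    print(allowed('mallory', [], ['*']))           # False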
@@ -1,659 +1,659 b''
1 1 #
2 2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, mimetypes, re, cgi, copy
9 9 import webutil
10 10 from mercurial import error, archival, templatefilters
11 from mercurial.node import short, hex, nullid
12 from mercurial.util import binary, datestr
11 from mercurial.node import short, hex
12 from mercurial.util import binary
13 13 from common import paritygen, staticfile, get_contact, ErrorResponse
14 14 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
15 15 from mercurial import graphmod, util
16 16
17 17 # __all__ is populated with the allowed commands. Be sure to add to it if
18 18 # you're adding a new command, or the new command won't work.
19 19
20 20 __all__ = [
21 21 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
22 22 'manifest', 'tags', 'summary', 'filediff', 'diff', 'annotate', 'filelog',
23 23 'archive', 'static', 'graph',
24 24 ]
25 25
26 26 def log(web, req, tmpl):
27 27 if 'file' in req.form and req.form['file'][0]:
28 28 return filelog(web, req, tmpl)
29 29 else:
30 30 return changelog(web, req, tmpl)
31 31
32 32 def rawfile(web, req, tmpl):
33 33 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
34 34 if not path:
35 35 content = manifest(web, req, tmpl)
36 36 req.respond(HTTP_OK, web.ctype)
37 37 return content
38 38
39 39 try:
40 40 fctx = webutil.filectx(web.repo, req)
41 41 except error.LookupError, inst:
42 42 try:
43 43 content = manifest(web, req, tmpl)
44 44 req.respond(HTTP_OK, web.ctype)
45 45 return content
46 46 except ErrorResponse:
47 47 raise inst
48 48
49 49 path = fctx.path()
50 50 text = fctx.data()
51 51 mt = mimetypes.guess_type(path)[0]
52 52 if mt is None:
53 53 mt = binary(text) and 'application/octet-stream' or 'text/plain'
54 54
55 55 req.respond(HTTP_OK, mt, path, len(text))
56 56 return [text]
57 57
58 58 def _filerevision(web, tmpl, fctx):
59 59 f = fctx.path()
60 60 text = fctx.data()
61 61 parity = paritygen(web.stripecount)
62 62
63 63 if binary(text):
64 64 mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
65 65 text = '(binary:%s)' % mt
66 66
67 67 def lines():
68 68 for lineno, t in enumerate(text.splitlines(1)):
69 69 yield {"line": t,
70 70 "lineid": "l%d" % (lineno + 1),
71 71 "linenumber": "% 6d" % (lineno + 1),
72 72 "parity": parity.next()}
73 73
74 74 return tmpl("filerevision",
75 75 file=f,
76 76 path=webutil.up(f),
77 77 text=lines(),
78 78 rev=fctx.rev(),
79 79 node=hex(fctx.node()),
80 80 author=fctx.user(),
81 81 date=fctx.date(),
82 82 desc=fctx.description(),
83 83 branch=webutil.nodebranchnodefault(fctx),
84 84 parent=webutil.parents(fctx),
85 85 child=webutil.children(fctx),
86 86 rename=webutil.renamelink(fctx),
87 87 permissions=fctx.manifest().flags(f))
88 88
89 89 def file(web, req, tmpl):
90 90 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
91 91 if not path:
92 92 return manifest(web, req, tmpl)
93 93 try:
94 94 return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
95 95 except error.LookupError, inst:
96 96 try:
97 97 return manifest(web, req, tmpl)
98 98 except ErrorResponse:
99 99 raise inst
100 100
101 101 def _search(web, tmpl, query):
102 102
103 103 def changelist(**map):
104 104 cl = web.repo.changelog
105 105 count = 0
106 106 qw = query.lower().split()
107 107
108 108 def revgen():
109 109 for i in xrange(len(cl) - 1, -1, -100):
110 110 l = []
111 111 for j in xrange(max(0, i - 99), i + 1):
112 112 ctx = web.repo[j]
113 113 l.append(ctx)
114 114 l.reverse()
115 115 for e in l:
116 116 yield e
117 117
118 118 for ctx in revgen():
119 119 miss = 0
120 120 for q in qw:
121 121 if not (q in ctx.user().lower() or
122 122 q in ctx.description().lower() or
123 123 q in " ".join(ctx.files()).lower()):
124 124 miss = 1
125 125 break
126 126 if miss:
127 127 continue
128 128
129 129 count += 1
130 130 n = ctx.node()
131 131 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
132 132 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
133 133
134 134 yield tmpl('searchentry',
135 135 parity=parity.next(),
136 136 author=ctx.user(),
137 137 parent=webutil.parents(ctx),
138 138 child=webutil.children(ctx),
139 139 changelogtag=showtags,
140 140 desc=ctx.description(),
141 141 date=ctx.date(),
142 142 files=files,
143 143 rev=ctx.rev(),
144 144 node=hex(n),
145 145 tags=webutil.nodetagsdict(web.repo, n),
146 146 inbranch=webutil.nodeinbranch(web.repo, ctx),
147 147 branches=webutil.nodebranchdict(web.repo, ctx))
148 148
149 149 if count >= web.maxchanges:
150 150 break
151 151
152 152 cl = web.repo.changelog
153 153 parity = paritygen(web.stripecount)
154 154
155 155 return tmpl('search',
156 156 query=query,
157 157 node=hex(cl.tip()),
158 158 entries=changelist,
159 159 archives=web.archivelist("tip"))
160 160
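revgen in _search walks the whole changelog newest-first, but reads it in forward batches for better changelog locality. The same traversal on plain integers, as a sketch (hypothetical name, batch size shrunk so the output stays readable):

    def backwards_in_batches(n, size=100):
        for i in range(n - 1, -1, -size):
            for j in reversed(range(max(0, i - size + 1), i + 1)):
                yield j

    print(list(backwards_in_batches(7, size=3)))
    # [6, 5, 4, 3, 2, 1, 0]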
161 161 def changelog(web, req, tmpl, shortlog = False):
162 162 if 'node' in req.form:
163 163 ctx = webutil.changectx(web.repo, req)
164 164 else:
165 165 if 'rev' in req.form:
166 166 hi = req.form['rev'][0]
167 167 else:
168 168 hi = len(web.repo) - 1
169 169 try:
170 170 ctx = web.repo[hi]
171 171 except error.RepoError:
172 172 return _search(web, tmpl, hi) # XXX redirect to 404 page?
173 173
174 174 def changelist(limit=0, **map):
175 175 cl = web.repo.changelog
176 176 l = [] # build a list in forward order for efficiency
177 177 for i in xrange(start, end):
178 178 ctx = web.repo[i]
179 179 n = ctx.node()
180 180 showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
181 181 files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
182 182
183 183 l.insert(0, {"parity": parity.next(),
184 184 "author": ctx.user(),
185 185 "parent": webutil.parents(ctx, i - 1),
186 186 "child": webutil.children(ctx, i + 1),
187 187 "changelogtag": showtags,
188 188 "desc": ctx.description(),
189 189 "date": ctx.date(),
190 190 "files": files,
191 191 "rev": i,
192 192 "node": hex(n),
193 193 "tags": webutil.nodetagsdict(web.repo, n),
194 194 "inbranch": webutil.nodeinbranch(web.repo, ctx),
195 195 "branches": webutil.nodebranchdict(web.repo, ctx)
196 196 })
197 197
198 198 if limit > 0:
199 199 l = l[:limit]
200 200
201 201 for e in l:
202 202 yield e
203 203
204 204 maxchanges = shortlog and web.maxshortchanges or web.maxchanges
205 205 cl = web.repo.changelog
206 206 count = len(cl)
207 207 pos = ctx.rev()
208 208 start = max(0, pos - maxchanges + 1)
209 209 end = min(count, start + maxchanges)
210 210 pos = end - 1
211 211 parity = paritygen(web.stripecount, offset=start-end)
212 212
213 213 changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx)
214 214
215 215 return tmpl(shortlog and 'shortlog' or 'changelog',
216 216 changenav=changenav,
217 217 node=hex(ctx.node()),
218 218 rev=pos, changesets=count,
219 219 entries=lambda **x: changelist(limit=0,**x),
220 220 latestentry=lambda **x: changelist(limit=1,**x),
221 221 archives=web.archivelist("tip"))
222 222
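The paging arithmetic above picks a window of at most maxchanges revisions ending at the requested one, then repositions pos at the window's end. The same computation isolated (hypothetical function name):

    def window(pos, pagelen, count):
        start = max(0, pos - pagelen + 1)
        end = min(count, start + pagelen)
        return start, end, end - 1   # end - 1 becomes the new pos

    print(window(5, 10, 100))    # (0, 10, 9): clipped at oldest rev
    print(window(99, 10, 100))   # (90, 100, 99): a full last page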
223 223 def shortlog(web, req, tmpl):
224 224 return changelog(web, req, tmpl, shortlog = True)
225 225
226 226 def changeset(web, req, tmpl):
227 227 ctx = webutil.changectx(web.repo, req)
228 228 showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
229 229 showbranch = webutil.nodebranchnodefault(ctx)
230 230
231 231 files = []
232 232 parity = paritygen(web.stripecount)
233 233 for f in ctx.files():
234 234 template = f in ctx and 'filenodelink' or 'filenolink'
235 235 files.append(tmpl(template,
236 236 node=ctx.hex(), file=f,
237 237 parity=parity.next()))
238 238
239 239 parity = paritygen(web.stripecount)
240 240 diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity)
241 241 return tmpl('changeset',
242 242 diff=diffs,
243 243 rev=ctx.rev(),
244 244 node=ctx.hex(),
245 245 parent=webutil.parents(ctx),
246 246 child=webutil.children(ctx),
247 247 changesettag=showtags,
248 248 changesetbranch=showbranch,
249 249 author=ctx.user(),
250 250 desc=ctx.description(),
251 251 date=ctx.date(),
252 252 files=files,
253 253 archives=web.archivelist(ctx.hex()),
254 254 tags=webutil.nodetagsdict(web.repo, ctx.node()),
255 255 branch=webutil.nodebranchnodefault(ctx),
256 256 inbranch=webutil.nodeinbranch(web.repo, ctx),
257 257 branches=webutil.nodebranchdict(web.repo, ctx))
258 258
259 259 rev = changeset
260 260
261 261 def manifest(web, req, tmpl):
262 262 ctx = webutil.changectx(web.repo, req)
263 263 path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
264 264 mf = ctx.manifest()
265 265 node = ctx.node()
266 266
267 267 files = {}
268 268 dirs = {}
269 269 parity = paritygen(web.stripecount)
270 270
271 271 if path and path[-1] != "/":
272 272 path += "/"
273 273 l = len(path)
274 274 abspath = "/" + path
275 275
276 276 for f, n in mf.iteritems():
277 277 if f[:l] != path:
278 278 continue
279 279 remain = f[l:]
280 280 elements = remain.split('/')
281 281 if len(elements) == 1:
282 282 files[remain] = f
283 283 else:
284 284 h = dirs # need to retain ref to dirs (root)
285 285 for elem in elements[0:-1]:
286 286 if elem not in h:
287 287 h[elem] = {}
288 288 h = h[elem]
289 289 if len(h) > 1:
290 290 break
291 291 h[None] = None # denotes files present
292 292
293 293 if mf and not files and not dirs:
294 294 raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)
295 295
296 296 def filelist(**map):
297 297 for f in util.sort(files):
298 298 full = files[f]
299 299
300 300 fctx = ctx.filectx(full)
301 301 yield {"file": full,
302 302 "parity": parity.next(),
303 303 "basename": f,
304 304 "date": fctx.date(),
305 305 "size": fctx.size(),
306 306 "permissions": mf.flags(full)}
307 307
308 308 def dirlist(**map):
309 309 for d in util.sort(dirs):
310 310
311 311 emptydirs = []
312 312 h = dirs[d]
313 313 while isinstance(h, dict) and len(h) == 1:
314 314 k,v = h.items()[0]
315 315 if v:
316 316 emptydirs.append(k)
317 317 h = v
318 318
319 319 path = "%s%s" % (abspath, d)
320 320 yield {"parity": parity.next(),
321 321 "path": path,
322 322 "emptydirs": "/".join(emptydirs),
323 323 "basename": d}
324 324
325 325 return tmpl("manifest",
326 326 rev=ctx.rev(),
327 327 node=hex(node),
328 328 path=abspath,
329 329 up=webutil.up(abspath),
330 330 upparity=parity.next(),
331 331 fentries=filelist,
332 332 dentries=dirlist,
333 333 archives=web.archivelist(hex(node)),
334 334 tags=webutil.nodetagsdict(web.repo, node),
335 335 inbranch=webutil.nodeinbranch(web.repo, ctx),
336 336 branches=webutil.nodebranchdict(web.repo, ctx))
337 337
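manifest() folds full manifest paths into one level of files plus a nested dirs tree, with a None key marking levels that directly contain files. A simplified sketch of that folding (it omits the early break the real loop takes once a directory holds more than one entry):

    files, dirs = {}, {}
    for f in ['README', 'src/a.c', 'src/b.c', 'doc/man/hg.1']:
        elements = f.split('/')
        if len(elements) == 1:
            files[f] = f
        else:
            h = dirs                       # keep a ref to the root
            for elem in elements[:-1]:
                h = h.setdefault(elem, {})
            h[None] = None                 # denotes files present
    print('%s %s' % (sorted(files), sorted(dirs)))
    # ['README'] ['doc', 'src']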
338 338 def tags(web, req, tmpl):
339 339 i = web.repo.tagslist()
340 340 i.reverse()
341 341 parity = paritygen(web.stripecount)
342 342
343 343 def entries(notip=False,limit=0, **map):
344 344 count = 0
345 345 for k, n in i:
346 346 if notip and k == "tip":
347 347 continue
348 348 if limit > 0 and count >= limit:
349 349 continue
350 350 count = count + 1
351 351 yield {"parity": parity.next(),
352 352 "tag": k,
353 353 "date": web.repo[n].date(),
354 354 "node": hex(n)}
355 355
356 356 return tmpl("tags",
357 357 node=hex(web.repo.changelog.tip()),
358 358 entries=lambda **x: entries(False,0, **x),
359 359 entriesnotip=lambda **x: entries(True,0, **x),
360 360 latestentry=lambda **x: entries(True,1, **x))
361 361
362 362 def summary(web, req, tmpl):
363 363 i = web.repo.tagslist()
364 364 i.reverse()
365 365
366 366 def tagentries(**map):
367 367 parity = paritygen(web.stripecount)
368 368 count = 0
369 369 for k, n in i:
370 370 if k == "tip": # skip tip
371 371 continue
372 372
373 373 count += 1
374 374 if count > 10: # limit to 10 tags
375 375 break
376 376
377 377 yield tmpl("tagentry",
378 378 parity=parity.next(),
379 379 tag=k,
380 380 node=hex(n),
381 381 date=web.repo[n].date())
382 382
383 383 def branches(**map):
384 384 parity = paritygen(web.stripecount)
385 385
386 386 b = web.repo.branchtags()
387 387 l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()]
388 388 for r,n,t in util.sort(l):
389 389 yield {'parity': parity.next(),
390 390 'branch': t,
391 391 'node': hex(n),
392 392 'date': web.repo[n].date()}
393 393
394 394 def changelist(**map):
395 395 parity = paritygen(web.stripecount, offset=start-end)
396 396 l = [] # build a list in forward order for efficiency
397 397 for i in xrange(start, end):
398 398 ctx = web.repo[i]
399 399 n = ctx.node()
400 400 hn = hex(n)
401 401
402 402 l.insert(0, tmpl(
403 403 'shortlogentry',
404 404 parity=parity.next(),
405 405 author=ctx.user(),
406 406 desc=ctx.description(),
407 407 date=ctx.date(),
408 408 rev=i,
409 409 node=hn,
410 410 tags=webutil.nodetagsdict(web.repo, n),
411 411 inbranch=webutil.nodeinbranch(web.repo, ctx),
412 412 branches=webutil.nodebranchdict(web.repo, ctx)))
413 413
414 414 yield l
415 415
416 416 cl = web.repo.changelog
417 417 count = len(cl)
418 418 start = max(0, count - web.maxchanges)
419 419 end = min(count, start + web.maxchanges)
420 420
421 421 return tmpl("summary",
422 422 desc=web.config("web", "description", "unknown"),
423 423 owner=get_contact(web.config) or "unknown",
424 424 lastchange=cl.read(cl.tip())[2],
425 425 tags=tagentries,
426 426 branches=branches,
427 427 shortlog=changelist,
428 428 node=hex(cl.tip()),
429 429 archives=web.archivelist("tip"))
430 430
431 431 def filediff(web, req, tmpl):
432 432 fctx, ctx = None, None
433 433 try:
434 434 fctx = webutil.filectx(web.repo, req)
435 435 except LookupError:
436 436 ctx = webutil.changectx(web.repo, req)
437 437 path = webutil.cleanpath(web.repo, req.form['file'][0])
438 438 if path not in ctx.files():
439 439 raise
440 440
441 441 if fctx is not None:
442 442 n = fctx.node()
443 443 path = fctx.path()
444 444 else:
445 445 n = ctx.node()
446 446 # path already defined in except clause
447 447
448 448 parity = paritygen(web.stripecount)
449 449 diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity)
450 450 rename = fctx and webutil.renamelink(fctx) or []
451 451 ctx = fctx and fctx or ctx
452 452 return tmpl("filediff",
453 453 file=path,
454 454 node=hex(n),
455 455 rev=ctx.rev(),
456 456 date=ctx.date(),
457 457 desc=ctx.description(),
458 458 author=ctx.user(),
459 459 rename=rename,
460 460 branch=webutil.nodebranchnodefault(ctx),
461 461 parent=webutil.parents(ctx),
462 462 child=webutil.children(ctx),
463 463 diff=diffs)
464 464
465 465 diff = filediff
466 466
467 467 def annotate(web, req, tmpl):
468 468 fctx = webutil.filectx(web.repo, req)
469 469 f = fctx.path()
470 470 parity = paritygen(web.stripecount)
471 471
472 472 def annotate(**map):
473 473 last = None
474 474 if binary(fctx.data()):
475 475 mt = (mimetypes.guess_type(fctx.path())[0]
476 476 or 'application/octet-stream')
477 477 lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
478 478 '(binary:%s)' % mt)])
479 479 else:
480 480 lines = enumerate(fctx.annotate(follow=True, linenumber=True))
481 481 for lineno, ((f, targetline), l) in lines:
482 482 fnode = f.filenode()
483 483
484 484 if last != fnode:
485 485 last = fnode
486 486
487 487 yield {"parity": parity.next(),
488 488 "node": hex(f.node()),
489 489 "rev": f.rev(),
490 490 "author": f.user(),
491 491 "desc": f.description(),
492 492 "file": f.path(),
493 493 "targetline": targetline,
494 494 "line": l,
495 495 "lineid": "l%d" % (lineno + 1),
496 496 "linenumber": "% 6d" % (lineno + 1)}
497 497
498 498 return tmpl("fileannotate",
499 499 file=f,
500 500 annotate=annotate,
501 501 path=webutil.up(f),
502 502 rev=fctx.rev(),
503 503 node=hex(fctx.node()),
504 504 author=fctx.user(),
505 505 date=fctx.date(),
506 506 desc=fctx.description(),
507 507 rename=webutil.renamelink(fctx),
508 508 branch=webutil.nodebranchnodefault(fctx),
509 509 parent=webutil.parents(fctx),
510 510 child=webutil.children(fctx),
511 511 permissions=fctx.manifest().flags(f))
512 512
513 513 def filelog(web, req, tmpl):
514 514
515 515 try:
516 516 fctx = webutil.filectx(web.repo, req)
517 517 f = fctx.path()
518 518 fl = fctx.filelog()
519 519 except error.LookupError:
520 520 f = webutil.cleanpath(web.repo, req.form['file'][0])
521 521 fl = web.repo.file(f)
522 522 numrevs = len(fl)
523 523 if not numrevs: # file doesn't exist at all
524 524 raise
525 525 rev = webutil.changectx(web.repo, req).rev()
526 526 first = fl.linkrev(0)
527 527 if rev < first: # current rev is from before file existed
528 528 raise
529 529 frev = numrevs - 1
530 530 while fl.linkrev(frev) > rev:
531 531 frev -= 1
532 532 fctx = web.repo.filectx(f, fl.linkrev(frev))
533 533
534 534 count = fctx.filerev() + 1
535 535 pagelen = web.maxshortchanges
536 536 start = max(0, fctx.filerev() - pagelen + 1) # first rev on this page
537 537 end = min(count, start + pagelen) # last rev on this page
538 538 parity = paritygen(web.stripecount, offset=start-end)
539 539
540 540 def entries(limit=0, **map):
541 541 l = []
542 542
543 543 repo = web.repo
544 544 for i in xrange(start, end):
545 545 iterfctx = fctx.filectx(i)
546 546
547 547 l.insert(0, {"parity": parity.next(),
548 548 "filerev": i,
549 549 "file": f,
550 550 "node": hex(iterfctx.node()),
551 551 "author": iterfctx.user(),
552 552 "date": iterfctx.date(),
553 553 "rename": webutil.renamelink(iterfctx),
554 554 "parent": webutil.parents(iterfctx),
555 555 "child": webutil.children(iterfctx),
556 556 "desc": iterfctx.description(),
557 557 "tags": webutil.nodetagsdict(repo, iterfctx.node()),
558 558 "branch": webutil.nodebranchnodefault(iterfctx),
559 559 "inbranch": webutil.nodeinbranch(repo, iterfctx),
560 560 "branches": webutil.nodebranchdict(repo, iterfctx)})
561 561
562 562 if limit > 0:
563 563 l = l[:limit]
564 564
565 565 for e in l:
566 566 yield e
567 567
568 568 nodefunc = lambda x: fctx.filectx(fileid=x)
569 569 nav = webutil.revnavgen(end - 1, pagelen, count, nodefunc)
570 570 return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav,
571 571 entries=lambda **x: entries(limit=0, **x),
572 572 latestentry=lambda **x: entries(limit=1, **x))
573 573
574 574
575 575 def archive(web, req, tmpl):
576 576 type_ = req.form.get('type', [None])[0]
577 577 allowed = web.configlist("web", "allow_archive")
578 578 key = req.form['node'][0]
579 579
580 580 if type_ not in web.archives:
581 581 msg = 'Unsupported archive type: %s' % type_
582 582 raise ErrorResponse(HTTP_NOT_FOUND, msg)
583 583
584 584 if not ((type_ in allowed or
585 585 web.configbool("web", "allow" + type_, False))):
586 586 msg = 'Archive type not allowed: %s' % type_
587 587 raise ErrorResponse(HTTP_FORBIDDEN, msg)
588 588
589 589 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
590 590 cnode = web.repo.lookup(key)
591 591 arch_version = key
592 592 if cnode == key or key == 'tip':
593 593 arch_version = short(cnode)
594 594 name = "%s-%s" % (reponame, arch_version)
595 595 mimetype, artype, extension, encoding = web.archive_specs[type_]
596 596 headers = [
597 597 ('Content-Type', mimetype),
598 598 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
599 599 ]
600 600 if encoding:
601 601 headers.append(('Content-Encoding', encoding))
602 602 req.header(headers)
603 603 req.respond(HTTP_OK)
604 604 archival.archive(web.repo, req, cnode, artype, prefix=name)
605 605 return []
606 606
607 607
608 608 def static(web, req, tmpl):
609 609 fname = req.form['file'][0]
610 610 # a repo owner may set web.static in .hg/hgrc to get any file
611 611 # readable by the user running the CGI script
612 612 static = web.config("web", "static", None, untrusted=False)
613 613 if not static:
614 614 tp = web.templatepath
615 615 if isinstance(tp, str):
616 616 tp = [tp]
617 617 static = [os.path.join(p, 'static') for p in tp]
618 618 return [staticfile(static, fname, req)]
619 619
620 620 def graph(web, req, tmpl):
621 621 rev = webutil.changectx(web.repo, req).rev()
622 622 bg_height = 39
623 623
624 624 revcount = 25
625 625 if 'revcount' in req.form:
626 626 revcount = int(req.form.get('revcount', [revcount])[0])
627 627 tmpl.defaults['sessionvars']['revcount'] = revcount
628 628
629 629 lessvars = copy.copy(tmpl.defaults['sessionvars'])
630 630 lessvars['revcount'] = revcount / 2
631 631 morevars = copy.copy(tmpl.defaults['sessionvars'])
632 632 morevars['revcount'] = revcount * 2
633 633
634 634 max_rev = len(web.repo) - 1
635 635 revcount = min(max_rev, revcount)
636 636 revnode = web.repo.changelog.node(rev)
637 637 revnode_hex = hex(revnode)
638 638 uprev = min(max_rev, rev + revcount)
639 639 downrev = max(0, rev - revcount)
640 640 count = len(web.repo)
641 641 changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)
642 642
643 643 tree = list(graphmod.graph(web.repo, rev, downrev))
644 644 canvasheight = (len(tree) + 1) * bg_height - 27
645 645 data = []
646 646 for i, (ctx, vtx, edges) in enumerate(tree):
647 647 node = short(ctx.node())
648 648 age = templatefilters.age(ctx.date())
649 649 desc = templatefilters.firstline(ctx.description())
650 650 desc = cgi.escape(desc)
651 651 user = cgi.escape(templatefilters.person(ctx.user()))
652 652 branch = ctx.branch()
653 653 branch = branch, web.repo.branchtags().get(branch) == ctx.node()
654 654 data.append((node, vtx, edges, desc, user, age, branch, ctx.tags()))
655 655
656 656 return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
657 657 lessvars=lessvars, morevars=morevars, downrev=downrev,
658 658 canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
659 659 node=revnode_hex, changenav=changenav)
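graph() sizes everything from the requested revision count: the less/more links halve and double it, and the canvas height follows from the number of rows the tree occupies. The height arithmetic isolated, assuming one row per revision (hypothetical function name; 39 and 27 are the template's row height and fixed padding, as above):

    def canvas_height(rev, downrev, bg_height=39):
        rows = rev - downrev + 1
        return (rows + 1) * bg_height - 27

    print(canvas_height(30, 5))  # 26 rows -> 1026 pixels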
@@ -1,1394 +1,1394 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from i18n import _
10 10 from node import hex, nullid, short
11 import base85, cmdutil, mdiff, util, revlog, diffhelpers, copies
12 import cStringIO, email.Parser, os, re, errno, math
11 import base85, cmdutil, mdiff, util, diffhelpers, copies
12 import cStringIO, email.Parser, os, re, math
13 13 import sys, tempfile, zlib
14 14
15 15 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 16
17 17 class PatchError(Exception):
18 18 pass
19 19
20 20 class NoHunks(PatchError):
21 21 pass
22 22
23 23 # helper functions
24 24
25 25 def copyfile(src, dst, basedir):
26 26 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
27 27 if os.path.exists(absdst):
28 28 raise util.Abort(_("cannot create %s: destination already exists") %
29 29 dst)
30 30
31 31 dstdir = os.path.dirname(absdst)
32 32 if dstdir and not os.path.isdir(dstdir):
33 33 try:
34 34 os.makedirs(dstdir)
35 35 except IOError:
36 36 raise util.Abort(
37 37 _("cannot create %s: unable to create destination directory")
38 38 % dst)
39 39
40 40 util.copyfile(abssrc, absdst)
41 41
42 42 # public functions
43 43
44 44 def extract(ui, fileobj):
45 45 '''extract patch from data read from fileobj.
46 46
47 47 patch can be a normal patch or contained in an email message.
48 48
49 49 return tuple (filename, message, user, date, branch, node, p1, p2).
50 50 Any item in the returned tuple can be None. If filename is None,
51 51 fileobj did not contain a patch. Caller must unlink filename when done.'''
52 52
53 53 # attempt to detect the start of a patch
54 54 # (this heuristic is borrowed from quilt)
55 55 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
56 56 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
57 57 r'(---|\*\*\*)[ \t])', re.MULTILINE)
58 58
59 59 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
60 60 tmpfp = os.fdopen(fd, 'w')
61 61 try:
62 62 msg = email.Parser.Parser().parse(fileobj)
63 63
64 64 subject = msg['Subject']
65 65 user = msg['From']
66 66 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
67 67 # should try to parse msg['Date']
68 68 date = None
69 69 nodeid = None
70 70 branch = None
71 71 parents = []
72 72
73 73 if subject:
74 74 if subject.startswith('[PATCH'):
75 75 pend = subject.find(']')
76 76 if pend >= 0:
77 77 subject = subject[pend+1:].lstrip()
78 78 subject = subject.replace('\n\t', ' ')
79 79 ui.debug('Subject: %s\n' % subject)
80 80 if user:
81 81 ui.debug('From: %s\n' % user)
82 82 diffs_seen = 0
83 83 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
84 84 message = ''
85 85 for part in msg.walk():
86 86 content_type = part.get_content_type()
87 87 ui.debug('Content-Type: %s\n' % content_type)
88 88 if content_type not in ok_types:
89 89 continue
90 90 payload = part.get_payload(decode=True)
91 91 m = diffre.search(payload)
92 92 if m:
93 93 hgpatch = False
94 94 ignoretext = False
95 95
96 96 ui.debug(_('found patch at byte %d\n') % m.start(0))
97 97 diffs_seen += 1
98 98 cfp = cStringIO.StringIO()
99 99 for line in payload[:m.start(0)].splitlines():
100 100 if line.startswith('# HG changeset patch'):
101 101 ui.debug(_('patch generated by hg export\n'))
102 102 hgpatch = True
103 103 # drop earlier commit message content
104 104 cfp.seek(0)
105 105 cfp.truncate()
106 106 subject = None
107 107 elif hgpatch:
108 108 if line.startswith('# User '):
109 109 user = line[7:]
110 110 ui.debug('From: %s\n' % user)
111 111 elif line.startswith("# Date "):
112 112 date = line[7:]
113 113 elif line.startswith("# Branch "):
114 114 branch = line[9:]
115 115 elif line.startswith("# Node ID "):
116 116 nodeid = line[10:]
117 117 elif line.startswith("# Parent "):
118 118 parents.append(line[10:])
119 119 elif line == '---' and gitsendmail:
120 120 ignoretext = True
121 121 if not line.startswith('# ') and not ignoretext:
122 122 cfp.write(line)
123 123 cfp.write('\n')
124 124 message = cfp.getvalue()
125 125 if tmpfp:
126 126 tmpfp.write(payload)
127 127 if not payload.endswith('\n'):
128 128 tmpfp.write('\n')
129 129 elif not diffs_seen and message and content_type == 'text/plain':
130 130 message += '\n' + payload
131 131 except:
132 132 tmpfp.close()
133 133 os.unlink(tmpname)
134 134 raise
135 135
136 136 if subject and not message.startswith(subject):
137 137 message = '%s\n%s' % (subject, message)
138 138 tmpfp.close()
139 139 if not diffs_seen:
140 140 os.unlink(tmpname)
141 141 return None, message, user, date, branch, None, None, None
142 142 p1 = parents and parents.pop(0) or None
143 143 p2 = parents and parents.pop(0) or None
144 144 return tmpname, message, user, date, branch, nodeid, p1, p2
145 145
146 146 GP_PATCH = 1 << 0 # we have to run patch
147 147 GP_FILTER = 1 << 1 # there's some copy/rename operation
148 148 GP_BINARY = 1 << 2 # there's a binary patch
149 149
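The GP_* values are independent bits, so a single integer can carry all three findings from a scan: readgitpatch() ORs them into dopatch and callers test membership with &. A tiny self-contained illustration (the constants are repeated so the snippet runs on its own):

    GP_PATCH = 1 << 0
    GP_FILTER = 1 << 1
    GP_BINARY = 1 << 2

    dopatch = 0
    dopatch |= GP_PATCH      # a plain hunk was seen
    dopatch |= GP_FILTER     # plus a copy/rename to pre-apply
    print(bool(dopatch & GP_FILTER))  # True
    print(bool(dopatch & GP_BINARY))  # False: no binary hunks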
150 150 class patchmeta:
151 151 """Patched file metadata
152 152
153 153 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
154 154 or COPY. 'path' is patched file path. 'oldpath' is set to the
155 155 origin file when 'op' is either COPY or RENAME, None otherwise. If
156 156 file mode is changed, 'mode' is a tuple (islink, isexec) where
157 157 'islink' is True if the file is a symlink and 'isexec' is True if
158 158 the file is executable. Otherwise, 'mode' is None.
159 159 """
160 160 def __init__(self, path):
161 161 self.path = path
162 162 self.oldpath = None
163 163 self.mode = None
164 164 self.op = 'MODIFY'
165 165 self.lineno = 0
166 166 self.binary = False
167 167
168 168 def setmode(self, mode):
169 169 islink = mode & 020000
170 170 isexec = mode & 0100
171 171 self.mode = (islink, isexec)
172 172
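setmode() unpacks a full git file mode such as 100755 or 120000: the symlink flag lives at octal 20000 and the owner-execute bit at octal 100. The same test with modern 0o literals (the code above uses the older 020000/0100 spelling):

    for mode in (0o100644, 0o100755, 0o120000):
        islink = bool(mode & 0o20000)
        isexec = bool(mode & 0o100)
        print('%o -> islink=%s isexec=%s' % (mode, islink, isexec))
    # 100644 -> islink=False isexec=False
    # 100755 -> islink=False isexec=True
    # 120000 -> islink=True isexec=False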
173 173 def readgitpatch(lr):
174 174 """extract git-style metadata about patches from <patchname>"""
175 175
176 176 # Filter patch for git information
177 177 gp = None
178 178 gitpatches = []
179 179 # Can have a git patch with only metadata, causing patch to complain
180 180 dopatch = 0
181 181
182 182 lineno = 0
183 183 for line in lr:
184 184 lineno += 1
185 185 if line.startswith('diff --git'):
186 186 m = gitre.match(line)
187 187 if m:
188 188 if gp:
189 189 gitpatches.append(gp)
190 190 src, dst = m.group(1, 2)
191 191 gp = patchmeta(dst)
192 192 gp.lineno = lineno
193 193 elif gp:
194 194 if line.startswith('--- '):
195 195 if gp.op in ('COPY', 'RENAME'):
196 196 dopatch |= GP_FILTER
197 197 gitpatches.append(gp)
198 198 gp = None
199 199 dopatch |= GP_PATCH
200 200 continue
201 201 if line.startswith('rename from '):
202 202 gp.op = 'RENAME'
203 203 gp.oldpath = line[12:].rstrip()
204 204 elif line.startswith('rename to '):
205 205 gp.path = line[10:].rstrip()
206 206 elif line.startswith('copy from '):
207 207 gp.op = 'COPY'
208 208 gp.oldpath = line[10:].rstrip()
209 209 elif line.startswith('copy to '):
210 210 gp.path = line[8:].rstrip()
211 211 elif line.startswith('deleted file'):
212 212 gp.op = 'DELETE'
213 213 # is the deleted file a symlink?
214 214 gp.setmode(int(line.rstrip()[-6:], 8))
215 215 elif line.startswith('new file mode '):
216 216 gp.op = 'ADD'
217 217 gp.setmode(int(line.rstrip()[-6:], 8))
218 218 elif line.startswith('new mode '):
219 219 gp.setmode(int(line.rstrip()[-6:], 8))
220 220 elif line.startswith('GIT binary patch'):
221 221 dopatch |= GP_BINARY
222 222 gp.binary = True
223 223 if gp:
224 224 gitpatches.append(gp)
225 225
226 226 if not gitpatches:
227 227 dopatch = GP_PATCH
228 228
229 229 return (dopatch, gitpatches)
230 230
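readgitpatch() keys everything off the 'diff --git a/... b/...' header, which gitre splits into source and destination paths; the rename/copy/mode lines that follow then refine the patchmeta record. The header match on its own (the pattern is repeated here, as a raw string, so the snippet is standalone):

    import re
    gitre = re.compile(r'diff --git a/(.*) b/(.*)')
    m = gitre.match('diff --git a/hgext/churn.py b/hgext/churn.py')
    print('%s -> %s' % m.group(1, 2))
    # hgext/churn.py -> hgext/churn.py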
231 231 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
232 232 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
233 233 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
234 234
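The two patterns accept both hunk header forms; in the short unified form '@@ -1 +1 @@' the length groups are simply absent and read_unified_hunk() below defaults them to 1. Checked directly (again repeated as a raw string):

    import re
    unidesc = re.compile(r'@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
    m = unidesc.match('@@ -453,6 +453,8 @@')
    print(m.group(1, 3, 4, 6))   # ('453', '6', '453', '8')
    m = unidesc.match('@@ -1 +1 @@')
    print(m.group(3, 6))         # (None, None) -> lena = lenb = 1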
235 235 class patchfile:
236 236 def __init__(self, ui, fname, opener, missing=False):
237 237 self.fname = fname
238 238 self.opener = opener
239 239 self.ui = ui
240 240 self.lines = []
241 241 self.exists = False
242 242 self.missing = missing
243 243 if not missing:
244 244 try:
245 245 self.lines = self.readlines(fname)
246 246 self.exists = True
247 247 except IOError:
248 248 pass
249 249 else:
250 250 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
251 251
252 252 self.hash = {}
253 253 self.dirty = 0
254 254 self.offset = 0
255 255 self.rej = []
256 256 self.fileprinted = False
257 257 self.printfile(False)
258 258 self.hunks = 0
259 259
260 260 def readlines(self, fname):
261 261 fp = self.opener(fname, 'r')
262 262 try:
263 263 return fp.readlines()
264 264 finally:
265 265 fp.close()
266 266
267 267 def writelines(self, fname, lines):
268 268 fp = self.opener(fname, 'w')
269 269 try:
270 270 fp.writelines(lines)
271 271 finally:
272 272 fp.close()
273 273
274 274 def unlink(self, fname):
275 275 os.unlink(fname)
276 276
277 277 def printfile(self, warn):
278 278 if self.fileprinted:
279 279 return
280 280 if warn or self.ui.verbose:
281 281 self.fileprinted = True
282 282 s = _("patching file %s\n") % self.fname
283 283 if warn:
284 284 self.ui.warn(s)
285 285 else:
286 286 self.ui.note(s)
287 287
288 288
289 289 def findlines(self, l, linenum):
290 290 # looks through the hash and finds candidate lines. The
291 291 # result is a list of line numbers sorted based on distance
292 292 # from linenum
293 293 def sorter(a, b):
294 294 vala = abs(a - linenum)
295 295 valb = abs(b - linenum)
296 296 return cmp(vala, valb)
297 297
298 298 try:
299 299 cand = self.hash[l]
300 300 except KeyError:
301 301 return []
302 302
303 303 if len(cand) > 1:
304 304 # resort our list of potentials forward then back.
305 305 cand.sort(sorter)
306 306 return cand
307 307
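findlines() returns every line number whose text matches, nearest-to-expected first, so the fuzz loop below tries the least disruptive placement first. The resort in modern key= form (the original uses a Python 2 cmp function):

    cand = [2, 40, 19]
    linenum = 20
    cand.sort(key=lambda x: abs(x - linenum))
    print(cand)  # [19, 2, 40]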
308 308 def hashlines(self):
309 309 self.hash = {}
310 310 for x in xrange(len(self.lines)):
311 311 s = self.lines[x]
312 312 self.hash.setdefault(s, []).append(x)
313 313
314 314 def write_rej(self):
315 315 # our rejects are a little different from patch(1). This always
316 316 # creates rejects in the same form as the original patch. A file
317 317 # header is inserted so that you can run the reject through patch again
318 318 # without having to type the filename.
319 319
320 320 if not self.rej:
321 321 return
322 322
323 323 fname = self.fname + ".rej"
324 324 self.ui.warn(
325 325 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
326 326 (len(self.rej), self.hunks, fname))
327 327
328 328 def rejlines():
329 329 base = os.path.basename(self.fname)
330 330 yield "--- %s\n+++ %s\n" % (base, base)
331 331 for x in self.rej:
332 332 for l in x.hunk:
333 333 yield l
334 334 if l[-1] != '\n':
335 335 yield "\n\ No newline at end of file\n"
336 336
337 337 self.writelines(fname, rejlines())
338 338
339 339 def write(self, dest=None):
340 340 if not self.dirty:
341 341 return
342 342 if not dest:
343 343 dest = self.fname
344 344 self.writelines(dest, self.lines)
345 345
346 346 def close(self):
347 347 self.write()
348 348 self.write_rej()
349 349
350 350 def apply(self, h, reverse):
351 351 if not h.complete():
352 352 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
353 353 (h.number, h.desc, len(h.a), h.lena, len(h.b),
354 354 h.lenb))
355 355
356 356 self.hunks += 1
357 357 if reverse:
358 358 h.reverse()
359 359
360 360 if self.missing:
361 361 self.rej.append(h)
362 362 return -1
363 363
364 364 if self.exists and h.createfile():
365 365 self.ui.warn(_("file %s already exists\n") % self.fname)
366 366 self.rej.append(h)
367 367 return -1
368 368
369 369 if isinstance(h, githunk):
370 370 if h.rmfile():
371 371 self.unlink(self.fname)
372 372 else:
373 373 self.lines[:] = h.new()
374 374 self.offset += len(h.new())
375 375 self.dirty = 1
376 376 return 0
377 377
378 378 # fast case first, no offsets, no fuzz
379 379 old = h.old()
380 380 # patch starts counting at 1 unless we are adding the file
381 381 if h.starta == 0:
382 382 start = 0
383 383 else:
384 384 start = h.starta + self.offset - 1
385 385 orig_start = start
386 386 if diffhelpers.testhunk(old, self.lines, start) == 0:
387 387 if h.rmfile():
388 388 self.unlink(self.fname)
389 389 else:
390 390 self.lines[start : start + h.lena] = h.new()
391 391 self.offset += h.lenb - h.lena
392 392 self.dirty = 1
393 393 return 0
394 394
395 395 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
396 396 self.hashlines()
397 397 if h.hunk[-1][0] != ' ':
398 398 # if the hunk tried to put something at the bottom of the file
399 399 # override the start line and use eof here
400 400 search_start = len(self.lines)
401 401 else:
402 402 search_start = orig_start
403 403
404 404 for fuzzlen in xrange(3):
405 405 for toponly in [ True, False ]:
406 406 old = h.old(fuzzlen, toponly)
407 407
408 408 cand = self.findlines(old[0][1:], search_start)
409 409 for l in cand:
410 410 if diffhelpers.testhunk(old, self.lines, l) == 0:
411 411 newlines = h.new(fuzzlen, toponly)
412 412 self.lines[l : l + len(old)] = newlines
413 413 self.offset += len(newlines) - len(old)
414 414 self.dirty = 1
415 415 if fuzzlen:
416 416 fuzzstr = "with fuzz %d " % fuzzlen
417 417 f = self.ui.warn
418 418 self.printfile(True)
419 419 else:
420 420 fuzzstr = ""
421 421 f = self.ui.note
422 422 offset = l - orig_start - fuzzlen
423 423 if offset == 1:
424 424 linestr = "line"
425 425 else:
426 426 linestr = "lines"
427 427 f(_("Hunk #%d succeeded at %d %s(offset %d %s).\n") %
428 428 (h.number, l+1, fuzzstr, offset, linestr))
429 429 return fuzzlen
430 430 self.printfile(True)
431 431 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
432 432 self.rej.append(h)
433 433 return -1
434 434
435 435 class hunk:
436 436 def __init__(self, desc, num, lr, context, create=False, remove=False):
437 437 self.number = num
438 438 self.desc = desc
439 439 self.hunk = [ desc ]
440 440 self.a = []
441 441 self.b = []
442 442 if context:
443 443 self.read_context_hunk(lr)
444 444 else:
445 445 self.read_unified_hunk(lr)
446 446 self.create = create
447 447 self.remove = remove and not create
448 448
449 449 def read_unified_hunk(self, lr):
450 450 m = unidesc.match(self.desc)
451 451 if not m:
452 452 raise PatchError(_("bad hunk #%d") % self.number)
453 453 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
454 454 if self.lena is None:
455 455 self.lena = 1
456 456 else:
457 457 self.lena = int(self.lena)
458 458 if self.lenb is None:
459 459 self.lenb = 1
460 460 else:
461 461 self.lenb = int(self.lenb)
462 462 self.starta = int(self.starta)
463 463 self.startb = int(self.startb)
464 464 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
465 465 # if we hit eof before finishing out the hunk, the last line will
466 466 # be zero length. Let's try to fix it up.
467 467 while len(self.hunk[-1]) == 0:
468 468 del self.hunk[-1]
469 469 del self.a[-1]
470 470 del self.b[-1]
471 471 self.lena -= 1
472 472 self.lenb -= 1
473 473
474 474 def read_context_hunk(self, lr):
475 475 self.desc = lr.readline()
476 476 m = contextdesc.match(self.desc)
477 477 if not m:
478 478 raise PatchError(_("bad hunk #%d") % self.number)
479 479 foo, self.starta, foo2, aend, foo3 = m.groups()
480 480 self.starta = int(self.starta)
481 481 if aend is None:
482 482 aend = self.starta
483 483 self.lena = int(aend) - self.starta
484 484 if self.starta:
485 485 self.lena += 1
486 486 for x in xrange(self.lena):
487 487 l = lr.readline()
488 488 if l.startswith('---'):
489 489 lr.push(l)
490 490 break
491 491 s = l[2:]
492 492 if l.startswith('- ') or l.startswith('! '):
493 493 u = '-' + s
494 494 elif l.startswith(' '):
495 495 u = ' ' + s
496 496 else:
497 497 raise PatchError(_("bad hunk #%d old text line %d") %
498 498 (self.number, x))
499 499 self.a.append(u)
500 500 self.hunk.append(u)
501 501
502 502 l = lr.readline()
503 503 if l.startswith('\ '):
504 504 s = self.a[-1][:-1]
505 505 self.a[-1] = s
506 506 self.hunk[-1] = s
507 507 l = lr.readline()
508 508 m = contextdesc.match(l)
509 509 if not m:
510 510 raise PatchError(_("bad hunk #%d") % self.number)
511 511 foo, self.startb, foo2, bend, foo3 = m.groups()
512 512 self.startb = int(self.startb)
513 513 if bend is None:
514 514 bend = self.startb
515 515 self.lenb = int(bend) - self.startb
516 516 if self.startb:
517 517 self.lenb += 1
518 518 hunki = 1
519 519 for x in xrange(self.lenb):
520 520 l = lr.readline()
521 521 if l.startswith('\ '):
522 522 s = self.b[-1][:-1]
523 523 self.b[-1] = s
524 524 self.hunk[hunki-1] = s
525 525 continue
526 526 if not l:
527 527 lr.push(l)
528 528 break
529 529 s = l[2:]
530 530 if l.startswith('+ ') or l.startswith('! '):
531 531 u = '+' + s
532 532 elif l.startswith(' '):
533 533 u = ' ' + s
534 534 elif len(self.b) == 0:
535 535 # this can happen when the hunk does not add any lines
536 536 lr.push(l)
537 537 break
538 538 else:
539 539 raise PatchError(_("bad hunk #%d new text line %d") %
540 540 (self.number, x))
541 541 self.b.append(s)
542 542 while True:
543 543 if hunki >= len(self.hunk):
544 544 h = ""
545 545 else:
546 546 h = self.hunk[hunki]
547 547 hunki += 1
548 548 if h == u:
549 549 break
550 550 elif h.startswith('-'):
551 551 continue
552 552 else:
553 553 self.hunk.insert(hunki-1, u)
554 554 break
555 555
556 556 if not self.a:
557 557 # this happens when lines were only added to the hunk
558 558 for x in self.hunk:
559 559 if x.startswith('-') or x.startswith(' '):
560 560 self.a.append(x)
561 561 if not self.b:
562 562 # this happens when lines were only deleted from the hunk
563 563 for x in self.hunk:
564 564 if x.startswith('+') or x.startswith(' '):
565 565 self.b.append(x[1:])
566 566 # @@ -start,len +start,len @@
567 567 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
568 568 self.startb, self.lenb)
569 569 self.hunk[0] = self.desc
570 570
571 571 def reverse(self):
572 572 self.create, self.remove = self.remove, self.create
573 573 origlena = self.lena
574 574 origstarta = self.starta
575 575 self.lena = self.lenb
576 576 self.starta = self.startb
577 577 self.lenb = origlena
578 578 self.startb = origstarta
579 579 self.a = []
580 580 self.b = []
581 581 # self.hunk[0] is the @@ description
582 582 for x in xrange(1, len(self.hunk)):
583 583 o = self.hunk[x]
584 584 if o.startswith('-'):
585 585 n = '+' + o[1:]
586 586 self.b.append(o[1:])
587 587 elif o.startswith('+'):
588 588 n = '-' + o[1:]
589 589 self.a.append(n)
590 590 else:
591 591 n = o
592 592 self.b.append(o[1:])
593 593 self.a.append(o)
594 594 self.hunk[x] = n
595 595
596 596 def fix_newline(self):
597 597 diffhelpers.fix_newline(self.hunk, self.a, self.b)
598 598
599 599 def complete(self):
600 600 return len(self.a) == self.lena and len(self.b) == self.lenb
601 601
602 602 def createfile(self):
603 603 return self.starta == 0 and self.lena == 0 and self.create
604 604
605 605 def rmfile(self):
606 606 return self.startb == 0 and self.lenb == 0 and self.remove
607 607
608 608 def fuzzit(self, l, fuzz, toponly):
609 609 # this removes context lines from the top and bottom of list 'l'. It
610 610 # checks the hunk to make sure only context lines are removed, and then
611 611 # returns a new shortened list of lines.
612 612 fuzz = min(fuzz, len(l)-1)
613 613 if fuzz:
614 614 top = 0
615 615 bot = 0
616 616 hlen = len(self.hunk)
617 617 for x in xrange(hlen-1):
618 618 # the hunk starts with the @@ line, so use x+1
619 619 if self.hunk[x+1][0] == ' ':
620 620 top += 1
621 621 else:
622 622 break
623 623 if not toponly:
624 624 for x in xrange(hlen-1):
625 625 if self.hunk[hlen-bot-1][0] == ' ':
626 626 bot += 1
627 627 else:
628 628 break
629 629
630 630 # top and bot now count context in the hunk
631 631 # adjust them if either one is short
632 632 context = max(top, bot, 3)
633 633 if bot < context:
634 634 bot = max(0, fuzz - (context - bot))
635 635 else:
636 636 bot = min(fuzz, bot)
637 637 if top < context:
638 638 top = max(0, fuzz - (context - top))
639 639 else:
640 640 top = min(fuzz, top)
641 641
642 642 return l[top:len(l)-bot]
643 643 return l
644 644
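fuzzit() shortens a hunk by shaving context lines off its ends, never touching '+'/'-' lines; with toponly it only shaves the top, which matters when a hunk anchors at end of file. A simplified sketch that skips the original's rebalancing of short context (hypothetical name; top_context/bot_context stand for the counted context runs):

    def fuzz_trim(lines, fuzz, top_context, bot_context, toponly=False):
        top = min(fuzz, top_context)
        bot = 0 if toponly else min(fuzz, bot_context)
        return lines[top:len(lines) - bot]

    old = [' a', ' b', '-c', ' d']
    print(fuzz_trim(old, 1, top_context=2, bot_context=1))
    # [' b', '-c']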
645 645 def old(self, fuzz=0, toponly=False):
646 646 return self.fuzzit(self.a, fuzz, toponly)
647 647
648 648 def newctrl(self):
649 649 res = []
650 650 for x in self.hunk:
651 651 c = x[0]
652 652 if c == ' ' or c == '+':
653 653 res.append(x)
654 654 return res
655 655
656 656 def new(self, fuzz=0, toponly=False):
657 657 return self.fuzzit(self.b, fuzz, toponly)
658 658
659 659 class githunk(object):
660 660 """A git hunk"""
661 661 def __init__(self, gitpatch):
662 662 self.gitpatch = gitpatch
663 663 self.text = None
664 664 self.hunk = []
665 665
666 666 def createfile(self):
667 667 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
668 668
669 669 def rmfile(self):
670 670 return self.gitpatch.op == 'DELETE'
671 671
672 672 def complete(self):
673 673 return self.text is not None
674 674
675 675 def new(self):
676 676 return [self.text]
677 677
678 678 class binhunk(githunk):
679 679 'A binary patch file. Only understands literals so far.'
680 680 def __init__(self, gitpatch):
681 681 super(binhunk, self).__init__(gitpatch)
682 682 self.hunk = ['GIT binary patch\n']
683 683
684 684 def extract(self, lr):
685 685 line = lr.readline()
686 686 self.hunk.append(line)
687 687 while line and not line.startswith('literal '):
688 688 line = lr.readline()
689 689 self.hunk.append(line)
690 690 if not line:
691 691 raise PatchError(_('could not extract binary patch'))
692 692 size = int(line[8:].rstrip())
693 693 dec = []
694 694 line = lr.readline()
695 695 self.hunk.append(line)
696 696 while len(line) > 1:
697 697 l = line[0]
698 698 if l <= 'Z' and l >= 'A':
699 699 l = ord(l) - ord('A') + 1
700 700 else:
701 701 l = ord(l) - ord('a') + 27
702 702 dec.append(base85.b85decode(line[1:-1])[:l])
703 703 line = lr.readline()
704 704 self.hunk.append(line)
705 705 text = zlib.decompress(''.join(dec))
706 706 if len(text) != size:
707 707 raise PatchError(_('binary patch is %d bytes, not %d') %
708 708 (len(text), size))
709 709 self.text = text
710 710
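The decode loop above relies on git's binary-literal framing: the first character of each data line encodes the decoded chunk length, 'A'-'Z' for 1-26 bytes and 'a'-'z' for 27-52. The mapping on its own:

    def chunklen(c):
        if 'A' <= c <= 'Z':
            return ord(c) - ord('A') + 1
        return ord(c) - ord('a') + 27

    print('%d %d %d' % (chunklen('A'), chunklen('Z'), chunklen('a')))
    # 1 26 27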
711 711 class symlinkhunk(githunk):
712 712 """A git symlink hunk"""
713 713 def __init__(self, gitpatch, hunk):
714 714 super(symlinkhunk, self).__init__(gitpatch)
715 715 self.hunk = hunk
716 716
717 717 def complete(self):
718 718 return True
719 719
720 720 def fix_newline(self):
721 721 return
722 722
723 723 def parsefilename(str):
724 724 # --- filename \t|space stuff
725 725 s = str[4:].rstrip('\r\n')
726 726 i = s.find('\t')
727 727 if i < 0:
728 728 i = s.find(' ')
729 729 if i < 0:
730 730 return s
731 731 return s[:i]
732 732
733 733 def selectfile(afile_orig, bfile_orig, hunk, strip, reverse):
734 734 def pathstrip(path, count=1):
735 735 pathlen = len(path)
736 736 i = 0
737 737 if count == 0:
738 738 return '', path.rstrip()
739 739 while count > 0:
740 740 i = path.find('/', i)
741 741 if i == -1:
742 742 raise PatchError(_("unable to strip away %d dirs from %s") %
743 743 (count, path))
744 744 i += 1
745 745 # consume '//' in the path
746 746 while i < pathlen - 1 and path[i] == '/':
747 747 i += 1
748 748 count -= 1
749 749 return path[:i].lstrip(), path[i:].rstrip()
750 750
751 751 nulla = afile_orig == "/dev/null"
752 752 nullb = bfile_orig == "/dev/null"
753 753 abase, afile = pathstrip(afile_orig, strip)
754 754 gooda = not nulla and util.lexists(afile)
755 755 bbase, bfile = pathstrip(bfile_orig, strip)
756 756 if afile == bfile:
757 757 goodb = gooda
758 758 else:
759 759 goodb = not nullb and os.path.exists(bfile)
760 760 createfunc = hunk.createfile
761 761 if reverse:
762 762 createfunc = hunk.rmfile
763 763 missing = not goodb and not gooda and not createfunc()
764 764 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
765 765 # diff is between a file and its backup. In this case, the original
766 766 # file should be patched (see original mpatch code).
767 767 isbackup = (abase == bbase and bfile.startswith(afile))
768 768 fname = None
769 769 if not missing:
770 770 if gooda and goodb:
771 771 fname = isbackup and afile or bfile
772 772 elif gooda:
773 773 fname = afile
774 774
775 775 if not fname:
776 776 if not nullb:
777 777 fname = isbackup and afile or bfile
778 778 elif not nulla:
779 779 fname = afile
780 780 else:
781 781 raise PatchError(_("undefined source and destination files"))
782 782
783 783 return fname, missing
784 784
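pathstrip() is what makes -p style strip counts work: it peels off 'count' leading components (collapsing '//' runs) and returns both halves so selectfile() can compare bases. A trimmed-down sketch without the original's PatchError on underflow:

    def pathstrip(path, count=1):
        i = 0
        while count > 0:
            i = path.find('/', i) + 1
            while i < len(path) - 1 and path[i] == '/':
                i += 1
            count -= 1
        return path[:i], path[i:]

    print('%r %r' % pathstrip('a/b/foo.c', 1))  # 'a/' 'b/foo.c'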
785 785 class linereader:
786 786 # simple class to allow pushing lines back into the input stream
787 787 def __init__(self, fp):
788 788 self.fp = fp
789 789 self.buf = []
790 790
791 791 def push(self, line):
792 792 if line is not None:
793 793 self.buf.append(line)
794 794
795 795 def readline(self):
796 796 if self.buf:
797 797 return self.buf.pop(0)
798 798 return self.fp.readline()
799 799
800 800 def __iter__(self):
801 801 while 1:
802 802 l = self.readline()
803 803 if not l:
804 804 break
805 805 yield l
806 806
807 807 def scangitpatch(lr, firstline):
808 808 """
809 809 Git patches can emit:
810 810 - rename a to b
811 811 - change b
812 812 - copy a to c
813 813 - change c
814 814
815 815 We cannot apply this sequence as-is: by the time the copy of 'a'
816 816 is applied, 'a' has already been renamed away, and we cannot copy
817 817 from 'b' instead because 'b' has already been changed. So we scan
818 818 the git patch for copy and rename commands first and perform the
819 819 copies ahead of time.
820 820 """
821 821 pos = 0
822 822 try:
823 823 pos = lr.fp.tell()
824 824 fp = lr.fp
825 825 except IOError:
826 826 fp = cStringIO.StringIO(lr.fp.read())
827 827 gitlr = linereader(fp)
828 828 gitlr.push(firstline)
829 829 (dopatch, gitpatches) = readgitpatch(gitlr)
830 830 fp.seek(pos)
831 831 return dopatch, gitpatches
832 832
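Pre-scanning means reading ahead and then rewinding, which only works on seekable input; the try/except above falls back to buffering the whole patch in memory when the stream is a pipe. The same pattern isolated (hypothetical helper; uses io.StringIO where the Python 2 code above uses cStringIO):

    import io

    def rescannable(fp):
        try:
            pos = fp.tell()              # seekable: remember position
        except (IOError, OSError):
            fp = io.StringIO(fp.read())  # pipe: buffer it all
            pos = 0
        return fp, pos

    fp, pos = rescannable(io.StringIO('diff --git a/x b/x\n'))
    fp.seek(pos)  # rescan from the remembered position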
833 833 def iterhunks(ui, fp, sourcefile=None):
834 834 """Read a patch and yield the following events:
835 835 - ("file", afile, bfile, firsthunk): select a new target file.
836 836 - ("hunk", hunk): a new hunk is ready to be applied, follows a
837 837 "file" event.
838 838 - ("git", gitchanges): current diff is in git format, gitchanges
839 839 maps filenames to gitpatch records. Unique event.
840 840 """
841 841 changed = {}
842 842 current_hunk = None
843 843 afile = ""
844 844 bfile = ""
845 845 state = None
846 846 hunknum = 0
847 847 emitfile = False
848 848 git = False
849 849
850 850 # our states
851 851 BFILE = 1
852 852 context = None
853 853 lr = linereader(fp)
854 854 dopatch = True
855 855 # gitworkdone is True if a git operation (copy, rename, ...) was
856 856 # performed already for the current file. Useful when the file
857 857 # section may have no hunk.
858 858 gitworkdone = False
859 859
860 860 while True:
861 861 newfile = False
862 862 x = lr.readline()
863 863 if not x:
864 864 break
865 865 if current_hunk:
866 866 if x.startswith('\ '):
867 867 current_hunk.fix_newline()
868 868 yield 'hunk', current_hunk
869 869 current_hunk = None
870 870 gitworkdone = False
871 871 if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
872 872 ((context or context == None) and x.startswith('***************')))):
873 873 try:
874 874 if context == None and x.startswith('***************'):
875 875 context = True
876 876 gpatch = changed.get(bfile)
877 877 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
878 878 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
879 879 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
880 880 if remove:
881 881 gpatch = changed.get(afile[2:])
882 882 if gpatch and gpatch.mode[0]:
883 883 current_hunk = symlinkhunk(gpatch, current_hunk)
884 884 except PatchError, err:
885 885 ui.debug(err)
886 886 current_hunk = None
887 887 continue
888 888 hunknum += 1
889 889 if emitfile:
890 890 emitfile = False
891 891 yield 'file', (afile, bfile, current_hunk)
892 892 elif state == BFILE and x.startswith('GIT binary patch'):
893 893 current_hunk = binhunk(changed[bfile])
894 894 hunknum += 1
895 895 if emitfile:
896 896 emitfile = False
897 897 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
898 898 current_hunk.extract(lr)
899 899 elif x.startswith('diff --git'):
900 900 # check for git diff, scanning the whole patch file if needed
901 901 m = gitre.match(x)
902 902 if m:
903 903 afile, bfile = m.group(1, 2)
904 904 if not git:
905 905 git = True
906 906 dopatch, gitpatches = scangitpatch(lr, x)
907 907 yield 'git', gitpatches
908 908 for gp in gitpatches:
909 909 changed[gp.path] = gp
910 910 # else error?
911 911 # copy/rename + modify should modify target, not source
912 912 gp = changed.get(bfile)
913 913 if gp and gp.op in ('COPY', 'DELETE', 'RENAME'):
914 914 afile = bfile
915 915 gitworkdone = True
916 916 newfile = True
917 917 elif x.startswith('---'):
918 918 # check for a unified diff
919 919 l2 = lr.readline()
920 920 if not l2.startswith('+++'):
921 921 lr.push(l2)
922 922 continue
923 923 newfile = True
924 924 context = False
925 925 afile = parsefilename(x)
926 926 bfile = parsefilename(l2)
927 927 elif x.startswith('***'):
928 928 # check for a context diff
929 929 l2 = lr.readline()
930 930 if not l2.startswith('---'):
931 931 lr.push(l2)
932 932 continue
933 933 l3 = lr.readline()
934 934 lr.push(l3)
935 935 if not l3.startswith("***************"):
936 936 lr.push(l2)
937 937 continue
938 938 newfile = True
939 939 context = True
940 940 afile = parsefilename(x)
941 941 bfile = parsefilename(l2)
942 942
943 943 if newfile:
944 944 emitfile = True
945 945 state = BFILE
946 946 hunknum = 0
947 947 if current_hunk:
948 948 if current_hunk.complete():
949 949 yield 'hunk', current_hunk
950 950 else:
951 951 raise PatchError(_("malformed patch %s %s") % (afile,
952 952 current_hunk.desc))
953 953
954 954 if hunknum == 0 and dopatch and not gitworkdone:
955 955 raise NoHunks
956 956
957 957 def applydiff(ui, fp, changed, strip=1, sourcefile=None, reverse=False):
958 958 """reads a patch from fp and tries to apply it. The dict 'changed' is
959 959 filled in with all of the filenames changed by the patch. Returns 0
960 960 for a clean patch, -1 if any rejects were found and 1 if there was
961 961 any fuzz."""
962 962
963 963 rejects = 0
964 964 err = 0
965 965 current_file = None
966 966 gitpatches = None
967 967 opener = util.opener(os.getcwd())
968 968
969 969 def closefile():
970 970 if not current_file:
971 971 return 0
972 972 current_file.close()
973 973 return len(current_file.rej)
974 974
975 975 for state, values in iterhunks(ui, fp, sourcefile):
976 976 if state == 'hunk':
977 977 if not current_file:
978 978 continue
979 979 current_hunk = values
980 980 ret = current_file.apply(current_hunk, reverse)
981 981 if ret >= 0:
982 982 changed.setdefault(current_file.fname, None)
983 983 if ret > 0:
984 984 err = 1
985 985 elif state == 'file':
986 986 rejects += closefile()
987 987 afile, bfile, first_hunk = values
988 988 try:
989 989 if sourcefile:
990 990 current_file = patchfile(ui, sourcefile, opener)
991 991 else:
992 992 current_file, missing = selectfile(afile, bfile, first_hunk,
993 993 strip, reverse)
994 994 current_file = patchfile(ui, current_file, opener, missing)
995 995 except PatchError, err:
996 996 ui.warn(str(err) + '\n')
997 997 current_file, current_hunk = None, None
998 998 rejects += 1
999 999 continue
1000 1000 elif state == 'git':
1001 1001 gitpatches = values
1002 1002 cwd = os.getcwd()
1003 1003 for gp in gitpatches:
1004 1004 if gp.op in ('COPY', 'RENAME'):
1005 1005 copyfile(gp.oldpath, gp.path, cwd)
1006 1006 changed[gp.path] = gp
1007 1007 else:
1008 1008 raise util.Abort(_('unsupported parser state: %s') % state)
1009 1009
1010 1010 rejects += closefile()
1011 1011
1012 1012 if rejects:
1013 1013 return -1
1014 1014 return err
1015 1015
1016 1016 def diffopts(ui, opts={}, untrusted=False):
1017 1017 def get(key, name=None, getter=ui.configbool):
1018 1018 return (opts.get(key) or
1019 1019 getter('diff', name or key, None, untrusted=untrusted))
1020 1020 return mdiff.diffopts(
1021 1021 text=opts.get('text'),
1022 1022 git=get('git'),
1023 1023 nodates=get('nodates'),
1024 1024 showfunc=get('show_function', 'showfunc'),
1025 1025 ignorews=get('ignore_all_space', 'ignorews'),
1026 1026 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1027 1027 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1028 1028 context=get('unified', getter=ui.config))
1029 1029
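Every option above falls back from the command-line opts to the [diff] section of the configuration, so an hgrc like the following (a sketch) makes git-style diffs with function names in hunk headers the default, while an explicit key in opts still wins:

    [diff]
    git = True
    showfunc = True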
1030 1030 def updatedir(ui, repo, patches, similarity=0):
1031 1031 '''Update dirstate after patch application according to metadata'''
1032 1032 if not patches:
1033 1033 return
1034 1034 copies = []
1035 1035 removes = {}
1036 1036 cfiles = patches.keys()
1037 1037 cwd = repo.getcwd()
1038 1038 if cwd:
1039 1039 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1040 1040 for f in patches:
1041 1041 gp = patches[f]
1042 1042 if not gp:
1043 1043 continue
1044 1044 if gp.op == 'RENAME':
1045 1045 copies.append((gp.oldpath, gp.path))
1046 1046 removes[gp.oldpath] = 1
1047 1047 elif gp.op == 'COPY':
1048 1048 copies.append((gp.oldpath, gp.path))
1049 1049 elif gp.op == 'DELETE':
1050 1050 removes[gp.path] = 1
1051 1051 for src, dst in copies:
1052 1052 repo.copy(src, dst)
1053 1053 removes = removes.keys()
1054 1054 if (not similarity) and removes:
1055 1055 repo.remove(util.sort(removes), True)
1056 1056 for f in patches:
1057 1057 gp = patches[f]
1058 1058 if gp and gp.mode:
1059 1059 islink, isexec = gp.mode
1060 1060 dst = repo.wjoin(gp.path)
1061 1061 # patch won't create empty files
1062 1062 if gp.op == 'ADD' and not os.path.exists(dst):
1063 1063 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1064 1064 repo.wwrite(gp.path, '', flags)
1065 1065 elif gp.op != 'DELETE':
1066 1066 util.set_flags(dst, islink, isexec)
1067 1067 cmdutil.addremove(repo, cfiles, similarity=similarity)
1068 1068 files = patches.keys()
1069 1069 files.extend([r for r in removes if r not in files])
1070 1070 return util.sort(files)
1071 1071
1072 1072 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1073 1073 """use <patcher> to apply <patchname> to the working directory.
1074 1074 returns whether patch was applied with fuzz factor."""
1075 1075
1076 1076 fuzz = False
1077 1077 if cwd:
1078 1078 args.append('-d %s' % util.shellquote(cwd))
1079 1079 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1080 1080 util.shellquote(patchname)))
1081 1081
1082 1082 for line in fp:
1083 1083 line = line.rstrip()
1084 1084 ui.note(line + '\n')
1085 1085 if line.startswith('patching file '):
1086 1086 pf = util.parse_patch_output(line)
1087 1087 printed_file = False
1088 1088 files.setdefault(pf, None)
1089 1089 elif line.find('with fuzz') >= 0:
1090 1090 fuzz = True
1091 1091 if not printed_file:
1092 1092 ui.warn(pf + '\n')
1093 1093 printed_file = True
1094 1094 ui.warn(line + '\n')
1095 1095 elif line.find('saving rejects to file') >= 0:
1096 1096 ui.warn(line + '\n')
1097 1097 elif line.find('FAILED') >= 0:
1098 1098 if not printed_file:
1099 1099 ui.warn(pf + '\n')
1100 1100 printed_file = True
1101 1101 ui.warn(line + '\n')
1102 1102 code = fp.close()
1103 1103 if code:
1104 1104 raise PatchError(_("patch command failed: %s") %
1105 1105 util.explain_exit(code)[0])
1106 1106 return fuzz
1107 1107
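The spawned command line has the shape

    <patcher> -d <cwd> -p<strip> < <patchname>

with '--binary' appended to args by patch() below on platforms where util.needbinarypatch() is true; fuzz, reject and failure messages are then scraped from the tool's output line by line.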
1108 1108 def internalpatch(patchobj, ui, strip, cwd, files={}):
1109 1109 """use builtin patch to apply <patchobj> to the working directory.
1110 1110 returns whether patch was applied with fuzz factor."""
1111 1111 try:
1112 1112 fp = file(patchobj, 'rb')
1113 1113 except TypeError:
1114 1114 fp = patchobj
1115 1115 if cwd:
1116 1116 curdir = os.getcwd()
1117 1117 os.chdir(cwd)
1118 1118 try:
1119 1119 ret = applydiff(ui, fp, files, strip=strip)
1120 1120 finally:
1121 1121 if cwd:
1122 1122 os.chdir(curdir)
1123 1123 if ret < 0:
1124 1124 raise PatchError
1125 1125 return ret > 0
1126 1126
1127 1127 def patch(patchname, ui, strip=1, cwd=None, files={}):
1128 1128 """apply <patchname> to the working directory.
1129 1129 returns whether patch was applied with fuzz factor."""
1130 1130 patcher = ui.config('ui', 'patch')
1131 1131 args = []
1132 1132 try:
1133 1133 if patcher:
1134 1134 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1135 1135 files)
1136 1136 else:
1137 1137 try:
1138 1138 return internalpatch(patchname, ui, strip, cwd, files)
1139 1139 except NoHunks:
1140 1140 patcher = util.find_exe('gpatch') or util.find_exe('patch') or 'patch'
1141 1141 ui.debug(_('no valid hunks found; trying with %r instead\n') %
1142 1142 patcher)
1143 1143 if util.needbinarypatch():
1144 1144 args.append('--binary')
1145 1145 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1146 1146 files)
1147 1147 except PatchError, err:
1148 1148 s = str(err)
1149 1149 if s:
1150 1150 raise util.Abort(s)
1151 1151 else:
1152 1152 raise util.Abort(_('patch failed to apply'))
1153 1153
1154 1154 def b85diff(to, tn):
1155 1155 '''print base85-encoded binary diff'''
1156 1156 def gitindex(text):
1157 1157 if not text:
1158 1158 return '0' * 40
1159 1159 l = len(text)
1160 1160 s = util.sha1('blob %d\0' % l)
1161 1161 s.update(text)
1162 1162 return s.hexdigest()
1163 1163
1164 1164 def fmtline(line):
1165 1165 l = len(line)
1166 1166 if l <= 26:
1167 1167 l = chr(ord('A') + l - 1)
1168 1168 else:
1169 1169 l = chr(l - 26 + ord('a') - 1)
1170 1170 return '%c%s\n' % (l, base85.b85encode(line, True))
1171 1171
1172 1172 def chunk(text, csize=52):
1173 1173 l = len(text)
1174 1174 i = 0
1175 1175 while i < l:
1176 1176 yield text[i:i+csize]
1177 1177 i += csize
1178 1178
1179 1179 tohash = gitindex(to)
1180 1180 tnhash = gitindex(tn)
1181 1181 if tohash == tnhash:
1182 1182 return ""
1183 1183
1184 1184 # TODO: deltas
1185 1185 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1186 1186 (tohash, tnhash, len(tn))]
1187 1187 for l in chunk(zlib.compress(tn)):
1188 1188 ret.append(fmtline(l))
1189 1189 ret.append('\n')
1190 1190 return ''.join(ret)
1191 1191
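gitindex above is exactly git's blob hashing: the SHA-1 of a 'blob <len>\0' header followed by the contents. A standalone restatement with hashlib:

    import hashlib

    def git_blob_id(data):
        # same scheme as `git hash-object`
        s = hashlib.sha1('blob %d\0' % len(data))
        s.update(data)
        return s.hexdigest()

    # git_blob_id('hello\n') == 'ce013625030ba8dba906f756967f9e9ca394464a',
    # matching `printf 'hello\n' | git hash-object --stdin`

The per-line prefix written by fmtline encodes the chunk length 1..52 as 'A'-'Z' and then 'a'-'z'.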
1192 1192 def _addmodehdr(header, omode, nmode):
1193 1193 if omode != nmode:
1194 1194 header.append('old mode %s\n' % omode)
1195 1195 header.append('new mode %s\n' % nmode)
1196 1196
1197 1197 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None):
1198 1198 '''yields diff of changes to files between two nodes, or node and
1199 1199 working directory.
1200 1200
1201 1201 if node1 is None, use first dirstate parent instead.
1202 1202 if node2 is None, compare node1 with working directory.'''
1203 1203
1204 1204 if opts is None:
1205 1205 opts = mdiff.defaultopts
1206 1206
1207 1207 if not node1:
1208 1208 node1 = repo.dirstate.parents()[0]
1209 1209
1210 1210 flcache = {}
1211 1211 def getfilectx(f, ctx):
1212 1212 flctx = ctx.filectx(f, filelog=flcache.get(f))
1213 1213 if f not in flcache:
1214 1214 flcache[f] = flctx._filelog
1215 1215 return flctx
1216 1216
1217 1217 ctx1 = repo[node1]
1218 1218 ctx2 = repo[node2]
1219 1219
1220 1220 if not changes:
1221 1221 changes = repo.status(ctx1, ctx2, match=match)
1222 1222 modified, added, removed = changes[:3]
1223 1223
1224 1224 if not modified and not added and not removed:
1225 1225 return
1226 1226
1227 1227 date1 = util.datestr(ctx1.date())
1228 1228 man1 = ctx1.manifest()
1229 1229
1230 1230 if repo.ui.quiet:
1231 1231 r = None
1232 1232 else:
1233 1233 hexfunc = repo.ui.debugflag and hex or short
1234 1234 r = [hexfunc(node) for node in [node1, node2] if node]
1235 1235
1236 1236 if opts.git:
1237 1237 copy, diverge = copies.copies(repo, ctx1, ctx2, repo[nullid])
1238 1238 for k, v in copy.items():
1239 1239 copy[v] = k
1240 1240
1241 1241 gone = {}
1242 1242 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1243 1243
1244 1244 for f in util.sort(modified + added + removed):
1245 1245 to = None
1246 1246 tn = None
1247 1247 dodiff = True
1248 1248 header = []
1249 1249 if f in man1:
1250 1250 to = getfilectx(f, ctx1).data()
1251 1251 if f not in removed:
1252 1252 tn = getfilectx(f, ctx2).data()
1253 1253 a, b = f, f
1254 1254 if opts.git:
1255 1255 if f in added:
1256 1256 mode = gitmode[ctx2.flags(f)]
1257 1257 if f in copy:
1258 1258 a = copy[f]
1259 1259 omode = gitmode[man1.flags(a)]
1260 1260 _addmodehdr(header, omode, mode)
1261 1261 if a in removed and a not in gone:
1262 1262 op = 'rename'
1263 1263 gone[a] = 1
1264 1264 else:
1265 1265 op = 'copy'
1266 1266 header.append('%s from %s\n' % (op, a))
1267 1267 header.append('%s to %s\n' % (op, f))
1268 1268 to = getfilectx(a, ctx1).data()
1269 1269 else:
1270 1270 header.append('new file mode %s\n' % mode)
1271 1271 if util.binary(tn):
1272 1272 dodiff = 'binary'
1273 1273 elif f in removed:
1274 1274 # have we already reported a copy above?
1275 1275 if f in copy and copy[f] in added and copy[copy[f]] == f:
1276 1276 dodiff = False
1277 1277 else:
1278 1278 header.append('deleted file mode %s\n' %
1279 1279 gitmode[man1.flags(f)])
1280 1280 else:
1281 1281 omode = gitmode[man1.flags(f)]
1282 1282 nmode = gitmode[ctx2.flags(f)]
1283 1283 _addmodehdr(header, omode, nmode)
1284 1284 if util.binary(to) or util.binary(tn):
1285 1285 dodiff = 'binary'
1286 1286 r = None
1287 1287 header.insert(0, mdiff.diffline(r, a, b, opts))
1288 1288 if dodiff:
1289 1289 if dodiff == 'binary':
1290 1290 text = b85diff(to, tn)
1291 1291 else:
1292 1292 text = mdiff.unidiff(to, date1,
1293 1293 # ctx2 date may be dynamic
1294 1294 tn, util.datestr(ctx2.date()),
1295 1295 a, b, r, opts=opts)
1296 1296 if header and (text or len(header) > 1):
1297 1297 yield ''.join(header)
1298 1298 if text:
1299 1299 yield text
1300 1300
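A hypothetical caller, streaming a git-style diff of the working directory against the first dirstate parent (repo and its ui assumed to exist):

    for chunk in diff(repo, opts=diffopts(repo.ui, {'git': True})):
        sys.stdout.write(chunk)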
1301 1301 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1302 1302 opts=None):
1303 1303 '''export changesets as hg patches.'''
1304 1304
1305 1305 total = len(revs)
1306 1306 revwidth = max([len(str(rev)) for rev in revs])
1307 1307
1308 1308 def single(rev, seqno, fp):
1309 1309 ctx = repo[rev]
1310 1310 node = ctx.node()
1311 1311 parents = [p.node() for p in ctx.parents() if p]
1312 1312 branch = ctx.branch()
1313 1313 if switch_parent:
1314 1314 parents.reverse()
1315 1315 prev = (parents and parents[0]) or nullid
1316 1316
1317 1317 if not fp:
1318 1318 fp = cmdutil.make_file(repo, template, node, total=total,
1319 1319 seqno=seqno, revwidth=revwidth,
1320 1320 mode='ab')
1321 1321 if fp != sys.stdout and hasattr(fp, 'name'):
1322 1322 repo.ui.note("%s\n" % fp.name)
1323 1323
1324 1324 fp.write("# HG changeset patch\n")
1325 1325 fp.write("# User %s\n" % ctx.user())
1326 1326 fp.write("# Date %d %d\n" % ctx.date())
1327 1327 if branch and (branch != 'default'):
1328 1328 fp.write("# Branch %s\n" % branch)
1329 1329 fp.write("# Node ID %s\n" % hex(node))
1330 1330 fp.write("# Parent %s\n" % hex(prev))
1331 1331 if len(parents) > 1:
1332 1332 fp.write("# Parent %s\n" % hex(parents[1]))
1333 1333 fp.write(ctx.description().rstrip())
1334 1334 fp.write("\n\n")
1335 1335
1336 1336 for chunk in diff(repo, prev, node, opts=opts):
1337 1337 fp.write(chunk)
1338 1338
1339 1339 for seqno, rev in enumerate(revs):
1340 1340 single(rev, seqno+1, fp)
1341 1341
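For a changeset with a single parent on the default branch, the emitted patch looks like this (field values hypothetical):

    # HG changeset patch
    # User Alice <alice@example.com>
    # Date 1234567890 0
    # Node ID <40 hex digits>
    # Parent <40 hex digits>
    <description>

    <diff chunks>

A '# Branch' line is added between Date and Node ID for non-default branches, and a second '# Parent' line for merges.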
1342 1342 def diffstatdata(lines):
1343 1343 filename, adds, removes = None, 0, 0
1344 1344 for line in lines:
1345 1345 if line.startswith('diff'):
1346 1346 if filename:
1347 1347 yield (filename, adds, removes)
1348 1348 # set numbers to 0 anyway when starting a new file
1349 1349 adds, removes = 0, 0
1350 1350 if line.startswith('diff --git'):
1351 1351 filename = gitre.search(line).group(1)
1352 1352 else:
1353 1353 # format: "diff -r ... -r ... file name"
1354 1354 filename = line.split(None, 5)[-1]
1355 1355 elif line.startswith('+') and not line.startswith('+++'):
1356 1356 adds += 1
1357 1357 elif line.startswith('-') and not line.startswith('---'):
1358 1358 removes += 1
1359 1359 if filename:
1360 1360 yield (filename, adds, removes)
1361 1361
1362 1362 def diffstat(lines, width=80):
1363 1363 output = []
1364 1364 stats = list(diffstatdata(lines))
1365 1365
1366 1366 maxtotal, maxname = 0, 0
1367 1367 totaladds, totalremoves = 0, 0
1368 1368 for filename, adds, removes in stats:
1369 1369 totaladds += adds
1370 1370 totalremoves += removes
1371 1371 maxname = max(maxname, len(filename))
1372 1372 maxtotal = max(maxtotal, adds+removes)
1373 1373
1374 1374 countwidth = len(str(maxtotal))
1375 1375 graphwidth = width - countwidth - maxname
1376 1376 if graphwidth < 10:
1377 1377 graphwidth = 10
1378 1378
1379 1379 factor = max(int(math.ceil(float(maxtotal) / graphwidth)), 1)
1380 1380
1381 1381 for filename, adds, removes in stats:
1382 1382 # If diffstat runs out of room it doesn't print anything, which
1383 1383 # isn't very useful, so always print at least one + or - if there
1384 1384 # were at least some changes
1385 1385 pluses = '+' * max(adds/factor, int(bool(adds)))
1386 1386 minuses = '-' * max(removes/factor, int(bool(removes)))
1387 1387 output.append(' %-*s | %*.d %s%s\n' % (maxname, filename, countwidth,
1388 1388 adds+removes, pluses, minuses))
1389 1389
1390 1390 if stats:
1391 1391 output.append(' %d files changed, %d insertions(+), %d deletions(-)\n'
1392 1392 % (len(stats), totaladds, totalremoves))
1393 1393
1394 1394 return ''.join(output)
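Fed a toy diff, the two functions above produce roughly this (a sketch):

    lines = ['diff -r 000000 -r 111111 foo.py', '+new', '+new', '-old']
    # diffstatdata(lines) yields ('foo.py', 2, 1), and diffstat renders:
    #  foo.py | 3 ++-
    #  1 files changed, 2 insertions(+), 1 deletions(-)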
@@ -1,81 +1,81 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 from node import bin, hex, nullid, nullrev
8 from node import bin, nullid, nullrev
9 9 import revlog, dirstate, struct, util, zlib
10 10
11 11 _pack = struct.pack
12 12 _unpack = struct.unpack
13 13 _compress = zlib.compress
14 14 _decompress = zlib.decompress
15 15 _sha = util.sha1
16 16
17 17 def parse_manifest(mfdict, fdict, lines):
18 18 for l in lines.splitlines():
19 19 f, n = l.split('\0')
20 20 if len(n) > 40:
21 21 fdict[f] = n[40:]
22 22 mfdict[f] = bin(n[:40])
23 23 else:
24 24 mfdict[f] = bin(n)
25 25
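The manifest text parsed above is one '<path>\0<40 hex digits>[flags]' record per line, anything past the node being a flags suffix such as 'x' or 'l'. A sketch:

    mfdict, fdict = {}, {}
    parse_manifest(mfdict, fdict,
                   'foo\x00' + '1' * 40 + '\n' +
                   'bar\x00' + '2' * 40 + 'x\n')
    # mfdict == {'foo': bin('1' * 40), 'bar': bin('2' * 40)}
    # fdict == {'bar': 'x'}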
26 26 def parse_index(data, inline):
27 27 indexformatng = revlog.indexformatng
28 28 s = struct.calcsize(indexformatng)
29 29 index = []
30 30 cache = None
31 31 nodemap = {nullid: nullrev}
32 32 n = off = 0
33 33 # if we're not using lazymap, always read the whole index
34 34 l = len(data) - s
35 35 append = index.append
36 36 if inline:
37 37 cache = (0, data)
38 38 while off <= l:
39 39 e = _unpack(indexformatng, data[off:off + s])
40 40 nodemap[e[7]] = n
41 41 append(e)
42 42 n += 1
43 43 if e[1] < 0:
44 44 break
45 45 off += e[1] + s
46 46 else:
47 47 while off <= l:
48 48 e = _unpack(indexformatng, data[off:off + s])
49 49 nodemap[e[7]] = n
50 50 append(e)
51 51 n += 1
52 52 off += s
53 53
54 54 e = list(index[0])
55 55 type = revlog.gettype(e[0])
56 56 e[0] = revlog.offset_type(0, type)
57 57 index[0] = tuple(e)
58 58
59 59 # add the magic null revision at -1
60 60 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
61 61
62 62 return index, nodemap, cache
63 63
64 64 def parse_dirstate(dmap, copymap, st):
65 65 parents = [st[:20], st[20: 40]]
66 66 # deref fields so they will be local in loop
67 67 e_size = struct.calcsize(dirstate._format)
68 68 pos1 = 40
69 69 l = len(st)
70 70
71 71 # the inner loop
72 72 while pos1 < l:
73 73 pos2 = pos1 + e_size
74 74 e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
75 75 pos1 = pos2 + e[4]
76 76 f = st[pos2:pos1]
77 77 if '\0' in f:
78 78 f, c = f.split('\0')
79 79 copymap[f] = c
80 80 dmap[f] = e[:4]
81 81 return parents
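After the two 20-byte parents at the start of the file, each dirstate record is a fixed '>cllll' header (state byte, mode, size, mtime, filename length) followed by that many bytes of filename; a '\0' inside the name separates the copy source. One record, with hypothetical values:

    import struct
    rec = struct.pack('>cllll', 'n', 0o644, 12, 1234567890,
                      len('foo.txt')) + 'foo.txt'
    # parse_dirstate stores dmap['foo.txt'] = ('n', 0o644, 12, 1234567890)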
@@ -1,42 +1,43 b''
1 1 # repo.py - repository base classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from i18n import _
10 import error
10 11
11 12 class repository(object):
12 13 def capable(self, name):
13 14 '''tell whether repo supports named capability.
14 15 return False if not supported.
15 16 if boolean capability, return True.
16 17 if string capability, return string.'''
17 18 if name in self.capabilities:
18 19 return True
19 20 name_eq = name + '='
20 21 for cap in self.capabilities:
21 22 if cap.startswith(name_eq):
22 23 return cap[len(name_eq):]
23 24 return False
24 25
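For example, against a peer advertising an illustrative capability list:

    # self.capabilities = ['lookup', 'unbundle=HG10GZ,HG10BZ,HG10UN']
    # capable('lookup')   -> True                    (boolean capability)
    # capable('unbundle') -> 'HG10GZ,HG10BZ,HG10UN'  (string capability)
    # capable('pushkey')  -> False                   (not advertised)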
25 26 def requirecap(self, name, purpose):
26 27 '''raise an exception if the given capability is not present'''
27 28 if not self.capable(name):
28 29 raise error.CapabilityError(
29 30 _('cannot %s; remote repository does not '
30 31 'support the %r capability') % (purpose, name))
31 32
32 33 def local(self):
33 34 return False
34 35
35 36 def cancopy(self):
36 37 return self.local()
37 38
38 39 def rjoin(self, path):
39 40 url = self.url()
40 41 if url.endswith('/'):
41 42 return url + path
42 43 return url + '/' + path
@@ -1,1360 +1,1361 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 from node import bin, hex, nullid, nullrev, short
13 # import stuff from node for others to import from revlog
14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
14 15 from i18n import _
15 16 import changegroup, errno, ancestor, mdiff, parsers
16 17 import struct, util, zlib, error
17 18
18 19 _pack = struct.pack
19 20 _unpack = struct.unpack
20 21 _compress = zlib.compress
21 22 _decompress = zlib.decompress
22 23 _sha = util.sha1
23 24
24 25 # revlog flags
25 26 REVLOGV0 = 0
26 27 REVLOGNG = 1
27 28 REVLOGNGINLINEDATA = (1 << 16)
28 29 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 30 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 31 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31 32
32 33 RevlogError = error.RevlogError
33 34 LookupError = error.LookupError
34 35
35 36 def getoffset(q):
36 37 return int(q >> 16)
37 38
38 39 def gettype(q):
39 40 return int(q & 0xFFFF)
40 41
41 42 def offset_type(offset, type):
42 43 return long(long(offset) << 16 | type)
43 44
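offset_type packs a file offset and a 16-bit type field into a single integer so both fit in the first slot of an index entry; getoffset and gettype invert it:

    # q = offset_type(4096, 1)   # (4096 << 16) | 1
    # getoffset(q) == 4096
    # gettype(q) == 1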
44 45 def hash(text, p1, p2):
45 46 """generate a hash from the given text and its parent hashes
46 47
47 48 This hash combines both the current file contents and its history
48 49 in a manner that makes it easy to distinguish nodes with the same
49 50 content in the revision graph.
50 51 """
51 52 l = [p1, p2]
52 53 l.sort()
53 54 s = _sha(l[0])
54 55 s.update(l[1])
55 56 s.update(text)
56 57 return s.digest()
57 58
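Equivalently, since the parents are sorted first, the node id can be restated directly with hashlib:

    import hashlib

    def nodeid(text, p1, p2):
        a, b = sorted([p1, p2])   # parent order must not matter
        return hashlib.sha1(a + b + text).digest()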
58 59 def compress(text):
59 60 """ generate a possibly-compressed representation of text """
60 61 if not text:
61 62 return ("", text)
62 63 l = len(text)
63 64 bin = None
64 65 if l < 44:
65 66 pass
66 67 elif l > 1000000:
67 68 # zlib makes an internal copy, thus doubling memory usage for
68 69 # large files, so lets do this in pieces
69 70 z = zlib.compressobj()
70 71 p = []
71 72 pos = 0
72 73 while pos < l:
73 74 pos2 = pos + 2**20
74 75 p.append(z.compress(text[pos:pos2]))
75 76 pos = pos2
76 77 p.append(z.flush())
77 78 if sum(map(len, p)) < l:
78 79 bin = "".join(p)
79 80 else:
80 81 bin = _compress(text)
81 82 if bin is None or len(bin) > l:
82 83 if text[0] == '\0':
83 84 return ("", text)
84 85 return ('u', text)
85 86 return ("", bin)
86 87
87 88 def decompress(bin):
88 89 """ decompress the given input """
89 90 if not bin:
90 91 return bin
91 92 t = bin[0]
92 93 if t == '\0':
93 94 return bin
94 95 if t == 'x':
95 96 return _decompress(bin)
96 97 if t == 'u':
97 98 return bin[1:]
98 99 raise RevlogError(_("unknown compression type %r") % t)
99 100
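The envelope is a single leading byte that decompress dispatches on: '\0' for plain text that happens to start with NUL, 'x' for a zlib stream (zlib output naturally begins with 'x'), and 'u' for text stored uncompressed. Some concrete cases:

    # compress('')       -> ('', '')         # empty text
    # compress('short')  -> ('u', 'short')   # < 44 bytes: stored as-is
    # compress(big_text) -> ('', zlib_data)  # when compression pays off
    # decompress(''.join(compress(t))) == t  # round-trips for any t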
100 101 class lazyparser(object):
101 102 """
102 103 this class avoids the need to parse the entirety of large indices
103 104 """
104 105
105 106 # lazyparser is not safe to use on windows if win32 extensions are
106 107 # not available. it keeps the file handle open, which makes it
107 108 # impossible to break hardlinks on local cloned repos.
108 109
109 110 def __init__(self, dataf, size):
110 111 self.dataf = dataf
111 112 self.s = struct.calcsize(indexformatng)
112 113 self.datasize = size
113 114 self.l = size/self.s
114 115 self.index = [None] * self.l
115 116 self.map = {nullid: nullrev}
116 117 self.allmap = 0
117 118 self.all = 0
118 119 self.mapfind_count = 0
119 120
120 121 def loadmap(self):
121 122 """
122 123 during a commit, we need to make sure the rev being added is
123 124 not a duplicate. This requires loading the entire index,
124 125 which is fairly slow. loadmap can load up just the node map,
125 126 which takes much less time.
126 127 """
127 128 if self.allmap:
128 129 return
129 130 end = self.datasize
130 131 self.allmap = 1
131 132 cur = 0
132 133 count = 0
133 134 blocksize = self.s * 256
134 135 self.dataf.seek(0)
135 136 while cur < end:
136 137 data = self.dataf.read(blocksize)
137 138 off = 0
138 139 for x in xrange(256):
139 140 n = data[off + ngshaoffset:off + ngshaoffset + 20]
140 141 self.map[n] = count
141 142 count += 1
142 143 if count >= self.l:
143 144 break
144 145 off += self.s
145 146 cur += blocksize
146 147
147 148 def loadblock(self, blockstart, blocksize, data=None):
148 149 if self.all:
149 150 return
150 151 if data is None:
151 152 self.dataf.seek(blockstart)
152 153 if blockstart + blocksize > self.datasize:
153 154 # the revlog may have grown since we've started running,
154 155 # but we don't have space in self.index for more entries.
155 156 # limit blocksize so that we don't get too much data.
156 157 blocksize = max(self.datasize - blockstart, 0)
157 158 data = self.dataf.read(blocksize)
158 159 lend = len(data) / self.s
159 160 i = blockstart / self.s
160 161 off = 0
161 162 # lazyindex supports __delitem__
162 163 if lend > len(self.index) - i:
163 164 lend = len(self.index) - i
164 165 for x in xrange(lend):
165 166 if self.index[i + x] == None:
166 167 b = data[off : off + self.s]
167 168 self.index[i + x] = b
168 169 n = b[ngshaoffset:ngshaoffset + 20]
169 170 self.map[n] = i + x
170 171 off += self.s
171 172
172 173 def findnode(self, node):
173 174 """search backwards through the index file for a specific node"""
174 175 if self.allmap:
175 176 return None
176 177
177 178 # hg log will cause many many searches for the manifest
178 179 # nodes. After we get called a few times, just load the whole
179 180 # thing.
180 181 if self.mapfind_count > 8:
181 182 self.loadmap()
182 183 if node in self.map:
183 184 return node
184 185 return None
185 186 self.mapfind_count += 1
186 187 last = self.l - 1
187 188 while self.index[last] != None:
188 189 if last == 0:
189 190 self.all = 1
190 191 self.allmap = 1
191 192 return None
192 193 last -= 1
193 194 end = (last + 1) * self.s
194 195 blocksize = self.s * 256
195 196 while end >= 0:
196 197 start = max(end - blocksize, 0)
197 198 self.dataf.seek(start)
198 199 data = self.dataf.read(end - start)
199 200 findend = end - start
200 201 while True:
201 202 # we're searching backwards, so we have to make sure
202 203 # we don't find a changeset where this node is a parent
203 204 off = data.find(node, 0, findend)
204 205 findend = off
205 206 if off >= 0:
206 207 i = off / self.s
207 208 off = i * self.s
208 209 n = data[off + ngshaoffset:off + ngshaoffset + 20]
209 210 if n == node:
210 211 self.map[n] = i + start / self.s
211 212 return node
212 213 else:
213 214 break
214 215 end -= blocksize
215 216 return None
216 217
217 218 def loadindex(self, i=None, end=None):
218 219 if self.all:
219 220 return
220 221 all = False
221 222 if i == None:
222 223 blockstart = 0
223 224 blocksize = (65536 / self.s) * self.s
224 225 end = self.datasize
225 226 all = True
226 227 else:
227 228 if end:
228 229 blockstart = i * self.s
229 230 end = end * self.s
230 231 blocksize = end - blockstart
231 232 else:
232 233 blockstart = (i & ~1023) * self.s
233 234 blocksize = self.s * 1024
234 235 end = blockstart + blocksize
235 236 while blockstart < end:
236 237 self.loadblock(blockstart, blocksize)
237 238 blockstart += blocksize
238 239 if all:
239 240 self.all = True
240 241
241 242 class lazyindex(object):
242 243 """a lazy version of the index array"""
243 244 def __init__(self, parser):
244 245 self.p = parser
245 246 def __len__(self):
246 247 return len(self.p.index)
247 248 def load(self, pos):
248 249 if pos < 0:
249 250 pos += len(self.p.index)
250 251 self.p.loadindex(pos)
251 252 return self.p.index[pos]
252 253 def __getitem__(self, pos):
253 254 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
254 255 def __setitem__(self, pos, item):
255 256 self.p.index[pos] = _pack(indexformatng, *item)
256 257 def __delitem__(self, pos):
257 258 del self.p.index[pos]
258 259 def insert(self, pos, e):
259 260 self.p.index.insert(pos, _pack(indexformatng, *e))
260 261 def append(self, e):
261 262 self.p.index.append(_pack(indexformatng, *e))
262 263
263 264 class lazymap(object):
264 265 """a lazy version of the node map"""
265 266 def __init__(self, parser):
266 267 self.p = parser
267 268 def load(self, key):
268 269 n = self.p.findnode(key)
269 270 if n == None:
270 271 raise KeyError(key)
271 272 def __contains__(self, key):
272 273 if key in self.p.map:
273 274 return True
274 275 self.p.loadmap()
275 276 return key in self.p.map
276 277 def __iter__(self):
277 278 yield nullid
278 279 for i in xrange(self.p.l):
279 280 ret = self.p.index[i]
280 281 if not ret:
281 282 self.p.loadindex(i)
282 283 ret = self.p.index[i]
283 284 if isinstance(ret, str):
284 285 ret = _unpack(indexformatng, ret)
285 286 yield ret[7]
286 287 def __getitem__(self, key):
287 288 try:
288 289 return self.p.map[key]
289 290 except KeyError:
290 291 try:
291 292 self.load(key)
292 293 return self.p.map[key]
293 294 except KeyError:
294 295 raise KeyError("node " + hex(key))
295 296 def __setitem__(self, key, val):
296 297 self.p.map[key] = val
297 298 def __delitem__(self, key):
298 299 del self.p.map[key]
299 300
300 301 indexformatv0 = ">4l20s20s20s"
301 302 v0shaoffset = 56
302 303
303 304 class revlogoldio(object):
304 305 def __init__(self):
305 306 self.size = struct.calcsize(indexformatv0)
306 307
307 308 def parseindex(self, fp, inline):
308 309 s = self.size
309 310 index = []
310 311 nodemap = {nullid: nullrev}
311 312 n = off = 0
312 313 data = fp.read()
313 314 l = len(data)
314 315 while off + s <= l:
315 316 cur = data[off:off + s]
316 317 off += s
317 318 e = _unpack(indexformatv0, cur)
318 319 # transform to revlogv1 format
319 320 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
320 321 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
321 322 index.append(e2)
322 323 nodemap[e[6]] = n
323 324 n += 1
324 325
325 326 return index, nodemap, None
326 327
327 328 def packentry(self, entry, node, version, rev):
328 329 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
329 330 node(entry[5]), node(entry[6]), entry[7])
330 331 return _pack(indexformatv0, *e2)
331 332
332 333 # index ng:
333 334 # 6 bytes offset
334 335 # 2 bytes flags
335 336 # 4 bytes compressed length
336 337 # 4 bytes uncompressed length
337 338 # 4 bytes: base rev
338 339 # 4 bytes link rev
339 340 # 4 bytes parent 1 rev
340 341 # 4 bytes parent 2 rev
341 342 # 32 bytes: nodeid
342 343 indexformatng = ">Qiiiiii20s12x"
343 344 ngshaoffset = 32
344 345 versionformat = ">I"
345 346
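Each v1 entry is therefore a fixed 64 bytes, with the nodeid starting at byte 32 (hence ngshaoffset = 32):

    import struct
    assert struct.calcsize('>Qiiiiii20s12x') == 8 + 6 * 4 + 20 + 12  # 64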
346 347 class revlogio(object):
347 348 def __init__(self):
348 349 self.size = struct.calcsize(indexformatng)
349 350
350 351 def parseindex(self, fp, inline):
351 352 try:
352 353 size = util.fstat(fp).st_size
353 354 except AttributeError:
354 355 size = 0
355 356
356 357 if util.openhardlinks() and not inline and size > 1000000:
357 358 # big index, let's parse it on demand
358 359 parser = lazyparser(fp, size)
359 360 index = lazyindex(parser)
360 361 nodemap = lazymap(parser)
361 362 e = list(index[0])
362 363 type = gettype(e[0])
363 364 e[0] = offset_type(0, type)
364 365 index[0] = e
365 366 return index, nodemap, None
366 367
367 368 data = fp.read()
368 369 # call the C implementation to parse the index data
369 370 index, nodemap, cache = parsers.parse_index(data, inline)
370 371 return index, nodemap, cache
371 372
372 373 def packentry(self, entry, node, version, rev):
373 374 p = _pack(indexformatng, *entry)
374 375 if rev == 0:
375 376 p = _pack(versionformat, version) + p[4:]
376 377 return p
377 378
378 379 class revlog(object):
379 380 """
380 381 the underlying revision storage object
381 382
382 383 A revlog consists of two parts, an index and the revision data.
383 384
384 385 The index is a file with a fixed record size containing
385 386 information on each revision, including its nodeid (hash), the
386 387 nodeids of its parents, the position and offset of its data within
387 388 the data file, and the revision it's based on. Finally, each entry
388 389 contains a linkrev entry that can serve as a pointer to external
389 390 data.
390 391
391 392 The revision data itself is a linear collection of data chunks.
392 393 Each chunk represents a revision and is usually represented as a
393 394 delta against the previous chunk. To bound lookup time, runs of
394 395 deltas are limited to about 2 times the length of the original
395 396 version data. This makes retrieval of a version proportional to
396 397 its size, or O(1) relative to the number of revisions.
397 398
398 399 Both pieces of the revlog are written to in an append-only
399 400 fashion, which means we never need to rewrite a file to insert or
400 401 remove data, and can use some simple techniques to avoid the need
401 402 for locking while reading.
402 403 """
403 404 def __init__(self, opener, indexfile):
404 405 """
405 406 create a revlog object
406 407
407 408 opener is a function that abstracts the file opening operation
408 409 and can be used to implement COW semantics or the like.
409 410 """
410 411 self.indexfile = indexfile
411 412 self.datafile = indexfile[:-2] + ".d"
412 413 self.opener = opener
413 414 self._cache = None
414 415 self._chunkcache = None
415 416 self.nodemap = {nullid: nullrev}
416 417 self.index = []
417 418
418 419 v = REVLOG_DEFAULT_VERSION
419 420 if hasattr(opener, "defversion"):
420 421 v = opener.defversion
421 422 if v & REVLOGNG:
422 423 v |= REVLOGNGINLINEDATA
423 424
424 425 i = ""
425 426 try:
426 427 f = self.opener(self.indexfile)
427 428 i = f.read(4)
428 429 f.seek(0)
429 430 if len(i) > 0:
430 431 v = struct.unpack(versionformat, i)[0]
431 432 except IOError, inst:
432 433 if inst.errno != errno.ENOENT:
433 434 raise
434 435
435 436 self.version = v
436 437 self._inline = v & REVLOGNGINLINEDATA
437 438 flags = v & ~0xFFFF
438 439 fmt = v & 0xFFFF
439 440 if fmt == REVLOGV0 and flags:
440 441 raise RevlogError(_("index %s unknown flags %#04x for format v0")
441 442 % (self.indexfile, flags >> 16))
442 443 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
443 444 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
444 445 % (self.indexfile, flags >> 16))
445 446 elif fmt > REVLOGNG:
446 447 raise RevlogError(_("index %s unknown format %d")
447 448 % (self.indexfile, fmt))
448 449
449 450 self._io = revlogio()
450 451 if self.version == REVLOGV0:
451 452 self._io = revlogoldio()
452 453 if i:
453 454 d = self._io.parseindex(f, self._inline)
454 455 self.index, self.nodemap, self._chunkcache = d
455 456
456 457 # add the magic null revision at -1 (if it hasn't been done already)
457 458 if (self.index == [] or isinstance(self.index, lazyindex) or
458 459 self.index[-1][7] != nullid) :
459 460 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
460 461
461 462 def _loadindex(self, start, end):
462 463 """load a block of indexes all at once from the lazy parser"""
463 464 if isinstance(self.index, lazyindex):
464 465 self.index.p.loadindex(start, end)
465 466
466 467 def _loadindexmap(self):
467 468 """loads both the map and the index from the lazy parser"""
468 469 if isinstance(self.index, lazyindex):
469 470 p = self.index.p
470 471 p.loadindex()
471 472 self.nodemap = p.map
472 473
473 474 def _loadmap(self):
474 475 """loads the map from the lazy parser"""
475 476 if isinstance(self.nodemap, lazymap):
476 477 self.nodemap.p.loadmap()
477 478 self.nodemap = self.nodemap.p.map
478 479
479 480 def tip(self):
480 481 return self.node(len(self.index) - 2)
481 482 def __len__(self):
482 483 return len(self.index) - 1
483 484 def __iter__(self):
484 485 for i in xrange(len(self)):
485 486 yield i
486 487 def rev(self, node):
487 488 try:
488 489 return self.nodemap[node]
489 490 except KeyError:
490 491 raise LookupError(node, self.indexfile, _('no node'))
491 492 def node(self, rev):
492 493 return self.index[rev][7]
493 494 def linkrev(self, rev):
494 495 return self.index[rev][4]
495 496 def parents(self, node):
496 497 i = self.index
497 498 d = i[self.rev(node)]
498 499 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
499 500 def parentrevs(self, rev):
500 501 return self.index[rev][5:7]
501 502 def start(self, rev):
502 503 return int(self.index[rev][0] >> 16)
503 504 def end(self, rev):
504 505 return self.start(rev) + self.length(rev)
505 506 def length(self, rev):
506 507 return self.index[rev][1]
507 508 def base(self, rev):
508 509 return self.index[rev][3]
509 510
510 511 def size(self, rev):
511 512 """return the length of the uncompressed text for a given revision"""
512 513 l = self.index[rev][2]
513 514 if l >= 0:
514 515 return l
515 516
516 517 t = self.revision(self.node(rev))
517 518 return len(t)
518 519
519 520 # alternate implementation. The advantage to this code is that it
520 521 # will be faster for a single revision. But, the results are not
521 522 # cached, so finding the size of every revision will be slower.
522 523 """
523 524 if self.cache and self.cache[1] == rev:
524 525 return len(self.cache[2])
525 526
526 527 base = self.base(rev)
527 528 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
528 529 base = self.cache[1]
529 530 text = self.cache[2]
530 531 else:
531 532 text = self.revision(self.node(base))
532 533
533 534 l = len(text)
534 535 for x in xrange(base + 1, rev + 1):
535 536 l = mdiff.patchedsize(l, self.chunk(x))
536 537 return l
537 538 """
538 539
539 540 def reachable(self, node, stop=None):
540 541 """return a hash of all nodes ancestral to a given node, including
541 542 the node itself, stopping when stop is matched"""
542 543 reachable = {}
543 544 visit = [node]
544 545 reachable[node] = 1
545 546 if stop:
546 547 stopn = self.rev(stop)
547 548 else:
548 549 stopn = 0
549 550 while visit:
550 551 n = visit.pop(0)
551 552 if n == stop:
552 553 continue
553 554 if n == nullid:
554 555 continue
555 556 for p in self.parents(n):
556 557 if self.rev(p) < stopn:
557 558 continue
558 559 if p not in reachable:
559 560 reachable[p] = 1
560 561 visit.append(p)
561 562 return reachable
562 563
563 564 def ancestors(self, *revs):
564 565 'Generate the ancestors of revs using a breadth-first visit'
565 566 visit = list(revs)
566 567 seen = util.set([nullrev])
567 568 while visit:
568 569 for parent in self.parentrevs(visit.pop(0)):
569 570 if parent not in seen:
570 571 visit.append(parent)
571 572 seen.add(parent)
572 573 yield parent
573 574
574 575 def descendants(self, *revs):
575 576 'Generate the descendants of revs in topological order'
576 577 seen = util.set(revs)
577 578 for i in xrange(min(revs) + 1, len(self)):
578 579 for x in self.parentrevs(i):
579 580 if x != nullrev and x in seen:
580 581 seen.add(i)
581 582 yield i
582 583 break
583 584
584 585 def findmissing(self, common=None, heads=None):
585 586 '''
586 587 returns the topologically sorted list of nodes from the set:
587 588 missing = (ancestors(heads) \ ancestors(common))
588 589
589 590 where ancestors() is the set of ancestors from heads, heads included
590 591
591 592 if heads is None, the heads of the revlog are used
592 593 if common is None, nullid is assumed to be a common node
593 594 '''
594 595 if common is None:
595 596 common = [nullid]
596 597 if heads is None:
597 598 heads = self.heads()
598 599
599 600 common = [self.rev(n) for n in common]
600 601 heads = [self.rev(n) for n in heads]
601 602
602 603 # we want the ancestors, but inclusive
603 604 has = dict.fromkeys(self.ancestors(*common))
604 605 has[nullrev] = None
605 606 for r in common:
606 607 has[r] = None
607 608
608 609 # take all ancestors from heads that aren't in has
609 610 missing = {}
610 611 visit = [r for r in heads if r not in has]
611 612 while visit:
612 613 r = visit.pop(0)
613 614 if r in missing:
614 615 continue
615 616 else:
616 617 missing[r] = None
617 618 for p in self.parentrevs(r):
618 619 if p not in has:
619 620 visit.append(p)
620 621 missing = missing.keys()
621 622 missing.sort()
622 623 return [self.node(r) for r in missing]
623 624
624 625 def nodesbetween(self, roots=None, heads=None):
625 626 """Return a tuple containing three elements. Elements 1 and 2 contain
626 627 a final list of bases and heads after all the unreachable ones have
627 628 been pruned. Element 0 contains a topologically sorted list of all
628 629 nodes that satisfy these constraints:
630 631 1. All nodes must be descended from a node in roots (the nodes on
631 632 roots are considered descended from themselves).
632 633 2. All nodes must also be ancestors of a node in heads (the nodes in
633 634 heads are considered to be their own ancestors).
634 635
635 636 If roots is unspecified, nullid is assumed as the only root.
636 637 If heads is unspecified, it is taken to be the output of the
637 638 heads method (i.e. a list of all nodes in the repository that
638 639 have no children)."""
639 640 nonodes = ([], [], [])
640 641 if roots is not None:
641 642 roots = list(roots)
642 643 if not roots:
643 644 return nonodes
644 645 lowestrev = min([self.rev(n) for n in roots])
645 646 else:
646 647 roots = [nullid] # Everybody's a descendent of nullid
647 648 lowestrev = nullrev
648 649 if (lowestrev == nullrev) and (heads is None):
649 650 # We want _all_ the nodes!
650 651 return ([self.node(r) for r in self], [nullid], list(self.heads()))
651 652 if heads is None:
652 653 # All nodes are ancestors, so the latest ancestor is the last
653 654 # node.
654 655 highestrev = len(self) - 1
655 656 # Set ancestors to None to signal that every node is an ancestor.
656 657 ancestors = None
657 658 # Set heads to an empty dictionary for later discovery of heads
658 659 heads = {}
659 660 else:
660 661 heads = list(heads)
661 662 if not heads:
662 663 return nonodes
663 664 ancestors = {}
664 665 # Turn heads into a dictionary so we can remove 'fake' heads.
665 666 # Also, later we will be using it to filter out the heads we can't
666 667 # find from roots.
667 668 heads = dict.fromkeys(heads, 0)
668 669 # Start at the top and keep marking parents until we're done.
669 670 nodestotag = heads.keys()
670 671 # Remember where the top was so we can use it as a limit later.
671 672 highestrev = max([self.rev(n) for n in nodestotag])
672 673 while nodestotag:
673 674 # grab a node to tag
674 675 n = nodestotag.pop()
675 676 # Never tag nullid
676 677 if n == nullid:
677 678 continue
678 679 # A node's revision number represents its place in a
679 680 # topologically sorted list of nodes.
680 681 r = self.rev(n)
681 682 if r >= lowestrev:
682 683 if n not in ancestors:
683 684 # If we are possibly a descendent of one of the roots
684 685 # and we haven't already been marked as an ancestor
685 686 ancestors[n] = 1 # Mark as ancestor
686 687 # Add non-nullid parents to list of nodes to tag.
687 688 nodestotag.extend([p for p in self.parents(n) if
688 689 p != nullid])
689 690 elif n in heads: # We've seen it before, is it a fake head?
690 691 # So it is, real heads should not be the ancestors of
691 692 # any other heads.
692 693 heads.pop(n)
693 694 if not ancestors:
694 695 return nonodes
695 696 # Now that we have our set of ancestors, we want to remove any
696 697 # roots that are not ancestors.
697 698
698 699 # If one of the roots was nullid, everything is included anyway.
699 700 if lowestrev > nullrev:
700 701 # But, since we weren't, let's recompute the lowest rev to not
701 702 # include roots that aren't ancestors.
702 703
703 704 # Filter out roots that aren't ancestors of heads
704 705 roots = [n for n in roots if n in ancestors]
705 706 # Recompute the lowest revision
706 707 if roots:
707 708 lowestrev = min([self.rev(n) for n in roots])
708 709 else:
709 710 # No more roots? Return empty list
710 711 return nonodes
711 712 else:
712 713 # We are descending from nullid, and don't need to care about
713 714 # any other roots.
714 715 lowestrev = nullrev
715 716 roots = [nullid]
716 717 # Transform our roots list into a 'set' (i.e. a dictionary where the
717 718 # values don't matter).
718 719 descendents = dict.fromkeys(roots, 1)
719 720 # Also, keep the original roots so we can filter out roots that aren't
720 721 # 'real' roots (i.e. are descended from other roots).
721 722 roots = descendents.copy()
722 723 # Our topologically sorted list of output nodes.
723 724 orderedout = []
724 725 # Don't start at nullid since we don't want nullid in our output list,
725 726 # and if nullid shows up in descendents, empty parents will look like
726 727 # they're descendents.
727 728 for r in xrange(max(lowestrev, 0), highestrev + 1):
728 729 n = self.node(r)
729 730 isdescendent = False
730 731 if lowestrev == nullrev: # Everybody is a descendent of nullid
731 732 isdescendent = True
732 733 elif n in descendents:
733 734 # n is already a descendent
734 735 isdescendent = True
735 736 # This check only needs to be done here because all the roots
736 737 # will start being marked as descendents before the loop.
737 738 if n in roots:
738 739 # If n was a root, check if it's a 'real' root.
739 740 p = tuple(self.parents(n))
740 741 # If any of its parents are descendents, it's not a root.
741 742 if (p[0] in descendents) or (p[1] in descendents):
742 743 roots.pop(n)
743 744 else:
744 745 p = tuple(self.parents(n))
745 746 # A node is a descendent if either of its parents are
746 747 # descendents. (We seeded the descendents list with the roots
747 748 # up there, remember?)
748 749 if (p[0] in descendents) or (p[1] in descendents):
749 750 descendents[n] = 1
750 751 isdescendent = True
751 752 if isdescendent and ((ancestors is None) or (n in ancestors)):
752 753 # Only include nodes that are both descendents and ancestors.
753 754 orderedout.append(n)
754 755 if (ancestors is not None) and (n in heads):
755 756 # We're trying to figure out which heads are reachable
756 757 # from roots.
757 758 # Mark this head as having been reached
758 759 heads[n] = 1
759 760 elif ancestors is None:
760 761 # Otherwise, we're trying to discover the heads.
761 762 # Assume this is a head because if it isn't, the next step
762 763 # will eventually remove it.
763 764 heads[n] = 1
764 765 # But, obviously its parents aren't.
765 766 for p in self.parents(n):
766 767 heads.pop(p, None)
767 768 heads = [n for n in heads.iterkeys() if heads[n] != 0]
768 769 roots = roots.keys()
769 770 assert orderedout
770 771 assert roots
771 772 assert heads
772 773 return (orderedout, roots, heads)
773 774
774 775 def heads(self, start=None, stop=None):
775 776 """return the list of all nodes that have no children
776 777
777 778 if start is specified, only heads that are descendants of
778 779 start will be returned
779 780 if stop is specified, it will consider all the revs from stop
780 781 as if they had no children
781 782 """
782 783 if start is None and stop is None:
783 784 count = len(self)
784 785 if not count:
785 786 return [nullid]
786 787 ishead = [1] * (count + 1)
787 788 index = self.index
788 789 for r in xrange(count):
789 790 e = index[r]
790 791 ishead[e[5]] = ishead[e[6]] = 0
791 792 return [self.node(r) for r in xrange(count) if ishead[r]]
792 793
793 794 if start is None:
794 795 start = nullid
795 796 if stop is None:
796 797 stop = []
797 798 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
798 799 startrev = self.rev(start)
799 800 reachable = {startrev: 1}
800 801 heads = {startrev: 1}
801 802
802 803 parentrevs = self.parentrevs
803 804 for r in xrange(startrev + 1, len(self)):
804 805 for p in parentrevs(r):
805 806 if p in reachable:
806 807 if r not in stoprevs:
807 808 reachable[r] = 1
808 809 heads[r] = 1
809 810 if p in heads and p not in stoprevs:
810 811 del heads[p]
811 812
812 813 return [self.node(r) for r in heads]
813 814
814 815 def children(self, node):
815 816 """find the children of a given node"""
816 817 c = []
817 818 p = self.rev(node)
818 819 for r in range(p + 1, len(self)):
819 820 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
820 821 if prevs:
821 822 for pr in prevs:
822 823 if pr == p:
823 824 c.append(self.node(r))
824 825 elif p == nullrev:
825 826 c.append(self.node(r))
826 827 return c
827 828
828 829 def _match(self, id):
829 830 if isinstance(id, (long, int)):
830 831 # rev
831 832 return self.node(id)
832 833 if len(id) == 20:
833 834 # possibly a binary node
834 835 # odds of a binary node being all hex in ASCII are 1 in 10**25
835 836 try:
836 837 node = id
837 838 r = self.rev(node) # quick search the index
838 839 return node
839 840 except LookupError:
840 841 pass # may be partial hex id
841 842 try:
842 843 # str(rev)
843 844 rev = int(id)
844 845 if str(rev) != id:
845 846 raise ValueError
846 847 if rev < 0:
847 848 rev = len(self) + rev
848 849 if rev < 0 or rev >= len(self):
849 850 raise ValueError
850 851 return self.node(rev)
851 852 except (ValueError, OverflowError):
852 853 pass
853 854 if len(id) == 40:
854 855 try:
855 856 # a full hex nodeid?
856 857 node = bin(id)
857 858 r = self.rev(node)
858 859 return node
859 860 except (TypeError, LookupError):
860 861 pass
861 862
862 863 def _partialmatch(self, id):
863 864 if len(id) < 40:
864 865 try:
865 866 # hex(node)[:...]
866 867 l = len(id) / 2 # grab an even number of digits
867 868 bin_id = bin(id[:l*2])
868 869 nl = [n for n in self.nodemap if n[:l] == bin_id]
869 870 nl = [n for n in nl if hex(n).startswith(id)]
870 871 if len(nl) > 0:
871 872 if len(nl) == 1:
872 873 return nl[0]
873 874 raise LookupError(id, self.indexfile,
874 875 _('ambiguous identifier'))
875 876 return None
876 877 except TypeError:
877 878 pass
878 879
879 880 def lookup(self, id):
880 881 """locate a node based on:
881 882 - revision number or str(revision number)
882 883 - nodeid or subset of hex nodeid
883 884 """
884 885 n = self._match(id)
885 886 if n is not None:
886 887 return n
887 888 n = self._partialmatch(id)
888 889 if n:
889 890 return n
890 891
891 892 raise LookupError(id, self.indexfile, _('no match found'))
892 893
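Illustrative lookups against a revlog r (values hypothetical):

    # r.lookup(3) == r.lookup('3') == r.node(3)  # rev number or its str
    # r.lookup(hex(n)) == n                      # full 40-digit hex id
    # r.lookup(hex(n)[:8]) == n                  # unambiguous prefix
    # an ambiguous prefix raises LookupError with 'ambiguous identifier'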
893 894 def cmp(self, node, text):
894 895 """compare text with a given file revision"""
895 896 p1, p2 = self.parents(node)
896 897 return hash(text, p1, p2) != node
897 898
898 899 def chunk(self, rev, df=None):
899 900 def loadcache(df):
900 901 if not df:
901 902 if self._inline:
902 903 df = self.opener(self.indexfile)
903 904 else:
904 905 df = self.opener(self.datafile)
905 906 df.seek(start)
906 907 self._chunkcache = (start, df.read(cache_length))
907 908
908 909 start, length = self.start(rev), self.length(rev)
909 910 if self._inline:
910 911 start += (rev + 1) * self._io.size
911 912 end = start + length
912 913
913 914 offset = 0
914 915 if not self._chunkcache:
915 916 cache_length = max(65536, length)
916 917 loadcache(df)
917 918 else:
918 919 cache_start = self._chunkcache[0]
919 920 cache_length = len(self._chunkcache[1])
920 921 cache_end = cache_start + cache_length
921 922 if start >= cache_start and end <= cache_end:
922 923 # it is cached
923 924 offset = start - cache_start
924 925 else:
925 926 cache_length = max(65536, length)
926 927 loadcache(df)
927 928
928 929 # avoid copying large chunks
929 930 c = self._chunkcache[1]
930 931 if cache_length != length:
931 932 c = c[offset:offset + length]
932 933
933 934 return decompress(c)
934 935
935 936 def revdiff(self, rev1, rev2):
936 937 """return or calculate a delta between two revisions"""
937 938 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
938 939 return self.chunk(rev2)
939 940
940 941 return mdiff.textdiff(self.revision(self.node(rev1)),
941 942 self.revision(self.node(rev2)))
942 943
943 944 def revision(self, node):
944 945 """return an uncompressed revision of a given node"""
945 946 if node == nullid:
946 947 return ""
947 948 if self._cache and self._cache[0] == node:
948 949 return str(self._cache[2])
949 950
950 951 # look up what we need to read
951 952 text = None
952 953 rev = self.rev(node)
953 954 base = self.base(rev)
954 955
955 956 # check rev flags
956 957 if self.index[rev][0] & 0xFFFF:
957 958 raise RevlogError(_('incompatible revision flag %x') %
958 959 (self.index[rev][0] & 0xFFFF))
959 960
960 961 df = None
961 962
962 963 # do we have useful data cached?
963 964 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
964 965 base = self._cache[1]
965 966 text = str(self._cache[2])
966 967 self._loadindex(base, rev + 1)
967 968 if not self._inline and rev > base + 1:
968 969 df = self.opener(self.datafile)
969 970 else:
970 971 self._loadindex(base, rev + 1)
971 972 if not self._inline and rev > base:
972 973 df = self.opener(self.datafile)
973 974 text = self.chunk(base, df=df)
974 975
975 976 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
976 977 text = mdiff.patches(text, bins)
977 978 p1, p2 = self.parents(node)
978 979 if node != hash(text, p1, p2):
979 980 raise RevlogError(_("integrity check failed on %s:%d")
980 981 % (self.datafile, rev))
981 982
982 983 self._cache = (node, rev, text)
983 984 return text
984 985
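In outline, revision() rebuilds a text by starting from the last full snapshot and folding every stored delta up to the requested rev into it:

    # text = chunk(base(rev))                 # the full version
    # for r in range(base(rev) + 1, rev + 1):
    #     text = patch(text, chunk(r))        # mdiff.patches does this in bulk
    # assert hash(text, p1, p2) == node       # the integrity check above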
985 986 def checkinlinesize(self, tr, fp=None):
986 987 if not self._inline:
987 988 return
988 989 if not fp:
989 990 fp = self.opener(self.indexfile, 'r')
990 991 fp.seek(0, 2)
991 992 size = fp.tell()
992 993 if size < 131072:
993 994 return
994 995 trinfo = tr.find(self.indexfile)
995 996 if trinfo == None:
996 997 raise RevlogError(_("%s not found in the transaction")
997 998 % self.indexfile)
998 999
999 1000 trindex = trinfo[2]
1000 1001 dataoff = self.start(trindex)
1001 1002
1002 1003 tr.add(self.datafile, dataoff)
1003 1004 df = self.opener(self.datafile, 'w')
1004 1005 try:
1005 1006 calc = self._io.size
1006 1007 for r in self:
1007 1008 start = self.start(r) + (r + 1) * calc
1008 1009 length = self.length(r)
1009 1010 fp.seek(start)
1010 1011 d = fp.read(length)
1011 1012 df.write(d)
1012 1013 finally:
1013 1014 df.close()
1014 1015
1015 1016 fp.close()
1016 1017 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1017 1018 self.version &= ~(REVLOGNGINLINEDATA)
1018 1019 self._inline = False
1019 1020 for i in self:
1020 1021 e = self._io.packentry(self.index[i], self.node, self.version, i)
1021 1022 fp.write(e)
1022 1023
1023 1024 # if we don't call rename, the temp file will never replace the
1024 1025 # real index
1025 1026 fp.rename()
1026 1027
1027 1028 tr.replace(self.indexfile, trindex * calc)
1028 1029 self._chunkcache = None
1029 1030
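checkinlinesize() converts an inline revlog, where each index entry is interleaved with its data chunk, into the split layout once the combined file passes 128 KiB (131072 bytes): the chunks are copied out to the .d file and the index is rewritten without them. A sketch of that layout change with a toy two-field entry format (the real revlogio entries are larger):

import struct

ENTRY = struct.Struct(">II")           # toy entry: (data offset, length)

def split_inline(chunks):
    # rebuild a bare index and a separate data stream from the chunks
    index_parts, data = [], b""
    for chunk in chunks:
        index_parts.append(ENTRY.pack(len(data), len(chunk)))
        data += chunk
    return b"".join(index_parts), data

idx, dat = split_inline([b"rev0-full", b"rev1-delta"])
assert ENTRY.unpack(idx[8:16]) == (9, 10) and dat == b"rev0-fullrev1-delta"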
1030 1031 def addrevision(self, text, transaction, link, p1, p2, d=None):
1031 1032 """add a revision to the log
1032 1033
1033 1034 text - the revision data to add
1034 1035 transaction - the transaction object used for rollback
1035 1036 link - the linkrev data to add
1036 1037 p1, p2 - the parent nodeids of the revision
1037 1038 d - an optional precomputed delta
1038 1039 """
1039 1040 dfh = None
1040 1041 if not self._inline:
1041 1042 dfh = self.opener(self.datafile, "a")
1042 1043 ifh = self.opener(self.indexfile, "a+")
1043 1044 try:
1044 1045 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1045 1046 finally:
1046 1047 if dfh:
1047 1048 dfh.close()
1048 1049 ifh.close()
1049 1050
1050 1051 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1051 1052 node = hash(text, p1, p2)
1052 1053 if node in self.nodemap:
1053 1054 return node
1054 1055
1055 1056 curr = len(self)
1056 1057 prev = curr - 1
1057 1058 base = self.base(prev)
1058 1059 offset = self.end(prev)
1059 1060
1060 1061 if curr:
1061 1062 if not d:
1062 1063 ptext = self.revision(self.node(prev))
1063 1064 d = mdiff.textdiff(ptext, text)
1064 1065 data = compress(d)
1065 1066 l = len(data[1]) + len(data[0])
1066 1067 dist = l + offset - self.start(base)
1067 1068
1068 1069 # full versions are inserted when the needed deltas
1069 1070 # become comparable to the uncompressed text
1070 1071 if not curr or dist > len(text) * 2:
1071 1072 data = compress(text)
1072 1073 l = len(data[1]) + len(data[0])
1073 1074 base = curr
1074 1075
1075 1076 e = (offset_type(offset, 0), l, len(text),
1076 1077 base, link, self.rev(p1), self.rev(p2), node)
1077 1078 self.index.insert(-1, e)
1078 1079 self.nodemap[node] = curr
1079 1080
1080 1081 entry = self._io.packentry(e, self.node, self.version, curr)
1081 1082 if not self._inline:
1082 1083 transaction.add(self.datafile, offset)
1083 1084 transaction.add(self.indexfile, curr * len(entry))
1084 1085 if data[0]:
1085 1086 dfh.write(data[0])
1086 1087 dfh.write(data[1])
1087 1088 dfh.flush()
1088 1089 ifh.write(entry)
1089 1090 else:
1090 1091 offset += curr * self._io.size
1091 1092 transaction.add(self.indexfile, offset, curr)
1092 1093 ifh.write(entry)
1093 1094 ifh.write(data[0])
1094 1095 ifh.write(data[1])
1095 1096 self.checkinlinesize(transaction, ifh)
1096 1097
1097 1098 self._cache = (node, curr, text)
1098 1099 return node
1099 1100
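The comment above describes the only delta-chain control in this revlog version: a revision is stored as a full snapshot whenever the accumulated chain since the last snapshot (dist) would exceed twice the uncompressed text, bounding reconstruction cost at the price of extra storage. The heuristic in isolation, with illustrative numbers:

def should_snapshot(dist, textlen, first=False):
    # mirrors "if not curr or dist > len(text) * 2" above
    return first or dist > textlen * 2

assert should_snapshot(0, 100, first=True)   # rev 0 is always stored full
assert not should_snapshot(180, 100)         # 180 <= 200: append a delta
assert should_snapshot(210, 100)             # 210 > 200: store a snapshot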
1100 1101 def ancestor(self, a, b):
1101 1102 """calculate the least common ancestor of nodes a and b"""
1102 1103
1103 1104 def parents(rev):
1104 1105 return [p for p in self.parentrevs(rev) if p != nullrev]
1105 1106
1106 1107 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1107 1108 if c is None:
1108 1109 return nullid
1109 1110
1110 1111 return self.node(c)
1111 1112
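The parents callback handed to ancestor.ancestor() filters out the nullrev sentinel (-1), so the walk terminates at root revisions instead of following them to a fake common node. A self-contained sketch of just that wrapper over a toy parent table:

nullrev = -1
parentrevs = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (0, 1)}

def parents(rev):
    # drop the sentinel so roots simply report no parents
    return [p for p in parentrevs[rev] if p != nullrev]

assert parents(0) == [] and parents(2) == [0, 1]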
1112 1113 def group(self, nodelist, lookup, infocollect=None):
1113 1114 """calculate a delta group
1114 1115
1115 1116 Given a list of changeset revs, return a set of deltas and
1116 1117 metadata corresponding to nodes. The first delta is
1117 1118 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1118 1119 have this parent, as it has all history before these
1119 1120 changesets. The parent used is parents(nodes[0])[0].
1120 1121 """
1121 1122 revs = [self.rev(n) for n in nodelist]
1122 1123
1123 1124 # if we don't have any revisions touched by these changesets, bail
1124 1125 if not revs:
1125 1126 yield changegroup.closechunk()
1126 1127 return
1127 1128
1128 1129 # add the parent of the first rev
1129 1130 p = self.parents(self.node(revs[0]))[0]
1130 1131 revs.insert(0, self.rev(p))
1131 1132
1132 1133 # build deltas
1133 1134 for d in xrange(0, len(revs) - 1):
1134 1135 a, b = revs[d], revs[d + 1]
1135 1136 nb = self.node(b)
1136 1137
1137 1138 if infocollect is not None:
1138 1139 infocollect(nb)
1139 1140
1140 1141 p = self.parents(nb)
1141 1142 meta = nb + p[0] + p[1] + lookup(nb)
1142 1143 if a == -1:
1143 1144 d = self.revision(nb)
1144 1145 meta += mdiff.trivialdiffheader(len(d))
1145 1146 else:
1146 1147 d = self.revdiff(a, b)
1147 1148 yield changegroup.chunkheader(len(meta) + len(d))
1148 1149 yield meta
1149 1150 if len(d) > 2**20:
1150 1151 pos = 0
1151 1152 while pos < len(d):
1152 1153 pos2 = pos + 2 ** 18
1153 1154 yield d[pos:pos2]
1154 1155 pos = pos2
1155 1156 else:
1156 1157 yield d
1157 1158
1158 1159 yield changegroup.closechunk()
1159 1160
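Each delta group() emits is framed as a chunk: a length header, 80 bytes of metadata (node, its two parents, and the lookup/linknode), then the delta, with a zero-length chunk terminating the stream; deltas over 1 MiB are additionally yielded in 256 KiB slices. A sketch of the framing, assuming the changegroup convention of this era in which the 4-byte big-endian length counts the header itself:

import struct

def frame_chunk(meta, delta):
    # length field covers its own 4 bytes plus the payload
    payload = meta + delta
    return struct.pack(">l", len(payload) + 4) + payload

def close_group():
    return struct.pack(">l", 0)       # zero length ends the stream

chunk = frame_chunk(b"\x00" * 80, b"delta-bytes")
assert struct.unpack(">l", chunk[:4])[0] == len(chunk)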
1160 1161 def addgroup(self, revs, linkmapper, transaction):
1161 1162 """
1162 1163 add a delta group
1163 1164
1164 1165 Given a set of deltas, add them to the revision log. The
1165 1166 first delta is against its parent, which should already be in
1166 1167 our log; the rest are against the previous delta.
1167 1168 """
1168 1169
1169 1170 # track the base of the current delta log
1170 1171 r = len(self)
1171 1172 t = r - 1
1172 1173 node = None
1173 1174
1174 1175 base = prev = nullrev
1175 1176 start = end = textlen = 0
1176 1177 if r:
1177 1178 end = self.end(t)
1178 1179
1179 1180 ifh = self.opener(self.indexfile, "a+")
1180 1181 isize = r * self._io.size
1181 1182 if self._inline:
1182 1183 transaction.add(self.indexfile, end + isize, r)
1183 1184 dfh = None
1184 1185 else:
1185 1186 transaction.add(self.indexfile, isize, r)
1186 1187 transaction.add(self.datafile, end)
1187 1188 dfh = self.opener(self.datafile, "a")
1188 1189
1189 1190 try:
1190 1191 # loop through our set of deltas
1191 1192 chain = None
1192 1193 for chunk in revs:
1193 1194 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1194 1195 link = linkmapper(cs)
1195 1196 if node in self.nodemap:
1196 1197 # this can happen if two branches make the same change
1197 1198 chain = node
1198 1199 continue
1199 1200 delta = buffer(chunk, 80)
1200 1201 del chunk
1201 1202
1202 1203 for p in (p1, p2):
1203 1204 if not p in self.nodemap:
1204 1205 raise LookupError(p, self.indexfile, _('unknown parent'))
1205 1206
1206 1207 if not chain:
1207 1208 # retrieve the parent revision of the delta chain
1208 1209 chain = p1
1209 1210 if not chain in self.nodemap:
1210 1211 raise LookupError(chain, self.indexfile, _('unknown base'))
1211 1212
1212 1213 # full versions are inserted when the needed deltas become
1213 1214 # comparable to the uncompressed text or when the previous
1214 1215 # version is not the one we have a delta against. We use
1215 1216 # the size of the previous full rev as a proxy for the
1216 1217 # current size.
1217 1218
1218 1219 if chain == prev:
1219 1220 cdelta = compress(delta)
1220 1221 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1221 1222 textlen = mdiff.patchedsize(textlen, delta)
1222 1223
1223 1224 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1224 1225 # flush our writes here so we can read it in revision
1225 1226 if dfh:
1226 1227 dfh.flush()
1227 1228 ifh.flush()
1228 1229 text = self.revision(chain)
1229 1230 if len(text) == 0:
1230 1231 # skip over trivial delta header
1231 1232 text = buffer(delta, 12)
1232 1233 else:
1233 1234 text = mdiff.patches(text, [delta])
1234 1235 del delta
1235 1236 chk = self._addrevision(text, transaction, link, p1, p2, None,
1236 1237 ifh, dfh)
1237 1238 if not dfh and not self._inline:
1238 1239 # addrevision switched from inline to conventional
1239 1240 # reopen the index
1240 1241 dfh = self.opener(self.datafile, "a")
1241 1242 ifh = self.opener(self.indexfile, "a")
1242 1243 if chk != node:
1243 1244 raise RevlogError(_("consistency error adding group"))
1244 1245 textlen = len(text)
1245 1246 else:
1246 1247 e = (offset_type(end, 0), cdeltalen, textlen, base,
1247 1248 link, self.rev(p1), self.rev(p2), node)
1248 1249 self.index.insert(-1, e)
1249 1250 self.nodemap[node] = r
1250 1251 entry = self._io.packentry(e, self.node, self.version, r)
1251 1252 if self._inline:
1252 1253 ifh.write(entry)
1253 1254 ifh.write(cdelta[0])
1254 1255 ifh.write(cdelta[1])
1255 1256 self.checkinlinesize(transaction, ifh)
1256 1257 if not self._inline:
1257 1258 dfh = self.opener(self.datafile, "a")
1258 1259 ifh = self.opener(self.indexfile, "a")
1259 1260 else:
1260 1261 dfh.write(cdelta[0])
1261 1262 dfh.write(cdelta[1])
1262 1263 ifh.write(entry)
1263 1264
1264 1265 t, r, chain, prev = r, r + 1, node, node
1265 1266 base = self.base(t)
1266 1267 start = self.start(base)
1267 1268 end = self.end(t)
1268 1269 finally:
1269 1270 if dfh:
1270 1271 dfh.close()
1271 1272 ifh.close()
1272 1273
1273 1274 return node
1274 1275
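On the receiving side, addgroup() peels a fixed 80-byte header off each chunk, four 20-byte binary sha1s: the new node, its two parents, and the changeset it links to, with the delta payload following. A minimal parser sketch with dummy node values:

import struct

def parse_chunk(chunk):
    # the same "20s20s20s20s" unpacking used in the loop above
    node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
    return node, p1, p2, cs, chunk[80:]

hdr = b"N" * 20 + b"P" * 20 + b"Q" * 20 + b"C" * 20
node, p1, p2, cs, delta = parse_chunk(hdr + b"delta-bytes")
assert node == b"N" * 20 and cs == b"C" * 20 and delta == b"delta-bytes"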
1275 1276 def strip(self, minlink):
1276 1277 """truncate the revlog on the first revision with a linkrev >= minlink
1277 1278
1278 1279 This function is called when we're stripping revision minlink and
1279 1280 its descendants from the repository.
1280 1281
1281 1282 We have to remove all revisions with linkrev >= minlink, because
1282 1283 the equivalent changelog revisions will be renumbered after the
1283 1284 strip.
1284 1285
1285 1286 So we truncate the revlog on the first of these revisions, and
1286 1287 trust that the caller has saved the revisions that shouldn't be
1287 1288 removed and that it'll re-add them after this truncation.
1288 1289 """
1289 1290 if len(self) == 0:
1290 1291 return
1291 1292
1292 1293 if isinstance(self.index, lazyindex):
1293 1294 self._loadindexmap()
1294 1295
1295 1296 for rev in self:
1296 1297 if self.index[rev][4] >= minlink:
1297 1298 break
1298 1299 else:
1299 1300 return
1300 1301
1301 1302 # first truncate the files on disk
1302 1303 end = self.start(rev)
1303 1304 if not self._inline:
1304 1305 df = self.opener(self.datafile, "a")
1305 1306 df.truncate(end)
1306 1307 end = rev * self._io.size
1307 1308 else:
1308 1309 end += rev * self._io.size
1309 1310
1310 1311 indexf = self.opener(self.indexfile, "a")
1311 1312 indexf.truncate(end)
1312 1313
1313 1314 # then reset internal state in memory to forget those revisions
1314 1315 self._cache = None
1315 1316 self._chunkcache = None
1316 1317 for x in xrange(rev, len(self)):
1317 1318 del self.nodemap[self.node(x)]
1318 1319
1319 1320 del self.index[rev:-1]
1320 1321
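The search preceding the truncation, in isolation: the first revision whose linkrev reaches minlink marks the cut point, and it plus everything after it is removed. A toy linkrev table makes the boundary behaviour concrete:

def first_stripped_rev(linkrevs, minlink):
    # linkrevs[rev] plays the role of self.index[rev][4] above
    for rev, linkrev in enumerate(linkrevs):
        if linkrev >= minlink:
            return rev
    return None                        # nothing reaches minlink: no strip

assert first_stripped_rev([0, 1, 1, 3, 4], 3) == 3   # revs 3 and 4 go
assert first_stripped_rev([0, 1, 2], 5) is None      # nothing to do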
1321 1322 def checksize(self):
1322 1323 expected = 0
1323 1324 if len(self):
1324 1325 expected = max(0, self.end(len(self) - 1))
1325 1326
1326 1327 try:
1327 1328 f = self.opener(self.datafile)
1328 1329 f.seek(0, 2)
1329 1330 actual = f.tell()
1330 1331 dd = actual - expected
1331 1332 except IOError, inst:
1332 1333 if inst.errno != errno.ENOENT:
1333 1334 raise
1334 1335 dd = 0
1335 1336
1336 1337 try:
1337 1338 f = self.opener(self.indexfile)
1338 1339 f.seek(0, 2)
1339 1340 actual = f.tell()
1340 1341 s = self._io.size
1341 1342 i = max(0, actual / s)
1342 1343 di = actual - (i * s)
1343 1344 if self._inline:
1344 1345 databytes = 0
1345 1346 for r in self:
1346 1347 databytes += max(0, self.length(r))
1347 1348 dd = 0
1348 1349 di = actual - len(self) * s - databytes
1349 1350 except IOError, inst:
1350 1351 if inst.errno != errno.ENOENT:
1351 1352 raise
1352 1353 di = 0
1353 1354
1354 1355 return (dd, di)
1355 1356
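checksize() returns a pair of damage indicators: dd, the data file's length minus what the index accounts for, and di, the tail of the index file not covered by whole fixed-size entries (for inline revlogs, the interleaved data is subtracted first). Reduced to arithmetic, with illustrative figures:

def index_excess(actual, entry_size, nrevs, inline_data=0):
    # bytes in the index file that no entry (or inline chunk) explains
    return actual - nrevs * entry_size - inline_data

assert index_excess(640, 64, 10) == 0    # clean: 10 entries, no slack
assert index_excess(650, 64, 10) == 10   # 10 unexplained trailing bytes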
1356 1357 def files(self):
1357 1358 res = [ self.indexfile ]
1358 1359 if not self._inline:
1359 1360 res.append(self.datafile)
1360 1361 return res
@@ -1,247 +1,247 b''
1 1 # sshrepo.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex
9 9 from i18n import _
10 import repo, os, re, util, error
10 import repo, re, util, error
11 11
12 12 class remotelock(object):
13 13 def __init__(self, repo):
14 14 self.repo = repo
15 15 def release(self):
16 16 self.repo.unlock()
17 17 self.repo = None
18 18 def __del__(self):
19 19 if self.repo:
20 20 self.release()
21 21
22 22 class sshrepository(repo.repository):
23 23 def __init__(self, ui, path, create=0):
24 24 self._url = path
25 25 self.ui = ui
26 26
27 27 m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
28 28 if not m:
29 29 self.abort(error.RepoError(_("couldn't parse location %s") % path))
30 30
31 31 self.user = m.group(2)
32 32 self.host = m.group(3)
33 33 self.port = m.group(5)
34 34 self.path = m.group(7) or "."
35 35
36 36 sshcmd = self.ui.config("ui", "ssh", "ssh")
37 37 remotecmd = self.ui.config("ui", "remotecmd", "hg")
38 38
39 39 args = util.sshargs(sshcmd, self.host, self.user, self.port)
40 40
41 41 if create:
42 42 cmd = '%s %s "%s init %s"'
43 43 cmd = cmd % (sshcmd, args, remotecmd, self.path)
44 44
45 45 ui.note(_('running %s\n') % cmd)
46 46 res = util.system(cmd)
47 47 if res != 0:
48 48 self.abort(error.RepoError(_("could not create remote repo")))
49 49
50 50 self.validate_repo(ui, sshcmd, args, remotecmd)
51 51
52 52 def url(self):
53 53 return self._url
54 54
55 55 def validate_repo(self, ui, sshcmd, args, remotecmd):
56 56 # clean up any previous run
57 57 self.cleanup()
58 58
59 59 cmd = '%s %s "%s -R %s serve --stdio"'
60 60 cmd = cmd % (sshcmd, args, remotecmd, self.path)
61 61
62 62 cmd = util.quotecommand(cmd)
63 63 ui.note(_('running %s\n') % cmd)
64 64 self.pipeo, self.pipei, self.pipee = util.popen3(cmd, 'b')
65 65
66 66 # skip any noise generated by remote shell
67 67 self.do_cmd("hello")
68 68 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
69 69 lines = ["", "dummy"]
70 70 max_noise = 500
71 71 while lines[-1] and max_noise:
72 72 l = r.readline()
73 73 self.readerr()
74 74 if lines[-1] == "1\n" and l == "\n":
75 75 break
76 76 if l:
77 77 ui.debug(_("remote: "), l)
78 78 lines.append(l)
79 79 max_noise -= 1
80 80 else:
81 81 self.abort(error.RepoError(_("no suitable response from remote hg")))
82 82
83 83 self.capabilities = util.set()
84 84 lines.reverse()
85 85 for l in lines:
86 86 if l.startswith("capabilities:"):
87 87 self.capabilities.update(l[:-1].split(":")[1].split())
88 88 break
89 89
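validate_repo() has to cope with login scripts and motd banners polluting the remote shell's output, so it sends a throwaway hello plus a trivial between query and scans up to 500 lines for the telltale "1\n" then "\n" reply, harvesting any capabilities: line on the way. A toy scanner over a list of lines rather than a live pipe (the real code collects capabilities after the loop; this sketch does it inline):

def scan_handshake(lines, max_noise=500):
    caps = set()
    prev = "dummy"
    for l in lines[:max_noise]:
        if l.startswith("capabilities:"):
            caps.update(l.rstrip("\n").split(":", 1)[1].split())
        if prev == "1\n" and l == "\n":
            return caps                # found a real hg server
        prev = l
    return None                        # no suitable response from remote hg

assert scan_handshake(["capabilities: unbundle lookup\n", "1\n", "\n"]) == \
    set(["unbundle", "lookup"])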
90 90 def readerr(self):
91 91 while 1:
92 92 size = util.fstat(self.pipee).st_size
93 93 if size == 0: break
94 94 l = self.pipee.readline()
95 95 if not l: break
96 96 self.ui.status(_("remote: "), l)
97 97
98 98 def abort(self, exception):
99 99 self.cleanup()
100 100 raise exception
101 101
102 102 def cleanup(self):
103 103 try:
104 104 self.pipeo.close()
105 105 self.pipei.close()
106 106 # read the error descriptor until EOF
107 107 for l in self.pipee:
108 108 self.ui.status(_("remote: "), l)
109 109 self.pipee.close()
110 110 except:
111 111 pass
112 112
113 113 __del__ = cleanup
114 114
115 115 def do_cmd(self, cmd, **args):
116 116 self.ui.debug(_("sending %s command\n") % cmd)
117 117 self.pipeo.write("%s\n" % cmd)
118 118 for k, v in args.iteritems():
119 119 self.pipeo.write("%s %d\n" % (k, len(v)))
120 120 self.pipeo.write(v)
121 121 self.pipeo.flush()
122 122
123 123 return self.pipei
124 124
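do_cmd() frames every request the same way on the remote hg's stdin: the command name on its own line, then each argument as its name and byte length on one line followed by the raw value. An encoder-only sketch (arguments are sorted here for deterministic output; the real code writes them in dict order):

def encode_cmd(cmd, **args):
    out = ["%s\n" % cmd]
    for k, v in sorted(args.items()):
        out.append("%s %d\n" % (k, len(v)))   # name and value length
        out.append(v)                         # raw value bytes
    return "".join(out)

assert encode_cmd("between", pairs="a-b") == "between\npairs 3\na-b"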
125 125 def call(self, cmd, **args):
126 126 self.do_cmd(cmd, **args)
127 127 return self._recv()
128 128
129 129 def _recv(self):
130 130 l = self.pipei.readline()
131 131 self.readerr()
132 132 try:
133 133 l = int(l)
134 134 except:
135 135 self.abort(error.ResponseError(_("unexpected response:"), l))
136 136 return self.pipei.read(l)
137 137
138 138 def _send(self, data, flush=False):
139 139 self.pipeo.write("%d\n" % len(data))
140 140 if data:
141 141 self.pipeo.write(data)
142 142 if flush:
143 143 self.pipeo.flush()
144 144 self.readerr()
145 145
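Responses come back with the mirror-image framing that _recv() parses: a decimal byte count on its own line, then exactly that many payload bytes. A sketch over an in-memory stream instead of the ssh pipe:

import io

def recv_frame(pipe):
    length = int(pipe.readline())      # int() tolerates the trailing newline
    return pipe.read(length)

assert recv_frame(io.StringIO(u"5\nhello")) == u"hello"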
146 146 def lock(self):
147 147 self.call("lock")
148 148 return remotelock(self)
149 149
150 150 def unlock(self):
151 151 self.call("unlock")
152 152
153 153 def lookup(self, key):
154 154 self.requirecap('lookup', _('look up remote revision'))
155 155 d = self.call("lookup", key=key)
156 156 success, data = d[:-1].split(" ", 1)
157 157 if int(success):
158 158 return bin(data)
159 159 else:
160 160 self.abort(error.RepoError(data))
161 161
162 162 def heads(self):
163 163 d = self.call("heads")
164 164 try:
165 165 return map(bin, d[:-1].split(" "))
166 166 except:
167 167 self.abort(error.ResponseError(_("unexpected response:"), d))
168 168
169 169 def branches(self, nodes):
170 170 n = " ".join(map(hex, nodes))
171 171 d = self.call("branches", nodes=n)
172 172 try:
173 173 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
174 174 return br
175 175 except:
176 176 self.abort(error.ResponseError(_("unexpected response:"), d))
177 177
178 178 def between(self, pairs):
179 179 n = " ".join(["-".join(map(hex, p)) for p in pairs])
180 180 d = self.call("between", pairs=n)
181 181 try:
182 182 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
183 183 return p
184 184 except:
185 185 self.abort(error.ResponseError(_("unexpected response:"), d))
186 186
187 187 def changegroup(self, nodes, kind):
188 188 n = " ".join(map(hex, nodes))
189 189 return self.do_cmd("changegroup", roots=n)
190 190
191 191 def changegroupsubset(self, bases, heads, kind):
192 192 self.requirecap('changegroupsubset', _('look up remote changes'))
193 193 bases = " ".join(map(hex, bases))
194 194 heads = " ".join(map(hex, heads))
195 195 return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
196 196
197 197 def unbundle(self, cg, heads, source):
198 198 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
199 199 if d:
200 200 # remote may send "unsynced changes"
201 201 self.abort(error.RepoError(_("push refused: %s") % d))
202 202
203 203 while 1:
204 204 d = cg.read(4096)
205 205 if not d:
206 206 break
207 207 self._send(d)
208 208
209 209 self._send("", flush=True)
210 210
211 211 r = self._recv()
212 212 if r:
213 213 # remote may send "unsynced changes"
214 214 self.abort(error.RepoError(_("push failed: %s") % r))
215 215
216 216 r = self._recv()
217 217 try:
218 218 return int(r)
219 219 except:
220 220 self.abort(error.ResponseError(_("unexpected response:"), r))
221 221
222 222 def addchangegroup(self, cg, source, url):
223 223 d = self.call("addchangegroup")
224 224 if d:
225 225 self.abort(error.RepoError(_("push refused: %s") % d))
226 226 while 1:
227 227 d = cg.read(4096)
228 228 if not d:
229 229 break
230 230 self.pipeo.write(d)
231 231 self.readerr()
232 232
233 233 self.pipeo.flush()
234 234
235 235 self.readerr()
236 236 r = self._recv()
237 237 if not r:
238 238 return 1
239 239 try:
240 240 return int(r)
241 241 except:
242 242 self.abort(error.ResponseError(_("unexpected response:"), r))
243 243
244 244 def stream_out(self):
245 245 return self.do_cmd('stream_out')
246 246
247 247 instance = sshrepository
@@ -1,117 +1,117 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from i18n import _
11 11 import changelog, byterange, url, error
12 import repo, localrepo, manifest, util, store
12 import localrepo, manifest, util, store
13 13 import urllib, urllib2, errno
14 14
15 15 class httprangereader(object):
16 16 def __init__(self, url, opener):
17 17 # we assume opener has HTTPRangeHandler
18 18 self.url = url
19 19 self.pos = 0
20 20 self.opener = opener
21 21 def seek(self, pos):
22 22 self.pos = pos
23 23 def read(self, bytes=None):
24 24 req = urllib2.Request(self.url)
25 25 end = ''
26 26 if bytes:
27 27 end = self.pos + bytes - 1
28 28 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
29 29
30 30 try:
31 31 f = self.opener.open(req)
32 32 data = f.read()
33 33 except urllib2.HTTPError, inst:
34 34 num = inst.code == 404 and errno.ENOENT or None
35 35 raise IOError(num, inst)
36 36 except urllib2.URLError, inst:
37 37 raise IOError(None, inst.reason[1])
38 38
39 39 if bytes:
40 40 data = data[:bytes]
41 41 return data
42 42
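Each read() turns the reader's position into a standard HTTP Range header: a closed interval when a byte count is given, an open-ended request for the rest of the file otherwise; that is the entire trick behind serving a repository over static HTTP. The header computation in isolation:

def range_header(pos, nbytes=None):
    # closed interval [pos, pos + nbytes - 1], or open-ended "pos-"
    end = "" if nbytes is None else str(pos + nbytes - 1)
    return "bytes=%d-%s" % (pos, end)

assert range_header(0, 1024) == "bytes=0-1023"
assert range_header(4096) == "bytes=4096-"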
43 43 def build_opener(ui, authinfo):
44 44 # urllib cannot handle URLs with embedded user or passwd
45 45 urlopener = url.opener(ui, authinfo)
46 46 urlopener.add_handler(byterange.HTTPRangeHandler())
47 47
48 48 def opener(base):
49 49 """return a function that opens files over http"""
50 50 p = base
51 51 def o(path, mode="r"):
52 52 f = "/".join((p, urllib.quote(path)))
53 53 return httprangereader(f, urlopener)
54 54 return o
55 55
56 56 return opener
57 57
58 58 class statichttprepository(localrepo.localrepository):
59 59 def __init__(self, ui, path):
60 60 self._url = path
61 61 self.ui = ui
62 62
63 63 self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")
64 64
65 65 opener = build_opener(ui, authinfo)
66 66 self.opener = opener(self.path)
67 67
68 68 # find requirements
69 69 try:
70 70 requirements = self.opener("requires").read().splitlines()
71 71 except IOError, inst:
72 72 if inst.errno != errno.ENOENT:
73 73 raise
74 74 # check if it is a non-empty old-style repository
75 75 try:
76 76 self.opener("00changelog.i").read(1)
77 77 except IOError, inst:
78 78 if inst.errno != errno.ENOENT:
79 79 raise
80 80 # we do not care about empty old-style repositories here
81 81 msg = _("'%s' does not appear to be an hg repository") % path
82 82 raise error.RepoError(msg)
83 83 requirements = []
84 84
85 85 # check them
86 86 for r in requirements:
87 87 if r not in self.supported:
88 88 raise error.RepoError(_("requirement '%s' not supported") % r)
89 89
90 90 # setup store
91 91 def pjoin(a, b):
92 92 return a + '/' + b
93 93 self.store = store.store(requirements, self.path, opener, pjoin)
94 94 self.spath = self.store.path
95 95 self.sopener = self.store.opener
96 96 self.sjoin = self.store.join
97 97
98 98 self.manifest = manifest.manifest(self.sopener)
99 99 self.changelog = changelog.changelog(self.sopener)
100 100 self.tagscache = None
101 101 self.nodetagscache = None
102 102 self.encodepats = None
103 103 self.decodepats = None
104 104
105 105 def url(self):
106 106 return self._url
107 107
108 108 def local(self):
109 109 return False
110 110
111 111 def lock(self, wait=True):
112 112 raise util.Abort(_('cannot lock static-http repository'))
113 113
114 114 def instance(ui, path, create):
115 115 if create:
116 116 raise util.Abort(_('cannot create new static-http repository'))
117 117 return statichttprepository(ui, path[7:])