py3: convert the mode argument of os.fdopen to unicodes (1 of 2)...
Pulkit Goyal
r30924:48dea083 default
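This changeset is part of the Python 3 porting work: on Python 3, os.fdopen()
requires its mode argument to be a native str (unicode), while Mercurial's
source transformer turns unprefixed string literals into bytes. Wrapping the
mode in pycompat.sysstr() yields a native str on both major versions. A
minimal sketch of the idea, assuming roughly the behaviour of
mercurial.pycompat.sysstr rather than its exact source:

    import sys

    def sysstr(s):
        """Return a native str: pass str through unchanged, decode bytes
        byte-transparently on Python 3 (identity on Python 2)."""
        if sys.version_info[0] >= 3 and isinstance(s, bytes):
            return s.decode('latin-1')
        return s

    # os.fdopen(fd, b'wb')          -> TypeError on Python 3
    # os.fdopen(fd, sysstr(b'wb'))  -> works on both Python 2 and 3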
@@ -1,557 +1,557 @@
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil
import tempfile

from .i18n import _
from .node import nullid

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    scmutil,
    util,
)

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, bundle, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = scmutil.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = bundle
        n = len(self)
        self.repotiprev = n - 1
        chain = None
        self.bundlerevs = set() # used by 'bundle()' revset expression
        getchunk = lambda: bundle.deltachunk(chain)
        for chunkdata in iter(getchunk, {}):
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            size = len(delta)
            start = bundle.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, 0), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            chain = node
            n += 1

    def _chunk(self, rev):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, nodeorrev, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        text = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
                text = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if text is None:
            text = self.baserevision(iterrev)

        while chain:
            delta = self._chunk(chain.pop())
            text = mdiff.patches(text, [delta])

        text, validatehash = self._processflags(text, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
        self._cache = (node, rev, text)
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such a class
        # one needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError

class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, bundle):
        changelog.changelog.__init__(self, opener)
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        try:
            self.filteredrevs = ()
            return changelog.changelog.revision(self, nodeorrev)
        finally:
            self.filteredrevs = oldfilter

class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
        manifest.manifestrevlog.__init__(self, opener, dir=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            result = self.fulltextcache[node].tostring()
        else:
            result = manifest.manifestrevlog.revision(self, nodeorrev)
        return result

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)

class bundlefilelog(bundlerevlog, filelog.filelog):
    def __init__(self, opener, path, bundle, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev)

class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False

class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = scmutil.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

def _getfilestarts(bundle):
    bundlefilespos = {}
    for chunkdata in iter(bundle.filelogheader, {}):
        fname = chunkdata['filename']
        bundlefilespos[fname] = bundle.tell()
        for chunk in iter(lambda: bundle.deltachunk(None), {}):
            pass
    return bundlefilespos

class bundlerepository(localrepo.localrepository):
    def __init__(self, ui, path, bundlename):
        def _writetempbundle(read, suffix, header=''):
            """Write a temporary file to disk

            This is a closure because we need to make sure it is tracked by
            self.tempfile for cleanup purposes."""
            fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                            suffix=".hg10un")
            self.tempfile = temp

-            with os.fdopen(fdtemp, 'wb') as fptemp:
+            with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
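                # On Python 3, os.fdopen() rejects a bytes mode argument;
                # Mercurial string literals are bytes after source
                # transformation, so the mode is converted to a native str
                # with pycompat.sysstr() above.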
                fptemp.write(header)
                while True:
                    chunk = read(2**18)
                    if not chunk:
                        break
                    fptemp.write(chunk)

            return self.vfs.open(self.tempfile, mode="rb")
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, path)
        except error.RepoError:
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if path:
            self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
        else:
            self._url = 'bundle:' + bundlename

        self.tempfile = None
        f = util.posixfile(bundlename, "rb")
        self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)

        if isinstance(self.bundle, bundle2.unbundle20):
            cgstream = None
            for part in self.bundle.iterparts():
                if part.type == 'changegroup':
                    if cgstream is not None:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    cgstream = part
                    version = part.params.get('version', '01')
                    legalcgvers = changegroup.supportedincomingversions(self)
                    if version not in legalcgvers:
                        msg = _('Unsupported changegroup version: %s')
                        raise error.Abort(msg % version)
                    if self.bundle.compressed():
                        cgstream = _writetempbundle(part.read,
                                                    ".cg%sun" % version)

            if cgstream is None:
                raise error.Abort(_('No changegroups found'))
            cgstream.seek(0)

            self.bundle = changegroup.getunbundler(version, cgstream, 'UN')

        elif self.bundle.compressed():
            f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
            self.bundlefile = self.bundle = exchange.readbundle(ui, f,
                                                                bundlename,
                                                                self.vfs)

        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self.bundle.changelogheader()
        c = bundlechangelog(self.svfs, self.bundle)
        self.manstart = self.bundle.tell()
        return c

    def _constructmanifest(self):
        self.bundle.seek(self.manstart)
        # consume the header if it exists
        self.bundle.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        m = bundlemanifest(self.svfs, self.bundle, linkmapper)
        self.filestart = self.bundle.tell()
        return m

    @localrepo.unfilteredpropertycache
    def manstart(self):
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog
        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        if not self.bundlefilespos:
            self.bundle.seek(self.filestart)
            self.bundlefilespos = _getfilestarts(self.bundle)

        if f in self.bundlefilespos:
            self.bundle.seek(self.bundlefilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
        else:
            return filelog.filelog(self.svfs, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)

def instance(ui, path, create):
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot", "")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path
    return bundlerepository(ui, repopath, bundlename)

class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError

def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from other

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "other" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "other" and the one returned
    here.
    '''
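    # Typical call pattern (a sketch based on the docstring above, not code
    # from the original file):
    #   local, csets, cleanupfn = getremotechanges(ui, repo, other)
    #   try: ... inspect local[node] for node in csets ...
    #   finally: cleanupfn()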
    tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], other.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = other.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if other repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and other.capable('getbundle')
                      and other.capable('bundle2'))
        if canbundle2:
            kwargs = {}
            kwargs['common'] = common
            kwargs['heads'] = rheads
            kwargs['bundlecaps'] = exchange.caps20to10(repo)
            kwargs['cg'] = True
            b2 = other.getbundle('incoming', **kwargs)
            fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
                                                     bundlename)
        else:
            if other.capable('getbundle'):
                cg = other.getbundle('incoming', common=common, heads=rheads)
            elif onlyheads is None and not other.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads
                cg = other.changegroup(incoming, "incoming")
                rheads = None
            else:
                cg = other.changegroupsubset(incoming, rheads, 'incoming')
            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
                                                      fname)
            # this repo contains local and other now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably needs cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
        remotephases = other.listkeys('phases')

        pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        other.close()

    return (localrepo, csets, cleanup)
@@ -1,588 +1,588 @@
# chgserver.py - command server extension for cHg
#
# Copyright 2011 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""command server extension for cHg

'S' channel (read/write)
    propagate ui.system() request to client

'attachio' command
    attach client's stdio passed by sendmsg()

'chdir' command
    change current directory

'setenv' command
    replace os.environ completely

'setumask' command
    set umask

'validate' command
    reload the config and check if the server is up to date

Config
------

::

  [chgserver]
  idletimeout = 3600 # seconds, after which an idle server will exit
  skiphash = False # whether to skip config or env change checks
"""

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import re
import struct
import time

from .i18n import _

from . import (
    commandserver,
    encoding,
    error,
    extensions,
    osutil,
    pycompat,
    util,
)

_log = commandserver.log

def _hashlist(items):
    """return sha1 hexdigest for a list"""
    return hashlib.sha1(str(items)).hexdigest()

# sensitive config sections affecting confighash
_configsections = [
    'alias', # affects global state commands.table
    'extdiff', # uisetup will register new commands
    'extensions',
]

# sensitive environment variables affecting confighash
_envre = re.compile(r'''\A(?:
                    CHGHG
                    |HG(?:[A-Z].*)?
                    |LANG(?:UAGE)?
                    |LC_.*
                    |LD_.*
                    |PATH
                    |PYTHON.*
                    |TERM(?:INFO)?
                    |TZ
                    )\Z''', re.X)

def _confighash(ui):
    """return a quick hash for detecting config/env changes

    confighash is the hash of sensitive config items and environment
    variables.

    for chgserver, it is designed that once confighash changes, the server is
    not qualified to serve its client and should redirect the client to a new
    server. different from mtimehash, a confighash change will not mark the
    server outdated and exit, since the user can have different configs at
    the same time.
    """
    sectionitems = []
    for section in _configsections:
        sectionitems.append(ui.configitems(section))
    sectionhash = _hashlist(sectionitems)
    envitems = [(k, v) for k, v in encoding.environ.iteritems()
                if _envre.match(k)]
    envhash = _hashlist(sorted(envitems))
    return sectionhash[:6] + envhash[:6]
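
# Example (hypothetical digests, not from the original source): if the
# section hash were 'a3f2c1...' and the environment hash '9b0d4e...',
# _confighash() would return the 12-char string 'a3f2c19b0d4e'; any change
# to a sensitive section or variable yields a new value, and the client is
# redirected to a new server.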

def _getmtimepaths(ui):
    """get a list of paths that should be checked to detect change

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - python binary
    """
    modules = [m for n, m in extensions.extensions(ui)]
    try:
        from . import __version__
        modules.append(__version__)
    except ImportError:
        pass
    files = [pycompat.sysexecutable]
    for m in modules:
        try:
            files.append(inspect.getabsfile(m))
        except TypeError:
            pass
    return sorted(set(files))

def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash calls stat on given paths and calculates a hash based on size
    and mtime of each file. mtimehash does not read file content because
    reading is expensive. therefore it's not 100% reliable for detecting
    content changes. it's possible to return different hashes for the same
    file contents. it's also possible to return the same hash for different
    file contents in some carefully crafted situations.

    for chgserver, it is designed that once mtimehash changes, the server is
    considered outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because we only know the paths of
    extensions after importing them (there is imp.find_module but that faces
    race conditions). We need to calculate confighash without importing.
    """
    def trystat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)
        except OSError:
            # could be ENOENT, EPERM etc. not fatal in any case
            pass
    return _hashlist(map(trystat, paths))[:12]

class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""
    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        confighash = _confighash(ui)
        mtimehash = _mtimehash(mtimepaths)
        _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
        return hashstate(confighash, mtimehash, mtimepaths)

def _newchgui(srcui, csystem, attachio):
    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            if src:
                self._csystem = getattr(src, '_csystem', csystem)
            else:
                self._csystem = csystem

        def system(self, cmd, environ=None, cwd=None, onerr=None,
                   errprefix=None):
            # fall back to the original system method if the output needs to
            # be captured (to self._buffers), or the output stream is not
            # stdout (e.g. stderr, cStringIO), because the chg client is not
            # aware of these situations and will behave differently (write to
            # stdout).
            if (any(s[1] for s in self._bufferstates)
                or not util.safehasattr(self.fout, 'fileno')
                or self.fout.fileno() != util.stdout.fileno()):
                return super(chgui, self).system(cmd, environ, cwd, onerr,
                                                 errprefix)
            self.flush()
            rc = self._csystem(cmd, util.shellenviron(environ), cwd)
            if rc and onerr:
                errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                                    util.explainexit(rc)[0])
                if errprefix:
                    errmsg = '%s: %s' % (errprefix, errmsg)
                raise onerr(errmsg)
            return rc

        def _runpager(self, cmd):
            self._csystem(cmd, util.shellenviron(), type='pager',
                          cmdtable={'attachio': attachio})

    return chgui(srcui)

def _loadnewui(srcui, args):
    from . import dispatch # avoid cycle

    newui = srcui.__class__.load()
    for a in ['fin', 'fout', 'ferr', 'environ']:
        setattr(newui, a, getattr(srcui, a))
    if util.safehasattr(srcui, '_csystem'):
        newui._csystem = srcui._csystem

    # command line args
    args = args[:]
    dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if ':' in source or source == '--config':
            # path:line or command line
            continue
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwds = dispatch._earlygetopt(['--cwd'], args)
    cwd = cwds and os.path.realpath(cwds[-1]) or None
    rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    return (newui, newlui)

class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    type, '\0',
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    if type == 'system', waits for:

    exitcode length (unsigned int),
    exitcode (int)

    if type == 'pager', repeatedly waits for a command name ending with '\n'
    and executes the one defined by cmdtable, or exits the loop if the
    command name is empty.
    """
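    # Example framing (hypothetical values, derived from __call__ below): a
    # call with type='system', cmd='ls', cwd='/tmp' and environ={'LANG': 'C'}
    # writes on the channel:
    #   struct.pack('>cI', 'S', len(data)) + data
    # where data = 'system\0ls\0/tmp\0LANG=C' (cmd is shell-quoted first).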
    def __init__(self, in_, out, channel):
        self.in_ = in_
        self.out = out
        self.channel = channel

    def __call__(self, cmd, environ, cwd=None, type='system', cmdtable=None):
        args = [type, util.quotecommand(cmd), os.path.abspath(cwd or '.')]
        args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
        data = '\0'.join(args)
        self.out.write(struct.pack('>cI', self.channel, len(data)))
        self.out.write(data)
        self.out.flush()

        if type == 'system':
            length = self.in_.read(4)
            length, = struct.unpack('>I', length)
            if length != 4:
                raise error.Abort(_('invalid response'))
            rc, = struct.unpack('>i', self.in_.read(4))
            return rc
        elif type == 'pager':
            while True:
                cmd = self.in_.readline()[:-1]
                if not cmd:
                    break
                if cmdtable and cmd in cmdtable:
                    _log('pager subcommand: %s' % cmd)
                    cmdtable[cmd]()
                else:
                    raise error.Abort(_('unexpected command: %s') % cmd)
        else:
            raise error.ProgrammingError('invalid S channel type: %s' % type)

_iochannels = [
    # server.ch, ui.fp, mode
-    ('cin', 'fin', 'rb'),
+    ('cin', 'fin', pycompat.sysstr('rb')),
-    ('cout', 'fout', 'wb'),
+    ('cout', 'fout', pycompat.sysstr('wb')),
-    ('cerr', 'ferr', 'wb'),
+    ('cerr', 'ferr', pycompat.sysstr('wb')),
]
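
# The modes above are wrapped in pycompat.sysstr() because they are handed
# to os.fdopen() in chgcmdserver.attachio() below, which requires a native
# str mode on Python 3.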
294
294
295 class chgcmdserver(commandserver.server):
295 class chgcmdserver(commandserver.server):
296 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
296 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
297 super(chgcmdserver, self).__init__(
297 super(chgcmdserver, self).__init__(
298 _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
298 _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
299 repo, fin, fout)
299 repo, fin, fout)
300 self.clientsock = sock
300 self.clientsock = sock
301 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
301 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
302 self.hashstate = hashstate
302 self.hashstate = hashstate
303 self.baseaddress = baseaddress
303 self.baseaddress = baseaddress
304 if hashstate is not None:
304 if hashstate is not None:
305 self.capabilities = self.capabilities.copy()
305 self.capabilities = self.capabilities.copy()
306 self.capabilities['validate'] = chgcmdserver.validate
306 self.capabilities['validate'] = chgcmdserver.validate
307
307
308 def cleanup(self):
308 def cleanup(self):
309 super(chgcmdserver, self).cleanup()
309 super(chgcmdserver, self).cleanup()
310 # dispatch._runcatch() does not flush outputs if exception is not
310 # dispatch._runcatch() does not flush outputs if exception is not
311 # handled by dispatch._dispatch()
311 # handled by dispatch._dispatch()
312 self.ui.flush()
312 self.ui.flush()
313 self._restoreio()
313 self._restoreio()
314
314
315 def attachio(self):
315 def attachio(self):
316 """Attach to client's stdio passed via unix domain socket; all
316 """Attach to client's stdio passed via unix domain socket; all
317 channels except cresult will no longer be used
317 channels except cresult will no longer be used
318 """
318 """
319 # tell client to sendmsg() with 1-byte payload, which makes it
319 # tell client to sendmsg() with 1-byte payload, which makes it
320 # distinctive from "attachio\n" command consumed by client.read()
320 # distinctive from "attachio\n" command consumed by client.read()
321 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
321 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
322 clientfds = osutil.recvfds(self.clientsock.fileno())
322 clientfds = osutil.recvfds(self.clientsock.fileno())
323 _log('received fds: %r\n' % clientfds)
323 _log('received fds: %r\n' % clientfds)
324
324
325 ui = self.ui
325 ui = self.ui
326 ui.flush()
326 ui.flush()
327 first = self._saveio()
327 first = self._saveio()
328 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
328 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
329 assert fd > 0
329 assert fd > 0
330 fp = getattr(ui, fn)
330 fp = getattr(ui, fn)
331 os.dup2(fd, fp.fileno())
331 os.dup2(fd, fp.fileno())
332 os.close(fd)
332 os.close(fd)
333 if not first:
333 if not first:
334 continue
334 continue
335 # reset buffering mode when client is first attached. as we want
335 # reset buffering mode when client is first attached. as we want
336 # to see output immediately on pager, the mode stays unchanged
336 # to see output immediately on pager, the mode stays unchanged
337 # when client re-attached. ferr is unchanged because it should
337 # when client re-attached. ferr is unchanged because it should
338 # be unbuffered no matter if it is a tty or not.
338 # be unbuffered no matter if it is a tty or not.
339 if fn == 'ferr':
339 if fn == 'ferr':
340 newfp = fp
340 newfp = fp
341 else:
341 else:
342 # make it line buffered explicitly because the default is
342 # make it line buffered explicitly because the default is
343 # decided on first write(), where fout could be a pager.
343 # decided on first write(), where fout could be a pager.
344 if fp.isatty():
344 if fp.isatty():
345 bufsize = 1 # line buffered
345 bufsize = 1 # line buffered
346 else:
346 else:
347 bufsize = -1 # system default
347 bufsize = -1 # system default
348 newfp = os.fdopen(fp.fileno(), mode, bufsize)
348 newfp = os.fdopen(fp.fileno(), mode, bufsize)
349 setattr(ui, fn, newfp)
349 setattr(ui, fn, newfp)
350 setattr(self, cn, newfp)
350 setattr(self, cn, newfp)
351
351
352 self.cresult.write(struct.pack('>i', len(clientfds)))
352 self.cresult.write(struct.pack('>i', len(clientfds)))
353
353
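# A client-side sketch of the fd-passing handshake above, assuming a
# connected AF_UNIX socket `sock` and Python 3's stdlib sendmsg() (names
# are illustrative, not part of this changeset):
import array
import socket
import struct

def sendstdiofds(sock):
    # wait for the server's 'I'-channel request with a 1-byte payload
    channel, size = struct.unpack('>cI', sock.recv(5))
    assert channel == b'I' and size == 1
    # pass stdin/stdout/stderr as SCM_RIGHTS ancillary data
    fds = array.array('i', [0, 1, 2])
    sock.sendmsg([b'\x00'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])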
354 def _saveio(self):
354 def _saveio(self):
355 if self._oldios:
355 if self._oldios:
356 return False
356 return False
357 ui = self.ui
357 ui = self.ui
358 for cn, fn, _mode in _iochannels:
358 for cn, fn, _mode in _iochannels:
359 ch = getattr(self, cn)
359 ch = getattr(self, cn)
360 fp = getattr(ui, fn)
360 fp = getattr(ui, fn)
361 fd = os.dup(fp.fileno())
361 fd = os.dup(fp.fileno())
362 self._oldios.append((ch, fp, fd))
362 self._oldios.append((ch, fp, fd))
363 return True
363 return True
364
364
365 def _restoreio(self):
365 def _restoreio(self):
366 ui = self.ui
366 ui = self.ui
367 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
367 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
368 newfp = getattr(ui, fn)
368 newfp = getattr(ui, fn)
369 # close newfp while it's associated with client; otherwise it
369 # close newfp while it's associated with client; otherwise it
370 # would be closed when newfp is deleted
370 # would be closed when newfp is deleted
371 if newfp is not fp:
371 if newfp is not fp:
372 newfp.close()
372 newfp.close()
373 # restore original fd: fp is open again
373 # restore original fd: fp is open again
374 os.dup2(fd, fp.fileno())
374 os.dup2(fd, fp.fileno())
375 os.close(fd)
375 os.close(fd)
376 setattr(self, cn, ch)
376 setattr(self, cn, ch)
377 setattr(ui, fn, fp)
377 setattr(ui, fn, fp)
378 del self._oldios[:]
378 del self._oldios[:]
379
379
380 def validate(self):
380 def validate(self):
381 """Reload the config and check if the server is up to date
381 """Reload the config and check if the server is up to date
382
382
383 Read a list of '\0' separated arguments.
383 Read a list of '\0' separated arguments.
384 Write a non-empty list of '\0' separated instruction strings or '\0'
384 Write a non-empty list of '\0' separated instruction strings or '\0'
385 if the list is empty.
385 if the list is empty.
386 An instruction string could be either:
386 An instruction string could be either:
387 - "unlink $path", the client should unlink the path to stop the
387 - "unlink $path", the client should unlink the path to stop the
388 outdated server.
388 outdated server.
389 - "redirect $path", the client should attempt to connect to $path
389 - "redirect $path", the client should attempt to connect to $path
390 first. If it does not work, start a new server. It implies
390 first. If it does not work, start a new server. It implies
391 "reconnect".
391 "reconnect".
392 - "exit $n", the client should exit directly with code n.
392 - "exit $n", the client should exit directly with code n.
393 This may happen if we cannot parse the config.
393 This may happen if we cannot parse the config.
394 - "reconnect", the client should close the connection and
394 - "reconnect", the client should close the connection and
395 reconnect.
395 reconnect.
396 If neither "reconnect" nor "redirect" is included in the instruction
396 If neither "reconnect" nor "redirect" is included in the instruction
397 list, the client can continue with this server after completing all
397 list, the client can continue with this server after completing all
398 the instructions.
398 the instructions.
399 """
399 """
400 from . import dispatch # avoid cycle
400 from . import dispatch # avoid cycle
401
401
402 args = self._readlist()
402 args = self._readlist()
403 try:
403 try:
404 self.ui, lui = _loadnewui(self.ui, args)
404 self.ui, lui = _loadnewui(self.ui, args)
405 except error.ParseError as inst:
405 except error.ParseError as inst:
406 dispatch._formatparse(self.ui.warn, inst)
406 dispatch._formatparse(self.ui.warn, inst)
407 self.ui.flush()
407 self.ui.flush()
408 self.cresult.write('exit 255')
408 self.cresult.write('exit 255')
409 return
409 return
410 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
410 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
411 insts = []
411 insts = []
412 if newhash.mtimehash != self.hashstate.mtimehash:
412 if newhash.mtimehash != self.hashstate.mtimehash:
413 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
413 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
414 insts.append('unlink %s' % addr)
414 insts.append('unlink %s' % addr)
415 # mtimehash is empty if one or more extensions fail to load.
415 # mtimehash is empty if one or more extensions fail to load.
416 # to be compatible with hg, still serve the client this time.
416 # to be compatible with hg, still serve the client this time.
417 if self.hashstate.mtimehash:
417 if self.hashstate.mtimehash:
418 insts.append('reconnect')
418 insts.append('reconnect')
419 if newhash.confighash != self.hashstate.confighash:
419 if newhash.confighash != self.hashstate.confighash:
420 addr = _hashaddress(self.baseaddress, newhash.confighash)
420 addr = _hashaddress(self.baseaddress, newhash.confighash)
421 insts.append('redirect %s' % addr)
421 insts.append('redirect %s' % addr)
422 _log('validate: %s\n' % insts)
422 _log('validate: %s\n' % insts)
423 self.cresult.write('\0'.join(insts) or '\0')
423 self.cresult.write('\0'.join(insts) or '\0')
424
424
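# A sketch of how a client might act on the instruction list written to the
# result channel above (`data` is the raw cresult payload; names illustrative):
def parseinstructions(data):
    insts = [s for s in data.split(b'\0') if s]
    unlink = [s.split(b' ', 1)[1] for s in insts if s.startswith(b'unlink ')]
    redirect = next((s.split(b' ', 1)[1] for s in insts
                     if s.startswith(b'redirect ')), None)
    # per the docstring, "redirect" implies "reconnect"
    reconnect = b'reconnect' in insts or redirect is not None
    return unlink, redirect, reconnect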
425 def chdir(self):
425 def chdir(self):
426 """Change current directory
426 """Change current directory
427
427
428 Note that the behavior of the --cwd option is a bit different from
428 Note that the behavior of the --cwd option is a bit different from
429 this: it does not affect the --config parameter.
429 this: it does not affect the --config parameter.
430 """
430 """
431 path = self._readstr()
431 path = self._readstr()
432 if not path:
432 if not path:
433 return
433 return
434 _log('chdir to %r\n' % path)
434 _log('chdir to %r\n' % path)
435 os.chdir(path)
435 os.chdir(path)
436
436
437 def setumask(self):
437 def setumask(self):
438 """Change umask"""
438 """Change umask"""
439 mask = struct.unpack('>I', self._read(4))[0]
439 mask = struct.unpack('>I', self._read(4))[0]
440 _log('setumask %r\n' % mask)
440 _log('setumask %r\n' % mask)
441 os.umask(mask)
441 os.umask(mask)
442
442
443 def runcommand(self):
443 def runcommand(self):
444 return super(chgcmdserver, self).runcommand()
444 return super(chgcmdserver, self).runcommand()
445
445
446 def setenv(self):
446 def setenv(self):
447 """Clear and update os.environ
447 """Clear and update os.environ
448
448
449 Note that not all variables take effect in the running process.
449 Note that not all variables take effect in the running process.
450 """
450 """
451 l = self._readlist()
451 l = self._readlist()
452 try:
452 try:
453 newenv = dict(s.split('=', 1) for s in l)
453 newenv = dict(s.split('=', 1) for s in l)
454 except ValueError:
454 except ValueError:
455 raise ValueError('unexpected value in setenv request')
455 raise ValueError('unexpected value in setenv request')
456 _log('setenv: %r\n' % sorted(newenv.keys()))
456 _log('setenv: %r\n' % sorted(newenv.keys()))
457 encoding.environ.clear()
457 encoding.environ.clear()
458 encoding.environ.update(newenv)
458 encoding.environ.update(newenv)
459
459
460 capabilities = commandserver.server.capabilities.copy()
460 capabilities = commandserver.server.capabilities.copy()
461 capabilities.update({'attachio': attachio,
461 capabilities.update({'attachio': attachio,
462 'chdir': chdir,
462 'chdir': chdir,
463 'runcommand': runcommand,
463 'runcommand': runcommand,
464 'setenv': setenv,
464 'setenv': setenv,
465 'setumask': setumask})
465 'setumask': setumask})
466
466
467 if util.safehasattr(osutil, 'setprocname'):
467 if util.safehasattr(osutil, 'setprocname'):
468 def setprocname(self):
468 def setprocname(self):
469 """Change process title"""
469 """Change process title"""
470 name = self._readstr()
470 name = self._readstr()
471 _log('setprocname: %r\n' % name)
471 _log('setprocname: %r\n' % name)
472 osutil.setprocname(name)
472 osutil.setprocname(name)
473 capabilities['setprocname'] = setprocname
473 capabilities['setprocname'] = setprocname
474
474
475 def _tempaddress(address):
475 def _tempaddress(address):
476 return '%s.%d.tmp' % (address, os.getpid())
476 return '%s.%d.tmp' % (address, os.getpid())
477
477
478 def _hashaddress(address, hashstr):
478 def _hashaddress(address, hashstr):
479 # if the basename of address contains '.', use only the part before it.
479 # if the basename of address contains '.', use only the part before it.
480 # this makes it possible for the client to pass 'server.tmp$PID', followed
480 # this makes it possible for the client to pass 'server.tmp$PID', followed
481 # by an atomic rename, to avoid locking when spawning new servers.
481 # by an atomic rename, to avoid locking when spawning new servers.
482 dirname, basename = os.path.split(address)
482 dirname, basename = os.path.split(address)
483 basename = basename.split('.', 1)[0]
483 basename = basename.split('.', 1)[0]
484 return '%s-%s' % (os.path.join(dirname, basename), hashstr)
484 return '%s-%s' % (os.path.join(dirname, basename), hashstr)
485
485
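# For example (hash value hypothetical), both address forms map to the same
# hashed address, which is what lets a temp name be renamed into place:
assert _hashaddress('/tmp/chg/server', 'd2bf5d4a') == '/tmp/chg/server-d2bf5d4a'
assert (_hashaddress('/tmp/chg/server.tmp1234', 'd2bf5d4a')
        == '/tmp/chg/server-d2bf5d4a')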
486 class chgunixservicehandler(object):
486 class chgunixservicehandler(object):
487 """Set of operations for chg services"""
487 """Set of operations for chg services"""
488
488
489 pollinterval = 1 # [sec]
489 pollinterval = 1 # [sec]
490
490
491 def __init__(self, ui):
491 def __init__(self, ui):
492 self.ui = ui
492 self.ui = ui
493 self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
493 self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
494 self._lastactive = time.time()
494 self._lastactive = time.time()
495
495
496 def bindsocket(self, sock, address):
496 def bindsocket(self, sock, address):
497 self._inithashstate(address)
497 self._inithashstate(address)
498 self._checkextensions()
498 self._checkextensions()
499 self._bind(sock)
499 self._bind(sock)
500 self._createsymlink()
500 self._createsymlink()
501
501
502 def _inithashstate(self, address):
502 def _inithashstate(self, address):
503 self._baseaddress = address
503 self._baseaddress = address
504 if self.ui.configbool('chgserver', 'skiphash', False):
504 if self.ui.configbool('chgserver', 'skiphash', False):
505 self._hashstate = None
505 self._hashstate = None
506 self._realaddress = address
506 self._realaddress = address
507 return
507 return
508 self._hashstate = hashstate.fromui(self.ui)
508 self._hashstate = hashstate.fromui(self.ui)
509 self._realaddress = _hashaddress(address, self._hashstate.confighash)
509 self._realaddress = _hashaddress(address, self._hashstate.confighash)
510
510
511 def _checkextensions(self):
511 def _checkextensions(self):
512 if not self._hashstate:
512 if not self._hashstate:
513 return
513 return
514 if extensions.notloaded():
514 if extensions.notloaded():
515 # one or more extensions failed to load. mtimehash becomes
515 # one or more extensions failed to load. mtimehash becomes
516 # meaningless because we do not know the paths of those extensions.
516 # meaningless because we do not know the paths of those extensions.
517 # set mtimehash to an illegal hash value to invalidate the server.
517 # set mtimehash to an illegal hash value to invalidate the server.
518 self._hashstate.mtimehash = ''
518 self._hashstate.mtimehash = ''
519
519
520 def _bind(self, sock):
520 def _bind(self, sock):
521 # use a unique temp address so we can stat the file and do an
521 # use a unique temp address so we can stat the file and do an
522 # ownership check later
522 # ownership check later
523 tempaddress = _tempaddress(self._realaddress)
523 tempaddress = _tempaddress(self._realaddress)
524 util.bindunixsocket(sock, tempaddress)
524 util.bindunixsocket(sock, tempaddress)
525 self._socketstat = os.stat(tempaddress)
525 self._socketstat = os.stat(tempaddress)
526 # rename atomically replaces the old socket file if it exists. the
526 # rename atomically replaces the old socket file if it exists. the
527 # old server will detect the ownership change and exit.
527 # old server will detect the ownership change and exit.
528 util.rename(tempaddress, self._realaddress)
528 util.rename(tempaddress, self._realaddress)
529
529
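# The same bind-then-rename pattern in isolation (a sketch; `path` is the
# final socket address):
import os
import socket

def bindatomically(path):
    tmp = '%s.%d.tmp' % (path, os.getpid())
    sock = socket.socket(socket.AF_UNIX)
    sock.bind(tmp)        # create the socket file under a unique temp name
    st = os.stat(tmp)     # remember inode/mtime for the ownership check
    os.rename(tmp, path)  # atomically replace any previous socket file
    return sock, st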
530 def _createsymlink(self):
530 def _createsymlink(self):
531 if self._baseaddress == self._realaddress:
531 if self._baseaddress == self._realaddress:
532 return
532 return
533 tempaddress = _tempaddress(self._baseaddress)
533 tempaddress = _tempaddress(self._baseaddress)
534 os.symlink(os.path.basename(self._realaddress), tempaddress)
534 os.symlink(os.path.basename(self._realaddress), tempaddress)
535 util.rename(tempaddress, self._baseaddress)
535 util.rename(tempaddress, self._baseaddress)
536
536
537 def _issocketowner(self):
537 def _issocketowner(self):
538 try:
538 try:
539 stat = os.stat(self._realaddress)
539 stat = os.stat(self._realaddress)
540 return (stat.st_ino == self._socketstat.st_ino and
540 return (stat.st_ino == self._socketstat.st_ino and
541 stat.st_mtime == self._socketstat.st_mtime)
541 stat.st_mtime == self._socketstat.st_mtime)
542 except OSError:
542 except OSError:
543 return False
543 return False
544
544
545 def unlinksocket(self, address):
545 def unlinksocket(self, address):
546 if not self._issocketowner():
546 if not self._issocketowner():
547 return
547 return
548 # there is a possible race condition here in which we may
548 # there is a possible race condition here in which we may
549 # remove another server's socket file. but that's okay
549 # remove another server's socket file. but that's okay
550 # since that server will detect the change and exit automatically,
550 # since that server will detect the change and exit automatically,
551 # and the client will start a new server on demand.
551 # and the client will start a new server on demand.
552 try:
552 try:
553 os.unlink(self._realaddress)
553 os.unlink(self._realaddress)
554 except OSError as exc:
554 except OSError as exc:
555 if exc.errno != errno.ENOENT:
555 if exc.errno != errno.ENOENT:
556 raise
556 raise
557
557
558 def printbanner(self, address):
558 def printbanner(self, address):
559 # no "listening at" message should be printed to simulate hg behavior
559 # no "listening at" message should be printed to simulate hg behavior
560 pass
560 pass
561
561
562 def shouldexit(self):
562 def shouldexit(self):
563 if not self._issocketowner():
563 if not self._issocketowner():
564 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
564 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
565 return True
565 return True
566 if time.time() - self._lastactive > self._idletimeout:
566 if time.time() - self._lastactive > self._idletimeout:
567 self.ui.debug('being idle too long. exiting.\n')
567 self.ui.debug('being idle too long. exiting.\n')
568 return True
568 return True
569 return False
569 return False
570
570
571 def newconnection(self):
571 def newconnection(self):
572 self._lastactive = time.time()
572 self._lastactive = time.time()
573
573
574 def createcmdserver(self, repo, conn, fin, fout):
574 def createcmdserver(self, repo, conn, fin, fout):
575 return chgcmdserver(self.ui, repo, fin, fout, conn,
575 return chgcmdserver(self.ui, repo, fin, fout, conn,
576 self._hashstate, self._baseaddress)
576 self._hashstate, self._baseaddress)
577
577
578 def chgunixservice(ui, repo, opts):
578 def chgunixservice(ui, repo, opts):
579 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
579 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
580 # start another chg. drop it to avoid possible side effects.
580 # start another chg. drop it to avoid possible side effects.
581 if 'CHGINTERNALMARK' in encoding.environ:
581 if 'CHGINTERNALMARK' in encoding.environ:
582 del encoding.environ['CHGINTERNALMARK']
582 del encoding.environ['CHGINTERNALMARK']
583
583
584 if repo:
584 if repo:
585 # one chgserver can serve multiple repos. drop repo information
585 # one chgserver can serve multiple repos. drop repo information
586 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
586 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
587 h = chgunixservicehandler(ui)
587 h = chgunixservicehandler(ui)
588 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
588 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
@@ -1,551 +1,551 b''
1 # commandserver.py - communicate with Mercurial's API over a pipe
1 # commandserver.py - communicate with Mercurial's API over a pipe
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import gc
11 import gc
12 import os
12 import os
13 import random
13 import random
14 import select
14 import select
15 import signal
15 import signal
16 import socket
16 import socket
17 import struct
17 import struct
18 import traceback
18 import traceback
19
19
20 from .i18n import _
20 from .i18n import _
21 from . import (
21 from . import (
22 encoding,
22 encoding,
23 error,
23 error,
24 pycompat,
24 pycompat,
25 util,
25 util,
26 )
26 )
27
27
28 logfile = None
28 logfile = None
29
29
30 def log(*args):
30 def log(*args):
31 if not logfile:
31 if not logfile:
32 return
32 return
33
33
34 for a in args:
34 for a in args:
35 logfile.write(str(a))
35 logfile.write(str(a))
36
36
37 logfile.flush()
37 logfile.flush()
38
38
39 class channeledoutput(object):
39 class channeledoutput(object):
40 """
40 """
41 Write data to out in the following format:
41 Write data to out in the following format:
42
42
43 data length (unsigned int),
43 data length (unsigned int),
44 data
44 data
45 """
45 """
46 def __init__(self, out, channel):
46 def __init__(self, out, channel):
47 self.out = out
47 self.out = out
48 self.channel = channel
48 self.channel = channel
49
49
50 @property
50 @property
51 def name(self):
51 def name(self):
52 return '<%c-channel>' % self.channel
52 return '<%c-channel>' % self.channel
53
53
54 def write(self, data):
54 def write(self, data):
55 if not data:
55 if not data:
56 return
56 return
57 # single write() to guarantee the same atomicity as the underlying file
57 # single write() to guarantee the same atomicity as the underlying file
58 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
58 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
59 self.out.flush()
59 self.out.flush()
60
60
61 def __getattr__(self, attr):
61 def __getattr__(self, attr):
62 if attr in ('isatty', 'fileno', 'tell', 'seek'):
62 if attr in ('isatty', 'fileno', 'tell', 'seek'):
63 raise AttributeError(attr)
63 raise AttributeError(attr)
64 return getattr(self.out, attr)
64 return getattr(self.out, attr)
65
65
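# What one channeledoutput frame looks like on the wire (a sketch for the
# 'o' channel):
import struct
frame = struct.pack('>cI', b'o', len(b'hello')) + b'hello'
assert frame == b'o\x00\x00\x00\x05hello'  # channel byte, length, data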
66 class channeledinput(object):
66 class channeledinput(object):
67 """
67 """
68 Read data from in_.
68 Read data from in_.
69
69
70 Requests for input are written to out in the following format:
70 Requests for input are written to out in the following format:
71 channel identifier - 'I' for plain input, 'L' line based (1 byte)
71 channel identifier - 'I' for plain input, 'L' line based (1 byte)
72 how many bytes to send at most (unsigned int),
72 how many bytes to send at most (unsigned int),
73
73
74 The client replies with:
74 The client replies with:
75 data length (unsigned int), 0 meaning EOF
75 data length (unsigned int), 0 meaning EOF
76 data
76 data
77 """
77 """
78
78
79 maxchunksize = 4 * 1024
79 maxchunksize = 4 * 1024
80
80
81 def __init__(self, in_, out, channel):
81 def __init__(self, in_, out, channel):
82 self.in_ = in_
82 self.in_ = in_
83 self.out = out
83 self.out = out
84 self.channel = channel
84 self.channel = channel
85
85
86 @property
86 @property
87 def name(self):
87 def name(self):
88 return '<%c-channel>' % self.channel
88 return '<%c-channel>' % self.channel
89
89
90 def read(self, size=-1):
90 def read(self, size=-1):
91 if size < 0:
91 if size < 0:
92 # if we need to consume all of the client's input, ask for 4k chunks
92 # if we need to consume all of the client's input, ask for 4k chunks
93 # so the pipe doesn't fill up, risking a deadlock
93 # so the pipe doesn't fill up, risking a deadlock
94 size = self.maxchunksize
94 size = self.maxchunksize
95 s = self._read(size, self.channel)
95 s = self._read(size, self.channel)
96 buf = s
96 buf = s
97 while s:
97 while s:
98 s = self._read(size, self.channel)
98 s = self._read(size, self.channel)
99 buf += s
99 buf += s
100
100
101 return buf
101 return buf
102 else:
102 else:
103 return self._read(size, self.channel)
103 return self._read(size, self.channel)
104
104
105 def _read(self, size, channel):
105 def _read(self, size, channel):
106 if not size:
106 if not size:
107 return ''
107 return ''
108 assert size > 0
108 assert size > 0
109
109
110 # tell the client we need at most size bytes
110 # tell the client we need at most size bytes
111 self.out.write(struct.pack('>cI', channel, size))
111 self.out.write(struct.pack('>cI', channel, size))
112 self.out.flush()
112 self.out.flush()
113
113
114 length = self.in_.read(4)
114 length = self.in_.read(4)
115 length = struct.unpack('>I', length)[0]
115 length = struct.unpack('>I', length)[0]
116 if not length:
116 if not length:
117 return ''
117 return ''
118 else:
118 else:
119 return self.in_.read(length)
119 return self.in_.read(length)
120
120
121 def readline(self, size=-1):
121 def readline(self, size=-1):
122 if size < 0:
122 if size < 0:
123 size = self.maxchunksize
123 size = self.maxchunksize
124 s = self._read(size, 'L')
124 s = self._read(size, 'L')
125 buf = s
125 buf = s
126 # keep asking for more until there's either no more or
126 # keep asking for more until there's either no more or
127 # we get a full line
127 # we get a full line
128 while s and s[-1] != '\n':
128 while s and s[-1] != '\n':
129 s = self._read(size, 'L')
129 s = self._read(size, 'L')
130 buf += s
130 buf += s
131
131
132 return buf
132 return buf
133 else:
133 else:
134 return self._read(size, 'L')
134 return self._read(size, 'L')
135
135
136 def __iter__(self):
136 def __iter__(self):
137 return self
137 return self
138
138
139 def next(self):
139 def next(self):
140 l = self.readline()
140 l = self.readline()
141 if not l:
141 if not l:
142 raise StopIteration
142 raise StopIteration
143 return l
143 return l
144
144
145 def __getattr__(self, attr):
145 def __getattr__(self, attr):
146 if attr in ('isatty', 'fileno', 'tell', 'seek'):
146 if attr in ('isatty', 'fileno', 'tell', 'seek'):
147 raise AttributeError(attr)
147 raise AttributeError(attr)
148 return getattr(self.in_, attr)
148 return getattr(self.in_, attr)
149
149
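# A sketch of the client side of this input protocol: answer one request
# frame read from `fin` by writing a length-prefixed reply to `fout`
# (a zero length signals EOF; names illustrative):
import struct

def answerinputrequest(fin, fout, source):
    channel, size = struct.unpack('>cI', fin.read(5))
    assert channel in (b'I', b'L')
    data = source.readline(size) if channel == b'L' else source.read(size)
    fout.write(struct.pack('>I', len(data)) + data)
    fout.flush()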
150 class server(object):
150 class server(object):
151 """
151 """
152 Listens for commands on fin, runs them, and writes the output on a
152 Listens for commands on fin, runs them, and writes the output on a
153 channel-based stream to fout.
153 channel-based stream to fout.
154 """
154 """
155 def __init__(self, ui, repo, fin, fout):
155 def __init__(self, ui, repo, fin, fout):
156 self.cwd = pycompat.getcwd()
156 self.cwd = pycompat.getcwd()
157
157
158 # developer config: cmdserver.log
158 # developer config: cmdserver.log
159 logpath = ui.config("cmdserver", "log", None)
159 logpath = ui.config("cmdserver", "log", None)
160 if logpath:
160 if logpath:
161 global logfile
161 global logfile
162 if logpath == '-':
162 if logpath == '-':
163 # write log on a special 'd' (debug) channel
163 # write log on a special 'd' (debug) channel
164 logfile = channeledoutput(fout, 'd')
164 logfile = channeledoutput(fout, 'd')
165 else:
165 else:
166 logfile = open(logpath, 'a')
166 logfile = open(logpath, 'a')
167
167
168 if repo:
168 if repo:
169 # the ui here is really the repo ui, so take its baseui so we don't
169 # the ui here is really the repo ui, so take its baseui so we don't
170 # end up with the repo's local configuration
170 # end up with the repo's local configuration
171 self.ui = repo.baseui
171 self.ui = repo.baseui
172 self.repo = repo
172 self.repo = repo
173 self.repoui = repo.ui
173 self.repoui = repo.ui
174 else:
174 else:
175 self.ui = ui
175 self.ui = ui
176 self.repo = self.repoui = None
176 self.repo = self.repoui = None
177
177
178 self.cerr = channeledoutput(fout, 'e')
178 self.cerr = channeledoutput(fout, 'e')
179 self.cout = channeledoutput(fout, 'o')
179 self.cout = channeledoutput(fout, 'o')
180 self.cin = channeledinput(fin, fout, 'I')
180 self.cin = channeledinput(fin, fout, 'I')
181 self.cresult = channeledoutput(fout, 'r')
181 self.cresult = channeledoutput(fout, 'r')
182
182
183 self.client = fin
183 self.client = fin
184
184
185 def cleanup(self):
185 def cleanup(self):
186 """release and restore resources taken during server session"""
186 """release and restore resources taken during server session"""
187 pass
187 pass
188
188
189 def _read(self, size):
189 def _read(self, size):
190 if not size:
190 if not size:
191 return ''
191 return ''
192
192
193 data = self.client.read(size)
193 data = self.client.read(size)
194
194
195 # is the other end closed?
195 # is the other end closed?
196 if not data:
196 if not data:
197 raise EOFError
197 raise EOFError
198
198
199 return data
199 return data
200
200
201 def _readstr(self):
201 def _readstr(self):
202 """read a string from the channel
202 """read a string from the channel
203
203
204 format:
204 format:
205 data length (uint32), data
205 data length (uint32), data
206 """
206 """
207 length = struct.unpack('>I', self._read(4))[0]
207 length = struct.unpack('>I', self._read(4))[0]
208 if not length:
208 if not length:
209 return ''
209 return ''
210 return self._read(length)
210 return self._read(length)
211
211
212 def _readlist(self):
212 def _readlist(self):
213 """read a list of NULL separated strings from the channel"""
213 """read a list of NULL separated strings from the channel"""
214 s = self._readstr()
214 s = self._readstr()
215 if s:
215 if s:
216 return s.split('\0')
216 return s.split('\0')
217 else:
217 else:
218 return []
218 return []
219
219
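# The matching encoders a client would use to feed _readstr()/_readlist()
# above (a sketch):
import struct

def encodestr(s):
    return struct.pack('>I', len(s)) + s

def encodelist(l):
    return encodestr(b'\0'.join(l))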
220 def runcommand(self):
220 def runcommand(self):
221 """ reads a list of \0 terminated arguments, executes
221 """ reads a list of \0 terminated arguments, executes
222 and writes the return code to the result channel """
222 and writes the return code to the result channel """
223 from . import dispatch # avoid cycle
223 from . import dispatch # avoid cycle
224
224
225 args = self._readlist()
225 args = self._readlist()
226
226
227 # copy the uis so changes (e.g. --config or --verbose) don't
227 # copy the uis so changes (e.g. --config or --verbose) don't
228 # persist between requests
228 # persist between requests
229 copiedui = self.ui.copy()
229 copiedui = self.ui.copy()
230 uis = [copiedui]
230 uis = [copiedui]
231 if self.repo:
231 if self.repo:
232 self.repo.baseui = copiedui
232 self.repo.baseui = copiedui
233 # clone ui without using ui.copy because this is protected
233 # clone ui without using ui.copy because this is protected
234 repoui = self.repoui.__class__(self.repoui)
234 repoui = self.repoui.__class__(self.repoui)
235 repoui.copy = copiedui.copy # redo copy protection
235 repoui.copy = copiedui.copy # redo copy protection
236 uis.append(repoui)
236 uis.append(repoui)
237 self.repo.ui = self.repo.dirstate._ui = repoui
237 self.repo.ui = self.repo.dirstate._ui = repoui
238 self.repo.invalidateall()
238 self.repo.invalidateall()
239
239
240 for ui in uis:
240 for ui in uis:
241 ui.resetstate()
241 ui.resetstate()
242 # any kind of interaction must use server channels, but chg may
242 # any kind of interaction must use server channels, but chg may
243 # replace channels with fully functional tty files. so nontty is
243 # replace channels with fully functional tty files. so nontty is
244 # enforced only if cin is a channel.
244 # enforced only if cin is a channel.
245 if not util.safehasattr(self.cin, 'fileno'):
245 if not util.safehasattr(self.cin, 'fileno'):
246 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
246 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
247
247
248 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
248 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
249 self.cout, self.cerr)
249 self.cout, self.cerr)
250
250
251 ret = (dispatch.dispatch(req) or 0) & 255 # might return None
251 ret = (dispatch.dispatch(req) or 0) & 255 # might return None
252
252
253 # restore old cwd
253 # restore old cwd
254 if '--cwd' in args:
254 if '--cwd' in args:
255 os.chdir(self.cwd)
255 os.chdir(self.cwd)
256
256
257 self.cresult.write(struct.pack('>i', int(ret)))
257 self.cresult.write(struct.pack('>i', int(ret)))
258
258
259 def getencoding(self):
259 def getencoding(self):
260 """ writes the current encoding to the result channel """
260 """ writes the current encoding to the result channel """
261 self.cresult.write(encoding.encoding)
261 self.cresult.write(encoding.encoding)
262
262
263 def serveone(self):
263 def serveone(self):
264 cmd = self.client.readline()[:-1]
264 cmd = self.client.readline()[:-1]
265 if cmd:
265 if cmd:
266 handler = self.capabilities.get(cmd)
266 handler = self.capabilities.get(cmd)
267 if handler:
267 if handler:
268 handler(self)
268 handler(self)
269 else:
269 else:
270 # clients are expected to check what commands are supported by
270 # clients are expected to check what commands are supported by
271 # looking at the server's capabilities
271 # looking at the server's capabilities
272 raise error.Abort(_('unknown command %s') % cmd)
272 raise error.Abort(_('unknown command %s') % cmd)
273
273
274 return cmd != ''
274 return cmd != ''
275
275
276 capabilities = {'runcommand' : runcommand,
276 capabilities = {'runcommand' : runcommand,
277 'getencoding' : getencoding}
277 'getencoding' : getencoding}
278
278
279 def serve(self):
279 def serve(self):
280 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
280 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
281 hellomsg += '\n'
281 hellomsg += '\n'
282 hellomsg += 'encoding: ' + encoding.encoding
282 hellomsg += 'encoding: ' + encoding.encoding
283 hellomsg += '\n'
283 hellomsg += '\n'
284 hellomsg += 'pid: %d' % util.getpid()
284 hellomsg += 'pid: %d' % util.getpid()
285 if util.safehasattr(os, 'getpgid'):
285 if util.safehasattr(os, 'getpgid'):
286 hellomsg += '\n'
286 hellomsg += '\n'
287 hellomsg += 'pgid: %d' % os.getpgid(0)
287 hellomsg += 'pgid: %d' % os.getpgid(0)
288
288
289 # write the hello msg in -one- chunk
289 # write the hello msg in -one- chunk
290 self.cout.write(hellomsg)
290 self.cout.write(hellomsg)
291
291
292 try:
292 try:
293 while self.serveone():
293 while self.serveone():
294 pass
294 pass
295 except EOFError:
295 except EOFError:
296 # we'll get here if the client disconnected while we were reading
296 # we'll get here if the client disconnected while we were reading
297 # its request
297 # its request
298 return 1
298 return 1
299
299
300 return 0
300 return 0
301
301
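# The hello message is 'field: value' lines sent in one 'o'-channel chunk;
# a client can parse it into a dict (a sketch):
def parsehello(data):
    # e.g. b'capabilities: getencoding runcommand\nencoding: UTF-8\npid: 42'
    fields = dict(l.split(b': ', 1) for l in data.splitlines())
    return fields[b'capabilities'].split(), fields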
302 def _protectio(ui):
302 def _protectio(ui):
303 """ duplicates streams and redirect original to null if ui uses stdio """
303 """ duplicates streams and redirect original to null if ui uses stdio """
304 ui.flush()
304 ui.flush()
305 newfiles = []
305 newfiles = []
306 nullfd = os.open(os.devnull, os.O_RDWR)
306 nullfd = os.open(os.devnull, os.O_RDWR)
307 for f, sysf, mode in [(ui.fin, util.stdin, 'rb'),
307 for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
308 (ui.fout, util.stdout, 'wb')]:
308 (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
309 if f is sysf:
309 if f is sysf:
310 newfd = os.dup(f.fileno())
310 newfd = os.dup(f.fileno())
311 os.dup2(nullfd, f.fileno())
311 os.dup2(nullfd, f.fileno())
312 f = os.fdopen(newfd, mode)
312 f = os.fdopen(newfd, mode)
313 newfiles.append(f)
313 newfiles.append(f)
314 os.close(nullfd)
314 os.close(nullfd)
315 return tuple(newfiles)
315 return tuple(newfiles)
316
316
317 def _restoreio(ui, fin, fout):
317 def _restoreio(ui, fin, fout):
318 """ restores streams from duplicated ones """
318 """ restores streams from duplicated ones """
319 ui.flush()
319 ui.flush()
320 for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
320 for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
321 if f is not uif:
321 if f is not uif:
322 os.dup2(f.fileno(), uif.fileno())
322 os.dup2(f.fileno(), uif.fileno())
323 f.close()
323 f.close()
324
324
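# The underlying fd dance in isolation (a sketch): park the real stdout on a
# duplicate descriptor while fd 1 points at the null device, then restore it.
import os

def protectstdout():
    nullfd = os.open(os.devnull, os.O_RDWR)
    savedfd = os.dup(1)  # keep a handle on the real stdout
    os.dup2(nullfd, 1)   # fd 1 now writes to the null device
    os.close(nullfd)
    return savedfd

def restorestdout(savedfd):
    os.dup2(savedfd, 1)  # fd 1 points at the real stdout again
    os.close(savedfd)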
325 class pipeservice(object):
325 class pipeservice(object):
326 def __init__(self, ui, repo, opts):
326 def __init__(self, ui, repo, opts):
327 self.ui = ui
327 self.ui = ui
328 self.repo = repo
328 self.repo = repo
329
329
330 def init(self):
330 def init(self):
331 pass
331 pass
332
332
333 def run(self):
333 def run(self):
334 ui = self.ui
334 ui = self.ui
335 # redirect stdio to the null device so that broken extensions or
335 # redirect stdio to the null device so that broken extensions or
336 # in-process hooks can never corrupt the channel protocol.
336 # in-process hooks can never corrupt the channel protocol.
337 fin, fout = _protectio(ui)
337 fin, fout = _protectio(ui)
338 try:
338 try:
339 sv = server(ui, self.repo, fin, fout)
339 sv = server(ui, self.repo, fin, fout)
340 return sv.serve()
340 return sv.serve()
341 finally:
341 finally:
342 sv.cleanup()
342 sv.cleanup()
343 _restoreio(ui, fin, fout)
343 _restoreio(ui, fin, fout)
344
344
345 def _initworkerprocess():
345 def _initworkerprocess():
346 # use a different process group from the master process, in order to:
346 # use a different process group from the master process, in order to:
347 # 1. make the current process group no longer "orphaned" (because the
347 # 1. make the current process group no longer "orphaned" (because the
348 # parent of this process is in a different process group while
348 # parent of this process is in a different process group while
349 # remaining in the same session)
349 # remaining in the same session)
350 # according to POSIX 2.2.2.52, an orphaned process group will ignore
350 # according to POSIX 2.2.2.52, an orphaned process group will ignore
351 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
351 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
352 # cause trouble for things like ncurses.
352 # cause trouble for things like ncurses.
353 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
353 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
354 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
354 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
355 # processes like ssh will be killed properly, without affecting
355 # processes like ssh will be killed properly, without affecting
356 # unrelated processes.
356 # unrelated processes.
357 os.setpgid(0, 0)
357 os.setpgid(0, 0)
358 # reseed the random state; otherwise forked request handlers would
358 # reseed the random state; otherwise forked request handlers would
359 # share the same state inherited from the parent.
359 # share the same state inherited from the parent.
360 random.seed()
360 random.seed()
361
361
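# Point 2 above from the client's perspective (a sketch): signal the worker's
# whole process group, e.g. to forward Ctrl+C, without touching other groups.
import os
import signal

def forwardsigint(workerpid):
    os.killpg(os.getpgid(workerpid), signal.SIGINT)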
362 def _serverequest(ui, repo, conn, createcmdserver):
362 def _serverequest(ui, repo, conn, createcmdserver):
363 fin = conn.makefile('rb')
363 fin = conn.makefile('rb')
364 fout = conn.makefile('wb')
364 fout = conn.makefile('wb')
365 sv = None
365 sv = None
366 try:
366 try:
367 sv = createcmdserver(repo, conn, fin, fout)
367 sv = createcmdserver(repo, conn, fin, fout)
368 try:
368 try:
369 sv.serve()
369 sv.serve()
370 # handle exceptions that may be raised by command server. most of
370 # handle exceptions that may be raised by command server. most of
371 # known exceptions are caught by dispatch.
371 # known exceptions are caught by dispatch.
372 except error.Abort as inst:
372 except error.Abort as inst:
373 ui.warn(_('abort: %s\n') % inst)
373 ui.warn(_('abort: %s\n') % inst)
374 except IOError as inst:
374 except IOError as inst:
375 if inst.errno != errno.EPIPE:
375 if inst.errno != errno.EPIPE:
376 raise
376 raise
377 except KeyboardInterrupt:
377 except KeyboardInterrupt:
378 pass
378 pass
379 finally:
379 finally:
380 sv.cleanup()
380 sv.cleanup()
381 except: # re-raises
381 except: # re-raises
382 # also write traceback to error channel. otherwise client cannot
382 # also write traceback to error channel. otherwise client cannot
383 # see it because it is written to server's stderr by default.
383 # see it because it is written to server's stderr by default.
384 if sv:
384 if sv:
385 cerr = sv.cerr
385 cerr = sv.cerr
386 else:
386 else:
387 cerr = channeledoutput(fout, 'e')
387 cerr = channeledoutput(fout, 'e')
388 traceback.print_exc(file=cerr)
388 traceback.print_exc(file=cerr)
389 raise
389 raise
390 finally:
390 finally:
391 fin.close()
391 fin.close()
392 try:
392 try:
393 fout.close() # implicit flush() may cause another EPIPE
393 fout.close() # implicit flush() may cause another EPIPE
394 except IOError as inst:
394 except IOError as inst:
395 if inst.errno != errno.EPIPE:
395 if inst.errno != errno.EPIPE:
396 raise
396 raise
397
397
398 class unixservicehandler(object):
398 class unixservicehandler(object):
399 """Set of pluggable operations for unix-mode services
399 """Set of pluggable operations for unix-mode services
400
400
401 Almost all methods except for createcmdserver() are called in the main
401 Almost all methods except for createcmdserver() are called in the main
402 process. You can't pass a mutable resource back from createcmdserver().
402 process. You can't pass a mutable resource back from createcmdserver().
403 """
403 """
404
404
405 pollinterval = None
405 pollinterval = None
406
406
407 def __init__(self, ui):
407 def __init__(self, ui):
408 self.ui = ui
408 self.ui = ui
409
409
410 def bindsocket(self, sock, address):
410 def bindsocket(self, sock, address):
411 util.bindunixsocket(sock, address)
411 util.bindunixsocket(sock, address)
412
412
413 def unlinksocket(self, address):
413 def unlinksocket(self, address):
414 os.unlink(address)
414 os.unlink(address)
415
415
416 def printbanner(self, address):
416 def printbanner(self, address):
417 self.ui.status(_('listening at %s\n') % address)
417 self.ui.status(_('listening at %s\n') % address)
418 self.ui.flush() # avoid buffering of status message
418 self.ui.flush() # avoid buffering of status message
419
419
420 def shouldexit(self):
420 def shouldexit(self):
421 """True if server should shut down; checked per pollinterval"""
421 """True if server should shut down; checked per pollinterval"""
422 return False
422 return False
423
423
424 def newconnection(self):
424 def newconnection(self):
425 """Called when main process notices new connection"""
425 """Called when main process notices new connection"""
426 pass
426 pass
427
427
428 def createcmdserver(self, repo, conn, fin, fout):
428 def createcmdserver(self, repo, conn, fin, fout):
429 """Create new command server instance; called in the process that
429 """Create new command server instance; called in the process that
430 serves for the current connection"""
430 serves for the current connection"""
431 return server(self.ui, repo, fin, fout)
431 return server(self.ui, repo, fin, fout)
432
432
433 class unixforkingservice(object):
433 class unixforkingservice(object):
434 """
434 """
435 Listens on a unix domain socket and forks a server per connection
435 Listens on a unix domain socket and forks a server per connection
436 """
436 """
437
437
438 def __init__(self, ui, repo, opts, handler=None):
438 def __init__(self, ui, repo, opts, handler=None):
439 self.ui = ui
439 self.ui = ui
440 self.repo = repo
440 self.repo = repo
441 self.address = opts['address']
441 self.address = opts['address']
442 if not util.safehasattr(socket, 'AF_UNIX'):
442 if not util.safehasattr(socket, 'AF_UNIX'):
443 raise error.Abort(_('unsupported platform'))
443 raise error.Abort(_('unsupported platform'))
444 if not self.address:
444 if not self.address:
445 raise error.Abort(_('no socket path specified with --address'))
445 raise error.Abort(_('no socket path specified with --address'))
446 self._servicehandler = handler or unixservicehandler(ui)
446 self._servicehandler = handler or unixservicehandler(ui)
447 self._sock = None
447 self._sock = None
448 self._oldsigchldhandler = None
448 self._oldsigchldhandler = None
449 self._workerpids = set() # updated by signal handler; do not iterate
449 self._workerpids = set() # updated by signal handler; do not iterate
450 self._socketunlinked = None
450 self._socketunlinked = None
451
451
452 def init(self):
452 def init(self):
453 self._sock = socket.socket(socket.AF_UNIX)
453 self._sock = socket.socket(socket.AF_UNIX)
454 self._servicehandler.bindsocket(self._sock, self.address)
454 self._servicehandler.bindsocket(self._sock, self.address)
455 self._sock.listen(socket.SOMAXCONN)
455 self._sock.listen(socket.SOMAXCONN)
456 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
456 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
457 self._oldsigchldhandler = o
457 self._oldsigchldhandler = o
458 self._servicehandler.printbanner(self.address)
458 self._servicehandler.printbanner(self.address)
459 self._socketunlinked = False
459 self._socketunlinked = False
460
460
461 def _unlinksocket(self):
461 def _unlinksocket(self):
462 if not self._socketunlinked:
462 if not self._socketunlinked:
463 self._servicehandler.unlinksocket(self.address)
463 self._servicehandler.unlinksocket(self.address)
464 self._socketunlinked = True
464 self._socketunlinked = True
465
465
466 def _cleanup(self):
466 def _cleanup(self):
467 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
467 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
468 self._sock.close()
468 self._sock.close()
469 self._unlinksocket()
469 self._unlinksocket()
470 # don't kill child processes as they have active clients, just wait
470 # don't kill child processes as they have active clients, just wait
471 self._reapworkers(0)
471 self._reapworkers(0)
472
472
473 def run(self):
473 def run(self):
474 try:
474 try:
475 self._mainloop()
475 self._mainloop()
476 finally:
476 finally:
477 self._cleanup()
477 self._cleanup()
478
478
479 def _mainloop(self):
479 def _mainloop(self):
480 exiting = False
480 exiting = False
481 h = self._servicehandler
481 h = self._servicehandler
482 while True:
482 while True:
483 if not exiting and h.shouldexit():
483 if not exiting and h.shouldexit():
484 # clients can no longer connect() to the domain socket, so
484 # clients can no longer connect() to the domain socket, so
485 # we stop queuing new requests.
485 # we stop queuing new requests.
486 # for requests that are queued (connect()-ed, but haven't been
486 # for requests that are queued (connect()-ed, but haven't been
487 # accept()-ed), handle them before exit. otherwise, clients
487 # accept()-ed), handle them before exit. otherwise, clients
488 # waiting for recv() will receive ECONNRESET.
488 # waiting for recv() will receive ECONNRESET.
489 self._unlinksocket()
489 self._unlinksocket()
490 exiting = True
490 exiting = True
491 try:
491 try:
492 ready = select.select([self._sock], [], [], h.pollinterval)[0]
492 ready = select.select([self._sock], [], [], h.pollinterval)[0]
493 if not ready:
493 if not ready:
494 # only exit if we completed all queued requests
494 # only exit if we completed all queued requests
495 if exiting:
495 if exiting:
496 break
496 break
497 continue
497 continue
498 conn, _addr = self._sock.accept()
498 conn, _addr = self._sock.accept()
499 except (select.error, socket.error) as inst:
499 except (select.error, socket.error) as inst:
500 if inst.args[0] == errno.EINTR:
500 if inst.args[0] == errno.EINTR:
501 continue
501 continue
502 raise
502 raise
503
503
504 pid = os.fork()
504 pid = os.fork()
505 if pid:
505 if pid:
506 try:
506 try:
507 self.ui.debug('forked worker process (pid=%d)\n' % pid)
507 self.ui.debug('forked worker process (pid=%d)\n' % pid)
508 self._workerpids.add(pid)
508 self._workerpids.add(pid)
509 h.newconnection()
509 h.newconnection()
510 finally:
510 finally:
511 conn.close() # release handle in parent process
511 conn.close() # release handle in parent process
512 else:
512 else:
513 try:
513 try:
514 self._runworker(conn)
514 self._runworker(conn)
515 conn.close()
515 conn.close()
516 os._exit(0)
516 os._exit(0)
517 except: # never return, hence no re-raises
517 except: # never return, hence no re-raises
518 try:
518 try:
519 self.ui.traceback(force=True)
519 self.ui.traceback(force=True)
520 finally:
520 finally:
521 os._exit(255)
521 os._exit(255)
522
522
523 def _sigchldhandler(self, signal, frame):
523 def _sigchldhandler(self, signal, frame):
524 self._reapworkers(os.WNOHANG)
524 self._reapworkers(os.WNOHANG)
525
525
526 def _reapworkers(self, options):
526 def _reapworkers(self, options):
527 while self._workerpids:
527 while self._workerpids:
528 try:
528 try:
529 pid, _status = os.waitpid(-1, options)
529 pid, _status = os.waitpid(-1, options)
530 except OSError as inst:
530 except OSError as inst:
531 if inst.errno == errno.EINTR:
531 if inst.errno == errno.EINTR:
532 continue
532 continue
533 if inst.errno != errno.ECHILD:
533 if inst.errno != errno.ECHILD:
534 raise
534 raise
535 # no child processes at all (reaped by other waitpid()?)
535 # no child processes at all (reaped by other waitpid()?)
536 self._workerpids.clear()
536 self._workerpids.clear()
537 return
537 return
538 if pid == 0:
538 if pid == 0:
539 # no waitable child processes
539 # no waitable child processes
540 return
540 return
541 self.ui.debug('worker process exited (pid=%d)\n' % pid)
541 self.ui.debug('worker process exited (pid=%d)\n' % pid)
542 self._workerpids.discard(pid)
542 self._workerpids.discard(pid)
543
543
544 def _runworker(self, conn):
544 def _runworker(self, conn):
545 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
545 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
546 _initworkerprocess()
546 _initworkerprocess()
547 h = self._servicehandler
547 h = self._servicehandler
548 try:
548 try:
549 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
549 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
550 finally:
550 finally:
551 gc.collect() # trigger __del__ since worker process uses os._exit
551 gc.collect() # trigger __del__ since worker process uses os._exit
@@ -1,718 +1,718 b''
1 # filemerge.py - file-level merge handling for Mercurial
1 # filemerge.py - file-level merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import filecmp
10 import filecmp
11 import os
11 import os
12 import re
12 import re
13 import tempfile
13 import tempfile
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid, short
16 from .node import nullid, short
17
17
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 formatter,
21 formatter,
22 match,
22 match,
23 pycompat,
23 pycompat,
24 scmutil,
24 scmutil,
25 simplemerge,
25 simplemerge,
26 tagmerge,
26 tagmerge,
27 templatekw,
27 templatekw,
28 templater,
28 templater,
29 util,
29 util,
30 )
30 )
31
31
32 def _toolstr(ui, tool, part, default=""):
32 def _toolstr(ui, tool, part, default=""):
33 return ui.config("merge-tools", tool + "." + part, default)
33 return ui.config("merge-tools", tool + "." + part, default)
34
34
35 def _toolbool(ui, tool, part, default=False):
35 def _toolbool(ui, tool, part, default=False):
36 return ui.configbool("merge-tools", tool + "." + part, default)
36 return ui.configbool("merge-tools", tool + "." + part, default)
37
37
38 def _toollist(ui, tool, part, default=[]):
38 def _toollist(ui, tool, part, default=[]):
39 return ui.configlist("merge-tools", tool + "." + part, default)
39 return ui.configlist("merge-tools", tool + "." + part, default)
40
40
41 internals = {}
41 internals = {}
42 # Merge tools to document.
42 # Merge tools to document.
43 internalsdoc = {}
43 internalsdoc = {}
44
44
45 # internal tool merge types
45 # internal tool merge types
46 nomerge = None
46 nomerge = None
47 mergeonly = 'mergeonly' # just the full merge, no premerge
47 mergeonly = 'mergeonly' # just the full merge, no premerge
48 fullmerge = 'fullmerge' # both premerge and merge
48 fullmerge = 'fullmerge' # both premerge and merge
49
49
50 class absentfilectx(object):
50 class absentfilectx(object):
51 """Represents a file that's ostensibly in a context but is actually not
51 """Represents a file that's ostensibly in a context but is actually not
52 present in it.
52 present in it.
53
53
54 This is here because it's very specific to the filemerge code for now --
54 This is here because it's very specific to the filemerge code for now --
55 other code is likely going to break with the values this returns."""
55 other code is likely going to break with the values this returns."""
56 def __init__(self, ctx, f):
56 def __init__(self, ctx, f):
57 self._ctx = ctx
57 self._ctx = ctx
58 self._f = f
58 self._f = f
59
59
60 def path(self):
60 def path(self):
61 return self._f
61 return self._f
62
62
63 def size(self):
63 def size(self):
64 return None
64 return None
65
65
66 def data(self):
66 def data(self):
67 return None
67 return None
68
68
69 def filenode(self):
69 def filenode(self):
70 return nullid
70 return nullid
71
71
72 _customcmp = True
72 _customcmp = True
73 def cmp(self, fctx):
73 def cmp(self, fctx):
74 """compare with other file context
74 """compare with other file context
75
75
76 returns True if different from fctx.
76 returns True if different from fctx.
77 """
77 """
78 return not (fctx.isabsent() and
78 return not (fctx.isabsent() and
79 fctx.ctx() == self.ctx() and
79 fctx.ctx() == self.ctx() and
80 fctx.path() == self.path())
80 fctx.path() == self.path())
81
81
82 def flags(self):
82 def flags(self):
83 return ''
83 return ''
84
84
85 def changectx(self):
85 def changectx(self):
86 return self._ctx
86 return self._ctx
87
87
88 def isbinary(self):
88 def isbinary(self):
89 return False
89 return False
90
90
91 def isabsent(self):
91 def isabsent(self):
92 return True
92 return True
93
93
94 def internaltool(name, mergetype, onfailure=None, precheck=None):
94 def internaltool(name, mergetype, onfailure=None, precheck=None):
95 '''return a decorator for populating internal merge tool table'''
95 '''return a decorator for populating internal merge tool table'''
96 def decorator(func):
96 def decorator(func):
97 fullname = ':' + name
97 fullname = ':' + name
98 func.__doc__ = (pycompat.sysstr("``%s``\n" % fullname)
98 func.__doc__ = (pycompat.sysstr("``%s``\n" % fullname)
99 + func.__doc__.strip())
99 + func.__doc__.strip())
100 internals[fullname] = func
100 internals[fullname] = func
101 internals['internal:' + name] = func
101 internals['internal:' + name] = func
102 internalsdoc[fullname] = func
102 internalsdoc[fullname] = func
103 func.mergetype = mergetype
103 func.mergetype = mergetype
104 func.onfailure = onfailure
104 func.onfailure = onfailure
105 func.precheck = precheck
105 func.precheck = precheck
106 return func
106 return func
107 return decorator
107 return decorator
108
108
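# A sketch of what the decorator above does for a hypothetical tool; the
# argument list is illustrative, not the real merge-tool signature:
@internaltool('example', nomerge)
def _iexample(*args, **kwargs):
    """Hypothetical tool, registered as ``:example`` and ``internal:example``."""
    return None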
109 def _findtool(ui, tool):
109 def _findtool(ui, tool):
110 if tool in internals:
110 if tool in internals:
111 return tool
111 return tool
112 return findexternaltool(ui, tool)
112 return findexternaltool(ui, tool)
113
113
114 def findexternaltool(ui, tool):
114 def findexternaltool(ui, tool):
115 for kn in ("regkey", "regkeyalt"):
115 for kn in ("regkey", "regkeyalt"):
116 k = _toolstr(ui, tool, kn)
116 k = _toolstr(ui, tool, kn)
117 if not k:
117 if not k:
118 continue
118 continue
119 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
119 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
120 if p:
120 if p:
121 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
121 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
122 if p:
            if p:
                return p
    exe = _toolstr(ui, tool, "executable", tool)
    return util.findexe(util.expandpath(exe))

def _picktool(repo, ui, path, binary, symlink, changedelete):
    def supportscd(tool):
        return tool in internals and internals[tool].mergetype == nomerge

    def check(tool, pat, symlink, binary, changedelete):
        tmsg = tool
        if pat:
            tmsg += " specified for " + pat
        if not _findtool(ui, tool):
            if pat: # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else: # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
        elif symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
        elif binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
        elif changedelete and not supportscd(tool):
            # the nomerge tools are the only tools that support change/delete
            # conflicts
            pass
        elif not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # internal config: ui.forcemerge
    # forcemerge comes from command line arguments, highest priority
    force = ui.config('ui', 'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if changedelete and not supportscd(toolpath):
            return ":prompt", None
        else:
            if toolpath:
                return (force, util.shellquote(toolpath))
            else:
                # mimic HGMERGE if given tool not found
                return (force, force)

    # HGMERGE takes next precedence
    hgmerge = encoding.environ.get("HGMERGE")
    if hgmerge:
        if changedelete and not supportscd(hgmerge):
            return ":prompt", None
        else:
            return (hgmerge, hgmerge)

    # then patterns
    for pat, tool in ui.configitems("merge-patterns"):
        mf = match.match(repo.root, '', [pat])
        if mf(path) and check(tool, pat, symlink, False, changedelete):
            toolpath = _findtool(ui, tool)
            return (tool, util.shellquote(toolpath))

    # then merge tools
    tools = {}
    disabled = set()
    for k, v in ui.configitems("merge-tools"):
        t = k.split('.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, "priority", "0"))
            if _toolbool(ui, t, "disabled", False):
                disabled.add(t)
    names = tools.keys()
    tools = sorted([(-p, tool) for tool, p in tools.items()
                    if tool not in disabled])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        # external tools defined in uimerge won't be able to handle
        # change/delete conflicts
        if uimerge not in names and not changedelete:
            return (uimerge, uimerge)
        tools.insert(0, (None, uimerge)) # highest priority
    tools.append((None, "hgmerge")) # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary, changedelete):
            toolpath = _findtool(ui, t)
            return (t, util.shellquote(toolpath))

    # internal merge or prompt as last resort
    if symlink or binary or changedelete:
        return ":prompt", None
    return ":merge", None

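# A minimal, self-contained sketch (not part of this file) of the
# precedence chain _picktool implements: ui.forcemerge beats HGMERGE,
# which beats merge-patterns, which beats merge-tools ordered by their
# 'priority' setting, with the internal tools as the last resort.  All
# names and inputs below are hypothetical stand-ins for ui.config()
# lookups.
def _pickdemo(force, hgmerge, patterntool, prioritized):
    # prioritized: list of (priority, toolname) pairs from merge-tools
    if force:                      # --config ui.forcemerge / --tool
        return force
    if hgmerge:                    # HGMERGE environment variable
        return hgmerge
    if patterntool:                # first matching merge-patterns entry
        return patterntool
    if prioritized:                # highest configured priority wins
        return max(prioritized)[1]
    return ':merge'                # internal merge as the fallback

# _pickdemo(None, None, None, [(0, 'vimdiff'), (7, 'kdiff3')]) -> 'kdiff3'
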
def _eoltype(data):
    "Guess the EOL type of a file"
    if '\0' in data: # binary
        return None
    if '\r\n' in data: # Windows
        return '\r\n'
    if '\r' in data: # Old Mac
        return '\r'
    if '\n' in data: # UNIX
        return '\n'
    return None # unknown

def _matcheol(file, origfile):
    "Convert EOL markers in a file to match origfile"
    tostyle = _eoltype(util.readfile(origfile))
    if tostyle:
        data = util.readfile(file)
        style = _eoltype(data)
        if style:
            newdata = data.replace(style, tostyle)
            if newdata != data:
                util.writefile(file, newdata)

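# A quick illustration (not part of this file) of why _eoltype tests
# '\r\n' before '\r': a lone-'\r' test would also match Windows data.
# The sample strings are made up.
def _eoltypedemo():
    assert _eoltype("a\r\nb\r\n") == '\r\n'  # Windows, not Old Mac
    assert _eoltype("a\rb\r") == '\r'
    assert _eoltype("a\nb\n") == '\n'
    assert _eoltype("a\0b") is None          # binary trumps everything
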
@internaltool('prompt', nomerge)
def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """Asks the user which of the local `p1()` or the other `p2()` version to
    keep as the merged version."""
    ui = repo.ui
    fd = fcd.path()

    prompts = partextras(labels)
    prompts['fd'] = fd
    try:
        if fco.isabsent():
            index = ui.promptchoice(
                _("local%(l)s changed %(fd)s which other%(o)s deleted\n"
                  "use (c)hanged version, (d)elete, or leave (u)nresolved?"
                  "$$ &Changed $$ &Delete $$ &Unresolved") % prompts, 2)
            choice = ['local', 'other', 'unresolved'][index]
        elif fcd.isabsent():
            index = ui.promptchoice(
                _("other%(o)s changed %(fd)s which local%(l)s deleted\n"
                  "use (c)hanged version, leave (d)eleted, or "
                  "leave (u)nresolved?"
                  "$$ &Changed $$ &Deleted $$ &Unresolved") % prompts, 2)
            choice = ['other', 'local', 'unresolved'][index]
        else:
            index = ui.promptchoice(
                _("no tool found to merge %(fd)s\n"
                  "keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved?"
                  "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
            choice = ['local', 'other', 'unresolved'][index]

        if choice == 'other':
            return _iother(repo, mynode, orig, fcd, fco, fca, toolconf,
                           labels)
        elif choice == 'local':
            return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf,
                           labels)
        elif choice == 'unresolved':
            return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
                          labels)
    except error.ResponseExpected:
        ui.write("\n")
        return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
                      labels)

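# For orientation (a sketch, not the real ui implementation): the
# prompt strings above pack the question and its choices into one
# value, with '$$' separating the choices and '&' marking each
# choice's response key.  Splitting one apart looks roughly like:
def _splitpromptdemo(prompt):
    parts = prompt.split('$$')
    msg = parts[0]
    keys = [p[p.index('&') + 1].lower() for p in parts[1:]]
    return msg, keys

# _splitpromptdemo("keep (l)ocal or take (o)ther?$$ &Local $$ &Other")
# -> ("keep (l)ocal or take (o)ther?", ['l', 'o'])
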
@internaltool('local', nomerge)
def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """Uses the local `p1()` version of files as the merged version."""
    return 0, fcd.isabsent()

@internaltool('other', nomerge)
def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """Uses the other `p2()` version of files as the merged version."""
    if fco.isabsent():
        # local changed, remote deleted -- 'deleted' picked
        repo.wvfs.unlinkpath(fcd.path())
        deleted = True
    else:
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
        deleted = False
    return 0, deleted

@internaltool('fail', nomerge)
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    # for change/delete conflicts write out the changed version, then fail
    if fcd.isabsent():
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 1, False

def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
    tool, toolpath, binary, symlink = toolconf
    if symlink or fcd.isabsent() or fco.isabsent():
        return 1
    a, b, c, back = files

    ui = repo.ui

    validkeep = ['keep', 'keep-merge3']

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not binary)
    except error.ConfigError:
        premerge = _toolstr(ui, tool, "premerge").lower()
        if premerge not in validkeep:
            _valid = ', '.join(["'" + v + "'" for v in validkeep])
            raise error.ConfigError(_("%s.premerge not valid "
                                      "('%s' is neither boolean nor %s)") %
                                    (tool, premerge, _valid))

    if premerge:
        if premerge == 'keep-merge3':
            if not labels:
                labels = _defaultconflictlabels
            if len(labels) < 3:
                labels.append('base')
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
        if not r:
            ui.debug(" premerge successful\n")
            return 0
        if premerge not in validkeep:
            util.copyfile(back, a) # restore from backup and try again
    return 1 # continue merging

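# A small sketch (not part of this file) of the values accepted for
# merge-tools.<tool>.premerge, mirroring the try/except above: the
# setting is first read as a boolean, and only the two 'keep' string
# forms are accepted when the boolean parse fails.  The literal sets
# below approximate util.parsebool().
def _premergemodedemo(value):
    lowered = value.strip().lower()
    if lowered in ('1', 'yes', 'true', 'on', 'always'):
        return True
    if lowered in ('0', 'no', 'false', 'off', 'never'):
        return False
    if lowered in ('keep', 'keep-merge3'):
        # premerge, but leave the premerged text (with markers) in
        # place for the real tool instead of restoring the backup
        return lowered
    raise ValueError("%r is neither boolean nor keep/keep-merge3" % value)
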
def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
    tool, toolpath, binary, symlink = toolconf
    if symlink:
        repo.ui.warn(_('warning: internal %s cannot merge symlinks '
                       'for %s\n') % (tool, fcd.path()))
        return False
    if fcd.isabsent() or fco.isabsent():
        repo.ui.warn(_('warning: internal %s cannot merge change/delete '
                       'conflict for %s\n') % (tool, fcd.path()))
        return False
    return True

def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge, unless mode equals 'union' which suppresses the markers."""
    a, b, c, back = files

    ui = repo.ui

    r = simplemerge.simplemerge(ui, a, b, c, label=labels, mode=mode)
    return True, r, False

@internaltool('union', fullmerge,
              _("warning: conflicts while merging %s! "
                "(edit, then use 'hg resolve --mark')\n"),
              precheck=_mergecheck)
def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will use both left and right sides for conflict regions.
    No markers are inserted."""
    return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
                  files, labels, 'union')

@internaltool('merge', fullmerge,
              _("warning: conflicts while merging %s! "
                "(edit, then use 'hg resolve --mark')\n"),
              precheck=_mergecheck)
def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge."""
    return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
                  files, labels, 'merge')

@internaltool('merge3', fullmerge,
              _("warning: conflicts while merging %s! "
                "(edit, then use 'hg resolve --mark')\n"),
              precheck=_mergecheck)
def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Marker will have three sections, one from each
    side of the merge and one for the base content."""
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        labels.append('base')
    return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)

def _imergeauto(repo, mynode, orig, fcd, fco, fca, toolconf, files,
                labels=None, localorother=None):
    """
    Generic driver for _imergelocal and _imergeother
    """
    assert localorother is not None
    tool, toolpath, binary, symlink = toolconf
    a, b, c, back = files
    r = simplemerge.simplemerge(repo.ui, a, b, c, label=labels,
                                localorother=localorother)
    return True, r

@internaltool('merge-local', mergeonly, precheck=_mergecheck)
def _imergelocal(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the local `p1()` changes."""
    success, status = _imergeauto(localorother='local', *args, **kwargs)
    return success, status, False

@internaltool('merge-other', mergeonly, precheck=_mergecheck)
def _imergeother(*args, **kwargs):
    """
    Like :merge, but resolve all conflicts non-interactively in favor
    of the other `p2()` changes."""
    success, status = _imergeauto(localorother='other', *args, **kwargs)
    return success, status, False

@internaltool('tagmerge', mergeonly,
              _("automatic tag merging of %s failed! "
                "(use 'hg resolve --tool :merge' or another merge "
                "tool of your choice)\n"))
def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal tag merge algorithm (experimental).
    """
    success, status = tagmerge.merge(repo, fcd, fco, fca)
    return success, status, False

@internaltool('dump', fullmerge)
def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Creates three versions of the files to merge, containing the
    contents of local, other and base. These files can then be used to
    perform a merge manually. If the file to be merged is named
    ``a.txt``, these files will accordingly be named ``a.txt.local``,
    ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
    same directory as ``a.txt``."""
    a, b, c, back = files

    fd = fcd.path()

    util.copyfile(a, a + ".local")
    repo.wwrite(fd + ".other", fco.data(), fco.flags())
    repo.wwrite(fd + ".base", fca.data(), fca.flags())
    return False, 1, False

def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    tool, toolpath, binary, symlink = toolconf
    if fcd.isabsent() or fco.isabsent():
        repo.ui.warn(_('warning: %s cannot merge change/delete conflict '
                       'for %s\n') % (tool, fcd.path()))
        return False, 1, None
    a, b, c, back = files
    out = ""
    env = {'HG_FILE': fcd.path(),
           'HG_MY_NODE': short(mynode),
           'HG_OTHER_NODE': str(fco.changectx()),
           'HG_BASE_NODE': str(fca.changectx()),
           'HG_MY_ISLINK': 'l' in fcd.flags(),
           'HG_OTHER_ISLINK': 'l' in fco.flags(),
           'HG_BASE_ISLINK': 'l' in fca.flags(),
           }

    ui = repo.ui

    args = _toolstr(ui, tool, "args", '$local $base $other')
    if "$output" in args:
        out, a = a, back # read input from backup, write to original
    replace = {'local': a, 'base': b, 'other': c, 'output': out}
    args = util.interpolate(r'\$', replace, args,
                            lambda s: util.shellquote(util.localpath(s)))
    cmd = toolpath + ' ' + args
    if _toolbool(ui, tool, "gui"):
        repo.ui.status(_('running merge tool %s for file %s\n') %
                       (tool, fcd.path()))
    repo.ui.debug('launching merge tool: %s\n' % cmd)
    r = ui.system(cmd, cwd=repo.root, environ=env)
    repo.ui.debug('merge tool returned: %s\n' % r)
    return True, r, False

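# A minimal sketch (not part of this file) of the '$local $base $other'
# substitution that util.interpolate() performs above; the quoting
# lambda is a simplified stand-in for util.shellquote(), and it reuses
# the module-level re import.
def _interpolatedemo(args, replace):
    quote = lambda s: "'%s'" % s.replace("'", "'\\''")
    return re.sub(r'\$(local|base|other|output)',
                  lambda m: quote(replace[m.group(1)]), args)

# _interpolatedemo('$local $base $other',
#                  {'local': 'a', 'base': 'a~base', 'other': 'a~other'})
# -> "'a' 'a~base' 'a~other'"
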
def _formatconflictmarker(repo, ctx, template, label, pad):
    """Applies the given template to the ctx, prefixed by the label.

    Pad is the minimum width of the label prefix, so that multiple markers
    can have aligned templated parts.
    """
    if ctx.node() is None:
        ctx = ctx.p1()

    props = templatekw.keywords.copy()
    props['templ'] = template
    props['ctx'] = ctx
    props['repo'] = repo
    templateresult = template('conflictmarker', **props)

    label = ('%s:' % label).ljust(pad + 1)
    mark = '%s %s' % (label, templater.stringify(templateresult))

    if mark:
        mark = mark.splitlines()[0] # split for safety

    # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
    return util.ellipsis(mark, 80 - 8)

_defaultconflictmarker = ('{node|short} '
                          '{ifeq(tags, "tip", "", '
                          'ifeq(tags, "", "", "{tags} "))}'
                          '{if(bookmarks, "{bookmarks} ")}'
                          '{ifeq(branch, "default", "", "{branch} ")}'
                          '- {author|user}: {desc|firstline}')

_defaultconflictlabels = ['local', 'other']

def _formatlabels(repo, fcd, fco, fca, labels):
    """Formats the given labels using the conflict marker template.

    Returns a list of formatted labels.
    """
    cd = fcd.changectx()
    co = fco.changectx()
    ca = fca.changectx()

    ui = repo.ui
    template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
    tmpl = formatter.maketemplater(ui, 'conflictmarker', template)

    pad = max(len(l) for l in labels)

    newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
                 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
    if len(labels) > 2:
        newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
    return newlabels

def partextras(labels):
    """Return a dictionary of extra labels for use in prompts to the user

    Intended use is in strings of the form "(l)ocal%(l)s".
    """
    if labels is None:
        return {
            "l": "",
            "o": "",
        }

    return {
        "l": " [%s]" % labels[0],
        "o": " [%s]" % labels[1],
    }

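# Usage illustration (not part of this file): the keys returned by
# partextras() slot into prompt strings of the form "(l)ocal%(l)s".
# With the hypothetical labels below, the first _iprompt message
# renders roughly as
#   local [working copy] changed f which other [merge rev] deleted
# and with labels=None the bracketed annotations simply disappear.
_partextrasdemo = dict(partextras(['working copy', 'merge rev']), fd='f')
# "local%(l)s changed %(fd)s" % _partextrasdemo
# -> 'local [working copy] changed f'
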
def _filemerge(premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
    """perform a 3-way merge in the working directory

    premerge = whether this is a premerge
    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file

    Returns whether the merge is complete, the return value of the merge, and
    a boolean indicating whether the file was deleted from disk."""

    def temp(prefix, ctx):
        fullbase, ext = os.path.splitext(ctx.path())
        pre = "%s~%s." % (os.path.basename(fullbase), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
        data = repo.wwritedata(ctx.path(), ctx.data())
-        f = os.fdopen(fd, "wb")
+        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(data)
        f.close()
        return name

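    # Note on the change just above (a sketch of the reasoning, not hg
    # API documentation): os.fdopen() on Python 3 only accepts a str
    # mode, while Mercurial's py3 source transformer turns unprefixed
    # literals such as "wb" into bytes.  pycompat.sysstr() yields the
    # native string type on both majors (roughly: identity on Python 2,
    # bytes.decode on Python 3), so the call keeps working unchanged on
    # Python 2.
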
    if not fco.cmp(fcd): # files identical?
        return True, None, False

    ui = repo.ui
    fd = fcd.path()
    binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
    symlink = 'l' in fcd.flags() + fco.flags()
    changedelete = fcd.isabsent() or fco.isabsent()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
    if tool in internals and tool.startswith('internal:'):
        # normalize to new-style names (':merge' etc)
        tool = tool[len('internal'):]
    ui.debug("picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
             % (tool, fd, binary, symlink, changedelete))

    if tool in internals:
        func = internals[tool]
        mergetype = func.mergetype
        onfailure = func.onfailure
        precheck = func.precheck
    else:
        func = _xmerge
        mergetype = fullmerge
        onfailure = _("merging %s failed!\n")
        precheck = None

    toolconf = tool, toolpath, binary, symlink

    if mergetype == nomerge:
        r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
        return True, r, deleted

    if premerge:
        if orig != fco.path():
            ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
        else:
            ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
                                 toolconf):
        if onfailure:
            ui.warn(onfailure % fd)
        return True, 1, False

    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    if not fcd.isabsent():
        back = scmutil.origpath(ui, repo, a)
        if premerge:
            util.copyfile(a, back)
    else:
        back = None
    files = (a, b, c, back)

    r = 1
    try:
        markerstyle = ui.config('ui', 'mergemarkers', 'basic')
        if not labels:
            labels = _defaultconflictlabels
        if markerstyle != 'basic':
            labels = _formatlabels(repo, fcd, fco, fca, labels)

        if premerge and mergetype == fullmerge:
            r = _premerge(repo, fcd, fco, fca, toolconf, files, labels=labels)
            # complete if premerge successful (r is 0)
            return not r, r, False

        needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
                                     toolconf, files, labels=labels)

        if needcheck:
            r = _check(r, ui, tool, fcd, files)

        if r:
            if onfailure:
                ui.warn(onfailure % fd)

        return True, r, deleted
    finally:
        if not r and back is not None:
            util.unlink(back)
        util.unlink(b)
        util.unlink(c)

def _check(r, ui, tool, fcd, files):
    fd = fcd.path()
    a, b, c, back = files

    if not r and (_toolbool(ui, tool, "checkconflicts") or
                  'conflicts' in _toollist(ui, tool, "check")):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(_("was merge of '%s' successful (yn)?"
                             "$$ &Yes $$ &No") % fd, 1):
            r = 1

    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in
                                  _toollist(ui, tool, "check")):
        if back is not None and filecmp.cmp(a, back):
            if ui.promptchoice(_(" output file %s appears unchanged\n"
                                 "was merge successful (yn)?"
                                 "$$ &Yes $$ &No") % fd, 1):
                r = 1

    if back is not None and _toolbool(ui, tool, "fixeol"):
        _matcheol(a, back)

    return r

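# A tiny illustration (not part of this file) of the 'checkconflicts'
# scan in _check above: any line still shaped like a conflict marker
# makes the merge count as failed.  Sample data is made up.
def _hasmarkersdemo(data):
    return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
                          re.MULTILINE))

# _hasmarkersdemo("ok\n<<<<<<< local\nx\n=======\ny\n>>>>>>> other\n")
# -> True
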
def premerge(repo, mynode, orig, fcd, fco, fca, labels=None):
    return _filemerge(True, repo, mynode, orig, fcd, fco, fca, labels=labels)

def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
    return _filemerge(False, repo, mynode, orig, fcd, fco, fca, labels=labels)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = internals.values()

@@ -1,382 +1,383 @@
# httppeer.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import socket
import struct
import tempfile

from .i18n import _
from .node import nullid
from . import (
    bundle2,
    error,
    httpconnection,
+    pycompat,
    statichttprepo,
    url,
    util,
    wireproto,
)

httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq

# FUTURE: consider refactoring this API to use generators. This will
# require a compression engine API to emit generators.
def decompressresponse(response, engine):
    try:
        reader = engine.decompressorreader(response)
    except httplib.HTTPException:
        raise IOError(None, _('connection ended unexpectedly'))

    # We need to wrap reader.read() so HTTPException on subsequent
    # reads is also converted.
    # Ideally we'd use super() here. However, if ``reader`` isn't a new-style
    # class, this can raise:
    # TypeError: super() argument 1 must be type, not classobj
    origread = reader.read
    class readerproxy(reader.__class__):
        def read(self, *args, **kwargs):
            try:
                return origread(*args, **kwargs)
            except httplib.HTTPException:
                raise IOError(None, _('connection ended unexpectedly'))

    reader.__class__ = readerproxy
    return reader

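# A standalone sketch (not part of this file) of the __class__ swap
# used above: pointing an instance at a freshly minted subclass
# overrides a method for that one object, which works even where
# super()-based wrapping fails for old-style classes.  The exception
# type and names here are illustrative only.
def _wrapreaddemo(obj, onerror):
    origread = obj.read
    class proxy(obj.__class__):
        def read(self, *args, **kwargs):
            try:
                return origread(*args, **kwargs)
            except Exception as inst:
                return onerror(inst)
    obj.__class__ = proxy
    return obj
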
def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns an iterable of 2-tuples consisting of header names and values.
    """
    fmt = header + '-%s'
    valuelen = limit - len(fmt % '000') - len(': \r\n')
    result = []

    n = 0
    for i in xrange(0, len(value), valuelen):
        n += 1
        result.append((fmt % str(n), value[i:i + valuelen]))

    return result

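# Worked example (not part of this file): with header='X-HgArg' and
# limit=30, fmt % '000' is 'X-HgArg-000' (11 bytes) and ': \r\n' adds
# 4 more, so valuelen is 30 - 11 - 4 = 15 and a 40-byte value splits
# into three headers:
#
#   encodevalueinheaders('a' * 40, 'X-HgArg', 30)
#   -> [('X-HgArg-1', 'aaaaaaaaaaaaaaa'),
#       ('X-HgArg-2', 'aaaaaaaaaaaaaaa'),
#       ('X-HgArg-3', 'aaaaaaaaaa')]
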
class httppeer(wireproto.wirepeer):
    def __init__(self, ui, path):
        self.path = path
        self.caps = None
        self.handler = None
        self.urlopener = None
        self.requestbuilder = None
        u = util.url(path)
        if u.query or u.fragment:
            raise error.Abort(_('unsupported URL component: "%s"') %
                              (u.query or u.fragment))

        # urllib cannot handle URLs with embedded user or passwd
        self._url, authinfo = u.authinfo()

        self.ui = ui
        self.ui.debug('using %s\n' % self._url)

        self.urlopener = url.opener(ui, authinfo)
        self.requestbuilder = urlreq.request

    def __del__(self):
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda : None)()

    def url(self):
        return self.path

    # look up capabilities only when needed

    def _fetchcaps(self):
        self.caps = set(self._call('capabilities').split())

    def _capabilities(self):
        if self.caps is None:
            try:
                self._fetchcaps()
            except error.RepoError:
                self.caps = set()
            self.ui.debug('capabilities: %s\n' %
                          (' '.join(self.caps or ['none'])))
        return self.caps

    def lock(self):
        raise error.Abort(_('operation not supported over http'))

    def _callstream(self, cmd, _compressible=False, **args):
        if cmd == 'pushkey':
            args['data'] = ''
        data = args.pop('data', None)
        headers = args.pop('headers', {})

        self.ui.debug("sending %s command\n" % cmd)
        q = [('cmd', cmd)]
        headersize = 0
        varyheaders = []
        # Important: don't use self.capable() here or else you end up
        # with infinite recursion when trying to look up capabilities
        # for the first time.
        postargsok = self.caps is not None and 'httppostargs' in self.caps
        # TODO: support for httppostargs when data is a file-like
        # object rather than a basestring
        canmungedata = not data or isinstance(data, basestring)
        if postargsok and canmungedata:
            strargs = urlreq.urlencode(sorted(args.items()))
            if strargs:
                if not data:
                    data = strargs
                elif isinstance(data, basestring):
                    data = strargs + data
                headers['X-HgArgs-Post'] = len(strargs)
        else:
            if len(args) > 0:
                httpheader = self.capable('httpheader')
                if httpheader:
                    headersize = int(httpheader.split(',', 1)[0])
            if headersize > 0:
                # The headers can typically carry more data than the URL.
                encargs = urlreq.urlencode(sorted(args.items()))
                for header, value in encodevalueinheaders(encargs, 'X-HgArg',
                                                          headersize):
                    headers[header] = value
                    varyheaders.append(header)
            else:
                q += sorted(args.items())
        qs = '?%s' % urlreq.urlencode(q)
        cu = "%s%s" % (self._url, qs)
        size = 0
        if util.safehasattr(data, 'length'):
            size = data.length
        elif data is not None:
            size = len(data)
        if size and self.ui.configbool('ui', 'usehttp2', False):
            headers['Expect'] = '100-Continue'
            headers['X-HgHttp2'] = '1'
        if data is not None and 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/mercurial-0.1'

        # Tell the server we accept application/mercurial-0.2 and multiple
        # compression formats if the server is capable of emitting those
        # payloads.
        protoparams = []

        mediatypes = set()
        if self.caps is not None:
            mt = self.capable('httpmediatype')
            if mt:
                protoparams.append('0.1')
                mediatypes = set(mt.split(','))

        if '0.2tx' in mediatypes:
            protoparams.append('0.2')

        if '0.2tx' in mediatypes and self.capable('compression'):
            # We /could/ compare supported compression formats and prune
            # non-mutually supported or error if nothing is mutually supported.
            # For now, send the full list to the server and have it error.
            comps = [e.wireprotosupport().name for e in
                     util.compengines.supportedwireengines(util.CLIENTROLE)]
            protoparams.append('comp=%s' % ','.join(comps))

        if protoparams:
            protoheaders = encodevalueinheaders(' '.join(protoparams),
                                                'X-HgProto',
                                                headersize or 1024)
            for header, value in protoheaders:
                headers[header] = value
                varyheaders.append(header)

        headers['Vary'] = ','.join(varyheaders)
        req = self.requestbuilder(cu, data, headers)

        if data is not None:
            self.ui.debug("sending %s bytes\n" % size)
            req.add_unredirected_header('Content-Length', '%d' % size)
        try:
            resp = self.urlopener.open(req)
        except urlerr.httperror as inst:
            if inst.code == 401:
                raise error.Abort(_('authorization failed'))
            raise
        except httplib.HTTPException as inst:
            self.ui.debug('http error while sending %s command\n' % cmd)
            self.ui.traceback()
            raise IOError(None, inst)
        # record the url we got redirected to
        resp_url = resp.geturl()
        if resp_url.endswith(qs):
            resp_url = resp_url[:-len(qs)]
        if self._url.rstrip('/') != resp_url.rstrip('/'):
            if not self.ui.quiet:
                self.ui.warn(_('real URL is %s\n') % resp_url)
        self._url = resp_url
        try:
            proto = resp.getheader('content-type')
        except AttributeError:
            proto = resp.headers.get('content-type', '')

        safeurl = util.hidepassword(self._url)
        if proto.startswith('application/hg-error'):
            raise error.OutOfBandError(resp.read())
        # accept old "text/plain" and "application/hg-changegroup" for now
        if not (proto.startswith('application/mercurial-') or
                (proto.startswith('text/plain')
                 and not resp.headers.get('content-length')) or
                proto.startswith('application/hg-changegroup')):
            self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
            raise error.RepoError(
                _("'%s' does not appear to be an hg repository:\n"
                  "---%%<--- (%s)\n%s\n---%%<---\n")
                % (safeurl, proto or 'no content-type', resp.read(1024)))

        if proto.startswith('application/mercurial-'):
            try:
                version = proto.split('-', 1)[1]
                version_info = tuple([int(n) for n in version.split('.')])
            except ValueError:
                raise error.RepoError(_("'%s' sent a broken Content-Type "
                                        "header (%s)") % (safeurl, proto))

            if version_info == (0, 1):
                if _compressible:
                    return decompressresponse(resp, util.compengines['zlib'])
                return resp
            elif version_info == (0, 2):
                # application/mercurial-0.2 always identifies the compression
                # engine in the payload header.
                elen = struct.unpack('B', resp.read(1))[0]
                ename = resp.read(elen)
                engine = util.compengines.forwiretype(ename)
                return decompressresponse(resp, engine)
            else:
                raise error.RepoError(_("'%s' uses newer protocol %s") %
                                      (safeurl, version))

        if _compressible:
            return decompressresponse(resp, util.compengines['zlib'])

        return resp

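    # A small sketch (not this module's API) of the mercurial-0.2
    # framing decoded above: one length byte, then the compression
    # engine name, then the compressed payload.  The encoding side
    # would be, illustratively,
    #
    #   frame = struct.pack('B', len(ename)) + ename + compressed
    #
    # so a payload starting with b'\x04zlib' announces the zlib engine.
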
280 def _call(self, cmd, **args):
281 def _call(self, cmd, **args):
281 fp = self._callstream(cmd, **args)
282 fp = self._callstream(cmd, **args)
282 try:
283 try:
283 return fp.read()
284 return fp.read()
284 finally:
285 finally:
285 # if using keepalive, allow connection to be reused
286 # if using keepalive, allow connection to be reused
286 fp.close()
287 fp.close()
287
288
288 def _callpush(self, cmd, cg, **args):
289 def _callpush(self, cmd, cg, **args):
289 # have to stream bundle to a temp file because we do not have
290 # have to stream bundle to a temp file because we do not have
290 # http 1.1 chunked transfer.
291 # http 1.1 chunked transfer.
291
292
292 types = self.capable('unbundle')
293 types = self.capable('unbundle')
293 try:
294 try:
294 types = types.split(',')
295 types = types.split(',')
295 except AttributeError:
296 except AttributeError:
296 # servers older than d1b16a746db6 will send 'unbundle' as a
297 # servers older than d1b16a746db6 will send 'unbundle' as a
297 # boolean capability. They only support headerless/uncompressed
298 # boolean capability. They only support headerless/uncompressed
298 # bundles.
299 # bundles.
299 types = [""]
300 types = [""]
300 for x in types:
301 for x in types:
301 if x in bundle2.bundletypes:
302 if x in bundle2.bundletypes:
302 type = x
303 type = x
303 break
304 break
304
305
305 tempname = bundle2.writebundle(self.ui, cg, None, type)
306 tempname = bundle2.writebundle(self.ui, cg, None, type)
306 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
307 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
307 headers = {'Content-Type': 'application/mercurial-0.1'}
308 headers = {'Content-Type': 'application/mercurial-0.1'}
308
309
309 try:
310 try:
310 r = self._call(cmd, data=fp, headers=headers, **args)
311 r = self._call(cmd, data=fp, headers=headers, **args)
311 vals = r.split('\n', 1)
312 vals = r.split('\n', 1)
312 if len(vals) < 2:
313 if len(vals) < 2:
313 raise error.ResponseError(_("unexpected response:"), r)
314 raise error.ResponseError(_("unexpected response:"), r)
314 return vals
315 return vals
315 except socket.error as err:
316 except socket.error as err:
316 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
317 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
317 raise error.Abort(_('push failed: %s') % err.args[1])
318 raise error.Abort(_('push failed: %s') % err.args[1])
318 raise error.Abort(err.args[1])
319 raise error.Abort(err.args[1])
319 finally:
320 finally:
320 fp.close()
321 fp.close()
321 os.unlink(tempname)
322 os.unlink(tempname)
322
323
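The type negotiation at the top of _callpush can be isolated as below; this is a sketch under the same assumptions (a comma-separated 'unbundle' capability, or a bare boolean from servers predating d1b16a746db6), not the shipped code:

    def choosebundletype(capability, supported):
        # capability: the server's 'unbundle' capability value;
        # supported: client-side bundle types (bundle2.bundletypes here)
        try:
            types = capability.split(',')
        except AttributeError:
            # boolean capability from an old server: headerless bundles
            types = ['']
        for t in types:
            if t in supported:
                return t
        raise ValueError('no mutually supported bundle type')

    print(choosebundletype('HG20,HG10BZ,HG10UN', {'HG20'}))  # HG20
    print(choosebundletype(True, {''}))                      # ''
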
323 def _calltwowaystream(self, cmd, fp, **args):
324 def _calltwowaystream(self, cmd, fp, **args):
324 fh = None
325 fh = None
325 fp_ = None
326 fp_ = None
326 filename = None
327 filename = None
327 try:
328 try:
328 # dump bundle to disk
329 # dump bundle to disk
329 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
330 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
330 fh = os.fdopen(fd, "wb")
331 fh = os.fdopen(fd, pycompat.sysstr("wb"))
331 d = fp.read(4096)
332 d = fp.read(4096)
332 while d:
333 while d:
333 fh.write(d)
334 fh.write(d)
334 d = fp.read(4096)
335 d = fp.read(4096)
335 fh.close()
336 fh.close()
336 # start http push
337 # start http push
337 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
338 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
338 headers = {'Content-Type': 'application/mercurial-0.1'}
339 headers = {'Content-Type': 'application/mercurial-0.1'}
339 return self._callstream(cmd, data=fp_, headers=headers, **args)
340 return self._callstream(cmd, data=fp_, headers=headers, **args)
340 finally:
341 finally:
341 if fp_ is not None:
342 if fp_ is not None:
342 fp_.close()
343 fp_.close()
343 if fh is not None:
344 if fh is not None:
344 fh.close()
345 fh.close()
345 os.unlink(filename)
346 os.unlink(filename)
346
347
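The pycompat.sysstr("wb") call above is the point of this changeset: Python 3's os.fdopen() rejects a bytes mode, and Mercurial's source loader turns plain string literals into bytes, so the mode must be converted to a native string first. A hedged sketch of what the conversion amounts to (the real implementation lives in mercurial/pycompat.py):

    import os
    import sys
    import tempfile

    def sysstr_sketch(s):
        # approximately pycompat.sysstr: bytes -> native str on
        # Python 3, identity on Python 2
        if sys.version_info[0] >= 3 and isinstance(s, bytes):
            return s.decode('latin-1')
        return s

    fd, name = tempfile.mkstemp(prefix='hg-bundle-', suffix='.hg')
    fh = os.fdopen(fd, sysstr_sketch(b'wb'))  # bare b'wb' fails on py3
    fh.write(b'payload')
    fh.close()
    os.unlink(name)
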
347 def _callcompressable(self, cmd, **args):
348 def _callcompressable(self, cmd, **args):
348 return self._callstream(cmd, _compressible=True, **args)
349 return self._callstream(cmd, _compressible=True, **args)
349
350
350 def _abort(self, exception):
351 def _abort(self, exception):
351 raise exception
352 raise exception
352
353
353 class httpspeer(httppeer):
354 class httpspeer(httppeer):
354 def __init__(self, ui, path):
355 def __init__(self, ui, path):
355 if not url.has_https:
356 if not url.has_https:
356 raise error.Abort(_('Python support for SSL and HTTPS '
357 raise error.Abort(_('Python support for SSL and HTTPS '
357 'is not installed'))
358 'is not installed'))
358 httppeer.__init__(self, ui, path)
359 httppeer.__init__(self, ui, path)
359
360
360 def instance(ui, path, create):
361 def instance(ui, path, create):
361 if create:
362 if create:
362 raise error.Abort(_('cannot create new http repository'))
363 raise error.Abort(_('cannot create new http repository'))
363 try:
364 try:
364 if path.startswith('https:'):
365 if path.startswith('https:'):
365 inst = httpspeer(ui, path)
366 inst = httpspeer(ui, path)
366 else:
367 else:
367 inst = httppeer(ui, path)
368 inst = httppeer(ui, path)
368 try:
369 try:
369 # Try to do useful work when checking compatibility.
370 # Try to do useful work when checking compatibility.
370 # Usually saves a roundtrip since we want the caps anyway.
371 # Usually saves a roundtrip since we want the caps anyway.
371 inst._fetchcaps()
372 inst._fetchcaps()
372 except error.RepoError:
373 except error.RepoError:
373 # No luck, try older compatibility check.
374 # No luck, try older compatibility check.
374 inst.between([(nullid, nullid)])
375 inst.between([(nullid, nullid)])
375 return inst
376 return inst
376 except error.RepoError as httpexception:
377 except error.RepoError as httpexception:
377 try:
378 try:
378 r = statichttprepo.instance(ui, "static-" + path, create)
379 r = statichttprepo.instance(ui, "static-" + path, create)
379 ui.note(_('(falling back to static-http)\n'))
380 ui.note(_('(falling back to static-http)\n'))
380 return r
381 return r
381 except error.RepoError:
382 except error.RepoError:
382 raise httpexception # use the original http RepoError instead
383 raise httpexception # use the original http RepoError instead
@@ -1,2653 +1,2654 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 base85,
29 base85,
30 copies,
30 copies,
31 diffhelpers,
31 diffhelpers,
32 encoding,
32 encoding,
33 error,
33 error,
34 mail,
34 mail,
35 mdiff,
35 mdiff,
36 pathutil,
36 pathutil,
37 pycompat,
37 scmutil,
38 scmutil,
38 similar,
39 similar,
39 util,
40 util,
40 )
41 )
41 stringio = util.stringio
42 stringio = util.stringio
42
43
43 gitre = re.compile('diff --git a/(.*) b/(.*)')
44 gitre = re.compile('diff --git a/(.*) b/(.*)')
44 tabsplitter = re.compile(r'(\t+|[^\t]+)')
45 tabsplitter = re.compile(r'(\t+|[^\t]+)')
45
46
46 class PatchError(Exception):
47 class PatchError(Exception):
47 pass
48 pass
48
49
49
50
50 # public functions
51 # public functions
51
52
52 def split(stream):
53 def split(stream):
53 '''return an iterator of individual patches from a stream'''
54 '''return an iterator of individual patches from a stream'''
54 def isheader(line, inheader):
55 def isheader(line, inheader):
55 if inheader and line[0] in (' ', '\t'):
56 if inheader and line[0] in (' ', '\t'):
56 # continuation
57 # continuation
57 return True
58 return True
58 if line[0] in (' ', '-', '+'):
59 if line[0] in (' ', '-', '+'):
59 # diff line - don't check for header pattern in there
60 # diff line - don't check for header pattern in there
60 return False
61 return False
61 l = line.split(': ', 1)
62 l = line.split(': ', 1)
62 return len(l) == 2 and ' ' not in l[0]
63 return len(l) == 2 and ' ' not in l[0]
63
64
64 def chunk(lines):
65 def chunk(lines):
65 return stringio(''.join(lines))
66 return stringio(''.join(lines))
66
67
67 def hgsplit(stream, cur):
68 def hgsplit(stream, cur):
68 inheader = True
69 inheader = True
69
70
70 for line in stream:
71 for line in stream:
71 if not line.strip():
72 if not line.strip():
72 inheader = False
73 inheader = False
73 if not inheader and line.startswith('# HG changeset patch'):
74 if not inheader and line.startswith('# HG changeset patch'):
74 yield chunk(cur)
75 yield chunk(cur)
75 cur = []
76 cur = []
76 inheader = True
77 inheader = True
77
78
78 cur.append(line)
79 cur.append(line)
79
80
80 if cur:
81 if cur:
81 yield chunk(cur)
82 yield chunk(cur)
82
83
83 def mboxsplit(stream, cur):
84 def mboxsplit(stream, cur):
84 for line in stream:
85 for line in stream:
85 if line.startswith('From '):
86 if line.startswith('From '):
86 for c in split(chunk(cur[1:])):
87 for c in split(chunk(cur[1:])):
87 yield c
88 yield c
88 cur = []
89 cur = []
89
90
90 cur.append(line)
91 cur.append(line)
91
92
92 if cur:
93 if cur:
93 for c in split(chunk(cur[1:])):
94 for c in split(chunk(cur[1:])):
94 yield c
95 yield c
95
96
96 def mimesplit(stream, cur):
97 def mimesplit(stream, cur):
97 def msgfp(m):
98 def msgfp(m):
98 fp = stringio()
99 fp = stringio()
99 g = email.Generator.Generator(fp, mangle_from_=False)
100 g = email.Generator.Generator(fp, mangle_from_=False)
100 g.flatten(m)
101 g.flatten(m)
101 fp.seek(0)
102 fp.seek(0)
102 return fp
103 return fp
103
104
104 for line in stream:
105 for line in stream:
105 cur.append(line)
106 cur.append(line)
106 c = chunk(cur)
107 c = chunk(cur)
107
108
108 m = email.Parser.Parser().parse(c)
109 m = email.Parser.Parser().parse(c)
109 if not m.is_multipart():
110 if not m.is_multipart():
110 yield msgfp(m)
111 yield msgfp(m)
111 else:
112 else:
112 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
113 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
113 for part in m.walk():
114 for part in m.walk():
114 ct = part.get_content_type()
115 ct = part.get_content_type()
115 if ct not in ok_types:
116 if ct not in ok_types:
116 continue
117 continue
117 yield msgfp(part)
118 yield msgfp(part)
118
119
119 def headersplit(stream, cur):
120 def headersplit(stream, cur):
120 inheader = False
121 inheader = False
121
122
122 for line in stream:
123 for line in stream:
123 if not inheader and isheader(line, inheader):
124 if not inheader and isheader(line, inheader):
124 yield chunk(cur)
125 yield chunk(cur)
125 cur = []
126 cur = []
126 inheader = True
127 inheader = True
127 if inheader and not isheader(line, inheader):
128 if inheader and not isheader(line, inheader):
128 inheader = False
129 inheader = False
129
130
130 cur.append(line)
131 cur.append(line)
131
132
132 if cur:
133 if cur:
133 yield chunk(cur)
134 yield chunk(cur)
134
135
135 def remainder(cur):
136 def remainder(cur):
136 yield chunk(cur)
137 yield chunk(cur)
137
138
138 class fiter(object):
139 class fiter(object):
139 def __init__(self, fp):
140 def __init__(self, fp):
140 self.fp = fp
141 self.fp = fp
141
142
142 def __iter__(self):
143 def __iter__(self):
143 return self
144 return self
144
145
145 def next(self):
146 def next(self):
146 l = self.fp.readline()
147 l = self.fp.readline()
147 if not l:
148 if not l:
148 raise StopIteration
149 raise StopIteration
149 return l
150 return l
150
151
151 inheader = False
152 inheader = False
152 cur = []
153 cur = []
153
154
154 mimeheaders = ['content-type']
155 mimeheaders = ['content-type']
155
156
156 if not util.safehasattr(stream, 'next'):
157 if not util.safehasattr(stream, 'next'):
157 # http responses, for example, have readline but not next
158 # http responses, for example, have readline but not next
158 stream = fiter(stream)
159 stream = fiter(stream)
159
160
160 for line in stream:
161 for line in stream:
161 cur.append(line)
162 cur.append(line)
162 if line.startswith('# HG changeset patch'):
163 if line.startswith('# HG changeset patch'):
163 return hgsplit(stream, cur)
164 return hgsplit(stream, cur)
164 elif line.startswith('From '):
165 elif line.startswith('From '):
165 return mboxsplit(stream, cur)
166 return mboxsplit(stream, cur)
166 elif isheader(line, inheader):
167 elif isheader(line, inheader):
167 inheader = True
168 inheader = True
168 if line.split(':', 1)[0].lower() in mimeheaders:
169 if line.split(':', 1)[0].lower() in mimeheaders:
169 # let email parser handle this
170 # let email parser handle this
170 return mimesplit(stream, cur)
171 return mimesplit(stream, cur)
171 elif line.startswith('--- ') and inheader:
172 elif line.startswith('--- ') and inheader:
172 # No evil headers seen by diff start, split by hand
173 # No evil headers seen by diff start, split by hand
173 return headersplit(stream, cur)
174 return headersplit(stream, cur)
174 # Not enough info, keep reading
175 # Not enough info, keep reading
175
176
176 # if we are here, we have a very plain patch
177 # if we are here, we have a very plain patch
177 return remainder(cur)
178 return remainder(cur)
178
179
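split() dispatches on the first distinctive line it sees: a '# HG changeset patch' marker, an mbox 'From ' line, a MIME header, or a bare '--- ' diff start, falling through to remainder() for plain patches. A usage sketch (assuming a Mercurial checkout on the import path):

    from mercurial import patch, util

    two = util.stringio(
        '# HG changeset patch\n# User alice\n\npatch one body\n'
        '# HG changeset patch\n# User bob\n\npatch two body\n')
    chunks = list(patch.split(two))
    print(len(chunks))                        # 2
    print(chunks[1].read().splitlines()[1])   # '# User bob'
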
179 ## Some facility for extensible patch parsing:
180 ## Some facility for extensible patch parsing:
180 # list of pairs ("header to match", "data key")
181 # list of pairs ("header to match", "data key")
181 patchheadermap = [('Date', 'date'),
182 patchheadermap = [('Date', 'date'),
182 ('Branch', 'branch'),
183 ('Branch', 'branch'),
183 ('Node ID', 'nodeid'),
184 ('Node ID', 'nodeid'),
184 ]
185 ]
185
186
186 def extract(ui, fileobj):
187 def extract(ui, fileobj):
187 '''extract patch from data read from fileobj.
188 '''extract patch from data read from fileobj.
188
189
189 patch can be a normal patch or contained in an email message.
190 patch can be a normal patch or contained in an email message.
190
191
191 return a dictionary. Standard keys are:
192 return a dictionary. Standard keys are:
192 - filename,
193 - filename,
193 - message,
194 - message,
194 - user,
195 - user,
195 - date,
196 - date,
196 - branch,
197 - branch,
197 - node,
198 - node,
198 - p1,
199 - p1,
199 - p2.
200 - p2.
200 Any item can be missing from the dictionary. If filename is missing,
201 Any item can be missing from the dictionary. If filename is missing,
201 fileobj did not contain a patch. Caller must unlink filename when done.'''
202 fileobj did not contain a patch. Caller must unlink filename when done.'''
202
203
203 # attempt to detect the start of a patch
204 # attempt to detect the start of a patch
204 # (this heuristic is borrowed from quilt)
205 # (this heuristic is borrowed from quilt)
205 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
206 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
206 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
207 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
207 r'---[ \t].*?^\+\+\+[ \t]|'
208 r'---[ \t].*?^\+\+\+[ \t]|'
208 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
209 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
209
210
210 data = {}
211 data = {}
211 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
212 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
212 tmpfp = os.fdopen(fd, 'w')
213 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
213 try:
214 try:
214 msg = email.Parser.Parser().parse(fileobj)
215 msg = email.Parser.Parser().parse(fileobj)
215
216
216 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
217 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
217 data['user'] = msg['From'] and mail.headdecode(msg['From'])
218 data['user'] = msg['From'] and mail.headdecode(msg['From'])
218 if not subject and not data['user']:
219 if not subject and not data['user']:
219 # Not an email, restore parsed headers if any
220 # Not an email, restore parsed headers if any
220 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
221 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
221
222
222 # should try to parse msg['Date']
223 # should try to parse msg['Date']
223 parents = []
224 parents = []
224
225
225 if subject:
226 if subject:
226 if subject.startswith('[PATCH'):
227 if subject.startswith('[PATCH'):
227 pend = subject.find(']')
228 pend = subject.find(']')
228 if pend >= 0:
229 if pend >= 0:
229 subject = subject[pend + 1:].lstrip()
230 subject = subject[pend + 1:].lstrip()
230 subject = re.sub(r'\n[ \t]+', ' ', subject)
231 subject = re.sub(r'\n[ \t]+', ' ', subject)
231 ui.debug('Subject: %s\n' % subject)
232 ui.debug('Subject: %s\n' % subject)
232 if data['user']:
233 if data['user']:
233 ui.debug('From: %s\n' % data['user'])
234 ui.debug('From: %s\n' % data['user'])
234 diffs_seen = 0
235 diffs_seen = 0
235 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
236 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
236 message = ''
237 message = ''
237 for part in msg.walk():
238 for part in msg.walk():
238 content_type = part.get_content_type()
239 content_type = part.get_content_type()
239 ui.debug('Content-Type: %s\n' % content_type)
240 ui.debug('Content-Type: %s\n' % content_type)
240 if content_type not in ok_types:
241 if content_type not in ok_types:
241 continue
242 continue
242 payload = part.get_payload(decode=True)
243 payload = part.get_payload(decode=True)
243 m = diffre.search(payload)
244 m = diffre.search(payload)
244 if m:
245 if m:
245 hgpatch = False
246 hgpatch = False
246 hgpatchheader = False
247 hgpatchheader = False
247 ignoretext = False
248 ignoretext = False
248
249
249 ui.debug('found patch at byte %d\n' % m.start(0))
250 ui.debug('found patch at byte %d\n' % m.start(0))
250 diffs_seen += 1
251 diffs_seen += 1
251 cfp = stringio()
252 cfp = stringio()
252 for line in payload[:m.start(0)].splitlines():
253 for line in payload[:m.start(0)].splitlines():
253 if line.startswith('# HG changeset patch') and not hgpatch:
254 if line.startswith('# HG changeset patch') and not hgpatch:
254 ui.debug('patch generated by hg export\n')
255 ui.debug('patch generated by hg export\n')
255 hgpatch = True
256 hgpatch = True
256 hgpatchheader = True
257 hgpatchheader = True
257 # drop earlier commit message content
258 # drop earlier commit message content
258 cfp.seek(0)
259 cfp.seek(0)
259 cfp.truncate()
260 cfp.truncate()
260 subject = None
261 subject = None
261 elif hgpatchheader:
262 elif hgpatchheader:
262 if line.startswith('# User '):
263 if line.startswith('# User '):
263 data['user'] = line[7:]
264 data['user'] = line[7:]
264 ui.debug('From: %s\n' % data['user'])
265 ui.debug('From: %s\n' % data['user'])
265 elif line.startswith("# Parent "):
266 elif line.startswith("# Parent "):
266 parents.append(line[9:].lstrip())
267 parents.append(line[9:].lstrip())
267 elif line.startswith("# "):
268 elif line.startswith("# "):
268 for header, key in patchheadermap:
269 for header, key in patchheadermap:
269 prefix = '# %s ' % header
270 prefix = '# %s ' % header
270 if line.startswith(prefix):
271 if line.startswith(prefix):
271 data[key] = line[len(prefix):]
272 data[key] = line[len(prefix):]
272 else:
273 else:
273 hgpatchheader = False
274 hgpatchheader = False
274 elif line == '---':
275 elif line == '---':
275 ignoretext = True
276 ignoretext = True
276 if not hgpatchheader and not ignoretext:
277 if not hgpatchheader and not ignoretext:
277 cfp.write(line)
278 cfp.write(line)
278 cfp.write('\n')
279 cfp.write('\n')
279 message = cfp.getvalue()
280 message = cfp.getvalue()
280 if tmpfp:
281 if tmpfp:
281 tmpfp.write(payload)
282 tmpfp.write(payload)
282 if not payload.endswith('\n'):
283 if not payload.endswith('\n'):
283 tmpfp.write('\n')
284 tmpfp.write('\n')
284 elif not diffs_seen and message and content_type == 'text/plain':
285 elif not diffs_seen and message and content_type == 'text/plain':
285 message += '\n' + payload
286 message += '\n' + payload
286 except: # re-raises
287 except: # re-raises
287 tmpfp.close()
288 tmpfp.close()
288 os.unlink(tmpname)
289 os.unlink(tmpname)
289 raise
290 raise
290
291
291 if subject and not message.startswith(subject):
292 if subject and not message.startswith(subject):
292 message = '%s\n%s' % (subject, message)
293 message = '%s\n%s' % (subject, message)
293 data['message'] = message
294 data['message'] = message
294 tmpfp.close()
295 tmpfp.close()
295 if parents:
296 if parents:
296 data['p1'] = parents.pop(0)
297 data['p1'] = parents.pop(0)
297 if parents:
298 if parents:
298 data['p2'] = parents.pop(0)
299 data['p2'] = parents.pop(0)
299
300
300 if diffs_seen:
301 if diffs_seen:
301 data['filename'] = tmpname
302 data['filename'] = tmpname
302 else:
303 else:
303 os.unlink(tmpname)
304 os.unlink(tmpname)
304 return data
305 return data
305
306
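A hedged usage sketch of extract(): the returned dictionary carries the parsed metadata, and 'filename', when present, names a temporary file holding the diff, which the caller must unlink. The input file name and ui construction here are illustrative:

    import os
    from mercurial import patch, ui as uimod

    with open('incoming.patch') as fileobj:   # hypothetical input
        data = patch.extract(uimod.ui(), fileobj)
    try:
        print(data.get('user'), data.get('date'))
        if 'filename' in data:
            print('diff extracted to', data['filename'])
    finally:
        if data.get('filename'):
            os.unlink(data['filename'])
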
306 class patchmeta(object):
307 class patchmeta(object):
307 """Patched file metadata
308 """Patched file metadata
308
309
309 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
310 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
310 or COPY. 'path' is the patched file path. 'oldpath' is set to the
311 or COPY. 'path' is the patched file path. 'oldpath' is set to the
311 origin file when 'op' is either COPY or RENAME, None otherwise. If
312 origin file when 'op' is either COPY or RENAME, None otherwise. If
312 file mode is changed, 'mode' is a tuple (islink, isexec) where
313 file mode is changed, 'mode' is a tuple (islink, isexec) where
313 'islink' is True if the file is a symlink and 'isexec' is True if
314 'islink' is True if the file is a symlink and 'isexec' is True if
314 the file is executable. Otherwise, 'mode' is None.
315 the file is executable. Otherwise, 'mode' is None.
315 """
316 """
316 def __init__(self, path):
317 def __init__(self, path):
317 self.path = path
318 self.path = path
318 self.oldpath = None
319 self.oldpath = None
319 self.mode = None
320 self.mode = None
320 self.op = 'MODIFY'
321 self.op = 'MODIFY'
321 self.binary = False
322 self.binary = False
322
323
323 def setmode(self, mode):
324 def setmode(self, mode):
324 islink = mode & 0o20000
325 islink = mode & 0o20000
325 isexec = mode & 0o100
326 isexec = mode & 0o100
326 self.mode = (islink, isexec)
327 self.mode = (islink, isexec)
327
328
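setmode() keys off two bits of the git file mode: 0o20000 distinguishes the symlink file type (mode 120000) and 0o100 is the owner-execute permission bit. For instance:

    for gitmode in ('100644', '100755', '120000'):
        mode = int(gitmode, 8)
        islink = mode & 0o20000      # set only for 120000 (symlink)
        isexec = mode & 0o100        # set for 755-style modes
        print(gitmode, bool(islink), bool(isexec))
    # 100644 False False
    # 100755 False True
    # 120000 True False
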
328 def copy(self):
329 def copy(self):
329 other = patchmeta(self.path)
330 other = patchmeta(self.path)
330 other.oldpath = self.oldpath
331 other.oldpath = self.oldpath
331 other.mode = self.mode
332 other.mode = self.mode
332 other.op = self.op
333 other.op = self.op
333 other.binary = self.binary
334 other.binary = self.binary
334 return other
335 return other
335
336
336 def _ispatchinga(self, afile):
337 def _ispatchinga(self, afile):
337 if afile == '/dev/null':
338 if afile == '/dev/null':
338 return self.op == 'ADD'
339 return self.op == 'ADD'
339 return afile == 'a/' + (self.oldpath or self.path)
340 return afile == 'a/' + (self.oldpath or self.path)
340
341
341 def _ispatchingb(self, bfile):
342 def _ispatchingb(self, bfile):
342 if bfile == '/dev/null':
343 if bfile == '/dev/null':
343 return self.op == 'DELETE'
344 return self.op == 'DELETE'
344 return bfile == 'b/' + self.path
345 return bfile == 'b/' + self.path
345
346
346 def ispatching(self, afile, bfile):
347 def ispatching(self, afile, bfile):
347 return self._ispatchinga(afile) and self._ispatchingb(bfile)
348 return self._ispatchinga(afile) and self._ispatchingb(bfile)
348
349
349 def __repr__(self):
350 def __repr__(self):
350 return "<patchmeta %s %r>" % (self.op, self.path)
351 return "<patchmeta %s %r>" % (self.op, self.path)
351
352
352 def readgitpatch(lr):
353 def readgitpatch(lr):
353 """extract git-style metadata about patches from <patchname>"""
354 """extract git-style metadata about patches from <patchname>"""
354
355
355 # Filter patch for git information
356 # Filter patch for git information
356 gp = None
357 gp = None
357 gitpatches = []
358 gitpatches = []
358 for line in lr:
359 for line in lr:
359 line = line.rstrip(' \r\n')
360 line = line.rstrip(' \r\n')
360 if line.startswith('diff --git a/'):
361 if line.startswith('diff --git a/'):
361 m = gitre.match(line)
362 m = gitre.match(line)
362 if m:
363 if m:
363 if gp:
364 if gp:
364 gitpatches.append(gp)
365 gitpatches.append(gp)
365 dst = m.group(2)
366 dst = m.group(2)
366 gp = patchmeta(dst)
367 gp = patchmeta(dst)
367 elif gp:
368 elif gp:
368 if line.startswith('--- '):
369 if line.startswith('--- '):
369 gitpatches.append(gp)
370 gitpatches.append(gp)
370 gp = None
371 gp = None
371 continue
372 continue
372 if line.startswith('rename from '):
373 if line.startswith('rename from '):
373 gp.op = 'RENAME'
374 gp.op = 'RENAME'
374 gp.oldpath = line[12:]
375 gp.oldpath = line[12:]
375 elif line.startswith('rename to '):
376 elif line.startswith('rename to '):
376 gp.path = line[10:]
377 gp.path = line[10:]
377 elif line.startswith('copy from '):
378 elif line.startswith('copy from '):
378 gp.op = 'COPY'
379 gp.op = 'COPY'
379 gp.oldpath = line[10:]
380 gp.oldpath = line[10:]
380 elif line.startswith('copy to '):
381 elif line.startswith('copy to '):
381 gp.path = line[8:]
382 gp.path = line[8:]
382 elif line.startswith('deleted file'):
383 elif line.startswith('deleted file'):
383 gp.op = 'DELETE'
384 gp.op = 'DELETE'
384 elif line.startswith('new file mode '):
385 elif line.startswith('new file mode '):
385 gp.op = 'ADD'
386 gp.op = 'ADD'
386 gp.setmode(int(line[-6:], 8))
387 gp.setmode(int(line[-6:], 8))
387 elif line.startswith('new mode '):
388 elif line.startswith('new mode '):
388 gp.setmode(int(line[-6:], 8))
389 gp.setmode(int(line[-6:], 8))
389 elif line.startswith('GIT binary patch'):
390 elif line.startswith('GIT binary patch'):
390 gp.binary = True
391 gp.binary = True
391 if gp:
392 if gp:
392 gitpatches.append(gp)
393 gitpatches.append(gp)
393
394
394 return gitpatches
395 return gitpatches
395
396
396 class linereader(object):
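readgitpatch() only needs an iterable of lines, so it is easy to drive directly; a sketch with illustrative input, run from a Mercurial checkout:

    from mercurial import patch

    lines = iter([
        'diff --git a/old.txt b/new.txt\n',
        'rename from old.txt\n',
        'rename to new.txt\n',
        'diff --git a/run.sh b/run.sh\n',
        'new file mode 100755\n',
    ])
    for gp in patch.readgitpatch(lines):
        print(gp.op, gp.path, gp.oldpath)
    # RENAME new.txt old.txt
    # ADD run.sh None
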
397 class linereader(object):
397 # simple class to allow pushing lines back into the input stream
398 # simple class to allow pushing lines back into the input stream
398 def __init__(self, fp):
399 def __init__(self, fp):
399 self.fp = fp
400 self.fp = fp
400 self.buf = []
401 self.buf = []
401
402
402 def push(self, line):
403 def push(self, line):
403 if line is not None:
404 if line is not None:
404 self.buf.append(line)
405 self.buf.append(line)
405
406
406 def readline(self):
407 def readline(self):
407 if self.buf:
408 if self.buf:
408 l = self.buf[0]
409 l = self.buf[0]
409 del self.buf[0]
410 del self.buf[0]
410 return l
411 return l
411 return self.fp.readline()
412 return self.fp.readline()
412
413
413 def __iter__(self):
414 def __iter__(self):
414 return iter(self.readline, '')
415 return iter(self.readline, '')
415
416
416 class abstractbackend(object):
417 class abstractbackend(object):
417 def __init__(self, ui):
418 def __init__(self, ui):
418 self.ui = ui
419 self.ui = ui
419
420
420 def getfile(self, fname):
421 def getfile(self, fname):
421 """Return target file data and flags as a (data, (islink,
422 """Return target file data and flags as a (data, (islink,
422 isexec)) tuple. Data is None if file is missing/deleted.
423 isexec)) tuple. Data is None if file is missing/deleted.
423 """
424 """
424 raise NotImplementedError
425 raise NotImplementedError
425
426
426 def setfile(self, fname, data, mode, copysource):
427 def setfile(self, fname, data, mode, copysource):
427 """Write data to target file fname and set its mode. mode is a
428 """Write data to target file fname and set its mode. mode is a
428 (islink, isexec) tuple. If data is None, the file content should
429 (islink, isexec) tuple. If data is None, the file content should
429 be left unchanged. If the file is modified after being copied,
430 be left unchanged. If the file is modified after being copied,
430 copysource is set to the original file name.
431 copysource is set to the original file name.
431 """
432 """
432 raise NotImplementedError
433 raise NotImplementedError
433
434
434 def unlink(self, fname):
435 def unlink(self, fname):
435 """Unlink target file."""
436 """Unlink target file."""
436 raise NotImplementedError
437 raise NotImplementedError
437
438
438 def writerej(self, fname, failed, total, lines):
439 def writerej(self, fname, failed, total, lines):
439 """Write rejected lines for fname. total is the number of hunks
440 """Write rejected lines for fname. total is the number of hunks
440 which failed to apply and total the total number of hunks for this
441 which failed to apply and total the total number of hunks for this
441 files.
442 files.
442 """
443 """
443 pass
444 pass
444
445
445 def exists(self, fname):
446 def exists(self, fname):
446 raise NotImplementedError
447 raise NotImplementedError
447
448
448 class fsbackend(abstractbackend):
449 class fsbackend(abstractbackend):
449 def __init__(self, ui, basedir):
450 def __init__(self, ui, basedir):
450 super(fsbackend, self).__init__(ui)
451 super(fsbackend, self).__init__(ui)
451 self.opener = scmutil.opener(basedir)
452 self.opener = scmutil.opener(basedir)
452
453
453 def _join(self, f):
454 def _join(self, f):
454 return os.path.join(self.opener.base, f)
455 return os.path.join(self.opener.base, f)
455
456
456 def getfile(self, fname):
457 def getfile(self, fname):
457 if self.opener.islink(fname):
458 if self.opener.islink(fname):
458 return (self.opener.readlink(fname), (True, False))
459 return (self.opener.readlink(fname), (True, False))
459
460
460 isexec = False
461 isexec = False
461 try:
462 try:
462 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
463 except OSError as e:
464 except OSError as e:
464 if e.errno != errno.ENOENT:
465 if e.errno != errno.ENOENT:
465 raise
466 raise
466 try:
467 try:
467 return (self.opener.read(fname), (False, isexec))
468 return (self.opener.read(fname), (False, isexec))
468 except IOError as e:
469 except IOError as e:
469 if e.errno != errno.ENOENT:
470 if e.errno != errno.ENOENT:
470 raise
471 raise
471 return None, None
472 return None, None
472
473
473 def setfile(self, fname, data, mode, copysource):
474 def setfile(self, fname, data, mode, copysource):
474 islink, isexec = mode
475 islink, isexec = mode
475 if data is None:
476 if data is None:
476 self.opener.setflags(fname, islink, isexec)
477 self.opener.setflags(fname, islink, isexec)
477 return
478 return
478 if islink:
479 if islink:
479 self.opener.symlink(data, fname)
480 self.opener.symlink(data, fname)
480 else:
481 else:
481 self.opener.write(fname, data)
482 self.opener.write(fname, data)
482 if isexec:
483 if isexec:
483 self.opener.setflags(fname, False, True)
484 self.opener.setflags(fname, False, True)
484
485
485 def unlink(self, fname):
486 def unlink(self, fname):
486 self.opener.unlinkpath(fname, ignoremissing=True)
487 self.opener.unlinkpath(fname, ignoremissing=True)
487
488
488 def writerej(self, fname, failed, total, lines):
489 def writerej(self, fname, failed, total, lines):
489 fname = fname + ".rej"
490 fname = fname + ".rej"
490 self.ui.warn(
491 self.ui.warn(
491 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
492 (failed, total, fname))
493 (failed, total, fname))
493 fp = self.opener(fname, 'w')
494 fp = self.opener(fname, 'w')
494 fp.writelines(lines)
495 fp.writelines(lines)
495 fp.close()
496 fp.close()
496
497
497 def exists(self, fname):
498 def exists(self, fname):
498 return self.opener.lexists(fname)
499 return self.opener.lexists(fname)
499
500
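fsbackend realizes the abstractbackend contract against the filesystem; the contract is small enough that a toy in-memory variant fits in a few lines. A sketch (not part of Mercurial) following the (data, (islink, isexec)) convention, with None data meaning missing:

    class memorybackend(object):
        def __init__(self):
            self.files = {}      # fname -> (data, (islink, isexec))

        def getfile(self, fname):
            # (None, None) signals a missing/deleted file
            return self.files.get(fname, (None, None))

        def setfile(self, fname, data, mode, copysource):
            if data is None:     # flags-only change keeps old content
                data = self.files[fname][0]
            self.files[fname] = (data, mode)

        def unlink(self, fname):
            self.files.pop(fname, None)

        def exists(self, fname):
            return fname in self.files
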
500 class workingbackend(fsbackend):
501 class workingbackend(fsbackend):
501 def __init__(self, ui, repo, similarity):
502 def __init__(self, ui, repo, similarity):
502 super(workingbackend, self).__init__(ui, repo.root)
503 super(workingbackend, self).__init__(ui, repo.root)
503 self.repo = repo
504 self.repo = repo
504 self.similarity = similarity
505 self.similarity = similarity
505 self.removed = set()
506 self.removed = set()
506 self.changed = set()
507 self.changed = set()
507 self.copied = []
508 self.copied = []
508
509
509 def _checkknown(self, fname):
510 def _checkknown(self, fname):
510 if self.repo.dirstate[fname] == '?' and self.exists(fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
511 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
512
513
513 def setfile(self, fname, data, mode, copysource):
514 def setfile(self, fname, data, mode, copysource):
514 self._checkknown(fname)
515 self._checkknown(fname)
515 super(workingbackend, self).setfile(fname, data, mode, copysource)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
516 if copysource is not None:
517 if copysource is not None:
517 self.copied.append((copysource, fname))
518 self.copied.append((copysource, fname))
518 self.changed.add(fname)
519 self.changed.add(fname)
519
520
520 def unlink(self, fname):
521 def unlink(self, fname):
521 self._checkknown(fname)
522 self._checkknown(fname)
522 super(workingbackend, self).unlink(fname)
523 super(workingbackend, self).unlink(fname)
523 self.removed.add(fname)
524 self.removed.add(fname)
524 self.changed.add(fname)
525 self.changed.add(fname)
525
526
526 def close(self):
527 def close(self):
527 wctx = self.repo[None]
528 wctx = self.repo[None]
528 changed = set(self.changed)
529 changed = set(self.changed)
529 for src, dst in self.copied:
530 for src, dst in self.copied:
530 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
531 if self.removed:
532 if self.removed:
532 wctx.forget(sorted(self.removed))
533 wctx.forget(sorted(self.removed))
533 for f in self.removed:
534 for f in self.removed:
534 if f not in self.repo.dirstate:
535 if f not in self.repo.dirstate:
535 # File was deleted and no longer belongs to the
536 # File was deleted and no longer belongs to the
536 # dirstate, it was probably marked added then
537 # dirstate, it was probably marked added then
537 # deleted, and should not be considered by
538 # deleted, and should not be considered by
538 # marktouched().
539 # marktouched().
539 changed.discard(f)
540 changed.discard(f)
540 if changed:
541 if changed:
541 scmutil.marktouched(self.repo, changed, self.similarity)
542 scmutil.marktouched(self.repo, changed, self.similarity)
542 return sorted(self.changed)
543 return sorted(self.changed)
543
544
544 class filestore(object):
545 class filestore(object):
545 def __init__(self, maxsize=None):
546 def __init__(self, maxsize=None):
546 self.opener = None
547 self.opener = None
547 self.files = {}
548 self.files = {}
548 self.created = 0
549 self.created = 0
549 self.maxsize = maxsize
550 self.maxsize = maxsize
550 if self.maxsize is None:
551 if self.maxsize is None:
551 self.maxsize = 4*(2**20)
552 self.maxsize = 4*(2**20)
552 self.size = 0
553 self.size = 0
553 self.data = {}
554 self.data = {}
554
555
555 def setfile(self, fname, data, mode, copied=None):
556 def setfile(self, fname, data, mode, copied=None):
556 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
557 self.data[fname] = (data, mode, copied)
558 self.data[fname] = (data, mode, copied)
558 self.size += len(data)
559 self.size += len(data)
559 else:
560 else:
560 if self.opener is None:
561 if self.opener is None:
561 root = tempfile.mkdtemp(prefix='hg-patch-')
562 root = tempfile.mkdtemp(prefix='hg-patch-')
562 self.opener = scmutil.opener(root)
563 self.opener = scmutil.opener(root)
563 # Avoid filename issues with these simple names
564 # Avoid filename issues with these simple names
564 fn = str(self.created)
565 fn = str(self.created)
565 self.opener.write(fn, data)
566 self.opener.write(fn, data)
566 self.created += 1
567 self.created += 1
567 self.files[fname] = (fn, mode, copied)
568 self.files[fname] = (fn, mode, copied)
568
569
569 def getfile(self, fname):
570 def getfile(self, fname):
570 if fname in self.data:
571 if fname in self.data:
571 return self.data[fname]
572 return self.data[fname]
572 if not self.opener or fname not in self.files:
573 if not self.opener or fname not in self.files:
573 return None, None, None
574 return None, None, None
574 fn, mode, copied = self.files[fname]
575 fn, mode, copied = self.files[fname]
575 return self.opener.read(fn), mode, copied
576 return self.opener.read(fn), mode, copied
576
577
577 def close(self):
578 def close(self):
578 if self.opener:
579 if self.opener:
579 shutil.rmtree(self.opener.base)
580 shutil.rmtree(self.opener.base)
580
581
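filestore keeps contents in memory until maxsize (4 MiB by default) would be exceeded, then writes later files to a temporary directory that close() removes. A small sketch forcing the spill path:

    from mercurial import patch

    store = patch.filestore(maxsize=16)              # tiny cap for demo
    store.setfile('small', 'abc', (False, False))    # kept in memory
    store.setfile('big', 'x' * 64, (False, False))   # spilled to disk
    try:
        data, mode, copied = store.getfile('big')
        assert data == 'x' * 64 and copied is None
    finally:
        store.close()                                # removes spill dir
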
581 class repobackend(abstractbackend):
582 class repobackend(abstractbackend):
582 def __init__(self, ui, repo, ctx, store):
583 def __init__(self, ui, repo, ctx, store):
583 super(repobackend, self).__init__(ui)
584 super(repobackend, self).__init__(ui)
584 self.repo = repo
585 self.repo = repo
585 self.ctx = ctx
586 self.ctx = ctx
586 self.store = store
587 self.store = store
587 self.changed = set()
588 self.changed = set()
588 self.removed = set()
589 self.removed = set()
589 self.copied = {}
590 self.copied = {}
590
591
591 def _checkknown(self, fname):
592 def _checkknown(self, fname):
592 if fname not in self.ctx:
593 if fname not in self.ctx:
593 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
594
595
595 def getfile(self, fname):
596 def getfile(self, fname):
596 try:
597 try:
597 fctx = self.ctx[fname]
598 fctx = self.ctx[fname]
598 except error.LookupError:
599 except error.LookupError:
599 return None, None
600 return None, None
600 flags = fctx.flags()
601 flags = fctx.flags()
601 return fctx.data(), ('l' in flags, 'x' in flags)
602 return fctx.data(), ('l' in flags, 'x' in flags)
602
603
603 def setfile(self, fname, data, mode, copysource):
604 def setfile(self, fname, data, mode, copysource):
604 if copysource:
605 if copysource:
605 self._checkknown(copysource)
606 self._checkknown(copysource)
606 if data is None:
607 if data is None:
607 data = self.ctx[fname].data()
608 data = self.ctx[fname].data()
608 self.store.setfile(fname, data, mode, copysource)
609 self.store.setfile(fname, data, mode, copysource)
609 self.changed.add(fname)
610 self.changed.add(fname)
610 if copysource:
611 if copysource:
611 self.copied[fname] = copysource
612 self.copied[fname] = copysource
612
613
613 def unlink(self, fname):
614 def unlink(self, fname):
614 self._checkknown(fname)
615 self._checkknown(fname)
615 self.removed.add(fname)
616 self.removed.add(fname)
616
617
617 def exists(self, fname):
618 def exists(self, fname):
618 return fname in self.ctx
619 return fname in self.ctx
619
620
620 def close(self):
621 def close(self):
621 return self.changed | self.removed
622 return self.changed | self.removed
622
623
623 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
624 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
625 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
626 eolmodes = ['strict', 'crlf', 'lf', 'auto']
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
627
628
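The two descriptor regexes cover unified and context hunk headers; the optional length groups default to 1 when absent, as the parsing code assumes. For example:

    import re

    unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')

    print(unidesc.match('@@ -12,5 +12,6 @@').groups())
    # ('12', '5', '12', '6')
    print(unidesc.match('@@ -3 +3 @@').groups())
    # ('3', None, '3', None) -- missing lengths mean 1
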
628 class patchfile(object):
629 class patchfile(object):
629 def __init__(self, ui, gp, backend, store, eolmode='strict'):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
630 self.fname = gp.path
631 self.fname = gp.path
631 self.eolmode = eolmode
632 self.eolmode = eolmode
632 self.eol = None
633 self.eol = None
633 self.backend = backend
634 self.backend = backend
634 self.ui = ui
635 self.ui = ui
635 self.lines = []
636 self.lines = []
636 self.exists = False
637 self.exists = False
637 self.missing = True
638 self.missing = True
638 self.mode = gp.mode
639 self.mode = gp.mode
639 self.copysource = gp.oldpath
640 self.copysource = gp.oldpath
640 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
641 self.remove = gp.op == 'DELETE'
642 self.remove = gp.op == 'DELETE'
642 if self.copysource is None:
643 if self.copysource is None:
643 data, mode = backend.getfile(self.fname)
644 data, mode = backend.getfile(self.fname)
644 else:
645 else:
645 data, mode = store.getfile(self.copysource)[:2]
646 data, mode = store.getfile(self.copysource)[:2]
646 if data is not None:
647 if data is not None:
647 self.exists = self.copysource is None or backend.exists(self.fname)
648 self.exists = self.copysource is None or backend.exists(self.fname)
648 self.missing = False
649 self.missing = False
649 if data:
650 if data:
650 self.lines = mdiff.splitnewlines(data)
651 self.lines = mdiff.splitnewlines(data)
651 if self.mode is None:
652 if self.mode is None:
652 self.mode = mode
653 self.mode = mode
653 if self.lines:
654 if self.lines:
654 # Normalize line endings
655 # Normalize line endings
655 if self.lines[0].endswith('\r\n'):
656 if self.lines[0].endswith('\r\n'):
656 self.eol = '\r\n'
657 self.eol = '\r\n'
657 elif self.lines[0].endswith('\n'):
658 elif self.lines[0].endswith('\n'):
658 self.eol = '\n'
659 self.eol = '\n'
659 if eolmode != 'strict':
660 if eolmode != 'strict':
660 nlines = []
661 nlines = []
661 for l in self.lines:
662 for l in self.lines:
662 if l.endswith('\r\n'):
663 if l.endswith('\r\n'):
663 l = l[:-2] + '\n'
664 l = l[:-2] + '\n'
664 nlines.append(l)
665 nlines.append(l)
665 self.lines = nlines
666 self.lines = nlines
666 else:
667 else:
667 if self.create:
668 if self.create:
668 self.missing = False
669 self.missing = False
669 if self.mode is None:
670 if self.mode is None:
670 self.mode = (False, False)
671 self.mode = (False, False)
671 if self.missing:
672 if self.missing:
672 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
673 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
674 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
674 "current directory)\n"))
675 "current directory)\n"))
675
676
676 self.hash = {}
677 self.hash = {}
677 self.dirty = 0
678 self.dirty = 0
678 self.offset = 0
679 self.offset = 0
679 self.skew = 0
680 self.skew = 0
680 self.rej = []
681 self.rej = []
681 self.fileprinted = False
682 self.fileprinted = False
682 self.printfile(False)
683 self.printfile(False)
683 self.hunks = 0
684 self.hunks = 0
684
685
685 def writelines(self, fname, lines, mode):
686 def writelines(self, fname, lines, mode):
686 if self.eolmode == 'auto':
687 if self.eolmode == 'auto':
687 eol = self.eol
688 eol = self.eol
688 elif self.eolmode == 'crlf':
689 elif self.eolmode == 'crlf':
689 eol = '\r\n'
690 eol = '\r\n'
690 else:
691 else:
691 eol = '\n'
692 eol = '\n'
692
693
693 if self.eolmode != 'strict' and eol and eol != '\n':
694 if self.eolmode != 'strict' and eol and eol != '\n':
694 rawlines = []
695 rawlines = []
695 for l in lines:
696 for l in lines:
696 if l and l[-1] == '\n':
697 if l and l[-1] == '\n':
697 l = l[:-1] + eol
698 l = l[:-1] + eol
698 rawlines.append(l)
699 rawlines.append(l)
699 lines = rawlines
700 lines = rawlines
700
701
701 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
702 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
702
703
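Outside 'strict' mode, writelines() rewrites every trailing '\n' to the chosen EOL before handing the joined text to the backend. Isolated, the transform is just:

    def normalize_eol(lines, eol):
        # rewrite trailing '\n' to the target EOL, as writelines() does
        out = []
        for l in lines:
            if l and l[-1] == '\n':
                l = l[:-1] + eol
            out.append(l)
        return out

    print(normalize_eol(['a\n', 'b\n', 'c'], '\r\n'))
    # ['a\r\n', 'b\r\n', 'c']
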
703 def printfile(self, warn):
704 def printfile(self, warn):
704 if self.fileprinted:
705 if self.fileprinted:
705 return
706 return
706 if warn or self.ui.verbose:
707 if warn or self.ui.verbose:
707 self.fileprinted = True
708 self.fileprinted = True
708 s = _("patching file %s\n") % self.fname
709 s = _("patching file %s\n") % self.fname
709 if warn:
710 if warn:
710 self.ui.warn(s)
711 self.ui.warn(s)
711 else:
712 else:
712 self.ui.note(s)
713 self.ui.note(s)
713
714
714
715
715 def findlines(self, l, linenum):
716 def findlines(self, l, linenum):
716 # looks through the hash and finds candidate lines. The
717 # looks through the hash and finds candidate lines. The
717 # result is a list of line numbers sorted based on distance
718 # result is a list of line numbers sorted based on distance
718 # from linenum
719 # from linenum
719
720
720 cand = self.hash.get(l, [])
721 cand = self.hash.get(l, [])
721 if len(cand) > 1:
722 if len(cand) > 1:
722 # resort our list of potentials forward then back.
723 # resort our list of potentials forward then back.
723 cand.sort(key=lambda x: abs(x - linenum))
724 cand.sort(key=lambda x: abs(x - linenum))
724 return cand
725 return cand
725
726
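This candidate ordering is what makes fuzzing prefer the nearest plausible position: occurrences of a hunk's leading line are sorted by distance from where the hunk was expected to apply. In isolation:

    def nearest_first(positions, linenum):
        # the same key findlines() sorts by: distance from the
        # expected line
        return sorted(positions, key=lambda x: abs(x - linenum))

    print(nearest_first([4, 90, 120], 100))   # [90, 120, 4]
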
726 def write_rej(self):
727 def write_rej(self):
727 # our rejects are a little different from patch(1). This always
728 # our rejects are a little different from patch(1). This always
728 # creates rejects in the same form as the original patch. A file
729 # creates rejects in the same form as the original patch. A file
729 # header is inserted so that you can run the reject through patch again
730 # header is inserted so that you can run the reject through patch again
730 # without having to type the filename.
731 # without having to type the filename.
731 if not self.rej:
732 if not self.rej:
732 return
733 return
733 base = os.path.basename(self.fname)
734 base = os.path.basename(self.fname)
734 lines = ["--- %s\n+++ %s\n" % (base, base)]
735 lines = ["--- %s\n+++ %s\n" % (base, base)]
735 for x in self.rej:
736 for x in self.rej:
736 for l in x.hunk:
737 for l in x.hunk:
737 lines.append(l)
738 lines.append(l)
738 if l[-1] != '\n':
739 if l[-1] != '\n':
739 lines.append("\n\ No newline at end of file\n")
740 lines.append("\n\ No newline at end of file\n")
740 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
741 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
741
742
742 def apply(self, h):
743 def apply(self, h):
743 if not h.complete():
744 if not h.complete():
744 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
745 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
745 (h.number, h.desc, len(h.a), h.lena, len(h.b),
746 (h.number, h.desc, len(h.a), h.lena, len(h.b),
746 h.lenb))
747 h.lenb))
747
748
748 self.hunks += 1
749 self.hunks += 1
749
750
750 if self.missing:
751 if self.missing:
751 self.rej.append(h)
752 self.rej.append(h)
752 return -1
753 return -1
753
754
754 if self.exists and self.create:
755 if self.exists and self.create:
755 if self.copysource:
756 if self.copysource:
756 self.ui.warn(_("cannot create %s: destination already "
757 self.ui.warn(_("cannot create %s: destination already "
757 "exists\n") % self.fname)
758 "exists\n") % self.fname)
758 else:
759 else:
759 self.ui.warn(_("file %s already exists\n") % self.fname)
760 self.ui.warn(_("file %s already exists\n") % self.fname)
760 self.rej.append(h)
761 self.rej.append(h)
761 return -1
762 return -1
762
763
763 if isinstance(h, binhunk):
764 if isinstance(h, binhunk):
764 if self.remove:
765 if self.remove:
765 self.backend.unlink(self.fname)
766 self.backend.unlink(self.fname)
766 else:
767 else:
767 l = h.new(self.lines)
768 l = h.new(self.lines)
768 self.lines[:] = l
769 self.lines[:] = l
769 self.offset += len(l)
770 self.offset += len(l)
770 self.dirty = True
771 self.dirty = True
771 return 0
772 return 0
772
773
773 horig = h
774 horig = h
774 if (self.eolmode in ('crlf', 'lf')
775 if (self.eolmode in ('crlf', 'lf')
775 or self.eolmode == 'auto' and self.eol):
776 or self.eolmode == 'auto' and self.eol):
776 # If new eols are going to be normalized, then normalize
777 # If new eols are going to be normalized, then normalize
777 # hunk data before patching. Otherwise, preserve input
778 # hunk data before patching. Otherwise, preserve input
778 # line-endings.
779 # line-endings.
779 h = h.getnormalized()
780 h = h.getnormalized()
780
781
781 # fast case first, no offsets, no fuzz
782 # fast case first, no offsets, no fuzz
782 old, oldstart, new, newstart = h.fuzzit(0, False)
783 old, oldstart, new, newstart = h.fuzzit(0, False)
783 oldstart += self.offset
784 oldstart += self.offset
784 orig_start = oldstart
785 orig_start = oldstart
785 # if there's skew we want to emit the "(offset %d lines)" even
786 # if there's skew we want to emit the "(offset %d lines)" even
786 # when the hunk cleanly applies at start + skew, so skip the
787 # when the hunk cleanly applies at start + skew, so skip the
787 # fast case code
788 # fast case code
788 if (self.skew == 0 and
789 if (self.skew == 0 and
789 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
790 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
790 if self.remove:
791 if self.remove:
791 self.backend.unlink(self.fname)
792 self.backend.unlink(self.fname)
792 else:
793 else:
793 self.lines[oldstart:oldstart + len(old)] = new
794 self.lines[oldstart:oldstart + len(old)] = new
794 self.offset += len(new) - len(old)
795 self.offset += len(new) - len(old)
795 self.dirty = True
796 self.dirty = True
796 return 0
797 return 0
797
798
798 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
799 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
799 self.hash = {}
800 self.hash = {}
800 for x, s in enumerate(self.lines):
801 for x, s in enumerate(self.lines):
801 self.hash.setdefault(s, []).append(x)
802 self.hash.setdefault(s, []).append(x)
802
803
803 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
804 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
804 for toponly in [True, False]:
805 for toponly in [True, False]:
805 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
806 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
806 oldstart = oldstart + self.offset + self.skew
807 oldstart = oldstart + self.offset + self.skew
807 oldstart = min(oldstart, len(self.lines))
808 oldstart = min(oldstart, len(self.lines))
808 if old:
809 if old:
809 cand = self.findlines(old[0][1:], oldstart)
810 cand = self.findlines(old[0][1:], oldstart)
810 else:
811 else:
811 # Only adding lines with no or fuzzed context, just
812 # Only adding lines with no or fuzzed context, just
812 # take the skew into account
813 # take the skew into account
813 cand = [oldstart]
814 cand = [oldstart]
814
815
815 for l in cand:
816 for l in cand:
816 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
817 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
817 self.lines[l : l + len(old)] = new
818 self.lines[l : l + len(old)] = new
818 self.offset += len(new) - len(old)
819 self.offset += len(new) - len(old)
819 self.skew = l - orig_start
820 self.skew = l - orig_start
820 self.dirty = True
821 self.dirty = True
821 offset = l - orig_start - fuzzlen
822 offset = l - orig_start - fuzzlen
822 if fuzzlen:
823 if fuzzlen:
823 msg = _("Hunk #%d succeeded at %d "
824 msg = _("Hunk #%d succeeded at %d "
824 "with fuzz %d "
825 "with fuzz %d "
825 "(offset %d lines).\n")
826 "(offset %d lines).\n")
826 self.printfile(True)
827 self.printfile(True)
827 self.ui.warn(msg %
828 self.ui.warn(msg %
828 (h.number, l + 1, fuzzlen, offset))
829 (h.number, l + 1, fuzzlen, offset))
829 else:
830 else:
830 msg = _("Hunk #%d succeeded at %d "
831 msg = _("Hunk #%d succeeded at %d "
831 "(offset %d lines).\n")
832 "(offset %d lines).\n")
832 self.ui.note(msg % (h.number, l + 1, offset))
833 self.ui.note(msg % (h.number, l + 1, offset))
833 return fuzzlen
834 return fuzzlen
834 self.printfile(True)
835 self.printfile(True)
835 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
836 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
836 self.rej.append(horig)
837 self.rej.append(horig)
837 return -1
838 return -1
838
839
839 def close(self):
840 def close(self):
840 if self.dirty:
841 if self.dirty:
841 self.writelines(self.fname, self.lines, self.mode)
842 self.writelines(self.fname, self.lines, self.mode)
842 self.write_rej()
843 self.write_rej()
843 return len(self.rej)
844 return len(self.rej)
844
845
845 class header(object):
846 class header(object):
846 """patch header
847 """patch header
847 """
848 """
848 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
849 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
849 diff_re = re.compile('diff -r .* (.*)$')
850 diff_re = re.compile('diff -r .* (.*)$')
850 allhunks_re = re.compile('(?:index|deleted file) ')
851 allhunks_re = re.compile('(?:index|deleted file) ')
851 pretty_re = re.compile('(?:new file|deleted file) ')
852 pretty_re = re.compile('(?:new file|deleted file) ')
852 special_re = re.compile('(?:index|deleted|copy|rename) ')
853 special_re = re.compile('(?:index|deleted|copy|rename) ')
853 newfile_re = re.compile('(?:new file)')
854 newfile_re = re.compile('(?:new file)')
854
855
855 def __init__(self, header):
856 def __init__(self, header):
856 self.header = header
857 self.header = header
857 self.hunks = []
858 self.hunks = []
858
859
859 def binary(self):
860 def binary(self):
860 return any(h.startswith('index ') for h in self.header)
861 return any(h.startswith('index ') for h in self.header)
861
862
862 def pretty(self, fp):
863 def pretty(self, fp):
863 for h in self.header:
864 for h in self.header:
864 if h.startswith('index '):
865 if h.startswith('index '):
865 fp.write(_('this modifies a binary file (all or nothing)\n'))
866 fp.write(_('this modifies a binary file (all or nothing)\n'))
866 break
867 break
867 if self.pretty_re.match(h):
868 if self.pretty_re.match(h):
868 fp.write(h)
869 fp.write(h)
869 if self.binary():
870 if self.binary():
870 fp.write(_('this is a binary file\n'))
871 fp.write(_('this is a binary file\n'))
871 break
872 break
872 if h.startswith('---'):
873 if h.startswith('---'):
873 fp.write(_('%d hunks, %d lines changed\n') %
874 fp.write(_('%d hunks, %d lines changed\n') %
874 (len(self.hunks),
875 (len(self.hunks),
875 sum([max(h.added, h.removed) for h in self.hunks])))
876 sum([max(h.added, h.removed) for h in self.hunks])))
876 break
877 break
877 fp.write(h)
878 fp.write(h)
878
879
879 def write(self, fp):
880 def write(self, fp):
880 fp.write(''.join(self.header))
881 fp.write(''.join(self.header))
881
882
882 def allhunks(self):
883 def allhunks(self):
883 return any(self.allhunks_re.match(h) for h in self.header)
884 return any(self.allhunks_re.match(h) for h in self.header)
884
885
885 def files(self):
886 def files(self):
886 match = self.diffgit_re.match(self.header[0])
887 match = self.diffgit_re.match(self.header[0])
887 if match:
888 if match:
888 fromfile, tofile = match.groups()
889 fromfile, tofile = match.groups()
889 if fromfile == tofile:
890 if fromfile == tofile:
890 return [fromfile]
891 return [fromfile]
891 return [fromfile, tofile]
892 return [fromfile, tofile]
892 else:
893 else:
893 return self.diff_re.match(self.header[0]).groups()
894 return self.diff_re.match(self.header[0]).groups()
894
895
895 def filename(self):
896 def filename(self):
896 return self.files()[-1]
897 return self.files()[-1]
897
898
898 def __repr__(self):
899 def __repr__(self):
899 return '<header %s>' % (' '.join(map(repr, self.files())))
900 return '<header %s>' % (' '.join(map(repr, self.files())))
900
901
901 def isnewfile(self):
902 def isnewfile(self):
902 return any(self.newfile_re.match(h) for h in self.header)
903 return any(self.newfile_re.match(h) for h in self.header)
903
904
904 def special(self):
905 def special(self):
905 # Special files are shown only at the header level and not at the hunk
906 # Special files are shown only at the header level and not at the hunk
906 # level; for example, a file that has been deleted is a special file.
907 # level; for example, a file that has been deleted is a special file.
907 # The user cannot change the content of the operation: in the case of
908 # The user cannot change the content of the operation: in the case of
908 # a deleted file, the deletion is taken or left as a whole; it cannot
909 # a deleted file, the deletion is taken or left as a whole; it cannot
909 # be taken partially.
910 # be taken partially.
910 # Newly added files are special only if they are empty; files with
911 # Newly added files are special only if they are empty; files with
911 # content are not special, as we want to be able to change it.
912 # content are not special, as we want to be able to change it.
912 nocontent = len(self.header) == 2
913 nocontent = len(self.header) == 2
913 emptynewfile = self.isnewfile() and nocontent
914 emptynewfile = self.isnewfile() and nocontent
914 return emptynewfile or \
915 return emptynewfile or \
915 any(self.special_re.match(h) for h in self.header)
916 any(self.special_re.match(h) for h in self.header)
916
917
917 class recordhunk(object):
918 class recordhunk(object):
918 """patch hunk
919 """patch hunk
919
920
920 XXX shouldn't we merge this with the other hunk class?
921 XXX shouldn't we merge this with the other hunk class?
921 """
922 """
922 maxcontext = 3
923 maxcontext = 3
923
924
924 def __init__(self, header, fromline, toline, proc, before, hunk, after):
925 def __init__(self, header, fromline, toline, proc, before, hunk, after):
925 def trimcontext(number, lines):
926 def trimcontext(number, lines):
926 delta = len(lines) - self.maxcontext
927 delta = len(lines) - self.maxcontext
927 if False and delta > 0:
928 if False and delta > 0:
928 return number + delta, lines[:self.maxcontext]
929 return number + delta, lines[:self.maxcontext]
929 return number, lines
930 return number, lines
930
931
931 self.header = header
932 self.header = header
932 self.fromline, self.before = trimcontext(fromline, before)
933 self.fromline, self.before = trimcontext(fromline, before)
933 self.toline, self.after = trimcontext(toline, after)
934 self.toline, self.after = trimcontext(toline, after)
934 self.proc = proc
935 self.proc = proc
935 self.hunk = hunk
936 self.hunk = hunk
936 self.added, self.removed = self.countchanges(self.hunk)
937 self.added, self.removed = self.countchanges(self.hunk)
937
938
938 def __eq__(self, v):
939 def __eq__(self, v):
939 if not isinstance(v, recordhunk):
940 if not isinstance(v, recordhunk):
940 return False
941 return False
941
942
942 return ((v.hunk == self.hunk) and
943 return ((v.hunk == self.hunk) and
943 (v.proc == self.proc) and
944 (v.proc == self.proc) and
944 (self.fromline == v.fromline) and
945 (self.fromline == v.fromline) and
945 (self.header.files() == v.header.files()))
946 (self.header.files() == v.header.files()))
946
947
947 def __hash__(self):
948 def __hash__(self):
948 return hash((tuple(self.hunk),
949 return hash((tuple(self.hunk),
949 tuple(self.header.files()),
950 tuple(self.header.files()),
950 self.fromline,
951 self.fromline,
951 self.proc))
952 self.proc))
952
953
953 def countchanges(self, hunk):
954 def countchanges(self, hunk):
954 """hunk -> (n+,n-)"""
955 """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = 'record'
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
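                    # py3 note: os.fdopen() expects its mode as a native str;
                    # pycompat.sysstr() leaves it as bytes on Python 2 and
                    # converts it to unicode on Python 3.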
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], []), {})

class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Let's try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) implementations out there which
                # behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart

class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    from . import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks

def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
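
# A minimal usage sketch (editorial illustration, not part of the module):
#
#   headers = parsepatch(open('changes.diff').readlines())
#   for h in headers:
#       print h.filename(), len(h.hunks)
#
# Each header carries its hunks in h.hunks, in the shape consumed by
# filterpatch() and reversehunks() above.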

def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
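
# Editorial note: 'strip' counts leading path components to drop, in the
# spirit of patch(1)'s -p option; strip=1 removes the usual a/ or b/
# prefix of git-style diffs.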

def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp

def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file',    [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk',    [hunk_lines])
    - ('range',   (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
      "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
      maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                    gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
1848 def deltahead(binchunk):
1849 def deltahead(binchunk):
1849 i = 0
1850 i = 0
1850 for c in binchunk:
1851 for c in binchunk:
1851 i += 1
1852 i += 1
1852 if not (ord(c) & 0x80):
1853 if not (ord(c) & 0x80):
1853 return i
1854 return i
1854 return i
1855 return i
1855 out = ""
1856 out = ""
1856 s = deltahead(binchunk)
1857 s = deltahead(binchunk)
1857 binchunk = binchunk[s:]
1858 binchunk = binchunk[s:]
1858 s = deltahead(binchunk)
1859 s = deltahead(binchunk)
1859 binchunk = binchunk[s:]
1860 binchunk = binchunk[s:]
1860 i = 0
1861 i = 0
1861 while i < len(binchunk):
1862 while i < len(binchunk):
1862 cmd = ord(binchunk[i])
1863 cmd = ord(binchunk[i])
1863 i += 1
1864 i += 1
1864 if (cmd & 0x80):
1865 if (cmd & 0x80):
1865 offset = 0
1866 offset = 0
1866 size = 0
1867 size = 0
1867 if (cmd & 0x01):
1868 if (cmd & 0x01):
1868 offset = ord(binchunk[i])
1869 offset = ord(binchunk[i])
1869 i += 1
1870 i += 1
1870 if (cmd & 0x02):
1871 if (cmd & 0x02):
1871 offset |= ord(binchunk[i]) << 8
1872 offset |= ord(binchunk[i]) << 8
1872 i += 1
1873 i += 1
1873 if (cmd & 0x04):
1874 if (cmd & 0x04):
1874 offset |= ord(binchunk[i]) << 16
1875 offset |= ord(binchunk[i]) << 16
1875 i += 1
1876 i += 1
1876 if (cmd & 0x08):
1877 if (cmd & 0x08):
1877 offset |= ord(binchunk[i]) << 24
1878 offset |= ord(binchunk[i]) << 24
1878 i += 1
1879 i += 1
1879 if (cmd & 0x10):
1880 if (cmd & 0x10):
1880 size = ord(binchunk[i])
1881 size = ord(binchunk[i])
1881 i += 1
1882 i += 1
1882 if (cmd & 0x20):
1883 if (cmd & 0x20):
1883 size |= ord(binchunk[i]) << 8
1884 size |= ord(binchunk[i]) << 8
1884 i += 1
1885 i += 1
1885 if (cmd & 0x40):
1886 if (cmd & 0x40):
1886 size |= ord(binchunk[i]) << 16
1887 size |= ord(binchunk[i]) << 16
1887 i += 1
1888 i += 1
1888 if size == 0:
1889 if size == 0:
1889 size = 0x10000
1890 size = 0x10000
1890 offset_end = offset + size
1891 offset_end = offset + size
1891 out += data[offset:offset_end]
1892 out += data[offset:offset_end]
1892 elif cmd != 0:
1893 elif cmd != 0:
1893 offset_end = i + cmd
1894 offset_end = i + cmd
1894 out += binchunk[i:offset_end]
1895 out += binchunk[i:offset_end]
1895 i += cmd
1896 i += cmd
1896 else:
1897 else:
1897 raise PatchError(_('unexpected delta opcode 0'))
1898 raise PatchError(_('unexpected delta opcode 0'))
1898 return out
1899 return out

def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching, then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
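
# Hedged usage sketch (not in the original module): applying a patch file
# to the working directory from extension code. 'repo' and the patch
# name are assumptions made for the example.
def _internalpatchexample(repo):
    fuzz = internalpatch(repo.ui, repo, 'fix.patch', strip=1)
    if fuzz:
        repo.ui.warn('patch applied with fuzz\n')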

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
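
# Hedged usage sketch (not in the original module): listing the paths a
# patch would touch without applying it; the patch name is an assumption.
def _changedfilesexample(ui, repo):
    for f in sorted(changedfiles(ui, repo, 'fix.patch')):
        ui.write('%s\n' % f)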

class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary', forceplain=False)
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
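
# Hedged usage sketch (not in the original module): building diffopts for
# a caller that understands git-style diffs and whitespace options but
# wants format-changing options (nobinary, noprefix) left at defaults.
def _difffeatureoptsexample(ui):
    return difffeatureopts(ui, git=True, whitespace=True)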

def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
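
# Hedged usage sketch (not in the original module): rendering the diff of
# the working directory against its first parent as a single string.
def _diffexample(ui, repo):
    opts = diffallopts(ui, {'git': True})
    return ''.join(diff(repo, opts=opts))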

def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
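
# Hedged usage sketch (not in the original module): writing a colorized
# diff by passing each labeled chunk straight to ui.write().
def _diffuiexample(ui, repo):
    for chunk, label in diffui(repo):
        ui.write(chunk, label=label)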

def _filepairs(modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop
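
# Illustration (not part of the original module): with git mode on, a
# renamed file collapses into a single (old, new, 'rename') pair while a
# plain modification keeps the same name on both sides.
def _filepairsexample():
    pairs = list(_filepairs(['m.txt'], ['new.txt'], ['old.txt'],
                            {'new.txt': 'old.txt'}, mdiff.diffopts(git=True)))
    assert pairs == [('m.txt', 'm.txt', None),
                     ('old.txt', 'new.txt', 'rename')]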

def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

def diffstat(lines, width=80):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
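
# Illustration (not part of the original module): diffstat over a small
# git-style diff; header lines name the file, '+'/'-' lines are counted.
# Returns ' hello.py | 1 +' plus a one-line summary.
def _diffstatexample():
    lines = ['diff --git a/hello.py b/hello.py\n',
             '--- a/hello.py\n',
             '+++ b/hello.py\n',
             '@@ -1,1 +1,2 @@\n',
             ' print "hi"\n',
             '+print "bye"\n']
    return diffstat(lines)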

def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
@@ -1,1050 +1,1051 b''
# wireproto.py - generic wire protocol support functions
#
# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib
import itertools
import os
import tempfile

from .i18n import _
from .node import (
    bin,
    hex,
)

from . import (
    bundle2,
    changegroup as changegroupmod,
    encoding,
    error,
    exchange,
    peer,
    pushkey as pushkeymod,
    pycompat,
    streamclone,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
                        'IncompatibleClient')
bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)

class abstractserverproto(object):
    """abstract class that summarizes the protocol API

    Used as reference and documentation.
    """

    def getargs(self, args):
        """return the value for arguments in <args>

        returns a list of values (same order as <args>)"""
        raise NotImplementedError()

    def getfile(self, fp):
        """write the whole content of a file into a file like object

        The file is in the form::

            (<chunk-size>\n<chunk>)+0\n

        chunk size is the ascii version of the int.
        """
        raise NotImplementedError()

    def redirect(self):
        """may setup interception for stdout and stderr

        See also the `restore` method."""
        raise NotImplementedError()

    # If the `redirect` function does install interception, the `restore`
    # function MUST be defined. If interception is not used, this function
    # MUST NOT be defined.
    #
    # left commented here on purpose
    #
    #def restore(self):
    #    """reinstall previous stdout and stderr and return intercepted stdout
    #    """
    #    raise NotImplementedError()

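# Illustration (not part of the original module): a reader for the
# (<chunk-size>\n<chunk>)+0\n framing that getfile() implementations
# consume; sizes are ASCII decimal, and a zero size ends the stream.
def _readframedfile(fp, out):
    while True:
        length = int(fp.readline())
        if not length:
            break
        out.write(fp.read(length))
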
81 class remotebatch(peer.batcher):
82 class remotebatch(peer.batcher):
82 '''batches the queued calls; uses as few roundtrips as possible'''
83 '''batches the queued calls; uses as few roundtrips as possible'''
83 def __init__(self, remote):
84 def __init__(self, remote):
84 '''remote must support _submitbatch(encbatch) and
85 '''remote must support _submitbatch(encbatch) and
85 _submitone(op, encargs)'''
86 _submitone(op, encargs)'''
86 peer.batcher.__init__(self)
87 peer.batcher.__init__(self)
87 self.remote = remote
88 self.remote = remote
88 def submit(self):
89 def submit(self):
89 req, rsp = [], []
90 req, rsp = [], []
90 for name, args, opts, resref in self.calls:
91 for name, args, opts, resref in self.calls:
91 mtd = getattr(self.remote, name)
92 mtd = getattr(self.remote, name)
92 batchablefn = getattr(mtd, 'batchable', None)
93 batchablefn = getattr(mtd, 'batchable', None)
93 if batchablefn is not None:
94 if batchablefn is not None:
94 batchable = batchablefn(mtd.im_self, *args, **opts)
95 batchable = batchablefn(mtd.im_self, *args, **opts)
95 encargsorres, encresref = next(batchable)
96 encargsorres, encresref = next(batchable)
96 if encresref:
97 if encresref:
97 req.append((name, encargsorres,))
98 req.append((name, encargsorres,))
98 rsp.append((batchable, encresref, resref,))
99 rsp.append((batchable, encresref, resref,))
99 else:
100 else:
100 resref.set(encargsorres)
101 resref.set(encargsorres)
101 else:
102 else:
102 if req:
103 if req:
103 self._submitreq(req, rsp)
104 self._submitreq(req, rsp)
104 req, rsp = [], []
105 req, rsp = [], []
105 resref.set(mtd(*args, **opts))
106 resref.set(mtd(*args, **opts))
106 if req:
107 if req:
107 self._submitreq(req, rsp)
108 self._submitreq(req, rsp)
108 def _submitreq(self, req, rsp):
109 def _submitreq(self, req, rsp):
109 encresults = self.remote._submitbatch(req)
110 encresults = self.remote._submitbatch(req)
110 for encres, r in zip(encresults, rsp):
111 for encres, r in zip(encresults, rsp):
111 batchable, encresref, resref = r
112 batchable, encresref, resref = r
112 encresref.set(encres)
113 encresref.set(encres)
113 resref.set(next(batchable))
114 resref.set(next(batchable))
114
115
115 class remoteiterbatcher(peer.iterbatcher):
116 class remoteiterbatcher(peer.iterbatcher):
116 def __init__(self, remote):
117 def __init__(self, remote):
117 super(remoteiterbatcher, self).__init__()
118 super(remoteiterbatcher, self).__init__()
118 self._remote = remote
119 self._remote = remote
119
120
120 def __getattr__(self, name):
121 def __getattr__(self, name):
121 if not getattr(self._remote, name, False):
122 if not getattr(self._remote, name, False):
122 raise AttributeError(
123 raise AttributeError(
123 'Attempted to iterbatch non-batchable call to %r' % name)
124 'Attempted to iterbatch non-batchable call to %r' % name)
124 return super(remoteiterbatcher, self).__getattr__(name)
125 return super(remoteiterbatcher, self).__getattr__(name)
125
126
126 def submit(self):
127 def submit(self):
127 """Break the batch request into many patch calls and pipeline them.
128 """Break the batch request into many patch calls and pipeline them.
128
129
129 This is mostly valuable over http where request sizes can be
130 This is mostly valuable over http where request sizes can be
130 limited, but can be used in other places as well.
131 limited, but can be used in other places as well.
131 """
132 """
132 req, rsp = [], []
133 req, rsp = [], []
133 for name, args, opts, resref in self.calls:
134 for name, args, opts, resref in self.calls:
134 mtd = getattr(self._remote, name)
135 mtd = getattr(self._remote, name)
135 batchable = mtd.batchable(mtd.im_self, *args, **opts)
136 batchable = mtd.batchable(mtd.im_self, *args, **opts)
136 encargsorres, encresref = next(batchable)
137 encargsorres, encresref = next(batchable)
137 assert encresref
138 assert encresref
138 req.append((name, encargsorres))
139 req.append((name, encargsorres))
139 rsp.append((batchable, encresref))
140 rsp.append((batchable, encresref))
140 if req:
141 if req:
141 self._resultiter = self._remote._submitbatch(req)
142 self._resultiter = self._remote._submitbatch(req)
142 self._rsp = rsp
143 self._rsp = rsp
143
144
144 def results(self):
145 def results(self):
145 for (batchable, encresref), encres in itertools.izip(
146 for (batchable, encresref), encres in itertools.izip(
146 self._rsp, self._resultiter):
147 self._rsp, self._resultiter):
147 encresref.set(encres)
148 encresref.set(encres)
148 yield next(batchable)
149 yield next(batchable)
149
150
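# A minimal usage sketch of the iterbatcher above (the 'remote' peer and
# revision name are hypothetical): queued calls go out in a single batch
# on submit() and are decoded lazily as results() is consumed.
#
#   b = remote.iterbatch()
#   b.lookup('default')
#   b.heads()
#   b.submit()
#   node, heads = b.results()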
150 # Forward a couple of names from peer to make wireproto interactions
151 # Forward a couple of names from peer to make wireproto interactions
151 # slightly more sensible.
152 # slightly more sensible.
152 batchable = peer.batchable
153 batchable = peer.batchable
153 future = peer.future
154 future = peer.future
154
155
155 # encoding / decoding of lists of nodes
156 # encoding / decoding of lists of nodes
156
157
157 def decodelist(l, sep=' '):
158 def decodelist(l, sep=' '):
158 if l:
159 if l:
159 return map(bin, l.split(sep))
160 return map(bin, l.split(sep))
160 return []
161 return []
161
162
162 def encodelist(l, sep=' '):
163 def encodelist(l, sep=' '):
163 return sep.join(map(hex, l))
164 return sep.join(map(hex, l))
167
168
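# Round-trip sketch: node lists travel as sep-joined 40-digit hex
# strings, e.g. with the all-zeros null node:
#
#   >>> from mercurial.node import nullid
#   >>> encodelist([nullid])
#   '0000000000000000000000000000000000000000'
#   >>> decodelist(encodelist([nullid])) == [nullid]
#   True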
168 # batched call argument encoding
169 # batched call argument encoding
169
170
170 def escapearg(plain):
171 def escapearg(plain):
171 return (plain
172 return (plain
172 .replace(':', ':c')
173 .replace(':', ':c')
173 .replace(',', ':o')
174 .replace(',', ':o')
174 .replace(';', ':s')
175 .replace(';', ':s')
175 .replace('=', ':e'))
176 .replace('=', ':e'))
176
177
177 def unescapearg(escaped):
178 def unescapearg(escaped):
178 return (escaped
179 return (escaped
179 .replace(':e', '=')
180 .replace(':e', '=')
180 .replace(':s', ';')
181 .replace(':s', ';')
181 .replace(':o', ',')
182 .replace(':o', ',')
182 .replace(':c', ':'))
183 .replace(':c', ':'))
183
184
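# The escaping round-trips because ':' itself is escaped first by
# escapearg (and unescaped last by unescapearg):
#
#   >>> escapearg('key=value;more,stuff:here')
#   'key:evalue:smore:ostuff:chere'
#   >>> unescapearg(escapearg('key=value;more,stuff:here'))
#   'key=value;more,stuff:here'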
184 def encodebatchcmds(req):
185 def encodebatchcmds(req):
185 """Return a ``cmds`` argument value for the ``batch`` command."""
186 """Return a ``cmds`` argument value for the ``batch`` command."""
186 cmds = []
187 cmds = []
187 for op, argsdict in req:
188 for op, argsdict in req:
188 # Old servers didn't properly unescape argument names. So prevent
189 # Old servers didn't properly unescape argument names. So prevent
189 # the sending of argument names that may not be decoded properly by
190 # the sending of argument names that may not be decoded properly by
190 # servers.
191 # servers.
191 assert all(escapearg(k) == k for k in argsdict)
192 assert all(escapearg(k) == k for k in argsdict)
192
193
193 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
194 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
194 for k, v in argsdict.iteritems())
195 for k, v in argsdict.iteritems())
195 cmds.append('%s %s' % (op, args))
196 cmds.append('%s %s' % (op, args))
196
197
197 return ';'.join(cmds)
198 return ';'.join(cmds)
198
199
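# For illustration, a batch of a 'lookup' and a 'known' call (argument
# values made up) encodes to a single ';'-separated string:
#
#   >>> encodebatchcmds([('lookup', {'key': 'tip'}), ('known', {'nodes': ''})])
#   'lookup key=tip;known nodes='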
199 # mapping of options accepted by getbundle and their types
200 # mapping of options accepted by getbundle and their types
200 #
201 #
201 # Meant to be extended by extensions. It is an extension's responsibility to ensure
202 # Meant to be extended by extensions. It is an extension's responsibility to ensure
202 # such options are properly processed in exchange.getbundle.
203 # such options are properly processed in exchange.getbundle.
203 #
204 #
204 # supported types are:
205 # supported types are:
205 #
206 #
206 # :nodes: list of binary nodes
207 # :nodes: list of binary nodes
207 # :csv: list of comma-separated values
208 # :csv: list of comma-separated values
208 # :scsv: list of comma-separated values returned as a set
209 # :scsv: list of comma-separated values returned as a set
209 # :plain: string with no transformation needed.
210 # :plain: string with no transformation needed.
210 gboptsmap = {'heads': 'nodes',
211 gboptsmap = {'heads': 'nodes',
211 'common': 'nodes',
212 'common': 'nodes',
212 'obsmarkers': 'boolean',
213 'obsmarkers': 'boolean',
213 'bundlecaps': 'scsv',
214 'bundlecaps': 'scsv',
214 'listkeys': 'csv',
215 'listkeys': 'csv',
215 'cg': 'boolean',
216 'cg': 'boolean',
216 'cbattempted': 'boolean'}
217 'cbattempted': 'boolean'}
217
218
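# A sketch of how an extension might register an extra getbundle option
# (the option name 'myextdata' is hypothetical, and the extension must
# also teach exchange.getbundle to process it):
#
#   from mercurial import wireproto
#   wireproto.gboptsmap['myextdata'] = 'plain'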
218 # client side
219 # client side
219
220
220 class wirepeer(peer.peerrepository):
221 class wirepeer(peer.peerrepository):
221 """Client-side interface for communicating with a peer repository.
222 """Client-side interface for communicating with a peer repository.
222
223
223 Methods commonly call wire protocol commands of the same name.
224 Methods commonly call wire protocol commands of the same name.
224
225
225 See also httppeer.py and sshpeer.py for protocol-specific
226 See also httppeer.py and sshpeer.py for protocol-specific
226 implementations of this interface.
227 implementations of this interface.
227 """
228 """
228 def batch(self):
229 def batch(self):
229 if self.capable('batch'):
230 if self.capable('batch'):
230 return remotebatch(self)
231 return remotebatch(self)
231 else:
232 else:
232 return peer.localbatch(self)
233 return peer.localbatch(self)
233 def _submitbatch(self, req):
234 def _submitbatch(self, req):
234 """run batch request <req> on the server
235 """run batch request <req> on the server
235
236
236 Returns an iterator of the raw responses from the server.
237 Returns an iterator of the raw responses from the server.
237 """
238 """
238 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
239 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
239 chunk = rsp.read(1024)
240 chunk = rsp.read(1024)
240 work = [chunk]
241 work = [chunk]
241 while chunk:
242 while chunk:
242 while ';' not in chunk and chunk:
243 while ';' not in chunk and chunk:
243 chunk = rsp.read(1024)
244 chunk = rsp.read(1024)
244 work.append(chunk)
245 work.append(chunk)
245 merged = ''.join(work)
246 merged = ''.join(work)
246 while ';' in merged:
247 while ';' in merged:
247 one, merged = merged.split(';', 1)
248 one, merged = merged.split(';', 1)
248 yield unescapearg(one)
249 yield unescapearg(one)
249 chunk = rsp.read(1024)
250 chunk = rsp.read(1024)
250 work = [merged, chunk]
251 work = [merged, chunk]
251 yield unescapearg(''.join(work))
252 yield unescapearg(''.join(work))
252
253
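# A self-contained sketch of the framing _submitbatch decodes: results
# stream back as one ';'-separated string, each piece escaped with
# escapearg (the reply content below is made up):
#
#   >>> import io
#   >>> rsp = io.BytesIO('result one;result:otwo')
#   >>> [unescapearg(p) for p in rsp.read().split(';')]
#   ['result one', 'result,two']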
253 def _submitone(self, op, args):
254 def _submitone(self, op, args):
254 return self._call(op, **args)
255 return self._call(op, **args)
255
256
256 def iterbatch(self):
257 def iterbatch(self):
257 return remoteiterbatcher(self)
258 return remoteiterbatcher(self)
258
259
259 @batchable
260 @batchable
260 def lookup(self, key):
261 def lookup(self, key):
261 self.requirecap('lookup', _('look up remote revision'))
262 self.requirecap('lookup', _('look up remote revision'))
262 f = future()
263 f = future()
263 yield {'key': encoding.fromlocal(key)}, f
264 yield {'key': encoding.fromlocal(key)}, f
264 d = f.value
265 d = f.value
265 success, data = d[:-1].split(" ", 1)
266 success, data = d[:-1].split(" ", 1)
266 if int(success):
267 if int(success):
267 yield bin(data)
268 yield bin(data)
268 self._abort(error.RepoError(data))
269 self._abort(error.RepoError(data))
269
270
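# Note on the @batchable protocol, as used by lookup() above: the
# decorated generator first yields (encoded args, future) so a batcher
# can queue the call, and is resumed once future.value holds the raw
# response, yielding the decoded result as its second value.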
270 @batchable
271 @batchable
271 def heads(self):
272 def heads(self):
272 f = future()
273 f = future()
273 yield {}, f
274 yield {}, f
274 d = f.value
275 d = f.value
275 try:
276 try:
276 yield decodelist(d[:-1])
277 yield decodelist(d[:-1])
277 except ValueError:
278 except ValueError:
278 self._abort(error.ResponseError(_("unexpected response:"), d))
279 self._abort(error.ResponseError(_("unexpected response:"), d))
279
280
280 @batchable
281 @batchable
281 def known(self, nodes):
282 def known(self, nodes):
282 f = future()
283 f = future()
283 yield {'nodes': encodelist(nodes)}, f
284 yield {'nodes': encodelist(nodes)}, f
284 d = f.value
285 d = f.value
285 try:
286 try:
286 yield [bool(int(b)) for b in d]
287 yield [bool(int(b)) for b in d]
287 except ValueError:
288 except ValueError:
288 self._abort(error.ResponseError(_("unexpected response:"), d))
289 self._abort(error.ResponseError(_("unexpected response:"), d))
289
290
290 @batchable
291 @batchable
291 def branchmap(self):
292 def branchmap(self):
292 f = future()
293 f = future()
293 yield {}, f
294 yield {}, f
294 d = f.value
295 d = f.value
295 try:
296 try:
296 branchmap = {}
297 branchmap = {}
297 for branchpart in d.splitlines():
298 for branchpart in d.splitlines():
298 branchname, branchheads = branchpart.split(' ', 1)
299 branchname, branchheads = branchpart.split(' ', 1)
299 branchname = encoding.tolocal(urlreq.unquote(branchname))
300 branchname = encoding.tolocal(urlreq.unquote(branchname))
300 branchheads = decodelist(branchheads)
301 branchheads = decodelist(branchheads)
301 branchmap[branchname] = branchheads
302 branchmap[branchname] = branchheads
302 yield branchmap
303 yield branchmap
303 except TypeError:
304 except TypeError:
304 self._abort(error.ResponseError(_("unexpected response:"), d))
305 self._abort(error.ResponseError(_("unexpected response:"), d))
305
306
306 def branches(self, nodes):
307 def branches(self, nodes):
307 n = encodelist(nodes)
308 n = encodelist(nodes)
308 d = self._call("branches", nodes=n)
309 d = self._call("branches", nodes=n)
309 try:
310 try:
310 br = [tuple(decodelist(b)) for b in d.splitlines()]
311 br = [tuple(decodelist(b)) for b in d.splitlines()]
311 return br
312 return br
312 except ValueError:
313 except ValueError:
313 self._abort(error.ResponseError(_("unexpected response:"), d))
314 self._abort(error.ResponseError(_("unexpected response:"), d))
314
315
315 def between(self, pairs):
316 def between(self, pairs):
316 batch = 8 # avoid giant requests
317 batch = 8 # avoid giant requests
317 r = []
318 r = []
318 for i in xrange(0, len(pairs), batch):
319 for i in xrange(0, len(pairs), batch):
319 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
320 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
320 d = self._call("between", pairs=n)
321 d = self._call("between", pairs=n)
321 try:
322 try:
322 r.extend(l and decodelist(l) or [] for l in d.splitlines())
323 r.extend(l and decodelist(l) or [] for l in d.splitlines())
323 except ValueError:
324 except ValueError:
324 self._abort(error.ResponseError(_("unexpected response:"), d))
325 self._abort(error.ResponseError(_("unexpected response:"), d))
325 return r
326 return r
326
327
327 @batchable
328 @batchable
328 def pushkey(self, namespace, key, old, new):
329 def pushkey(self, namespace, key, old, new):
329 if not self.capable('pushkey'):
330 if not self.capable('pushkey'):
330 yield False, None
331 yield False, None
331 f = future()
332 f = future()
332 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
333 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
333 yield {'namespace': encoding.fromlocal(namespace),
334 yield {'namespace': encoding.fromlocal(namespace),
334 'key': encoding.fromlocal(key),
335 'key': encoding.fromlocal(key),
335 'old': encoding.fromlocal(old),
336 'old': encoding.fromlocal(old),
336 'new': encoding.fromlocal(new)}, f
337 'new': encoding.fromlocal(new)}, f
337 d = f.value
338 d = f.value
338 d, output = d.split('\n', 1)
339 d, output = d.split('\n', 1)
339 try:
340 try:
340 d = bool(int(d))
341 d = bool(int(d))
341 except ValueError:
342 except ValueError:
342 raise error.ResponseError(
343 raise error.ResponseError(
343 _('push failed (unexpected response):'), d)
344 _('push failed (unexpected response):'), d)
344 for l in output.splitlines(True):
345 for l in output.splitlines(True):
345 self.ui.status(_('remote: '), l)
346 self.ui.status(_('remote: '), l)
346 yield d
347 yield d
347
348
348 @batchable
349 @batchable
349 def listkeys(self, namespace):
350 def listkeys(self, namespace):
350 if not self.capable('pushkey'):
351 if not self.capable('pushkey'):
351 yield {}, None
352 yield {}, None
352 f = future()
353 f = future()
353 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
354 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
354 yield {'namespace': encoding.fromlocal(namespace)}, f
355 yield {'namespace': encoding.fromlocal(namespace)}, f
355 d = f.value
356 d = f.value
356 self.ui.debug('received listkey for "%s": %i bytes\n'
357 self.ui.debug('received listkey for "%s": %i bytes\n'
357 % (namespace, len(d)))
358 % (namespace, len(d)))
358 yield pushkeymod.decodekeys(d)
359 yield pushkeymod.decodekeys(d)
359
360
360 def stream_out(self):
361 def stream_out(self):
361 return self._callstream('stream_out')
362 return self._callstream('stream_out')
362
363
363 def changegroup(self, nodes, kind):
364 def changegroup(self, nodes, kind):
364 n = encodelist(nodes)
365 n = encodelist(nodes)
365 f = self._callcompressable("changegroup", roots=n)
366 f = self._callcompressable("changegroup", roots=n)
366 return changegroupmod.cg1unpacker(f, 'UN')
367 return changegroupmod.cg1unpacker(f, 'UN')
367
368
368 def changegroupsubset(self, bases, heads, kind):
369 def changegroupsubset(self, bases, heads, kind):
369 self.requirecap('changegroupsubset', _('look up remote changes'))
370 self.requirecap('changegroupsubset', _('look up remote changes'))
370 bases = encodelist(bases)
371 bases = encodelist(bases)
371 heads = encodelist(heads)
372 heads = encodelist(heads)
372 f = self._callcompressable("changegroupsubset",
373 f = self._callcompressable("changegroupsubset",
373 bases=bases, heads=heads)
374 bases=bases, heads=heads)
374 return changegroupmod.cg1unpacker(f, 'UN')
375 return changegroupmod.cg1unpacker(f, 'UN')
375
376
376 def getbundle(self, source, **kwargs):
377 def getbundle(self, source, **kwargs):
377 self.requirecap('getbundle', _('look up remote changes'))
378 self.requirecap('getbundle', _('look up remote changes'))
378 opts = {}
379 opts = {}
379 bundlecaps = kwargs.get('bundlecaps')
380 bundlecaps = kwargs.get('bundlecaps')
380 if bundlecaps is not None:
381 if bundlecaps is not None:
381 kwargs['bundlecaps'] = sorted(bundlecaps)
382 kwargs['bundlecaps'] = sorted(bundlecaps)
382 else:
383 else:
383 bundlecaps = () # kwargs could have set it to None
384 bundlecaps = () # kwargs could have set it to None
384 for key, value in kwargs.iteritems():
385 for key, value in kwargs.iteritems():
385 if value is None:
386 if value is None:
386 continue
387 continue
387 keytype = gboptsmap.get(key)
388 keytype = gboptsmap.get(key)
388 if keytype is None:
389 if keytype is None:
389 assert False, 'unexpected'
390 assert False, 'unexpected'
390 elif keytype == 'nodes':
391 elif keytype == 'nodes':
391 value = encodelist(value)
392 value = encodelist(value)
392 elif keytype in ('csv', 'scsv'):
393 elif keytype in ('csv', 'scsv'):
393 value = ','.join(value)
394 value = ','.join(value)
394 elif keytype == 'boolean':
395 elif keytype == 'boolean':
395 value = '%i' % bool(value)
396 value = '%i' % bool(value)
396 elif keytype != 'plain':
397 elif keytype != 'plain':
397 raise KeyError('unknown getbundle option type %s'
398 raise KeyError('unknown getbundle option type %s'
398 % keytype)
399 % keytype)
399 opts[key] = value
400 opts[key] = value
400 f = self._callcompressable("getbundle", **opts)
401 f = self._callcompressable("getbundle", **opts)
401 if any((cap.startswith('HG2') for cap in bundlecaps)):
402 if any((cap.startswith('HG2') for cap in bundlecaps)):
402 return bundle2.getunbundler(self.ui, f)
403 return bundle2.getunbundler(self.ui, f)
403 else:
404 else:
404 return changegroupmod.cg1unpacker(f, 'UN')
405 return changegroupmod.cg1unpacker(f, 'UN')
405
406
406 def unbundle(self, cg, heads, url):
407 def unbundle(self, cg, heads, url):
407 '''Send cg (a readable file-like object representing the
408 '''Send cg (a readable file-like object representing the
408 changegroup to push, typically a chunkbuffer object) to the
409 changegroup to push, typically a chunkbuffer object) to the
409 remote server as a bundle.
410 remote server as a bundle.
410
411
411 When pushing a bundle10 stream, return an integer indicating the
412 When pushing a bundle10 stream, return an integer indicating the
412 result of the push (see localrepository.addchangegroup()).
413 result of the push (see localrepository.addchangegroup()).
413
414
414 When pushing a bundle20 stream, return a bundle20 stream.
415 When pushing a bundle20 stream, return a bundle20 stream.
415
416
416 `url` is the url the client thinks it's pushing to, which is
417 `url` is the url the client thinks it's pushing to, which is
417 visible to hooks.
418 visible to hooks.
418 '''
419 '''
419
420
420 if heads != ['force'] and self.capable('unbundlehash'):
421 if heads != ['force'] and self.capable('unbundlehash'):
421 heads = encodelist(['hashed',
422 heads = encodelist(['hashed',
422 hashlib.sha1(''.join(sorted(heads))).digest()])
423 hashlib.sha1(''.join(sorted(heads))).digest()])
423 else:
424 else:
424 heads = encodelist(heads)
425 heads = encodelist(heads)
425
426
426 if util.safehasattr(cg, 'deltaheader'):
427 if util.safehasattr(cg, 'deltaheader'):
427 # this is a bundle10; do the old-style call sequence
428 # this is a bundle10; do the old-style call sequence
428 ret, output = self._callpush("unbundle", cg, heads=heads)
429 ret, output = self._callpush("unbundle", cg, heads=heads)
429 if ret == "":
430 if ret == "":
430 raise error.ResponseError(
431 raise error.ResponseError(
431 _('push failed:'), output)
432 _('push failed:'), output)
432 try:
433 try:
433 ret = int(ret)
434 ret = int(ret)
434 except ValueError:
435 except ValueError:
435 raise error.ResponseError(
436 raise error.ResponseError(
436 _('push failed (unexpected response):'), ret)
437 _('push failed (unexpected response):'), ret)
437
438
438 for l in output.splitlines(True):
439 for l in output.splitlines(True):
439 self.ui.status(_('remote: '), l)
440 self.ui.status(_('remote: '), l)
440 else:
441 else:
441 # bundle2 push. Send a stream, fetch a stream.
442 # bundle2 push. Send a stream, fetch a stream.
442 stream = self._calltwowaystream('unbundle', cg, heads=heads)
443 stream = self._calltwowaystream('unbundle', cg, heads=heads)
443 ret = bundle2.getunbundler(self.ui, stream)
444 ret = bundle2.getunbundler(self.ui, stream)
444 return ret
445 return ret
445
446
446 def debugwireargs(self, one, two, three=None, four=None, five=None):
447 def debugwireargs(self, one, two, three=None, four=None, five=None):
447 # don't pass optional arguments left at their default value
448 # don't pass optional arguments left at their default value
448 opts = {}
449 opts = {}
449 if three is not None:
450 if three is not None:
450 opts['three'] = three
451 opts['three'] = three
451 if four is not None:
452 if four is not None:
452 opts['four'] = four
453 opts['four'] = four
453 return self._call('debugwireargs', one=one, two=two, **opts)
454 return self._call('debugwireargs', one=one, two=two, **opts)
454
455
455 def _call(self, cmd, **args):
456 def _call(self, cmd, **args):
456 """execute <cmd> on the server
457 """execute <cmd> on the server
457
458
458 The command is expected to return a simple string.
459 The command is expected to return a simple string.
459
460
460 returns the server reply as a string."""
461 returns the server reply as a string."""
461 raise NotImplementedError()
462 raise NotImplementedError()
462
463
463 def _callstream(self, cmd, **args):
464 def _callstream(self, cmd, **args):
464 """execute <cmd> on the server
465 """execute <cmd> on the server
465
466
466 The command is expected to return a stream. Note that if the
467 The command is expected to return a stream. Note that if the
467 command doesn't return a stream, _callstream behaves
468 command doesn't return a stream, _callstream behaves
468 differently for ssh and http peers.
469 differently for ssh and http peers.
469
470
470 returns the server reply as a file like object.
471 returns the server reply as a file like object.
471 """
472 """
472 raise NotImplementedError()
473 raise NotImplementedError()
473
474
474 def _callcompressable(self, cmd, **args):
475 def _callcompressable(self, cmd, **args):
475 """execute <cmd> on the server
476 """execute <cmd> on the server
476
477
477 The command is expected to return a stream.
478 The command is expected to return a stream.
478
479
479 The stream may have been compressed in some implementations. This
480 The stream may have been compressed in some implementations. This
480 function takes care of the decompression. This is the only difference
481 function takes care of the decompression. This is the only difference
481 with _callstream.
482 with _callstream.
482
483
483 returns the server reply as a file like object.
484 returns the server reply as a file like object.
484 """
485 """
485 raise NotImplementedError()
486 raise NotImplementedError()
486
487
487 def _callpush(self, cmd, fp, **args):
488 def _callpush(self, cmd, fp, **args):
488 """execute a <cmd> on server
489 """execute a <cmd> on server
489
490
490 The command is expected to be related to a push. Push has a special
491 The command is expected to be related to a push. Push has a special
491 return method.
492 return method.
492
493
493 returns the server reply as a (ret, output) tuple. ret is either
494 returns the server reply as a (ret, output) tuple. ret is either
494 empty (error) or a stringified int.
495 empty (error) or a stringified int.
495 """
496 """
496 raise NotImplementedError()
497 raise NotImplementedError()
497
498
498 def _calltwowaystream(self, cmd, fp, **args):
499 def _calltwowaystream(self, cmd, fp, **args):
499 """execute <cmd> on server
500 """execute <cmd> on server
500
501
501 The command will send a stream to the server and get a stream in reply.
502 The command will send a stream to the server and get a stream in reply.
502 """
503 """
503 raise NotImplementedError()
504 raise NotImplementedError()
504
505
505 def _abort(self, exception):
506 def _abort(self, exception):
506 """clearly abort the wire protocol connection and raise the exception
507 """clearly abort the wire protocol connection and raise the exception
507 """
508 """
508 raise NotImplementedError()
509 raise NotImplementedError()
509
510
510 # server side
511 # server side
511
512
512 # A wire protocol command can return either a string or one of these classes.
513 # A wire protocol command can return either a string or one of these classes.
513 class streamres(object):
514 class streamres(object):
514 """wireproto reply: binary stream
515 """wireproto reply: binary stream
515
516
516 The call was successful and the result is a stream.
517 The call was successful and the result is a stream.
517
518
518 Accepts either a generator or an object with a ``read(size)`` method.
519 Accepts either a generator or an object with a ``read(size)`` method.
519
520
520 ``v1compressible`` indicates whether this data can be compressed to
521 ``v1compressible`` indicates whether this data can be compressed to
521 "version 1" clients (technically: HTTP peers using
522 "version 1" clients (technically: HTTP peers using
522 application/mercurial-0.1 media type). This flag should NOT be used on
523 application/mercurial-0.1 media type). This flag should NOT be used on
523 new commands because new clients should support a more modern compression
524 new commands because new clients should support a more modern compression
524 mechanism.
525 mechanism.
525 """
526 """
526 def __init__(self, gen=None, reader=None, v1compressible=False):
527 def __init__(self, gen=None, reader=None, v1compressible=False):
527 self.gen = gen
528 self.gen = gen
528 self.reader = reader
529 self.reader = reader
529 self.v1compressible = v1compressible
530 self.v1compressible = v1compressible
530
531
531 class pushres(object):
532 class pushres(object):
532 """wireproto reply: success with simple integer return
533 """wireproto reply: success with simple integer return
533
534
534 The call was successful and returned an integer contained in `self.res`.
535 The call was successful and returned an integer contained in `self.res`.
535 """
536 """
536 def __init__(self, res):
537 def __init__(self, res):
537 self.res = res
538 self.res = res
538
539
539 class pusherr(object):
540 class pusherr(object):
540 """wireproto reply: failure
541 """wireproto reply: failure
541
542
542 The call failed. The `self.res` attribute contains the error message.
543 The call failed. The `self.res` attribute contains the error message.
543 """
544 """
544 def __init__(self, res):
545 def __init__(self, res):
545 self.res = res
546 self.res = res
546
547
547 class ooberror(object):
548 class ooberror(object):
548 """wireproto reply: failure of a batch of operation
549 """wireproto reply: failure of a batch of operation
549
550
550 Something failed during a batch call. The error message is stored in
551 Something failed during a batch call. The error message is stored in
551 `self.message`.
552 `self.message`.
552 """
553 """
553 def __init__(self, message):
554 def __init__(self, message):
554 self.message = message
555 self.message = message
555
556
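# A hypothetical sketch of a server command picking a reply type: plain
# strings are sent as-is, while the classes above wrap the other cases.
#
#   @wireprotocommand('example', '')
#   def example(repo, proto):
#       it = iter(['some ', 'bytes'])        # any iterable of chunks
#       return streamres(gen=it)             # framed as a stream reply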
556 def getdispatchrepo(repo, proto, command):
557 def getdispatchrepo(repo, proto, command):
557 """Obtain the repo used for processing wire protocol commands.
558 """Obtain the repo used for processing wire protocol commands.
558
559
559 The intent of this function is to serve as a monkeypatch point for
560 The intent of this function is to serve as a monkeypatch point for
560 extensions that need commands to operate on different repo views under
561 extensions that need commands to operate on different repo views under
561 specialized circumstances.
562 specialized circumstances.
562 """
563 """
563 return repo.filtered('served')
564 return repo.filtered('served')
564
565
565 def dispatch(repo, proto, command):
566 def dispatch(repo, proto, command):
566 repo = getdispatchrepo(repo, proto, command)
567 repo = getdispatchrepo(repo, proto, command)
567 func, spec = commands[command]
568 func, spec = commands[command]
568 args = proto.getargs(spec)
569 args = proto.getargs(spec)
569 return func(repo, proto, *args)
570 return func(repo, proto, *args)
570
571
571 def options(cmd, keys, others):
572 def options(cmd, keys, others):
572 opts = {}
573 opts = {}
573 for k in keys:
574 for k in keys:
574 if k in others:
575 if k in others:
575 opts[k] = others[k]
576 opts[k] = others[k]
576 del others[k]
577 del others[k]
577 if others:
578 if others:
578 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
579 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
579 % (cmd, ",".join(others)))
580 % (cmd, ",".join(others)))
580 return opts
581 return opts
581
582
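# Illustration: known keys are moved into the returned opts; anything
# left over triggers the warning above and is dropped (values made up):
#
#   >>> options('debugwireargs', ['three', 'four'], {'three': '3', 'bogus': 'x'})
#   {'three': '3'}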
582 def bundle1allowed(repo, action):
583 def bundle1allowed(repo, action):
583 """Whether a bundle1 operation is allowed from the server.
584 """Whether a bundle1 operation is allowed from the server.
584
585
585 Priority is:
586 Priority is:
586
587
587 1. server.bundle1gd.<action> (if generaldelta active)
588 1. server.bundle1gd.<action> (if generaldelta active)
588 2. server.bundle1.<action>
589 2. server.bundle1.<action>
589 3. server.bundle1gd (if generaldelta active)
590 3. server.bundle1gd (if generaldelta active)
590 4. server.bundle1
591 4. server.bundle1
591 """
592 """
592 ui = repo.ui
593 ui = repo.ui
593 gd = 'generaldelta' in repo.requirements
594 gd = 'generaldelta' in repo.requirements
594
595
595 if gd:
596 if gd:
596 v = ui.configbool('server', 'bundle1gd.%s' % action, None)
597 v = ui.configbool('server', 'bundle1gd.%s' % action, None)
597 if v is not None:
598 if v is not None:
598 return v
599 return v
599
600
600 v = ui.configbool('server', 'bundle1.%s' % action, None)
601 v = ui.configbool('server', 'bundle1.%s' % action, None)
601 if v is not None:
602 if v is not None:
602 return v
603 return v
603
604
604 if gd:
605 if gd:
605 v = ui.configbool('server', 'bundle1gd', None)
606 v = ui.configbool('server', 'bundle1gd', None)
606 if v is not None:
607 if v is not None:
607 return v
608 return v
608
609
609 return ui.configbool('server', 'bundle1', True)
610 return ui.configbool('server', 'bundle1', True)
610
611
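# Example server-side hgrc applying the priority rules above: refuse
# bundle1 pushes while still allowing bundle1 pulls:
#
#   [server]
#   bundle1.push = false
#   bundle1.pull = true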
611 def supportedcompengines(ui, proto, role):
612 def supportedcompengines(ui, proto, role):
612 """Obtain the list of supported compression engines for a request."""
613 """Obtain the list of supported compression engines for a request."""
613 assert role in (util.CLIENTROLE, util.SERVERROLE)
614 assert role in (util.CLIENTROLE, util.SERVERROLE)
614
615
615 compengines = util.compengines.supportedwireengines(role)
616 compengines = util.compengines.supportedwireengines(role)
616
617
617 # Allow config to override default list and ordering.
618 # Allow config to override default list and ordering.
618 if role == util.SERVERROLE:
619 if role == util.SERVERROLE:
619 configengines = ui.configlist('server', 'compressionengines')
620 configengines = ui.configlist('server', 'compressionengines')
620 config = 'server.compressionengines'
621 config = 'server.compressionengines'
621 else:
622 else:
622 # This is currently implemented mainly to facilitate testing. In most
623 # This is currently implemented mainly to facilitate testing. In most
623 # cases, the server should be in charge of choosing a compression engine
624 # cases, the server should be in charge of choosing a compression engine
624 # because a server has the most to lose from a sub-optimal choice. (e.g.
625 # because a server has the most to lose from a sub-optimal choice. (e.g.
625 # CPU DoS due to an expensive engine or a network DoS due to poor
626 # CPU DoS due to an expensive engine or a network DoS due to poor
626 # compression ratio).
627 # compression ratio).
627 configengines = ui.configlist('experimental',
628 configengines = ui.configlist('experimental',
628 'clientcompressionengines')
629 'clientcompressionengines')
629 config = 'experimental.clientcompressionengines'
630 config = 'experimental.clientcompressionengines'
630
631
631 # No explicit config. Filter out the ones that aren't supposed to be
632 # No explicit config. Filter out the ones that aren't supposed to be
632 # advertised and return default ordering.
633 # advertised and return default ordering.
633 if not configengines:
634 if not configengines:
634 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
635 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
635 return [e for e in compengines
636 return [e for e in compengines
636 if getattr(e.wireprotosupport(), attr) > 0]
637 if getattr(e.wireprotosupport(), attr) > 0]
637
638
638 # If compression engines are listed in the config, assume there is a good
639 # If compression engines are listed in the config, assume there is a good
639 # reason for it (like server operators wanting to achieve specific
640 # reason for it (like server operators wanting to achieve specific
640 # performance characteristics). So fail fast if the config references
641 # performance characteristics). So fail fast if the config references
641 # unusable compression engines.
642 # unusable compression engines.
642 validnames = set(e.name() for e in compengines)
643 validnames = set(e.name() for e in compengines)
643 invalidnames = set(e for e in configengines if e not in validnames)
644 invalidnames = set(e for e in configengines if e not in validnames)
644 if invalidnames:
645 if invalidnames:
645 raise error.Abort(_('invalid compression engine defined in %s: %s') %
646 raise error.Abort(_('invalid compression engine defined in %s: %s') %
646 (config, ', '.join(sorted(invalidnames))))
647 (config, ', '.join(sorted(invalidnames))))
647
648
648 compengines = [e for e in compengines if e.name() in configengines]
649 compengines = [e for e in compengines if e.name() in configengines]
649 compengines = sorted(compengines,
650 compengines = sorted(compengines,
650 key=lambda e: configengines.index(e.name()))
651 key=lambda e: configengines.index(e.name()))
651
652
652 if not compengines:
653 if not compengines:
653 raise error.Abort(_('%s config option does not specify any known '
654 raise error.Abort(_('%s config option does not specify any known '
654 'compression engines') % config,
655 'compression engines') % config,
655 hint=_('usable compression engines: %s') %
656 hint=_('usable compression engines: %s') %
656 ', '.join(sorted(validnames)))
657 ', '.join(sorted(validnames)))
657
658
658 return compengines
659 return compengines
659
660
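# Example server configuration pinning the advertised order (engine
# names must be known to util.compengines, or the checks above abort;
# 'zstd' assumes Mercurial was built with zstd support):
#
#   [server]
#   compressionengines = zstd, zlib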
660 # list of commands
661 # list of commands
661 commands = {}
662 commands = {}
662
663
663 def wireprotocommand(name, args=''):
664 def wireprotocommand(name, args=''):
664 """decorator for wire protocol command"""
665 """decorator for wire protocol command"""
665 def register(func):
666 def register(func):
666 commands[name] = (func, args)
667 commands[name] = (func, args)
667 return func
668 return func
668 return register
669 return register
669
670
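# Registering a command is just applying the decorator; a hypothetical
# extension command taking a single argument:
#
#   @wireprotocommand('echoname', 'name')
#   def echoname(repo, proto, name):
#       return 'hello %s\n' % name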
670 @wireprotocommand('batch', 'cmds *')
671 @wireprotocommand('batch', 'cmds *')
671 def batch(repo, proto, cmds, others):
672 def batch(repo, proto, cmds, others):
672 repo = repo.filtered("served")
673 repo = repo.filtered("served")
673 res = []
674 res = []
674 for pair in cmds.split(';'):
675 for pair in cmds.split(';'):
675 op, args = pair.split(' ', 1)
676 op, args = pair.split(' ', 1)
676 vals = {}
677 vals = {}
677 for a in args.split(','):
678 for a in args.split(','):
678 if a:
679 if a:
679 n, v = a.split('=')
680 n, v = a.split('=')
680 vals[unescapearg(n)] = unescapearg(v)
681 vals[unescapearg(n)] = unescapearg(v)
681 func, spec = commands[op]
682 func, spec = commands[op]
682 if spec:
683 if spec:
683 keys = spec.split()
684 keys = spec.split()
684 data = {}
685 data = {}
685 for k in keys:
686 for k in keys:
686 if k == '*':
687 if k == '*':
687 star = {}
688 star = {}
688 for key in vals.keys():
689 for key in vals.keys():
689 if key not in keys:
690 if key not in keys:
690 star[key] = vals[key]
691 star[key] = vals[key]
691 data['*'] = star
692 data['*'] = star
692 else:
693 else:
693 data[k] = vals[k]
694 data[k] = vals[k]
694 result = func(repo, proto, *[data[k] for k in keys])
695 result = func(repo, proto, *[data[k] for k in keys])
695 else:
696 else:
696 result = func(repo, proto)
697 result = func(repo, proto)
697 if isinstance(result, ooberror):
698 if isinstance(result, ooberror):
698 return result
699 return result
699 res.append(escapearg(result))
700 res.append(escapearg(result))
700 return ';'.join(res)
701 return ';'.join(res)
701
702
702 @wireprotocommand('between', 'pairs')
703 @wireprotocommand('between', 'pairs')
703 def between(repo, proto, pairs):
704 def between(repo, proto, pairs):
704 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
705 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
705 r = []
706 r = []
706 for b in repo.between(pairs):
707 for b in repo.between(pairs):
707 r.append(encodelist(b) + "\n")
708 r.append(encodelist(b) + "\n")
708 return "".join(r)
709 return "".join(r)
709
710
710 @wireprotocommand('branchmap')
711 @wireprotocommand('branchmap')
711 def branchmap(repo, proto):
712 def branchmap(repo, proto):
712 branchmap = repo.branchmap()
713 branchmap = repo.branchmap()
713 heads = []
714 heads = []
714 for branch, nodes in branchmap.iteritems():
715 for branch, nodes in branchmap.iteritems():
715 branchname = urlreq.quote(encoding.fromlocal(branch))
716 branchname = urlreq.quote(encoding.fromlocal(branch))
716 branchnodes = encodelist(nodes)
717 branchnodes = encodelist(nodes)
717 heads.append('%s %s' % (branchname, branchnodes))
718 heads.append('%s %s' % (branchname, branchnodes))
718 return '\n'.join(heads)
719 return '\n'.join(heads)
719
720
720 @wireprotocommand('branches', 'nodes')
721 @wireprotocommand('branches', 'nodes')
721 def branches(repo, proto, nodes):
722 def branches(repo, proto, nodes):
722 nodes = decodelist(nodes)
723 nodes = decodelist(nodes)
723 r = []
724 r = []
724 for b in repo.branches(nodes):
725 for b in repo.branches(nodes):
725 r.append(encodelist(b) + "\n")
726 r.append(encodelist(b) + "\n")
726 return "".join(r)
727 return "".join(r)
727
728
728 @wireprotocommand('clonebundles', '')
729 @wireprotocommand('clonebundles', '')
729 def clonebundles(repo, proto):
730 def clonebundles(repo, proto):
730 """Server command for returning info for available bundles to seed clones.
731 """Server command for returning info for available bundles to seed clones.
731
732
732 Clients will parse this response and determine what bundle to fetch.
733 Clients will parse this response and determine what bundle to fetch.
733
734
734 Extensions may wrap this command to filter or dynamically emit data
735 Extensions may wrap this command to filter or dynamically emit data
735 depending on the request. e.g. you could advertise URLs for the closest
736 depending on the request. e.g. you could advertise URLs for the closest
736 data center given the client's IP address.
737 data center given the client's IP address.
737 """
738 """
738 return repo.opener.tryread('clonebundles.manifest')
739 return repo.opener.tryread('clonebundles.manifest')
739
740
740 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
741 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
741 'known', 'getbundle', 'unbundlehash', 'batch']
742 'known', 'getbundle', 'unbundlehash', 'batch']
742
743
743 def _capabilities(repo, proto):
744 def _capabilities(repo, proto):
744 """return a list of capabilities for a repo
745 """return a list of capabilities for a repo
745
746
746 This function exists to allow extensions to easily wrap capabilities
747 This function exists to allow extensions to easily wrap capabilities
747 computation
748 computation
748
749
749 - returns a list: easy to alter
750 - returns a list: easy to alter
750 - changes done here will be propagated to both the `capabilities` and `hello`
751 - changes done here will be propagated to both the `capabilities` and `hello`
751 commands without any other action needed.
752 commands without any other action needed.
752 """
753 """
753 # copy to prevent modification of the global list
754 # copy to prevent modification of the global list
754 caps = list(wireprotocaps)
755 caps = list(wireprotocaps)
755 if streamclone.allowservergeneration(repo.ui):
756 if streamclone.allowservergeneration(repo.ui):
756 if repo.ui.configbool('server', 'preferuncompressed', False):
757 if repo.ui.configbool('server', 'preferuncompressed', False):
757 caps.append('stream-preferred')
758 caps.append('stream-preferred')
758 requiredformats = repo.requirements & repo.supportedformats
759 requiredformats = repo.requirements & repo.supportedformats
759 # if our local revlogs are just revlogv1, add 'stream' cap
760 # if our local revlogs are just revlogv1, add 'stream' cap
760 if not requiredformats - set(('revlogv1',)):
761 if not requiredformats - set(('revlogv1',)):
761 caps.append('stream')
762 caps.append('stream')
762 # otherwise, add 'streamreqs' detailing our local revlog format
763 # otherwise, add 'streamreqs' detailing our local revlog format
763 else:
764 else:
764 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
765 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
765 if repo.ui.configbool('experimental', 'bundle2-advertise', True):
766 if repo.ui.configbool('experimental', 'bundle2-advertise', True):
766 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
767 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
767 caps.append('bundle2=' + urlreq.quote(capsblob))
768 caps.append('bundle2=' + urlreq.quote(capsblob))
768 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
769 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
769
770
770 if proto.name == 'http':
771 if proto.name == 'http':
771 caps.append('httpheader=%d' %
772 caps.append('httpheader=%d' %
772 repo.ui.configint('server', 'maxhttpheaderlen', 1024))
773 repo.ui.configint('server', 'maxhttpheaderlen', 1024))
773 if repo.ui.configbool('experimental', 'httppostargs', False):
774 if repo.ui.configbool('experimental', 'httppostargs', False):
774 caps.append('httppostargs')
775 caps.append('httppostargs')
775
776
776 # FUTURE advertise 0.2rx once support is implemented
777 # FUTURE advertise 0.2rx once support is implemented
777 # FUTURE advertise minrx and mintx after consulting config option
778 # FUTURE advertise minrx and mintx after consulting config option
778 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
779 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
779
780
780 compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
781 compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
781 if compengines:
782 if compengines:
782 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
783 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
783 for e in compengines)
784 for e in compengines)
784 caps.append('compression=%s' % comptypes)
785 caps.append('compression=%s' % comptypes)
785
786
786 return caps
787 return caps
787
788
788 # If you are writing an extension and considering wrapping this function, wrap
789 # If you are writing an extension and considering wrapping this function, wrap
789 # `_capabilities` instead.
790 # `_capabilities` instead.
790 @wireprotocommand('capabilities')
791 @wireprotocommand('capabilities')
791 def capabilities(repo, proto):
792 def capabilities(repo, proto):
792 return ' '.join(_capabilities(repo, proto))
793 return ' '.join(_capabilities(repo, proto))
793
794
794 @wireprotocommand('changegroup', 'roots')
795 @wireprotocommand('changegroup', 'roots')
795 def changegroup(repo, proto, roots):
796 def changegroup(repo, proto, roots):
796 nodes = decodelist(roots)
797 nodes = decodelist(roots)
797 cg = changegroupmod.changegroup(repo, nodes, 'serve')
798 cg = changegroupmod.changegroup(repo, nodes, 'serve')
798 return streamres(reader=cg, v1compressible=True)
799 return streamres(reader=cg, v1compressible=True)
799
800
800 @wireprotocommand('changegroupsubset', 'bases heads')
801 @wireprotocommand('changegroupsubset', 'bases heads')
801 def changegroupsubset(repo, proto, bases, heads):
802 def changegroupsubset(repo, proto, bases, heads):
802 bases = decodelist(bases)
803 bases = decodelist(bases)
803 heads = decodelist(heads)
804 heads = decodelist(heads)
804 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
805 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
805 return streamres(reader=cg, v1compressible=True)
806 return streamres(reader=cg, v1compressible=True)
806
807
807 @wireprotocommand('debugwireargs', 'one two *')
808 @wireprotocommand('debugwireargs', 'one two *')
808 def debugwireargs(repo, proto, one, two, others):
809 def debugwireargs(repo, proto, one, two, others):
809 # only accept optional args from the known set
810 # only accept optional args from the known set
810 opts = options('debugwireargs', ['three', 'four'], others)
811 opts = options('debugwireargs', ['three', 'four'], others)
811 return repo.debugwireargs(one, two, **opts)
812 return repo.debugwireargs(one, two, **opts)
812
813
813 @wireprotocommand('getbundle', '*')
814 @wireprotocommand('getbundle', '*')
814 def getbundle(repo, proto, others):
815 def getbundle(repo, proto, others):
815 opts = options('getbundle', gboptsmap.keys(), others)
816 opts = options('getbundle', gboptsmap.keys(), others)
816 for k, v in opts.iteritems():
817 for k, v in opts.iteritems():
817 keytype = gboptsmap[k]
818 keytype = gboptsmap[k]
818 if keytype == 'nodes':
819 if keytype == 'nodes':
819 opts[k] = decodelist(v)
820 opts[k] = decodelist(v)
820 elif keytype == 'csv':
821 elif keytype == 'csv':
821 opts[k] = list(v.split(','))
822 opts[k] = list(v.split(','))
822 elif keytype == 'scsv':
823 elif keytype == 'scsv':
823 opts[k] = set(v.split(','))
824 opts[k] = set(v.split(','))
824 elif keytype == 'boolean':
825 elif keytype == 'boolean':
825 # Client should serialize False as '0', which is a non-empty string
826 # Client should serialize False as '0', which is a non-empty string
826 # so it evaluates as a True bool.
827 # so it evaluates as a True bool.
827 if v == '0':
828 if v == '0':
828 opts[k] = False
829 opts[k] = False
829 else:
830 else:
830 opts[k] = bool(v)
831 opts[k] = bool(v)
831 elif keytype != 'plain':
832 elif keytype != 'plain':
832 raise KeyError('unknown getbundle option type %s'
833 raise KeyError('unknown getbundle option type %s'
833 % keytype)
834 % keytype)
834
835
835 if not bundle1allowed(repo, 'pull'):
836 if not bundle1allowed(repo, 'pull'):
836 if not exchange.bundle2requested(opts.get('bundlecaps')):
837 if not exchange.bundle2requested(opts.get('bundlecaps')):
837 if proto.name == 'http':
838 if proto.name == 'http':
838 return ooberror(bundle2required)
839 return ooberror(bundle2required)
839 raise error.Abort(bundle2requiredmain,
840 raise error.Abort(bundle2requiredmain,
840 hint=bundle2requiredhint)
841 hint=bundle2requiredhint)
841
842
843 try:
844 try:
844 chunks = exchange.getbundlechunks(repo, 'serve', **opts)
845 chunks = exchange.getbundlechunks(repo, 'serve', **opts)
845 except error.Abort as exc:
846 except error.Abort as exc:
846 # cleanly forward Abort error to the client
847 # cleanly forward Abort error to the client
847 if not exchange.bundle2requested(opts.get('bundlecaps')):
848 if not exchange.bundle2requested(opts.get('bundlecaps')):
848 if proto.name == 'http':
849 if proto.name == 'http':
849 return ooberror(str(exc) + '\n')
850 return ooberror(str(exc) + '\n')
850 raise # cannot do better for bundle1 + ssh
851 raise # cannot do better for bundle1 + ssh
851 # a bundle2 request expects a bundle2 reply
852 # a bundle2 request expects a bundle2 reply
852 bundler = bundle2.bundle20(repo.ui)
853 bundler = bundle2.bundle20(repo.ui)
853 manargs = [('message', str(exc))]
854 manargs = [('message', str(exc))]
854 advargs = []
855 advargs = []
855 if exc.hint is not None:
856 if exc.hint is not None:
856 advargs.append(('hint', exc.hint))
857 advargs.append(('hint', exc.hint))
857 bundler.addpart(bundle2.bundlepart('error:abort',
858 bundler.addpart(bundle2.bundlepart('error:abort',
858 manargs, advargs))
859 manargs, advargs))
859 return streamres(gen=bundler.getchunks(), v1compressible=True)
860 return streamres(gen=bundler.getchunks(), v1compressible=True)
860 return streamres(gen=chunks, v1compressible=True)
861 return streamres(gen=chunks, v1compressible=True)
861
862
862 @wireprotocommand('heads')
863 @wireprotocommand('heads')
863 def heads(repo, proto):
864 def heads(repo, proto):
864 h = repo.heads()
865 h = repo.heads()
865 return encodelist(h) + "\n"
866 return encodelist(h) + "\n"
866
867
867 @wireprotocommand('hello')
868 @wireprotocommand('hello')
868 def hello(repo, proto):
869 def hello(repo, proto):
869 '''the hello command returns a set of lines describing various
870 '''the hello command returns a set of lines describing various
870 interesting things about the server, in an RFC822-like format.
871 interesting things about the server, in an RFC822-like format.
871 Currently the only one defined is "capabilities", which
872 Currently the only one defined is "capabilities", which
872 consists of a line in the form:
873 consists of a line in the form:
873
874
874 capabilities: space separated list of tokens
875 capabilities: space separated list of tokens
875 '''
876 '''
876 return "capabilities: %s\n" % (capabilities(repo, proto))
877 return "capabilities: %s\n" % (capabilities(repo, proto))
877
878
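# A sample (abbreviated) hello response, built from wireprotocaps above:
#
#   capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch ...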
878 @wireprotocommand('listkeys', 'namespace')
879 @wireprotocommand('listkeys', 'namespace')
879 def listkeys(repo, proto, namespace):
880 def listkeys(repo, proto, namespace):
880 d = repo.listkeys(encoding.tolocal(namespace)).items()
881 d = repo.listkeys(encoding.tolocal(namespace)).items()
881 return pushkeymod.encodekeys(d)
882 return pushkeymod.encodekeys(d)
882
883
883 @wireprotocommand('lookup', 'key')
884 @wireprotocommand('lookup', 'key')
884 def lookup(repo, proto, key):
885 def lookup(repo, proto, key):
885 try:
886 try:
886 k = encoding.tolocal(key)
887 k = encoding.tolocal(key)
887 c = repo[k]
888 c = repo[k]
888 r = c.hex()
889 r = c.hex()
889 success = 1
890 success = 1
890 except Exception as inst:
891 except Exception as inst:
891 r = str(inst)
892 r = str(inst)
892 success = 0
893 success = 0
893 return "%s %s\n" % (success, r)
894 return "%s %s\n" % (success, r)
894
895
895 @wireprotocommand('known', 'nodes *')
896 @wireprotocommand('known', 'nodes *')
896 def known(repo, proto, nodes, others):
897 def known(repo, proto, nodes, others):
897 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
898 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
898
899
899 @wireprotocommand('pushkey', 'namespace key old new')
900 @wireprotocommand('pushkey', 'namespace key old new')
900 def pushkey(repo, proto, namespace, key, old, new):
901 def pushkey(repo, proto, namespace, key, old, new):
901 # compatibility with pre-1.8 clients which were accidentally
902 # compatibility with pre-1.8 clients which were accidentally
902 # sending raw binary nodes rather than utf-8-encoded hex
903 # sending raw binary nodes rather than utf-8-encoded hex
903 if len(new) == 20 and new.encode('string-escape') != new:
904 if len(new) == 20 and new.encode('string-escape') != new:
904 # looks like it could be a binary node
905 # looks like it could be a binary node
905 try:
906 try:
906 new.decode('utf-8')
907 new.decode('utf-8')
907 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
908 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
908 except UnicodeDecodeError:
909 except UnicodeDecodeError:
909 pass # binary, leave unmodified
910 pass # binary, leave unmodified
910 else:
911 else:
911 new = encoding.tolocal(new) # normal path
912 new = encoding.tolocal(new) # normal path
912
913
913 if util.safehasattr(proto, 'restore'):
914 if util.safehasattr(proto, 'restore'):
914
915
915 proto.redirect()
916 proto.redirect()
916
917
917 try:
918 try:
918 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
919 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
919 encoding.tolocal(old), new) or False
920 encoding.tolocal(old), new) or False
920 except error.Abort:
921 except error.Abort:
921 r = False
922 r = False
922
923
923 output = proto.restore()
924 output = proto.restore()
924
925
925 return '%s\n%s' % (int(r), output)
926 return '%s\n%s' % (int(r), output)
926
927
927 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
928 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
928 encoding.tolocal(old), new)
929 encoding.tolocal(old), new)
929 return '%s\n' % int(r)
930 return '%s\n' % int(r)
930
931
931 @wireprotocommand('stream_out')
932 @wireprotocommand('stream_out')
932 def stream(repo, proto):
933 def stream(repo, proto):
933 '''If the server supports streaming clone, it advertises the "stream"
934 '''If the server supports streaming clone, it advertises the "stream"
934 capability with a value representing the version and flags of the repo
935 capability with a value representing the version and flags of the repo
935 it is serving. Client checks to see if it understands the format.
936 it is serving. Client checks to see if it understands the format.
936 '''
937 '''
937 if not streamclone.allowservergeneration(repo.ui):
938 if not streamclone.allowservergeneration(repo.ui):
938 return '1\n'
939 return '1\n'
939
940
940 def getstream(it):
941 def getstream(it):
941 yield '0\n'
942 yield '0\n'
942 for chunk in it:
943 for chunk in it:
943 yield chunk
944 yield chunk
944
945
945 try:
946 try:
946 # LockError may be raised before the first result is yielded. Don't
947 # LockError may be raised before the first result is yielded. Don't
947 # emit output until we're sure we got the lock successfully.
948 # emit output until we're sure we got the lock successfully.
948 it = streamclone.generatev1wireproto(repo)
949 it = streamclone.generatev1wireproto(repo)
949 return streamres(gen=getstream(it))
950 return streamres(gen=getstream(it))
950 except error.LockError:
951 except error.LockError:
951 return '2\n'
952 return '2\n'
952
953
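# Reply codes used by stream() above: '0\n' followed by the stream data
# on success, '1\n' when server-side stream generation is disallowed,
# and '2\n' when the repository lock cannot be acquired.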
953 @wireprotocommand('unbundle', 'heads')
954 @wireprotocommand('unbundle', 'heads')
954 def unbundle(repo, proto, heads):
955 def unbundle(repo, proto, heads):
955 their_heads = decodelist(heads)
956 their_heads = decodelist(heads)
956
957
957 try:
958 try:
958 proto.redirect()
959 proto.redirect()
959
960
960 exchange.check_heads(repo, their_heads, 'preparing changes')
961 exchange.check_heads(repo, their_heads, 'preparing changes')
961
962
962 # write bundle data to temporary file because it can be big
963 # write bundle data to temporary file because it can be big
963 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
964 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
964 fp = os.fdopen(fd, 'wb+')
965 fp = os.fdopen(fd, pycompat.sysstr('wb+'))
965 r = 0
966 r = 0
966 try:
967 try:
967 proto.getfile(fp)
968 proto.getfile(fp)
968 fp.seek(0)
969 fp.seek(0)
969 gen = exchange.readbundle(repo.ui, fp, None)
970 gen = exchange.readbundle(repo.ui, fp, None)
970 if (isinstance(gen, changegroupmod.cg1unpacker)
971 if (isinstance(gen, changegroupmod.cg1unpacker)
971 and not bundle1allowed(repo, 'push')):
972 and not bundle1allowed(repo, 'push')):
972 if proto.name == 'http':
973 if proto.name == 'http':
973 # need to special-case http because stderr does not get to
974 # need to special-case http because stderr does not get to
974 # the http client on a failed push, so we need to abuse some
975 # the http client on a failed push, so we need to abuse some
975 # other error type to make sure the message gets to the
976 # other error type to make sure the message gets to the
976 # user.
977 # user.
977 return ooberror(bundle2required)
978 return ooberror(bundle2required)
978 raise error.Abort(bundle2requiredmain,
979 raise error.Abort(bundle2requiredmain,
979 hint=bundle2requiredhint)
980 hint=bundle2requiredhint)
980
981
981 r = exchange.unbundle(repo, gen, their_heads, 'serve',
982 r = exchange.unbundle(repo, gen, their_heads, 'serve',
982 proto._client())
983 proto._client())
983 if util.safehasattr(r, 'addpart'):
984 if util.safehasattr(r, 'addpart'):
984 # The return looks streamable, we are in the bundle2 case and
985 # The return looks streamable, we are in the bundle2 case and
985 # should return a stream.
986 # should return a stream.
986 return streamres(gen=r.getchunks())
987 return streamres(gen=r.getchunks())
987 return pushres(r)
988 return pushres(r)
988
989
989 finally:
990 finally:
990 fp.close()
991 fp.close()
991 os.unlink(tempname)
992 os.unlink(tempname)
992
993
993 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
994 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
994 # handle non-bundle2 case first
995 # handle non-bundle2 case first
995 if not getattr(exc, 'duringunbundle2', False):
996 if not getattr(exc, 'duringunbundle2', False):
996 try:
997 try:
997 raise
998 raise
998 except error.Abort:
999 except error.Abort:
999 # The old code we moved used util.stderr directly.
1000 # The old code we moved used util.stderr directly.
1000 # We did not change it to minimise code change.
1001 # We did not change it to minimise code change.
1001 # This needs to be moved to something proper.
1002 # This needs to be moved to something proper.
1002 # Feel free to do it.
1003 # Feel free to do it.
1003 util.stderr.write("abort: %s\n" % exc)
1004 util.stderr.write("abort: %s\n" % exc)
1004 if exc.hint is not None:
1005 if exc.hint is not None:
1005 util.stderr.write("(%s)\n" % exc.hint)
1006 util.stderr.write("(%s)\n" % exc.hint)
1006 return pushres(0)
1007 return pushres(0)
1007 except error.PushRaced:
1008 except error.PushRaced:
1008 return pusherr(str(exc))
1009 return pusherr(str(exc))
1009
1010
1010 bundler = bundle2.bundle20(repo.ui)
1011 bundler = bundle2.bundle20(repo.ui)
1011 for out in getattr(exc, '_bundle2salvagedoutput', ()):
1012 for out in getattr(exc, '_bundle2salvagedoutput', ()):
1012 bundler.addpart(out)
1013 bundler.addpart(out)
1013 try:
1014 try:
1014 try:
1015 try:
1015 raise
1016 raise
1016 except error.PushkeyFailed as exc:
1017 except error.PushkeyFailed as exc:
1017 # check client caps
1018 # check client caps
1018 remotecaps = getattr(exc, '_replycaps', None)
1019 remotecaps = getattr(exc, '_replycaps', None)
1019 if (remotecaps is not None
1020 if (remotecaps is not None
1020 and 'pushkey' not in remotecaps.get('error', ())):
1021 and 'pushkey' not in remotecaps.get('error', ())):
1021 # no support remote side, fallback to Abort handler.
1022 # no support remote side, fallback to Abort handler.
1022 raise
1023 raise
1023 part = bundler.newpart('error:pushkey')
1024 part = bundler.newpart('error:pushkey')
1024 part.addparam('in-reply-to', exc.partid)
1025 part.addparam('in-reply-to', exc.partid)
1025 if exc.namespace is not None:
1026 if exc.namespace is not None:
1026 part.addparam('namespace', exc.namespace, mandatory=False)
1027 part.addparam('namespace', exc.namespace, mandatory=False)
1027 if exc.key is not None:
1028 if exc.key is not None:
1028 part.addparam('key', exc.key, mandatory=False)
1029 part.addparam('key', exc.key, mandatory=False)
1029 if exc.new is not None:
1030 if exc.new is not None:
1030 part.addparam('new', exc.new, mandatory=False)
1031 part.addparam('new', exc.new, mandatory=False)
1031 if exc.old is not None:
1032 if exc.old is not None:
1032 part.addparam('old', exc.old, mandatory=False)
1033 part.addparam('old', exc.old, mandatory=False)
1033 if exc.ret is not None:
1034 if exc.ret is not None:
1034 part.addparam('ret', exc.ret, mandatory=False)
1035 part.addparam('ret', exc.ret, mandatory=False)
1035 except error.BundleValueError as exc:
1036 except error.BundleValueError as exc:
1036 errpart = bundler.newpart('error:unsupportedcontent')
1037 errpart = bundler.newpart('error:unsupportedcontent')
1037 if exc.parttype is not None:
1038 if exc.parttype is not None:
1038 errpart.addparam('parttype', exc.parttype)
1039 errpart.addparam('parttype', exc.parttype)
1039 if exc.params:
1040 if exc.params:
1040 errpart.addparam('params', '\0'.join(exc.params))
1041 errpart.addparam('params', '\0'.join(exc.params))
1041 except error.Abort as exc:
1042 except error.Abort as exc:
1042 manargs = [('message', str(exc))]
1043 manargs = [('message', str(exc))]
1043 advargs = []
1044 advargs = []
1044 if exc.hint is not None:
1045 if exc.hint is not None:
1045 advargs.append(('hint', exc.hint))
1046 advargs.append(('hint', exc.hint))
1046 bundler.addpart(bundle2.bundlepart('error:abort',
1047 bundler.addpart(bundle2.bundlepart('error:abort',
1047 manargs, advargs))
1048 manargs, advargs))
1048 except error.PushRaced as exc:
1049 except error.PushRaced as exc:
1049 bundler.newpart('error:pushraced', [('message', str(exc))])
1050 bundler.newpart('error:pushraced', [('message', str(exc))])
1050 return streamres(gen=bundler.getchunks())
1051 return streamres(gen=bundler.getchunks())
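
The pycompat.sysstr() call introduced above is what makes the fdopen mode
portable: Mercurial string literals are bytes under both major versions,
but os.fdopen wants the native str type, which is unicode on Python 3. A
minimal sketch of the idea (illustrative; the real helper lives in
mercurial/pycompat.py and may differ in detail):

    import sys

    if sys.version_info[0] >= 3:
        def sysstr(s):
            # decode bytes as latin-1, which is lossless and reversible
            if isinstance(s, bytes):
                return s.decode('latin-1')
            return s
    else:
        def sysstr(s):
            # the native str already is bytes on Python 2
            return s

    # with the helper, one spelling of the mode works everywhere:
    # fp = os.fdopen(fd, sysstr('wb+'))
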
@@ -1,224 +1,224 @@
# worker.py - master-slave parallelism support
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import signal
import sys

from .i18n import _
from . import (
    encoding,
    error,
    pycompat,
    scmutil,
    util,
)

def countcpus():
    '''try to count the number of CPUs on the system'''

    # posix
    try:
        n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if n > 0:
            return n
    except (AttributeError, ValueError):
        pass

    # windows
    try:
        n = int(encoding.environ['NUMBER_OF_PROCESSORS'])
        if n > 0:
            return n
    except (KeyError, ValueError):
        pass

    return 1

def _numworkers(ui):
    s = ui.config('worker', 'numcpus')
    if s:
        try:
            n = int(s)
            if n >= 1:
                return n
        except ValueError:
            raise error.Abort(_('number of cpus must be an integer'))
    return min(max(countcpus(), 4), 32)

if pycompat.osname == 'posix':
    _startupcost = 0.01
else:
    _startupcost = 1e30

def worthwhile(ui, costperop, nops):
    '''try to determine whether the benefit of multiple processes can
    outweigh the cost of starting them'''
    linear = costperop * nops
    workers = _numworkers(ui)
    benefit = linear - (_startupcost * workers + linear / workers)
    return benefit >= 0.15
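
# A worked instance of the model above (a sketch with made-up costs,
# assuming the posix _startupcost of 0.01 and four workers): a large
# serial cost easily clears the 0.15 threshold, a tiny one does not.
#
#     linear = 0.05 * 100             # 5.0 seconds serially
#     5.0 - (0.01 * 4 + 5.0 / 4)      # benefit 3.71 -> fork workers
#
#     linear = 0.001 * 100            # 0.1 seconds serially
#     0.1 - (0.01 * 4 + 0.1 / 4)      # benefit 0.035 -> stay serial
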
def worker(ui, costperarg, func, staticargs, args):
    '''run a function, possibly in parallel in multiple worker
    processes.

    returns a progress iterator

    costperarg - cost of a single task

    func - function to run

    staticargs - arguments to pass to every invocation of the function

    args - arguments to split into chunks, to pass to individual
    workers
    '''
    if worthwhile(ui, costperarg, len(args)):
        return _platformworker(ui, func, staticargs, args)
    return func(*staticargs + (args,))
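
# For orientation, a hedged sketch of how a caller drives worker(); the
# copyfiles function and its arguments are hypothetical, not a real
# Mercurial callsite. func must yield (index, item) pairs, because the
# posix implementation serializes them as '%d %s\n' over a pipe.
#
#     def copyfiles(src, dst, items):
#         for i, name in enumerate(items):
#             ...                      # per-item work goes here
#             yield i, name
#
#     for i, name in worker(ui, 0.001, copyfiles, ('src', 'dst'), names):
#         ui.progress('copying', i, item=name)
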
def _posixworker(ui, func, staticargs, args):
    rfd, wfd = os.pipe()
    workers = _numworkers(ui)
    oldhandler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    pids, problem = set(), [0]
    def killworkers():
        # unregister SIGCHLD handler as all children will be killed. This
        # function shouldn't be interrupted by another SIGCHLD; otherwise pids
        # could be updated while iterating, which would cause inconsistency.
        signal.signal(signal.SIGCHLD, oldchldhandler)
        # if one worker bails, there's no good reason to wait for the rest
        for p in pids:
            try:
                os.kill(p, signal.SIGTERM)
            except OSError as err:
                if err.errno != errno.ESRCH:
                    raise
    def waitforworkers(blocking=True):
        for pid in pids.copy():
            p = st = 0
            while True:
                try:
                    p, st = os.waitpid(pid, (0 if blocking else os.WNOHANG))
                    break
                except OSError as e:
                    if e.errno == errno.EINTR:
                        continue
                    elif e.errno == errno.ECHILD:
                        # the child may already have been reaped, but pids
                        # not yet updated (maybe interrupted just after
                        # waitpid)
                        pids.discard(pid)
                        break
                    else:
                        raise
            if p:
                pids.discard(p)
                st = _exitstatus(st)
                if st and not problem[0]:
                    problem[0] = st
    def sigchldhandler(signum, frame):
        waitforworkers(blocking=False)
        if problem[0]:
            killworkers()
    oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
    for pargs in partition(args, workers):
        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGINT, oldhandler)
            signal.signal(signal.SIGCHLD, oldchldhandler)

            def workerfunc():
                os.close(rfd)
                for i, item in func(*(staticargs + (pargs,))):
                    os.write(wfd, '%d %s\n' % (i, item))

            # make sure we use os._exit in all code paths. otherwise the
            # worker may do some clean-ups which could cause surprises like
            # deadlock. see sshpeer.cleanup for example.
            try:
                scmutil.callcatch(ui, workerfunc)
            except KeyboardInterrupt:
                os._exit(255)
            except: # never return, therefore no re-raises
                try:
                    ui.traceback()
                finally:
                    os._exit(255)
            else:
                os._exit(0)
        pids.add(pid)
    os.close(wfd)
-    fp = os.fdopen(rfd, 'rb', 0)
+    fp = os.fdopen(rfd, pycompat.sysstr('rb'), 0)
    def cleanup():
        signal.signal(signal.SIGINT, oldhandler)
        waitforworkers()
        signal.signal(signal.SIGCHLD, oldchldhandler)
        status = problem[0]
        if status:
            if status < 0:
                os.kill(os.getpid(), -status)
            sys.exit(status)
    try:
        for line in util.iterfile(fp):
            l = line.split(' ', 1)
            yield int(l[0]), l[1][:-1]
    except: # re-raises
        killworkers()
        cleanup()
        raise
    cleanup()
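
# The parent/child protocol above is plain newline-delimited text: each
# child writes '%d %s\n' records to the pipe, and the parent splits each
# line once on a space to recover the (index, item) pairs. A standalone
# sketch of just the framing, independent of the fork machinery:
#
#     rfd, wfd = os.pipe()
#     os.write(wfd, '3 some/file.txt\n')   # what a child would emit
#     os.close(wfd)
#     fp = os.fdopen(rfd, pycompat.sysstr('rb'), 0)
#     for line in util.iterfile(fp):
#         i, item = line.split(' ', 1)     # -> ('3', 'some/file.txt\n')
#         print(int(i), item[:-1])         # -> 3 some/file.txt
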
def _posixexitstatus(code):
    '''convert a posix exit status into the same form returned by
    os.spawnv

    returns None if the process was stopped instead of exiting'''
    if os.WIFEXITED(code):
        return os.WEXITSTATUS(code)
    elif os.WIFSIGNALED(code):
        return -os.WTERMSIG(code)
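
# Concretely, a posix-only sketch of both branches (illustrative, not
# part of this module):
#
#     pid = os.fork()
#     if pid == 0:
#         sys.exit(2)                       # normal exit
#     _, st = os.waitpid(pid, 0)
#     _posixexitstatus(st)                  # -> 2
#
#     pid = os.fork()
#     if pid == 0:
#         signal.pause()                    # wait to be killed
#     os.kill(pid, signal.SIGTERM)
#     _, st = os.waitpid(pid, 0)
#     _posixexitstatus(st)                  # -> -15 (== -signal.SIGTERM)
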
if pycompat.osname != 'nt':
    _platformworker = _posixworker
    _exitstatus = _posixexitstatus

def partition(lst, nslices):
    '''partition a list into N slices of roughly equal size

    The current strategy takes every Nth element from the input. If
    we ever write workers that need to preserve grouping in the input
    we should consider allowing callers to specify a partition strategy.

    mpm is not a fan of this partitioning strategy when files are involved.
    In his words:

    Single-threaded Mercurial makes a point of creating and visiting
    files in a fixed order (alphabetical). When creating files in order,
    a typical filesystem is likely to allocate them on nearby regions on
    disk. Thus, when revisiting in the same order, locality is maximized
    and various forms of OS and disk-level caching and read-ahead get a
    chance to work.

    This effect can be quite significant on spinning disks. I discovered it
    circa Mercurial v0.4 when revlogs were named by hashes of filenames.
    Tarring a repo and copying it to another disk effectively randomized
    the revlog ordering on disk by sorting the revlogs by hash and suddenly
    performance of my kernel checkout benchmark dropped by ~10x because the
    "working set" of sectors visited no longer fit in the drive's cache and
    the workload switched from streaming to random I/O.

    What we should really be doing is have workers read filenames from an
    ordered queue. This preserves locality and also keeps any worker from
    getting more than one file out of balance.
    '''
    for i in range(nslices):
        yield lst[i::nslices]
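
# The striding is easiest to see on a concrete list: with ten items and
# three slices, each worker takes every third element rather than a
# contiguous run.
#
#     list(partition(list(range(10)), 3))
#     # -> [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]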