@@ -1,333 +1,332 b'' | |||
|
1 | 1 | # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import errno |
|
10 | 10 | import os |
|
11 | 11 | import re |
|
12 | 12 | import socket |
|
13 | 13 | |
|
14 | 14 | from mercurial.i18n import _ |
|
15 | 15 | from mercurial.pycompat import ( |
|
16 | 16 | getattr, |
|
17 | 17 | open, |
|
18 | 18 | ) |
|
19 | 19 | from mercurial import ( |
|
20 | 20 | encoding, |
|
21 | 21 | error, |
|
22 | 22 | pycompat, |
|
23 | 23 | util, |
|
24 | 24 | ) |
|
25 | 25 | from mercurial.utils import ( |
|
26 | 26 | dateutil, |
|
27 | 27 | procutil, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | from . import ( |
|
31 | 31 | common, |
|
32 | 32 | cvsps, |
|
33 | 33 | ) |
|
34 | 34 | |
|
35 | 35 | stringio = util.stringio |
|
36 | 36 | checktool = common.checktool |
|
37 | 37 | commit = common.commit |
|
38 | 38 | converter_source = common.converter_source |
|
39 | 39 | makedatetimestamp = common.makedatetimestamp |
|
40 | 40 | NoRepo = common.NoRepo |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | class convert_cvs(converter_source): |
|
44 | 44 | def __init__(self, ui, repotype, path, revs=None): |
|
45 | 45 | super(convert_cvs, self).__init__(ui, repotype, path, revs=revs) |
|
46 | 46 | |
|
47 | 47 | cvs = os.path.join(path, b"CVS") |
|
48 | 48 | if not os.path.exists(cvs): |
|
49 | 49 | raise NoRepo(_(b"%s does not look like a CVS checkout") % path) |
|
50 | 50 | |
|
51 | 51 | checktool(b'cvs') |
|
52 | 52 | |
|
53 | 53 | self.changeset = None |
|
54 | 54 | self.files = {} |
|
55 | 55 | self.tags = {} |
|
56 | 56 | self.lastbranch = {} |
|
57 | 57 | self.socket = None |
|
58 | 58 | self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1] |
|
59 | 59 | self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1] |
|
60 | 60 | self.encoding = encoding.encoding |
|
61 | 61 | |
|
62 | 62 | self._connect() |
|
63 | 63 | |
|
64 | 64 | def _parse(self): |
|
65 | 65 | if self.changeset is not None: |
|
66 | 66 | return |
|
67 | 67 | self.changeset = {} |
|
68 | 68 | |
|
69 | 69 | maxrev = 0 |
|
70 | 70 | if self.revs: |
|
71 | 71 | if len(self.revs) > 1: |
|
72 | 72 | raise error.Abort( |
|
73 | 73 | _( |
|
74 | 74 | b'cvs source does not support specifying ' |
|
75 | 75 | b'multiple revs' |
|
76 | 76 | ) |
|
77 | 77 | ) |
|
78 | 78 | # TODO: handle tags |
|
79 | 79 | try: |
|
80 | 80 | # patchset number? |
|
81 | 81 | maxrev = int(self.revs[0]) |
|
82 | 82 | except ValueError: |
|
83 | 83 | raise error.Abort( |
|
84 | 84 | _(b'revision %s is not a patchset number') % self.revs[0] |
|
85 | 85 | ) |
|
86 | 86 | |
|
87 | 87 | d = encoding.getcwd() |
|
88 | 88 | try: |
|
89 | 89 | os.chdir(self.path) |
|
90 | 90 | |
|
91 | 91 | cache = b'update' |
|
92 | 92 | if not self.ui.configbool(b'convert', b'cvsps.cache'): |
|
93 | 93 | cache = None |
|
94 | 94 | db = cvsps.createlog(self.ui, cache=cache) |
|
95 | 95 | db = cvsps.createchangeset( |
|
96 | 96 | self.ui, |
|
97 | 97 | db, |
|
98 | 98 | fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')), |
|
99 | 99 | mergeto=self.ui.config(b'convert', b'cvsps.mergeto'), |
|
100 | 100 | mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'), |
|
101 | 101 | ) |
|
102 | 102 | |
|
103 | 103 | for cs in db: |
|
104 | 104 | if maxrev and cs.id > maxrev: |
|
105 | 105 | break |
|
106 | 106 | id = b"%d" % cs.id |
|
107 | 107 | cs.author = self.recode(cs.author) |
|
108 | 108 | self.lastbranch[cs.branch] = id |
|
109 | 109 | cs.comment = self.recode(cs.comment) |
|
110 | 110 | if self.ui.configbool(b'convert', b'localtimezone'): |
|
111 | 111 | cs.date = makedatetimestamp(cs.date[0]) |
|
112 | 112 | date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2') |
|
113 | 113 | self.tags.update(dict.fromkeys(cs.tags, id)) |
|
114 | 114 | |
|
115 | 115 | files = {} |
|
116 | 116 | for f in cs.entries: |
|
117 | 117 | files[f.file] = b"%s%s" % ( |
|
118 | 118 | b'.'.join([(b"%d" % x) for x in f.revision]), |
|
119 | 119 | [b'', b'(DEAD)'][f.dead], |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | 122 | # add current commit to set |
|
123 | 123 | c = commit( |
|
124 | 124 | author=cs.author, |
|
125 | 125 | date=date, |
|
126 | 126 | parents=[(b"%d" % p.id) for p in cs.parents], |
|
127 | 127 | desc=cs.comment, |
|
128 | 128 | branch=cs.branch or b'', |
|
129 | 129 | ) |
|
130 | 130 | self.changeset[id] = c |
|
131 | 131 | self.files[id] = files |
|
132 | 132 | |
|
133 | 133 | self.heads = self.lastbranch.values() |
|
134 | 134 | finally: |
|
135 | 135 | os.chdir(d) |
|
136 | 136 | |
|
137 | 137 | def _connect(self): |
|
138 | 138 | root = self.cvsroot |
|
139 | 139 | conntype = None |
|
140 | 140 | user, host = None, None |
|
141 | 141 | cmd = [b'cvs', b'server'] |
|
142 | 142 | |
|
143 | 143 | self.ui.status(_(b"connecting to %s\n") % root) |
|
144 | 144 | |
|
145 | 145 | if root.startswith(b":pserver:"): |
|
146 | 146 | root = root[9:] |
|
147 | 147 | m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root) |
|
148 | 148 | if m: |
|
149 | 149 | conntype = b"pserver" |
|
150 | 150 | user, passw, serv, port, root = m.groups() |
|
151 | 151 | if not user: |
|
152 | 152 | user = b"anonymous" |
|
153 | 153 | if not port: |
|
154 | 154 | port = 2401 |
|
155 | 155 | else: |
|
156 | 156 | port = int(port) |
|
157 | 157 | format0 = b":pserver:%s@%s:%s" % (user, serv, root) |
|
158 | 158 | format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root) |
|
159 | 159 | |
|
160 | 160 | if not passw: |
|
161 | 161 | passw = b"A" |
|
162 | 162 | cvspass = os.path.expanduser(b"~/.cvspass") |
|
163 | 163 | try: |
|
164 | 164 | pf = open(cvspass, b'rb') |
|
165 | 165 | for line in pf.read().splitlines(): |
|
166 | 166 | part1, part2 = line.split(b' ', 1) |
|
167 | 167 | # /1 :pserver:user@example.com:2401/cvsroot/foo |
|
168 | 168 | # Ah<Z |
|
169 | 169 | if part1 == b'/1': |
|
170 | 170 | part1, part2 = part2.split(b' ', 1) |
|
171 | 171 | format = format1 |
|
172 | 172 | # :pserver:user@example.com:/cvsroot/foo Ah<Z |
|
173 | 173 | else: |
|
174 | 174 | format = format0 |
|
175 | 175 | if part1 == format: |
|
176 | 176 | passw = part2 |
|
177 | 177 | break |
|
178 | 178 | pf.close() |
|
179 | 179 | except IOError as inst: |
|
180 | 180 | if inst.errno != errno.ENOENT: |
|
181 | 181 | if not getattr(inst, 'filename', None): |
|
182 | 182 | inst.filename = cvspass |
|
183 | 183 | raise |
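
The loop just above recognizes the two on-disk formats of ~/.cvspass that the inline comments show: newer entries begin with '/1 ' and include the port in the root, older entries start directly with the root and omit the port. A standalone sketch of the same lookup, reusing the example root and scrambled password ('Ah<Z') from those comments; it is an illustration, not the patched code itself:

    def lookup_cvspass(lines, format0, format1):
        # format0: ':pserver:user@host:/path'      (old entry, no port)
        # format1: ':pserver:user@host:port/path'  ('/1 ' entry, with port)
        passw = 'A'                    # default scrambled password
        for line in lines:
            part1, part2 = line.split(' ', 1)
            if part1 == '/1':          # new-style record
                part1, part2 = part2.split(' ', 1)
                wanted = format1
            else:                      # old-style record
                wanted = format0
            if part1 == wanted:
                passw = part2
                break
        return passw

    entries = ['/1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z']
    print(lookup_cvspass(entries,
                         ':pserver:user@example.com:/cvsroot/foo',
                         ':pserver:user@example.com:2401/cvsroot/foo'))  # Ah<Z
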
|
184 | 184 | |
|
185 | 185 | sck = socket.socket() |
|
186 | 186 | sck.connect((serv, port)) |
|
187 | 187 | sck.send( |
|
188 | 188 | b"\n".join( |
|
189 | 189 | [ |
|
190 | 190 | b"BEGIN AUTH REQUEST", |
|
191 | 191 | root, |
|
192 | 192 | user, |
|
193 | 193 | passw, |
|
194 | 194 | b"END AUTH REQUEST", |
|
195 | 195 | b"", |
|
196 | 196 | ] |
|
197 | 197 | ) |
|
198 | 198 | ) |
|
199 | 199 | if sck.recv(128) != b"I LOVE YOU\n": |
|
200 | 200 | raise error.Abort(_(b"CVS pserver authentication failed")) |
|
201 | 201 | |
|
202 | 202 | self.writep = self.readp = sck.makefile(b'r+') |
|
203 | 203 | |
|
204 | 204 | if not conntype and root.startswith(b":local:"): |
|
205 | 205 | conntype = b"local" |
|
206 | 206 | root = root[7:] |
|
207 | 207 | |
|
208 | 208 | if not conntype: |
|
209 | 209 | # :ext:user@host/home/user/path/to/cvsroot |
|
210 | 210 | if root.startswith(b":ext:"): |
|
211 | 211 | root = root[5:] |
|
212 | 212 | m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root) |
|
213 | 213 | # Do not take Windows paths like "c:\foo\bar" for connection strings |
|
214 | 214 | if os.path.isdir(root) or not m: |
|
215 | 215 | conntype = b"local" |
|
216 | 216 | else: |
|
217 | 217 | conntype = b"rsh" |
|
218 | 218 | user, host, root = m.group(1), m.group(2), m.group(3) |
|
219 | 219 | |
|
220 | 220 | if conntype != b"pserver": |
|
221 | 221 | if conntype == b"rsh": |
|
222 | 222 | rsh = encoding.environ.get(b"CVS_RSH") or b"ssh" |
|
223 | 223 | if user: |
|
224 | 224 | cmd = [rsh, b'-l', user, host] + cmd |
|
225 | 225 | else: |
|
226 | 226 | cmd = [rsh, host] + cmd |
|
227 | 227 | |
|
228 | 228 | # popen2 does not support argument lists under Windows |
|
229 | | cmd = [procutil.shellquote(arg) for arg in cmd] |

230 | | cmd = procutil.quotecommand(b' '.join(cmd)) |

| 229 | cmd = b' '.join(procutil.shellquote(arg) for arg in cmd) |
|
231 | 230 | self.writep, self.readp = procutil.popen2(cmd) |
|
232 | 231 | |
|
233 | 232 | self.realroot = root |
|
234 | 233 | |
|
235 | 234 | self.writep.write(b"Root %s\n" % root) |
|
236 | 235 | self.writep.write( |
|
237 | 236 | b"Valid-responses ok error Valid-requests Mode" |
|
238 | 237 | b" M Mbinary E Checked-in Created Updated" |
|
239 | 238 | b" Merged Removed\n" |
|
240 | 239 | ) |
|
241 | 240 | self.writep.write(b"valid-requests\n") |
|
242 | 241 | self.writep.flush() |
|
243 | 242 | r = self.readp.readline() |
|
244 | 243 | if not r.startswith(b"Valid-requests"): |
|
245 | 244 | raise error.Abort( |
|
246 | 245 | _( |
|
247 | 246 | b'unexpected response from CVS server ' |
|
248 | 247 | b'(expected "Valid-requests", but got %r)' |
|
249 | 248 | ) |
|
250 | 249 | % r |
|
251 | 250 | ) |
|
252 | 251 | if b"UseUnchanged" in r: |
|
253 | 252 | self.writep.write(b"UseUnchanged\n") |
|
254 | 253 | self.writep.flush() |
|
255 | 254 | self.readp.readline() |
|
256 | 255 | |
|
257 | 256 | def getheads(self): |
|
258 | 257 | self._parse() |
|
259 | 258 | return self.heads |
|
260 | 259 | |
|
261 | 260 | def getfile(self, name, rev): |
|
262 | 261 | def chunkedread(fp, count): |
|
263 | 262 | # file-objects returned by socket.makefile() do not handle |
|
264 | 263 | # large read() requests very well. |
|
265 | 264 | chunksize = 65536 |
|
266 | 265 | output = stringio() |
|
267 | 266 | while count > 0: |
|
268 | 267 | data = fp.read(min(count, chunksize)) |
|
269 | 268 | if not data: |
|
270 | 269 | raise error.Abort( |
|
271 | 270 | _(b"%d bytes missing from remote file") % count |
|
272 | 271 | ) |
|
273 | 272 | count -= len(data) |
|
274 | 273 | output.write(data) |
|
275 | 274 | return output.getvalue() |
|
276 | 275 | |
|
277 | 276 | self._parse() |
|
278 | 277 | if rev.endswith(b"(DEAD)"): |
|
279 | 278 | return None, None |
|
280 | 279 | |
|
281 | 280 | args = (b"-N -P -kk -r %s --" % rev).split() |
|
282 | 281 | args.append(self.cvsrepo + b'/' + name) |
|
283 | 282 | for x in args: |
|
284 | 283 | self.writep.write(b"Argument %s\n" % x) |
|
285 | 284 | self.writep.write(b"Directory .\n%s\nco\n" % self.realroot) |
|
286 | 285 | self.writep.flush() |
|
287 | 286 | |
|
288 | 287 | data = b"" |
|
289 | 288 | mode = None |
|
290 | 289 | while True: |
|
291 | 290 | line = self.readp.readline() |
|
292 | 291 | if line.startswith(b"Created ") or line.startswith(b"Updated "): |
|
293 | 292 | self.readp.readline() # path |
|
294 | 293 | self.readp.readline() # entries |
|
295 | 294 | mode = self.readp.readline()[:-1] |
|
296 | 295 | count = int(self.readp.readline()[:-1]) |
|
297 | 296 | data = chunkedread(self.readp, count) |
|
298 | 297 | elif line.startswith(b" "): |
|
299 | 298 | data += line[1:] |
|
300 | 299 | elif line.startswith(b"M "): |
|
301 | 300 | pass |
|
302 | 301 | elif line.startswith(b"Mbinary "): |
|
303 | 302 | count = int(self.readp.readline()[:-1]) |
|
304 | 303 | data = chunkedread(self.readp, count) |
|
305 | 304 | else: |
|
306 | 305 | if line == b"ok\n": |
|
307 | 306 | if mode is None: |
|
308 | 307 | raise error.Abort(_(b'malformed response from CVS')) |
|
309 | 308 | return (data, b"x" in mode and b"x" or b"") |
|
310 | 309 | elif line.startswith(b"E "): |
|
311 | 310 | self.ui.warn(_(b"cvs server: %s\n") % line[2:]) |
|
312 | 311 | elif line.startswith(b"Remove"): |
|
313 | 312 | self.readp.readline() |
|
314 | 313 | else: |
|
315 | 314 | raise error.Abort(_(b"unknown CVS response: %s") % line) |
|
316 | 315 | |
|
317 | 316 | def getchanges(self, rev, full): |
|
318 | 317 | if full: |
|
319 | 318 | raise error.Abort(_(b"convert from cvs does not support --full")) |
|
320 | 319 | self._parse() |
|
321 | 320 | return sorted(pycompat.iteritems(self.files[rev])), {}, set() |
|
322 | 321 | |
|
323 | 322 | def getcommit(self, rev): |
|
324 | 323 | self._parse() |
|
325 | 324 | return self.changeset[rev] |
|
326 | 325 | |
|
327 | 326 | def gettags(self): |
|
328 | 327 | self._parse() |
|
329 | 328 | return self.tags |
|
330 | 329 | |
|
331 | 330 | def getchangedfiles(self, rev, i): |
|
332 | 331 | self._parse() |
|
333 | 332 | return sorted(self.files[rev]) |
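
Note: the only functional change in this first hunk is how _connect() builds the rsh/ssh command line. Instead of shell-quoting each argument into a list and then passing the joined string through the now-removed procutil.quotecommand(), the new code quotes and joins in a single step. A minimal standalone sketch of the same quoting approach, using shlex.quote from the standard library in place of procutil.shellquote (the user, host, and command values are made up for illustration):

    import shlex

    def build_rsh_command(rsh, user, host, remote_cmd):
        # Assemble the argument list the way _connect() does for the
        # "rsh" connection type, then quote each argument and join once.
        if user:
            cmd = [rsh, '-l', user, host] + remote_cmd
        else:
            cmd = [rsh, host] + remote_cmd
        return ' '.join(shlex.quote(arg) for arg in cmd)

    print(build_rsh_command('ssh', 'alice', 'cvs.example.org', ['cvs', 'server']))
    # -> ssh -l alice cvs.example.org cvs server

The joined string is then suitable for a shell-spawning popen, which matches how the patched code hands it to procutil.popen2(cmd).
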
@@ -1,378 +1,378 b'' | |||
|
1 | 1 | # gnuarch.py - GNU Arch support for the convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org> |
|
4 | 4 | # and others |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import shutil |
|
12 | 12 | import stat |
|
13 | 13 | import tempfile |
|
14 | 14 | |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | from mercurial import ( |
|
17 | 17 | encoding, |
|
18 | 18 | error, |
|
19 | 19 | mail, |
|
20 | 20 | pycompat, |
|
21 | 21 | util, |
|
22 | 22 | ) |
|
23 | 23 | from mercurial.utils import ( |
|
24 | 24 | dateutil, |
|
25 | 25 | procutil, |
|
26 | 26 | ) |
|
27 | 27 | from . import common |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | class gnuarch_source(common.converter_source, common.commandline): |
|
31 | 31 | class gnuarch_rev(object): |
|
32 | 32 | def __init__(self, rev): |
|
33 | 33 | self.rev = rev |
|
34 | 34 | self.summary = b'' |
|
35 | 35 | self.date = None |
|
36 | 36 | self.author = b'' |
|
37 | 37 | self.continuationof = None |
|
38 | 38 | self.add_files = [] |
|
39 | 39 | self.mod_files = [] |
|
40 | 40 | self.del_files = [] |
|
41 | 41 | self.ren_files = {} |
|
42 | 42 | self.ren_dirs = {} |
|
43 | 43 | |
|
44 | 44 | def __init__(self, ui, repotype, path, revs=None): |
|
45 | 45 | super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs) |
|
46 | 46 | |
|
47 | 47 | if not os.path.exists(os.path.join(path, b'{arch}')): |
|
48 | 48 | raise common.NoRepo( |
|
49 | 49 | _(b"%s does not look like a GNU Arch repository") % path |
|
50 | 50 | ) |
|
51 | 51 | |
|
52 | 52 | # Could use checktool, but we want to check for baz or tla. |
|
53 | 53 | self.execmd = None |
|
54 | 54 | if procutil.findexe(b'baz'): |
|
55 | 55 | self.execmd = b'baz' |
|
56 | 56 | else: |
|
57 | 57 | if procutil.findexe(b'tla'): |
|
58 | 58 | self.execmd = b'tla' |
|
59 | 59 | else: |
|
60 | 60 | raise error.Abort(_(b'cannot find a GNU Arch tool')) |
|
61 | 61 | |
|
62 | 62 | common.commandline.__init__(self, ui, self.execmd) |
|
63 | 63 | |
|
64 | 64 | self.path = os.path.realpath(path) |
|
65 | 65 | self.tmppath = None |
|
66 | 66 | |
|
67 | 67 | self.treeversion = None |
|
68 | 68 | self.lastrev = None |
|
69 | 69 | self.changes = {} |
|
70 | 70 | self.parents = {} |
|
71 | 71 | self.tags = {} |
|
72 | 72 | self.encoding = encoding.encoding |
|
73 | 73 | self.archives = [] |
|
74 | 74 | |
|
75 | 75 | def before(self): |
|
76 | 76 | # Get registered archives |
|
77 | 77 | self.archives = [ |
|
78 | 78 | i.rstrip(b'\n') for i in self.runlines0(b'archives', b'-n') |
|
79 | 79 | ] |
|
80 | 80 | |
|
81 | 81 | if self.execmd == b'tla': |
|
82 | 82 | output = self.run0(b'tree-version', self.path) |
|
83 | 83 | else: |
|
84 | 84 | output = self.run0(b'tree-version', b'-d', self.path) |
|
85 | 85 | self.treeversion = output.strip() |
|
86 | 86 | |
|
87 | 87 | # Get name of temporary directory |
|
88 | 88 | version = self.treeversion.split(b'/') |
|
89 | 89 | self.tmppath = os.path.join( |
|
90 | 90 | pycompat.fsencode(tempfile.gettempdir()), b'hg-%s' % version[1] |
|
91 | 91 | ) |
|
92 | 92 | |
|
93 | 93 | # Generate parents dictionary |
|
94 | 94 | self.parents[None] = [] |
|
95 | 95 | treeversion = self.treeversion |
|
96 | 96 | child = None |
|
97 | 97 | while treeversion: |
|
98 | 98 | self.ui.status(_(b'analyzing tree version %s...\n') % treeversion) |
|
99 | 99 | |
|
100 | 100 | archive = treeversion.split(b'/')[0] |
|
101 | 101 | if archive not in self.archives: |
|
102 | 102 | self.ui.status( |
|
103 | 103 | _( |
|
104 | 104 | b'tree analysis stopped because it points to ' |
|
105 | 105 | b'an unregistered archive %s...\n' |
|
106 | 106 | ) |
|
107 | 107 | % archive |
|
108 | 108 | ) |
|
109 | 109 | break |
|
110 | 110 | |
|
111 | 111 | # Get the complete list of revisions for that tree version |
|
112 | 112 | output, status = self.runlines( |
|
113 | 113 | b'revisions', b'-r', b'-f', treeversion |
|
114 | 114 | ) |
|
115 | 115 | self.checkexit( |
|
116 | 116 | status, b'failed retrieving revisions for %s' % treeversion |
|
117 | 117 | ) |
|
118 | 118 | |
|
119 | 119 | # No new iteration unless a revision has a continuation-of header |
|
120 | 120 | treeversion = None |
|
121 | 121 | |
|
122 | 122 | for l in output: |
|
123 | 123 | rev = l.strip() |
|
124 | 124 | self.changes[rev] = self.gnuarch_rev(rev) |
|
125 | 125 | self.parents[rev] = [] |
|
126 | 126 | |
|
127 | 127 | # Read author, date and summary |
|
128 | 128 | catlog, status = self.run(b'cat-log', b'-d', self.path, rev) |
|
129 | 129 | if status: |
|
130 | 130 | catlog = self.run0(b'cat-archive-log', rev) |
|
131 | 131 | self._parsecatlog(catlog, rev) |
|
132 | 132 | |
|
133 | 133 | # Populate the parents map |
|
134 | 134 | self.parents[child].append(rev) |
|
135 | 135 | |
|
136 | 136 | # Keep track of the current revision as the child of the next |
|
137 | 137 | # revision scanned |
|
138 | 138 | child = rev |
|
139 | 139 | |
|
140 | 140 | # Check if we have to follow the usual incremental history |
|
141 | 141 | # or if we have to 'jump' to a different treeversion given |
|
142 | 142 | # by the continuation-of header. |
|
143 | 143 | if self.changes[rev].continuationof: |
|
144 | 144 | treeversion = b'--'.join( |
|
145 | 145 | self.changes[rev].continuationof.split(b'--')[:-1] |
|
146 | 146 | ) |
|
147 | 147 | break |
|
148 | 148 | |
|
149 | 149 | # If we reached a base-0 revision w/o any continuation-of |
|
150 | 150 | # header, it means the tree history ends here. |
|
151 | 151 | if rev[-6:] == b'base-0': |
|
152 | 152 | break |
|
153 | 153 | |
|
154 | 154 | def after(self): |
|
155 | 155 | self.ui.debug(b'cleaning up %s\n' % self.tmppath) |
|
156 | 156 | shutil.rmtree(self.tmppath, ignore_errors=True) |
|
157 | 157 | |
|
158 | 158 | def getheads(self): |
|
159 | 159 | return self.parents[None] |
|
160 | 160 | |
|
161 | 161 | def getfile(self, name, rev): |
|
162 | 162 | if rev != self.lastrev: |
|
163 | 163 | raise error.Abort(_(b'internal calling inconsistency')) |
|
164 | 164 | |
|
165 | 165 | if not os.path.lexists(os.path.join(self.tmppath, name)): |
|
166 | 166 | return None, None |
|
167 | 167 | |
|
168 | 168 | return self._getfile(name, rev) |
|
169 | 169 | |
|
170 | 170 | def getchanges(self, rev, full): |
|
171 | 171 | if full: |
|
172 | 172 | raise error.Abort(_(b"convert from arch does not support --full")) |
|
173 | 173 | self._update(rev) |
|
174 | 174 | changes = [] |
|
175 | 175 | copies = {} |
|
176 | 176 | |
|
177 | 177 | for f in self.changes[rev].add_files: |
|
178 | 178 | changes.append((f, rev)) |
|
179 | 179 | |
|
180 | 180 | for f in self.changes[rev].mod_files: |
|
181 | 181 | changes.append((f, rev)) |
|
182 | 182 | |
|
183 | 183 | for f in self.changes[rev].del_files: |
|
184 | 184 | changes.append((f, rev)) |
|
185 | 185 | |
|
186 | 186 | for src in self.changes[rev].ren_files: |
|
187 | 187 | to = self.changes[rev].ren_files[src] |
|
188 | 188 | changes.append((src, rev)) |
|
189 | 189 | changes.append((to, rev)) |
|
190 | 190 | copies[to] = src |
|
191 | 191 | |
|
192 | 192 | for src in self.changes[rev].ren_dirs: |
|
193 | 193 | to = self.changes[rev].ren_dirs[src] |
|
194 | 194 | chgs, cps = self._rendirchanges(src, to) |
|
195 | 195 | changes += [(f, rev) for f in chgs] |
|
196 | 196 | copies.update(cps) |
|
197 | 197 | |
|
198 | 198 | self.lastrev = rev |
|
199 | 199 | return sorted(set(changes)), copies, set() |
|
200 | 200 | |
|
201 | 201 | def getcommit(self, rev): |
|
202 | 202 | changes = self.changes[rev] |
|
203 | 203 | return common.commit( |
|
204 | 204 | author=changes.author, |
|
205 | 205 | date=changes.date, |
|
206 | 206 | desc=changes.summary, |
|
207 | 207 | parents=self.parents[rev], |
|
208 | 208 | rev=rev, |
|
209 | 209 | ) |
|
210 | 210 | |
|
211 | 211 | def gettags(self): |
|
212 | 212 | return self.tags |
|
213 | 213 | |
|
214 | 214 | def _execute(self, cmd, *args, **kwargs): |
|
215 | 215 | cmdline = [self.execmd, cmd] |
|
216 | 216 | cmdline += args |
|
217 | 217 | cmdline = [procutil.shellquote(arg) for arg in cmdline] |
|
218 | 218 | bdevnull = pycompat.bytestr(os.devnull) |
|
219 | 219 | cmdline += [b'>', bdevnull, b'2>', bdevnull] |
|
220 | | cmdline = procutil.quotecommand(b' '.join(cmdline)) |

| 220 | cmdline = b' '.join(cmdline) |
|
221 | 221 | self.ui.debug(cmdline, b'\n') |
|
222 | 222 | return os.system(pycompat.rapply(procutil.tonativestr, cmdline)) |
|
223 | 223 | |
|
224 | 224 | def _update(self, rev): |
|
225 | 225 | self.ui.debug(b'applying revision %s...\n' % rev) |
|
226 | 226 | changeset, status = self.runlines(b'replay', b'-d', self.tmppath, rev) |
|
227 | 227 | if status: |
|
228 | 228 | # Something went wrong while merging (baz or tla |
|
229 | 229 | # issue?), get latest revision and try from there |
|
230 | 230 | shutil.rmtree(self.tmppath, ignore_errors=True) |
|
231 | 231 | self._obtainrevision(rev) |
|
232 | 232 | else: |
|
233 | 233 | old_rev = self.parents[rev][0] |
|
234 | 234 | self.ui.debug( |
|
235 | 235 | b'computing changeset between %s and %s...\n' % (old_rev, rev) |
|
236 | 236 | ) |
|
237 | 237 | self._parsechangeset(changeset, rev) |
|
238 | 238 | |
|
239 | 239 | def _getfile(self, name, rev): |
|
240 | 240 | mode = os.lstat(os.path.join(self.tmppath, name)).st_mode |
|
241 | 241 | if stat.S_ISLNK(mode): |
|
242 | 242 | data = util.readlink(os.path.join(self.tmppath, name)) |
|
243 | 243 | if mode: |
|
244 | 244 | mode = b'l' |
|
245 | 245 | else: |
|
246 | 246 | mode = b'' |
|
247 | 247 | else: |
|
248 | 248 | data = util.readfile(os.path.join(self.tmppath, name)) |
|
249 | 249 | mode = (mode & 0o111) and b'x' or b'' |
|
250 | 250 | return data, mode |
|
251 | 251 | |
|
252 | 252 | def _exclude(self, name): |
|
253 | 253 | exclude = [b'{arch}', b'.arch-ids', b'.arch-inventory'] |
|
254 | 254 | for exc in exclude: |
|
255 | 255 | if name.find(exc) != -1: |
|
256 | 256 | return True |
|
257 | 257 | return False |
|
258 | 258 | |
|
259 | 259 | def _readcontents(self, path): |
|
260 | 260 | files = [] |
|
261 | 261 | contents = os.listdir(path) |
|
262 | 262 | while len(contents) > 0: |
|
263 | 263 | c = contents.pop() |
|
264 | 264 | p = os.path.join(path, c) |
|
265 | 265 | # os.walk could be used, but here we avoid internal GNU |
|
266 | 266 | # Arch files and directories, thus saving a lot of time. |
|
267 | 267 | if not self._exclude(p): |
|
268 | 268 | if os.path.isdir(p): |
|
269 | 269 | contents += [os.path.join(c, f) for f in os.listdir(p)] |
|
270 | 270 | else: |
|
271 | 271 | files.append(c) |
|
272 | 272 | return files |
|
273 | 273 | |
|
274 | 274 | def _rendirchanges(self, src, dest): |
|
275 | 275 | changes = [] |
|
276 | 276 | copies = {} |
|
277 | 277 | files = self._readcontents(os.path.join(self.tmppath, dest)) |
|
278 | 278 | for f in files: |
|
279 | 279 | s = os.path.join(src, f) |
|
280 | 280 | d = os.path.join(dest, f) |
|
281 | 281 | changes.append(s) |
|
282 | 282 | changes.append(d) |
|
283 | 283 | copies[d] = s |
|
284 | 284 | return changes, copies |
|
285 | 285 | |
|
286 | 286 | def _obtainrevision(self, rev): |
|
287 | 287 | self.ui.debug(b'obtaining revision %s...\n' % rev) |
|
288 | 288 | output = self._execute(b'get', rev, self.tmppath) |
|
289 | 289 | self.checkexit(output) |
|
290 | 290 | self.ui.debug(b'analyzing revision %s...\n' % rev) |
|
291 | 291 | files = self._readcontents(self.tmppath) |
|
292 | 292 | self.changes[rev].add_files += files |
|
293 | 293 | |
|
294 | 294 | def _stripbasepath(self, path): |
|
295 | 295 | if path.startswith(b'./'): |
|
296 | 296 | return path[2:] |
|
297 | 297 | return path |
|
298 | 298 | |
|
299 | 299 | def _parsecatlog(self, data, rev): |
|
300 | 300 | try: |
|
301 | 301 | catlog = mail.parsebytes(data) |
|
302 | 302 | |
|
303 | 303 | # Commit date |
|
304 | 304 | self.changes[rev].date = dateutil.datestr( |
|
305 | 305 | dateutil.strdate(catlog['Standard-date'], b'%Y-%m-%d %H:%M:%S') |
|
306 | 306 | ) |
|
307 | 307 | |
|
308 | 308 | # Commit author |
|
309 | 309 | self.changes[rev].author = self.recode(catlog['Creator']) |
|
310 | 310 | |
|
311 | 311 | # Commit description |
|
312 | 312 | self.changes[rev].summary = b'\n\n'.join( |
|
313 | 313 | ( |
|
314 | 314 | self.recode(catlog['Summary']), |
|
315 | 315 | self.recode(catlog.get_payload()), |
|
316 | 316 | ) |
|
317 | 317 | ) |
|
318 | 318 | self.changes[rev].summary = self.recode(self.changes[rev].summary) |
|
319 | 319 | |
|
320 | 320 | # Commit revision origin when dealing with a branch or tag |
|
321 | 321 | if 'Continuation-of' in catlog: |
|
322 | 322 | self.changes[rev].continuationof = self.recode( |
|
323 | 323 | catlog['Continuation-of'] |
|
324 | 324 | ) |
|
325 | 325 | except Exception: |
|
326 | 326 | raise error.Abort(_(b'could not parse cat-log of %s') % rev) |
|
327 | 327 | |
|
328 | 328 | def _parsechangeset(self, data, rev): |
|
329 | 329 | for l in data: |
|
330 | 330 | l = l.strip() |
|
331 | 331 | # Added file (ignore added directory) |
|
332 | 332 | if l.startswith(b'A') and not l.startswith(b'A/'): |
|
333 | 333 | file = self._stripbasepath(l[1:].strip()) |
|
334 | 334 | if not self._exclude(file): |
|
335 | 335 | self.changes[rev].add_files.append(file) |
|
336 | 336 | # Deleted file (ignore deleted directory) |
|
337 | 337 | elif l.startswith(b'D') and not l.startswith(b'D/'): |
|
338 | 338 | file = self._stripbasepath(l[1:].strip()) |
|
339 | 339 | if not self._exclude(file): |
|
340 | 340 | self.changes[rev].del_files.append(file) |
|
341 | 341 | # Modified binary file |
|
342 | 342 | elif l.startswith(b'Mb'): |
|
343 | 343 | file = self._stripbasepath(l[2:].strip()) |
|
344 | 344 | if not self._exclude(file): |
|
345 | 345 | self.changes[rev].mod_files.append(file) |
|
346 | 346 | # Modified link |
|
347 | 347 | elif l.startswith(b'M->'): |
|
348 | 348 | file = self._stripbasepath(l[3:].strip()) |
|
349 | 349 | if not self._exclude(file): |
|
350 | 350 | self.changes[rev].mod_files.append(file) |
|
351 | 351 | # Modified file |
|
352 | 352 | elif l.startswith(b'M'): |
|
353 | 353 | file = self._stripbasepath(l[1:].strip()) |
|
354 | 354 | if not self._exclude(file): |
|
355 | 355 | self.changes[rev].mod_files.append(file) |
|
356 | 356 | # Renamed file (or link) |
|
357 | 357 | elif l.startswith(b'=>'): |
|
358 | 358 | files = l[2:].strip().split(b' ') |
|
359 | 359 | if len(files) == 1: |
|
360 | 360 | files = l[2:].strip().split(b'\t') |
|
361 | 361 | src = self._stripbasepath(files[0]) |
|
362 | 362 | dst = self._stripbasepath(files[1]) |
|
363 | 363 | if not self._exclude(src) and not self._exclude(dst): |
|
364 | 364 | self.changes[rev].ren_files[src] = dst |
|
365 | 365 | # Conversion from file to link or from link to file (modified) |
|
366 | 366 | elif l.startswith(b'ch'): |
|
367 | 367 | file = self._stripbasepath(l[2:].strip()) |
|
368 | 368 | if not self._exclude(file): |
|
369 | 369 | self.changes[rev].mod_files.append(file) |
|
370 | 370 | # Renamed directory |
|
371 | 371 | elif l.startswith(b'/>'): |
|
372 | 372 | dirs = l[2:].strip().split(b' ') |
|
373 | 373 | if len(dirs) == 1: |
|
374 | 374 | dirs = l[2:].strip().split(b'\t') |
|
375 | 375 | src = self._stripbasepath(dirs[0]) |
|
376 | 376 | dst = self._stripbasepath(dirs[1]) |
|
377 | 377 | if not self._exclude(src) and not self._exclude(dst): |
|
378 | 378 | self.changes[rev].ren_dirs[src] = dst |
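
For reference, _parsechangeset() above classifies each line of the 'replay' output purely by its prefix: 'A' added file, 'D' deleted file, 'Mb' modified binary, 'M->' modified link, 'M' modified file, '=>' renamed file, 'ch' file/link conversion, '/>' renamed directory (directory adds and deletes, 'A/' and 'D/', are ignored). A compact sketch of that dispatch over made-up sample lines; real tla/baz output may differ in detail:

    def classify(line):
        # Same prefix rules as gnuarch_source._parsechangeset, most
        # specific prefix first so 'Mb' and 'M->' win over plain 'M'.
        line = line.strip()
        rules = [
            ('A/', None), ('A', 'add'),
            ('D/', None), ('D', 'delete'),
            ('Mb', 'modify'), ('M->', 'modify'), ('M', 'modify'),
            ('=>', 'rename-file'), ('ch', 'modify'), ('/>', 'rename-dir'),
        ]
        for prefix, kind in rules:
            if line.startswith(prefix):
                return kind, line[len(prefix):].strip()
        return None, line

    for sample in ['A foo.c', 'D/ old-dir', 'Mb logo.png', '=> a.txt\tb.txt']:
        print(classify(sample))
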
@@ -1,1565 +1,1565 b'' | |||
|
1 | 1 | # Subversion 1.4/1.5 Python API backend |
|
2 | 2 | # |
|
3 | 3 | # Copyright(C) 2007 Daniel Holth et al |
|
4 | 4 | from __future__ import absolute_import |
|
5 | 5 | |
|
6 | 6 | import os |
|
7 | 7 | import re |
|
8 | 8 | import xml.dom.minidom |
|
9 | 9 | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | from mercurial.pycompat import open |
|
12 | 12 | from mercurial import ( |
|
13 | 13 | encoding, |
|
14 | 14 | error, |
|
15 | 15 | pycompat, |
|
16 | 16 | util, |
|
17 | 17 | vfs as vfsmod, |
|
18 | 18 | ) |
|
19 | 19 | from mercurial.utils import ( |
|
20 | 20 | dateutil, |
|
21 | 21 | procutil, |
|
22 | 22 | stringutil, |
|
23 | 23 | ) |
|
24 | 24 | |
|
25 | 25 | from . import common |
|
26 | 26 | |
|
27 | 27 | pickle = util.pickle |
|
28 | 28 | stringio = util.stringio |
|
29 | 29 | propertycache = util.propertycache |
|
30 | 30 | urlerr = util.urlerr |
|
31 | 31 | urlreq = util.urlreq |
|
32 | 32 | |
|
33 | 33 | commandline = common.commandline |
|
34 | 34 | commit = common.commit |
|
35 | 35 | converter_sink = common.converter_sink |
|
36 | 36 | converter_source = common.converter_source |
|
37 | 37 | decodeargs = common.decodeargs |
|
38 | 38 | encodeargs = common.encodeargs |
|
39 | 39 | makedatetimestamp = common.makedatetimestamp |
|
40 | 40 | mapfile = common.mapfile |
|
41 | 41 | MissingTool = common.MissingTool |
|
42 | 42 | NoRepo = common.NoRepo |
|
43 | 43 | |
|
44 | 44 | # Subversion stuff. Works best with very recent Python SVN bindings |
|
45 | 45 | # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing |
|
46 | 46 | # these bindings. |
|
47 | 47 | |
|
48 | 48 | try: |
|
49 | 49 | import svn |
|
50 | 50 | import svn.client |
|
51 | 51 | import svn.core |
|
52 | 52 | import svn.ra |
|
53 | 53 | import svn.delta |
|
54 | 54 | from . import transport |
|
55 | 55 | import warnings |
|
56 | 56 | |
|
57 | 57 | warnings.filterwarnings( |
|
58 | 58 | b'ignore', module=b'svn.core', category=DeprecationWarning |
|
59 | 59 | ) |
|
60 | 60 | svn.core.SubversionException # trigger import to catch error |
|
61 | 61 | |
|
62 | 62 | except ImportError: |
|
63 | 63 | svn = None |
|
64 | 64 | |
|
65 | 65 | |
|
66 | 66 | class SvnPathNotFound(Exception): |
|
67 | 67 | pass |
|
68 | 68 | |
|
69 | 69 | |
|
70 | 70 | def revsplit(rev): |
|
71 | 71 | """Parse a revision string and return (uuid, path, revnum). |
|
72 | 72 | >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2' |
|
73 | 73 | ... b'/proj%20B/mytrunk/mytrunk@1') |
|
74 | 74 | ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1) |
|
75 | 75 | >>> revsplit(b'svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1') |
|
76 | 76 | ('', '', 1) |
|
77 | 77 | >>> revsplit(b'@7') |
|
78 | 78 | ('', '', 7) |
|
79 | 79 | >>> revsplit(b'7') |
|
80 | 80 | ('', '', 0) |
|
81 | 81 | >>> revsplit(b'bad') |
|
82 | 82 | ('', '', 0) |
|
83 | 83 | """ |
|
84 | 84 | parts = rev.rsplit(b'@', 1) |
|
85 | 85 | revnum = 0 |
|
86 | 86 | if len(parts) > 1: |
|
87 | 87 | revnum = int(parts[1]) |
|
88 | 88 | parts = parts[0].split(b'/', 1) |
|
89 | 89 | uuid = b'' |
|
90 | 90 | mod = b'' |
|
91 | 91 | if len(parts) > 1 and parts[0].startswith(b'svn:'): |
|
92 | 92 | uuid = parts[0][4:] |
|
93 | 93 | mod = b'/' + parts[1] |
|
94 | 94 | return uuid, mod, revnum |
|
95 | 95 | |
|
96 | 96 | |
|
97 | 97 | def quote(s): |
|
98 | 98 | # As of svn 1.7, many svn calls expect "canonical" paths. In |
|
99 | 99 | # theory, we should call svn.core.*canonicalize() on all paths |
|
100 | 100 | # before passing them to the API. Instead, we assume the base url |
|
101 | 101 | # is canonical and copy the behaviour of svn URL encoding function |
|
102 | 102 | # so we can extend it safely with new components. The "safe" |
|
103 | 103 | # characters were taken from the "svn_uri__char_validity" table in |
|
104 | 104 | # libsvn_subr/path.c. |
|
105 | 105 | return urlreq.quote(s, b"!$&'()*+,-./:=@_~") |
|
106 | 106 | |
|
107 | 107 | |
|
108 | 108 | def geturl(path): |
|
109 | 109 | try: |
|
110 | 110 | return svn.client.url_from_path(svn.core.svn_path_canonicalize(path)) |
|
111 | 111 | except svn.core.SubversionException: |
|
112 | 112 | # svn.client.url_from_path() fails with local repositories |
|
113 | 113 | pass |
|
114 | 114 | if os.path.isdir(path): |
|
115 | 115 | path = os.path.normpath(os.path.abspath(path)) |
|
116 | 116 | if pycompat.iswindows: |
|
117 | 117 | path = b'/' + util.normpath(path) |
|
118 | 118 | # Module URL is later compared with the repository URL returned |
|
119 | 119 | # by svn API, which is UTF-8. |
|
120 | 120 | path = encoding.tolocal(path) |
|
121 | 121 | path = b'file://%s' % quote(path) |
|
122 | 122 | return svn.core.svn_path_canonicalize(path) |
|
123 | 123 | |
|
124 | 124 | |
|
125 | 125 | def optrev(number): |
|
126 | 126 | optrev = svn.core.svn_opt_revision_t() |
|
127 | 127 | optrev.kind = svn.core.svn_opt_revision_number |
|
128 | 128 | optrev.value.number = number |
|
129 | 129 | return optrev |
|
130 | 130 | |
|
131 | 131 | |
|
132 | 132 | class changedpath(object): |
|
133 | 133 | def __init__(self, p): |
|
134 | 134 | self.copyfrom_path = p.copyfrom_path |
|
135 | 135 | self.copyfrom_rev = p.copyfrom_rev |
|
136 | 136 | self.action = p.action |
|
137 | 137 | |
|
138 | 138 | |
|
139 | 139 | def get_log_child( |
|
140 | 140 | fp, |
|
141 | 141 | url, |
|
142 | 142 | paths, |
|
143 | 143 | start, |
|
144 | 144 | end, |
|
145 | 145 | limit=0, |
|
146 | 146 | discover_changed_paths=True, |
|
147 | 147 | strict_node_history=False, |
|
148 | 148 | ): |
|
149 | 149 | protocol = -1 |
|
150 | 150 | |
|
151 | 151 | def receiver(orig_paths, revnum, author, date, message, pool): |
|
152 | 152 | paths = {} |
|
153 | 153 | if orig_paths is not None: |
|
154 | 154 | for k, v in pycompat.iteritems(orig_paths): |
|
155 | 155 | paths[k] = changedpath(v) |
|
156 | 156 | pickle.dump((paths, revnum, author, date, message), fp, protocol) |
|
157 | 157 | |
|
158 | 158 | try: |
|
159 | 159 | # Use an ra of our own so that our parent can consume |
|
160 | 160 | # our results without confusing the server. |
|
161 | 161 | t = transport.SvnRaTransport(url=url) |
|
162 | 162 | svn.ra.get_log( |
|
163 | 163 | t.ra, |
|
164 | 164 | paths, |
|
165 | 165 | start, |
|
166 | 166 | end, |
|
167 | 167 | limit, |
|
168 | 168 | discover_changed_paths, |
|
169 | 169 | strict_node_history, |
|
170 | 170 | receiver, |
|
171 | 171 | ) |
|
172 | 172 | except IOError: |
|
173 | 173 | # Caller may interrupt the iteration |
|
174 | 174 | pickle.dump(None, fp, protocol) |
|
175 | 175 | except Exception as inst: |
|
176 | 176 | pickle.dump(stringutil.forcebytestr(inst), fp, protocol) |
|
177 | 177 | else: |
|
178 | 178 | pickle.dump(None, fp, protocol) |
|
179 | 179 | fp.flush() |
|
180 | 180 | # With large history, cleanup process goes crazy and suddenly |
|
181 | 181 | # consumes *huge* amount of memory. The output file being closed, |
|
182 | 182 | # there is no need for clean termination. |
|
183 | 183 | os._exit(0) |
|
184 | 184 | |
|
185 | 185 | |
|
186 | 186 | def debugsvnlog(ui, **opts): |
|
187 | 187 | """Fetch SVN log in a subprocess and channel them back to parent to |
|
188 | 188 | avoid memory collection issues. |
|
189 | 189 | """ |
|
190 | 190 | if svn is None: |
|
191 | 191 | raise error.Abort( |
|
192 | 192 | _(b'debugsvnlog could not load Subversion python bindings') |
|
193 | 193 | ) |
|
194 | 194 | |
|
195 | 195 | args = decodeargs(ui.fin.read()) |
|
196 | 196 | get_log_child(ui.fout, *args) |
|
197 | 197 | |
|
198 | 198 | |
|
199 | 199 | class logstream(object): |
|
200 | 200 | """Interruptible revision log iterator.""" |
|
201 | 201 | |
|
202 | 202 | def __init__(self, stdout): |
|
203 | 203 | self._stdout = stdout |
|
204 | 204 | |
|
205 | 205 | def __iter__(self): |
|
206 | 206 | while True: |
|
207 | 207 | try: |
|
208 | 208 | entry = pickle.load(self._stdout) |
|
209 | 209 | except EOFError: |
|
210 | 210 | raise error.Abort( |
|
211 | 211 | _( |
|
212 | 212 | b'Mercurial failed to run itself, check' |
|
213 | 213 | b' hg executable is in PATH' |
|
214 | 214 | ) |
|
215 | 215 | ) |
|
216 | 216 | try: |
|
217 | 217 | orig_paths, revnum, author, date, message = entry |
|
218 | 218 | except (TypeError, ValueError): |
|
219 | 219 | if entry is None: |
|
220 | 220 | break |
|
221 | 221 | raise error.Abort(_(b"log stream exception '%s'") % entry) |
|
222 | 222 | yield entry |
|
223 | 223 | |
|
224 | 224 | def close(self): |
|
225 | 225 | if self._stdout: |
|
226 | 226 | self._stdout.close() |
|
227 | 227 | self._stdout = None |
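
The get_log_child() / debugsvnlog() / logstream trio above amounts to a small framing protocol: the child pickles one (paths, revnum, author, date, message) tuple per log entry onto its output stream and terminates the stream with a None sentinel (or an error string), and the parent calls pickle.load() until it sees that sentinel. A minimal sketch of just the framing, using an in-memory buffer where the real code uses the child process's stdout (the sample entries are invented):

    import io
    import pickle

    def produce(fp, entries):
        # Child side: one pickle.dump() per entry, then the None sentinel,
        # mirroring receiver() plus the final pickle.dump(None, fp, protocol).
        for entry in entries:
            pickle.dump(entry, fp)
        pickle.dump(None, fp)
        fp.flush()

    def consume(fp):
        # Parent side: the logstream.__iter__ loop -- load until the sentinel.
        while True:
            entry = pickle.load(fp)
            if entry is None:
                break
            yield entry

    buf = io.BytesIO()
    produce(buf, [({'trunk/a.c': 'M'}, 5, 'alice', '2020-01-01', 'fix'),
                  ({'trunk/b.c': 'A'}, 6, 'bob', '2020-01-02', 'add')])
    buf.seek(0)
    for paths, revnum, author, date, message in consume(buf):
        print(revnum, author, message)
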
|
228 | 228 | |
|
229 | 229 | |
|
230 | 230 | class directlogstream(list): |
|
231 | 231 | """Direct revision log iterator. |
|
232 | 232 | This can be used for debugging and development but it will probably leak |
|
233 | 233 | memory and is not suitable for real conversions.""" |
|
234 | 234 | |
|
235 | 235 | def __init__( |
|
236 | 236 | self, |
|
237 | 237 | url, |
|
238 | 238 | paths, |
|
239 | 239 | start, |
|
240 | 240 | end, |
|
241 | 241 | limit=0, |
|
242 | 242 | discover_changed_paths=True, |
|
243 | 243 | strict_node_history=False, |
|
244 | 244 | ): |
|
245 | 245 | def receiver(orig_paths, revnum, author, date, message, pool): |
|
246 | 246 | paths = {} |
|
247 | 247 | if orig_paths is not None: |
|
248 | 248 | for k, v in pycompat.iteritems(orig_paths): |
|
249 | 249 | paths[k] = changedpath(v) |
|
250 | 250 | self.append((paths, revnum, author, date, message)) |
|
251 | 251 | |
|
252 | 252 | # Use an ra of our own so that our parent can consume |
|
253 | 253 | # our results without confusing the server. |
|
254 | 254 | t = transport.SvnRaTransport(url=url) |
|
255 | 255 | svn.ra.get_log( |
|
256 | 256 | t.ra, |
|
257 | 257 | paths, |
|
258 | 258 | start, |
|
259 | 259 | end, |
|
260 | 260 | limit, |
|
261 | 261 | discover_changed_paths, |
|
262 | 262 | strict_node_history, |
|
263 | 263 | receiver, |
|
264 | 264 | ) |
|
265 | 265 | |
|
266 | 266 | def close(self): |
|
267 | 267 | pass |
|
268 | 268 | |
|
269 | 269 | |
|
270 | 270 | # Check to see if the given path is a local Subversion repo. Verify this by |
|
271 | 271 | # looking for several svn-specific files and directories in the given |
|
272 | 272 | # directory. |
|
273 | 273 | def filecheck(ui, path, proto): |
|
274 | 274 | for x in (b'locks', b'hooks', b'format', b'db'): |
|
275 | 275 | if not os.path.exists(os.path.join(path, x)): |
|
276 | 276 | return False |
|
277 | 277 | return True |
|
278 | 278 | |
|
279 | 279 | |
|
280 | 280 | # Check to see if a given path is the root of an svn repo over http. We verify |
|
281 | 281 | # this by requesting a version-controlled URL we know can't exist and looking |
|
282 | 282 | # for the svn-specific "not found" XML. |
|
283 | 283 | def httpcheck(ui, path, proto): |
|
284 | 284 | try: |
|
285 | 285 | opener = urlreq.buildopener() |
|
286 | 286 | rsp = opener.open(b'%s://%s/!svn/ver/0/.svn' % (proto, path), b'rb') |
|
287 | 287 | data = rsp.read() |
|
288 | 288 | except urlerr.httperror as inst: |
|
289 | 289 | if inst.code != 404: |
|
290 | 290 | # Except for 404 we cannot know for sure this is not an svn repo |
|
291 | 291 | ui.warn( |
|
292 | 292 | _( |
|
293 | 293 | b'svn: cannot probe remote repository, assume it could ' |
|
294 | 294 | b'be a subversion repository. Use --source-type if you ' |
|
295 | 295 | b'know better.\n' |
|
296 | 296 | ) |
|
297 | 297 | ) |
|
298 | 298 | return True |
|
299 | 299 | data = inst.fp.read() |
|
300 | 300 | except Exception: |
|
301 | 301 | # Could be urlerr.urlerror if the URL is invalid or anything else. |
|
302 | 302 | return False |
|
303 | 303 | return b'<m:human-readable errcode="160013">' in data |
|
304 | 304 | |
|
305 | 305 | |
|
306 | 306 | protomap = { |
|
307 | 307 | b'http': httpcheck, |
|
308 | 308 | b'https': httpcheck, |
|
309 | 309 | b'file': filecheck, |
|
310 | 310 | } |
|
311 | 311 | |
|
312 | 312 | |
|
313 | 313 | def issvnurl(ui, url): |
|
314 | 314 | try: |
|
315 | 315 | proto, path = url.split(b'://', 1) |
|
316 | 316 | if proto == b'file': |
|
317 | 317 | if ( |
|
318 | 318 | pycompat.iswindows |
|
319 | 319 | and path[:1] == b'/' |
|
320 | 320 | and path[1:2].isalpha() |
|
321 | 321 | and path[2:6].lower() == b'%3a/' |
|
322 | 322 | ): |
|
323 | 323 | path = path[:2] + b':/' + path[6:] |
|
324 | 324 | path = urlreq.url2pathname(path) |
|
325 | 325 | except ValueError: |
|
326 | 326 | proto = b'file' |
|
327 | 327 | path = os.path.abspath(url) |
|
328 | 328 | if proto == b'file': |
|
329 | 329 | path = util.pconvert(path) |
|
330 | 330 | check = protomap.get(proto, lambda *args: False) |
|
331 | 331 | while b'/' in path: |
|
332 | 332 | if check(ui, path, proto): |
|
333 | 333 | return True |
|
334 | 334 | path = path.rsplit(b'/', 1)[0] |
|
335 | 335 | return False |
|
336 | 336 | |
|
337 | 337 | |
|
338 | 338 | # SVN conversion code stolen from bzr-svn and tailor |
|
339 | 339 | # |
|
340 | 340 | # Subversion looks like a versioned filesystem, branches structures |
|
341 | 341 | # are defined by conventions and not enforced by the tool. First, |
|
342 | 342 | # we define the potential branches (modules) as "trunk" and "branches" |
|
343 | 343 | # children directories. Revisions are then identified by their |
|
344 | 344 | # module and revision number (and a repository identifier). |
|
345 | 345 | # |
|
346 | 346 | # The revision graph is really a tree (or a forest). By default, a |
|
347 | 347 | # revision parent is the previous revision in the same module. If the |
|
348 | 348 | # module directory is copied/moved from another module then the |
|
349 | 349 | # revision is the module root and its parent the source revision in |
|
350 | 350 | # the parent module. A revision has at most one parent. |
|
351 | 351 | # |
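
Throughout this module a revision is identified by the string produced by svn_source.revid() further down, b'svn:<uuid><module>@<revnum>', and taken apart again by revsplit() above. A tiny round-trip sketch of that format (simplified: it skips the fallbacks revsplit() has for malformed input, and reuses the UUID and module from the first revsplit() doctest):

    def make_revid(uuid, module, revnum):
        # Same format as svn_source.revid()
        return b'svn:%s%s@%d' % (uuid, module, revnum)

    def split_revid(rev):
        # Simplified revsplit(): returns (uuid, module, revnum)
        head, revnum = rev.rsplit(b'@', 1)
        uuid, module = head.split(b'/', 1)
        return uuid[4:], b'/' + module, int(revnum)

    rid = make_revid(b'a2147622-4a9f-4db4-a8d3-13562ff547b2',
                     b'/proj%20B/mytrunk/mytrunk', 1)
    print(rid)
    print(split_revid(rid))  # same values as the revsplit() doctest, as bytes
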
|
352 | 352 | class svn_source(converter_source): |
|
353 | 353 | def __init__(self, ui, repotype, url, revs=None): |
|
354 | 354 | super(svn_source, self).__init__(ui, repotype, url, revs=revs) |
|
355 | 355 | |
|
356 | 356 | if not ( |
|
357 | 357 | url.startswith(b'svn://') |
|
358 | 358 | or url.startswith(b'svn+ssh://') |
|
359 | 359 | or ( |
|
360 | 360 | os.path.exists(url) |
|
361 | 361 | and os.path.exists(os.path.join(url, b'.svn')) |
|
362 | 362 | ) |
|
363 | 363 | or issvnurl(ui, url) |
|
364 | 364 | ): |
|
365 | 365 | raise NoRepo( |
|
366 | 366 | _(b"%s does not look like a Subversion repository") % url |
|
367 | 367 | ) |
|
368 | 368 | if svn is None: |
|
369 | 369 | raise MissingTool(_(b'could not load Subversion python bindings')) |
|
370 | 370 | |
|
371 | 371 | try: |
|
372 | 372 | version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR |
|
373 | 373 | if version < (1, 4): |
|
374 | 374 | raise MissingTool( |
|
375 | 375 | _( |
|
376 | 376 | b'Subversion python bindings %d.%d found, ' |
|
377 | 377 | b'1.4 or later required' |
|
378 | 378 | ) |
|
379 | 379 | % version |
|
380 | 380 | ) |
|
381 | 381 | except AttributeError: |
|
382 | 382 | raise MissingTool( |
|
383 | 383 | _( |
|
384 | 384 | b'Subversion python bindings are too old, 1.4 ' |
|
385 | 385 | b'or later required' |
|
386 | 386 | ) |
|
387 | 387 | ) |
|
388 | 388 | |
|
389 | 389 | self.lastrevs = {} |
|
390 | 390 | |
|
391 | 391 | latest = None |
|
392 | 392 | try: |
|
393 | 393 | # Support file://path@rev syntax. Useful e.g. to convert |
|
394 | 394 | # deleted branches. |
|
395 | 395 | at = url.rfind(b'@') |
|
396 | 396 | if at >= 0: |
|
397 | 397 | latest = int(url[at + 1 :]) |
|
398 | 398 | url = url[:at] |
|
399 | 399 | except ValueError: |
|
400 | 400 | pass |
|
401 | 401 | self.url = geturl(url) |
|
402 | 402 | self.encoding = b'UTF-8' # Subversion is always nominal UTF-8 |
|
403 | 403 | try: |
|
404 | 404 | self.transport = transport.SvnRaTransport(url=self.url) |
|
405 | 405 | self.ra = self.transport.ra |
|
406 | 406 | self.ctx = self.transport.client |
|
407 | 407 | self.baseurl = svn.ra.get_repos_root(self.ra) |
|
408 | 408 | # Module is either empty or a repository path starting with |
|
409 | 409 | # a slash and not ending with a slash. |
|
410 | 410 | self.module = urlreq.unquote(self.url[len(self.baseurl) :]) |
|
411 | 411 | self.prevmodule = None |
|
412 | 412 | self.rootmodule = self.module |
|
413 | 413 | self.commits = {} |
|
414 | 414 | self.paths = {} |
|
415 | 415 | self.uuid = svn.ra.get_uuid(self.ra) |
|
416 | 416 | except svn.core.SubversionException: |
|
417 | 417 | ui.traceback() |
|
418 | 418 | svnversion = b'%d.%d.%d' % ( |
|
419 | 419 | svn.core.SVN_VER_MAJOR, |
|
420 | 420 | svn.core.SVN_VER_MINOR, |
|
421 | 421 | svn.core.SVN_VER_MICRO, |
|
422 | 422 | ) |
|
423 | 423 | raise NoRepo( |
|
424 | 424 | _( |
|
425 | 425 | b"%s does not look like a Subversion repository " |
|
426 | 426 | b"to libsvn version %s" |
|
427 | 427 | ) |
|
428 | 428 | % (self.url, svnversion) |
|
429 | 429 | ) |
|
430 | 430 | |
|
431 | 431 | if revs: |
|
432 | 432 | if len(revs) > 1: |
|
433 | 433 | raise error.Abort( |
|
434 | 434 | _( |
|
435 | 435 | b'subversion source does not support ' |
|
436 | 436 | b'specifying multiple revisions' |
|
437 | 437 | ) |
|
438 | 438 | ) |
|
439 | 439 | try: |
|
440 | 440 | latest = int(revs[0]) |
|
441 | 441 | except ValueError: |
|
442 | 442 | raise error.Abort( |
|
443 | 443 | _(b'svn: revision %s is not an integer') % revs[0] |
|
444 | 444 | ) |
|
445 | 445 | |
|
446 | 446 | trunkcfg = self.ui.config(b'convert', b'svn.trunk') |
|
447 | 447 | if trunkcfg is None: |
|
448 | 448 | trunkcfg = b'trunk' |
|
449 | 449 | self.trunkname = trunkcfg.strip(b'/') |
|
450 | 450 | self.startrev = self.ui.config(b'convert', b'svn.startrev') |
|
451 | 451 | try: |
|
452 | 452 | self.startrev = int(self.startrev) |
|
453 | 453 | if self.startrev < 0: |
|
454 | 454 | self.startrev = 0 |
|
455 | 455 | except ValueError: |
|
456 | 456 | raise error.Abort( |
|
457 | 457 | _(b'svn: start revision %s is not an integer') % self.startrev |
|
458 | 458 | ) |
|
459 | 459 | |
|
460 | 460 | try: |
|
461 | 461 | self.head = self.latest(self.module, latest) |
|
462 | 462 | except SvnPathNotFound: |
|
463 | 463 | self.head = None |
|
464 | 464 | if not self.head: |
|
465 | 465 | raise error.Abort( |
|
466 | 466 | _(b'no revision found in module %s') % self.module |
|
467 | 467 | ) |
|
468 | 468 | self.last_changed = self.revnum(self.head) |
|
469 | 469 | |
|
470 | 470 | self._changescache = (None, None) |
|
471 | 471 | |
|
472 | 472 | if os.path.exists(os.path.join(url, b'.svn/entries')): |
|
473 | 473 | self.wc = url |
|
474 | 474 | else: |
|
475 | 475 | self.wc = None |
|
476 | 476 | self.convertfp = None |
|
477 | 477 | |
|
478 | 478 | def setrevmap(self, revmap): |
|
479 | 479 | lastrevs = {} |
|
480 | 480 | for revid in revmap: |
|
481 | 481 | uuid, module, revnum = revsplit(revid) |
|
482 | 482 | lastrevnum = lastrevs.setdefault(module, revnum) |
|
483 | 483 | if revnum > lastrevnum: |
|
484 | 484 | lastrevs[module] = revnum |
|
485 | 485 | self.lastrevs = lastrevs |
|
486 | 486 | |
|
487 | 487 | def exists(self, path, optrev): |
|
488 | 488 | try: |
|
489 | 489 | svn.client.ls( |
|
490 | 490 | self.url.rstrip(b'/') + b'/' + quote(path), |
|
491 | 491 | optrev, |
|
492 | 492 | False, |
|
493 | 493 | self.ctx, |
|
494 | 494 | ) |
|
495 | 495 | return True |
|
496 | 496 | except svn.core.SubversionException: |
|
497 | 497 | return False |
|
498 | 498 | |
|
499 | 499 | def getheads(self): |
|
500 | 500 | def isdir(path, revnum): |
|
501 | 501 | kind = self._checkpath(path, revnum) |
|
502 | 502 | return kind == svn.core.svn_node_dir |
|
503 | 503 | |
|
504 | 504 | def getcfgpath(name, rev): |
|
505 | 505 | cfgpath = self.ui.config(b'convert', b'svn.' + name) |
|
506 | 506 | if cfgpath is not None and cfgpath.strip() == b'': |
|
507 | 507 | return None |
|
508 | 508 | path = (cfgpath or name).strip(b'/') |
|
509 | 509 | if not self.exists(path, rev): |
|
510 | 510 | if self.module.endswith(path) and name == b'trunk': |
|
511 | 511 | # we are converting from inside this directory |
|
512 | 512 | return None |
|
513 | 513 | if cfgpath: |
|
514 | 514 | raise error.Abort( |
|
515 | 515 | _(b'expected %s to be at %r, but not found') |
|
516 | 516 | % (name, path) |
|
517 | 517 | ) |
|
518 | 518 | return None |
|
519 | 519 | self.ui.note(_(b'found %s at %r\n') % (name, path)) |
|
520 | 520 | return path |
|
521 | 521 | |
|
522 | 522 | rev = optrev(self.last_changed) |
|
523 | 523 | oldmodule = b'' |
|
524 | 524 | trunk = getcfgpath(b'trunk', rev) |
|
525 | 525 | self.tags = getcfgpath(b'tags', rev) |
|
526 | 526 | branches = getcfgpath(b'branches', rev) |
|
527 | 527 | |
|
528 | 528 | # If the project has a trunk or branches, we will extract heads |
|
529 | 529 | # from them. We keep the project root otherwise. |
|
530 | 530 | if trunk: |
|
531 | 531 | oldmodule = self.module or b'' |
|
532 | 532 | self.module += b'/' + trunk |
|
533 | 533 | self.head = self.latest(self.module, self.last_changed) |
|
534 | 534 | if not self.head: |
|
535 | 535 | raise error.Abort( |
|
536 | 536 | _(b'no revision found in module %s') % self.module |
|
537 | 537 | ) |
|
538 | 538 | |
|
539 | 539 | # First head in the list is the module's head |
|
540 | 540 | self.heads = [self.head] |
|
541 | 541 | if self.tags is not None: |
|
542 | 542 | self.tags = b'%s/%s' % (oldmodule, (self.tags or b'tags')) |
|
543 | 543 | |
|
544 | 544 | # Check if branches bring a few more heads to the list |
|
545 | 545 | if branches: |
|
546 | 546 | rpath = self.url.strip(b'/') |
|
547 | 547 | branchnames = svn.client.ls( |
|
548 | 548 | rpath + b'/' + quote(branches), rev, False, self.ctx |
|
549 | 549 | ) |
|
550 | 550 | for branch in sorted(branchnames): |
|
551 | 551 | module = b'%s/%s/%s' % (oldmodule, branches, branch) |
|
552 | 552 | if not isdir(module, self.last_changed): |
|
553 | 553 | continue |
|
554 | 554 | brevid = self.latest(module, self.last_changed) |
|
555 | 555 | if not brevid: |
|
556 | 556 | self.ui.note(_(b'ignoring empty branch %s\n') % branch) |
|
557 | 557 | continue |
|
558 | 558 | self.ui.note( |
|
559 | 559 | _(b'found branch %s at %d\n') |
|
560 | 560 | % (branch, self.revnum(brevid)) |
|
561 | 561 | ) |
|
562 | 562 | self.heads.append(brevid) |
|
563 | 563 | |
|
564 | 564 | if self.startrev and self.heads: |
|
565 | 565 | if len(self.heads) > 1: |
|
566 | 566 | raise error.Abort( |
|
567 | 567 | _( |
|
568 | 568 | b'svn: start revision is not supported ' |
|
569 | 569 | b'with more than one branch' |
|
570 | 570 | ) |
|
571 | 571 | ) |
|
572 | 572 | revnum = self.revnum(self.heads[0]) |
|
573 | 573 | if revnum < self.startrev: |
|
574 | 574 | raise error.Abort( |
|
575 | 575 | _(b'svn: no revision found after start revision %d') |
|
576 | 576 | % self.startrev |
|
577 | 577 | ) |
|
578 | 578 | |
|
579 | 579 | return self.heads |
|
580 | 580 | |
|
581 | 581 | def _getchanges(self, rev, full): |
|
582 | 582 | (paths, parents) = self.paths[rev] |
|
583 | 583 | copies = {} |
|
584 | 584 | if parents: |
|
585 | 585 | files, self.removed, copies = self.expandpaths(rev, paths, parents) |
|
586 | 586 | if full or not parents: |
|
587 | 587 | # Perform a full checkout on roots |
|
588 | 588 | uuid, module, revnum = revsplit(rev) |
|
589 | 589 | entries = svn.client.ls( |
|
590 | 590 | self.baseurl + quote(module), optrev(revnum), True, self.ctx |
|
591 | 591 | ) |
|
592 | 592 | files = [ |
|
593 | 593 | n |
|
594 | 594 | for n, e in pycompat.iteritems(entries) |
|
595 | 595 | if e.kind == svn.core.svn_node_file |
|
596 | 596 | ] |
|
597 | 597 | self.removed = set() |
|
598 | 598 | |
|
599 | 599 | files.sort() |
|
600 | 600 | files = zip(files, [rev] * len(files)) |
|
601 | 601 | return (files, copies) |
|
602 | 602 | |
|
603 | 603 | def getchanges(self, rev, full): |
|
604 | 604 | # reuse cache from getchangedfiles |
|
605 | 605 | if self._changescache[0] == rev and not full: |
|
606 | 606 | (files, copies) = self._changescache[1] |
|
607 | 607 | else: |
|
608 | 608 | (files, copies) = self._getchanges(rev, full) |
|
609 | 609 | # caller caches the result, so free it here to release memory |
|
610 | 610 | del self.paths[rev] |
|
611 | 611 | return (files, copies, set()) |
|
612 | 612 | |
|
613 | 613 | def getchangedfiles(self, rev, i): |
|
614 | 614 | # called from filemap - cache computed values for reuse in getchanges |
|
615 | 615 | (files, copies) = self._getchanges(rev, False) |
|
616 | 616 | self._changescache = (rev, (files, copies)) |
|
617 | 617 | return [f[0] for f in files] |
|
618 | 618 | |
|
619 | 619 | def getcommit(self, rev): |
|
620 | 620 | if rev not in self.commits: |
|
621 | 621 | uuid, module, revnum = revsplit(rev) |
|
622 | 622 | self.module = module |
|
623 | 623 | self.reparent(module) |
|
624 | 624 | # We assume that: |
|
625 | 625 | # - requests for revisions after "stop" come from the |
|
626 | 626 | # revision graph backward traversal. Cache all of them |
|
627 | 627 | # down to stop, they will be used eventually. |
|
628 | 628 | # - requests for revisions before "stop" come to get |
|
629 | 629 | # isolated branches parents. Just fetch what is needed. |
|
630 | 630 | stop = self.lastrevs.get(module, 0) |
|
631 | 631 | if revnum < stop: |
|
632 | 632 | stop = revnum + 1 |
|
633 | 633 | self._fetch_revisions(revnum, stop) |
|
634 | 634 | if rev not in self.commits: |
|
635 | 635 | raise error.Abort(_(b'svn: revision %s not found') % revnum) |
|
636 | 636 | revcommit = self.commits[rev] |
|
637 | 637 | # caller caches the result, so free it here to release memory |
|
638 | 638 | del self.commits[rev] |
|
639 | 639 | return revcommit |
|
640 | 640 | |
|
641 | 641 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
642 | 642 | """ fails if revision format does not match the correct format""" |
|
643 | 643 | if not re.match( |
|
644 | 644 | r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-' |
|
645 | 645 | r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]' |
|
646 | 646 | r'{12,12}(.*)@[0-9]+$', |
|
647 | 647 | revstr, |
|
648 | 648 | ): |
|
649 | 649 | raise error.Abort( |
|
650 | 650 | _(b'%s entry %s is not a valid revision identifier') |
|
651 | 651 | % (mapname, revstr) |
|
652 | 652 | ) |
|
653 | 653 | |
|
654 | 654 | def numcommits(self): |
|
655 | 655 | return int(self.head.rsplit(b'@', 1)[1]) - self.startrev |
|
656 | 656 | |
|
657 | 657 | def gettags(self): |
|
658 | 658 | tags = {} |
|
659 | 659 | if self.tags is None: |
|
660 | 660 | return tags |
|
661 | 661 | |
|
662 | 662 | # svn tags are just a convention, project branches left in a |
|
663 | 663 | # 'tags' directory. There is no other relationship than |
|
664 | 664 | # ancestry, which is expensive to discover and makes them hard |
|
665 | 665 | # to update incrementally. Worse, past revisions may be |
|
666 | 666 | # referenced by tags far away in the future, requiring a deep |
|
667 | 667 | # history traversal on every calculation. Current code |
|
668 | 668 | # performs a single backward traversal, tracking moves within |
|
669 | 669 | # the tags directory (tag renaming) and recording a new tag |
|
670 | 670 | # everytime a project is copied from outside the tags |
|
671 | 671 | # directory. It also lists deleted tags, this behaviour may |
|
672 | 672 | # change in the future. |
|
673 | 673 | pendings = [] |
|
674 | 674 | tagspath = self.tags |
|
675 | 675 | start = svn.ra.get_latest_revnum(self.ra) |
|
676 | 676 | stream = self._getlog([self.tags], start, self.startrev) |
|
677 | 677 | try: |
|
678 | 678 | for entry in stream: |
|
679 | 679 | origpaths, revnum, author, date, message = entry |
|
680 | 680 | if not origpaths: |
|
681 | 681 | origpaths = [] |
|
682 | 682 | copies = [ |
|
683 | 683 | (e.copyfrom_path, e.copyfrom_rev, p) |
|
684 | 684 | for p, e in pycompat.iteritems(origpaths) |
|
685 | 685 | if e.copyfrom_path |
|
686 | 686 | ] |
|
687 | 687 | # Apply moves/copies from more specific to general |
|
688 | 688 | copies.sort(reverse=True) |
|
689 | 689 | |
|
690 | 690 | srctagspath = tagspath |
|
691 | 691 | if copies and copies[-1][2] == tagspath: |
|
692 | 692 | # Track tags directory moves |
|
693 | 693 | srctagspath = copies.pop()[0] |
|
694 | 694 | |
|
695 | 695 | for source, sourcerev, dest in copies: |
|
696 | 696 | if not dest.startswith(tagspath + b'/'): |
|
697 | 697 | continue |
|
698 | 698 | for tag in pendings: |
|
699 | 699 | if tag[0].startswith(dest): |
|
700 | 700 | tagpath = source + tag[0][len(dest) :] |
|
701 | 701 | tag[:2] = [tagpath, sourcerev] |
|
702 | 702 | break |
|
703 | 703 | else: |
|
704 | 704 | pendings.append([source, sourcerev, dest]) |
|
705 | 705 | |
|
706 | 706 | # Filter out tags with children coming from different |
|
707 | 707 | # parts of the repository like: |
|
708 | 708 | # /tags/tag.1 (from /trunk:10) |
|
709 | 709 | # /tags/tag.1/foo (from /branches/foo:12) |
|
710 | 710 | # Here /tags/tag.1 is discarded, as well as its children. 
|
711 | 711 | # It happens with tools like cvs2svn. Such tags cannot |
|
712 | 712 | # be represented in mercurial. |
|
713 | 713 | addeds = { |
|
714 | 714 | p: e.copyfrom_path |
|
715 | 715 | for p, e in pycompat.iteritems(origpaths) |
|
716 | 716 | if e.action == b'A' and e.copyfrom_path |
|
717 | 717 | } |
|
718 | 718 | badroots = set() |
|
719 | 719 | for destroot in addeds: |
|
720 | 720 | for source, sourcerev, dest in pendings: |
|
721 | 721 | if not dest.startswith( |
|
722 | 722 | destroot + b'/' |
|
723 | 723 | ) or source.startswith(addeds[destroot] + b'/'): |
|
724 | 724 | continue |
|
725 | 725 | badroots.add(destroot) |
|
726 | 726 | break |
|
727 | 727 | |
|
728 | 728 | for badroot in badroots: |
|
729 | 729 | pendings = [ |
|
730 | 730 | p |
|
731 | 731 | for p in pendings |
|
732 | 732 | if p[2] != badroot |
|
733 | 733 | and not p[2].startswith(badroot + b'/') |
|
734 | 734 | ] |
|
735 | 735 | |
|
736 | 736 | # Tell tag renamings from tag creations |
|
737 | 737 | renamings = [] |
|
738 | 738 | for source, sourcerev, dest in pendings: |
|
739 | 739 | tagname = dest.split(b'/')[-1] |
|
740 | 740 | if source.startswith(srctagspath): |
|
741 | 741 | renamings.append([source, sourcerev, tagname]) |
|
742 | 742 | continue |
|
743 | 743 | if tagname in tags: |
|
744 | 744 | # Keep the latest tag value |
|
745 | 745 | continue |
|
746 | 746 | # From revision may be fake, get one with changes |
|
747 | 747 | try: |
|
748 | 748 | tagid = self.latest(source, sourcerev) |
|
749 | 749 | if tagid and tagname not in tags: |
|
750 | 750 | tags[tagname] = tagid |
|
751 | 751 | except SvnPathNotFound: |
|
752 | 752 | # It happens when we are following directories |
|
753 | 753 | # we assumed were copied with their parents |
|
754 | 754 | # but were really created in the tag |
|
755 | 755 | # directory. |
|
756 | 756 | pass |
|
757 | 757 | pendings = renamings |
|
758 | 758 | tagspath = srctagspath |
|
759 | 759 | finally: |
|
760 | 760 | stream.close() |
|
761 | 761 | return tags |
|
762 | 762 | |
|
763 | 763 | def converted(self, rev, destrev): |
|
764 | 764 | if not self.wc: |
|
765 | 765 | return |
|
766 | 766 | if self.convertfp is None: |
|
767 | 767 | self.convertfp = open( |
|
768 | 768 | os.path.join(self.wc, b'.svn', b'hg-shamap'), b'ab' |
|
769 | 769 | ) |
|
770 | 770 | self.convertfp.write( |
|
771 | 771 | util.tonativeeol(b'%s %d\n' % (destrev, self.revnum(rev))) |
|
772 | 772 | ) |
|
773 | 773 | self.convertfp.flush() |
|
774 | 774 | |
|
775 | 775 | def revid(self, revnum, module=None): |
|
776 | 776 | return b'svn:%s%s@%s' % (self.uuid, module or self.module, revnum) |
|
777 | 777 | |
|
778 | 778 | def revnum(self, rev): |
|
779 | 779 | return int(rev.split(b'@')[-1]) |
|
780 | 780 | |
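A minimal sketch of how these two helpers fit together, assuming a hypothetical UUID and module (values are illustrative only):

    # With self.uuid = b'12345678-abcd-ef01-2345-6789abcdef01' and
    # self.module = b'/trunk', revid(42) builds
    #   b'svn:12345678-abcd-ef01-2345-6789abcdef01/trunk@42'
    # and revnum() recovers the Subversion revision number from such an id:
    #   revnum(b'svn:12345678-abcd-ef01-2345-6789abcdef01/trunk@42') == 42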
|
781 | 781 | def latest(self, path, stop=None): |
|
782 | 782 | """Find the latest revid affecting path, up to stop revision |
|
783 | 783 | number. If stop is None, default to repository latest |
|
784 | 784 | revision. It may return a revision in a different module, |
|
785 | 785 | since a branch may be moved without a change being |
|
786 | 786 | reported. Return None if computed module does not belong to |
|
787 | 787 | rootmodule subtree. |
|
788 | 788 | """ |
|
789 | 789 | |
|
790 | 790 | def findchanges(path, start, stop=None): |
|
791 | 791 | stream = self._getlog([path], start, stop or 1) |
|
792 | 792 | try: |
|
793 | 793 | for entry in stream: |
|
794 | 794 | paths, revnum, author, date, message = entry |
|
795 | 795 | if stop is None and paths: |
|
796 | 796 | # We do not know the latest changed revision, |
|
797 | 797 | # keep the first one with changed paths. |
|
798 | 798 | break |
|
799 | 799 | if revnum <= stop: |
|
800 | 800 | break |
|
801 | 801 | |
|
802 | 802 | for p in paths: |
|
803 | 803 | if not path.startswith(p) or not paths[p].copyfrom_path: |
|
804 | 804 | continue |
|
805 | 805 | newpath = paths[p].copyfrom_path + path[len(p) :] |
|
806 | 806 | self.ui.debug( |
|
807 | 807 | b"branch renamed from %s to %s at %d\n" |
|
808 | 808 | % (path, newpath, revnum) |
|
809 | 809 | ) |
|
810 | 810 | path = newpath |
|
811 | 811 | break |
|
812 | 812 | if not paths: |
|
813 | 813 | revnum = None |
|
814 | 814 | return revnum, path |
|
815 | 815 | finally: |
|
816 | 816 | stream.close() |
|
817 | 817 | |
|
818 | 818 | if not path.startswith(self.rootmodule): |
|
819 | 819 | # Requests on foreign branches may be forbidden at server level |
|
820 | 820 | self.ui.debug(b'ignoring foreign branch %r\n' % path) |
|
821 | 821 | return None |
|
822 | 822 | |
|
823 | 823 | if stop is None: |
|
824 | 824 | stop = svn.ra.get_latest_revnum(self.ra) |
|
825 | 825 | try: |
|
826 | 826 | prevmodule = self.reparent(b'') |
|
827 | 827 | dirent = svn.ra.stat(self.ra, path.strip(b'/'), stop) |
|
828 | 828 | self.reparent(prevmodule) |
|
829 | 829 | except svn.core.SubversionException: |
|
830 | 830 | dirent = None |
|
831 | 831 | if not dirent: |
|
832 | 832 | raise SvnPathNotFound( |
|
833 | 833 | _(b'%s not found up to revision %d') % (path, stop) |
|
834 | 834 | ) |
|
835 | 835 | |
|
836 | 836 | # stat() gives us the previous revision on this line of |
|
837 | 837 | # development, but it might be in *another module*. Fetch the |
|
838 | 838 | # log and detect renames down to the latest revision. |
|
839 | 839 | revnum, realpath = findchanges(path, stop, dirent.created_rev) |
|
840 | 840 | if revnum is None: |
|
841 | 841 | # Tools like svnsync can create empty revisions, when 
|
842 | 842 | # synchronizing only a subtree for instance. These empty |
|
843 | 843 | # revisions created_rev still have their original values |
|
844 | 844 | # despite all changes having disappeared and can be |
|
845 | 845 | # returned by ra.stat(), at least when stating the root |
|
846 | 846 | # module. In that case, do not trust created_rev and scan |
|
847 | 847 | # the whole history. |
|
848 | 848 | revnum, realpath = findchanges(path, stop) |
|
849 | 849 | if revnum is None: |
|
850 | 850 | self.ui.debug(b'ignoring empty branch %r\n' % realpath) |
|
851 | 851 | return None |
|
852 | 852 | |
|
853 | 853 | if not realpath.startswith(self.rootmodule): |
|
854 | 854 | self.ui.debug(b'ignoring foreign branch %r\n' % realpath) |
|
855 | 855 | return None |
|
856 | 856 | return self.revid(revnum, realpath) |
|
857 | 857 | |
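A rough usage sketch, with an invented branch path and revision numbers:

    # latest(b'/branches/stable', 120) walks the log backwards from r120,
    # follows any recorded copy of the path, and returns a revid such as
    #   b'svn:<uuid>/branches/stable@118'
    # or None once the resolved path falls outside the rootmodule subtree.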
|
858 | 858 | def reparent(self, module): |
|
859 | 859 | """Reparent the svn transport and return the previous parent.""" |
|
860 | 860 | if self.prevmodule == module: |
|
861 | 861 | return module |
|
862 | 862 | svnurl = self.baseurl + quote(module) |
|
863 | 863 | prevmodule = self.prevmodule |
|
864 | 864 | if prevmodule is None: |
|
865 | 865 | prevmodule = b'' |
|
866 | 866 | self.ui.debug(b"reparent to %s\n" % svnurl) |
|
867 | 867 | svn.ra.reparent(self.ra, svnurl) |
|
868 | 868 | self.prevmodule = module |
|
869 | 869 | return prevmodule |
|
870 | 870 | |
|
871 | 871 | def expandpaths(self, rev, paths, parents): |
|
872 | 872 | changed, removed = set(), set() |
|
873 | 873 | copies = {} |
|
874 | 874 | |
|
875 | 875 | new_module, revnum = revsplit(rev)[1:] |
|
876 | 876 | if new_module != self.module: |
|
877 | 877 | self.module = new_module |
|
878 | 878 | self.reparent(self.module) |
|
879 | 879 | |
|
880 | 880 | progress = self.ui.makeprogress( |
|
881 | 881 | _(b'scanning paths'), unit=_(b'paths'), total=len(paths) |
|
882 | 882 | ) |
|
883 | 883 | for i, (path, ent) in enumerate(paths): |
|
884 | 884 | progress.update(i, item=path) |
|
885 | 885 | entrypath = self.getrelpath(path) |
|
886 | 886 | |
|
887 | 887 | kind = self._checkpath(entrypath, revnum) |
|
888 | 888 | if kind == svn.core.svn_node_file: |
|
889 | 889 | changed.add(self.recode(entrypath)) |
|
890 | 890 | if not ent.copyfrom_path or not parents: |
|
891 | 891 | continue |
|
892 | 892 | # Copy sources not in parent revisions cannot be |
|
893 | 893 | # represented, ignore their origin for now |
|
894 | 894 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
895 | 895 | if ent.copyfrom_rev < prevnum: |
|
896 | 896 | continue |
|
897 | 897 | copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule) |
|
898 | 898 | if not copyfrom_path: |
|
899 | 899 | continue |
|
900 | 900 | self.ui.debug( |
|
901 | 901 | b"copied to %s from %s@%s\n" |
|
902 | 902 | % (entrypath, copyfrom_path, ent.copyfrom_rev) |
|
903 | 903 | ) |
|
904 | 904 | copies[self.recode(entrypath)] = self.recode(copyfrom_path) |
|
905 | 905 | elif kind == 0: # gone, but had better be a deleted *file* |
|
906 | 906 | self.ui.debug(b"gone from %s\n" % ent.copyfrom_rev) |
|
907 | 907 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
908 | 908 | parentpath = pmodule + b"/" + entrypath |
|
909 | 909 | fromkind = self._checkpath(entrypath, prevnum, pmodule) |
|
910 | 910 | |
|
911 | 911 | if fromkind == svn.core.svn_node_file: |
|
912 | 912 | removed.add(self.recode(entrypath)) |
|
913 | 913 | elif fromkind == svn.core.svn_node_dir: |
|
914 | 914 | oroot = parentpath.strip(b'/') |
|
915 | 915 | nroot = path.strip(b'/') |
|
916 | 916 | children = self._iterfiles(oroot, prevnum) |
|
917 | 917 | for childpath in children: |
|
918 | 918 | childpath = childpath.replace(oroot, nroot) |
|
919 | 919 | childpath = self.getrelpath(b"/" + childpath, pmodule) |
|
920 | 920 | if childpath: |
|
921 | 921 | removed.add(self.recode(childpath)) |
|
922 | 922 | else: |
|
923 | 923 | self.ui.debug( |
|
924 | 924 | b'unknown path in revision %d: %s\n' % (revnum, path) |
|
925 | 925 | ) |
|
926 | 926 | elif kind == svn.core.svn_node_dir: |
|
927 | 927 | if ent.action == b'M': |
|
928 | 928 | # If the directory just had a prop change, |
|
929 | 929 | # then we shouldn't need to look for its children. |
|
930 | 930 | continue |
|
931 | 931 | if ent.action == b'R' and parents: |
|
932 | 932 | # If a directory is replacing a file, mark the previous |
|
933 | 933 | # file as deleted |
|
934 | 934 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
935 | 935 | pkind = self._checkpath(entrypath, prevnum, pmodule) |
|
936 | 936 | if pkind == svn.core.svn_node_file: |
|
937 | 937 | removed.add(self.recode(entrypath)) |
|
938 | 938 | elif pkind == svn.core.svn_node_dir: |
|
939 | 939 | # We do not know what files were kept or removed, |
|
940 | 940 | # mark them all as changed. |
|
941 | 941 | for childpath in self._iterfiles(pmodule, prevnum): |
|
942 | 942 | childpath = self.getrelpath(b"/" + childpath) |
|
943 | 943 | if childpath: |
|
944 | 944 | changed.add(self.recode(childpath)) |
|
945 | 945 | |
|
946 | 946 | for childpath in self._iterfiles(path, revnum): |
|
947 | 947 | childpath = self.getrelpath(b"/" + childpath) |
|
948 | 948 | if childpath: |
|
949 | 949 | changed.add(self.recode(childpath)) |
|
950 | 950 | |
|
951 | 951 | # Handle directory copies |
|
952 | 952 | if not ent.copyfrom_path or not parents: |
|
953 | 953 | continue |
|
954 | 954 | # Copy sources not in parent revisions cannot be |
|
955 | 955 | # represented, ignore their origin for now |
|
956 | 956 | pmodule, prevnum = revsplit(parents[0])[1:] |
|
957 | 957 | if ent.copyfrom_rev < prevnum: |
|
958 | 958 | continue |
|
959 | 959 | copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule) |
|
960 | 960 | if not copyfrompath: |
|
961 | 961 | continue |
|
962 | 962 | self.ui.debug( |
|
963 | 963 | b"mark %s came from %s:%d\n" |
|
964 | 964 | % (path, copyfrompath, ent.copyfrom_rev) |
|
965 | 965 | ) |
|
966 | 966 | children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev) |
|
967 | 967 | for childpath in children: |
|
968 | 968 | childpath = self.getrelpath(b"/" + childpath, pmodule) |
|
969 | 969 | if not childpath: |
|
970 | 970 | continue |
|
971 | 971 | copytopath = path + childpath[len(copyfrompath) :] |
|
972 | 972 | copytopath = self.getrelpath(copytopath) |
|
973 | 973 | copies[self.recode(copytopath)] = self.recode(childpath) |
|
974 | 974 | |
|
975 | 975 | progress.complete() |
|
976 | 976 | changed.update(removed) |
|
977 | 977 | return (list(changed), removed, copies) |
|
978 | 978 | |
|
979 | 979 | def _fetch_revisions(self, from_revnum, to_revnum): |
|
980 | 980 | if from_revnum < to_revnum: |
|
981 | 981 | from_revnum, to_revnum = to_revnum, from_revnum |
|
982 | 982 | |
|
983 | 983 | self.child_cset = None |
|
984 | 984 | |
|
985 | 985 | def parselogentry(orig_paths, revnum, author, date, message): |
|
986 | 986 | """Return the parsed commit object or None, and True if |
|
987 | 987 | the revision is a branch root. |
|
988 | 988 | """ |
|
989 | 989 | self.ui.debug( |
|
990 | 990 | b"parsing revision %d (%d changes)\n" |
|
991 | 991 | % (revnum, len(orig_paths)) |
|
992 | 992 | ) |
|
993 | 993 | |
|
994 | 994 | branched = False |
|
995 | 995 | rev = self.revid(revnum) |
|
996 | 996 | # branch log might return entries for a parent we already have |
|
997 | 997 | |
|
998 | 998 | if rev in self.commits or revnum < to_revnum: |
|
999 | 999 | return None, branched |
|
1000 | 1000 | |
|
1001 | 1001 | parents = [] |
|
1002 | 1002 | # check whether this revision is the start of a branch or part |
|
1003 | 1003 | # of a branch renaming |
|
1004 | 1004 | orig_paths = sorted(pycompat.iteritems(orig_paths)) |
|
1005 | 1005 | root_paths = [ |
|
1006 | 1006 | (p, e) for p, e in orig_paths if self.module.startswith(p) |
|
1007 | 1007 | ] |
|
1008 | 1008 | if root_paths: |
|
1009 | 1009 | path, ent = root_paths[-1] |
|
1010 | 1010 | if ent.copyfrom_path: |
|
1011 | 1011 | branched = True |
|
1012 | 1012 | newpath = ent.copyfrom_path + self.module[len(path) :] |
|
1013 | 1013 | # ent.copyfrom_rev may not be the actual last revision |
|
1014 | 1014 | previd = self.latest(newpath, ent.copyfrom_rev) |
|
1015 | 1015 | if previd is not None: |
|
1016 | 1016 | prevmodule, prevnum = revsplit(previd)[1:] |
|
1017 | 1017 | if prevnum >= self.startrev: |
|
1018 | 1018 | parents = [previd] |
|
1019 | 1019 | self.ui.note( |
|
1020 | 1020 | _(b'found parent of branch %s at %d: %s\n') |
|
1021 | 1021 | % (self.module, prevnum, prevmodule) |
|
1022 | 1022 | ) |
|
1023 | 1023 | else: |
|
1024 | 1024 | self.ui.debug(b"no copyfrom path, don't know what to do.\n") |
|
1025 | 1025 | |
|
1026 | 1026 | paths = [] |
|
1027 | 1027 | # filter out unrelated paths |
|
1028 | 1028 | for path, ent in orig_paths: |
|
1029 | 1029 | if self.getrelpath(path) is None: |
|
1030 | 1030 | continue |
|
1031 | 1031 | paths.append((path, ent)) |
|
1032 | 1032 | |
|
1033 | 1033 | # Example SVN datetime. Includes microseconds. |
|
1034 | 1034 | # ISO-8601 conformant |
|
1035 | 1035 | # '2007-01-04T17:35:00.902377Z' |
|
1036 | 1036 | date = dateutil.parsedate( |
|
1037 | 1037 | date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"] |
|
1038 | 1038 | ) |
|
1039 | 1039 | if self.ui.configbool(b'convert', b'localtimezone'): |
|
1040 | 1040 | date = makedatetimestamp(date[0]) |
|
1041 | 1041 | |
|
1042 | 1042 | if message: |
|
1043 | 1043 | log = self.recode(message) |
|
1044 | 1044 | else: |
|
1045 | 1045 | log = b'' |
|
1046 | 1046 | |
|
1047 | 1047 | if author: |
|
1048 | 1048 | author = self.recode(author) |
|
1049 | 1049 | else: |
|
1050 | 1050 | author = b'' |
|
1051 | 1051 | |
|
1052 | 1052 | try: |
|
1053 | 1053 | branch = self.module.split(b"/")[-1] |
|
1054 | 1054 | if branch == self.trunkname: |
|
1055 | 1055 | branch = None |
|
1056 | 1056 | except IndexError: |
|
1057 | 1057 | branch = None |
|
1058 | 1058 | |
|
1059 | 1059 | cset = commit( |
|
1060 | 1060 | author=author, |
|
1061 | 1061 | date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'), |
|
1062 | 1062 | desc=log, |
|
1063 | 1063 | parents=parents, |
|
1064 | 1064 | branch=branch, |
|
1065 | 1065 | rev=rev, |
|
1066 | 1066 | ) |
|
1067 | 1067 | |
|
1068 | 1068 | self.commits[rev] = cset |
|
1069 | 1069 | # The parents list is *shared* among self.paths and the |
|
1070 | 1070 | # commit object. Both will be updated below. |
|
1071 | 1071 | self.paths[rev] = (paths, cset.parents) |
|
1072 | 1072 | if self.child_cset and not self.child_cset.parents: |
|
1073 | 1073 | self.child_cset.parents[:] = [rev] |
|
1074 | 1074 | self.child_cset = cset |
|
1075 | 1075 | return cset, branched |
|
1076 | 1076 | |
|
1077 | 1077 | self.ui.note( |
|
1078 | 1078 | _(b'fetching revision log for "%s" from %d to %d\n') |
|
1079 | 1079 | % (self.module, from_revnum, to_revnum) |
|
1080 | 1080 | ) |
|
1081 | 1081 | |
|
1082 | 1082 | try: |
|
1083 | 1083 | firstcset = None |
|
1084 | 1084 | lastonbranch = False |
|
1085 | 1085 | stream = self._getlog([self.module], from_revnum, to_revnum) |
|
1086 | 1086 | try: |
|
1087 | 1087 | for entry in stream: |
|
1088 | 1088 | paths, revnum, author, date, message = entry |
|
1089 | 1089 | if revnum < self.startrev: |
|
1090 | 1090 | lastonbranch = True |
|
1091 | 1091 | break |
|
1092 | 1092 | if not paths: |
|
1093 | 1093 | self.ui.debug(b'revision %d has no entries\n' % revnum) |
|
1094 | 1094 | # If we ever leave the loop on an empty |
|
1095 | 1095 | # revision, do not try to get a parent branch |
|
1096 | 1096 | lastonbranch = lastonbranch or revnum == 0 |
|
1097 | 1097 | continue |
|
1098 | 1098 | cset, lastonbranch = parselogentry( |
|
1099 | 1099 | paths, revnum, author, date, message |
|
1100 | 1100 | ) |
|
1101 | 1101 | if cset: |
|
1102 | 1102 | firstcset = cset |
|
1103 | 1103 | if lastonbranch: |
|
1104 | 1104 | break |
|
1105 | 1105 | finally: |
|
1106 | 1106 | stream.close() |
|
1107 | 1107 | |
|
1108 | 1108 | if not lastonbranch and firstcset and not firstcset.parents: |
|
1109 | 1109 | # The first revision of the sequence (the last fetched one) |
|
1110 | 1110 | # has invalid parents if not a branch root. Find the parent |
|
1111 | 1111 | # revision now, if any. |
|
1112 | 1112 | try: |
|
1113 | 1113 | firstrevnum = self.revnum(firstcset.rev) |
|
1114 | 1114 | if firstrevnum > 1: |
|
1115 | 1115 | latest = self.latest(self.module, firstrevnum - 1) |
|
1116 | 1116 | if latest: |
|
1117 | 1117 | firstcset.parents.append(latest) |
|
1118 | 1118 | except SvnPathNotFound: |
|
1119 | 1119 | pass |
|
1120 | 1120 | except svn.core.SubversionException as xxx_todo_changeme: |
|
1121 | 1121 | (inst, num) = xxx_todo_changeme.args |
|
1122 | 1122 | if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION: |
|
1123 | 1123 | raise error.Abort( |
|
1124 | 1124 | _(b'svn: branch has no revision %s') % to_revnum |
|
1125 | 1125 | ) |
|
1126 | 1126 | raise |
|
1127 | 1127 | |
|
1128 | 1128 | def getfile(self, file, rev): |
|
1129 | 1129 | # TODO: ra.get_file transmits the whole file instead of diffs. |
|
1130 | 1130 | if file in self.removed: |
|
1131 | 1131 | return None, None |
|
1132 | 1132 | try: |
|
1133 | 1133 | new_module, revnum = revsplit(rev)[1:] |
|
1134 | 1134 | if self.module != new_module: |
|
1135 | 1135 | self.module = new_module |
|
1136 | 1136 | self.reparent(self.module) |
|
1137 | 1137 | io = stringio() |
|
1138 | 1138 | info = svn.ra.get_file(self.ra, file, revnum, io) |
|
1139 | 1139 | data = io.getvalue() |
|
1140 | 1140 | # ra.get_file() seems to keep a reference on the input buffer |
|
1141 | 1141 | # preventing collection. Release it explicitly. |
|
1142 | 1142 | io.close() |
|
1143 | 1143 | if isinstance(info, list): |
|
1144 | 1144 | info = info[-1] |
|
1145 | 1145 | mode = (b"svn:executable" in info) and b'x' or b'' |
|
1146 | 1146 | mode = (b"svn:special" in info) and b'l' or mode |
|
1147 | 1147 | except svn.core.SubversionException as e: |
|
1148 | 1148 | notfound = ( |
|
1149 | 1149 | svn.core.SVN_ERR_FS_NOT_FOUND, |
|
1150 | 1150 | svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND, |
|
1151 | 1151 | ) |
|
1152 | 1152 | if e.apr_err in notfound: # File not found |
|
1153 | 1153 | return None, None |
|
1154 | 1154 | raise |
|
1155 | 1155 | if mode == b'l': |
|
1156 | 1156 | link_prefix = b"link " |
|
1157 | 1157 | if data.startswith(link_prefix): |
|
1158 | 1158 | data = data[len(link_prefix) :] |
|
1159 | 1159 | return data, mode |
|
1160 | 1160 | |
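An illustrative summary of the return values above (file names are hypothetical):

    # getfile(b'bin/run.sh', rev)  -> (script bytes, b'x')  if svn:executable is set
    # getfile(b'lib/current', rev) -> (b'target/dir', b'l') for svn:special symlinks,
    #                                 with the leading b'link ' prefix stripped
    # getfile(removed_file, rev)   -> (None, None)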
|
1161 | 1161 | def _iterfiles(self, path, revnum): |
|
1162 | 1162 | """Enumerate all files in path at revnum, recursively.""" |
|
1163 | 1163 | path = path.strip(b'/') |
|
1164 | 1164 | pool = svn.core.Pool() |
|
1165 | 1165 | rpath = b'/'.join([self.baseurl, quote(path)]).strip(b'/') |
|
1166 | 1166 | entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool) |
|
1167 | 1167 | if path: |
|
1168 | 1168 | path += b'/' |
|
1169 | 1169 | return ( |
|
1170 | 1170 | (path + p) |
|
1171 | 1171 | for p, e in pycompat.iteritems(entries) |
|
1172 | 1172 | if e.kind == svn.core.svn_node_file |
|
1173 | 1173 | ) |
|
1174 | 1174 | |
|
1175 | 1175 | def getrelpath(self, path, module=None): |
|
1176 | 1176 | if module is None: |
|
1177 | 1177 | module = self.module |
|
1178 | 1178 | # Given the repository url of this wc, say |
|
1179 | 1179 | # "http://server/plone/CMFPlone/branches/Plone-2_0-branch" |
|
1180 | 1180 | # extract the "entry" portion (a relative path) from what |
|
1181 | 1181 | # svn log --xml says, i.e. |
|
1182 | 1182 | # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py" |
|
1183 | 1183 | # that is to say "tests/PloneTestCase.py" |
|
1184 | 1184 | if path.startswith(module): |
|
1185 | 1185 | relative = path.rstrip(b'/')[len(module) :] |
|
1186 | 1186 | if relative.startswith(b'/'): |
|
1187 | 1187 | return relative[1:] |
|
1188 | 1188 | elif relative == b'': |
|
1189 | 1189 | return relative |
|
1190 | 1190 | |
|
1191 | 1191 | # The path is outside our tracked tree... |
|
1192 | 1192 | self.ui.debug(b'%r is not under %r, ignoring\n' % (path, module)) |
|
1193 | 1193 | return None |
|
1194 | 1194 | |
|
1195 | 1195 | def _checkpath(self, path, revnum, module=None): |
|
1196 | 1196 | if module is not None: |
|
1197 | 1197 | prevmodule = self.reparent(b'') |
|
1198 | 1198 | path = module + b'/' + path |
|
1199 | 1199 | try: |
|
1200 | 1200 | # ra.check_path does not like leading slashes very much, it leads |
|
1201 | 1201 | # to PROPFIND subversion errors |
|
1202 | 1202 | return svn.ra.check_path(self.ra, path.strip(b'/'), revnum) |
|
1203 | 1203 | finally: |
|
1204 | 1204 | if module is not None: |
|
1205 | 1205 | self.reparent(prevmodule) |
|
1206 | 1206 | |
|
1207 | 1207 | def _getlog( |
|
1208 | 1208 | self, |
|
1209 | 1209 | paths, |
|
1210 | 1210 | start, |
|
1211 | 1211 | end, |
|
1212 | 1212 | limit=0, |
|
1213 | 1213 | discover_changed_paths=True, |
|
1214 | 1214 | strict_node_history=False, |
|
1215 | 1215 | ): |
|
1216 | 1216 | # Normalize path names, svn >= 1.5 only wants paths relative to |
|
1217 | 1217 | # supplied URL |
|
1218 | 1218 | relpaths = [] |
|
1219 | 1219 | for p in paths: |
|
1220 | 1220 | if not p.startswith(b'/'): |
|
1221 | 1221 | p = self.module + b'/' + p |
|
1222 | 1222 | relpaths.append(p.strip(b'/')) |
|
1223 | 1223 | args = [ |
|
1224 | 1224 | self.baseurl, |
|
1225 | 1225 | relpaths, |
|
1226 | 1226 | start, |
|
1227 | 1227 | end, |
|
1228 | 1228 | limit, |
|
1229 | 1229 | discover_changed_paths, |
|
1230 | 1230 | strict_node_history, |
|
1231 | 1231 | ] |
|
1232 | 1232 | # developer config: convert.svn.debugsvnlog |
|
1233 | 1233 | if not self.ui.configbool(b'convert', b'svn.debugsvnlog'): |
|
1234 | 1234 | return directlogstream(*args) |
|
1235 | 1235 | arg = encodeargs(args) |
|
1236 | 1236 | hgexe = procutil.hgexecutable() |
|
1237 | 1237 | cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe) |
|
1238 |
stdin, stdout = procutil.popen2( |
|
|
1238 | stdin, stdout = procutil.popen2(cmd) | |
|
1239 | 1239 | stdin.write(arg) |
|
1240 | 1240 | try: |
|
1241 | 1241 | stdin.close() |
|
1242 | 1242 | except IOError: |
|
1243 | 1243 | raise error.Abort( |
|
1244 | 1244 | _( |
|
1245 | 1245 | b'Mercurial failed to run itself, check' |
|
1246 | 1246 | b' hg executable is in PATH' |
|
1247 | 1247 | ) |
|
1248 | 1248 | ) |
|
1249 | 1249 | return logstream(stdout) |
|
1250 | 1250 | |
|
1251 | 1251 | |
|
1252 | 1252 | pre_revprop_change = b'''#!/bin/sh |
|
1253 | 1253 | |
|
1254 | 1254 | REPOS="$1" |
|
1255 | 1255 | REV="$2" |
|
1256 | 1256 | USER="$3" |
|
1257 | 1257 | PROPNAME="$4" |
|
1258 | 1258 | ACTION="$5" |
|
1259 | 1259 | |
|
1260 | 1260 | if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi |
|
1261 | 1261 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi |
|
1262 | 1262 | if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi |
|
1263 | 1263 | |
|
1264 | 1264 | echo "Changing prohibited revision property" >&2 |
|
1265 | 1265 | exit 1 |
|
1266 | 1266 | ''' |
|
1267 | 1267 | |
|
1268 | 1268 | |
|
1269 | 1269 | class svn_sink(converter_sink, commandline): |
|
1270 | 1270 | commit_re = re.compile(br'Committed revision (\d+).', re.M) |
|
1271 | 1271 | uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M) |
|
1272 | 1272 | |
|
1273 | 1273 | def prerun(self): |
|
1274 | 1274 | if self.wc: |
|
1275 | 1275 | os.chdir(self.wc) |
|
1276 | 1276 | |
|
1277 | 1277 | def postrun(self): |
|
1278 | 1278 | if self.wc: |
|
1279 | 1279 | os.chdir(self.cwd) |
|
1280 | 1280 | |
|
1281 | 1281 | def join(self, name): |
|
1282 | 1282 | return os.path.join(self.wc, b'.svn', name) |
|
1283 | 1283 | |
|
1284 | 1284 | def revmapfile(self): |
|
1285 | 1285 | return self.join(b'hg-shamap') |
|
1286 | 1286 | |
|
1287 | 1287 | def authorfile(self): |
|
1288 | 1288 | return self.join(b'hg-authormap') |
|
1289 | 1289 | |
|
1290 | 1290 | def __init__(self, ui, repotype, path): |
|
1291 | 1291 | |
|
1292 | 1292 | converter_sink.__init__(self, ui, repotype, path) |
|
1293 | 1293 | commandline.__init__(self, ui, b'svn') |
|
1294 | 1294 | self.delete = [] |
|
1295 | 1295 | self.setexec = [] |
|
1296 | 1296 | self.delexec = [] |
|
1297 | 1297 | self.copies = [] |
|
1298 | 1298 | self.wc = None |
|
1299 | 1299 | self.cwd = encoding.getcwd() |
|
1300 | 1300 | |
|
1301 | 1301 | created = False |
|
1302 | 1302 | if os.path.isfile(os.path.join(path, b'.svn', b'entries')): |
|
1303 | 1303 | self.wc = os.path.realpath(path) |
|
1304 | 1304 | self.run0(b'update') |
|
1305 | 1305 | else: |
|
1306 | 1306 | if not re.search(br'^(file|http|https|svn|svn\+ssh)://', path): |
|
1307 | 1307 | path = os.path.realpath(path) |
|
1308 | 1308 | if os.path.isdir(os.path.dirname(path)): |
|
1309 | 1309 | if not os.path.exists( |
|
1310 | 1310 | os.path.join(path, b'db', b'fs-type') |
|
1311 | 1311 | ): |
|
1312 | 1312 | ui.status( |
|
1313 | 1313 | _(b"initializing svn repository '%s'\n") |
|
1314 | 1314 | % os.path.basename(path) |
|
1315 | 1315 | ) |
|
1316 | 1316 | commandline(ui, b'svnadmin').run0(b'create', path) |
|
1317 | 1317 | created = path |
|
1318 | 1318 | path = util.normpath(path) |
|
1319 | 1319 | if not path.startswith(b'/'): |
|
1320 | 1320 | path = b'/' + path |
|
1321 | 1321 | path = b'file://' + path |
|
1322 | 1322 | |
|
1323 | 1323 | wcpath = os.path.join( |
|
1324 | 1324 | encoding.getcwd(), os.path.basename(path) + b'-wc' |
|
1325 | 1325 | ) |
|
1326 | 1326 | ui.status( |
|
1327 | 1327 | _(b"initializing svn working copy '%s'\n") |
|
1328 | 1328 | % os.path.basename(wcpath) |
|
1329 | 1329 | ) |
|
1330 | 1330 | self.run0(b'checkout', path, wcpath) |
|
1331 | 1331 | |
|
1332 | 1332 | self.wc = wcpath |
|
1333 | 1333 | self.opener = vfsmod.vfs(self.wc) |
|
1334 | 1334 | self.wopener = vfsmod.vfs(self.wc) |
|
1335 | 1335 | self.childmap = mapfile(ui, self.join(b'hg-childmap')) |
|
1336 | 1336 | if util.checkexec(self.wc): |
|
1337 | 1337 | self.is_exec = util.isexec |
|
1338 | 1338 | else: |
|
1339 | 1339 | self.is_exec = None |
|
1340 | 1340 | |
|
1341 | 1341 | if created: |
|
1342 | 1342 | hook = os.path.join(created, b'hooks', b'pre-revprop-change') |
|
1343 | 1343 | fp = open(hook, b'wb') |
|
1344 | 1344 | fp.write(pre_revprop_change) |
|
1345 | 1345 | fp.close() |
|
1346 | 1346 | util.setflags(hook, False, True) |
|
1347 | 1347 | |
|
1348 | 1348 | output = self.run0(b'info') |
|
1349 | 1349 | self.uuid = self.uuid_re.search(output).group(1).strip() |
|
1350 | 1350 | |
|
1351 | 1351 | def wjoin(self, *names): |
|
1352 | 1352 | return os.path.join(self.wc, *names) |
|
1353 | 1353 | |
|
1354 | 1354 | @propertycache |
|
1355 | 1355 | def manifest(self): |
|
1356 | 1356 | # As of svn 1.7, the "add" command fails when receiving |
|
1357 | 1357 | # already tracked entries, so we have to track and filter them |
|
1358 | 1358 | # ourselves. |
|
1359 | 1359 | m = set() |
|
1360 | 1360 | output = self.run0(b'ls', recursive=True, xml=True) |
|
1361 | 1361 | doc = xml.dom.minidom.parseString(output) |
|
1362 | 1362 | for e in doc.getElementsByTagName('entry'): |
|
1363 | 1363 | for n in e.childNodes: |
|
1364 | 1364 | if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name': |
|
1365 | 1365 | continue |
|
1366 | 1366 | name = ''.join( |
|
1367 | 1367 | c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE |
|
1368 | 1368 | ) |
|
1369 | 1369 | # Entries are compared with names coming from |
|
1370 | 1370 | # mercurial, so bytes with undefined encoding. Our |
|
1371 | 1371 | # best bet is to assume they are in local |
|
1372 | 1372 | # encoding. They will be passed to command line calls |
|
1373 | 1373 | # later anyway, so they better be. |
|
1374 | 1374 | m.add(encoding.unitolocal(name)) |
|
1375 | 1375 | break |
|
1376 | 1376 | return m |
|
1377 | 1377 | |
|
1378 | 1378 | def putfile(self, filename, flags, data): |
|
1379 | 1379 | if b'l' in flags: |
|
1380 | 1380 | self.wopener.symlink(data, filename) |
|
1381 | 1381 | else: |
|
1382 | 1382 | try: |
|
1383 | 1383 | if os.path.islink(self.wjoin(filename)): |
|
1384 | 1384 | os.unlink(filename) |
|
1385 | 1385 | except OSError: |
|
1386 | 1386 | pass |
|
1387 | 1387 | |
|
1388 | 1388 | if self.is_exec: |
|
1389 | 1389 | # We need to check executability of the file before the change, |
|
1390 | 1390 | # because `vfs.write` is able to reset exec bit. |
|
1391 | 1391 | wasexec = False |
|
1392 | 1392 | if os.path.exists(self.wjoin(filename)): |
|
1393 | 1393 | wasexec = self.is_exec(self.wjoin(filename)) |
|
1394 | 1394 | |
|
1395 | 1395 | self.wopener.write(filename, data) |
|
1396 | 1396 | |
|
1397 | 1397 | if self.is_exec: |
|
1398 | 1398 | if wasexec: |
|
1399 | 1399 | if b'x' not in flags: |
|
1400 | 1400 | self.delexec.append(filename) |
|
1401 | 1401 | else: |
|
1402 | 1402 | if b'x' in flags: |
|
1403 | 1403 | self.setexec.append(filename) |
|
1404 | 1404 | util.setflags(self.wjoin(filename), False, b'x' in flags) |
|
1405 | 1405 | |
|
1406 | 1406 | def _copyfile(self, source, dest): |
|
1407 | 1407 | # SVN's copy command pukes if the destination file exists, but |
|
1408 | 1408 | # our copyfile method expects to record a copy that has |
|
1409 | 1409 | # already occurred. Cross the semantic gap. |
|
1410 | 1410 | wdest = self.wjoin(dest) |
|
1411 | 1411 | exists = os.path.lexists(wdest) |
|
1412 | 1412 | if exists: |
|
1413 | 1413 | fd, tempname = pycompat.mkstemp( |
|
1414 | 1414 | prefix=b'hg-copy-', dir=os.path.dirname(wdest) |
|
1415 | 1415 | ) |
|
1416 | 1416 | os.close(fd) |
|
1417 | 1417 | os.unlink(tempname) |
|
1418 | 1418 | os.rename(wdest, tempname) |
|
1419 | 1419 | try: |
|
1420 | 1420 | self.run0(b'copy', source, dest) |
|
1421 | 1421 | finally: |
|
1422 | 1422 | self.manifest.add(dest) |
|
1423 | 1423 | if exists: |
|
1424 | 1424 | try: |
|
1425 | 1425 | os.unlink(wdest) |
|
1426 | 1426 | except OSError: |
|
1427 | 1427 | pass |
|
1428 | 1428 | os.rename(tempname, wdest) |
|
1429 | 1429 | |
|
1430 | 1430 | def dirs_of(self, files): |
|
1431 | 1431 | dirs = set() |
|
1432 | 1432 | for f in files: |
|
1433 | 1433 | if os.path.isdir(self.wjoin(f)): |
|
1434 | 1434 | dirs.add(f) |
|
1435 | 1435 | i = len(f) |
|
1436 | 1436 | for i in iter(lambda: f.rfind(b'/', 0, i), -1): |
|
1437 | 1437 | dirs.add(f[:i]) |
|
1438 | 1438 | return dirs |
|
1439 | 1439 | |
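A worked example of dirs_of, with an invented file name and assuming no entry is itself a directory in the working copy:

    # dirs_of([b'foo/bar/baz.c'])
    # -> the rfind() loop yields indexes 7 and 3, adding b'foo/bar' and b'foo',
    #    so the result is {b'foo/bar', b'foo'}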
|
1440 | 1440 | def add_dirs(self, files): |
|
1441 | 1441 | add_dirs = [ |
|
1442 | 1442 | d for d in sorted(self.dirs_of(files)) if d not in self.manifest |
|
1443 | 1443 | ] |
|
1444 | 1444 | if add_dirs: |
|
1445 | 1445 | self.manifest.update(add_dirs) |
|
1446 | 1446 | self.xargs(add_dirs, b'add', non_recursive=True, quiet=True) |
|
1447 | 1447 | return add_dirs |
|
1448 | 1448 | |
|
1449 | 1449 | def add_files(self, files): |
|
1450 | 1450 | files = [f for f in files if f not in self.manifest] |
|
1451 | 1451 | if files: |
|
1452 | 1452 | self.manifest.update(files) |
|
1453 | 1453 | self.xargs(files, b'add', quiet=True) |
|
1454 | 1454 | return files |
|
1455 | 1455 | |
|
1456 | 1456 | def addchild(self, parent, child): |
|
1457 | 1457 | self.childmap[parent] = child |
|
1458 | 1458 | |
|
1459 | 1459 | def revid(self, rev): |
|
1460 | 1460 | return b"svn:%s@%s" % (self.uuid, rev) |
|
1461 | 1461 | |
|
1462 | 1462 | def putcommit( |
|
1463 | 1463 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
1464 | 1464 | ): |
|
1465 | 1465 | for parent in parents: |
|
1466 | 1466 | try: |
|
1467 | 1467 | return self.revid(self.childmap[parent]) |
|
1468 | 1468 | except KeyError: |
|
1469 | 1469 | pass |
|
1470 | 1470 | |
|
1471 | 1471 | # Apply changes to working copy |
|
1472 | 1472 | for f, v in files: |
|
1473 | 1473 | data, mode = source.getfile(f, v) |
|
1474 | 1474 | if data is None: |
|
1475 | 1475 | self.delete.append(f) |
|
1476 | 1476 | else: |
|
1477 | 1477 | self.putfile(f, mode, data) |
|
1478 | 1478 | if f in copies: |
|
1479 | 1479 | self.copies.append([copies[f], f]) |
|
1480 | 1480 | if full: |
|
1481 | 1481 | self.delete.extend(sorted(self.manifest.difference(files))) |
|
1482 | 1482 | files = [f[0] for f in files] |
|
1483 | 1483 | |
|
1484 | 1484 | entries = set(self.delete) |
|
1485 | 1485 | files = frozenset(files) |
|
1486 | 1486 | entries.update(self.add_dirs(files.difference(entries))) |
|
1487 | 1487 | if self.copies: |
|
1488 | 1488 | for s, d in self.copies: |
|
1489 | 1489 | self._copyfile(s, d) |
|
1490 | 1490 | self.copies = [] |
|
1491 | 1491 | if self.delete: |
|
1492 | 1492 | self.xargs(self.delete, b'delete') |
|
1493 | 1493 | for f in self.delete: |
|
1494 | 1494 | self.manifest.remove(f) |
|
1495 | 1495 | self.delete = [] |
|
1496 | 1496 | entries.update(self.add_files(files.difference(entries))) |
|
1497 | 1497 | if self.delexec: |
|
1498 | 1498 | self.xargs(self.delexec, b'propdel', b'svn:executable') |
|
1499 | 1499 | self.delexec = [] |
|
1500 | 1500 | if self.setexec: |
|
1501 | 1501 | self.xargs(self.setexec, b'propset', b'svn:executable', b'*') |
|
1502 | 1502 | self.setexec = [] |
|
1503 | 1503 | |
|
1504 | 1504 | fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-') |
|
1505 | 1505 | fp = os.fdopen(fd, 'wb') |
|
1506 | 1506 | fp.write(util.tonativeeol(commit.desc)) |
|
1507 | 1507 | fp.close() |
|
1508 | 1508 | try: |
|
1509 | 1509 | output = self.run0( |
|
1510 | 1510 | b'commit', |
|
1511 | 1511 | username=stringutil.shortuser(commit.author), |
|
1512 | 1512 | file=messagefile, |
|
1513 | 1513 | encoding=b'utf-8', |
|
1514 | 1514 | ) |
|
1515 | 1515 | try: |
|
1516 | 1516 | rev = self.commit_re.search(output).group(1) |
|
1517 | 1517 | except AttributeError: |
|
1518 | 1518 | if not files: |
|
1519 | 1519 | return parents[0] if parents else b'None' |
|
1520 | 1520 | self.ui.warn(_(b'unexpected svn output:\n')) |
|
1521 | 1521 | self.ui.warn(output) |
|
1522 | 1522 | raise error.Abort(_(b'unable to cope with svn output')) |
|
1523 | 1523 | if commit.rev: |
|
1524 | 1524 | self.run( |
|
1525 | 1525 | b'propset', |
|
1526 | 1526 | b'hg:convert-rev', |
|
1527 | 1527 | commit.rev, |
|
1528 | 1528 | revprop=True, |
|
1529 | 1529 | revision=rev, |
|
1530 | 1530 | ) |
|
1531 | 1531 | if commit.branch and commit.branch != b'default': |
|
1532 | 1532 | self.run( |
|
1533 | 1533 | b'propset', |
|
1534 | 1534 | b'hg:convert-branch', |
|
1535 | 1535 | commit.branch, |
|
1536 | 1536 | revprop=True, |
|
1537 | 1537 | revision=rev, |
|
1538 | 1538 | ) |
|
1539 | 1539 | for parent in parents: |
|
1540 | 1540 | self.addchild(parent, rev) |
|
1541 | 1541 | return self.revid(rev) |
|
1542 | 1542 | finally: |
|
1543 | 1543 | os.unlink(messagefile) |
|
1544 | 1544 | |
|
1545 | 1545 | def puttags(self, tags): |
|
1546 | 1546 | self.ui.warn(_(b'writing Subversion tags is not yet implemented\n')) |
|
1547 | 1547 | return None, None |
|
1548 | 1548 | |
|
1549 | 1549 | def hascommitfrommap(self, rev): |
|
1550 | 1550 | # We trust that revisions referenced in a map are still present 
|
1551 | 1551 | # TODO: implement something better if necessary and feasible |
|
1552 | 1552 | return True |
|
1553 | 1553 | |
|
1554 | 1554 | def hascommitforsplicemap(self, rev): |
|
1555 | 1555 | # This is not correct as one can convert to an existing subversion |
|
1556 | 1556 | # repository and childmap would not list all revisions. Too bad. |
|
1557 | 1557 | if rev in self.childmap: |
|
1558 | 1558 | return True |
|
1559 | 1559 | raise error.Abort( |
|
1560 | 1560 | _( |
|
1561 | 1561 | b'splice map revision %s not found in subversion ' |
|
1562 | 1562 | b'child map (revision lookups are not implemented)' |
|
1563 | 1563 | ) |
|
1564 | 1564 | % rev |
|
1565 | 1565 | ) |
@@ -1,720 +1,719 b'' | |||
|
1 | 1 | # extdiff.py - external diff program support for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''command to allow external programs to compare revisions |
|
9 | 9 | |
|
10 | 10 | The extdiff Mercurial extension allows you to use external programs |
|
11 | 11 | to compare revisions, or revision with working directory. The external |
|
12 | 12 | diff programs are called with a configurable set of options and two |
|
13 | 13 | non-option arguments: paths to directories containing snapshots of |
|
14 | 14 | files to compare. |
|
15 | 15 | |
|
16 | 16 | If there is more than one file being compared and the "child" revision |
|
17 | 17 | is the working directory, any modifications made in the external diff |
|
18 | 18 | program will be copied back to the working directory from the temporary |
|
19 | 19 | directory. |
|
20 | 20 | |
|
21 | 21 | The extdiff extension also allows you to configure new diff commands, so |
|
22 | 22 | you do not always need to type :hg:`extdiff -p kdiff3`. :: 
|
23 | 23 | |
|
24 | 24 | [extdiff] |
|
25 | 25 | # add new command that runs GNU diff(1) in 'context diff' mode |
|
26 | 26 | cdiff = gdiff -Nprc5 |
|
27 | 27 | ## or the old way: |
|
28 | 28 | #cmd.cdiff = gdiff |
|
29 | 29 | #opts.cdiff = -Nprc5 |
|
30 | 30 | |
|
31 | 31 | # add new command called meld, runs meld (no need to name twice). If |
|
32 | 32 | # the meld executable is not available, the meld tool in [merge-tools] |
|
33 | 33 | # will be used, if available |
|
34 | 34 | meld = |
|
35 | 35 | |
|
36 | 36 | # add new command called vimdiff, runs gvimdiff with DirDiff plugin |
|
37 | 37 | # (see http://www.vim.org/scripts/script.php?script_id=102) Non |
|
38 | 38 | # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in |
|
39 | 39 | # your .vimrc |
|
40 | 40 | vimdiff = gvim -f "+next" \\ |
|
41 | 41 | "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))" |
|
42 | 42 | |
|
43 | 43 | Tool arguments can include variables that are expanded at runtime:: |
|
44 | 44 | |
|
45 | 45 | $parent1, $plabel1 - filename, descriptive label of first parent |
|
46 | 46 | $child, $clabel - filename, descriptive label of child revision |
|
47 | 47 | $parent2, $plabel2 - filename, descriptive label of second parent |
|
48 | 48 | $root - repository root |
|
49 | 49 | $parent is an alias for $parent1. |
|
50 | 50 | |
|
51 | 51 | The extdiff extension will look in your [diff-tools] and [merge-tools] |
|
52 | 52 | sections for diff tool arguments, when none are specified in [extdiff]. |
|
53 | 53 | |
|
54 | 54 | :: |
|
55 | 55 | |
|
56 | 56 | [extdiff] |
|
57 | 57 | kdiff3 = |
|
58 | 58 | |
|
59 | 59 | [diff-tools] |
|
60 | 60 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
61 | 61 | |
|
62 | 62 | If a program has a graphical interface, it might be interesting to tell |
|
63 | 63 | Mercurial about it. It will prevent the program from being mistakenly |
|
64 | 64 | used in a terminal-only environment (such as an SSH terminal session), |
|
65 | 65 | and will make :hg:`extdiff --per-file` open multiple file diffs at once |
|
66 | 66 | instead of one by one (if you still want to open file diffs one by one, |
|
67 | 67 | you can use the --confirm option). |
|
68 | 68 | |
|
69 | 69 | Declaring that a tool has a graphical interface can be done with the |
|
70 | 70 | ``gui`` flag next to where ``diffargs`` are specified: |
|
71 | 71 | |
|
72 | 72 | :: |
|
73 | 73 | |
|
74 | 74 | [diff-tools] |
|
75 | 75 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
76 | 76 | kdiff3.gui = true |
|
77 | 77 | |
|
78 | 78 | You can use -I/-X and list of file or directory names like normal |
|
79 | 79 | :hg:`diff` command. The extdiff extension makes snapshots of only |
|
80 | 80 | needed files, so running the external diff program will actually be |
|
81 | 81 | pretty fast (at least faster than having to compare the entire tree). |
|
82 | 82 | ''' |
|
83 | 83 | |
|
84 | 84 | from __future__ import absolute_import |
|
85 | 85 | |
|
86 | 86 | import os |
|
87 | 87 | import re |
|
88 | 88 | import shutil |
|
89 | 89 | import stat |
|
90 | 90 | import subprocess |
|
91 | 91 | |
|
92 | 92 | from mercurial.i18n import _ |
|
93 | 93 | from mercurial.node import ( |
|
94 | 94 | nullid, |
|
95 | 95 | short, |
|
96 | 96 | ) |
|
97 | 97 | from mercurial import ( |
|
98 | 98 | archival, |
|
99 | 99 | cmdutil, |
|
100 | 100 | encoding, |
|
101 | 101 | error, |
|
102 | 102 | filemerge, |
|
103 | 103 | formatter, |
|
104 | 104 | pycompat, |
|
105 | 105 | registrar, |
|
106 | 106 | scmutil, |
|
107 | 107 | util, |
|
108 | 108 | ) |
|
109 | 109 | from mercurial.utils import ( |
|
110 | 110 | procutil, |
|
111 | 111 | stringutil, |
|
112 | 112 | ) |
|
113 | 113 | |
|
114 | 114 | cmdtable = {} |
|
115 | 115 | command = registrar.command(cmdtable) |
|
116 | 116 | |
|
117 | 117 | configtable = {} |
|
118 | 118 | configitem = registrar.configitem(configtable) |
|
119 | 119 | |
|
120 | 120 | configitem( |
|
121 | 121 | b'extdiff', br'opts\..*', default=b'', generic=True, |
|
122 | 122 | ) |
|
123 | 123 | |
|
124 | 124 | configitem( |
|
125 | 125 | b'extdiff', br'gui\..*', generic=True, |
|
126 | 126 | ) |
|
127 | 127 | |
|
128 | 128 | configitem( |
|
129 | 129 | b'diff-tools', br'.*\.diffargs$', default=None, generic=True, |
|
130 | 130 | ) |
|
131 | 131 | |
|
132 | 132 | configitem( |
|
133 | 133 | b'diff-tools', br'.*\.gui$', generic=True, |
|
134 | 134 | ) |
|
135 | 135 | |
|
136 | 136 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
137 | 137 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
138 | 138 | # be specifying the version(s) of Mercurial they are tested with, or |
|
139 | 139 | # leave the attribute unspecified. |
|
140 | 140 | testedwith = b'ships-with-hg-core' |
|
141 | 141 | |
|
142 | 142 | |
|
143 | 143 | def snapshot(ui, repo, files, node, tmproot, listsubrepos): |
|
144 | 144 | '''snapshot files as of some revision |
|
145 | 145 | if not using snapshot, -I/-X does not work and recursive diff |
|
146 | 146 | in tools like kdiff3 and meld displays too many files.''' |
|
147 | 147 | dirname = os.path.basename(repo.root) |
|
148 | 148 | if dirname == b"": |
|
149 | 149 | dirname = b"root" |
|
150 | 150 | if node is not None: |
|
151 | 151 | dirname = b'%s.%s' % (dirname, short(node)) |
|
152 | 152 | base = os.path.join(tmproot, dirname) |
|
153 | 153 | os.mkdir(base) |
|
154 | 154 | fnsandstat = [] |
|
155 | 155 | |
|
156 | 156 | if node is not None: |
|
157 | 157 | ui.note( |
|
158 | 158 | _(b'making snapshot of %d files from rev %s\n') |
|
159 | 159 | % (len(files), short(node)) |
|
160 | 160 | ) |
|
161 | 161 | else: |
|
162 | 162 | ui.note( |
|
163 | 163 | _(b'making snapshot of %d files from working directory\n') |
|
164 | 164 | % (len(files)) |
|
165 | 165 | ) |
|
166 | 166 | |
|
167 | 167 | if files: |
|
168 | 168 | repo.ui.setconfig(b"ui", b"archivemeta", False) |
|
169 | 169 | |
|
170 | 170 | archival.archive( |
|
171 | 171 | repo, |
|
172 | 172 | base, |
|
173 | 173 | node, |
|
174 | 174 | b'files', |
|
175 | 175 | match=scmutil.matchfiles(repo, files), |
|
176 | 176 | subrepos=listsubrepos, |
|
177 | 177 | ) |
|
178 | 178 | |
|
179 | 179 | for fn in sorted(files): |
|
180 | 180 | wfn = util.pconvert(fn) |
|
181 | 181 | ui.note(b' %s\n' % wfn) |
|
182 | 182 | |
|
183 | 183 | if node is None: |
|
184 | 184 | dest = os.path.join(base, wfn) |
|
185 | 185 | |
|
186 | 186 | fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest))) |
|
187 | 187 | return dirname, fnsandstat |
|
188 | 188 | |
|
189 | 189 | |
|
190 | 190 | def formatcmdline( |
|
191 | 191 | cmdline, |
|
192 | 192 | repo_root, |
|
193 | 193 | do3way, |
|
194 | 194 | parent1, |
|
195 | 195 | plabel1, |
|
196 | 196 | parent2, |
|
197 | 197 | plabel2, |
|
198 | 198 | child, |
|
199 | 199 | clabel, |
|
200 | 200 | ): |
|
201 | 201 | # Function to quote file/dir names in the argument string. |
|
202 | 202 | # When not operating in 3-way mode, an empty string is |
|
203 | 203 | # returned for parent2 |
|
204 | 204 | replace = { |
|
205 | 205 | b'parent': parent1, |
|
206 | 206 | b'parent1': parent1, |
|
207 | 207 | b'parent2': parent2, |
|
208 | 208 | b'plabel1': plabel1, |
|
209 | 209 | b'plabel2': plabel2, |
|
210 | 210 | b'child': child, |
|
211 | 211 | b'clabel': clabel, |
|
212 | 212 | b'root': repo_root, |
|
213 | 213 | } |
|
214 | 214 | |
|
215 | 215 | def quote(match): |
|
216 | 216 | pre = match.group(2) |
|
217 | 217 | key = match.group(3) |
|
218 | 218 | if not do3way and key == b'parent2': |
|
219 | 219 | return pre |
|
220 | 220 | return pre + procutil.shellquote(replace[key]) |
|
221 | 221 | |
|
222 | 222 | # Match parent2 first, so 'parent1?' will match both parent1 and parent |
|
223 | 223 | regex = ( |
|
224 | 224 | br'''(['"]?)([^\s'"$]*)''' |
|
225 | 225 | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1' |
|
226 | 226 | ) |
|
227 | 227 | if not do3way and not re.search(regex, cmdline): |
|
228 | 228 | cmdline += b' $parent1 $child' |
|
229 | 229 | return re.sub(regex, quote, cmdline) |
|
230 | 230 | |
|
231 | 231 | |
|
232 | 232 | def _systembackground(cmd, environ=None, cwd=None): |
|
233 | 233 | ''' like 'procutil.system', but returns the Popen object directly |
|
234 | 234 | so we don't have to wait on it. |
|
235 | 235 | ''' |
|
236 | cmd = procutil.quotecommand(cmd) | |
|
237 | 236 | env = procutil.shellenviron(environ) |
|
238 | 237 | proc = subprocess.Popen( |
|
239 | 238 | procutil.tonativestr(cmd), |
|
240 | 239 | shell=True, |
|
241 | 240 | close_fds=procutil.closefds, |
|
242 | 241 | env=procutil.tonativeenv(env), |
|
243 | 242 | cwd=pycompat.rapply(procutil.tonativestr, cwd), |
|
244 | 243 | ) |
|
245 | 244 | return proc |
|
246 | 245 | |
|
247 | 246 | |
|
248 | 247 | def _runperfilediff( |
|
249 | 248 | cmdline, |
|
250 | 249 | repo_root, |
|
251 | 250 | ui, |
|
252 | 251 | guitool, |
|
253 | 252 | do3way, |
|
254 | 253 | confirm, |
|
255 | 254 | commonfiles, |
|
256 | 255 | tmproot, |
|
257 | 256 | dir1a, |
|
258 | 257 | dir1b, |
|
259 | 258 | dir2root, |
|
260 | 259 | dir2, |
|
261 | 260 | rev1a, |
|
262 | 261 | rev1b, |
|
263 | 262 | rev2, |
|
264 | 263 | ): |
|
265 | 264 | # Note that we need to sort the list of files because it was |
|
266 | 265 | # built in an "unstable" way and it's annoying to get files in a |
|
267 | 266 | # random order, especially when "confirm" mode is enabled. |
|
268 | 267 | waitprocs = [] |
|
269 | 268 | totalfiles = len(commonfiles) |
|
270 | 269 | for idx, commonfile in enumerate(sorted(commonfiles)): |
|
271 | 270 | path1a = os.path.join(tmproot, dir1a, commonfile) |
|
272 | 271 | label1a = commonfile + rev1a |
|
273 | 272 | if not os.path.isfile(path1a): |
|
274 | 273 | path1a = pycompat.osdevnull |
|
275 | 274 | |
|
276 | 275 | path1b = b'' |
|
277 | 276 | label1b = b'' |
|
278 | 277 | if do3way: |
|
279 | 278 | path1b = os.path.join(tmproot, dir1b, commonfile) |
|
280 | 279 | label1b = commonfile + rev1b |
|
281 | 280 | if not os.path.isfile(path1b): |
|
282 | 281 | path1b = pycompat.osdevnull |
|
283 | 282 | |
|
284 | 283 | path2 = os.path.join(dir2root, dir2, commonfile) |
|
285 | 284 | label2 = commonfile + rev2 |
|
286 | 285 | |
|
287 | 286 | if confirm: |
|
288 | 287 | # Prompt before showing this diff |
|
289 | 288 | difffiles = _(b'diff %s (%d of %d)') % ( |
|
290 | 289 | commonfile, |
|
291 | 290 | idx + 1, |
|
292 | 291 | totalfiles, |
|
293 | 292 | ) |
|
294 | 293 | responses = _( |
|
295 | 294 | b'[Yns?]' |
|
296 | 295 | b'$$ &Yes, show diff' |
|
297 | 296 | b'$$ &No, skip this diff' |
|
298 | 297 | b'$$ &Skip remaining diffs' |
|
299 | 298 | b'$$ &? (display help)' |
|
300 | 299 | ) |
|
301 | 300 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) |
|
302 | 301 | if r == 3: # ? |
|
303 | 302 | while r == 3: |
|
304 | 303 | for c, t in ui.extractchoices(responses)[1]: |
|
305 | 304 | ui.write(b'%s - %s\n' % (c, encoding.lower(t))) |
|
306 | 305 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) |
|
307 | 306 | if r == 0: # yes |
|
308 | 307 | pass |
|
309 | 308 | elif r == 1: # no |
|
310 | 309 | continue |
|
311 | 310 | elif r == 2: # skip |
|
312 | 311 | break |
|
313 | 312 | |
|
314 | 313 | curcmdline = formatcmdline( |
|
315 | 314 | cmdline, |
|
316 | 315 | repo_root, |
|
317 | 316 | do3way=do3way, |
|
318 | 317 | parent1=path1a, |
|
319 | 318 | plabel1=label1a, |
|
320 | 319 | parent2=path1b, |
|
321 | 320 | plabel2=label1b, |
|
322 | 321 | child=path2, |
|
323 | 322 | clabel=label2, |
|
324 | 323 | ) |
|
325 | 324 | |
|
326 | 325 | if confirm or not guitool: |
|
327 | 326 | # Run the comparison program and wait for it to exit |
|
328 | 327 | # before we show the next file. |
|
329 | 328 | # This is because either we need to wait for confirmation |
|
330 | 329 | # from the user between each invocation, or because, as far |
|
331 | 330 | # as we know, the tool doesn't have a GUI, in which case |
|
332 | 331 | # we can't run multiple CLI programs at the same time. |
|
333 | 332 | ui.debug( |
|
334 | 333 | b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot) |
|
335 | 334 | ) |
|
336 | 335 | ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
337 | 336 | else: |
|
338 | 337 | # Run the comparison program but don't wait, as we're |
|
339 | 338 | # going to rapid-fire each file diff and then wait on |
|
340 | 339 | # the whole group. |
|
341 | 340 | ui.debug( |
|
342 | 341 | b'running %r in %s (backgrounded)\n' |
|
343 | 342 | % (pycompat.bytestr(curcmdline), tmproot) |
|
344 | 343 | ) |
|
345 | 344 | proc = _systembackground(curcmdline, cwd=tmproot) |
|
346 | 345 | waitprocs.append(proc) |
|
347 | 346 | |
|
348 | 347 | if waitprocs: |
|
349 | 348 | with ui.timeblockedsection(b'extdiff'): |
|
350 | 349 | for proc in waitprocs: |
|
351 | 350 | proc.wait() |
|
352 | 351 | |
|
353 | 352 | |
|
354 | 353 | def dodiff(ui, repo, cmdline, pats, opts, guitool=False): |
|
355 | 354 | '''Do the actual diff: |
|
356 | 355 | |
|
357 | 356 | - copy to a temp structure if diffing 2 internal revisions |
|
358 | 357 | - copy to a temp structure if diffing working revision with |
|
359 | 358 | another one and more than 1 file is changed |
|
360 | 359 | - just invoke the diff for a single file in the working dir |
|
361 | 360 | ''' |
|
362 | 361 | |
|
363 | 362 | cmdutil.check_at_most_one_arg(opts, b'rev', b'change') |
|
364 | 363 | revs = opts.get(b'rev') |
|
365 | 364 | change = opts.get(b'change') |
|
366 | 365 | do3way = b'$parent2' in cmdline |
|
367 | 366 | |
|
368 | 367 | if change: |
|
369 | 368 | ctx2 = scmutil.revsingle(repo, change, None) |
|
370 | 369 | ctx1a, ctx1b = ctx2.p1(), ctx2.p2() |
|
371 | 370 | else: |
|
372 | 371 | ctx1a, ctx2 = scmutil.revpair(repo, revs) |
|
373 | 372 | if not revs: |
|
374 | 373 | ctx1b = repo[None].p2() |
|
375 | 374 | else: |
|
376 | 375 | ctx1b = repo[nullid] |
|
377 | 376 | |
|
378 | 377 | perfile = opts.get(b'per_file') |
|
379 | 378 | confirm = opts.get(b'confirm') |
|
380 | 379 | |
|
381 | 380 | node1a = ctx1a.node() |
|
382 | 381 | node1b = ctx1b.node() |
|
383 | 382 | node2 = ctx2.node() |
|
384 | 383 | |
|
385 | 384 | # Disable 3-way merge if there is only one parent |
|
386 | 385 | if do3way: |
|
387 | 386 | if node1b == nullid: |
|
388 | 387 | do3way = False |
|
389 | 388 | |
|
390 | 389 | subrepos = opts.get(b'subrepos') |
|
391 | 390 | |
|
392 | 391 | matcher = scmutil.match(repo[node2], pats, opts) |
|
393 | 392 | |
|
394 | 393 | if opts.get(b'patch'): |
|
395 | 394 | if subrepos: |
|
396 | 395 | raise error.Abort(_(b'--patch cannot be used with --subrepos')) |
|
397 | 396 | if perfile: |
|
398 | 397 | raise error.Abort(_(b'--patch cannot be used with --per-file')) |
|
399 | 398 | if node2 is None: |
|
400 | 399 | raise error.Abort(_(b'--patch requires two revisions')) |
|
401 | 400 | else: |
|
402 | 401 | st = repo.status(node1a, node2, matcher, listsubrepos=subrepos) |
|
403 | 402 | mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed) |
|
404 | 403 | if do3way: |
|
405 | 404 | stb = repo.status(node1b, node2, matcher, listsubrepos=subrepos) |
|
406 | 405 | mod_b, add_b, rem_b = ( |
|
407 | 406 | set(stb.modified), |
|
408 | 407 | set(stb.added), |
|
409 | 408 | set(stb.removed), |
|
410 | 409 | ) |
|
411 | 410 | else: |
|
412 | 411 | mod_b, add_b, rem_b = set(), set(), set() |
|
413 | 412 | modadd = mod_a | add_a | mod_b | add_b |
|
414 | 413 | common = modadd | rem_a | rem_b |
|
415 | 414 | if not common: |
|
416 | 415 | return 0 |
|
417 | 416 | |
|
418 | 417 | tmproot = pycompat.mkdtemp(prefix=b'extdiff.') |
|
419 | 418 | try: |
|
420 | 419 | if not opts.get(b'patch'): |
|
421 | 420 | # Always make a copy of node1a (and node1b, if applicable) |
|
422 | 421 | dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a) |
|
423 | 422 | dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot, subrepos)[ |
|
424 | 423 | 0 |
|
425 | 424 | ] |
|
426 | 425 | rev1a = b'@%d' % repo[node1a].rev() |
|
427 | 426 | if do3way: |
|
428 | 427 | dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b) |
|
429 | 428 | dir1b = snapshot( |
|
430 | 429 | ui, repo, dir1b_files, node1b, tmproot, subrepos |
|
431 | 430 | )[0] |
|
432 | 431 | rev1b = b'@%d' % repo[node1b].rev() |
|
433 | 432 | else: |
|
434 | 433 | dir1b = None |
|
435 | 434 | rev1b = b'' |
|
436 | 435 | |
|
437 | 436 | fnsandstat = [] |
|
438 | 437 | |
|
439 | 438 | # If node2 is not the wc or there is >1 change, copy it 
|
440 | 439 | dir2root = b'' |
|
441 | 440 | rev2 = b'' |
|
442 | 441 | if node2: |
|
443 | 442 | dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0] |
|
444 | 443 | rev2 = b'@%d' % repo[node2].rev() |
|
445 | 444 | elif len(common) > 1: |
|
446 | 445 | # we only actually need to get the files to copy back to |
|
447 | 446 | # the working dir in this case (because the other cases |
|
448 | 447 | # are: diffing 2 revisions or single file -- in which case |
|
449 | 448 | # the file is already directly passed to the diff tool). |
|
450 | 449 | dir2, fnsandstat = snapshot( |
|
451 | 450 | ui, repo, modadd, None, tmproot, subrepos |
|
452 | 451 | ) |
|
453 | 452 | else: |
|
454 | 453 | # This lets the diff tool open the changed file directly |
|
455 | 454 | dir2 = b'' |
|
456 | 455 | dir2root = repo.root |
|
457 | 456 | |
|
458 | 457 | label1a = rev1a |
|
459 | 458 | label1b = rev1b |
|
460 | 459 | label2 = rev2 |
|
461 | 460 | |
|
462 | 461 | # If only one change, diff the files instead of the directories |
|
463 | 462 | # Handle bogus modifies correctly by checking if the files exist |
|
464 | 463 | if len(common) == 1: |
|
465 | 464 | common_file = util.localpath(common.pop()) |
|
466 | 465 | dir1a = os.path.join(tmproot, dir1a, common_file) |
|
467 | 466 | label1a = common_file + rev1a |
|
468 | 467 | if not os.path.isfile(dir1a): |
|
469 | 468 | dir1a = pycompat.osdevnull |
|
470 | 469 | if do3way: |
|
471 | 470 | dir1b = os.path.join(tmproot, dir1b, common_file) |
|
472 | 471 | label1b = common_file + rev1b |
|
473 | 472 | if not os.path.isfile(dir1b): |
|
474 | 473 | dir1b = pycompat.osdevnull |
|
475 | 474 | dir2 = os.path.join(dir2root, dir2, common_file) |
|
476 | 475 | label2 = common_file + rev2 |
|
477 | 476 | else: |
|
478 | 477 | template = b'hg-%h.patch' |
|
479 | 478 | with formatter.nullformatter(ui, b'extdiff', {}) as fm: |
|
480 | 479 | cmdutil.export( |
|
481 | 480 | repo, |
|
482 | 481 | [repo[node1a].rev(), repo[node2].rev()], |
|
483 | 482 | fm, |
|
484 | 483 | fntemplate=repo.vfs.reljoin(tmproot, template), |
|
485 | 484 | match=matcher, |
|
486 | 485 | ) |
|
487 | 486 | label1a = cmdutil.makefilename(repo[node1a], template) |
|
488 | 487 | label2 = cmdutil.makefilename(repo[node2], template) |
|
489 | 488 | dir1a = repo.vfs.reljoin(tmproot, label1a) |
|
490 | 489 | dir2 = repo.vfs.reljoin(tmproot, label2) |
|
491 | 490 | dir1b = None |
|
492 | 491 | label1b = None |
|
493 | 492 | fnsandstat = [] |
|
494 | 493 | |
|
495 | 494 | if not perfile: |
|
496 | 495 | # Run the external tool on the 2 temp directories or the patches |
|
497 | 496 | cmdline = formatcmdline( |
|
498 | 497 | cmdline, |
|
499 | 498 | repo.root, |
|
500 | 499 | do3way=do3way, |
|
501 | 500 | parent1=dir1a, |
|
502 | 501 | plabel1=label1a, |
|
503 | 502 | parent2=dir1b, |
|
504 | 503 | plabel2=label1b, |
|
505 | 504 | child=dir2, |
|
506 | 505 | clabel=label2, |
|
507 | 506 | ) |
|
508 | 507 | ui.debug( |
|
509 | 508 | b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot) |
|
510 | 509 | ) |
|
511 | 510 | ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
512 | 511 | else: |
|
513 | 512 | # Run the external tool once for each pair of files |
|
514 | 513 | _runperfilediff( |
|
515 | 514 | cmdline, |
|
516 | 515 | repo.root, |
|
517 | 516 | ui, |
|
518 | 517 | guitool=guitool, |
|
519 | 518 | do3way=do3way, |
|
520 | 519 | confirm=confirm, |
|
521 | 520 | commonfiles=common, |
|
522 | 521 | tmproot=tmproot, |
|
523 | 522 | dir1a=dir1a, |
|
524 | 523 | dir1b=dir1b, |
|
525 | 524 | dir2root=dir2root, |
|
526 | 525 | dir2=dir2, |
|
527 | 526 | rev1a=rev1a, |
|
528 | 527 | rev1b=rev1b, |
|
529 | 528 | rev2=rev2, |
|
530 | 529 | ) |
|
531 | 530 | |
|
532 | 531 | for copy_fn, working_fn, st in fnsandstat: |
|
533 | 532 | cpstat = os.lstat(copy_fn) |
|
534 | 533 | # Some tools copy the file and attributes, so mtime may not detect |
|
535 | 534 | # all changes. A size check will detect more cases, but not all. |
|
536 | 535 | # The only certain way to detect every case is to diff all files, |
|
537 | 536 | # which could be expensive. |
|
538 | 537 | # copyfile() carries over the permission, so the mode check could |
|
539 | 538 | # be in an 'elif' branch, but for the case where the file has |
|
540 | 539 | # changed without affecting mtime or size. |
|
541 | 540 | if ( |
|
542 | 541 | cpstat[stat.ST_MTIME] != st[stat.ST_MTIME] |
|
543 | 542 | or cpstat.st_size != st.st_size |
|
544 | 543 | or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100) |
|
545 | 544 | ): |
|
546 | 545 | ui.debug( |
|
547 | 546 | b'file changed while diffing. ' |
|
548 | 547 | b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn) |
|
549 | 548 | ) |
|
550 | 549 | util.copyfile(copy_fn, working_fn) |
|
551 | 550 | |
|
552 | 551 | return 1 |
|
553 | 552 | finally: |
|
554 | 553 | ui.note(_(b'cleaning up temp directory\n')) |
|
555 | 554 | shutil.rmtree(tmproot) |
|
556 | 555 | |
|
557 | 556 | |
|
558 | 557 | extdiffopts = ( |
|
559 | 558 | [ |
|
560 | 559 | ( |
|
561 | 560 | b'o', |
|
562 | 561 | b'option', |
|
563 | 562 | [], |
|
564 | 563 | _(b'pass option to comparison program'), |
|
565 | 564 | _(b'OPT'), |
|
566 | 565 | ), |
|
567 | 566 | (b'r', b'rev', [], _(b'revision'), _(b'REV')), |
|
568 | 567 | (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')), |
|
569 | 568 | ( |
|
570 | 569 | b'', |
|
571 | 570 | b'per-file', |
|
572 | 571 | False, |
|
573 | 572 | _(b'compare each file instead of revision snapshots'), |
|
574 | 573 | ), |
|
575 | 574 | ( |
|
576 | 575 | b'', |
|
577 | 576 | b'confirm', |
|
578 | 577 | False, |
|
579 | 578 | _(b'prompt user before each external program invocation'), |
|
580 | 579 | ), |
|
581 | 580 | (b'', b'patch', None, _(b'compare patches for two revisions')), |
|
582 | 581 | ] |
|
583 | 582 | + cmdutil.walkopts |
|
584 | 583 | + cmdutil.subrepoopts |
|
585 | 584 | ) |
|
586 | 585 | |
|
587 | 586 | |
|
588 | 587 | @command( |
|
589 | 588 | b'extdiff', |
|
590 | 589 | [(b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),] |
|
591 | 590 | + extdiffopts, |
|
592 | 591 | _(b'hg extdiff [OPT]... [FILE]...'), |
|
593 | 592 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
594 | 593 | inferrepo=True, |
|
595 | 594 | ) |
|
596 | 595 | def extdiff(ui, repo, *pats, **opts): |
|
597 | 596 | '''use external program to diff repository (or selected files) |
|
598 | 597 | |
|
599 | 598 | Show differences between revisions for the specified files, using |
|
600 | 599 | an external program. The default program used is diff, with |
|
601 | 600 | default options "-Npru". |
|
602 | 601 | |
|
603 | 602 | To select a different program, use the -p/--program option. The |
|
604 | 603 | program will be passed the names of two directories to compare, |
|
605 | 604 | unless the --per-file option is specified (see below). To pass |
|
606 | 605 | additional options to the program, use -o/--option. These will be |
|
607 | 606 | passed before the names of the directories or files to compare. |
|
608 | 607 | |
|
609 | 608 | When two revision arguments are given, then changes are shown |
|
610 | 609 | between those revisions. If only one revision is specified then |
|
611 | 610 | that revision is compared to the working directory, and, when no |
|
612 | 611 | revisions are specified, the working directory files are compared |
|
613 | 612 | to its parent. |
|
614 | 613 | |
|
615 | 614 | The --per-file option runs the external program repeatedly on each |
|
616 | 615 | file to diff, instead of once on two directories. By default, |
|
617 | 616 | this happens one by one, where the next file diff is open in the |
|
618 | 617 | external program only once the previous external program (for the |
|
619 | 618 | previous file diff) has exited. If the external program has a |
|
620 | 619 | graphical interface, it can open all the file diffs at once instead |
|
621 | 620 | of one by one. See :hg:`help -e extdiff` for information about how |
|
622 | 621 | to tell Mercurial that a given program has a graphical interface. |
|
623 | 622 | |
|
624 | 623 | The --confirm option will prompt the user before each invocation of |
|
625 | 624 | the external program. It is ignored if --per-file isn't specified. |
|
626 | 625 | ''' |
|
627 | 626 | opts = pycompat.byteskwargs(opts) |
|
628 | 627 | program = opts.get(b'program') |
|
629 | 628 | option = opts.get(b'option') |
|
630 | 629 | if not program: |
|
631 | 630 | program = b'diff' |
|
632 | 631 | option = option or [b'-Npru'] |
|
633 | 632 | cmdline = b' '.join(map(procutil.shellquote, [program] + option)) |
|
634 | 633 | return dodiff(ui, repo, cmdline, pats, opts) |
|
635 | 634 | |
|
636 | 635 | |
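As a usage illustration of the options documented in the help text above (the kdiff3 tool name is only an example, not something this changeset prescribes), invocations could look like::

    hg extdiff -p kdiff3 -r 1.2 -r tip        # compare two revisions with kdiff3
    hg extdiff --per-file --confirm -c tip    # one external run per changed file, prompting first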
|
637 | 636 | class savedcmd(object): |
|
638 | 637 | """use external program to diff repository (or selected files) |
|
639 | 638 | |
|
640 | 639 | Show differences between revisions for the specified files, using |
|
641 | 640 | the following program:: |
|
642 | 641 | |
|
643 | 642 | %(path)s |
|
644 | 643 | |
|
645 | 644 | When two revision arguments are given, then changes are shown |
|
646 | 645 | between those revisions. If only one revision is specified then |
|
647 | 646 | that revision is compared to the working directory, and, when no |
|
648 | 647 | revisions are specified, the working directory files are compared |
|
649 | 648 | to its parent. |
|
650 | 649 | """ |
|
651 | 650 | |
|
652 | 651 | def __init__(self, path, cmdline, isgui): |
|
653 | 652 | # We can't pass non-ASCII through docstrings (and path is |
|
654 | 653 | # in an unknown encoding anyway), but avoid double separators on |
|
655 | 654 | # Windows |
|
656 | 655 | docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\') |
|
657 | 656 | self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))} |
|
658 | 657 | self._cmdline = cmdline |
|
659 | 658 | self._isgui = isgui |
|
660 | 659 | |
|
661 | 660 | def __call__(self, ui, repo, *pats, **opts): |
|
662 | 661 | opts = pycompat.byteskwargs(opts) |
|
663 | 662 | options = b' '.join(map(procutil.shellquote, opts[b'option'])) |
|
664 | 663 | if options: |
|
665 | 664 | options = b' ' + options |
|
666 | 665 | return dodiff( |
|
667 | 666 | ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui |
|
668 | 667 | ) |
|
669 | 668 | |
|
670 | 669 | |
|
671 | 670 | def uisetup(ui): |
|
672 | 671 | for cmd, path in ui.configitems(b'extdiff'): |
|
673 | 672 | path = util.expandpath(path) |
|
674 | 673 | if cmd.startswith(b'cmd.'): |
|
675 | 674 | cmd = cmd[4:] |
|
676 | 675 | if not path: |
|
677 | 676 | path = procutil.findexe(cmd) |
|
678 | 677 | if path is None: |
|
679 | 678 | path = filemerge.findexternaltool(ui, cmd) or cmd |
|
680 | 679 | diffopts = ui.config(b'extdiff', b'opts.' + cmd) |
|
681 | 680 | cmdline = procutil.shellquote(path) |
|
682 | 681 | if diffopts: |
|
683 | 682 | cmdline += b' ' + diffopts |
|
684 | 683 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) |
|
685 | 684 | elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'): |
|
686 | 685 | continue |
|
687 | 686 | else: |
|
688 | 687 | if path: |
|
689 | 688 | # case "cmd = path opts" |
|
690 | 689 | cmdline = path |
|
691 | 690 | diffopts = len(pycompat.shlexsplit(cmdline)) > 1 |
|
692 | 691 | else: |
|
693 | 692 | # case "cmd =" |
|
694 | 693 | path = procutil.findexe(cmd) |
|
695 | 694 | if path is None: |
|
696 | 695 | path = filemerge.findexternaltool(ui, cmd) or cmd |
|
697 | 696 | cmdline = procutil.shellquote(path) |
|
698 | 697 | diffopts = False |
|
699 | 698 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) |
|
700 | 699 | # look for diff arguments in [diff-tools] then [merge-tools] |
|
701 | 700 | if not diffopts: |
|
702 | 701 | key = cmd + b'.diffargs' |
|
703 | 702 | for section in (b'diff-tools', b'merge-tools'): |
|
704 | 703 | args = ui.config(section, key) |
|
705 | 704 | if args: |
|
706 | 705 | cmdline += b' ' + args |
|
707 | 706 | if isgui is None: |
|
708 | 707 | isgui = ui.configbool(section, cmd + b'.gui') or False |
|
709 | 708 | break |
|
710 | 709 | command( |
|
711 | 710 | cmd, |
|
712 | 711 | extdiffopts[:], |
|
713 | 712 | _(b'hg %s [OPTION]... [FILE]...') % cmd, |
|
714 | 713 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
715 | 714 | inferrepo=True, |
|
716 | 715 | )(savedcmd(path, cmdline, isgui)) |
|
717 | 716 | |
|
718 | 717 | |
|
719 | 718 | # tell hggettext to extract docstrings from these functions: |
|
720 | 719 | i18nfunctions = [savedcmd] |
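Hedged illustration of the `[extdiff]` configuration keys that uisetup() above recognizes; the tool names and options below are invented for the example, not taken from this diff::

    [extdiff]
    # "cmd.NAME =" registers NAME as a command; an empty value means look up
    # the executable called NAME on $PATH (or via the merge-tool registry)
    cmd.meld =
    # "opts.NAME" adds extra arguments to the cmd.NAME invocation
    opts.meld = --auto-compare
    # "gui.NAME" marks the tool as graphical (relevant for --per-file)
    gui.meld = true
    # the short form "NAME = PATH OPTS" is handled by the else branch
    vimdiff = vim -d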
@@ -1,718 +1,718 b'' | |||
|
1 | 1 | # chgserver.py - command server extension for cHg |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2011 Yuya Nishihara <yuya@tcha.org> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """command server extension for cHg |
|
9 | 9 | |
|
10 | 10 | 'S' channel (read/write) |
|
11 | 11 | propagate ui.system() request to client |
|
12 | 12 | |
|
13 | 13 | 'attachio' command |
|
14 | 14 | attach client's stdio passed by sendmsg() |
|
15 | 15 | |
|
16 | 16 | 'chdir' command |
|
17 | 17 | change current directory |
|
18 | 18 | |
|
19 | 19 | 'setenv' command |
|
20 | 20 | replace os.environ completely |
|
21 | 21 | |
|
22 | 22 | 'setumask' command (DEPRECATED) |
|
23 | 23 | 'setumask2' command |
|
24 | 24 | set umask |
|
25 | 25 | |
|
26 | 26 | 'validate' command |
|
27 | 27 | reload the config and check if the server is up to date |
|
28 | 28 | |
|
29 | 29 | Config |
|
30 | 30 | ------ |
|
31 | 31 | |
|
32 | 32 | :: |
|
33 | 33 | |
|
34 | 34 | [chgserver] |
|
35 | 35 | # how long (in seconds) should an idle chg server exit |
|
36 | 36 | idletimeout = 3600 |
|
37 | 37 | |
|
38 | 38 | # whether to skip config or env change checks |
|
39 | 39 | skiphash = False |
|
40 | 40 | """ |
|
41 | 41 | |
|
42 | 42 | from __future__ import absolute_import |
|
43 | 43 | |
|
44 | 44 | import inspect |
|
45 | 45 | import os |
|
46 | 46 | import re |
|
47 | 47 | import socket |
|
48 | 48 | import stat |
|
49 | 49 | import struct |
|
50 | 50 | import time |
|
51 | 51 | |
|
52 | 52 | from .i18n import _ |
|
53 | 53 | from .pycompat import ( |
|
54 | 54 | getattr, |
|
55 | 55 | setattr, |
|
56 | 56 | ) |
|
57 | 57 | |
|
58 | 58 | from . import ( |
|
59 | 59 | commandserver, |
|
60 | 60 | encoding, |
|
61 | 61 | error, |
|
62 | 62 | extensions, |
|
63 | 63 | node, |
|
64 | 64 | pycompat, |
|
65 | 65 | util, |
|
66 | 66 | ) |
|
67 | 67 | |
|
68 | 68 | from .utils import ( |
|
69 | 69 | hashutil, |
|
70 | 70 | procutil, |
|
71 | 71 | stringutil, |
|
72 | 72 | ) |
|
73 | 73 | |
|
74 | 74 | |
|
75 | 75 | def _hashlist(items): |
|
76 | 76 | """return sha1 hexdigest for a list""" |
|
77 | 77 | return node.hex(hashutil.sha1(stringutil.pprint(items)).digest()) |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | # sensitive config sections affecting confighash |
|
81 | 81 | _configsections = [ |
|
82 | 82 | b'alias', # affects global state commands.table |
|
83 | 83 | b'diff-tools', # affects whether gui or not in extdiff's uisetup |
|
84 | 84 | b'eol', # uses setconfig('eol', ...) |
|
85 | 85 | b'extdiff', # uisetup will register new commands |
|
86 | 86 | b'extensions', |
|
87 | 87 | b'fastannotate', # affects annotate command and adds fastannonate cmd |
|
88 | 88 | b'merge-tools', # affects whether gui or not in extdiff's uisetup |
|
89 | 89 | b'schemes', # extsetup will update global hg.schemes |
|
90 | 90 | ] |
|
91 | 91 | |
|
92 | 92 | _configsectionitems = [ |
|
93 | 93 | (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup |
|
94 | 94 | ] |
|
95 | 95 | |
|
96 | 96 | # sensitive environment variables affecting confighash |
|
97 | 97 | _envre = re.compile( |
|
98 | 98 | br'''\A(?: |
|
99 | 99 | CHGHG |
|
100 | 100 | |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)? |
|
101 | 101 | |HG(?:ENCODING|PLAIN).* |
|
102 | 102 | |LANG(?:UAGE)? |
|
103 | 103 | |LC_.* |
|
104 | 104 | |LD_.* |
|
105 | 105 | |PATH |
|
106 | 106 | |PYTHON.* |
|
107 | 107 | |TERM(?:INFO)? |
|
108 | 108 | |TZ |
|
109 | 109 | )\Z''', |
|
110 | 110 | re.X, |
|
111 | 111 | ) |
|
112 | 112 | |
|
113 | 113 | |
|
114 | 114 | def _confighash(ui): |
|
115 | 115 | """return a quick hash for detecting config/env changes |
|
116 | 116 | |
|
117 | 117 | confighash is the hash of sensitive config items and environment variables. |
|
118 | 118 | |
|
119 | 119 | for chgserver, it is designed that once confighash changes, the server is |
|
120 | 120 | not qualified to serve its client and should redirect the client to a new |
|
121 | 121 | server. different from mtimehash, confighash change will not mark the |
|
122 | 122 | server outdated and exit since the user can have different configs at the |
|
123 | 123 | same time. |
|
124 | 124 | """ |
|
125 | 125 | sectionitems = [] |
|
126 | 126 | for section in _configsections: |
|
127 | 127 | sectionitems.append(ui.configitems(section)) |
|
128 | 128 | for section, item in _configsectionitems: |
|
129 | 129 | sectionitems.append(ui.config(section, item)) |
|
130 | 130 | sectionhash = _hashlist(sectionitems) |
|
131 | 131 | # If $CHGHG is set, the change to $HG should not trigger a new chg server |
|
132 | 132 | if b'CHGHG' in encoding.environ: |
|
133 | 133 | ignored = {b'HG'} |
|
134 | 134 | else: |
|
135 | 135 | ignored = set() |
|
136 | 136 | envitems = [ |
|
137 | 137 | (k, v) |
|
138 | 138 | for k, v in pycompat.iteritems(encoding.environ) |
|
139 | 139 | if _envre.match(k) and k not in ignored |
|
140 | 140 | ] |
|
141 | 141 | envhash = _hashlist(sorted(envitems)) |
|
142 | 142 | return sectionhash[:6] + envhash[:6] |
|
143 | 143 | |
|
144 | 144 | |
|
145 | 145 | def _getmtimepaths(ui): |
|
146 | 146 | """get a list of paths that should be checked to detect change |
|
147 | 147 | |
|
148 | 148 | The list will include: |
|
149 | 149 | - extensions (will not cover all files for complex extensions) |
|
150 | 150 | - mercurial/__version__.py |
|
151 | 151 | - python binary |
|
152 | 152 | """ |
|
153 | 153 | modules = [m for n, m in extensions.extensions(ui)] |
|
154 | 154 | try: |
|
155 | 155 | from . import __version__ |
|
156 | 156 | |
|
157 | 157 | modules.append(__version__) |
|
158 | 158 | except ImportError: |
|
159 | 159 | pass |
|
160 | 160 | files = [] |
|
161 | 161 | if pycompat.sysexecutable: |
|
162 | 162 | files.append(pycompat.sysexecutable) |
|
163 | 163 | for m in modules: |
|
164 | 164 | try: |
|
165 | 165 | files.append(pycompat.fsencode(inspect.getabsfile(m))) |
|
166 | 166 | except TypeError: |
|
167 | 167 | pass |
|
168 | 168 | return sorted(set(files)) |
|
169 | 169 | |
|
170 | 170 | |
|
171 | 171 | def _mtimehash(paths): |
|
172 | 172 | """return a quick hash for detecting file changes |
|
173 | 173 | |
|
174 | 174 | mtimehash calls stat on given paths and calculate a hash based on size and |
|
175 | 175 | mtime of each file. mtimehash does not read file content because reading is |
|
176 | 176 | expensive. therefore it's not 100% reliable for detecting content changes. |
|
177 | 177 | it's possible to return different hashes for same file contents. |
|
178 | 178 | it's also possible to return a same hash for different file contents for |
|
179 | 179 | some carefully crafted situation. |
|
180 | 180 | |
|
181 | 181 | for chgserver, it is designed that once mtimehash changes, the server is |
|
182 | 182 | considered outdated immediately and should no longer provide service. |
|
183 | 183 | |
|
184 | 184 | mtimehash is not included in confighash because we only know the paths of |
|
185 | 185 | extensions after importing them (there is imp.find_module but that faces |
|
186 | 186 | race conditions). We need to calculate confighash without importing. |
|
187 | 187 | """ |
|
188 | 188 | |
|
189 | 189 | def trystat(path): |
|
190 | 190 | try: |
|
191 | 191 | st = os.stat(path) |
|
192 | 192 | return (st[stat.ST_MTIME], st.st_size) |
|
193 | 193 | except OSError: |
|
194 | 194 | # could be ENOENT, EPERM etc. not fatal in any case |
|
195 | 195 | pass |
|
196 | 196 | |
|
197 | 197 | return _hashlist(pycompat.maplist(trystat, paths))[:12] |
|
198 | 198 | |
|
199 | 199 | |
|
200 | 200 | class hashstate(object): |
|
201 | 201 | """a structure storing confighash, mtimehash, paths used for mtimehash""" |
|
202 | 202 | |
|
203 | 203 | def __init__(self, confighash, mtimehash, mtimepaths): |
|
204 | 204 | self.confighash = confighash |
|
205 | 205 | self.mtimehash = mtimehash |
|
206 | 206 | self.mtimepaths = mtimepaths |
|
207 | 207 | |
|
208 | 208 | @staticmethod |
|
209 | 209 | def fromui(ui, mtimepaths=None): |
|
210 | 210 | if mtimepaths is None: |
|
211 | 211 | mtimepaths = _getmtimepaths(ui) |
|
212 | 212 | confighash = _confighash(ui) |
|
213 | 213 | mtimehash = _mtimehash(mtimepaths) |
|
214 | 214 | ui.log( |
|
215 | 215 | b'cmdserver', |
|
216 | 216 | b'confighash = %s mtimehash = %s\n', |
|
217 | 217 | confighash, |
|
218 | 218 | mtimehash, |
|
219 | 219 | ) |
|
220 | 220 | return hashstate(confighash, mtimehash, mtimepaths) |
|
221 | 221 | |
|
222 | 222 | |
|
223 | 223 | def _newchgui(srcui, csystem, attachio): |
|
224 | 224 | class chgui(srcui.__class__): |
|
225 | 225 | def __init__(self, src=None): |
|
226 | 226 | super(chgui, self).__init__(src) |
|
227 | 227 | if src: |
|
228 | 228 | self._csystem = getattr(src, '_csystem', csystem) |
|
229 | 229 | else: |
|
230 | 230 | self._csystem = csystem |
|
231 | 231 | |
|
232 | 232 | def _runsystem(self, cmd, environ, cwd, out): |
|
233 | 233 | # fallback to the original system method if |
|
234 | 234 | # a. the output stream is not stdout (e.g. stderr, cStringIO), |
|
235 | 235 | # b. or stdout is redirected by protectfinout(), |
|
236 | 236 | # because the chg client is not aware of these situations and |
|
237 | 237 | # will behave differently (i.e. write to stdout). |
|
238 | 238 | if ( |
|
239 | 239 | out is not self.fout |
|
240 | 240 | or not util.safehasattr(self.fout, b'fileno') |
|
241 | 241 | or self.fout.fileno() != procutil.stdout.fileno() |
|
242 | 242 | or self._finoutredirected |
|
243 | 243 | ): |
|
244 | 244 | return procutil.system(cmd, environ=environ, cwd=cwd, out=out) |
|
245 | 245 | self.flush() |
|
246 | 246 | return self._csystem(cmd, procutil.shellenviron(environ), cwd) |
|
247 | 247 | |
|
248 | 248 | def _runpager(self, cmd, env=None): |
|
249 | 249 | self._csystem( |
|
250 | 250 | cmd, |
|
251 | 251 | procutil.shellenviron(env), |
|
252 | 252 | type=b'pager', |
|
253 | 253 | cmdtable={b'attachio': attachio}, |
|
254 | 254 | ) |
|
255 | 255 | return True |
|
256 | 256 | |
|
257 | 257 | return chgui(srcui) |
|
258 | 258 | |
|
259 | 259 | |
|
260 | 260 | def _loadnewui(srcui, args, cdebug): |
|
261 | 261 | from . import dispatch # avoid cycle |
|
262 | 262 | |
|
263 | 263 | newui = srcui.__class__.load() |
|
264 | 264 | for a in [b'fin', b'fout', b'ferr', b'environ']: |
|
265 | 265 | setattr(newui, a, getattr(srcui, a)) |
|
266 | 266 | if util.safehasattr(srcui, b'_csystem'): |
|
267 | 267 | newui._csystem = srcui._csystem |
|
268 | 268 | |
|
269 | 269 | # command line args |
|
270 | 270 | options = dispatch._earlyparseopts(newui, args) |
|
271 | 271 | dispatch._parseconfig(newui, options[b'config']) |
|
272 | 272 | |
|
273 | 273 | # stolen from tortoisehg.util.copydynamicconfig() |
|
274 | 274 | for section, name, value in srcui.walkconfig(): |
|
275 | 275 | source = srcui.configsource(section, name) |
|
276 | 276 | if b':' in source or source == b'--config' or source.startswith(b'$'): |
|
277 | 277 | # path:line or command line, or environ |
|
278 | 278 | continue |
|
279 | 279 | newui.setconfig(section, name, value, source) |
|
280 | 280 | |
|
281 | 281 | # load wd and repo config, copied from dispatch.py |
|
282 | 282 | cwd = options[b'cwd'] |
|
283 | 283 | cwd = cwd and os.path.realpath(cwd) or None |
|
284 | 284 | rpath = options[b'repository'] |
|
285 | 285 | path, newlui = dispatch._getlocal(newui, rpath, wd=cwd) |
|
286 | 286 | |
|
287 | 287 | extensions.populateui(newui) |
|
288 | 288 | commandserver.setuplogging(newui, fp=cdebug) |
|
289 | 289 | if newui is not newlui: |
|
290 | 290 | extensions.populateui(newlui) |
|
291 | 291 | commandserver.setuplogging(newlui, fp=cdebug) |
|
292 | 292 | |
|
293 | 293 | return (newui, newlui) |
|
294 | 294 | |
|
295 | 295 | |
|
296 | 296 | class channeledsystem(object): |
|
297 | 297 | """Propagate ui.system() request in the following format: |
|
298 | 298 | |
|
299 | 299 | payload length (unsigned int), |
|
300 | 300 | type, '\0', |
|
301 | 301 | cmd, '\0', |
|
302 | 302 | cwd, '\0', |
|
303 | 303 | envkey, '=', val, '\0', |
|
304 | 304 | ... |
|
305 | 305 | envkey, '=', val |
|
306 | 306 | |
|
307 | 307 | if type == 'system', waits for: |
|
308 | 308 | |
|
309 | 309 | exitcode length (unsigned int), |
|
310 | 310 | exitcode (int) |
|
311 | 311 | |
|
312 | 312 | if type == 'pager', repetitively waits for a command name ending with '\n' |
|
313 | 313 | and executes it defined by cmdtable, or exits the loop if the command name |
|
314 | 314 | is empty. |
|
315 | 315 | """ |
|
316 | 316 | |
|
317 | 317 | def __init__(self, in_, out, channel): |
|
318 | 318 | self.in_ = in_ |
|
319 | 319 | self.out = out |
|
320 | 320 | self.channel = channel |
|
321 | 321 | |
|
322 | 322 | def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None): |
|
323 |     | args = [type, |

    | 323 | args = [type, cmd, os.path.abspath(cwd or b'.')] |
|
324 | 324 | args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ)) |
|
325 | 325 | data = b'\0'.join(args) |
|
326 | 326 | self.out.write(struct.pack(b'>cI', self.channel, len(data))) |
|
327 | 327 | self.out.write(data) |
|
328 | 328 | self.out.flush() |
|
329 | 329 | |
|
330 | 330 | if type == b'system': |
|
331 | 331 | length = self.in_.read(4) |
|
332 | 332 | (length,) = struct.unpack(b'>I', length) |
|
333 | 333 | if length != 4: |
|
334 | 334 | raise error.Abort(_(b'invalid response')) |
|
335 | 335 | (rc,) = struct.unpack(b'>i', self.in_.read(4)) |
|
336 | 336 | return rc |
|
337 | 337 | elif type == b'pager': |
|
338 | 338 | while True: |
|
339 | 339 | cmd = self.in_.readline()[:-1] |
|
340 | 340 | if not cmd: |
|
341 | 341 | break |
|
342 | 342 | if cmdtable and cmd in cmdtable: |
|
343 | 343 | cmdtable[cmd]() |
|
344 | 344 | else: |
|
345 | 345 | raise error.Abort(_(b'unexpected command: %s') % cmd) |
|
346 | 346 | else: |
|
347 | 347 | raise error.ProgrammingError(b'invalid S channel type: %s' % type) |
|
348 | 348 | |
|
349 | 349 | |
|
350 | 350 | _iochannels = [ |
|
351 | 351 | # server.ch, ui.fp, mode |
|
352 | 352 | (b'cin', b'fin', 'rb'), |
|
353 | 353 | (b'cout', b'fout', 'wb'), |
|
354 | 354 | (b'cerr', b'ferr', 'wb'), |
|
355 | 355 | ] |
|
356 | 356 | |
|
357 | 357 | |
|
358 | 358 | class chgcmdserver(commandserver.server): |
|
359 | 359 | def __init__( |
|
360 | 360 | self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress |
|
361 | 361 | ): |
|
362 | 362 | super(chgcmdserver, self).__init__( |
|
363 | 363 | _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio), |
|
364 | 364 | repo, |
|
365 | 365 | fin, |
|
366 | 366 | fout, |
|
367 | 367 | prereposetups, |
|
368 | 368 | ) |
|
369 | 369 | self.clientsock = sock |
|
370 | 370 | self._ioattached = False |
|
371 | 371 | self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio" |
|
372 | 372 | self.hashstate = hashstate |
|
373 | 373 | self.baseaddress = baseaddress |
|
374 | 374 | if hashstate is not None: |
|
375 | 375 | self.capabilities = self.capabilities.copy() |
|
376 | 376 | self.capabilities[b'validate'] = chgcmdserver.validate |
|
377 | 377 | |
|
378 | 378 | def cleanup(self): |
|
379 | 379 | super(chgcmdserver, self).cleanup() |
|
380 | 380 | # dispatch._runcatch() does not flush outputs if exception is not |
|
381 | 381 | # handled by dispatch._dispatch() |
|
382 | 382 | self.ui.flush() |
|
383 | 383 | self._restoreio() |
|
384 | 384 | self._ioattached = False |
|
385 | 385 | |
|
386 | 386 | def attachio(self): |
|
387 | 387 | """Attach to client's stdio passed via unix domain socket; all |
|
388 | 388 | channels except cresult will no longer be used |
|
389 | 389 | """ |
|
390 | 390 | # tell client to sendmsg() with 1-byte payload, which makes it |
|
391 | 391 | # distinctive from "attachio\n" command consumed by client.read() |
|
392 | 392 | self.clientsock.sendall(struct.pack(b'>cI', b'I', 1)) |
|
393 | 393 | clientfds = util.recvfds(self.clientsock.fileno()) |
|
394 | 394 | self.ui.log(b'chgserver', b'received fds: %r\n', clientfds) |
|
395 | 395 | |
|
396 | 396 | ui = self.ui |
|
397 | 397 | ui.flush() |
|
398 | 398 | self._saveio() |
|
399 | 399 | for fd, (cn, fn, mode) in zip(clientfds, _iochannels): |
|
400 | 400 | assert fd > 0 |
|
401 | 401 | fp = getattr(ui, fn) |
|
402 | 402 | os.dup2(fd, fp.fileno()) |
|
403 | 403 | os.close(fd) |
|
404 | 404 | if self._ioattached: |
|
405 | 405 | continue |
|
406 | 406 | # reset buffering mode when client is first attached. as we want |
|
407 | 407 | # to see output immediately on pager, the mode stays unchanged |
|
408 | 408 | # when client re-attached. ferr is unchanged because it should |
|
409 | 409 | # be unbuffered no matter if it is a tty or not. |
|
410 | 410 | if fn == b'ferr': |
|
411 | 411 | newfp = fp |
|
412 | 412 | else: |
|
413 | 413 | # make it line buffered explicitly because the default is |
|
414 | 414 | # decided on first write(), where fout could be a pager. |
|
415 | 415 | if fp.isatty(): |
|
416 | 416 | bufsize = 1 # line buffered |
|
417 | 417 | else: |
|
418 | 418 | bufsize = -1 # system default |
|
419 | 419 | newfp = os.fdopen(fp.fileno(), mode, bufsize) |
|
420 | 420 | setattr(ui, fn, newfp) |
|
421 | 421 | setattr(self, cn, newfp) |
|
422 | 422 | |
|
423 | 423 | self._ioattached = True |
|
424 | 424 | self.cresult.write(struct.pack(b'>i', len(clientfds))) |
|
425 | 425 | |
|
426 | 426 | def _saveio(self): |
|
427 | 427 | if self._oldios: |
|
428 | 428 | return |
|
429 | 429 | ui = self.ui |
|
430 | 430 | for cn, fn, _mode in _iochannels: |
|
431 | 431 | ch = getattr(self, cn) |
|
432 | 432 | fp = getattr(ui, fn) |
|
433 | 433 | fd = os.dup(fp.fileno()) |
|
434 | 434 | self._oldios.append((ch, fp, fd)) |
|
435 | 435 | |
|
436 | 436 | def _restoreio(self): |
|
437 | 437 | ui = self.ui |
|
438 | 438 | for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels): |
|
439 | 439 | newfp = getattr(ui, fn) |
|
440 | 440 | # close newfp while it's associated with client; otherwise it |
|
441 | 441 | # would be closed when newfp is deleted |
|
442 | 442 | if newfp is not fp: |
|
443 | 443 | newfp.close() |
|
444 | 444 | # restore original fd: fp is open again |
|
445 | 445 | os.dup2(fd, fp.fileno()) |
|
446 | 446 | os.close(fd) |
|
447 | 447 | setattr(self, cn, ch) |
|
448 | 448 | setattr(ui, fn, fp) |
|
449 | 449 | del self._oldios[:] |
|
450 | 450 | |
|
451 | 451 | def validate(self): |
|
452 | 452 | """Reload the config and check if the server is up to date |
|
453 | 453 | |
|
454 | 454 | Read a list of '\0' separated arguments. |
|
455 | 455 | Write a non-empty list of '\0' separated instruction strings or '\0' |
|
456 | 456 | if the list is empty. |
|
457 | 457 | An instruction string could be either: |
|
458 | 458 | - "unlink $path", the client should unlink the path to stop the |
|
459 | 459 | outdated server. |
|
460 | 460 | - "redirect $path", the client should attempt to connect to $path |
|
461 | 461 | first. If it does not work, start a new server. It implies |
|
462 | 462 | "reconnect". |
|
463 | 463 | - "exit $n", the client should exit directly with code n. |
|
464 | 464 | This may happen if we cannot parse the config. |
|
465 | 465 | - "reconnect", the client should close the connection and |
|
466 | 466 | reconnect. |
|
467 | 467 | If neither "reconnect" nor "redirect" is included in the instruction |
|
468 | 468 | list, the client can continue with this server after completing all |
|
469 | 469 | the instructions. |
|
470 | 470 | """ |
|
471 | 471 | from . import dispatch # avoid cycle |
|
472 | 472 | |
|
473 | 473 | args = self._readlist() |
|
474 | 474 | try: |
|
475 | 475 | self.ui, lui = _loadnewui(self.ui, args, self.cdebug) |
|
476 | 476 | except error.ParseError as inst: |
|
477 | 477 | dispatch._formatparse(self.ui.warn, inst) |
|
478 | 478 | self.ui.flush() |
|
479 | 479 | self.cresult.write(b'exit 255') |
|
480 | 480 | return |
|
481 | 481 | except error.Abort as inst: |
|
482 | 482 | self.ui.error(_(b"abort: %s\n") % inst) |
|
483 | 483 | if inst.hint: |
|
484 | 484 | self.ui.error(_(b"(%s)\n") % inst.hint) |
|
485 | 485 | self.ui.flush() |
|
486 | 486 | self.cresult.write(b'exit 255') |
|
487 | 487 | return |
|
488 | 488 | newhash = hashstate.fromui(lui, self.hashstate.mtimepaths) |
|
489 | 489 | insts = [] |
|
490 | 490 | if newhash.mtimehash != self.hashstate.mtimehash: |
|
491 | 491 | addr = _hashaddress(self.baseaddress, self.hashstate.confighash) |
|
492 | 492 | insts.append(b'unlink %s' % addr) |
|
493 | 493 | # mtimehash is empty if one or more extensions fail to load. |
|
494 | 494 | # to be compatible with hg, still serve the client this time. |
|
495 | 495 | if self.hashstate.mtimehash: |
|
496 | 496 | insts.append(b'reconnect') |
|
497 | 497 | if newhash.confighash != self.hashstate.confighash: |
|
498 | 498 | addr = _hashaddress(self.baseaddress, newhash.confighash) |
|
499 | 499 | insts.append(b'redirect %s' % addr) |
|
500 | 500 | self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts)) |
|
501 | 501 | self.cresult.write(b'\0'.join(insts) or b'\0') |
|
502 | 502 | |
|
503 | 503 | def chdir(self): |
|
504 | 504 | """Change current directory |
|
505 | 505 | |
|
506 | 506 | Note that the behavior of --cwd option is bit different from this. |
|
507 | 507 | It does not affect --config parameter. |
|
508 | 508 | """ |
|
509 | 509 | path = self._readstr() |
|
510 | 510 | if not path: |
|
511 | 511 | return |
|
512 | 512 | self.ui.log(b'chgserver', b"chdir to '%s'\n", path) |
|
513 | 513 | os.chdir(path) |
|
514 | 514 | |
|
515 | 515 | def setumask(self): |
|
516 | 516 | """Change umask (DEPRECATED)""" |
|
517 | 517 | # BUG: this does not follow the message frame structure, but kept for |
|
518 | 518 | # backward compatibility with old chg clients for some time |
|
519 | 519 | self._setumask(self._read(4)) |
|
520 | 520 | |
|
521 | 521 | def setumask2(self): |
|
522 | 522 | """Change umask""" |
|
523 | 523 | data = self._readstr() |
|
524 | 524 | if len(data) != 4: |
|
525 | 525 | raise ValueError(b'invalid mask length in setumask2 request') |
|
526 | 526 | self._setumask(data) |
|
527 | 527 | |
|
528 | 528 | def _setumask(self, data): |
|
529 | 529 | mask = struct.unpack(b'>I', data)[0] |
|
530 | 530 | self.ui.log(b'chgserver', b'setumask %r\n', mask) |
|
531 | 531 | util.setumask(mask) |
|
532 | 532 | |
|
533 | 533 | def runcommand(self): |
|
534 | 534 | # pager may be attached within the runcommand session, which should |
|
535 | 535 | # be detached at the end of the session. otherwise the pager wouldn't |
|
536 | 536 | # receive EOF. |
|
537 | 537 | globaloldios = self._oldios |
|
538 | 538 | self._oldios = [] |
|
539 | 539 | try: |
|
540 | 540 | return super(chgcmdserver, self).runcommand() |
|
541 | 541 | finally: |
|
542 | 542 | self._restoreio() |
|
543 | 543 | self._oldios = globaloldios |
|
544 | 544 | |
|
545 | 545 | def setenv(self): |
|
546 | 546 | """Clear and update os.environ |
|
547 | 547 | |
|
548 | 548 | Note that not all variables can make an effect on the running process. |
|
549 | 549 | """ |
|
550 | 550 | l = self._readlist() |
|
551 | 551 | try: |
|
552 | 552 | newenv = dict(s.split(b'=', 1) for s in l) |
|
553 | 553 | except ValueError: |
|
554 | 554 | raise ValueError(b'unexpected value in setenv request') |
|
555 | 555 | self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys())) |
|
556 | 556 | |
|
557 | 557 | encoding.environ.clear() |
|
558 | 558 | encoding.environ.update(newenv) |
|
559 | 559 | |
|
560 | 560 | capabilities = commandserver.server.capabilities.copy() |
|
561 | 561 | capabilities.update( |
|
562 | 562 | { |
|
563 | 563 | b'attachio': attachio, |
|
564 | 564 | b'chdir': chdir, |
|
565 | 565 | b'runcommand': runcommand, |
|
566 | 566 | b'setenv': setenv, |
|
567 | 567 | b'setumask': setumask, |
|
568 | 568 | b'setumask2': setumask2, |
|
569 | 569 | } |
|
570 | 570 | ) |
|
571 | 571 | |
|
572 | 572 | if util.safehasattr(procutil, b'setprocname'): |
|
573 | 573 | |
|
574 | 574 | def setprocname(self): |
|
575 | 575 | """Change process title""" |
|
576 | 576 | name = self._readstr() |
|
577 | 577 | self.ui.log(b'chgserver', b'setprocname: %r\n', name) |
|
578 | 578 | procutil.setprocname(name) |
|
579 | 579 | |
|
580 | 580 | capabilities[b'setprocname'] = setprocname |
|
581 | 581 | |
|
582 | 582 | |
|
583 | 583 | def _tempaddress(address): |
|
584 | 584 | return b'%s.%d.tmp' % (address, os.getpid()) |
|
585 | 585 | |
|
586 | 586 | |
|
587 | 587 | def _hashaddress(address, hashstr): |
|
588 | 588 | # if the basename of address contains '.', use only the left part. this |
|
589 | 589 | # makes it possible for the client to pass 'server.tmp$PID' and follow by |
|
590 | 590 | # an atomic rename to avoid locking when spawning new servers. |
|
591 | 591 | dirname, basename = os.path.split(address) |
|
592 | 592 | basename = basename.split(b'.', 1)[0] |
|
593 | 593 | return b'%s-%s' % (os.path.join(dirname, basename), hashstr) |
|
594 | 594 | |
|
595 | 595 | |
|
596 | 596 | class chgunixservicehandler(object): |
|
597 | 597 | """Set of operations for chg services""" |
|
598 | 598 | |
|
599 | 599 | pollinterval = 1 # [sec] |
|
600 | 600 | |
|
601 | 601 | def __init__(self, ui): |
|
602 | 602 | self.ui = ui |
|
603 | 603 | self._idletimeout = ui.configint(b'chgserver', b'idletimeout') |
|
604 | 604 | self._lastactive = time.time() |
|
605 | 605 | |
|
606 | 606 | def bindsocket(self, sock, address): |
|
607 | 607 | self._inithashstate(address) |
|
608 | 608 | self._checkextensions() |
|
609 | 609 | self._bind(sock) |
|
610 | 610 | self._createsymlink() |
|
611 | 611 | # no "listening at" message should be printed to simulate hg behavior |
|
612 | 612 | |
|
613 | 613 | def _inithashstate(self, address): |
|
614 | 614 | self._baseaddress = address |
|
615 | 615 | if self.ui.configbool(b'chgserver', b'skiphash'): |
|
616 | 616 | self._hashstate = None |
|
617 | 617 | self._realaddress = address |
|
618 | 618 | return |
|
619 | 619 | self._hashstate = hashstate.fromui(self.ui) |
|
620 | 620 | self._realaddress = _hashaddress(address, self._hashstate.confighash) |
|
621 | 621 | |
|
622 | 622 | def _checkextensions(self): |
|
623 | 623 | if not self._hashstate: |
|
624 | 624 | return |
|
625 | 625 | if extensions.notloaded(): |
|
626 | 626 | # one or more extensions failed to load. mtimehash becomes |
|
627 | 627 | # meaningless because we do not know the paths of those extensions. |
|
628 | 628 | # set mtimehash to an illegal hash value to invalidate the server. |
|
629 | 629 | self._hashstate.mtimehash = b'' |
|
630 | 630 | |
|
631 | 631 | def _bind(self, sock): |
|
632 | 632 | # use a unique temp address so we can stat the file and do ownership |
|
633 | 633 | # check later |
|
634 | 634 | tempaddress = _tempaddress(self._realaddress) |
|
635 | 635 | util.bindunixsocket(sock, tempaddress) |
|
636 | 636 | self._socketstat = os.stat(tempaddress) |
|
637 | 637 | sock.listen(socket.SOMAXCONN) |
|
638 | 638 | # rename will replace the old socket file if exists atomically. the |
|
639 | 639 | # old server will detect ownership change and exit. |
|
640 | 640 | util.rename(tempaddress, self._realaddress) |
|
641 | 641 | |
|
642 | 642 | def _createsymlink(self): |
|
643 | 643 | if self._baseaddress == self._realaddress: |
|
644 | 644 | return |
|
645 | 645 | tempaddress = _tempaddress(self._baseaddress) |
|
646 | 646 | os.symlink(os.path.basename(self._realaddress), tempaddress) |
|
647 | 647 | util.rename(tempaddress, self._baseaddress) |
|
648 | 648 | |
|
649 | 649 | def _issocketowner(self): |
|
650 | 650 | try: |
|
651 | 651 | st = os.stat(self._realaddress) |
|
652 | 652 | return ( |
|
653 | 653 | st.st_ino == self._socketstat.st_ino |
|
654 | 654 | and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME] |
|
655 | 655 | ) |
|
656 | 656 | except OSError: |
|
657 | 657 | return False |
|
658 | 658 | |
|
659 | 659 | def unlinksocket(self, address): |
|
660 | 660 | if not self._issocketowner(): |
|
661 | 661 | return |
|
662 | 662 | # it is possible to have a race condition here that we may |
|
663 | 663 | # remove another server's socket file. but that's okay |
|
664 | 664 | # since that server will detect and exit automatically and |
|
665 | 665 | # the client will start a new server on demand. |
|
666 | 666 | util.tryunlink(self._realaddress) |
|
667 | 667 | |
|
668 | 668 | def shouldexit(self): |
|
669 | 669 | if not self._issocketowner(): |
|
670 | 670 | self.ui.log( |
|
671 | 671 | b'chgserver', b'%s is not owned, exiting.\n', self._realaddress |
|
672 | 672 | ) |
|
673 | 673 | return True |
|
674 | 674 | if time.time() - self._lastactive > self._idletimeout: |
|
675 | 675 | self.ui.log(b'chgserver', b'being idle too long. exiting.\n') |
|
676 | 676 | return True |
|
677 | 677 | return False |
|
678 | 678 | |
|
679 | 679 | def newconnection(self): |
|
680 | 680 | self._lastactive = time.time() |
|
681 | 681 | |
|
682 | 682 | def createcmdserver(self, repo, conn, fin, fout, prereposetups): |
|
683 | 683 | return chgcmdserver( |
|
684 | 684 | self.ui, |
|
685 | 685 | repo, |
|
686 | 686 | fin, |
|
687 | 687 | fout, |
|
688 | 688 | conn, |
|
689 | 689 | prereposetups, |
|
690 | 690 | self._hashstate, |
|
691 | 691 | self._baseaddress, |
|
692 | 692 | ) |
|
693 | 693 | |
|
694 | 694 | |
|
695 | 695 | def chgunixservice(ui, repo, opts): |
|
696 | 696 | # CHGINTERNALMARK is set by chg client. It is an indication of things are |
|
697 | 697 | # started by chg so other code can do things accordingly, like disabling |
|
698 | 698 | # demandimport or detecting chg client started by chg client. When executed |
|
699 | 699 | # here, CHGINTERNALMARK is no longer useful and hence dropped to make |
|
700 | 700 | # environ cleaner. |
|
701 | 701 | if b'CHGINTERNALMARK' in encoding.environ: |
|
702 | 702 | del encoding.environ[b'CHGINTERNALMARK'] |
|
703 | 703 | # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if |
|
704 | 704 | # it thinks the current value is "C". This breaks the hash computation and |
|
705 | 705 | # causes chg to restart loop. |
|
706 | 706 | if b'CHGORIG_LC_CTYPE' in encoding.environ: |
|
707 | 707 | encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE'] |
|
708 | 708 | del encoding.environ[b'CHGORIG_LC_CTYPE'] |
|
709 | 709 | elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ: |
|
710 | 710 | if b'LC_CTYPE' in encoding.environ: |
|
711 | 711 | del encoding.environ[b'LC_CTYPE'] |
|
712 | 712 | del encoding.environ[b'CHG_CLEAR_LC_CTYPE'] |
|
713 | 713 | |
|
714 | 714 | if repo: |
|
715 | 715 | # one chgserver can serve multiple repos. drop repo information |
|
716 | 716 | ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo') |
|
717 | 717 | h = chgunixservicehandler(ui) |
|
718 | 718 | return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h) |
@@ -1,779 +1,775 b'' | |||
|
1 | 1 | # posix.py - Posix utility function implementations for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import fcntl |
|
12 | 12 | import getpass |
|
13 | 13 | import grp |
|
14 | 14 | import os |
|
15 | 15 | import pwd |
|
16 | 16 | import re |
|
17 | 17 | import select |
|
18 | 18 | import stat |
|
19 | 19 | import sys |
|
20 | 20 | import tempfile |
|
21 | 21 | import unicodedata |
|
22 | 22 | |
|
23 | 23 | from .i18n import _ |
|
24 | 24 | from .pycompat import ( |
|
25 | 25 | getattr, |
|
26 | 26 | open, |
|
27 | 27 | ) |
|
28 | 28 | from . import ( |
|
29 | 29 | encoding, |
|
30 | 30 | error, |
|
31 | 31 | policy, |
|
32 | 32 | pycompat, |
|
33 | 33 | ) |
|
34 | 34 | |
|
35 | 35 | osutil = policy.importmod('osutil') |
|
36 | 36 | |
|
37 | 37 | normpath = os.path.normpath |
|
38 | 38 | samestat = os.path.samestat |
|
39 | 39 | try: |
|
40 | 40 | oslink = os.link |
|
41 | 41 | except AttributeError: |
|
42 | 42 | # Some platforms build Python without os.link on systems that are |
|
43 | 43 | # vaguely unix-like but don't have hardlink support. For those |
|
44 | 44 | # poor souls, just say we tried and that it failed so we fall back |
|
45 | 45 | # to copies. |
|
46 | 46 | def oslink(src, dst): |
|
47 | 47 | raise OSError( |
|
48 | 48 | errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst) |
|
49 | 49 | ) |
|
50 | 50 | |
|
51 | 51 | |
|
52 | 52 | readlink = os.readlink |
|
53 | 53 | unlink = os.unlink |
|
54 | 54 | rename = os.rename |
|
55 | 55 | removedirs = os.removedirs |
|
56 | 56 | expandglobs = False |
|
57 | 57 | |
|
58 | 58 | umask = os.umask(0) |
|
59 | 59 | os.umask(umask) |
|
60 | 60 | |
|
61 | 61 | if not pycompat.ispy3: |
|
62 | 62 | |
|
63 | 63 | def posixfile(name, mode='r', buffering=-1): |
|
64 | 64 | fp = open(name, mode=mode, buffering=buffering) |
|
65 | 65 | # The position when opening in append mode is implementation defined, so |
|
66 | 66 | # make it consistent by always seeking to the end. |
|
67 | 67 | if 'a' in mode: |
|
68 | 68 | fp.seek(0, os.SEEK_END) |
|
69 | 69 | return fp |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | else: |
|
73 | 73 | # The underlying file object seeks as required in Python 3: |
|
74 | 74 | # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474 |
|
75 | 75 | posixfile = open |
|
76 | 76 | |
|
77 | 77 | |
|
78 | 78 | def split(p): |
|
79 | 79 | '''Same as posixpath.split, but faster |
|
80 | 80 | |
|
81 | 81 | >>> import posixpath |
|
82 | 82 | >>> for f in [b'/absolute/path/to/file', |
|
83 | 83 | ... b'relative/path/to/file', |
|
84 | 84 | ... b'file_alone', |
|
85 | 85 | ... b'path/to/directory/', |
|
86 | 86 | ... b'/multiple/path//separators', |
|
87 | 87 | ... b'/file_at_root', |
|
88 | 88 | ... b'///multiple_leading_separators_at_root', |
|
89 | 89 | ... b'']: |
|
90 | 90 | ... assert split(f) == posixpath.split(f), f |
|
91 | 91 | ''' |
|
92 | 92 | ht = p.rsplit(b'/', 1) |
|
93 | 93 | if len(ht) == 1: |
|
94 | 94 | return b'', p |
|
95 | 95 | nh = ht[0].rstrip(b'/') |
|
96 | 96 | if nh: |
|
97 | 97 | return nh, ht[1] |
|
98 | 98 | return ht[0] + b'/', ht[1] |
|
99 | 99 | |
|
100 | 100 | |
|
101 | 101 | def openhardlinks(): |
|
102 | 102 | '''return true if it is safe to hold open file handles to hardlinks''' |
|
103 | 103 | return True |
|
104 | 104 | |
|
105 | 105 | |
|
106 | 106 | def nlinks(name): |
|
107 | 107 | '''return number of hardlinks for the given file''' |
|
108 | 108 | return os.lstat(name).st_nlink |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | def parsepatchoutput(output_line): |
|
112 | 112 | """parses the output produced by patch and returns the filename""" |
|
113 | 113 | pf = output_line[14:] |
|
114 | 114 | if pycompat.sysplatform == b'OpenVMS': |
|
115 | 115 | if pf[0] == b'`': |
|
116 | 116 | pf = pf[1:-1] # Remove the quotes |
|
117 | 117 | else: |
|
118 | 118 | if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf: |
|
119 | 119 | pf = pf[1:-1] # Remove the quotes |
|
120 | 120 | return pf |
|
121 | 121 | |
|
122 | 122 | |
|
123 | 123 | def sshargs(sshcmd, host, user, port): |
|
124 | 124 | '''Build argument list for ssh''' |
|
125 | 125 | args = user and (b"%s@%s" % (user, host)) or host |
|
126 | 126 | if b'-' in args[:1]: |
|
127 | 127 | raise error.Abort( |
|
128 | 128 | _(b'illegal ssh hostname or username starting with -: %s') % args |
|
129 | 129 | ) |
|
130 | 130 | args = shellquote(args) |
|
131 | 131 | if port: |
|
132 | 132 | args = b'-p %s %s' % (shellquote(port), args) |
|
133 | 133 | return args |
|
134 | 134 | |
|
135 | 135 | |
|
136 | 136 | def isexec(f): |
|
137 | 137 | """check whether a file is executable""" |
|
138 | 138 | return os.lstat(f).st_mode & 0o100 != 0 |
|
139 | 139 | |
|
140 | 140 | |
|
141 | 141 | def setflags(f, l, x): |
|
142 | 142 | st = os.lstat(f) |
|
143 | 143 | s = st.st_mode |
|
144 | 144 | if l: |
|
145 | 145 | if not stat.S_ISLNK(s): |
|
146 | 146 | # switch file to link |
|
147 | 147 | fp = open(f, b'rb') |
|
148 | 148 | data = fp.read() |
|
149 | 149 | fp.close() |
|
150 | 150 | unlink(f) |
|
151 | 151 | try: |
|
152 | 152 | os.symlink(data, f) |
|
153 | 153 | except OSError: |
|
154 | 154 | # failed to make a link, rewrite file |
|
155 | 155 | fp = open(f, b"wb") |
|
156 | 156 | fp.write(data) |
|
157 | 157 | fp.close() |
|
158 | 158 | # no chmod needed at this point |
|
159 | 159 | return |
|
160 | 160 | if stat.S_ISLNK(s): |
|
161 | 161 | # switch link to file |
|
162 | 162 | data = os.readlink(f) |
|
163 | 163 | unlink(f) |
|
164 | 164 | fp = open(f, b"wb") |
|
165 | 165 | fp.write(data) |
|
166 | 166 | fp.close() |
|
167 | 167 | s = 0o666 & ~umask # avoid restatting for chmod |
|
168 | 168 | |
|
169 | 169 | sx = s & 0o100 |
|
170 | 170 | if st.st_nlink > 1 and bool(x) != bool(sx): |
|
171 | 171 | # the file is a hardlink, break it |
|
172 | 172 | with open(f, b"rb") as fp: |
|
173 | 173 | data = fp.read() |
|
174 | 174 | unlink(f) |
|
175 | 175 | with open(f, b"wb") as fp: |
|
176 | 176 | fp.write(data) |
|
177 | 177 | |
|
178 | 178 | if x and not sx: |
|
179 | 179 | # Turn on +x for every +r bit when making a file executable |
|
180 | 180 | # and obey umask. |
|
181 | 181 | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) |
|
182 | 182 | elif not x and sx: |
|
183 | 183 | # Turn off all +x bits |
|
184 | 184 | os.chmod(f, s & 0o666) |
|
185 | 185 | |
|
186 | 186 | |
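To make the bit arithmetic in setflags() concrete, here is a worked example of the '+x for every +r bit, obeying umask' expression; the mode and umask values are only an illustration::

    s = 0o644              # rw-r--r-- regular file
    umask = 0o022
    # each read bit is shifted down to the matching execute bit, masked by the umask
    new_mode = s | (s & 0o444) >> 2 & ~umask
    print(oct(new_mode))   # 0o755: every class that could read may now also execute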
|
187 | 187 | def copymode(src, dst, mode=None, enforcewritable=False): |
|
188 | 188 | '''Copy the file mode from the file at path src to dst. |
|
189 | 189 | If src doesn't exist, we're using mode instead. If mode is None, we're |
|
190 | 190 | using umask.''' |
|
191 | 191 | try: |
|
192 | 192 | st_mode = os.lstat(src).st_mode & 0o777 |
|
193 | 193 | except OSError as inst: |
|
194 | 194 | if inst.errno != errno.ENOENT: |
|
195 | 195 | raise |
|
196 | 196 | st_mode = mode |
|
197 | 197 | if st_mode is None: |
|
198 | 198 | st_mode = ~umask |
|
199 | 199 | st_mode &= 0o666 |
|
200 | 200 | |
|
201 | 201 | new_mode = st_mode |
|
202 | 202 | |
|
203 | 203 | if enforcewritable: |
|
204 | 204 | new_mode |= stat.S_IWUSR |
|
205 | 205 | |
|
206 | 206 | os.chmod(dst, new_mode) |
|
207 | 207 | |
|
208 | 208 | |
|
209 | 209 | def checkexec(path): |
|
210 | 210 | """ |
|
211 | 211 | Check whether the given path is on a filesystem with UNIX-like exec flags |
|
212 | 212 | |
|
213 | 213 | Requires a directory (like /foo/.hg) |
|
214 | 214 | """ |
|
215 | 215 | |
|
216 | 216 | # VFAT on some Linux versions can flip mode but it doesn't persist |
|
217 | 217 | # across a FS remount. Frequently we can detect it if files are created |
|
218 | 218 | # with exec bit on. |
|
219 | 219 | |
|
220 | 220 | try: |
|
221 | 221 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
222 | 222 | basedir = os.path.join(path, b'.hg') |
|
223 | 223 | cachedir = os.path.join(basedir, b'wcache') |
|
224 | 224 | storedir = os.path.join(basedir, b'store') |
|
225 | 225 | if not os.path.exists(cachedir): |
|
226 | 226 | try: |
|
227 | 227 | # we want to create the 'cache' directory, not the '.hg' one. |
|
228 | 228 | # Automatically creating '.hg' directory could silently spawn |
|
229 | 229 | # invalid Mercurial repositories. That seems like a bad idea. |
|
230 | 230 | os.mkdir(cachedir) |
|
231 | 231 | if os.path.exists(storedir): |
|
232 | 232 | copymode(storedir, cachedir) |
|
233 | 233 | else: |
|
234 | 234 | copymode(basedir, cachedir) |
|
235 | 235 | except (IOError, OSError): |
|
236 | 236 | # let the other fallback logic trigger |
|
237 | 237 | pass |
|
238 | 238 | if os.path.isdir(cachedir): |
|
239 | 239 | checkisexec = os.path.join(cachedir, b'checkisexec') |
|
240 | 240 | checknoexec = os.path.join(cachedir, b'checknoexec') |
|
241 | 241 | |
|
242 | 242 | try: |
|
243 | 243 | m = os.stat(checkisexec).st_mode |
|
244 | 244 | except OSError as e: |
|
245 | 245 | if e.errno != errno.ENOENT: |
|
246 | 246 | raise |
|
247 | 247 | # checkisexec does not exist - fall through ... |
|
248 | 248 | else: |
|
249 | 249 | # checkisexec exists, check if it actually is exec |
|
250 | 250 | if m & EXECFLAGS != 0: |
|
251 | 251 | # ensure checkisexec exists, check it isn't exec |
|
252 | 252 | try: |
|
253 | 253 | m = os.stat(checknoexec).st_mode |
|
254 | 254 | except OSError as e: |
|
255 | 255 | if e.errno != errno.ENOENT: |
|
256 | 256 | raise |
|
257 | 257 | open(checknoexec, b'w').close() # might fail |
|
258 | 258 | m = os.stat(checknoexec).st_mode |
|
259 | 259 | if m & EXECFLAGS == 0: |
|
260 | 260 | # check-exec is exec and check-no-exec is not exec |
|
261 | 261 | return True |
|
262 | 262 | # checknoexec exists but is exec - delete it |
|
263 | 263 | unlink(checknoexec) |
|
264 | 264 | # checkisexec exists but is not exec - delete it |
|
265 | 265 | unlink(checkisexec) |
|
266 | 266 | |
|
267 | 267 | # check using one file, leave it as checkisexec |
|
268 | 268 | checkdir = cachedir |
|
269 | 269 | else: |
|
270 | 270 | # check directly in path and don't leave checkisexec behind |
|
271 | 271 | checkdir = path |
|
272 | 272 | checkisexec = None |
|
273 | 273 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') |
|
274 | 274 | try: |
|
275 | 275 | os.close(fh) |
|
276 | 276 | m = os.stat(fn).st_mode |
|
277 | 277 | if m & EXECFLAGS == 0: |
|
278 | 278 | os.chmod(fn, m & 0o777 | EXECFLAGS) |
|
279 | 279 | if os.stat(fn).st_mode & EXECFLAGS != 0: |
|
280 | 280 | if checkisexec is not None: |
|
281 | 281 | os.rename(fn, checkisexec) |
|
282 | 282 | fn = None |
|
283 | 283 | return True |
|
284 | 284 | finally: |
|
285 | 285 | if fn is not None: |
|
286 | 286 | unlink(fn) |
|
287 | 287 | except (IOError, OSError): |
|
288 | 288 | # we don't care, the user probably won't be able to commit anyway |
|
289 | 289 | return False |
|
290 | 290 | |
|
291 | 291 | |
|
292 | 292 | def checklink(path): |
|
293 | 293 | """check whether the given path is on a symlink-capable filesystem""" |
|
294 | 294 | # mktemp is not racy because symlink creation will fail if the |
|
295 | 295 | # file already exists |
|
296 | 296 | while True: |
|
297 | 297 | cachedir = os.path.join(path, b'.hg', b'wcache') |
|
298 | 298 | checklink = os.path.join(cachedir, b'checklink') |
|
299 | 299 | # try fast path, read only |
|
300 | 300 | if os.path.islink(checklink): |
|
301 | 301 | return True |
|
302 | 302 | if os.path.isdir(cachedir): |
|
303 | 303 | checkdir = cachedir |
|
304 | 304 | else: |
|
305 | 305 | checkdir = path |
|
306 | 306 | cachedir = None |
|
307 | 307 | name = tempfile.mktemp( |
|
308 | 308 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' |
|
309 | 309 | ) |
|
310 | 310 | name = pycompat.fsencode(name) |
|
311 | 311 | try: |
|
312 | 312 | fd = None |
|
313 | 313 | if cachedir is None: |
|
314 | 314 | fd = pycompat.namedtempfile( |
|
315 | 315 | dir=checkdir, prefix=b'hg-checklink-' |
|
316 | 316 | ) |
|
317 | 317 | target = os.path.basename(fd.name) |
|
318 | 318 | else: |
|
319 | 319 | # create a fixed file to link to; doesn't matter if it |
|
320 | 320 | # already exists. |
|
321 | 321 | target = b'checklink-target' |
|
322 | 322 | try: |
|
323 | 323 | fullpath = os.path.join(cachedir, target) |
|
324 | 324 | open(fullpath, b'w').close() |
|
325 | 325 | except IOError as inst: |
|
326 | 326 | if ( |
|
327 | 327 | inst[0] == errno.EACCES |
|
328 | 328 | ): # pytype: disable=unsupported-operands |
|
329 | 329 | # If we can't write to cachedir, just pretend |
|
330 | 330 | # that the fs is readonly and by association |
|
331 | 331 | # that the fs won't support symlinks. This |
|
332 | 332 | # seems like the least dangerous way to avoid |
|
333 | 333 | # data loss. |
|
334 | 334 | return False |
|
335 | 335 | raise |
|
336 | 336 | try: |
|
337 | 337 | os.symlink(target, name) |
|
338 | 338 | if cachedir is None: |
|
339 | 339 | unlink(name) |
|
340 | 340 | else: |
|
341 | 341 | try: |
|
342 | 342 | os.rename(name, checklink) |
|
343 | 343 | except OSError: |
|
344 | 344 | unlink(name) |
|
345 | 345 | return True |
|
346 | 346 | except OSError as inst: |
|
347 | 347 | # link creation might race, try again |
|
348 | 348 | if inst.errno == errno.EEXIST: |
|
349 | 349 | continue |
|
350 | 350 | raise |
|
351 | 351 | finally: |
|
352 | 352 | if fd is not None: |
|
353 | 353 | fd.close() |
|
354 | 354 | except AttributeError: |
|
355 | 355 | return False |
|
356 | 356 | except OSError as inst: |
|
357 | 357 | # sshfs might report failure while successfully creating the link |
|
358 | 358 | if inst.errno == errno.EIO and os.path.exists(name): |
|
359 | 359 | unlink(name) |
|
360 | 360 | return False |
|
361 | 361 | |
|
362 | 362 | |
|
363 | 363 | def checkosfilename(path): |
|
364 | 364 | '''Check that the base-relative path is a valid filename on this platform. |
|
365 | 365 | Returns None if the path is ok, or a UI string describing the problem.''' |
|
366 | 366 | return None # on posix platforms, every path is ok |
|
367 | 367 | |
|
368 | 368 | |
|
369 | 369 | def getfsmountpoint(dirpath): |
|
370 | 370 | '''Get the filesystem mount point from a directory (best-effort) |
|
371 | 371 | |
|
372 | 372 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
373 | 373 | ''' |
|
374 | 374 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) |
|
375 | 375 | |
|
376 | 376 | |
|
377 | 377 | def getfstype(dirpath): |
|
378 | 378 | '''Get the filesystem type name from a directory (best-effort) |
|
379 | 379 | |
|
380 | 380 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
381 | 381 | ''' |
|
382 | 382 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) |
|
383 | 383 | |
|
384 | 384 | |
|
385 | 385 | def setbinary(fd): |
|
386 | 386 | pass |
|
387 | 387 | |
|
388 | 388 | |
|
389 | 389 | def pconvert(path): |
|
390 | 390 | return path |
|
391 | 391 | |
|
392 | 392 | |
|
393 | 393 | def localpath(path): |
|
394 | 394 | return path |
|
395 | 395 | |
|
396 | 396 | |
|
397 | 397 | def samefile(fpath1, fpath2): |
|
398 | 398 | """Returns whether path1 and path2 refer to the same file. This is only |
|
399 | 399 | guaranteed to work for files, not directories.""" |
|
400 | 400 | return os.path.samefile(fpath1, fpath2) |
|
401 | 401 | |
|
402 | 402 | |
|
403 | 403 | def samedevice(fpath1, fpath2): |
|
404 | 404 | """Returns whether fpath1 and fpath2 are on the same device. This is only |
|
405 | 405 | guaranteed to work for files, not directories.""" |
|
406 | 406 | st1 = os.lstat(fpath1) |
|
407 | 407 | st2 = os.lstat(fpath2) |
|
408 | 408 | return st1.st_dev == st2.st_dev |
|
409 | 409 | |
|
410 | 410 | |
|
411 | 411 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems |
|
412 | 412 | def normcase(path): |
|
413 | 413 | return path.lower() |
|
414 | 414 | |
|
415 | 415 | |
|
416 | 416 | # what normcase does to ASCII strings |
|
417 | 417 | normcasespec = encoding.normcasespecs.lower |
|
418 | 418 | # fallback normcase function for non-ASCII strings |
|
419 | 419 | normcasefallback = normcase |
|
420 | 420 | |
|
421 | 421 | if pycompat.isdarwin: |
|
422 | 422 | |
|
423 | 423 | def normcase(path): |
|
424 | 424 | ''' |
|
425 | 425 | Normalize a filename for OS X-compatible comparison: |
|
426 | 426 | - escape-encode invalid characters |
|
427 | 427 | - decompose to NFD |
|
428 | 428 | - lowercase |
|
429 | 429 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff] |
|
430 | 430 | |
|
431 | 431 | >>> normcase(b'UPPER') |
|
432 | 432 | 'upper' |
|
433 | 433 | >>> normcase(b'Caf\\xc3\\xa9') |
|
434 | 434 | 'cafe\\xcc\\x81' |
|
435 | 435 | >>> normcase(b'\\xc3\\x89') |
|
436 | 436 | 'e\\xcc\\x81' |
|
437 | 437 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 |
|
438 | 438 | '%b8%ca%c3\\xca\\xbe%c8.jpg' |
|
439 | 439 | ''' |
|
440 | 440 | |
|
441 | 441 | try: |
|
442 | 442 | return encoding.asciilower(path) # exception for non-ASCII |
|
443 | 443 | except UnicodeDecodeError: |
|
444 | 444 | return normcasefallback(path) |
|
445 | 445 | |
|
446 | 446 | normcasespec = encoding.normcasespecs.lower |
|
447 | 447 | |
|
448 | 448 | def normcasefallback(path): |
|
449 | 449 | try: |
|
450 | 450 | u = path.decode('utf-8') |
|
451 | 451 | except UnicodeDecodeError: |
|
452 | 452 | # OS X percent-encodes any bytes that aren't valid utf-8 |
|
453 | 453 | s = b'' |
|
454 | 454 | pos = 0 |
|
455 | 455 | l = len(path) |
|
456 | 456 | while pos < l: |
|
457 | 457 | try: |
|
458 | 458 | c = encoding.getutf8char(path, pos) |
|
459 | 459 | pos += len(c) |
|
460 | 460 | except ValueError: |
|
461 | 461 | c = b'%%%02X' % ord(path[pos : pos + 1]) |
|
462 | 462 | pos += 1 |
|
463 | 463 | s += c |
|
464 | 464 | |
|
465 | 465 | u = s.decode('utf-8') |
|
466 | 466 | |
|
467 | 467 | # Decompose then lowercase (HFS+ technote specifies lower) |
|
468 | 468 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') |
|
469 | 469 | # drop HFS+ ignored characters |
|
470 | 470 | return encoding.hfsignoreclean(enc) |
|
471 | 471 | |
|
472 | 472 | |
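The heart of the Darwin rule above is NFD decomposition plus lowercasing; a tiny standalone sketch of just that step (it deliberately skips the percent-encoding of invalid UTF-8 and the HFS+ ignored-character stripping that the real helpers perform):

import unicodedata

def normcase_nfd_lower(path):
    u = path.decode('utf-8')  # assumes valid UTF-8 input
    return unicodedata.normalize('NFD', u).lower().encode('utf-8')

# Precomposed and decomposed spellings of "café" normalize identically.
assert normcase_nfd_lower(b'Caf\xc3\xa9') == normcase_nfd_lower(b'Cafe\xcc\x81')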
|
473 | 473 | if pycompat.sysplatform == b'cygwin': |
|
474 | 474 | # workaround for cygwin, in which mount point part of path is |
|
475 | 475 | # treated as case sensitive, even though underlying NTFS is case |
|
476 | 476 | # insensitive. |
|
477 | 477 | |
|
478 | 478 | # default mount points |
|
479 | 479 | cygwinmountpoints = sorted( |
|
480 | 480 | [b"/usr/bin", b"/usr/lib", b"/cygdrive",], reverse=True |
|
481 | 481 | ) |
|
482 | 482 | |
|
483 | 483 | # use uppercasing for normcase, the same as the NTFS workaround |
|
484 | 484 | def normcase(path): |
|
485 | 485 | pathlen = len(path) |
|
486 | 486 | if (pathlen == 0) or (path[0] != pycompat.ossep): |
|
487 | 487 | # treat as relative |
|
488 | 488 | return encoding.upper(path) |
|
489 | 489 | |
|
490 | 490 | # to preserve case of mountpoint part |
|
491 | 491 | for mp in cygwinmountpoints: |
|
492 | 492 | if not path.startswith(mp): |
|
493 | 493 | continue |
|
494 | 494 | |
|
495 | 495 | mplen = len(mp) |
|
496 | 496 | if mplen == pathlen: # mount point itself |
|
497 | 497 | return mp |
|
498 | 498 | if path[mplen] == pycompat.ossep: |
|
499 | 499 | return mp + encoding.upper(path[mplen:]) |
|
500 | 500 | |
|
501 | 501 | return encoding.upper(path) |
|
502 | 502 | |
|
503 | 503 | normcasespec = encoding.normcasespecs.other |
|
504 | 504 | normcasefallback = normcase |
|
505 | 505 | |
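The effect of the Cygwin normcase above is easiest to see with a concrete path: everything after a known mount point is uppercased while the mount-point prefix keeps its case. A standalone stand-in with a hypothetical mount list (not the module's cygwinmountpoints):

MOUNTS = sorted([b'/usr/bin', b'/usr/lib', b'/cygdrive'], reverse=True)

def cygwin_normcase(path):
    if not path.startswith(b'/'):
        return path.upper()                      # relative path: plain uppercase
    for mp in MOUNTS:
        if path == mp:
            return mp                            # the mount point itself keeps its case
        if path.startswith(mp + b'/'):
            return mp + path[len(mp):].upper()   # preserve the mount-point prefix
    return path.upper()

assert cygwin_normcase(b'/cygdrive/c/Work') == b'/cygdrive/C/WORK'
assert cygwin_normcase(b'readme.TXT') == b'README.TXT'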
|
506 | 506 | # Cygwin translates native ACLs to POSIX permissions, |
|
507 | 507 | # but these translations are not supported by native |
|
508 | 508 | # tools, so the exec bit tends to be set erroneously. |
|
509 | 509 | # Therefore, disable executable bit access on Cygwin. |
|
510 | 510 | def checkexec(path): |
|
511 | 511 | return False |
|
512 | 512 | |
|
513 | 513 | # Similarly, Cygwin's symlink emulation is likely to create |
|
514 | 514 | # problems when Mercurial is used from both Cygwin and native |
|
515 | 515 | # Windows, with other native tools, or on shared volumes |
|
516 | 516 | def checklink(path): |
|
517 | 517 | return False |
|
518 | 518 | |
|
519 | 519 | |
|
520 | 520 | _needsshellquote = None |
|
521 | 521 | |
|
522 | 522 | |
|
523 | 523 | def shellquote(s): |
|
524 | 524 | if pycompat.sysplatform == b'OpenVMS': |
|
525 | 525 | return b'"%s"' % s |
|
526 | 526 | global _needsshellquote |
|
527 | 527 | if _needsshellquote is None: |
|
528 | 528 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search |
|
529 | 529 | if s and not _needsshellquote(s): |
|
530 | 530 | # "s" shouldn't have to be quoted |
|
531 | 531 | return s |
|
532 | 532 | else: |
|
533 | 533 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
534 | 534 | |
|
535 | 535 | |
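The quoting rule above (wrap in single quotes and rewrite any embedded single quote as '\'') can be checked against a couple of representative strings; a standalone copy of the same logic:

import re

_needsquote = re.compile(br'[^a-zA-Z0-9._/+-]').search

def sh_quote(s):
    if s and not _needsquote(s):
        return s                                   # only safe characters: pass through
    return b"'%s'" % s.replace(b"'", b"'\\''")     # otherwise single-quote it

assert sh_quote(b'plain-name_1.txt') == b'plain-name_1.txt'
assert sh_quote(b"it's here") == b"'it'\\''s here'"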
|
536 | 536 | def shellsplit(s): |
|
537 | 537 | """Parse a command string in POSIX shell way (best-effort)""" |
|
538 | 538 | return pycompat.shlexsplit(s, posix=True) |
|
539 | 539 | |
|
540 | 540 | |
|
541 | def quotecommand(cmd): | |
|
542 | return cmd | |
|
543 | ||
|
544 | ||
|
545 | 541 | def testpid(pid): |
|
546 | 542 | '''return False if pid dead, True if running or not sure''' |
|
547 | 543 | if pycompat.sysplatform == b'OpenVMS': |
|
548 | 544 | return True |
|
549 | 545 | try: |
|
550 | 546 | os.kill(pid, 0) |
|
551 | 547 | return True |
|
552 | 548 | except OSError as inst: |
|
553 | 549 | return inst.errno != errno.ESRCH |
|
554 | 550 | |
|
555 | 551 | |
|
556 | 552 | def isowner(st): |
|
557 | 553 | """Return True if the stat object st is from the current user.""" |
|
558 | 554 | return st.st_uid == os.getuid() |
|
559 | 555 | |
|
560 | 556 | |
|
561 | 557 | def findexe(command): |
|
562 | 558 | '''Find executable for command searching like which does. |
|
563 | 559 | If command is a basename then PATH is searched for command. |
|
564 | 560 | PATH isn't searched if command is an absolute or relative path. |
|
565 | 561 | If command isn't found None is returned.''' |
|
566 | 562 | if pycompat.sysplatform == b'OpenVMS': |
|
567 | 563 | return command |
|
568 | 564 | |
|
569 | 565 | def findexisting(executable): |
|
570 | 566 | b'Will return executable if existing file' |
|
571 | 567 | if os.path.isfile(executable) and os.access(executable, os.X_OK): |
|
572 | 568 | return executable |
|
573 | 569 | return None |
|
574 | 570 | |
|
575 | 571 | if pycompat.ossep in command: |
|
576 | 572 | return findexisting(command) |
|
577 | 573 | |
|
578 | 574 | if pycompat.sysplatform == b'plan9': |
|
579 | 575 | return findexisting(os.path.join(b'/bin', command)) |
|
580 | 576 | |
|
581 | 577 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
582 | 578 | executable = findexisting(os.path.join(path, command)) |
|
583 | 579 | if executable is not None: |
|
584 | 580 | return executable |
|
585 | 581 | return None |
|
586 | 582 | |
|
587 | 583 | |
|
588 | 584 | def setsignalhandler(): |
|
589 | 585 | pass |
|
590 | 586 | |
|
591 | 587 | |
|
592 | 588 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
593 | 589 | |
|
594 | 590 | |
|
595 | 591 | def statfiles(files): |
|
596 | 592 | '''Stat each file in files. Yield each stat, or None if a file does not |
|
597 | 593 | exist or has a type we don't care about.''' |
|
598 | 594 | lstat = os.lstat |
|
599 | 595 | getkind = stat.S_IFMT |
|
600 | 596 | for nf in files: |
|
601 | 597 | try: |
|
602 | 598 | st = lstat(nf) |
|
603 | 599 | if getkind(st.st_mode) not in _wantedkinds: |
|
604 | 600 | st = None |
|
605 | 601 | except OSError as err: |
|
606 | 602 | if err.errno not in (errno.ENOENT, errno.ENOTDIR): |
|
607 | 603 | raise |
|
608 | 604 | st = None |
|
609 | 605 | yield st |
|
610 | 606 | |
|
611 | 607 | |
|
612 | 608 | def getuser(): |
|
613 | 609 | '''return name of current user''' |
|
614 | 610 | return pycompat.fsencode(getpass.getuser()) |
|
615 | 611 | |
|
616 | 612 | |
|
617 | 613 | def username(uid=None): |
|
618 | 614 | """Return the name of the user with the given uid. |
|
619 | 615 | |
|
620 | 616 | If uid is None, return the name of the current user.""" |
|
621 | 617 | |
|
622 | 618 | if uid is None: |
|
623 | 619 | uid = os.getuid() |
|
624 | 620 | try: |
|
625 | 621 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) |
|
626 | 622 | except KeyError: |
|
627 | 623 | return b'%d' % uid |
|
628 | 624 | |
|
629 | 625 | |
|
630 | 626 | def groupname(gid=None): |
|
631 | 627 | """Return the name of the group with the given gid. |
|
632 | 628 | |
|
633 | 629 | If gid is None, return the name of the current group.""" |
|
634 | 630 | |
|
635 | 631 | if gid is None: |
|
636 | 632 | gid = os.getgid() |
|
637 | 633 | try: |
|
638 | 634 | return pycompat.fsencode(grp.getgrgid(gid)[0]) |
|
639 | 635 | except KeyError: |
|
640 | 636 | return pycompat.bytestr(gid) |
|
641 | 637 | |
|
642 | 638 | |
|
643 | 639 | def groupmembers(name): |
|
644 | 640 | """Return the list of members of the group with the given |
|
645 | 641 | name, KeyError if the group does not exist. |
|
646 | 642 | """ |
|
647 | 643 | name = pycompat.fsdecode(name) |
|
648 | 644 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) |
|
649 | 645 | |
|
650 | 646 | |
|
651 | 647 | def spawndetached(args): |
|
652 | 648 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) |
|
653 | 649 | |
|
654 | 650 | |
|
655 | 651 | def gethgcmd(): |
|
656 | 652 | return sys.argv[:1] |
|
657 | 653 | |
|
658 | 654 | |
|
659 | 655 | def makedir(path, notindexed): |
|
660 | 656 | os.mkdir(path) |
|
661 | 657 | |
|
662 | 658 | |
|
663 | 659 | def lookupreg(key, name=None, scope=None): |
|
664 | 660 | return None |
|
665 | 661 | |
|
666 | 662 | |
|
667 | 663 | def hidewindow(): |
|
668 | 664 | """Hide current shell window. |
|
669 | 665 | |
|
670 | 666 | Used to hide the window opened when starting asynchronous |
|
671 | 667 | child process under Windows, unneeded on other systems. |
|
672 | 668 | """ |
|
673 | 669 | pass |
|
674 | 670 | |
|
675 | 671 | |
|
676 | 672 | class cachestat(object): |
|
677 | 673 | def __init__(self, path): |
|
678 | 674 | self.stat = os.stat(path) |
|
679 | 675 | |
|
680 | 676 | def cacheable(self): |
|
681 | 677 | return bool(self.stat.st_ino) |
|
682 | 678 | |
|
683 | 679 | __hash__ = object.__hash__ |
|
684 | 680 | |
|
685 | 681 | def __eq__(self, other): |
|
686 | 682 | try: |
|
687 | 683 | # Only dev, ino, size, mtime and atime are likely to change. Out |
|
688 | 684 | # of these, we shouldn't compare atime but should compare the |
|
689 | 685 | # rest. However, one of the other fields changing indicates |
|
690 | 686 | # something fishy going on, so return False if anything but atime |
|
691 | 687 | # changes. |
|
692 | 688 | return ( |
|
693 | 689 | self.stat.st_mode == other.stat.st_mode |
|
694 | 690 | and self.stat.st_ino == other.stat.st_ino |
|
695 | 691 | and self.stat.st_dev == other.stat.st_dev |
|
696 | 692 | and self.stat.st_nlink == other.stat.st_nlink |
|
697 | 693 | and self.stat.st_uid == other.stat.st_uid |
|
698 | 694 | and self.stat.st_gid == other.stat.st_gid |
|
699 | 695 | and self.stat.st_size == other.stat.st_size |
|
700 | 696 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] |
|
701 | 697 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] |
|
702 | 698 | ) |
|
703 | 699 | except AttributeError: |
|
704 | 700 | return False |
|
705 | 701 | |
|
706 | 702 | def __ne__(self, other): |
|
707 | 703 | return not self == other |
|
708 | 704 | |
|
709 | 705 | |
|
710 | 706 | def statislink(st): |
|
711 | 707 | '''check whether a stat result is a symlink''' |
|
712 | 708 | return st and stat.S_ISLNK(st.st_mode) |
|
713 | 709 | |
|
714 | 710 | |
|
715 | 711 | def statisexec(st): |
|
716 | 712 | '''check whether a stat result is an executable file''' |
|
717 | 713 | return st and (st.st_mode & 0o100 != 0) |
|
718 | 714 | |
|
719 | 715 | |
|
720 | 716 | def poll(fds): |
|
721 | 717 | """block until something happens on any file descriptor |
|
722 | 718 | |
|
723 | 719 | This is a generic helper that will check for any activity |
|
724 | 720 | (read, write, exception) and return the list of touched files. |
|
725 | 721 | |
|
726 | 722 | In unsupported cases, it will raise a NotImplementedError""" |
|
727 | 723 | try: |
|
728 | 724 | while True: |
|
729 | 725 | try: |
|
730 | 726 | res = select.select(fds, fds, fds) |
|
731 | 727 | break |
|
732 | 728 | except select.error as inst: |
|
733 | 729 | if inst.args[0] == errno.EINTR: |
|
734 | 730 | continue |
|
735 | 731 | raise |
|
736 | 732 | except ValueError: # out of range file descriptor |
|
737 | 733 | raise NotImplementedError() |
|
738 | 734 | return sorted(list(set(sum(res, [])))) |
|
739 | 735 | |
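poll() above is a thin wrapper over select.select() that flattens the three ready-lists into one sorted set of descriptors. The underlying behaviour in isolation (standalone, POSIX only):

import os
import select

rfd, wfd = os.pipe()
os.write(wfd, b'ping')
# Watch the read end for readability; a one-second timeout keeps this finite.
readable, writable, exceptional = select.select([rfd], [], [], 1.0)
assert rfd in readable
print(os.read(rfd, 4))          # b'ping'
os.close(rfd)
os.close(wfd)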
|
740 | 736 | |
|
741 | 737 | def readpipe(pipe): |
|
742 | 738 | """Read all available data from a pipe.""" |
|
743 | 739 | # We can't fstat() a pipe because Linux will always report 0. |
|
744 | 740 | # So, we set the pipe to non-blocking mode and read everything |
|
745 | 741 | # that's available. |
|
746 | 742 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) |
|
747 | 743 | flags |= os.O_NONBLOCK |
|
748 | 744 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) |
|
749 | 745 | |
|
750 | 746 | try: |
|
751 | 747 | chunks = [] |
|
752 | 748 | while True: |
|
753 | 749 | try: |
|
754 | 750 | s = pipe.read() |
|
755 | 751 | if not s: |
|
756 | 752 | break |
|
757 | 753 | chunks.append(s) |
|
758 | 754 | except IOError: |
|
759 | 755 | break |
|
760 | 756 | |
|
761 | 757 | return b''.join(chunks) |
|
762 | 758 | finally: |
|
763 | 759 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) |
|
764 | 760 | |
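The same non-blocking drain pattern as readpipe() above, written as a standalone helper (POSIX only; the caller passes a binary file object wrapping the pipe's read end):

import fcntl
import os

def drain_pipe(pipe):
    flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    try:
        chunks = []
        while True:
            try:
                data = pipe.read()
                if not data:        # b'' or None: nothing buffered right now
                    break
                chunks.append(data)
            except IOError:
                break
        return b''.join(chunks)
    finally:
        fcntl.fcntl(pipe, fcntl.F_SETFL, flags)

# Typical use: capture whatever a child process has already written, e.g.
#   proc = subprocess.Popen([...], stderr=subprocess.PIPE)
#   leftovers = drain_pipe(proc.stderr)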
|
765 | 761 | |
|
766 | 762 | def bindunixsocket(sock, path): |
|
767 | 763 | """Bind the UNIX domain socket to the specified path""" |
|
768 | 764 | # use relative path instead of full path at bind() if possible, since |
|
769 | 765 | # AF_UNIX path has very small length limit (107 chars) on common |
|
770 | 766 | # platforms (see sys/un.h) |
|
771 | 767 | dirname, basename = os.path.split(path) |
|
772 | 768 | bakwdfd = None |
|
773 | 769 | if dirname: |
|
774 | 770 | bakwdfd = os.open(b'.', os.O_DIRECTORY) |
|
775 | 771 | os.chdir(dirname) |
|
776 | 772 | sock.bind(basename) |
|
777 | 773 | if bakwdfd: |
|
778 | 774 | os.fchdir(bakwdfd) |
|
779 | 775 | os.close(bakwdfd) |
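bindunixsocket() above works around the short sun_path limit (about 107 bytes) by chdir-ing into the socket's directory and binding a relative name. The same trick in a standalone snippet, with made-up file names:

import os
import socket
import tempfile

tmpdir = tempfile.mkdtemp()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
cwd_fd = os.open('.', os.O_RDONLY)
try:
    os.chdir(tmpdir)                 # hop into the target directory...
    sock.bind('short.sock')          # ...so the bound path stays tiny
finally:
    os.fchdir(cwd_fd)                # ...and hop back, whatever happened
    os.close(cwd_fd)

print(os.path.exists(os.path.join(tmpdir, 'short.sock')))   # True
sock.close()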
@@ -1,709 +1,708 b'' | |||
|
1 | 1 | # sshpeer.py - ssh repository proxy class for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import re |
|
11 | 11 | import uuid |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .pycompat import getattr |
|
15 | 15 | from . import ( |
|
16 | 16 | error, |
|
17 | 17 | pycompat, |
|
18 | 18 | util, |
|
19 | 19 | wireprotoserver, |
|
20 | 20 | wireprototypes, |
|
21 | 21 | wireprotov1peer, |
|
22 | 22 | wireprotov1server, |
|
23 | 23 | ) |
|
24 | 24 | from .utils import ( |
|
25 | 25 | procutil, |
|
26 | 26 | stringutil, |
|
27 | 27 | ) |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | def _serverquote(s): |
|
31 | 31 | """quote a string for the remote shell ... which we assume is sh""" |
|
32 | 32 | if not s: |
|
33 | 33 | return s |
|
34 | 34 | if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s): |
|
35 | 35 | return s |
|
36 | 36 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
37 | 37 | |
|
38 | 38 | |
|
39 | 39 | def _forwardoutput(ui, pipe, warn=False): |
|
40 | 40 | """display all data currently available on pipe as remote output. |
|
41 | 41 | |
|
42 | 42 | This is non blocking.""" |
|
43 | 43 | if pipe: |
|
44 | 44 | s = procutil.readpipe(pipe) |
|
45 | 45 | if s: |
|
46 | 46 | display = ui.warn if warn else ui.status |
|
47 | 47 | for l in s.splitlines(): |
|
48 | 48 | display(_(b"remote: "), l, b'\n') |
|
49 | 49 | |
|
50 | 50 | |
|
51 | 51 | class doublepipe(object): |
|
52 | 52 | """Operate a side-channel pipe in addition of a main one |
|
53 | 53 | |
|
54 | 54 | The side-channel pipe contains server output to be forwarded to the user |
|
55 | 55 | input. The double pipe will behave as the "main" pipe, but will ensure the |
|
56 | 56 | content of the "side" pipe is properly processed while we wait for blocking |
|
57 | 57 | call on the "main" pipe. |
|
58 | 58 | |
|
59 | 59 | If large amounts of data are read from "main", the forward will cease after |
|
60 | 60 | the first bytes start to appear. This simplifies the implementation |
|
61 | 61 | without affecting actual output of sshpeer too much as we rarely issue |
|
62 | 62 | large reads for data not yet emitted by the server. |
|
63 | 63 | |
|
64 | 64 | The main pipe is expected to be a 'bufferedinputpipe' from the util module |
|
65 | 65 | that handles all the os-specific bits. This class lives in this module |

66 | 66 | because it focuses on behavior specific to the ssh protocol.""" |
|
67 | 67 | |
|
68 | 68 | def __init__(self, ui, main, side): |
|
69 | 69 | self._ui = ui |
|
70 | 70 | self._main = main |
|
71 | 71 | self._side = side |
|
72 | 72 | |
|
73 | 73 | def _wait(self): |
|
74 | 74 | """wait until some data are available on main or side |
|
75 | 75 | |
|
76 | 76 | return a pair of boolean (ismainready, issideready) |
|
77 | 77 | |
|
78 | 78 | (This will only wait for data if the setup is supported by `util.poll`) |
|
79 | 79 | """ |
|
80 | 80 | if ( |
|
81 | 81 | isinstance(self._main, util.bufferedinputpipe) |
|
82 | 82 | and self._main.hasbuffer |
|
83 | 83 | ): |
|
84 | 84 | # Main has data. Assume side is worth poking at. |
|
85 | 85 | return True, True |
|
86 | 86 | |
|
87 | 87 | fds = [self._main.fileno(), self._side.fileno()] |
|
88 | 88 | try: |
|
89 | 89 | act = util.poll(fds) |
|
90 | 90 | except NotImplementedError: |
|
91 | 91 | # not yet supported case, assume all have data. |
|
92 | 92 | act = fds |
|
93 | 93 | return (self._main.fileno() in act, self._side.fileno() in act) |
|
94 | 94 | |
|
95 | 95 | def write(self, data): |
|
96 | 96 | return self._call(b'write', data) |
|
97 | 97 | |
|
98 | 98 | def read(self, size): |
|
99 | 99 | r = self._call(b'read', size) |
|
100 | 100 | if size != 0 and not r: |
|
101 | 101 | # We've observed a condition that indicates the |
|
102 | 102 | # stdout closed unexpectedly. Check stderr one |
|
103 | 103 | # more time and snag anything that's there before |
|
104 | 104 | # letting anyone know the main part of the pipe |
|
105 | 105 | # closed prematurely. |
|
106 | 106 | _forwardoutput(self._ui, self._side) |
|
107 | 107 | return r |
|
108 | 108 | |
|
109 | 109 | def unbufferedread(self, size): |
|
110 | 110 | r = self._call(b'unbufferedread', size) |
|
111 | 111 | if size != 0 and not r: |
|
112 | 112 | # We've observed a condition that indicates the |
|
113 | 113 | # stdout closed unexpectedly. Check stderr one |
|
114 | 114 | # more time and snag anything that's there before |
|
115 | 115 | # letting anyone know the main part of the pipe |
|
116 | 116 | # closed prematurely. |
|
117 | 117 | _forwardoutput(self._ui, self._side) |
|
118 | 118 | return r |
|
119 | 119 | |
|
120 | 120 | def readline(self): |
|
121 | 121 | return self._call(b'readline') |
|
122 | 122 | |
|
123 | 123 | def _call(self, methname, data=None): |
|
124 | 124 | """call <methname> on "main", forward output of "side" while blocking |
|
125 | 125 | """ |
|
126 | 126 | # data can be '' or 0 |
|
127 | 127 | if (data is not None and not data) or self._main.closed: |
|
128 | 128 | _forwardoutput(self._ui, self._side) |
|
129 | 129 | return b'' |
|
130 | 130 | while True: |
|
131 | 131 | mainready, sideready = self._wait() |
|
132 | 132 | if sideready: |
|
133 | 133 | _forwardoutput(self._ui, self._side) |
|
134 | 134 | if mainready: |
|
135 | 135 | meth = getattr(self._main, methname) |
|
136 | 136 | if data is None: |
|
137 | 137 | return meth() |
|
138 | 138 | else: |
|
139 | 139 | return meth(data) |
|
140 | 140 | |
|
141 | 141 | def close(self): |
|
142 | 142 | return self._main.close() |
|
143 | 143 | |
|
144 | 144 | def flush(self): |
|
145 | 145 | return self._main.flush() |
|
146 | 146 | |
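A bare-bones stand-in for the idea behind doublepipe: before blocking on the "main" stream, select() on both descriptors and forward whatever is waiting on the "side" (stderr) stream, so remote diagnostics are not delayed. Illustrative sketch only; SideChannelReader and on_side_output are made-up names, not the class above:

import select

class SideChannelReader(object):
    """Read from `main`, forwarding any pending `side` output first."""

    def __init__(self, main, side, on_side_output):
        self._main = main            # e.g. proc.stdout (buffered binary stream)
        self._side = side            # e.g. proc.stderr
        self._emit = on_side_output  # callback receiving forwarded side output
        self._watch_side = True

    def read(self, size):
        while True:
            fds = [self._main.fileno()]
            if self._watch_side:
                fds.append(self._side.fileno())
            ready, _, _ = select.select(fds, [], [])
            if self._watch_side and self._side.fileno() in ready:
                chunk = self._side.read1(4096)
                if chunk:
                    self._emit(chunk)            # forward side output first
                else:
                    self._watch_side = False     # side stream reached EOF
            if self._main.fileno() in ready:
                return self._main.read(size)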
|
147 | 147 | |
|
148 | 148 | def _cleanuppipes(ui, pipei, pipeo, pipee): |
|
149 | 149 | """Clean up pipes used by an SSH connection.""" |
|
150 | 150 | if pipeo: |
|
151 | 151 | pipeo.close() |
|
152 | 152 | if pipei: |
|
153 | 153 | pipei.close() |
|
154 | 154 | |
|
155 | 155 | if pipee: |
|
156 | 156 | # Try to read from the err descriptor until EOF. |
|
157 | 157 | try: |
|
158 | 158 | for l in pipee: |
|
159 | 159 | ui.status(_(b'remote: '), l) |
|
160 | 160 | except (IOError, ValueError): |
|
161 | 161 | pass |
|
162 | 162 | |
|
163 | 163 | pipee.close() |
|
164 | 164 | |
|
165 | 165 | |
|
166 | 166 | def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None): |
|
167 | 167 | """Create an SSH connection to a server. |
|
168 | 168 | |
|
169 | 169 | Returns a tuple of (process, stdin, stdout, stderr) for the |
|
170 | 170 | spawned process. |
|
171 | 171 | """ |
|
172 | 172 | cmd = b'%s %s %s' % ( |
|
173 | 173 | sshcmd, |
|
174 | 174 | args, |
|
175 | 175 | procutil.shellquote( |
|
176 | 176 | b'%s -R %s serve --stdio' |
|
177 | 177 | % (_serverquote(remotecmd), _serverquote(path)) |
|
178 | 178 | ), |
|
179 | 179 | ) |
|
180 | 180 | |
|
181 | 181 | ui.debug(b'running %s\n' % cmd) |
|
182 | cmd = procutil.quotecommand(cmd) | |
|
183 | 182 | |
|
184 | 183 | # no buffer allow the use of 'select' |
|
185 | 184 | # feel free to remove buffering and select usage when we ultimately |
|
186 | 185 | # move to threading. |
|
187 | 186 | stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv) |
|
188 | 187 | |
|
189 | 188 | return proc, stdin, stdout, stderr |
|
190 | 189 | |
|
191 | 190 | |
|
192 | 191 | def _clientcapabilities(): |
|
193 | 192 | """Return list of capabilities of this client. |
|
194 | 193 | |
|
195 | 194 | Returns a list of capabilities that are supported by this client. |
|
196 | 195 | """ |
|
197 | 196 | protoparams = {b'partial-pull'} |
|
198 | 197 | comps = [ |
|
199 | 198 | e.wireprotosupport().name |
|
200 | 199 | for e in util.compengines.supportedwireengines(util.CLIENTROLE) |
|
201 | 200 | ] |
|
202 | 201 | protoparams.add(b'comp=%s' % b','.join(comps)) |
|
203 | 202 | return protoparams |
|
204 | 203 | |
|
205 | 204 | |
|
206 | 205 | def _performhandshake(ui, stdin, stdout, stderr): |
|
207 | 206 | def badresponse(): |
|
208 | 207 | # Flush any output on stderr. In general, the stderr contains errors |
|
209 | 208 | # from the remote (ssh errors, some hg errors), and status indications |
|
210 | 209 | # (like "adding changes"), with no current way to tell them apart. |
|
211 | 210 | # Here we failed so early that it's almost certainly only errors, so |
|
212 | 211 | # use warn=True so -q doesn't hide them. |
|
213 | 212 | _forwardoutput(ui, stderr, warn=True) |
|
214 | 213 | |
|
215 | 214 | msg = _(b'no suitable response from remote hg') |
|
216 | 215 | hint = ui.config(b'ui', b'ssherrorhint') |
|
217 | 216 | raise error.RepoError(msg, hint=hint) |
|
218 | 217 | |
|
219 | 218 | # The handshake consists of sending wire protocol commands in reverse |
|
220 | 219 | # order of protocol implementation and then sniffing for a response |
|
221 | 220 | # to one of them. |
|
222 | 221 | # |
|
223 | 222 | # Those commands (from oldest to newest) are: |
|
224 | 223 | # |
|
225 | 224 | # ``between`` |
|
226 | 225 | # Asks for the set of revisions between a pair of revisions. Command |
|
227 | 226 | # present in all Mercurial server implementations. |
|
228 | 227 | # |
|
229 | 228 | # ``hello`` |
|
230 | 229 | # Instructs the server to advertise its capabilities. Introduced in |
|
231 | 230 | # Mercurial 0.9.1. |
|
232 | 231 | # |
|
233 | 232 | # ``upgrade`` |
|
234 | 233 | # Requests upgrade from default transport protocol version 1 to |
|
235 | 234 | # a newer version. Introduced in Mercurial 4.6 as an experimental |
|
236 | 235 | # feature. |
|
237 | 236 | # |
|
238 | 237 | # The ``between`` command is issued with a request for the null |
|
239 | 238 | # range. If the remote is a Mercurial server, this request will |
|
240 | 239 | # generate a specific response: ``1\n\n``. This represents the |
|
241 | 240 | # wire protocol encoded value for ``\n``. We look for ``1\n\n`` |
|
242 | 241 | # in the output stream and know this is the response to ``between`` |
|
243 | 242 | # and we're at the end of our handshake reply. |
|
244 | 243 | # |
|
245 | 244 | # The response to the ``hello`` command will be a line with the |
|
246 | 245 | # length of the value returned by that command followed by that |
|
247 | 246 | # value. If the server doesn't support ``hello`` (which should be |
|
248 | 247 | # rare), that line will be ``0\n``. Otherwise, the value will contain |
|
249 | 248 | # RFC 822 like lines. Of these, the ``capabilities:`` line contains |
|
250 | 249 | # the capabilities of the server. |
|
251 | 250 | # |
|
252 | 251 | # The ``upgrade`` command isn't really a command in the traditional |
|
253 | 252 | # sense of version 1 of the transport because it isn't using the |
|
254 | 253 | # proper mechanism for formatting: instead, it just encodes |
|
255 | 254 | # arguments on the line, delimited by spaces. |
|
256 | 255 | # |
|
257 | 256 | # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``. |
|
258 | 257 | # If the server doesn't support protocol upgrades, it will reply to |
|
259 | 258 | # this line with ``0\n``. Otherwise, it emits an |
|
260 | 259 | # ``upgraded <token> <protocol>`` line to both stdout and stderr. |
|
261 | 260 | # Content immediately following this line describes additional |
|
262 | 261 | # protocol and server state. |
|
263 | 262 | # |
|
264 | 263 | # In addition to the responses to our command requests, the server |
|
265 | 264 | # may emit "banner" output on stdout. SSH servers are allowed to |
|
266 | 265 | # print messages to stdout on login. Issuing commands on connection |
|
267 | 266 | # allows us to flush this banner output from the server by scanning |
|
268 | 267 | # for output to our well-known ``between`` command. Of course, if |
|
269 | 268 | # the banner contains ``1\n\n``, this will throw off our detection. |
|
270 | 269 | |
|
271 | 270 | requestlog = ui.configbool(b'devel', b'debug.peer-request') |
|
272 | 271 | |
|
273 | 272 | # Generate a random token to help identify responses to version 2 |
|
274 | 273 | # upgrade request. |
|
275 | 274 | token = pycompat.sysbytes(str(uuid.uuid4())) |
|
276 | 275 | upgradecaps = [ |
|
277 | 276 | (b'proto', wireprotoserver.SSHV2), |
|
278 | 277 | ] |
|
279 | 278 | upgradecaps = util.urlreq.urlencode(upgradecaps) |
|
280 | 279 | |
|
281 | 280 | try: |
|
282 | 281 | pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40) |
|
283 | 282 | handshake = [ |
|
284 | 283 | b'hello\n', |
|
285 | 284 | b'between\n', |
|
286 | 285 | b'pairs %d\n' % len(pairsarg), |
|
287 | 286 | pairsarg, |
|
288 | 287 | ] |
|
289 | 288 | |
|
290 | 289 | # Request upgrade to version 2 if configured. |
|
291 | 290 | if ui.configbool(b'experimental', b'sshpeer.advertise-v2'): |
|
292 | 291 | ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps)) |
|
293 | 292 | handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps)) |
|
294 | 293 | |
|
295 | 294 | if requestlog: |
|
296 | 295 | ui.debug(b'devel-peer-request: hello+between\n') |
|
297 | 296 | ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) |
|
298 | 297 | ui.debug(b'sending hello command\n') |
|
299 | 298 | ui.debug(b'sending between command\n') |
|
300 | 299 | |
|
301 | 300 | stdin.write(b''.join(handshake)) |
|
302 | 301 | stdin.flush() |
|
303 | 302 | except IOError: |
|
304 | 303 | badresponse() |
|
305 | 304 | |
|
306 | 305 | # Assume version 1 of wire protocol by default. |
|
307 | 306 | protoname = wireprototypes.SSHV1 |
|
308 | 307 | reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token)) |
|
309 | 308 | |
|
310 | 309 | lines = [b'', b'dummy'] |
|
311 | 310 | max_noise = 500 |
|
312 | 311 | while lines[-1] and max_noise: |
|
313 | 312 | try: |
|
314 | 313 | l = stdout.readline() |
|
315 | 314 | _forwardoutput(ui, stderr, warn=True) |
|
316 | 315 | |
|
317 | 316 | # Look for reply to protocol upgrade request. It has a token |
|
318 | 317 | # in it, so there should be no false positives. |
|
319 | 318 | m = reupgraded.match(l) |
|
320 | 319 | if m: |
|
321 | 320 | protoname = m.group(1) |
|
322 | 321 | ui.debug(b'protocol upgraded to %s\n' % protoname) |
|
323 | 322 | # If an upgrade was handled, the ``hello`` and ``between`` |
|
324 | 323 | # requests are ignored. The next output belongs to the |
|
325 | 324 | # protocol, so stop scanning lines. |
|
326 | 325 | break |
|
327 | 326 | |
|
328 | 327 | # Otherwise it could be a banner, ``0\n`` response if server |
|
329 | 328 | # doesn't support upgrade. |
|
330 | 329 | |
|
331 | 330 | if lines[-1] == b'1\n' and l == b'\n': |
|
332 | 331 | break |
|
333 | 332 | if l: |
|
334 | 333 | ui.debug(b'remote: ', l) |
|
335 | 334 | lines.append(l) |
|
336 | 335 | max_noise -= 1 |
|
337 | 336 | except IOError: |
|
338 | 337 | badresponse() |
|
339 | 338 | else: |
|
340 | 339 | badresponse() |
|
341 | 340 | |
|
342 | 341 | caps = set() |
|
343 | 342 | |
|
344 | 343 | # For version 1, we should see a ``capabilities`` line in response to the |
|
345 | 344 | # ``hello`` command. |
|
346 | 345 | if protoname == wireprototypes.SSHV1: |
|
347 | 346 | for l in reversed(lines): |
|
348 | 347 | # Look for response to ``hello`` command. Scan from the back so |
|
349 | 348 | # we don't misinterpret banner output as the command reply. |
|
350 | 349 | if l.startswith(b'capabilities:'): |
|
351 | 350 | caps.update(l[:-1].split(b':')[1].split()) |
|
352 | 351 | break |
|
353 | 352 | elif protoname == wireprotoserver.SSHV2: |
|
354 | 353 | # We see a line with number of bytes to follow and then a value |
|
355 | 354 | # looking like ``capabilities: *``. |
|
356 | 355 | line = stdout.readline() |
|
357 | 356 | try: |
|
358 | 357 | valuelen = int(line) |
|
359 | 358 | except ValueError: |
|
360 | 359 | badresponse() |
|
361 | 360 | |
|
362 | 361 | capsline = stdout.read(valuelen) |
|
363 | 362 | if not capsline.startswith(b'capabilities: '): |
|
364 | 363 | badresponse() |
|
365 | 364 | |
|
366 | 365 | ui.debug(b'remote: %s\n' % capsline) |
|
367 | 366 | |
|
368 | 367 | caps.update(capsline.split(b':')[1].split()) |
|
369 | 368 | # Trailing newline. |
|
370 | 369 | stdout.read(1) |
|
371 | 370 | |
|
372 | 371 | # Error if we couldn't find capabilities, this means: |
|
373 | 372 | # |
|
374 | 373 | # 1. Remote isn't a Mercurial server |
|
375 | 374 | # 2. Remote is a <0.9.1 Mercurial server |
|
376 | 375 | # 3. Remote is a future Mercurial server that dropped ``hello`` |
|
377 | 376 | # and other attempted handshake mechanisms. |
|
378 | 377 | if not caps: |
|
379 | 378 | badresponse() |
|
380 | 379 | |
|
381 | 380 | # Flush any output on stderr before proceeding. |
|
382 | 381 | _forwardoutput(ui, stderr, warn=True) |
|
383 | 382 | |
|
384 | 383 | return protoname, caps |
|
385 | 384 | |
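To make the comment block above concrete, here is what the client writes and how the capabilities line is picked out of a reply. The "server" is a canned byte string standing in for a real `hg serve --stdio` process, so the banner and capability set are invented for illustration:

# What the client sends: hello, then between with the null range.
pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
request = b''.join([
    b'hello\n',
    b'between\n',
    b'pairs %d\n' % len(pairsarg),
    pairsarg,
])
print(request)

# Simulated reply: SSH banner noise, the hello reply (length prefix plus a
# capabilities line), then '1\n\n' -- the framed answer to ``between``.
capsline = b'capabilities: lookup branchmap pushkey known getbundle batch\n'
reply = b'motd: welcome\n' + b'%d\n' % len(capsline) + capsline + b'1\n\n'

# Scan from the back so banner noise is not mistaken for the reply.
caps = set()
for line in reversed(reply.splitlines(True)):
    if line.startswith(b'capabilities:'):
        caps.update(line[:-1].split(b':')[1].split())
        break
print(sorted(caps))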
|
386 | 385 | |
|
387 | 386 | class sshv1peer(wireprotov1peer.wirepeer): |
|
388 | 387 | def __init__( |
|
389 | 388 | self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True |
|
390 | 389 | ): |
|
391 | 390 | """Create a peer from an existing SSH connection. |
|
392 | 391 | |
|
393 | 392 | ``proc`` is a handle on the underlying SSH process. |
|
394 | 393 | ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio |
|
395 | 394 | pipes for that process. |
|
396 | 395 | ``caps`` is a set of capabilities supported by the remote. |
|
397 | 396 | ``autoreadstderr`` denotes whether to automatically read from |
|
398 | 397 | stderr and to forward its output. |
|
399 | 398 | """ |
|
400 | 399 | self._url = url |
|
401 | 400 | self.ui = ui |
|
402 | 401 | # self._subprocess is unused. Keeping a handle on the process |
|
403 | 402 | # holds a reference and prevents it from being garbage collected. |
|
404 | 403 | self._subprocess = proc |
|
405 | 404 | |
|
406 | 405 | # And we hook up our "doublepipe" wrapper to allow querying |
|
407 | 406 | # stderr any time we perform I/O. |
|
408 | 407 | if autoreadstderr: |
|
409 | 408 | stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr) |
|
410 | 409 | stdin = doublepipe(ui, stdin, stderr) |
|
411 | 410 | |
|
412 | 411 | self._pipeo = stdin |
|
413 | 412 | self._pipei = stdout |
|
414 | 413 | self._pipee = stderr |
|
415 | 414 | self._caps = caps |
|
416 | 415 | self._autoreadstderr = autoreadstderr |
|
417 | 416 | |
|
418 | 417 | # Commands that have a "framed" response where the first line of the |
|
419 | 418 | # response contains the length of that response. |
|
420 | 419 | _FRAMED_COMMANDS = { |
|
421 | 420 | b'batch', |
|
422 | 421 | } |
|
423 | 422 | |
|
424 | 423 | # Begin of ipeerconnection interface. |
|
425 | 424 | |
|
426 | 425 | def url(self): |
|
427 | 426 | return self._url |
|
428 | 427 | |
|
429 | 428 | def local(self): |
|
430 | 429 | return None |
|
431 | 430 | |
|
432 | 431 | def peer(self): |
|
433 | 432 | return self |
|
434 | 433 | |
|
435 | 434 | def canpush(self): |
|
436 | 435 | return True |
|
437 | 436 | |
|
438 | 437 | def close(self): |
|
439 | 438 | pass |
|
440 | 439 | |
|
441 | 440 | # End of ipeerconnection interface. |
|
442 | 441 | |
|
443 | 442 | # Begin of ipeercommands interface. |
|
444 | 443 | |
|
445 | 444 | def capabilities(self): |
|
446 | 445 | return self._caps |
|
447 | 446 | |
|
448 | 447 | # End of ipeercommands interface. |
|
449 | 448 | |
|
450 | 449 | def _readerr(self): |
|
451 | 450 | _forwardoutput(self.ui, self._pipee) |
|
452 | 451 | |
|
453 | 452 | def _abort(self, exception): |
|
454 | 453 | self._cleanup() |
|
455 | 454 | raise exception |
|
456 | 455 | |
|
457 | 456 | def _cleanup(self): |
|
458 | 457 | _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee) |
|
459 | 458 | |
|
460 | 459 | __del__ = _cleanup |
|
461 | 460 | |
|
462 | 461 | def _sendrequest(self, cmd, args, framed=False): |
|
463 | 462 | if self.ui.debugflag and self.ui.configbool( |
|
464 | 463 | b'devel', b'debug.peer-request' |
|
465 | 464 | ): |
|
466 | 465 | dbg = self.ui.debug |
|
467 | 466 | line = b'devel-peer-request: %s\n' |
|
468 | 467 | dbg(line % cmd) |
|
469 | 468 | for key, value in sorted(args.items()): |
|
470 | 469 | if not isinstance(value, dict): |
|
471 | 470 | dbg(line % b' %s: %d bytes' % (key, len(value))) |
|
472 | 471 | else: |
|
473 | 472 | for dk, dv in sorted(value.items()): |
|
474 | 473 | dbg(line % b' %s-%s: %d' % (key, dk, len(dv))) |
|
475 | 474 | self.ui.debug(b"sending %s command\n" % cmd) |
|
476 | 475 | self._pipeo.write(b"%s\n" % cmd) |
|
477 | 476 | _func, names = wireprotov1server.commands[cmd] |
|
478 | 477 | keys = names.split() |
|
479 | 478 | wireargs = {} |
|
480 | 479 | for k in keys: |
|
481 | 480 | if k == b'*': |
|
482 | 481 | wireargs[b'*'] = args |
|
483 | 482 | break |
|
484 | 483 | else: |
|
485 | 484 | wireargs[k] = args[k] |
|
486 | 485 | del args[k] |
|
487 | 486 | for k, v in sorted(pycompat.iteritems(wireargs)): |
|
488 | 487 | self._pipeo.write(b"%s %d\n" % (k, len(v))) |
|
489 | 488 | if isinstance(v, dict): |
|
490 | 489 | for dk, dv in pycompat.iteritems(v): |
|
491 | 490 | self._pipeo.write(b"%s %d\n" % (dk, len(dv))) |
|
492 | 491 | self._pipeo.write(dv) |
|
493 | 492 | else: |
|
494 | 493 | self._pipeo.write(v) |
|
495 | 494 | self._pipeo.flush() |
|
496 | 495 | |
|
497 | 496 | # We know exactly how many bytes are in the response. So return a proxy |
|
498 | 497 | # around the raw output stream that allows reading exactly this many |
|
499 | 498 | # bytes. Callers then can read() without fear of overrunning the |
|
500 | 499 | # response. |
|
501 | 500 | if framed: |
|
502 | 501 | amount = self._getamount() |
|
503 | 502 | return util.cappedreader(self._pipei, amount) |
|
504 | 503 | |
|
505 | 504 | return self._pipei |
|
506 | 505 | |
|
507 | 506 | def _callstream(self, cmd, **args): |
|
508 | 507 | args = pycompat.byteskwargs(args) |
|
509 | 508 | return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) |
|
510 | 509 | |
|
511 | 510 | def _callcompressable(self, cmd, **args): |
|
512 | 511 | args = pycompat.byteskwargs(args) |
|
513 | 512 | return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) |
|
514 | 513 | |
|
515 | 514 | def _call(self, cmd, **args): |
|
516 | 515 | args = pycompat.byteskwargs(args) |
|
517 | 516 | return self._sendrequest(cmd, args, framed=True).read() |
|
518 | 517 | |
|
519 | 518 | def _callpush(self, cmd, fp, **args): |
|
520 | 519 | # The server responds with an empty frame if the client should |
|
521 | 520 | # continue submitting the payload. |
|
522 | 521 | r = self._call(cmd, **args) |
|
523 | 522 | if r: |
|
524 | 523 | return b'', r |
|
525 | 524 | |
|
526 | 525 | # The payload consists of frames with content followed by an empty |
|
527 | 526 | # frame. |
|
528 | 527 | for d in iter(lambda: fp.read(4096), b''): |
|
529 | 528 | self._writeframed(d) |
|
530 | 529 | self._writeframed(b"", flush=True) |
|
531 | 530 | |
|
532 | 531 | # In case of success, there is an empty frame and a frame containing |
|
533 | 532 | # the integer result (as a string). |
|
534 | 533 | # In case of error, there is a non-empty frame containing the error. |
|
535 | 534 | r = self._readframed() |
|
536 | 535 | if r: |
|
537 | 536 | return b'', r |
|
538 | 537 | return self._readframed(), b'' |
|
539 | 538 | |
|
540 | 539 | def _calltwowaystream(self, cmd, fp, **args): |
|
541 | 540 | # The server responds with an empty frame if the client should |
|
542 | 541 | # continue submitting the payload. |
|
543 | 542 | r = self._call(cmd, **args) |
|
544 | 543 | if r: |
|
545 | 544 | # XXX needs to be made better |
|
546 | 545 | raise error.Abort(_(b'unexpected remote reply: %s') % r) |
|
547 | 546 | |
|
548 | 547 | # The payload consists of frames with content followed by an empty |
|
549 | 548 | # frame. |
|
550 | 549 | for d in iter(lambda: fp.read(4096), b''): |
|
551 | 550 | self._writeframed(d) |
|
552 | 551 | self._writeframed(b"", flush=True) |
|
553 | 552 | |
|
554 | 553 | return self._pipei |
|
555 | 554 | |
|
556 | 555 | def _getamount(self): |
|
557 | 556 | l = self._pipei.readline() |
|
558 | 557 | if l == b'\n': |
|
559 | 558 | if self._autoreadstderr: |
|
560 | 559 | self._readerr() |
|
561 | 560 | msg = _(b'check previous remote output') |
|
562 | 561 | self._abort(error.OutOfBandError(hint=msg)) |
|
563 | 562 | if self._autoreadstderr: |
|
564 | 563 | self._readerr() |
|
565 | 564 | try: |
|
566 | 565 | return int(l) |
|
567 | 566 | except ValueError: |
|
568 | 567 | self._abort(error.ResponseError(_(b"unexpected response:"), l)) |
|
569 | 568 | |
|
570 | 569 | def _readframed(self): |
|
571 | 570 | size = self._getamount() |
|
572 | 571 | if not size: |
|
573 | 572 | return b'' |
|
574 | 573 | |
|
575 | 574 | return self._pipei.read(size) |
|
576 | 575 | |
|
577 | 576 | def _writeframed(self, data, flush=False): |
|
578 | 577 | self._pipeo.write(b"%d\n" % len(data)) |
|
579 | 578 | if data: |
|
580 | 579 | self._pipeo.write(data) |
|
581 | 580 | if flush: |
|
582 | 581 | self._pipeo.flush() |
|
583 | 582 | if self._autoreadstderr: |
|
584 | 583 | self._readerr() |
|
585 | 584 | |
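The frame format driven by _writeframed()/_readframed() above is simply a decimal length, a newline, then that many bytes, with a zero-length frame as terminator. A standalone round-trip over an in-memory buffer:

import io

def write_frames(out, chunks):
    for chunk in chunks:
        out.write(b'%d\n' % len(chunk))
        out.write(chunk)
    out.write(b'0\n')                      # empty frame ends the payload

def read_frames(inp):
    while True:
        size = int(inp.readline())
        if not size:
            return
        yield inp.read(size)

buf = io.BytesIO()
write_frames(buf, [b'first chunk', b'second'])
buf.seek(0)
print(list(read_frames(buf)))              # [b'first chunk', b'second']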
|
586 | 585 | |
|
587 | 586 | class sshv2peer(sshv1peer): |
|
588 | 587 | """A peer that speakers version 2 of the transport protocol.""" |
|
589 | 588 | |
|
590 | 589 | # Currently version 2 is identical to version 1 post handshake. |
|
591 | 590 | # And handshake is performed before the peer is instantiated. So |
|
592 | 591 | # we need no custom code. |
|
593 | 592 | |
|
594 | 593 | |
|
595 | 594 | def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): |
|
596 | 595 | """Make a peer instance from existing pipes. |
|
597 | 596 | |
|
598 | 597 | ``path`` and ``proc`` are stored on the eventual peer instance and may |
|
599 | 598 | not be used for anything meaningful. |
|
600 | 599 | |
|
601 | 600 | ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the |
|
602 | 601 | SSH server's stdio handles. |
|
603 | 602 | |
|
604 | 603 | This function is factored out to allow creating peers that don't |
|
605 | 604 | actually spawn a new process. It is useful for starting SSH protocol |
|
606 | 605 | servers and clients via non-standard means, which can be useful for |
|
607 | 606 | testing. |
|
608 | 607 | """ |
|
609 | 608 | try: |
|
610 | 609 | protoname, caps = _performhandshake(ui, stdin, stdout, stderr) |
|
611 | 610 | except Exception: |
|
612 | 611 | _cleanuppipes(ui, stdout, stdin, stderr) |
|
613 | 612 | raise |
|
614 | 613 | |
|
615 | 614 | if protoname == wireprototypes.SSHV1: |
|
616 | 615 | return sshv1peer( |
|
617 | 616 | ui, |
|
618 | 617 | path, |
|
619 | 618 | proc, |
|
620 | 619 | stdin, |
|
621 | 620 | stdout, |
|
622 | 621 | stderr, |
|
623 | 622 | caps, |
|
624 | 623 | autoreadstderr=autoreadstderr, |
|
625 | 624 | ) |
|
626 | 625 | elif protoname == wireprototypes.SSHV2: |
|
627 | 626 | return sshv2peer( |
|
628 | 627 | ui, |
|
629 | 628 | path, |
|
630 | 629 | proc, |
|
631 | 630 | stdin, |
|
632 | 631 | stdout, |
|
633 | 632 | stderr, |
|
634 | 633 | caps, |
|
635 | 634 | autoreadstderr=autoreadstderr, |
|
636 | 635 | ) |
|
637 | 636 | else: |
|
638 | 637 | _cleanuppipes(ui, stdout, stdin, stderr) |
|
639 | 638 | raise error.RepoError( |
|
640 | 639 | _(b'unknown version of SSH protocol: %s') % protoname |
|
641 | 640 | ) |
|
642 | 641 | |
|
643 | 642 | |
|
644 | 643 | def instance(ui, path, create, intents=None, createopts=None): |
|
645 | 644 | """Create an SSH peer. |
|
646 | 645 | |
|
647 | 646 | The returned object conforms to the ``wireprotov1peer.wirepeer`` interface. |
|
648 | 647 | """ |
|
649 | 648 | u = util.url(path, parsequery=False, parsefragment=False) |
|
650 | 649 | if u.scheme != b'ssh' or not u.host or u.path is None: |
|
651 | 650 | raise error.RepoError(_(b"couldn't parse location %s") % path) |
|
652 | 651 | |
|
653 | 652 | util.checksafessh(path) |
|
654 | 653 | |
|
655 | 654 | if u.passwd is not None: |
|
656 | 655 | raise error.RepoError(_(b'password in URL not supported')) |
|
657 | 656 | |
|
658 | 657 | sshcmd = ui.config(b'ui', b'ssh') |
|
659 | 658 | remotecmd = ui.config(b'ui', b'remotecmd') |
|
660 | 659 | sshaddenv = dict(ui.configitems(b'sshenv')) |
|
661 | 660 | sshenv = procutil.shellenviron(sshaddenv) |
|
662 | 661 | remotepath = u.path or b'.' |
|
663 | 662 | |
|
664 | 663 | args = procutil.sshargs(sshcmd, u.host, u.user, u.port) |
|
665 | 664 | |
|
666 | 665 | if create: |
|
667 | 666 | # We /could/ do this, but only if the remote init command knows how to |
|
668 | 667 | # handle them. We don't yet make any assumptions about that. And without |
|
669 | 668 | # querying the remote, there's no way of knowing if the remote even |
|
670 | 669 | # supports said requested feature. |
|
671 | 670 | if createopts: |
|
672 | 671 | raise error.RepoError( |
|
673 | 672 | _( |
|
674 | 673 | b'cannot create remote SSH repositories ' |
|
675 | 674 | b'with extra options' |
|
676 | 675 | ) |
|
677 | 676 | ) |
|
678 | 677 | |
|
679 | 678 | cmd = b'%s %s %s' % ( |
|
680 | 679 | sshcmd, |
|
681 | 680 | args, |
|
682 | 681 | procutil.shellquote( |
|
683 | 682 | b'%s init %s' |
|
684 | 683 | % (_serverquote(remotecmd), _serverquote(remotepath)) |
|
685 | 684 | ), |
|
686 | 685 | ) |
|
687 | 686 | ui.debug(b'running %s\n' % cmd) |
|
688 | 687 | res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv) |
|
689 | 688 | if res != 0: |
|
690 | 689 | raise error.RepoError(_(b'could not create remote repo')) |
|
691 | 690 | |
|
692 | 691 | proc, stdin, stdout, stderr = _makeconnection( |
|
693 | 692 | ui, sshcmd, args, remotecmd, remotepath, sshenv |
|
694 | 693 | ) |
|
695 | 694 | |
|
696 | 695 | peer = makepeer(ui, path, proc, stdin, stdout, stderr) |
|
697 | 696 | |
|
698 | 697 | # Finally, if supported by the server, notify it about our own |
|
699 | 698 | # capabilities. |
|
700 | 699 | if b'protocaps' in peer.capabilities(): |
|
701 | 700 | try: |
|
702 | 701 | peer._call( |
|
703 | 702 | b"protocaps", caps=b' '.join(sorted(_clientcapabilities())) |
|
704 | 703 | ) |
|
705 | 704 | except IOError: |
|
706 | 705 | peer._cleanup() |
|
707 | 706 | raise error.RepoError(_(b'capability exchange failed')) |
|
708 | 707 | |
|
709 | 708 | return peer |
@@ -1,679 +1,677 b'' | |||
|
1 | 1 | # procutil.py - utility for managing processes and executable environment |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005 K. Thananchayan <thananck@yahoo.com> |
|
4 | 4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
6 | 6 | # |
|
7 | 7 | # This software may be used and distributed according to the terms of the |
|
8 | 8 | # GNU General Public License version 2 or any later version. |
|
9 | 9 | |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import contextlib |
|
13 | 13 | import errno |
|
14 | 14 | import io |
|
15 | 15 | import os |
|
16 | 16 | import signal |
|
17 | 17 | import subprocess |
|
18 | 18 | import sys |
|
19 | 19 | import threading |
|
20 | 20 | import time |
|
21 | 21 | |
|
22 | 22 | from ..i18n import _ |
|
23 | 23 | from ..pycompat import ( |
|
24 | 24 | getattr, |
|
25 | 25 | open, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | from .. import ( |
|
29 | 29 | encoding, |
|
30 | 30 | error, |
|
31 | 31 | policy, |
|
32 | 32 | pycompat, |
|
33 | 33 | ) |
|
34 | 34 | |
|
35 | 35 | # Import like this to keep import-checker happy |
|
36 | 36 | from ..utils import resourceutil |
|
37 | 37 | |
|
38 | 38 | osutil = policy.importmod('osutil') |
|
39 | 39 | |
|
40 | 40 | stderr = pycompat.stderr |
|
41 | 41 | stdin = pycompat.stdin |
|
42 | 42 | stdout = pycompat.stdout |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | def isatty(fp): |
|
46 | 46 | try: |
|
47 | 47 | return fp.isatty() |
|
48 | 48 | except AttributeError: |
|
49 | 49 | return False |
|
50 | 50 | |
|
51 | 51 | |
|
52 | 52 | # glibc determines buffering on first write to stdout - if we replace a TTY |
|
53 | 53 | # destined stdout with a pipe destined stdout (e.g. pager), we want line |
|
54 | 54 | # buffering (or unbuffered, on Windows) |
|
55 | 55 | if isatty(stdout): |
|
56 | 56 | if pycompat.iswindows: |
|
57 | 57 | # Windows doesn't support line buffering |
|
58 | 58 | stdout = os.fdopen(stdout.fileno(), 'wb', 0) |
|
59 | 59 | elif not pycompat.ispy3: |
|
60 | 60 | # on Python 3, stdout (sys.stdout.buffer) is already line buffered and |
|
61 | 61 | # buffering=1 is not handled in binary mode |
|
62 | 62 | stdout = os.fdopen(stdout.fileno(), 'wb', 1) |
|
63 | 63 | |
|
64 | 64 | if pycompat.iswindows: |
|
65 | 65 | from .. import windows as platform |
|
66 | 66 | |
|
67 | 67 | stdout = platform.winstdout(stdout) |
|
68 | 68 | else: |
|
69 | 69 | from .. import posix as platform |
|
70 | 70 | |
|
71 | 71 | findexe = platform.findexe |
|
72 | 72 | _gethgcmd = platform.gethgcmd |
|
73 | 73 | getuser = platform.getuser |
|
74 | 74 | getpid = os.getpid |
|
75 | 75 | hidewindow = platform.hidewindow |
|
76 | quotecommand = platform.quotecommand | |
|
77 | 76 | readpipe = platform.readpipe |
|
78 | 77 | setbinary = platform.setbinary |
|
79 | 78 | setsignalhandler = platform.setsignalhandler |
|
80 | 79 | shellquote = platform.shellquote |
|
81 | 80 | shellsplit = platform.shellsplit |
|
82 | 81 | spawndetached = platform.spawndetached |
|
83 | 82 | sshargs = platform.sshargs |
|
84 | 83 | testpid = platform.testpid |
|
85 | 84 | |
|
86 | 85 | try: |
|
87 | 86 | setprocname = osutil.setprocname |
|
88 | 87 | except AttributeError: |
|
89 | 88 | pass |
|
90 | 89 | try: |
|
91 | 90 | unblocksignal = osutil.unblocksignal |
|
92 | 91 | except AttributeError: |
|
93 | 92 | pass |
|
94 | 93 | |
|
95 | 94 | closefds = pycompat.isposix |
|
96 | 95 | |
|
97 | 96 | |
|
98 | 97 | def explainexit(code): |
|
99 | 98 | """return a message describing a subprocess status |
|
100 | 99 | (codes from kill are negative - not os.system/wait encoding)""" |
|
101 | 100 | if code >= 0: |
|
102 | 101 | return _(b"exited with status %d") % code |
|
103 | 102 | return _(b"killed by signal %d") % -code |
|
104 | 103 | |
|
105 | 104 | |
|
106 | 105 | class _pfile(object): |
|
107 | 106 | """File-like wrapper for a stream opened by subprocess.Popen()""" |
|
108 | 107 | |
|
109 | 108 | def __init__(self, proc, fp): |
|
110 | 109 | self._proc = proc |
|
111 | 110 | self._fp = fp |
|
112 | 111 | |
|
113 | 112 | def close(self): |
|
114 | 113 | # unlike os.popen(), this returns an integer in subprocess coding |
|
115 | 114 | self._fp.close() |
|
116 | 115 | return self._proc.wait() |
|
117 | 116 | |
|
118 | 117 | def __iter__(self): |
|
119 | 118 | return iter(self._fp) |
|
120 | 119 | |
|
121 | 120 | def __getattr__(self, attr): |
|
122 | 121 | return getattr(self._fp, attr) |
|
123 | 122 | |
|
124 | 123 | def __enter__(self): |
|
125 | 124 | return self |
|
126 | 125 | |
|
127 | 126 | def __exit__(self, exc_type, exc_value, exc_tb): |
|
128 | 127 | self.close() |
|
129 | 128 | |
|
130 | 129 | |
|
131 | 130 | def popen(cmd, mode=b'rb', bufsize=-1): |
|
132 | 131 | if mode == b'rb': |
|
133 | 132 | return _popenreader(cmd, bufsize) |
|
134 | 133 | elif mode == b'wb': |
|
135 | 134 | return _popenwriter(cmd, bufsize) |
|
136 | 135 | raise error.ProgrammingError(b'unsupported mode: %r' % mode) |
|
137 | 136 | |
|
138 | 137 | |
|
139 | 138 | def _popenreader(cmd, bufsize): |
|
140 | 139 | p = subprocess.Popen( |
|
141 | tonativestr(quotecommand(cmd)), | |

| 140 | tonativestr(cmd), |
|
142 | 141 | shell=True, |
|
143 | 142 | bufsize=bufsize, |
|
144 | 143 | close_fds=closefds, |
|
145 | 144 | stdout=subprocess.PIPE, |
|
146 | 145 | ) |
|
147 | 146 | return _pfile(p, p.stdout) |
|
148 | 147 | |
|
149 | 148 | |
|
150 | 149 | def _popenwriter(cmd, bufsize): |
|
151 | 150 | p = subprocess.Popen( |
|
152 | tonativestr(quotecommand(cmd)), | |

| 151 | tonativestr(cmd), |
|
153 | 152 | shell=True, |
|
154 | 153 | bufsize=bufsize, |
|
155 | 154 | close_fds=closefds, |
|
156 | 155 | stdin=subprocess.PIPE, |
|
157 | 156 | ) |
|
158 | 157 | return _pfile(p, p.stdin) |
|
159 | 158 | |
|
160 | 159 | |
|
161 | 160 | def popen2(cmd, env=None): |
|
162 | 161 | # Setting bufsize to -1 lets the system decide the buffer size. |
|
163 | 162 | # The default for bufsize is 0, meaning unbuffered. This leads to |
|
164 | 163 | # poor performance on Mac OS X: http://bugs.python.org/issue4194 |
|
165 | 164 | p = subprocess.Popen( |
|
166 | 165 | tonativestr(cmd), |
|
167 | 166 | shell=True, |
|
168 | 167 | bufsize=-1, |
|
169 | 168 | close_fds=closefds, |
|
170 | 169 | stdin=subprocess.PIPE, |
|
171 | 170 | stdout=subprocess.PIPE, |
|
172 | 171 | env=tonativeenv(env), |
|
173 | 172 | ) |
|
174 | 173 | return p.stdin, p.stdout |
|
175 | 174 | |
|
176 | 175 | |
|
177 | 176 | def popen3(cmd, env=None): |
|
178 | 177 | stdin, stdout, stderr, p = popen4(cmd, env) |
|
179 | 178 | return stdin, stdout, stderr |
|
180 | 179 | |
|
181 | 180 | |
|
182 | 181 | def popen4(cmd, env=None, bufsize=-1): |
|
183 | 182 | p = subprocess.Popen( |
|
184 | 183 | tonativestr(cmd), |
|
185 | 184 | shell=True, |
|
186 | 185 | bufsize=bufsize, |
|
187 | 186 | close_fds=closefds, |
|
188 | 187 | stdin=subprocess.PIPE, |
|
189 | 188 | stdout=subprocess.PIPE, |
|
190 | 189 | stderr=subprocess.PIPE, |
|
191 | 190 | env=tonativeenv(env), |
|
192 | 191 | ) |
|
193 | 192 | return p.stdin, p.stdout, p.stderr, p |
|
194 | 193 | |
|
195 | 194 | |
|
196 | 195 | def pipefilter(s, cmd): |
|
197 | 196 | '''filter string S through command CMD, returning its output''' |
|
198 | 197 | p = subprocess.Popen( |
|
199 | 198 | tonativestr(cmd), |
|
200 | 199 | shell=True, |
|
201 | 200 | close_fds=closefds, |
|
202 | 201 | stdin=subprocess.PIPE, |
|
203 | 202 | stdout=subprocess.PIPE, |
|
204 | 203 | ) |
|
205 | 204 | pout, perr = p.communicate(s) |
|
206 | 205 | return pout |
|
207 | 206 | |
|
208 | 207 | |
|
209 | 208 | def tempfilter(s, cmd): |
|
210 | 209 | '''filter string S through a pair of temporary files with CMD. |
|
211 | 210 | CMD is used as a template to create the real command to be run, |
|
212 | 211 | with the strings INFILE and OUTFILE replaced by the real names of |
|
213 | 212 | the temporary files generated.''' |
|
214 | 213 | inname, outname = None, None |
|
215 | 214 | try: |
|
216 | 215 | infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-') |
|
217 | 216 | fp = os.fdopen(infd, 'wb') |
|
218 | 217 | fp.write(s) |
|
219 | 218 | fp.close() |
|
220 | 219 | outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-') |
|
221 | 220 | os.close(outfd) |
|
222 | 221 | cmd = cmd.replace(b'INFILE', inname) |
|
223 | 222 | cmd = cmd.replace(b'OUTFILE', outname) |
|
224 | 223 | code = system(cmd) |
|
225 | 224 | if pycompat.sysplatform == b'OpenVMS' and code & 1: |
|
226 | 225 | code = 0 |
|
227 | 226 | if code: |
|
228 | 227 | raise error.Abort( |
|
229 | 228 | _(b"command '%s' failed: %s") % (cmd, explainexit(code)) |
|
230 | 229 | ) |
|
231 | 230 | with open(outname, b'rb') as fp: |
|
232 | 231 | return fp.read() |
|
233 | 232 | finally: |
|
234 | 233 | try: |
|
235 | 234 | if inname: |
|
236 | 235 | os.unlink(inname) |
|
237 | 236 | except OSError: |
|
238 | 237 | pass |
|
239 | 238 | try: |
|
240 | 239 | if outname: |
|
241 | 240 | os.unlink(outname) |
|
242 | 241 | except OSError: |
|
243 | 242 | pass |
|
244 | 243 | |
|
245 | 244 | |
|
246 | 245 | _filtertable = { |
|
247 | 246 | b'tempfile:': tempfilter, |
|
248 | 247 | b'pipe:': pipefilter, |
|
249 | 248 | } |
|
250 | 249 | |
|
251 | 250 | |
|
252 | 251 | def filter(s, cmd): |
|
253 | 252 | """filter a string through a command that transforms its input to its |
|
254 | 253 | output""" |
|
255 | 254 | for name, fn in pycompat.iteritems(_filtertable): |
|
256 | 255 | if cmd.startswith(name): |
|
257 | 256 | return fn(s, cmd[len(name) :].lstrip()) |
|
258 | 257 | return pipefilter(s, cmd) |
|
259 | 258 | |
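How the two filter flavours above are driven, as a standalone sketch using POSIX shell tools (sort, tr); the real pipefilter/tempfilter additionally go through Mercurial's encoding, error and cleanup handling:

import os
import subprocess
import tempfile

def pipe_filter(data, cmd):
    # "pipe:" style: feed data on stdin, collect stdout.
    return subprocess.run(cmd, shell=True, input=data,
                          stdout=subprocess.PIPE).stdout

def temp_filter(data, cmdtemplate):
    # "tempfile:" style: substitute INFILE/OUTFILE in the command template.
    infd, inname = tempfile.mkstemp(prefix='filter-in-')
    outfd, outname = tempfile.mkstemp(prefix='filter-out-')
    os.close(outfd)
    try:
        with os.fdopen(infd, 'wb') as fp:
            fp.write(data)
        cmd = cmdtemplate.replace('INFILE', inname).replace('OUTFILE', outname)
        subprocess.run(cmd, shell=True, check=True)
        with open(outname, 'rb') as fp:
            return fp.read()
    finally:
        os.unlink(inname)
        os.unlink(outname)

print(pipe_filter(b'b\na\n', 'sort'))                            # b'a\nb\n'
print(temp_filter(b'hello\n', 'tr a-z A-Z < INFILE > OUTFILE'))  # b'HELLO\n'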
|
260 | 259 | |
|
261 | 260 | _hgexecutable = None |
|
262 | 261 | |
|
263 | 262 | |
|
264 | 263 | def hgexecutable(): |
|
265 | 264 | """return location of the 'hg' executable. |
|
266 | 265 | |
|
267 | 266 | Defaults to $HG or 'hg' in the search path. |
|
268 | 267 | """ |
|
269 | 268 | if _hgexecutable is None: |
|
270 | 269 | hg = encoding.environ.get(b'HG') |
|
271 | 270 | mainmod = sys.modules['__main__'] |
|
272 | 271 | if hg: |
|
273 | 272 | _sethgexecutable(hg) |
|
274 | 273 | elif resourceutil.mainfrozen(): |
|
275 | 274 | if getattr(sys, 'frozen', None) == 'macosx_app': |
|
276 | 275 | # Env variable set by py2app |
|
277 | 276 | _sethgexecutable(encoding.environ[b'EXECUTABLEPATH']) |
|
278 | 277 | else: |
|
279 | 278 | _sethgexecutable(pycompat.sysexecutable) |
|
280 | 279 | elif ( |
|
281 | 280 | not pycompat.iswindows |
|
282 | 281 | and os.path.basename(getattr(mainmod, '__file__', '')) == 'hg' |
|
283 | 282 | ): |
|
284 | 283 | _sethgexecutable(pycompat.fsencode(mainmod.__file__)) |
|
285 | 284 | else: |
|
286 | 285 | _sethgexecutable( |
|
287 | 286 | findexe(b'hg') or os.path.basename(pycompat.sysargv[0]) |
|
288 | 287 | ) |
|
289 | 288 | return _hgexecutable |
|
290 | 289 | |
|
291 | 290 | |
|
292 | 291 | def _sethgexecutable(path): |
|
293 | 292 | """set location of the 'hg' executable""" |
|
294 | 293 | global _hgexecutable |
|
295 | 294 | _hgexecutable = path |
|
296 | 295 | |
|
297 | 296 | |
|
298 | 297 | def _testfileno(f, stdf): |
|
299 | 298 | fileno = getattr(f, 'fileno', None) |
|
300 | 299 | try: |
|
301 | 300 | return fileno and fileno() == stdf.fileno() |
|
302 | 301 | except io.UnsupportedOperation: |
|
303 | 302 | return False # fileno() raised UnsupportedOperation |
|
304 | 303 | |
|
305 | 304 | |
|
306 | 305 | def isstdin(f): |
|
307 | 306 | return _testfileno(f, sys.__stdin__) |
|
308 | 307 | |
|
309 | 308 | |
|
310 | 309 | def isstdout(f): |
|
311 | 310 | return _testfileno(f, sys.__stdout__) |
|
312 | 311 | |
|
313 | 312 | |
|
314 | 313 | def protectstdio(uin, uout): |
|
315 | 314 | """Duplicate streams and redirect original if (uin, uout) are stdio |
|
316 | 315 | |
|
317 | 316 | If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's |
|
318 | 317 | redirected to stderr so the output is still readable. |
|
319 | 318 | |
|
320 | 319 | Returns (fin, fout) which point to the original (uin, uout) fds, but |
|
321 | 320 | may be copies of (uin, uout). The returned streams can be considered

322 | 321 | "owned" in that print(), exec(), etc. never reach them.
|
323 | 322 | """ |
|
324 | 323 | uout.flush() |
|
325 | 324 | fin, fout = uin, uout |
|
326 | 325 | if _testfileno(uin, stdin): |
|
327 | 326 | newfd = os.dup(uin.fileno()) |
|
328 | 327 | nullfd = os.open(os.devnull, os.O_RDONLY) |
|
329 | 328 | os.dup2(nullfd, uin.fileno()) |
|
330 | 329 | os.close(nullfd) |
|
331 | 330 | fin = os.fdopen(newfd, 'rb') |
|
332 | 331 | if _testfileno(uout, stdout): |
|
333 | 332 | newfd = os.dup(uout.fileno()) |
|
334 | 333 | os.dup2(stderr.fileno(), uout.fileno()) |
|
335 | 334 | fout = os.fdopen(newfd, 'wb') |
|
336 | 335 | return fin, fout |
|
337 | 336 | |
|
338 | 337 | |
|
339 | 338 | def restorestdio(uin, uout, fin, fout): |
|
340 | 339 | """Restore (uin, uout) streams from possibly duplicated (fin, fout)""" |
|
341 | 340 | uout.flush() |
|
342 | 341 | for f, uif in [(fin, uin), (fout, uout)]: |
|
343 | 342 | if f is not uif: |
|
344 | 343 | os.dup2(f.fileno(), uif.fileno()) |
|
345 | 344 | f.close() |
|
346 | 345 | |
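A sketch of how these two helpers are meant to be paired, shielding the real stdio while a request is served; serve_request is a hypothetical handler, not part of this module:

    from mercurial.utils import procutil

    def serve(uin, uout):
        # duplicate and redirect stdio so children and stray print() calls
        # cannot corrupt the protocol stream
        fin, fout = procutil.protectstdio(uin, uout)
        try:
            serve_request(fin, fout)  # hypothetical handler
        finally:
            procutil.restorestdio(uin, uout, fin, fout)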
|
347 | 346 | |
|
348 | 347 | def shellenviron(environ=None): |
|
349 | 348 | """return environ with optional override, useful for shelling out""" |
|
350 | 349 | |
|
351 | 350 | def py2shell(val): |
|
352 | 351 | """convert python object into string that is useful to shell""" |
|
353 | 352 | if val is None or val is False: |
|
354 | 353 | return b'0' |
|
355 | 354 | if val is True: |
|
356 | 355 | return b'1' |
|
357 | 356 | return pycompat.bytestr(val) |
|
358 | 357 | |
|
359 | 358 | env = dict(encoding.environ) |
|
360 | 359 | if environ: |
|
361 | 360 | env.update((k, py2shell(v)) for k, v in pycompat.iteritems(environ)) |
|
362 | 361 | env[b'HG'] = hgexecutable() |
|
363 | 362 | return env |
|
364 | 363 | |
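A small sketch of what py2shell() does to the override dict; the hook variable names are illustrative only:

    from mercurial.utils import procutil

    env = procutil.shellenviron({b'HG_DRYRUN': True, b'HG_NODE': None})
    # booleans/None are flattened for shell tests:
    #   env[b'HG_DRYRUN'] == b'1', env[b'HG_NODE'] == b'0'
    # and HG always points at the running executable:
    #   env[b'HG'] == procutil.hgexecutable()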
|
365 | 364 | |
|
366 | 365 | if pycompat.iswindows: |
|
367 | 366 | |
|
368 | 367 | def shelltonative(cmd, env): |
|
369 | 368 | return platform.shelltocmdexe( # pytype: disable=module-attr |
|
370 | 369 | cmd, shellenviron(env) |
|
371 | 370 | ) |
|
372 | 371 | |
|
373 | 372 | tonativestr = encoding.strfromlocal |
|
374 | 373 | else: |
|
375 | 374 | |
|
376 | 375 | def shelltonative(cmd, env): |
|
377 | 376 | return cmd |
|
378 | 377 | |
|
379 | 378 | tonativestr = pycompat.identity |
|
380 | 379 | |
|
381 | 380 | |
|
382 | 381 | def tonativeenv(env): |
|
383 | 382 | '''convert the environment from bytes to strings suitable for Popen(), etc. |
|
384 | 383 | ''' |
|
385 | 384 | return pycompat.rapply(tonativestr, env) |
|
386 | 385 | |
|
387 | 386 | |
|
388 | 387 | def system(cmd, environ=None, cwd=None, out=None): |
|
389 | 388 | '''enhanced shell command execution. |
|
390 | 389 | run with environment maybe modified, maybe in different dir. |
|
391 | 390 | |
|
392 | 391 | if out is specified, it is assumed to be a file-like object that has a |
|
393 | 392 | write() method. stdout and stderr will be redirected to out.''' |
|
394 | 393 | try: |
|
395 | 394 | stdout.flush() |
|
396 | 395 | except Exception: |
|
397 | 396 | pass |
|
398 | cmd = quotecommand(cmd) | |
|
399 | 397 | env = shellenviron(environ) |
|
400 | 398 | if out is None or isstdout(out): |
|
401 | 399 | rc = subprocess.call( |
|
402 | 400 | tonativestr(cmd), |
|
403 | 401 | shell=True, |
|
404 | 402 | close_fds=closefds, |
|
405 | 403 | env=tonativeenv(env), |
|
406 | 404 | cwd=pycompat.rapply(tonativestr, cwd), |
|
407 | 405 | ) |
|
408 | 406 | else: |
|
409 | 407 | proc = subprocess.Popen( |
|
410 | 408 | tonativestr(cmd), |
|
411 | 409 | shell=True, |
|
412 | 410 | close_fds=closefds, |
|
413 | 411 | env=tonativeenv(env), |
|
414 | 412 | cwd=pycompat.rapply(tonativestr, cwd), |
|
415 | 413 | stdout=subprocess.PIPE, |
|
416 | 414 | stderr=subprocess.STDOUT, |
|
417 | 415 | ) |
|
418 | 416 | for line in iter(proc.stdout.readline, b''): |
|
419 | 417 | out.write(line) |
|
420 | 418 | proc.wait() |
|
421 | 419 | rc = proc.returncode |
|
422 | 420 | if pycompat.sysplatform == b'OpenVMS' and rc & 1: |
|
423 | 421 | rc = 0 |
|
424 | 422 | return rc |
|
425 | 423 | |
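A hedged usage sketch, assuming a POSIX shell where echo is available; when out is a plain file-like object the command's output is read back line by line through a pipe:

    from mercurial import util
    from mercurial.utils import procutil

    buf = util.stringio()
    rc = procutil.system(b'echo hi', out=buf)
    # rc == 0 and buf.getvalue() holds b'hi\n' on a typical POSIX shell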
|
426 | 424 | |
|
427 | 425 | _is_gui = None |
|
428 | 426 | |
|
429 | 427 | |
|
430 | 428 | def _gui(): |
|
431 | 429 | '''Are we running in a GUI?''' |
|
432 | 430 | if pycompat.isdarwin: |
|
433 | 431 | if b'SSH_CONNECTION' in encoding.environ: |
|
434 | 432 | # handle SSH access to a box where the user is logged in |
|
435 | 433 | return False |
|
436 | 434 | elif getattr(osutil, 'isgui', None): |
|
437 | 435 | # check if a CoreGraphics session is available |
|
438 | 436 | return osutil.isgui() |
|
439 | 437 | else: |
|
440 | 438 | # pure build; use a safe default |
|
441 | 439 | return True |
|
442 | 440 | else: |
|
443 | 441 | return pycompat.iswindows or encoding.environ.get(b"DISPLAY") |
|
444 | 442 | |
|
445 | 443 | |
|
446 | 444 | def gui(): |
|
447 | 445 | global _is_gui |
|
448 | 446 | if _is_gui is None: |
|
449 | 447 | _is_gui = _gui() |
|
450 | 448 | return _is_gui |
|
451 | 449 | |
|
452 | 450 | |
|
453 | 451 | def hgcmd(): |
|
454 | 452 | """Return the command used to execute current hg |
|
455 | 453 | |
|
456 | 454 | This is different from hgexecutable() because on Windows we want

457 | 455 | to avoid things like batch files that open new shell windows, so we

458 | 456 | return either the Python invocation or the current executable.
|
459 | 457 | """ |
|
460 | 458 | if resourceutil.mainfrozen(): |
|
461 | 459 | if getattr(sys, 'frozen', None) == 'macosx_app': |
|
462 | 460 | # Env variable set by py2app |
|
463 | 461 | return [encoding.environ[b'EXECUTABLEPATH']] |
|
464 | 462 | else: |
|
465 | 463 | return [pycompat.sysexecutable] |
|
466 | 464 | return _gethgcmd() |
|
467 | 465 | |
|
468 | 466 | |
|
469 | 467 | def rundetached(args, condfn): |
|
470 | 468 | """Execute the argument list in a detached process. |
|
471 | 469 | |
|
472 | 470 | condfn is a callable which is called repeatedly and should return |
|
473 | 471 | True once the child process is known to have started successfully. |
|
474 | 472 | At this point, the child process PID is returned. If the child |
|
475 | 473 | process fails to start or finishes before condfn() evaluates to |
|
476 | 474 | True, return -1. |
|
477 | 475 | """ |
|
478 | 476 | # Windows case is easier because the child process is either |
|
479 | 477 | # successfully starting and validating the condition or exiting |
|
480 | 478 | # on failure. We just poll on its PID. On Unix, if the child |
|
481 | 479 | # process fails to start, it will be left in a zombie state until |
|
482 | 480 | # the parent waits on it, which we cannot do since we expect a long
|
483 | 481 | # running process on success. Instead we listen for SIGCHLD telling |
|
484 | 482 | # us our child process terminated. |
|
485 | 483 | terminated = set() |
|
486 | 484 | |
|
487 | 485 | def handler(signum, frame): |
|
488 | 486 | terminated.add(os.wait()) |
|
489 | 487 | |
|
490 | 488 | prevhandler = None |
|
491 | 489 | SIGCHLD = getattr(signal, 'SIGCHLD', None) |
|
492 | 490 | if SIGCHLD is not None: |
|
493 | 491 | prevhandler = signal.signal(SIGCHLD, handler) |
|
494 | 492 | try: |
|
495 | 493 | pid = spawndetached(args) |
|
496 | 494 | while not condfn(): |
|
497 | 495 | if (pid in terminated or not testpid(pid)) and not condfn(): |
|
498 | 496 | return -1 |
|
499 | 497 | time.sleep(0.1) |
|
500 | 498 | return pid |
|
501 | 499 | finally: |
|
502 | 500 | if prevhandler is not None: |
|
503 | 501 | signal.signal(signal.SIGCHLD, prevhandler) |
|
504 | 502 | |
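A sketch of how rundetached() is typically driven: spawn a daemon and poll a condition until the child is known to be up; the pid file name below is illustrative only:

    import os
    from mercurial.utils import procutil

    args = [procutil.hgexecutable(), b'serve', b'--daemon', b'--pid-file', b'hg.pid']
    pid = procutil.rundetached(args, lambda: os.path.exists(b'hg.pid'))
    if pid < 0:
        raise RuntimeError('child exited before the pid file appeared')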
|
505 | 503 | |
|
506 | 504 | @contextlib.contextmanager |
|
507 | 505 | def uninterruptible(warn): |
|
508 | 506 | """Inhibit SIGINT handling on a region of code. |
|
509 | 507 | |
|
510 | 508 | Note that if this is called in a non-main thread, it turns into a no-op. |
|
511 | 509 | |
|
512 | 510 | Args: |
|
513 | 511 | warn: A callable which takes no arguments, and returns True if the |
|
514 | 512 | previous signal handling should be restored. |
|
515 | 513 | """ |
|
516 | 514 | |
|
517 | 515 | oldsiginthandler = [signal.getsignal(signal.SIGINT)] |
|
518 | 516 | shouldbail = [] |
|
519 | 517 | |
|
520 | 518 | def disabledsiginthandler(*args): |
|
521 | 519 | if warn(): |
|
522 | 520 | signal.signal(signal.SIGINT, oldsiginthandler[0]) |
|
523 | 521 | del oldsiginthandler[0] |
|
524 | 522 | shouldbail.append(True) |
|
525 | 523 | |
|
526 | 524 | try: |
|
527 | 525 | try: |
|
528 | 526 | signal.signal(signal.SIGINT, disabledsiginthandler) |
|
529 | 527 | except ValueError: |
|
530 | 528 | # wrong thread, oh well, we tried |
|
531 | 529 | del oldsiginthandler[0] |
|
532 | 530 | yield |
|
533 | 531 | finally: |
|
534 | 532 | if oldsiginthandler: |
|
535 | 533 | signal.signal(signal.SIGINT, oldsiginthandler[0]) |
|
536 | 534 | if shouldbail: |
|
537 | 535 | raise KeyboardInterrupt |
|
538 | 536 | |
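A sketch of the intended use: shield a non-reentrant update from Ctrl-C and only warn the user; do_critical_update is hypothetical:

    from mercurial.utils import procutil

    def _warn():
        print('interrupt deferred until the critical section finishes')
        # return True instead to restore the old handler and raise
        # KeyboardInterrupt once the block ends
        return False

    with procutil.uninterruptible(_warn):
        do_critical_update()  # hypothetical non-reentrant operation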
|
539 | 537 | |
|
540 | 538 | if pycompat.iswindows: |
|
541 | 539 | # no fork on Windows, but we can create a detached process |
|
542 | 540 | # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx |
|
543 | 541 | # No stdlib constant exists for this value |
|
544 | 542 | DETACHED_PROCESS = 0x00000008 |
|
545 | 543 | # The following creation flags might create a console GUI window.

546 | 544 | # Using subprocess.CREATE_NEW_CONSOLE might help.
|
547 | 545 | # See https://phab.mercurial-scm.org/D1701 for discussion |
|
548 | 546 | _creationflags = ( |
|
549 | 547 | DETACHED_PROCESS |
|
550 | 548 | | subprocess.CREATE_NEW_PROCESS_GROUP # pytype: disable=module-attr |
|
551 | 549 | ) |
|
552 | 550 | |
|
553 | 551 | def runbgcommand( |
|
554 | 552 | script, |
|
555 | 553 | env, |
|
556 | 554 | shell=False, |
|
557 | 555 | stdout=None, |
|
558 | 556 | stderr=None, |
|
559 | 557 | ensurestart=True, |
|
560 | 558 | record_wait=None, |
|
561 | 559 | ): |
|
562 | 560 | '''Spawn a command without waiting for it to finish.''' |
|
563 | 561 | # we can't use close_fds *and* redirect stdin. I'm not sure that we |
|
564 | 562 | # need to because the detached process has no console connection. |
|
565 | 563 | p = subprocess.Popen( |
|
566 | 564 | tonativestr(script), |
|
567 | 565 | shell=shell, |
|
568 | 566 | env=tonativeenv(env), |
|
569 | 567 | close_fds=True, |
|
570 | 568 | creationflags=_creationflags, |
|
571 | 569 | stdout=stdout, |
|
572 | 570 | stderr=stderr, |
|
573 | 571 | ) |
|
574 | 572 | if record_wait is not None: |
|
575 | 573 | record_wait(p.wait) |
|
576 | 574 | |
|
577 | 575 | |
|
578 | 576 | else: |
|
579 | 577 | |
|
580 | 578 | def runbgcommand( |
|
581 | 579 | cmd, |
|
582 | 580 | env, |
|
583 | 581 | shell=False, |
|
584 | 582 | stdout=None, |
|
585 | 583 | stderr=None, |
|
586 | 584 | ensurestart=True, |
|
587 | 585 | record_wait=None, |
|
588 | 586 | ): |
|
589 | 587 | '''Spawn a command without waiting for it to finish. |
|
590 | 588 | |
|
591 | 589 | |
|
592 | 590 | When `record_wait` is not None, the spawned process will not be fully |
|
593 | 591 | detached and the `record_wait` argument will be called with the

594 | 592 | `Subprocess.wait` function for the spawned process. This is mostly

595 | 593 | useful for developers who need to make sure the spawned process

596 | 594 | has finished before a certain point. (e.g. when writing tests)'''
|
597 | 595 | if pycompat.isdarwin: |
|
598 | 596 | # avoid crash in CoreFoundation in case another thread |
|
599 | 597 | # calls gui() while we're calling fork(). |
|
600 | 598 | gui() |
|
601 | 599 | |
|
602 | 600 | # double-fork to completely detach from the parent process |
|
603 | 601 | # based on http://code.activestate.com/recipes/278731 |
|
604 | 602 | if record_wait is None: |
|
605 | 603 | pid = os.fork() |
|
606 | 604 | if pid: |
|
607 | 605 | if not ensurestart: |
|
608 | 606 | # Even though we're not waiting on the child process, |
|
609 | 607 | # we still must call waitpid() on it at some point so |
|
610 | 608 | # it's not a zombie/defunct. This is especially relevant for |
|
611 | 609 | # chg since the parent process won't die anytime soon. |
|
612 | 610 | # We use a thread to make the overhead tiny. |
|
613 | 611 | def _do_wait(): |
|
614 | 612 | os.waitpid(pid, 0) |
|
615 | 613 | |
|
616 | 614 | threading.Thread(target=_do_wait, daemon=True).start() |
|
617 | 615 | return |
|
618 | 616 | # Parent process |
|
619 | 617 | (_pid, status) = os.waitpid(pid, 0) |
|
620 | 618 | if os.WIFEXITED(status): |
|
621 | 619 | returncode = os.WEXITSTATUS(status) |
|
622 | 620 | else: |
|
623 | 621 | returncode = -(os.WTERMSIG(status)) |
|
624 | 622 | if returncode != 0: |
|
625 | 623 | # The child process's return code is 0 on success, an errno |
|
626 | 624 | # value on failure, or 255 if we don't have a valid errno |
|
627 | 625 | # value. |
|
628 | 626 | # |
|
629 | 627 | # (It would be slightly nicer to return the full exception info |
|
630 | 628 | # over a pipe as the subprocess module does. For now it |
|
631 | 629 | # doesn't seem worth adding that complexity here, though.) |
|
632 | 630 | if returncode == 255: |
|
633 | 631 | returncode = errno.EINVAL |
|
634 | 632 | raise OSError( |
|
635 | 633 | returncode, |
|
636 | 634 | b'error running %r: %s' |
|
637 | 635 | % (cmd, os.strerror(returncode)), |
|
638 | 636 | ) |
|
639 | 637 | return |
|
640 | 638 | |
|
641 | 639 | returncode = 255 |
|
642 | 640 | try: |
|
643 | 641 | if record_wait is None: |
|
644 | 642 | # Start a new session |
|
645 | 643 | os.setsid() |
|
646 | 644 | |
|
647 | 645 | stdin = open(os.devnull, b'r') |
|
648 | 646 | if stdout is None: |
|
649 | 647 | stdout = open(os.devnull, b'w') |
|
650 | 648 | if stderr is None: |
|
651 | 649 | stderr = open(os.devnull, b'w') |
|
652 | 650 | |
|
653 | 651 | # connect stdin to devnull to make sure the subprocess can't |
|
654 | 652 | # muck up that stream for mercurial. |
|
655 | 653 | p = subprocess.Popen( |
|
656 | 654 | cmd, |
|
657 | 655 | shell=shell, |
|
658 | 656 | env=env, |
|
659 | 657 | close_fds=True, |
|
660 | 658 | stdin=stdin, |
|
661 | 659 | stdout=stdout, |
|
662 | 660 | stderr=stderr, |
|
663 | 661 | ) |
|
664 | 662 | if record_wait is not None: |
|
665 | 663 | record_wait(p.wait) |
|
666 | 664 | returncode = 0 |
|
667 | 665 | except EnvironmentError as ex: |
|
668 | 666 | returncode = ex.errno & 0xFF |
|
669 | 667 | if returncode == 0: |
|
670 | 668 | # This shouldn't happen, but just in case make sure the |
|
671 | 669 | # return code is never 0 here. |
|
672 | 670 | returncode = 255 |
|
673 | 671 | except Exception: |
|
674 | 672 | returncode = 255 |
|
675 | 673 | finally: |
|
676 | 674 | # mission accomplished, this child needs to exit and not |
|
677 | 675 | # continue the hg process here. |
|
678 | 676 | if record_wait is None: |
|
679 | 677 | os._exit(returncode) |
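A hedged sketch of the POSIX path above: fire-and-forget by default, or keep a wait handle around via record_wait when a test needs to join the child; assumes a POSIX host where sleep(1) exists:

    from mercurial.utils import procutil

    env = procutil.shellenviron()
    procutil.runbgcommand(b'sleep 5', env, shell=True)      # fully detached

    waits = []
    procutil.runbgcommand(b'sleep 1', env, shell=True, record_wait=waits.append)
    waits[0]()  # block until the (not fully detached) child exits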
@@ -1,680 +1,675 b'' | |||
|
1 | 1 | # windows.py - Windows utility function implementations for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import getpass |
|
12 | 12 | import msvcrt |
|
13 | 13 | import os |
|
14 | 14 | import re |
|
15 | 15 | import stat |
|
16 | 16 | import string |
|
17 | 17 | import sys |
|
18 | 18 | |
|
19 | 19 | from .i18n import _ |
|
20 | 20 | from .pycompat import getattr |
|
21 | 21 | from . import ( |
|
22 | 22 | encoding, |
|
23 | 23 | error, |
|
24 | 24 | policy, |
|
25 | 25 | pycompat, |
|
26 | 26 | win32, |
|
27 | 27 | ) |
|
28 | 28 | |
|
29 | 29 | try: |
|
30 | 30 | import _winreg as winreg # pytype: disable=import-error |
|
31 | 31 | |
|
32 | 32 | winreg.CloseKey |
|
33 | 33 | except ImportError: |
|
34 | 34 | # py2 only |
|
35 | 35 | import winreg # pytype: disable=import-error |
|
36 | 36 | |
|
37 | 37 | osutil = policy.importmod('osutil') |
|
38 | 38 | |
|
39 | 39 | getfsmountpoint = win32.getvolumename |
|
40 | 40 | getfstype = win32.getfstype |
|
41 | 41 | getuser = win32.getuser |
|
42 | 42 | hidewindow = win32.hidewindow |
|
43 | 43 | makedir = win32.makedir |
|
44 | 44 | nlinks = win32.nlinks |
|
45 | 45 | oslink = win32.oslink |
|
46 | 46 | samedevice = win32.samedevice |
|
47 | 47 | samefile = win32.samefile |
|
48 | 48 | setsignalhandler = win32.setsignalhandler |
|
49 | 49 | spawndetached = win32.spawndetached |
|
50 | 50 | split = os.path.split |
|
51 | 51 | testpid = win32.testpid |
|
52 | 52 | unlink = win32.unlink |
|
53 | 53 | |
|
54 | 54 | umask = 0o022 |
|
55 | 55 | |
|
56 | 56 | |
|
57 | 57 | class mixedfilemodewrapper(object): |
|
58 | 58 | """Wraps a file handle when it is opened in read/write mode. |
|
59 | 59 | |
|
60 | 60 | fopen() and fdopen() on Windows have a specific-to-Windows requirement |
|
61 | 61 | that files opened with mode r+, w+, or a+ make a call to a file positioning |
|
62 | 62 | function when switching between reads and writes. Without this extra call, |
|
63 | 63 | Python will raise a not very intuitive "IOError: [Errno 0] Error." |
|
64 | 64 | |
|
65 | 65 | This class wraps posixfile instances when the file is opened in read/write |
|
66 | 66 | mode and automatically adds checks or inserts appropriate file positioning |
|
67 | 67 | calls when necessary. |
|
68 | 68 | """ |
|
69 | 69 | |
|
70 | 70 | OPNONE = 0 |
|
71 | 71 | OPREAD = 1 |
|
72 | 72 | OPWRITE = 2 |
|
73 | 73 | |
|
74 | 74 | def __init__(self, fp): |
|
75 | 75 | object.__setattr__(self, '_fp', fp) |
|
76 | 76 | object.__setattr__(self, '_lastop', 0) |
|
77 | 77 | |
|
78 | 78 | def __enter__(self): |
|
79 | 79 | self._fp.__enter__() |
|
80 | 80 | return self |
|
81 | 81 | |
|
82 | 82 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
83 | 83 | self._fp.__exit__(exc_type, exc_val, exc_tb) |
|
84 | 84 | |
|
85 | 85 | def __getattr__(self, name): |
|
86 | 86 | return getattr(self._fp, name) |
|
87 | 87 | |
|
88 | 88 | def __setattr__(self, name, value): |
|
89 | 89 | return self._fp.__setattr__(name, value) |
|
90 | 90 | |
|
91 | 91 | def _noopseek(self): |
|
92 | 92 | self._fp.seek(0, os.SEEK_CUR) |
|
93 | 93 | |
|
94 | 94 | def seek(self, *args, **kwargs): |
|
95 | 95 | object.__setattr__(self, '_lastop', self.OPNONE) |
|
96 | 96 | return self._fp.seek(*args, **kwargs) |
|
97 | 97 | |
|
98 | 98 | def write(self, d): |
|
99 | 99 | if self._lastop == self.OPREAD: |
|
100 | 100 | self._noopseek() |
|
101 | 101 | |
|
102 | 102 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
103 | 103 | return self._fp.write(d) |
|
104 | 104 | |
|
105 | 105 | def writelines(self, *args, **kwargs): |
|
106 | 106 | if self._lastop == self.OPREAD: |
|
107 | 107 | self._noopseek()
|
108 | 108 | |
|
109 | 109 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
110 | 110 | return self._fp.writelines(*args, **kwargs) |
|
111 | 111 | |
|
112 | 112 | def read(self, *args, **kwargs): |
|
113 | 113 | if self._lastop == self.OPWRITE: |
|
114 | 114 | self._noopseek() |
|
115 | 115 | |
|
116 | 116 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
117 | 117 | return self._fp.read(*args, **kwargs) |
|
118 | 118 | |
|
119 | 119 | def readline(self, *args, **kwargs): |
|
120 | 120 | if self._lastop == self.OPWRITE: |
|
121 | 121 | self._noopseek() |
|
122 | 122 | |
|
123 | 123 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
124 | 124 | return self._fp.readline(*args, **kwargs) |
|
125 | 125 | |
|
126 | 126 | def readlines(self, *args, **kwargs): |
|
127 | 127 | if self._lastop == self.OPWRITE: |
|
128 | 128 | self._noopseek() |
|
129 | 129 | |
|
130 | 130 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
131 | 131 | return self._fp.readlines(*args, **kwargs) |
|
132 | 132 | |
|
133 | 133 | |
|
134 | 134 | class fdproxy(object): |
|
135 | 135 | """Wraps osutil.posixfile() to override the name attribute to reflect the |
|
136 | 136 | underlying file name. |
|
137 | 137 | """ |
|
138 | 138 | |
|
139 | 139 | def __init__(self, name, fp): |
|
140 | 140 | self.name = name |
|
141 | 141 | self._fp = fp |
|
142 | 142 | |
|
143 | 143 | def __enter__(self): |
|
144 | 144 | self._fp.__enter__() |
|
145 | 145 | # Return this wrapper for the context manager so that the name is |
|
146 | 146 | # still available. |
|
147 | 147 | return self |
|
148 | 148 | |
|
149 | 149 | def __exit__(self, exc_type, exc_value, traceback): |
|
150 | 150 | self._fp.__exit__(exc_type, exc_value, traceback) |
|
151 | 151 | |
|
152 | 152 | def __iter__(self): |
|
153 | 153 | return iter(self._fp) |
|
154 | 154 | |
|
155 | 155 | def __getattr__(self, name): |
|
156 | 156 | return getattr(self._fp, name) |
|
157 | 157 | |
|
158 | 158 | |
|
159 | 159 | def posixfile(name, mode=b'r', buffering=-1): |
|
160 | 160 | '''Open a file with even more POSIX-like semantics''' |
|
161 | 161 | try: |
|
162 | 162 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError |
|
163 | 163 | |
|
164 | 164 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the |
|
165 | 165 | # underlying file descriptor. |
|
166 | 166 | if pycompat.ispy3: |
|
167 | 167 | fp = fdproxy(name, fp) |
|
168 | 168 | |
|
169 | 169 | # The position when opening in append mode is implementation defined, so |
|
170 | 170 | # make it consistent with other platforms, which position at EOF. |
|
171 | 171 | if b'a' in mode: |
|
172 | 172 | fp.seek(0, os.SEEK_END) |
|
173 | 173 | |
|
174 | 174 | if b'+' in mode: |
|
175 | 175 | return mixedfilemodewrapper(fp) |
|
176 | 176 | |
|
177 | 177 | return fp |
|
178 | 178 | except WindowsError as err: |
|
179 | 179 | # convert to a friendlier exception |
|
180 | 180 | raise IOError( |
|
181 | 181 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) |
|
182 | 182 | ) |
|
183 | 183 | |
|
184 | 184 | |
|
185 | 185 | # may be wrapped by win32mbcs extension |
|
186 | 186 | listdir = osutil.listdir |
|
187 | 187 | |
|
188 | 188 | |
|
189 | 189 | class winstdout(object): |
|
190 | 190 | '''stdout on windows misbehaves if sent through a pipe''' |
|
191 | 191 | |
|
192 | 192 | def __init__(self, fp): |
|
193 | 193 | self.fp = fp |
|
194 | 194 | |
|
195 | 195 | def __getattr__(self, key): |
|
196 | 196 | return getattr(self.fp, key) |
|
197 | 197 | |
|
198 | 198 | def close(self): |
|
199 | 199 | try: |
|
200 | 200 | self.fp.close() |
|
201 | 201 | except IOError: |
|
202 | 202 | pass |
|
203 | 203 | |
|
204 | 204 | def write(self, s): |
|
205 | 205 | try: |
|
206 | 206 | # This is workaround for "Not enough space" error on |
|
207 | 207 | # writing large size of data to console. |
|
208 | 208 | limit = 16000 |
|
209 | 209 | l = len(s) |
|
210 | 210 | start = 0 |
|
211 | 211 | self.softspace = 0 |
|
212 | 212 | while start < l: |
|
213 | 213 | end = start + limit |
|
214 | 214 | self.fp.write(s[start:end]) |
|
215 | 215 | start = end |
|
216 | 216 | except IOError as inst: |
|
217 | 217 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): |
|
218 | 218 | raise |
|
219 | 219 | self.close() |
|
220 | 220 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
221 | 221 | |
|
222 | 222 | def flush(self): |
|
223 | 223 | try: |
|
224 | 224 | return self.fp.flush() |
|
225 | 225 | except IOError as inst: |
|
226 | 226 | if not win32.lasterrorwaspipeerror(inst): |
|
227 | 227 | raise |
|
228 | 228 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
229 | 229 | |
|
230 | 230 | |
|
231 | 231 | def openhardlinks(): |
|
232 | 232 | return True |
|
233 | 233 | |
|
234 | 234 | |
|
235 | 235 | def parsepatchoutput(output_line): |
|
236 | 236 | """parses the output produced by patch and returns the filename""" |
|
237 | 237 | pf = output_line[14:] |
|
238 | 238 | if pf[0] == b'`': |
|
239 | 239 | pf = pf[1:-1] # Remove the quotes |
|
240 | 240 | return pf |
|
241 | 241 | |
|
242 | 242 | |
|
243 | 243 | def sshargs(sshcmd, host, user, port): |
|
244 | 244 | '''Build argument list for ssh or Plink''' |
|
245 | 245 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' |
|
246 | 246 | args = user and (b"%s@%s" % (user, host)) or host |
|
247 | 247 | if args.startswith(b'-') or args.startswith(b'/'): |
|
248 | 248 | raise error.Abort( |
|
249 | 249 | _(b'illegal ssh hostname or username starting with - or /: %s') |
|
250 | 250 | % args |
|
251 | 251 | ) |
|
252 | 252 | args = shellquote(args) |
|
253 | 253 | if port: |
|
254 | 254 | args = b'%s %s %s' % (pflag, shellquote(port), args) |
|
255 | 255 | return args |
|
256 | 256 | |
|
257 | 257 | |
|
258 | 258 | def setflags(f, l, x): |
|
259 | 259 | pass |
|
260 | 260 | |
|
261 | 261 | |
|
262 | 262 | def copymode(src, dst, mode=None, enforcewritable=False): |
|
263 | 263 | pass |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def checkexec(path): |
|
267 | 267 | return False |
|
268 | 268 | |
|
269 | 269 | |
|
270 | 270 | def checklink(path): |
|
271 | 271 | return False |
|
272 | 272 | |
|
273 | 273 | |
|
274 | 274 | def setbinary(fd): |
|
275 | 275 | # When run without console, pipes may expose invalid |
|
276 | 276 | # fileno(), usually set to -1. |
|
277 | 277 | fno = getattr(fd, 'fileno', None) |
|
278 | 278 | if fno is not None and fno() >= 0: |
|
279 | 279 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr |
|
280 | 280 | |
|
281 | 281 | |
|
282 | 282 | def pconvert(path): |
|
283 | 283 | return path.replace(pycompat.ossep, b'/') |
|
284 | 284 | |
|
285 | 285 | |
|
286 | 286 | def localpath(path): |
|
287 | 287 | return path.replace(b'/', b'\\') |
|
288 | 288 | |
|
289 | 289 | |
|
290 | 290 | def normpath(path): |
|
291 | 291 | return pconvert(os.path.normpath(path)) |
|
292 | 292 | |
|
293 | 293 | |
|
294 | 294 | def normcase(path): |
|
295 | 295 | return encoding.upper(path) # NTFS compares via upper() |
|
296 | 296 | |
|
297 | 297 | |
|
298 | 298 | # see posix.py for definitions |
|
299 | 299 | normcasespec = encoding.normcasespecs.upper |
|
300 | 300 | normcasefallback = encoding.upperfallback |
|
301 | 301 | |
|
302 | 302 | |
|
303 | 303 | def samestat(s1, s2): |
|
304 | 304 | return False |
|
305 | 305 | |
|
306 | 306 | |
|
307 | 307 | def shelltocmdexe(path, env): |
|
308 | 308 | r"""Convert shell variables in the form $var and ${var} inside ``path`` |
|
309 | 309 | to %var% form. Existing Windows style variables are left unchanged. |
|
310 | 310 | |
|
311 | 311 | The variables are limited to the given environment. Unknown variables are |
|
312 | 312 | left unchanged. |
|
313 | 313 | |
|
314 | 314 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} |
|
315 | 315 | >>> # Only valid values are expanded |
|
316 | 316 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', |
|
317 | 317 | ... e) |
|
318 | 318 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' |
|
319 | 319 | >>> # Single quote prevents expansion, as does \$ escaping |
|
320 | 320 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) |
|
321 | 321 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' |
|
322 | 322 | >>> # $$ is not special. %% is not special either, but can be the end and |
|
323 | 323 | >>> # start of consecutive variables |
|
324 | 324 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) |
|
325 | 325 | 'cmd $$ %% %var1%%var2%' |
|
326 | 326 | >>> # No double substitution |
|
327 | 327 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) |
|
328 | 328 | '%var1% %var1%' |
|
329 | 329 | >>> # Tilde expansion |
|
330 | 330 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) |
|
331 | 331 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' |
|
332 | 332 | """ |
|
333 | 333 | if not any(c in path for c in b"$'~"): |
|
334 | 334 | return path |
|
335 | 335 | |
|
336 | 336 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' |
|
337 | 337 | |
|
338 | 338 | res = b'' |
|
339 | 339 | index = 0 |
|
340 | 340 | pathlen = len(path) |
|
341 | 341 | while index < pathlen: |
|
342 | 342 | c = path[index : index + 1] |
|
343 | 343 | if c == b'\'': # no expansion within single quotes |
|
344 | 344 | path = path[index + 1 :] |
|
345 | 345 | pathlen = len(path) |
|
346 | 346 | try: |
|
347 | 347 | index = path.index(b'\'') |
|
348 | 348 | res += b'"' + path[:index] + b'"' |
|
349 | 349 | except ValueError: |
|
350 | 350 | res += c + path |
|
351 | 351 | index = pathlen - 1 |
|
352 | 352 | elif c == b'%': # variable |
|
353 | 353 | path = path[index + 1 :] |
|
354 | 354 | pathlen = len(path) |
|
355 | 355 | try: |
|
356 | 356 | index = path.index(b'%') |
|
357 | 357 | except ValueError: |
|
358 | 358 | res += b'%' + path |
|
359 | 359 | index = pathlen - 1 |
|
360 | 360 | else: |
|
361 | 361 | var = path[:index] |
|
362 | 362 | res += b'%' + var + b'%' |
|
363 | 363 | elif c == b'$': # variable |
|
364 | 364 | if path[index + 1 : index + 2] == b'{': |
|
365 | 365 | path = path[index + 2 :] |
|
366 | 366 | pathlen = len(path) |
|
367 | 367 | try: |
|
368 | 368 | index = path.index(b'}') |
|
369 | 369 | var = path[:index] |
|
370 | 370 | |
|
371 | 371 | # See below for why empty variables are handled specially |
|
372 | 372 | if env.get(var, b'') != b'': |
|
373 | 373 | res += b'%' + var + b'%' |
|
374 | 374 | else: |
|
375 | 375 | res += b'${' + var + b'}' |
|
376 | 376 | except ValueError: |
|
377 | 377 | res += b'${' + path |
|
378 | 378 | index = pathlen - 1 |
|
379 | 379 | else: |
|
380 | 380 | var = b'' |
|
381 | 381 | index += 1 |
|
382 | 382 | c = path[index : index + 1] |
|
383 | 383 | while c != b'' and c in varchars: |
|
384 | 384 | var += c |
|
385 | 385 | index += 1 |
|
386 | 386 | c = path[index : index + 1] |
|
387 | 387 | # Some variables (like HG_OLDNODE) may be defined, but have an |
|
388 | 388 | # empty value. Those need to be skipped because when spawning |
|
389 | 389 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty |
|
390 | 390 | # VAR, and that really confuses things like revset expressions. |
|
391 | 391 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it |
|
392 | 392 | # will substitute to an empty string, and everything is happy. |
|
393 | 393 | if env.get(var, b'') != b'': |
|
394 | 394 | res += b'%' + var + b'%' |
|
395 | 395 | else: |
|
396 | 396 | res += b'$' + var |
|
397 | 397 | |
|
398 | 398 | if c != b'': |
|
399 | 399 | index -= 1 |
|
400 | 400 | elif ( |
|
401 | 401 | c == b'~' |
|
402 | 402 | and index + 1 < pathlen |
|
403 | 403 | and path[index + 1 : index + 2] in (b'\\', b'/') |
|
404 | 404 | ): |
|
405 | 405 | res += b"%USERPROFILE%" |
|
406 | 406 | elif ( |
|
407 | 407 | c == b'\\' |
|
408 | 408 | and index + 1 < pathlen |
|
409 | 409 | and path[index + 1 : index + 2] in (b'$', b'~') |
|
410 | 410 | ): |
|
411 | 411 | # Skip '\', but only if it is escaping $ or ~ |
|
412 | 412 | res += path[index + 1 : index + 2] |
|
413 | 413 | index += 1 |
|
414 | 414 | else: |
|
415 | 415 | res += c |
|
416 | 416 | |
|
417 | 417 | index += 1 |
|
418 | 418 | return res |
|
419 | 419 | |
|
420 | 420 | |
|
421 | 421 | # A sequence of backslashes is special iff it precedes a double quote: |
|
422 | 422 | # - if there's an even number of backslashes, the double quote is not |
|
423 | 423 | # quoted (i.e. it ends the quoted region) |
|
424 | 424 | # - if there's an odd number of backslashes, the double quote is quoted |
|
425 | 425 | # - in both cases, every pair of backslashes is unquoted into a single |
|
426 | 426 | # backslash |
|
427 | 427 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) |
|
428 | 428 | # So, to quote a string, we must surround it in double quotes, double |
|
429 | 429 | # the number of backslashes that precede double quotes and add another |
|
430 | 430 | # backslash before every double quote (being careful with the double |
|
431 | 431 | # quote we've appended to the end) |
|
432 | 432 | _quotere = None |
|
433 | 433 | _needsshellquote = None |
|
434 | 434 | |
|
435 | 435 | |
|
436 | 436 | def shellquote(s): |
|
437 | 437 | r""" |
|
438 | 438 | >>> shellquote(br'C:\Users\xyz') |
|
439 | 439 | '"C:\\Users\\xyz"' |
|
440 | 440 | >>> shellquote(br'C:\Users\xyz/mixed') |
|
441 | 441 | '"C:\\Users\\xyz/mixed"' |
|
442 | 442 | >>> # Would be safe not to quote too, since it is all double backslashes |
|
443 | 443 | >>> shellquote(br'C:\\Users\\xyz') |
|
444 | 444 | '"C:\\\\Users\\\\xyz"' |
|
445 | 445 | >>> # But this must be quoted |
|
446 | 446 | >>> shellquote(br'C:\\Users\\xyz/abc') |
|
447 | 447 | '"C:\\\\Users\\\\xyz/abc"' |
|
448 | 448 | """ |
|
449 | 449 | global _quotere |
|
450 | 450 | if _quotere is None: |
|
451 | 451 | _quotere = re.compile(br'(\\*)("|\\$)') |
|
452 | 452 | global _needsshellquote |
|
453 | 453 | if _needsshellquote is None: |
|
454 | 454 | # ":" is also treated as "safe character", because it is used as a part |
|
455 | 455 | # of path name on Windows. "\" is also part of a path name, but isn't |
|
456 | 456 | # safe because shlex.split() (kind of) treats it as an escape char and |
|
457 | 457 | # drops it. It will leave the next character, even if it is another |
|
458 | 458 | # "\". |
|
459 | 459 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search |
|
460 | 460 | if s and not _needsshellquote(s) and not _quotere.search(s): |
|
461 | 461 | # "s" shouldn't have to be quoted |
|
462 | 462 | return s |
|
463 | 463 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) |
|
464 | 464 | |
|
465 | 465 | |
|
466 | 466 | def _unquote(s): |
|
467 | 467 | if s.startswith(b'"') and s.endswith(b'"'): |
|
468 | 468 | return s[1:-1] |
|
469 | 469 | return s |
|
470 | 470 | |
|
471 | 471 | |
|
472 | 472 | def shellsplit(s): |
|
473 | 473 | """Parse a command string in cmd.exe way (best-effort)""" |
|
474 | 474 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) |
|
475 | 475 | |
|
476 | 476 | |
|
477 | def quotecommand(cmd): | |
|
478 | """Build a command string suitable for os.popen* calls.""" | |
|
479 | return cmd | |
|
480 | ||
|
481 | ||
|
482 | 477 | # if you change this stub into a real check, please try to implement the |
|
483 | 478 | # username and groupname functions above, too. |
|
484 | 479 | def isowner(st): |
|
485 | 480 | return True |
|
486 | 481 | |
|
487 | 482 | |
|
488 | 483 | def findexe(command): |
|
489 | 484 | '''Find executable for command searching like cmd.exe does. |
|
490 | 485 | If command is a basename then PATH is searched for command. |
|
491 | 486 | PATH isn't searched if command is an absolute or relative path. |
|
492 | 487 | An extension from PATHEXT is found and added if not present. |
|
493 | 488 | If command isn't found None is returned.''' |
|
494 | 489 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') |
|
495 | 490 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] |
|
496 | 491 | if os.path.splitext(command)[1].lower() in pathexts: |
|
497 | 492 | pathexts = [b''] |
|
498 | 493 | |
|
499 | 494 | def findexisting(pathcommand): |
|
500 | 495 | """Will append extension (if needed) and return existing file""" |
|
501 | 496 | for ext in pathexts: |
|
502 | 497 | executable = pathcommand + ext |
|
503 | 498 | if os.path.exists(executable): |
|
504 | 499 | return executable |
|
505 | 500 | return None |
|
506 | 501 | |
|
507 | 502 | if pycompat.ossep in command: |
|
508 | 503 | return findexisting(command) |
|
509 | 504 | |
|
510 | 505 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
511 | 506 | executable = findexisting(os.path.join(path, command)) |
|
512 | 507 | if executable is not None: |
|
513 | 508 | return executable |
|
514 | 509 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
|
515 | 510 | |
|
516 | 511 | |
|
517 | 512 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
518 | 513 | |
|
519 | 514 | |
|
520 | 515 | def statfiles(files): |
|
521 | 516 | '''Stat each file in files. Yield each stat, or None if a file |
|
522 | 517 | does not exist or has a type we don't care about. |
|
523 | 518 | |
|
524 | 519 | Cluster and cache stat per directory to minimize number of OS stat calls.''' |
|
525 | 520 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
526 | 521 | getkind = stat.S_IFMT |
|
527 | 522 | for nf in files: |
|
528 | 523 | nf = normcase(nf) |
|
529 | 524 | dir, base = os.path.split(nf) |
|
530 | 525 | if not dir: |
|
531 | 526 | dir = b'.' |
|
532 | 527 | cache = dircache.get(dir, None) |
|
533 | 528 | if cache is None: |
|
534 | 529 | try: |
|
535 | 530 | dmap = { |
|
536 | 531 | normcase(n): s |
|
537 | 532 | for n, k, s in listdir(dir, True) |
|
538 | 533 | if getkind(s.st_mode) in _wantedkinds |
|
539 | 534 | } |
|
540 | 535 | except OSError as err: |
|
541 | 536 | # Python >= 2.5 returns ENOENT and adds winerror field |
|
542 | 537 | # EINVAL is raised if dir is not a directory. |
|
543 | 538 | if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR): |
|
544 | 539 | raise |
|
545 | 540 | dmap = {} |
|
546 | 541 | cache = dircache.setdefault(dir, dmap) |
|
547 | 542 | yield cache.get(base, None) |
|
548 | 543 | |
|
549 | 544 | |
|
550 | 545 | def username(uid=None): |
|
551 | 546 | """Return the name of the user with the given uid. |
|
552 | 547 | |
|
553 | 548 | If uid is None, return the name of the current user.""" |
|
554 | 549 | if not uid: |
|
555 | 550 | return pycompat.fsencode(getpass.getuser()) |
|
556 | 551 | return None |
|
557 | 552 | |
|
558 | 553 | |
|
559 | 554 | def groupname(gid=None): |
|
560 | 555 | """Return the name of the group with the given gid. |
|
561 | 556 | |
|
562 | 557 | If gid is None, return the name of the current group.""" |
|
563 | 558 | return None |
|
564 | 559 | |
|
565 | 560 | |
|
566 | 561 | def readlink(pathname): |
|
567 | 562 | return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname))) |
|
568 | 563 | |
|
569 | 564 | |
|
570 | 565 | def removedirs(name): |
|
571 | 566 | """special version of os.removedirs that does not remove symlinked |
|
572 | 567 | directories or junction points if they actually contain files""" |
|
573 | 568 | if listdir(name): |
|
574 | 569 | return |
|
575 | 570 | os.rmdir(name) |
|
576 | 571 | head, tail = os.path.split(name) |
|
577 | 572 | if not tail: |
|
578 | 573 | head, tail = os.path.split(head) |
|
579 | 574 | while head and tail: |
|
580 | 575 | try: |
|
581 | 576 | if listdir(head): |
|
582 | 577 | return |
|
583 | 578 | os.rmdir(head) |
|
584 | 579 | except (ValueError, OSError): |
|
585 | 580 | break |
|
586 | 581 | head, tail = os.path.split(head) |
|
587 | 582 | |
|
588 | 583 | |
|
589 | 584 | def rename(src, dst): |
|
590 | 585 | '''atomically rename file src to dst, replacing dst if it exists''' |
|
591 | 586 | try: |
|
592 | 587 | os.rename(src, dst) |
|
593 | 588 | except OSError as e: |
|
594 | 589 | if e.errno != errno.EEXIST: |
|
595 | 590 | raise |
|
596 | 591 | unlink(dst) |
|
597 | 592 | os.rename(src, dst) |
|
598 | 593 | |
|
599 | 594 | |
|
600 | 595 | def gethgcmd(): |
|
601 | 596 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] |
|
602 | 597 | |
|
603 | 598 | |
|
604 | 599 | def groupmembers(name): |
|
605 | 600 | # Don't support groups on Windows for now |
|
606 | 601 | raise KeyError |
|
607 | 602 | |
|
608 | 603 | |
|
609 | 604 | def isexec(f): |
|
610 | 605 | return False |
|
611 | 606 | |
|
612 | 607 | |
|
613 | 608 | class cachestat(object): |
|
614 | 609 | def __init__(self, path): |
|
615 | 610 | pass |
|
616 | 611 | |
|
617 | 612 | def cacheable(self): |
|
618 | 613 | return False |
|
619 | 614 | |
|
620 | 615 | |
|
621 | 616 | def lookupreg(key, valname=None, scope=None): |
|
622 | 617 | ''' Look up a key/value name in the Windows registry. |
|
623 | 618 | |
|
624 | 619 | valname: value name. If unspecified, the default value for the key |
|
625 | 620 | is used. |
|
626 | 621 | scope: optionally specify scope for registry lookup, this can be |
|
627 | 622 | a sequence of scopes to look up in order. Default (CURRENT_USER, |
|
628 | 623 | LOCAL_MACHINE). |
|
629 | 624 | ''' |
|
630 | 625 | if scope is None: |
|
631 | 626 | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) |
|
632 | 627 | elif not isinstance(scope, (list, tuple)): |
|
633 | 628 | scope = (scope,) |
|
634 | 629 | for s in scope: |
|
635 | 630 | try: |
|
636 | 631 | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: |
|
637 | 632 | name = valname and encoding.strfromlocal(valname) or valname |
|
638 | 633 | val = winreg.QueryValueEx(hkey, name)[0] |
|
639 | 634 | # never let a Unicode string escape into the wild |
|
640 | 635 | return encoding.unitolocal(val) |
|
641 | 636 | except EnvironmentError: |
|
642 | 637 | pass |
|
643 | 638 | |
|
644 | 639 | |
|
645 | 640 | expandglobs = True |
|
646 | 641 | |
|
647 | 642 | |
|
648 | 643 | def statislink(st): |
|
649 | 644 | '''check whether a stat result is a symlink''' |
|
650 | 645 | return False |
|
651 | 646 | |
|
652 | 647 | |
|
653 | 648 | def statisexec(st): |
|
654 | 649 | '''check whether a stat result is an executable file''' |
|
655 | 650 | return False |
|
656 | 651 | |
|
657 | 652 | |
|
658 | 653 | def poll(fds): |
|
659 | 654 | # see posix.py for description |
|
660 | 655 | raise NotImplementedError() |
|
661 | 656 | |
|
662 | 657 | |
|
663 | 658 | def readpipe(pipe): |
|
664 | 659 | """Read all available data from a pipe.""" |
|
665 | 660 | chunks = [] |
|
666 | 661 | while True: |
|
667 | 662 | size = win32.peekpipe(pipe) |
|
668 | 663 | if not size: |
|
669 | 664 | break |
|
670 | 665 | |
|
671 | 666 | s = pipe.read(size) |
|
672 | 667 | if not s: |
|
673 | 668 | break |
|
674 | 669 | chunks.append(s) |
|
675 | 670 | |
|
676 | 671 | return b''.join(chunks) |
|
677 | 672 | |
|
678 | 673 | |
|
679 | 674 | def bindunixsocket(sock, path): |
|
680 | 675 | raise NotImplementedError('unsupported platform') |